diff --git a/go.mod b/go.mod
index f90b857c..941ad0da 100644
--- a/go.mod
+++ b/go.mod
@@ -7,17 +7,17 @@ require (
 	github.com/Masterminds/semver/v3 v3.2.1
 	github.com/adhocore/gronx v1.6.5
 	github.com/atrox/haikunatorgo/v2 v2.0.1
-	github.com/caddyserver/certmagic v0.19.1
-	github.com/casbin/casbin/v2 v2.73.1
+	github.com/caddyserver/certmagic v0.19.2
+	github.com/casbin/casbin/v2 v2.77.1
 	github.com/datarhei/core-client-go/v16 v16.11.1-0.20230809104853-391c13f9d400
 	github.com/datarhei/gosrt v0.5.4
 	github.com/datarhei/joy4 v0.0.0-20230505074825-fde05957445a
 	github.com/fujiwara/shapeio v1.0.0
-	github.com/go-playground/validator/v10 v10.15.0
+	github.com/go-playground/validator/v10 v10.15.3
 	github.com/gobwas/glob v0.2.3
 	github.com/golang-jwt/jwt/v4 v4.5.0
-	github.com/google/gops v0.3.27
-	github.com/google/uuid v1.3.0
+	github.com/google/gops v0.3.28
+	github.com/google/uuid v1.3.1
 	github.com/hashicorp/go-hclog v1.5.0
 	github.com/hashicorp/raft v1.5.0
 	github.com/hashicorp/raft-boltdb/v2 v2.2.2
@@ -28,13 +28,13 @@ require (
 	github.com/lestrrat-go/strftime v1.0.6
 	github.com/lithammer/shortuuid/v4 v4.0.0
 	github.com/mattn/go-isatty v0.0.19
-	github.com/minio/minio-go/v7 v7.0.61
+	github.com/minio/minio-go/v7 v7.0.63
 	github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
 	github.com/prometheus/client_golang v1.16.0
 	github.com/shirou/gopsutil/v3 v3.23.7
 	github.com/stretchr/testify v1.8.4
-	github.com/swaggo/echo-swagger v1.4.0
-	github.com/swaggo/swag v1.16.1
+	github.com/swaggo/echo-swagger v1.4.1
+	github.com/swaggo/swag v1.16.2
 	github.com/vektah/gqlparser/v2 v2.5.8
 	github.com/xeipuuv/gojsonschema v1.2.0
 	go.etcd.io/bbolt v1.3.7
@@ -60,7 +60,8 @@ require (
 	github.com/dustin/go-humanize v1.0.1 // indirect
 	github.com/fatih/color v1.15.0 // indirect
 	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
-	github.com/go-ole/go-ole v1.2.6 // indirect
+	github.com/ghodss/yaml v1.0.0 // indirect
+	github.com/go-ole/go-ole v1.3.0 // indirect
 	github.com/go-openapi/jsonpointer v0.20.0 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/spec v0.20.9 // indirect
@@ -72,8 +73,8 @@ require (
 	github.com/gorilla/websocket v1.5.0 // indirect
 	github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
 	github.com/hashicorp/go-msgpack v0.5.5 // indirect
-	github.com/hashicorp/golang-lru v0.5.4 // indirect
-	github.com/hashicorp/golang-lru/v2 v2.0.4 // indirect
+	github.com/hashicorp/golang-lru v1.0.2 // indirect
+	github.com/hashicorp/golang-lru/v2 v2.0.6 // indirect
 	github.com/iancoleman/orderedmap v0.2.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
@@ -103,10 +104,10 @@ require (
 	github.com/shoenig/go-m1cpu v0.1.6 // indirect
 	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/swaggo/files/v2 v2.0.0 // indirect
-	github.com/tidwall/gjson v1.15.0 // indirect
+	github.com/tidwall/gjson v1.16.0 // indirect
 	github.com/tidwall/match v1.1.1 // indirect
 	github.com/tidwall/pretty v1.2.1 // indirect
-	github.com/tklauser/go-sysconf v0.3.11 // indirect
+	github.com/tklauser/go-sysconf v0.3.12 // indirect
 	github.com/tklauser/numcpus v0.6.1 // indirect
 	github.com/urfave/cli/v2 v2.25.5 // indirect
 	github.com/valyala/bytebufferpool v1.0.0 // indirect
@@ -124,5 +125,6 @@ require (
 	golang.org/x/tools v0.12.0 // indirect
 	google.golang.org/protobuf v1.31.0 // indirect
 	gopkg.in/ini.v1 v1.67.0 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
diff --git a/go.sum b/go.sum
index beb65314..28daff70 100644
--- a/go.sum
+++ b/go.sum
@@ -8,8 +8,6 @@ github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc
 github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
 github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/adhocore/gronx v1.6.5 h1:/pryEagBKz3WqUgpgvtL51eBN2rJLXowuW7rpS+jrew=
 github.com/adhocore/gronx v1.6.5/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg=
 github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
@@ -36,10 +34,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
 github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/caddyserver/certmagic v0.19.1 h1:4jyOYm2DHvQI8YM0sk6qm62Gl5XznHxiMBMWjMTlQkw=
-github.com/caddyserver/certmagic v0.19.1/go.mod h1:fsL01NomQ6N+kE2j37ZCnig2MFosG+MIO4ztnmG/zz8=
-github.com/casbin/casbin/v2 v2.73.1 h1:E7ncCA4EVAjJVSfYcfguQOYFbSJcWlLVBSqipLtAqxI=
-github.com/casbin/casbin/v2 v2.73.1/go.mod h1:mzGx0hYW9/ksOSpw3wNjk3NRAroq5VMFYUQ6G43iGPk=
+github.com/caddyserver/certmagic v0.19.2 h1:HZd1AKLx4592MalEGQS39DKs2ZOAJCEM/xYPMQ2/ui0=
+github.com/caddyserver/certmagic v0.19.2/go.mod h1:fsL01NomQ6N+kE2j37ZCnig2MFosG+MIO4ztnmG/zz8=
+github.com/casbin/casbin/v2 v2.77.1 h1:+H46VamJCTlmCPcb0N99Zaj4tSorfuvBh3v5lyGopeU=
+github.com/casbin/casbin/v2 v2.77.1/go.mod h1:mzGx0hYW9/ksOSpw3wNjk3NRAroq5VMFYUQ6G43iGPk=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -48,10 +46,6 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
 github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/datarhei/core-client-go/v16 v16.11.1-0.20230725151052-26252f73d23b h1:b8TWP11Tx9QwZ8lEPoyv9L2hfkH8YaUkAc0bGxRLYmg=
-github.com/datarhei/core-client-go/v16 v16.11.1-0.20230725151052-26252f73d23b/go.mod h1:3eKfwhPKoW7faTn+luShRVNMqcIskvlIKjRJ7ShjyL8=
-github.com/datarhei/core-client-go/v16 v16.11.1-0.20230809080523-222b4b4efd5a h1:24dPnxb6eFMzyX2BjP2iZn4q7SxVBN41v5OUDRpXGWY=
-github.com/datarhei/core-client-go/v16 v16.11.1-0.20230809080523-222b4b4efd5a/go.mod h1:3eKfwhPKoW7faTn+luShRVNMqcIskvlIKjRJ7ShjyL8=
 github.com/datarhei/core-client-go/v16 v16.11.1-0.20230809104853-391c13f9d400 h1:xk4R13HF2vdG3aXKQ3Bce7hBMDnjBGC+HU4daun4wJI=
 github.com/datarhei/core-client-go/v16 v16.11.1-0.20230809104853-391c13f9d400/go.mod h1:3eKfwhPKoW7faTn+luShRVNMqcIskvlIKjRJ7ShjyL8=
 github.com/datarhei/gosrt v0.5.4 h1:dE3mmSB+n1GeviGM8xQAW3+UD3mKeFmd84iefDul5Vs=
@@ -73,13 +67,15 @@ github.com/fujiwara/shapeio v1.0.0 h1:xG5D9oNqCSUUbryZ/jQV3cqe1v2suEjwPIcEg1gKM8
 github.com/fujiwara/shapeio v1.0.0/go.mod h1:LmEmu6L/8jetyj1oewewFb7bZCNRwE7wLCUNzDLaLVA=
 github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
 github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
-github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
-github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
 github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
+github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
@@ -100,8 +96,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
 github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
 github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
 github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
-github.com/go-playground/validator/v10 v10.15.0 h1:nDU5XeOKtB3GEa+uB7GNYwhVKsgjAR7VgKoNB6ryXfw=
-github.com/go-playground/validator/v10 v10.15.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
+github.com/go-playground/validator/v10 v10.15.3 h1:S+sSpunYjNPDuXkWbK+x+bA7iXiW296KG4dL3X7xUZo=
+github.com/go-playground/validator/v10 v10.15.3/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
@@ -125,10 +121,11 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gops v0.3.27 h1:BDdWfedShsBbeatZ820oA4DbVOC8yJ4NI8xAlDFWfgI=
-github.com/google/gops v0.3.27/go.mod h1:lYqabmfnq4Q6UumWNx96Hjup5BDAVc8zmfIy0SkNCSk=
-github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark=
+github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4=
+github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
@@ -144,10 +141,10 @@ github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es
 github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
 github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
-github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/golang-lru/v2 v2.0.4 h1:7GHuZcgid37q8o5i3QI9KMT4nCWQQ3Kx3Ov6bb9MfK0=
-github.com/hashicorp/golang-lru/v2 v2.0.4/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
+github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
+github.com/hashicorp/golang-lru/v2 v2.0.6 h1:3xi/Cafd1NaoEnS/yDssIiuVeDVywU0QdFGl3aQaQHM=
+github.com/hashicorp/golang-lru/v2 v2.0.6/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
 github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM=
 github.com/hashicorp/raft v1.5.0 h1:uNs9EfJ4FwiArZRxxfd/dQ5d33nV31/CdCHArH89hT8=
 github.com/hashicorp/raft v1.5.0/go.mod h1:pKHB2mf/Y25u3AHNSXVRv+yT+WAnmeTX0BwVppVQV+M=
@@ -164,13 +161,11 @@ github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
 github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
-github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
 github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
-github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
 github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
@@ -227,8 +222,8 @@ github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
 github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
 github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.61 h1:87c+x8J3jxQ5VUGimV9oHdpjsAvy3fhneEBKuoKEVUI=
-github.com/minio/minio-go/v7 v7.0.61/go.mod h1:BTu8FcrEw+HidY0zd/0eny43QnVNkXRPXrLXFuQBHXg=
+github.com/minio/minio-go/v7 v7.0.63 h1:GbZ2oCvaUdgT5640WJOpyDhhDxvknAJU2/T3yurwcbQ=
+github.com/minio/minio-go/v7 v7.0.63/go.mod h1:Q6X7Qjb7WMhvG65qKf4gUgA5XaiSox74kR1uAEjxRS4=
 github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
 github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -241,7 +236,6 @@ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3Rllmb
 github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
 github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
 github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
 github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
@@ -293,7 +287,6 @@ github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFt
 github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
 github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
 github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
-github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
@@ -315,22 +308,23 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
 github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
 github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/swaggo/echo-swagger v1.4.0 h1:RCxLKySw1SceHLqnmc41pKyiIeE+OiD7NSI7FUOBlLo=
-github.com/swaggo/echo-swagger v1.4.0/go.mod h1:Wh3VlwjZGZf/LH0s81tz916JokuPG7y/ZqaqnckYqoQ=
+github.com/swaggo/echo-swagger v1.4.1 h1:Yf0uPaJWp1uRtDloZALyLnvdBeoEL5Kc7DtnjzO/TUk=
+github.com/swaggo/echo-swagger v1.4.1/go.mod h1:C8bSi+9yH2FLZsnhqMZLIZddpUxZdBYuNHbtaS1Hljc=
 github.com/swaggo/files/v2 v2.0.0 h1:hmAt8Dkynw7Ssz46F6pn8ok6YmGZqHSVLZ+HQM7i0kw=
 github.com/swaggo/files/v2 v2.0.0/go.mod h1:24kk2Y9NYEJ5lHuCra6iVwkMjIekMCaFq/0JQj66kyM=
-github.com/swaggo/swag v1.16.1 h1:fTNRhKstPKxcnoKsytm4sahr8FaYzUcT7i1/3nd/fBg=
-github.com/swaggo/swag v1.16.1/go.mod h1:9/LMvHycG3NFHfR6LwvikHv5iFvmPADQ359cKikGxto=
+github.com/swaggo/swag v1.16.2 h1:28Pp+8DkQoV+HLzLx8RGJZXNGKbFqnuvSbAAtoxiY04=
+github.com/swaggo/swag v1.16.2/go.mod h1:6YzXnDcpr0767iOejs318CwYkCQqyGer6BizOg03f+E=
 github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
-github.com/tidwall/gjson v1.15.0 h1:5n/pM+v3r5ujuNl4YLZLsQ+UE5jlkLVm7jMzT5Mpolw=
-github.com/tidwall/gjson v1.15.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/gjson v1.16.0 h1:SyXa+dsSPpUlcwEDuKuEBJEz5vzTvOea+9rjyYodQFg=
+github.com/tidwall/gjson v1.16.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
 github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
 github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
 github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
 github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
 github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
-github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
 github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
+github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
+github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
 github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
 github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
 github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
@@ -404,6 +398,7 @@ golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
diff --git a/vendor/github.com/caddyserver/certmagic/handshake.go b/vendor/github.com/caddyserver/certmagic/handshake.go
index a3d714e8..1e9928ba 100644
--- a/vendor/github.com/caddyserver/certmagic/handshake.go
+++ b/vendor/github.com/caddyserver/certmagic/handshake.go
@@ -46,8 +46,7 @@ import (
 // GetCertificate will run in a new context, use GetCertificateWithContext to provide
 // a context.
 func (cfg *Config) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
-	ctx := context.TODO() // TODO: get a proper context? from somewhere...
-	return cfg.GetCertificateWithContext(ctx, clientHello)
+	return cfg.GetCertificateWithContext(clientHello.Context(), clientHello)
 }
 
 func (cfg *Config) GetCertificateWithContext(ctx context.Context, clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
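For context, this is roughly how a caller wires `Config.GetCertificate` into a TLS listener — a minimal sketch, assuming certmagic's `NewDefault` helper and an illustrative listener setup (neither is taken from this repository). With the change above, cancelling the handshake's context now cancels the certificate lookup as well:

```go
package main

import (
	"crypto/tls"
	"log"
	"net"

	"github.com/caddyserver/certmagic"
)

func main() {
	cfg := certmagic.NewDefault()

	// GetCertificate now derives its context from clientHello.Context(),
	// so an aborted handshake no longer leaves the lookup running.
	tlsCfg := &tls.Config{GetCertificate: cfg.GetCertificate}

	ln, err := net.Listen("tcp", ":443")
	if err != nil {
		log.Fatal(err)
	}
	tlsLn := tls.NewListener(ln, tlsCfg)
	defer tlsLn.Close()
	// ... accept connections from tlsLn as usual ...
}
```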
@@ -255,7 +254,6 @@ func DefaultCertificateSelector(hello *tls.ClientHelloInfo, choices []Certificat
 // This function is safe for concurrent use.
 func (cfg *Config) getCertDuringHandshake(ctx context.Context, hello *tls.ClientHelloInfo, loadOrObtainIfNecessary bool) (Certificate, error) {
 	logger := logWithRemote(cfg.Logger.Named("handshake"), hello)
-	name := cfg.getNameFromClientHello(hello)
 
 	// First check our in-memory cache to see if we've already loaded it
 	cert, matched, defaulted := cfg.getCertificateFromCache(hello)
@@ -274,8 +272,10 @@ func (cfg *Config) getCertDuringHandshake(ctx context.Context, hello *tls.Client
 		return cert, nil
 	}
 
+	name := cfg.getNameFromClientHello(hello)
+
 	// By this point, we need to load or obtain a certificate. If a swarm of requests comes in for the same
-	// domain, avoid pounding manager or storage thousands of times simultaneously. We do a similar sync
+	// domain, avoid pounding manager or storage thousands of times simultaneously. We use a similar sync
 	// strategy for obtaining certificate during handshake.
 	certLoadWaitChansMu.Lock()
 	wait, ok := certLoadWaitChans[name]
@@ -283,7 +283,7 @@ func (cfg *Config) getCertDuringHandshake(ctx context.Context, hello *tls.Client
 		// another goroutine is already loading the cert; just wait and we'll get it from the in-memory cache
 		certLoadWaitChansMu.Unlock()
 
-		timeout := time.NewTimer(2 * time.Minute) // TODO: have Caddy use the context param to establish a timeout
+		timeout := time.NewTimer(2 * time.Minute)
 		select {
 		case <-timeout.C:
 			return Certificate{}, fmt.Errorf("timed out waiting to load certificate for %s", name)
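The `certLoadWaitChans` logic above is a hand-rolled single-flight: the first goroutine to miss the cache loads the certificate while later goroutines for the same name block on a shared channel. A minimal sketch of that pattern, with hypothetical names (`waitChans`, `loadOnce`) standing in for certmagic's internals:

```go
package dedup

import "sync"

var (
	waitChans   = make(map[string]chan struct{})
	waitChansMu sync.Mutex
)

// loadOnce collapses concurrent calls for the same key onto one load:
// the first caller does the work; later callers block until it finishes.
func loadOnce(key string, load func()) {
	waitChansMu.Lock()
	if ch, inFlight := waitChans[key]; inFlight {
		waitChansMu.Unlock()
		<-ch // someone else is loading; wait for them to finish
		return
	}
	ch := make(chan struct{})
	waitChans[key] = ch
	waitChansMu.Unlock()

	load()

	waitChansMu.Lock()
	close(ch) // unblock all waiters
	delete(waitChans, key)
	waitChansMu.Unlock()
}
```

The `golang.org/x/sync/singleflight` package packages the same idea, with the addition of sharing the loaded value among waiters.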
@@ -385,8 +385,10 @@ func (cfg *Config) getCertDuringHandshake(ctx context.Context, hello *tls.Client
 	return Certificate{}, fmt.Errorf("no certificate available for '%s'", name)
 }
 
+// loadCertFromStorage loads the certificate for name from storage and maintains it
+// (as this is only called with on-demand TLS enabled).
 func (cfg *Config) loadCertFromStorage(ctx context.Context, logger *zap.Logger, hello *tls.ClientHelloInfo) (Certificate, error) {
-	name := normalizedName(hello.ServerName)
+	name := cfg.getNameFromClientHello(hello)
 	loadedCert, err := cfg.CacheManagedCertificate(ctx, name)
 	if errors.Is(err, fs.ErrNotExist) {
 		// If no exact match, try a wildcard variant, which is something we can still use
@@ -477,6 +479,9 @@ func (cfg *Config) obtainOnDemandCertificate(ctx context.Context, hello *tls.Cli
 		// wait for it to finish obtaining the cert and then we'll use it.
 		obtainCertWaitChansMu.Unlock()
 
+		log.Debug("new certificate is needed, but is already being obtained; waiting for that issuance to complete",
+			zap.String("subject", name))
+
 		// TODO: see if we can get a proper context in here, for true cancellation
 		timeout := time.NewTimer(2 * time.Minute)
 		select {
@@ -486,7 +491,9 @@ func (cfg *Config) obtainOnDemandCertificate(ctx context.Context, hello *tls.Cli
 		timeout.Stop()
 	}
 
-	return cfg.loadCertFromStorage(ctx, log, hello)
+	// it should now be loaded in the cache, ready to go; if not,
+	// the goroutine in charge of that probably had an error
+	return cfg.getCertDuringHandshake(ctx, hello, false)
 }
 
 // looks like it's up to us to do all the work and obtain the cert.
@@ -504,28 +511,28 @@
 	log.Info("obtaining new certificate", zap.String("server_name", name))
 
-	// TODO: we are only adding a timeout because we don't know if the context passed in is actually cancelable...
+	// set a timeout so we don't inadvertently hold a client handshake open too long
 	// (timeout duration is based on https://caddy.community/t/zerossl-dns-challenge-failing-often-route53-plugin/13822/24?u=matt)
 	var cancel context.CancelFunc
 	ctx, cancel = context.WithTimeout(ctx, 180*time.Second)
 	defer cancel()
 
-	// Obtain the certificate
+	// obtain the certificate (this puts it in storage) and if successful,
+	// load it from storage so we and any other waiting goroutine can use it
+	var cert Certificate
 	err := cfg.ObtainCertAsync(ctx, name)
-
-	// immediately unblock anyone waiting for it; doing this in
-	// a defer would risk deadlock because of the recursive call
-	// to getCertDuringHandshake below when we return!
-	unblockWaiters()
-
-	if err != nil {
-		// shucks; failed to solve challenge on-demand
-		return Certificate{}, err
+	if err == nil {
+		// load from storage while others wait to make the op as atomic as possible
+		cert, err = cfg.loadCertFromStorage(ctx, log, hello)
+		if err != nil {
+			log.Error("loading newly-obtained certificate from storage", zap.String("server_name", name), zap.Error(err))
+		}
 	}
 
-	// success; certificate was just placed on disk, so
-	// we need only restart serving the certificate
-	return cfg.loadCertFromStorage(ctx, log, hello)
+	// immediately unblock anyone waiting for it
+	unblockWaiters()
+
+	return cert, err
 }
 
 // handshakeMaintenance performs a check on cert for expiration and OCSP validity.
@@ -550,6 +557,7 @@ func (cfg *Config) handshakeMaintenance(ctx context.Context, hello *tls.ClientHe
 			// quite common considering not all certs have issuer URLs that support it.
 			log.Warn("stapling OCSP",
 				zap.String("server_name", hello.ServerName),
+				zap.Strings("sans", cert.Names),
 				zap.Error(err))
 		} else {
 			log.Debug("successfully stapled new OCSP response",
@@ -607,7 +615,7 @@ func (cfg *Config) handshakeMaintenance(ctx context.Context, hello *tls.ClientHe
 //
 // This function is safe for use by multiple concurrent goroutines.
 func (cfg *Config) renewDynamicCertificate(ctx context.Context, hello *tls.ClientHelloInfo, currentCert Certificate) (Certificate, error) {
-	log := cfg.Logger.Named("on_demand")
+	log := logWithRemote(cfg.Logger.Named("on_demand"), hello)
 	name := cfg.getNameFromClientHello(hello)
 	timeLeft := time.Until(expiresAt(currentCert.Leaf))
@@ -647,7 +655,9 @@ func (cfg *Config) renewDynamicCertificate(ctx context.Context, hello *tls.Clien
 		timeout.Stop()
 	}
 
-	return cfg.loadCertFromStorage(ctx, log, hello)
+	// it should now be loaded in the cache, ready to go; if not,
+	// the goroutine in charge of that probably had an error
+	return cfg.getCertDuringHandshake(ctx, hello, false)
 }
 
 // looks like it's up to us to do all the work and renew the cert
@@ -699,16 +709,8 @@ func (cfg *Config) renewDynamicCertificate(ctx context.Context, hello *tls.Clien
 	} else {
 		err = cfg.RenewCertAsync(ctx, name, false)
 		if err == nil {
-			// even though the recursive nature of the dynamic cert loading
-			// would just call this function anyway, we do it here to
-			// make the replacement as atomic as possible.
-			newCert, err = cfg.CacheManagedCertificate(ctx, name)
-			if err != nil {
-				log.Error("loading renewed certificate", zap.String("server_name", name), zap.Error(err))
-			} else {
-				// replace the old certificate with the new one
-				cfg.certCache.replaceCertificate(currentCert, newCert)
-			}
+			// load from storage while in lock to make the replacement as atomic as possible
+			newCert, err = cfg.reloadManagedCertificate(ctx, currentCert)
 		}
 	}
 
@@ -718,16 +720,15 @@
 	unblockWaiters()
 
 	if err != nil {
-		log.Error("renewing and reloading certificate", zap.Error(err))
-		return newCert, err
+		log.Error("renewing and reloading certificate", zap.String("server_name", name), zap.Error(err))
 	}
 
-	return cfg.loadCertFromStorage(ctx, log, hello)
+	return newCert, err
 }
 
 	// if the certificate hasn't expired, we can serve what we have and renew in the background
 	if timeLeft > 0 {
-		ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
+		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
 		go renewAndReload(ctx, cancel)
 		return currentCert, nil
 	}
@@ -868,6 +869,8 @@ var (
 	obtainCertWaitChans   = make(map[string]chan struct{})
 	obtainCertWaitChansMu sync.Mutex
 )
+
+// TODO: this lockset should probably be per-cache
 var (
 	certLoadWaitChans   = make(map[string]chan struct{})
 	certLoadWaitChansMu sync.Mutex
diff --git a/vendor/github.com/caddyserver/certmagic/storage.go b/vendor/github.com/caddyserver/certmagic/storage.go
index 5ee39291..6089bb9b 100644
--- a/vendor/github.com/caddyserver/certmagic/storage.go
+++ b/vendor/github.com/caddyserver/certmagic/storage.go
@@ -289,7 +289,7 @@ func acquireLock(ctx context.Context, storage Storage, lockKey string) error {
 }
 
 func releaseLock(ctx context.Context, storage Storage, lockKey string) error {
-	err := storage.Unlock(ctx, lockKey)
+	err := storage.Unlock(context.TODO(), lockKey) // TODO: in Go 1.21, use WithoutCancel (see #247)
 	if err == nil {
 		locksMu.Lock()
 		delete(locks, lockKey)
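The `context.TODO()` here sidesteps callers whose contexts may already be cancelled, at the cost of dropping the caller's values and traces; the TODO points at Go 1.21's `context.WithoutCancel`, which keeps the parent's values but detaches its cancellation. A sketch of that intended shape, with a stand-in `Unlocker` interface for the relevant slice of certmagic's `Storage`:

```go
package lockdemo

import "context"

// Unlocker is a stand-in for the Unlock method of certmagic's Storage interface.
type Unlocker interface {
	Unlock(ctx context.Context, key string) error
}

// releaseLock shows the Go 1.21+ form the TODO describes: an already-cancelled
// request can still release its lock, while context values stay intact.
func releaseLock(ctx context.Context, storage Unlocker, lockKey string) error {
	return storage.Unlock(context.WithoutCancel(ctx), lockKey)
}
```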
diff --git a/vendor/github.com/casbin/casbin/v2/README.md b/vendor/github.com/casbin/casbin/v2/README.md
index 8fd6b8af..141f14ff 100644
--- a/vendor/github.com/casbin/casbin/v2/README.md
+++ b/vendor/github.com/casbin/casbin/v2/README.md
@@ -2,15 +2,14 @@ Casbin
 ====
 
 [![Go Report Card](https://goreportcard.com/badge/github.com/casbin/casbin)](https://goreportcard.com/report/github.com/casbin/casbin)
+[![Build](https://github.com/casbin/casbin/actions/workflows/default.yml/badge.svg)](https://github.com/casbin/casbin/actions/workflows/default.yml)
 [![Coverage Status](https://coveralls.io/repos/github/casbin/casbin/badge.svg?branch=master)](https://coveralls.io/github/casbin/casbin?branch=master)
 [![Godoc](https://godoc.org/github.com/casbin/casbin?status.svg)](https://pkg.go.dev/github.com/casbin/casbin/v2)
 [![Release](https://img.shields.io/github/release/casbin/casbin.svg)](https://github.com/casbin/casbin/releases/latest)
 [![Discord](https://img.shields.io/discord/1022748306096537660?logo=discord&label=discord&color=5865F2)](https://discord.gg/S5UjpzGZjN)
 [![Sourcegraph](https://sourcegraph.com/github.com/casbin/casbin/-/badge.svg)](https://sourcegraph.com/github.com/casbin/casbin?badge)
 
-💖 [**Looking for an open-source identity and access management solution like Okta, Auth0, Keycloak ? Learn more about: Casdoor**](https://casdoor.org/)
-
-casdoor
+[![image](https://door.casdoor.com/image.png)](https://door.casdoor.com/redirect)
 
 **News**: still worry about how to write the correct Casbin policy? ``Casbin online editor`` is coming to help! Try it at: https://casbin.org/editor/
@@ -261,6 +260,13 @@ Support this project by becoming a sponsor. Your logo will show up here with a l
+
+
+
+
+
+
+
 
 ## Star History
diff --git a/vendor/github.com/casbin/casbin/v2/enforcer.go b/vendor/github.com/casbin/casbin/v2/enforcer.go
index 6b32436d..9caed722 100644
--- a/vendor/github.com/casbin/casbin/v2/enforcer.go
+++ b/vendor/github.com/casbin/casbin/v2/enforcer.go
@@ -46,6 +46,7 @@ type Enforcer struct {
 	watcher    persist.Watcher
 	dispatcher persist.Dispatcher
 	rmMap      map[string]rbac.RoleManager
+	condRmMap  map[string]rbac.ConditionalRoleManager
 	matcherMap sync.Map
 
 	enabled bool
@@ -205,6 +206,7 @@ func (e *Enforcer) SetLogger(logger log.Logger) {
 
 func (e *Enforcer) initialize() {
 	e.rmMap = map[string]rbac.RoleManager{}
+	e.condRmMap = map[string]rbac.ConditionalRoleManager{}
 	e.eft = effector.NewDefaultEffector()
 	e.watcher = nil
 	e.matcherMap = sync.Map{}
@@ -340,18 +342,50 @@ func (e *Enforcer) LoadPolicy() error {
 
 	if e.autoBuildRoleLinks {
 		needToRebuild = true
+		if err := e.rebuildRoleLinks(newModel); err != nil {
+			return err
+		}
+
+		if err := e.rebuildConditionalRoleLinks(newModel); err != nil {
+			return err
+		}
+	}
+	e.model = newModel
+	return nil
+}
+
+func (e *Enforcer) rebuildRoleLinks(newModel model.Model) error {
+	if len(e.rmMap) != 0 {
 		for _, rm := range e.rmMap {
 			err := rm.Clear()
 			if err != nil {
 				return err
 			}
 		}
-		err = newModel.BuildRoleLinks(e.rmMap)
+
+		err := newModel.BuildRoleLinks(e.rmMap)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (e *Enforcer) rebuildConditionalRoleLinks(newModel model.Model) error {
+	if len(e.condRmMap) != 0 {
+		for _, crm := range e.condRmMap {
+			err := crm.Clear()
+			if err != nil {
+				return err
+			}
+		}
+
+		err := newModel.BuildConditionalRoleLinks(e.condRmMap)
 		if err != nil {
 			return err
 		}
 	}
-	e.model = newModel
+
 	return nil
 }
@@ -432,11 +466,27 @@ func (e *Enforcer) SavePolicy() error {
 }
 
 func (e *Enforcer) initRmMap() {
-	for ptype := range e.model["g"] {
+	for ptype, assertion := range e.model["g"] {
 		if rm, ok := e.rmMap[ptype]; ok {
 			_ = rm.Clear()
-		} else {
-			e.rmMap[ptype] = defaultrolemanager.NewRoleManager(10)
+			continue
+		}
+		if len(assertion.Tokens) <= 2 && len(assertion.ParamsTokens) == 0 {
+			assertion.RM = defaultrolemanager.NewRoleManagerImpl(10)
+			e.rmMap[ptype] = assertion.RM
+		}
+		if len(assertion.Tokens) <= 2 && len(assertion.ParamsTokens) != 0 {
+			assertion.CondRM = defaultrolemanager.NewConditionalRoleManager(10)
+			e.condRmMap[ptype] = assertion.CondRM
+		}
+		if len(assertion.Tokens) > 2 {
+			if len(assertion.ParamsTokens) == 0 {
+				assertion.RM = defaultrolemanager.NewRoleManager(10)
+				e.rmMap[ptype] = assertion.RM
+			} else {
+				assertion.CondRM = defaultrolemanager.NewConditionalDomainManager(10)
+				e.condRmMap[ptype] = assertion.CondRM
+			}
 			matchFun := "keyMatch(r_dom, p_dom)"
 			if strings.Contains(e.model["m"]["m"].Value, matchFun) {
 				e.AddNamedDomainMatchingFunc(ptype, "g", util.KeyMatch)
@@ -503,6 +553,12 @@ func (e *Enforcer) BuildIncrementalRoleLinks(op model.PolicyOp, ptype string, ru
 	return e.model.BuildIncrementalRoleLinks(e.rmMap, op, "g", ptype, rules)
 }
 
+// BuildIncrementalConditionalRoleLinks provides an incremental build of the role inheritance relations with conditions.
+func (e *Enforcer) BuildIncrementalConditionalRoleLinks(op model.PolicyOp, ptype string, rules [][]string) error {
+	e.invalidateMatcherMap()
+	return e.model.BuildIncrementalConditionalRoleLinks(e.condRmMap, op, "g", ptype, rules)
+}
+
 // NewEnforceContext Create a default structure based on the suffix
 func NewEnforceContext(suffix string) EnforceContext {
 	return EnforceContext{
@@ -532,8 +588,15 @@ func (e *Enforcer) enforce(matcher string, explains *[]string, rvals ...interfac
 	functions := e.fm.GetFunctions()
 	if _, ok := e.model["g"]; ok {
 		for key, ast := range e.model["g"] {
-			rm := ast.RM
-			functions[key] = util.GenerateGFunction(rm)
+			// g must be a normal role definition (ast.RM != nil)
+			// or a conditional role definition (ast.CondRM != nil)
+			// ast.RM and ast.CondRM shouldn't be nil at the same time
+			if ast.RM != nil {
+				functions[key] = util.GenerateGFunction(ast.RM)
+			}
+			if ast.CondRM != nil {
+				functions[key] = util.GenerateConditionalGFunction(ast.CondRM)
+			}
 		}
 	}
@@ -843,6 +906,45 @@ func (e *Enforcer) AddNamedDomainMatchingFunc(ptype, name string, fn rbac.Matchi
 	return false
 }
 
+// AddNamedLinkConditionFunc adds the condition function fn for the link userName->roleName;
+// when fn returns true the link is valid, otherwise it is invalid.
+func (e *Enforcer) AddNamedLinkConditionFunc(ptype, user, role string, fn rbac.LinkConditionFunc) bool {
+	if rm, ok := e.condRmMap[ptype]; ok {
+		rm.AddLinkConditionFunc(user, role, fn)
+		return true
+	}
+	return false
+}
+
+// AddNamedDomainLinkConditionFunc adds the condition function fn for the link userName->{roleName, domain};
+// when fn returns true the link is valid, otherwise it is invalid.
+func (e *Enforcer) AddNamedDomainLinkConditionFunc(ptype, user, role string, domain string, fn rbac.LinkConditionFunc) bool {
+	if rm, ok := e.condRmMap[ptype]; ok {
+		rm.AddDomainLinkConditionFunc(user, role, domain, fn)
+		return true
+	}
+	return false
+}
+
+// SetNamedLinkConditionFuncParams sets the parameters of the condition function fn for the link userName->roleName.
+func (e *Enforcer) SetNamedLinkConditionFuncParams(ptype, user, role string, params ...string) bool {
+	if rm, ok := e.condRmMap[ptype]; ok {
+		rm.SetLinkConditionFuncParams(user, role, params...)
+		return true
+	}
+	return false
+}
+
+// SetNamedDomainLinkConditionFuncParams sets the parameters of the condition function fn
+// for the link userName->{roleName, domain}.
+func (e *Enforcer) SetNamedDomainLinkConditionFuncParams(ptype, user, role, domain string, params ...string) bool {
+	if rm, ok := e.condRmMap[ptype]; ok {
+		rm.SetDomainLinkConditionFuncParams(user, role, domain, params...)
+		return true
+	}
+	return false
+}
+
 // assumes bounds have already been checked
 type enforceParameters struct {
 	rTokens map[string]int
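Taken together, these additions wire conditional role links end to end. A sketch of how a caller might use the new API: the model and policy files are hypothetical (the `(_, _)` parameter syntax is inferred from this diff's token parsing, where the parenthesized tokens in the `g` definition are condition parameters supplied by each `g` rule), while `AddNamedLinkConditionFunc` and `rbac.LinkConditionFunc` are exactly as declared in this diff:

```go
package main

import (
	"fmt"
	"time"

	"github.com/casbin/casbin/v2"
)

// Hypothetical model.conf role definition and policy.csv rule:
//
//   [role_definition]
//   g = _, _, (_, _)
//
//   g, alice, admin, 2023-01-01T00:00:00Z, 2023-12-31T23:59:59Z
func main() {
	e, err := casbin.NewEnforcer("model.conf", "policy.csv")
	if err != nil {
		panic(err)
	}

	// The link alice -> admin is only valid between the two
	// policy-supplied instants, which arrive here as args.
	e.AddNamedLinkConditionFunc("g", "alice", "admin", func(args ...string) (bool, error) {
		start, err := time.Parse(time.RFC3339, args[0])
		if err != nil {
			return false, err
		}
		end, err := time.Parse(time.RFC3339, args[1])
		if err != nil {
			return false, err
		}
		now := time.Now()
		return now.After(start) && now.Before(end), nil
	})

	ok, _ := e.Enforce("alice", "data1", "read")
	fmt.Println(ok) // true only while the role link's condition holds
}
```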
diff --git a/vendor/github.com/casbin/casbin/v2/frontend.go b/vendor/github.com/casbin/casbin/v2/frontend.go
index f6650957..99434e10 100644
--- a/vendor/github.com/casbin/casbin/v2/frontend.go
+++ b/vendor/github.com/casbin/casbin/v2/frontend.go
@@ -22,15 +22,27 @@ import (
 func CasbinJsGetPermissionForUser(e IEnforcer, user string) (string, error) {
 	model := e.GetModel()
 	m := map[string]interface{}{}
+
 	m["m"] = model.ToText()
-	policies := make([][]string, 0)
+
+	pRules := [][]string{}
 	for ptype := range model["p"] {
-		policy := model.GetPolicy("p", ptype)
-		for i := range policy {
-			policies = append(policies, append([]string{ptype}, policy[i]...))
+		policies := model.GetPolicy("p", ptype)
+		for _, rules := range policies {
+			pRules = append(pRules, append([]string{ptype}, rules...))
 		}
 	}
-	m["p"] = policies
+	m["p"] = pRules
+
+	gRules := [][]string{}
+	for ptype := range model["g"] {
+		policies := model.GetPolicy("g", ptype)
+		for _, rules := range policies {
+			gRules = append(gRules, append([]string{ptype}, rules...))
+		}
+	}
+	m["g"] = gRules
+
 	result := bytes.NewBuffer([]byte{})
 	encoder := json.NewEncoder(result)
 	encoder.SetEscapeHTML(false)
diff --git a/vendor/github.com/casbin/casbin/v2/internal_api.go b/vendor/github.com/casbin/casbin/v2/internal_api.go
index d3d56409..42991b54 100644
--- a/vendor/github.com/casbin/casbin/v2/internal_api.go
+++ b/vendor/github.com/casbin/casbin/v2/internal_api.go
@@ -91,6 +91,11 @@ func (e *Enforcer) addPoliciesWithoutNotify(sec string, ptype string, rules [][]
 		if err != nil {
 			return true, err
 		}
+
+		err = e.BuildIncrementalConditionalRoleLinks(model.PolicyAdd, ptype, rules)
+		if err != nil {
+			return true, err
+		}
 	}
 
 	return true, nil
diff --git a/vendor/github.com/casbin/casbin/v2/log/default_logger.go b/vendor/github.com/casbin/casbin/v2/log/default_logger.go
index 1ad3a92b..9994f390 100644
--- a/vendor/github.com/casbin/casbin/v2/log/default_logger.go
+++ b/vendor/github.com/casbin/casbin/v2/log/default_logger.go
@@ -95,3 +95,10 @@ func (l *DefaultLogger) LogRole(roles []string) {
 
 	log.Println("Roles: ", strings.Join(roles, "\n"))
 }
+
+func (l *DefaultLogger) LogError(err error, msg ...string) {
+	if !l.enabled {
+		return
+	}
+	log.Println(msg, err)
+}
diff --git a/vendor/github.com/casbin/casbin/v2/log/log_util.go b/vendor/github.com/casbin/casbin/v2/log/log_util.go
index 191c8179..617c0393 100644
--- a/vendor/github.com/casbin/casbin/v2/log/log_util.go
+++ b/vendor/github.com/casbin/casbin/v2/log/log_util.go
@@ -45,3 +45,8 @@ func LogRole(roles []string) {
 func LogPolicy(policy map[string][][]string) {
 	logger.LogPolicy(policy)
 }
+
+// LogError logs the error information
+func LogError(err error, msg ...string) {
+	logger.LogError(err, msg...)
+}
diff --git a/vendor/github.com/casbin/casbin/v2/log/logger.go b/vendor/github.com/casbin/casbin/v2/log/logger.go
index c60e07aa..8982cae6 100644
--- a/vendor/github.com/casbin/casbin/v2/log/logger.go
+++ b/vendor/github.com/casbin/casbin/v2/log/logger.go
@@ -35,4 +35,7 @@ type Logger interface {
 
 	// LogPolicy log info related to policy.
 	LogPolicy(policy map[string][][]string)
+
+	// LogError logs info related to an error.
+	LogError(err error, msg ...string)
 }
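The frontend helper now serializes grouping policies alongside the model and `p` rules. A sketch of calling it — the file names are hypothetical, and the commented JSON shape follows the three keys (`m`, `p`, `g`) assembled in the diff above:

```go
package main

import (
	"fmt"

	"github.com/casbin/casbin/v2"
)

func main() {
	e, err := casbin.NewEnforcer("model.conf", "policy.csv") // hypothetical files
	if err != nil {
		panic(err)
	}

	// The returned (HTML-unescaped) JSON now carries g rules too, roughly:
	// {"m":"<model text>","p":[["p","alice","data1","read"]],"g":[["g","alice","admin"]]}
	out, err := casbin.CasbinJsGetPermissionForUser(e, "alice")
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}
```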
diff --git a/vendor/github.com/casbin/casbin/v2/model/assertion.go b/vendor/github.com/casbin/casbin/v2/model/assertion.go
index 054dd0f4..cbe0edd7 100644
--- a/vendor/github.com/casbin/casbin/v2/model/assertion.go
+++ b/vendor/github.com/casbin/casbin/v2/model/assertion.go
@@ -28,9 +28,11 @@ type Assertion struct {
 	Key           string
 	Value         string
 	Tokens        []string
+	ParamsTokens  []string
 	Policy        [][]string
 	PolicyMap     map[string]int
 	RM            rbac.RoleManager
+	CondRM        rbac.ConditionalRoleManager
 	FieldIndexMap map[string]int
 
 	logger log.Logger
@@ -63,7 +65,6 @@ func (ast *Assertion) buildIncrementalRoleLinks(rm rbac.RoleManager, op PolicyOp
 			}
 		}
 	}
-
 	return nil
 }
@@ -89,6 +90,81 @@ func (ast *Assertion) buildRoleLinks(rm rbac.RoleManager) error {
 	return nil
 }
 
+func (ast *Assertion) buildIncrementalConditionalRoleLinks(condRM rbac.ConditionalRoleManager, op PolicyOp, rules [][]string) error {
+	ast.CondRM = condRM
+	count := strings.Count(ast.Value, "_")
+	if count < 2 {
+		return errors.New("the number of \"_\" in role definition should be at least 2")
+	}
+
+	for _, rule := range rules {
+		if len(rule) < count {
+			return errors.New("grouping policy elements do not meet role definition")
+		}
+		if len(rule) > count {
+			rule = rule[:count]
+		}
+
+		var err error
+		domainRule := rule[2:len(ast.Tokens)]
+
+		switch op {
+		case PolicyAdd:
+			err = ast.addConditionalRoleLink(rule, domainRule)
+		case PolicyRemove:
+			err = ast.CondRM.DeleteLink(rule[0], rule[1], rule[2:]...)
+		}
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ast *Assertion) buildConditionalRoleLinks(condRM rbac.ConditionalRoleManager) error {
+	ast.CondRM = condRM
+	count := strings.Count(ast.Value, "_")
+	if count < 2 {
+		return errors.New("the number of \"_\" in role definition should be at least 2")
+	}
+	for _, rule := range ast.Policy {
+		if len(rule) < count {
+			return errors.New("grouping policy elements do not meet role definition")
+		}
+		if len(rule) > count {
+			rule = rule[:count]
+		}
+
+		domainRule := rule[2:len(ast.Tokens)]
+
+		err := ast.addConditionalRoleLink(rule, domainRule)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// addConditionalRoleLink adds a link to the rbac.ConditionalRoleManager and sets the parameters for its LinkConditionFunc
+func (ast *Assertion) addConditionalRoleLink(rule []string, domainRule []string) error {
+	var err error
+	if len(domainRule) == 0 {
+		err = ast.CondRM.AddLink(rule[0], rule[1])
+		if err == nil {
+			ast.CondRM.SetLinkConditionFuncParams(rule[0], rule[1], rule[len(ast.Tokens):]...)
+		}
+	} else {
+		domain := domainRule[0]
+		err = ast.CondRM.AddLink(rule[0], rule[1], domain)
+		if err == nil {
+			ast.CondRM.SetDomainLinkConditionFuncParams(rule[0], rule[1], domain, rule[len(ast.Tokens):]...)
+		}
+	}
+	return err
+}
+
 func (ast *Assertion) setLogger(logger log.Logger) {
 	ast.logger = logger
 }
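A worked example of the slicing in `addConditionalRoleLink`, assuming a domain-style definition with three role tokens and two condition parameters (hypothetically `g = _, _, _, (_, _)`, so `len(ast.Tokens) == 3`):

```go
package main

import "fmt"

func main() {
	tokens := []string{"g_user", "g_role", "g_dom"} // stands in for ast.Tokens
	rule := []string{"alice", "admin", "domain1",
		"2023-01-01T00:00:00Z", "2023-12-31T23:59:59Z"}

	user, role := rule[0], rule[1]
	domainRule := rule[2:len(tokens)] // ["domain1"] — the domain token(s)
	params := rule[len(tokens):]      // the two condition parameters

	fmt.Println(user, role, domainRule, params)
}
```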
diff --git a/vendor/github.com/casbin/casbin/v2/model/model.go b/vendor/github.com/casbin/casbin/v2/model/model.go
index 25ce8243..5db4c2c3 100644
--- a/vendor/github.com/casbin/casbin/v2/model/model.go
+++ b/vendor/github.com/casbin/casbin/v2/model/model.go
@@ -54,6 +54,18 @@ func loadAssertion(model Model, cfg config.ConfigInterface, sec string, key stri
 	return model.AddDef(sec, key, value)
 }
 
+var paramsRegex = regexp.MustCompile(`\((.*?)\)`)
+
+// getParamsToken gets the ParamsTokens from Assertion.Value
+func getParamsToken(value string) []string {
+	paramsString := paramsRegex.FindString(value)
+	if paramsString == "" {
+		return nil
+	}
+	paramsString = strings.TrimSuffix(strings.TrimPrefix(paramsString, "("), ")")
+	return strings.Split(paramsString, ",")
+}
+
 // AddDef adds an assertion to the model.
 func (model Model) AddDef(sec string, key string, value string) bool {
 	if value == "" {
@@ -73,7 +85,9 @@ func (model Model) AddDef(sec string, key string, value string) bool {
 			ast.Tokens[i] = key + "_" + strings.TrimSpace(ast.Tokens[i])
 		}
 	} else if sec == "g" {
+		ast.ParamsTokens = getParamsToken(ast.Value)
 		ast.Tokens = strings.Split(ast.Value, ",")
+		ast.Tokens = ast.Tokens[:len(ast.Tokens)-len(ast.ParamsTokens)]
 	} else {
 		ast.Value = util.RemoveComments(util.EscapeAssertion(ast.Value))
 	}
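`getParamsToken` drives the token split above. A standalone reproduction showing what it yields for a sample `g` definition (the definition string is hypothetical); note the split keeps the space after each comma, which downstream code trims itself:

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	paramsRegex := regexp.MustCompile(`\((.*?)\)`)
	value := "_, _, (_, _)" // hypothetical conditional role definition

	paramsString := paramsRegex.FindString(value)                                 // "(_, _)"
	paramsString = strings.TrimSuffix(strings.TrimPrefix(paramsString, "("), ")") // "_, _"
	params := strings.Split(paramsString, ",")
	fmt.Printf("%q\n", params) // ["_" " _"] — two condition parameter tokens

	tokens := strings.Split(value, ",")
	tokens = tokens[:len(tokens)-len(params)]
	fmt.Printf("%q\n", tokens) // ["_" " _"] — the two role tokens that remain
}
```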
diff --git a/vendor/github.com/casbin/casbin/v2/model/policy.go b/vendor/github.com/casbin/casbin/v2/model/policy.go
index d4b4a7a5..48dcb13d 100644
--- a/vendor/github.com/casbin/casbin/v2/model/policy.go
+++ b/vendor/github.com/casbin/casbin/v2/model/policy.go
@@ -37,7 +37,7 @@ const DefaultSep = ","
 
 // BuildIncrementalRoleLinks provides incremental build the role inheritance relations.
 func (model Model) BuildIncrementalRoleLinks(rmMap map[string]rbac.RoleManager, op PolicyOp, sec string, ptype string, rules [][]string) error {
-	if sec == "g" {
+	if sec == "g" && rmMap[ptype] != nil {
 		return model[sec][ptype].buildIncrementalRoleLinks(rmMap[ptype], op, rules)
 	}
 	return nil
@@ -47,10 +47,34 @@ func (model Model) BuildRoleLinks(rmMap map[string]rbac.RoleManager) error {
 	model.PrintPolicy()
 	for ptype, ast := range model["g"] {
-		rm := rmMap[ptype]
-		err := ast.buildRoleLinks(rm)
-		if err != nil {
-			return err
+		if rm := rmMap[ptype]; rm != nil {
+			err := ast.buildRoleLinks(rm)
+			if err != nil {
+				return err
+			}
 		}
 	}
 
 	return nil
 }
 
+// BuildIncrementalConditionalRoleLinks provides an incremental build of the role inheritance relations.
+func (model Model) BuildIncrementalConditionalRoleLinks(condRmMap map[string]rbac.ConditionalRoleManager, op PolicyOp, sec string, ptype string, rules [][]string) error {
+	if sec == "g" && condRmMap[ptype] != nil {
+		return model[sec][ptype].buildIncrementalConditionalRoleLinks(condRmMap[ptype], op, rules)
+	}
+	return nil
+}
+
+// BuildConditionalRoleLinks initializes the roles in RBAC.
+func (model Model) BuildConditionalRoleLinks(condRmMap map[string]rbac.ConditionalRoleManager) error {
+	model.PrintPolicy()
+	for ptype, ast := range model["g"] {
+		if condRm := condRmMap[ptype]; condRm != nil {
+			err := ast.buildConditionalRoleLinks(condRm)
+			if err != nil {
+				return err
+			}
 		}
 	}
diff --git a/vendor/github.com/casbin/casbin/v2/persist/adapter_context.go b/vendor/github.com/casbin/casbin/v2/persist/adapter_context.go
new file mode 100644
index 00000000..8e54ad0d
--- /dev/null
+++ b/vendor/github.com/casbin/casbin/v2/persist/adapter_context.go
@@ -0,0 +1,41 @@
+// Copyright 2023 The casbin Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package persist
+
+import (
+	"context"
+
+	"github.com/casbin/casbin/v2/model"
+)
+
+// ContextAdapter provides a context-aware interface for Casbin adapters.
+type ContextAdapter interface {
+	Adapter
+
+	// LoadPolicyCtx loads all policy rules from the storage with context.
+	LoadPolicyCtx(ctx context.Context, model model.Model) error
+	// SavePolicyCtx saves all policy rules to the storage with context.
+	SavePolicyCtx(ctx context.Context, model model.Model) error
+
+	// AddPolicyCtx adds a policy rule to the storage with context.
+	// This is part of the Auto-Save feature.
+	AddPolicyCtx(ctx context.Context, sec string, ptype string, rule []string) error
+	// RemovePolicyCtx removes a policy rule from the storage with context.
+	// This is part of the Auto-Save feature.
+	RemovePolicyCtx(ctx context.Context, sec string, ptype string, rule []string) error
+	// RemoveFilteredPolicyCtx removes policy rules that match the filter from the storage with context.
+	// This is part of the Auto-Save feature.
+	RemoveFilteredPolicyCtx(ctx context.Context, sec string, ptype string, fieldIndex int, fieldValues ...string) error
+}
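One way to satisfy the new `ContextAdapter` is to wrap an existing context-unaware `persist.Adapter` — a sketch that merely honours cancellation before delegating (a real adapter would thread `ctx` into its I/O); the embedded `persist.Adapter` supplies the plain methods:

```go
package persistdemo

import (
	"context"

	"github.com/casbin/casbin/v2/model"
	"github.com/casbin/casbin/v2/persist"
)

// ctxAdapter wraps a plain adapter; each Ctx method checks the context
// before falling back to the context-unaware implementation.
type ctxAdapter struct {
	persist.Adapter
}

func (a ctxAdapter) LoadPolicyCtx(ctx context.Context, m model.Model) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	return a.LoadPolicy(m)
}

func (a ctxAdapter) SavePolicyCtx(ctx context.Context, m model.Model) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	return a.SavePolicy(m)
}

func (a ctxAdapter) AddPolicyCtx(ctx context.Context, sec, ptype string, rule []string) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	return a.AddPolicy(sec, ptype, rule)
}

func (a ctxAdapter) RemovePolicyCtx(ctx context.Context, sec, ptype string, rule []string) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	return a.RemovePolicy(sec, ptype, rule)
}

func (a ctxAdapter) RemoveFilteredPolicyCtx(ctx context.Context, sec, ptype string, fieldIndex int, fieldValues ...string) error {
	if err := ctx.Err(); err != nil {
		return err
	}
	return a.RemoveFilteredPolicy(sec, ptype, fieldIndex, fieldValues...)
}
```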
diff --git a/vendor/github.com/casbin/casbin/v2/rbac/context_role_manager.go b/vendor/github.com/casbin/casbin/v2/rbac/context_role_manager.go
new file mode 100644
index 00000000..dcaa37f7
--- /dev/null
+++ b/vendor/github.com/casbin/casbin/v2/rbac/context_role_manager.go
@@ -0,0 +1,46 @@
+// Copyright 2023 The casbin Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rbac
+
+import "context"
+
+// ContextRoleManager provides a context-aware interface to define the operations for managing roles.
+// Prefer this over the RoleManager interface for context propagation, which is useful for things like handling
+// request timeouts.
+type ContextRoleManager interface {
+	RoleManager
+
+	// ClearCtx clears all stored data and resets the role manager to the initial state with context.
+	ClearCtx(ctx context.Context) error
+	// AddLinkCtx adds the inheritance link between two roles. role: name1 and role: name2 with context.
+	// domain is a prefix to the roles (can be used for other purposes).
+	AddLinkCtx(ctx context.Context, name1 string, name2 string, domain ...string) error
+	// DeleteLinkCtx deletes the inheritance link between two roles. role: name1 and role: name2 with context.
+	// domain is a prefix to the roles (can be used for other purposes).
+	DeleteLinkCtx(ctx context.Context, name1 string, name2 string, domain ...string) error
+	// HasLinkCtx determines whether a link exists between two roles. role: name1 inherits role: name2 with context.
+	// domain is a prefix to the roles (can be used for other purposes).
+	HasLinkCtx(ctx context.Context, name1 string, name2 string, domain ...string) (bool, error)
+	// GetRolesCtx gets the roles that a user inherits with context.
+	// domain is a prefix to the roles (can be used for other purposes).
+	GetRolesCtx(ctx context.Context, name string, domain ...string) ([]string, error)
+	// GetUsersCtx gets the users that inherit a role with context.
+	// domain is a prefix to the users (can be used for other purposes).
+	GetUsersCtx(ctx context.Context, name string, domain ...string) ([]string, error)
+	// GetDomainsCtx gets domains that a user has with context.
+	GetDomainsCtx(ctx context.Context, name string) ([]string, error)
+	// GetAllDomainsCtx gets all domains with context.
+	GetAllDomainsCtx(ctx context.Context) ([]string, error)
+}
diff --git a/vendor/github.com/casbin/casbin/v2/rbac/default-role-manager/role_manager.go b/vendor/github.com/casbin/casbin/v2/rbac/default-role-manager/role_manager.go
index a0f01048..3b6ad8b4 100644
--- a/vendor/github.com/casbin/casbin/v2/rbac/default-role-manager/role_manager.go
+++ b/vendor/github.com/casbin/casbin/v2/rbac/default-role-manager/role_manager.go
@@ -19,7 +19,6 @@ import (
 	"strings"
 	"sync"
 
-	"github.com/casbin/casbin/v2/errors"
 	"github.com/casbin/casbin/v2/log"
 	"github.com/casbin/casbin/v2/rbac"
 	"github.com/casbin/casbin/v2/util"
@@ -29,11 +28,13 @@ const defaultDomain string = ""
 
 // Role represents the data structure for a role in RBAC.
 type Role struct {
-	name      string
-	roles     *sync.Map
-	users     *sync.Map
-	matched   *sync.Map
-	matchedBy *sync.Map
+	name                       string
+	roles                      *sync.Map
+	users                      *sync.Map
+	matched                    *sync.Map
+	matchedBy                  *sync.Map
+	linkConditionFuncMap       *sync.Map
+	linkConditionFuncParamsMap *sync.Map
 }
 
 func newRole(name string) *Role {
@@ -43,6 +44,8 @@ func newRole(name string) *Role {
 	r.users = &sync.Map{}
 	r.matched = &sync.Map{}
 	r.matchedBy = &sync.Map{}
+	r.linkConditionFuncMap = &sync.Map{}
+	r.linkConditionFuncParamsMap = &sync.Map{}
 
 	return &r
 }
@@ -163,6 +166,35 @@ func (r *Role) getUsers() []string {
 	return names
 }
 
+type linkConditionFuncKey struct {
+	roleName   string
+	domainName string
+}
+
+func (r *Role) addLinkConditionFunc(role *Role, domain string, fn rbac.LinkConditionFunc) {
+	r.linkConditionFuncMap.Store(linkConditionFuncKey{role.name, domain}, fn)
+}
+
+func (r *Role) getLinkConditionFunc(role *Role, domain string) (rbac.LinkConditionFunc, bool) {
+	fn, ok := r.linkConditionFuncMap.Load(linkConditionFuncKey{role.name, domain})
+	if fn == nil {
+		return nil, ok
+	}
+	return fn.(rbac.LinkConditionFunc), ok
+}
+
+func (r *Role) setLinkConditionFuncParams(role *Role, domain string, params ...string) {
+	r.linkConditionFuncParamsMap.Store(linkConditionFuncKey{role.name, domain}, params)
+}
+
+func (r *Role) getLinkConditionFuncParams(role *Role, domain string) ([]string, bool) {
+	params, ok := r.linkConditionFuncParamsMap.Load(linkConditionFuncKey{role.name, domain})
+	if params == nil {
+		return nil, ok
+	}
+	return params.([]string), ok
+}
+
 // RoleManagerImpl provides a default implementation for the RoleManager interface
 type RoleManagerImpl struct {
 	allRoles *sync.Map
@@ -500,10 +532,8 @@ func (dm *DomainManager) getDomain(domains ...string) (domain string, err error)
 	switch len(domains) {
 	case 0:
 		return defaultDomain, nil
-	case 1:
-		return domains[0], nil
 	default:
-		return "", errors.ErrDomainParameter
+		return domains[0], nil
 	}
 }
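The `getDomain` change is behavioural: passing more than one domain used to return `errors.ErrDomainParameter`; now extra arguments are ignored and the first domain wins. Restated as a standalone sketch of the new selection logic:

```go
package main

import "fmt"

// firstDomain mirrors the new getDomain behaviour: empty means the
// default domain; otherwise the first argument wins, never an error.
func firstDomain(domains ...string) string {
	if len(domains) == 0 {
		return "" // defaultDomain
	}
	return domains[0]
}

func main() {
	fmt.Println(firstDomain())               // ""
	fmt.Println(firstDomain("dom1"))         // "dom1"
	fmt.Println(firstDomain("dom1", "dom2")) // "dom1" (no longer an error)
}
```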
@@ -691,3 +721,294 @@ func NewRoleManager(maxHierarchyLevel int) *RoleManager {
 	rm.DomainManager = NewDomainManager(maxHierarchyLevel)
 	return rm
 }
+
+type ConditionalRoleManager struct {
+	RoleManagerImpl
+}
+
+func (crm *ConditionalRoleManager) copyFrom(other *ConditionalRoleManager) {
+	other.Range(func(name1, name2 string, domain ...string) bool {
+		_ = crm.AddLink(name1, name2, domain...)
+		return true
+	})
+}
+
+// use this constructor to avoid a rebuild of AddMatchingFunc
+func newConditionalRoleManagerWithMatchingFunc(maxHierarchyLevel int, fn rbac.MatchingFunc) *ConditionalRoleManager {
+	rm := NewConditionalRoleManager(maxHierarchyLevel)
+	rm.matchingFunc = fn
+	return rm
+}
+
+// NewConditionalRoleManager is the constructor for creating an instance of the
+// ConditionalRoleManager implementation.
+func NewConditionalRoleManager(maxHierarchyLevel int) *ConditionalRoleManager {
+	rm := ConditionalRoleManager{}
+	_ = rm.Clear() // init allRoles and matchingFuncCache
+	rm.maxHierarchyLevel = maxHierarchyLevel
+	rm.SetLogger(&log.DefaultLogger{})
+	return &rm
+}
+
+// HasLink determines whether role: name1 inherits role: name2.
+func (crm *ConditionalRoleManager) HasLink(name1 string, name2 string, domains ...string) (bool, error) {
+	if name1 == name2 || (crm.matchingFunc != nil && crm.Match(name1, name2)) {
+		return true, nil
+	}
+
+	user, userCreated := crm.getRole(name1)
+	role, roleCreated := crm.getRole(name2)
+
+	if userCreated {
+		defer crm.removeRole(user.name)
+	}
+	if roleCreated {
+		defer crm.removeRole(role.name)
+	}
+
+	return crm.hasLinkHelper(role.name, map[string]*Role{user.name: user}, crm.maxHierarchyLevel, domains...), nil
+}
+
+// hasLinkHelper uses the Breadth-First Search algorithm to traverse the Role tree:
+// deciding whether the user has a role (has a link) amounts to deciding whether the role node is reachable from the user node.
+func (crm *ConditionalRoleManager) hasLinkHelper(targetName string, roles map[string]*Role, level int, domains ...string) bool {
+	if level < 0 || len(roles) == 0 {
+		return false
+	}
+	nextRoles := map[string]*Role{}
+	for _, role := range roles {
+		if targetName == role.name || (crm.matchingFunc != nil && crm.Match(role.name, targetName)) {
+			return true
+		}
+		role.rangeRoles(func(key, value interface{}) bool {
+			nextRole := value.(*Role)
+			return crm.getNextRoles(role, nextRole, domains, nextRoles)
+		})
+	}
+
+	return crm.hasLinkHelper(targetName, nextRoles, level-1)
+}
+
+func (crm *ConditionalRoleManager) getNextRoles(currentRole, nextRole *Role, domains []string, nextRoles map[string]*Role) bool {
+	passLinkConditionFunc := true
+	var err error
+	// If a LinkConditionFunc exists, it needs to pass verification before nextRole is accepted
+	if len(domains) == 0 {
+		if linkConditionFunc, existLinkCondition := crm.GetLinkConditionFunc(currentRole.name, nextRole.name); existLinkCondition {
+			params, _ := crm.GetLinkConditionFuncParams(currentRole.name, nextRole.name)
+			passLinkConditionFunc, err = linkConditionFunc(params...)
+		}
+	} else {
+		if linkConditionFunc, existLinkCondition := crm.GetDomainLinkConditionFunc(currentRole.name, nextRole.name, domains[0]); existLinkCondition {
+			params, _ := crm.GetLinkConditionFuncParams(currentRole.name, nextRole.name, domains[0])
+			passLinkConditionFunc, err = linkConditionFunc(params...)
+		}
+	}
+
+	if err != nil {
+		crm.logger.LogError(err, "hasLinkHelper LinkCondition Error")
+		return false
+	}
+
+	if passLinkConditionFunc {
+		nextRoles[nextRole.name] = nextRole
+	}
+
+	return true
+}
+
+// GetLinkConditionFunc gets the LinkConditionFunc based on userName, roleName
+func (crm *ConditionalRoleManager) GetLinkConditionFunc(userName, roleName string) (rbac.LinkConditionFunc, bool) {
+	return crm.GetDomainLinkConditionFunc(userName, roleName, defaultDomain)
+}
+
+// GetDomainLinkConditionFunc gets the LinkConditionFunc based on userName, roleName, domain
+func (crm *ConditionalRoleManager) GetDomainLinkConditionFunc(userName, roleName, domain string) (rbac.LinkConditionFunc, bool) {
+	user, userCreated := crm.getRole(userName)
+	role, roleCreated := crm.getRole(roleName)
+
+	if userCreated {
+		crm.removeRole(user.name)
+		return nil, false
+	}
+
+	if roleCreated {
+		crm.removeRole(role.name)
+		return nil, false
+	}
+
+	return user.getLinkConditionFunc(role, domain)
+}
+
+// GetLinkConditionFuncParams gets the parameters of the LinkConditionFunc based on userName, roleName, domain
+func (crm *ConditionalRoleManager) GetLinkConditionFuncParams(userName, roleName string, domain ...string) ([]string, bool) {
+	user, userCreated := crm.getRole(userName)
+	role, roleCreated := crm.getRole(roleName)
+
+	if userCreated {
+		crm.removeRole(user.name)
+		return nil, false
+	}
+
+	if roleCreated {
+		crm.removeRole(role.name)
+		return nil, false
+	}
+
+	domainName := defaultDomain
+	if len(domain) != 0 {
+		domainName = domain[0]
+	}
+
+	if params, ok := user.getLinkConditionFuncParams(role, domainName); ok {
+		return params, true
+	} else {
+		return nil, false
+	}
+}
+
+// AddLinkConditionFunc adds a LinkConditionFunc based on userName, roleName
+func (crm *ConditionalRoleManager) AddLinkConditionFunc(userName, roleName string, fn rbac.LinkConditionFunc) {
+	crm.AddDomainLinkConditionFunc(userName, roleName, defaultDomain, fn)
+}
+
+// AddDomainLinkConditionFunc adds a LinkConditionFunc based on userName, roleName, domain
+func (crm *ConditionalRoleManager) AddDomainLinkConditionFunc(userName, roleName, domain string, fn rbac.LinkConditionFunc) {
+	user, _ := crm.getRole(userName)
+	role, _ := crm.getRole(roleName)
+
+	user.addLinkConditionFunc(role, domain, fn)
+}
+
+// SetLinkConditionFuncParams sets the parameters of the LinkConditionFunc based on userName, roleName
+func (crm *ConditionalRoleManager) SetLinkConditionFuncParams(userName, roleName string, params ...string) {
+	crm.SetDomainLinkConditionFuncParams(userName, roleName, defaultDomain, params...)
+}
+
+// SetDomainLinkConditionFuncParams sets the parameters of the LinkConditionFunc based on userName, roleName, domain
+func (crm *ConditionalRoleManager) SetDomainLinkConditionFuncParams(userName, roleName, domain string, params ...string) {
+	user, _ := crm.getRole(userName)
+	role, _ := crm.getRole(roleName)
+
+	user.setLinkConditionFuncParams(role, domain, params...)
+}
+
+type ConditionalDomainManager struct {
+	ConditionalRoleManager
+	DomainManager
+}
+func NewConditionalDomainManager(maxHierarchyLevel int) *ConditionalDomainManager { + rm := ConditionalDomainManager{} + _ = rm.Clear() //init allRoles and matchingFuncCache + rm.maxHierarchyLevel = maxHierarchyLevel + rm.SetLogger(&log.DefaultLogger{}) + return &rm +} + +func (dm *ConditionalDomainManager) load(name interface{}) (value *ConditionalRoleManager, ok bool) { + if r, ok := dm.rmMap.Load(name); ok { + return r.(*ConditionalRoleManager), true + } + return nil, false +} + +// load or create a ConditionalRoleManager instance of domain +func (cdm *ConditionalDomainManager) getConditionalRoleManager(domain string, store bool) *ConditionalRoleManager { + var rm *ConditionalRoleManager + var ok bool + + if rm, ok = cdm.load(domain); !ok { + rm = newConditionalRoleManagerWithMatchingFunc(cdm.maxHierarchyLevel, cdm.matchingFunc) + if store { + cdm.rmMap.Store(domain, rm) + } + if cdm.domainMatchingFunc != nil { + cdm.rmMap.Range(func(key, value interface{}) bool { + domain2 := key.(string) + rm2 := value.(*ConditionalRoleManager) + if domain != domain2 && cdm.Match(domain, domain2) { + rm.copyFrom(rm2) + } + return true + }) + } + } + return rm +} + +// HasLink determines whether role: name1 inherits role: name2. +func (cdm *ConditionalDomainManager) HasLink(name1 string, name2 string, domains ...string) (bool, error) { + domain, err := cdm.getDomain(domains...) + if err != nil { + return false, err + } + rm := cdm.getConditionalRoleManager(domain, false) + return rm.HasLink(name1, name2, domains...) +} + +// AddLink adds the inheritance link between role: name1 and role: name2. +// aka role: name1 inherits role: name2. +func (cdm *ConditionalDomainManager) AddLink(name1 string, name2 string, domains ...string) error { + domain, err := cdm.getDomain(domains...) + if err != nil { + return err + } + conditionalRoleManager := cdm.getConditionalRoleManager(domain, true) //create role manager if it does not exist + _ = conditionalRoleManager.AddLink(name1, name2, domain) + + cdm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { + _ = rm.AddLink(name1, name2, domain) + }) + return nil +} + +// DeleteLink deletes the inheritance link between role: name1 and role: name2. +// aka role: name1 does not inherit role: name2 any more. +func (cdm *ConditionalDomainManager) DeleteLink(name1 string, name2 string, domains ...string) error { + domain, err := cdm.getDomain(domains...) 
+ if err != nil { + return err + } + conditionalRoleManager := cdm.getConditionalRoleManager(domain, true) //create role manager if it does not exist + _ = conditionalRoleManager.DeleteLink(name1, name2, domain) + + cdm.rangeAffectedRoleManagers(domain, func(rm *RoleManagerImpl) { + _ = rm.DeleteLink(name1, name2, domain) + }) + return nil +} + +// AddLinkConditionFunc is based on userName, roleName, add LinkConditionFunc +func (cdm *ConditionalDomainManager) AddLinkConditionFunc(userName, roleName string, fn rbac.LinkConditionFunc) { + cdm.rmMap.Range(func(key, value interface{}) bool { + value.(*ConditionalRoleManager).AddLinkConditionFunc(userName, roleName, fn) + return true + }) +} + +// AddDomainLinkConditionFunc is based on userName, roleName, domain, add LinkConditionFunc +func (cdm *ConditionalDomainManager) AddDomainLinkConditionFunc(userName, roleName, domain string, fn rbac.LinkConditionFunc) { + cdm.rmMap.Range(func(key, value interface{}) bool { + value.(*ConditionalRoleManager).AddDomainLinkConditionFunc(userName, roleName, domain, fn) + return true + }) +} + +// SetLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName +func (cdm *ConditionalDomainManager) SetLinkConditionFuncParams(userName, roleName string, params ...string) { + cdm.rmMap.Range(func(key, value interface{}) bool { + value.(*ConditionalRoleManager).SetLinkConditionFuncParams(userName, roleName, params...) + return true + }) +} + +// SetDomainLinkConditionFuncParams sets parameters of LinkConditionFunc based on userName, roleName, domain +func (cdm *ConditionalDomainManager) SetDomainLinkConditionFuncParams(userName, roleName, domain string, params ...string) { + cdm.rmMap.Range(func(key, value interface{}) bool { + value.(*ConditionalRoleManager).SetDomainLinkConditionFuncParams(userName, roleName, domain, params...) + return true + }) +} diff --git a/vendor/github.com/casbin/casbin/v2/rbac/role_manager.go b/vendor/github.com/casbin/casbin/v2/rbac/role_manager.go index 18b4ca8f..d8a36691 100644 --- a/vendor/github.com/casbin/casbin/v2/rbac/role_manager.go +++ b/vendor/github.com/casbin/casbin/v2/rbac/role_manager.go @@ -14,14 +14,12 @@ package rbac -import ( - "context" - - "github.com/casbin/casbin/v2/log" -) +import "github.com/casbin/casbin/v2/log" type MatchingFunc func(arg1 string, arg2 string) bool +type LinkConditionFunc = func(args ...string) (bool, error) + // RoleManager provides interface to define the operations for managing roles. type RoleManager interface { // Clear clears all stored data and resets the role manager to the initial state. @@ -59,33 +57,20 @@ type RoleManager interface { AddDomainMatchingFunc(name string, fn MatchingFunc) } -// RoleManagerWithContext provides a context-aware interface to define the operations for managing roles. -// Prefer this over RoleManager interface for context propagation, which is useful for things like handling -// request timeouts. -type RoleManagerWithContext interface { - // Clear clears all stored data and resets the role manager to the initial state. - Clear(ctx context.Context) error - // AddLink adds the inheritance link between two roles. role: name1 and role: name2. - // domain is a prefix to the roles (can be used for other purposes). - AddLink(ctx context.Context, name1 string, name2 string, domain ...string) error - // DeleteLink deletes the inheritance link between two roles. role: name1 and role: name2. - // domain is a prefix to the roles (can be used for other purposes). 
- DeleteLink(ctx context.Context, name1 string, name2 string, domain ...string) error - // HasLink determines whether a link exists between two roles. role: name1 inherits role: name2. - // domain is a prefix to the roles (can be used for other purposes). - HasLink(ctx context.Context, name1 string, name2 string, domain ...string) (bool, error) - // GetRoles gets the roles that a user inherits. - // domain is a prefix to the roles (can be used for other purposes). - GetRoles(ctx context.Context, name string, domain ...string) ([]string, error) - // GetUsers gets the users that inherits a role. - // domain is a prefix to the users (can be used for other purposes). - GetUsers(ctx context.Context, name string, domain ...string) ([]string, error) - // GetDomains gets domains that a user has - GetDomains(ctx context.Context, name string) ([]string, error) - // GetAllDomains gets all domains - GetAllDomains(ctx context.Context) ([]string, error) - // PrintRoles prints all the roles to log. - PrintRoles() error - // SetLogger sets role manager's logger. - SetLogger(logger log.Logger) +// ConditionalRoleManager provides interface to define the operations for managing roles. +// Link with conditions is supported +type ConditionalRoleManager interface { + RoleManager + + // AddLinkConditionFunc Add condition function fn for Link userName->roleName, + // when fn returns true, Link is valid, otherwise invalid + AddLinkConditionFunc(userName, roleName string, fn LinkConditionFunc) + // SetLinkConditionFuncParams Sets the parameters of the condition function fn for Link userName->roleName + SetLinkConditionFuncParams(userName, roleName string, params ...string) + // AddDomainLinkConditionFunc Add condition function fn for Link userName-> {roleName, domain}, + // when fn returns true, Link is valid, otherwise invalid + AddDomainLinkConditionFunc(user string, role string, domain string, fn LinkConditionFunc) + // SetDomainLinkConditionFuncParams Sets the parameters of the condition function fn + // for Link userName->{roleName, domain} + SetDomainLinkConditionFuncParams(user string, role string, domain string, params ...string) } diff --git a/vendor/github.com/casbin/casbin/v2/util/builtin_operators.go b/vendor/github.com/casbin/casbin/v2/util/builtin_operators.go index 41713806..2206d076 100644 --- a/vendor/github.com/casbin/casbin/v2/util/builtin_operators.go +++ b/vendor/github.com/casbin/casbin/v2/util/builtin_operators.go @@ -22,9 +22,11 @@ import ( "regexp" "strings" "sync" + "time" + + "github.com/casbin/casbin/v2/rbac" "github.com/Knetic/govaluate" - "github.com/casbin/casbin/v2/rbac" ) var ( @@ -47,6 +49,14 @@ func validateVariadicArgs(expectedLen int, args ...interface{}) error { return nil } +// validate the variadic string parameter size +func validateVariadicStringArgs(expectedLen int, args ...string) error { + if len(args) != expectedLen { + return fmt.Errorf("expected %d arguments, but got %d", expectedLen, len(args)) + } + return nil +} + // KeyMatch determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a *. 
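The new `rbac.ConditionalRoleManager` interface above is the public surface for the conditional links implemented earlier in this diff: a link added through the embedded `RoleManager` only holds while its registered `LinkConditionFunc` returns true. A minimal usage sketch, assuming `crm` is any implementation of the interface (for example the vendored default role manager) — the enforcer/model wiring that normally drives these calls is elided:

```go
package example

import (
	"fmt"

	"github.com/casbin/casbin/v2/rbac"
)

// demo shows the conditional-link flow end to end.
func demo(crm rbac.ConditionalRoleManager) {
	// alice inherits admin via the embedded RoleManager...
	_ = crm.AddLink("alice", "admin")

	// ...but only while this condition returns true. It receives the
	// string params registered below.
	crm.AddLinkConditionFunc("alice", "admin", func(args ...string) (bool, error) {
		return args[0] == "active", nil
	})
	crm.SetLinkConditionFuncParams("alice", "admin", "active")

	ok, _ := crm.HasLink("alice", "admin")
	fmt.Println(ok) // true while the condition passes
}
```

The vendored `TimeMatchFunc` further down in this file is a ready-made `LinkConditionFunc` for the common case: register a start and end timestamp in the `"2006-01-02 15:04:05"` layout (or `"_"` to leave a bound open) via `SetLinkConditionFuncParams`.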
// For example, "/foo/bar" matches "/foo/*" func KeyMatch(key1 string, key2 string) bool { @@ -273,7 +283,7 @@ func KeyMatch4Func(args ...interface{}) (interface{}, error) { } // KeyMatch5 determines whether key1 matches the pattern of key2 (similar to RESTful path), key2 can contain a * -// For example, +// For example, // - "/foo/bar?status=1&type=2" matches "/foo/bar" // - "/parent/child1" and "/parent/child1" matches "/parent/*" // - "/parent/child1?status=1" matches "/parent/*" @@ -417,3 +427,56 @@ func GenerateGFunction(rm rbac.RoleManager) govaluate.ExpressionFunction { return v, nil } } + +// GenerateConditionalGFunction is the factory method of the g(_, _[, _]) function with conditions. +func GenerateConditionalGFunction(crm rbac.ConditionalRoleManager) govaluate.ExpressionFunction { + return func(args ...interface{}) (interface{}, error) { + // Like all our other govaluate functions, all args are strings. + var hasLink bool + + name1, name2 := args[0].(string), args[1].(string) + if crm == nil { + hasLink = name1 == name2 + } else if len(args) == 2 { + hasLink, _ = crm.HasLink(name1, name2) + } else { + domain := args[2].(string) + hasLink, _ = crm.HasLink(name1, name2, domain) + } + + return hasLink, nil + } +} + +// builtin LinkConditionFunc + +// TimeMatchFunc is the wrapper for TimeMatch. +func TimeMatchFunc(args ...string) (bool, error) { + if err := validateVariadicStringArgs(2, args...); err != nil { + return false, fmt.Errorf("%s: %s", "TimeMatch", err) + } + return TimeMatch(args[0], args[1]) +} + +// TimeMatch determines whether the current time is between startTime and endTime. +// You can use "_" to indicate that the parameter is ignored +func TimeMatch(startTime, endTime string) (bool, error) { + now := time.Now() + if startTime != "_" { + if start, err := time.Parse("2006-01-02 15:04:05", startTime); err != nil { + return false, err + } else if !now.After(start) { + return false, nil + } + } + + if endTime != "_" { + if end, err := time.Parse("2006-01-02 15:04:05", endTime); err != nil { + return false, err + } else if !now.Before(end) { + return false, nil + } + } + + return true, nil +} diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore new file mode 100644 index 00000000..e256a31e --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.gitignore @@ -0,0 +1,20 @@ +# OSX leaves these everywhere on SMB shares +._* + +# Eclipse files +.classpath +.project +.settings/** + +# Emacs save files +*~ + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +# Go test binaries +*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml new file mode 100644 index 00000000..0e9d6edc --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.3 + - 1.4 +script: + - go test + - go build diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE new file mode 100644 index 00000000..7805d36d --- /dev/null +++ b/vendor/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the 
Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md new file mode 100644 index 00000000..0200f75b --- /dev/null +++ b/vendor/github.com/ghodss/yaml/README.md @@ -0,0 +1,121 @@ +# YAML marshaling and unmarshaling support for Go + +[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) + +## Introduction + +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. + +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). + +## Compatibility + +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). + +## Caveats + +**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. 
If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: + +``` +BAD: + exampleKey: !!binary gIGC + +GOOD: + exampleKey: gIGC +... and decode the base64 data in your code. +``` + +**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. + +## Installation and usage + +To install, run: + +``` +$ go get github.com/ghodss/yaml +``` + +And import using: + +``` +import "github.com/ghodss/yaml" +``` + +Usage is very similar to the JSON library: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +type Person struct { + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` +} + +func main() { + // Marshal a Person struct to YAML. + p := Person{"John", 30} + y, err := yaml.Marshal(p) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + age: 30 + name: John + */ + + // Unmarshal the YAML back into a Person struct. + var p2 Person + err = yaml.Unmarshal(y, &p2) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(p2) + /* Output: + {John 30} + */ +} +``` + +`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +func main() { + j := []byte(`{"name": "John", "age": 30}`) + y, err := yaml.JSONToYAML(j) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + name: John + age: 30 + */ + j2, err := yaml.YAMLToJSON(y) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(j2)) + /* Output: + {"age":30,"name":"John"} + */ +} +``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go new file mode 100644 index 00000000..58600740 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/fields.go @@ -0,0 +1,501 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package yaml + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. 
+ if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
+ f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. 
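The fold helpers in this file (`foldFunc` and the specializations above and below) replicate `encoding/json`'s case-insensitive field matching, which is what lets YAML keys resolve to struct fields regardless of case. A small sketch of the observable behavior, assuming the usual ghodss/yaml import:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

type T struct {
	Name string `json:"name"`
}

func main() {
	var t T
	// "NAME" is matched to the "name" json tag by the fold functions.
	if err := yaml.Unmarshal([]byte("NAME: demo"), &t); err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(t.Name) // demo
}
```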
+func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go new file mode 100644 index 00000000..4fb4054a --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -0,0 +1,277 @@ +package yaml + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strconv" + + "gopkg.in/yaml.v2" +) + +// Marshals the object into JSON then converts JSON to YAML and returns the +// YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// Converts YAML to JSON then uses JSON to unmarshal into an object. +func Unmarshal(y []byte, o interface{}) error { + vo := reflect.ValueOf(o) + j, err := yamlToJSON(y, &vo) + if err != nil { + return fmt.Errorf("error converting YAML to JSON: %v", err) + } + + err = json.Unmarshal(j, o) + if err != nil { + return fmt.Errorf("error unmarshaling JSON: %v", err) + } + + return nil +} + +// Convert JSON to YAML. +func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + err := yaml.Unmarshal(j, &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. + return yaml.Marshal(jsonObj) +} + +// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through +// this method should be a no-op. 
+// +// Things YAML can do that are not supported by JSON: +// * In YAML you can have binary and null keys in your maps. These are invalid +// in JSON. (int and float keys are converted to strings.) +// * Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +func YAMLToJSON(y []byte) ([]byte, error) { + return yamlToJSON(y, nil) +} + +func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { + // Convert the YAML to an object. + var yamlObj interface{} + err := yaml.Unmarshal(y, &yamlObj) + if err != nil { + return nil, err + } + + // YAML objects are not completely compatible with JSON objects (e.g. you + // can have non-string keys in YAML). So, convert the YAML-compatible object + // to a JSON-compatible object, failing with an error if irrecoverable + // incompatibilties happen along the way. + jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) + if err != nil { + return nil, err + } + + // Convert this object to JSON and return the data. + return json.Marshal(jsonObj) +} + +func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { + var err error + + // Resolve jsonTarget to a concrete value (i.e. not a pointer or an + // interface). We pass decodingNull as false because we're not actually + // decoding into the value, we're just checking if the ultimate target is a + // string. + if jsonTarget != nil { + ju, tu, pv := indirect(*jsonTarget, false) + // We have a JSON or Text Umarshaler at this level, so we can't be trying + // to decode into a string. + if ju != nil || tu != nil { + jsonTarget = nil + } else { + jsonTarget = &pv + } + } + + // If yamlObj is a number or a boolean, check if jsonTarget is a string - + // if so, coerce. Else return normal. + // If yamlObj is a map or array, find the field that each key is + // unmarshaling to, and when you recurse pass the reflect.Value for that + // field back into this function. + switch typedYAMLObj := yamlObj.(type) { + case map[interface{}]interface{}: + // JSON does not support arbitrary keys in a map, so we must convert + // these keys to strings. + // + // From my reading of go-yaml v2 (specifically the resolve function), + // keys can only have the types string, int, int64, float64, binary + // (unsupported), or null (unsupported). + strMap := make(map[string]interface{}) + for k, v := range typedYAMLObj { + // Resolve the key to a string first. + var keyString string + switch typedKey := k.(type) { + case string: + keyString = typedKey + case int: + keyString = strconv.Itoa(typedKey) + case int64: + // go-yaml will only return an int64 as a key if the system + // architecture is 32-bit and the key's value is between 32-bit + // and 64-bit. Otherwise the key type will simply be int. + keyString = strconv.FormatInt(typedKey, 10) + case float64: + // Stolen from go-yaml to use the same conversion to string as + // the go-yaml library uses to convert float to string when + // Marshaling. 
+ s := strconv.FormatFloat(typedKey, 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + keyString = s + case bool: + if typedKey { + keyString = "true" + } else { + keyString = "false" + } + default: + return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + reflect.TypeOf(k), k, v) + } + + // jsonTarget should be a struct or a map. If it's a struct, find + // the field it's going to map to and pass its reflect.Value. If + // it's a map, find the element type of the map and pass the + // reflect.Value created from that type. If it's neither, just pass + // nil - JSON conversion will error for us if it's a real issue. + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Struct { + keyBytes := []byte(keyString) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, keyBytes) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential + // struct field. + jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. + jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. 
+ var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } + + return nil, nil +} diff --git a/vendor/github.com/go-ole/go-ole/SECURITY.md b/vendor/github.com/go-ole/go-ole/SECURITY.md new file mode 100644 index 00000000..dac28152 --- /dev/null +++ b/vendor/github.com/go-ole/go-ole/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +Security updates are applied only to the latest release. + +## Reporting a Vulnerability + +If you have discovered a security vulnerability in this project, please report it privately. **Do not disclose it as a public issue.** This gives us time to work with you to fix the issue before public exposure, reducing the chance that the exploit will be used before a patch is released. + +Please disclose it at [security advisory](https://github.com/go-ole/go-ole/security/advisories/new). + +This project is maintained by a team of volunteers on a reasonable-effort basis. As such, please give us at least 90 days to work on a fix before public exposure. diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml index 0d557ac2..8df7fa26 100644 --- a/vendor/github.com/go-ole/go-ole/appveyor.yml +++ b/vendor/github.com/go-ole/go-ole/appveyor.yml @@ -6,7 +6,29 @@ version: "1.3.0.{build}-alpha-{branch}" -os: Windows Server 2012 R2 +os: Visual Studio 2019 + +build: off + +skip_tags: true + +clone_folder: c:\gopath\src\github.com\go-ole\go-ole + +environment: + GOPATH: c:\gopath + GOROOT: c:\go + DOWNLOADPLATFORM: "x64" + +before_test: + # - Download COM Server + - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip" + - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL + - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat + +test_script: + - go test -v -cover ./... + # go vet has false positives on unsafe.Pointer with windows/sys. Disabling since it is recommended to use go test instead. + # - go vet ./... branches: only: @@ -15,26 +37,19 @@ branches: - v1.1 - v1.0 -skip_tags: true - -clone_folder: c:\gopath\src\github.com\go-ole\go-ole - -environment: - GOPATH: c:\gopath - matrix: - - GOARCH: amd64 - GOVERSION: 1.5 - GOROOT: c:\go - DOWNLOADPLATFORM: "x64" +matrix: + allow_failures: + - environment: + GOROOT: C:\go-x86 + DOWNLOADPLATFORM: "x86" + - environment: + GOROOT: C:\go118 + DOWNLOADPLATFORM: "x64" + - environment: + GOROOT: C:\go118-x86 + DOWNLOADPLATFORM: "x86" install: - - choco install mingw - - SET PATH=c:\tools\mingw64\bin;%PATH% - # - Download COM Server - - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip" - - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL - - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat - # - set - go version - go env - go get -u golang.org/x/tools/cmd/cover @@ -45,10 +60,9 @@ build_script: - cd c:\gopath\src\github.com\go-ole\go-ole - go get -v -t ./... - go build - - go test -v -cover ./... 
# disable automatic tests -test: off +test: on # disable deployment deploy: off diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go index a9bef150..cabbac01 100644 --- a/vendor/github.com/go-ole/go-ole/com.go +++ b/vendor/github.com/go-ole/go-ole/com.go @@ -11,6 +11,7 @@ import ( var ( procCoInitialize = modole32.NewProc("CoInitialize") procCoInitializeEx = modole32.NewProc("CoInitializeEx") + procCoInitializeSecurity = modole32.NewProc("CoInitializeSecurity") procCoUninitialize = modole32.NewProc("CoUninitialize") procCoCreateInstance = modole32.NewProc("CoCreateInstance") procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") @@ -37,6 +38,9 @@ var ( procDispatchMessageW = moduser32.NewProc("DispatchMessageW") ) +// This is to enable calling COM Security initialization multiple times +var bSecurityInit bool = false + // coInitialize initializes COM library on current thread. // // MSDN documentation suggests that this function should not be called. Call @@ -68,6 +72,35 @@ func coInitializeEx(coinit uint32) (err error) { return } +// coInitializeSecurity: Registers security and sets the default security values +// for the process. +func coInitializeSecurity(cAuthSvc int32, + dwAuthnLevel uint32, + dwImpLevel uint32, + dwCapabilities uint32) (err error) { + // Check COM Security initialization has done previously + if !bSecurityInit { + // https://learn.microsoft.com/en-us/windows/win32/api/combaseapi/nf-combaseapi-coinitializesecurity + hr, _, _ := procCoInitializeSecurity.Call( + uintptr(0), // Allow *all* VSS writers to communicate back! + uintptr(cAuthSvc), // Default COM authentication service + uintptr(0), // Default COM authorization service + uintptr(0), // Reserved parameter + uintptr(dwAuthnLevel), // Strongest COM authentication level + uintptr(dwImpLevel), // Minimal impersonation abilities + uintptr(0), // Default COM authentication settings + uintptr(dwCapabilities), // Cloaking + uintptr(0)) // eserved parameter + if hr != 0 { + err = NewError(hr) + } else { + // COM Security initialization done make global flag true. + bSecurityInit = true + } + } + return +} + // CoInitialize initializes COM library on current thread. // // MSDN documentation suggests that this function should not be called. Call @@ -96,6 +129,15 @@ func CoUninitialize() { procCoUninitialize.Call() } +// CoInitializeSecurity: Registers security and sets the default security values +// for the process. +func CoInitializeSecurity(cAuthSvc int32, + dwAuthnLevel uint32, + dwImpLevel uint32, + dwCapabilities uint32) (err error) { + return coInitializeSecurity(cAuthSvc, dwAuthnLevel, dwImpLevel, dwCapabilities) +} + // CoTaskMemFree frees memory pointer. 
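For context on the new `CoInitializeSecurity` wrapper above: COM security defaults can only be registered once per process, which is why the vendored code guards the call with `bSecurityInit`. A hedged, Windows-only sketch of a typical call order — the numeric levels are standard Windows RPC_C_* values chosen here for illustration, not values mandated by go-ole:

```go
//go:build windows

package comsetup

import ole "github.com/go-ole/go-ole"

// initCOM initializes COM on the current thread, then registers
// process-wide security defaults. Call ole.CoUninitialize when done.
func initCOM() error {
	if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
		return err
	}

	// Repeated calls are ignored thanks to the bSecurityInit guard.
	// -1: let COM pick the authentication services
	//  6: RPC_C_AUTHN_LEVEL_PKT_PRIVACY
	//  3: RPC_C_IMP_LEVEL_IMPERSONATE
	//  0: no extra capability flags
	return ole.CoInitializeSecurity(-1, 6, 3, 0)
}
```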
func CoTaskMemFree(memptr uintptr) { procCoTaskMemFree.Call(memptr) diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go index b399f047..649c0734 100644 --- a/vendor/github.com/go-ole/go-ole/idispatch_windows.go +++ b/vendor/github.com/go-ole/go-ole/idispatch_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package ole @@ -92,7 +93,7 @@ func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{} case int8: vargs[n] = NewVariant(VT_I1, int64(v.(int8))) case *int8: - vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8))))) + vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int8))))) case int16: vargs[n] = NewVariant(VT_I2, int64(v.(int16))) case *int16: diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go index 967a23fe..a2c8402f 100644 --- a/vendor/github.com/go-ole/go-ole/variant.go +++ b/vendor/github.com/go-ole/go-ole/variant.go @@ -99,7 +99,7 @@ func (v *VARIANT) Value() interface{} { case VT_DISPATCH: return v.ToIDispatch() case VT_BOOL: - return v.Val != 0 + return (v.Val & 0xffff) != 0 } return nil } diff --git a/vendor/github.com/go-playground/validator/v10/Makefile b/vendor/github.com/go-playground/validator/v10/Makefile index ec3455bd..09f171ba 100644 --- a/vendor/github.com/go-playground/validator/v10/Makefile +++ b/vendor/github.com/go-playground/validator/v10/Makefile @@ -13,6 +13,6 @@ test: $(GOCMD) test -cover -race ./... bench: - $(GOCMD) test -bench=. -benchmem ./... + $(GOCMD) test -run=NONE -bench=. -benchmem ./... .PHONY: test lint linters-install \ No newline at end of file diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md index b2e0e2d9..30b60f80 100644 --- a/vendor/github.com/go-playground/validator/v10/README.md +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -1,7 +1,7 @@ Package validator ================= [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![Project status](https://img.shields.io/badge/version-10.15.0-green.svg) +![Project status](https://img.shields.io/badge/version-10.15.3-green.svg) [![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) @@ -67,6 +67,12 @@ Please see https://pkg.go.dev/github.com/go-playground/validator/v10 for detaile Baked-in Validations ------ +### Special Notes: +- If new to using validator it is highly recommended to initialize it using the `WithRequiredStructEnabled` option which is opt-in to new behaviour that will become the default behaviour in v11+. See documentation for more details. 
+```go +validate := validator.New(validator.WithRequiredStructEnabled()) +``` + ### Fields: | Tag | Description | @@ -260,71 +266,72 @@ Benchmarks ------ ###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64 ```go +go version go1.21.0 darwin/arm64 goos: darwin -goarch: amd64 -pkg: github.com/go-playground/validator -BenchmarkFieldSuccess-8 20000000 83.6 ns/op 0 B/op 0 allocs/op -BenchmarkFieldSuccessParallel-8 50000000 26.8 ns/op 0 B/op 0 allocs/op -BenchmarkFieldFailure-8 5000000 291 ns/op 208 B/op 4 allocs/op -BenchmarkFieldFailureParallel-8 20000000 107 ns/op 208 B/op 4 allocs/op -BenchmarkFieldArrayDiveSuccess-8 2000000 623 ns/op 201 B/op 11 allocs/op -BenchmarkFieldArrayDiveSuccessParallel-8 10000000 237 ns/op 201 B/op 11 allocs/op -BenchmarkFieldArrayDiveFailure-8 2000000 859 ns/op 412 B/op 16 allocs/op -BenchmarkFieldArrayDiveFailureParallel-8 5000000 335 ns/op 413 B/op 16 allocs/op -BenchmarkFieldMapDiveSuccess-8 1000000 1292 ns/op 432 B/op 18 allocs/op -BenchmarkFieldMapDiveSuccessParallel-8 3000000 467 ns/op 432 B/op 18 allocs/op -BenchmarkFieldMapDiveFailure-8 1000000 1082 ns/op 512 B/op 16 allocs/op -BenchmarkFieldMapDiveFailureParallel-8 5000000 425 ns/op 512 B/op 16 allocs/op -BenchmarkFieldMapDiveWithKeysSuccess-8 1000000 1539 ns/op 480 B/op 21 allocs/op -BenchmarkFieldMapDiveWithKeysSuccessParallel-8 3000000 613 ns/op 480 B/op 21 allocs/op -BenchmarkFieldMapDiveWithKeysFailure-8 1000000 1413 ns/op 721 B/op 21 allocs/op -BenchmarkFieldMapDiveWithKeysFailureParallel-8 3000000 575 ns/op 721 B/op 21 allocs/op -BenchmarkFieldCustomTypeSuccess-8 10000000 216 ns/op 32 B/op 2 allocs/op -BenchmarkFieldCustomTypeSuccessParallel-8 20000000 82.2 ns/op 32 B/op 2 allocs/op -BenchmarkFieldCustomTypeFailure-8 5000000 274 ns/op 208 B/op 4 allocs/op -BenchmarkFieldCustomTypeFailureParallel-8 20000000 116 ns/op 208 B/op 4 allocs/op -BenchmarkFieldOrTagSuccess-8 2000000 740 ns/op 16 B/op 1 allocs/op -BenchmarkFieldOrTagSuccessParallel-8 3000000 474 ns/op 16 B/op 1 allocs/op -BenchmarkFieldOrTagFailure-8 3000000 471 ns/op 224 B/op 5 allocs/op -BenchmarkFieldOrTagFailureParallel-8 3000000 414 ns/op 224 B/op 5 allocs/op -BenchmarkStructLevelValidationSuccess-8 10000000 213 ns/op 32 B/op 2 allocs/op -BenchmarkStructLevelValidationSuccessParallel-8 20000000 91.8 ns/op 32 B/op 2 allocs/op -BenchmarkStructLevelValidationFailure-8 3000000 473 ns/op 304 B/op 8 allocs/op -BenchmarkStructLevelValidationFailureParallel-8 10000000 234 ns/op 304 B/op 8 allocs/op -BenchmarkStructSimpleCustomTypeSuccess-8 5000000 385 ns/op 32 B/op 2 allocs/op -BenchmarkStructSimpleCustomTypeSuccessParallel-8 10000000 161 ns/op 32 B/op 2 allocs/op -BenchmarkStructSimpleCustomTypeFailure-8 2000000 640 ns/op 424 B/op 9 allocs/op -BenchmarkStructSimpleCustomTypeFailureParallel-8 5000000 318 ns/op 440 B/op 10 allocs/op -BenchmarkStructFilteredSuccess-8 2000000 597 ns/op 288 B/op 9 allocs/op -BenchmarkStructFilteredSuccessParallel-8 10000000 266 ns/op 288 B/op 9 allocs/op -BenchmarkStructFilteredFailure-8 3000000 454 ns/op 256 B/op 7 allocs/op -BenchmarkStructFilteredFailureParallel-8 10000000 214 ns/op 256 B/op 7 allocs/op -BenchmarkStructPartialSuccess-8 3000000 502 ns/op 256 B/op 6 allocs/op -BenchmarkStructPartialSuccessParallel-8 10000000 225 ns/op 256 B/op 6 allocs/op -BenchmarkStructPartialFailure-8 2000000 702 ns/op 480 B/op 11 allocs/op -BenchmarkStructPartialFailureParallel-8 5000000 329 ns/op 480 B/op 11 allocs/op -BenchmarkStructExceptSuccess-8 2000000 793 ns/op 496 B/op 12 allocs/op 
-BenchmarkStructExceptSuccessParallel-8 10000000 193 ns/op 240 B/op 5 allocs/op -BenchmarkStructExceptFailure-8 2000000 639 ns/op 464 B/op 10 allocs/op -BenchmarkStructExceptFailureParallel-8 5000000 300 ns/op 464 B/op 10 allocs/op -BenchmarkStructSimpleCrossFieldSuccess-8 3000000 417 ns/op 72 B/op 3 allocs/op -BenchmarkStructSimpleCrossFieldSuccessParallel-8 10000000 163 ns/op 72 B/op 3 allocs/op -BenchmarkStructSimpleCrossFieldFailure-8 2000000 645 ns/op 304 B/op 8 allocs/op -BenchmarkStructSimpleCrossFieldFailureParallel-8 5000000 285 ns/op 304 B/op 8 allocs/op -BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3000000 588 ns/op 80 B/op 4 allocs/op -BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 10000000 221 ns/op 80 B/op 4 allocs/op -BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2000000 868 ns/op 320 B/op 9 allocs/op -BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 5000000 337 ns/op 320 B/op 9 allocs/op -BenchmarkStructSimpleSuccess-8 5000000 260 ns/op 0 B/op 0 allocs/op -BenchmarkStructSimpleSuccessParallel-8 20000000 90.6 ns/op 0 B/op 0 allocs/op -BenchmarkStructSimpleFailure-8 2000000 619 ns/op 424 B/op 9 allocs/op -BenchmarkStructSimpleFailureParallel-8 5000000 296 ns/op 424 B/op 9 allocs/op -BenchmarkStructComplexSuccess-8 1000000 1454 ns/op 128 B/op 8 allocs/op -BenchmarkStructComplexSuccessParallel-8 3000000 579 ns/op 128 B/op 8 allocs/op -BenchmarkStructComplexFailure-8 300000 4140 ns/op 3041 B/op 53 allocs/op -BenchmarkStructComplexFailureParallel-8 1000000 2127 ns/op 3041 B/op 53 allocs/op -BenchmarkOneof-8 10000000 140 ns/op 0 B/op 0 allocs/op -BenchmarkOneofParallel-8 20000000 70.1 ns/op 0 B/op 0 allocs/op +goarch: arm64 +pkg: github.com/go-playground/validator/v10 +BenchmarkFieldSuccess-8 33142266 35.94 ns/op 0 B/op 0 allocs/op +BenchmarkFieldSuccessParallel-8 200816191 6.568 ns/op 0 B/op 0 allocs/op +BenchmarkFieldFailure-8 6779707 175.1 ns/op 200 B/op 4 allocs/op +BenchmarkFieldFailureParallel-8 11044147 108.4 ns/op 200 B/op 4 allocs/op +BenchmarkFieldArrayDiveSuccess-8 6054232 194.4 ns/op 97 B/op 5 allocs/op +BenchmarkFieldArrayDiveSuccessParallel-8 12523388 94.07 ns/op 97 B/op 5 allocs/op +BenchmarkFieldArrayDiveFailure-8 3587043 334.3 ns/op 300 B/op 10 allocs/op +BenchmarkFieldArrayDiveFailureParallel-8 5816665 200.8 ns/op 300 B/op 10 allocs/op +BenchmarkFieldMapDiveSuccess-8 2217910 540.1 ns/op 288 B/op 14 allocs/op +BenchmarkFieldMapDiveSuccessParallel-8 4446698 258.7 ns/op 288 B/op 14 allocs/op +BenchmarkFieldMapDiveFailure-8 2392759 504.6 ns/op 376 B/op 13 allocs/op +BenchmarkFieldMapDiveFailureParallel-8 4244199 286.9 ns/op 376 B/op 13 allocs/op +BenchmarkFieldMapDiveWithKeysSuccess-8 2005857 592.1 ns/op 288 B/op 14 allocs/op +BenchmarkFieldMapDiveWithKeysSuccessParallel-8 4400850 296.9 ns/op 288 B/op 14 allocs/op +BenchmarkFieldMapDiveWithKeysFailure-8 1850227 643.8 ns/op 553 B/op 16 allocs/op +BenchmarkFieldMapDiveWithKeysFailureParallel-8 3293233 375.1 ns/op 553 B/op 16 allocs/op +BenchmarkFieldCustomTypeSuccess-8 12174412 98.25 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeSuccessParallel-8 34389907 35.49 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeFailure-8 7582524 156.6 ns/op 184 B/op 3 allocs/op +BenchmarkFieldCustomTypeFailureParallel-8 13019902 92.79 ns/op 184 B/op 3 allocs/op +BenchmarkFieldOrTagSuccess-8 3427260 349.4 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagSuccessParallel-8 15144128 81.25 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagFailure-8 5913546 201.9 ns/op 216 B/op 5 allocs/op 
+BenchmarkFieldOrTagFailureParallel-8 9810212 113.7 ns/op 216 B/op 5 allocs/op +BenchmarkStructLevelValidationSuccess-8 13456327 87.66 ns/op 16 B/op 1 allocs/op +BenchmarkStructLevelValidationSuccessParallel-8 41818888 27.77 ns/op 16 B/op 1 allocs/op +BenchmarkStructLevelValidationFailure-8 4166284 272.6 ns/op 264 B/op 7 allocs/op +BenchmarkStructLevelValidationFailureParallel-8 7594581 152.1 ns/op 264 B/op 7 allocs/op +BenchmarkStructSimpleCustomTypeSuccess-8 6508082 182.6 ns/op 32 B/op 2 allocs/op +BenchmarkStructSimpleCustomTypeSuccessParallel-8 23078605 54.78 ns/op 32 B/op 2 allocs/op +BenchmarkStructSimpleCustomTypeFailure-8 3118352 381.0 ns/op 416 B/op 9 allocs/op +BenchmarkStructSimpleCustomTypeFailureParallel-8 5300738 224.1 ns/op 432 B/op 10 allocs/op +BenchmarkStructFilteredSuccess-8 4761807 251.1 ns/op 216 B/op 5 allocs/op +BenchmarkStructFilteredSuccessParallel-8 8792598 128.6 ns/op 216 B/op 5 allocs/op +BenchmarkStructFilteredFailure-8 5202573 232.1 ns/op 216 B/op 5 allocs/op +BenchmarkStructFilteredFailureParallel-8 9591267 121.4 ns/op 216 B/op 5 allocs/op +BenchmarkStructPartialSuccess-8 5188512 231.6 ns/op 224 B/op 4 allocs/op +BenchmarkStructPartialSuccessParallel-8 9179776 123.1 ns/op 224 B/op 4 allocs/op +BenchmarkStructPartialFailure-8 3071212 392.5 ns/op 440 B/op 9 allocs/op +BenchmarkStructPartialFailureParallel-8 5344261 223.7 ns/op 440 B/op 9 allocs/op +BenchmarkStructExceptSuccess-8 3184230 375.0 ns/op 424 B/op 8 allocs/op +BenchmarkStructExceptSuccessParallel-8 10090130 108.9 ns/op 208 B/op 3 allocs/op +BenchmarkStructExceptFailure-8 3347226 357.7 ns/op 424 B/op 8 allocs/op +BenchmarkStructExceptFailureParallel-8 5654923 209.5 ns/op 424 B/op 8 allocs/op +BenchmarkStructSimpleCrossFieldSuccess-8 5232265 229.1 ns/op 56 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldSuccessParallel-8 17436674 64.75 ns/op 56 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldFailure-8 3128613 383.6 ns/op 272 B/op 8 allocs/op +BenchmarkStructSimpleCrossFieldFailureParallel-8 6994113 168.8 ns/op 272 B/op 8 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3506487 340.9 ns/op 64 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 13431300 91.77 ns/op 64 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2410566 500.9 ns/op 288 B/op 9 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 6344510 188.2 ns/op 288 B/op 9 allocs/op +BenchmarkStructSimpleSuccess-8 8922726 133.8 ns/op 0 B/op 0 allocs/op +BenchmarkStructSimpleSuccessParallel-8 55291153 23.63 ns/op 0 B/op 0 allocs/op +BenchmarkStructSimpleFailure-8 3171553 378.4 ns/op 416 B/op 9 allocs/op +BenchmarkStructSimpleFailureParallel-8 5571692 212.0 ns/op 416 B/op 9 allocs/op +BenchmarkStructComplexSuccess-8 1683750 714.5 ns/op 224 B/op 5 allocs/op +BenchmarkStructComplexSuccessParallel-8 4578046 257.0 ns/op 224 B/op 5 allocs/op +BenchmarkStructComplexFailure-8 481585 2547 ns/op 3041 B/op 48 allocs/op +BenchmarkStructComplexFailureParallel-8 965764 1577 ns/op 3040 B/op 48 allocs/op +BenchmarkOneof-8 17380881 68.50 ns/op 0 B/op 0 allocs/op +BenchmarkOneofParallel-8 8084733 153.5 ns/op 0 B/op 0 allocs/op ``` Complementary Software diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go index f7b55ab3..cc92b784 100644 --- a/vendor/github.com/go-playground/validator/v10/baked_in.go +++ b/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -23,7 +23,7 @@ import ( 
"golang.org/x/text/language" "github.com/gabriel-vasile/mimetype" - "github.com/leodido/go-urn" + urn "github.com/leodido/go-urn" ) // Func accepts a FieldLevel interface for all validation needs. The return @@ -1295,8 +1295,13 @@ func isEq(fl FieldLevel) bool { return field.Uint() == p - case reflect.Float32, reflect.Float64: - p := asFloat(param) + case reflect.Float32: + p := asFloat32(param) + + return field.Float() == p + + case reflect.Float64: + p := asFloat64(param) return field.Float() == p @@ -1760,8 +1765,11 @@ func requireCheckFieldValue( case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: return field.Uint() == asUint(value) - case reflect.Float32, reflect.Float64: - return field.Float() == asFloat(value) + case reflect.Float32: + return field.Float() == asFloat32(value) + + case reflect.Float64: + return field.Float() == asFloat64(value) case reflect.Slice, reflect.Map, reflect.Array: return int64(field.Len()) == asInt(value) @@ -2060,8 +2068,13 @@ func isGte(fl FieldLevel) bool { return field.Uint() >= p - case reflect.Float32, reflect.Float64: - p := asFloat(param) + case reflect.Float32: + p := asFloat32(param) + + return field.Float() >= p + + case reflect.Float64: + p := asFloat64(param) return field.Float() >= p @@ -2106,10 +2119,16 @@ func isGt(fl FieldLevel) bool { return field.Uint() > p - case reflect.Float32, reflect.Float64: - p := asFloat(param) + case reflect.Float32: + p := asFloat32(param) return field.Float() > p + + case reflect.Float64: + p := asFloat64(param) + + return field.Float() > p + case reflect.Struct: if field.Type().ConvertibleTo(timeType) { @@ -2148,8 +2167,13 @@ func hasLengthOf(fl FieldLevel) bool { return field.Uint() == p - case reflect.Float32, reflect.Float64: - p := asFloat(param) + case reflect.Float32: + p := asFloat32(param) + + return field.Float() == p + + case reflect.Float64: + p := asFloat64(param) return field.Float() == p } @@ -2281,8 +2305,13 @@ func isLte(fl FieldLevel) bool { return field.Uint() <= p - case reflect.Float32, reflect.Float64: - p := asFloat(param) + case reflect.Float32: + p := asFloat32(param) + + return field.Float() <= p + + case reflect.Float64: + p := asFloat64(param) return field.Float() <= p @@ -2327,8 +2356,13 @@ func isLt(fl FieldLevel) bool { return field.Uint() < p - case reflect.Float32, reflect.Float64: - p := asFloat(param) + case reflect.Float32: + p := asFloat32(param) + + return field.Float() < p + + case reflect.Float64: + p := asFloat64(param) return field.Float() < p diff --git a/vendor/github.com/go-playground/validator/v10/cache.go b/vendor/github.com/go-playground/validator/v10/cache.go index ddd37b83..bbfd2a4a 100644 --- a/vendor/github.com/go-playground/validator/v10/cache.go +++ b/vendor/github.com/go-playground/validator/v10/cache.go @@ -20,7 +20,6 @@ const ( typeOr typeKeys typeEndKeys - typeNestedStructLevel ) const ( @@ -153,7 +152,7 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr // and so only struct level caching can be used instead of combined with Field tag caching if len(tag) > 0 { - ctag, _ = v.parseFieldTagsRecursive(tag, fld, "", false) + ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false) } else { // even if field doesn't have validations need cTag for traversing to potential inner/nested // elements of the field. 
@@ -172,7 +171,7 @@ func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStr return cs } -func (v *Validate) parseFieldTagsRecursive(tag string, field reflect.StructField, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) { +func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) { var t string noAlias := len(alias) == 0 tags := strings.Split(tag, tagSeparator) @@ -186,9 +185,9 @@ func (v *Validate) parseFieldTagsRecursive(tag string, field reflect.StructField // check map for alias and process new tags, otherwise process as usual if tagsVal, found := v.aliases[t]; found { if i == 0 { - firstCtag, current = v.parseFieldTagsRecursive(tagsVal, field, t, true) + firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) } else { - next, curr := v.parseFieldTagsRecursive(tagsVal, field, t, true) + next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) current.next, current = next, curr } @@ -236,7 +235,7 @@ func (v *Validate) parseFieldTagsRecursive(tag string, field reflect.StructField } } - current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), field, "", false) + current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false) continue case endKeysTag: @@ -285,18 +284,14 @@ func (v *Validate) parseFieldTagsRecursive(tag string, field reflect.StructField current.tag = vals[0] if len(current.tag) == 0 { - panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, field.Name))) + panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName))) } if wrapper, ok := v.validations[current.tag]; ok { current.fn = wrapper.fn current.runValidationWhenNil = wrapper.runValidatinOnNil } else { - panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, field.Name))) - } - - if current.typeof == typeDefault && isNestedStructOrStructPtr(field) { - current.typeof = typeNestedStructLevel + panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName))) } if len(orVals) > 1 { @@ -324,7 +319,7 @@ func (v *Validate) fetchCacheTag(tag string) *cTag { // isn't parsed again. ctag, found = v.tagCache.Get(tag) if !found { - ctag, _ = v.parseFieldTagsRecursive(tag, reflect.StructField{}, "", false) + ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false) v.tagCache.Set(tag, ctag) } } diff --git a/vendor/github.com/go-playground/validator/v10/doc.go b/vendor/github.com/go-playground/validator/v10/doc.go index d1eff50f..c4dbb595 100644 --- a/vendor/github.com/go-playground/validator/v10/doc.go +++ b/vendor/github.com/go-playground/validator/v10/doc.go @@ -247,7 +247,7 @@ Example #2 This validates that the value is not the data types default zero value. For numbers ensures value is not zero. For strings ensures value is not "". For slices, maps, pointers, interfaces, channels and functions -ensures the value is not nil. For structs ensures value is not the zero value. +ensures the value is not nil. For structs ensures value is not the zero value when using WithRequiredStructEnabled. Usage: required diff --git a/vendor/github.com/go-playground/validator/v10/options.go b/vendor/github.com/go-playground/validator/v10/options.go new file mode 100644 index 00000000..1dea56fd --- /dev/null +++ b/vendor/github.com/go-playground/validator/v10/options.go @@ -0,0 +1,16 @@ +package validator + +// Option represents a configurations option to be applied to validator during initialization. 
+type Option func(*Validate) + +// WithRequiredStructEnabled enables required tag on non-pointer structs to be applied instead of ignored. +// +// This was made opt-in behaviour in order to maintain backward compatibility with the behaviour previous +// to being able to apply struct level validations on struct fields directly. +// +// It is recommended you enabled this as it will be the default behaviour in v11+ +func WithRequiredStructEnabled() Option { + return func(v *Validate) { + v.requiredStructEnabled = true + } +} diff --git a/vendor/github.com/go-playground/validator/v10/util.go b/vendor/github.com/go-playground/validator/v10/util.go index b81d9543..4bd947bd 100644 --- a/vendor/github.com/go-playground/validator/v10/util.go +++ b/vendor/github.com/go-playground/validator/v10/util.go @@ -261,13 +261,19 @@ func asUint(param string) uint64 { return i } -// asFloat returns the parameter as a float64 +// asFloat64 returns the parameter as a float64 // or panics if it can't convert -func asFloat(param string) float64 { - +func asFloat64(param string) float64 { i, err := strconv.ParseFloat(param, 64) panicIf(err) + return i +} +// asFloat64 returns the parameter as a float64 +// or panics if it can't convert +func asFloat32(param string) float64 { + i, err := strconv.ParseFloat(param, 32) + panicIf(err) return i } @@ -286,11 +292,3 @@ func panicIf(err error) { panic(err.Error()) } } - -func isNestedStructOrStructPtr(v reflect.StructField) bool { - if v.Type == nil { - return false - } - kind := v.Type.Kind() - return kind == reflect.Struct || kind == reflect.Ptr && v.Type.Elem().Kind() == reflect.Struct -} diff --git a/vendor/github.com/go-playground/validator/v10/validator.go b/vendor/github.com/go-playground/validator/v10/validator.go index a6fa1f5d..2cae8f7e 100644 --- a/vendor/github.com/go-playground/validator/v10/validator.go +++ b/vendor/github.com/go-playground/validator/v10/validator.go @@ -99,6 +99,8 @@ func (v *validate) traverseField(ctx context.Context, parent reflect.Value, curr current, kind, v.fldIsPointer = v.extractTypeInternal(current, false) + var isNestedStruct bool + switch kind { case reflect.Ptr, reflect.Interface, reflect.Invalid: @@ -160,86 +162,61 @@ func (v *validate) traverseField(ctx context.Context, parent reflect.Value, curr } } - case reflect.Struct: - - typ = current.Type() - - if !typ.ConvertibleTo(timeType) { - - if ct != nil { - - if ct.typeof == typeStructOnly { - goto CONTINUE - } else if ct.typeof == typeIsDefault || ct.typeof == typeNestedStructLevel { - // set Field Level fields - v.slflParent = parent - v.flField = current - v.cf = cf - v.ct = ct - - if !ct.fn(ctx, v) { - v.str1 = string(append(ns, cf.altName...)) - - if v.v.hasTagNameFunc { - v.str2 = string(append(structNs, cf.name...)) - } else { - v.str2 = v.str1 - } - - v.errs = append(v.errs, - &fieldError{ - v: v.v, - tag: ct.aliasTag, - actualTag: ct.tag, - ns: v.str1, - structNs: v.str2, - fieldLen: uint8(len(cf.altName)), - structfieldLen: uint8(len(cf.name)), - value: current.Interface(), - param: ct.param, - kind: kind, - typ: typ, - }, - ) - return - } - } - - ct = ct.next - } - - if ct != nil && ct.typeof == typeNoStructLevel { - return - } - - CONTINUE: - // if len == 0 then validating using 'Var' or 'VarWithValue' - // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm... 
- // VarWithField - this allows for validating against each field within the struct against a specific value - // pretty handy in certain situations - if len(cf.name) > 0 { - ns = append(append(ns, cf.altName...), '.') - structNs = append(append(structNs, cf.name...), '.') - } - - v.validateStruct(ctx, parent, current, typ, ns, structNs, ct) + if kind == reflect.Invalid { return } - } - if ct == nil || !ct.hasTag { - return + case reflect.Struct: + isNestedStruct = !current.Type().ConvertibleTo(timeType) + // For backward compatibility before struct level validation tags were supported + // as there were a number of projects relying on `required` not failing on non-pointer + // structs. Since it's basically nonsensical to use `required` with a non-pointer struct + // are explicitly skipping the required validation for it. This WILL be removed in the + // next major version. + if !v.v.requiredStructEnabled && ct != nil && ct.tag == requiredTag { + ct = ct.next + } } typ = current.Type() OUTER: for { - if ct == nil { + if ct == nil || !ct.hasTag || (isNestedStruct && len(cf.name) == 0) { + // isNestedStruct check here + if isNestedStruct { + // if len == 0 then validating using 'Var' or 'VarWithValue' + // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm... + // VarWithField - this allows for validating against each field within the struct against a specific value + // pretty handy in certain situations + if len(cf.name) > 0 { + ns = append(append(ns, cf.altName...), '.') + structNs = append(append(structNs, cf.name...), '.') + } + + v.validateStruct(ctx, parent, current, typ, ns, structNs, ct) + } return } switch ct.typeof { + case typeNoStructLevel: + return + + case typeStructOnly: + if isNestedStruct { + // if len == 0 then validating using 'Var' or 'VarWithValue' + // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm... 
+ // VarWithField - this allows for validating against each field within the struct against a specific value + // pretty handy in certain situations + if len(cf.name) > 0 { + ns = append(append(ns, cf.altName...), '.') + structNs = append(append(structNs, cf.name...), '.') + } + + v.validateStruct(ctx, parent, current, typ, ns, structNs, ct) + } + return case typeOmitEmpty: @@ -366,7 +343,7 @@ OUTER: ct = ct.next if ct == nil { - return + continue OUTER } if ct.typeof != typeOr { diff --git a/vendor/github.com/go-playground/validator/v10/validator_instance.go b/vendor/github.com/go-playground/validator/v10/validator_instance.go index d9dbf0ce..a4dbdd09 100644 --- a/vendor/github.com/go-playground/validator/v10/validator_instance.go +++ b/vendor/github.com/go-playground/validator/v10/validator_instance.go @@ -79,19 +79,20 @@ type internalValidationFuncWrapper struct { // Validate contains the validator settings and cache type Validate struct { - tagName string - pool *sync.Pool - hasCustomFuncs bool - hasTagNameFunc bool - tagNameFunc TagNameFunc - structLevelFuncs map[reflect.Type]StructLevelFuncCtx - customFuncs map[reflect.Type]CustomTypeFunc - aliases map[string]string - validations map[string]internalValidationFuncWrapper - transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc - rules map[reflect.Type]map[string]string - tagCache *tagCache - structCache *structCache + tagName string + pool *sync.Pool + tagNameFunc TagNameFunc + structLevelFuncs map[reflect.Type]StructLevelFuncCtx + customFuncs map[reflect.Type]CustomTypeFunc + aliases map[string]string + validations map[string]internalValidationFuncWrapper + transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc + rules map[reflect.Type]map[string]string + tagCache *tagCache + structCache *structCache + hasCustomFuncs bool + hasTagNameFunc bool + requiredStructEnabled bool } // New returns a new instance of 'validate' with sane defaults. @@ -99,7 +100,7 @@ type Validate struct { // It caches information about your struct and validations, // in essence only parsing your validation tags once per struct type. // Using multiple instances neglects the benefit of caching. -func New() *Validate { +func New(options ...Option) *Validate { tc := new(tagCache) tc.m.Store(make(map[string]*cTag)) @@ -146,6 +147,9 @@ func New() *Validate { }, } + for _, o := range options { + o(v) + } return v } diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60..00000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... 
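The validator/v10 hunks above replace the old implicit nested-struct handling with an explicit, opt-in knob: `New` now accepts functional options, and `WithRequiredStructEnabled` makes the `required` tag apply to non-pointer struct fields instead of being skipped. A minimal sketch of how calling code would opt in; the `User` and `Address` types are illustrative assumptions, not part of this patch:

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// Address carries no validate tags of its own, so any error below
// comes only from the outer `required` tag on the struct field.
type Address struct {
	City string
}

type User struct {
	// Without WithRequiredStructEnabled, `required` on a non-pointer
	// struct field is skipped for backward compatibility.
	Address Address `validate:"required"`
}

func main() {
	v := validator.New(validator.WithRequiredStructEnabled())
	err := v.Struct(User{}) // Address is its zero value
	fmt.Println(err != nil) // true: the zero-value struct now fails `required`
}
```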
diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 00000000..2bd78667 --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 04fdf09f..55668887 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -2,6 +2,22 @@ We definitely welcome patches and contribution to this project! +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. + +### Releasing + +Commits that would precipitate a SemVer change, as described in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. + ### Legal requirements In order to protect both you and ourselves, you will need to sign the diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index f765a46f..3e9a6188 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -1,6 +1,6 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +# uuid The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) and DCE 1.1: Authentication and Security Services. This package is based on the github.com/pborman/uuid package (previously named @@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this change is the ability to represent an invalid UUID (vs a NIL UUID). ###### Install -`go get github.com/google/uuid` +```sh +go get github.com/google/uuid +``` ###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go index 24b78edc..b2a0bc87 100644 --- a/vendor/github.com/google/uuid/node_js.go +++ b/vendor/github.com/google/uuid/node_js.go @@ -7,6 +7,6 @@ package uuid // getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. +// This removes the "net" dependency, because it is not used in the browser. // Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a57207ae..a56138cc 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -69,7 +69,7 @@ func Parse(s string) (UUID, error) { // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { + if !strings.EqualFold(s[:9], "urn:uuid:") { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] @@ -101,7 +101,8 @@ func Parse(s string) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(s[x], s[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -117,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) { switch len(b) { case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] @@ -145,7 +146,8 @@ func ParseBytes(b []byte) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(b[x], b[x+1]) if !ok { return uuid, errors.New("invalid UUID format") diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE index be2cc4df..0e5d580e 100644 --- a/vendor/github.com/hashicorp/golang-lru/LICENSE +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -1,3 +1,5 @@ +Copyright (c) 2014 HashiCorp, Inc. + Mozilla Public License, version 2.0 1. Definitions diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go index a86c8539..9233583c 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -25,7 +25,7 @@ type entry struct { // NewLRU constructs an LRU of the given size func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { if size <= 0 { - return nil, errors.New("Must provide a positive size") + return nil, errors.New("must provide a positive size") } c := &LRU{ size: size, @@ -109,7 +109,7 @@ func (c *LRU) Remove(key interface{}) (present bool) { } // RemoveOldest removes the oldest item from the cache. -func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { +func (c *LRU) RemoveOldest() (key, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { c.removeElement(ent) @@ -120,7 +120,7 @@ func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { } // GetOldest returns the oldest entry -func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { +func (c *LRU) GetOldest() (key, value interface{}, ok bool) { ent := c.evictList.Back() if ent != nil { kv := ent.Value.(*entry) diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go index 92d70934..cb7f8caf 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -1,3 +1,4 @@ +// Package simplelru provides simple LRU implementation based on build-in container/list. package simplelru // LRUCache is the interface for simple LRU cache. 
@@ -34,6 +35,6 @@ type LRUCache interface { // Clears all cache entries. Purge() - // Resizes cache, returning number evicted - Resize(int) int + // Resizes cache, returning number evicted + Resize(int) int } diff --git a/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml index 95700902..7e7b8a96 100644 --- a/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml +++ b/vendor/github.com/hashicorp/golang-lru/v2/.golangci.yml @@ -2,12 +2,13 @@ # SPDX-License-Identifier: MPL-2.0 linters: + fast: false + disable-all: true enable: - - megacheck - revive + - megacheck - govet - unconvert - - megacheck - gas - gocyclo - dupl @@ -16,18 +17,30 @@ linters: - unused - typecheck - ineffassign - - stylecheck + # - stylecheck - exportloopref - gocritic - nakedret - gosimple - prealloc - fast: false - disable-all: true + +# golangci-lint configuration file +linters-settings: + revive: + ignore-generated-header: true + severity: warning + rules: + - name: package-comments + severity: warning + disabled: true + - name: exported + severity: warning + disabled: false + arguments: ["checkPrivateReceivers", "disableStutteringCheck"] issues: + exclude-use-default: false exclude-rules: - path: _test\.go linters: - dupl - exclude-use-default: false diff --git a/vendor/github.com/hashicorp/golang-lru/v2/README.md b/vendor/github.com/hashicorp/golang-lru/v2/README.md index b60785fc..a942eb53 100644 --- a/vendor/github.com/hashicorp/golang-lru/v2/README.md +++ b/vendor/github.com/hashicorp/golang-lru/v2/README.md @@ -9,10 +9,8 @@ Documentation Full docs are available on [Go Packages](https://pkg.go.dev/github.com/hashicorp/golang-lru/v2) -Example -======= - -Using the LRU is very simple: +LRU cache example +================= ```go package main @@ -23,12 +21,59 @@ import ( ) func main() { - l, _ := lru.New[int, any](128) - for i := 0; i < 256; i++ { - l.Add(i, nil) - } - if l.Len() != 128 { - panic(fmt.Sprintf("bad len: %v", l.Len())) - } + l, _ := lru.New[int, any](128) + for i := 0; i < 256; i++ { + l.Add(i, nil) + } + if l.Len() != 128 { + panic(fmt.Sprintf("bad len: %v", l.Len())) + } +} +``` + +Expirable LRU cache example +=========================== + +```go +package main + +import ( + "fmt" + "time" + + "github.com/hashicorp/golang-lru/v2/expirable" +) + +func main() { + // make cache with 10ms TTL and 5 max keys + cache := expirable.NewLRU[string, string](5, nil, time.Millisecond*10) + + + // set value under key1. + cache.Add("key1", "val1") + + // get value under key1 + r, ok := cache.Get("key1") + + // check for OK value + if ok { + fmt.Printf("value before expiration is found: %v, value: %q\n", ok, r) + } + + // wait for cache to expire + time.Sleep(time.Millisecond * 12) + + // get value under key1 after key expiration + r, ok = cache.Get("key1") + fmt.Printf("value after expiration is found: %v, value: %q\n", ok, r) + + // set value under key2, would evict old entry because it is already expired. + cache.Add("key2", "val2") + + fmt.Printf("Cache len: %d\n", cache.Len()) + // Output: + // value before expiration is found: true, value: "val1" + // value after expiration is found: false, value: "" + // Cache len: 1 } ``` diff --git a/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go new file mode 100644 index 00000000..5cd74a03 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/v2/internal/list.go @@ -0,0 +1,142 @@ +// Copyright 2009 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE_list file. + +package internal + +import "time" + +// Entry is an LRU Entry +type Entry[K comparable, V any] struct { + // Next and previous pointers in the doubly-linked list of elements. + // To simplify the implementation, internally a list l is implemented + // as a ring, such that &l.root is both the next element of the last + // list element (l.Back()) and the previous element of the first list + // element (l.Front()). + next, prev *Entry[K, V] + + // The list to which this element belongs. + list *LruList[K, V] + + // The LRU Key of this element. + Key K + + // The Value stored with this element. + Value V + + // The time this element would be cleaned up, optional + ExpiresAt time.Time + + // The expiry bucket item was put in, optional + ExpireBucket uint8 +} + +// PrevEntry returns the previous list element or nil. +func (e *Entry[K, V]) PrevEntry() *Entry[K, V] { + if p := e.prev; e.list != nil && p != &e.list.root { + return p + } + return nil +} + +// LruList represents a doubly linked list. +// The zero Value for LruList is an empty list ready to use. +type LruList[K comparable, V any] struct { + root Entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used + len int // current list Length excluding (this) sentinel element +} + +// Init initializes or clears list l. +func (l *LruList[K, V]) Init() *LruList[K, V] { + l.root.next = &l.root + l.root.prev = &l.root + l.len = 0 + return l +} + +// NewList returns an initialized list. +func NewList[K comparable, V any]() *LruList[K, V] { return new(LruList[K, V]).Init() } + +// Length returns the number of elements of list l. +// The complexity is O(1). +func (l *LruList[K, V]) Length() int { return l.len } + +// Back returns the last element of list l or nil if the list is empty. +func (l *LruList[K, V]) Back() *Entry[K, V] { + if l.len == 0 { + return nil + } + return l.root.prev +} + +// lazyInit lazily initializes a zero List Value. +func (l *LruList[K, V]) lazyInit() { + if l.root.next == nil { + l.Init() + } +} + +// insert inserts e after at, increments l.len, and returns e. +func (l *LruList[K, V]) insert(e, at *Entry[K, V]) *Entry[K, V] { + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e + e.list = l + l.len++ + return e +} + +// insertValue is a convenience wrapper for insert(&Entry{Value: v, ExpiresAt: ExpiresAt}, at). +func (l *LruList[K, V]) insertValue(k K, v V, expiresAt time.Time, at *Entry[K, V]) *Entry[K, V] { + return l.insert(&Entry[K, V]{Value: v, Key: k, ExpiresAt: expiresAt}, at) +} + +// Remove removes e from its list, decrements l.len +func (l *LruList[K, V]) Remove(e *Entry[K, V]) V { + e.prev.next = e.next + e.next.prev = e.prev + e.next = nil // avoid memory leaks + e.prev = nil // avoid memory leaks + e.list = nil + l.len-- + + return e.Value +} + +// move moves e to next to at. +func (l *LruList[K, V]) move(e, at *Entry[K, V]) { + if e == at { + return + } + e.prev.next = e.next + e.next.prev = e.prev + + e.prev = at + e.next = at.next + e.prev.next = e + e.next.prev = e +} + +// PushFront inserts a new element e with value v at the front of list l and returns e. +func (l *LruList[K, V]) PushFront(k K, v V) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, time.Time{}, &l.root) +} + +// PushFrontExpirable inserts a new expirable element e with Value v at the front of list l and returns e. 
+func (l *LruList[K, V]) PushFrontExpirable(k K, v V, expiresAt time.Time) *Entry[K, V] { + l.lazyInit() + return l.insertValue(k, v, expiresAt, &l.root) +} + +// MoveToFront moves element e to the front of list l. +// If e is not an element of l, the list is not modified. +// The element must not be nil. +func (l *LruList[K, V]) MoveToFront(e *Entry[K, V]) { + if e.list != l || l.root.next == e { + return + } + // see comment in List.Remove about initialization of l + l.move(e, &l.root) +} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/list.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/list.go deleted file mode 100644 index c39da3c1..00000000 --- a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/list.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE_list file. - -package simplelru - -// entry is an LRU entry -type entry[K comparable, V any] struct { - // Next and previous pointers in the doubly-linked list of elements. - // To simplify the implementation, internally a list l is implemented - // as a ring, such that &l.root is both the next element of the last - // list element (l.Back()) and the previous element of the first list - // element (l.Front()). - next, prev *entry[K, V] - - // The list to which this element belongs. - list *lruList[K, V] - - // The LRU key of this element. - key K - - // The value stored with this element. - value V -} - -// prevEntry returns the previous list element or nil. -func (e *entry[K, V]) prevEntry() *entry[K, V] { - if p := e.prev; e.list != nil && p != &e.list.root { - return p - } - return nil -} - -// lruList represents a doubly linked list. -// The zero value for lruList is an empty list ready to use. -type lruList[K comparable, V any] struct { - root entry[K, V] // sentinel list element, only &root, root.prev, and root.next are used - len int // current list length excluding (this) sentinel element -} - -// init initializes or clears list l. -func (l *lruList[K, V]) init() *lruList[K, V] { - l.root.next = &l.root - l.root.prev = &l.root - l.len = 0 - return l -} - -// newList returns an initialized list. -func newList[K comparable, V any]() *lruList[K, V] { return new(lruList[K, V]).init() } - -// length returns the number of elements of list l. -// The complexity is O(1). -func (l *lruList[K, V]) length() int { return l.len } - -// back returns the last element of list l or nil if the list is empty. -func (l *lruList[K, V]) back() *entry[K, V] { - if l.len == 0 { - return nil - } - return l.root.prev -} - -// lazyInit lazily initializes a zero List value. -func (l *lruList[K, V]) lazyInit() { - if l.root.next == nil { - l.init() - } -} - -// insert inserts e after at, increments l.len, and returns e. -func (l *lruList[K, V]) insert(e, at *entry[K, V]) *entry[K, V] { - e.prev = at - e.next = at.next - e.prev.next = e - e.next.prev = e - e.list = l - l.len++ - return e -} - -// insertValue is a convenience wrapper for insert(&Element{Value: v}, at). 
-func (l *lruList[K, V]) insertValue(k K, v V, at *entry[K, V]) *entry[K, V] { - return l.insert(&entry[K, V]{value: v, key: k}, at) -} - -// remove removes e from its list, decrements l.len -func (l *lruList[K, V]) remove(e *entry[K, V]) V { - e.prev.next = e.next - e.next.prev = e.prev - e.next = nil // avoid memory leaks - e.prev = nil // avoid memory leaks - e.list = nil - l.len-- - - return e.value -} - -// move moves e to next to at. -func (l *lruList[K, V]) move(e, at *entry[K, V]) { - if e == at { - return - } - e.prev.next = e.next - e.next.prev = e.prev - - e.prev = at - e.next = at.next - e.prev.next = e - e.next.prev = e -} - -// pushFront inserts a new element e with value v at the front of list l and returns e. -func (l *lruList[K, V]) pushFront(k K, v V) *entry[K, V] { - l.lazyInit() - return l.insertValue(k, v, &l.root) -} - -// moveToFront moves element e to the front of list l. -// If e is not an element of l, the list is not modified. -// The element must not be nil. -func (l *lruList[K, V]) moveToFront(e *entry[K, V]) { - if e.list != l || l.root.next == e { - return - } - // see comment in List.Remove about initialization of l - l.move(e, &l.root) -} diff --git a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go index b165ea2a..f6979238 100644 --- a/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/v2/simplelru/lru.go @@ -5,6 +5,8 @@ package simplelru import ( "errors" + + "github.com/hashicorp/golang-lru/v2/internal" ) // EvictCallback is used to get a callback when a cache entry is evicted @@ -13,8 +15,8 @@ type EvictCallback[K comparable, V any] func(key K, value V) // LRU implements a non-thread safe fixed size LRU cache type LRU[K comparable, V any] struct { size int - evictList *lruList[K, V] - items map[K]*entry[K, V] + evictList *internal.LruList[K, V] + items map[K]*internal.Entry[K, V] onEvict EvictCallback[K, V] } @@ -26,8 +28,8 @@ func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, c := &LRU[K, V]{ size: size, - evictList: newList[K, V](), - items: make(map[K]*entry[K, V]), + evictList: internal.NewList[K, V](), + items: make(map[K]*internal.Entry[K, V]), onEvict: onEvict, } return c, nil @@ -37,30 +39,27 @@ func NewLRU[K comparable, V any](size int, onEvict EvictCallback[K, V]) (*LRU[K, func (c *LRU[K, V]) Purge() { for k, v := range c.items { if c.onEvict != nil { - c.onEvict(k, v.value) + c.onEvict(k, v.Value) } delete(c.items, k) } - c.evictList.init() + c.evictList.Init() } // Add adds a value to the cache. Returns true if an eviction occurred. func (c *LRU[K, V]) Add(key K, value V) (evicted bool) { // Check for existing item if ent, ok := c.items[key]; ok { - c.evictList.moveToFront(ent) - if c.onEvict != nil { - c.onEvict(key, ent.value) - } - ent.value = value + c.evictList.MoveToFront(ent) + ent.Value = value return false } // Add new item - ent := c.evictList.pushFront(key, value) + ent := c.evictList.PushFront(key, value) c.items[key] = ent - evict := c.evictList.length() > c.size + evict := c.evictList.Length() > c.size // Verify size not exceeded if evict { c.removeOldest() @@ -71,8 +70,8 @@ func (c *LRU[K, V]) Add(key K, value V) (evicted bool) { // Get looks up a key's value from the cache. 
func (c *LRU[K, V]) Get(key K) (value V, ok bool) { if ent, ok := c.items[key]; ok { - c.evictList.moveToFront(ent) - return ent.value, true + c.evictList.MoveToFront(ent) + return ent.Value, true } return } @@ -87,9 +86,9 @@ func (c *LRU[K, V]) Contains(key K) (ok bool) { // Peek returns the key value (or undefined if not found) without updating // the "recently used"-ness of the key. func (c *LRU[K, V]) Peek(key K) (value V, ok bool) { - var ent *entry[K, V] + var ent *internal.Entry[K, V] if ent, ok = c.items[key]; ok { - return ent.value, true + return ent.Value, true } return } @@ -106,27 +105,27 @@ func (c *LRU[K, V]) Remove(key K) (present bool) { // RemoveOldest removes the oldest item from the cache. func (c *LRU[K, V]) RemoveOldest() (key K, value V, ok bool) { - if ent := c.evictList.back(); ent != nil { + if ent := c.evictList.Back(); ent != nil { c.removeElement(ent) - return ent.key, ent.value, true + return ent.Key, ent.Value, true } return } // GetOldest returns the oldest entry func (c *LRU[K, V]) GetOldest() (key K, value V, ok bool) { - if ent := c.evictList.back(); ent != nil { - return ent.key, ent.value, true + if ent := c.evictList.Back(); ent != nil { + return ent.Key, ent.Value, true } return } // Keys returns a slice of the keys in the cache, from oldest to newest. func (c *LRU[K, V]) Keys() []K { - keys := make([]K, c.evictList.length()) + keys := make([]K, c.evictList.Length()) i := 0 - for ent := c.evictList.back(); ent != nil; ent = ent.prevEntry() { - keys[i] = ent.key + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + keys[i] = ent.Key i++ } return keys @@ -136,8 +135,8 @@ func (c *LRU[K, V]) Keys() []K { func (c *LRU[K, V]) Values() []V { values := make([]V, len(c.items)) i := 0 - for ent := c.evictList.back(); ent != nil; ent = ent.prevEntry() { - values[i] = ent.value + for ent := c.evictList.Back(); ent != nil; ent = ent.PrevEntry() { + values[i] = ent.Value i++ } return values @@ -145,7 +144,7 @@ func (c *LRU[K, V]) Values() []V { // Len returns the number of items in the cache. func (c *LRU[K, V]) Len() int { - return c.evictList.length() + return c.evictList.Length() } // Resize changes the cache size. @@ -163,16 +162,16 @@ func (c *LRU[K, V]) Resize(size int) (evicted int) { // removeOldest removes the oldest item from the cache. func (c *LRU[K, V]) removeOldest() { - if ent := c.evictList.back(); ent != nil { + if ent := c.evictList.Back(); ent != nil { c.removeElement(ent) } } // removeElement is used to remove a given list element from the cache -func (c *LRU[K, V]) removeElement(e *entry[K, V]) { - c.evictList.remove(e) - delete(c.items, e.key) +func (c *LRU[K, V]) removeElement(e *internal.Entry[K, V]) { + c.evictList.Remove(e) + delete(c.items, e.Key) if c.onEvict != nil { - c.onEvict(e.key, e.value) + c.onEvict(e.Key, e.Value) } } diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go index 849471e3..983ed674 100644 --- a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go +++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go @@ -48,6 +48,10 @@ type SnowballOptions struct { // Compression will typically reduce memory and network usage, // Compression can safely be enabled with MinIO hosts. 
Compress bool + + // SkipErrs if enabled will skip any errors while reading the + // object content while creating the snowball archive + SkipErrs bool } // SnowballObject contains information about a single object to be added to the snowball. @@ -184,10 +188,16 @@ objectLoop: n, err := io.Copy(t, obj.Content) if err != nil { closeObj() + if opts.SkipErrs { + continue + } return err } if n != obj.Size { closeObj() + if opts.SkipErrs { + continue + } return io.ErrUnexpectedEOF } closeObj() diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 6179b78c..e8e324a9 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -127,7 +127,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.61" + libraryVersion = "v7.0.63" ) // User Agent should always following the below style. @@ -158,10 +158,6 @@ func New(endpoint string, opts *Options) (*Client, error) { if err != nil { return nil, err } - // Google cloud storage should be set to signature V2, force it if not. - if s3utils.IsGoogleEndpoint(*clnt.endpointURL) { - clnt.overrideSignerType = credentials.SignatureV2 - } // If Amazon S3 set to signature v4. if s3utils.IsAmazonEndpoint(*clnt.endpointURL) { clnt.overrideSignerType = credentials.SignatureV4 diff --git a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go index 8dd62100..0c9536de 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/credentials/iam_aws.go @@ -291,7 +291,13 @@ func getCredentials(client *http.Client, endpoint string) (ec2RoleCredRespBody, // https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html token, err := fetchIMDSToken(client, endpoint) if err != nil { - return ec2RoleCredRespBody{}, err + // Return only errors for valid situations, if the IMDSv2 is not enabled + // we will not be able to get the token, in such a situation we have + // to rely on IMDSv1 behavior as a fallback, this check ensures that. 
+ // Refer https://github.com/minio/minio-go/issues/1866 + if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) { + return ec2RoleCredRespBody{}, err + } } // http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html diff --git a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go index 1425a62a..0abbf6ef 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/replication/replication.go @@ -689,45 +689,83 @@ func (e ExistingObjectReplication) Validate() error { // TargetMetrics represents inline replication metrics // such as pending, failed and completed bytes in total for a bucket remote target type TargetMetrics struct { - // Pending size in bytes - PendingSize uint64 `json:"pendingReplicationSize"` - // Completed size in bytes - ReplicatedSize uint64 `json:"completedReplicationSize"` - // Total Replica size in bytes - ReplicaSize uint64 `json:"replicaSize"` - // Failed size in bytes - FailedSize uint64 `json:"failedReplicationSize"` - // Total number of pending operations including metadata updates - PendingCount uint64 `json:"pendingReplicationCount"` - // Total number of failed operations including metadata updates - FailedCount uint64 `json:"failedReplicationCount"` - // Bandwidth limit in bytes/sec for this target - BandWidthLimitInBytesPerSecond int64 `json:"limitInBits"` - // Current bandwidth used in bytes/sec for this target - CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth"` // Completed count - ReplicatedCount uint64 `json:"replicationCount"` + ReplicatedCount uint64 `json:"replicationCount,omitempty"` + // Completed size in bytes + ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"` + // Bandwidth limit in bytes/sec for this target + BandWidthLimitInBytesPerSecond int64 `json:"limitInBits,omitempty"` + // Current bandwidth used in bytes/sec for this target + CurrentBandwidthInBytesPerSecond float64 `json:"currentBandwidth,omitempty"` + // errors seen in replication in last minute, hour and total + Failed TimedErrStats `json:"failed,omitempty"` + // Deprecated fields + // Pending size in bytes + PendingSize uint64 `json:"pendingReplicationSize,omitempty"` + // Total Replica size in bytes + ReplicaSize uint64 `json:"replicaSize,omitempty"` + // Failed size in bytes + FailedSize uint64 `json:"failedReplicationSize,omitempty"` + // Total number of pending operations including metadata updates + PendingCount uint64 `json:"pendingReplicationCount,omitempty"` + // Total number of failed operations including metadata updates + FailedCount uint64 `json:"failedReplicationCount,omitempty"` } // Metrics represents inline replication metrics for a bucket. 
type Metrics struct { Stats map[string]TargetMetrics - // Total Pending size in bytes across targets - PendingSize uint64 `json:"pendingReplicationSize"` // Completed size in bytes across targets - ReplicatedSize uint64 `json:"completedReplicationSize"` + ReplicatedSize uint64 `json:"completedReplicationSize,omitempty"` // Total Replica size in bytes across targets - ReplicaSize uint64 `json:"replicaSize"` - // Failed size in bytes across targets - FailedSize uint64 `json:"failedReplicationSize"` - // Total number of pending operations including metadata updates across targets - PendingCount uint64 `json:"pendingReplicationCount"` - // Total number of failed operations including metadata updates across targets - FailedCount uint64 `json:"failedReplicationCount"` + ReplicaSize uint64 `json:"replicaSize,omitempty"` // Total Replica counts ReplicaCount int64 `json:"replicaCount,omitempty"` // Total Replicated count ReplicatedCount int64 `json:"replicationCount,omitempty"` + // errors seen in replication in last minute, hour and total + Errors TimedErrStats `json:"failed,omitempty"` + // Total number of entries that are queued for replication + QStats InQueueMetric `json:"queued"` + // Deprecated fields + // Total Pending size in bytes across targets + PendingSize uint64 `json:"pendingReplicationSize,omitempty"` + // Failed size in bytes across targets + FailedSize uint64 `json:"failedReplicationSize,omitempty"` + // Total number of pending operations including metadata updates across targets + PendingCount uint64 `json:"pendingReplicationCount,omitempty"` + // Total number of failed operations including metadata updates across targets + FailedCount uint64 `json:"failedReplicationCount,omitempty"` +} + +// RStat - has count and bytes for replication metrics +type RStat struct { + Count float64 `json:"count"` + Bytes int64 `json:"bytes"` +} + +// Add two RStat +func (r RStat) Add(r1 RStat) RStat { + return RStat{ + Count: r.Count + r1.Count, + Bytes: r.Bytes + r1.Bytes, + } +} + +// TimedErrStats holds error stats for a time period +type TimedErrStats struct { + LastMinute RStat `json:"lastMinute"` + LastHour RStat `json:"lastHour"` + Totals RStat `json:"totals"` +} + +// Add two TimedErrStats +func (te TimedErrStats) Add(o TimedErrStats) TimedErrStats { + return TimedErrStats{ + LastMinute: te.LastMinute.Add(o.LastMinute), + LastHour: te.LastHour.Add(o.LastHour), + Totals: te.Totals.Add(o.Totals), + } } // ResyncTargetsInfo provides replication target information to resync replicated data. 
@@ -763,10 +801,30 @@ type XferStats struct { CurrRate float64 `json:"currRate"` } -// InQueueStats holds stats for objects in replication queue -type InQueueStats struct { - Count int32 `json:"count"` - Bytes int64 `json:"bytes"` +// Merge two XferStats +func (x *XferStats) Merge(x1 XferStats) { + x.AvgRate += x1.AvgRate + x.PeakRate += x1.PeakRate + x.CurrRate += x1.CurrRate +} + +// QStat holds count and bytes for objects in replication queue +type QStat struct { + Count float64 `json:"count"` + Bytes float64 `json:"bytes"` +} + +// Add 2 QStat entries +func (q *QStat) Add(q1 QStat) { + q.Count += q1.Count + q.Bytes += q1.Bytes +} + +// InQueueMetric holds stats for objects in replication queue +type InQueueMetric struct { + Curr QStat `json:"curr" msg:"cq"` + Avg QStat `json:"avg" msg:"aq"` + Max QStat `json:"peak" msg:"pq"` } // MetricName name of replication metric @@ -781,14 +839,34 @@ const ( Total MetricName = "Total" ) +// WorkerStat has stats on number of replication workers +type WorkerStat struct { + Curr int32 `json:"curr"` + Avg float32 `json:"avg"` + Max int32 `json:"max"` +} + +// ReplMRFStats holds stats of MRF backlog saved to disk in the last 5 minutes +// and number of entries that failed replication after 3 retries +type ReplMRFStats struct { + LastFailedCount uint64 `json:"failedCount_last5min"` + // Count of unreplicated entries that were dropped after MRF retry limit reached since cluster start. + TotalDroppedCount uint64 `json:"droppedCount_since_uptime"` + // Bytes of unreplicated entries that were dropped after MRF retry limit reached since cluster start. + TotalDroppedBytes uint64 `json:"droppedBytes_since_uptime"` +} + // ReplQNodeStats holds stats for a node in replication queue type ReplQNodeStats struct { - NodeName string `json:"nodeName"` - Uptime int64 `json:"uptime"` - ActiveWorkers int32 `json:"activeWorkers"` + NodeName string `json:"nodeName"` + Uptime int64 `json:"uptime"` + Workers WorkerStat `json:"activeWorkers"` - XferStats map[MetricName]XferStats `json:"xferStats"` - QStats map[MetricName]InQueueStats `json:"qStats"` + XferStats map[MetricName]XferStats `json:"transferSummary"` + TgtXferStats map[string]map[MetricName]XferStats `json:"tgtTransferStats"` + + QStats InQueueMetric `json:"queueStats"` + MRFStats ReplMRFStats `json:"mrfStats"` } // ReplQueueStats holds stats for replication queue across nodes @@ -797,29 +875,73 @@ type ReplQueueStats struct { } // Workers returns number of workers across all nodes -func (q ReplQueueStats) Workers() int64 { - var workers int64 +func (q ReplQueueStats) Workers() (tot WorkerStat) { for _, node := range q.Nodes { - workers += int64(node.ActiveWorkers) + tot.Avg += node.Workers.Avg + tot.Curr += node.Workers.Curr + if tot.Max < node.Workers.Max { + tot.Max = node.Workers.Max + } } - return workers + if len(q.Nodes) > 0 { + tot.Avg /= float32(len(q.Nodes)) + tot.Curr /= int32(len(q.Nodes)) + } + return tot +} + +// qStatSummary returns cluster level stats for objects in replication queue +func (q ReplQueueStats) qStatSummary() InQueueMetric { + m := InQueueMetric{} + for _, v := range q.Nodes { + m.Avg.Add(v.QStats.Avg) + m.Curr.Add(v.QStats.Curr) + if m.Max.Count < v.QStats.Max.Count { + m.Max.Add(v.QStats.Max) + } + } + return m } // ReplQStats holds stats for objects in replication queue type ReplQStats struct { - Uptime int64 `json:"uptime"` - Workers int64 `json:"workers"` + Uptime int64 `json:"uptime"` + Workers WorkerStat `json:"workers"` - XferStats map[MetricName]XferStats `json:"xferStats"` - 
QStats map[MetricName]InQueueStats `json:"qStats"` + XferStats map[MetricName]XferStats `json:"xferStats"` + TgtXferStats map[string]map[MetricName]XferStats `json:"tgtXferStats"` + + QStats InQueueMetric `json:"qStats"` + MRFStats ReplMRFStats `json:"mrfStats"` } // QStats returns cluster level stats for objects in replication queue func (q ReplQueueStats) QStats() (r ReplQStats) { - r.QStats = make(map[MetricName]InQueueStats) + r.QStats = q.qStatSummary() r.XferStats = make(map[MetricName]XferStats) + r.TgtXferStats = make(map[string]map[MetricName]XferStats) + r.Workers = q.Workers() + for _, node := range q.Nodes { - r.Workers += int64(node.ActiveWorkers) + for arn := range node.TgtXferStats { + xmap, ok := node.TgtXferStats[arn] + if !ok { + xmap = make(map[MetricName]XferStats) + } + for m, v := range xmap { + st, ok := r.XferStats[m] + if !ok { + st = XferStats{} + } + st.AvgRate += v.AvgRate + st.CurrRate += v.CurrRate + st.PeakRate = math.Max(st.PeakRate, v.PeakRate) + if _, ok := r.TgtXferStats[arn]; !ok { + r.TgtXferStats[arn] = make(map[MetricName]XferStats) + } + r.TgtXferStats[arn][m] = st + } + } for k, v := range node.XferStats { st, ok := r.XferStats[k] if !ok { @@ -830,33 +952,20 @@ func (q ReplQueueStats) QStats() (r ReplQStats) { st.PeakRate = math.Max(st.PeakRate, v.PeakRate) r.XferStats[k] = st } - for k, v := range node.QStats { - st, ok := r.QStats[k] - if !ok { - st = InQueueStats{} - } - st.Count += v.Count - st.Bytes += v.Bytes - r.QStats[k] = st - } + r.MRFStats.LastFailedCount += node.MRFStats.LastFailedCount + r.MRFStats.TotalDroppedCount += node.MRFStats.TotalDroppedCount + r.MRFStats.TotalDroppedBytes += node.MRFStats.TotalDroppedBytes r.Uptime += node.Uptime } if len(q.Nodes) > 0 { - for k := range r.XferStats { - st := r.XferStats[k] - st.AvgRate /= float64(len(q.Nodes)) - st.CurrRate /= float64(len(q.Nodes)) - r.XferStats[k] = st - } r.Uptime /= int64(len(q.Nodes)) // average uptime } - return } // MetricsV2 represents replication metrics for a bucket. type MetricsV2 struct { - History Metrics `json:"history"` + Uptime int64 `json:"uptime"` CurrentStats Metrics `json:"currStats"` QueueStats ReplQueueStats `json:"queueStats"` } diff --git a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go index 51a04b06..056e78a6 100644 --- a/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go +++ b/vendor/github.com/minio/minio-go/v7/pkg/s3utils/utils.go @@ -121,49 +121,54 @@ func GetRegionFromURL(endpointURL url.URL) string { if endpointURL.Host == "s3-external-1.amazonaws.com" { return "" } - if IsAmazonGovCloudEndpoint(endpointURL) { - return "us-gov-west-1" - } + // if elb's are used we cannot calculate which region it may be, just return empty. 
if elbAmazonRegex.MatchString(endpointURL.Host) || elbAmazonCnRegex.MatchString(endpointURL.Host) { return "" } - parts := amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host) + + // We check for FIPS dualstack matching first to avoid the non-greedy + // regex for FIPS non-dualstack matching a dualstack URL + parts := amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host) if len(parts) > 1 { return parts[1] } - if IsAmazonFIPSUSEastWestEndpoint(endpointURL) { - // We check for FIPS dualstack matching first to avoid the non-greedy - // regex for FIPS non-dualstack matching a dualstack URL - parts = amazonS3HostFIPSDualStack.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } - parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host) - if len(parts) > 1 { - return parts[1] - } + + parts = amazonS3HostFIPS.FindStringSubmatch(endpointURL.Host) + if len(parts) > 1 { + return parts[1] } + + parts = amazonS3HostDualStack.FindStringSubmatch(endpointURL.Host) + if len(parts) > 1 { + return parts[1] + } + parts = amazonS3HostHyphen.FindStringSubmatch(endpointURL.Host) if len(parts) > 1 { return parts[1] } + parts = amazonS3ChinaHost.FindStringSubmatch(endpointURL.Host) if len(parts) > 1 { return parts[1] } + parts = amazonS3ChinaHostDualStack.FindStringSubmatch(endpointURL.Host) if len(parts) > 1 { return parts[1] } + parts = amazonS3HostDot.FindStringSubmatch(endpointURL.Host) if len(parts) > 1 { return parts[1] } + parts = amazonS3HostPrivateLink.FindStringSubmatch(endpointURL.Host) if len(parts) > 1 { return parts[1] } + return "" } @@ -186,45 +191,25 @@ func IsAmazonGovCloudEndpoint(endpointURL url.URL) bool { return false } return (endpointURL.Host == "s3-us-gov-west-1.amazonaws.com" || + endpointURL.Host == "s3-us-gov-east-1.amazonaws.com" || IsAmazonFIPSGovCloudEndpoint(endpointURL)) } -// IsAmazonFIPSGovCloudEndpoint - Match if it is exactly Amazon S3 FIPS GovCloud endpoint. -// See https://aws.amazon.com/compliance/fips. +// IsAmazonFIPSGovCloudEndpoint - match if the endpoint is FIPS and GovCloud. func IsAmazonFIPSGovCloudEndpoint(endpointURL url.URL) bool { if endpointURL == sentinelURL { return false } - return endpointURL.Host == "s3-fips-us-gov-west-1.amazonaws.com" || - endpointURL.Host == "s3-fips.us-gov-west-1.amazonaws.com" || - endpointURL.Host == "s3-fips.dualstack.us-gov-west-1.amazonaws.com" -} - -// IsAmazonFIPSUSEastWestEndpoint - Match if it is exactly Amazon S3 FIPS US East/West endpoint. -// See https://aws.amazon.com/compliance/fips. -func IsAmazonFIPSUSEastWestEndpoint(endpointURL url.URL) bool { - if endpointURL == sentinelURL { - return false - } - switch endpointURL.Host { - case "s3-fips.us-east-2.amazonaws.com": - case "s3-fips.dualstack.us-west-1.amazonaws.com": - case "s3-fips.dualstack.us-west-2.amazonaws.com": - case "s3-fips.dualstack.us-east-2.amazonaws.com": - case "s3-fips.dualstack.us-east-1.amazonaws.com": - case "s3-fips.us-west-1.amazonaws.com": - case "s3-fips.us-west-2.amazonaws.com": - case "s3-fips.us-east-1.amazonaws.com": - default: - return false - } - return true + return IsAmazonFIPSEndpoint(endpointURL) && strings.Contains(endpointURL.Host, "us-gov-") } // IsAmazonFIPSEndpoint - Match if it is exactly Amazon S3 FIPS endpoint. // See https://aws.amazon.com/compliance/fips. 
func IsAmazonFIPSEndpoint(endpointURL url.URL) bool { - return IsAmazonFIPSUSEastWestEndpoint(endpointURL) || IsAmazonFIPSGovCloudEndpoint(endpointURL) + if endpointURL == sentinelURL { + return false + } + return strings.HasPrefix(endpointURL.Host, "s3-fips") && strings.HasSuffix(endpointURL.Host, ".amazonaws.com") } // IsAmazonPrivateLinkEndpoint - Match if it is exactly Amazon S3 PrivateLink interface endpoint diff --git a/vendor/github.com/swaggo/echo-swagger/swagger.go b/vendor/github.com/swaggo/echo-swagger/swagger.go index 57520853..b8dc937d 100644 --- a/vendor/github.com/swaggo/echo-swagger/swagger.go +++ b/vendor/github.com/swaggo/echo-swagger/swagger.go @@ -6,6 +6,7 @@ import ( "path/filepath" "regexp" + "github.com/ghodss/yaml" "github.com/labstack/echo/v4" swaggerFiles "github.com/swaggo/files/v2" "github.com/swaggo/swag" @@ -14,7 +15,7 @@ import ( // Config stores echoSwagger configuration variables. type Config struct { // The url pointing to API definition (normally swagger.json or swagger.yaml). Default is `mockedSwag.json`. - URL string + URLs []string DocExpansion string DomID string InstanceName string @@ -42,7 +43,7 @@ type OAuthConfig struct { // URL presents the url pointing to API definition (normally swagger.json or swagger.yaml). func URL(url string) func(*Config) { return func(c *Config) { - c.URL = url + c.URLs = append(c.URLs, url) } } @@ -97,7 +98,7 @@ func OAuth(config *OAuthConfig) func(*Config) { func newConfig(configFns ...func(*Config)) *Config { config := Config{ - URL: "doc.json", + URLs: []string{"doc.json", "doc.yaml"}, DocExpansion: "list", DomID: "swagger-ui", InstanceName: "swagger", @@ -146,6 +147,8 @@ func EchoWrapHandler(options ...func(*Config)) echo.HandlerFunc { c.Response().Header().Set("Content-Type", "application/javascript") case ".json": c.Response().Header().Set("Content-Type", "application/json; charset=utf-8") + case ".yaml": + c.Response().Header().Set("Content-Type", "text/plain; charset=utf-8") case ".png": c.Response().Header().Set("Content-Type", "image/png") } @@ -170,7 +173,20 @@ func EchoWrapHandler(options ...func(*Config)) echo.HandlerFunc { } _, _ = c.Response().Writer.Write([]byte(doc)) + case "doc.yaml": + jsonString, err := swag.ReadDoc(config.InstanceName) + if err != nil { + c.Error(err) + return nil + } + doc, err := yaml.JSONToYAML([]byte(jsonString)) + if err != nil { + c.Error(err) + + return nil + } + _, _ = c.Response().Writer.Write(doc) default: c.Request().URL.Path = matches[2] http.FileServer(http.FS(swaggerFiles.FS)).ServeHTTP(c.Response(), c.Request()) @@ -254,7 +270,14 @@ const indexTemplate = ` window.onload = function() { // Build a system const ui = SwaggerUIBundle({ - url: "{{.URL}}", + urls: [ + {{range $index, $url := .URLs}} + { + name: "{{$url}}", + url: "{{$url}}", + }, + {{end}} + ], syntaxHighlight: {{.SyntaxHighlight}}, deepLinking: {{.DeepLinking}}, docExpansion: "{{.DocExpansion}}", diff --git a/vendor/github.com/swaggo/swag/README_pt.md b/vendor/github.com/swaggo/swag/README_pt.md index 16f021e4..1415e9c8 100644 --- a/vendor/github.com/swaggo/swag/README_pt.md +++ b/vendor/github.com/swaggo/swag/README_pt.md @@ -95,7 +95,7 @@ OPÇÕES: --parseInternal Parse go ficheiros em pacotes internos, desactivados por padrão (padrão: falso) --generatedTime Gerar timestamp no topo dos docs.go, desactivado por padrão (padrão: falso) --parteDepth value Dependência profundidade parse (por padrão: 100) - + --templateDelims value, --td value fornecem delimitadores personalizados para a geração de modelos Go. 
O formato é leftDelim,rightDelim. Por exemplo: "[[,]]" ... --help, -h mostrar ajuda (por padrão: falso) @@ -418,7 +418,7 @@ Quando uma pequena sequência na sua documentação é insuficiente, ou precisa | success | resposta de sucesso que separou por espaços. `return code or default`,`{param type}`,`data type`,`comment` |. | failure | Resposta de falha que separou por espaços. `return code or default`,`{param type}`,`data type`,`comment` | | response | Igual ao `sucesso` e `falha` | -| header | Cabeçalho em resposta que separou por espaços. `código de retorno`,`{{tipo de parâmetro}`,`tipo de dados`,`comentário` |. +| header | Cabeçalho em resposta que separou por espaços. `código de retorno`,`{tipo de parâmetro}`,`tipo de dados`,`comentário` |. | router | Definição do caminho que separou por espaços. `path`,`[httpMethod]` | | x-name | A chave de extensão, deve ser iniciada por x- e tomar apenas o valor json. | | x-codeSample | Optional Markdown use. tomar `file` como parâmetro. Isto irá então procurar um ficheiro nomeado como o resumo na pasta dada. | @@ -905,6 +905,18 @@ Por defeito, o comando `swag` gera especificação Swagger em três tipos diferentes. Se desejar limitar um conjunto de tipos de ficheiros que devem ser gerados pode utilizar a bandeira `--outputTypes` (short `-ot`). O valor por defeito é `go,json,yaml` - tipos de saída separados por vírgula. Para limitar a saída apenas a ficheiros `go` e `yaml`, escrever-se-ia `go,yaml`. Com o comando completo, seria `swag init --outputTypes go,yaml`. +### Alterar os delimitadores de acção padrão Go Template +[#980](https://github.com/swaggo/swag/issues/980) +[#1177](https://github.com/swaggo/swag/issues/1177) + +Se as suas anotações ou campos estruturantes contêm "{{" ou "}}", a geração de modelos irá muito provavelmente falhar, uma vez que estes são os delimitadores por defeito para [go templates](https://pkg.go.dev/text/template#Template.Delims). + +Para que a geração funcione correctamente, pode alterar os delimitadores por defeito com `-td`. Por exemplo: ```console swag init -g http/api.go -td "[[,]]" ``` + +O novo delimitador é uma cadeia de caracteres com o formato "``,``". ## Sobre o projecto Este projecto foi inspirado por [yvasiyarov/swagger](https://github.com/yvasiyarov/swagger) mas simplificámos a utilização e acrescentámos apoio a uma variedade de [frameworks web](#estruturas-web-suportadas). A fonte de imagem Gopher é [tenntenn/gopher-stickers](https://github.com/tenntenn/gopher-stickers). Tem licenças [creative commons licensing](http://creativecommons.org/licenses/by/3.0/deed.en).
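The `--templateDelims`/`-td` flag documented above is a thin wrapper over Go's `text/template` delimiter support. A minimal, self-contained sketch of the underlying mechanism, with invented template content (illustrative only, not swag's actual generator code):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// A doc fragment that itself contains literal "{{ ... }}" text. With the
	// default delimiters, Parse would reject it as a template action; with
	// "[[,]]" (cf. `swag init -td "[[,]]"`), the braces are left alone.
	const doc = `version: [[.Version]], literal: {{ not an action }}`

	tmpl := template.Must(template.New("docs").Delims("[[", "]]").Parse(doc))

	// Prints: version: 2.0, literal: {{ not an action }}
	_ = tmpl.Execute(os.Stdout, struct{ Version string }{Version: "2.0"})
}
```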
diff --git a/vendor/github.com/swaggo/swag/golist.go b/vendor/github.com/swaggo/swag/golist.go index ca916a8e..fa0b2cd9 100644 --- a/vendor/github.com/swaggo/swag/golist.go +++ b/vendor/github.com/swaggo/swag/golist.go @@ -47,16 +47,20 @@ func listPackages(ctx context.Context, dir string, env []string, args ...string) return pkgs, nil } -func (parser *Parser) getAllGoFileInfoFromDepsByList(pkg *build.Package) error { +func (parser *Parser) getAllGoFileInfoFromDepsByList(pkg *build.Package, parseFlag ParseFlag) error { ignoreInternal := pkg.Goroot && !parser.ParseInternal if ignoreInternal { // ignored internal return nil } + if parser.skipPackageByPrefix(pkg.ImportPath) { + return nil // ignored by user-defined package path prefixes + } + srcDir := pkg.Dir var err error for i := range pkg.GoFiles { - err = parser.parseFile(pkg.ImportPath, filepath.Join(srcDir, pkg.GoFiles[i]), nil, ParseModels) + err = parser.parseFile(pkg.ImportPath, filepath.Join(srcDir, pkg.GoFiles[i]), nil, parseFlag) if err != nil { return err } @@ -64,7 +68,7 @@ func (parser *Parser) getAllGoFileInfoFromDepsByList(pkg *build.Package) error { // parse .go source files that import "C" for i := range pkg.CgoFiles { - err = parser.parseFile(pkg.ImportPath, filepath.Join(srcDir, pkg.CgoFiles[i]), nil, ParseModels) + err = parser.parseFile(pkg.ImportPath, filepath.Join(srcDir, pkg.CgoFiles[i]), nil, parseFlag) if err != nil { return err } diff --git a/vendor/github.com/swaggo/swag/operation.go b/vendor/github.com/swaggo/swag/operation.go index 3485f51f..edf25100 100644 --- a/vendor/github.com/swaggo/swag/operation.go +++ b/vendor/github.com/swaggo/swag/operation.go @@ -381,7 +381,8 @@ func (operation *Operation) ParseParamComment(commentLine string, astFile *ast.F return fmt.Errorf("%s is not supported paramType", paramType) } - err := operation.parseParamAttribute(commentLine, objectType, refType, &param) + err := operation.parseParamAttribute(commentLine, objectType, refType, paramType, &param) + if err != nil { return err } @@ -436,7 +437,7 @@ var regexAttributes = map[string]*regexp.Regexp{ schemaExampleTag: regexp.MustCompile(`(?i)\s+schemaExample\(.*\)`), } -func (operation *Operation) parseParamAttribute(comment, objectType, schemaType string, param *spec.Parameter) error { +func (operation *Operation) parseParamAttribute(comment, objectType, schemaType, paramType string, param *spec.Parameter) error { schemaType = TransToValidSchemeType(schemaType) for attrKey, re := range regexAttributes { @@ -447,7 +448,7 @@ func (operation *Operation) parseParamAttribute(comment, objectType, schemaType switch attrKey { case enumsTag: - err = setEnumParam(param, attr, objectType, schemaType) + err = setEnumParam(param, attr, objectType, schemaType, paramType) case minimumTag, maximumTag: err = setNumberParam(param, attrKey, schemaType, attr, comment) case defaultTag: @@ -526,7 +527,7 @@ func setNumberParam(param *spec.Parameter, name, schemaType, attr, commentLine s } } -func setEnumParam(param *spec.Parameter, attr, objectType, schemaType string) error { +func setEnumParam(param *spec.Parameter, attr, objectType, schemaType, paramType string) error { for _, e := range strings.Split(attr, ",") { e = strings.TrimSpace(e) @@ -539,7 +540,12 @@ func setEnumParam(param *spec.Parameter, attr, objectType, schemaType string) er case ARRAY: param.Items.Enum = append(param.Items.Enum, value) default: - param.Enum = append(param.Enum, value) + switch paramType { + case "body": + param.Schema.Enum = append(param.Schema.Enum, value) + default: +
param.Enum = append(param.Enum, value) + } } } @@ -715,6 +721,11 @@ func (operation *Operation) ParseRouterComment(commentLine string) error { // ParseSecurityComment parses comment for given `security` comment string. func (operation *Operation) ParseSecurityComment(commentLine string) error { + if len(commentLine) == 0 { + operation.Security = []map[string][]string{} + return nil + } + var ( securityMap = make(map[string][]string) securitySource = commentLine[strings.Index(commentLine, "@Security")+1:] diff --git a/vendor/github.com/swaggo/swag/package.go b/vendor/github.com/swaggo/swag/package.go index bc25786c..6c3129e5 100644 --- a/vendor/github.com/swaggo/swag/package.go +++ b/vendor/github.com/swaggo/swag/package.go @@ -5,6 +5,7 @@ import ( "go/token" "reflect" "strconv" + "strings" ) // PackageDefinitions files and definition in a package. @@ -94,22 +95,26 @@ func (pkg *PackageDefinitions) evaluateConstValue(file *ast.File, iota int, expr case *ast.BasicLit: switch valueExpr.Kind { case token.INT: - // hexadecimal - if len(valueExpr.Value) > 2 && valueExpr.Value[0] == '0' && valueExpr.Value[1] == 'x' { - if x, err := strconv.ParseInt(valueExpr.Value[2:], 16, 64); err == nil { - return int(x), nil - } else if x, err := strconv.ParseUint(valueExpr.Value[2:], 16, 64); err == nil { - return x, nil - } else { - panic(err) - } + // handle underscored number, such as 1_000_000 + if strings.ContainsRune(valueExpr.Value, '_') { + valueExpr.Value = strings.Replace(valueExpr.Value, "_", "", -1) } - - //octet - if len(valueExpr.Value) > 1 && valueExpr.Value[0] == '0' { - if x, err := strconv.ParseInt(valueExpr.Value[1:], 8, 64); err == nil { + if len(valueExpr.Value) >= 2 && valueExpr.Value[0] == '0' { + var start, base = 2, 8 + switch valueExpr.Value[1] { + case 'x', 'X': + //hex + base = 16 + case 'b', 'B': + //binary + base = 2 + default: + //octet + start = 1 + } + if x, err := strconv.ParseInt(valueExpr.Value[start:], base, 64); err == nil { return int(x), nil - } else if x, err := strconv.ParseUint(valueExpr.Value[1:], 8, 64); err == nil { + } else if x, err := strconv.ParseUint(valueExpr.Value[start:], base, 64); err == nil { return x, nil } else { panic(err) diff --git a/vendor/github.com/swaggo/swag/packages.go b/vendor/github.com/swaggo/swag/packages.go index 2c5693fc..b897ea97 100644 --- a/vendor/github.com/swaggo/swag/packages.go +++ b/vendor/github.com/swaggo/swag/packages.go @@ -19,7 +19,7 @@ type PackagesDefinitions struct { files map[*ast.File]*AstFileInfo packages map[string]*PackageDefinitions uniqueDefinitions map[string]*TypeSpecDef - parseDependency bool + parseDependency ParseFlag debug Debugger } @@ -324,7 +324,7 @@ func (pkgDefs *PackagesDefinitions) EvaluateConstValueByName(file *ast.File, pkg } } } - if pkgDefs.parseDependency { + if pkgDefs.parseDependency > 0 { for _, pkgPath := range externalPkgPaths { if err := pkgDefs.loadExternalPackage(pkgPath); err == nil { if pkg, ok := pkgDefs.packages[pkgPath]; ok { @@ -513,7 +513,7 @@ func (pkgDefs *PackagesDefinitions) findTypeSpecFromPackagePaths(matchedPkgPaths } } - if pkgDefs.parseDependency { + if pkgDefs.parseDependency > 0 { for _, pkgPath := range externalPkgPaths { if err := pkgDefs.loadExternalPackage(pkgPath); err == nil { typeDef = pkgDefs.findTypeSpec(pkgPath, name) diff --git a/vendor/github.com/swaggo/swag/parser.go b/vendor/github.com/swaggo/swag/parser.go index 2e453960..6bf991e9 100644 --- a/vendor/github.com/swaggo/swag/parser.go +++ b/vendor/github.com/swaggo/swag/parser.go @@ -74,10 +74,10 @@ type 
ParseFlag int const ( // ParseNone parse nothing ParseNone ParseFlag = 0x00 - // ParseOperations parse operations - ParseOperations = 0x01 // ParseModels parse models - ParseModels = 0x02 + ParseModels = 0x01 + // ParseOperations parse operations + ParseOperations = 0x02 // ParseAll parse operations and models ParseAll = ParseOperations | ParseModels ) @@ -126,8 +126,8 @@ type Parser struct { // ParseVendor parse vendor folder ParseVendor bool - // ParseDependencies whether swag should be parse outside dependency folder - ParseDependency bool + // ParseDependencies whether swag should parse outside dependency folder: 0 none, 1 models, 2 operations, 3 all + ParseDependency ParseFlag // ParseInternal whether swag should parse internal packages ParseInternal bool @@ -153,6 +153,10 @@ type Parser struct { // excludes excludes dirs and files in SearchDir excludes map[string]struct{} + // packagePrefix is a list of package path prefixes; packages that do not + // match any one of them will be excluded when searching. + packagePrefix []string + // tells parser to include only specific extension parseExtension string @@ -237,11 +241,11 @@ func New(options ...func(*Parser)) *Parser { } // SetParseDependency sets whether to parse the dependent packages. -func SetParseDependency(parseDependency bool) func(*Parser) { +func SetParseDependency(parseDependency int) func(*Parser) { return func(p *Parser) { - p.ParseDependency = parseDependency + p.ParseDependency = ParseFlag(parseDependency) if p.packages != nil { - p.packages.parseDependency = parseDependency + p.packages.parseDependency = p.ParseDependency } } } @@ -273,6 +277,20 @@ func SetExcludedDirsAndFiles(excludes string) func(*Parser) { } } +// SetPackagePrefix sets a list of package path prefixes from a comma-separated +// string; packages that do not match any one of them will be excluded when +// searching. +func SetPackagePrefix(packagePrefix string) func(*Parser) { + return func(p *Parser) { + for _, f := range strings.Split(packagePrefix, ",") { + f = strings.TrimSpace(f) + if f != "" { + p.packagePrefix = append(p.packagePrefix, f) + } + } + } +} + // SetTags sets the tags to be included func SetTags(include string) func(*Parser) { return func(p *Parser) { @@ -343,6 +361,20 @@ func (parser *Parser) ParseAPI(searchDir string, mainAPIFile string, parseDepth return parser.ParseAPIMultiSearchDir([]string{searchDir}, mainAPIFile, parseDepth) } +// skipPackageByPrefix returns true if the given pkgpath does not match +// any user-defined package path prefixes. +func (parser *Parser) skipPackageByPrefix(pkgpath string) bool { + if len(parser.packagePrefix) == 0 { + return false + } + for _, prefix := range parser.packagePrefix { + if strings.HasPrefix(pkgpath, prefix) { + return false + } + } + return true +} + // ParseAPIMultiSearchDir is like ParseAPI but for multiple search dirs.
func (parser *Parser) ParseAPIMultiSearchDir(searchDirs []string, mainAPIFile string, parseDepth int) error { for _, searchDir := range searchDirs { @@ -365,7 +397,7 @@ func (parser *Parser) ParseAPIMultiSearchDir(searchDirs []string, mainAPIFile st } // Use 'go list' command instead of depth.Resolve() - if parser.ParseDependency { + if parser.ParseDependency > 0 { if parser.parseGoList { pkgs, err := listPackages(context.Background(), filepath.Dir(absMainAPIFilePath), nil, "-deps") if err != nil { @@ -374,7 +406,7 @@ func (parser *Parser) ParseAPIMultiSearchDir(searchDirs []string, mainAPIFile st length := len(pkgs) for i := 0; i < length; i++ { - err := parser.getAllGoFileInfoFromDepsByList(pkgs[i]) + err := parser.getAllGoFileInfoFromDepsByList(pkgs[i], parser.ParseDependency) if err != nil { return err } @@ -394,7 +426,7 @@ func (parser *Parser) ParseAPIMultiSearchDir(searchDirs []string, mainAPIFile st return fmt.Errorf("pkg %s cannot find all dependencies, %s", pkgName, err) } for i := 0; i < len(t.Root.Deps); i++ { - err := parser.getAllGoFileInfoFromDeps(&t.Root.Deps[i]) + err := parser.getAllGoFileInfoFromDeps(&t.Root.Deps[i], parser.ParseDependency) if err != nil { return err } @@ -569,6 +601,9 @@ func parseGeneralAPIInfo(parser *Parser, comments []string) error { parser.swagger.SecurityDefinitions[value] = scheme + case securityAttr: + parser.swagger.Security = append(parser.swagger.Security, parseSecurity(value)) + case "@query.collection.format": parser.collectionFormatInQuery = TransToValidCollectionFormat(value) @@ -768,6 +803,34 @@ func parseSecAttributes(context string, lines []string, index *int) (*spec.Secur return scheme, nil } +func parseSecurity(commentLine string) map[string][]string { + securityMap := make(map[string][]string) + + for _, securityOption := range strings.Split(commentLine, "||") { + securityOption = strings.TrimSpace(securityOption) + + left, right := strings.Index(securityOption, "["), strings.Index(securityOption, "]") + + if !(left == -1 && right == -1) { + scopes := securityOption[left+1 : right] + + var options []string + + for _, scope := range strings.Split(scopes, ",") { + options = append(options, strings.TrimSpace(scope)) + } + + securityKey := securityOption[0:left] + securityMap[securityKey] = append(securityMap[securityKey], options...) + } else { + securityKey := strings.TrimSpace(securityOption) + securityMap[securityKey] = []string{} + } + } + + return securityMap +} + func initIfEmpty(license *spec.License) *spec.License { if license == nil { return new(spec.License) @@ -867,18 +930,29 @@ func getTagsFromComment(comment string) (tags []string) { } func (parser *Parser) matchTags(comments []*ast.Comment) (match bool) { - if len(parser.tags) != 0 { - for _, comment := range comments { - for _, tag := range getTagsFromComment(comment.Text) { - if _, has := parser.tags["!"+tag]; has { - return false - } - if _, has := parser.tags[tag]; has { - match = true // keep iterating as it may contain a tag that is excluded - } + if len(parser.tags) == 0 { + return true + } + + match = false + for _, comment := range comments { + for _, tag := range getTagsFromComment(comment.Text) { + if _, has := parser.tags["!"+tag]; has { + return false + } + if _, has := parser.tags[tag]; has { + match = true // keep iterating as it may contain a tag that is excluded + } + } + } + + if !match { + // If all tags are negation then we should return true + for key := range parser.tags { + if key[0] != '!' 
{ + return false } } - return } return true } @@ -975,7 +1049,21 @@ func processRouterOperation(parser *Parser, operation *Operation) error { parser.debug.Printf("warning: %s\n", err) } - *op = &operation.Operation + if len(operation.RouterProperties) > 1 { + newOp := *operation + var validParams []spec.Parameter + for _, param := range newOp.Operation.OperationProps.Parameters { + if param.In == "path" && !strings.Contains(routeProperties.Path, param.Name) { + // This path param is not actually contained in the path, skip adding it to the final params + continue + } + validParams = append(validParams, param) + } + newOp.Operation.OperationProps.Parameters = validParams + *op = &newOp.Operation + } else { + *op = &operation.Operation + } parser.swagger.Paths.Paths[routeProperties.Path] = pathItem } @@ -1051,7 +1139,7 @@ func (parser *Parser) getTypeSchema(typeName string, file *ast.File, ref bool) ( if err == ErrRecursiveParseStruct && ref { return parser.getRefTypeSchema(typeSpecDef, schema), nil } - return nil, err + return nil, fmt.Errorf("%s: %w", typeName, err) } } @@ -1285,7 +1373,7 @@ func (parser *Parser) parseStruct(file *ast.File, fields *ast.FieldList) (*spec. for _, field := range fields.List { fieldProps, requiredFromAnon, err := parser.parseStructField(file, field) if err != nil { - if err == ErrFuncTypeField || err == ErrSkippedField { + if errors.Is(err, ErrFuncTypeField) || errors.Is(err, ErrSkippedField) { continue } @@ -1336,12 +1424,12 @@ func (parser *Parser) parseStructField(file *ast.File, field *ast.Field) (map[st if fieldName == "" { typeName, err := getFieldType(file, field.Type, nil) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("%s: %w", fieldName, err) } schema, err := parser.getTypeSchema(typeName, file, false) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("%s: %w", fieldName, err) } if len(schema.Type) > 0 && schema.Type[0] == OBJECT { @@ -1363,7 +1451,7 @@ func (parser *Parser) parseStructField(file *ast.File, field *ast.Field) (map[st schema, err := ps.CustomSchema() if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("%s: %w", fieldName, err) } if schema == nil { @@ -1377,20 +1465,20 @@ func (parser *Parser) parseStructField(file *ast.File, field *ast.Field) (map[st } if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("%s: %w", fieldName, err) } } err = ps.ComplementSchema(schema) if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("%s: %w", fieldName, err) } var tagRequired []string required, err := ps.IsRequired() if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("%s: %w", fieldName, err) } if required { @@ -1567,6 +1655,9 @@ func defineTypeOfExample(schemaType, arrayType, exampleValue string) (interface{ // GetAllGoFileInfo gets all Go source files information for given searchDir. 
func (parser *Parser) getAllGoFileInfo(packageDir, searchDir string) error { + if parser.skipPackageByPrefix(packageDir) { + return nil // ignored by user-defined package path prefixes + } return filepath.Walk(searchDir, func(path string, f os.FileInfo, _ error) error { err := parser.Skip(path, f) if err != nil { @@ -1586,12 +1677,16 @@ func (parser *Parser) getAllGoFileInfo(packageDir, searchDir string) error { }) } -func (parser *Parser) getAllGoFileInfoFromDeps(pkg *depth.Pkg) error { +func (parser *Parser) getAllGoFileInfoFromDeps(pkg *depth.Pkg, parseFlag ParseFlag) error { ignoreInternal := pkg.Internal && !parser.ParseInternal if ignoreInternal || !pkg.Resolved { // ignored internal and not resolved dependencies return nil } + if pkg.Raw != nil && parser.skipPackageByPrefix(pkg.Raw.ImportPath) { + return nil // ignored by user-defined package path prefixes + } + // Skip cgo if pkg.Raw == nil && pkg.Name == "C" { return nil @@ -1610,13 +1705,13 @@ func (parser *Parser) getAllGoFileInfoFromDeps(pkg *depth.Pkg) error { } path := filepath.Join(srcDir, f.Name()) - if err := parser.parseFile(pkg.Name, path, nil, ParseModels); err != nil { + if err := parser.parseFile(pkg.Name, path, nil, parseFlag); err != nil { return err } } for i := 0; i < len(pkg.Deps); i++ { - if err := parser.getAllGoFileInfoFromDeps(&pkg.Deps[i]); err != nil { + if err := parser.getAllGoFileInfoFromDeps(&pkg.Deps[i], parseFlag); err != nil { return err } } diff --git a/vendor/github.com/swaggo/swag/types.go b/vendor/github.com/swaggo/swag/types.go index 8e2f51d8..ebb373d7 100644 --- a/vendor/github.com/swaggo/swag/types.go +++ b/vendor/github.com/swaggo/swag/types.go @@ -3,6 +3,7 @@ package swag import ( "go/ast" "go/token" + "regexp" "strings" "github.com/go-openapi/spec" @@ -47,9 +48,15 @@ func (t *TypeSpecDef) TypeName() string { return t.TypeSpec.Name.Name[1:] } else if t.TypeSpec.Comment != nil { // get alias from comment '// @name ' + const regexCaseInsensitive = "(?i)" + reTypeName, err := regexp.Compile(regexCaseInsensitive + `^@name\s+(\S+)`) + if err != nil { + panic(err) + } for _, comment := range t.TypeSpec.Comment.List { - texts := strings.Split(strings.TrimSpace(strings.TrimLeft(comment.Text, "/")), " ") - if len(texts) > 1 && strings.ToLower(texts[0]) == "@name" { + trimmedComment := strings.TrimSpace(strings.TrimLeft(comment.Text, "/")) + texts := reTypeName.FindStringSubmatch(trimmedComment) + if len(texts) > 1 { return texts[1] } } diff --git a/vendor/github.com/swaggo/swag/version.go b/vendor/github.com/swaggo/swag/version.go index 7b928b22..e5cf8792 100644 --- a/vendor/github.com/swaggo/swag/version.go +++ b/vendor/github.com/swaggo/swag/version.go @@ -1,4 +1,4 @@ package swag // Version of swag. -const Version = "v1.8.12" +const Version = "v1.16.2" diff --git a/vendor/github.com/tidwall/gjson/README.md b/vendor/github.com/tidwall/gjson/README.md index c8db11f1..da2bad44 100644 --- a/vendor/github.com/tidwall/gjson/README.md +++ b/vendor/github.com/tidwall/gjson/README.md @@ -211,6 +211,7 @@ There are currently the following built-in modifiers: - `@tostr`: Converts json to a string. Wraps a json string. - `@fromstr`: Converts a string from json. Unwraps a json string. - `@group`: Groups arrays of objects. See [e4fc67c](https://github.com/tidwall/gjson/commit/e4fc67c92aeebf2089fabc7872f010e340d105db). +- `@dig`: Search for a value without providing its entire path. See [e8e87f2](https://github.com/tidwall/gjson/commit/e8e87f2a00dc41f3aba5631094e21f59a8cf8cbf). 
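The `@dig` modifier added in the bullet above (and implemented in `modDig` in gjson.go below) recursively walks the whole document and collects every value found under the given sub-path into a JSON array, so no full path is needed. A small usage sketch, assuming gjson v1.16.0; the JSON document is invented:

```go
package main

import (
	"fmt"

	"github.com/tidwall/gjson"
)

func main() {
	const json = `{"user":{"addr":{"city":"berlin"}},"orders":[{"addr":{"city":"lisbon"}}]}`

	// "@dig:city" searches every nested object and array element for a
	// "city" key and returns all matches as a JSON array.
	cities := gjson.Get(json, "@dig:city")
	fmt.Println(cities.Raw) // ["berlin","lisbon"]
}
```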
### Modifier arguments diff --git a/vendor/github.com/tidwall/gjson/SYNTAX.md b/vendor/github.com/tidwall/gjson/SYNTAX.md index 9405514b..6721d7f5 100644 --- a/vendor/github.com/tidwall/gjson/SYNTAX.md +++ b/vendor/github.com/tidwall/gjson/SYNTAX.md @@ -258,6 +258,7 @@ There are currently the following built-in modifiers: - `@tostr`: Converts json to a string. Wraps a json string. - `@fromstr`: Converts a string from json. Unwraps a json string. - `@group`: Groups arrays of objects. See [e4fc67c](https://github.com/tidwall/gjson/commit/e4fc67c92aeebf2089fabc7872f010e340d105db). +- `@dig`: Search for a value without providing its entire path. See [e8e87f2](https://github.com/tidwall/gjson/commit/e8e87f2a00dc41f3aba5631094e21f59a8cf8cbf). #### Modifier arguments diff --git a/vendor/github.com/tidwall/gjson/gjson.go b/vendor/github.com/tidwall/gjson/gjson.go index e7e8d7e2..a1633be5 100644 --- a/vendor/github.com/tidwall/gjson/gjson.go +++ b/vendor/github.com/tidwall/gjson/gjson.go @@ -2754,6 +2754,7 @@ func execModifier(json, path string) (pathOut, res string, ok bool) { var parsedArgs bool switch pathOut[0] { case '{', '[', '"': + // json arg res := Parse(pathOut) if res.Exists() { args = squash(pathOut) @@ -2762,14 +2763,20 @@ func execModifier(json, path string) (pathOut, res string, ok bool) { } } if !parsedArgs { - idx := strings.IndexByte(pathOut, '|') - if idx == -1 { - args = pathOut - pathOut = "" - } else { - args = pathOut[:idx] - pathOut = pathOut[idx:] + // simple arg + i := 0 + for ; i < len(pathOut); i++ { + if pathOut[i] == '|' { + break + } + switch pathOut[i] { + case '{', '[', '"', '(': + s := squash(pathOut[i:]) + i += len(s) - 1 + } } + args = pathOut[:i] + pathOut = pathOut[i:] } } return pathOut, fn(json, args), true @@ -2789,19 +2796,24 @@ func unwrap(json string) string { // DisableModifiers will disable the modifier syntax var DisableModifiers = false -var modifiers = map[string]func(json, arg string) string{ - "pretty": modPretty, - "ugly": modUgly, - "reverse": modReverse, - "this": modThis, - "flatten": modFlatten, - "join": modJoin, - "valid": modValid, - "keys": modKeys, - "values": modValues, - "tostr": modToStr, - "fromstr": modFromStr, - "group": modGroup, +var modifiers map[string]func(json, arg string) string + +func init() { + modifiers = map[string]func(json, arg string) string{ + "pretty": modPretty, + "ugly": modUgly, + "reverse": modReverse, + "this": modThis, + "flatten": modFlatten, + "join": modJoin, + "valid": modValid, + "keys": modKeys, + "values": modValues, + "tostr": modToStr, + "fromstr": modFromStr, + "group": modGroup, + "dig": modDig, + } } // AddModifier binds a custom modifier command to the GJSON syntax. @@ -3435,3 +3447,30 @@ func escapeComp(comp string) string { } return comp } + +func parseRecursiveDescent(all []Result, parent Result, path string) []Result { + if res := parent.Get(path); res.Exists() { + all = append(all, res) + } + if parent.IsArray() || parent.IsObject() { + parent.ForEach(func(_, val Result) bool { + all = parseRecursiveDescent(all, val, path) + return true + }) + } + return all +} + +func modDig(json, arg string) string { + all := parseRecursiveDescent(nil, Parse(json), arg) + var out []byte + out = append(out, '[') + for i, res := range all { + if i > 0 { + out = append(out, ',') + } + out = append(out, res.Raw...) 
} + out = append(out, ']') + return string(out) +} diff --git a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml index 6be2c354..1b27f196 100644 --- a/vendor/github.com/tklauser/go-sysconf/.cirrus.yml +++ b/vendor/github.com/tklauser/go-sysconf/.cirrus.yml @@ -1,6 +1,6 @@ env: CIRRUS_CLONE_DEPTH: 1 - GO_VERSION: go1.19.1 + GO_VERSION: go1.20 freebsd_12_task: freebsd_instance: diff --git a/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go b/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go index 4a5197b2..3f5d83f6 100644 --- a/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go +++ b/vendor/github.com/tklauser/go-sysconf/sysconf_darwin.go @@ -5,6 +5,10 @@ package sysconf import ( + "strconv" + "strings" + "sync" + "golang.org/x/sys/unix" ) @@ -14,8 +18,14 @@ const ( _SYMLOOP_MAX = _MAXSYMLINKS ) -// sysconf implements sysconf(3) as in the Darwin libc, version 1244.30.3 -// (derived from the FreeBSD libc). +var uname struct { + sync.Once + macOSMajor int +} + +// sysconf implements sysconf(3) as in the Darwin libc (derived from the FreeBSD +// libc), version 1534.81.1. +// See https://github.com/apple-oss-distributions/Libc/tree/Libc-1534.81.1. func sysconf(name int) (int64, error) { switch name { case SC_AIO_LISTIO_MAX: @@ -54,12 +64,16 @@ func sysconf(name int) (int64, error) { return sysctl32("kern.ngroups"), nil case SC_OPEN_MAX, SC_STREAM_MAX: var rlim unix.Rlimit - if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err == nil { - if rlim.Cur != unix.RLIM_INFINITY { - return int64(rlim.Cur), nil - } + if err := unix.Getrlimit(unix.RLIMIT_NOFILE, &rlim); err != nil { + return -1, nil } - return -1, nil + if rlim.Cur > unix.RLIM_INFINITY { + return -1, nil + } + if rlim.Cur > _LONG_MAX { + return -1, unix.EOVERFLOW + } + return int64(rlim.Cur), nil case SC_RTSIG_MAX: return -1, nil case SC_SEM_NSEMS_MAX: @@ -126,7 +140,22 @@ func sysconf(name int) (int64, error) { } return _POSIX_SEMAPHORES, nil case SC_SPAWN: - return _POSIX_SPAWN, nil + uname.Once.Do(func() { + var u unix.Utsname + err := unix.Uname(&u) + if err != nil { + return + } + rel := unix.ByteSliceToString(u.Release[:]) + ver := strings.Split(rel, ".") + maj, _ := strconv.Atoi(ver[0]) + uname.macOSMajor = maj + }) + if uname.macOSMajor < 22 { + return -1, nil + } + // macOS 13 (Ventura) and later + return 200112, nil case SC_SPIN_LOCKS: return _POSIX_SPIN_LOCKS, nil case SC_SPORADIC_SERVER: diff --git a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go index 6fa7fde8..6fadf3db 100644 --- a/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go +++ b/vendor/github.com/tklauser/go-sysconf/zsysconf_defs_darwin.go @@ -181,7 +181,6 @@ const ( _POSIX_SHARED_MEMORY_OBJECTS = -0x1 _POSIX_SHELL = 0x30db0 _POSIX_SIGQUEUE_MAX = 0x20 - _POSIX_SPAWN = -0x1 _POSIX_SPIN_LOCKS = -0x1 _POSIX_SPORADIC_SERVER = -0x1 _POSIX_SS_REPL_MAX = 0x4 @@ -248,7 +247,8 @@ const ( const ( _CHAR_BIT = 0x8 - _INT_MAX = 0x7fffffff + _INT_MAX = 0x7fffffff + _LONG_MAX = 0x7fffffffffffffff sizeofOffT = 0x8 ) diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml new file mode 100644 index 00000000..7348c50c --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2
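One detail worth calling out in the go-sysconf hunk above: per the patch, `SC_SPAWN` is now answered by probing the kernel release once via uname, since Darwin kernel major 22 corresponds to macOS 13 (Ventura), the first release where the patch reports posix_spawn support (200112). A sketch of that detection on its own, assuming a darwin build target and only the x/sys/unix calls already used in the patch:

```go
//go:build darwin

package main

import (
	"fmt"
	"strconv"
	"strings"

	"golang.org/x/sys/unix"
)

func main() {
	var u unix.Utsname
	if err := unix.Uname(&u); err != nil {
		panic(err)
	}

	// u.Release holds the Darwin kernel version, e.g. "22.6.0" on macOS 13.
	rel := unix.ByteSliceToString(u.Release[:])
	major, _ := strconv.Atoi(strings.Split(rel, ".")[0])

	// Darwin 22 and later map to macOS 13 (Ventura) and later.
	fmt.Printf("darwin %d, _SC_SPAWN supported: %v\n", major, major >= 22)
}
```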
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 00000000..8da58fbf --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 00000000..866d74a7 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 00000000..b50c6e87 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. 
Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. + +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 00000000..acf71402 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,744 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. 
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +var disableLineWrapping = false + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } + if disableLineWrapping { + emitter.best_width = -1 + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. 
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. 
+// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 00000000..129bc2a9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
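The decoder half of the vendored package turns that libyaml event stream into the `node` tree above and then assigns it onto Go values via reflection. As a point of reference for how this code is reached, a minimal sketch using only the package's public entry point (`Config` and its fields are illustrative, not part of the vendored file):

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// Config is an illustrative target type; any struct with yaml tags works.
type Config struct {
	Name  string `yaml:"name"`
	Ports []int  `yaml:"ports"`
}

func main() {
	data := []byte("name: core\nports: [8080, 1935]\n")

	var cfg Config
	// Unmarshal drives the machinery below: parser events are built
	// into a node tree, which the decoder then walks into cfg.
	if err := yaml.Unmarshal(data, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Name:core Ports:[8080 1935]}
}
```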
+ +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *node + doneInit bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *node, anchor []byte) { + if anchor != nil { + p.doc.anchors[string(anchor)] = n + } +} + +func (p *parser) parse() *node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. 
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
+ out.Set(reflect.ValueOf(n.value)) + } else { + out.Set(reflect.ValueOf(resolved)) + } + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch resolved := resolved.(type) { + case int: + if !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + if out.Type().Elem() == reflect.TypeOf(resolved) { + // TODO DOes this make sense? When is out a Ptr except when decoding a nil value? + elem := reflect.New(out.Type().Elem()) + elem.Elem().Set(reflect.ValueOf(resolved)) + out.Set(elem) + return true + } + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { + l := len(n.children) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 00000000..a1c2cc52 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
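The dispatcher above is only ever advanced by feeding it events in grammar order (STREAM-START, then a document, then its nodes). From the public API all of that is hidden behind `Marshal`; a minimal sketch of the call that exercises the state machine:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Marshal pushes STREAM-START, DOCUMENT-START, MAPPING-START and
	// so on through the state machine above and collects the bytes.
	out, err := yaml.Marshal(map[string]interface{}{
		"name":  "core",
		"ports": []int{8080, 1935},
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: core
	// ports:
	// - 8080
	// - 1935
}
```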
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
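`yaml_emitter_emit_flow_sequence_item` above is the path that renders `[a, b]` style sequences, including the comma and line-width handling. On the encoding side, the `flow` struct-tag option is the documented way to request it; a small sketch:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type Stream struct {
	Name  string `yaml:"name"`
	Ports []int  `yaml:"ports,flow"` // ",flow" selects the flow-style path
}

func main() {
	out, _ := yaml.Marshal(Stream{Name: "core", Ports: []int{8080, 1935}})
	fmt.Print(string(out))
	// name: core
	// ports: [8080, 1935]
}
```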
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
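Note the `emitter.mapping_context && !emitter.indention` argument to `yaml_emitter_increase_indent` in the block-sequence case above: sequences that appear as a mapping value are emitted indentless, so the dashes line up with their key. Illustrated with the public API:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	out, _ := yaml.Marshal(map[string][]string{"tags": {"a", "b"}})
	fmt.Print(string(out))
	// The indentless branch yields:
	// tags:
	// - a
	// - b
	// rather than indenting the dashes under the key.
}
```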
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
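`yaml_emitter_emit_alias` above writes `*name` references on output; the matching read side is the alias resolution and merge-key handling in decode.go earlier in this diff, with `allowedAliasRatio` capping how much of a document may be produced by alias expansion (the billion-laughs guard). A sketch of consuming anchors and merge keys:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	data := []byte(`
defaults: &defaults
  codec: h264
  bitrate: 2048
stream:
  <<: *defaults
  bitrate: 4096
`)
	var doc map[string]map[string]interface{}
	if err := yaml.Unmarshal(data, &doc); err != nil {
		panic(err)
	}
	// "<<" merges the anchored map; the local bitrate wins.
	fmt.Println(doc["stream"]["codec"], doc["stream"]["bitrate"])
	// h264 4096
}
```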
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
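`yaml_emitter_check_simple_key` above caps inline keys at 128 bytes on a single line; anything longer falls back to the explicit `?` key form in the block-mapping states. A sketch of tripping that limit (the exact rendering is the emitter's choice, but the explicit form is what the code above selects):

```go
package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	long := strings.Repeat("k", 200) // over the 128-byte simple-key cap
	out, _ := yaml.Marshal(map[string]int{long: 1})
	fmt.Print(string(out))
	// ? kkkk...k
	// : 1
}
```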
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
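`yaml_emitter_select_scalar_style` above degrades the requested style step by step: plain when the analysis allows it, otherwise single-quoted, otherwise double-quoted, with block styles ruled out inside flow context or simple keys. A sketch of values that land in different styles (the exact quoting is decided by the analysis flags, so the comments are indicative):

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	for _, v := range []string{
		"hello",           // plain style is fine
		": leading colon", // indicator at the start rules out plain
		"line1\nline2",    // multiline rules out plain entirely
	} {
		out, _ := yaml.Marshal(map[string]string{"v": v})
		fmt.Print(string(out))
	}
}
```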
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
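`yaml_emitter_analyze_tag` above splits a tag into a directive handle and suffix where possible. One tag this package emits on its own is `!!binary`: `[]byte` values are base64-encoded on the way out, and the decoder shown earlier in this diff base64-decodes the scalar when it sees that tag. A round-trip sketch:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	out, _ := yaml.Marshal(map[string][]byte{"data": {0x00, 0x01, 0xFF}})
	fmt.Print(string(out))
	// data: !!binary AAH/

	var back map[string]string
	_ = yaml.Unmarshal(out, &back)
	fmt.Printf("%x\n", back["data"]) // 0001ff
}
```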
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
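The flag set computed by yaml_emitter_analyze_scalar is what ultimately picks a scalar style: leading or trailing whitespace and indicator characters disable the plain styles, line breaks push toward block styles, and so on. For intuition, a small program against the public API (a sketch assuming the usual gopkg.in/yaml.v2 import path; the expected output is a best guess, not taken from this diff):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]string{
		"plain":   "hello",   // no indicators: plain style is allowed
		"leading": " spaced", // leading space: plain styles disallowed
		"colon":   "a: b",    // ": " indicator: plain styles disallowed
		"multi":   "l1\nl2",  // line breaks: the encoder picks literal style
	})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Typically prints (keys sorted by the encoder):
	// colon: 'a: b'
	// leading: ' spaced'
	// multi: |-
	//   l1
	//   l2
	// plain: hello
}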
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 00000000..0ee738e1 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + 
"regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 00000000..81d05dfe --- /dev/null +++ 
b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
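The grammar above is consumed one event at a time by the state machine that follows; anchors and aliases, for instance, surface as ALIAS events that the decoder resolves later. A quick end-to-end check through the public API (a sketch assuming the usual gopkg.in/yaml.v2 import path):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var v map[string]int
	// "&x" anchors the scalar 1; "*x" is parsed as an alias node
	// and resolves back to the anchored value.
	if err := yaml.Unmarshal([]byte("a: &x 1\nb: *x\n"), &v); err != nil {
		panic(err)
	}
	fmt.Println(v["a"], v["b"]) // 1 1
}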
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
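A mapping entry with a key but no value token takes the empty-scalar path below, which is how YAML's implicit nulls fall out of the parser. A small demonstration (a sketch assuming the usual gopkg.in/yaml.v2 import path):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var m map[string]interface{}
	// "a" has no value in the flow mapping, so the parser emits an
	// empty scalar for it, which decodes to nil.
	if err := yaml.Unmarshal([]byte("{a: , b: 2}"), &m); err != nil {
		panic(err)
	}
	fmt.Printf("%v %v\n", m["a"], m["b"]) // <nil> 2
}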
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 00000000..7c1f5fac --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
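Note that yaml_parser_process_directives above accepts only version 1.1, so a %YAML 1.2 directive is rejected outright. A quick check through the public API (a sketch assuming the usual gopkg.in/yaml.v2 import path; the expected error text comes from the parser code above):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var v interface{}

	// Accepted: the parser only supports %YAML 1.1.
	fmt.Println(yaml.Unmarshal([]byte("%YAML 1.1\n---\nok\n"), &v))

	// Rejected with "found incompatible YAML document".
	fmt.Println(yaml.Unmarshal([]byte("%YAML 1.2\n---\nok\n"), &v))
}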
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
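	// (Illustration, not part of the vendored file.) Detection is lazy and
	// happens once: a stream starting 0xFF 0xFE is read as UTF-16LE and
	// 0xFE 0xFF as UTF-16BE, with the two BOM bytes consumed; 0xEF 0xBB 0xBF
	// consumes three bytes and selects UTF-8; with no BOM at all, UTF-8 is
	// assumed and nothing is consumed.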
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
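// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the vendored upstream
// source. The surrogate-pair arithmetic above (RFC 2781) can be
// cross-checked against the standard library; the allowed-range switch that
// follows it is the printable-character set required by YAML 1.1:
//
//	package main
//
//	import (
//		"fmt"
//		"unicode/utf16"
//	)
//
//	func main() {
//		// W1/W2 for U+1F600, a character outside the BMP:
//		w1, w2 := rune(0xD83D), rune(0xDE00)
//
//		// U = 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF), as above.
//		u := 0x10000 + ((w1 & 0x3FF) << 10) + (w2 & 0x3FF)
//		fmt.Printf("%#x\n", u) // 0x1f600
//
//		// The standard library agrees:
//		fmt.Printf("%#x\n", utf16.DecodeRune(w1, w2)) // 0x1f600
//	}
// ---------------------------------------------------------------------------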
+ parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go new file mode 100644 index 00000000..4120e0c9 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -0,0 +1,258 @@ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' 
// Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}}, + {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}}, + {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}}, + {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}}, + {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}}, + {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}}, + {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", yaml_MERGE_TAG, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + // TODO This can easily be made faster and produce less garbage. + if strings.HasPrefix(tag, longTagPrefix) { + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: + return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. 
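// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the vendored upstream
// source. resolveMap above is what makes unquoted YAML 1.1 scalars such as
// "yes", "off", "~" and ".inf" decode as typed values rather than strings
// (shortTag/longTag merely convert between the "!!name" shorthand and the
// full tag:yaml.org,2002:name form). Through the public API:
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		var v map[string]interface{}
//		doc := "a: yes\nb: ~\nc: .inf\nd: off\n"
//		if err := yaml.Unmarshal([]byte(doc), &v); err != nil {
//			panic(err)
//		}
//		fmt.Println(v["a"], v["b"], v["c"], v["d"]) // true <nil> +Inf false
//	}
// ---------------------------------------------------------------------------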
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
+ i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go new file mode 100644 index 00000000..0b9bb603 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -0,0 +1,2711 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' 
indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? 
a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? 
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. 
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
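// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the vendored upstream
// source. The 1024-character / single-line limit on implicit keys quoted
// above is enforced at scan time: once the ':' shows up too far from the
// start of a required key, the scanner gives up with the "could not find
// expected ':'" error (exact wording approximate). For example:
//
//	package main
//
//	import (
//		"fmt"
//		"strings"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		var v map[string]interface{}
//		// The second key is 1100 characters long, so its ':' arrives
//		// more than 1024 characters after the key started.
//		doc := "a: 1\n" + strings.Repeat("k", 1100) + ": 2\n"
//		fmt.Println(yaml.Unmarshal([]byte(doc), &v))
//		// error like: could not find expected ':'
//	}
// ---------------------------------------------------------------------------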
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. 
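// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the vendored upstream
// source. max_flow_level above bounds how deeply flow collections may nest,
// so a hostile document cannot exhaust memory or the stack through nesting
// alone. One '[' past the limit is rejected (error wording approximate):
//
//	package main
//
//	import (
//		"fmt"
//		"strings"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		var v interface{}
//		doc := strings.Repeat("[", 10001) // max_flow_level is 10000
//		fmt.Println(yaml.Unmarshal([]byte(doc), &v))
//		// error like: exceeded max depth of 10000
//	}
// ---------------------------------------------------------------------------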
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
+ start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. 
+ if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. 
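// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the vendored upstream
// source. The "A simple key cannot follow another simple key" assignment
// above explains the classic failure of "a: b: c": after the first value
// position, no new simple key may start on the same line, so when the
// second ':' is reached there is neither a saved key nor permission to
// start one, and scanning ends in the error branch above:
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		var v map[string]interface{}
//		fmt.Println(yaml.Unmarshal([]byte("a: b: c\n"), &v))
//		// error like: mapping values are not allowed in this context
//	}
// ---------------------------------------------------------------------------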
+ token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
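// ---------------------------------------------------------------------------
// [Editor's note] Illustrative sketch, not part of the vendored upstream
// source. Per the tab rules above, a tab is skipped only in the flow
// context or where a simple key could not start anyway. A tab used as
// block indentation is therefore left in place, and the token dispatcher
// later fails on it (error wording approximate):
//
//	package main
//
//	import (
//		"fmt"
//
//		yaml "gopkg.in/yaml.v2"
//	)
//
//	func main() {
//		var v map[string]interface{}
//		fmt.Println(yaml.Unmarshal([]byte("a:\n\tb: 1\n"), &v))
//		// error like: found character that cannot start any token
//	}
// ---------------------------------------------------------------------------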
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. 
+// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. 
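// Editorial aside, not part of the vendored diff: the helper above caps each
// version component at max_number_length (2) digits so the result always
// fits the int8 fields of the VERSION-DIRECTIVE token. A standalone, hedged
// re-sketch of that bound (package and function names are illustrative):
package yamlsketch

import "fmt"

// parseVersionComponent mirrors yaml_parser_scan_version_directive_number:
// it accepts at most two leading digits and rejects an empty digit run.
func parseVersionComponent(s string) (int8, error) {
	var value int8
	var length int
	for length < len(s) && s[length] >= '0' && s[length] <= '9' {
		length++
		if length > 2 { // max_number_length in the vendored code
			return 0, fmt.Errorf("found extremely long version number")
		}
		value = value*10 + int8(s[length-1]-'0')
	}
	if length == 0 {
		return 0, fmt.Errorf("did not find expected version number")
	}
	return value, nil
}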
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. 
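// Editorial aside, not part of the vendored diff: yaml_parser_scan_tag above
// distinguishes the verbatim "!<...>", bare "!", "!suffix", and
// "!handle!suffix" spellings. A hedged standalone sketch of how handle and
// suffix fall out (it assumes tag starts with '!'; the real scanner also
// validates the terminating character):
package yamlsketch

import "strings"

// splitTag sketches the (handle, suffix) outcome for each tag spelling.
func splitTag(tag string) (handle, suffix string) {
	switch {
	case strings.HasPrefix(tag, "!<") && strings.HasSuffix(tag, ">"):
		return "", tag[2 : len(tag)-1] // verbatim: !<tag:yaml.org,2002:str>
	case tag == "!":
		return "", "!" // special case: the non-specific "!" tag
	default:
		if i := strings.Index(tag[1:], "!"); i >= 0 {
			return tag[:i+2], tag[i+2:] // "!handle!suffix" (so "!!str" -> "!!", "str")
		}
		return "!", tag[1:] // bare "!suffix" normalizes the handle to "!"
	}
}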
+ // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
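// Editorial aside, not part of the vendored diff: yaml_parser_scan_uri_escapes
// above decodes a "%xx" run into raw bytes and insists they form exactly one
// UTF-8 sequence. A hedged standalone sketch of the same idea (it only
// decodes the leading escape run and leans on unicode/utf8 for validation):
package yamlsketch

import (
	"encoding/hex"
	"fmt"
	"unicode/utf8"
)

// decodeURIEscapes decodes consecutive %xx octets and verifies the result
// is a single valid UTF-8 encoded rune, as the scanner requires.
func decodeURIEscapes(s string) ([]byte, error) {
	var out []byte
	for len(s) >= 3 && s[0] == '%' {
		b, err := hex.DecodeString(s[1:3])
		if err != nil {
			return nil, fmt.Errorf("did not find URI escaped octet")
		}
		out = append(out, b[0])
		s = s[3:]
	}
	if r, size := utf8.DecodeRune(out); r == utf8.RuneError || size != len(out) {
		return nil, fmt.Errorf("escapes do not form a single UTF-8 rune")
	}
	return out, nil
}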
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. 
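// Editorial aside, not part of the vendored diff: the chomping ("+"/"-") and
// indentation indicators parsed in the block-scalar header above decide how
// trailing newlines survive. A hedged demo through the public API:
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var m map[string]string
	// "|" clips to one trailing newline, "|-" strips it, ">-" folds lines.
	doc := "clip: |\n  a\n  b\nstrip: |-\n  a\n  b\nfold: >-\n  a\n  b\n"
	if err := yaml.Unmarshal([]byte(doc), &m); err != nil {
		panic(err)
	}
	fmt.Printf("%q %q %q\n", m["clip"], m["strip"], m["fold"])
	// Expected per YAML chomping rules: "a\nb\n" "a\nb" "a b"
}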
+ s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&& + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
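// Editorial aside, not part of the vendored diff: the escape table handled
// above (\n, \t, \xXX, \uXXXX, \UXXXXXXXX, plus \N, \_, \L, \P) applies only
// inside double quotes; single-quoted scalars escape nothing but ''. A
// hedged demo through the public API:
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var m map[string]string
	doc := `dq: "line\nsmile \u263A"
sq: 'it''s plain'
`
	if err := yaml.Unmarshal([]byte(doc), &m); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n%q\n", m["dq"], m["sq"])
	// Expected: "line\nsmile ☺" and "it's plain"
}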
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 00000000..4c45e660 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 00000000..a2dde608 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
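// Editorial aside, not part of the vendored diff: sorter.go above gives map
// keys a "natural" order, so digit runs compare by numeric value rather than
// byte-wise. A hedged demo of the resulting Marshal key order:
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	out, err := yaml.Marshal(map[string]int{"item10": 3, "item2": 2, "item1": 1})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Expected order: item1, item2, item10 (not the byte-wise item1, item10, item2).
}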
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 00000000..30813884 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,478 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. 
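// Editorial aside, not part of the vendored diff: a type can hook into
// decoding via the Unmarshaler interface declared above; the unmarshal
// callback may safely be tried more than once. A hedged sketch (the Celsius
// type and its format are illustrative, not part of the package):
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type Celsius float64

// UnmarshalYAML accepts either a bare number or an "NN C" string.
func (c *Celsius) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var f float64
	if err := unmarshal(&f); err == nil { // first attempt: plain number
		*c = Celsius(f)
		return nil
	}
	var s string
	if err := unmarshal(&s); err != nil { // second attempt: string form
		return err
	}
	_, err := fmt.Sscanf(s, "%f C", &f)
	*c = Celsius(f)
	return err
}

func main() {
	var v struct{ Temp Celsius }
	if err := yaml.Unmarshal([]byte("temp: 21.5 C"), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Temp) // 21.5
}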
+// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. 
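// Editorial aside, not part of the vendored diff: UnmarshalStrict and the
// streaming Decoder defined above differ usefully from plain Unmarshal. A
// hedged demo of both:
package main

import (
	"bytes"
	"fmt"
	"io"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var t struct{ A int }

	// Strict mode rejects keys that have no matching destination field.
	err := yaml.UnmarshalStrict([]byte("a: 1\nextra: 2\n"), &t)
	fmt.Println(err != nil) // true: "extra" has nowhere to go

	// A Decoder reads "---"-separated documents until io.EOF.
	dec := yaml.NewDecoder(bytes.NewReader([]byte("a: 1\n---\na: 2\n")))
	for {
		var doc struct{ A int }
		if err := dec.Decode(&doc); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(doc.A) // 1, then 2
	}
}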
+// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. 
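// Editorial aside, not part of the vendored diff: the omitempty, flow and
// inline tag options documented above combine like this (output shown as
// expected from the documented semantics):
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

type Meta struct {
	Owner string `yaml:"owner"`
}

type Item struct {
	Name string   `yaml:"name"`
	Note string   `yaml:"note,omitempty"` // dropped when empty
	Tags []string `yaml:"tags,flow"`      // emitted inline as [a, b]
	Meta `yaml:",inline"`                 // Meta's keys hoisted into Item
}

func main() {
	out, err := yaml.Marshal(Item{Name: "x", Tags: []string{"a", "b"}, Meta: Meta{Owner: "ops"}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// name: x
	// tags: [a, b]
	// owner: ops
}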
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
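// Editorial aside, not part of the vendored diff: the IsZeroer interface
// introduced above lets omitempty defer to a type's own notion of "zero".
// A hedged sketch; it assumes v2's native time.Time encoding, and leans on
// time.Time already having an IsZero method:
package main

import (
	"fmt"
	"time"

	yaml "gopkg.in/yaml.v2"
)

type Doc struct {
	Name string    `yaml:"name"`
	When time.Time `yaml:"when,omitempty"` // IsZero() true for the zero time
}

func main() {
	out, err := yaml.Marshal(Doc{Name: "x"}) // zero time: "when" is omitted
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // name: x
}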
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} + +// FutureLineWrap globally disables line wrapping when encoding long strings. +// This is a temporary and thus deprecated method introduced to faciliate +// migration towards v3, which offers more control of line lengths on +// individual encodings, and has a default matching the behavior introduced +// by this function. +// +// The default formatting of v2 was erroneously changed in v2.3.0 and reverted +// in v2.4.0, at which point this function was introduced to help migration. +func FutureLineWrap() { + disableLineWrapping = true +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 00000000..f6a9c8e3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. 
+} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. 
+) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. 
+) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. 
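// Editorial aside, not part of the vendored diff: the token, event, and node
// kinds catalogued above map onto even a tiny document. A hedged sketch; the
// narration lives in the comments, since these types are unexported:
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Scanning this produces, roughly: STREAM-START, BLOCK-MAPPING-START,
	// KEY, SCALAR("a"), VALUE, BLOCK-ENTRY, SCALAR("1"), BLOCK-ENTRY,
	// SCALAR("2"), BLOCK-END, STREAM-END (an indentless sequence emits no
	// explicit start token); the parser then wraps the scalars in matching
	// stream/document/mapping/sequence start and end events.
	var v map[string][]int
	if err := yaml.Unmarshal([]byte("a:\n- 1\n- 2\n"), &v); err != nil {
		panic(err)
	}
	fmt.Println(v) // map[a:[1 2]]
}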
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+    key   int // The key of the element.
+    value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+    typ yaml_node_type_t // The node type.
+    tag []byte           // The node tag.
+
+    // The node data.
+
+    // The scalar parameters (for yaml_SCALAR_NODE).
+    scalar struct {
+        value  []byte              // The scalar value.
+        length int                 // The length of the scalar value.
+        style  yaml_scalar_style_t // The scalar style.
+    }
+
+    // The sequence parameters (for YAML_SEQUENCE_NODE).
+    sequence struct {
+        items_data []yaml_node_item_t    // The stack of sequence items.
+        style      yaml_sequence_style_t // The sequence style.
+    }
+
+    // The mapping parameters (for yaml_MAPPING_NODE).
+    mapping struct {
+        pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
+        pairs_start *yaml_node_pair_t    // The beginning of the stack.
+        pairs_end   *yaml_node_pair_t    // The end of the stack.
+        pairs_top   *yaml_node_pair_t    // The top of the stack.
+        style       yaml_mapping_style_t // The mapping style.
+    }
+
+    start_mark yaml_mark_t // The beginning of the node.
+    end_mark   yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+    // The document nodes.
+    nodes []yaml_node_t
+
+    // The version directive.
+    version_directive *yaml_version_directive_t
+
+    // The list of tag directives.
+    tag_directives_data  []yaml_tag_directive_t
+    tag_directives_start int // The beginning of the tag directives list.
+    tag_directives_end   int // The end of the tag directives list.
+
+    start_implicit int // Is the document start indicator implicit?
+    end_implicit   int // Is the document end indicator implicit?
+
+    // The start/end of the document.
+    start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write not more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out]   data        A pointer to an application data specified by
+//                        yaml_parser_set_input().
+// [out]      buffer      The buffer to write the data from the source.
+// [in]       size        The size of the buffer.
+// [out]      size_read   The actual number of bytes read from the source.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+    possible     bool        // Is a simple key possible?
+    required     bool        // Is a simple key required?
+    token_number int         // The number of the token.
+    mark         yaml_mark_t // The position mark.
+}
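The doxygen-style notes above describe libyaml's C contract; the Go port folds `size_read` and the 0/1 status code into the usual (n, err) return. A minimal sketch of a reader-backed handler under that contract (the simplified handler type and helper are mine, not from this diff):

```go
package main

import (
	"bytes"
	"fmt"
	"io"
)

// readHandler mirrors the shape of yaml_read_handler_t with the
// parser argument dropped: fill buf, report how many bytes landed.
type readHandler func(buf []byte) (n int, err error)

// newReaderHandler adapts an io.Reader to the handler contract:
// n == 0 together with io.EOF is the Go rendering of the C rule
// "set size_read to 0 and return 1" at end of input.
func newReaderHandler(r io.Reader) readHandler {
	return func(buf []byte) (int, error) {
		return r.Read(buf)
	}
}

func main() {
	h := newReaderHandler(bytes.NewReader([]byte("a: 1\n")))
	buf := make([]byte, 4)
	for {
		n, err := h(buf)
		fmt.Printf("read %d bytes: %q\n", n, buf[:n])
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
}
```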
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+    yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+    yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+    yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+    yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+    yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+    yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+    yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+    yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+    yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+    yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+    yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+    yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+    yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+    yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+    yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+    yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+    yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+    yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+    yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+    yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+    yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+    yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+    yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+    yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+    switch ps {
+    case yaml_PARSE_STREAM_START_STATE:
+        return "yaml_PARSE_STREAM_START_STATE"
+    case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+        return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+    case yaml_PARSE_DOCUMENT_START_STATE:
+        return "yaml_PARSE_DOCUMENT_START_STATE"
+    case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+        return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+    case yaml_PARSE_DOCUMENT_END_STATE:
+        return "yaml_PARSE_DOCUMENT_END_STATE"
+    case yaml_PARSE_BLOCK_NODE_STATE:
+        return "yaml_PARSE_BLOCK_NODE_STATE"
+    case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+        return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+    case yaml_PARSE_FLOW_NODE_STATE:
+        return "yaml_PARSE_FLOW_NODE_STATE"
+    case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+        return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+    case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+        return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+    case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+        return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+    case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+        return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+    case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+        return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+    case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+        return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+    case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+        return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+    case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+        return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+    case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+        return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+    case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+        return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+    case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+        return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+    case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+        return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+    case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+        return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+    case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+        return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+    case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+        return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+    case yaml_PARSE_END_STATE:
+        return "yaml_PARSE_END_STATE"
+    }
+    return "<unknown parser state>"
+}
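The parser drives these states with a current state plus a stack of states to resume (the `state` and `states` fields of yaml_parser_t below), because block and flow constructs nest arbitrarily. A small hedged sketch of that push-down pattern with invented states:

```go
package main

import "fmt"

type state int

const (
	parseDocument state = iota
	parseMapping
	parseSequence
	parseDone
)

type parser struct {
	state  state   // what we expect next
	states []state // where to resume once a nested construct ends
}

// push remembers the state to return to, then switches to next.
func (p *parser) push(resume, next state) {
	p.states = append(p.states, resume)
	p.state = next
}

// pop resumes the enclosing context when a construct closes.
func (p *parser) pop() {
	n := len(p.states) - 1
	p.state = p.states[n]
	p.states = p.states[:n]
}

func main() {
	p := &parser{state: parseDocument}
	p.push(parseDone, parseMapping)     // enter a mapping at document level
	p.push(parseMapping, parseSequence) // a value turns out to be a sequence
	fmt.Println(p.state, p.states)      // inside the sequence, two resumes pending
	p.pop()                             // sequence ends: back in the mapping
	p.pop()                             // mapping ends: document is done
	fmt.Println(p.state, p.states)
}
```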
"yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. 
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+    // Expect STREAM-START.
+    yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+    yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+    yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+    yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+    yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+    yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+    yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+    yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+    yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+    yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+    yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+    yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+    yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+    yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+    yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+    yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+    yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+    yaml_EMIT_END_STATE                        // Expect nothing.
+)
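None of these states are visible to callers; the exported API drives the emitter through them. A short usage sketch: yaml.Marshal and yaml.NewEncoder are real gopkg.in/yaml.v2 functions, while the config type and sample values are mine:

```go
package main

import (
	"fmt"
	"os"

	yaml "gopkg.in/yaml.v2"
)

type config struct {
	Name  string   `yaml:"name"`
	Ports []int    `yaml:"ports"`
	Tags  []string `yaml:"tags,omitempty"`
}

func main() {
	c := config{Name: "core", Ports: []int{8080, 6060}}

	// One-shot: drives the emitter through the document states once.
	out, err := yaml.Marshal(c)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))

	// Streaming: each Encode emits one more document into the stream.
	enc := yaml.NewEncoder(os.Stdout)
	defer enc.Close()
	if err := enc.Encode(c); err != nil {
		panic(err)
	}
}
```

The one-shot and streaming paths share the same state machine; the encoder simply keeps the stream open between documents.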
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+    // Error handling
+
+    error   yaml_error_type_t // Error type.
+    problem string            // Error description.
+
+    // Writer stuff
+
+    write_handler yaml_write_handler_t // Write handler.
+
+    output_buffer *[]byte   // String output data.
+    output_writer io.Writer // File output data.
+
+    buffer     []byte // The working buffer.
+    buffer_pos int    // The current position of the buffer.
+
+    raw_buffer     []byte // The raw buffer.
+    raw_buffer_pos int    // The current position of the buffer.
+
+    encoding yaml_encoding_t // The stream encoding.
+
+    // Emitter stuff
+
+    canonical   bool         // If the output is in the canonical style?
+    best_indent int          // The number of indentation spaces.
+    best_width  int          // The preferred width of the output lines.
+    unicode     bool         // Allow unescaped non-ASCII characters?
+    line_break  yaml_break_t // The preferred line break.
+
+    state  yaml_emitter_state_t   // The current emitter state.
+    states []yaml_emitter_state_t // The stack of states.
+
+    events      []yaml_event_t // The event queue.
+    events_head int            // The head of the event queue.
+
+    indents []int // The stack of indentation levels.
+
+    tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+    indent int // The current indentation level.
+
+    flow_level int // The current flow level.
+
+    root_context       bool // Is it the document root context?
+    sequence_context   bool // Is it a sequence context?
+    mapping_context    bool // Is it a mapping context?
+    simple_key_context bool // Is it a simple mapping key context?
+
+    line       int  // The current line.
+    column     int  // The current column.
+    whitespace bool // If the last character was a whitespace?
+    indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+    open_ended bool // If an explicit document end is required?
+
+    // Anchor analysis.
+    anchor_data struct {
+        anchor []byte // The anchor value.
+        alias  bool   // Is it an alias?
+    }
+
+    // Tag analysis.
+    tag_data struct {
+        handle []byte // The tag handle.
+        suffix []byte // The tag suffix.
+    }
+
+    // Scalar analysis.
+    scalar_data struct {
+        value                 []byte              // The scalar value.
+        multiline             bool                // Does the scalar contain line breaks?
+        flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+        block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+        single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+        block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+        style                 yaml_scalar_style_t // The output style.
+    }
+
+    // Dumper stuff
+
+    opened bool // If the stream was already opened?
+    closed bool // If the stream was already closed?
+
+    // The information associated with the document nodes.
+    anchors *struct {
+        references int  // The number of references.
+        anchor     int  // The anchor id.
+        serialized bool // If the node has been emitted?
+    }
+
+    last_anchor_id int // The last assigned anchor id.
+
+    document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 00000000..8110ce3c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+    // The size of the input raw buffer.
+    input_raw_buffer_size = 512
+
+    // The size of the input buffer.
+    // It should be possible to decode the whole raw buffer.
+    input_buffer_size = input_raw_buffer_size * 3
+
+    // The size of the output buffer.
+    output_buffer_size = 128
+
+    // The size of the output raw buffer.
+    // It should be possible to encode the whole output buffer.
+    output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+    // The size of other stacks and queues.
+    initial_stack_size  = 16
+    initial_queue_size  = 16
+    initial_string_size = 16
+)
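The *3 multiplier covers worst-case growth when the raw buffer holds UTF-16 input: a 2-byte UTF-16 code unit can decode to a rune that needs 3 UTF-8 bytes, so tripling the raw size leaves room to spare. A quick standalone check of that ratio (the example values are mine):

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// U+20AC (the euro sign) occupies one 2-byte UTF-16 code unit
	// but three UTF-8 bytes, so 512 raw UTF-16 bytes decode to at
	// most 512/2*3 = 768 UTF-8 bytes -- well under 512*3.
	r := '\u20ac'
	fmt.Println(utf8.RuneLen(r)) // 3
	fmt.Println(512 / 2 * 3)     // 768
}
```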
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+    return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+    return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+    return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+    return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+    bi := b[i]
+    if bi >= 'A' && bi <= 'F' {
+        return int(bi) - 'A' + 10
+    }
+    if bi >= 'a' && bi <= 'f' {
+        return int(bi) - 'a' + 10
+    }
+    return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+    return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+    return ((b[i] == 0x0A) || // . == #x0A
+        (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+        (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
+        (b[i] > 0xC2 && b[i] < 0xED) ||
+        (b[i] == 0xED && b[i+1] < 0xA0) ||
+        (b[i] == 0xEE) ||
+        (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+            !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+            !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+    return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+    return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+    return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+    return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+    //return is_space(b, i) || is_tab(b, i)
+    return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+    return (b[i] == '\r' || // CR (#xD)
+        b[i] == '\n' || // LF (#xA)
+        b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+    return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+    //return is_break(b, i) || is_z(b, i)
+    return ( // is_break:
+    b[i] == '\r' || // CR (#xD)
+        b[i] == '\n' || // LF (#xA)
+        b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+        // is_z:
+        b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+    //return is_space(b, i) || is_breakz(b, i)
+    return ( // is_space:
+    b[i] == ' ' ||
+        // is_breakz:
+        b[i] == '\r' || // CR (#xD)
+        b[i] == '\n' || // LF (#xA)
+        b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+        b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+    //return is_blank(b, i) || is_breakz(b, i)
+    return ( // is_blank:
+    b[i] == ' ' || b[i] == '\t' ||
+        // is_breakz:
+        b[i] == '\r' || // CR (#xD)
+        b[i] == '\n' || // LF (#xA)
+        b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+        b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+        b[i] == 0)
+}
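width() below reads the UTF-8 sequence length off the leading byte's high bits. A hedged standalone check that those bit tests agree with the standard library (the helper name is mine); note the vendored version deliberately uses chained ifs rather than a switch so the compiler inlines it, while a switch is fine in a throwaway check:

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// leadWidth mirrors the width() helper below: classify a UTF-8
// leading byte by its high bits. 0 means "not a valid leading byte".
func leadWidth(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}

func main() {
	// 1-, 2-, 3-, and 4-byte runes: 'a', e-acute, euro sign, G clef.
	for _, r := range []rune{'a', '\u00e9', '\u20ac', '\U0001D11E'} {
		buf := make([]byte, 4)
		n := utf8.EncodeRune(buf, r)
		fmt.Printf("%q: leadWidth=%d utf8.RuneLen=%d\n", r, leadWidth(buf[0]), n)
	}
}
```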
+
+// Determine the width of the character.
+func width(b byte) int {
+    // Don't replace these by a switch without first
+    // confirming that it is being inlined.
+    if b&0x80 == 0x00 {
+        return 1
+    }
+    if b&0xE0 == 0xC0 {
+        return 2
+    }
+    if b&0xF0 == 0xE0 {
+        return 3
+    }
+    if b&0xF8 == 0xF0 {
+        return 4
+    }
+    return 0
+
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 5945c279..3dc3dd15 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -54,10 +54,10 @@ github.com/beorn7/perks/quantile
 # github.com/boltdb/bolt v1.3.1
 ## explicit
 github.com/boltdb/bolt
-# github.com/caddyserver/certmagic v0.19.1
+# github.com/caddyserver/certmagic v0.19.2
 ## explicit; go 1.19
 github.com/caddyserver/certmagic
-# github.com/casbin/casbin/v2 v2.73.1
+# github.com/casbin/casbin/v2 v2.77.1
 ## explicit; go 1.13
 github.com/casbin/casbin/v2
 github.com/casbin/casbin/v2/config
@@ -132,7 +132,10 @@ github.com/gabriel-vasile/mimetype
 github.com/gabriel-vasile/mimetype/internal/charset
 github.com/gabriel-vasile/mimetype/internal/json
 github.com/gabriel-vasile/mimetype/internal/magic
-# github.com/go-ole/go-ole v1.2.6
+# github.com/ghodss/yaml v1.0.0
+## explicit
+github.com/ghodss/yaml
+# github.com/go-ole/go-ole v1.3.0
 ## explicit; go 1.12
 github.com/go-ole/go-ole
 github.com/go-ole/go-ole/oleutil
@@ -156,7 +159,7 @@ github.com/go-playground/locales/currency
 # github.com/go-playground/universal-translator v0.18.1
 ## explicit; go 1.18
 github.com/go-playground/universal-translator
-# github.com/go-playground/validator/v10 v10.15.0
+# github.com/go-playground/validator/v10 v10.15.3
 ## explicit; go 1.18
 github.com/go-playground/validator/v10
 # github.com/gobwas/glob v0.2.3
@@ -178,12 +181,12 @@ github.com/golang-jwt/jwt/v4
 # github.com/golang/protobuf v1.5.3
 ## explicit; go 1.9
 github.com/golang/protobuf/proto
-# github.com/google/gops v0.3.27
+# github.com/google/gops v0.3.28
 ## explicit; go 1.17
 github.com/google/gops/agent
 github.com/google/gops/internal
 github.com/google/gops/signal
-# github.com/google/uuid v1.3.0
+# github.com/google/uuid v1.3.1
 ## explicit
 github.com/google/uuid
 # github.com/gorilla/websocket v1.5.0
@@ -198,12 +201,13 @@ github.com/hashicorp/go-immutable-radix
 # github.com/hashicorp/go-msgpack v0.5.5
 ## explicit
 github.com/hashicorp/go-msgpack/codec
-# github.com/hashicorp/golang-lru v0.5.4
+# github.com/hashicorp/golang-lru v1.0.2
 ## explicit; go 1.12
 github.com/hashicorp/golang-lru/simplelru
-# github.com/hashicorp/golang-lru/v2 v2.0.4
+# github.com/hashicorp/golang-lru/v2 v2.0.6
 ## explicit; go 1.18
 github.com/hashicorp/golang-lru/v2
+github.com/hashicorp/golang-lru/v2/internal
 github.com/hashicorp/golang-lru/v2/simplelru
 # github.com/hashicorp/raft v1.5.0
 ## explicit; go 1.16
@@ -283,7 +287,7 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.61
+# github.com/minio/minio-go/v7 v7.0.63
 ## explicit; go 1.17
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/credentials
@@ -363,16 +367,16 @@ github.com/sirupsen/logrus
 ## explicit; go 1.20
 github.com/stretchr/testify/assert
 github.com/stretchr/testify/require
-# github.com/swaggo/echo-swagger v1.4.0
+# github.com/swaggo/echo-swagger v1.4.1
 ## explicit; go 1.17
 github.com/swaggo/echo-swagger
 # github.com/swaggo/files/v2 v2.0.0
 ## explicit; go 1.16
 github.com/swaggo/files/v2
-# github.com/swaggo/swag v1.16.1
+# github.com/swaggo/swag v1.16.2
 ## explicit; go 1.18
 github.com/swaggo/swag
-# github.com/tidwall/gjson v1.15.0
+# github.com/tidwall/gjson v1.16.0
 ## explicit; go 1.12
 github.com/tidwall/gjson
 # github.com/tidwall/match v1.1.1
@@ -381,7 +385,7 @@ github.com/tidwall/match
 # github.com/tidwall/pretty v1.2.1
 ## explicit; go 1.16
 github.com/tidwall/pretty
-# github.com/tklauser/go-sysconf v0.3.11
+# github.com/tklauser/go-sysconf v0.3.12
 ## explicit; go 1.13
 github.com/tklauser/go-sysconf
 # github.com/tklauser/numcpus v0.6.1
@@ -567,6 +571,9 @@ google.golang.org/protobuf/types/known/timestamppb
 # gopkg.in/ini.v1 v1.67.0
 ## explicit
 gopkg.in/ini.v1
+# gopkg.in/yaml.v2 v2.4.0
+## explicit; go 1.15
+gopkg.in/yaml.v2
 # gopkg.in/yaml.v3 v3.0.1
 ## explicit
 gopkg.in/yaml.v3
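The two additions travel together: ghodss/yaml (the new indirect dependency) converts YAML to JSON by round-tripping through gopkg.in/yaml.v2, which is why both modules are vendored now. A small sketch of the call that creates that edge; YAMLToJSON is ghodss/yaml's real exported function, the input document is mine:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	// ghodss/yaml unmarshals with gopkg.in/yaml.v2 under the hood,
	// then re-encodes the result via encoding/json.
	j, err := yaml.YAMLToJSON([]byte("name: core\nports:\n- 8080\n- 6060\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(j)) // {"name":"core","ports":[8080,6060]}
}
```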