diff --git a/autocert/autocert.go b/autocert/autocert.go
index c3f4029d..09cffe28 100644
--- a/autocert/autocert.go
+++ b/autocert/autocert.go
@@ -195,7 +195,6 @@ func (m *manager) AcquireCertificates(ctx context.Context, hostnames []string) e
if len(removed) != 0 {
m.logger.WithField("hostnames", removed).Info().Log("Unmanage certificates")
- m.config.Unmanage(removed)
}
m.lock.Lock()
@@ -216,7 +215,6 @@ func (m *manager) ManageCertificates(ctx context.Context, hostnames []string) er
if len(removed) != 0 {
m.logger.WithField("hostnames", removed).Info().Log("Unmanage certificates")
- m.config.Unmanage(removed)
}
if len(added) == 0 {
diff --git a/go.mod b/go.mod
index 3d644faa..521d0c04 100644
--- a/go.mod
+++ b/go.mod
@@ -3,13 +3,13 @@ module github.com/datarhei/core/v16
go 1.18
require (
- github.com/99designs/gqlgen v0.17.34
+ github.com/99designs/gqlgen v0.17.35
github.com/Masterminds/semver/v3 v3.2.1
github.com/adhocore/gronx v1.6.3
github.com/atrox/haikunatorgo/v2 v2.0.1
- github.com/caddyserver/certmagic v0.18.2
- github.com/casbin/casbin/v2 v2.71.1
- github.com/datarhei/core-client-go/v16 v16.11.1-0.20230710090938-bfcb7f5f7b3e
+ github.com/caddyserver/certmagic v0.19.0
+ github.com/casbin/casbin/v2 v2.72.0
+ github.com/datarhei/core-client-go/v16 v16.11.1-0.20230717141633-8f0e5ce4c68c
github.com/datarhei/gosrt v0.5.2
github.com/datarhei/joy4 v0.0.0-20230505074825-fde05957445a
github.com/fujiwara/shapeio v1.0.0
@@ -24,18 +24,18 @@ require (
github.com/invopop/jsonschema v0.4.0
github.com/joho/godotenv v1.5.1
github.com/klauspost/cpuid/v2 v2.2.5
- github.com/labstack/echo/v4 v4.10.2
+ github.com/labstack/echo/v4 v4.11.1
github.com/lestrrat-go/strftime v1.0.6
github.com/lithammer/shortuuid/v4 v4.0.0
github.com/mattn/go-isatty v0.0.19
- github.com/minio/minio-go/v7 v7.0.59
+ github.com/minio/minio-go/v7 v7.0.60
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
github.com/prometheus/client_golang v1.16.0
github.com/shirou/gopsutil/v3 v3.23.6
github.com/stretchr/testify v1.8.4
github.com/swaggo/echo-swagger v1.4.0
github.com/swaggo/swag v1.16.1
- github.com/vektah/gqlparser/v2 v2.5.6
+ github.com/vektah/gqlparser/v2 v2.5.8
github.com/xeipuuv/gojsonschema v1.2.0
go.etcd.io/bbolt v1.3.7
go.uber.org/automaxprocs v1.5.2
@@ -61,7 +61,7 @@ require (
github.com/fatih/color v1.15.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
- github.com/go-openapi/jsonpointer v0.19.6 // indirect
+ github.com/go-openapi/jsonpointer v0.20.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/spec v0.20.9 // indirect
github.com/go-openapi/swag v0.22.4 // indirect
@@ -103,6 +103,9 @@ require (
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/swaggo/files/v2 v2.0.0 // indirect
+ github.com/tidwall/gjson v1.14.4 // indirect
+ github.com/tidwall/match v1.1.1 // indirect
+ github.com/tidwall/pretty v1.2.1 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/urfave/cli/v2 v2.25.5 // indirect
@@ -112,6 +115,7 @@ require (
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.3 // indirect
+ github.com/zeebo/blake3 v0.2.3 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/goleak v1.1.12 // indirect
go.uber.org/multierr v1.11.0 // indirect
diff --git a/go.sum b/go.sum
index ec96a23f..0f12aaa5 100644
--- a/go.sum
+++ b/go.sum
@@ -1,5 +1,5 @@
-github.com/99designs/gqlgen v0.17.34 h1:5cS5/OKFguQt+Ws56uj9FlG2xm1IlcJWNF2jrMIKYFQ=
-github.com/99designs/gqlgen v0.17.34/go.mod h1:Axcd3jIFHBVcqzixujJQr1wGqE+lGTpz6u4iZBZg1G8=
+github.com/99designs/gqlgen v0.17.35 h1:r0KF1xL3cPMyUArNWeC3e2Ckuc4iiLm7bj5xzYZQYbQ=
+github.com/99designs/gqlgen v0.17.35/go.mod h1:Vlf7TeY3ZdVI9SagB5IZE8CYhpq8kJPCVPJ7MrlVoX0=
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible h1:1G1pk05UrOh0NlF1oeaaix1x8XzrfjIDK47TY0Zehcw=
@@ -34,10 +34,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
-github.com/caddyserver/certmagic v0.18.2 h1:Nj2+M+A2Ho9IF6n1wUSbra4mX1X6ALzWpul9HooprHA=
-github.com/caddyserver/certmagic v0.18.2/go.mod h1:cLsgYXecH1iVUPjDXw15/1SKjZk/TK+aFfQk5FnugGQ=
-github.com/casbin/casbin/v2 v2.71.1 h1:LRHyqM0S1LzM/K59PmfUIN0ZJfLgcOjL4OhOQI/FNXU=
-github.com/casbin/casbin/v2 v2.71.1/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg=
+github.com/caddyserver/certmagic v0.19.0 h1:HuJ1Yf1H1jAfmBGrSSQN1XRkafnWcpDtyIiyMV6vmpM=
+github.com/caddyserver/certmagic v0.19.0/go.mod h1:fsL01NomQ6N+kE2j37ZCnig2MFosG+MIO4ztnmG/zz8=
+github.com/casbin/casbin/v2 v2.72.0 h1:Lzp1h4rfQzjzN8N6FfaDDsLdhmZBqQot2Wc/Rnp8Eis=
+github.com/casbin/casbin/v2 v2.72.0/go.mod h1:mzGx0hYW9/ksOSpw3wNjk3NRAroq5VMFYUQ6G43iGPk=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -46,8 +46,8 @@ github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/datarhei/core-client-go/v16 v16.11.1-0.20230710090938-bfcb7f5f7b3e h1:PUBHatfuW/qclTFQ062QtxlDEsqH3HlIjqI3vUOKR3c=
-github.com/datarhei/core-client-go/v16 v16.11.1-0.20230710090938-bfcb7f5f7b3e/go.mod h1:3eKfwhPKoW7faTn+luShRVNMqcIskvlIKjRJ7ShjyL8=
+github.com/datarhei/core-client-go/v16 v16.11.1-0.20230717141633-8f0e5ce4c68c h1:VECuOSlBtcikfAkb00DFhxKXeJzpMpeUVEZIJRnpEDE=
+github.com/datarhei/core-client-go/v16 v16.11.1-0.20230717141633-8f0e5ce4c68c/go.mod h1:3eKfwhPKoW7faTn+luShRVNMqcIskvlIKjRJ7ShjyL8=
github.com/datarhei/gosrt v0.5.2 h1:eagqZwEIiGPNJW0rLep3gwceObyaZ17+iKRc+l4VEpc=
github.com/datarhei/gosrt v0.5.2/go.mod h1:0308GQhAu5hxe2KYdbss901aKceSSKXnwCr8Vs++eiw=
github.com/datarhei/joy4 v0.0.0-20230505074825-fde05957445a h1:Tf4DSHY1xruBglr+yYP5Wct7czM86GKMYgbXH8a7OFo=
@@ -75,8 +75,9 @@ github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
+github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ=
+github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA=
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
@@ -164,6 +165,7 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I=
github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
+github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -175,8 +177,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
-github.com/labstack/echo/v4 v4.10.2 h1:n1jAhnq/elIFTHr1EYpiYtyKgx4RW9ccVgkqByZaN2M=
-github.com/labstack/echo/v4 v4.10.2/go.mod h1:OEyqf2//K1DFdE57vw2DRgWY0M7s65IVQO2FzvI4J5k=
+github.com/labstack/echo/v4 v4.11.1 h1:dEpLU2FLg4UVmvCGPuk/APjlH6GDpbEPti61srUUUs4=
+github.com/labstack/echo/v4 v4.11.1/go.mod h1:YuYRTSM3CHs2ybfrL8Px48bO6BAnYIN4l8wSTMP6BDQ=
github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8=
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
@@ -216,8 +218,8 @@ github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo=
github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
-github.com/minio/minio-go/v7 v7.0.59 h1:lxIXwsTIcQkYoEG25rUJbzpmSB/oWeVDmxFo/uWUUsw=
-github.com/minio/minio-go/v7 v7.0.59/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
+github.com/minio/minio-go/v7 v7.0.60 h1:iHkrmWyHFs/eZiWc2F/5jAHtNBAFy+HjdhMX6FkkPWc=
+github.com/minio/minio-go/v7 v7.0.60/go.mod h1:NUDy4A4oXPq1l2yK6LTSvCEzAMeIcoz9lcj5dbzSrRE=
github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM=
github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
@@ -308,6 +310,13 @@ github.com/swaggo/files/v2 v2.0.0 h1:hmAt8Dkynw7Ssz46F6pn8ok6YmGZqHSVLZ+HQM7i0kw
github.com/swaggo/files/v2 v2.0.0/go.mod h1:24kk2Y9NYEJ5lHuCra6iVwkMjIekMCaFq/0JQj66kyM=
github.com/swaggo/swag v1.16.1 h1:fTNRhKstPKxcnoKsytm4sahr8FaYzUcT7i1/3nd/fBg=
github.com/swaggo/swag v1.16.1/go.mod h1:9/LMvHycG3NFHfR6LwvikHv5iFvmPADQ359cKikGxto=
+github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM=
+github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
+github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
+github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
+github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
+github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
+github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
@@ -321,8 +330,8 @@ github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyC
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
-github.com/vektah/gqlparser/v2 v2.5.6 h1:Ou14T0N1s191eRMZ1gARVqohcbe1e8FrcONScsq8cRU=
-github.com/vektah/gqlparser/v2 v2.5.6/go.mod h1:z8xXUff237NntSuH8mLFijZ+1tjV1swDbpDqjJmk6ME=
+github.com/vektah/gqlparser/v2 v2.5.8 h1:pm6WOnGdzFOCfcQo9L3+xzW51mKrlwTEg4Wr7AH1JW4=
+github.com/vektah/gqlparser/v2 v2.5.8/go.mod h1:z8xXUff237NntSuH8mLFijZ+1tjV1swDbpDqjJmk6ME=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -335,6 +344,12 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsr
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY=
+github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
+github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg=
+github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ=
+github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
+github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
diff --git a/vendor/github.com/99designs/gqlgen/CHANGELOG.md b/vendor/github.com/99designs/gqlgen/CHANGELOG.md
index 73c93188..02c90774 100644
--- a/vendor/github.com/99designs/gqlgen/CHANGELOG.md
+++ b/vendor/github.com/99designs/gqlgen/CHANGELOG.md
@@ -5,10 +5,144 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.32...HEAD)
+## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.34...HEAD)
+
+## [v0.17.34](https://github.com/99designs/gqlgen/compare/v0.17.33...v0.17.34) - 2023-06-23
+- 5a705857 release v0.17.34
+
+
1a9dbadd Use "No longer supported" as the default deprecationReason for deprecations without a reason specified (#2692)
+
+* fix: use "No longer supported" as the default deprecationReason for deprecated fields with no reason specified
+
+* test: add integration tests to ensure deprecated fields with no reason set get the default reason defined in the spec `No longer supported`
+
+
+
+- 640f3836 Update gqlparser dependency (#2694)
+
+- 5ac9fe59 Added flag to omit interface checks (#2689)
+
+abc3c627 feat: always use latest apollo sandbox (#2686)
+
+* feat: removeDuplicateTags() validates tags and panic with meaningful error message
+
+* Instead of pinning on _latest without subresource integrity check, update both url and integrity to latest
+
+* Update graphql/playground/apollo_sandbox_playground.go
+
+---------
+
+
+
+3b295bb4 added GoInitialismsConfig which overrides the initialisms to be regarded (#2683)
+
+* added GoInitialismsConfig which overrides the initialisms to be regarded
+
+* typo
+
+* adjusted examples and documentation
+
+* removed test with side-effects, adjustend yaml indentations, changed example entry "ID" to "CC" (again? I though I already did that)
+
+* comply with linter
+
+
+
+- ee6add4b Refactor TypeIdentifier to avoid circular imports (#2682)
+
+- 44376e52 fix subscription example in documentation (#2677)
+
+d5080828 Reworked integration testing using vitest (#2675)
+
+* Reworked integration using vitest
+Added SSE client testing
+Fixed SSE Transport parse errors not being sent as event-stream
+
+* Added defer testing using urql
+
+* Cleanup unnecessary dependencies
+
+
+
+d16f498f fix: issue with extraFields being thrown away (#2674)
+
+* fix: issue with extraFields being thrown away
+
+* Go fumpt on file
+
+
+---------
+
+
+
+- 71d16aa0 v0.17.33 postrelease bump
+
+
+
+
+
+
+## [v0.17.33](https://github.com/99designs/gqlgen/compare/v0.17.32...v0.17.33) - 2023-06-13
+- a1e34ca0 release v0.17.33
+
+790a72c1 issue-1372: add custom decode func (#2666)
+
+* issue-1372: add custom decode func
+
+* issue-1372: add custom decode method
+
+* issue-1372: fix lint
+
+* issue-1372: add custom decode func
+
+* issue-1372: add custom decode method
+
+* issue-1372: fix lint
+
+* issue-1372: extend functionality by setting up the whole decode config instead of one nested field
+
+* issue-1372: rollback generated.go file
+
+* issue-1372: fix lint
+
+
+
+c63c60eb Update all modules (#2667)
+
+* Update all modules
+
+
+* Add gqlparser v2.5.3
+
+
+---------
+
+
+
+4a78eb0c minor cleaning: fix some stricter lint rule warnings (#2665)
+
+* Add Changelog notes
+
+
+* Some spring cleaning
+
+
+* Update golangci-lint to latest
+
+
+---------
+
+
+
+- 1e925f7e v0.17.32 postrelease bump
+
+
+
+
+
## [v0.17.32](https://github.com/99designs/gqlgen/compare/v0.17.31...v0.17.32) - 2023-06-06
- 3a81a78b release v0.17.32
diff --git a/vendor/github.com/99designs/gqlgen/graphql/playground/apollo_sandbox_playground.go b/vendor/github.com/99designs/gqlgen/graphql/playground/apollo_sandbox_playground.go
index be466adc..63b60743 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/playground/apollo_sandbox_playground.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/playground/apollo_sandbox_playground.go
@@ -53,7 +53,7 @@ func ApolloSandboxHandler(title, endpoint string) http.HandlerFunc {
"title": title,
"endpoint": endpoint,
"endpointIsAbsolute": endpointHasScheme(endpoint),
- "mainSRI": "sha256-/ldbSJ7EovavF815TfCN50qKB9AMvzskb9xiG71bmg2I=",
+ "mainSRI": "sha256-ldbSJ7EovavF815TfCN50qKB9AMvzskb9xiG71bmg2I=",
})
if err != nil {
panic(err)
diff --git a/vendor/github.com/99designs/gqlgen/graphql/playground/playground.go b/vendor/github.com/99designs/gqlgen/graphql/playground/playground.go
index 08e5e4a8..da17ca62 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/playground/playground.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/playground/playground.go
@@ -24,12 +24,12 @@ var page = template.Must(template.New("graphiql").Parse(`
}
@@ -82,11 +82,11 @@ func Handler(title string, endpoint string) http.HandlerFunc {
"endpoint": endpoint,
"endpointIsAbsolute": endpointHasScheme(endpoint),
"subscriptionEndpoint": getSubscriptionEndpoint(endpoint),
- "version": "2.0.7",
- "cssSRI": "sha256-gQryfbGYeYFxnJYnfPStPYFt0+uv8RP8Dm++eh00G9c=",
- "jsSRI": "sha256-qQ6pw7LwTLC+GfzN+cJsYXfVWRKH9O5o7+5H96gTJhQ=",
- "reactSRI": "sha256-Ipu/TQ50iCCVZBUsZyNJfxrDk0E2yhaEIz0vqI+kFG8=",
- "reactDOMSRI": "sha256-nbMykgB6tsOFJ7OdVmPpdqMFVk4ZsqWocT6issAPUF0=",
+ "version": "3.0.1",
+ "cssSRI": "sha256-wTzfn13a+pLMB5rMeysPPR1hO7x0SwSeQI+cnw7VdbE=",
+ "jsSRI": "sha256-dLnxjV+d2rFUCtYKjbPy413/8O+Ahy7QqAhaPNlL8fk=",
+ "reactSRI": "sha256-S0lp+k7zWUMk2ixteM6HZvu8L9Eh//OVrt+ZfbCpmgY=",
+ "reactDOMSRI": "sha256-IXWO0ITNDjfnNXIu5POVfqlgYoop36bDzhodR6LW5Pc=",
})
if err != nil {
panic(err)
diff --git a/vendor/github.com/99designs/gqlgen/graphql/version.go b/vendor/github.com/99designs/gqlgen/graphql/version.go
index 46f7763b..6592e463 100644
--- a/vendor/github.com/99designs/gqlgen/graphql/version.go
+++ b/vendor/github.com/99designs/gqlgen/graphql/version.go
@@ -1,3 +1,3 @@
package graphql
-const Version = "v0.17.34"
+const Version = "v0.17.35"
diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go
index 4cc8708d..d0ee8435 100644
--- a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go
+++ b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go
@@ -144,7 +144,7 @@ func (f *federation) InjectSourceEarly() *ast.Source {
}
}
-// InjectSources creates a GraphQL Entity type with all
+// InjectSourceLate creates a GraphQL Entity type with all
// the fields that had the @key directive
func (f *federation) InjectSourceLate(schema *ast.Schema) *ast.Source {
f.setEntities(schema)
diff --git a/vendor/github.com/caddyserver/certmagic/README.md b/vendor/github.com/caddyserver/certmagic/README.md
index 43fb1f0a..aba3ad14 100644
--- a/vendor/github.com/caddyserver/certmagic/README.md
+++ b/vendor/github.com/caddyserver/certmagic/README.md
@@ -249,7 +249,7 @@ cache = certmagic.NewCache(certmagic.CacheOptions{
// Here we use New to get a valid Config associated with the same cache.
// The provided Config is used as a template and will be completed with
// any defaults that are set in the Default config.
- return certmagic.New(cache, &certmagic.config{
+ return certmagic.New(cache, certmagic.Config{
// ...
}), nil
},
@@ -267,7 +267,7 @@ myACME := certmagic.NewACMEIssuer(magic, certmagic.ACMEIssuer{
// plus any other customizations you need
})
-magic.Issuer = myACME
+magic.Issuers = []certmagic.Issuer{myACME}
// this obtains certificates or renews them if necessary
err := magic.ManageSync(context.TODO(), []string{"example.com", "sub.example.com"})
diff --git a/vendor/github.com/caddyserver/certmagic/acmeissuer.go b/vendor/github.com/caddyserver/certmagic/acmeissuer.go
index e930878b..84adbc5c 100644
--- a/vendor/github.com/caddyserver/certmagic/acmeissuer.go
+++ b/vendor/github.com/caddyserver/certmagic/acmeissuer.go
@@ -299,7 +299,7 @@ func (iss *ACMEIssuer) isAgreed() bool {
// batch is eligible for certificates if using Let's Encrypt.
// It also ensures that an email address is available.
func (am *ACMEIssuer) PreCheck(ctx context.Context, names []string, interactive bool) error {
- publicCA := strings.Contains(am.CA, "api.letsencrypt.org") || strings.Contains(am.CA, "acme.zerossl.com")
+ publicCA := strings.Contains(am.CA, "api.letsencrypt.org") || strings.Contains(am.CA, "acme.zerossl.com") || strings.Contains(am.CA, "api.pki.goog")
if publicCA {
for _, name := range names {
if !SubjectQualifiesForPublicCert(name) {
diff --git a/vendor/github.com/caddyserver/certmagic/cache.go b/vendor/github.com/caddyserver/certmagic/cache.go
index a28021cc..152fe18a 100644
--- a/vendor/github.com/caddyserver/certmagic/cache.go
+++ b/vendor/github.com/caddyserver/certmagic/cache.go
@@ -48,7 +48,8 @@ import (
// differently.
type Cache struct {
// User configuration of the cache
- options CacheOptions
+ options CacheOptions
+ optionsMu sync.RWMutex
// The cache is keyed by certificate hash
cache map[string]Certificate
@@ -56,7 +57,7 @@ type Cache struct {
// cacheIndex is a map of SAN to cache key (cert hash)
cacheIndex map[string][]string
- // Protects the cache and index maps
+ // Protects the cache and cacheIndex maps
mu sync.RWMutex
// Close this channel to cancel asset maintenance
@@ -128,6 +129,12 @@ func NewCache(opts CacheOptions) *Cache {
return c
}
+func (certCache *Cache) SetOptions(opts CacheOptions) {
+ certCache.optionsMu.Lock()
+ certCache.options = opts
+ certCache.optionsMu.Unlock()
+}
+
// Stop stops the maintenance goroutine for
// certificates in certCache. It blocks until
// stopping is complete. Once a cache is
@@ -226,7 +233,11 @@ func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) {
// if the cache is at capacity, make room for new cert
cacheSize := len(certCache.cache)
- if certCache.options.Capacity > 0 && cacheSize >= certCache.options.Capacity {
+ certCache.optionsMu.RLock()
+ atCapacity := certCache.options.Capacity > 0 && cacheSize >= certCache.options.Capacity
+ certCache.optionsMu.RUnlock()
+
+ if atCapacity {
// Go maps are "nondeterministic" but not actually random,
// so although we could just chop off the "front" of the
// map with less code, that is a heavily skewed eviction
@@ -256,6 +267,7 @@ func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) {
certCache.cacheIndex[name] = append(certCache.cacheIndex[name], cert.hash)
}
+ certCache.optionsMu.RLock()
certCache.logger.Debug("added certificate to cache",
zap.Strings("subjects", cert.Names),
zap.Time("expiration", expiresAt(cert.Leaf)),
@@ -264,6 +276,7 @@ func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) {
zap.String("hash", cert.hash),
zap.Int("cache_size", len(certCache.cache)),
zap.Int("cache_capacity", certCache.options.Capacity))
+ certCache.optionsMu.RUnlock()
}
// removeCertificate removes cert from the cache.
@@ -290,6 +303,7 @@ func (certCache *Cache) removeCertificate(cert Certificate) {
// delete the actual cert from the cache
delete(certCache.cache, cert.hash)
+ certCache.optionsMu.RLock()
certCache.logger.Debug("removed certificate from cache",
zap.Strings("subjects", cert.Names),
zap.Time("expiration", expiresAt(cert.Leaf)),
@@ -298,6 +312,7 @@ func (certCache *Cache) removeCertificate(cert Certificate) {
zap.String("hash", cert.hash),
zap.Int("cache_size", len(certCache.cache)),
zap.Int("cache_capacity", certCache.options.Capacity))
+ certCache.optionsMu.RUnlock()
}
// replaceCertificate atomically replaces oldCert with newCert in
@@ -314,11 +329,13 @@ func (certCache *Cache) replaceCertificate(oldCert, newCert Certificate) {
zap.Time("new_expiration", expiresAt(newCert.Leaf)))
}
-func (certCache *Cache) getAllMatchingCerts(name string) []Certificate {
+// getAllMatchingCerts returns all certificates with exactly this subject
+// (wildcards are NOT expanded).
+func (certCache *Cache) getAllMatchingCerts(subject string) []Certificate {
certCache.mu.RLock()
defer certCache.mu.RUnlock()
- allCertKeys := certCache.cacheIndex[name]
+ allCertKeys := certCache.cacheIndex[subject]
certs := make([]Certificate, len(allCertKeys))
for i := range allCertKeys {
@@ -339,7 +356,11 @@ func (certCache *Cache) getAllCerts() []Certificate {
}
func (certCache *Cache) getConfig(cert Certificate) (*Config, error) {
- cfg, err := certCache.options.GetConfigForCert(cert)
+ certCache.optionsMu.RLock()
+ getCert := certCache.options.GetConfigForCert
+ certCache.optionsMu.RUnlock()
+
+ cfg, err := getCert(cert)
if err != nil {
return nil, err
}
@@ -373,6 +394,33 @@ func (certCache *Cache) AllMatchingCertificates(name string) []Certificate {
return certs
}
+// RemoveManaged removes managed certificates for the given subjects from the cache.
+// This effectively stops maintenance of those certificates.
+func (certCache *Cache) RemoveManaged(subjects []string) {
+ deleteQueue := make([]string, 0, len(subjects))
+ for _, subject := range subjects {
+ certs := certCache.getAllMatchingCerts(subject) // does NOT expand wildcards; exact matches only
+ for _, cert := range certs {
+ if !cert.managed {
+ continue
+ }
+ deleteQueue = append(deleteQueue, cert.hash)
+ }
+ }
+ certCache.Remove(deleteQueue)
+}
+
+// Remove removes certificates with the given hashes from the cache.
+// This is effectively used to unload manually-loaded certificates.
+func (certCache *Cache) Remove(hashes []string) {
+ certCache.mu.Lock()
+ for _, h := range hashes {
+ cert := certCache.cache[h]
+ certCache.removeCertificate(cert)
+ }
+ certCache.mu.Unlock()
+}
+
var (
defaultCache *Cache
defaultCacheMu sync.Mutex
diff --git a/vendor/github.com/caddyserver/certmagic/certificates.go b/vendor/github.com/caddyserver/certmagic/certificates.go
index 9e983406..16e51aaf 100644
--- a/vendor/github.com/caddyserver/certmagic/certificates.go
+++ b/vendor/github.com/caddyserver/certmagic/certificates.go
@@ -48,7 +48,7 @@ type Certificate struct {
// most recent OCSP response we have for this certificate.
ocsp *ocsp.Response
- // The hex-encoded hash of this cert's chain's bytes.
+ // The hex-encoded hash of this cert's chain's DER bytes.
hash string
// Whether this certificate is under our management.
@@ -64,6 +64,9 @@ func (cert Certificate) Empty() bool {
return len(cert.Certificate.Certificate) == 0
}
+// Hash returns a checksum of the certificate chain's DER-encoded bytes.
+func (cert Certificate) Hash() string { return cert.hash }
+
// NeedsRenewal returns true if the certificate is
// expiring soon (according to cfg) or has expired.
func (cert Certificate) NeedsRenewal(cfg *Config) bool {
@@ -155,29 +158,32 @@ func (cfg *Config) loadManagedCertificate(ctx context.Context, domain string) (C
// CacheUnmanagedCertificatePEMFile loads a certificate for host using certFile
// and keyFile, which must be in PEM format. It stores the certificate in
-// the in-memory cache.
+// the in-memory cache and returns the hash, useful for removing from the cache.
//
// This method is safe for concurrent use.
-func (cfg *Config) CacheUnmanagedCertificatePEMFile(ctx context.Context, certFile, keyFile string, tags []string) error {
+func (cfg *Config) CacheUnmanagedCertificatePEMFile(ctx context.Context, certFile, keyFile string, tags []string) (string, error) {
cert, err := cfg.makeCertificateFromDiskWithOCSP(ctx, cfg.Storage, certFile, keyFile)
if err != nil {
- return err
+ return "", err
}
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
cfg.emit(ctx, "cached_unmanaged_cert", map[string]any{"sans": cert.Names})
- return nil
+ return cert.hash, nil
}
-// CacheUnmanagedTLSCertificate adds tlsCert to the certificate cache.
+// CacheUnmanagedTLSCertificate adds tlsCert to the certificate cache
+//
+// and returns the hash, useful for removing from the cache.
+//
// It staples OCSP if possible.
//
// This method is safe for concurrent use.
-func (cfg *Config) CacheUnmanagedTLSCertificate(ctx context.Context, tlsCert tls.Certificate, tags []string) error {
+func (cfg *Config) CacheUnmanagedTLSCertificate(ctx context.Context, tlsCert tls.Certificate, tags []string) (string, error) {
var cert Certificate
err := fillCertFromLeaf(&cert, tlsCert)
if err != nil {
- return err
+ return "", err
}
err = stapleOCSP(ctx, cfg.OCSP, cfg.Storage, &cert, nil)
if err != nil {
@@ -186,22 +192,23 @@ func (cfg *Config) CacheUnmanagedTLSCertificate(ctx context.Context, tlsCert tls
cfg.emit(ctx, "cached_unmanaged_cert", map[string]any{"sans": cert.Names})
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
- return nil
+ return cert.hash, nil
}
// CacheUnmanagedCertificatePEMBytes makes a certificate out of the PEM bytes
-// of the certificate and key, then caches it in memory.
+// of the certificate and key, then caches it in memory, and returns the hash,
+// which is useful for removing from the cache.
//
// This method is safe for concurrent use.
-func (cfg *Config) CacheUnmanagedCertificatePEMBytes(ctx context.Context, certBytes, keyBytes []byte, tags []string) error {
+func (cfg *Config) CacheUnmanagedCertificatePEMBytes(ctx context.Context, certBytes, keyBytes []byte, tags []string) (string, error) {
cert, err := cfg.makeCertificateWithOCSP(ctx, certBytes, keyBytes)
if err != nil {
- return err
+ return "", err
}
cert.Tags = tags
cfg.certCache.cacheCertificate(cert)
cfg.emit(ctx, "cached_unmanaged_cert", map[string]any{"sans": cert.Names})
- return nil
+ return cert.hash, nil
}
// makeCertificateFromDiskWithOCSP makes a Certificate by loading the
diff --git a/vendor/github.com/caddyserver/certmagic/certmagic.go b/vendor/github.com/caddyserver/certmagic/certmagic.go
index 1fe8c1cc..6bc33d58 100644
--- a/vendor/github.com/caddyserver/certmagic/certmagic.go
+++ b/vendor/github.com/caddyserver/certmagic/certmagic.go
@@ -294,17 +294,12 @@ type OnDemandConfig struct {
// that allows the same names it already passed
// into Manage) and without letting clients have
// their run of any domain names they want.
- // Only enforced if len > 0.
- hostAllowlist []string
-}
-
-func (o *OnDemandConfig) allowlistContains(name string) bool {
- for _, n := range o.hostAllowlist {
- if strings.EqualFold(n, name) {
- return true
- }
- }
- return false
+ // Only enforced if len > 0. (This is a map to
+ // avoid O(n^2) performance; when it was a slice,
+ // we saw a 30s CPU profile for a config managing
+ // 110K names where 29s was spent checking for
+ // duplicates. Order is not important here.)
+ hostAllowlist map[string]struct{}
}
// isLoopback returns true if the hostname of addr looks
diff --git a/vendor/github.com/caddyserver/certmagic/config.go b/vendor/github.com/caddyserver/certmagic/config.go
index a5e1a10d..3d80a7eb 100644
--- a/vendor/github.com/caddyserver/certmagic/config.go
+++ b/vendor/github.com/caddyserver/certmagic/config.go
@@ -209,7 +209,10 @@ func New(certCache *Cache, cfg Config) *Config {
if certCache == nil {
panic("a certificate cache is required")
}
- if certCache.options.GetConfigForCert == nil {
+ certCache.optionsMu.RLock()
+ getConfigForCert := certCache.options.GetConfigForCert
+ defer certCache.optionsMu.RUnlock()
+ if getConfigForCert == nil {
panic("cache must have GetConfigForCert set in its options")
}
return newWithCache(certCache, cfg)
@@ -278,17 +281,20 @@ func newWithCache(certCache *Cache, cfg Config) *Config {
// ManageSync causes the certificates for domainNames to be managed
// according to cfg. If cfg.OnDemand is not nil, then this simply
-// whitelists the domain names and defers the certificate operations
+// allowlists the domain names and defers the certificate operations
// to when they are needed. Otherwise, the certificates for each
-// name are loaded from storage or obtained from the CA. If loaded
-// from storage, they are renewed if they are expiring or expired.
-// It then caches the certificate in memory and is prepared to serve
-// them up during TLS handshakes.
+// name are loaded from storage or obtained from the CA if not already
+// in the cache associated with the Config. If loaded from storage,
+// they are renewed if they are expiring or expired. It then caches
+// the certificate in memory and is prepared to serve them up during
+// TLS handshakes. To change how an already-loaded certificate is
+// managed, update the cache options relating to getting a config for
+// a cert.
//
-// Note that name whitelisting for on-demand management only takes
+// Note that name allowlisting for on-demand management only takes
// effect if cfg.OnDemand.DecisionFunc is not set (is nil); it will
// not overwrite an existing DecisionFunc, nor will it overwrite
-// its decision; i.e. the implicit whitelist is only used if no
+// its decision; i.e. the implicit allowlist is only used if no
// DecisionFunc is set.
//
// This method is synchronous, meaning that certificates for all
@@ -348,13 +354,14 @@ func (cfg *Config) manageAll(ctx context.Context, domainNames []string, async bo
if ctx == nil {
ctx = context.Background()
}
+ if cfg.OnDemand != nil && cfg.OnDemand.hostAllowlist == nil {
+ cfg.OnDemand.hostAllowlist = make(map[string]struct{})
+ }
for _, domainName := range domainNames {
// if on-demand is configured, defer obtain and renew operations
if cfg.OnDemand != nil {
- if !cfg.OnDemand.allowlistContains(domainName) {
- cfg.OnDemand.hostAllowlist = append(cfg.OnDemand.hostAllowlist, domainName)
- }
+ cfg.OnDemand.hostAllowlist[normalizedName(domainName)] = struct{}{}
continue
}
@@ -370,6 +377,14 @@ func (cfg *Config) manageAll(ctx context.Context, domainNames []string, async bo
}
func (cfg *Config) manageOne(ctx context.Context, domainName string, async bool) error {
+ // if certificate is already being managed, nothing to do; maintenance will continue
+ certs := cfg.certCache.getAllMatchingCerts(domainName)
+ for _, cert := range certs {
+ if cert.managed {
+ return nil
+ }
+ }
+
// first try loading existing certificate from storage
cert, err := cfg.CacheManagedCertificate(ctx, domainName)
if err != nil {
@@ -449,28 +464,6 @@ func (cfg *Config) manageOne(ctx context.Context, domainName string, async bool)
return renew()
}
-// Unmanage causes the certificates for domainNames to stop being managed.
-// If there are certificates for the supplied domain names in the cache, they
-// are evicted from the cache.
-func (cfg *Config) Unmanage(domainNames []string) {
- var deleteQueue []Certificate
- for _, domainName := range domainNames {
- certs := cfg.certCache.AllMatchingCertificates(domainName)
- for _, cert := range certs {
- if !cert.managed {
- continue
- }
- deleteQueue = append(deleteQueue, cert)
- }
- }
-
- cfg.certCache.mu.Lock()
- for _, cert := range deleteQueue {
- cfg.certCache.removeCertificate(cert)
- }
- cfg.certCache.mu.Unlock()
-}
-
// ObtainCertSync generates a new private key and obtains a certificate for
// name using cfg in the foreground; i.e. interactively and without retries.
// It stows the renewed certificate and its assets in storage if successful.
diff --git a/vendor/github.com/caddyserver/certmagic/crypto.go b/vendor/github.com/caddyserver/certmagic/crypto.go
index 7a11964e..5855ad75 100644
--- a/vendor/github.com/caddyserver/certmagic/crypto.go
+++ b/vendor/github.com/caddyserver/certmagic/crypto.go
@@ -22,7 +22,6 @@ import (
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
- "crypto/sha256"
"crypto/tls"
"crypto/x509"
"encoding/json"
@@ -35,6 +34,7 @@ import (
"strings"
"github.com/klauspost/cpuid/v2"
+ "github.com/zeebo/blake3"
"go.uber.org/zap"
"golang.org/x/net/idna"
)
@@ -271,7 +271,7 @@ func (cfg *Config) loadCertResource(ctx context.Context, issuer Issuer, certName
// which is the chain of DER-encoded bytes. It returns the
// hex encoding of the hash.
func hashCertificateChain(certChain [][]byte) string {
- h := sha256.New()
+ h := blake3.New()
for _, certInChain := range certChain {
h.Write(certInChain)
}
diff --git a/vendor/github.com/caddyserver/certmagic/handshake.go b/vendor/github.com/caddyserver/certmagic/handshake.go
index 6d28ddfc..a3d714e8 100644
--- a/vendor/github.com/caddyserver/certmagic/handshake.go
+++ b/vendor/github.com/caddyserver/certmagic/handshake.go
@@ -341,7 +341,9 @@ func (cfg *Config) getCertDuringHandshake(ctx context.Context, hello *tls.Client
// perfectly full while still being able to load needed certs from storage.
// See https://caddy.community/t/error-tls-alert-internal-error-592-again/13272
// and caddyserver/caddy#4320.
+ cfg.certCache.optionsMu.RLock()
cacheCapacity := float64(cfg.certCache.options.Capacity)
+ cfg.certCache.optionsMu.RUnlock()
cacheAlmostFull := cacheCapacity > 0 && float64(cacheSize) >= cacheCapacity*.9
loadDynamically := cfg.OnDemand != nil || cacheAlmostFull
@@ -448,8 +450,10 @@ func (cfg *Config) checkIfCertShouldBeObtained(name string, requireOnDemand bool
}
return nil
}
- if len(cfg.OnDemand.hostAllowlist) > 0 && !cfg.OnDemand.allowlistContains(name) {
- return fmt.Errorf("certificate for '%s' is not managed", name)
+ if len(cfg.OnDemand.hostAllowlist) > 0 {
+ if _, ok := cfg.OnDemand.hostAllowlist[name]; !ok {
+ return fmt.Errorf("certificate for '%s' is not managed", name)
+ }
}
}
return nil
diff --git a/vendor/github.com/caddyserver/certmagic/maintain.go b/vendor/github.com/caddyserver/certmagic/maintain.go
index eb340f1a..475bb6df 100644
--- a/vendor/github.com/caddyserver/certmagic/maintain.go
+++ b/vendor/github.com/caddyserver/certmagic/maintain.go
@@ -53,8 +53,10 @@ func (certCache *Cache) maintainAssets(panicCount int) {
}
}()
+ certCache.optionsMu.RLock()
renewalTicker := time.NewTicker(certCache.options.RenewCheckInterval)
ocspTicker := time.NewTicker(certCache.options.OCSPCheckInterval)
+ certCache.optionsMu.RUnlock()
log.Info("started background certificate maintenance")
diff --git a/vendor/github.com/caddyserver/certmagic/storage.go b/vendor/github.com/caddyserver/certmagic/storage.go
index 9cae3c38..5ee39291 100644
--- a/vendor/github.com/caddyserver/certmagic/storage.go
+++ b/vendor/github.com/caddyserver/certmagic/storage.go
@@ -25,18 +25,31 @@ import (
"go.uber.org/zap"
)
-// Storage is a type that implements a key-value store.
-// Keys are prefix-based, with forward slash '/' as separators
-// and without a leading slash.
+// Storage is a type that implements a key-value store with
+// basic file system (folder path) semantics. Keys use the
+// forward slash '/' to separate path components and have no
+// leading or trailing slashes.
+//
+// A "prefix" of a key is defined on a component basis,
+// e.g. "a" is a prefix of "a/b" but not "ab/c".
+//
+// A "file" is a key with a value associated with it.
+//
+// A "directory" is a key with no value, but which may be
+// the prefix of other keys.
+//
+// Keys passed into Load and Store always have "file" semantics,
+// whereas "directories" are only implicit by leading up to the
+// file.
+//
+// The Load, Delete, List, and Stat methods should return
+// fs.ErrNotExist if the key does not exist.
//
// Processes running in a cluster should use the same Storage
// value (with the same configuration) in order to share
// certificates and other TLS resources with the cluster.
//
-// The Load, Delete, List, and Stat methods should return
-// fs.ErrNotExist if the key does not exist.
-//
-// Implementations of Storage must be safe for concurrent use
+// Implementations of Storage MUST be safe for concurrent use
// and honor context cancellations. Methods should block until
// their operation is complete; that is, Load() should always
// return the value from the last call to Store() for a given
@@ -46,36 +59,45 @@ import (
// For simplicity, this is not a streaming API and is not
// suitable for very large files.
type Storage interface {
- // Locker provides atomic synchronization
- // operations, making Storage safe to share.
- // The use of Locker is not expected around
- // every other method (Store, Load, etc.)
- // as those should already be thread-safe;
- // Locker is intended for custom jobs or
- // transactions that need synchronization.
+ // Locker enables the storage backend to synchronize
+ // operational units of work.
+ //
+ // The use of Locker is NOT employed around every
+ // Storage method call (Store, Load, etc), as these
+ // should already be thread-safe. Locker is used for
+ // high-level jobs or transactions that need
+ // synchronization across a cluster; it's a simple
+ // distributed lock. For example, CertMagic uses the
+ // Locker interface to coordinate the obtaining of
+ // certificates.
Locker
- // Store puts value at key.
+ // Store puts value at key. It creates the key if it does
+ // not exist and overwrites any existing value at this key.
Store(ctx context.Context, key string, value []byte) error
// Load retrieves the value at key.
Load(ctx context.Context, key string) ([]byte, error)
- // Delete deletes key. An error should be
- // returned only if the key still exists
+ // Delete deletes the named key. If the name is a
+ // directory (i.e. prefix of other keys), all keys
+ // prefixed by this key should be deleted. An error
+ // should be returned only if the key still exists
// when the method returns.
Delete(ctx context.Context, key string) error
- // Exists returns true if the key exists
+ // Exists returns true if the key exists either as
+ // a directory (prefix to other keys) or a file,
// and there was no error checking.
Exists(ctx context.Context, key string) bool
- // List returns all keys that match prefix.
+ // List returns all keys in the given path.
+ //
// If recursive is true, non-terminal keys
// will be enumerated (i.e. "directories"
// should be walked); otherwise, only keys
// prefixed exactly by prefix will be listed.
- List(ctx context.Context, prefix string, recursive bool) ([]string, error)
+ List(ctx context.Context, path string, recursive bool) ([]string, error)
// Stat returns information about key.
Stat(ctx context.Context, key string) (KeyInfo, error)
@@ -84,38 +106,46 @@ type Storage interface {
// Locker facilitates synchronization across machines and networks.
// It essentially provides a distributed named-mutex service so
// that multiple consumers can coordinate tasks and share resources.
+//
+// If possible, a Locker should implement a coordinated distributed
+// locking mechanism by generating fencing tokens (see
+// https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html).
+// This typically requires a central server or consensus algorithm.
+// However, if that is not feasible, Lockers may implement an
+// alternative mechanism that uses timeouts to detect node or network
+// failures and avoid deadlocks. For example, the default FileStorage
+// writes a timestamp to the lock file every few seconds, and if another
+// node acquiring the lock sees that timestamp is too old, it may
+// assume the lock is stale.
+//
+// As not all Locker implementations use fencing tokens, code relying
+// upon Locker must be tolerant of some mis-synchronizations but can
+// expect them to be rare.
+//
+// This interface should only be used for coordinating expensive
+// operations across nodes in a cluster; not for internal, extremely
+// short-lived, or high-contention locks.
type Locker interface {
// Lock acquires the lock for name, blocking until the lock
- // can be obtained or an error is returned. Note that, even
- // after acquiring a lock, an idempotent operation may have
- // already been performed by another process that acquired
- // the lock before - so always check to make sure idempotent
- // operations still need to be performed after acquiring the
- // lock.
+ // can be obtained or an error is returned. Only one lock
+ // for the given name can exist at a time. A call to Lock for
+ // a name which already exists blocks until the named lock
+ // is released or becomes stale.
//
- // The actual implementation of obtaining of a lock must be
- // an atomic operation so that multiple Lock calls at the
- // same time always results in only one caller receiving the
- // lock at any given time.
+ // If the named lock represents an idempotent operation, callers
+ // should always check to make sure the work still needs to be
+ // completed after acquiring the lock. You never know if another
+ // process already completed the task while you were waiting to
+ // acquire it.
//
- // To prevent deadlocks, all implementations should put a
- // reasonable expiration on the lock in case Unlock is unable
- // to be called due to some sort of network failure or system
- // crash. Additionally, implementations should honor context
- // cancellation as much as possible (in case the caller wishes
- // to give up and free resources before the lock can be obtained).
- //
- // Additionally, implementations may wish to support fencing
- // tokens (https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html)
- // in order to be robust against long process pauses, extremely
- // high network latency (or other factors that get in the way of
- // renewing lock leases).
+ // Implementations should honor context cancellation.
Lock(ctx context.Context, name string) error
- // Unlock releases the lock for name. This method must ONLY be
- // called after a successful call to Lock, and only after the
- // critical section is finished, even if it errored or timed
- // out. Unlock cleans up any resources allocated during Lock.
+ // Unlock releases named lock. This method must ONLY be called
+ // after a successful call to Lock, and only after the critical
+ // section is finished, even if it errored or timed out. Unlock
+ // cleans up any resources allocated during Lock. Unlock should
+ // only return an error if the lock was unable to be released.
Unlock(ctx context.Context, name string) error
}
@@ -130,7 +160,7 @@ type KeyInfo struct {
Key string
Modified time.Time
Size int64
- IsTerminal bool // false for keys that only contain other keys (like directories)
+ IsTerminal bool // false for directories (keys that act as prefix for other keys)
}
// storeTx stores all the values or none at all.
diff --git a/vendor/github.com/casbin/casbin/v2/CONTRIBUTING.md b/vendor/github.com/casbin/casbin/v2/CONTRIBUTING.md
index 702b2d92..4bab59c9 100644
--- a/vendor/github.com/casbin/casbin/v2/CONTRIBUTING.md
+++ b/vendor/github.com/casbin/casbin/v2/CONTRIBUTING.md
@@ -8,7 +8,7 @@ This project adheres to the [Contributor Covenant 1.2.](https://www.contributor-
- We do our best to have an [up-to-date documentation](https://casbin.org/docs/overview)
- [Stack Overflow](https://stackoverflow.com) is the best place to start if you have a question. Please use the [casbin tag](https://stackoverflow.com/tags/casbin/info) we are actively monitoring. We encourage you to use Stack Overflow specially for Modeling Access Control Problems, in order to build a shared knowledge base.
-- You can also join our [Gitter community](https://gitter.im/casbin/Lobby).
+- You can also join our [Discord](https://discord.gg/S5UjpzGZjN).
## Reporting issues
diff --git a/vendor/github.com/casbin/casbin/v2/README.md b/vendor/github.com/casbin/casbin/v2/README.md
index 98723bff..7d5c1053 100644
--- a/vendor/github.com/casbin/casbin/v2/README.md
+++ b/vendor/github.com/casbin/casbin/v2/README.md
@@ -6,7 +6,7 @@ Casbin
[](https://coveralls.io/github/casbin/casbin?branch=master)
[](https://pkg.go.dev/github.com/casbin/casbin/v2)
[](https://github.com/casbin/casbin/releases/latest)
-[](https://gitter.im/casbin/lobby)
+[](https://discord.gg/S5UjpzGZjN)
[](https://sourcegraph.com/github.com/casbin/casbin?badge)
💖 [**Looking for an open-source identity and access management solution like Okta, Auth0, Keycloak ? Learn more about: Casdoor**](https://casdoor.org/)
diff --git a/vendor/github.com/casbin/casbin/v2/enforcer.go b/vendor/github.com/casbin/casbin/v2/enforcer.go
index bae4d27a..86167e92 100644
--- a/vendor/github.com/casbin/casbin/v2/enforcer.go
+++ b/vendor/github.com/casbin/casbin/v2/enforcer.go
@@ -17,11 +17,11 @@ package casbin
import (
"errors"
"fmt"
+ "regexp"
"runtime/debug"
"strings"
"sync"
- "github.com/Knetic/govaluate"
"github.com/casbin/casbin/v2/effector"
"github.com/casbin/casbin/v2/log"
"github.com/casbin/casbin/v2/model"
@@ -30,6 +30,9 @@ import (
"github.com/casbin/casbin/v2/rbac"
defaultrolemanager "github.com/casbin/casbin/v2/rbac/default-role-manager"
"github.com/casbin/casbin/v2/util"
+
+ "github.com/Knetic/govaluate"
+ "github.com/tidwall/gjson"
)
// Enforcer is the main interface for authorization enforcement and policy management.
@@ -50,6 +53,7 @@ type Enforcer struct {
autoBuildRoleLinks bool
autoNotifyWatcher bool
autoNotifyDispatcher bool
+ acceptJsonRequest bool
logger log.Logger
}
@@ -476,6 +480,11 @@ func (e *Enforcer) EnableAutoBuildRoleLinks(autoBuildRoleLinks bool) {
e.autoBuildRoleLinks = autoBuildRoleLinks
}
+// EnableAcceptJsonRequest controls whether to accept json as a request parameter
+func (e *Enforcer) EnableAcceptJsonRequest(acceptJsonRequest bool) {
+ e.acceptJsonRequest = acceptJsonRequest
+}
+
// BuildRoleLinks manually rebuild the role inheritance relations.
func (e *Enforcer) BuildRoleLinks() error {
for _, rm := range e.rmMap {
@@ -564,6 +573,10 @@ func (e *Enforcer) enforce(matcher string, explains *[]string, rvals ...interfac
pTokens[token] = i
}
+ if e.acceptJsonRequest {
+ expString = requestJsonReplace(expString, rTokens, rvals)
+ }
+
parameters := enforceParameters{
rTokens: rTokens,
rVals: rvals,
@@ -609,7 +622,16 @@ func (e *Enforcer) enforce(matcher string, explains *[]string, rvals ...interfac
pvals)
}
- parameters.pVals = pvals
+ if e.acceptJsonRequest {
+ pvalsCopy := make([]string, len(pvals))
+ copy(pvalsCopy, pvals)
+ for i, pStr := range pvalsCopy {
+ pvalsCopy[i] = requestJsonReplace(pStr, rTokens, rvals)
+ }
+ parameters.pVals = pvalsCopy
+ } else {
+ parameters.pVals = pvals
+ }
result, err := expression.Eval(parameters)
// log.LogPrint("Result: ", result)
@@ -646,9 +668,9 @@ func (e *Enforcer) enforce(matcher string, explains *[]string, rvals ...interfac
policyEffects[policyIndex] = effector.Allow
}
- //if e.model["e"]["e"].Value == "priority(p_eft) || deny" {
+ // if e.model["e"]["e"].Value == "priority(p_eft) || deny" {
// break
- //}
+ // }
effect, explainIndex, err = e.eft.MergeEffects(e.model["e"][eType].Value, policyEffects, matcherResults, policyIndex, policyLen)
if err != nil {
@@ -711,6 +733,31 @@ func (e *Enforcer) enforce(matcher string, explains *[]string, rvals ...interfac
return result, nil
}
+var requestObjectRegex = regexp.MustCompile(`r[_.][A-Za-z_0-9]+\.[A-Za-z_0-9.]+[A-Za-z_0-9]`)
+var requestObjectRegexPrefix = regexp.MustCompile(`r[_.][A-Za-z_0-9]+\.`)
+
+// requestJsonReplace is used to support request parameters of type json
+// It will replace the access of the request object in matchers or policy with the actual value in the request json parameter
+// For example: request sub = `{"Owner": "alice", "Age": 30}`
+// policy: p, r.sub.Age > 18, /data1, read ==> p, 30 > 18, /data1, read
+// matchers: m = r.sub == r.obj.Owner ==> m = r.sub == "alice"
+func requestJsonReplace(str string, rTokens map[string]int, rvals []interface{}) string {
+ matches := requestObjectRegex.FindStringSubmatch(str)
+ for _, matchesStr := range matches {
+ prefix := requestObjectRegexPrefix.FindString(matchesStr)
+ jsonPath := strings.TrimPrefix(matchesStr, prefix)
+ tokenIndex := rTokens[prefix[:len(prefix)-1]]
+ if jsonStr, ok := rvals[tokenIndex].(string); ok {
+ newStr := gjson.Get(jsonStr, jsonPath).String()
+ if !util.IsNumeric(newStr) {
+ newStr = `"` + newStr + `"`
+ }
+ str = strings.Replace(str, matchesStr, newStr, -1)
+ }
+ }
+ return str
+}
+
func (e *Enforcer) getAndStoreMatcherExpression(hasEval bool, expString string, functions map[string]govaluate.ExpressionFunction) (*govaluate.EvaluableExpression, error) {
var expression *govaluate.EvaluableExpression
var err error
diff --git a/vendor/github.com/casbin/casbin/v2/util/util.go b/vendor/github.com/casbin/casbin/v2/util/util.go
index 88f4345d..e89e5eba 100644
--- a/vendor/github.com/casbin/casbin/v2/util/util.go
+++ b/vendor/github.com/casbin/casbin/v2/util/util.go
@@ -25,6 +25,12 @@ var evalReg = regexp.MustCompile(`\beval\((?P<rule>[^)]*)\)`)
var escapeAssertionRegex = regexp.MustCompile(`\b((r|p)[0-9]*)\.`)
+var numericRegex = regexp.MustCompile(`^-?\d+(?:\.\d+)?$`)
+
+func IsNumeric(s string) bool {
+ return numericRegex.MatchString(s)
+}
+
// EscapeAssertion escapes the dots in the assertion, because the expression evaluation doesn't support such variable names.
func EscapeAssertion(s string) string {
s = escapeAssertionRegex.ReplaceAllStringFunc(s, func(m string) string {
diff --git a/vendor/github.com/datarhei/core-client-go/v16/api/cluster.go b/vendor/github.com/datarhei/core-client-go/v16/api/cluster.go
index bc81e680..08e8a48e 100644
--- a/vendor/github.com/datarhei/core-client-go/v16/api/cluster.go
+++ b/vendor/github.com/datarhei/core-client-go/v16/api/cluster.go
@@ -7,15 +7,28 @@ import (
type ClusterNode struct {
ID string `json:"id"`
Name string `json:"name"`
+ Version string `json:"version"`
+ Status string `json:"status"`
+ Error string `json:"error"`
+ Voter bool `json:"voter"`
+ Leader bool `json:"leader"`
Address string `json:"address"`
- CreatedAt string `json:"created_at"`
- Uptime int64 `json:"uptime_seconds"`
- LastContact int64 `json:"last_contact"` // unix timestamp
- Latency float64 `json:"latency_ms"` // milliseconds
- State string `json:"state"`
+ CreatedAt string `json:"created_at"` // RFC 3339
+ Uptime int64 `json:"uptime_seconds"` // seconds
+ LastContact float64 `json:"last_contact_ms"` // milliseconds
+ Latency float64 `json:"latency_ms"` // milliseconds
+ Core ClusterNodeCore `json:"core"`
Resources ClusterNodeResources `json:"resources"`
}
+type ClusterNodeCore struct {
+ Address string `json:"address"`
+ Status string `json:"status"`
+ Error string `json:"error"`
+ LastContact float64 `json:"last_contact_ms"` // milliseconds
+ Latency float64 `json:"latency_ms"` // milliseconds
+}
+
type ClusterNodeResources struct {
IsThrottling bool `json:"is_throttling"`
NCPU float64 `json:"ncpu"`
@@ -25,52 +38,30 @@ type ClusterNodeResources struct {
MemLimit uint64 `json:"memory_limit_bytes"` // bytes
}
-type ClusterNodeFiles struct {
- LastUpdate int64 `json:"last_update"` // unix timestamp
- Files map[string][]string `json:"files"`
-}
-
-type ClusterRaftServer struct {
- ID string `json:"id"`
- Address string `json:"address"` // raft address
- Voter bool `json:"voter"`
- Leader bool `json:"leader"`
-}
-
-type ClusterRaftStats struct {
- State string `json:"state"`
- LastContact float64 `json:"last_contact_ms"`
- NumPeers uint64 `json:"num_peers"`
-}
-
type ClusterRaft struct {
- Server []ClusterRaftServer `json:"server"`
- Stats ClusterRaftStats `json:"stats"`
+ Address string `json:"address"`
+ State string `json:"state"`
+ LastContact float64 `json:"last_contact_ms"` // milliseconds
+ NumPeers uint64 `json:"num_peers"`
+ LogTerm uint64 `json:"log_term"`
+ LogIndex uint64 `json:"log_index"`
}
type ClusterAbout struct {
- ID string `json:"id"`
- Address string `json:"address"`
- ClusterAPIAddress string `json:"cluster_api_address"`
- CoreAPIAddress string `json:"core_api_address"`
- Raft ClusterRaft `json:"raft"`
- Nodes []ClusterNode `json:"nodes"`
- Version string `json:"version"`
- Degraded bool `json:"degraded"`
- DegradedErr string `json:"degraded_error"`
+ ID string `json:"id"`
+ Name string `json:"name"`
+ Leader bool `json:"leader"`
+ Address string `json:"address"`
+ Raft ClusterRaft `json:"raft"`
+ Nodes []ClusterNode `json:"nodes"`
+ Version string `json:"version"`
+ Degraded bool `json:"degraded"`
+ DegradedErr string `json:"degraded_error"`
}
-type ClusterProcess struct {
- ID string `json:"id"`
- Owner string `json:"owner"`
- Domain string `json:"domain"`
- NodeID string `json:"node_id"`
- Reference string `json:"reference"`
- Order string `json:"order"`
- State string `json:"state"`
- CPU float64 `json:"cpu" swaggertype:"number" jsonschema:"type=number"` // percent 0-100*ncpu
- Memory uint64 `json:"memory_bytes"` // bytes
- Runtime int64 `json:"runtime_seconds"` // seconds
+type ClusterNodeFiles struct {
+ LastUpdate int64 `json:"last_update"` // unix timestamp
+ Files map[string][]string `json:"files"`
}
type ClusterLock struct {
@@ -84,3 +75,5 @@ type ClusterKVSValue struct {
}
type ClusterKVS map[string]ClusterKVSValue
+
+type ClusterProcessMap map[string]string
diff --git a/vendor/github.com/datarhei/core-client-go/v16/api/process.go b/vendor/github.com/datarhei/core-client-go/v16/api/process.go
index 649a6ddd..4d0feb2c 100644
--- a/vendor/github.com/datarhei/core-client-go/v16/api/process.go
+++ b/vendor/github.com/datarhei/core-client-go/v16/api/process.go
@@ -7,6 +7,7 @@ type Process struct {
Domain string `json:"domain"`
Type string `json:"type" jsonschema:"enum=ffmpeg"`
Reference string `json:"reference"`
+ CoreID string `json:"core_id"`
CreatedAt int64 `json:"created_at" jsonschema:"minimum=0" format:"int64"` // Unix timestamp
UpdatedAt int64 `json:"updated_at" jsonschema:"minimum=0" format:"int64"` // Unix timestamp
Config *ProcessConfig `json:"config,omitempty"`
diff --git a/vendor/github.com/datarhei/core-client-go/v16/client.go b/vendor/github.com/datarhei/core-client-go/v16/client.go
index 75946964..664f5bf1 100644
--- a/vendor/github.com/datarhei/core-client-go/v16/client.go
+++ b/vendor/github.com/datarhei/core-client-go/v16/client.go
@@ -99,10 +99,11 @@ type RestClient interface {
IdentitySetPolicies(name string, p []api.IAMPolicy) error // PUT /v3/iam/user/{name}/policy
IdentityDelete(name string) error // DELETE /v3/iam/user/{name}
- Cluster() (api.ClusterAbout, error) // GET /v3/cluster
- ClusterHealthy() (bool, error) // GET /v3/cluster/healthy
- ClusterSnapshot() (io.ReadCloser, error) // GET /v3/cluster/snapshot
- ClusterLeave() error // PUT /v3/cluster/leave
+ Cluster() (api.ClusterAbout, error) // GET /v3/cluster
+ ClusterHealthy() (bool, error) // GET /v3/cluster/healthy
+ ClusterSnapshot() (io.ReadCloser, error) // GET /v3/cluster/snapshot
+ ClusterLeave() error // PUT /v3/cluster/leave
+ ClusterTransferLeadership(id string) error // PUT /v3/cluster/transfer/{id}
ClusterNodeList() ([]api.ClusterNode, error) // GET /v3/cluster/node
ClusterNode(id string) (api.ClusterNode, error) // GET /v3/cluster/node/{id}
@@ -110,13 +111,14 @@ type RestClient interface {
ClusterNodeProcessList(id string, opts ProcessListOptions) ([]api.Process, error) // GET /v3/cluster/node/{id}/process
ClusterNodeVersion(id string) (api.Version, error) // GET /v3/cluster/node/{id}/version
- ClusterDBProcessList() ([]api.Process, error) // GET /v3/cluster/db/process
- ClusterDBProcess(id ProcessID) (api.Process, error) // GET /v3/cluster/db/process/{id}
- ClusterDBUserList() ([]api.IAMUser, error) // GET /v3/cluster/db/user
- ClusterDBUser(name string) (api.IAMUser, error) // GET /v3/cluster/db/user/{name}
- ClusterDBPolicies() ([]api.IAMPolicy, error) // GET /v3/cluster/db/policies
- ClusterDBLocks() ([]api.ClusterLock, error) // GET /v3/cluster/db/locks
- ClusterDBKeyValues() (api.ClusterKVS, error) // GET /v3/cluster/db/kv
+ ClusterDBProcessList() ([]api.Process, error) // GET /v3/cluster/db/process
+ ClusterDBProcess(id ProcessID) (api.Process, error) // GET /v3/cluster/db/process/{id}
+ ClusterDBUserList() ([]api.IAMUser, error) // GET /v3/cluster/db/user
+ ClusterDBUser(name string) (api.IAMUser, error) // GET /v3/cluster/db/user/{name}
+ ClusterDBPolicies() ([]api.IAMPolicy, error) // GET /v3/cluster/db/policies
+ ClusterDBLocks() ([]api.ClusterLock, error) // GET /v3/cluster/db/locks
+ ClusterDBKeyValues() (api.ClusterKVS, error) // GET /v3/cluster/db/kv
+ ClusterDBProcessMap() (api.ClusterProcessMap, error) // GET /v3/cluster/db/map/process
ClusterProcessList(opts ProcessListOptions) ([]api.Process, error) // GET /v3/cluster/process
ClusterProcess(id ProcessID, filter []string) (api.Process, error) // POST /v3/cluster/process
@@ -415,6 +417,10 @@ func New(config Config) (RestClient, error) {
path: mustNewGlob("/v3/cluster/node/*/version"),
constraint: mustNewConstraint("^16.14.0"),
},
+ {
+ path: mustNewGlob("/v3/cluster/db/map/process"),
+ constraint: mustNewConstraint("^16.14.0"),
+ },
},
"POST": {
{
@@ -471,6 +477,10 @@ func New(config Config) (RestClient, error) {
path: mustNewGlob("/v3/session/token/*"),
constraint: mustNewConstraint("^16.14.0"),
},
+ {
+ path: mustNewGlob("/v3/cluster/transfer/*"),
+ constraint: mustNewConstraint("^16.14.0"),
+ },
},
"DELETE": {
{
diff --git a/vendor/github.com/datarhei/core-client-go/v16/cluster.go b/vendor/github.com/datarhei/core-client-go/v16/cluster.go
index 8f61964f..19e75d11 100644
--- a/vendor/github.com/datarhei/core-client-go/v16/cluster.go
+++ b/vendor/github.com/datarhei/core-client-go/v16/cluster.go
@@ -3,6 +3,7 @@ package coreclient
import (
"encoding/json"
"io"
+ "net/url"
"github.com/datarhei/core-client-go/v16/api"
)
@@ -42,3 +43,9 @@ func (r *restclient) ClusterLeave() error {
return err
}
+
+func (r *restclient) ClusterTransferLeadership(id string) error {
+ _, err := r.call("PUT", "/v3/cluster/transfer/"+url.PathEscape(id), nil, nil, "", nil)
+
+ return err
+}
diff --git a/vendor/github.com/datarhei/core-client-go/v16/cluster_db.go b/vendor/github.com/datarhei/core-client-go/v16/cluster_db.go
index 6239c6d7..f6e107ed 100644
--- a/vendor/github.com/datarhei/core-client-go/v16/cluster_db.go
+++ b/vendor/github.com/datarhei/core-client-go/v16/cluster_db.go
@@ -100,3 +100,16 @@ func (r *restclient) ClusterDBKeyValues() (api.ClusterKVS, error) {
return kvs, err
}
+
+func (r *restclient) ClusterDBProcessMap() (api.ClusterProcessMap, error) {
+ var m api.ClusterProcessMap
+
+ data, err := r.call("GET", "/v3/cluster/db/map/process", nil, nil, "", nil)
+ if err != nil {
+ return m, err
+ }
+
+ err = json.Unmarshal(data, &m)
+
+ return m, err
+}
diff --git a/vendor/github.com/go-openapi/jsonpointer/pointer.go b/vendor/github.com/go-openapi/jsonpointer/pointer.go
index 7df9853d..de60dc7d 100644
--- a/vendor/github.com/go-openapi/jsonpointer/pointer.go
+++ b/vendor/github.com/go-openapi/jsonpointer/pointer.go
@@ -26,6 +26,7 @@
package jsonpointer
import (
+ "encoding/json"
"errors"
"fmt"
"reflect"
@@ -40,6 +41,7 @@ const (
pointerSeparator = `/`
invalidStart = `JSON pointer must be empty or start with a "` + pointerSeparator
+ notFound = `Can't find the pointer in the document`
)
var jsonPointableType = reflect.TypeOf(new(JSONPointable)).Elem()
@@ -48,13 +50,13 @@ var jsonSetableType = reflect.TypeOf(new(JSONSetable)).Elem()
// JSONPointable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONPointable interface {
- JSONLookup(string) (interface{}, error)
+ JSONLookup(string) (any, error)
}
// JSONSetable is an interface for structs to implement when they need to customize the
// json pointer process
type JSONSetable interface {
- JSONSet(string, interface{}) error
+ JSONSet(string, any) error
}
// New creates a new json pointer for the given string
@@ -81,9 +83,7 @@ func (p *Pointer) parse(jsonPointerString string) error {
err = errors.New(invalidStart)
} else {
referenceTokens := strings.Split(jsonPointerString, pointerSeparator)
- for _, referenceToken := range referenceTokens[1:] {
- p.referenceTokens = append(p.referenceTokens, referenceToken)
- }
+ p.referenceTokens = append(p.referenceTokens, referenceTokens[1:]...)
}
}
@@ -91,26 +91,26 @@ func (p *Pointer) parse(jsonPointerString string) error {
}
// Get uses the pointer to retrieve a value from a JSON document
-func (p *Pointer) Get(document interface{}) (interface{}, reflect.Kind, error) {
+func (p *Pointer) Get(document any) (any, reflect.Kind, error) {
return p.get(document, swag.DefaultJSONNameProvider)
}
// Set uses the pointer to set a value from a JSON document
-func (p *Pointer) Set(document interface{}, value interface{}) (interface{}, error) {
+func (p *Pointer) Set(document any, value any) (any, error) {
return document, p.set(document, value, swag.DefaultJSONNameProvider)
}
// GetForToken gets a value for a json pointer token 1 level deep
-func GetForToken(document interface{}, decodedToken string) (interface{}, reflect.Kind, error) {
+func GetForToken(document any, decodedToken string) (any, reflect.Kind, error) {
return getSingleImpl(document, decodedToken, swag.DefaultJSONNameProvider)
}
// SetForToken gets a value for a json pointer token 1 level deep
-func SetForToken(document interface{}, decodedToken string, value interface{}) (interface{}, error) {
+func SetForToken(document any, decodedToken string, value any) (any, error) {
return document, setSingleImpl(document, value, decodedToken, swag.DefaultJSONNameProvider)
}
-func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+func getSingleImpl(node any, decodedToken string, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
rValue := reflect.Indirect(reflect.ValueOf(node))
kind := rValue.Kind()
@@ -159,7 +159,7 @@ func getSingleImpl(node interface{}, decodedToken string, nameProvider *swag.Nam
}
-func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *swag.NameProvider) error {
+func setSingleImpl(node, data any, decodedToken string, nameProvider *swag.NameProvider) error {
rValue := reflect.Indirect(reflect.ValueOf(node))
if ns, ok := node.(JSONSetable); ok { // pointer impl
@@ -210,7 +210,7 @@ func setSingleImpl(node, data interface{}, decodedToken string, nameProvider *sw
}
-func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interface{}, reflect.Kind, error) {
+func (p *Pointer) get(node any, nameProvider *swag.NameProvider) (any, reflect.Kind, error) {
if nameProvider == nil {
nameProvider = swag.DefaultJSONNameProvider
@@ -241,7 +241,7 @@ func (p *Pointer) get(node interface{}, nameProvider *swag.NameProvider) (interf
return node, kind, nil
}
-func (p *Pointer) set(node, data interface{}, nameProvider *swag.NameProvider) error {
+func (p *Pointer) set(node, data any, nameProvider *swag.NameProvider) error {
knd := reflect.ValueOf(node).Kind()
if knd != reflect.Ptr && knd != reflect.Struct && knd != reflect.Map && knd != reflect.Slice && knd != reflect.Array {
@@ -363,6 +363,127 @@ func (p *Pointer) String() string {
return pointerString
}
+func (p *Pointer) Offset(document string) (int64, error) {
+ dec := json.NewDecoder(strings.NewReader(document))
+ var offset int64
+ for _, ttk := range p.DecodedTokens() {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ offset, err = offsetSingleObject(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ case '[':
+ offset, err = offsetSingleArray(dec, ttk)
+ if err != nil {
+ return 0, err
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ }
+ return offset, nil
+}
+
+func offsetSingleObject(dec *json.Decoder, decodedToken string) (int64, error) {
+ for dec.More() {
+ offset := dec.InputOffset()
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ if err := drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err := drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ case string:
+ if tk == decodedToken {
+ return offset, nil
+ }
+ default:
+ return 0, fmt.Errorf("invalid token %#v", tk)
+ }
+ }
+ return 0, fmt.Errorf("token reference %q not found", decodedToken)
+}
+
+func offsetSingleArray(dec *json.Decoder, decodedToken string) (int64, error) {
+ idx, err := strconv.Atoi(decodedToken)
+ if err != nil {
+ return 0, fmt.Errorf("token reference %q is not a number: %v", decodedToken, err)
+ }
+ var i int
+ for i = 0; i < idx && dec.More(); i++ {
+ tk, err := dec.Token()
+ if err != nil {
+ return 0, err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ if err := drainSingle(dec); err != nil {
+ return 0, err
+ }
+ case '[':
+ if err := drainSingle(dec); err != nil {
+ return 0, err
+ }
+ }
+ }
+ }
+ if !dec.More() {
+ return 0, fmt.Errorf("token reference %q not found", decodedToken)
+ }
+ return dec.InputOffset(), nil
+}
+
+// drainSingle drains a single level of object or array.
+// The decoder has to guarantee the begining delim (i.e. '{' or '[') has been consumed.
+func drainSingle(dec *json.Decoder) error {
+ for dec.More() {
+ tk, err := dec.Token()
+ if err != nil {
+ return err
+ }
+ switch tk := tk.(type) {
+ case json.Delim:
+ switch tk {
+ case '{':
+ if err := drainSingle(dec); err != nil {
+ return err
+ }
+ case '[':
+ if err := drainSingle(dec); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ // Consumes the ending delim
+ if _, err := dec.Token(); err != nil {
+ return err
+ }
+ return nil
+}
+
// Specific JSON pointer encoding here
// ~0 => ~
// ~1 => /
diff --git a/vendor/github.com/labstack/echo/v4/CHANGELOG.md b/vendor/github.com/labstack/echo/v4/CHANGELOG.md
index 83184249..fef7bb98 100644
--- a/vendor/github.com/labstack/echo/v4/CHANGELOG.md
+++ b/vendor/github.com/labstack/echo/v4/CHANGELOG.md
@@ -1,5 +1,38 @@
# Changelog
+## v4.11.1 - 2023-07-16
+
+**Fixes**
+
+* Fix `Gzip` middleware not sending response code for no content responses (404, 301/302 redirects etc) [#2481](https://github.com/labstack/echo/pull/2481)
+
+
+## v4.11.0 - 2023-07-14
+
+
+**Fixes**
+
+* Fixes the proxy middleware concurrency issue of calling the Next() proxy target on Round Robin Balancer [#2409](https://github.com/labstack/echo/pull/2409)
+* Fix `group.RouteNotFound` not working when group has attached middlewares [#2411](https://github.com/labstack/echo/pull/2411)
+* Fix global error handler return error message when message is an error [#2456](https://github.com/labstack/echo/pull/2456)
+* Do not use global timeNow variables [#2477](https://github.com/labstack/echo/pull/2477)
+
+
+**Enhancements**
+
+* Added a optional config variable to disable centralized error handler in recovery middleware [#2410](https://github.com/labstack/echo/pull/2410)
+* refactor: use `strings.ReplaceAll` directly [#2424](https://github.com/labstack/echo/pull/2424)
+* Add support for Go1.20 `http.rwUnwrapper` to Response struct [#2425](https://github.com/labstack/echo/pull/2425)
+* Check whether is nil before invoking centralized error handling [#2429](https://github.com/labstack/echo/pull/2429)
+* Proper colon support in `echo.Reverse` method [#2416](https://github.com/labstack/echo/pull/2416)
+* Fix misuses of a vs an in documentation comments [#2436](https://github.com/labstack/echo/pull/2436)
+* Add link to slog.Handler library for Echo logging into README.md [#2444](https://github.com/labstack/echo/pull/2444)
+* In proxy middleware Support retries of failed proxy requests [#2414](https://github.com/labstack/echo/pull/2414)
+* gofmt fixes to comments [#2452](https://github.com/labstack/echo/pull/2452)
+* gzip response only if it exceeds a minimal length [#2267](https://github.com/labstack/echo/pull/2267)
+* Upgrade packages [#2475](https://github.com/labstack/echo/pull/2475)
+
+
## v4.10.2 - 2023-02-22
**Security**
diff --git a/vendor/github.com/labstack/echo/v4/README.md b/vendor/github.com/labstack/echo/v4/README.md
index fe78b6ed..ea8f30f6 100644
--- a/vendor/github.com/labstack/echo/v4/README.md
+++ b/vendor/github.com/labstack/echo/v4/README.md
@@ -110,6 +110,7 @@ of middlewares in this list.
| [github.com/swaggo/echo-swagger](https://github.com/swaggo/echo-swagger) | Automatically generate RESTful API documentation with [Swagger](https://swagger.io/) 2.0. |
| [github.com/ziflex/lecho](https://github.com/ziflex/lecho) | [Zerolog](https://github.com/rs/zerolog) logging library wrapper for Echo logger interface. |
| [github.com/brpaz/echozap](https://github.com/brpaz/echozap) | Uber´s [Zap](https://github.com/uber-go/zap) logging library wrapper for Echo logger interface. |
+| [github.com/samber/slog-echo](https://github.com/samber/slog-echo) | Go [slog](https://pkg.go.dev/golang.org/x/exp/slog) logging library wrapper for Echo logger interface. |
| [github.com/darkweak/souin/plugins/echo](https://github.com/darkweak/souin/tree/master/plugins/echo) | HTTP cache system based on [Souin](https://github.com/darkweak/souin) to automatically get your endpoints cached. It supports some distributed and non-distributed storage systems depending your needs. |
| [github.com/mikestefanello/pagoda](https://github.com/mikestefanello/pagoda) | Rapid, easy full-stack web development starter kit built with Echo. |
| [github.com/go-woo/protoc-gen-echo](https://github.com/go-woo/protoc-gen-echo) | ProtoBuf generate Echo server side code |
diff --git a/vendor/github.com/labstack/echo/v4/bind.go b/vendor/github.com/labstack/echo/v4/bind.go
index c841ca01..374a2aec 100644
--- a/vendor/github.com/labstack/echo/v4/bind.go
+++ b/vendor/github.com/labstack/echo/v4/bind.go
@@ -114,7 +114,7 @@ func (b *DefaultBinder) Bind(i interface{}, c Context) (err error) {
// Only bind query parameters for GET/DELETE/HEAD to avoid unexpected behavior with destination struct binding from body.
// For example a request URL `&id=1&lang=en` with body `{"id":100,"lang":"de"}` would lead to precedence issues.
// The HTTP method check restores pre-v4.1.11 behavior to avoid these problems (see issue #1670)
- method := c.Request().Method
+ method := c.Request().Method
if method == http.MethodGet || method == http.MethodDelete || method == http.MethodHead {
if err = b.BindQueryParams(c, i); err != nil {
return err
diff --git a/vendor/github.com/labstack/echo/v4/binder.go b/vendor/github.com/labstack/echo/v4/binder.go
index 5a6cf9d9..29cceca0 100644
--- a/vendor/github.com/labstack/echo/v4/binder.go
+++ b/vendor/github.com/labstack/echo/v4/binder.go
@@ -1236,7 +1236,7 @@ func (b *ValueBinder) durations(sourceParam string, values []string, dest *[]tim
// Example: 1609180603 bind to 2020-12-28T18:36:43.000000000+00:00
//
// Note:
-// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
+// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
func (b *ValueBinder) UnixTime(sourceParam string, dest *time.Time) *ValueBinder {
return b.unixTime(sourceParam, dest, false, time.Second)
}
@@ -1247,7 +1247,7 @@ func (b *ValueBinder) UnixTime(sourceParam string, dest *time.Time) *ValueBinder
// Example: 1609180603 bind to 2020-12-28T18:36:43.000000000+00:00
//
// Note:
-// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
+// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
func (b *ValueBinder) MustUnixTime(sourceParam string, dest *time.Time) *ValueBinder {
return b.unixTime(sourceParam, dest, true, time.Second)
}
@@ -1257,7 +1257,7 @@ func (b *ValueBinder) MustUnixTime(sourceParam string, dest *time.Time) *ValueBi
// Example: 1647184410140 bind to 2022-03-13T15:13:30.140000000+00:00
//
// Note:
-// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
+// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
func (b *ValueBinder) UnixTimeMilli(sourceParam string, dest *time.Time) *ValueBinder {
return b.unixTime(sourceParam, dest, false, time.Millisecond)
}
@@ -1268,7 +1268,7 @@ func (b *ValueBinder) UnixTimeMilli(sourceParam string, dest *time.Time) *ValueB
// Example: 1647184410140 bind to 2022-03-13T15:13:30.140000000+00:00
//
// Note:
-// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
+// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
func (b *ValueBinder) MustUnixTimeMilli(sourceParam string, dest *time.Time) *ValueBinder {
return b.unixTime(sourceParam, dest, true, time.Millisecond)
}
@@ -1280,8 +1280,8 @@ func (b *ValueBinder) MustUnixTimeMilli(sourceParam string, dest *time.Time) *Va
// Example: 999999999 binds to 1970-01-01T00:00:00.999999999+00:00
//
// Note:
-// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
-// * Javascript's Number type only has about 53 bits of precision (Number.MAX_SAFE_INTEGER = 9007199254740991). Compare it to 1609180603123456789 in example.
+// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
+// - Javascript's Number type only has about 53 bits of precision (Number.MAX_SAFE_INTEGER = 9007199254740991). Compare it to 1609180603123456789 in example.
func (b *ValueBinder) UnixTimeNano(sourceParam string, dest *time.Time) *ValueBinder {
return b.unixTime(sourceParam, dest, false, time.Nanosecond)
}
@@ -1294,8 +1294,8 @@ func (b *ValueBinder) UnixTimeNano(sourceParam string, dest *time.Time) *ValueBi
// Example: 999999999 binds to 1970-01-01T00:00:00.999999999+00:00
//
// Note:
-// * time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
-// * Javascript's Number type only has about 53 bits of precision (Number.MAX_SAFE_INTEGER = 9007199254740991). Compare it to 1609180603123456789 in example.
+// - time.Time{} (param is empty) and time.Unix(0,0) (param = "0") are not equal
+// - Javascript's Number type only has about 53 bits of precision (Number.MAX_SAFE_INTEGER = 9007199254740991). Compare it to 1609180603123456789 in example.
func (b *ValueBinder) MustUnixTimeNano(sourceParam string, dest *time.Time) *ValueBinder {
return b.unixTime(sourceParam, dest, true, time.Nanosecond)
}
diff --git a/vendor/github.com/labstack/echo/v4/context.go b/vendor/github.com/labstack/echo/v4/context.go
index b3a7ce8d..27da28a9 100644
--- a/vendor/github.com/labstack/echo/v4/context.go
+++ b/vendor/github.com/labstack/echo/v4/context.go
@@ -100,8 +100,8 @@ type (
// Set saves data in the context.
Set(key string, val interface{})
- // Bind binds the request body into provided type `i`. The default binder
- // does it based on Content-Type header.
+ // Bind binds path params, query params and the request body into provided type `i`. The default binder
+ // binds body based on Content-Type header.
Bind(i interface{}) error
// Validate validates provided `i`. It is usually called after `Context#Bind()`.
diff --git a/vendor/github.com/labstack/echo/v4/echo.go b/vendor/github.com/labstack/echo/v4/echo.go
index 085a3a7f..22a5b7af 100644
--- a/vendor/github.com/labstack/echo/v4/echo.go
+++ b/vendor/github.com/labstack/echo/v4/echo.go
@@ -39,6 +39,7 @@ package echo
import (
stdContext "context"
"crypto/tls"
+ "encoding/json"
"errors"
"fmt"
"io"
@@ -258,7 +259,7 @@ const (
const (
// Version of Echo
- Version = "4.10.2"
+ Version = "4.11.1"
website = "https://echo.labstack.com"
// http://patorjk.com/software/taag/#p=display&f=Small%20Slant&t=Echo
banner = `
@@ -438,12 +439,18 @@ func (e *Echo) DefaultHTTPErrorHandler(err error, c Context) {
// Issue #1426
code := he.Code
message := he.Message
- if m, ok := he.Message.(string); ok {
+
+ switch m := he.Message.(type) {
+ case string:
if e.Debug {
message = Map{"message": m, "error": err.Error()}
} else {
message = Map{"message": m}
}
+ case json.Marshaler:
+ // do nothing - this type knows how to format itself to JSON
+ case error:
+ message = Map{"message": m.Error()}
}
// Send response
@@ -614,7 +621,7 @@ func (e *Echo) URL(h HandlerFunc, params ...interface{}) string {
return e.URI(h, params...)
}
-// Reverse generates an URL from route name and provided parameters.
+// Reverse generates a URL from route name and provided parameters.
func (e *Echo) Reverse(name string, params ...interface{}) string {
return e.router.Reverse(name, params...)
}
diff --git a/vendor/github.com/labstack/echo/v4/group.go b/vendor/github.com/labstack/echo/v4/group.go
index 28ce0dd9..749a5caa 100644
--- a/vendor/github.com/labstack/echo/v4/group.go
+++ b/vendor/github.com/labstack/echo/v4/group.go
@@ -23,10 +23,12 @@ func (g *Group) Use(middleware ...MiddlewareFunc) {
if len(g.middleware) == 0 {
return
}
- // Allow all requests to reach the group as they might get dropped if router
- // doesn't find a match, making none of the group middleware process.
- g.Any("", NotFoundHandler)
- g.Any("/*", NotFoundHandler)
+ // group level middlewares are different from Echo `Pre` and `Use` middlewares (those are global). Group level middlewares
+ // are only executed if they are added to the Router with route.
+ // So we register catch all route (404 is a safe way to emulate route match) for this group and now during routing the
+ // Router would find route to match our request path and therefore guarantee the middleware(s) will get executed.
+ g.RouteNotFound("", NotFoundHandler)
+ g.RouteNotFound("/*", NotFoundHandler)
}
// CONNECT implements `Echo#CONNECT()` for sub-routes within the Group.
diff --git a/vendor/github.com/labstack/echo/v4/middleware/basic_auth.go b/vendor/github.com/labstack/echo/v4/middleware/basic_auth.go
index 52ef1042..f9e8caaf 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/basic_auth.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/basic_auth.go
@@ -2,9 +2,9 @@ package middleware
import (
"encoding/base64"
+ "net/http"
"strconv"
"strings"
- "net/http"
"github.com/labstack/echo/v4"
)
diff --git a/vendor/github.com/labstack/echo/v4/middleware/compress.go b/vendor/github.com/labstack/echo/v4/middleware/compress.go
index 9e5f6106..3e9bd320 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/compress.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/compress.go
@@ -2,6 +2,7 @@ package middleware
import (
"bufio"
+ "bytes"
"compress/gzip"
"io"
"net"
@@ -21,12 +22,30 @@ type (
// Gzip compression level.
// Optional. Default value -1.
Level int `yaml:"level"`
+
+ // Length threshold before gzip compression is applied.
+ // Optional. Default value 0.
+ //
+ // Most of the time you will not need to change the default. Compressing
+ // a short response might increase the transmitted data because of the
+ // gzip format overhead. Compressing the response will also consume CPU
+ // and time on the server and the client (for decompressing). Depending on
+ // your use case such a threshold might be useful.
+ //
+ // See also:
+ // https://webmasters.stackexchange.com/questions/31750/what-is-recommended-minimum-object-size-for-gzip-performance-benefits
+ MinLength int
}
gzipResponseWriter struct {
io.Writer
http.ResponseWriter
- wroteBody bool
+ wroteHeader bool
+ wroteBody bool
+ minLength int
+ minLengthExceeded bool
+ buffer *bytes.Buffer
+ code int
}
)
@@ -37,8 +56,9 @@ const (
var (
// DefaultGzipConfig is the default Gzip middleware config.
DefaultGzipConfig = GzipConfig{
- Skipper: DefaultSkipper,
- Level: -1,
+ Skipper: DefaultSkipper,
+ Level: -1,
+ MinLength: 0,
}
)
@@ -58,8 +78,12 @@ func GzipWithConfig(config GzipConfig) echo.MiddlewareFunc {
if config.Level == 0 {
config.Level = DefaultGzipConfig.Level
}
+ if config.MinLength < 0 {
+ config.MinLength = DefaultGzipConfig.MinLength
+ }
pool := gzipCompressPool(config)
+ bpool := bufferPool()
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
@@ -70,7 +94,6 @@ func GzipWithConfig(config GzipConfig) echo.MiddlewareFunc {
res := c.Response()
res.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding)
if strings.Contains(c.Request().Header.Get(echo.HeaderAcceptEncoding), gzipScheme) {
- res.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
i := pool.Get()
w, ok := i.(*gzip.Writer)
if !ok {
@@ -78,19 +101,38 @@ func GzipWithConfig(config GzipConfig) echo.MiddlewareFunc {
}
rw := res.Writer
w.Reset(rw)
- grw := &gzipResponseWriter{Writer: w, ResponseWriter: rw}
+
+ buf := bpool.Get().(*bytes.Buffer)
+ buf.Reset()
+
+ grw := &gzipResponseWriter{Writer: w, ResponseWriter: rw, minLength: config.MinLength, buffer: buf}
defer func() {
+ // There are different reasons for cases when we have not yet written response to the client and now need to do so.
+ // a) handler response had only response code and no response body (ala 404 or redirects etc). Response code need to be written now.
+ // b) body is shorter than our minimum length threshold and being buffered currently and needs to be written
if !grw.wroteBody {
if res.Header().Get(echo.HeaderContentEncoding) == gzipScheme {
res.Header().Del(echo.HeaderContentEncoding)
}
+ if grw.wroteHeader {
+ rw.WriteHeader(grw.code)
+ }
// We have to reset response to it's pristine state when
// nothing is written to body or error is returned.
// See issue #424, #407.
res.Writer = rw
w.Reset(io.Discard)
+ } else if !grw.minLengthExceeded {
+ // Write uncompressed response
+ res.Writer = rw
+ if grw.wroteHeader {
+ grw.ResponseWriter.WriteHeader(grw.code)
+ }
+ grw.buffer.WriteTo(rw)
+ w.Reset(io.Discard)
}
w.Close()
+ bpool.Put(buf)
pool.Put(w)
}()
res.Writer = grw
@@ -102,7 +144,11 @@ func GzipWithConfig(config GzipConfig) echo.MiddlewareFunc {
func (w *gzipResponseWriter) WriteHeader(code int) {
w.Header().Del(echo.HeaderContentLength) // Issue #444
- w.ResponseWriter.WriteHeader(code)
+
+ w.wroteHeader = true
+
+ // Delay writing of the header until we know if we'll actually compress the response
+ w.code = code
}
func (w *gzipResponseWriter) Write(b []byte) (int, error) {
@@ -110,10 +156,40 @@ func (w *gzipResponseWriter) Write(b []byte) (int, error) {
w.Header().Set(echo.HeaderContentType, http.DetectContentType(b))
}
w.wroteBody = true
+
+ if !w.minLengthExceeded {
+ n, err := w.buffer.Write(b)
+
+ if w.buffer.Len() >= w.minLength {
+ w.minLengthExceeded = true
+
+ // The minimum length is exceeded, add Content-Encoding header and write the header
+ w.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
+ if w.wroteHeader {
+ w.ResponseWriter.WriteHeader(w.code)
+ }
+
+ return w.Writer.Write(w.buffer.Bytes())
+ }
+
+ return n, err
+ }
+
return w.Writer.Write(b)
}
func (w *gzipResponseWriter) Flush() {
+ if !w.minLengthExceeded {
+ // Enforce compression because we will not know how much more data will come
+ w.minLengthExceeded = true
+ w.Header().Set(echo.HeaderContentEncoding, gzipScheme) // Issue #806
+ if w.wroteHeader {
+ w.ResponseWriter.WriteHeader(w.code)
+ }
+
+ w.Writer.Write(w.buffer.Bytes())
+ }
+
w.Writer.(*gzip.Writer).Flush()
if flusher, ok := w.ResponseWriter.(http.Flusher); ok {
flusher.Flush()
@@ -142,3 +218,12 @@ func gzipCompressPool(config GzipConfig) sync.Pool {
},
}
}
+
+func bufferPool() sync.Pool {
+ return sync.Pool{
+ New: func() interface{} {
+ b := &bytes.Buffer{}
+ return b
+ },
+ }
+}
diff --git a/vendor/github.com/labstack/echo/v4/middleware/cors.go b/vendor/github.com/labstack/echo/v4/middleware/cors.go
index 149de347..6ddb540a 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/cors.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/cors.go
@@ -150,8 +150,8 @@ func CORSWithConfig(config CORSConfig) echo.MiddlewareFunc {
allowOriginPatterns := []string{}
for _, origin := range config.AllowOrigins {
pattern := regexp.QuoteMeta(origin)
- pattern = strings.Replace(pattern, "\\*", ".*", -1)
- pattern = strings.Replace(pattern, "\\?", ".", -1)
+ pattern = strings.ReplaceAll(pattern, "\\*", ".*")
+ pattern = strings.ReplaceAll(pattern, "\\?", ".")
pattern = "^" + pattern + "$"
allowOriginPatterns = append(allowOriginPatterns, pattern)
}
diff --git a/vendor/github.com/labstack/echo/v4/middleware/decompress.go b/vendor/github.com/labstack/echo/v4/middleware/decompress.go
index 88ec7098..a73c9738 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/decompress.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/decompress.go
@@ -20,7 +20,7 @@ type (
}
)
-//GZIPEncoding content-encoding header if set to "gzip", decompress body contents.
+// GZIPEncoding content-encoding header if set to "gzip", decompress body contents.
const GZIPEncoding string = "gzip"
// Decompressor is used to get the sync.Pool used by the middleware to get Gzip readers
@@ -44,12 +44,12 @@ func (d *DefaultGzipDecompressPool) gzipDecompressPool() sync.Pool {
return sync.Pool{New: func() interface{} { return new(gzip.Reader) }}
}
-//Decompress decompresses request body based if content encoding type is set to "gzip" with default config
+// Decompress decompresses request body based if content encoding type is set to "gzip" with default config
func Decompress() echo.MiddlewareFunc {
return DecompressWithConfig(DefaultDecompressConfig)
}
-//DecompressWithConfig decompresses request body based if content encoding type is set to "gzip" with config
+// DecompressWithConfig decompresses request body based if content encoding type is set to "gzip" with config
func DecompressWithConfig(config DecompressConfig) echo.MiddlewareFunc {
// Defaults
if config.Skipper == nil {
diff --git a/vendor/github.com/labstack/echo/v4/middleware/middleware.go b/vendor/github.com/labstack/echo/v4/middleware/middleware.go
index f250ca49..664f71f4 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/middleware.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/middleware.go
@@ -38,9 +38,9 @@ func rewriteRulesRegex(rewrite map[string]string) map[*regexp.Regexp]string {
rulesRegex := map[*regexp.Regexp]string{}
for k, v := range rewrite {
k = regexp.QuoteMeta(k)
- k = strings.Replace(k, `\*`, "(.*?)", -1)
+ k = strings.ReplaceAll(k, `\*`, "(.*?)")
if strings.HasPrefix(k, `\^`) {
- k = strings.Replace(k, `\^`, "^", -1)
+ k = strings.ReplaceAll(k, `\^`, "^")
}
k = k + "$"
rulesRegex[regexp.MustCompile(k)] = v
diff --git a/vendor/github.com/labstack/echo/v4/middleware/proxy.go b/vendor/github.com/labstack/echo/v4/middleware/proxy.go
index d2cd2aa6..e4f98d9e 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/proxy.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/proxy.go
@@ -12,7 +12,6 @@ import (
"regexp"
"strings"
"sync"
- "sync/atomic"
"time"
"github.com/labstack/echo/v4"
@@ -30,6 +29,33 @@ type (
// Required.
Balancer ProxyBalancer
+ // RetryCount defines the number of times a failed proxied request should be retried
+ // using the next available ProxyTarget. Defaults to 0, meaning requests are never retried.
+ RetryCount int
+
+ // RetryFilter defines a function used to determine if a failed request to a
+ // ProxyTarget should be retried. The RetryFilter will only be called when the number
+ // of previous retries is less than RetryCount. If the function returns true, the
+ // request will be retried. The provided error indicates the reason for the request
+ // failure. When the ProxyTarget is unavailable, the error will be an instance of
+ // echo.HTTPError with a Code of http.StatusBadGateway. In all other cases, the error
+ // will indicate an internal error in the Proxy middleware. When a RetryFilter is not
+ // specified, all requests that fail with http.StatusBadGateway will be retried. A custom
+ // RetryFilter can be provided to only retry specific requests. Note that RetryFilter is
+ // only called when the request to the target fails, or an internal error in the Proxy
+ // middleware has occurred. Successful requests that return a non-200 response code cannot
+ // be retried.
+ RetryFilter func(c echo.Context, e error) bool
+
+ // ErrorHandler defines a function which can be used to return custom errors from
+ // the Proxy middleware. ErrorHandler is only invoked when there has been
+ // either an internal error in the Proxy middleware or the ProxyTarget is
+ // unavailable. Due to the way requests are proxied, ErrorHandler is not invoked
+ // when a ProxyTarget returns a non-200 response. In these cases, the response
+ // is already written so errors cannot be modified. ErrorHandler is only
+ // invoked after all retry attempts have been exhausted.
+ ErrorHandler func(c echo.Context, err error) error
+
// Rewrite defines URL path rewrite rules. The values captured in asterisk can be
// retrieved by index e.g. $1, $2 and so on.
// Examples:
@@ -72,26 +98,28 @@ type (
Next(echo.Context) *ProxyTarget
}
- // TargetProvider defines an interface that gives the opportunity for balancer to return custom errors when selecting target.
+ // TargetProvider defines an interface that gives the opportunity for balancer
+ // to return custom errors when selecting target.
TargetProvider interface {
NextTarget(echo.Context) (*ProxyTarget, error)
}
commonBalancer struct {
targets []*ProxyTarget
- mutex sync.RWMutex
+ mutex sync.Mutex
}
// RandomBalancer implements a random load balancing technique.
randomBalancer struct {
- *commonBalancer
+ commonBalancer
random *rand.Rand
}
// RoundRobinBalancer implements a round-robin load balancing technique.
roundRobinBalancer struct {
- *commonBalancer
- i uint32
+ commonBalancer
+ // tracking the index on `targets` slice for the next `*ProxyTarget` to be used
+ i int
}
)
@@ -107,14 +135,14 @@ func proxyRaw(t *ProxyTarget, c echo.Context) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
in, _, err := c.Response().Hijack()
if err != nil {
- c.Set("_error", fmt.Sprintf("proxy raw, hijack error=%v, url=%s", t.URL, err))
+ c.Set("_error", fmt.Errorf("proxy raw, hijack error=%w, url=%s", err, t.URL))
return
}
defer in.Close()
out, err := net.Dial("tcp", t.URL.Host)
if err != nil {
- c.Set("_error", echo.NewHTTPError(http.StatusBadGateway, fmt.Sprintf("proxy raw, dial error=%v, url=%s", t.URL, err)))
+ c.Set("_error", echo.NewHTTPError(http.StatusBadGateway, fmt.Sprintf("proxy raw, dial error=%v, url=%s", err, t.URL)))
return
}
defer out.Close()
@@ -122,7 +150,7 @@ func proxyRaw(t *ProxyTarget, c echo.Context) http.Handler {
// Write header
err = r.Write(out)
if err != nil {
- c.Set("_error", echo.NewHTTPError(http.StatusBadGateway, fmt.Sprintf("proxy raw, request header copy error=%v, url=%s", t.URL, err)))
+ c.Set("_error", echo.NewHTTPError(http.StatusBadGateway, fmt.Sprintf("proxy raw, request header copy error=%v, url=%s", err, t.URL)))
return
}
@@ -136,39 +164,44 @@ func proxyRaw(t *ProxyTarget, c echo.Context) http.Handler {
go cp(in, out)
err = <-errCh
if err != nil && err != io.EOF {
- c.Set("_error", fmt.Errorf("proxy raw, copy body error=%v, url=%s", t.URL, err))
+ c.Set("_error", fmt.Errorf("proxy raw, copy body error=%w, url=%s", err, t.URL))
}
})
}
// NewRandomBalancer returns a random proxy balancer.
func NewRandomBalancer(targets []*ProxyTarget) ProxyBalancer {
- b := &randomBalancer{commonBalancer: new(commonBalancer)}
+ b := randomBalancer{}
b.targets = targets
- return b
+ b.random = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
+ return &b
}
// NewRoundRobinBalancer returns a round-robin proxy balancer.
func NewRoundRobinBalancer(targets []*ProxyTarget) ProxyBalancer {
- b := &roundRobinBalancer{commonBalancer: new(commonBalancer)}
+ b := roundRobinBalancer{}
b.targets = targets
- return b
+ return &b
}
-// AddTarget adds an upstream target to the list.
+// AddTarget adds an upstream target to the list and returns `true`.
+//
+// However, if a target with the same name already exists then the operation is aborted returning `false`.
func (b *commonBalancer) AddTarget(target *ProxyTarget) bool {
+ b.mutex.Lock()
+ defer b.mutex.Unlock()
for _, t := range b.targets {
if t.Name == target.Name {
return false
}
}
- b.mutex.Lock()
- defer b.mutex.Unlock()
b.targets = append(b.targets, target)
return true
}
-// RemoveTarget removes an upstream target from the list.
+// RemoveTarget removes an upstream target from the list by name.
+//
+// Returns `true` on success, `false` if no target with the name is found.
func (b *commonBalancer) RemoveTarget(name string) bool {
b.mutex.Lock()
defer b.mutex.Unlock()
@@ -182,21 +215,58 @@ func (b *commonBalancer) RemoveTarget(name string) bool {
}
// Next randomly returns an upstream target.
+//
+// Note: `nil` is returned in case upstream target list is empty.
func (b *randomBalancer) Next(c echo.Context) *ProxyTarget {
- if b.random == nil {
- b.random = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
+ b.mutex.Lock()
+ defer b.mutex.Unlock()
+ if len(b.targets) == 0 {
+ return nil
+ } else if len(b.targets) == 1 {
+ return b.targets[0]
}
- b.mutex.RLock()
- defer b.mutex.RUnlock()
return b.targets[b.random.Intn(len(b.targets))]
}
-// Next returns an upstream target using round-robin technique.
+// Next returns an upstream target using round-robin technique. In the case
+// where a previously failed request is being retried, the round-robin
+// balancer will attempt to use the next target relative to the original
+// request. If the list of targets held by the balancer is modified while a
+// failed request is being retried, it is possible that the balancer will
+// return the original failed target.
+//
+// Note: `nil` is returned in case upstream target list is empty.
func (b *roundRobinBalancer) Next(c echo.Context) *ProxyTarget {
- b.i = b.i % uint32(len(b.targets))
- t := b.targets[b.i]
- atomic.AddUint32(&b.i, 1)
- return t
+ b.mutex.Lock()
+ defer b.mutex.Unlock()
+ if len(b.targets) == 0 {
+ return nil
+ } else if len(b.targets) == 1 {
+ return b.targets[0]
+ }
+
+ var i int
+ const lastIdxKey = "_round_robin_last_index"
+ // This request is a retry, start from the index of the previous
+ // target to ensure we don't attempt to retry the request with
+ // the same failed target
+ if c.Get(lastIdxKey) != nil {
+ i = c.Get(lastIdxKey).(int)
+ i++
+ if i >= len(b.targets) {
+ i = 0
+ }
+ } else {
+ // This is a first time request, use the global index
+ if b.i >= len(b.targets) {
+ b.i = 0
+ }
+ i = b.i
+ b.i++
+ }
+
+ c.Set(lastIdxKey, i)
+ return b.targets[i]
}
// Proxy returns a Proxy middleware.
@@ -211,14 +281,26 @@ func Proxy(balancer ProxyBalancer) echo.MiddlewareFunc {
// ProxyWithConfig returns a Proxy middleware with config.
// See: `Proxy()`
func ProxyWithConfig(config ProxyConfig) echo.MiddlewareFunc {
+ if config.Balancer == nil {
+ panic("echo: proxy middleware requires balancer")
+ }
// Defaults
if config.Skipper == nil {
config.Skipper = DefaultProxyConfig.Skipper
}
- if config.Balancer == nil {
- panic("echo: proxy middleware requires balancer")
+ if config.RetryFilter == nil {
+ config.RetryFilter = func(c echo.Context, e error) bool {
+ if httpErr, ok := e.(*echo.HTTPError); ok {
+ return httpErr.Code == http.StatusBadGateway
+ }
+ return false
+ }
+ }
+ if config.ErrorHandler == nil {
+ config.ErrorHandler = func(c echo.Context, err error) error {
+ return err
+ }
}
-
if config.Rewrite != nil {
if config.RegexRewrite == nil {
config.RegexRewrite = make(map[*regexp.Regexp]string)
@@ -229,28 +311,17 @@ func ProxyWithConfig(config ProxyConfig) echo.MiddlewareFunc {
}
provider, isTargetProvider := config.Balancer.(TargetProvider)
+
return func(next echo.HandlerFunc) echo.HandlerFunc {
- return func(c echo.Context) (err error) {
+ return func(c echo.Context) error {
if config.Skipper(c) {
return next(c)
}
req := c.Request()
res := c.Response()
-
- var tgt *ProxyTarget
- if isTargetProvider {
- tgt, err = provider.NextTarget(c)
- if err != nil {
- return err
- }
- } else {
- tgt = config.Balancer.Next(c)
- }
- c.Set(config.ContextKey, tgt)
-
if err := rewriteURL(config.RegexRewrite, req); err != nil {
- return err
+ return config.ErrorHandler(c, err)
}
// Fix header
@@ -266,19 +337,49 @@ func ProxyWithConfig(config ProxyConfig) echo.MiddlewareFunc {
req.Header.Set(echo.HeaderXForwardedFor, c.RealIP())
}
- // Proxy
- switch {
- case c.IsWebSocket():
- proxyRaw(tgt, c).ServeHTTP(res, req)
- case req.Header.Get(echo.HeaderAccept) == "text/event-stream":
- default:
- proxyHTTP(tgt, c, config).ServeHTTP(res, req)
- }
- if e, ok := c.Get("_error").(error); ok {
- err = e
- }
+ retries := config.RetryCount
+ for {
+ var tgt *ProxyTarget
+ var err error
+ if isTargetProvider {
+ tgt, err = provider.NextTarget(c)
+ if err != nil {
+ return config.ErrorHandler(c, err)
+ }
+ } else {
+ tgt = config.Balancer.Next(c)
+ }
- return
+ c.Set(config.ContextKey, tgt)
+
+			// If retrying a failed request, clear any previous errors from
+			// context here so that balancers have the option to check for
+			// errors that occurred using previous target
+ if retries < config.RetryCount {
+ c.Set("_error", nil)
+ }
+
+ // Proxy
+ switch {
+ case c.IsWebSocket():
+ proxyRaw(tgt, c).ServeHTTP(res, req)
+ case req.Header.Get(echo.HeaderAccept) == "text/event-stream":
+ default:
+ proxyHTTP(tgt, c, config).ServeHTTP(res, req)
+ }
+
+ err, hasError := c.Get("_error").(error)
+ if !hasError {
+ return nil
+ }
+
+ retry := retries > 0 && config.RetryFilter(c, err)
+ if !retry {
+ return config.ErrorHandler(c, err)
+ }
+
+ retries--
+ }
}
}
}
diff --git a/vendor/github.com/labstack/echo/v4/middleware/rate_limiter.go b/vendor/github.com/labstack/echo/v4/middleware/rate_limiter.go
index f7fae83c..1d24df52 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/rate_limiter.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/rate_limiter.go
@@ -160,6 +160,8 @@ type (
burst int
expiresIn time.Duration
lastCleanup time.Time
+
+ timeNow func() time.Time
}
// Visitor signifies a unique user's limiter details
Visitor struct {
@@ -219,7 +221,8 @@ func NewRateLimiterMemoryStoreWithConfig(config RateLimiterMemoryStoreConfig) (s
store.burst = int(config.Rate)
}
store.visitors = make(map[string]*Visitor)
- store.lastCleanup = now()
+ store.timeNow = time.Now
+ store.lastCleanup = store.timeNow()
return
}
@@ -244,12 +247,13 @@ func (store *RateLimiterMemoryStore) Allow(identifier string) (bool, error) {
limiter.Limiter = rate.NewLimiter(store.rate, store.burst)
store.visitors[identifier] = limiter
}
- limiter.lastSeen = now()
- if now().Sub(store.lastCleanup) > store.expiresIn {
+ now := store.timeNow()
+ limiter.lastSeen = now
+ if now.Sub(store.lastCleanup) > store.expiresIn {
store.cleanupStaleVisitors()
}
store.mutex.Unlock()
- return limiter.AllowN(now(), 1), nil
+ return limiter.AllowN(store.timeNow(), 1), nil
}
/*
@@ -258,14 +262,9 @@ of users who haven't visited again after the configured expiry time has elapsed
*/
func (store *RateLimiterMemoryStore) cleanupStaleVisitors() {
for id, visitor := range store.visitors {
- if now().Sub(visitor.lastSeen) > store.expiresIn {
+ if store.timeNow().Sub(visitor.lastSeen) > store.expiresIn {
delete(store.visitors, id)
}
}
- store.lastCleanup = now()
+ store.lastCleanup = store.timeNow()
}
-
-/*
-actual time method which is mocked in test file
-*/
-var now = time.Now
diff --git a/vendor/github.com/labstack/echo/v4/middleware/recover.go b/vendor/github.com/labstack/echo/v4/middleware/recover.go
index 7b612853..0466cfe5 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/recover.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/recover.go
@@ -37,19 +37,26 @@ type (
// LogErrorFunc defines a function for custom logging in the middleware.
// If it's set you don't need to provide LogLevel for config.
+ // If this function returns nil, the centralized HTTPErrorHandler will not be called.
LogErrorFunc LogErrorFunc
+
+ // DisableErrorHandler disables the call to centralized HTTPErrorHandler.
+ // The recovered error is then passed back to upstream middleware, instead of swallowing the error.
+ // Optional. Default value false.
+ DisableErrorHandler bool `yaml:"disable_error_handler"`
}
)
var (
// DefaultRecoverConfig is the default Recover middleware config.
DefaultRecoverConfig = RecoverConfig{
- Skipper: DefaultSkipper,
- StackSize: 4 << 10, // 4 KB
- DisableStackAll: false,
- DisablePrintStack: false,
- LogLevel: 0,
- LogErrorFunc: nil,
+ Skipper: DefaultSkipper,
+ StackSize: 4 << 10, // 4 KB
+ DisableStackAll: false,
+ DisablePrintStack: false,
+ LogLevel: 0,
+ LogErrorFunc: nil,
+ DisableErrorHandler: false,
}
)
@@ -71,7 +78,7 @@ func RecoverWithConfig(config RecoverConfig) echo.MiddlewareFunc {
}
return func(next echo.HandlerFunc) echo.HandlerFunc {
- return func(c echo.Context) error {
+ return func(c echo.Context) (returnErr error) {
if config.Skipper(c) {
return next(c)
}
@@ -113,7 +120,12 @@ func RecoverWithConfig(config RecoverConfig) echo.MiddlewareFunc {
c.Logger().Print(msg)
}
}
- c.Error(err)
+
+ if err != nil && !config.DisableErrorHandler {
+ c.Error(err)
+ } else {
+ returnErr = err
+ }
}
}()
return next(c)
diff --git a/vendor/github.com/labstack/echo/v4/middleware/request_logger.go b/vendor/github.com/labstack/echo/v4/middleware/request_logger.go
index b9e36925..ce76230c 100644
--- a/vendor/github.com/labstack/echo/v4/middleware/request_logger.go
+++ b/vendor/github.com/labstack/echo/v4/middleware/request_logger.go
@@ -225,7 +225,7 @@ func (config RequestLoggerConfig) ToMiddleware() (echo.MiddlewareFunc, error) {
if config.Skipper == nil {
config.Skipper = DefaultSkipper
}
- now = time.Now
+ now := time.Now
if config.timeNow != nil {
now = config.timeNow
}
@@ -257,7 +257,7 @@ func (config RequestLoggerConfig) ToMiddleware() (echo.MiddlewareFunc, error) {
config.BeforeNextFunc(c)
}
err := next(c)
- if config.HandleError {
+ if err != nil && config.HandleError {
c.Error(err)
}
diff --git a/vendor/github.com/labstack/echo/v4/response.go b/vendor/github.com/labstack/echo/v4/response.go
index 84f7c9e7..d9c9aa6e 100644
--- a/vendor/github.com/labstack/echo/v4/response.go
+++ b/vendor/github.com/labstack/echo/v4/response.go
@@ -94,6 +94,13 @@ func (r *Response) Hijack() (net.Conn, *bufio.ReadWriter, error) {
return r.Writer.(http.Hijacker).Hijack()
}
+// Unwrap returns the original http.ResponseWriter.
+// ResponseController can be used to access the original http.ResponseWriter.
+// See [https://go.dev/blog/go1.20]
+func (r *Response) Unwrap() http.ResponseWriter {
+ return r.Writer
+}
+
func (r *Response) reset(w http.ResponseWriter) {
r.beforeFuncs = nil
r.afterFuncs = nil
diff --git a/vendor/github.com/labstack/echo/v4/router.go b/vendor/github.com/labstack/echo/v4/router.go
index 597660d3..ee6f3fa4 100644
--- a/vendor/github.com/labstack/echo/v4/router.go
+++ b/vendor/github.com/labstack/echo/v4/router.go
@@ -151,7 +151,7 @@ func (r *Router) Routes() []*Route {
return routes
}
-// Reverse generates an URL from route name and provided parameters.
+// Reverse generates a URL from route name and provided parameters.
func (r *Router) Reverse(name string, params ...interface{}) string {
uri := new(bytes.Buffer)
ln := len(params)
@@ -159,7 +159,12 @@ func (r *Router) Reverse(name string, params ...interface{}) string {
for _, route := range r.routes {
if route.Name == name {
for i, l := 0, len(route.Path); i < l; i++ {
- if (route.Path[i] == ':' || route.Path[i] == '*') && n < ln {
+ hasBackslash := route.Path[i] == '\\'
+ if hasBackslash && i+1 < l && route.Path[i+1] == ':' {
+ i++ // backslash before colon escapes that colon. in that case skip backslash
+ }
+ if n < ln && (route.Path[i] == '*' || (!hasBackslash && route.Path[i] == ':')) {
+ // in case of `*` wildcard or `:` (unescaped colon) param we replace everything till next slash or end of path
for ; i < l && route.Path[i] != '/'; i++ {
}
uri.WriteString(fmt.Sprintf("%v", params[n]))
diff --git a/vendor/github.com/minio/minio-go/v7/api-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
index e1a34e00..97a6f80b 100644
--- a/vendor/github.com/minio/minio-go/v7/api-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-datatypes.go
@@ -220,6 +220,11 @@ type ObjectInfo struct {
ChecksumSHA1 string
ChecksumSHA256 string
+ Internal *struct {
+ K int // Data blocks
+ M int // Parity blocks
+ } `xml:"Internal"`
+
// Error
Err error `json:"-"`
}
diff --git a/vendor/github.com/minio/minio-go/v7/api-list.go b/vendor/github.com/minio/minio-go/v7/api-list.go
index 627811cf..3b50f61d 100644
--- a/vendor/github.com/minio/minio-go/v7/api-list.go
+++ b/vendor/github.com/minio/minio-go/v7/api-list.go
@@ -97,7 +97,15 @@ func (c *Client) listObjectsV2(ctx context.Context, bucketName string, opts List
// Initiate list objects goroutine here.
go func(objectStatCh chan<- ObjectInfo) {
- defer close(objectStatCh)
+ defer func() {
+ if contextCanceled(ctx) {
+ objectStatCh <- ObjectInfo{
+ Err: ctx.Err(),
+ }
+ }
+ close(objectStatCh)
+ }()
+
// Save continuationToken for next request.
var continuationToken string
for {
@@ -304,7 +312,14 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb
// Initiate list objects goroutine here.
go func(objectStatCh chan<- ObjectInfo) {
- defer close(objectStatCh)
+ defer func() {
+ if contextCanceled(ctx) {
+ objectStatCh <- ObjectInfo{
+ Err: ctx.Err(),
+ }
+ }
+ close(objectStatCh)
+ }()
marker := opts.StartAfter
for {
@@ -321,6 +336,7 @@ func (c *Client) listObjects(ctx context.Context, bucketName string, opts ListOb
for _, object := range result.Contents {
// Save the marker.
marker = object.Key
+ object.ETag = trimEtag(object.ETag)
select {
// Send object content.
case objectStatCh <- object:
@@ -393,7 +409,14 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
// Initiate list objects goroutine here.
go func(resultCh chan<- ObjectInfo) {
- defer close(resultCh)
+ defer func() {
+ if contextCanceled(ctx) {
+ resultCh <- ObjectInfo{
+ Err: ctx.Err(),
+ }
+ }
+ close(resultCh)
+ }()
var (
keyMarker = ""
@@ -424,6 +447,7 @@ func (c *Client) listObjectVersions(ctx context.Context, bucketName string, opts
IsDeleteMarker: version.isDeleteMarker,
UserTags: version.UserTags,
UserMetadata: version.UserMetadata,
+ Internal: version.Internal,
}
select {
// Send object version info.
@@ -698,6 +722,10 @@ func (o *ListObjectsOptions) Set(key, value string) {
// for object := range api.ListObjects(ctx, "mytestbucket", minio.ListObjectsOptions{Prefix: "starthere", Recursive:true}) {
// fmt.Println(object)
// }
+//
+// If the caller cancels the context, the last entry on the 'chan ObjectInfo' will be ctx.Err().
+// The caller must drain the channel entirely and wait until the channel is closed before
+// proceeding; without doing so you might leak goroutines.
func (c *Client) ListObjects(ctx context.Context, bucketName string, opts ListObjectsOptions) <-chan ObjectInfo {
if opts.WithVersions {
return c.listObjectVersions(ctx, bucketName, opts)
@@ -738,6 +766,16 @@ func (c *Client) ListIncompleteUploads(ctx context.Context, bucketName, objectPr
return c.listIncompleteUploads(ctx, bucketName, objectPrefix, recursive)
}
+// contextCanceled returns whether a context is canceled.
+func contextCanceled(ctx context.Context) bool {
+ select {
+ case <-ctx.Done():
+ return true
+ default:
+ return false
+ }
+}
+
// listIncompleteUploads lists all incomplete uploads.
func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPrefix string, recursive bool) <-chan ObjectMultipartInfo {
// Allocate channel for multipart uploads.
@@ -765,7 +803,15 @@ func (c *Client) listIncompleteUploads(ctx context.Context, bucketName, objectPr
return objectMultipartStatCh
}
go func(objectMultipartStatCh chan<- ObjectMultipartInfo) {
- defer close(objectMultipartStatCh)
+ defer func() {
+ if contextCanceled(ctx) {
+ objectMultipartStatCh <- ObjectMultipartInfo{
+ Err: ctx.Err(),
+ }
+ }
+ close(objectMultipartStatCh)
+ }()
+
// object and upload ID marker for future requests.
var objectMarker string
var uploadIDMarker string
diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
index 6e784be4..1527b746 100644
--- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
+++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go
@@ -93,6 +93,11 @@ type Version struct {
// Only returned by MinIO servers.
UserTags URLMap `json:"userTags,omitempty" xml:"UserTags"`
+ Internal *struct {
+ K int // Data blocks
+ M int // Parity blocks
+ } `xml:"Internal"`
+
isDeleteMarker bool
}
diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go
index 9251c389..93c255a0 100644
--- a/vendor/github.com/minio/minio-go/v7/api.go
+++ b/vendor/github.com/minio/minio-go/v7/api.go
@@ -124,7 +124,7 @@ type Options struct {
// Global constants.
const (
libraryName = "minio-go"
- libraryVersion = "v7.0.59"
+ libraryVersion = "v7.0.60"
)
// User Agent should always following the below style.
diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go
index 2bc6b864..3c83d6f6 100644
--- a/vendor/github.com/minio/minio-go/v7/functional_tests.go
+++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go
@@ -2471,7 +2471,8 @@ func testTrailingChecksums() {
PO minio.PutObjectOptions
}{
// Currently there is no way to override the checksum type.
- {header: "x-amz-checksum-crc32c",
+ {
+ header: "x-amz-checksum-crc32c",
hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
ChecksumCRC32C: "set",
PO: minio.PutObjectOptions{
@@ -2481,7 +2482,8 @@ func testTrailingChecksums() {
PartSize: 5 << 20,
},
},
- {header: "x-amz-checksum-crc32c",
+ {
+ header: "x-amz-checksum-crc32c",
hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
ChecksumCRC32C: "set",
PO: minio.PutObjectOptions{
@@ -2491,7 +2493,8 @@ func testTrailingChecksums() {
PartSize: 6_645_654, // Rather arbitrary size
},
},
- {header: "x-amz-checksum-crc32c",
+ {
+ header: "x-amz-checksum-crc32c",
hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
ChecksumCRC32C: "set",
PO: minio.PutObjectOptions{
@@ -2501,7 +2504,8 @@ func testTrailingChecksums() {
PartSize: 5 << 20,
},
},
- {header: "x-amz-checksum-crc32c",
+ {
+ header: "x-amz-checksum-crc32c",
hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)),
ChecksumCRC32C: "set",
PO: minio.PutObjectOptions{
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
index 55b0d716..830061b8 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/lifecycle/lifecycle.go
@@ -308,19 +308,27 @@ func (eDate ExpirationDate) MarshalXML(e *xml.Encoder, startElement xml.StartEle
}
// ExpireDeleteMarker represents value of ExpiredObjectDeleteMarker field in Expiration XML element.
-type ExpireDeleteMarker bool
+type ExpireDeleteMarker ExpirationBoolean
+
+// IsEnabled returns true if the auto delete-marker expiration is enabled
+func (e ExpireDeleteMarker) IsEnabled() bool {
+ return bool(e)
+}
+
+// ExpirationBoolean represents an XML version of 'bool' type
+type ExpirationBoolean bool
// MarshalXML encodes delete marker boolean into an XML form.
-func (b ExpireDeleteMarker) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
+func (b ExpirationBoolean) MarshalXML(e *xml.Encoder, startElement xml.StartElement) error {
if !b {
return nil
}
- type expireDeleteMarkerWrapper ExpireDeleteMarker
- return e.EncodeElement(expireDeleteMarkerWrapper(b), startElement)
+ type booleanWrapper ExpirationBoolean
+ return e.EncodeElement(booleanWrapper(b), startElement)
}
-// IsEnabled returns true if the auto delete-marker expiration is enabled
-func (b ExpireDeleteMarker) IsEnabled() bool {
+// IsEnabled returns true if the expiration boolean is enabled
+func (b ExpirationBoolean) IsEnabled() bool {
return bool(b)
}
@@ -330,6 +338,7 @@ type Expiration struct {
Date ExpirationDate `xml:"Date,omitempty" json:"Date,omitempty"`
Days ExpirationDays `xml:"Days,omitempty" json:"Days,omitempty"`
DeleteMarker ExpireDeleteMarker `xml:"ExpiredObjectDeleteMarker,omitempty" json:"ExpiredObjectDeleteMarker,omitempty"`
+ DeleteAll ExpirationBoolean `xml:"ExpiredObjectAllVersions,omitempty" json:"ExpiredObjectAllVersions,omitempty"`
}
// MarshalJSON customizes json encoding by removing empty day/date specification.
@@ -338,10 +347,12 @@ func (e Expiration) MarshalJSON() ([]byte, error) {
Date *ExpirationDate `json:"Date,omitempty"`
Days *ExpirationDays `json:"Days,omitempty"`
DeleteMarker ExpireDeleteMarker `json:"ExpiredObjectDeleteMarker,omitempty"`
+ DeleteAll ExpirationBoolean `json:"ExpiredObjectAllVersions,omitempty"`
}
newexp := expiration{
DeleteMarker: e.DeleteMarker,
+ DeleteAll: e.DeleteAll,
}
if !e.IsDaysNull() {
newexp.Days = &e.Days
diff --git a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
index 98ae17ef..7a84a6f3 100644
--- a/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
+++ b/vendor/github.com/minio/minio-go/v7/pkg/tags/tags.go
@@ -203,6 +203,10 @@ func (tags *tagSet) set(key, value string, failOnExist bool) error {
return nil
}
+func (tags tagSet) count() int {
+ return len(tags.tagMap)
+}
+
func (tags tagSet) toMap() map[string]string {
m := make(map[string]string, len(tags.tagMap))
for key, value := range tags.tagMap {
@@ -279,6 +283,11 @@ func (tags *Tags) Set(key, value string) error {
return tags.TagSet.set(key, value, false)
}
+// Count - return number of tags accounted for
+func (tags Tags) Count() int {
+ return tags.TagSet.count()
+}
+
// ToMap returns copy of tags.
func (tags Tags) ToMap() map[string]string {
return tags.TagSet.toMap()
diff --git a/vendor/github.com/tidwall/gjson/LICENSE b/vendor/github.com/tidwall/gjson/LICENSE
new file mode 100644
index 00000000..58f5819a
--- /dev/null
+++ b/vendor/github.com/tidwall/gjson/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Josh Baker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/tidwall/gjson/README.md b/vendor/github.com/tidwall/gjson/README.md
new file mode 100644
index 00000000..c8db11f1
--- /dev/null
+++ b/vendor/github.com/tidwall/gjson/README.md
@@ -0,0 +1,497 @@
+
+
+
+
+
+
+
+
+
+get json values quickly
+
+GJSON is a Go package that provides a [fast](#performance) and [simple](#get-a-value) way to get values from a json document.
+It has features such as [one line retrieval](#get-a-value), [dot notation paths](#path-syntax), [iteration](#iterate-through-an-object-or-array), and [parsing json lines](#json-lines).
+
+Also check out [SJSON](https://github.com/tidwall/sjson) for modifying json, and the [JJ](https://github.com/tidwall/jj) command line tool.
+
+This README is a quick overview of how to use GJSON, for more information check out [GJSON Syntax](SYNTAX.md).
+
+GJSON is also available for [Python](https://github.com/volans-/gjson-py) and [Rust](https://github.com/tidwall/gjson.rs)
+
+Getting Started
+===============
+
+## Installing
+
+To start using GJSON, install Go and run `go get`:
+
+```sh
+$ go get -u github.com/tidwall/gjson
+```
+
+This will retrieve the library.
+
+## Get a value
+Get searches json for the specified path. A path is in dot syntax, such as "name.last" or "age". When the value is found it's returned immediately.
+
+```go
+package main
+
+import "github.com/tidwall/gjson"
+
+const json = `{"name":{"first":"Janet","last":"Prichard"},"age":47}`
+
+func main() {
+ value := gjson.Get(json, "name.last")
+ println(value.String())
+}
+```
+
+This will print:
+
+```
+Prichard
+```
+*There's also the [GetMany](#get-multiple-values-at-once) function to get multiple values at once, and [GetBytes](#working-with-bytes) for working with JSON byte slices.*
+
+## Path Syntax
+
+Below is a quick overview of the path syntax, for more complete information please
+check out [GJSON Syntax](SYNTAX.md).
+
+A path is a series of keys separated by a dot.
+A key may contain special wildcard characters '\*' and '?'.
+To access an array value use the index as the key.
+To get the number of elements in an array or to access a child path, use the '#' character.
+The dot and wildcard characters can be escaped with '\\'.
+
+```json
+{
+ "name": {"first": "Tom", "last": "Anderson"},
+ "age":37,
+ "children": ["Sara","Alex","Jack"],
+ "fav.movie": "Deer Hunter",
+ "friends": [
+ {"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]},
+ {"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]},
+ {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]}
+ ]
+}
+```
+```
+"name.last" >> "Anderson"
+"age" >> 37
+"children" >> ["Sara","Alex","Jack"]
+"children.#" >> 3
+"children.1" >> "Alex"
+"child*.2" >> "Jack"
+"c?ildren.0" >> "Sara"
+"fav\.movie" >> "Deer Hunter"
+"friends.#.first" >> ["Dale","Roger","Jane"]
+"friends.1.last" >> "Craig"
+```
+
+You can also query an array for the first match by using `#(...)`, or find all
+matches with `#(...)#`. Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=`
+comparison operators and the simple pattern matching `%` (like) and `!%`
+(not like) operators.
+
+```
+friends.#(last=="Murphy").first >> "Dale"
+friends.#(last=="Murphy")#.first >> ["Dale","Jane"]
+friends.#(age>45)#.last >> ["Craig","Murphy"]
+friends.#(first%"D*").last >> "Murphy"
+friends.#(first!%"D*").last >> "Craig"
+friends.#(nets.#(=="fb"))#.first >> ["Dale","Roger"]
+```
+
+*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was
+changed in v1.3.0 as to avoid confusion with the new
+[multipath](SYNTAX.md#multipaths) syntax. For backwards compatibility,
+`#[...]` will continue to work until the next major release.*
+
+## Result Type
+
+GJSON supports the json types `string`, `number`, `bool`, and `null`.
+Arrays and Objects are returned as their raw json types.
+
+The `Result` type holds one of these:
+
+```
+bool, for JSON booleans
+float64, for JSON numbers
+string, for JSON string literals
+nil, for JSON null
+```
+
+To directly access the value:
+
+```go
+result.Type // can be String, Number, True, False, Null, or JSON
+result.Str // holds the string
+result.Num // holds the float64 number
+result.Raw // holds the raw json
+result.Index // index of raw value in original json, zero means index unknown
+result.Indexes // indexes of all the elements that match on a path containing the '#' query character.
+```
+
+There are a variety of handy functions that work on a result:
+
+```go
+result.Exists() bool
+result.Value() interface{}
+result.Int() int64
+result.Uint() uint64
+result.Float() float64
+result.String() string
+result.Bool() bool
+result.Time() time.Time
+result.Array() []gjson.Result
+result.Map() map[string]gjson.Result
+result.Get(path string) Result
+result.ForEach(iterator func(key, value Result) bool)
+result.Less(token Result, caseSensitive bool) bool
+```
+
+The `result.Value()` function returns an `interface{}` which requires type assertion and is one of the following Go types:
+
+```go
+boolean >> bool
+number >> float64
+string >> string
+null >> nil
+array >> []interface{}
+object >> map[string]interface{}
+```
+
+The `result.Array()` function returns back an array of values.
+If the result represents a non-existent value, then an empty array will be returned.
+If the result is not a JSON array, the return value will be an array containing one result.
+
+### 64-bit integers
+
+The `result.Int()` and `result.Uint()` calls are capable of reading all 64 bits, allowing for large JSON integers.
+
+```go
+result.Int() int64 // -9223372036854775808 to 9223372036854775807
+result.Uint() uint64 // 0 to 18446744073709551615
+```
+
+## Modifiers and path chaining
+
+New in version 1.2 is support for modifier functions and path chaining.
+
+A modifier is a path component that performs custom processing on the
+json.
+
+Multiple paths can be "chained" together using the pipe character.
+This is useful for getting results from a modified query.
+
+For example, using the built-in `@reverse` modifier on the above json document,
+we'll get `children` array and reverse the order:
+
+```
+"children|@reverse" >> ["Jack","Alex","Sara"]
+"children|@reverse|0" >> "Jack"
+```
+
+There are currently the following built-in modifiers:
+
+- `@reverse`: Reverse an array or the members of an object.
+- `@ugly`: Remove all whitespace from a json document.
+- `@pretty`: Make the json document more human readable.
+- `@this`: Returns the current element. It can be used to retrieve the root element.
+- `@valid`: Ensure the json document is valid.
+- `@flatten`: Flattens an array.
+- `@join`: Joins multiple objects into a single object.
+- `@keys`: Returns an array of keys for an object.
+- `@values`: Returns an array of values for an object.
+- `@tostr`: Converts json to a string. Wraps a json string.
+- `@fromstr`: Converts a string from json. Unwraps a json string.
+- `@group`: Groups arrays of objects. See [e4fc67c](https://github.com/tidwall/gjson/commit/e4fc67c92aeebf2089fabc7872f010e340d105db).
+
+### Modifier arguments
+
+A modifier may accept an optional argument. The argument can be a valid JSON
+document or just characters.
+
+For example, the `@pretty` modifier takes a json object as its argument.
+
+```
+@pretty:{"sortKeys":true}
+```
+
+Which makes the json pretty and orders all of its keys.
+
+```json
+{
+ "age":37,
+ "children": ["Sara","Alex","Jack"],
+ "fav.movie": "Deer Hunter",
+ "friends": [
+ {"age": 44, "first": "Dale", "last": "Murphy"},
+ {"age": 68, "first": "Roger", "last": "Craig"},
+ {"age": 47, "first": "Jane", "last": "Murphy"}
+ ],
+ "name": {"first": "Tom", "last": "Anderson"}
+}
+```
+
+*The full list of `@pretty` options are `sortKeys`, `indent`, `prefix`, and `width`.
+Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.*
+
+### Custom modifiers
+
+You can also add custom modifiers.
+
+For example, here we create a modifier that makes the entire json document upper
+or lower case.
+
+```go
+gjson.AddModifier("case", func(json, arg string) string {
+ if arg == "upper" {
+ return strings.ToUpper(json)
+ }
+ if arg == "lower" {
+ return strings.ToLower(json)
+ }
+ return json
+})
+```
+
+```
+"children|@case:upper" >> ["SARA","ALEX","JACK"]
+"children|@case:lower|@reverse" >> ["jack","alex","sara"]
+```
+
+## JSON Lines
+
+There's support for [JSON Lines](http://jsonlines.org/) using the `..` prefix, which treats a multilined document as an array.
+
+For example:
+
+```
+{"name": "Gilbert", "age": 61}
+{"name": "Alexa", "age": 34}
+{"name": "May", "age": 57}
+{"name": "Deloise", "age": 44}
+```
+
+```
+..# >> 4
+..1 >> {"name": "Alexa", "age": 34}
+..3 >> {"name": "Deloise", "age": 44}
+..#.name >> ["Gilbert","Alexa","May","Deloise"]
+..#(name="May").age >> 57
+```
+
+The `ForEachLines` function will iterate through JSON lines.
+
+```go
+gjson.ForEachLine(json, func(line gjson.Result) bool{
+ println(line.String())
+ return true
+})
+```
+
+## Get nested array values
+
+Suppose you want all the last names from the following json:
+
+```json
+{
+ "programmers": [
+ {
+ "firstName": "Janet",
+ "lastName": "McLaughlin",
+ }, {
+ "firstName": "Elliotte",
+ "lastName": "Hunter",
+ }, {
+ "firstName": "Jason",
+ "lastName": "Harold",
+ }
+ ]
+}
+```
+
+You would use the path "programmers.#.lastName" like such:
+
+```go
+result := gjson.Get(json, "programmers.#.lastName")
+for _, name := range result.Array() {
+ println(name.String())
+}
+```
+
+You can also query an object inside an array:
+
+```go
+name := gjson.Get(json, `programmers.#(lastName="Hunter").firstName`)
+println(name.String()) // prints "Elliotte"
+```
+
+## Iterate through an object or array
+
+The `ForEach` function allows for quickly iterating through an object or array.
+The key and value are passed to the iterator function for objects.
+Only the value is passed for arrays.
+Returning `false` from an iterator will stop iteration.
+
+```go
+result := gjson.Get(json, "programmers")
+result.ForEach(func(key, value gjson.Result) bool {
+ println(value.String())
+ return true // keep iterating
+})
+```
+
+## Simple Parse and Get
+
+There's a `Parse(json)` function that will do a simple parse, and `result.Get(path)` that will search a result.
+
+For example, all of these will return the same result:
+
+```go
+gjson.Parse(json).Get("name").Get("last")
+gjson.Get(json, "name").Get("last")
+gjson.Get(json, "name.last")
+```
+
+## Check for the existence of a value
+
+Sometimes you just want to know if a value exists.
+
+```go
+value := gjson.Get(json, "name.last")
+if !value.Exists() {
+ println("no last name")
+} else {
+ println(value.String())
+}
+
+// Or as one step
+if gjson.Get(json, "name.last").Exists() {
+ println("has a last name")
+}
+```
+
+## Validate JSON
+
+The `Get*` and `Parse*` functions expects that the json is well-formed. Bad json will not panic, but it may return back unexpected results.
+
+If you are consuming JSON from an unpredictable source then you may want to validate prior to using GJSON.
+
+```go
+if !gjson.Valid(json) {
+ return errors.New("invalid json")
+}
+value := gjson.Get(json, "name.last")
+```
+
+## Unmarshal to a map
+
+To unmarshal to a `map[string]interface{}`:
+
+```go
+m, ok := gjson.Parse(json).Value().(map[string]interface{})
+if !ok {
+ // not a map
+}
+```
+
+## Working with Bytes
+
+If your JSON is contained in a `[]byte` slice, there's the [GetBytes](https://godoc.org/github.com/tidwall/gjson#GetBytes) function. This is preferred over `Get(string(data), path)`.
+
+```go
+var json []byte = ...
+result := gjson.GetBytes(json, path)
+```
+
+If you are using the `gjson.GetBytes(json, path)` function and you want to avoid converting `result.Raw` to a `[]byte`, then you can use this pattern:
+
+```go
+var json []byte = ...
+result := gjson.GetBytes(json, path)
+var raw []byte
+if result.Index > 0 {
+ raw = json[result.Index:result.Index+len(result.Raw)]
+} else {
+ raw = []byte(result.Raw)
+}
+```
+
+This is a best-effort no allocation sub slice of the original json. This method utilizes the `result.Index` field, which is the position of the raw data in the original json. It's possible that the value of `result.Index` equals zero, in which case the `result.Raw` is converted to a `[]byte`.
+
+## Get multiple values at once
+
+The `GetMany` function can be used to get multiple values at the same time.
+
+```go
+results := gjson.GetMany(json, "name.first", "name.last", "age")
+```
+
+The return value is a `[]Result`, which will always contain exactly the same number of items as the input paths.
+
+## Performance
+
+Benchmarks of GJSON alongside [encoding/json](https://golang.org/pkg/encoding/json/),
+[ffjson](https://github.com/pquerna/ffjson),
+[EasyJSON](https://github.com/mailru/easyjson),
+[jsonparser](https://github.com/buger/jsonparser),
+and [json-iterator](https://github.com/json-iterator/go)
+
+```
+BenchmarkGJSONGet-16 11644512 311 ns/op 0 B/op 0 allocs/op
+BenchmarkGJSONUnmarshalMap-16 1122678 3094 ns/op 1920 B/op 26 allocs/op
+BenchmarkJSONUnmarshalMap-16 516681 6810 ns/op 2944 B/op 69 allocs/op
+BenchmarkJSONUnmarshalStruct-16 697053 5400 ns/op 928 B/op 13 allocs/op
+BenchmarkJSONDecoder-16 330450 10217 ns/op 3845 B/op 160 allocs/op
+BenchmarkFFJSONLexer-16 1424979 2585 ns/op 880 B/op 8 allocs/op
+BenchmarkEasyJSONLexer-16 3000000 729 ns/op 501 B/op 5 allocs/op
+BenchmarkJSONParserGet-16 3000000 366 ns/op 21 B/op 0 allocs/op
+BenchmarkJSONIterator-16 3000000 869 ns/op 693 B/op 14 allocs/op
+```
+
+JSON document used:
+
+```json
+{
+ "widget": {
+ "debug": "on",
+ "window": {
+ "title": "Sample Konfabulator Widget",
+ "name": "main_window",
+ "width": 500,
+ "height": 500
+ },
+ "image": {
+ "src": "Images/Sun.png",
+ "hOffset": 250,
+ "vOffset": 250,
+ "alignment": "center"
+ },
+ "text": {
+ "data": "Click Here",
+ "size": 36,
+ "style": "bold",
+ "vOffset": 100,
+ "alignment": "center",
+ "onMouseUp": "sun1.opacity = (sun1.opacity / 100) * 90;"
+ }
+ }
+}
+```
+
+Each operation was rotated through one of the following search paths:
+
+```
+widget.window.name
+widget.image.hOffset
+widget.text.onMouseUp
+```
+
+*These benchmarks were run on a MacBook Pro 16" 2.4 GHz Intel Core i9 using Go 1.17 and can be found [here](https://github.com/tidwall/gjson-benchmarks).*
diff --git a/vendor/github.com/tidwall/gjson/SYNTAX.md b/vendor/github.com/tidwall/gjson/SYNTAX.md
new file mode 100644
index 00000000..7a9b6a2d
--- /dev/null
+++ b/vendor/github.com/tidwall/gjson/SYNTAX.md
@@ -0,0 +1,342 @@
+# GJSON Path Syntax
+
+A GJSON Path is a text string syntax that describes a search pattern for quickly retreiving values from a JSON payload.
+
+This document is designed to explain the structure of a GJSON Path through examples.
+
+- [Path structure](#path-structure)
+- [Basic](#basic)
+- [Wildcards](#wildcards)
+- [Escape Character](#escape-character)
+- [Arrays](#arrays)
+- [Queries](#queries)
+- [Dot vs Pipe](#dot-vs-pipe)
+- [Modifiers](#modifiers)
+- [Multipaths](#multipaths)
+- [Literals](#literals)
+
+The definitive implemenation is [github.com/tidwall/gjson](https://github.com/tidwall/gjson).
+Use the [GJSON Playground](https://gjson.dev) to experiment with the syntax online.
+
+## Path structure
+
+A GJSON Path is intended to be easily expressed as a series of components seperated by a `.` character.
+
+Along with `.` character, there are a few more that have special meaning, including `|`, `#`, `@`, `\`, `*`, `!`, and `?`.
+
+## Example
+
+Given this JSON
+
+```json
+{
+ "name": {"first": "Tom", "last": "Anderson"},
+ "age":37,
+ "children": ["Sara","Alex","Jack"],
+ "fav.movie": "Deer Hunter",
+ "friends": [
+ {"first": "Dale", "last": "Murphy", "age": 44, "nets": ["ig", "fb", "tw"]},
+ {"first": "Roger", "last": "Craig", "age": 68, "nets": ["fb", "tw"]},
+ {"first": "Jane", "last": "Murphy", "age": 47, "nets": ["ig", "tw"]}
+ ]
+}
+```
+
+The following GJSON Paths evaluate to the accompanying values.
+
+### Basic
+
+In many cases you'll just want to retreive values by object name or array index.
+
+```go
+name.last "Anderson"
+name.first "Tom"
+age 37
+children ["Sara","Alex","Jack"]
+children.0 "Sara"
+children.1 "Alex"
+friends.1 {"first": "Roger", "last": "Craig", "age": 68}
+friends.1.first "Roger"
+```
+
+### Wildcards
+
+A key may contain the special wildcard characters `*` and `?`.
+The `*` will match on any zero+ characters, and `?` matches on any one character.
+
+```go
+child*.2 "Jack"
+c?ildren.0 "Sara"
+```
+
+### Escape character
+
+Special purpose characters, such as `.`, `*`, and `?` can be escaped with `\`.
+
+```go
+fav\.movie "Deer Hunter"
+```
+
+You'll also need to make sure that the `\` character is correctly escaped when hardcoding a path in your source code.
+
+```go
+// Go
+val := gjson.Get(json, "fav\\.movie") // must escape the slash
+val := gjson.Get(json, `fav\.movie`) // no need to escape the slash
+```
+
+```rust
+// Rust
+let val = gjson::get(json, "fav\\.movie") // must escape the slash
+let val = gjson::get(json, r#"fav\.movie"#) // no need to escape the slash
+```
+
+
+### Arrays
+
+The `#` character allows for digging into JSON Arrays.
+
+To get the length of an array you'll just use the `#` all by itself.
+
+```go
+friends.# 3
+friends.#.age [44,68,47]
+```
+
+### Queries
+
+You can also query an array for the first match by using `#(...)`, or find all matches with `#(...)#`.
+Queries support the `==`, `!=`, `<`, `<=`, `>`, `>=` comparison operators,
+and the simple pattern matching `%` (like) and `!%` (not like) operators.
+
+```go
+friends.#(last=="Murphy").first "Dale"
+friends.#(last=="Murphy")#.first ["Dale","Jane"]
+friends.#(age>45)#.last ["Craig","Murphy"]
+friends.#(first%"D*").last "Murphy"
+friends.#(first!%"D*").last "Craig"
+```
+
+To query for a non-object value in an array, you can forgo the string to the right of the operator.
+
+```go
+children.#(!%"*a*") "Alex"
+children.#(%"*a*")# ["Sara","Jack"]
+```
+
+Nested queries are allowed.
+
+```go
+friends.#(nets.#(=="fb"))#.first >> ["Dale","Roger"]
+```
+
+*Please note that prior to v1.3.0, queries used the `#[...]` brackets. This was
+changed in v1.3.0 as to avoid confusion with the new [multipath](#multipaths)
+syntax. For backwards compatibility, `#[...]` will continue to work until the
+next major release.*
+
+The `~` (tilde) operator will convert a value to a boolean before comparison.
+
+For example, using the following JSON:
+
+```json
+{
+ "vals": [
+ { "a": 1, "b": true },
+ { "a": 2, "b": true },
+ { "a": 3, "b": false },
+ { "a": 4, "b": "0" },
+ { "a": 5, "b": 0 },
+ { "a": 6, "b": "1" },
+ { "a": 7, "b": 1 },
+ { "a": 8, "b": "true" },
+ { "a": 9, "b": false },
+ { "a": 10, "b": null },
+ { "a": 11 }
+ ]
+}
+```
+
+You can now query for all true(ish) or false(ish) values:
+
+```
+vals.#(b==~true)#.a >> [1,2,6,7,8]
+vals.#(b==~false)#.a >> [3,4,5,9,10,11]
+```
+
+The last value which was non-existent is treated as `false`
+
+### Dot vs Pipe
+
+The `.` is standard separator, but it's also possible to use a `|`.
+In most cases they both end up returning the same results.
+The cases where`|` differs from `.` is when it's used after the `#` for [Arrays](#arrays) and [Queries](#queries).
+
+Here are some examples
+
+```go
+friends.0.first "Dale"
+friends|0.first "Dale"
+friends.0|first "Dale"
+friends|0|first "Dale"
+friends|# 3
+friends.# 3
+friends.#(last="Murphy")# [{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}]
+friends.#(last="Murphy")#.first ["Dale","Jane"]
+friends.#(last="Murphy")#|first
+friends.#(last="Murphy")#.0 []
+friends.#(last="Murphy")#|0 {"first": "Dale", "last": "Murphy", "age": 44}
+friends.#(last="Murphy")#.# []
+friends.#(last="Murphy")#|# 2
+```
+
+Let's break down a few of these.
+
+The path `friends.#(last="Murphy")#` all by itself results in
+
+```json
+[{"first": "Dale", "last": "Murphy", "age": 44},{"first": "Jane", "last": "Murphy", "age": 47}]
+```
+
+The `.first` suffix will process the `first` path on each array element *before* returning the results. Which becomes
+
+```json
+["Dale","Jane"]
+```
+
+But the `|first` suffix actually processes the `first` path *after* the previous result.
+Since the previous result is an array, not an object, it's not possible to process
+because `first` does not exist.
+
+Yet, `|0` suffix returns
+
+```json
+{"first": "Dale", "last": "Murphy", "age": 44}
+```
+
+Because `0` is the first index of the previous result.
+
+### Modifiers
+
+A modifier is a path component that performs custom processing on the JSON.
+
+For example, using the built-in `@reverse` modifier on the above JSON payload will reverse the `children` array:
+
+```go
+children.@reverse ["Jack","Alex","Sara"]
+children.@reverse.0 "Jack"
+```
+
+There are currently the following built-in modifiers:
+
+- `@reverse`: Reverse an array or the members of an object.
+- `@ugly`: Remove all whitespace from JSON.
+- `@pretty`: Make the JSON more human readable.
+- `@this`: Returns the current element. It can be used to retrieve the root element.
+- `@valid`: Ensure the json document is valid.
+- `@flatten`: Flattens an array.
+- `@join`: Joins multiple objects into a single object.
+- `@keys`: Returns an array of keys for an object.
+- `@values`: Returns an array of values for an object.
+- `@tostr`: Converts json to a string. Wraps a json string.
+- `@fromstr`: Converts a string from json. Unwraps a json string.
+- `@group`: Groups arrays of objects. See [e4fc67c](https://github.com/tidwall/gjson/commit/e4fc67c92aeebf2089fabc7872f010e340d105db).
+
+#### Modifier arguments
+
+A modifier may accept an optional argument. The argument can be a valid JSON payload or just characters.
+
+For example, the `@pretty` modifier takes a json object as its argument.
+
+```
+@pretty:{"sortKeys":true}
+```
+
+Which makes the json pretty and orders all of its keys.
+
+```json
+{
+ "age":37,
+ "children": ["Sara","Alex","Jack"],
+ "fav.movie": "Deer Hunter",
+ "friends": [
+ {"age": 44, "first": "Dale", "last": "Murphy"},
+ {"age": 68, "first": "Roger", "last": "Craig"},
+ {"age": 47, "first": "Jane", "last": "Murphy"}
+ ],
+ "name": {"first": "Tom", "last": "Anderson"}
+}
+```
+
+*The full list of `@pretty` options are `sortKeys`, `indent`, `prefix`, and `width`.
+Please see [Pretty Options](https://github.com/tidwall/pretty#customized-output) for more information.*
+
+#### Custom modifiers
+
+You can also add custom modifiers.
+
+For example, here we create a modifier which makes the entire JSON payload upper or lower case.
+
+```go
+gjson.AddModifier("case", func(json, arg string) string {
+ if arg == "upper" {
+ return strings.ToUpper(json)
+ }
+ if arg == "lower" {
+ return strings.ToLower(json)
+ }
+ return json
+})
+"children.@case:upper" ["SARA","ALEX","JACK"]
+"children.@case:lower.@reverse" ["jack","alex","sara"]
+```
+
+*Note: Custom modifiers are not yet available in the Rust version*
+
+### Multipaths
+
+Starting with v1.3.0, GJSON added the ability to join multiple paths together
+to form new documents. Wrapping comma-separated paths between `[...]` or
+`{...}` will result in a new array or object, respectively.
+
+For example, using the given multipath:
+
+```
+{name.first,age,"the_murphys":friends.#(last="Murphy")#.first}
+```
+
+Here we selected the first name, age, and the first name for friends with the
+last name "Murphy".
+
+You'll notice that an optional key can be provided, in this case
+"the_murphys", to force assign a key to a value. Otherwise, the name of the
+actual field will be used, in this case "first". If a name cannot be
+determined, then "_" is used.
+
+This results in
+
+```json
+{"first":"Tom","age":37,"the_murphys":["Dale","Jane"]}
+```
+
+### Literals
+
+Starting with v1.12.0, GJSON added support of json literals, which provides a way for constructing static blocks of json. This is can be particularly useful when constructing a new json document using [multipaths](#multipaths).
+
+A json literal begins with the '!' declaration character.
+
+For example, using the given multipath:
+
+```
+{name.first,age,"company":!"Happysoft","employed":!true}
+```
+
+Here we selected the first name and age. Then add two new fields, "company" and "employed".
+
+This results in
+
+```json
+{"first":"Tom","age":37,"company":"Happysoft","employed":true}
+```
+
+*See issue [#249](https://github.com/tidwall/gjson/issues/249) for additional context on JSON Literals.*
diff --git a/vendor/github.com/tidwall/gjson/gjson.go b/vendor/github.com/tidwall/gjson/gjson.go
new file mode 100644
index 00000000..53cbd236
--- /dev/null
+++ b/vendor/github.com/tidwall/gjson/gjson.go
@@ -0,0 +1,3359 @@
+// Package gjson provides searching for json strings.
+package gjson
+
+import (
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf16"
+ "unicode/utf8"
+ "unsafe"
+
+ "github.com/tidwall/match"
+ "github.com/tidwall/pretty"
+)
+
+// Type is Result type
+type Type int
+
+const (
+ // Null is a null json value
+ Null Type = iota
+ // False is a json false boolean
+ False
+ // Number is json number
+ Number
+ // String is a json string
+ String
+ // True is a json true boolean
+ True
+ // JSON is a raw block of JSON
+ JSON
+)
+
+// String returns a string representation of the type.
+func (t Type) String() string {
+ switch t {
+ default:
+ return ""
+ case Null:
+ return "Null"
+ case False:
+ return "False"
+ case Number:
+ return "Number"
+ case String:
+ return "String"
+ case True:
+ return "True"
+ case JSON:
+ return "JSON"
+ }
+}
+
+// Result represents a json value that is returned from Get().
+type Result struct {
+ // Type is the json type
+ Type Type
+ // Raw is the raw json
+ Raw string
+ // Str is the json string
+ Str string
+ // Num is the json number
+ Num float64
+ // Index of raw value in original json, zero means index unknown
+ Index int
+ // Indexes of all the elements that match on a path containing the '#'
+ // query character.
+ Indexes []int
+}
+
+// String returns a string representation of the value.
+func (t Result) String() string {
+ switch t.Type {
+ default:
+ return ""
+ case False:
+ return "false"
+ case Number:
+ if len(t.Raw) == 0 {
+ // calculated result
+ return strconv.FormatFloat(t.Num, 'f', -1, 64)
+ }
+ var i int
+ if t.Raw[0] == '-' {
+ i++
+ }
+ for ; i < len(t.Raw); i++ {
+ if t.Raw[i] < '0' || t.Raw[i] > '9' {
+ return strconv.FormatFloat(t.Num, 'f', -1, 64)
+ }
+ }
+ return t.Raw
+ case String:
+ return t.Str
+ case JSON:
+ return t.Raw
+ case True:
+ return "true"
+ }
+}
+
+// Bool returns an boolean representation.
+func (t Result) Bool() bool {
+ switch t.Type {
+ default:
+ return false
+ case True:
+ return true
+ case String:
+ b, _ := strconv.ParseBool(strings.ToLower(t.Str))
+ return b
+ case Number:
+ return t.Num != 0
+ }
+}
+
+// Int returns an integer representation.
+func (t Result) Int() int64 {
+ switch t.Type {
+ default:
+ return 0
+ case True:
+ return 1
+ case String:
+ n, _ := parseInt(t.Str)
+ return n
+ case Number:
+ // try to directly convert the float64 to int64
+ i, ok := safeInt(t.Num)
+ if ok {
+ return i
+ }
+ // now try to parse the raw string
+ i, ok = parseInt(t.Raw)
+ if ok {
+ return i
+ }
+ // fallback to a standard conversion
+ return int64(t.Num)
+ }
+}
+
+// Uint returns an unsigned integer representation.
+func (t Result) Uint() uint64 {
+ switch t.Type {
+ default:
+ return 0
+ case True:
+ return 1
+ case String:
+ n, _ := parseUint(t.Str)
+ return n
+ case Number:
+ // try to directly convert the float64 to uint64
+ i, ok := safeInt(t.Num)
+ if ok && i >= 0 {
+ return uint64(i)
+ }
+ // now try to parse the raw string
+ u, ok := parseUint(t.Raw)
+ if ok {
+ return u
+ }
+ // fallback to a standard conversion
+ return uint64(t.Num)
+ }
+}
+
+// Float returns an float64 representation.
+func (t Result) Float() float64 {
+ switch t.Type {
+ default:
+ return 0
+ case True:
+ return 1
+ case String:
+ n, _ := strconv.ParseFloat(t.Str, 64)
+ return n
+ case Number:
+ return t.Num
+ }
+}
+
+// Time returns a time.Time representation.
+func (t Result) Time() time.Time {
+ res, _ := time.Parse(time.RFC3339, t.String())
+ return res
+}
+
+// Array returns back an array of values.
+// If the result represents a null value or is non-existent, then an empty
+// array will be returned.
+// If the result is not a JSON array, the return value will be an
+// array containing one result.
+func (t Result) Array() []Result {
+ if t.Type == Null {
+ return []Result{}
+ }
+ if !t.IsArray() {
+ return []Result{t}
+ }
+ r := t.arrayOrMap('[', false)
+ return r.a
+}
+
+// IsObject returns true if the result value is a JSON object.
+func (t Result) IsObject() bool {
+ return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '{'
+}
+
+// IsArray returns true if the result value is a JSON array.
+func (t Result) IsArray() bool {
+ return t.Type == JSON && len(t.Raw) > 0 && t.Raw[0] == '['
+}
+
+// IsBool returns true if the result value is a JSON boolean.
+func (t Result) IsBool() bool {
+ return t.Type == True || t.Type == False
+}
+
+// ForEach iterates through values.
+// If the result represents a non-existent value, then no values will be
+// iterated. If the result is an Object, the iterator will pass the key and
+// value of each item. If the result is an Array, the iterator will only pass
+// the value of each item. If the result is not a JSON array or object, the
+// iterator will pass back one value equal to the result.
+func (t Result) ForEach(iterator func(key, value Result) bool) {
+ if !t.Exists() {
+ return
+ }
+ if t.Type != JSON {
+ iterator(Result{}, t)
+ return
+ }
+ json := t.Raw
+ var obj bool
+ var i int
+ var key, value Result
+ for ; i < len(json); i++ {
+ if json[i] == '{' {
+ i++
+ key.Type = String
+ obj = true
+ break
+ } else if json[i] == '[' {
+ i++
+ key.Type = Number
+ key.Num = -1
+ break
+ }
+ if json[i] > ' ' {
+ return
+ }
+ }
+ var str string
+ var vesc bool
+ var ok bool
+ var idx int
+ for ; i < len(json); i++ {
+ if obj {
+ if json[i] != '"' {
+ continue
+ }
+ s := i
+ i, str, vesc, ok = parseString(json, i+1)
+ if !ok {
+ return
+ }
+ if vesc {
+ key.Str = unescape(str[1 : len(str)-1])
+ } else {
+ key.Str = str[1 : len(str)-1]
+ }
+ key.Raw = str
+ key.Index = s + t.Index
+ } else {
+ key.Num += 1
+ }
+ for ; i < len(json); i++ {
+ if json[i] <= ' ' || json[i] == ',' || json[i] == ':' {
+ continue
+ }
+ break
+ }
+ s := i
+ i, value, ok = parseAny(json, i, true)
+ if !ok {
+ return
+ }
+ if t.Indexes != nil {
+ if idx < len(t.Indexes) {
+ value.Index = t.Indexes[idx]
+ }
+ } else {
+ value.Index = s + t.Index
+ }
+ if !iterator(key, value) {
+ return
+ }
+ idx++
+ }
+}
+
+// Map returns back a map of values. The result should be a JSON object.
+// If the result is not a JSON object, the return value will be an empty map.
+func (t Result) Map() map[string]Result {
+ if t.Type != JSON {
+ return map[string]Result{}
+ }
+ r := t.arrayOrMap('{', false)
+ return r.o
+}
+
+// Get searches result for the specified path.
+// The result should be a JSON array or object.
+func (t Result) Get(path string) Result {
+ r := Get(t.Raw, path)
+ if r.Indexes != nil {
+ for i := 0; i < len(r.Indexes); i++ {
+ r.Indexes[i] += t.Index
+ }
+ } else {
+ r.Index += t.Index
+ }
+ return r
+}
+
+type arrayOrMapResult struct {
+ a []Result
+ ai []interface{}
+ o map[string]Result
+ oi map[string]interface{}
+ vc byte
+}
+
+func (t Result) arrayOrMap(vc byte, valueize bool) (r arrayOrMapResult) {
+ var json = t.Raw
+ var i int
+ var value Result
+ var count int
+ var key Result
+ if vc == 0 {
+ for ; i < len(json); i++ {
+ if json[i] == '{' || json[i] == '[' {
+ r.vc = json[i]
+ i++
+ break
+ }
+ if json[i] > ' ' {
+ goto end
+ }
+ }
+ } else {
+ for ; i < len(json); i++ {
+ if json[i] == vc {
+ i++
+ break
+ }
+ if json[i] > ' ' {
+ goto end
+ }
+ }
+ r.vc = vc
+ }
+ if r.vc == '{' {
+ if valueize {
+ r.oi = make(map[string]interface{})
+ } else {
+ r.o = make(map[string]Result)
+ }
+ } else {
+ if valueize {
+ r.ai = make([]interface{}, 0)
+ } else {
+ r.a = make([]Result, 0)
+ }
+ }
+ for ; i < len(json); i++ {
+ if json[i] <= ' ' {
+ continue
+ }
+ // get next value
+ if json[i] == ']' || json[i] == '}' {
+ break
+ }
+ switch json[i] {
+ default:
+ if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' {
+ value.Type = Number
+ value.Raw, value.Num = tonum(json[i:])
+ value.Str = ""
+ } else {
+ continue
+ }
+ case '{', '[':
+ value.Type = JSON
+ value.Raw = squash(json[i:])
+ value.Str, value.Num = "", 0
+ case 'n':
+ value.Type = Null
+ value.Raw = tolit(json[i:])
+ value.Str, value.Num = "", 0
+ case 't':
+ value.Type = True
+ value.Raw = tolit(json[i:])
+ value.Str, value.Num = "", 0
+ case 'f':
+ value.Type = False
+ value.Raw = tolit(json[i:])
+ value.Str, value.Num = "", 0
+ case '"':
+ value.Type = String
+ value.Raw, value.Str = tostr(json[i:])
+ value.Num = 0
+ }
+ value.Index = i + t.Index
+
+ i += len(value.Raw) - 1
+
+ if r.vc == '{' {
+ if count%2 == 0 {
+ key = value
+ } else {
+ if valueize {
+ if _, ok := r.oi[key.Str]; !ok {
+ r.oi[key.Str] = value.Value()
+ }
+ } else {
+ if _, ok := r.o[key.Str]; !ok {
+ r.o[key.Str] = value
+ }
+ }
+ }
+ count++
+ } else {
+ if valueize {
+ r.ai = append(r.ai, value.Value())
+ } else {
+ r.a = append(r.a, value)
+ }
+ }
+ }
+end:
+ if t.Indexes != nil {
+ if len(t.Indexes) != len(r.a) {
+ for i := 0; i < len(r.a); i++ {
+ r.a[i].Index = 0
+ }
+ } else {
+ for i := 0; i < len(r.a); i++ {
+ r.a[i].Index = t.Indexes[i]
+ }
+ }
+ }
+ return
+}
+
+// Parse parses the json and returns a result.
+//
+// This function expects that the json is well-formed, and does not validate.
+// Invalid json will not panic, but it may return back unexpected results.
+// If you are consuming JSON from an unpredictable source then you may want to
+// use the Valid function first.
+func Parse(json string) Result {
+ var value Result
+ i := 0
+ for ; i < len(json); i++ {
+ if json[i] == '{' || json[i] == '[' {
+ value.Type = JSON
+ value.Raw = json[i:] // just take the entire raw
+ break
+ }
+ if json[i] <= ' ' {
+ continue
+ }
+ switch json[i] {
+ case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+ 'i', 'I', 'N':
+ value.Type = Number
+ value.Raw, value.Num = tonum(json[i:])
+ case 'n':
+ if i+1 < len(json) && json[i+1] != 'u' {
+ // nan
+ value.Type = Number
+ value.Raw, value.Num = tonum(json[i:])
+ } else {
+ // null
+ value.Type = Null
+ value.Raw = tolit(json[i:])
+ }
+ case 't':
+ value.Type = True
+ value.Raw = tolit(json[i:])
+ case 'f':
+ value.Type = False
+ value.Raw = tolit(json[i:])
+ case '"':
+ value.Type = String
+ value.Raw, value.Str = tostr(json[i:])
+ default:
+ return Result{}
+ }
+ break
+ }
+ if value.Exists() {
+ value.Index = i
+ }
+ return value
+}
+
+// ParseBytes parses the json and returns a result.
+// If working with bytes, this method preferred over Parse(string(data))
+func ParseBytes(json []byte) Result {
+ return Parse(string(json))
+}
+
+func squash(json string) string {
+ // expects that the lead character is a '[' or '{' or '(' or '"'
+ // squash the value, ignoring all nested arrays and objects.
+ var i, depth int
+ if json[0] != '"' {
+ i, depth = 1, 1
+ }
+ for ; i < len(json); i++ {
+ if json[i] >= '"' && json[i] <= '}' {
+ switch json[i] {
+ case '"':
+ i++
+ s2 := i
+ for ; i < len(json); i++ {
+ if json[i] > '\\' {
+ continue
+ }
+ if json[i] == '"' {
+ // look for an escaped slash
+ if json[i-1] == '\\' {
+ n := 0
+ for j := i - 2; j > s2-1; j-- {
+ if json[j] != '\\' {
+ break
+ }
+ n++
+ }
+ if n%2 == 0 {
+ continue
+ }
+ }
+ break
+ }
+ }
+ if depth == 0 {
+ if i >= len(json) {
+ return json
+ }
+ return json[:i+1]
+ }
+ case '{', '[', '(':
+ depth++
+ case '}', ']', ')':
+ depth--
+ if depth == 0 {
+ return json[:i+1]
+ }
+ }
+ }
+ }
+ return json
+}
+
+func tonum(json string) (raw string, num float64) {
+ for i := 1; i < len(json); i++ {
+ // less than dash might have valid characters
+ if json[i] <= '-' {
+ if json[i] <= ' ' || json[i] == ',' {
+ // break on whitespace and comma
+ raw = json[:i]
+ num, _ = strconv.ParseFloat(raw, 64)
+ return
+ }
+ // could be a '+' or '-'. let's assume so.
+ } else if json[i] == ']' || json[i] == '}' {
+ // break on ']' or '}'
+ raw = json[:i]
+ num, _ = strconv.ParseFloat(raw, 64)
+ return
+ }
+ }
+ raw = json
+ num, _ = strconv.ParseFloat(raw, 64)
+ return
+}
+
+func tolit(json string) (raw string) {
+ for i := 1; i < len(json); i++ {
+ if json[i] < 'a' || json[i] > 'z' {
+ return json[:i]
+ }
+ }
+ return json
+}
+
+func tostr(json string) (raw string, str string) {
+ // expects that the lead character is a '"'
+ for i := 1; i < len(json); i++ {
+ if json[i] > '\\' {
+ continue
+ }
+ if json[i] == '"' {
+ return json[:i+1], json[1:i]
+ }
+ if json[i] == '\\' {
+ i++
+ for ; i < len(json); i++ {
+ if json[i] > '\\' {
+ continue
+ }
+ if json[i] == '"' {
+ // look for an escaped slash
+ if json[i-1] == '\\' {
+ n := 0
+ for j := i - 2; j > 0; j-- {
+ if json[j] != '\\' {
+ break
+ }
+ n++
+ }
+ if n%2 == 0 {
+ continue
+ }
+ }
+ return json[:i+1], unescape(json[1:i])
+ }
+ }
+ var ret string
+ if i+1 < len(json) {
+ ret = json[:i+1]
+ } else {
+ ret = json[:i]
+ }
+ return ret, unescape(json[1:i])
+ }
+ }
+ return json, json[1:]
+}
+
+// Exists returns true if value exists.
+//
+// if gjson.Get(json, "name.last").Exists(){
+// println("value exists")
+// }
+func (t Result) Exists() bool {
+ return t.Type != Null || len(t.Raw) != 0
+}
+
+// Value returns one of these types:
+//
+// bool, for JSON booleans
+// float64, for JSON numbers
+// Number, for JSON numbers
+// string, for JSON string literals
+// nil, for JSON null
+// map[string]interface{}, for JSON objects
+// []interface{}, for JSON arrays
+//
+func (t Result) Value() interface{} {
+ if t.Type == String {
+ return t.Str
+ }
+ switch t.Type {
+ default:
+ return nil
+ case False:
+ return false
+ case Number:
+ return t.Num
+ case JSON:
+ r := t.arrayOrMap(0, true)
+ if r.vc == '{' {
+ return r.oi
+ } else if r.vc == '[' {
+ return r.ai
+ }
+ return nil
+ case True:
+ return true
+ }
+}
+
+func parseString(json string, i int) (int, string, bool, bool) {
+ var s = i
+ for ; i < len(json); i++ {
+ if json[i] > '\\' {
+ continue
+ }
+ if json[i] == '"' {
+ return i + 1, json[s-1 : i+1], false, true
+ }
+ if json[i] == '\\' {
+ i++
+ for ; i < len(json); i++ {
+ if json[i] > '\\' {
+ continue
+ }
+ if json[i] == '"' {
+ // look for an escaped slash
+ if json[i-1] == '\\' {
+ n := 0
+ for j := i - 2; j > 0; j-- {
+ if json[j] != '\\' {
+ break
+ }
+ n++
+ }
+ if n%2 == 0 {
+ continue
+ }
+ }
+ return i + 1, json[s-1 : i+1], true, true
+ }
+ }
+ break
+ }
+ }
+ return i, json[s-1:], false, false
+}
+
+func parseNumber(json string, i int) (int, string) {
+ var s = i
+ i++
+ for ; i < len(json); i++ {
+ if json[i] <= ' ' || json[i] == ',' || json[i] == ']' ||
+ json[i] == '}' {
+ return i, json[s:i]
+ }
+ }
+ return i, json[s:]
+}
+
+func parseLiteral(json string, i int) (int, string) {
+ var s = i
+ i++
+ for ; i < len(json); i++ {
+ if json[i] < 'a' || json[i] > 'z' {
+ return i, json[s:i]
+ }
+ }
+ return i, json[s:]
+}
+
+type arrayPathResult struct {
+ part string
+ path string
+ pipe string
+ piped bool
+ more bool
+ alogok bool
+ arrch bool
+ alogkey string
+ query struct {
+ on bool
+ all bool
+ path string
+ op string
+ value string
+ }
+}
+
// parseArrayPath splits the leading component of an array path from the
// remainder. It recognizes '|' pipes, '.' separators, the '#' marker, the
// '#.key' form, and '#(...)' / '#[...]' queries (optionally suffixed with
// '#' for query-all).
func parseArrayPath(path string) (r arrayPathResult) {
	for i := 0; i < len(path); i++ {
		if path[i] == '|' {
			r.part = path[:i]
			r.pipe = path[i+1:]
			r.piped = true
			return
		}
		if path[i] == '.' {
			r.part = path[:i]
			if !r.arrch && i < len(path)-1 && isDotPiperChar(path[i+1:]) {
				// the dot introduces a piped subselector/modifier
				r.pipe = path[i+1:]
				r.piped = true
			} else {
				r.path = path[i+1:]
				r.more = true
			}
			return
		}
		if path[i] == '#' {
			r.arrch = true
			if i == 0 && len(path) > 1 {
				if path[1] == '.' {
					// '#.key' — gather key from each element
					r.alogok = true
					r.alogkey = path[2:]
					r.path = path[:1]
				} else if path[1] == '[' || path[1] == '(' {
					// query
					r.query.on = true
					qpath, op, value, _, fi, vesc, ok :=
						parseQuery(path[i:])
					if !ok {
						// bad query, end now
						break
					}
					// strip quotes from a quoted comparison value
					if len(value) >= 2 && value[0] == '"' &&
						value[len(value)-1] == '"' {
						value = value[1 : len(value)-1]
						if vesc {
							value = unescape(value)
						}
					}
					r.query.path = qpath
					r.query.op = op
					r.query.value = value

					i = fi - 1
					if i+1 < len(path) && path[i+1] == '#' {
						r.query.all = true
					}
				}
			}
			continue
		}
	}
	// no separator found: the whole path is the component
	r.part = path
	r.path = ""
	return
}
+
// parseQuery splits a '#(...)' or '#[...]' query into its component parts:
// the inner path, the comparison operator, the comparison value, and the
// remaining path after the query.
// So for this query:
//
//	#(first_name=="Murphy").last
//
// Becomes
//
//	first_name   # path
//	==           # op
//	"Murphy"     # value
//	.last        # remain
//
// Or,
//
//	#(service_roles.#(=="one")).cap
//
// Becomes
//
//	service_roles.#(=="one")   # path
//	                           # op (none: existence check)
//	                           # value
//	.cap                       # remain
//
// i is the index just past the query's closing bracket; ok reports whether
// the query was well formed.
func parseQuery(query string) (
	path, op, value, remain string, i int, vesc, ok bool,
) {
	if len(query) < 2 || query[0] != '#' ||
		(query[1] != '(' && query[1] != '[') {
		return "", "", "", "", i, false, false
	}
	i = 2
	j := 0 // start of value part
	depth := 1
	for ; i < len(query); i++ {
		if depth == 1 && j == 0 {
			switch query[i] {
			case '!', '=', '<', '>', '%':
				// start of the value part
				j = i
				continue
			}
		}
		if query[i] == '\\' {
			i++
		} else if query[i] == '[' || query[i] == '(' {
			depth++
		} else if query[i] == ']' || query[i] == ')' {
			depth--
			if depth == 0 {
				break
			}
		} else if query[i] == '"' {
			// inside selector string, balance quotes
			i++
			for ; i < len(query); i++ {
				if query[i] == '\\' {
					vesc = true
					i++
				} else if query[i] == '"' {
					break
				}
			}
		}
	}
	if depth > 0 {
		// unbalanced brackets: malformed query
		return "", "", "", "", i, false, false
	}
	if j > 0 {
		path = trim(query[2:j])
		value = trim(query[j:i])
		remain = query[i+1:]
		// parse the compare op from the value
		var opsz int
		switch {
		case len(value) == 1:
			opsz = 1
		case value[0] == '!' && value[1] == '=':
			opsz = 2
		case value[0] == '!' && value[1] == '%':
			opsz = 2
		case value[0] == '<' && value[1] == '=':
			opsz = 2
		case value[0] == '>' && value[1] == '=':
			opsz = 2
		case value[0] == '=' && value[1] == '=':
			// '==' is normalized to '='
			value = value[1:]
			opsz = 1
		case value[0] == '<':
			opsz = 1
		case value[0] == '>':
			opsz = 1
		case value[0] == '=':
			opsz = 1
		case value[0] == '%':
			opsz = 1
		}
		op = value[:opsz]
		value = trim(value[opsz:])
	} else {
		// no operator: existence-only query
		path = trim(query[2:i])
		remain = query[i+1:]
	}
	return path, op, value, remain, i + 1, vesc, true
}
+
// trim returns s with all leading and trailing bytes <= ' ' (space and
// ASCII control characters) removed.
func trim(s string) string {
	for len(s) > 0 && s[0] <= ' ' {
		s = s[1:]
	}
	for len(s) > 0 && s[len(s)-1] <= ' ' {
		s = s[:len(s)-1]
	}
	return s
}
+
+// peek at the next byte and see if it's a '@', '[', or '{'.
+func isDotPiperChar(s string) bool {
+ if DisableModifiers {
+ return false
+ }
+ c := s[0]
+ if c == '@' {
+ // check that the next component is *not* a modifier.
+ i := 1
+ for ; i < len(s); i++ {
+ if s[i] == '.' || s[i] == '|' || s[i] == ':' {
+ break
+ }
+ }
+ _, ok := modifiers[s[1:i]]
+ return ok
+ }
+ return c == '[' || c == '{'
+}
+
// objectPathResult holds the decomposed pieces of a path component that is
// being applied to a JSON object, as produced by parseObjectPath.
type objectPathResult struct {
	part  string // the current path component (unescaped)
	path  string // the remaining path after this component
	pipe  string // the path following a '|' pipe, if any
	piped bool   // true when pipe is set
	wild  bool   // the component contains a '*' or '?' wildcard
	more  bool   // true when path holds further components
}
+
// parseObjectPath splits the leading component of an object path from the
// remainder, handling '|' pipes, '.' separators, '*'/'?' wildcards, and
// backslash escapes (which switch to a slower copy-based scan).
func parseObjectPath(path string) (r objectPathResult) {
	for i := 0; i < len(path); i++ {
		if path[i] == '|' {
			r.part = path[:i]
			r.pipe = path[i+1:]
			r.piped = true
			return
		}
		if path[i] == '.' {
			r.part = path[:i]
			if i < len(path)-1 && isDotPiperChar(path[i+1:]) {
				r.pipe = path[i+1:]
				r.piped = true
			} else {
				r.path = path[i+1:]
				r.more = true
			}
			return
		}
		if path[i] == '*' || path[i] == '?' {
			r.wild = true
			continue
		}
		if path[i] == '\\' {
			// go into escape mode. this is a slower path that
			// strips off the escape character from the part.
			epart := []byte(path[:i])
			i++
			if i < len(path) {
				epart = append(epart, path[i])
				i++
				for ; i < len(path); i++ {
					if path[i] == '\\' {
						i++
						if i < len(path) {
							epart = append(epart, path[i])
						}
						continue
					} else if path[i] == '.' {
						r.part = string(epart)
						if i < len(path)-1 && isDotPiperChar(path[i+1:]) {
							r.pipe = path[i+1:]
							r.piped = true
						} else {
							r.path = path[i+1:]
							r.more = true
						}
						return
					} else if path[i] == '|' {
						r.part = string(epart)
						r.pipe = path[i+1:]
						r.piped = true
						return
					} else if path[i] == '*' || path[i] == '?' {
						r.wild = true
					}
					epart = append(epart, path[i])
				}
			}
			// append the last part
			r.part = string(epart)
			return
		}
	}
	// no separator found: the entire path is the component
	r.part = path
	return
}
+
// parseSquash returns the raw text of the composite value that starts at
// json[i] without interpreting it, tracking bracket depth and skipping
// string contents. It returns the index just past the value and its raw
// text (or the rest of the input when unterminated).
func parseSquash(json string, i int) (int, string) {
	// expects that the lead character is a '[' or '{' or '('
	// squash the value, ignoring all nested arrays and objects.
	// the first '[' or '{' or '(' has already been read
	s := i
	i++
	depth := 1
	for ; i < len(json); i++ {
		// only bytes in ['"', '}'] can affect depth or start a string
		if json[i] >= '"' && json[i] <= '}' {
			switch json[i] {
			case '"':
				i++
				s2 := i
				for ; i < len(json); i++ {
					if json[i] > '\\' {
						continue
					}
					if json[i] == '"' {
						// look for an escaped slash
						if json[i-1] == '\\' {
							n := 0
							for j := i - 2; j > s2-1; j-- {
								if json[j] != '\\' {
									break
								}
								n++
							}
							// even n means the quote is escaped; keep going
							if n%2 == 0 {
								continue
							}
						}
						break
					}
				}
			case '{', '[', '(':
				depth++
			case '}', ']', ')':
				depth--
				if depth == 0 {
					i++
					return i, json[s:i]
				}
			}
		}
	}
	return i, json[s:]
}
+
// parseObject searches the object whose body begins at c.json[i] (just past
// the '{') for the value addressed by path. On a match the value is stored
// in c.value. It returns the index just past the consumed portion and
// whether a match was found.
func parseObject(c *parseContext, i int, path string) (int, bool) {
	var pmatch, kesc, vesc, ok, hit bool
	var key, val string
	rp := parseObjectPath(path)
	if !rp.more && rp.piped {
		c.pipe = rp.pipe
		c.piped = true
	}
	for i < len(c.json) {
		// scan forward to the next member key
		for ; i < len(c.json); i++ {
			if c.json[i] == '"' {
				// parse_key_string
				// this is slightly different from getting a string value
				// because we don't need the outer quotes.
				i++
				var s = i
				for ; i < len(c.json); i++ {
					if c.json[i] > '\\' {
						continue
					}
					if c.json[i] == '"' {
						i, key, kesc, ok = i+1, c.json[s:i], false, true
						goto parse_key_string_done
					}
					if c.json[i] == '\\' {
						i++
						for ; i < len(c.json); i++ {
							if c.json[i] > '\\' {
								continue
							}
							if c.json[i] == '"' {
								// look for an escaped slash
								if c.json[i-1] == '\\' {
									n := 0
									for j := i - 2; j > 0; j-- {
										if c.json[j] != '\\' {
											break
										}
										n++
									}
									// even n: the quote is escaped
									if n%2 == 0 {
										continue
									}
								}
								i, key, kesc, ok = i+1, c.json[s:i], true, true
								goto parse_key_string_done
							}
						}
						break
					}
				}
				// unterminated key
				key, kesc, ok = c.json[s:], false, false
			parse_key_string_done:
				break
			}
			if c.json[i] == '}' {
				// end of object without a match
				return i + 1, false
			}
		}
		if !ok {
			return i, false
		}
		// compare the key against the current path component, unescaping
		// the key and/or wildcard-matching as needed
		if rp.wild {
			if kesc {
				pmatch = matchLimit(unescape(key), rp.part)
			} else {
				pmatch = matchLimit(key, rp.part)
			}
		} else {
			if kesc {
				pmatch = rp.part == unescape(key)
			} else {
				pmatch = rp.part == key
			}
		}
		// hit: this member's value is the final answer
		hit = pmatch && !rp.more
		// consume the member's value
		for ; i < len(c.json); i++ {
			var num bool
			switch c.json[i] {
			default:
				continue
			case '"':
				i++
				i, val, vesc, ok = parseString(c.json, i)
				if !ok {
					return i, false
				}
				if hit {
					if vesc {
						c.value.Str = unescape(val[1 : len(val)-1])
					} else {
						c.value.Str = val[1 : len(val)-1]
					}
					c.value.Raw = val
					c.value.Type = String
					return i, true
				}
			case '{':
				if pmatch && !hit {
					// descend into the nested object with the rest of path
					i, hit = parseObject(c, i+1, rp.path)
					if hit {
						return i, true
					}
				} else {
					i, val = parseSquash(c.json, i)
					if hit {
						c.value.Raw = val
						c.value.Type = JSON
						return i, true
					}
				}
			case '[':
				if pmatch && !hit {
					i, hit = parseArray(c, i+1, rp.path)
					if hit {
						return i, true
					}
				} else {
					i, val = parseSquash(c.json, i)
					if hit {
						c.value.Raw = val
						c.value.Type = JSON
						return i, true
					}
				}
			case 'n':
				if i+1 < len(c.json) && c.json[i+1] != 'u' {
					// 'n' not followed by 'u' is not "null" (e.g. nan)
					num = true
					break
				}
				fallthrough
			case 't', 'f':
				vc := c.json[i]
				i, val = parseLiteral(c.json, i)
				if hit {
					c.value.Raw = val
					switch vc {
					case 't':
						c.value.Type = True
					case 'f':
						c.value.Type = False
					}
					return i, true
				}
			case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
				'i', 'I', 'N':
				num = true
			}
			if num {
				i, val = parseNumber(c.json, i)
				if hit {
					c.value.Raw = val
					c.value.Type = Number
					c.value.Num, _ = strconv.ParseFloat(val, 64)
					return i, true
				}
			}
			break
		}
	}
	return i, false
}
+
+// matchLimit will limit the complexity of the match operation to avoid ReDos
+// attacks from arbritary inputs.
+// See the github.com/tidwall/match.MatchLimit function for more information.
+func matchLimit(str, pattern string) bool {
+ matched, _ := match.MatchLimit(str, pattern, 10000)
+ return matched
+}
+
// queryMatches reports whether value satisfies the comparison held in
// rp.query. A leading '~' on the query value coerces the candidate to a
// boolean before comparing; an empty op means the query only tests for
// existence.
func queryMatches(rp *arrayPathResult, value Result) bool {
	rpv := rp.query.value
	if len(rpv) > 0 && rpv[0] == '~' {
		// convert to bool
		rpv = rpv[1:]
		if value.Bool() {
			value = Result{Type: True}
		} else {
			value = Result{Type: False}
		}
	}
	if !value.Exists() {
		return false
	}
	if rp.query.op == "" {
		// the query is only looking for existence, such as:
		//   friends.#(name)
		// which makes sure that the array "friends" has an element of
		// "name" that exists
		return true
	}
	switch value.Type {
	case String:
		switch rp.query.op {
		case "=":
			return value.Str == rpv
		case "!=":
			return value.Str != rpv
		case "<":
			return value.Str < rpv
		case "<=":
			return value.Str <= rpv
		case ">":
			return value.Str > rpv
		case ">=":
			return value.Str >= rpv
		case "%":
			// '%' is a wildcard pattern match
			return matchLimit(value.Str, rpv)
		case "!%":
			return !matchLimit(value.Str, rpv)
		}
	case Number:
		rpvn, _ := strconv.ParseFloat(rpv, 64)
		switch rp.query.op {
		case "=":
			return value.Num == rpvn
		case "!=":
			return value.Num != rpvn
		case "<":
			return value.Num < rpvn
		case "<=":
			return value.Num <= rpvn
		case ">":
			return value.Num > rpvn
		case ">=":
			return value.Num >= rpvn
		}
	case True:
		// booleans order false < true for the relational operators
		switch rp.query.op {
		case "=":
			return rpv == "true"
		case "!=":
			return rpv != "true"
		case ">":
			return rpv == "false"
		case ">=":
			return true
		}
	case False:
		switch rp.query.op {
		case "=":
			return rpv == "false"
		case "!=":
			return rpv != "false"
		case "<":
			return rpv == "true"
		case "<=":
			return true
		}
	}
	return false
}
// parseArray searches the array whose body begins at c.json[i] (just past
// the '[') for the value addressed by path: a numeric index, the '#'
// count/iterate marker, a '#.key' gather, or a '#(...)' query. On a match
// the value is stored in c.value. It returns the index just past the
// consumed portion and whether a match was found.
func parseArray(c *parseContext, i int, path string) (int, bool) {
	var pmatch, vesc, ok, hit bool
	var val string
	var h int        // current element index
	var alog []int   // start offsets of elements for the '#.key' form
	var partidx int  // numeric index parsed from the path component
	var multires []byte    // accumulated raw JSON for query-all results
	var queryIndexes []int // source offsets of query-all results
	rp := parseArrayPath(path)
	if !rp.arrch {
		n, ok := parseUint(rp.part)
		if !ok {
			// not a number: no element can match by index
			partidx = -1
		} else {
			partidx = int(n)
		}
	}
	if !rp.more && rp.piped {
		c.pipe = rp.pipe
		c.piped = true
	}

	// procQuery applies the '#(...)' query to one candidate element and
	// reports whether the overall search can stop.
	procQuery := func(qval Result) bool {
		if rp.query.all {
			if len(multires) == 0 {
				multires = append(multires, '[')
			}
		}
		var tmp parseContext
		tmp.value = qval
		fillIndex(c.json, &tmp)
		parentIndex := tmp.value.Index
		var res Result
		if qval.Type == JSON {
			res = qval.Get(rp.query.path)
		} else {
			if rp.query.path != "" {
				// scalars have no sub-paths
				return false
			}
			res = qval
		}
		if queryMatches(&rp, res) {
			if rp.more {
				left, right, ok := splitPossiblePipe(rp.path)
				if ok {
					rp.path = left
					c.pipe = right
					c.piped = true
				}
				res = qval.Get(rp.path)
			} else {
				res = qval
			}
			if rp.query.all {
				raw := res.Raw
				if len(raw) == 0 {
					raw = res.String()
				}
				if raw != "" {
					if len(multires) > 1 {
						multires = append(multires, ',')
					}
					multires = append(multires, raw...)
					queryIndexes = append(queryIndexes, res.Index+parentIndex)
				}
			} else {
				c.value = res
				return true
			}
		}
		return false
	}
	// the loop runs one extra iteration (len+1) so a missing ']' at the end
	// of input is still handled as an array terminator
	for i < len(c.json)+1 {
		if !rp.arrch {
			pmatch = partidx == h
			hit = pmatch && !rp.more
		}
		h++
		if rp.alogok {
			alog = append(alog, i)
		}
		for ; ; i++ {
			var ch byte
			if i > len(c.json) {
				break
			} else if i == len(c.json) {
				// treat end-of-input as a virtual close bracket
				ch = ']'
			} else {
				ch = c.json[i]
			}
			var num bool
			switch ch {
			default:
				continue
			case '"':
				i++
				i, val, vesc, ok = parseString(c.json, i)
				if !ok {
					return i, false
				}
				if rp.query.on {
					var qval Result
					if vesc {
						qval.Str = unescape(val[1 : len(val)-1])
					} else {
						qval.Str = val[1 : len(val)-1]
					}
					qval.Raw = val
					qval.Type = String
					if procQuery(qval) {
						return i, true
					}
				} else if hit {
					if rp.alogok {
						break
					}
					if vesc {
						c.value.Str = unescape(val[1 : len(val)-1])
					} else {
						c.value.Str = val[1 : len(val)-1]
					}
					c.value.Raw = val
					c.value.Type = String
					return i, true
				}
			case '{':
				if pmatch && !hit {
					i, hit = parseObject(c, i+1, rp.path)
					if hit {
						if rp.alogok {
							break
						}
						return i, true
					}
				} else {
					i, val = parseSquash(c.json, i)
					if rp.query.on {
						if procQuery(Result{Raw: val, Type: JSON}) {
							return i, true
						}
					} else if hit {
						if rp.alogok {
							break
						}
						c.value.Raw = val
						c.value.Type = JSON
						return i, true
					}
				}
			case '[':
				if pmatch && !hit {
					i, hit = parseArray(c, i+1, rp.path)
					if hit {
						if rp.alogok {
							break
						}
						return i, true
					}
				} else {
					i, val = parseSquash(c.json, i)
					if rp.query.on {
						if procQuery(Result{Raw: val, Type: JSON}) {
							return i, true
						}
					} else if hit {
						if rp.alogok {
							break
						}
						c.value.Raw = val
						c.value.Type = JSON
						return i, true
					}
				}
			case 'n':
				if i+1 < len(c.json) && c.json[i+1] != 'u' {
					num = true
					break
				}
				fallthrough
			case 't', 'f':
				vc := c.json[i]
				i, val = parseLiteral(c.json, i)
				if rp.query.on {
					var qval Result
					qval.Raw = val
					switch vc {
					case 't':
						qval.Type = True
					case 'f':
						qval.Type = False
					}
					if procQuery(qval) {
						return i, true
					}
				} else if hit {
					if rp.alogok {
						break
					}
					c.value.Raw = val
					switch vc {
					case 't':
						c.value.Type = True
					case 'f':
						c.value.Type = False
					}
					return i, true
				}
			case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
				'i', 'I', 'N':
				num = true
			case ']':
				if rp.arrch && rp.part == "#" {
					if rp.alogok {
						// '#.key': gather rp.alogkey from every element
						left, right, ok := splitPossiblePipe(rp.alogkey)
						if ok {
							rp.alogkey = left
							c.pipe = right
							c.piped = true
						}
						var indexes = make([]int, 0, 64)
						var jsons = make([]byte, 0, 64)
						jsons = append(jsons, '[')
						for j, k := 0, 0; j < len(alog); j++ {
							idx := alog[j]
							for idx < len(c.json) {
								switch c.json[idx] {
								case ' ', '\t', '\r', '\n':
									idx++
									continue
								}
								break
							}
							if idx < len(c.json) && c.json[idx] != ']' {
								_, res, ok := parseAny(c.json, idx, true)
								if ok {
									res := res.Get(rp.alogkey)
									if res.Exists() {
										if k > 0 {
											jsons = append(jsons, ',')
										}
										raw := res.Raw
										if len(raw) == 0 {
											raw = res.String()
										}
										jsons = append(jsons, []byte(raw)...)
										indexes = append(indexes, res.Index)
										k++
									}
								}
							}
						}
						jsons = append(jsons, ']')
						c.value.Type = JSON
						c.value.Raw = string(jsons)
						c.value.Indexes = indexes
						return i + 1, true
					}
					if rp.alogok {
						break
					}

					// bare '#': the result is the element count
					c.value.Type = Number
					c.value.Num = float64(h - 1)
					c.value.Raw = strconv.Itoa(h - 1)
					c.calcd = true
					return i + 1, true
				}
				if !c.value.Exists() {
					if len(multires) > 0 {
						// close and publish the query-all collection
						c.value = Result{
							Raw:     string(append(multires, ']')),
							Type:    JSON,
							Indexes: queryIndexes,
						}
					} else if rp.query.all {
						c.value = Result{
							Raw:  "[]",
							Type: JSON,
						}
					}
				}
				return i + 1, false
			}
			if num {
				i, val = parseNumber(c.json, i)
				if rp.query.on {
					var qval Result
					qval.Raw = val
					qval.Type = Number
					qval.Num, _ = strconv.ParseFloat(val, 64)
					if procQuery(qval) {
						return i, true
					}
				} else if hit {
					if rp.alogok {
						break
					}
					c.value.Raw = val
					c.value.Type = Number
					c.value.Num, _ = strconv.ParseFloat(val, 64)
					return i, true
				}
			}
			break
		}
	}
	return i, false
}
+
// splitPossiblePipe splits path at the first unescaped top-level '|' into
// left and right parts; ok reports whether a pipe was found. Pipes inside
// '#[...]'/'#(...)' selectors and quoted strings are skipped.
func splitPossiblePipe(path string) (left, right string, ok bool) {
	// take a quick peek for the pipe character. If found we'll split the piped
	// part of the path into the c.pipe field and shorten the rp.
	var possible bool
	for i := 0; i < len(path); i++ {
		if path[i] == '|' {
			possible = true
			break
		}
	}
	if !possible {
		return
	}

	if len(path) > 0 && path[0] == '{' {
		// a multipath selection: squash past the matching '}' first
		squashed := squash(path[1:])
		if len(squashed) < len(path)-1 {
			squashed = path[:len(squashed)+1]
			remain := path[len(squashed):]
			if remain[0] == '|' {
				return squashed, remain[1:], true
			}
		}
		return
	}

	// split the left and right side of the path with the pipe character as
	// the delimiter. This is a little tricky because we'll need to basically
	// parse the entire path.
	for i := 0; i < len(path); i++ {
		if path[i] == '\\' {
			i++
		} else if path[i] == '.' {
			if i == len(path)-1 {
				return
			}
			if path[i+1] == '#' {
				i += 2
				if i == len(path) {
					return
				}
				if path[i] == '[' || path[i] == '(' {
					var start, end byte
					if path[i] == '[' {
						start, end = '[', ']'
					} else {
						start, end = '(', ')'
					}
					// inside selector, balance brackets
					i++
					depth := 1
					for ; i < len(path); i++ {
						if path[i] == '\\' {
							i++
						} else if path[i] == start {
							depth++
						} else if path[i] == end {
							depth--
							if depth == 0 {
								break
							}
						} else if path[i] == '"' {
							// inside selector string, balance quotes
							i++
							for ; i < len(path); i++ {
								if path[i] == '\\' {
									i++
								} else if path[i] == '"' {
									break
								}
							}
						}
					}
				}
			}
		} else if path[i] == '|' {
			return path[:i], path[i+1:], true
		}
	}
	return
}
+
+// ForEachLine iterates through lines of JSON as specified by the JSON Lines
+// format (http://jsonlines.org/).
+// Each line is returned as a GJSON Result.
+func ForEachLine(json string, iterator func(line Result) bool) {
+ var res Result
+ var i int
+ for {
+ i, res, _ = parseAny(json, i, true)
+ if !res.Exists() {
+ break
+ }
+ if !iterator(res) {
+ return
+ }
+ }
+}
+
// subSelector is one element of a '[...]' or '{...}' multipath selection:
// an optional output name and the path that produces its value.
type subSelector struct {
	name string // output key (object selections only; may be empty)
	path string // the path to evaluate for this element
}
+
// parseSubSelectors returns the subselectors belonging to a '[path1,path2]' or
// '{"field1":path1,"field2":path2}' type subSelection. It's expected that the
// first character in path is either '[' or '{', and has already been checked
// prior to calling this function. out is the path text remaining after the
// closing bracket; ok reports whether the selection was well formed.
func parseSubSelectors(path string) (sels []subSelector, out string, ok bool) {
	modifier := 0 // position of a '@' modifier within the current element
	depth := 1    // bracket nesting depth
	colon := 0    // position of the name/path ':' separator, if any
	start := 1    // start of the current element
	i := 1
	// pushSel finalizes the element spanning [start, i)
	pushSel := func() {
		var sel subSelector
		if colon == 0 {
			sel.path = path[start:i]
		} else {
			sel.name = path[start:colon]
			sel.path = path[colon+1 : i]
		}
		sels = append(sels, sel)
		colon = 0
		modifier = 0
		start = i + 1
	}
	for ; i < len(path); i++ {
		switch path[i] {
		case '\\':
			i++
		case '@':
			// only a modifier when preceded by '.' or '|'
			if modifier == 0 && i > 0 && (path[i-1] == '.' || path[i-1] == '|') {
				modifier = i
			}
		case ':':
			// a ':' after a modifier belongs to modifier args, not a name
			if modifier == 0 && colon == 0 && depth == 1 {
				colon = i
			}
		case ',':
			if depth == 1 {
				pushSel()
			}
		case '"':
			// skip over a quoted string
			i++
		loop:
			for ; i < len(path); i++ {
				switch path[i] {
				case '\\':
					i++
				case '"':
					break loop
				}
			}
		case '[', '(', '{':
			depth++
		case ']', ')', '}':
			depth--
			if depth == 0 {
				pushSel()
				path = path[i+1:]
				return sels, path, true
			}
		}
	}
	// unbalanced selection: ok stays false
	return
}
+
// nameOfLast returns the final component of a dot/pipe separated path,
// ignoring separators that are escaped with a backslash.
func nameOfLast(path string) string {
	for i := len(path) - 1; i >= 0; i-- {
		switch path[i] {
		case '|', '.':
			if i > 0 && path[i-1] == '\\' {
				continue
			}
			return path[i+1:]
		}
	}
	return path
}
+
// isSimpleName reports whether a path component contains no control
// characters and none of the special path-syntax characters, making it safe
// to use verbatim as an output field name.
func isSimpleName(component string) bool {
	for _, c := range []byte(component) {
		if c < ' ' {
			return false
		}
		switch c {
		case '[', ']', '{', '}', '(', ')', '#', '|', '!':
			return false
		}
	}
	return true
}
+
// hexchars holds the lowercase hexadecimal digits used when emitting
// \uXXXX escapes.
var hexchars = [...]byte{
	'0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
	'a', 'b', 'c', 'd', 'e', 'f',
}
+
// appendHex16 appends x to dst as exactly four lowercase hexadecimal
// digits (zero padded), as used in \uXXXX escapes.
func appendHex16(dst []byte, x uint16) []byte {
	const digits = "0123456789abcdef"
	return append(dst,
		digits[x>>12&0xF], digits[x>>8&0xF],
		digits[x>>4&0xF], digits[x&0xF],
	)
}
+
// AppendJSONString is a convenience function that converts the provided string
// to a valid JSON string and appends it to dst. Control characters, quotes,
// backslashes, '<', '>', '&', U+2028/U+2029, and invalid UTF-8 are escaped.
func AppendJSONString(dst []byte, s string) []byte {
	// pre-grow dst by len(s)+2 to minimize reallocation, then rewind and
	// write the opening quote
	dst = append(dst, make([]byte, len(s)+2)...)
	dst = append(dst[:len(dst)-len(s)-2], '"')
	for i := 0; i < len(s); i++ {
		if s[i] < ' ' {
			// control character: use a short escape when one exists
			dst = append(dst, '\\')
			switch s[i] {
			case '\n':
				dst = append(dst, 'n')
			case '\r':
				dst = append(dst, 'r')
			case '\t':
				dst = append(dst, 't')
			default:
				dst = append(dst, 'u')
				dst = appendHex16(dst, uint16(s[i]))
			}
		} else if s[i] == '>' || s[i] == '<' || s[i] == '&' {
			// HTML-sensitive characters are \u-escaped
			dst = append(dst, '\\', 'u')
			dst = appendHex16(dst, uint16(s[i]))
		} else if s[i] == '\\' {
			dst = append(dst, '\\', '\\')
		} else if s[i] == '"' {
			dst = append(dst, '\\', '"')
		} else if s[i] > 127 {
			// read utf8 character
			r, n := utf8.DecodeRuneInString(s[i:])
			if n == 0 {
				break
			}
			if r == utf8.RuneError && n == 1 {
				// invalid byte: emit the replacement character escape
				dst = append(dst, `\ufffd`...)
			} else if r == '\u2028' || r == '\u2029' {
				// JS line separators must be escaped
				dst = append(dst, `\u202`...)
				dst = append(dst, hexchars[r&0xF])
			} else {
				dst = append(dst, s[i:i+n]...)
			}
			i = i + n - 1
		} else {
			dst = append(dst, s[i])
		}
	}
	return append(dst, '"')
}
+
// parseContext carries the state of a single Get search.
type parseContext struct {
	json  string // the JSON document being searched
	value Result // the matched value, once found
	pipe  string // a pending piped path to apply to value
	piped bool   // true when pipe is set
	calcd bool   // value was calculated (e.g. '#' count) rather than raw
	lines bool   // input is treated as JSON Lines ('..' path prefix)
}
+
// Get searches json for the specified path.
// A path is in dot syntax, such as "name.last" or "age".
// When the value is found it's returned immediately.
//
// A path is a series of keys separated by a dot.
// A key may contain special wildcard characters '*' and '?'.
// To access an array value use the index as the key.
// To get the number of elements in an array or to access a child path, use
// the '#' character.
// The dot and wildcard character can be escaped with '\'.
//
//	{
//	  "name": {"first": "Tom", "last": "Anderson"},
//	  "age":37,
//	  "children": ["Sara","Alex","Jack"],
//	  "friends": [
//	    {"first": "James", "last": "Murphy"},
//	    {"first": "Roger", "last": "Craig"}
//	  ]
//	}
//	"name.last"          >> "Anderson"
//	"age"                >> 37
//	"children"           >> ["Sara","Alex","Jack"]
//	"children.#"         >> 3
//	"children.1"         >> "Alex"
//	"child*.2"           >> "Jack"
//	"c?ildren.0"         >> "Sara"
//	"friends.#.first"    >> ["James","Roger"]
//
// This function expects that the json is well-formed, and does not validate.
// Invalid json will not panic, but it may return back unexpected results.
// If you are consuming JSON from an unpredictable source then you may want to
// use the Valid function first.
func Get(json, path string) Result {
	if len(path) > 1 {
		if (path[0] == '@' && !DisableModifiers) || path[0] == '!' {
			// possible modifier
			var ok bool
			var npath string
			var rjson string
			if path[0] == '@' && !DisableModifiers {
				npath, rjson, ok = execModifier(json, path)
			} else if path[0] == '!' {
				npath, rjson, ok = execStatic(json, path)
			}
			if ok {
				// the modifier produced new json; continue on the rest
				// of the path against it
				path = npath
				if len(path) > 0 && (path[0] == '|' || path[0] == '.') {
					res := Get(rjson, path[1:])
					res.Index = 0
					res.Indexes = nil
					return res
				}
				return Parse(rjson)
			}
		}
		if path[0] == '[' || path[0] == '{' {
			// using a subselector path
			kind := path[0]
			var ok bool
			var subs []subSelector
			subs, path, ok = parseSubSelectors(path)
			if ok {
				if len(path) == 0 || (path[0] == '|' || path[0] == '.') {
					// build a new JSON document from the selected parts
					var b []byte
					b = append(b, kind)
					var i int
					for _, sub := range subs {
						res := Get(json, sub.path)
						if res.Exists() {
							if i > 0 {
								b = append(b, ',')
							}
							if kind == '{' {
								if len(sub.name) > 0 {
									if sub.name[0] == '"' && Valid(sub.name) {
										b = append(b, sub.name...)
									} else {
										b = AppendJSONString(b, sub.name)
									}
								} else {
									// unnamed member: derive the key from
									// the last path component
									last := nameOfLast(sub.path)
									if isSimpleName(last) {
										b = AppendJSONString(b, last)
									} else {
										b = AppendJSONString(b, "_")
									}
								}
								b = append(b, ':')
							}
							var raw string
							if len(res.Raw) == 0 {
								raw = res.String()
								if len(raw) == 0 {
									raw = "null"
								}
							} else {
								raw = res.Raw
							}
							b = append(b, raw...)
							i++
						}
					}
					// '['+2 == ']', '{'+2 == '}'
					b = append(b, kind+2)
					var res Result
					res.Raw = string(b)
					res.Type = JSON
					if len(path) > 0 {
						res = res.Get(path[1:])
					}
					res.Index = 0
					return res
				}
			}
		}
	}
	var i int
	var c = &parseContext{json: json}
	if len(path) >= 2 && path[0] == '.' && path[1] == '.' {
		// '..' prefix: treat the document as JSON Lines
		c.lines = true
		parseArray(c, 0, path[2:])
	} else {
		// skip to the first '{' or '}' and dispatch accordingly
		for ; i < len(c.json); i++ {
			if c.json[i] == '{' {
				i++
				parseObject(c, i, path)
				break
			}
			if c.json[i] == '[' {
				i++
				parseArray(c, i, path)
				break
			}
		}
	}
	if c.piped {
		res := c.value.Get(c.pipe)
		res.Index = 0
		return res
	}
	fillIndex(json, c)
	return c.value
}
+
// GetBytes searches json for the specified path.
// If working with bytes, this method is preferred over
// Get(string(data), path) because it avoids an extra string conversion.
func GetBytes(json []byte, path string) Result {
	return getBytes(json, path)
}
+
// runeit decodes the rune encoded by the four hex digits at the start of
// json (the "XXXX" part of a \uXXXX escape). Invalid digits decode as 0.
func runeit(json string) rune {
	u, _ := strconv.ParseUint(json[:4], 16, 64)
	return rune(u)
}
+
// unescape unescapes a JSON string body (without the surrounding quotes).
// On any malformed escape or embedded control character it returns the
// portion decoded so far rather than failing.
func unescape(json string) string {
	var str = make([]byte, 0, len(json))
	for i := 0; i < len(json); i++ {
		switch {
		default:
			str = append(str, json[i])
		case json[i] < ' ':
			// raw control character: stop decoding
			return string(str)
		case json[i] == '\\':
			i++
			if i >= len(json) {
				return string(str)
			}
			switch json[i] {
			default:
				// unknown escape: stop decoding
				return string(str)
			case '\\':
				str = append(str, '\\')
			case '/':
				str = append(str, '/')
			case 'b':
				str = append(str, '\b')
			case 'f':
				str = append(str, '\f')
			case 'n':
				str = append(str, '\n')
			case 'r':
				str = append(str, '\r')
			case 't':
				str = append(str, '\t')
			case '"':
				str = append(str, '"')
			case 'u':
				if i+5 > len(json) {
					return string(str)
				}
				r := runeit(json[i+1:])
				i += 5
				if utf16.IsSurrogate(r) {
					// need another code
					if len(json[i:]) >= 6 && json[i] == '\\' &&
						json[i+1] == 'u' {
						// we expect it to be correct so just consume it
						r = utf16.DecodeRune(r, runeit(json[i+2:]))
						i += 6
					}
				}
				// provide enough space to encode the largest utf8 possible
				str = append(str, 0, 0, 0, 0, 0, 0, 0, 0)
				n := utf8.EncodeRune(str[len(str)-8:], r)
				str = str[:len(str)-8+n]
				i-- // backtrack index by one
			}
		}
	}
	return string(str)
}
+
+// Less return true if a token is less than another token.
+// The caseSensitive paramater is used when the tokens are Strings.
+// The order when comparing two different type is:
+//
+// Null < False < Number < String < True < JSON
+//
+func (t Result) Less(token Result, caseSensitive bool) bool {
+ if t.Type < token.Type {
+ return true
+ }
+ if t.Type > token.Type {
+ return false
+ }
+ if t.Type == String {
+ if caseSensitive {
+ return t.Str < token.Str
+ }
+ return stringLessInsensitive(t.Str, token.Str)
+ }
+ if t.Type == Number {
+ return t.Num < token.Num
+ }
+ return t.Raw < token.Raw
+}
+
// stringLessInsensitive reports whether a sorts before b when ASCII
// uppercase letters are folded to lowercase before comparing. On a common
// prefix the shorter string sorts first.
func stringLessInsensitive(a, b string) bool {
	for i := 0; i < len(a) && i < len(b); i++ {
		ca, cb := a[i], b[i]
		// fold ASCII uppercase to lowercase; folding both sides is
		// equivalent to the pairwise-case comparison
		if ca >= 'A' && ca <= 'Z' {
			ca += 'a' - 'A'
		}
		if cb >= 'A' && cb <= 'Z' {
			cb += 'a' - 'A'
		}
		if ca < cb {
			return true
		}
		if ca > cb {
			return false
		}
	}
	return len(a) < len(b)
}
+
// parseAny parses the next value from a json string.
// A Result is returned when the hit param is set; when hit is false the
// value is consumed but not materialized.
// The return values are (i int, res Result, ok bool).
func parseAny(json string, i int, hit bool) (int, Result, bool) {
	var res Result
	var val string
	for ; i < len(json); i++ {
		if json[i] == '{' || json[i] == '[' {
			// composite value: capture the raw text without parsing it
			i, val = parseSquash(json, i)
			if hit {
				res.Raw = val
				res.Type = JSON
			}
			var tmp parseContext
			tmp.value = res
			fillIndex(json, &tmp)
			return i, tmp.value, true
		}
		if json[i] <= ' ' {
			// skip insignificant whitespace
			continue
		}
		var num bool
		switch json[i] {
		case '"':
			i++
			var vesc bool
			var ok bool
			i, val, vesc, ok = parseString(json, i)
			if !ok {
				return i, res, false
			}
			if hit {
				res.Type = String
				res.Raw = val
				if vesc {
					res.Str = unescape(val[1 : len(val)-1])
				} else {
					res.Str = val[1 : len(val)-1]
				}
			}
			return i, res, true
		case 'n':
			if i+1 < len(json) && json[i+1] != 'u' {
				// 'n' not followed by 'u' is a number (nan)
				num = true
				break
			}
			fallthrough
		case 't', 'f':
			vc := json[i]
			i, val = parseLiteral(json, i)
			if hit {
				res.Raw = val
				switch vc {
				case 't':
					res.Type = True
				case 'f':
					res.Type = False
				}
				return i, res, true
			}
		case '+', '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
			'i', 'I', 'N':
			num = true
		}
		if num {
			i, val = parseNumber(json, i)
			if hit {
				res.Raw = val
				res.Type = Number
				res.Num, _ = strconv.ParseFloat(val, 64)
			}
			return i, res, true
		}

	}
	return i, res, false
}
+
+// GetMany searches json for the multiple paths.
+// The return value is a Result array where the number of items
+// will be equal to the number of input paths.
+func GetMany(json string, path ...string) []Result {
+ res := make([]Result, len(path))
+ for i, path := range path {
+ res[i] = Get(json, path)
+ }
+ return res
+}
+
+// GetManyBytes searches json for the multiple paths.
+// The return value is a Result array where the number of items
+// will be equal to the number of input paths.
+func GetManyBytes(json []byte, path ...string) []Result {
+ res := make([]Result, len(path))
+ for i, path := range path {
+ res[i] = GetBytes(json, path)
+ }
+ return res
+}
+
// validpayload validates that data holds exactly one JSON value starting at
// i, allowing only trailing whitespace afterwards. It returns the index
// reached and whether the payload is valid.
func validpayload(data []byte, i int) (outi int, ok bool) {
	for ; i < len(data); i++ {
		switch data[i] {
		default:
			i, ok = validany(data, i)
			if !ok {
				return i, false
			}
			// after the value only whitespace is permitted
			for ; i < len(data); i++ {
				switch data[i] {
				default:
					return i, false
				case ' ', '\t', '\n', '\r':
					continue
				}
			}
			return i, true
		case ' ', '\t', '\n', '\r':
			continue
		}
	}
	// empty or all-whitespace input is not a valid payload
	return i, false
}
// validany validates any single JSON value starting at or after i,
// dispatching on the first non-whitespace byte. It returns the index just
// past the value and whether it is valid.
func validany(data []byte, i int) (outi int, ok bool) {
	for ; i < len(data); i++ {
		switch data[i] {
		default:
			return i, false
		case ' ', '\t', '\n', '\r':
			continue
		case '{':
			return validobject(data, i+1)
		case '[':
			return validarray(data, i+1)
		case '"':
			return validstring(data, i+1)
		case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			return validnumber(data, i+1)
		case 't':
			return validtrue(data, i+1)
		case 'f':
			return validfalse(data, i+1)
		case 'n':
			return validnull(data, i+1)
		}
	}
	return i, false
}
// validobject validates an object body; i points just past the opening
// '{'. It returns the index just past the closing '}' and whether the
// object is valid.
func validobject(data []byte, i int) (outi int, ok bool) {
	for ; i < len(data); i++ {
		switch data[i] {
		default:
			return i, false
		case ' ', '\t', '\n', '\r':
			continue
		case '}':
			// empty object
			return i + 1, true
		case '"':
		// validate one key:value member, then loop back here via goto
		// for each subsequent member
		key:
			if i, ok = validstring(data, i+1); !ok {
				return i, false
			}
			if i, ok = validcolon(data, i); !ok {
				return i, false
			}
			if i, ok = validany(data, i); !ok {
				return i, false
			}
			if i, ok = validcomma(data, i, '}'); !ok {
				return i, false
			}
			if data[i] == '}' {
				return i + 1, true
			}
			// a ',' was seen: the next key string must follow
			i++
			for ; i < len(data); i++ {
				switch data[i] {
				default:
					return i, false
				case ' ', '\t', '\n', '\r':
					continue
				case '"':
					goto key
				}
			}
			return i, false
		}
	}
	return i, false
}
// validcolon skips whitespace and expects the next significant byte to be
// a ':'. It returns the index just past the colon and whether one was
// found.
func validcolon(data []byte, i int) (outi int, ok bool) {
	for ; i < len(data); i++ {
		switch data[i] {
		case ' ', '\t', '\n', '\r':
			// skip insignificant whitespace
		case ':':
			return i + 1, true
		default:
			return i, false
		}
	}
	return i, false
}
// validcomma skips whitespace and expects the next significant byte to be
// either a ',' or the container's end byte (']' or '}'). On success the
// returned index still points AT that byte so the caller can inspect it.
func validcomma(data []byte, i int, end byte) (outi int, ok bool) {
	for ; i < len(data); i++ {
		switch data[i] {
		case ' ', '\t', '\n', '\r':
			// skip insignificant whitespace
		case ',', end:
			return i, true
		default:
			return i, false
		}
	}
	return i, false
}
// validarray validates an array body; i points just past the opening '['.
// It returns the index just past the closing ']' and whether the array is
// valid.
func validarray(data []byte, i int) (outi int, ok bool) {
	for ; i < len(data); i++ {
		switch data[i] {
		default:
			// first element found: validate value,comma pairs until ']'
			for ; i < len(data); i++ {
				if i, ok = validany(data, i); !ok {
					return i, false
				}
				if i, ok = validcomma(data, i, ']'); !ok {
					return i, false
				}
				if data[i] == ']' {
					return i + 1, true
				}
			}
		case ' ', '\t', '\n', '\r':
			continue
		case ']':
			// empty array
			return i + 1, true
		}
	}
	return i, false
}
+// validstring validates a JSON string. On entry i points just past the
+// opening '"'. It rejects raw control characters (< 0x20), checks the
+// standard escape set, and requires exactly four hex digits after "\u".
+// Returns the index just past the closing quote.
+func validstring(data []byte, i int) (outi int, ok bool) {
+	for ; i < len(data); i++ {
+		if data[i] < ' ' {
+			return i, false
+		} else if data[i] == '\\' {
+			i++
+			if i == len(data) {
+				return i, false
+			}
+			switch data[i] {
+			default:
+				return i, false
+			case '"', '\\', '/', 'b', 'f', 'n', 'r', 't':
+			case 'u':
+				for j := 0; j < 4; j++ {
+					i++
+					if i >= len(data) {
+						return i, false
+					}
+					if !((data[i] >= '0' && data[i] <= '9') ||
+						(data[i] >= 'a' && data[i] <= 'f') ||
+						(data[i] >= 'A' && data[i] <= 'F')) {
+						return i, false
+					}
+				}
+			}
+		} else if data[i] == '"' {
+			return i + 1, true
+		}
+	}
+	return i, false
+}
+// validnumber validates a JSON number per the RFC 8259 grammar
+// (sign, int, frac, exp). On entry i points just PAST the first byte of the
+// number (validany's convention), so it first steps back one to re-read it.
+// Note: a leading '0' is accepted here even when followed by more digits;
+// stricter rejection of "0123" is presumably handled elsewhere — upstream
+// gjson behaves the same.
+func validnumber(data []byte, i int) (outi int, ok bool) {
+	i--
+	// sign
+	if data[i] == '-' {
+		i++
+		if i == len(data) {
+			return i, false
+		}
+		// A '-' must be followed by at least one digit.
+		if data[i] < '0' || data[i] > '9' {
+			return i, false
+		}
+	}
+	// int
+	if i == len(data) {
+		return i, false
+	}
+	if data[i] == '0' {
+		i++
+	} else {
+		for ; i < len(data); i++ {
+			if data[i] >= '0' && data[i] <= '9' {
+				continue
+			}
+			break
+		}
+	}
+	// frac
+	if i == len(data) {
+		return i, true
+	}
+	if data[i] == '.' {
+		i++
+		if i == len(data) {
+			return i, false
+		}
+		// At least one digit is required after the decimal point.
+		if data[i] < '0' || data[i] > '9' {
+			return i, false
+		}
+		i++
+		for ; i < len(data); i++ {
+			if data[i] >= '0' && data[i] <= '9' {
+				continue
+			}
+			break
+		}
+	}
+	// exp
+	if i == len(data) {
+		return i, true
+	}
+	if data[i] == 'e' || data[i] == 'E' {
+		i++
+		if i == len(data) {
+			return i, false
+		}
+		if data[i] == '+' || data[i] == '-' {
+			i++
+		}
+		if i == len(data) {
+			return i, false
+		}
+		// At least one digit is required in the exponent.
+		if data[i] < '0' || data[i] > '9' {
+			return i, false
+		}
+		i++
+		for ; i < len(data); i++ {
+			if data[i] >= '0' && data[i] <= '9' {
+				continue
+			}
+			break
+		}
+	}
+	return i, true
+}
+
+// validtrue validates the literal "true". On entry i points just past the
+// leading 't', so only "rue" remains to be checked.
+func validtrue(data []byte, i int) (outi int, ok bool) {
+	if i+3 <= len(data) && data[i] == 'r' && data[i+1] == 'u' &&
+		data[i+2] == 'e' {
+		return i + 3, true
+	}
+	return i, false
+}
+
+// validfalse validates the literal "false"; i points just past the 'f'.
+func validfalse(data []byte, i int) (outi int, ok bool) {
+	if i+4 <= len(data) && data[i] == 'a' && data[i+1] == 'l' &&
+		data[i+2] == 's' && data[i+3] == 'e' {
+		return i + 4, true
+	}
+	return i, false
+}
+
+// validnull validates the literal "null"; i points just past the 'n'.
+func validnull(data []byte, i int) (outi int, ok bool) {
+	if i+3 <= len(data) && data[i] == 'u' && data[i+1] == 'l' &&
+		data[i+2] == 'l' {
+		return i + 3, true
+	}
+	return i, false
+}
+
+// Valid returns true if the input is valid json.
+//
+//	if !gjson.Valid(json) {
+//		return errors.New("invalid json")
+//	}
+//	value := gjson.Get(json, "name.last")
+func Valid(json string) bool {
+	// stringBytes is a zero-copy string->[]byte conversion; the bytes are
+	// only read, never written.
+	_, ok := validpayload(stringBytes(json), 0)
+	return ok
+}
+
+// ValidBytes returns true if the input is valid json.
+//
+//	if !gjson.ValidBytes(json) {
+//		return errors.New("invalid json")
+//	}
+//	value := gjson.GetBytes(json, "name.last")
+//
+// If working with bytes, this method is preferred over Valid(string(data))
+// because it avoids the string copy.
+func ValidBytes(json []byte) bool {
+	_, ok := validpayload(json, 0)
+	return ok
+}
+
+// parseUint parses a base-10 unsigned integer consisting only of digits.
+// It returns (0, false) for an empty string or any non-digit byte.
+// NOTE(review): there is no overflow check; values beyond uint64 wrap
+// silently — matches upstream gjson, where inputs are bounded elsewhere.
+func parseUint(s string) (n uint64, ok bool) {
+	var i int
+	if i == len(s) {
+		return 0, false
+	}
+	for ; i < len(s); i++ {
+		if s[i] >= '0' && s[i] <= '9' {
+			n = n*10 + uint64(s[i]-'0')
+		} else {
+			return 0, false
+		}
+	}
+	return n, true
+}
+
+// parseInt parses a base-10 signed integer: an optional leading '-' followed
+// by digits only. A '+' sign is not accepted. Same overflow caveat as
+// parseUint.
+func parseInt(s string) (n int64, ok bool) {
+	var i int
+	var sign bool
+	if len(s) > 0 && s[0] == '-' {
+		sign = true
+		i++
+	}
+	if i == len(s) {
+		// Empty string or a bare "-".
+		return 0, false
+	}
+	for ; i < len(s); i++ {
+		if s[i] >= '0' && s[i] <= '9' {
+			n = n*10 + int64(s[i]-'0')
+		} else {
+			return 0, false
+		}
+	}
+	if sign {
+		return n * -1, true
+	}
+	return n, true
+}
+
+// safeInt validates a given JSON number
+// ensures it lies within the minimum and maximum representable JSON numbers
+// (i.e. the JavaScript safe-integer range, +/- 2^53-1).
+func safeInt(f float64) (n int64, ok bool) {
+	// https://tc39.es/ecma262/#sec-number.min_safe_integer
+	// https://tc39.es/ecma262/#sec-number.max_safe_integer
+	if f < -9007199254740991 || f > 9007199254740991 {
+		return 0, false
+	}
+	return int64(f), true
+}
+
+// execStatic parses the path to find a static value.
+// The input expects that the path already starts with a '!'.
+// JSON-shaped literals ({..}, [..], "..", numbers) are squashed and returned
+// raw; otherwise the word up to the next '|' or '.' is matched against the
+// keyword literals (true/false/null/nan/inf, case-insensitive).
+func execStatic(json, path string) (pathOut, res string, ok bool) {
+	name := path[1:]
+	if len(name) > 0 {
+		switch name[0] {
+		case '{', '[', '"', '+', '-', '0', '1', '2', '3', '4', '5', '6', '7',
+			'8', '9':
+			_, res = parseSquash(name, 0)
+			pathOut = name[len(res):]
+			return pathOut, res, true
+		}
+	}
+	for i := 1; i < len(path); i++ {
+		if path[i] == '|' {
+			pathOut = path[i:]
+			name = path[1:i]
+			break
+		}
+		if path[i] == '.' {
+			pathOut = path[i:]
+			name = path[1:i]
+			break
+		}
+	}
+	switch strings.ToLower(name) {
+	case "true", "false", "null", "nan", "inf":
+		return pathOut, name, true
+	}
+	return pathOut, res, false
+}
+
+// execModifier parses the path to find a matching modifier function.
+// The input expects that the path already starts with a '@'.
+// The modifier name ends at the first ':' (introducing arguments), '|' or
+// '.'. Arguments that are themselves JSON ({..}, [..], "..") are squashed as
+// a unit; otherwise everything up to the next '|' is passed verbatim.
+func execModifier(json, path string) (pathOut, res string, ok bool) {
+	name := path[1:]
+	var hasArgs bool
+	for i := 1; i < len(path); i++ {
+		if path[i] == ':' {
+			pathOut = path[i+1:]
+			name = path[1:i]
+			hasArgs = len(pathOut) > 0
+			break
+		}
+		if path[i] == '|' {
+			pathOut = path[i:]
+			name = path[1:i]
+			break
+		}
+		if path[i] == '.' {
+			pathOut = path[i:]
+			name = path[1:i]
+			break
+		}
+	}
+	if fn, ok := modifiers[name]; ok {
+		var args string
+		if hasArgs {
+			var parsedArgs bool
+			switch pathOut[0] {
+			case '{', '[', '"':
+				// JSON-shaped argument: consume exactly one JSON value.
+				res := Parse(pathOut)
+				if res.Exists() {
+					args = squash(pathOut)
+					pathOut = pathOut[len(args):]
+					parsedArgs = true
+				}
+			}
+			if !parsedArgs {
+				idx := strings.IndexByte(pathOut, '|')
+				if idx == -1 {
+					args = pathOut
+					pathOut = ""
+				} else {
+					args = pathOut[:idx]
+					pathOut = pathOut[idx:]
+				}
+			}
+		}
+		return pathOut, fn(json, args), true
+	}
+	return pathOut, res, false
+}
+
+// unwrap removes the '[]' or '{}' characters around json.
+// The input is trimmed first; no check is made that the closing character
+// matches the opening one.
+func unwrap(json string) string {
+	json = trim(json)
+	if len(json) >= 2 && (json[0] == '[' || json[0] == '{') {
+		json = json[1 : len(json)-1]
+	}
+	return json
+}
+
+// DisableModifiers will disable the modifier syntax
+var DisableModifiers = false
+
+// modifiers maps the built-in "@name" modifier names to their
+// implementations. AddModifier mutates this map, so registration is not
+// thread safe by design.
+var modifiers = map[string]func(json, arg string) string{
+	"pretty":  modPretty,
+	"ugly":    modUgly,
+	"reverse": modReverse,
+	"this":    modThis,
+	"flatten": modFlatten,
+	"join":    modJoin,
+	"valid":   modValid,
+	"keys":    modKeys,
+	"values":  modValues,
+	"tostr":   modToStr,
+	"fromstr": modFromStr,
+	"group":   modGroup,
+}
+
+// AddModifier binds a custom modifier command to the GJSON syntax.
+// This operation is not thread safe and should be executed prior to
+// using all other gjson function.
+func AddModifier(name string, fn func(json, arg string) string) {
+	modifiers[name] = fn
+}
+
+// ModifierExists returns true when the specified modifier exists.
+// The fn parameter is unused; it is kept for API compatibility.
+func ModifierExists(name string, fn func(json, arg string) string) bool {
+	_, ok := modifiers[name]
+	return ok
+}
+
+// cleanWS remove any non-whitespace from string. In other words, only the
+// space/tab/newline/CR bytes of s are kept. The fast path returns s
+// unchanged when it is already pure whitespace; used to sanitize
+// user-supplied indent/prefix strings in modPretty.
+func cleanWS(s string) string {
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case ' ', '\t', '\n', '\r':
+			continue
+		default:
+			// Found a non-whitespace byte: rebuild keeping whitespace only.
+			var s2 []byte
+			for i := 0; i < len(s); i++ {
+				switch s[i] {
+				case ' ', '\t', '\n', '\r':
+					s2 = append(s2, s[i])
+				}
+			}
+			return string(s2)
+		}
+	}
+	return s
+}
+
+// @pretty modifier makes the json look nice.
+// The optional arg is a JSON object with sortKeys/indent/prefix/width keys;
+// indent and prefix are run through cleanWS so only whitespace can be
+// injected into the output.
+func modPretty(json, arg string) string {
+	if len(arg) > 0 {
+		opts := *pretty.DefaultOptions
+		Parse(arg).ForEach(func(key, value Result) bool {
+			switch key.String() {
+			case "sortKeys":
+				opts.SortKeys = value.Bool()
+			case "indent":
+				opts.Indent = cleanWS(value.String())
+			case "prefix":
+				opts.Prefix = cleanWS(value.String())
+			case "width":
+				opts.Width = int(value.Int())
+			}
+			return true
+		})
+		return bytesString(pretty.PrettyOptions(stringBytes(json), &opts))
+	}
+	return bytesString(pretty.Pretty(stringBytes(json)))
+}
+
+// @this returns the current element. Can be used to retrieve the root element.
+func modThis(json, arg string) string {
+	return json
+}
+
+// @ugly modifier removes all whitespace.
+func modUgly(json, arg string) string {
+	return bytesString(pretty.Ugly(stringBytes(json)))
+}
+
+// @reverse reverses array elements or root object members.
+// Non-container values are returned unchanged. Raw element text is reused,
+// so element formatting is preserved while inter-element whitespace is not.
+func modReverse(json, arg string) string {
+	res := Parse(json)
+	if res.IsArray() {
+		var values []Result
+		res.ForEach(func(_, value Result) bool {
+			values = append(values, value)
+			return true
+		})
+		out := make([]byte, 0, len(json))
+		out = append(out, '[')
+		for i, j := len(values)-1, 0; i >= 0; i, j = i-1, j+1 {
+			if j > 0 {
+				out = append(out, ',')
+			}
+			out = append(out, values[i].Raw...)
+		}
+		out = append(out, ']')
+		return bytesString(out)
+	}
+	if res.IsObject() {
+		// keyValues holds key0,val0,key1,val1,... pairs flattened.
+		var keyValues []Result
+		res.ForEach(func(key, value Result) bool {
+			keyValues = append(keyValues, key, value)
+			return true
+		})
+		out := make([]byte, 0, len(json))
+		out = append(out, '{')
+		for i, j := len(keyValues)-2, 0; i >= 0; i, j = i-2, j+1 {
+			if j > 0 {
+				out = append(out, ',')
+			}
+			out = append(out, keyValues[i+0].Raw...)
+			out = append(out, ':')
+			out = append(out, keyValues[i+1].Raw...)
+		}
+		out = append(out, '}')
+		return bytesString(out)
+	}
+	return json
+}
+
+// @flatten an array with child arrays.
+//
+//	[1,[2],[3,4],[5,[6,7]]] -> [1,2,3,4,5,[6,7]]
+//
+// The {"deep":true} arg can be provide for deep flattening.
+//
+//	[1,[2],[3,4],[5,[6,7]]] -> [1,2,3,4,5,6,7]
+//
+// The original json is returned when the json is not an array.
+func modFlatten(json, arg string) string {
+	res := Parse(json)
+	if !res.IsArray() {
+		return json
+	}
+	var deep bool
+	if arg != "" {
+		Parse(arg).ForEach(func(key, value Result) bool {
+			if key.String() == "deep" {
+				deep = value.Bool()
+			}
+			return true
+		})
+	}
+	var out []byte
+	out = append(out, '[')
+	var idx int
+	res.ForEach(func(_, value Result) bool {
+		var raw string
+		if value.IsArray() {
+			if deep {
+				// Recurse so nested arrays flatten all the way down.
+				raw = unwrap(modFlatten(value.Raw, arg))
+			} else {
+				raw = unwrap(value.Raw)
+			}
+		} else {
+			raw = value.Raw
+		}
+		raw = strings.TrimSpace(raw)
+		// Empty raw means an empty child array; skip it entirely.
+		if len(raw) > 0 {
+			if idx > 0 {
+				out = append(out, ',')
+			}
+			out = append(out, raw...)
+			idx++
+		}
+		return true
+	})
+	out = append(out, ']')
+	return bytesString(out)
+}
+
+// @keys extracts the keys from an object.
+//
+//	{"first":"Tom","last":"Smith"} -> ["first","last"]
+//
+// For a non-object value each element contributes "null"; a non-existent
+// value yields "[]".
+func modKeys(json, arg string) string {
+	v := Parse(json)
+	if !v.Exists() {
+		return "[]"
+	}
+	obj := v.IsObject()
+	var out strings.Builder
+	out.WriteByte('[')
+	var i int
+	v.ForEach(func(key, _ Result) bool {
+		if i > 0 {
+			out.WriteByte(',')
+		}
+		if obj {
+			out.WriteString(key.Raw)
+		} else {
+			out.WriteString("null")
+		}
+		i++
+		return true
+	})
+	out.WriteByte(']')
+	return out.String()
+}
+
+// @values extracts the values from an object.
+//
+//	{"first":"Tom","last":"Smith"} -> ["Tom","Smith"]
+//
+// Arrays are already value lists and are returned unchanged.
+func modValues(json, arg string) string {
+	v := Parse(json)
+	if !v.Exists() {
+		return "[]"
+	}
+	if v.IsArray() {
+		return json
+	}
+	var out strings.Builder
+	out.WriteByte('[')
+	var i int
+	v.ForEach(func(_, value Result) bool {
+		if i > 0 {
+			out.WriteByte(',')
+		}
+		out.WriteString(value.Raw)
+		i++
+		return true
+	})
+	out.WriteByte(']')
+	return out.String()
+}
+
+// @join multiple objects into a single object.
+//
+//	[{"first":"Tom"},{"last":"Smith"}] -> {"first","Tom","last":"Smith"}
+//
+// The arg can be "true" to specify that duplicate keys should be preserved.
+//
+//	[{"first":"Tom","age":37},{"age":41}] -> {"first","Tom","age":37,"age":41}
+//
+// Without preserved keys:
+//
+//	[{"first":"Tom","age":37},{"age":41}] -> {"first","Tom","age":41}
+//
+// The original json is returned when the json is not an object.
+func modJoin(json, arg string) string {
+	res := Parse(json)
+	if !res.IsArray() {
+		return json
+	}
+	var preserve bool
+	if arg != "" {
+		Parse(arg).ForEach(func(key, value Result) bool {
+			if key.String() == "preserve" {
+				preserve = value.Bool()
+			}
+			return true
+		})
+	}
+	var out []byte
+	out = append(out, '{')
+	if preserve {
+		// Preserve duplicate keys.
+		var idx int
+		res.ForEach(func(_, value Result) bool {
+			// Non-object array members are silently skipped.
+			if !value.IsObject() {
+				return true
+			}
+			if idx > 0 {
+				out = append(out, ',')
+			}
+			out = append(out, unwrap(value.Raw)...)
+			idx++
+			return true
+		})
+	} else {
+		// Deduplicate keys and generate an object with stable ordering.
+		// Later values win; first-seen order of keys is kept.
+		var keys []Result
+		kvals := make(map[string]Result)
+		res.ForEach(func(_, value Result) bool {
+			if !value.IsObject() {
+				return true
+			}
+			value.ForEach(func(key, value Result) bool {
+				k := key.String()
+				if _, ok := kvals[k]; !ok {
+					keys = append(keys, key)
+				}
+				kvals[k] = value
+				return true
+			})
+			return true
+		})
+		for i := 0; i < len(keys); i++ {
+			if i > 0 {
+				out = append(out, ',')
+			}
+			out = append(out, keys[i].Raw...)
+			out = append(out, ':')
+			out = append(out, kvals[keys[i].String()].Raw...)
+		}
+	}
+	out = append(out, '}')
+	return bytesString(out)
+}
+
+// @valid ensures that the json is valid before moving on. An empty string is
+// returned when the json is not valid, otherwise it returns the original json.
+func modValid(json, arg string) string {
+	if !Valid(json) {
+		return ""
+	}
+	return json
+}
+
+// @fromstr converts a string to json
+//
+//	"{\"id\":1023,\"name\":\"alert\"}" -> {"id":1023,"name":"alert"}
+func modFromStr(json, arg string) string {
+	if !Valid(json) {
+		return ""
+	}
+	return Parse(json).String()
+}
+
+// @tostr converts json into a JSON string literal
+//
+//	{"id":1023,"name":"alert"} -> "{\"id\":1023,\"name\":\"alert\"}"
+func modToStr(str, arg string) string {
+	return string(AppendJSONString(nil, str))
+}
+
+// @group transposes an object of parallel arrays into an array of objects:
+//
+//	{"id":[1,2],"name":["a","b"]} -> [{"id":1,"name":"a"},{"id":2,"name":"b"}]
+//
+// Non-object input yields "". Each all[idx] entry accumulates
+// ",key:value" pairs; the leading comma is dropped via item[1:] below.
+func modGroup(json, arg string) string {
+	res := Parse(json)
+	if !res.IsObject() {
+		return ""
+	}
+	var all [][]byte
+	res.ForEach(func(key, value Result) bool {
+		if !value.IsArray() {
+			return true
+		}
+		var idx int
+		value.ForEach(func(_, value Result) bool {
+			if idx == len(all) {
+				all = append(all, []byte{})
+			}
+			all[idx] = append(all[idx], ("," + key.Raw + ":" + value.Raw)...)
+			idx++
+			return true
+		})
+		return true
+	})
+	var data []byte
+	data = append(data, '[')
+	for i, item := range all {
+		if i > 0 {
+			data = append(data, ',')
+		}
+		data = append(data, '{')
+		data = append(data, item[1:]...)
+		data = append(data, '}')
+	}
+	data = append(data, ']')
+	return string(data)
+}
+
+// stringHeader instead of reflect.StringHeader
+// Field layout must mirror the runtime's string header exactly; it is read
+// and written through unsafe.Pointer casts below.
+type stringHeader struct {
+	data unsafe.Pointer
+	len  int
+}
+
+// sliceHeader instead of reflect.SliceHeader
+// Field layout must mirror the runtime's slice header exactly.
+type sliceHeader struct {
+	data unsafe.Pointer
+	len  int
+	cap  int
+}
+
+// getBytes casts the input json bytes to a string and safely returns the
+// results as uniquely allocated data. This operation is intended to minimize
+// copies and allocations for the large json string->[]byte.
+// After the zero-copy Get, Raw and Str may alias the caller's byte slice, so
+// they are copied out; when Str is a substring of Raw only one copy is made
+// and Str is re-sliced from it.
+func getBytes(json []byte, path string) Result {
+	var result Result
+	if json != nil {
+		// unsafe cast to string
+		result = Get(*(*string)(unsafe.Pointer(&json)), path)
+		// safely get the string headers
+		rawhi := *(*stringHeader)(unsafe.Pointer(&result.Raw))
+		strhi := *(*stringHeader)(unsafe.Pointer(&result.Str))
+		// create byte slice headers
+		rawh := sliceHeader{data: rawhi.data, len: rawhi.len, cap: rawhi.len}
+		strh := sliceHeader{data: strhi.data, len: strhi.len, cap: rawhi.len}
+		if strh.data == nil {
+			// str is nil
+			if rawh.data == nil {
+				// raw is nil
+				result.Raw = ""
+			} else {
+				// raw has data, safely copy the slice header to a string
+				result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh)))
+			}
+			result.Str = ""
+		} else if rawh.data == nil {
+			// raw is nil
+			result.Raw = ""
+			// str has data, safely copy the slice header to a string
+			result.Str = string(*(*[]byte)(unsafe.Pointer(&strh)))
+		} else if uintptr(strh.data) >= uintptr(rawh.data) &&
+			uintptr(strh.data)+uintptr(strh.len) <=
+				uintptr(rawh.data)+uintptr(rawh.len) {
+			// Str is a substring of Raw.
+			start := uintptr(strh.data) - uintptr(rawh.data)
+			// safely copy the raw slice header
+			result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh)))
+			// substring the raw
+			result.Str = result.Raw[start : start+uintptr(strh.len)]
+		} else {
+			// safely copy both the raw and str slice headers to strings
+			result.Raw = string(*(*[]byte)(unsafe.Pointer(&rawh)))
+			result.Str = string(*(*[]byte)(unsafe.Pointer(&strh)))
+		}
+	}
+	return result
+}
+
+// fillIndex finds the position of Raw data and assigns it to the Index field
+// of the resulting value. If the position cannot be found then Index zero is
+// used instead. The position is derived from pointer arithmetic: Raw is a
+// zero-copy substring of json, so the header data pointers differ by the
+// byte offset. c.calcd marks values whose index was computed some other way.
+func fillIndex(json string, c *parseContext) {
+	if len(c.value.Raw) > 0 && !c.calcd {
+		jhdr := *(*stringHeader)(unsafe.Pointer(&json))
+		rhdr := *(*stringHeader)(unsafe.Pointer(&(c.value.Raw)))
+		c.value.Index = int(uintptr(rhdr.data) - uintptr(jhdr.data))
+		if c.value.Index < 0 || c.value.Index >= len(json) {
+			c.value.Index = 0
+		}
+	}
+}
+
+// stringBytes converts a string to a []byte without copying. The caller must
+// never write to the returned slice.
+func stringBytes(s string) []byte {
+	return *(*[]byte)(unsafe.Pointer(&sliceHeader{
+		data: (*stringHeader)(unsafe.Pointer(&s)).data,
+		len:  len(s),
+		cap:  len(s),
+	}))
+}
+
+// bytesString converts a []byte to a string without copying. The caller must
+// not mutate b afterwards.
+func bytesString(b []byte) string {
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+// revSquash returns the trailing JSON token of json, scanning backwards.
+func revSquash(json string) string {
+	// reverse squash
+	// expects that the tail character is a ']' or '}' or ')' or '"'
+	// squash the value, ignoring all nested arrays and objects.
+	i := len(json) - 1
+	var depth int
+	if json[i] != '"' {
+		depth++
+	}
+	if json[i] == '}' || json[i] == ']' || json[i] == ')' {
+		i--
+	}
+	for ; i >= 0; i-- {
+		switch json[i] {
+		case '"':
+			// Walk back to the matching opening quote, skipping escaped
+			// quotes (an odd number of preceding backslashes).
+			i--
+			for ; i >= 0; i-- {
+				if json[i] == '"' {
+					esc := 0
+					for i > 0 && json[i-1] == '\\' {
+						i--
+						esc++
+					}
+					if esc%2 == 1 {
+						continue
+					}
+					i += esc
+					break
+				}
+			}
+			if depth == 0 {
+				if i < 0 {
+					i = 0
+				}
+				return json[i:]
+			}
+		case '}', ']', ')':
+			depth++
+		case '{', '[', '(':
+			depth--
+			if depth == 0 {
+				return json[i:]
+			}
+		}
+	}
+	return json
+}
+
+// Paths returns the original GJSON paths for a Result where the Result came
+// from a simple query path that returns an array, like:
+//
+//	gjson.Get(json, "friends.#.first")
+//
+// The returned value will be in the form of a JSON array:
+//
+//	["friends.0.first","friends.1.first","friends.2.first"]
+//
+// The param 'json' must be the original JSON used when calling Get.
+//
+// Returns an empty string if the paths cannot be determined, which can happen
+// when the Result came from a path that contained a multipath, modifier,
+// or a nested query.
+func (t Result) Paths(json string) []string {
+	if t.Indexes == nil {
+		return nil
+	}
+	paths := make([]string, 0, len(t.Indexes))
+	t.ForEach(func(_, value Result) bool {
+		paths = append(paths, value.Path(json))
+		return true
+	})
+	// If any element failed Path resolution the counts will disagree;
+	// treat the whole result as undeterminable.
+	if len(paths) != len(t.Indexes) {
+		return nil
+	}
+	return paths
+}
+
+// Path returns the original GJSON path for a Result where the Result came
+// from a simple path that returns a single value, like:
+//
+//	gjson.Get(json, "friends.#(last=Murphy)")
+//
+// The returned value will be in the form of a JSON string:
+//
+//	"friends.0"
+//
+// The param 'json' must be the original JSON used when calling Get.
+//
+// Returns an empty string if the paths cannot be determined, which can happen
+// when the Result came from a path that contained a multipath, modifier,
+// or a nested query.
+//
+// The algorithm scans backwards from t.Index, collecting either the member
+// key (object context) or the element position (array context) at each
+// enclosing level, then joins the components in reverse with '.'.
+func (t Result) Path(json string) string {
+	var path []byte
+	var comps []string // raw components
+	i := t.Index - 1
+	if t.Index+len(t.Raw) > len(json) {
+		// JSON cannot safely contain Result.
+		goto fail
+	}
+	if !strings.HasPrefix(json[t.Index:], t.Raw) {
+		// Result is not at the JSON index as expected.
+		goto fail
+	}
+	for ; i >= 0; i-- {
+		if json[i] <= ' ' {
+			continue
+		}
+		if json[i] == ':' {
+			// inside of object, get the key
+			for ; i >= 0; i-- {
+				if json[i] != '"' {
+					continue
+				}
+				break
+			}
+			raw := revSquash(json[:i+1])
+			i = i - len(raw)
+			comps = append(comps, raw)
+			// key gotten, now squash the rest
+			raw = revSquash(json[:i+1])
+			i = i - len(raw)
+			i++ // increment the index for next loop step
+		} else if json[i] == '{' {
+			// Encountered an open object. The original result was probably an
+			// object key.
+			goto fail
+		} else if json[i] == ',' || json[i] == '[' {
+			// inside of an array, count the position
+			var arrIdx int
+			if json[i] == ',' {
+				arrIdx++
+				i--
+			}
+			for ; i >= 0; i-- {
+				if json[i] == ':' {
+					// Encountered an unexpected colon. The original result was
+					// probably an object key.
+					goto fail
+				} else if json[i] == ',' {
+					arrIdx++
+				} else if json[i] == '[' {
+					comps = append(comps, strconv.Itoa(arrIdx))
+					break
+				} else if json[i] == ']' || json[i] == '}' || json[i] == '"' {
+					// Skip over a sibling value in one jump.
+					raw := revSquash(json[:i+1])
+					i = i - len(raw) + 1
+				}
+			}
+		}
+	}
+	if len(comps) == 0 {
+		if DisableModifiers {
+			goto fail
+		}
+		return "@this"
+	}
+	for i := len(comps) - 1; i >= 0; i-- {
+		rcomp := Parse(comps[i])
+		if !rcomp.Exists() {
+			goto fail
+		}
+		comp := escapeComp(rcomp.String())
+		path = append(path, '.')
+		path = append(path, comp...)
+	}
+	if len(path) > 0 {
+		// Drop the leading '.' separator.
+		path = path[1:]
+	}
+	return string(path)
+fail:
+	return ""
+}
+
+// isSafePathKeyChar returns true if the input character is safe for not
+// needing escaping.
+// NOTE(review): c <= ' ' classifies control and space bytes as safe; this
+// matches upstream gjson — verify intentional before changing.
+func isSafePathKeyChar(c byte) bool {
+	return c <= ' ' || c > '~' || c == '_' || c == '-' || c == ':' ||
+		(c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
+		(c >= '0' && c <= '9')
+}
+
+// escapeComp escapes a path component, making it safe for generating a
+// path for later use. Unsafe bytes get a preceding backslash; the fast path
+// returns the input unchanged when nothing needs escaping.
+func escapeComp(comp string) string {
+	for i := 0; i < len(comp); i++ {
+		if !isSafePathKeyChar(comp[i]) {
+			ncomp := []byte(comp[:i])
+			for ; i < len(comp); i++ {
+				if !isSafePathKeyChar(comp[i]) {
+					ncomp = append(ncomp, '\\')
+				}
+				ncomp = append(ncomp, comp[i])
+			}
+			return string(ncomp)
+		}
+	}
+	return comp
+}
diff --git a/vendor/github.com/tidwall/gjson/logo.png b/vendor/github.com/tidwall/gjson/logo.png
new file mode 100644
index 00000000..17a8bbe9
Binary files /dev/null and b/vendor/github.com/tidwall/gjson/logo.png differ
diff --git a/vendor/github.com/tidwall/match/LICENSE b/vendor/github.com/tidwall/match/LICENSE
new file mode 100644
index 00000000..58f5819a
--- /dev/null
+++ b/vendor/github.com/tidwall/match/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Josh Baker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/tidwall/match/README.md b/vendor/github.com/tidwall/match/README.md
new file mode 100644
index 00000000..5fdd4cf6
--- /dev/null
+++ b/vendor/github.com/tidwall/match/README.md
@@ -0,0 +1,29 @@
+# Match
+
+[](https://godoc.org/github.com/tidwall/match)
+
+Match is a very simple pattern matcher where '*' matches on any
+number of characters and '?' matches on any one character.
+
+## Installing
+
+```
+go get -u github.com/tidwall/match
+```
+
+## Example
+
+```go
+match.Match("hello", "*llo")
+match.Match("jello", "?ello")
+match.Match("hello", "h*o")
+```
+
+
+## Contact
+
+Josh Baker [@tidwall](http://twitter.com/tidwall)
+
+## License
+
+Match source code is available under the MIT [License](/LICENSE).
diff --git a/vendor/github.com/tidwall/match/match.go b/vendor/github.com/tidwall/match/match.go
new file mode 100644
index 00000000..11da28f1
--- /dev/null
+++ b/vendor/github.com/tidwall/match/match.go
@@ -0,0 +1,237 @@
+// Package match provides a simple pattern matcher with unicode support.
+package match
+
+import (
+ "unicode/utf8"
+)
+
+// Match returns true if str matches pattern. This is a very
+// simple wildcard match where '*' matches on any number characters
+// and '?' matches on any one character.
+//
+// pattern:
+//
+//	{ term }
+//
+// term:
+//
+//	'*'         matches any sequence of non-Separator characters
+//	'?'         matches any single non-Separator character
+//	c           matches character c (c != '*', '?', '\\')
+//	'\\' c      matches character c
+func Match(str, pattern string) bool {
+	if pattern == "*" {
+		return true
+	}
+	// -1 maxcomp disables the complexity limiter.
+	return match(str, pattern, 0, nil, -1) == rMatch
+}
+
+// MatchLimit is the same as Match but will limit the complexity of the match
+// operation. This is to avoid long running matches, specifically to avoid
+// ReDoS attacks from arbitrary inputs.
+//
+// How it works:
+// The underlying match routine is recursive and may call itself when it
+// encounters a sandwiched wildcard pattern, such as: `user:*:name`.
+// Every time it calls itself a counter is incremented.
+// The operation is stopped when counter > maxcomp*len(str).
+func MatchLimit(str, pattern string, maxcomp int) (matched, stopped bool) {
+	if pattern == "*" {
+		return true, false
+	}
+	counter := 0
+	r := match(str, pattern, len(str), &counter, maxcomp)
+	if r == rStop {
+		return false, true
+	}
+	return r == rMatch, false
+}
+
+// result is the tri-state outcome of the recursive match routine.
+type result int
+
+const (
+	rNoMatch result = iota // pattern does not match
+	rMatch                 // pattern matches
+	rStop                  // complexity limit exceeded
+)
+
+// match is the recursive matcher behind Match/MatchLimit. slen is the
+// original string length used only for the complexity budget; counter/maxcomp
+// implement the ReDoS guard (maxcomp < 0 disables it). Bytes > 0x7f are
+// decoded as UTF-8 runes so multi-byte characters compare as units.
+func match(str, pat string, slen int, counter *int, maxcomp int) result {
+	// check complexity limit
+	if maxcomp > -1 {
+		if *counter > slen*maxcomp {
+			return rStop
+		}
+		*counter++
+	}
+
+	for len(pat) > 0 {
+		var wild bool
+		pc, ps := rune(pat[0]), 1
+		if pc > 0x7f {
+			pc, ps = utf8.DecodeRuneInString(pat)
+		}
+		var sc rune
+		var ss int
+		if len(str) > 0 {
+			sc, ss = rune(str[0]), 1
+			if sc > 0x7f {
+				sc, ss = utf8.DecodeRuneInString(str)
+			}
+		}
+		switch pc {
+		case '?':
+			if ss == 0 {
+				// '?' needs one character but the string is empty.
+				return rNoMatch
+			}
+		case '*':
+			// Ignore repeating stars.
+			for len(pat) > 1 && pat[1] == '*' {
+				pat = pat[1:]
+			}
+
+			// If this star is the last character then it must be a match.
+			if len(pat) == 1 {
+				return rMatch
+			}
+
+			// Match and trim any non-wildcard suffix characters.
+			var ok bool
+			str, pat, ok = matchTrimSuffix(str, pat)
+			if !ok {
+				return rNoMatch
+			}
+
+			// Check for single star again.
+			if len(pat) == 1 {
+				return rMatch
+			}
+
+			// Perform recursive wildcard search.
+			r := match(str, pat[1:], slen, counter, maxcomp)
+			if r != rNoMatch {
+				return r
+			}
+			if len(str) == 0 {
+				return rNoMatch
+			}
+			// Let the star consume one character and retry (wild keeps
+			// pat pointing at the '*').
+			wild = true
+		default:
+			if ss == 0 {
+				return rNoMatch
+			}
+			if pc == '\\' {
+				// Escaped literal: compare the next pattern rune verbatim.
+				pat = pat[ps:]
+				pc, ps = utf8.DecodeRuneInString(pat)
+				if ps == 0 {
+					return rNoMatch
+				}
+			}
+			if sc != pc {
+				return rNoMatch
+			}
+		}
+		str = str[ss:]
+		if !wild {
+			pat = pat[ps:]
+		}
+	}
+	if len(str) == 0 {
+		return rMatch
+	}
+	return rNoMatch
+}
+
+// matchTrimSuffix matches and trims any non-wildcard suffix characters.
+// Returns the trimmed string and pattern.
+//
+// This is called because the pattern contains extra data after the wildcard
+// star. Here we compare any suffix characters in the pattern to the suffix of
+// the target string. Basically a reverse match that stops when a wildcard
+// character is reached. This is a little trickier than a forward match because
+// we need to evaluate an escaped character in reverse.
+//
+// Any matched characters will be trimmed from both the target
+// string and the pattern.
+func matchTrimSuffix(str, pat string) (string, string, bool) {
+	// It's expected that the pattern has at least two bytes and the first byte
+	// is a wildcard star '*'
+	match := true
+	for len(str) > 0 && len(pat) > 1 {
+		pc, ps := utf8.DecodeLastRuneInString(pat)
+		var esc bool
+		// Count trailing backslashes before this rune; an odd count means the
+		// rune itself is escaped and must be treated as a literal.
+		for i := 0; ; i++ {
+			if pat[len(pat)-ps-i-1] != '\\' {
+				if i&1 == 1 {
+					esc = true
+					ps++
+				}
+				break
+			}
+		}
+		if pc == '*' && !esc {
+			match = true
+			break
+		}
+		sc, ss := utf8.DecodeLastRuneInString(str)
+		if !((pc == '?' && !esc) || pc == sc) {
+			match = false
+			break
+		}
+		// Trim the matched rune from both ends and continue backwards.
+		str = str[:len(str)-ss]
+		pat = pat[:len(pat)-ps]
+	}
+	return str, pat, match
+}
+
+// maxRuneBytes is the UTF-8 encoding of utf8.MaxRune (U+10FFFF), used as the
+// upper bound for a '?' position.
+var maxRuneBytes = [...]byte{244, 143, 191, 191}
+
+// Allowable parses the pattern and determines the minimum and maximum
+// allowable values that the pattern can represent. A leading '*' (or an
+// empty pattern) yields ("", ""); a trailing wildcard bumps the last rune of
+// max so the range stays inclusive of everything the star could match.
+// NOTE(review): earlier upstream versions documented an "infinite" bool
+// return that no longer exists in this signature.
+func Allowable(pattern string) (min, max string) {
+	if pattern == "" || pattern[0] == '*' {
+		return "", ""
+	}
+
+	minb := make([]byte, 0, len(pattern))
+	maxb := make([]byte, 0, len(pattern))
+	var wild bool
+	for i := 0; i < len(pattern); i++ {
+		if pattern[i] == '*' {
+			wild = true
+			break
+		}
+		if pattern[i] == '?' {
+			// '?' can be anything: NUL for min, max rune for max.
+			minb = append(minb, 0)
+			maxb = append(maxb, maxRuneBytes[:]...)
+		} else {
+			minb = append(minb, pattern[i])
+			maxb = append(maxb, pattern[i])
+		}
+	}
+	if wild {
+		r, n := utf8.DecodeLastRune(maxb)
+		if r != utf8.RuneError {
+			if r < utf8.MaxRune {
+				r++
+				if r > 0x7f {
+					b := make([]byte, 4)
+					nn := utf8.EncodeRune(b, r)
+					maxb = append(maxb[:len(maxb)-n], b[:nn]...)
+				} else {
+					maxb = append(maxb[:len(maxb)-n], byte(r))
+				}
+			}
+		}
+	}
+	return string(minb), string(maxb)
+}
+
+// IsPattern returns true if the string is a pattern.
+func IsPattern(str string) bool {
+	for i := 0; i < len(str); i++ {
+		if str[i] == '*' || str[i] == '?' {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/tidwall/pretty/LICENSE b/vendor/github.com/tidwall/pretty/LICENSE
new file mode 100644
index 00000000..993b83f2
--- /dev/null
+++ b/vendor/github.com/tidwall/pretty/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2017 Josh Baker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/tidwall/pretty/README.md b/vendor/github.com/tidwall/pretty/README.md
new file mode 100644
index 00000000..76c06a5e
--- /dev/null
+++ b/vendor/github.com/tidwall/pretty/README.md
@@ -0,0 +1,122 @@
+# Pretty
+
+[](https://pkg.go.dev/github.com/tidwall/pretty)
+
+Pretty is a Go package that provides [fast](#performance) methods for formatting JSON for human readability, or to compact JSON for smaller payloads.
+
+Getting Started
+===============
+
+## Installing
+
+To start using Pretty, install Go and run `go get`:
+
+```sh
+$ go get -u github.com/tidwall/pretty
+```
+
+This will retrieve the library.
+
+## Pretty
+
+Using this example:
+
+```json
+{"name": {"first":"Tom","last":"Anderson"}, "age":37,
+"children": ["Sara","Alex","Jack"],
+"fav.movie": "Deer Hunter", "friends": [
+ {"first": "Janet", "last": "Murphy", "age": 44}
+ ]}
+```
+
+The following code:
+```go
+result = pretty.Pretty(example)
+```
+
+Will format the json to:
+
+```json
+{
+ "name": {
+ "first": "Tom",
+ "last": "Anderson"
+ },
+ "age": 37,
+ "children": ["Sara", "Alex", "Jack"],
+ "fav.movie": "Deer Hunter",
+ "friends": [
+ {
+ "first": "Janet",
+ "last": "Murphy",
+ "age": 44
+ }
+ ]
+}
+```
+
+## Color
+
+Color will colorize the json for outputing to the screen.
+
+```go
+result = pretty.Color(json, nil)
+```
+
+Will add color to the result for printing to the terminal.
+The second param is used for a customizing the style, and passing nil will use the default `pretty.TerminalStyle`.
+
+## Ugly
+
+The following code:
+```go
+result = pretty.Ugly(example)
+```
+
+Will format the json to:
+
+```json
+{"name":{"first":"Tom","last":"Anderson"},"age":37,"children":["Sara","Alex","Jack"],"fav.movie":"Deer Hunter","friends":[{"first":"Janet","last":"Murphy","age":44}]}```
+```
+
+## Customized output
+
+There's a `PrettyOptions(json, opts)` function which allows for customizing the output with the following options:
+
+```go
+type Options struct {
+ // Width is an max column width for single line arrays
+ // Default is 80
+ Width int
+ // Prefix is a prefix for all lines
+ // Default is an empty string
+ Prefix string
+ // Indent is the nested indentation
+ // Default is two spaces
+ Indent string
+ // SortKeys will sort the keys alphabetically
+ // Default is false
+ SortKeys bool
+}
+```
+## Performance
+
+Benchmarks of Pretty alongside the builtin `encoding/json` Indent/Compact methods.
+```
+BenchmarkPretty-16 1000000 1034 ns/op 720 B/op 2 allocs/op
+BenchmarkPrettySortKeys-16 586797 1983 ns/op 2848 B/op 14 allocs/op
+BenchmarkUgly-16 4652365 254 ns/op 240 B/op 1 allocs/op
+BenchmarkUglyInPlace-16 6481233 183 ns/op 0 B/op 0 allocs/op
+BenchmarkJSONIndent-16 450654 2687 ns/op 1221 B/op 0 allocs/op
+BenchmarkJSONCompact-16 685111 1699 ns/op 442 B/op 0 allocs/op
+```
+
+*These benchmarks were run on a MacBook Pro 2.4 GHz 8-Core Intel Core i9.*
+
+## Contact
+Josh Baker [@tidwall](http://twitter.com/tidwall)
+
+## License
+
+Pretty source code is available under the MIT [License](/LICENSE).
+
diff --git a/vendor/github.com/tidwall/pretty/pretty.go b/vendor/github.com/tidwall/pretty/pretty.go
new file mode 100644
index 00000000..d705f9cd
--- /dev/null
+++ b/vendor/github.com/tidwall/pretty/pretty.go
@@ -0,0 +1,682 @@
+package pretty
+
+import (
+ "bytes"
+ "encoding/json"
+ "sort"
+ "strconv"
+)
+
+// Options is Pretty options
+type Options struct {
+ // Width is an max column width for single line arrays
+ // Default is 80
+ Width int
+ // Prefix is a prefix for all lines
+ // Default is an empty string
+ Prefix string
+ // Indent is the nested indentation
+ // Default is two spaces
+ Indent string
+ // SortKeys will sort the keys alphabetically
+ // Default is false
+ SortKeys bool
+}
+
+// DefaultOptions is the default options for pretty formats.
+var DefaultOptions = &Options{Width: 80, Prefix: "", Indent: " ", SortKeys: false}
+
+// Pretty converts the input json into a more human readable format where each
+// element is on it's own line with clear indentation.
+func Pretty(json []byte) []byte { return PrettyOptions(json, nil) }
+
+// PrettyOptions is like Pretty but with customized options.
+func PrettyOptions(json []byte, opts *Options) []byte {
+ if opts == nil {
+ opts = DefaultOptions
+ }
+ buf := make([]byte, 0, len(json))
+ if len(opts.Prefix) != 0 {
+ buf = append(buf, opts.Prefix...)
+ }
+ buf, _, _, _ = appendPrettyAny(buf, json, 0, true,
+ opts.Width, opts.Prefix, opts.Indent, opts.SortKeys,
+ 0, 0, -1)
+ if len(buf) > 0 {
+ buf = append(buf, '\n')
+ }
+ return buf
+}
+
+// Ugly removes insignificant space characters from the input json byte slice
+// and returns the compacted result.
+func Ugly(json []byte) []byte {
+ buf := make([]byte, 0, len(json))
+ return ugly(buf, json)
+}
+
+// UglyInPlace removes insignificant space characters from the input json
+// byte slice and returns the compacted result. This method reuses the
+// input json buffer to avoid allocations. Do not use the original bytes
+// slice upon return.
+func UglyInPlace(json []byte) []byte { return ugly(json, json) }
+
+func ugly(dst, src []byte) []byte {
+ dst = dst[:0]
+ for i := 0; i < len(src); i++ {
+ if src[i] > ' ' {
+ dst = append(dst, src[i])
+ if src[i] == '"' {
+ for i = i + 1; i < len(src); i++ {
+ dst = append(dst, src[i])
+ if src[i] == '"' {
+ j := i - 1
+ for ; ; j-- {
+ if src[j] != '\\' {
+ break
+ }
+ }
+ if (j-i)%2 != 0 {
+ break
+ }
+ }
+ }
+ }
+ }
+ }
+ return dst
+}
+
+func isNaNOrInf(src []byte) bool {
+ return src[0] == 'i' || //Inf
+ src[0] == 'I' || // inf
+ src[0] == '+' || // +Inf
+ src[0] == 'N' || // Nan
+ (src[0] == 'n' && len(src) > 1 && src[1] != 'u') // nan
+}
+
+func appendPrettyAny(buf, json []byte, i int, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) {
+ for ; i < len(json); i++ {
+ if json[i] <= ' ' {
+ continue
+ }
+ if json[i] == '"' {
+ return appendPrettyString(buf, json, i, nl)
+ }
+
+ if (json[i] >= '0' && json[i] <= '9') || json[i] == '-' || isNaNOrInf(json[i:]) {
+ return appendPrettyNumber(buf, json, i, nl)
+ }
+ if json[i] == '{' {
+ return appendPrettyObject(buf, json, i, '{', '}', pretty, width, prefix, indent, sortkeys, tabs, nl, max)
+ }
+ if json[i] == '[' {
+ return appendPrettyObject(buf, json, i, '[', ']', pretty, width, prefix, indent, sortkeys, tabs, nl, max)
+ }
+ switch json[i] {
+ case 't':
+ return append(buf, 't', 'r', 'u', 'e'), i + 4, nl, true
+ case 'f':
+ return append(buf, 'f', 'a', 'l', 's', 'e'), i + 5, nl, true
+ case 'n':
+ return append(buf, 'n', 'u', 'l', 'l'), i + 4, nl, true
+ }
+ }
+ return buf, i, nl, true
+}
+
+type pair struct {
+ kstart, kend int
+ vstart, vend int
+}
+
+type byKeyVal struct {
+ sorted bool
+ json []byte
+ buf []byte
+ pairs []pair
+}
+
+func (arr *byKeyVal) Len() int {
+ return len(arr.pairs)
+}
+func (arr *byKeyVal) Less(i, j int) bool {
+ if arr.isLess(i, j, byKey) {
+ return true
+ }
+ if arr.isLess(j, i, byKey) {
+ return false
+ }
+ return arr.isLess(i, j, byVal)
+}
+func (arr *byKeyVal) Swap(i, j int) {
+ arr.pairs[i], arr.pairs[j] = arr.pairs[j], arr.pairs[i]
+ arr.sorted = true
+}
+
+type byKind int
+
+const (
+ byKey byKind = 0
+ byVal byKind = 1
+)
+
+type jtype int
+
+const (
+ jnull jtype = iota
+ jfalse
+ jnumber
+ jstring
+ jtrue
+ jjson
+)
+
+func getjtype(v []byte) jtype {
+ if len(v) == 0 {
+ return jnull
+ }
+ switch v[0] {
+ case '"':
+ return jstring
+ case 'f':
+ return jfalse
+ case 't':
+ return jtrue
+ case 'n':
+ return jnull
+ case '[', '{':
+ return jjson
+ default:
+ return jnumber
+ }
+}
+
+func (arr *byKeyVal) isLess(i, j int, kind byKind) bool {
+ k1 := arr.json[arr.pairs[i].kstart:arr.pairs[i].kend]
+ k2 := arr.json[arr.pairs[j].kstart:arr.pairs[j].kend]
+ var v1, v2 []byte
+ if kind == byKey {
+ v1 = k1
+ v2 = k2
+ } else {
+ v1 = bytes.TrimSpace(arr.buf[arr.pairs[i].vstart:arr.pairs[i].vend])
+ v2 = bytes.TrimSpace(arr.buf[arr.pairs[j].vstart:arr.pairs[j].vend])
+ if len(v1) >= len(k1)+1 {
+ v1 = bytes.TrimSpace(v1[len(k1)+1:])
+ }
+ if len(v2) >= len(k2)+1 {
+ v2 = bytes.TrimSpace(v2[len(k2)+1:])
+ }
+ }
+ t1 := getjtype(v1)
+ t2 := getjtype(v2)
+ if t1 < t2 {
+ return true
+ }
+ if t1 > t2 {
+ return false
+ }
+ if t1 == jstring {
+ s1 := parsestr(v1)
+ s2 := parsestr(v2)
+ return string(s1) < string(s2)
+ }
+ if t1 == jnumber {
+ n1, _ := strconv.ParseFloat(string(v1), 64)
+ n2, _ := strconv.ParseFloat(string(v2), 64)
+ return n1 < n2
+ }
+ return string(v1) < string(v2)
+
+}
+
+func parsestr(s []byte) []byte {
+ for i := 1; i < len(s); i++ {
+ if s[i] == '\\' {
+ var str string
+ json.Unmarshal(s, &str)
+ return []byte(str)
+ }
+ if s[i] == '"' {
+ return s[1:i]
+ }
+ }
+ return nil
+}
+
+func appendPrettyObject(buf, json []byte, i int, open, close byte, pretty bool, width int, prefix, indent string, sortkeys bool, tabs, nl, max int) ([]byte, int, int, bool) {
+ var ok bool
+ if width > 0 {
+ if pretty && open == '[' && max == -1 {
+ // here we try to create a single line array
+ max := width - (len(buf) - nl)
+ if max > 3 {
+ s1, s2 := len(buf), i
+ buf, i, _, ok = appendPrettyObject(buf, json, i, '[', ']', false, width, prefix, "", sortkeys, 0, 0, max)
+ if ok && len(buf)-s1 <= max {
+ return buf, i, nl, true
+ }
+ buf = buf[:s1]
+ i = s2
+ }
+ } else if max != -1 && open == '{' {
+ return buf, i, nl, false
+ }
+ }
+ buf = append(buf, open)
+ i++
+ var pairs []pair
+ if open == '{' && sortkeys {
+ pairs = make([]pair, 0, 8)
+ }
+ var n int
+ for ; i < len(json); i++ {
+ if json[i] <= ' ' {
+ continue
+ }
+ if json[i] == close {
+ if pretty {
+ if open == '{' && sortkeys {
+ buf = sortPairs(json, buf, pairs)
+ }
+ if n > 0 {
+ nl = len(buf)
+ if buf[nl-1] == ' ' {
+ buf[nl-1] = '\n'
+ } else {
+ buf = append(buf, '\n')
+ }
+ }
+ if buf[len(buf)-1] != open {
+ buf = appendTabs(buf, prefix, indent, tabs)
+ }
+ }
+ buf = append(buf, close)
+ return buf, i + 1, nl, open != '{'
+ }
+ if open == '[' || json[i] == '"' {
+ if n > 0 {
+ buf = append(buf, ',')
+ if width != -1 && open == '[' {
+ buf = append(buf, ' ')
+ }
+ }
+ var p pair
+ if pretty {
+ nl = len(buf)
+ if buf[nl-1] == ' ' {
+ buf[nl-1] = '\n'
+ } else {
+ buf = append(buf, '\n')
+ }
+ if open == '{' && sortkeys {
+ p.kstart = i
+ p.vstart = len(buf)
+ }
+ buf = appendTabs(buf, prefix, indent, tabs+1)
+ }
+ if open == '{' {
+ buf, i, nl, _ = appendPrettyString(buf, json, i, nl)
+ if sortkeys {
+ p.kend = i
+ }
+ buf = append(buf, ':')
+ if pretty {
+ buf = append(buf, ' ')
+ }
+ }
+ buf, i, nl, ok = appendPrettyAny(buf, json, i, pretty, width, prefix, indent, sortkeys, tabs+1, nl, max)
+ if max != -1 && !ok {
+ return buf, i, nl, false
+ }
+ if pretty && open == '{' && sortkeys {
+ p.vend = len(buf)
+ if p.kstart > p.kend || p.vstart > p.vend {
+ // bad data. disable sorting
+ sortkeys = false
+ } else {
+ pairs = append(pairs, p)
+ }
+ }
+ i--
+ n++
+ }
+ }
+ return buf, i, nl, open != '{'
+}
+func sortPairs(json, buf []byte, pairs []pair) []byte {
+ if len(pairs) == 0 {
+ return buf
+ }
+ vstart := pairs[0].vstart
+ vend := pairs[len(pairs)-1].vend
+ arr := byKeyVal{false, json, buf, pairs}
+ sort.Stable(&arr)
+ if !arr.sorted {
+ return buf
+ }
+ nbuf := make([]byte, 0, vend-vstart)
+ for i, p := range pairs {
+ nbuf = append(nbuf, buf[p.vstart:p.vend]...)
+ if i < len(pairs)-1 {
+ nbuf = append(nbuf, ',')
+ nbuf = append(nbuf, '\n')
+ }
+ }
+ return append(buf[:vstart], nbuf...)
+}
+
+func appendPrettyString(buf, json []byte, i, nl int) ([]byte, int, int, bool) {
+ s := i
+ i++
+ for ; i < len(json); i++ {
+ if json[i] == '"' {
+ var sc int
+ for j := i - 1; j > s; j-- {
+ if json[j] == '\\' {
+ sc++
+ } else {
+ break
+ }
+ }
+ if sc%2 == 1 {
+ continue
+ }
+ i++
+ break
+ }
+ }
+ return append(buf, json[s:i]...), i, nl, true
+}
+
+func appendPrettyNumber(buf, json []byte, i, nl int) ([]byte, int, int, bool) {
+ s := i
+ i++
+ for ; i < len(json); i++ {
+ if json[i] <= ' ' || json[i] == ',' || json[i] == ':' || json[i] == ']' || json[i] == '}' {
+ break
+ }
+ }
+ return append(buf, json[s:i]...), i, nl, true
+}
+
+func appendTabs(buf []byte, prefix, indent string, tabs int) []byte {
+ if len(prefix) != 0 {
+ buf = append(buf, prefix...)
+ }
+ if len(indent) == 2 && indent[0] == ' ' && indent[1] == ' ' {
+ for i := 0; i < tabs; i++ {
+ buf = append(buf, ' ', ' ')
+ }
+ } else {
+ for i := 0; i < tabs; i++ {
+ buf = append(buf, indent...)
+ }
+ }
+ return buf
+}
+
+// Style is the color style
+type Style struct {
+ Key, String, Number [2]string
+ True, False, Null [2]string
+ Escape [2]string
+ Brackets [2]string
+ Append func(dst []byte, c byte) []byte
+}
+
+func hexp(p byte) byte {
+ switch {
+ case p < 10:
+ return p + '0'
+ default:
+ return (p - 10) + 'a'
+ }
+}
+
+// TerminalStyle is for terminals
+var TerminalStyle *Style
+
+func init() {
+ TerminalStyle = &Style{
+ Key: [2]string{"\x1B[1m\x1B[94m", "\x1B[0m"},
+ String: [2]string{"\x1B[32m", "\x1B[0m"},
+ Number: [2]string{"\x1B[33m", "\x1B[0m"},
+ True: [2]string{"\x1B[36m", "\x1B[0m"},
+ False: [2]string{"\x1B[36m", "\x1B[0m"},
+ Null: [2]string{"\x1B[2m", "\x1B[0m"},
+ Escape: [2]string{"\x1B[35m", "\x1B[0m"},
+ Brackets: [2]string{"\x1B[1m", "\x1B[0m"},
+ Append: func(dst []byte, c byte) []byte {
+ if c < ' ' && (c != '\r' && c != '\n' && c != '\t' && c != '\v') {
+ dst = append(dst, "\\u00"...)
+ dst = append(dst, hexp((c>>4)&0xF))
+ return append(dst, hexp((c)&0xF))
+ }
+ return append(dst, c)
+ },
+ }
+}
+
+// Color will colorize the json. The style parma is used for customizing
+// the colors. Passing nil to the style param will use the default
+// TerminalStyle.
+func Color(src []byte, style *Style) []byte {
+ if style == nil {
+ style = TerminalStyle
+ }
+ apnd := style.Append
+ if apnd == nil {
+ apnd = func(dst []byte, c byte) []byte {
+ return append(dst, c)
+ }
+ }
+ type stackt struct {
+ kind byte
+ key bool
+ }
+ var dst []byte
+ var stack []stackt
+ for i := 0; i < len(src); i++ {
+ if src[i] == '"' {
+ key := len(stack) > 0 && stack[len(stack)-1].key
+ if key {
+ dst = append(dst, style.Key[0]...)
+ } else {
+ dst = append(dst, style.String[0]...)
+ }
+ dst = apnd(dst, '"')
+ esc := false
+ uesc := 0
+ for i = i + 1; i < len(src); i++ {
+ if src[i] == '\\' {
+ if key {
+ dst = append(dst, style.Key[1]...)
+ } else {
+ dst = append(dst, style.String[1]...)
+ }
+ dst = append(dst, style.Escape[0]...)
+ dst = apnd(dst, src[i])
+ esc = true
+ if i+1 < len(src) && src[i+1] == 'u' {
+ uesc = 5
+ } else {
+ uesc = 1
+ }
+ } else if esc {
+ dst = apnd(dst, src[i])
+ if uesc == 1 {
+ esc = false
+ dst = append(dst, style.Escape[1]...)
+ if key {
+ dst = append(dst, style.Key[0]...)
+ } else {
+ dst = append(dst, style.String[0]...)
+ }
+ } else {
+ uesc--
+ }
+ } else {
+ dst = apnd(dst, src[i])
+ }
+ if src[i] == '"' {
+ j := i - 1
+ for ; ; j-- {
+ if src[j] != '\\' {
+ break
+ }
+ }
+ if (j-i)%2 != 0 {
+ break
+ }
+ }
+ }
+ if esc {
+ dst = append(dst, style.Escape[1]...)
+ } else if key {
+ dst = append(dst, style.Key[1]...)
+ } else {
+ dst = append(dst, style.String[1]...)
+ }
+ } else if src[i] == '{' || src[i] == '[' {
+ stack = append(stack, stackt{src[i], src[i] == '{'})
+ dst = append(dst, style.Brackets[0]...)
+ dst = apnd(dst, src[i])
+ dst = append(dst, style.Brackets[1]...)
+ } else if (src[i] == '}' || src[i] == ']') && len(stack) > 0 {
+ stack = stack[:len(stack)-1]
+ dst = append(dst, style.Brackets[0]...)
+ dst = apnd(dst, src[i])
+ dst = append(dst, style.Brackets[1]...)
+ } else if (src[i] == ':' || src[i] == ',') && len(stack) > 0 && stack[len(stack)-1].kind == '{' {
+ stack[len(stack)-1].key = !stack[len(stack)-1].key
+ dst = append(dst, style.Brackets[0]...)
+ dst = apnd(dst, src[i])
+ dst = append(dst, style.Brackets[1]...)
+ } else {
+ var kind byte
+ if (src[i] >= '0' && src[i] <= '9') || src[i] == '-' || isNaNOrInf(src[i:]) {
+ kind = '0'
+ dst = append(dst, style.Number[0]...)
+ } else if src[i] == 't' {
+ kind = 't'
+ dst = append(dst, style.True[0]...)
+ } else if src[i] == 'f' {
+ kind = 'f'
+ dst = append(dst, style.False[0]...)
+ } else if src[i] == 'n' {
+ kind = 'n'
+ dst = append(dst, style.Null[0]...)
+ } else {
+ dst = apnd(dst, src[i])
+ }
+ if kind != 0 {
+ for ; i < len(src); i++ {
+ if src[i] <= ' ' || src[i] == ',' || src[i] == ':' || src[i] == ']' || src[i] == '}' {
+ i--
+ break
+ }
+ dst = apnd(dst, src[i])
+ }
+ if kind == '0' {
+ dst = append(dst, style.Number[1]...)
+ } else if kind == 't' {
+ dst = append(dst, style.True[1]...)
+ } else if kind == 'f' {
+ dst = append(dst, style.False[1]...)
+ } else if kind == 'n' {
+ dst = append(dst, style.Null[1]...)
+ }
+ }
+ }
+ }
+ return dst
+}
+
+// Spec strips out comments and trailing commas and convert the input to a
+// valid JSON per the official spec: https://tools.ietf.org/html/rfc8259
+//
+// The resulting JSON will always be the same length as the input and it will
+// include all of the same line breaks at matching offsets. This is to ensure
+// the result can be later processed by a external parser and that that
+// parser will report messages or errors with the correct offsets.
+func Spec(src []byte) []byte {
+ return spec(src, nil)
+}
+
+// SpecInPlace is the same as Spec, but this method reuses the input json
+// buffer to avoid allocations. Do not use the original bytes slice upon return.
+func SpecInPlace(src []byte) []byte {
+ return spec(src, src)
+}
+
+func spec(src, dst []byte) []byte {
+ dst = dst[:0]
+ for i := 0; i < len(src); i++ {
+ if src[i] == '/' {
+ if i < len(src)-1 {
+ if src[i+1] == '/' {
+ dst = append(dst, ' ', ' ')
+ i += 2
+ for ; i < len(src); i++ {
+ if src[i] == '\n' {
+ dst = append(dst, '\n')
+ break
+ } else if src[i] == '\t' || src[i] == '\r' {
+ dst = append(dst, src[i])
+ } else {
+ dst = append(dst, ' ')
+ }
+ }
+ continue
+ }
+ if src[i+1] == '*' {
+ dst = append(dst, ' ', ' ')
+ i += 2
+ for ; i < len(src)-1; i++ {
+ if src[i] == '*' && src[i+1] == '/' {
+ dst = append(dst, ' ', ' ')
+ i++
+ break
+ } else if src[i] == '\n' || src[i] == '\t' ||
+ src[i] == '\r' {
+ dst = append(dst, src[i])
+ } else {
+ dst = append(dst, ' ')
+ }
+ }
+ continue
+ }
+ }
+ }
+ dst = append(dst, src[i])
+ if src[i] == '"' {
+ for i = i + 1; i < len(src); i++ {
+ dst = append(dst, src[i])
+ if src[i] == '"' {
+ j := i - 1
+ for ; ; j-- {
+ if src[j] != '\\' {
+ break
+ }
+ }
+ if (j-i)%2 != 0 {
+ break
+ }
+ }
+ }
+ } else if src[i] == '}' || src[i] == ']' {
+ for j := len(dst) - 2; j >= 0; j-- {
+ if dst[j] <= ' ' {
+ continue
+ }
+ if dst[j] == ',' {
+ dst[j] = ' '
+ }
+ break
+ }
+ }
+ }
+ return dst
+}
diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/schema.go b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go
index 21eb51a5..a1977949 100644
--- a/vendor/github.com/vektah/gqlparser/v2/validator/schema.go
+++ b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go
@@ -382,7 +382,7 @@ func validateDirectives(schema *Schema, dirs DirectiveList, location DirectiveLo
}
}
for _, schemaArg := range dirDefinition.Arguments {
- if schemaArg.Type.NonNull {
+ if schemaArg.Type.NonNull && schemaArg.DefaultValue == nil {
if arg := dir.Arguments.ForName(schemaArg.Name); arg == nil || arg.Value.Kind == NullValue {
return gqlerror.ErrorPosf(dir.Position, "Argument %s for directive %s cannot be null.", schemaArg.Name, dir.Name)
}
diff --git a/vendor/github.com/zeebo/blake3/.gitignore b/vendor/github.com/zeebo/blake3/.gitignore
new file mode 100644
index 00000000..c6bfdf2c
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/.gitignore
@@ -0,0 +1,6 @@
+*.pprof
+*.test
+*.txt
+*.out
+
+/upstream
diff --git a/vendor/github.com/zeebo/blake3/LICENSE b/vendor/github.com/zeebo/blake3/LICENSE
new file mode 100644
index 00000000..3a63575d
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/LICENSE
@@ -0,0 +1,125 @@
+This work is released into the public domain with CC0 1.0.
+
+-------------------------------------------------------------------------------
+
+Creative Commons Legal Code
+
+CC0 1.0 Universal
+
+ CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
+ LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
+ ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
+ INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
+ REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
+ PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
+ THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
+ HEREUNDER.
+
+Statement of Purpose
+
+The laws of most jurisdictions throughout the world automatically confer
+exclusive Copyright and Related Rights (defined below) upon the creator
+and subsequent owner(s) (each and all, an "owner") of an original work of
+authorship and/or a database (each, a "Work").
+
+Certain owners wish to permanently relinquish those rights to a Work for
+the purpose of contributing to a commons of creative, cultural and
+scientific works ("Commons") that the public can reliably and without fear
+of later claims of infringement build upon, modify, incorporate in other
+works, reuse and redistribute as freely as possible in any form whatsoever
+and for any purposes, including without limitation commercial purposes.
+These owners may contribute to the Commons to promote the ideal of a free
+culture and the further production of creative, cultural and scientific
+works, or to gain reputation or greater distribution for their Work in
+part through the use and efforts of others.
+
+For these and/or other purposes and motivations, and without any
+expectation of additional consideration or compensation, the person
+associating CC0 with a Work (the "Affirmer"), to the extent that he or she
+is an owner of Copyright and Related Rights in the Work, voluntarily
+elects to apply CC0 to the Work and publicly distribute the Work under its
+terms, with knowledge of his or her Copyright and Related Rights in the
+Work and the meaning and intended legal effect of CC0 on those rights.
+
+1. Copyright and Related Rights. A Work made available under CC0 may be
+protected by copyright and related or neighboring rights ("Copyright and
+Related Rights"). Copyright and Related Rights include, but are not
+limited to, the following:
+
+ i. the right to reproduce, adapt, distribute, perform, display,
+ communicate, and translate a Work;
+ ii. moral rights retained by the original author(s) and/or performer(s);
+iii. publicity and privacy rights pertaining to a person's image or
+ likeness depicted in a Work;
+ iv. rights protecting against unfair competition in regards to a Work,
+ subject to the limitations in paragraph 4(a), below;
+ v. rights protecting the extraction, dissemination, use and reuse of data
+ in a Work;
+ vi. database rights (such as those arising under Directive 96/9/EC of the
+ European Parliament and of the Council of 11 March 1996 on the legal
+ protection of databases, and under any national implementation
+ thereof, including any amended or successor version of such
+ directive); and
+vii. other similar, equivalent or corresponding rights throughout the
+ world based on applicable law or treaty, and any national
+ implementations thereof.
+
+2. Waiver. To the greatest extent permitted by, but not in contravention
+of, applicable law, Affirmer hereby overtly, fully, permanently,
+irrevocably and unconditionally waives, abandons, and surrenders all of
+Affirmer's Copyright and Related Rights and associated claims and causes
+of action, whether now known or unknown (including existing as well as
+future claims and causes of action), in the Work (i) in all territories
+worldwide, (ii) for the maximum duration provided by applicable law or
+treaty (including future time extensions), (iii) in any current or future
+medium and for any number of copies, and (iv) for any purpose whatsoever,
+including without limitation commercial, advertising or promotional
+purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
+member of the public at large and to the detriment of Affirmer's heirs and
+successors, fully intending that such Waiver shall not be subject to
+revocation, rescission, cancellation, termination, or any other legal or
+equitable action to disrupt the quiet enjoyment of the Work by the public
+as contemplated by Affirmer's express Statement of Purpose.
+
+3. Public License Fallback. Should any part of the Waiver for any reason
+be judged legally invalid or ineffective under applicable law, then the
+Waiver shall be preserved to the maximum extent permitted taking into
+account Affirmer's express Statement of Purpose. In addition, to the
+extent the Waiver is so judged Affirmer hereby grants to each affected
+person a royalty-free, non transferable, non sublicensable, non exclusive,
+irrevocable and unconditional license to exercise Affirmer's Copyright and
+Related Rights in the Work (i) in all territories worldwide, (ii) for the
+maximum duration provided by applicable law or treaty (including future
+time extensions), (iii) in any current or future medium and for any number
+of copies, and (iv) for any purpose whatsoever, including without
+limitation commercial, advertising or promotional purposes (the
+"License"). The License shall be deemed effective as of the date CC0 was
+applied by Affirmer to the Work. Should any part of the License for any
+reason be judged legally invalid or ineffective under applicable law, such
+partial invalidity or ineffectiveness shall not invalidate the remainder
+of the License, and in such case Affirmer hereby affirms that he or she
+will not (i) exercise any of his or her remaining Copyright and Related
+Rights in the Work or (ii) assert any associated claims and causes of
+action with respect to the Work, in either case contrary to Affirmer's
+express Statement of Purpose.
+
+4. Limitations and Disclaimers.
+
+ a. No trademark or patent rights held by Affirmer are waived, abandoned,
+ surrendered, licensed or otherwise affected by this document.
+ b. Affirmer offers the Work as-is and makes no representations or
+ warranties of any kind concerning the Work, express, implied,
+ statutory or otherwise, including without limitation warranties of
+ title, merchantability, fitness for a particular purpose, non
+ infringement, or the absence of latent or other defects, accuracy, or
+ the present or absence of errors, whether or not discoverable, all to
+ the greatest extent permissible under applicable law.
+ c. Affirmer disclaims responsibility for clearing rights of other persons
+ that may apply to the Work or any use thereof, including without
+ limitation any person's Copyright and Related Rights in the Work.
+ Further, Affirmer disclaims responsibility for obtaining any necessary
+ consents, permissions or other rights required for any use of the
+ Work.
+ d. Affirmer understands and acknowledges that Creative Commons is not a
+ party to this document and has no duty or obligation with respect to
+ this CC0 or use of the Work.
diff --git a/vendor/github.com/zeebo/blake3/Makefile b/vendor/github.com/zeebo/blake3/Makefile
new file mode 100644
index 00000000..b96623be
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/Makefile
@@ -0,0 +1,34 @@
+asm: internal/alg/hash/hash_avx2/impl_amd64.s internal/alg/compress/compress_sse41/impl_amd64.s
+
+internal/alg/hash/hash_avx2/impl_amd64.s: avo/avx2/*.go
+ ( cd avo; go run ./avx2 ) > internal/alg/hash/hash_avx2/impl_amd64.s
+
+internal/alg/compress/compress_sse41/impl_amd64.s: avo/sse41/*.go
+ ( cd avo; go run ./sse41 ) > internal/alg/compress/compress_sse41/impl_amd64.s
+
+.PHONY: fmt
+fmt:
+ go fmt ./...
+
+.PHONY: clean
+clean:
+ rm -f internal/alg/hash/hash_avx2/impl_amd64.s
+ rm -f internal/alg/compress/compress_sse41/impl_amd64.s
+
+.PHONY: test
+test:
+ go test -race -bench=. -benchtime=1x
+
+.PHONY: vet
+vet:
+ GOOS=linux GOARCH=386 GO386=softfloat go vet ./...
+ GOOS=windows GOARCH=386 GO386=softfloat go vet ./...
+ GOOS=linux GOARCH=amd64 go vet ./...
+ GOOS=windows GOARCH=amd64 go vet ./...
+ GOOS=darwin GOARCH=amd64 go vet ./...
+ GOOS=linux GOARCH=arm go vet ./...
+ GOOS=linux GOARCH=arm64 go vet ./...
+ GOOS=windows GOARCH=arm64 go vet ./...
+ GOOS=darwin GOARCH=arm64 go vet ./...
+ GOOS=js GOARCH=wasm go vet ./...
+ GOOS=linux GOARCH=mips go vet ./...
\ No newline at end of file
diff --git a/vendor/github.com/zeebo/blake3/README.md b/vendor/github.com/zeebo/blake3/README.md
new file mode 100644
index 00000000..0a0f2e18
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/README.md
@@ -0,0 +1,77 @@
+# BLAKE3
+
+
+
+
+
+
+
+Pure Go implementation of [BLAKE3](https://blake3.io) with AVX2 and SSE4.1 acceleration.
+
+Special thanks to the excellent [avo](https://github.com/mmcloughlin/avo) making writing vectorized version much easier.
+
+# Benchmarks
+
+## Caveats
+
+This library makes some different design decisions than the upstream Rust crate around internal buffering. Specifically, because it does not target the embedded system space, nor does it support multithreading, it elects to do its own internal buffering. This means that a user does not have to worry about providing large enough buffers to get the best possible performance, but it does worse on smaller input sizes. So some notes:
+
+- The Rust benchmarks below are all single-threaded to match this Go implementation.
+- I make no attempt to get precise measurements (cpu throttling, noisy environment, etc.) so please benchmark on your own systems.
+- These benchmarks are run on an i7-6700K which does not support AVX-512, so Rust is limited to use AVX2 at sizes above 8 kib.
+- I tried my best to make them benchmark the same thing, but who knows? :smile:
+
+## Charts
+
+In this case, both libraries are able to avoid a lot of data copying and will use vectorized instructions to hash as fast as possible, and perform similarly.
+
+
+
+For incremental writes, you must provide the Rust version large enough buffers so that it can use vectorized instructions. This Go library performs consistently regardless of the size being sent into the update function.
+
+
+
+The downside of internal buffering is most apparent with small sizes as most time is spent initializing the hasher state. In terms of hashing rate, the difference is 3-4x, but in an absolute sense it's ~100ns (see tables below). If you wish to hash a large number of very small strings and you care about those nanoseconds, be sure to use the Reset method to avoid re-initializing the state.
+
+
+
+## Timing Tables
+
+### Small
+
+| Size | Full Buffer | Reset | | Full Buffer Rate | Reset Rate |
+|--------|-------------|------------|-|------------------|--------------|
+| 64 b | `205ns` | `86.5ns` | | `312MB/s` | `740MB/s` |
+| 256 b | `364ns` | `250ns` | | `703MB/s` | `1.03GB/s` |
+| 512 b | `575ns` | `468ns` | | `892MB/s` | `1.10GB/s` |
+| 768 b | `795ns` | `682ns` | | `967MB/s` | `1.13GB/s` |
+
+### Large
+
+| Size | Incremental | Full Buffer | Reset | | Incremental Rate | Full Buffer Rate | Reset Rate |
+|----------|-------------|-------------|------------|-|------------------|------------------|--------------|
+| 1 kib | `1.02µs` | `1.01µs` | `891ns` | | `1.00GB/s` | `1.01GB/s` | `1.15GB/s` |
+| 2 kib | `2.11µs` | `2.07µs` | `1.95µs` | | `968MB/s` | `990MB/s` | `1.05GB/s` |
+| 4 kib | `2.28µs` | `2.15µs` | `2.05µs` | | `1.80GB/s` | `1.90GB/s` | `2.00GB/s` |
+| 8 kib | `2.64µs` | `2.52µs` | `2.44µs` | | `3.11GB/s` | `3.25GB/s` | `3.36GB/s` |
+| 16 kib | `4.93µs` | `4.54µs` | `4.48µs` | | `3.33GB/s` | `3.61GB/s` | `3.66GB/s` |
+| 32 kib | `9.41µs` | `8.62µs` | `8.54µs` | | `3.48GB/s` | `3.80GB/s` | `3.84GB/s` |
+| 64 kib | `18.2µs` | `16.7µs` | `16.6µs` | | `3.59GB/s` | `3.91GB/s` | `3.94GB/s` |
+| 128 kib | `36.3µs` | `32.9µs` | `33.1µs` | | `3.61GB/s` | `3.99GB/s` | `3.96GB/s` |
+| 256 kib | `72.5µs` | `65.7µs` | `66.0µs` | | `3.62GB/s` | `3.99GB/s` | `3.97GB/s` |
+| 512 kib | `145µs` | `131µs` | `132µs` | | `3.60GB/s` | `4.00GB/s` | `3.97GB/s` |
+| 1024 kib | `290µs` | `262µs` | `262µs` | | `3.62GB/s` | `4.00GB/s` | `4.00GB/s` |
+
+### No ASM
+
+| Size | Incremental | Full Buffer | Reset | | Incremental Rate | Full Buffer Rate | Reset Rate |
+|----------|-------------|-------------|------------|-|------------------|------------------|-------------|
+| 64 b | `253ns` | `254ns` | `134ns` | | `253MB/s` | `252MB/s` | `478MB/s` |
+| 256 b | `553ns` | `557ns` | `441ns` | | `463MB/s` | `459MB/s` | `580MB/s` |
+| 512 b | `948ns` | `953ns` | `841ns` | | `540MB/s` | `538MB/s` | `609MB/s` |
+| 768 b | `1.38µs` | `1.40µs` | `1.35µs` | | `558MB/s` | `547MB/s` | `570MB/s` |
+| 1 kib | `1.77µs` | `1.77µs` | `1.70µs` | | `577MB/s` | `580MB/s` | `602MB/s` |
+| | | | | | | | |
+| 1024 kib | `880µs` | `883µs` | `878µs` | | `596MB/s` | `595MB/s` | `598MB/s` |
+
+The speed caps out at around 1 kib, so most rows have been elided from the presentation.
diff --git a/vendor/github.com/zeebo/blake3/api.go b/vendor/github.com/zeebo/blake3/api.go
new file mode 100644
index 00000000..0eed13cc
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/api.go
@@ -0,0 +1,165 @@
+// Package blake3 provides an SSE4.1/AVX2 accelerated BLAKE3 implementation.
+package blake3
+
+import (
+ "errors"
+
+ "github.com/zeebo/blake3/internal/consts"
+ "github.com/zeebo/blake3/internal/utils"
+)
+
+// Hasher is a hash.Hash for BLAKE3.
+type Hasher struct {
+ size int
+ h hasher
+}
+
+// New returns a new Hasher that has a digest size of 32 bytes.
+//
+// If you need more or less output bytes than that, use the Digest method.
+func New() *Hasher {
+ return &Hasher{
+ size: 32,
+ h: hasher{
+ key: consts.IV,
+ },
+ }
+}
+
+// NewKeyed returns a new Hasher that uses the 32 byte input key and has
+// a digest size of 32 bytes.
+//
+// If you need more or less output bytes than that, use the Digest method.
+func NewKeyed(key []byte) (*Hasher, error) {
+ if len(key) != 32 {
+ return nil, errors.New("invalid key size")
+ }
+
+ h := &Hasher{
+ size: 32,
+ h: hasher{
+ flags: consts.Flag_Keyed,
+ },
+ }
+ utils.KeyFromBytes(key, &h.h.key)
+
+ return h, nil
+}
+
+// DeriveKey derives a key based on reusable key material of any
+// length, in the given context. The key will be stored in out, using
+// all of its current length.
+//
+// Context strings must be hardcoded constants, and the recommended
+// format is "[application] [commit timestamp] [purpose]", e.g.,
+// "example.com 2019-12-25 16:18:03 session tokens v1".
+func DeriveKey(context string, material []byte, out []byte) {
+ h := NewDeriveKey(context)
+ _, _ = h.Write(material)
+ _, _ = h.Digest().Read(out)
+}
+
+// NewDeriveKey returns a Hasher that is initialized with the context
+// string. See DeriveKey for details. It has a digest size of 32 bytes.
+//
+// If you need more or less output bytes than that, use the Digest method.
+func NewDeriveKey(context string) *Hasher {
+ // hash the context string and use that instead of IV
+ h := &Hasher{
+ size: 32,
+ h: hasher{
+ key: consts.IV,
+ flags: consts.Flag_DeriveKeyContext,
+ },
+ }
+
+ var buf [32]byte
+ _, _ = h.WriteString(context)
+ _, _ = h.Digest().Read(buf[:])
+
+ h.Reset()
+ utils.KeyFromBytes(buf[:], &h.h.key)
+ h.h.flags = consts.Flag_DeriveKeyMaterial
+
+ return h
+}
+
+// Write implements part of the hash.Hash interface. It never returns an error.
+func (h *Hasher) Write(p []byte) (int, error) {
+ h.h.update(p)
+ return len(p), nil
+}
+
+// WriteString is like Write but specialized to strings to avoid allocations.
+func (h *Hasher) WriteString(p string) (int, error) {
+ h.h.updateString(p)
+ return len(p), nil
+}
+
+// Reset implements part of the hash.Hash interface. It causes the Hasher to
+// act as if it was newly created.
+func (h *Hasher) Reset() {
+ h.h.reset()
+}
+
+// Clone returns a new Hasher with the same internal state.
+//
+// Modifying the resulting Hasher will not modify the original Hasher, and vice versa.
+func (h *Hasher) Clone() *Hasher {
+ return &Hasher{size: h.size, h: h.h}
+}
+
+// Size implements part of the hash.Hash interface. It returns the number of
+// bytes the hash will output in Sum.
+func (h *Hasher) Size() int {
+ return h.size
+}
+
+// BlockSize implements part of the hash.Hash interface. It returns the most
+// natural size to write to the Hasher.
+func (h *Hasher) BlockSize() int {
+ return 64
+}
+
+// Sum implements part of the hash.Hash interface. It appends the digest of
+// the Hasher to the provided buffer and returns it.
+func (h *Hasher) Sum(b []byte) []byte {
+ if top := len(b) + h.size; top <= cap(b) && top >= len(b) {
+ h.h.finalize(b[len(b):top])
+ return b[:top]
+ }
+
+ tmp := make([]byte, h.size)
+ h.h.finalize(tmp)
+ return append(b, tmp...)
+}
+
+// Digest takes a snapshot of the hash state and returns an object that can
+// be used to read and seek through 2^64 bytes of digest output.
+func (h *Hasher) Digest() *Digest {
+ var d Digest
+ h.h.finalizeDigest(&d)
+ return &d
+}
+
+// Sum256 returns the first 256 bits of the unkeyed digest of the data.
+func Sum256(data []byte) (sum [32]byte) {
+ out := Sum512(data)
+ copy(sum[:], out[:32])
+ return sum
+}
+
+// Sum512 returns the first 512 bits of the unkeyed digest of the data.
+func Sum512(data []byte) (sum [64]byte) {
+ if len(data) <= consts.ChunkLen {
+ var d Digest
+ compressAll(&d, data, 0, consts.IV)
+ _, _ = d.Read(sum[:])
+ return sum
+ } else {
+ h := hasher{key: consts.IV}
+ h.update(data)
+ h.finalize(sum[:])
+ return sum
+ }
+}
diff --git a/vendor/github.com/zeebo/blake3/blake3.go b/vendor/github.com/zeebo/blake3/blake3.go
new file mode 100644
index 00000000..b18d7eaa
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/blake3.go
@@ -0,0 +1,285 @@
+package blake3
+
+import (
+ "math/bits"
+ "unsafe"
+
+ "github.com/zeebo/blake3/internal/alg"
+ "github.com/zeebo/blake3/internal/consts"
+ "github.com/zeebo/blake3/internal/utils"
+)
+
+//
+// hasher contains state for a blake3 hash
+//
+
+type hasher struct {
+ len uint64
+ chunks uint64
+ flags uint32
+ key [8]uint32
+ stack cvstack
+ buf [8192]byte
+}
+
+func (a *hasher) reset() {
+ a.len = 0
+ a.chunks = 0
+ a.stack.occ = 0
+ a.stack.lvls = [8]uint8{}
+ a.stack.bufn = 0
+}
+
+func (a *hasher) update(buf []byte) {
+ // relies on the first two words of a string being the same as a slice
+ a.updateString(*(*string)(unsafe.Pointer(&buf)))
+}
+
+func (a *hasher) updateString(buf string) {
+ var input *[8192]byte
+
+ for len(buf) > 0 {
+ if a.len == 0 && len(buf) > 8192 {
+ // relies on the data pointer being the first word in the string header
+ input = (*[8192]byte)(*(*unsafe.Pointer)(unsafe.Pointer(&buf)))
+ buf = buf[8192:]
+ } else if a.len < 8192 {
+ n := copy(a.buf[a.len:], buf)
+ a.len += uint64(n)
+ buf = buf[n:]
+ continue
+ } else {
+ input = &a.buf
+ }
+
+ a.consume(input)
+ a.len = 0
+ a.chunks += 8
+ }
+}
+
+func (a *hasher) consume(input *[8192]byte) {
+ var out chainVector
+ var chain [8]uint32
+ alg.HashF(input, 8192, a.chunks, a.flags, &a.key, &out, &chain)
+ a.stack.pushN(0, &out, 8, a.flags, &a.key)
+}
+
+func (a *hasher) finalize(p []byte) {
+ var d Digest
+ a.finalizeDigest(&d)
+ _, _ = d.Read(p)
+}
+
+func (a *hasher) finalizeDigest(d *Digest) {
+ if a.chunks == 0 && a.len <= consts.ChunkLen {
+ compressAll(d, a.buf[:a.len], a.flags, a.key)
+ return
+ }
+
+ d.chain = a.key
+ d.flags = a.flags | consts.Flag_ChunkEnd
+
+ if a.len > 64 {
+ var buf chainVector
+ alg.HashF(&a.buf, a.len, a.chunks, a.flags, &a.key, &buf, &d.chain)
+
+ if a.len > consts.ChunkLen {
+ complete := (a.len - 1) / consts.ChunkLen
+ a.stack.pushN(0, &buf, int(complete), a.flags, &a.key)
+ a.chunks += complete
+ a.len = uint64(copy(a.buf[:], a.buf[complete*consts.ChunkLen:a.len]))
+ }
+ }
+
+ if a.len <= 64 {
+ d.flags |= consts.Flag_ChunkStart
+ }
+
+ d.counter = a.chunks
+ d.blen = uint32(a.len) % 64
+
+ base := a.len / 64 * 64
+ if a.len > 0 && d.blen == 0 {
+ d.blen = 64
+ base -= 64
+ }
+
+ if consts.OptimizeLittleEndian {
+ copy((*[64]byte)(unsafe.Pointer(&d.block[0]))[:], a.buf[base:a.len])
+ } else {
+ var tmp [64]byte
+ copy(tmp[:], a.buf[base:a.len])
+ utils.BytesToWords(&tmp, &d.block)
+ }
+
+ for a.stack.bufn > 0 {
+ a.stack.flush(a.flags, &a.key)
+ }
+
+ var tmp [16]uint32
+ for occ := a.stack.occ; occ != 0; occ &= occ - 1 {
+ col := uint(bits.TrailingZeros64(occ)) % 64
+
+ alg.Compress(&d.chain, &d.block, d.counter, d.blen, d.flags, &tmp)
+
+ *(*[8]uint32)(unsafe.Pointer(&d.block[0])) = a.stack.stack[col]
+ *(*[8]uint32)(unsafe.Pointer(&d.block[8])) = *(*[8]uint32)(unsafe.Pointer(&tmp[0]))
+
+ if occ == a.stack.occ {
+ d.chain = a.key
+ d.counter = 0
+ d.blen = consts.BlockLen
+ d.flags = a.flags | consts.Flag_Parent
+ }
+ }
+
+ d.flags |= consts.Flag_Root
+}
+
+//
+// chain value stack
+//
+
+type chainVector = [64]uint32
+
+type cvstack struct {
+ occ uint64 // which levels in stack are occupied
+ lvls [8]uint8 // what level the buf input was in
+ bufn int // how many pairs are loaded into buf
+ buf [2]chainVector
+ stack [64][8]uint32
+}
+
+func (a *cvstack) pushN(l uint8, cv *chainVector, n int, flags uint32, key *[8]uint32) {
+ for i := 0; i < n; i++ {
+ a.pushL(l, cv, i)
+ for a.bufn == 8 {
+ a.flush(flags, key)
+ }
+ }
+}
+
+func (a *cvstack) pushL(l uint8, cv *chainVector, n int) {
+ bit := uint64(1) << (l & 63)
+ if a.occ&bit == 0 {
+ readChain(cv, n, &a.stack[l&63])
+ a.occ ^= bit
+ return
+ }
+
+ a.lvls[a.bufn&7] = l
+ writeChain(&a.stack[l&63], &a.buf[0], a.bufn)
+ copyChain(cv, n, &a.buf[1], a.bufn)
+ a.bufn++
+ a.occ ^= bit
+}
+
+func (a *cvstack) flush(flags uint32, key *[8]uint32) {
+ var out chainVector
+ alg.HashP(&a.buf[0], &a.buf[1], flags|consts.Flag_Parent, key, &out, a.bufn)
+
+ bufn, lvls := a.bufn, a.lvls
+ a.bufn, a.lvls = 0, [8]uint8{}
+
+ for i := 0; i < bufn; i++ {
+ a.pushL(lvls[i]+1, &out, i)
+ }
+}
+
+//
+// helpers to deal with reading/writing transposed values
+//
+
+func copyChain(in *chainVector, icol int, out *chainVector, ocol int) {
+ type u = uintptr
+ type p = unsafe.Pointer
+ type a = *uint32
+
+ i := p(u(p(in)) + u(icol*4))
+ o := p(u(p(out)) + u(ocol*4))
+
+ *a(p(u(o) + 0*32)) = *a(p(u(i) + 0*32))
+ *a(p(u(o) + 1*32)) = *a(p(u(i) + 1*32))
+ *a(p(u(o) + 2*32)) = *a(p(u(i) + 2*32))
+ *a(p(u(o) + 3*32)) = *a(p(u(i) + 3*32))
+ *a(p(u(o) + 4*32)) = *a(p(u(i) + 4*32))
+ *a(p(u(o) + 5*32)) = *a(p(u(i) + 5*32))
+ *a(p(u(o) + 6*32)) = *a(p(u(i) + 6*32))
+ *a(p(u(o) + 7*32)) = *a(p(u(i) + 7*32))
+}
+
+func readChain(in *chainVector, col int, out *[8]uint32) {
+ type u = uintptr
+ type p = unsafe.Pointer
+ type a = *uint32
+
+ i := p(u(p(in)) + u(col*4))
+
+ out[0] = *a(p(u(i) + 0*32))
+ out[1] = *a(p(u(i) + 1*32))
+ out[2] = *a(p(u(i) + 2*32))
+ out[3] = *a(p(u(i) + 3*32))
+ out[4] = *a(p(u(i) + 4*32))
+ out[5] = *a(p(u(i) + 5*32))
+ out[6] = *a(p(u(i) + 6*32))
+ out[7] = *a(p(u(i) + 7*32))
+}
+
+func writeChain(in *[8]uint32, out *chainVector, col int) {
+ type u = uintptr
+ type p = unsafe.Pointer
+ type a = *uint32
+
+ o := p(u(p(out)) + u(col*4))
+
+ *a(p(u(o) + 0*32)) = in[0]
+ *a(p(u(o) + 1*32)) = in[1]
+ *a(p(u(o) + 2*32)) = in[2]
+ *a(p(u(o) + 3*32)) = in[3]
+ *a(p(u(o) + 4*32)) = in[4]
+ *a(p(u(o) + 5*32)) = in[5]
+ *a(p(u(o) + 6*32)) = in[6]
+ *a(p(u(o) + 7*32)) = in[7]
+}
+
+//
+// compress <= chunkLen bytes in one shot
+//
+
+func compressAll(d *Digest, in []byte, flags uint32, key [8]uint32) {
+ var compressed [16]uint32
+
+ d.chain = key
+ d.flags = flags | consts.Flag_ChunkStart
+
+ for len(in) > 64 {
+ buf := (*[64]byte)(unsafe.Pointer(&in[0]))
+
+ var block *[16]uint32
+ if consts.OptimizeLittleEndian {
+ block = (*[16]uint32)(unsafe.Pointer(buf))
+ } else {
+ block = &d.block
+ utils.BytesToWords(buf, block)
+ }
+
+ alg.Compress(&d.chain, block, 0, consts.BlockLen, d.flags, &compressed)
+
+ d.chain = *(*[8]uint32)(unsafe.Pointer(&compressed[0]))
+ d.flags &^= consts.Flag_ChunkStart
+
+ in = in[64:]
+ }
+
+ if consts.OptimizeLittleEndian {
+ copy((*[64]byte)(unsafe.Pointer(&d.block[0]))[:], in)
+ } else {
+ var tmp [64]byte
+ copy(tmp[:], in)
+ utils.BytesToWords(&tmp, &d.block)
+ }
+
+ d.blen = uint32(len(in))
+ d.flags |= consts.Flag_ChunkEnd | consts.Flag_Root
+}
diff --git a/vendor/github.com/zeebo/blake3/digest.go b/vendor/github.com/zeebo/blake3/digest.go
new file mode 100644
index 00000000..4c511fbd
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/digest.go
@@ -0,0 +1,100 @@
+package blake3
+
+import (
+ "fmt"
+ "io"
+ "unsafe"
+
+ "github.com/zeebo/blake3/internal/alg"
+ "github.com/zeebo/blake3/internal/consts"
+ "github.com/zeebo/blake3/internal/utils"
+)
+
+// Digest captures the state of a Hasher allowing reading and seeking through
+// the output stream.
+type Digest struct {
+ counter uint64
+ chain [8]uint32
+ block [16]uint32
+ blen uint32
+ flags uint32
+ buf [16]uint32
+ bufn int
+}
+
+// Read reads data from the hasher into out. It always fills the entire buffer and
+// never errors. The stream will wrap around when reading past 2^64 bytes.
+func (d *Digest) Read(p []byte) (n int, err error) {
+ n = len(p)
+
+ if d.bufn > 0 {
+ n := d.slowCopy(p)
+ p = p[n:]
+ d.bufn -= n
+ }
+
+ for len(p) >= 64 {
+ d.fillBuf()
+
+ if consts.OptimizeLittleEndian {
+ *(*[64]byte)(unsafe.Pointer(&p[0])) = *(*[64]byte)(unsafe.Pointer(&d.buf[0]))
+ } else {
+ utils.WordsToBytes(&d.buf, p)
+ }
+
+ p = p[64:]
+ d.bufn = 0
+ }
+
+ if len(p) == 0 {
+ return n, nil
+ }
+
+ d.fillBuf()
+ d.bufn -= d.slowCopy(p)
+
+ return n, nil
+}
+
+// Seek sets the position to the provided location. Only SeekStart and
+// SeekCurrent are allowed.
+func (d *Digest) Seek(offset int64, whence int) (int64, error) {
+ switch whence {
+ case io.SeekStart:
+ case io.SeekEnd:
+ return 0, fmt.Errorf("seek from end not supported")
+ case io.SeekCurrent:
+ offset += int64(consts.BlockLen*d.counter) - int64(d.bufn)
+ default:
+ return 0, fmt.Errorf("invalid whence: %d", whence)
+ }
+ if offset < 0 {
+ return 0, fmt.Errorf("seek before start")
+ }
+ d.setPosition(uint64(offset))
+ return offset, nil
+}
+
+func (d *Digest) setPosition(pos uint64) {
+ d.counter = pos / consts.BlockLen
+ d.fillBuf()
+ d.bufn -= int(pos % consts.BlockLen)
+}
+
+func (d *Digest) slowCopy(p []byte) (n int) {
+ off := uint(consts.BlockLen-d.bufn) % consts.BlockLen
+ if consts.OptimizeLittleEndian {
+ n = copy(p, (*[consts.BlockLen]byte)(unsafe.Pointer(&d.buf[0]))[off:])
+ } else {
+ var tmp [consts.BlockLen]byte
+ utils.WordsToBytes(&d.buf, tmp[:])
+ n = copy(p, tmp[off:])
+ }
+ return n
+}
+
+func (d *Digest) fillBuf() {
+ alg.Compress(&d.chain, &d.block, d.counter, d.blen, d.flags, &d.buf)
+ d.counter++
+ d.bufn = consts.BlockLen
+}
diff --git a/vendor/github.com/zeebo/blake3/internal/alg/alg.go b/vendor/github.com/zeebo/blake3/internal/alg/alg.go
new file mode 100644
index 00000000..239fdec5
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/alg/alg.go
@@ -0,0 +1,18 @@
+package alg
+
+import (
+ "github.com/zeebo/blake3/internal/alg/compress"
+ "github.com/zeebo/blake3/internal/alg/hash"
+)
+
+func HashF(input *[8192]byte, length, counter uint64, flags uint32, key *[8]uint32, out *[64]uint32, chain *[8]uint32) {
+ hash.HashF(input, length, counter, flags, key, out, chain)
+}
+
+func HashP(left, right *[64]uint32, flags uint32, key *[8]uint32, out *[64]uint32, n int) {
+ hash.HashP(left, right, flags, key, out, n)
+}
+
+func Compress(chain *[8]uint32, block *[16]uint32, counter uint64, blen uint32, flags uint32, out *[16]uint32) {
+ compress.Compress(chain, block, counter, blen, flags, out)
+}
diff --git a/vendor/github.com/zeebo/blake3/internal/alg/compress/compress.go b/vendor/github.com/zeebo/blake3/internal/alg/compress/compress.go
new file mode 100644
index 00000000..0b268540
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/alg/compress/compress.go
@@ -0,0 +1,15 @@
+package compress
+
+import (
+ "github.com/zeebo/blake3/internal/alg/compress/compress_pure"
+ "github.com/zeebo/blake3/internal/alg/compress/compress_sse41"
+ "github.com/zeebo/blake3/internal/consts"
+)
+
+func Compress(chain *[8]uint32, block *[16]uint32, counter uint64, blen uint32, flags uint32, out *[16]uint32) {
+ if consts.HasSSE41 {
+ compress_sse41.Compress(chain, block, counter, blen, flags, out)
+ } else {
+ compress_pure.Compress(chain, block, counter, blen, flags, out)
+ }
+}
diff --git a/vendor/github.com/zeebo/blake3/internal/alg/compress/compress_pure/compress.go b/vendor/github.com/zeebo/blake3/internal/alg/compress/compress_pure/compress.go
new file mode 100644
index 00000000..66ea1fb7
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/alg/compress/compress_pure/compress.go
@@ -0,0 +1,135 @@
+package compress_pure
+
+import (
+ "math/bits"
+
+ "github.com/zeebo/blake3/internal/consts"
+)
+
+func Compress(
+ chain *[8]uint32,
+ block *[16]uint32,
+ counter uint64,
+ blen uint32,
+ flags uint32,
+ out *[16]uint32,
+) {
+
+ *out = [16]uint32{
+ chain[0], chain[1], chain[2], chain[3],
+ chain[4], chain[5], chain[6], chain[7],
+ consts.IV0, consts.IV1, consts.IV2, consts.IV3,
+ uint32(counter), uint32(counter >> 32), blen, flags,
+ }
+
+ rcompress(out, block)
+}
+
+func g(a, b, c, d, mx, my uint32) (uint32, uint32, uint32, uint32) {
+ a += b + mx
+ d = bits.RotateLeft32(d^a, -16)
+ c += d
+ b = bits.RotateLeft32(b^c, -12)
+ a += b + my
+ d = bits.RotateLeft32(d^a, -8)
+ c += d
+ b = bits.RotateLeft32(b^c, -7)
+ return a, b, c, d
+}
+
+func rcompress(s *[16]uint32, m *[16]uint32) {
+ const (
+ a = 10
+ b = 11
+ c = 12
+ d = 13
+ e = 14
+ f = 15
+ )
+
+ s0, s1, s2, s3 := s[0+0], s[0+1], s[0+2], s[0+3]
+ s4, s5, s6, s7 := s[0+4], s[0+5], s[0+6], s[0+7]
+ s8, s9, sa, sb := s[8+0], s[8+1], s[8+2], s[8+3]
+ sc, sd, se, sf := s[8+4], s[8+5], s[8+6], s[8+7]
+
+ s0, s4, s8, sc = g(s0, s4, s8, sc, m[0], m[1])
+ s1, s5, s9, sd = g(s1, s5, s9, sd, m[2], m[3])
+ s2, s6, sa, se = g(s2, s6, sa, se, m[4], m[5])
+ s3, s7, sb, sf = g(s3, s7, sb, sf, m[6], m[7])
+ s0, s5, sa, sf = g(s0, s5, sa, sf, m[8], m[9])
+ s1, s6, sb, sc = g(s1, s6, sb, sc, m[a], m[b])
+ s2, s7, s8, sd = g(s2, s7, s8, sd, m[c], m[d])
+ s3, s4, s9, se = g(s3, s4, s9, se, m[e], m[f])
+
+ s0, s4, s8, sc = g(s0, s4, s8, sc, m[2], m[6])
+ s1, s5, s9, sd = g(s1, s5, s9, sd, m[3], m[a])
+ s2, s6, sa, se = g(s2, s6, sa, se, m[7], m[0])
+ s3, s7, sb, sf = g(s3, s7, sb, sf, m[4], m[d])
+ s0, s5, sa, sf = g(s0, s5, sa, sf, m[1], m[b])
+ s1, s6, sb, sc = g(s1, s6, sb, sc, m[c], m[5])
+ s2, s7, s8, sd = g(s2, s7, s8, sd, m[9], m[e])
+ s3, s4, s9, se = g(s3, s4, s9, se, m[f], m[8])
+
+ s0, s4, s8, sc = g(s0, s4, s8, sc, m[3], m[4])
+ s1, s5, s9, sd = g(s1, s5, s9, sd, m[a], m[c])
+ s2, s6, sa, se = g(s2, s6, sa, se, m[d], m[2])
+ s3, s7, sb, sf = g(s3, s7, sb, sf, m[7], m[e])
+ s0, s5, sa, sf = g(s0, s5, sa, sf, m[6], m[5])
+ s1, s6, sb, sc = g(s1, s6, sb, sc, m[9], m[0])
+ s2, s7, s8, sd = g(s2, s7, s8, sd, m[b], m[f])
+ s3, s4, s9, se = g(s3, s4, s9, se, m[8], m[1])
+
+ s0, s4, s8, sc = g(s0, s4, s8, sc, m[a], m[7])
+ s1, s5, s9, sd = g(s1, s5, s9, sd, m[c], m[9])
+ s2, s6, sa, se = g(s2, s6, sa, se, m[e], m[3])
+ s3, s7, sb, sf = g(s3, s7, sb, sf, m[d], m[f])
+ s0, s5, sa, sf = g(s0, s5, sa, sf, m[4], m[0])
+ s1, s6, sb, sc = g(s1, s6, sb, sc, m[b], m[2])
+ s2, s7, s8, sd = g(s2, s7, s8, sd, m[5], m[8])
+ s3, s4, s9, se = g(s3, s4, s9, se, m[1], m[6])
+
+ s0, s4, s8, sc = g(s0, s4, s8, sc, m[c], m[d])
+ s1, s5, s9, sd = g(s1, s5, s9, sd, m[9], m[b])
+ s2, s6, sa, se = g(s2, s6, sa, se, m[f], m[a])
+ s3, s7, sb, sf = g(s3, s7, sb, sf, m[e], m[8])
+ s0, s5, sa, sf = g(s0, s5, sa, sf, m[7], m[2])
+ s1, s6, sb, sc = g(s1, s6, sb, sc, m[5], m[3])
+ s2, s7, s8, sd = g(s2, s7, s8, sd, m[0], m[1])
+ s3, s4, s9, se = g(s3, s4, s9, se, m[6], m[4])
+
+ s0, s4, s8, sc = g(s0, s4, s8, sc, m[9], m[e])
+ s1, s5, s9, sd = g(s1, s5, s9, sd, m[b], m[5])
+ s2, s6, sa, se = g(s2, s6, sa, se, m[8], m[c])
+ s3, s7, sb, sf = g(s3, s7, sb, sf, m[f], m[1])
+ s0, s5, sa, sf = g(s0, s5, sa, sf, m[d], m[3])
+ s1, s6, sb, sc = g(s1, s6, sb, sc, m[0], m[a])
+ s2, s7, s8, sd = g(s2, s7, s8, sd, m[2], m[6])
+ s3, s4, s9, se = g(s3, s4, s9, se, m[4], m[7])
+
+ s0, s4, s8, sc = g(s0, s4, s8, sc, m[b], m[f])
+ s1, s5, s9, sd = g(s1, s5, s9, sd, m[5], m[0])
+ s2, s6, sa, se = g(s2, s6, sa, se, m[1], m[9])
+ s3, s7, sb, sf = g(s3, s7, sb, sf, m[8], m[6])
+ s0, s5, sa, sf = g(s0, s5, sa, sf, m[e], m[a])
+ s1, s6, sb, sc = g(s1, s6, sb, sc, m[2], m[c])
+ s2, s7, s8, sd = g(s2, s7, s8, sd, m[3], m[4])
+ s3, s4, s9, se = g(s3, s4, s9, se, m[7], m[d])
+
+ s[8+0] = s8 ^ s[0]
+ s[8+1] = s9 ^ s[1]
+ s[8+2] = sa ^ s[2]
+ s[8+3] = sb ^ s[3]
+ s[8+4] = sc ^ s[4]
+ s[8+5] = sd ^ s[5]
+ s[8+6] = se ^ s[6]
+ s[8+7] = sf ^ s[7]
+
+ s[0] = s0 ^ s8
+ s[1] = s1 ^ s9
+ s[2] = s2 ^ sa
+ s[3] = s3 ^ sb
+ s[4] = s4 ^ sc
+ s[5] = s5 ^ sd
+ s[6] = s6 ^ se
+ s[7] = s7 ^ sf
+}
diff --git a/vendor/github.com/zeebo/blake3/internal/alg/compress/compress_sse41/impl_amd64.s b/vendor/github.com/zeebo/blake3/internal/alg/compress/compress_sse41/impl_amd64.s
new file mode 100644
index 00000000..321f43df
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/alg/compress/compress_sse41/impl_amd64.s
@@ -0,0 +1,560 @@
+// Code generated by command: go run compress.go. DO NOT EDIT.
+
+#include "textflag.h"
+
+DATA iv<>+0(SB)/4, $0x6a09e667
+DATA iv<>+4(SB)/4, $0xbb67ae85
+DATA iv<>+8(SB)/4, $0x3c6ef372
+DATA iv<>+12(SB)/4, $0xa54ff53a
+DATA iv<>+16(SB)/4, $0x510e527f
+DATA iv<>+20(SB)/4, $0x9b05688c
+DATA iv<>+24(SB)/4, $0x1f83d9ab
+DATA iv<>+28(SB)/4, $0x5be0cd19
+GLOBL iv<>(SB), RODATA|NOPTR, $32
+
+DATA rot16_shuf<>+0(SB)/1, $0x02
+DATA rot16_shuf<>+1(SB)/1, $0x03
+DATA rot16_shuf<>+2(SB)/1, $0x00
+DATA rot16_shuf<>+3(SB)/1, $0x01
+DATA rot16_shuf<>+4(SB)/1, $0x06
+DATA rot16_shuf<>+5(SB)/1, $0x07
+DATA rot16_shuf<>+6(SB)/1, $0x04
+DATA rot16_shuf<>+7(SB)/1, $0x05
+DATA rot16_shuf<>+8(SB)/1, $0x0a
+DATA rot16_shuf<>+9(SB)/1, $0x0b
+DATA rot16_shuf<>+10(SB)/1, $0x08
+DATA rot16_shuf<>+11(SB)/1, $0x09
+DATA rot16_shuf<>+12(SB)/1, $0x0e
+DATA rot16_shuf<>+13(SB)/1, $0x0f
+DATA rot16_shuf<>+14(SB)/1, $0x0c
+DATA rot16_shuf<>+15(SB)/1, $0x0d
+DATA rot16_shuf<>+16(SB)/1, $0x12
+DATA rot16_shuf<>+17(SB)/1, $0x13
+DATA rot16_shuf<>+18(SB)/1, $0x10
+DATA rot16_shuf<>+19(SB)/1, $0x11
+DATA rot16_shuf<>+20(SB)/1, $0x16
+DATA rot16_shuf<>+21(SB)/1, $0x17
+DATA rot16_shuf<>+22(SB)/1, $0x14
+DATA rot16_shuf<>+23(SB)/1, $0x15
+DATA rot16_shuf<>+24(SB)/1, $0x1a
+DATA rot16_shuf<>+25(SB)/1, $0x1b
+DATA rot16_shuf<>+26(SB)/1, $0x18
+DATA rot16_shuf<>+27(SB)/1, $0x19
+DATA rot16_shuf<>+28(SB)/1, $0x1e
+DATA rot16_shuf<>+29(SB)/1, $0x1f
+DATA rot16_shuf<>+30(SB)/1, $0x1c
+DATA rot16_shuf<>+31(SB)/1, $0x1d
+GLOBL rot16_shuf<>(SB), RODATA|NOPTR, $32
+
+DATA rot8_shuf<>+0(SB)/1, $0x01
+DATA rot8_shuf<>+1(SB)/1, $0x02
+DATA rot8_shuf<>+2(SB)/1, $0x03
+DATA rot8_shuf<>+3(SB)/1, $0x00
+DATA rot8_shuf<>+4(SB)/1, $0x05
+DATA rot8_shuf<>+5(SB)/1, $0x06
+DATA rot8_shuf<>+6(SB)/1, $0x07
+DATA rot8_shuf<>+7(SB)/1, $0x04
+DATA rot8_shuf<>+8(SB)/1, $0x09
+DATA rot8_shuf<>+9(SB)/1, $0x0a
+DATA rot8_shuf<>+10(SB)/1, $0x0b
+DATA rot8_shuf<>+11(SB)/1, $0x08
+DATA rot8_shuf<>+12(SB)/1, $0x0d
+DATA rot8_shuf<>+13(SB)/1, $0x0e
+DATA rot8_shuf<>+14(SB)/1, $0x0f
+DATA rot8_shuf<>+15(SB)/1, $0x0c
+DATA rot8_shuf<>+16(SB)/1, $0x11
+DATA rot8_shuf<>+17(SB)/1, $0x12
+DATA rot8_shuf<>+18(SB)/1, $0x13
+DATA rot8_shuf<>+19(SB)/1, $0x10
+DATA rot8_shuf<>+20(SB)/1, $0x15
+DATA rot8_shuf<>+21(SB)/1, $0x16
+DATA rot8_shuf<>+22(SB)/1, $0x17
+DATA rot8_shuf<>+23(SB)/1, $0x14
+DATA rot8_shuf<>+24(SB)/1, $0x19
+DATA rot8_shuf<>+25(SB)/1, $0x1a
+DATA rot8_shuf<>+26(SB)/1, $0x1b
+DATA rot8_shuf<>+27(SB)/1, $0x18
+DATA rot8_shuf<>+28(SB)/1, $0x1d
+DATA rot8_shuf<>+29(SB)/1, $0x1e
+DATA rot8_shuf<>+30(SB)/1, $0x1f
+DATA rot8_shuf<>+31(SB)/1, $0x1c
+GLOBL rot8_shuf<>(SB), RODATA|NOPTR, $32
+
+// func Compress(chain *[8]uint32, block *[16]uint32, counter uint64, blen uint32, flags uint32, out *[16]uint32)
+// Requires: SSE, SSE2, SSE4.1, SSSE3
+TEXT ·Compress(SB), NOSPLIT, $0-40
+ MOVQ chain+0(FP), AX
+ MOVQ block+8(FP), CX
+ MOVQ counter+16(FP), DX
+ MOVL blen+24(FP), BX
+ MOVL flags+28(FP), SI
+ MOVQ out+32(FP), DI
+ MOVUPS (AX), X0
+ MOVUPS 16(AX), X1
+ MOVUPS iv<>+0(SB), X2
+ PINSRD $0x00, DX, X3
+ SHRQ $0x20, DX
+ PINSRD $0x01, DX, X3
+ PINSRD $0x02, BX, X3
+ PINSRD $0x03, SI, X3
+ MOVUPS (CX), X4
+ MOVUPS 16(CX), X5
+ MOVUPS 32(CX), X6
+ MOVUPS 48(CX), X7
+ MOVUPS rot16_shuf<>+0(SB), X8
+ MOVUPS rot8_shuf<>+0(SB), X9
+
+ // round 1
+ MOVAPS X4, X10
+ SHUFPS $0x88, X5, X10
+ PADDD X10, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X11
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X11
+ POR X11, X1
+ MOVAPS X4, X4
+ SHUFPS $0xdd, X5, X4
+ PADDD X4, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X5
+ PSRLL $0x07, X1
+ PSLLL $0x19, X5
+ POR X5, X1
+ PSHUFD $0x93, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x39, X2, X2
+ MOVAPS X6, X5
+ SHUFPS $0x88, X7, X5
+ SHUFPS $0x93, X5, X5
+ PADDD X5, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X11
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X11
+ POR X11, X1
+ MOVAPS X6, X6
+ SHUFPS $0xdd, X7, X6
+ SHUFPS $0x93, X6, X6
+ PADDD X6, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X7
+ PSRLL $0x07, X1
+ PSLLL $0x19, X7
+ POR X7, X1
+ PSHUFD $0x39, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x93, X2, X2
+
+ // round 2
+ MOVAPS X10, X7
+ SHUFPS $0xd6, X4, X7
+ SHUFPS $0x39, X7, X7
+ PADDD X7, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X11
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X11
+ POR X11, X1
+ MOVAPS X5, X11
+ SHUFPS $0xfa, X6, X11
+ PSHUFD $0x0f, X10, X10
+ PBLENDW $0x33, X10, X11
+ PADDD X11, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X10
+ PSRLL $0x07, X1
+ PSLLL $0x19, X10
+ POR X10, X1
+ PSHUFD $0x93, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x39, X2, X2
+ MOVAPS X6, X12
+ PUNPCKLLQ X4, X12
+ PBLENDW $0xc0, X5, X12
+ SHUFPS $0xb4, X12, X12
+ PADDD X12, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X10
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X10
+ POR X10, X1
+ MOVAPS X4, X10
+ PUNPCKHLQ X6, X10
+ MOVAPS X5, X4
+ PUNPCKLLQ X10, X4
+ SHUFPS $0x1e, X4, X4
+ PADDD X4, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X5
+ PSRLL $0x07, X1
+ PSLLL $0x19, X5
+ POR X5, X1
+ PSHUFD $0x39, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x93, X2, X2
+
+ // round 3
+ MOVAPS X7, X5
+ SHUFPS $0xd6, X11, X5
+ SHUFPS $0x39, X5, X5
+ PADDD X5, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X6
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X6
+ POR X6, X1
+ MOVAPS X12, X6
+ SHUFPS $0xfa, X4, X6
+ PSHUFD $0x0f, X7, X7
+ PBLENDW $0x33, X7, X6
+ PADDD X6, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X7
+ PSRLL $0x07, X1
+ PSLLL $0x19, X7
+ POR X7, X1
+ PSHUFD $0x93, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x39, X2, X2
+ MOVAPS X4, X10
+ PUNPCKLLQ X11, X10
+ PBLENDW $0xc0, X12, X10
+ SHUFPS $0xb4, X10, X10
+ PADDD X10, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X7
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X7
+ POR X7, X1
+ MOVAPS X11, X7
+ PUNPCKHLQ X4, X7
+ MOVAPS X12, X4
+ PUNPCKLLQ X7, X4
+ SHUFPS $0x1e, X4, X4
+ PADDD X4, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X7
+ PSRLL $0x07, X1
+ PSLLL $0x19, X7
+ POR X7, X1
+ PSHUFD $0x39, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x93, X2, X2
+
+ // round 4
+ MOVAPS X5, X7
+ SHUFPS $0xd6, X6, X7
+ SHUFPS $0x39, X7, X7
+ PADDD X7, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X11
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X11
+ POR X11, X1
+ MOVAPS X10, X11
+ SHUFPS $0xfa, X4, X11
+ PSHUFD $0x0f, X5, X5
+ PBLENDW $0x33, X5, X11
+ PADDD X11, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X5
+ PSRLL $0x07, X1
+ PSLLL $0x19, X5
+ POR X5, X1
+ PSHUFD $0x93, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x39, X2, X2
+ MOVAPS X4, X12
+ PUNPCKLLQ X6, X12
+ PBLENDW $0xc0, X10, X12
+ SHUFPS $0xb4, X12, X12
+ PADDD X12, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X5
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X5
+ POR X5, X1
+ MOVAPS X6, X5
+ PUNPCKHLQ X4, X5
+ MOVAPS X10, X4
+ PUNPCKLLQ X5, X4
+ SHUFPS $0x1e, X4, X4
+ PADDD X4, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X5
+ PSRLL $0x07, X1
+ PSLLL $0x19, X5
+ POR X5, X1
+ PSHUFD $0x39, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x93, X2, X2
+
+ // round 5
+ MOVAPS X7, X5
+ SHUFPS $0xd6, X11, X5
+ SHUFPS $0x39, X5, X5
+ PADDD X5, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X6
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X6
+ POR X6, X1
+ MOVAPS X12, X6
+ SHUFPS $0xfa, X4, X6
+ PSHUFD $0x0f, X7, X7
+ PBLENDW $0x33, X7, X6
+ PADDD X6, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X7
+ PSRLL $0x07, X1
+ PSLLL $0x19, X7
+ POR X7, X1
+ PSHUFD $0x93, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x39, X2, X2
+ MOVAPS X4, X10
+ PUNPCKLLQ X11, X10
+ PBLENDW $0xc0, X12, X10
+ SHUFPS $0xb4, X10, X10
+ PADDD X10, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X7
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X7
+ POR X7, X1
+ MOVAPS X11, X7
+ PUNPCKHLQ X4, X7
+ MOVAPS X12, X4
+ PUNPCKLLQ X7, X4
+ SHUFPS $0x1e, X4, X4
+ PADDD X4, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X7
+ PSRLL $0x07, X1
+ PSLLL $0x19, X7
+ POR X7, X1
+ PSHUFD $0x39, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x93, X2, X2
+
+ // round 6
+ MOVAPS X5, X7
+ SHUFPS $0xd6, X6, X7
+ SHUFPS $0x39, X7, X7
+ PADDD X7, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X11
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X11
+ POR X11, X1
+ MOVAPS X10, X11
+ SHUFPS $0xfa, X4, X11
+ PSHUFD $0x0f, X5, X5
+ PBLENDW $0x33, X5, X11
+ PADDD X11, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X5
+ PSRLL $0x07, X1
+ PSLLL $0x19, X5
+ POR X5, X1
+ PSHUFD $0x93, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x39, X2, X2
+ MOVAPS X4, X12
+ PUNPCKLLQ X6, X12
+ PBLENDW $0xc0, X10, X12
+ SHUFPS $0xb4, X12, X12
+ PADDD X12, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X5
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X5
+ POR X5, X1
+ MOVAPS X6, X5
+ PUNPCKHLQ X4, X5
+ MOVAPS X10, X4
+ PUNPCKLLQ X5, X4
+ SHUFPS $0x1e, X4, X4
+ PADDD X4, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X5
+ PSRLL $0x07, X1
+ PSLLL $0x19, X5
+ POR X5, X1
+ PSHUFD $0x39, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x93, X2, X2
+
+ // round 7
+ MOVAPS X7, X5
+ SHUFPS $0xd6, X11, X5
+ SHUFPS $0x39, X5, X5
+ PADDD X5, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X5
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X5
+ POR X5, X1
+ MOVAPS X12, X5
+ SHUFPS $0xfa, X4, X5
+ PSHUFD $0x0f, X7, X6
+ PBLENDW $0x33, X6, X5
+ PADDD X5, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X5
+ PSRLL $0x07, X1
+ PSLLL $0x19, X5
+ POR X5, X1
+ PSHUFD $0x93, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x39, X2, X2
+ MOVAPS X4, X5
+ PUNPCKLLQ X11, X5
+ PBLENDW $0xc0, X12, X5
+ SHUFPS $0xb4, X5, X5
+ PADDD X5, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X8, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X5
+ PSRLL $0x0c, X1
+ PSLLL $0x14, X5
+ POR X5, X1
+ MOVAPS X11, X6
+ PUNPCKHLQ X4, X6
+ MOVAPS X12, X4
+ PUNPCKLLQ X6, X4
+ SHUFPS $0x1e, X4, X4
+ PADDD X4, X0
+ PADDD X1, X0
+ PXOR X0, X3
+ PSHUFB X9, X3
+ PADDD X3, X2
+ PXOR X2, X1
+ MOVAPS X1, X4
+ PSRLL $0x07, X1
+ PSLLL $0x19, X4
+ POR X4, X1
+ PSHUFD $0x39, X0, X0
+ PSHUFD $0x4e, X3, X3
+ PSHUFD $0x93, X2, X2
+
+ // finalize
+ PXOR X2, X0
+ PXOR X3, X1
+ MOVUPS (AX), X4
+ PXOR X4, X2
+ MOVUPS 16(AX), X4
+ PXOR X4, X3
+ MOVUPS X0, (DI)
+ MOVUPS X1, 16(DI)
+ MOVUPS X2, 32(DI)
+ MOVUPS X3, 48(DI)
+ RET
diff --git a/vendor/github.com/zeebo/blake3/internal/alg/compress/compress_sse41/impl_other.go b/vendor/github.com/zeebo/blake3/internal/alg/compress/compress_sse41/impl_other.go
new file mode 100644
index 00000000..cfc1d6bc
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/alg/compress/compress_sse41/impl_other.go
@@ -0,0 +1,10 @@
+//go:build !amd64
+// +build !amd64
+
+package compress_sse41
+
+import "github.com/zeebo/blake3/internal/alg/compress/compress_pure"
+
+func Compress(chain *[8]uint32, block *[16]uint32, counter uint64, blen uint32, flags uint32, out *[16]uint32) {
+ compress_pure.Compress(chain, block, counter, blen, flags, out)
+}
diff --git a/vendor/github.com/zeebo/blake3/internal/alg/compress/compress_sse41/stubs.go b/vendor/github.com/zeebo/blake3/internal/alg/compress/compress_sse41/stubs.go
new file mode 100644
index 00000000..a543c912
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/alg/compress/compress_sse41/stubs.go
@@ -0,0 +1,7 @@
+//go:build amd64
+// +build amd64
+
+package compress_sse41
+
+//go:noescape
+func Compress(chain *[8]uint32, block *[16]uint32, counter uint64, blen uint32, flags uint32, out *[16]uint32)
diff --git a/vendor/github.com/zeebo/blake3/internal/alg/hash/hash.go b/vendor/github.com/zeebo/blake3/internal/alg/hash/hash.go
new file mode 100644
index 00000000..ac43abb6
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/alg/hash/hash.go
@@ -0,0 +1,23 @@
+package hash
+
+import (
+ "github.com/zeebo/blake3/internal/alg/hash/hash_avx2"
+ "github.com/zeebo/blake3/internal/alg/hash/hash_pure"
+ "github.com/zeebo/blake3/internal/consts"
+)
+
+func HashF(input *[8192]byte, length, counter uint64, flags uint32, key *[8]uint32, out *[64]uint32, chain *[8]uint32) {
+ if consts.HasAVX2 && length > 2*consts.ChunkLen {
+ hash_avx2.HashF(input, length, counter, flags, key, out, chain)
+ } else {
+ hash_pure.HashF(input, length, counter, flags, key, out, chain)
+ }
+}
+
+func HashP(left, right *[64]uint32, flags uint32, key *[8]uint32, out *[64]uint32, n int) {
+ if consts.HasAVX2 && n >= 2 {
+ hash_avx2.HashP(left, right, flags, key, out, n)
+ } else {
+ hash_pure.HashP(left, right, flags, key, out, n)
+ }
+}
diff --git a/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_avx2/impl_amd64.s b/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_avx2/impl_amd64.s
new file mode 100644
index 00000000..0de16ca8
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_avx2/impl_amd64.s
@@ -0,0 +1,2561 @@
+// Code generated by command: go run main.go. DO NOT EDIT.
+
+#include "textflag.h"
+
+DATA iv<>+0(SB)/4, $0x6a09e667
+DATA iv<>+4(SB)/4, $0xbb67ae85
+DATA iv<>+8(SB)/4, $0x3c6ef372
+DATA iv<>+12(SB)/4, $0xa54ff53a
+DATA iv<>+16(SB)/4, $0x510e527f
+DATA iv<>+20(SB)/4, $0x9b05688c
+DATA iv<>+24(SB)/4, $0x1f83d9ab
+DATA iv<>+28(SB)/4, $0x5be0cd19
+GLOBL iv<>(SB), RODATA|NOPTR, $32
+
+DATA rot16_shuf<>+0(SB)/1, $0x02
+DATA rot16_shuf<>+1(SB)/1, $0x03
+DATA rot16_shuf<>+2(SB)/1, $0x00
+DATA rot16_shuf<>+3(SB)/1, $0x01
+DATA rot16_shuf<>+4(SB)/1, $0x06
+DATA rot16_shuf<>+5(SB)/1, $0x07
+DATA rot16_shuf<>+6(SB)/1, $0x04
+DATA rot16_shuf<>+7(SB)/1, $0x05
+DATA rot16_shuf<>+8(SB)/1, $0x0a
+DATA rot16_shuf<>+9(SB)/1, $0x0b
+DATA rot16_shuf<>+10(SB)/1, $0x08
+DATA rot16_shuf<>+11(SB)/1, $0x09
+DATA rot16_shuf<>+12(SB)/1, $0x0e
+DATA rot16_shuf<>+13(SB)/1, $0x0f
+DATA rot16_shuf<>+14(SB)/1, $0x0c
+DATA rot16_shuf<>+15(SB)/1, $0x0d
+DATA rot16_shuf<>+16(SB)/1, $0x12
+DATA rot16_shuf<>+17(SB)/1, $0x13
+DATA rot16_shuf<>+18(SB)/1, $0x10
+DATA rot16_shuf<>+19(SB)/1, $0x11
+DATA rot16_shuf<>+20(SB)/1, $0x16
+DATA rot16_shuf<>+21(SB)/1, $0x17
+DATA rot16_shuf<>+22(SB)/1, $0x14
+DATA rot16_shuf<>+23(SB)/1, $0x15
+DATA rot16_shuf<>+24(SB)/1, $0x1a
+DATA rot16_shuf<>+25(SB)/1, $0x1b
+DATA rot16_shuf<>+26(SB)/1, $0x18
+DATA rot16_shuf<>+27(SB)/1, $0x19
+DATA rot16_shuf<>+28(SB)/1, $0x1e
+DATA rot16_shuf<>+29(SB)/1, $0x1f
+DATA rot16_shuf<>+30(SB)/1, $0x1c
+DATA rot16_shuf<>+31(SB)/1, $0x1d
+GLOBL rot16_shuf<>(SB), RODATA|NOPTR, $32
+
+DATA rot8_shuf<>+0(SB)/1, $0x01
+DATA rot8_shuf<>+1(SB)/1, $0x02
+DATA rot8_shuf<>+2(SB)/1, $0x03
+DATA rot8_shuf<>+3(SB)/1, $0x00
+DATA rot8_shuf<>+4(SB)/1, $0x05
+DATA rot8_shuf<>+5(SB)/1, $0x06
+DATA rot8_shuf<>+6(SB)/1, $0x07
+DATA rot8_shuf<>+7(SB)/1, $0x04
+DATA rot8_shuf<>+8(SB)/1, $0x09
+DATA rot8_shuf<>+9(SB)/1, $0x0a
+DATA rot8_shuf<>+10(SB)/1, $0x0b
+DATA rot8_shuf<>+11(SB)/1, $0x08
+DATA rot8_shuf<>+12(SB)/1, $0x0d
+DATA rot8_shuf<>+13(SB)/1, $0x0e
+DATA rot8_shuf<>+14(SB)/1, $0x0f
+DATA rot8_shuf<>+15(SB)/1, $0x0c
+DATA rot8_shuf<>+16(SB)/1, $0x11
+DATA rot8_shuf<>+17(SB)/1, $0x12
+DATA rot8_shuf<>+18(SB)/1, $0x13
+DATA rot8_shuf<>+19(SB)/1, $0x10
+DATA rot8_shuf<>+20(SB)/1, $0x15
+DATA rot8_shuf<>+21(SB)/1, $0x16
+DATA rot8_shuf<>+22(SB)/1, $0x17
+DATA rot8_shuf<>+23(SB)/1, $0x14
+DATA rot8_shuf<>+24(SB)/1, $0x19
+DATA rot8_shuf<>+25(SB)/1, $0x1a
+DATA rot8_shuf<>+26(SB)/1, $0x1b
+DATA rot8_shuf<>+27(SB)/1, $0x18
+DATA rot8_shuf<>+28(SB)/1, $0x1d
+DATA rot8_shuf<>+29(SB)/1, $0x1e
+DATA rot8_shuf<>+30(SB)/1, $0x1f
+DATA rot8_shuf<>+31(SB)/1, $0x1c
+GLOBL rot8_shuf<>(SB), RODATA|NOPTR, $32
+
+DATA block_len<>+0(SB)/4, $0x00000040
+DATA block_len<>+4(SB)/4, $0x00000040
+DATA block_len<>+8(SB)/4, $0x00000040
+DATA block_len<>+12(SB)/4, $0x00000040
+DATA block_len<>+16(SB)/4, $0x00000040
+DATA block_len<>+20(SB)/4, $0x00000040
+DATA block_len<>+24(SB)/4, $0x00000040
+DATA block_len<>+28(SB)/4, $0x00000040
+GLOBL block_len<>(SB), RODATA|NOPTR, $32
+
+DATA zero<>+0(SB)/4, $0x00000000
+DATA zero<>+4(SB)/4, $0x00000000
+DATA zero<>+8(SB)/4, $0x00000000
+DATA zero<>+12(SB)/4, $0x00000000
+DATA zero<>+16(SB)/4, $0x00000000
+DATA zero<>+20(SB)/4, $0x00000000
+DATA zero<>+24(SB)/4, $0x00000000
+DATA zero<>+28(SB)/4, $0x00000000
+GLOBL zero<>(SB), RODATA|NOPTR, $32
+
+DATA counter<>+0(SB)/8, $0x0000000000000000
+DATA counter<>+8(SB)/8, $0x0000000000000001
+DATA counter<>+16(SB)/8, $0x0000000000000002
+DATA counter<>+24(SB)/8, $0x0000000000000003
+DATA counter<>+32(SB)/8, $0x0000000000000004
+DATA counter<>+40(SB)/8, $0x0000000000000005
+DATA counter<>+48(SB)/8, $0x0000000000000006
+DATA counter<>+56(SB)/8, $0x0000000000000007
+GLOBL counter<>(SB), RODATA|NOPTR, $64
+
+// func HashF(input *[8192]byte, length uint64, counter uint64, flags uint32, key *[8]uint32, out *[32]uint32, chain *[8]uint32)
+// Requires: AVX, AVX2
+TEXT ·HashF(SB), $688-56
+ MOVQ input+0(FP), AX
+ MOVQ length+8(FP), CX
+ MOVQ counter+16(FP), DX
+ MOVL flags+24(FP), BX
+ MOVQ key+32(FP), SI
+ MOVQ out+40(FP), DI
+ MOVQ chain+48(FP), R8
+
+ // Allocate local space and align it
+ LEAQ 31(SP), R11
+ MOVQ $0x000000000000001f, R9
+ NOTQ R9
+ ANDQ R9, R11
+
+ // Skip if the length is zero
+ XORQ R9, R9
+ XORQ R10, R10
+ TESTQ CX, CX
+ JZ skip_compute
+
+ // Compute complete chunks and blocks
+ SUBQ $0x01, CX
+ MOVQ CX, R9
+ SHRQ $0x0a, R9
+ MOVQ CX, R10
+ ANDQ $0x000003c0, R10
+
+skip_compute:
+ // Load some params into the stack (avo improvment?)
+ MOVL BX, 64(SP)
+ MOVQ DX, 72(SP)
+
+ // Load IV into vectors
+ VPBROADCASTD (SI), Y0
+ VPBROADCASTD 4(SI), Y1
+ VPBROADCASTD 8(SI), Y2
+ VPBROADCASTD 12(SI), Y3
+ VPBROADCASTD 16(SI), Y4
+ VPBROADCASTD 20(SI), Y5
+ VPBROADCASTD 24(SI), Y6
+ VPBROADCASTD 28(SI), Y7
+
+ // Build and store counter data on the stack
+ VPBROADCASTQ 72(SP), Y8
+ VPADDQ counter<>+0(SB), Y8, Y8
+ VPBROADCASTQ 72(SP), Y9
+ VPADDQ counter<>+32(SB), Y9, Y9
+ VPUNPCKLDQ Y9, Y8, Y10
+ VPUNPCKHDQ Y9, Y8, Y8
+ VPUNPCKLDQ Y8, Y10, Y9
+ VPUNPCKHDQ Y8, Y10, Y8
+ VPERMQ $0xd8, Y9, Y9
+ VPERMQ $0xd8, Y8, Y8
+ VMOVDQU Y9, 112(SP)
+ VMOVDQU Y8, 144(SP)
+
+ // Set up block flags and variables for iteration
+ XORQ CX, CX
+ ORL $0x01, 64(SP)
+
+loop:
+ // Include end flags if last block
+ CMPQ CX, $0x000003c0
+ JNE round_setup
+ ORL $0x02, 64(SP)
+
+round_setup:
+ // Load and transpose message vectors
+ VMOVDQU (AX)(CX*1), Y8
+ VMOVDQU 1024(AX)(CX*1), Y9
+ VMOVDQU 2048(AX)(CX*1), Y10
+ VMOVDQU 3072(AX)(CX*1), Y11
+ VMOVDQU 4096(AX)(CX*1), Y12
+ VMOVDQU 5120(AX)(CX*1), Y13
+ VMOVDQU 6144(AX)(CX*1), Y14
+ VMOVDQU 7168(AX)(CX*1), Y15
+ VMOVDQA Y0, (R11)
+ VPUNPCKLDQ Y9, Y8, Y0
+ VPUNPCKHDQ Y9, Y8, Y8
+ VPUNPCKLDQ Y11, Y10, Y9
+ VPUNPCKHDQ Y11, Y10, Y10
+ VPUNPCKLDQ Y13, Y12, Y11
+ VPUNPCKHDQ Y13, Y12, Y12
+ VPUNPCKLDQ Y15, Y14, Y13
+ VPUNPCKHDQ Y15, Y14, Y14
+ VPUNPCKLQDQ Y9, Y0, Y15
+ VPUNPCKHQDQ Y9, Y0, Y0
+ VPUNPCKLQDQ Y10, Y8, Y9
+ VPUNPCKHQDQ Y10, Y8, Y8
+ VPUNPCKLQDQ Y13, Y11, Y10
+ VPUNPCKHQDQ Y13, Y11, Y11
+ VPUNPCKLQDQ Y14, Y12, Y13
+ VPUNPCKHQDQ Y14, Y12, Y12
+ VINSERTI128 $0x01, X10, Y15, Y14
+ VPERM2I128 $0x31, Y10, Y15, Y10
+ VINSERTI128 $0x01, X11, Y0, Y15
+ VPERM2I128 $0x31, Y11, Y0, Y0
+ VINSERTI128 $0x01, X13, Y9, Y11
+ VPERM2I128 $0x31, Y13, Y9, Y9
+ VINSERTI128 $0x01, X12, Y8, Y13
+ VPERM2I128 $0x31, Y12, Y8, Y8
+ VMOVDQU Y14, 176(SP)
+ VMOVDQU Y15, 208(SP)
+ VMOVDQU Y11, 240(SP)
+ VMOVDQU Y13, 272(SP)
+ VMOVDQU Y10, 304(SP)
+ VMOVDQU Y0, 336(SP)
+ VMOVDQU Y9, 368(SP)
+ VMOVDQU Y8, 400(SP)
+ VMOVDQU 32(AX)(CX*1), Y0
+ VMOVDQU 1056(AX)(CX*1), Y8
+ VMOVDQU 2080(AX)(CX*1), Y9
+ VMOVDQU 3104(AX)(CX*1), Y10
+ VMOVDQU 4128(AX)(CX*1), Y11
+ VMOVDQU 5152(AX)(CX*1), Y12
+ VMOVDQU 6176(AX)(CX*1), Y13
+ VMOVDQU 7200(AX)(CX*1), Y14
+ VPUNPCKLDQ Y8, Y0, Y15
+ VPUNPCKHDQ Y8, Y0, Y0
+ VPUNPCKLDQ Y10, Y9, Y8
+ VPUNPCKHDQ Y10, Y9, Y9
+ VPUNPCKLDQ Y12, Y11, Y10
+ VPUNPCKHDQ Y12, Y11, Y11
+ VPUNPCKLDQ Y14, Y13, Y12
+ VPUNPCKHDQ Y14, Y13, Y13
+ VPUNPCKLQDQ Y8, Y15, Y14
+ VPUNPCKHQDQ Y8, Y15, Y8
+ VPUNPCKLQDQ Y9, Y0, Y15
+ VPUNPCKHQDQ Y9, Y0, Y0
+ VPUNPCKLQDQ Y12, Y10, Y9
+ VPUNPCKHQDQ Y12, Y10, Y10
+ VPUNPCKLQDQ Y13, Y11, Y12
+ VPUNPCKHQDQ Y13, Y11, Y11
+ VINSERTI128 $0x01, X9, Y14, Y13
+ VPERM2I128 $0x31, Y9, Y14, Y9
+ VINSERTI128 $0x01, X10, Y8, Y14
+ VPERM2I128 $0x31, Y10, Y8, Y8
+ VINSERTI128 $0x01, X12, Y15, Y10
+ VPERM2I128 $0x31, Y12, Y15, Y12
+ VINSERTI128 $0x01, X11, Y0, Y15
+ VPERM2I128 $0x31, Y11, Y0, Y0
+ VMOVDQU Y13, 432(SP)
+ VMOVDQU Y14, 464(SP)
+ VMOVDQU Y10, 496(SP)
+ VMOVDQU Y15, 528(SP)
+ VMOVDQU Y9, 560(SP)
+ VMOVDQU Y8, 592(SP)
+ VMOVDQU Y12, 624(SP)
+ VMOVDQU Y0, 656(SP)
+
+ // Load constants for the round
+ VMOVDQA (R11), Y0
+ VMOVDQU block_len<>+0(SB), Y8
+ VPBROADCASTD 64(SP), Y9
+ VPBROADCASTD iv<>+0(SB), Y10
+ VPBROADCASTD iv<>+4(SB), Y11
+ VPBROADCASTD iv<>+8(SB), Y12
+ VPBROADCASTD iv<>+12(SB), Y13
+ VMOVDQU 112(SP), Y14
+ VMOVDQU 144(SP), Y15
+
+ // Save state for partial chunk if necessary
+ CMPQ CX, R10
+ JNE begin_rounds
+ VMOVDQU Y0, 80(SP)
+ MOVL 80(SP)(R9*4), DX
+ MOVL DX, (R8)
+ VMOVDQU Y1, 80(SP)
+ MOVL 80(SP)(R9*4), DX
+ MOVL DX, 4(R8)
+ VMOVDQU Y2, 80(SP)
+ MOVL 80(SP)(R9*4), DX
+ MOVL DX, 8(R8)
+ VMOVDQU Y3, 80(SP)
+ MOVL 80(SP)(R9*4), DX
+ MOVL DX, 12(R8)
+ VMOVDQU Y4, 80(SP)
+ MOVL 80(SP)(R9*4), DX
+ MOVL DX, 16(R8)
+ VMOVDQU Y5, 80(SP)
+ MOVL 80(SP)(R9*4), DX
+ MOVL DX, 20(R8)
+ VMOVDQU Y6, 80(SP)
+ MOVL 80(SP)(R9*4), DX
+ MOVL DX, 24(R8)
+ VMOVDQU Y7, 80(SP)
+ MOVL 80(SP)(R9*4), DX
+ MOVL DX, 28(R8)
+
+begin_rounds:
+ // Perform the rounds
+ // Round 1
+ VPADDD 176(SP), Y0, Y0
+ VPADDD 240(SP), Y1, Y1
+ VPADDD 304(SP), Y2, Y2
+ VPADDD 368(SP), Y3, Y3
+ VPADDD Y4, Y0, Y0
+ VPXOR Y0, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y7, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y4, Y4
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y5, Y5
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y6, Y6
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y7, Y7
+ VMOVDQA Y0, (R11)
+ VPSRLD $0x0c, Y4, Y0
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y0, Y4, Y0
+ VPSRLD $0x0c, Y5, Y4
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y4, Y5, Y4
+ VPSRLD $0x0c, Y6, Y5
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y5, Y6, Y5
+ VPSRLD $0x0c, Y7, Y6
+ VPSLLD $0x14, Y7, Y7
+ VPOR Y6, Y7, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 208(SP), Y7, Y7
+ VPADDD 272(SP), Y1, Y1
+ VPADDD 336(SP), Y2, Y2
+ VPADDD 400(SP), Y3, Y3
+ VPADDD Y0, Y7, Y7
+ VPXOR Y7, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y5, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y6, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y0, Y0
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y4, Y4
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y5, Y5
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 432(SP), Y7, Y7
+ VPADDD 496(SP), Y1, Y1
+ VPADDD 560(SP), Y2, Y2
+ VPADDD 624(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x0c, Y4, Y7
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x0c, Y5, Y7
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x0c, Y6, Y7
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x0c, Y0, Y7
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VMOVDQA (R11), Y7
+ VPADDD 464(SP), Y7, Y7
+ VPADDD 528(SP), Y1, Y1
+ VPADDD 592(SP), Y2, Y2
+ VPADDD 656(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+
+ // Round 2
+ VMOVDQA (R11), Y7
+ VPADDD 240(SP), Y7, Y7
+ VPADDD 272(SP), Y1, Y1
+ VPADDD 400(SP), Y2, Y2
+ VPADDD 304(SP), Y3, Y3
+ VPADDD Y0, Y7, Y7
+ VPXOR Y7, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y5, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y6, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y0, Y0
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y4, Y4
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y5, Y5
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x0c, Y0, Y7
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VPSRLD $0x0c, Y4, Y7
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x0c, Y5, Y7
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x0c, Y6, Y7
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 368(SP), Y7, Y7
+ VPADDD 496(SP), Y1, Y1
+ VPADDD 176(SP), Y2, Y2
+ VPADDD 592(SP), Y3, Y3
+ VPADDD Y0, Y7, Y7
+ VPXOR Y7, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y5, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y6, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y0, Y0
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y4, Y4
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y5, Y5
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 208(SP), Y7, Y7
+ VPADDD 560(SP), Y1, Y1
+ VPADDD 464(SP), Y2, Y2
+ VPADDD 656(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x0c, Y4, Y7
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x0c, Y5, Y7
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x0c, Y6, Y7
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x0c, Y0, Y7
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VMOVDQA (R11), Y7
+ VPADDD 528(SP), Y7, Y7
+ VPADDD 336(SP), Y1, Y1
+ VPADDD 624(SP), Y2, Y2
+ VPADDD 432(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+
+ // Round 3
+ VMOVDQA (R11), Y7
+ VPADDD 272(SP), Y7, Y7
+ VPADDD 496(SP), Y1, Y1
+ VPADDD 592(SP), Y2, Y2
+ VPADDD 400(SP), Y3, Y3
+ VPADDD Y0, Y7, Y7
+ VPXOR Y7, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y5, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y6, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y0, Y0
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y4, Y4
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y5, Y5
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x0c, Y0, Y7
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VPSRLD $0x0c, Y4, Y7
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x0c, Y5, Y7
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x0c, Y6, Y7
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 304(SP), Y7, Y7
+ VPADDD 560(SP), Y1, Y1
+ VPADDD 240(SP), Y2, Y2
+ VPADDD 624(SP), Y3, Y3
+ VPADDD Y0, Y7, Y7
+ VPXOR Y7, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y5, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y6, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y0, Y0
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y4, Y4
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y5, Y5
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 368(SP), Y7, Y7
+ VPADDD 464(SP), Y1, Y1
+ VPADDD 528(SP), Y2, Y2
+ VPADDD 432(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x0c, Y4, Y7
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x0c, Y5, Y7
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x0c, Y6, Y7
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x0c, Y0, Y7
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VMOVDQA (R11), Y7
+ VPADDD 336(SP), Y7, Y7
+ VPADDD 176(SP), Y1, Y1
+ VPADDD 656(SP), Y2, Y2
+ VPADDD 208(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+
+ // Round 4
+ VMOVDQA (R11), Y7
+ VPADDD 496(SP), Y7, Y7
+ VPADDD 560(SP), Y1, Y1
+ VPADDD 624(SP), Y2, Y2
+ VPADDD 592(SP), Y3, Y3
+ VPADDD Y0, Y7, Y7
+ VPXOR Y7, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y5, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y6, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y0, Y0
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y4, Y4
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y5, Y5
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x0c, Y0, Y7
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VPSRLD $0x0c, Y4, Y7
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x0c, Y5, Y7
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x0c, Y6, Y7
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 400(SP), Y7, Y7
+ VPADDD 464(SP), Y1, Y1
+ VPADDD 272(SP), Y2, Y2
+ VPADDD 656(SP), Y3, Y3
+ VPADDD Y0, Y7, Y7
+ VPXOR Y7, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y5, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y6, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y0, Y0
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y4, Y4
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y5, Y5
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 304(SP), Y7, Y7
+ VPADDD 528(SP), Y1, Y1
+ VPADDD 336(SP), Y2, Y2
+ VPADDD 208(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x0c, Y4, Y7
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x0c, Y5, Y7
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x0c, Y6, Y7
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x0c, Y0, Y7
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VMOVDQA (R11), Y7
+ VPADDD 176(SP), Y7, Y7
+ VPADDD 240(SP), Y1, Y1
+ VPADDD 432(SP), Y2, Y2
+ VPADDD 368(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+
+ // Round 5
+ VMOVDQA (R11), Y7
+ VPADDD 560(SP), Y7, Y7
+ VPADDD 464(SP), Y1, Y1
+ VPADDD 656(SP), Y2, Y2
+ VPADDD 624(SP), Y3, Y3
+ VPADDD Y0, Y7, Y7
+ VPXOR Y7, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y5, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y6, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y0, Y0
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y4, Y4
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y5, Y5
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x0c, Y0, Y7
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VPSRLD $0x0c, Y4, Y7
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x0c, Y5, Y7
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x0c, Y6, Y7
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 592(SP), Y7, Y7
+ VPADDD 528(SP), Y1, Y1
+ VPADDD 496(SP), Y2, Y2
+ VPADDD 432(SP), Y3, Y3
+ VPADDD Y0, Y7, Y7
+ VPXOR Y7, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y5, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y6, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y0, Y0
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y4, Y4
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y5, Y5
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 400(SP), Y7, Y7
+ VPADDD 336(SP), Y1, Y1
+ VPADDD 176(SP), Y2, Y2
+ VPADDD 368(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x0c, Y4, Y7
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x0c, Y5, Y7
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x0c, Y6, Y7
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x0c, Y0, Y7
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VMOVDQA (R11), Y7
+ VPADDD 240(SP), Y7, Y7
+ VPADDD 272(SP), Y1, Y1
+ VPADDD 208(SP), Y2, Y2
+ VPADDD 304(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+
+ // Round 6
+ VMOVDQA (R11), Y7
+ VPADDD 464(SP), Y7, Y7
+ VPADDD 528(SP), Y1, Y1
+ VPADDD 432(SP), Y2, Y2
+ VPADDD 656(SP), Y3, Y3
+ VPADDD Y0, Y7, Y7
+ VPXOR Y7, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y5, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y6, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y0, Y0
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y4, Y4
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y5, Y5
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x0c, Y0, Y7
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VPSRLD $0x0c, Y4, Y7
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x0c, Y5, Y7
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x0c, Y6, Y7
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 624(SP), Y7, Y7
+ VPADDD 336(SP), Y1, Y1
+ VPADDD 560(SP), Y2, Y2
+ VPADDD 208(SP), Y3, Y3
+ VPADDD Y0, Y7, Y7
+ VPXOR Y7, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y5, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y6, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y0, Y0
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y4, Y4
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y5, Y5
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 592(SP), Y7, Y7
+ VPADDD 176(SP), Y1, Y1
+ VPADDD 240(SP), Y2, Y2
+ VPADDD 304(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x0c, Y4, Y7
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x0c, Y5, Y7
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x0c, Y6, Y7
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x0c, Y0, Y7
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VMOVDQA (R11), Y7
+ VPADDD 272(SP), Y7, Y7
+ VPADDD 496(SP), Y1, Y1
+ VPADDD 368(SP), Y2, Y2
+ VPADDD 400(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+
+ // Round 7
+ VMOVDQA (R11), Y7
+ VPADDD 528(SP), Y7, Y7
+ VPADDD 336(SP), Y1, Y1
+ VPADDD 208(SP), Y2, Y2
+ VPADDD 432(SP), Y3, Y3
+ VPADDD Y0, Y7, Y7
+ VPXOR Y7, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y5, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y6, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y0, Y0
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y4, Y4
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y5, Y5
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x0c, Y0, Y7
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VPSRLD $0x0c, Y4, Y7
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x0c, Y5, Y7
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x0c, Y6, Y7
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 656(SP), Y7, Y7
+ VPADDD 176(SP), Y1, Y1
+ VPADDD 464(SP), Y2, Y2
+ VPADDD 368(SP), Y3, Y3
+ VPADDD Y0, Y7, Y7
+ VPXOR Y7, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y5, Y2, Y2
+ VPXOR Y2, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y6, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y14, Y10, Y10
+ VPXOR Y10, Y0, Y0
+ VPADDD Y15, Y11, Y11
+ VPXOR Y11, Y4, Y4
+ VPADDD Y8, Y12, Y12
+ VPXOR Y12, Y5, Y5
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VMOVDQA (R11), Y7
+ VPADDD 624(SP), Y7, Y7
+ VPADDD 240(SP), Y1, Y1
+ VPADDD 272(SP), Y2, Y2
+ VPADDD 400(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot16_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot16_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot16_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x0c, Y4, Y7
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x0c, Y5, Y7
+ VPSLLD $0x14, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x0c, Y6, Y7
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x0c, Y0, Y7
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y7, Y0, Y0
+ VMOVDQA (R11), Y7
+ VPADDD 496(SP), Y7, Y7
+ VPADDD 560(SP), Y1, Y1
+ VPADDD 304(SP), Y2, Y2
+ VPADDD 592(SP), Y3, Y3
+ VPADDD Y4, Y7, Y7
+ VPXOR Y7, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y5, Y1, Y1
+ VPXOR Y1, Y14, Y14
+ VPSHUFB rot8_shuf<>+0(SB), Y14, Y14
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y15, Y15
+ VPSHUFB rot8_shuf<>+0(SB), Y15, Y15
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y8, Y8
+ VPSHUFB rot8_shuf<>+0(SB), Y8, Y8
+ VPADDD Y9, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPADDD Y14, Y13, Y13
+ VPXOR Y13, Y5, Y5
+ VPADDD Y15, Y10, Y10
+ VPXOR Y10, Y6, Y6
+ VPADDD Y8, Y11, Y11
+ VPXOR Y11, Y0, Y0
+ VMOVDQA Y7, (R11)
+ VPSRLD $0x07, Y4, Y7
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y7, Y4, Y4
+ VPSRLD $0x07, Y5, Y7
+ VPSLLD $0x19, Y5, Y5
+ VPOR Y7, Y5, Y5
+ VPSRLD $0x07, Y6, Y7
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y7, Y6, Y6
+ VPSRLD $0x07, Y0, Y7
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y7, Y0, Y0
+
+ // Finalize rounds
+ VPXOR Y9, Y6, Y6
+ VPXOR (R11), Y10, Y7
+ VPXOR Y11, Y1, Y1
+ VPXOR Y12, Y2, Y2
+ VPXOR Y13, Y3, Y3
+ VPXOR Y14, Y0, Y0
+ VPXOR Y15, Y4, Y4
+ VPXOR Y8, Y5, Y5
+
+ // Fix up registers for next iteration
+ VMOVDQU Y7, Y8
+ VMOVDQU Y6, Y7
+ VMOVDQU Y5, Y6
+ VMOVDQU Y4, Y5
+ VMOVDQU Y0, Y4
+ VMOVDQU Y8, Y0
+
+ // If we have zero complete chunks, we're done
+ CMPQ R9, $0x00
+ JNE loop_trailer
+ CMPQ R10, CX
+ JEQ finalize
+
+loop_trailer:
+ // Increment, reset flags, and loop
+ CMPQ CX, $0x000003c0
+ JEQ finalize
+ ADDQ $0x40, CX
+ MOVL BX, 64(SP)
+ JMP loop
+
+finalize:
+ // Store result into out
+ VMOVDQU Y0, (DI)
+ VMOVDQU Y1, 32(DI)
+ VMOVDQU Y2, 64(DI)
+ VMOVDQU Y3, 96(DI)
+ VMOVDQU Y4, 128(DI)
+ VMOVDQU Y5, 160(DI)
+ VMOVDQU Y6, 192(DI)
+ VMOVDQU Y7, 224(DI)
+ VZEROUPPER
+ RET
+
+// func HashP(left *[32]uint32, right *[32]uint32, flags uint8, key *[8]uint32, out *[32]uint32, n int)
+// Requires: AVX, AVX2
+TEXT ·HashP(SB), NOSPLIT, $72-48
+ MOVQ left+0(FP), AX
+ MOVQ right+8(FP), CX
+ MOVBLZX flags+16(FP), DX
+ MOVQ key+24(FP), BX
+ MOVQ out+32(FP), SI
+
+ // Allocate local space and align it
+ LEAQ 31(SP), DI
+ MOVQ $0x000000000000001f, R8
+ NOTQ R8
+ ANDQ R8, DI
+
+ // Set up flags value
+ MOVL DX, 64(SP)
+
+ // Perform the rounds
+ // Round 1
+ VPBROADCASTD (BX), Y0
+ VPADDD (AX), Y0, Y0
+ VPBROADCASTD 4(BX), Y1
+ VPADDD 64(AX), Y1, Y1
+ VPBROADCASTD 8(BX), Y2
+ VPADDD 128(AX), Y2, Y2
+ VPBROADCASTD 12(BX), Y3
+ VPADDD 192(AX), Y3, Y3
+ VPBROADCASTD 16(BX), Y4
+ VPADDD Y4, Y0, Y0
+ VMOVDQU zero<>+0(SB), Y5
+ VPXOR Y0, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPBROADCASTD 20(BX), Y6
+ VPADDD Y6, Y1, Y1
+ VMOVDQU zero<>+0(SB), Y7
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPBROADCASTD 24(BX), Y8
+ VPADDD Y8, Y2, Y2
+ VMOVDQU block_len<>+0(SB), Y9
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPBROADCASTD 28(BX), Y10
+ VPADDD Y10, Y3, Y3
+ VPBROADCASTD 64(SP), Y11
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPBROADCASTD iv<>+0(SB), Y12
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y4, Y4
+ VPBROADCASTD iv<>+4(SB), Y13
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y6, Y6
+ VPBROADCASTD iv<>+8(SB), Y14
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y8, Y8
+ VPBROADCASTD iv<>+12(SB), Y15
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y10, Y10
+ VMOVDQA Y0, (DI)
+ VPSRLD $0x0c, Y4, Y0
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y0, Y4, Y0
+ VPSRLD $0x0c, Y6, Y4
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y4, Y6, Y4
+ VPSRLD $0x0c, Y8, Y6
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y6, Y8, Y6
+ VPSRLD $0x0c, Y10, Y8
+ VPSLLD $0x14, Y10, Y10
+ VPOR Y8, Y10, Y8
+ VMOVDQA (DI), Y10
+ VPADDD 32(AX), Y10, Y10
+ VPADDD 96(AX), Y1, Y1
+ VPADDD 160(AX), Y2, Y2
+ VPADDD 224(AX), Y3, Y3
+ VPADDD Y0, Y10, Y10
+ VPXOR Y10, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y8, Y3, Y3
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y0, Y0
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y4, Y4
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y6, Y6
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y8, Y8
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VMOVDQA (DI), Y10
+ VPADDD (CX), Y10, Y10
+ VPADDD 64(CX), Y1, Y1
+ VPADDD 128(CX), Y2, Y2
+ VPADDD 192(CX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x0c, Y4, Y10
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x0c, Y6, Y10
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x0c, Y8, Y10
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x0c, Y0, Y10
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VMOVDQA (DI), Y10
+ VPADDD 32(CX), Y10, Y10
+ VPADDD 96(CX), Y1, Y1
+ VPADDD 160(CX), Y2, Y2
+ VPADDD 224(CX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+
+ // Round 2
+ VMOVDQA (DI), Y10
+ VPADDD 64(AX), Y10, Y10
+ VPADDD 96(AX), Y1, Y1
+ VPADDD 224(AX), Y2, Y2
+ VPADDD 128(AX), Y3, Y3
+ VPADDD Y0, Y10, Y10
+ VPXOR Y10, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y8, Y3, Y3
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y0, Y0
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y4, Y4
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y6, Y6
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y8, Y8
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x0c, Y0, Y10
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VPSRLD $0x0c, Y4, Y10
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x0c, Y6, Y10
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x0c, Y8, Y10
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VMOVDQA (DI), Y10
+ VPADDD 192(AX), Y10, Y10
+ VPADDD 64(CX), Y1, Y1
+ VPADDD (AX), Y2, Y2
+ VPADDD 160(CX), Y3, Y3
+ VPADDD Y0, Y10, Y10
+ VPXOR Y10, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y8, Y3, Y3
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y0, Y0
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y4, Y4
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y6, Y6
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y8, Y8
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VMOVDQA (DI), Y10
+ VPADDD 32(AX), Y10, Y10
+ VPADDD 128(CX), Y1, Y1
+ VPADDD 32(CX), Y2, Y2
+ VPADDD 224(CX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x0c, Y4, Y10
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x0c, Y6, Y10
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x0c, Y8, Y10
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x0c, Y0, Y10
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VMOVDQA (DI), Y10
+ VPADDD 96(CX), Y10, Y10
+ VPADDD 160(AX), Y1, Y1
+ VPADDD 192(CX), Y2, Y2
+ VPADDD (CX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+
+ // Round 3
+ VMOVDQA (DI), Y10
+ VPADDD 96(AX), Y10, Y10
+ VPADDD 64(CX), Y1, Y1
+ VPADDD 160(CX), Y2, Y2
+ VPADDD 224(AX), Y3, Y3
+ VPADDD Y0, Y10, Y10
+ VPXOR Y10, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y8, Y3, Y3
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y0, Y0
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y4, Y4
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y6, Y6
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y8, Y8
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x0c, Y0, Y10
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VPSRLD $0x0c, Y4, Y10
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x0c, Y6, Y10
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x0c, Y8, Y10
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VMOVDQA (DI), Y10
+ VPADDD 128(AX), Y10, Y10
+ VPADDD 128(CX), Y1, Y1
+ VPADDD 64(AX), Y2, Y2
+ VPADDD 192(CX), Y3, Y3
+ VPADDD Y0, Y10, Y10
+ VPXOR Y10, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y8, Y3, Y3
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y0, Y0
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y4, Y4
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y6, Y6
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y8, Y8
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VMOVDQA (DI), Y10
+ VPADDD 192(AX), Y10, Y10
+ VPADDD 32(CX), Y1, Y1
+ VPADDD 96(CX), Y2, Y2
+ VPADDD (CX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x0c, Y4, Y10
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x0c, Y6, Y10
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x0c, Y8, Y10
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x0c, Y0, Y10
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VMOVDQA (DI), Y10
+ VPADDD 160(AX), Y10, Y10
+ VPADDD (AX), Y1, Y1
+ VPADDD 224(CX), Y2, Y2
+ VPADDD 32(AX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+
+ // Round 4
+ VMOVDQA (DI), Y10
+ VPADDD 64(CX), Y10, Y10
+ VPADDD 128(CX), Y1, Y1
+ VPADDD 192(CX), Y2, Y2
+ VPADDD 160(CX), Y3, Y3
+ VPADDD Y0, Y10, Y10
+ VPXOR Y10, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y8, Y3, Y3
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y0, Y0
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y4, Y4
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y6, Y6
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y8, Y8
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x0c, Y0, Y10
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VPSRLD $0x0c, Y4, Y10
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x0c, Y6, Y10
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x0c, Y8, Y10
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VMOVDQA (DI), Y10
+ VPADDD 224(AX), Y10, Y10
+ VPADDD 32(CX), Y1, Y1
+ VPADDD 96(AX), Y2, Y2
+ VPADDD 224(CX), Y3, Y3
+ VPADDD Y0, Y10, Y10
+ VPXOR Y10, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y8, Y3, Y3
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y0, Y0
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y4, Y4
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y6, Y6
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y8, Y8
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VMOVDQA (DI), Y10
+ VPADDD 128(AX), Y10, Y10
+ VPADDD 96(CX), Y1, Y1
+ VPADDD 160(AX), Y2, Y2
+ VPADDD 32(AX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x0c, Y4, Y10
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x0c, Y6, Y10
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x0c, Y8, Y10
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x0c, Y0, Y10
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VMOVDQA (DI), Y10
+ VPADDD (AX), Y10, Y10
+ VPADDD 64(AX), Y1, Y1
+ VPADDD (CX), Y2, Y2
+ VPADDD 192(AX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+
+ // Round 5
+ VMOVDQA (DI), Y10
+ VPADDD 128(CX), Y10, Y10
+ VPADDD 32(CX), Y1, Y1
+ VPADDD 224(CX), Y2, Y2
+ VPADDD 192(CX), Y3, Y3
+ VPADDD Y0, Y10, Y10
+ VPXOR Y10, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y8, Y3, Y3
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y0, Y0
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y4, Y4
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y6, Y6
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y8, Y8
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x0c, Y0, Y10
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VPSRLD $0x0c, Y4, Y10
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x0c, Y6, Y10
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x0c, Y8, Y10
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VMOVDQA (DI), Y10
+ VPADDD 160(CX), Y10, Y10
+ VPADDD 96(CX), Y1, Y1
+ VPADDD 64(CX), Y2, Y2
+ VPADDD (CX), Y3, Y3
+ VPADDD Y0, Y10, Y10
+ VPXOR Y10, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y8, Y3, Y3
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y0, Y0
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y4, Y4
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y6, Y6
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y8, Y8
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VMOVDQA (DI), Y10
+ VPADDD 224(AX), Y10, Y10
+ VPADDD 160(AX), Y1, Y1
+ VPADDD (AX), Y2, Y2
+ VPADDD 192(AX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x0c, Y4, Y10
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x0c, Y6, Y10
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x0c, Y8, Y10
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x0c, Y0, Y10
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VMOVDQA (DI), Y10
+ VPADDD 64(AX), Y10, Y10
+ VPADDD 96(AX), Y1, Y1
+ VPADDD 32(AX), Y2, Y2
+ VPADDD 128(AX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+
+ // Round 6
+ VMOVDQA (DI), Y10
+ VPADDD 32(CX), Y10, Y10
+ VPADDD 96(CX), Y1, Y1
+ VPADDD (CX), Y2, Y2
+ VPADDD 224(CX), Y3, Y3
+ VPADDD Y0, Y10, Y10
+ VPXOR Y10, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y8, Y3, Y3
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y0, Y0
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y4, Y4
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y6, Y6
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y8, Y8
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x0c, Y0, Y10
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VPSRLD $0x0c, Y4, Y10
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x0c, Y6, Y10
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x0c, Y8, Y10
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VMOVDQA (DI), Y10
+ VPADDD 192(CX), Y10, Y10
+ VPADDD 160(AX), Y1, Y1
+ VPADDD 128(CX), Y2, Y2
+ VPADDD 32(AX), Y3, Y3
+ VPADDD Y0, Y10, Y10
+ VPXOR Y10, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y8, Y3, Y3
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y0, Y0
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y4, Y4
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y6, Y6
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y8, Y8
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VMOVDQA (DI), Y10
+ VPADDD 160(CX), Y10, Y10
+ VPADDD (AX), Y1, Y1
+ VPADDD 64(AX), Y2, Y2
+ VPADDD 128(AX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x0c, Y4, Y10
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x0c, Y6, Y10
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x0c, Y8, Y10
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x0c, Y0, Y10
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VMOVDQA (DI), Y10
+ VPADDD 96(AX), Y10, Y10
+ VPADDD 64(CX), Y1, Y1
+ VPADDD 192(AX), Y2, Y2
+ VPADDD 224(AX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+
+ // Round 7
+ VMOVDQA (DI), Y10
+ VPADDD 96(CX), Y10, Y10
+ VPADDD 160(AX), Y1, Y1
+ VPADDD 32(AX), Y2, Y2
+ VPADDD (CX), Y3, Y3
+ VPADDD Y0, Y10, Y10
+ VPXOR Y10, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y8, Y3, Y3
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y0, Y0
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y4, Y4
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y6, Y6
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y8, Y8
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x0c, Y0, Y10
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VPSRLD $0x0c, Y4, Y10
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x0c, Y6, Y10
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x0c, Y8, Y10
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VMOVDQA (DI), Y10
+ VPADDD 224(CX), Y10, Y10
+ VPADDD (AX), Y1, Y1
+ VPADDD 32(CX), Y2, Y2
+ VPADDD 192(AX), Y3, Y3
+ VPADDD Y0, Y10, Y10
+ VPXOR Y10, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y4, Y1, Y1
+ VPXOR Y1, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y6, Y2, Y2
+ VPXOR Y2, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y8, Y3, Y3
+ VPXOR Y3, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y5, Y12, Y12
+ VPXOR Y12, Y0, Y0
+ VPADDD Y7, Y13, Y13
+ VPXOR Y13, Y4, Y4
+ VPADDD Y9, Y14, Y14
+ VPXOR Y14, Y6, Y6
+ VPADDD Y11, Y15, Y15
+ VPXOR Y15, Y8, Y8
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VMOVDQA (DI), Y10
+ VPADDD 192(CX), Y10, Y10
+ VPADDD 64(AX), Y1, Y1
+ VPADDD 96(AX), Y2, Y2
+ VPADDD 224(AX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot16_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot16_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot16_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot16_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x0c, Y4, Y10
+ VPSLLD $0x14, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x0c, Y6, Y10
+ VPSLLD $0x14, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x0c, Y8, Y10
+ VPSLLD $0x14, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x0c, Y0, Y10
+ VPSLLD $0x14, Y0, Y0
+ VPOR Y10, Y0, Y0
+ VMOVDQA (DI), Y10
+ VPADDD 64(CX), Y10, Y10
+ VPADDD 128(CX), Y1, Y1
+ VPADDD 128(AX), Y2, Y2
+ VPADDD 160(CX), Y3, Y3
+ VPADDD Y4, Y10, Y10
+ VPXOR Y10, Y11, Y11
+ VPSHUFB rot8_shuf<>+0(SB), Y11, Y11
+ VPADDD Y6, Y1, Y1
+ VPXOR Y1, Y5, Y5
+ VPSHUFB rot8_shuf<>+0(SB), Y5, Y5
+ VPADDD Y8, Y2, Y2
+ VPXOR Y2, Y7, Y7
+ VPSHUFB rot8_shuf<>+0(SB), Y7, Y7
+ VPADDD Y0, Y3, Y3
+ VPXOR Y3, Y9, Y9
+ VPSHUFB rot8_shuf<>+0(SB), Y9, Y9
+ VPADDD Y11, Y14, Y14
+ VPXOR Y14, Y4, Y4
+ VPADDD Y5, Y15, Y15
+ VPXOR Y15, Y6, Y6
+ VPADDD Y7, Y12, Y12
+ VPXOR Y12, Y8, Y8
+ VPADDD Y9, Y13, Y13
+ VPXOR Y13, Y0, Y0
+ VMOVDQA Y10, (DI)
+ VPSRLD $0x07, Y4, Y10
+ VPSLLD $0x19, Y4, Y4
+ VPOR Y10, Y4, Y4
+ VPSRLD $0x07, Y6, Y10
+ VPSLLD $0x19, Y6, Y6
+ VPOR Y10, Y6, Y6
+ VPSRLD $0x07, Y8, Y10
+ VPSLLD $0x19, Y8, Y8
+ VPOR Y10, Y8, Y8
+ VPSRLD $0x07, Y0, Y10
+ VPSLLD $0x19, Y0, Y0
+ VPOR Y10, Y0, Y0
+
+ // Finalize
+ VPXOR (DI), Y12, Y10
+ VPXOR Y13, Y1, Y1
+ VPXOR Y14, Y2, Y2
+ VPXOR Y15, Y3, Y3
+ VPXOR Y5, Y0, Y0
+ VPXOR Y7, Y4, Y4
+ VPXOR Y9, Y6, Y5
+ VPXOR Y11, Y8, Y6
+
+ // Store result into out
+ VMOVDQU Y10, (SI)
+ VMOVDQU Y1, 32(SI)
+ VMOVDQU Y2, 64(SI)
+ VMOVDQU Y3, 96(SI)
+ VMOVDQU Y0, 128(SI)
+ VMOVDQU Y4, 160(SI)
+ VMOVDQU Y5, 192(SI)
+ VMOVDQU Y6, 224(SI)
+ VZEROUPPER
+ RET
diff --git a/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_avx2/impl_other.go b/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_avx2/impl_other.go
new file mode 100644
index 00000000..feb06387
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_avx2/impl_other.go
@@ -0,0 +1,14 @@
+//go:build !amd64
+// +build !amd64
+
+package hash_avx2
+
+import "github.com/zeebo/blake3/internal/alg/hash/hash_pure"
+
+func HashF(input *[8192]byte, length, counter uint64, flags uint32, key *[8]uint32, out *[64]uint32, chain *[8]uint32) {
+ hash_pure.HashF(input, length, counter, flags, key, out, chain)
+}
+
+func HashP(left, right *[64]uint32, flags uint32, key *[8]uint32, out *[64]uint32, n int) {
+ hash_pure.HashP(left, right, flags, key, out, n)
+}
diff --git a/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_avx2/stubs.go b/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_avx2/stubs.go
new file mode 100644
index 00000000..a0257636
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_avx2/stubs.go
@@ -0,0 +1,10 @@
+//go:build amd64
+// +build amd64
+
+package hash_avx2
+
+//go:noescape
+func HashF(input *[8192]byte, length, counter uint64, flags uint32, key *[8]uint32, out *[64]uint32, chain *[8]uint32)
+
+//go:noescape
+func HashP(left, right *[64]uint32, flags uint32, key *[8]uint32, out *[64]uint32, n int)
diff --git a/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_pure/hashf.go b/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_pure/hashf.go
new file mode 100644
index 00000000..8b0da235
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_pure/hashf.go
@@ -0,0 +1,56 @@
+package hash_pure
+
+import (
+ "unsafe"
+
+ "github.com/zeebo/blake3/internal/alg/compress"
+ "github.com/zeebo/blake3/internal/consts"
+ "github.com/zeebo/blake3/internal/utils"
+)
+
+func HashF(input *[8192]byte, length, counter uint64, flags uint32, key *[8]uint32, out *[64]uint32, chain *[8]uint32) {
+ var tmp [16]uint32
+
+ for i := uint64(0); consts.ChunkLen*i < length && i < 8; i++ {
+ bchain := *key
+ bflags := flags | consts.Flag_ChunkStart
+ start := consts.ChunkLen * i
+
+ for n := uint64(0); n < 16; n++ {
+ if n == 15 {
+ bflags |= consts.Flag_ChunkEnd
+ }
+ if start+64*n >= length {
+ break
+ }
+ if start+64+64*n >= length {
+ *chain = bchain
+ }
+
+ var blockPtr *[16]uint32
+ if consts.OptimizeLittleEndian {
+ blockPtr = (*[16]uint32)(unsafe.Pointer(&input[consts.ChunkLen*i+consts.BlockLen*n]))
+ } else {
+ var block [16]uint32
+ utils.BytesToWords((*[64]uint8)(unsafe.Pointer(&input[consts.ChunkLen*i+consts.BlockLen*n])), &block)
+ blockPtr = &block
+ }
+
+ compress.Compress(&bchain, blockPtr, counter, consts.BlockLen, bflags, &tmp)
+
+ bchain = *(*[8]uint32)(unsafe.Pointer(&tmp[0]))
+ bflags = flags
+ }
+
+ out[i+0] = bchain[0]
+ out[i+8] = bchain[1]
+ out[i+16] = bchain[2]
+ out[i+24] = bchain[3]
+ out[i+32] = bchain[4]
+ out[i+40] = bchain[5]
+ out[i+48] = bchain[6]
+ out[i+56] = bchain[7]
+
+ counter++
+ }
+}
diff --git a/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_pure/hashp.go b/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_pure/hashp.go
new file mode 100644
index 00000000..bee5d8dd
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/alg/hash/hash_pure/hashp.go
@@ -0,0 +1,38 @@
+package hash_pure
+
+import "github.com/zeebo/blake3/internal/alg/compress"
+
+func HashP(left, right *[64]uint32, flags uint32, key *[8]uint32, out *[64]uint32, n int) { // HashP performs up to 8 parent-node compressions: lane i packs column i of left (words 0-7) and right (words 8-15) into one block, compresses it against key, and stores the resulting chaining value in column i of out.
+	var tmp [16]uint32 // 16-word compression output; only the first 8 words are kept per lane
+	var block [16]uint32 // message block rebuilt per lane from the two child chaining values
+
+	for i := 0; i < n && i < 8; i++ { // one lane per requested parent, capped at the 8 available columns
+		block[0] = left[i+0] // gather lane i's left child CV from the stride-8 column layout
+		block[1] = left[i+8]
+		block[2] = left[i+16]
+		block[3] = left[i+24]
+		block[4] = left[i+32]
+		block[5] = left[i+40]
+		block[6] = left[i+48]
+		block[7] = left[i+56]
+		block[8] = right[i+0] // gather lane i's right child CV into the upper half of the block
+		block[9] = right[i+8]
+		block[10] = right[i+16]
+		block[11] = right[i+24]
+		block[12] = right[i+32]
+		block[13] = right[i+40]
+		block[14] = right[i+48]
+		block[15] = right[i+56]
+
+		compress.Compress(key, &block, 0, 64, flags, &tmp) // parent nodes use counter 0 and a full 64-byte block length
+
+		out[i+0] = tmp[0] // scatter the new chaining value into column i of out (stride 8)
+		out[i+8] = tmp[1]
+		out[i+16] = tmp[2]
+		out[i+24] = tmp[3]
+		out[i+32] = tmp[4]
+		out[i+40] = tmp[5]
+		out[i+48] = tmp[6]
+		out[i+56] = tmp[7]
+	}
+}
diff --git a/vendor/github.com/zeebo/blake3/internal/consts/consts.go b/vendor/github.com/zeebo/blake3/internal/consts/consts.go
new file mode 100644
index 00000000..89f08fe1
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/consts/consts.go
@@ -0,0 +1,29 @@
+package consts
+
+var IV = [...]uint32{IV0, IV1, IV2, IV3, IV4, IV5, IV6, IV7} // IV is the 8-word initial chaining value used by the hash.
+
+const (
+	IV0 = 0x6A09E667 // NOTE(review): these appear to be the SHA-256 IV words that BLAKE3 reuses — confirm against the BLAKE3 spec
+	IV1 = 0xBB67AE85
+	IV2 = 0x3C6EF372
+	IV3 = 0xA54FF53A
+	IV4 = 0x510E527F
+	IV5 = 0x9B05688C
+	IV6 = 0x1F83D9AB
+	IV7 = 0x5BE0CD19
+)
+
+const (
+	Flag_ChunkStart uint32 = 1 << 0 // set on the first block of a chunk
+	Flag_ChunkEnd uint32 = 1 << 1 // set on the last block of a chunk
+	Flag_Parent uint32 = 1 << 2 // set when compressing a parent (tree) node
+	Flag_Root uint32 = 1 << 3 // set on the final (root) compression
+	Flag_Keyed uint32 = 1 << 4 // keyed-hash mode
+	Flag_DeriveKeyContext uint32 = 1 << 5 // key-derivation: context string phase
+	Flag_DeriveKeyMaterial uint32 = 1 << 6 // key-derivation: material phase
+)
+
+const (
+	BlockLen = 64 // bytes per compression block
+	ChunkLen = 1024 // bytes per chunk (16 blocks)
+)
diff --git a/vendor/github.com/zeebo/blake3/internal/consts/cpu.go b/vendor/github.com/zeebo/blake3/internal/consts/cpu.go
new file mode 100644
index 00000000..20d67f18
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/consts/cpu.go
@@ -0,0 +1,17 @@
+package consts
+
+import (
+ "os"
+
+ "github.com/klauspost/cpuid/v2"
+)
+
+var (
+	HasAVX2 = cpuid.CPU.Has(cpuid.AVX2) && // runtime AVX2 detection, disabled via env vars for testing/debugging
+		os.Getenv("BLAKE3_DISABLE_AVX2") == "" &&
+		os.Getenv("BLAKE3_PUREGO") == "" // BLAKE3_PUREGO forces the pure-Go fallback for all features
+
+	HasSSE41 = cpuid.CPU.Has(cpuid.SSE4) && // checks cpuid.SSE4 (presumably SSE4.1 in cpuid's naming — TODO confirm), same env overrides
+		os.Getenv("BLAKE3_DISABLE_SSE41") == "" &&
+		os.Getenv("BLAKE3_PUREGO") == ""
+)
diff --git a/vendor/github.com/zeebo/blake3/internal/consts/cpu_little.go b/vendor/github.com/zeebo/blake3/internal/consts/cpu_little.go
new file mode 100644
index 00000000..11bef9c9
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/consts/cpu_little.go
@@ -0,0 +1,6 @@
+//go:build amd64 || 386 || arm || arm64 || mipsle || mips64le || ppc64le || riscv64 || wasm
+// +build amd64 386 arm arm64 mipsle mips64le ppc64le riscv64 wasm
+
+package consts
+
+const OptimizeLittleEndian = true // little-endian targets (see build constraint above) may reinterpret input bytes as words in place
diff --git a/vendor/github.com/zeebo/blake3/internal/consts/cpu_other.go b/vendor/github.com/zeebo/blake3/internal/consts/cpu_other.go
new file mode 100644
index 00000000..be7ac266
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/consts/cpu_other.go
@@ -0,0 +1,6 @@
+//go:build !amd64 && !386 && !arm && !arm64 && !mipsle && !mips64le && !ppc64le && !riscv64 && !wasm
+// +build !amd64,!386,!arm,!arm64,!mipsle,!mips64le,!ppc64le,!riscv64,!wasm
+
+package consts
+
+const OptimizeLittleEndian = false // non-little-endian targets must decode bytes to words explicitly (utils.BytesToWords)
diff --git a/vendor/github.com/zeebo/blake3/internal/utils/utils.go b/vendor/github.com/zeebo/blake3/internal/utils/utils.go
new file mode 100644
index 00000000..0b36f0f0
--- /dev/null
+++ b/vendor/github.com/zeebo/blake3/internal/utils/utils.go
@@ -0,0 +1,60 @@
+package utils
+
+import (
+ "encoding/binary"
+ "unsafe"
+)
+
+func SliceToArray32(bytes []byte) *[32]uint8 { return (*[32]uint8)(unsafe.Pointer(&bytes[0])) } // reinterprets the slice head as a fixed 32-byte array; caller must guarantee len(bytes) >= 32 (no bounds check here)
+func SliceToArray64(bytes []byte) *[64]uint8 { return (*[64]uint8)(unsafe.Pointer(&bytes[0])) } // reinterprets the slice head as a fixed 64-byte array; caller must guarantee len(bytes) >= 64
+
+func BytesToWords(bytes *[64]uint8, words *[16]uint32) { // BytesToWords decodes a 64-byte block into 16 little-endian uint32 words (fully unrolled).
+	words[0] = binary.LittleEndian.Uint32(bytes[0*4:])
+	words[1] = binary.LittleEndian.Uint32(bytes[1*4:])
+	words[2] = binary.LittleEndian.Uint32(bytes[2*4:])
+	words[3] = binary.LittleEndian.Uint32(bytes[3*4:])
+	words[4] = binary.LittleEndian.Uint32(bytes[4*4:])
+	words[5] = binary.LittleEndian.Uint32(bytes[5*4:])
+	words[6] = binary.LittleEndian.Uint32(bytes[6*4:])
+	words[7] = binary.LittleEndian.Uint32(bytes[7*4:])
+	words[8] = binary.LittleEndian.Uint32(bytes[8*4:])
+	words[9] = binary.LittleEndian.Uint32(bytes[9*4:])
+	words[10] = binary.LittleEndian.Uint32(bytes[10*4:])
+	words[11] = binary.LittleEndian.Uint32(bytes[11*4:])
+	words[12] = binary.LittleEndian.Uint32(bytes[12*4:])
+	words[13] = binary.LittleEndian.Uint32(bytes[13*4:])
+	words[14] = binary.LittleEndian.Uint32(bytes[14*4:])
+	words[15] = binary.LittleEndian.Uint32(bytes[15*4:])
+}
+
+func WordsToBytes(words *[16]uint32, bytes []byte) { // WordsToBytes encodes 16 uint32 words into the first 64 bytes of bytes, little-endian (inverse of BytesToWords).
+	bytes = bytes[:64] // one up-front reslice: panics early if too short and lets the compiler drop later bounds checks
+	binary.LittleEndian.PutUint32(bytes[0*4:1*4], words[0])
+	binary.LittleEndian.PutUint32(bytes[1*4:2*4], words[1])
+	binary.LittleEndian.PutUint32(bytes[2*4:3*4], words[2])
+	binary.LittleEndian.PutUint32(bytes[3*4:4*4], words[3])
+	binary.LittleEndian.PutUint32(bytes[4*4:5*4], words[4])
+	binary.LittleEndian.PutUint32(bytes[5*4:6*4], words[5])
+	binary.LittleEndian.PutUint32(bytes[6*4:7*4], words[6])
+	binary.LittleEndian.PutUint32(bytes[7*4:8*4], words[7])
+	binary.LittleEndian.PutUint32(bytes[8*4:9*4], words[8])
+	binary.LittleEndian.PutUint32(bytes[9*4:10*4], words[9])
+	binary.LittleEndian.PutUint32(bytes[10*4:11*4], words[10])
+	binary.LittleEndian.PutUint32(bytes[11*4:12*4], words[11])
+	binary.LittleEndian.PutUint32(bytes[12*4:13*4], words[12])
+	binary.LittleEndian.PutUint32(bytes[13*4:14*4], words[13])
+	binary.LittleEndian.PutUint32(bytes[14*4:15*4], words[14])
+	binary.LittleEndian.PutUint32(bytes[15*4:16*4], words[15])
+}
+
+func KeyFromBytes(key []byte, out *[8]uint32) { // KeyFromBytes decodes the first 32 bytes of key into 8 little-endian uint32 words.
+	key = key[:32] // panics early if the key is too short; also hoists the bounds check
+	out[0] = binary.LittleEndian.Uint32(key[0:])
+	out[1] = binary.LittleEndian.Uint32(key[4:])
+	out[2] = binary.LittleEndian.Uint32(key[8:])
+	out[3] = binary.LittleEndian.Uint32(key[12:])
+	out[4] = binary.LittleEndian.Uint32(key[16:])
+	out[5] = binary.LittleEndian.Uint32(key[20:])
+	out[6] = binary.LittleEndian.Uint32(key[24:])
+	out[7] = binary.LittleEndian.Uint32(key[28:])
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 04ffbf0d..9833a9c0 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,4 +1,4 @@
-# github.com/99designs/gqlgen v0.17.34
+# github.com/99designs/gqlgen v0.17.35
## explicit; go 1.18
github.com/99designs/gqlgen
github.com/99designs/gqlgen/api
@@ -54,10 +54,10 @@ github.com/beorn7/perks/quantile
# github.com/boltdb/bolt v1.3.1
## explicit
github.com/boltdb/bolt
-# github.com/caddyserver/certmagic v0.18.2
+# github.com/caddyserver/certmagic v0.19.0
## explicit; go 1.19
github.com/caddyserver/certmagic
-# github.com/casbin/casbin/v2 v2.71.1
+# github.com/casbin/casbin/v2 v2.72.0
## explicit; go 1.13
github.com/casbin/casbin/v2
github.com/casbin/casbin/v2/config
@@ -78,7 +78,7 @@ github.com/cespare/xxhash/v2
# github.com/cpuguy83/go-md2man/v2 v2.0.2
## explicit; go 1.11
github.com/cpuguy83/go-md2man/v2/md2man
-# github.com/datarhei/core-client-go/v16 v16.11.1-0.20230710090938-bfcb7f5f7b3e
+# github.com/datarhei/core-client-go/v16 v16.11.1-0.20230717141633-8f0e5ce4c68c
## explicit; go 1.18
github.com/datarhei/core-client-go/v16
github.com/datarhei/core-client-go/v16/api
@@ -135,8 +135,8 @@ github.com/gabriel-vasile/mimetype/internal/magic
## explicit; go 1.12
github.com/go-ole/go-ole
github.com/go-ole/go-ole/oleutil
-# github.com/go-openapi/jsonpointer v0.19.6
-## explicit; go 1.13
+# github.com/go-openapi/jsonpointer v0.20.0
+## explicit; go 1.18
github.com/go-openapi/jsonpointer
# github.com/go-openapi/jsonreference v0.20.2
## explicit; go 1.13
@@ -232,7 +232,7 @@ github.com/klauspost/compress/s2
# github.com/klauspost/cpuid/v2 v2.2.5
## explicit; go 1.15
github.com/klauspost/cpuid/v2
-# github.com/labstack/echo/v4 v4.10.2
+# github.com/labstack/echo/v4 v4.11.1
## explicit; go 1.17
github.com/labstack/echo/v4
github.com/labstack/echo/v4/middleware
@@ -282,7 +282,7 @@ github.com/miekg/dns
# github.com/minio/md5-simd v1.1.2
## explicit; go 1.14
github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.59
+# github.com/minio/minio-go/v7 v7.0.60
## explicit; go 1.17
github.com/minio/minio-go/v7
github.com/minio/minio-go/v7/pkg/credentials
@@ -371,6 +371,15 @@ github.com/swaggo/files/v2
# github.com/swaggo/swag v1.16.1
## explicit; go 1.18
github.com/swaggo/swag
+# github.com/tidwall/gjson v1.14.4
+## explicit; go 1.12
+github.com/tidwall/gjson
+# github.com/tidwall/match v1.1.1
+## explicit; go 1.15
+github.com/tidwall/match
+# github.com/tidwall/pretty v1.2.1
+## explicit; go 1.16
+github.com/tidwall/pretty
# github.com/tklauser/go-sysconf v0.3.11
## explicit; go 1.13
github.com/tklauser/go-sysconf
@@ -386,7 +395,7 @@ github.com/valyala/bytebufferpool
# github.com/valyala/fasttemplate v1.2.2
## explicit; go 1.12
github.com/valyala/fasttemplate
-# github.com/vektah/gqlparser/v2 v2.5.6
+# github.com/vektah/gqlparser/v2 v2.5.8
## explicit; go 1.16
github.com/vektah/gqlparser/v2
github.com/vektah/gqlparser/v2/ast
@@ -410,6 +419,18 @@ github.com/xrash/smetrics
# github.com/yusufpapurcu/wmi v1.2.3
## explicit; go 1.16
github.com/yusufpapurcu/wmi
+# github.com/zeebo/blake3 v0.2.3
+## explicit; go 1.13
+github.com/zeebo/blake3
+github.com/zeebo/blake3/internal/alg
+github.com/zeebo/blake3/internal/alg/compress
+github.com/zeebo/blake3/internal/alg/compress/compress_pure
+github.com/zeebo/blake3/internal/alg/compress/compress_sse41
+github.com/zeebo/blake3/internal/alg/hash
+github.com/zeebo/blake3/internal/alg/hash/hash_avx2
+github.com/zeebo/blake3/internal/alg/hash/hash_pure
+github.com/zeebo/blake3/internal/consts
+github.com/zeebo/blake3/internal/utils
# go.etcd.io/bbolt v1.3.7
## explicit; go 1.17
go.etcd.io/bbolt