diff --git a/app/api/api.go b/app/api/api.go index da5aeeac..73e48d8b 100644 --- a/app/api/api.go +++ b/app/api/api.go @@ -1427,7 +1427,6 @@ func (a *api) start(ctx context.Context) error { Password: "", DefaultFile: "index.html", DefaultContentType: "text/html", - Gzip: true, Filesystem: a.diskfs, Cache: a.cache, }, @@ -1440,7 +1439,6 @@ func (a *api) start(ctx context.Context) error { Password: cfg.Storage.Memory.Auth.Password, DefaultFile: "", DefaultContentType: "application/data", - Gzip: true, Filesystem: a.memfs, Cache: nil, }, @@ -1456,7 +1454,6 @@ func (a *api) start(ctx context.Context) error { Password: s3.Auth.Password, DefaultFile: "", DefaultContentType: "application/data", - Gzip: true, Filesystem: a.s3fs[s3.Name], Cache: a.cache, }) @@ -1469,7 +1466,7 @@ func (a *api) start(ctx context.Context) error { Restream: a.restream, Metrics: a.metrics, Prometheus: a.prom, - MimeTypesFile: cfg.Storage.MimeTypes, + MimeTypesFile: cfg.Storage.MimeTypesFile, Filesystems: httpfilesystems, IPLimiter: iplimiter, Profiling: cfg.Debug.Profiling, @@ -1501,6 +1498,11 @@ func (a *api) start(ctx context.Context) error { return false }, Resources: a.resources, + Compress: http.CompressConfig{ + Encoding: cfg.Compress.Encoding, + MimeTypes: cfg.Compress.MimeTypes, + MinLength: cfg.Compress.MinLength, + }, } mainserverhandler, err := http.NewServer(serverConfig) diff --git a/config/config.go b/config/config.go index 317da336..e0364c03 100644 --- a/config/config.go +++ b/config/config.go @@ -94,6 +94,7 @@ func (d *Config) Clone() *Config { data.Log = d.Log data.DB = d.DB data.Host = d.Host + data.Compress = d.Compress data.API = d.API data.TLS = d.TLS data.Storage = d.Storage @@ -113,6 +114,9 @@ func (d *Config) Clone() *Config { data.Host.Name = slices.Copy(d.Host.Name) + data.Compress.Encoding = slices.Copy(d.Compress.Encoding) + data.Compress.MimeTypes = slices.Copy(d.Compress.MimeTypes) + data.API.Access.HTTP.Allow = slices.Copy(d.API.Access.HTTP.Allow) data.API.Access.HTTP.Block = slices.Copy(d.API.Access.HTTP.Block) data.API.Access.HTTPS.Allow = slices.Copy(d.API.Access.HTTPS.Allow) @@ -164,6 +168,21 @@ func (d *Config) init() { d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false) d.vars.Register(value.NewBool(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false) + d.vars.Register(value.NewStringList(&d.Compress.Encoding, []string{"gzip"}, ","), "compress.encoding", "CORE_COMPRESS_ENCODING", nil, "Comma separated list of content encodings", false, false) + d.vars.Register(value.NewStringList(&d.Compress.MimeTypes, []string{ + "text/plain", + "text/html", + "text/css", + "text/javascript", + "application/json", + "application/x-mpegurl", + "application/vnd.apple.mpegurl", + "image/svg+xml", + "text/event-stream", + "application/x-json-stream", + }, ","), "compress.mimetypes", "CORE_COMPRESS_MIMETYPES", nil, "Comma separated list of mimetypes to compress", false, false) + d.vars.Register(value.NewInt(&d.Compress.MinLength, 1000), "compress.min_length", "CORE_COMPRESS_MIN_LENGTH", nil, "Minimum size before compression will be used", false, false) + // API d.vars.Register(value.NewBool(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only ready only access to the API", false, false) d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", 
"CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false) @@ -193,7 +212,7 @@ func (d *Config) init() { d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEY_FILE", []string{"CORE_TLS_KEYFILE"}, "Path to key file in PEM format", false, false) // Storage - d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false) + d.vars.Register(value.NewFile(&d.Storage.MimeTypesFile, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false) // Storage (Disk) d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false) diff --git a/config/data.go b/config/data.go index 887c8e98..26c77054 100644 --- a/config/data.go +++ b/config/data.go @@ -32,6 +32,11 @@ type Data struct { Name []string `json:"name"` Auto bool `json:"auto"` } `json:"host"` + Compress struct { + Encoding []string `json:"encoding"` + MimeTypes []string `json:"mimetypes"` + MinLength int `json:"min_length" jsonschema:"minimum=0"` + } `json:"compress"` API struct { ReadOnly bool `json:"read_only"` Access struct { @@ -100,7 +105,7 @@ type Data struct { CORS struct { Origins []string `json:"origins"` } `json:"cors"` - MimeTypes string `json:"mimetypes_file"` + MimeTypesFile string `json:"mimetypes_file"` } `json:"storage"` RTMP struct { Enable bool `json:"enable"` @@ -259,7 +264,7 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) { data.Router.BlockedPrefixes = slices.Copy(d.Router.BlockedPrefixes) data.Router.Routes = copy.StringMap(d.Router.Routes) - data.Storage.MimeTypes = d.Storage.MimeTypes + data.Storage.MimeTypesFile = d.Storage.MimeTypes data.Storage.CORS = d.Storage.CORS data.Storage.CORS.Origins = slices.Copy(d.Storage.CORS.Origins) @@ -367,7 +372,7 @@ func DowngradeV3toV2(d *Data) (*v2.Data, error) { data.TLS.CertFile = d.TLS.CertFile data.TLS.KeyFile = d.TLS.KeyFile - data.Storage.MimeTypes = d.Storage.MimeTypes + data.Storage.MimeTypes = d.Storage.MimeTypesFile data.Storage.CORS = d.Storage.CORS data.Storage.CORS.Origins = slices.Copy(d.Storage.CORS.Origins) diff --git a/go.mod b/go.mod index f1d41509..8e8ab4ff 100644 --- a/go.mod +++ b/go.mod @@ -1,58 +1,58 @@ module github.com/datarhei/core/v16 -go 1.22.0 +go 1.22.5 -toolchain go1.22.1 +toolchain go1.23.1 require ( - github.com/99designs/gqlgen v0.17.49 - github.com/Masterminds/semver/v3 v3.2.1 - github.com/adhocore/gronx v1.19.0 + github.com/99designs/gqlgen v0.17.55 + github.com/Masterminds/semver/v3 v3.3.0 + github.com/adhocore/gronx v1.19.1 github.com/andybalholm/brotli v1.1.0 github.com/atrox/haikunatorgo/v2 v2.0.1 - github.com/caddyserver/certmagic v0.21.3 + github.com/caddyserver/certmagic v0.21.4 github.com/datarhei/gosrt v0.7.0 github.com/datarhei/joy4 v0.0.0-20240603190808-b1407345907e github.com/dolthub/swiss v0.2.1 github.com/fujiwara/shapeio v1.0.0 - github.com/go-playground/validator/v10 v10.22.0 + github.com/go-playground/validator/v10 v10.22.1 github.com/gobwas/glob v0.2.3 github.com/golang-jwt/jwt/v4 v4.5.0 github.com/golang-jwt/jwt/v5 v5.2.1 github.com/google/gops v0.3.28 github.com/google/uuid v1.6.0 github.com/hashicorp/go-hclog v1.6.3 - github.com/hashicorp/raft v1.7.0 + github.com/hashicorp/raft v1.7.1 
github.com/hashicorp/raft-boltdb/v2 v2.3.0 github.com/invopop/jsonschema v0.4.0 github.com/joho/godotenv v1.5.1 - github.com/klauspost/compress v1.17.9 + github.com/klauspost/compress v1.17.10 github.com/klauspost/cpuid/v2 v2.2.8 github.com/labstack/echo/v4 v4.12.0 - github.com/lestrrat-go/strftime v1.0.6 + github.com/lestrrat-go/strftime v1.1.0 github.com/lithammer/shortuuid/v4 v4.0.0 github.com/mattn/go-isatty v0.0.20 - github.com/minio/minio-go/v7 v7.0.75 + github.com/minio/minio-go/v7 v7.0.77 github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 - github.com/prometheus/client_golang v1.20.0 + github.com/prometheus/client_golang v1.20.4 github.com/puzpuzpuz/xsync/v3 v3.4.0 github.com/shirou/gopsutil/v3 v3.24.5 github.com/stretchr/testify v1.9.0 github.com/swaggo/echo-swagger v1.4.1 github.com/swaggo/swag v1.16.3 github.com/tklauser/go-sysconf v0.3.14 - github.com/vektah/gqlparser/v2 v2.5.16 + github.com/vektah/gqlparser/v2 v2.5.17 github.com/xeipuuv/gojsonschema v1.2.0 - go.etcd.io/bbolt v1.3.10 - go.uber.org/automaxprocs v1.5.3 + go.etcd.io/bbolt v1.3.11 + go.uber.org/automaxprocs v1.6.0 go.uber.org/zap v1.27.0 - golang.org/x/crypto v0.26.0 - golang.org/x/mod v0.20.0 + golang.org/x/crypto v0.28.0 + golang.org/x/mod v0.21.0 ) require ( github.com/KyleBanks/depth v1.2.1 // indirect - github.com/agnivade/levenshtein v1.1.1 // indirect + github.com/agnivade/levenshtein v1.2.0 // indirect github.com/armon/go-metrics v0.4.1 // indirect github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -86,42 +86,41 @@ require ( github.com/labstack/gommon v0.4.2 // indirect github.com/leodido/go-urn v1.4.0 // indirect github.com/libdns/libdns v0.2.2 // indirect - github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect + github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mholt/acmez/v2 v2.0.2 // indirect + github.com/mholt/acmez/v2 v2.0.3 // indirect github.com/miekg/dns v1.1.62 // indirect github.com/minio/md5-simd v1.1.2 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.55.0 // indirect + github.com/prometheus/common v0.60.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect - github.com/rs/xid v1.5.0 // indirect + github.com/rs/xid v1.6.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/sosodev/duration v1.3.1 // indirect github.com/swaggo/files/v2 v2.0.1 // indirect - github.com/tklauser/numcpus v0.8.0 // indirect - github.com/urfave/cli/v2 v2.27.2 // indirect + github.com/tklauser/numcpus v0.9.0 // indirect + github.com/urfave/cli/v2 v2.27.4 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect github.com/valyala/fasttemplate v1.2.2 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect - github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 // indirect + github.com/xrash/smetrics 
v0.0.0-20240521201337-686a1a2994c1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect github.com/zeebo/blake3 v0.2.4 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/net v0.28.0 // indirect + golang.org/x/net v0.30.0 // indirect golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.24.0 // indirect - golang.org/x/text v0.17.0 // indirect - golang.org/x/time v0.6.0 // indirect - golang.org/x/tools v0.24.0 // indirect - google.golang.org/protobuf v1.34.2 // indirect + golang.org/x/sys v0.26.0 // indirect + golang.org/x/text v0.19.0 // indirect + golang.org/x/time v0.7.0 // indirect + golang.org/x/tools v0.26.0 // indirect + google.golang.org/protobuf v1.35.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 381e7d01..8ea02977 100644 --- a/go.sum +++ b/go.sum @@ -1,16 +1,16 @@ -github.com/99designs/gqlgen v0.17.49 h1:b3hNGexHd33fBSAd4NDT/c3NCcQzcAVkknhN9ym36YQ= -github.com/99designs/gqlgen v0.17.49/go.mod h1:tC8YFVZMed81x7UJ7ORUwXF4Kn6SXuucFqQBhN8+BU0= +github.com/99designs/gqlgen v0.17.55 h1:3vzrNWYyzSZjGDFo68e5j9sSauLxfKvLp+6ioRokVtM= +github.com/99designs/gqlgen v0.17.55/go.mod h1:3Bq768f8hgVPGZxL8aY9MaYmbxa6llPM/qu1IGH1EJo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= -github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= -github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/PuerkitoBio/goquery v1.9.2 h1:4/wZksC3KgkQw7SQgkKotmKljk0M6V8TUvA8Wb4yPeE= -github.com/PuerkitoBio/goquery v1.9.2/go.mod h1:GHPCaP0ODyyxqcNoFGYlAprUFH81NuRPd0GX3Zu2Mvk= -github.com/adhocore/gronx v1.19.0 h1:GrEvNMPDwXND+YFadCyFVQPC+/xxoGJaQzu+duNf6aU= -github.com/adhocore/gronx v1.19.0/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg= -github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= -github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= +github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= +github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/PuerkitoBio/goquery v1.9.3 h1:mpJr/ikUA9/GNJB/DBZcGeFDXUtosHRyRrwh7KGdTG0= +github.com/PuerkitoBio/goquery v1.9.3/go.mod h1:1ndLHPdTz+DyQPICCWYlYQMPl0oXZj0G6D4LCYA6u4U= +github.com/adhocore/gronx v1.19.1 h1:S4c3uVp5jPjnk00De0lslyTenGJ4nA3Ydbkj1SbdPVc= +github.com/adhocore/gronx v1.19.1/go.mod h1:7oUY1WAU8rEJWmAxXR2DN0JaO4gi9khSgKjiRypqteg= +github.com/agnivade/levenshtein v1.2.0 h1:U9L4IOT0Y3i0TIlUIDJ7rVUziKi/zPbrJGaFrtYH3SY= +github.com/agnivade/levenshtein v1.2.0/go.mod h1:QVVI16kDrtSuwcpd0p1+xMC6Z/VfhtCyDIjcwga4/DU= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -35,8 +35,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/boltdb/bolt v1.3.1 
h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/caddyserver/certmagic v0.21.3 h1:pqRRry3yuB4CWBVq9+cUqu+Y6E2z8TswbhNx1AZeYm0= -github.com/caddyserver/certmagic v0.21.3/go.mod h1:Zq6pklO9nVRl3DIFUw9gVUfXKdpc/0qwTUAQMBlfgtI= +github.com/caddyserver/certmagic v0.21.4 h1:e7VobB8rffHv8ZZpSiZtEwnLDHUwLVYLWzWSa1FfKI0= +github.com/caddyserver/certmagic v0.21.4/go.mod h1:swUXjQ1T9ZtMv95qj7/InJvWLXURU85r+CfG0T+ZbDE= github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA= github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -53,8 +53,8 @@ github.com/datarhei/joy4 v0.0.0-20240603190808-b1407345907e/go.mod h1:Jcw/6jZDQQ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= -github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= +github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54 h1:SG7nF6SRlWhcT7cNTs5R6Hk4V2lcmLz2NsG2VnInyNo= +github.com/dgryski/trifles v0.0.0-20230903005119-f50d829f2e54/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= github.com/dolthub/maphash v0.1.0 h1:bsQ7JsF4FkkWyrP3oCnFJgrCUAFbFf3kOl4L/QxPDyQ= github.com/dolthub/maphash v0.1.0/go.mod h1:gkg4Ch4CdCDu5h6PMriVLawB7koZ+5ijb9puGMV50a4= github.com/dolthub/swiss v0.2.1 h1:gs2osYs5SJkAaH5/ggVJqXQxRXtWshF6uE0lgR/Y3Gw= @@ -94,8 +94,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao= -github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 v10.22.1 h1:40JcKH+bBNGFczGuoBYgX4I6m/i27HYW8P9FDk5PbgA= +github.com/go-playground/validator/v10 v10.22.1/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= @@ -141,8 +141,8 @@ github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iP github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/raft v1.7.0 h1:4u24Qn6lQ6uwziM++UgsyiT64Q8GyRn43CV41qPiz1o= -github.com/hashicorp/raft v1.7.0/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= +github.com/hashicorp/raft v1.7.1 h1:ytxsNx4baHsRZrhUcbt3+79zc4ly8qm7pi0393pSchY= 
+github.com/hashicorp/raft v1.7.1/go.mod h1:hUeiEwQQR/Nk2iKDD0dkEhklSsu3jcAcqvPzPoZSAEM= github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702 h1:RLKEcCuKcZ+qp2VlaaZsYZfLOmIiuJNpEi48Rl8u9cQ= github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0= github.com/hashicorp/raft-boltdb/v2 v2.3.0 h1:fPpQR1iGEVYjZ2OELvUHX600VAK5qmdnDEv3eXOwZUA= @@ -159,8 +159,8 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/klauspost/compress v1.17.10 h1:oXAz+Vh0PMUvJczoi+flxpnBEPxoER1IaAnU/NMPtT0= +github.com/klauspost/compress v1.17.10/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= @@ -183,14 +183,14 @@ github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8= github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is= -github.com/lestrrat-go/strftime v1.0.6 h1:CFGsDEt1pOpFNU+TJB0nhz9jl+K0hZSLE205AhTIGQQ= -github.com/lestrrat-go/strftime v1.0.6/go.mod h1:f7jQKgV5nnJpYgdEasS+/y7EsTb8ykN2z68n3TtcTaw= +github.com/lestrrat-go/strftime v1.1.0 h1:gMESpZy44/4pXLO/m+sL0yBd1W6LjgjrrD4a68Gapyg= +github.com/lestrrat-go/strftime v1.1.0/go.mod h1:uzeIB52CeUJenCo1syghlugshMysrqUT51HlxphXVeI= github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s= github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ= github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw7k08o4c= github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y= -github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI= -github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= +github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0= +github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -203,14 +203,14 @@ github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/ github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod 
h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mholt/acmez/v2 v2.0.2 h1:OmK6xckte2JfKGPz4OAA8aNHTiLvGp8tLzmrd/wfSyw= -github.com/mholt/acmez/v2 v2.0.2/go.mod h1:fX4c9r5jYwMyMsC+7tkYRxHibkOTgta5DIFGoe67e1U= +github.com/mholt/acmez/v2 v2.0.3 h1:CgDBlEwg3QBp6s45tPQmFIBrkRIkBT4rW4orMM6p4sw= +github.com/mholt/acmez/v2 v2.0.3/go.mod h1:pQ1ysaDeGrIMvJ9dfJMk5kJNkn7L2sb3UhyrX6Q91cw= github.com/miekg/dns v1.1.62 h1:cN8OuEF1/x5Rq6Np+h1epln8OiyPWV+lROx9LxcGgIQ= github.com/miekg/dns v1.1.62/go.mod h1:mvDlcItzm+br7MToIKqkglaGhlFMHJ9DTNNWONWXbNQ= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.75 h1:0uLrB6u6teY2Jt+cJUVi9cTvDRuBKWSRzSAcznRkwlE= -github.com/minio/minio-go/v7 v7.0.75/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8= +github.com/minio/minio-go/v7 v7.0.77 h1:GaGghJRg9nwDVlNbwYjSDJT1rqltQkBFDsypWX1v3Bw= +github.com/minio/minio-go/v7 v7.0.77/go.mod h1:AVM3IUN6WwKzmwBxVdjzhH8xq+f57JSbbvzqvUzR6eg= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -224,8 +224,6 @@ github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0Mw github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -238,8 +236,8 @@ github.com/prep/average v0.0.0-20200506183628-d26c465f48c3/go.mod h1:0ZE5gcyWKS1 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= -github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -247,8 +245,8 @@ github.com/prometheus/client_model v0.6.1 
h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= @@ -258,8 +256,8 @@ github.com/puzpuzpuz/xsync/v3 v3.4.0 h1:DuVBAdXuGFHv8adVXjWWZ63pJq+NRXOWVXlKDBZ+ github.com/puzpuzpuz/xsync/v3 v3.4.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= -github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= @@ -291,17 +289,17 @@ github.com/swaggo/swag v1.16.3 h1:PnCYjPCah8FK4I26l2F/KQ4yz3sILcVUN3cTlBFA9Pg= github.com/swaggo/swag v1.16.3/go.mod h1:DImHIuOFXKpMFAQjcC7FG4m3Dg4+QuUgUzJmKjI/gRk= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= -github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= -github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= +github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo= +github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/urfave/cli/v2 v2.27.2 h1:6e0H+AkS+zDckwPCUrZkKX38mRaau4nL2uipkJpbkcI= -github.com/urfave/cli/v2 v2.27.2/go.mod h1:g0+79LmHHATl7DAcHO99smiR/T7uGLw84w8Y42x+4eM= +github.com/urfave/cli/v2 v2.27.4 h1:o1owoI+02Eb+K107p27wEX9Bb8eqIoZCfLXloLUSWJ8= +github.com/urfave/cli/v2 v2.27.4/go.mod h1:m4QzxcD2qpra4z7WhzEGn74WZLViBnMpb1ToCAKdGRQ= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/vektah/gqlparser/v2 v2.5.16 
h1:1gcmLTvs3JLKXckwCwlUagVn/IlV2bwqle0vJ0vy5p8= -github.com/vektah/gqlparser/v2 v2.5.16/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww= +github.com/vektah/gqlparser/v2 v2.5.17 h1:9At7WblLV7/36nulgekUgIaqHZWn5hxqluxrxGUhOmI= +github.com/vektah/gqlparser/v2 v2.5.17/go.mod h1:1lz1OeCqgQbQepsGxPVywrjdBHW2T08PUS3pJqepRww= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -309,8 +307,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHo github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913 h1:+qGGcbkzsfDQNPPe9UDgpxAWQrhbbBXOYJFQDq/dtJw= -github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913/go.mod h1:4aEEwZQutDLsQv2Deui4iYQ6DWTxR14g6m8Wv88+Xqk= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 h1:gEOO8jv9F4OT7lGCjxCBTO/36wtF6j2nSip77qHd4x4= +github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1/go.mod h1:Ohn+xnUBiLI6FVj/9LpzZWtj1/D6lUovWYBkxHVV3aM= github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/zeebo/assert v1.1.0 h1:hU1L1vLTHsnO8x8c9KAR5GmM5QscxHg5RNU5z5qbUWY= @@ -319,10 +317,10 @@ github.com/zeebo/blake3 v0.2.4 h1:KYQPkhpRtcqh0ssGYcKLG1JYvddkEA8QwCM/yBqhaZI= github.com/zeebo/blake3 v0.2.4/go.mod h1:7eeQ6d2iXWRGF6npfaxl2CU+xy2Fjo2gxeyZGCRUjcE= github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= -go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= -go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= +go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -331,14 +329,14 @@ go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod 
h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw= +golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U= +golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= +golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= +golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -360,19 +358,19 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= +golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= +golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= +google.golang.org/protobuf 
v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/http/fs/fs.go b/http/fs/fs.go index 500ab733..d1f5cc74 100644 --- a/http/fs/fs.go +++ b/http/fs/fs.go @@ -17,7 +17,6 @@ type FS struct { DefaultFile string DefaultContentType string - Gzip bool Filesystem fs.Filesystem diff --git a/http/handler/api/config_test.go b/http/handler/api/config_test.go index 0757cfb0..69d67f9e 100644 --- a/http/handler/api/config_test.go +++ b/http/handler/api/config_test.go @@ -51,7 +51,7 @@ func TestConfigSetConflict(t *testing.T) { router, _ := getDummyConfigRouter(t) cfg := config.New(nil) - cfg.Storage.MimeTypes = "/path/to/mime.types" + cfg.Storage.MimeTypesFile = "/path/to/mime.types" var data bytes.Buffer diff --git a/http/middleware/compress/brotli.go b/http/middleware/compress/brotli.go index 1f692aa9..813657bc 100644 --- a/http/middleware/compress/brotli.go +++ b/http/middleware/compress/brotli.go @@ -15,7 +15,7 @@ func NewBrotli(level Level) Compression { brotliLevel := brotli.DefaultCompression if level == BestCompression { brotliLevel = brotli.BestCompression - } else { + } else if level == BestSpeed { brotliLevel = brotli.BestSpeed } diff --git a/http/middleware/compress/compress.go b/http/middleware/compress/compress.go index 6e5616eb..43688d2f 100644 --- a/http/middleware/compress/compress.go +++ b/http/middleware/compress/compress.go @@ -8,8 +8,9 @@ import ( "net" "net/http" "strings" - "sync" + "github.com/datarhei/core/v16/mem" + "github.com/datarhei/core/v16/slices" "github.com/labstack/echo/v4" "github.com/labstack/echo/v4/middleware" ) @@ -27,8 +28,11 @@ type Config struct { // is used. Optional. Default value 0 MinLength int - // Schemes is a list of enabled compressiond. Optional. Default [GzipScheme, ZstdScheme] - Schemes []Scheme + // Schemes is a list of enabled compressiond. Optional. Default [gzip] + Schemes []string + + // List of content type to compress. If empty, everything will be compressed + ContentTypes []string } type Compression interface { @@ -46,6 +50,7 @@ type Compressor interface { type compressResponseWriter struct { Compressor http.ResponseWriter + hasHeader bool wroteHeader bool wroteBody bool minLength int @@ -54,20 +59,10 @@ type compressResponseWriter struct { code int headerContentLength string scheme string + contentTypes []string + passThrough bool } -type Scheme string - -func (s Scheme) String() string { - return string(s) -} - -const ( - GzipScheme Scheme = "gzip" - BrotliScheme Scheme = "br" - ZstdScheme Scheme = "zstd" -) - type Level int const ( @@ -78,33 +73,11 @@ const ( // DefaultConfig is the default Gzip middleware config. var DefaultConfig = Config{ - Skipper: middleware.DefaultSkipper, - Level: DefaultCompression, - MinLength: 0, - Schemes: []Scheme{GzipScheme}, -} - -// ContentTypesSkipper returns a Skipper based on the list of content types -// that should be compressed. If the list is empty, all responses will be -// compressed. 
-func ContentTypeSkipper(contentTypes []string) middleware.Skipper { - return func(c echo.Context) bool { - // If no allowed content types are given, compress all - if len(contentTypes) == 0 { - return false - } - - // Iterate through the allowed content types and don't skip if the content type matches - responseContentType := c.Response().Header().Get(echo.HeaderContentType) - - for _, contentType := range contentTypes { - if strings.Contains(responseContentType, contentType) { - return false - } - } - - return true - } + Skipper: middleware.DefaultSkipper, + Level: DefaultCompression, + MinLength: 0, + Schemes: []string{"gzip"}, + ContentTypes: []string{}, } // New returns a middleware which compresses HTTP response using a compression @@ -133,38 +106,40 @@ func NewWithConfig(config Config) echo.MiddlewareFunc { config.Schemes = DefaultConfig.Schemes } + contentTypes := slices.Copy(config.ContentTypes) + gzipEnable := false brotliEnable := false zstdEnable := false for _, s := range config.Schemes { switch s { - case GzipScheme: + case "gzip": gzipEnable = true - case BrotliScheme: + case "br": brotliEnable = true - case ZstdScheme: + case "zstd": zstdEnable = true } } - var gzipPool Compression - var brotliPool Compression - var zstdPool Compression + var gzipCompressor Compression + var brotliCompressor Compression + var zstdCompressor Compression if gzipEnable { - gzipPool = NewGzip(config.Level) + gzipCompressor = NewGzip(config.Level) } if brotliEnable { - brotliPool = NewBrotli(config.Level) + brotliCompressor = NewBrotli(config.Level) } if zstdEnable { - zstdPool = NewZstd(config.Level) + zstdCompressor = NewZstd(config.Level) } - bpool := bufferPool() + bufferPool := mem.NewBufferPool() return func(next echo.HandlerFunc) echo.HandlerFunc { return func(c echo.Context) error { @@ -173,62 +148,69 @@ func NewWithConfig(config Config) echo.MiddlewareFunc { } res := c.Response() - res.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding) encodings := c.Request().Header.Get(echo.HeaderAcceptEncoding) - var pool Compression - var scheme Scheme + var compress Compression + var scheme string - if zstdEnable && strings.Contains(encodings, ZstdScheme.String()) { - pool = zstdPool - scheme = ZstdScheme - } else if brotliEnable && strings.Contains(encodings, BrotliScheme.String()) { - pool = brotliPool - scheme = BrotliScheme - } else if gzipEnable && strings.Contains(encodings, GzipScheme.String()) { - pool = gzipPool - scheme = GzipScheme + if zstdEnable && strings.Contains(encodings, "zstd") { + compress = zstdCompressor + scheme = "zstd" + } else if brotliEnable && strings.Contains(encodings, "br") { + compress = brotliCompressor + scheme = "br" + } else if gzipEnable && strings.Contains(encodings, "gzip") { + compress = gzipCompressor + scheme = "gzip" } - if pool != nil { - w := pool.Acquire() - if w == nil { + if compress != nil { + compressor := compress.Acquire() + if compressor == nil { return echo.NewHTTPError(http.StatusInternalServerError, fmt.Errorf("failed to acquire compressor for %s", scheme)) } rw := res.Writer - w.Reset(rw) + compressor.Reset(rw) - buf := bpool.Get().(*bytes.Buffer) - buf.Reset() + buffer := bufferPool.Get() - grw := &compressResponseWriter{Compressor: w, ResponseWriter: rw, minLength: config.MinLength, buffer: buf, scheme: scheme.String()} + grw := &compressResponseWriter{ + Compressor: compressor, + ResponseWriter: rw, + minLength: config.MinLength, + buffer: buffer, + scheme: scheme, + contentTypes: contentTypes, + } defer func() { - if !grw.wroteBody 
{ - if res.Header().Get(echo.HeaderContentEncoding) == scheme.String() { - res.Header().Del(echo.HeaderContentEncoding) - } - // We have to reset response to it's pristine state when - // nothing is written to body or error is returned. - // See issue #424, #407. - res.Writer = rw - w.Reset(io.Discard) - } else if !grw.minLengthExceeded { - // If the minimum content length hasn't exceeded, write the uncompressed response - res.Writer = rw - if grw.wroteHeader { - // Restore Content-Length header in case it was deleted - if len(grw.headerContentLength) != 0 { - grw.Header().Set(echo.HeaderContentLength, grw.headerContentLength) + if !grw.passThrough { + if !grw.wroteBody { + if res.Header().Get(echo.HeaderContentEncoding) == scheme { + res.Header().Del(echo.HeaderContentEncoding) } - grw.ResponseWriter.WriteHeader(grw.code) + // We have to reset response to it's pristine state when + // nothing is written to body or error is returned. + // See issue #424, #407. + res.Writer = rw + compressor.Reset(io.Discard) + } else if !grw.minLengthExceeded { + // If the minimum content length hasn't exceeded, write the uncompressed response + res.Writer = rw + if grw.wroteHeader { + // Restore Content-Length header in case it was deleted + if len(grw.headerContentLength) != 0 { + grw.Header().Set(echo.HeaderContentLength, grw.headerContentLength) + } + grw.ResponseWriter.WriteHeader(grw.code) + } + grw.buffer.WriteTo(rw) + compressor.Reset(io.Discard) } - grw.buffer.WriteTo(rw) - w.Reset(io.Discard) } - w.Close() - bpool.Put(buf) - pool.Release(w) + compressor.Close() + bufferPool.Put(buffer) + compress.Release(compressor) }() res.Writer = grw @@ -241,17 +223,37 @@ func NewWithConfig(config Config) echo.MiddlewareFunc { func (w *compressResponseWriter) WriteHeader(code int) { if code == http.StatusNoContent { // Issue #489 - w.ResponseWriter.Header().Del(echo.HeaderContentEncoding) + w.Header().Del(echo.HeaderContentEncoding) } w.headerContentLength = w.Header().Get(echo.HeaderContentLength) w.Header().Del(echo.HeaderContentLength) // Issue #444 - w.wroteHeader = true + if !w.canCompress(w.Header().Get(echo.HeaderContentType)) { + w.passThrough = true + } + + w.hasHeader = true // Delay writing of the header until we know if we'll actually compress the response w.code = code } +func (w *compressResponseWriter) canCompress(responseContentType string) bool { + // If no content types are given, compress all + if len(w.contentTypes) == 0 { + return true + } + + // Iterate through the allowed content types and don't skip if the content type matches + for _, contentType := range w.contentTypes { + if strings.Contains(responseContentType, contentType) { + return true + } + } + + return false +} + func (w *compressResponseWriter) Write(b []byte) (int, error) { if w.Header().Get(echo.HeaderContentType) == "" { w.Header().Set(echo.HeaderContentType, http.DetectContentType(b)) @@ -259,6 +261,18 @@ func (w *compressResponseWriter) Write(b []byte) (int, error) { w.wroteBody = true + if !w.hasHeader { + w.WriteHeader(http.StatusOK) + } + + if w.passThrough { + if !w.wroteHeader { + w.ResponseWriter.WriteHeader(w.code) + w.wroteHeader = true + } + return w.ResponseWriter.Write(b) + } + if !w.minLengthExceeded { n, err := w.buffer.Write(b) @@ -267,8 +281,10 @@ func (w *compressResponseWriter) Write(b []byte) (int, error) { // The minimum length is exceeded, add Content-Encoding header and write the header w.Header().Set(echo.HeaderContentEncoding, w.scheme) // Issue #806 - if w.wroteHeader { + 
w.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding) + if w.hasHeader { w.ResponseWriter.WriteHeader(w.code) + w.wroteHeader = true } return w.Compressor.Write(w.buffer.Bytes()) @@ -281,12 +297,31 @@ func (w *compressResponseWriter) Write(b []byte) (int, error) { } func (w *compressResponseWriter) Flush() { + if !w.hasHeader { + w.WriteHeader(http.StatusOK) + } + + if w.passThrough { + if !w.wroteHeader { + w.ResponseWriter.WriteHeader(w.code) + w.wroteHeader = true + } + + if flusher, ok := w.ResponseWriter.(http.Flusher); ok { + flusher.Flush() + } + + return + } + if !w.minLengthExceeded { // Enforce compression w.minLengthExceeded = true w.Header().Set(echo.HeaderContentEncoding, w.scheme) // Issue #806 - if w.wroteHeader { + w.Header().Add(echo.HeaderVary, echo.HeaderAcceptEncoding) + if w.hasHeader { w.ResponseWriter.WriteHeader(w.code) + w.wroteHeader = true } w.Compressor.Write(w.buffer.Bytes()) @@ -308,12 +343,3 @@ func (w *compressResponseWriter) Push(target string, opts *http.PushOptions) err } return http.ErrNotSupported } - -func bufferPool() sync.Pool { - return sync.Pool{ - New: func() interface{} { - b := &bytes.Buffer{} - return b - }, - } -} diff --git a/http/middleware/compress/compress_test.go b/http/middleware/compress/compress_test.go index 60888989..b42971d2 100644 --- a/http/middleware/compress/compress_test.go +++ b/http/middleware/compress/compress_test.go @@ -58,15 +58,15 @@ func (rcr *nopReadCloseResetter) Reset(r io.Reader) error { return resetter.Reset(r) } -func getTestcases() map[Scheme]func(r io.Reader) (ReadCloseResetter, error) { - return map[Scheme]func(r io.Reader) (ReadCloseResetter, error){ - GzipScheme: func(r io.Reader) (ReadCloseResetter, error) { +func getTestcases() map[string]func(r io.Reader) (ReadCloseResetter, error) { + return map[string]func(r io.Reader) (ReadCloseResetter, error){ + "gzip": func(r io.Reader) (ReadCloseResetter, error) { return gzip.NewReader(r) }, - BrotliScheme: func(r io.Reader) (ReadCloseResetter, error) { + "br": func(r io.Reader) (ReadCloseResetter, error) { return &nopReadCloseResetter{brotli.NewReader(r)}, nil }, - ZstdScheme: func(r io.Reader) (ReadCloseResetter, error) { + "zstd": func(r io.Reader) (ReadCloseResetter, error) { reader, err := zstd.NewReader(r) return &nopReadCloseResetter{reader}, err }, @@ -77,18 +77,18 @@ func TestCompress(t *testing.T) { schemes := getTestcases() for scheme, reader := range schemes { - t.Run(scheme.String(), func(t *testing.T) { + t.Run(scheme, func(t *testing.T) { e := echo.New() req := httptest.NewRequest(http.MethodGet, "/", nil) rec := httptest.NewRecorder() - c := e.NewContext(req, rec) + ctx := e.NewContext(req, rec) // Skip if no Accept-Encoding header - h := NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error { + handler := NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error { c.Response().Write([]byte("test")) // For Content-Type sniffing return nil }) - h(c) + handler(ctx) assert := assert.New(t) @@ -96,15 +96,15 @@ func TestCompress(t *testing.T) { // Compression req = httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set(echo.HeaderAcceptEncoding, scheme.String()) + req.Header.Set(echo.HeaderAcceptEncoding, scheme) rec = httptest.NewRecorder() - c = e.NewContext(req, rec) - h(c) - assert.Equal(scheme.String(), rec.Header().Get(echo.HeaderContentEncoding)) + ctx = e.NewContext(req, rec) + handler(ctx) + assert.Equal(scheme, rec.Header().Get(echo.HeaderContentEncoding)) 
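Stepping out of the tests for a moment: the reworked middleware is now configured with plain string scheme names and a ContentTypes allow list instead of the old ContentTypeSkipper. A minimal wiring sketch along the lines of what http/server.go does further down in this diff — import paths, field names and constants come from this repository, while the concrete scheme list, content types and port are example choices; when the client accepts several encodings, the middleware prefers zstd, then br, then gzip:

package main

import (
	"net/http"

	mwcompress "github.com/datarhei/core/v16/http/middleware/compress"
	"github.com/labstack/echo/v4"
)

func main() {
	e := echo.New()

	// An empty ContentTypes list would compress every response; responses
	// shorter than MinLength are buffered and sent uncompressed.
	e.Use(mwcompress.NewWithConfig(mwcompress.Config{
		Level:        mwcompress.BestSpeed,
		MinLength:    1000,
		Schemes:      []string{"gzip", "br", "zstd"},
		ContentTypes: []string{"text/html", "application/json", "text/event-stream"},
	}))

	e.GET("/", func(c echo.Context) error {
		// Handlers need no changes; compression is negotiated per response
		// based on Accept-Encoding, content type and body size.
		return c.String(http.StatusOK, "hello")
	})

	e.Logger.Fatal(e.Start(":8080"))
}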
assert.Contains(rec.Header().Get(echo.HeaderContentType), echo.MIMETextPlain) r, err := reader(rec.Body) if assert.NoError(err) { - buf := new(bytes.Buffer) + buf := &bytes.Buffer{} defer r.Close() buf.ReadFrom(r) assert.Equal("test", buf.String()) @@ -112,11 +112,11 @@ func TestCompress(t *testing.T) { // Gzip chunked req = httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set(echo.HeaderAcceptEncoding, scheme.String()) + req.Header.Set(echo.HeaderAcceptEncoding, scheme) rec = httptest.NewRecorder() - c = e.NewContext(req, rec) - NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error { + ctx = e.NewContext(req, rec) + NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error { c.Response().Header().Set("Content-Type", "text/event-stream") c.Response().Header().Set("Transfer-Encoding", "chunked") @@ -126,7 +126,7 @@ func TestCompress(t *testing.T) { // Read the first part of the data assert.True(rec.Flushed) - assert.Equal(scheme.String(), rec.Header().Get(echo.HeaderContentEncoding)) + assert.Equal(scheme, rec.Header().Get(echo.HeaderContentEncoding)) // Write and flush the second part of the data c.Response().Write([]byte("tost\n")) @@ -135,7 +135,7 @@ func TestCompress(t *testing.T) { // Write the final part of the data and return c.Response().Write([]byte("tast")) return nil - })(c) + })(ctx) buf := new(bytes.Buffer) r.Reset(rec.Body) @@ -146,14 +146,53 @@ func TestCompress(t *testing.T) { } } +func TestCompressWithPassthrough(t *testing.T) { + schemes := getTestcases() + + for scheme, reader := range schemes { + t.Run(scheme, func(t *testing.T) { + e := echo.New() + e.Use(NewWithConfig(Config{MinLength: 5, Schemes: []string{scheme}, ContentTypes: []string{"text/compress"}})) + e.GET("/plain", func(c echo.Context) error { + c.Response().Header().Set("Content-Type", "text/plain") + c.Response().Write([]byte("testtest")) + return nil + }) + e.GET("/compress", func(c echo.Context) error { + c.Response().Header().Set("Content-Type", "text/compress") + c.Response().Write([]byte("testtest")) + return nil + }) + req := httptest.NewRequest(http.MethodGet, "/plain", nil) + req.Header.Set(echo.HeaderAcceptEncoding, scheme) + rec := httptest.NewRecorder() + e.ServeHTTP(rec, req) + assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding)) + assert.Contains(t, rec.Body.String(), "testtest") + + req = httptest.NewRequest(http.MethodGet, "/compress", nil) + req.Header.Set(echo.HeaderAcceptEncoding, scheme) + rec = httptest.NewRecorder() + e.ServeHTTP(rec, req) + assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding)) + r, err := reader(rec.Body) + if assert.NoError(t, err) { + buf := new(bytes.Buffer) + defer r.Close() + buf.ReadFrom(r) + assert.Equal(t, "testtest", buf.String()) + } + }) + } +} + func TestCompressWithMinLength(t *testing.T) { schemes := getTestcases() for scheme, reader := range schemes { - t.Run(scheme.String(), func(t *testing.T) { + t.Run(scheme, func(t *testing.T) { e := echo.New() - // Invalid level - e.Use(NewWithConfig(Config{MinLength: 5, Schemes: []Scheme{scheme}})) + e.Use(NewWithConfig(Config{MinLength: 5, Schemes: []string{scheme}})) e.GET("/", func(c echo.Context) error { c.Response().Write([]byte("test")) return nil @@ -163,17 +202,17 @@ func TestCompressWithMinLength(t *testing.T) { return nil }) req := httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set(echo.HeaderAcceptEncoding, scheme.String()) + req.Header.Set(echo.HeaderAcceptEncoding, scheme) rec := httptest.NewRecorder() 
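One behavioural detail that both the chunked test above and the Flush implementation earlier in this diff rely on: an explicit Flush forces the switch to compressed output even while the body is still below MinLength. A hypothetical streaming sketch to show the pattern — the /events endpoint and its payload are made up, the rest mirrors the event-stream test in this diff:

package main

import (
	"fmt"
	"net/http"

	mwcompress "github.com/datarhei/core/v16/http/middleware/compress"
	"github.com/labstack/echo/v4"
)

func main() {
	e := echo.New()

	// Empty ContentTypes: compress everything the client accepts.
	e.Use(mwcompress.NewWithConfig(mwcompress.Config{
		Level:     mwcompress.BestSpeed,
		MinLength: 1000,
		Schemes:   []string{"gzip"},
	}))

	e.GET("/events", func(c echo.Context) error {
		c.Response().Header().Set("Content-Type", "text/event-stream")

		for i := 0; i < 3; i++ {
			fmt.Fprintf(c.Response(), "data: tick %d\n\n", i)
			// Flush pushes the compressed bytes written so far to the client,
			// even though fewer than MinLength bytes have been written.
			c.Response().Flush()
		}

		return nil
	})

	e.Logger.Fatal(e.Start(":8080"))
}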
e.ServeHTTP(rec, req) assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding)) assert.Contains(t, rec.Body.String(), "test") req = httptest.NewRequest(http.MethodGet, "/foobar", nil) - req.Header.Set(echo.HeaderAcceptEncoding, scheme.String()) + req.Header.Set(echo.HeaderAcceptEncoding, scheme) rec = httptest.NewRecorder() e.ServeHTTP(rec, req) - assert.Equal(t, scheme.String(), rec.Header().Get(echo.HeaderContentEncoding)) + assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding)) r, err := reader(rec.Body) if assert.NoError(t, err) { buf := new(bytes.Buffer) @@ -185,17 +224,60 @@ func TestCompressWithMinLength(t *testing.T) { } } +func TestCompressWithAroundMinLength(t *testing.T) { + schemes := getTestcases() + minLength := 1000 + + for scheme, reader := range schemes { + for i := minLength - 64; i < minLength+64; i++ { + name := fmt.Sprintf("%s-%d", scheme, i) + + t.Run(name, func(t *testing.T) { + data := rand.Bytes(i) + e := echo.New() + e.Use(NewWithConfig(Config{MinLength: minLength, Schemes: []string{scheme}})) + e.GET("/", func(c echo.Context) error { + c.Response().Write(data[:1]) + c.Response().Write(data[1:]) + return nil + }) + req := httptest.NewRequest(http.MethodGet, "/", nil) + req.Header.Set(echo.HeaderAcceptEncoding, scheme) + rec := httptest.NewRecorder() + e.ServeHTTP(rec, req) + + if i < minLength { + assert.Equal(t, "", rec.Header().Get(echo.HeaderContentEncoding)) + res, err := io.ReadAll(rec.Body) + if assert.NoError(t, err) { + assert.Equal(t, data, res) + } + } else { + assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding)) + r, err := reader(rec.Body) + if assert.NoError(t, err) { + buf := new(bytes.Buffer) + defer r.Close() + buf.ReadFrom(r) + assert.Equal(t, data, buf.Bytes()) + } + } + }) + } + } +} + func TestCompressNoContent(t *testing.T) { schemes := getTestcases() for scheme := range schemes { - t.Run(scheme.String(), func(t *testing.T) { + t.Run(scheme, func(t *testing.T) { e := echo.New() req := httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set(echo.HeaderAcceptEncoding, scheme.String()) + req.Header.Set(echo.HeaderAcceptEncoding, scheme) rec := httptest.NewRecorder() c := e.NewContext(req, rec) - h := NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error { + h := NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error { return c.NoContent(http.StatusNoContent) }) if assert.NoError(t, h(c)) { @@ -211,17 +293,17 @@ func TestCompressEmpty(t *testing.T) { schemes := getTestcases() for scheme, reader := range schemes { - t.Run(scheme.String(), func(t *testing.T) { + t.Run(scheme, func(t *testing.T) { e := echo.New() req := httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set(echo.HeaderAcceptEncoding, scheme.String()) + req.Header.Set(echo.HeaderAcceptEncoding, scheme) rec := httptest.NewRecorder() c := e.NewContext(req, rec) - h := NewWithConfig(Config{Schemes: []Scheme{scheme}})(func(c echo.Context) error { + h := NewWithConfig(Config{Schemes: []string{scheme}})(func(c echo.Context) error { return c.String(http.StatusOK, "") }) if assert.NoError(t, h(c)) { - assert.Equal(t, scheme.String(), rec.Header().Get(echo.HeaderContentEncoding)) + assert.Equal(t, scheme, rec.Header().Get(echo.HeaderContentEncoding)) assert.Equal(t, "text/plain; charset=UTF-8", rec.Header().Get(echo.HeaderContentType)) r, err := reader(rec.Body) if assert.NoError(t, err) { @@ -238,14 +320,14 @@ func TestCompressErrorReturned(t *testing.T) { schemes := getTestcases() for scheme := 
range schemes { - t.Run(scheme.String(), func(t *testing.T) { + t.Run(scheme, func(t *testing.T) { e := echo.New() - e.Use(NewWithConfig(Config{Schemes: []Scheme{scheme}})) + e.Use(NewWithConfig(Config{Schemes: []string{scheme}})) e.GET("/", func(c echo.Context) error { return echo.ErrNotFound }) req := httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set(echo.HeaderAcceptEncoding, scheme.String()) + req.Header.Set(echo.HeaderAcceptEncoding, scheme) rec := httptest.NewRecorder() e.ServeHTTP(rec, req) assert.Equal(t, http.StatusNotFound, rec.Code) @@ -259,12 +341,12 @@ func TestCompressWithStatic(t *testing.T) { schemes := getTestcases() for scheme, reader := range schemes { - t.Run(scheme.String(), func(t *testing.T) { + t.Run(scheme, func(t *testing.T) { e := echo.New() - e.Use(NewWithConfig(Config{Schemes: []Scheme{scheme}})) + e.Use(NewWithConfig(Config{Schemes: []string{scheme}})) e.Static("/test", "./") req := httptest.NewRequest(http.MethodGet, "/test/compress.go", nil) - req.Header.Set(echo.HeaderAcceptEncoding, scheme.String()) + req.Header.Set(echo.HeaderAcceptEncoding, scheme) rec := httptest.NewRecorder() e.ServeHTTP(rec, req) assert.Equal(t, http.StatusOK, rec.Code) @@ -292,17 +374,17 @@ func BenchmarkCompress(b *testing.B) { for i := 1; i <= 18; i++ { datalen := 2 << i - data := []byte(rand.String(datalen)) + data := rand.Bytes(datalen) for scheme := range schemes { - name := fmt.Sprintf("%s-%d", scheme.String(), datalen) + name := fmt.Sprintf("%s-%d", scheme, datalen) b.Run(name, func(b *testing.B) { e := echo.New() req := httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set(echo.HeaderAcceptEncoding, scheme.String()) + req.Header.Set(echo.HeaderAcceptEncoding, scheme) - h := NewWithConfig(Config{Level: BestSpeed, Schemes: []Scheme{scheme}})(func(c echo.Context) error { + h := NewWithConfig(Config{Level: BestSpeed, Schemes: []string{scheme}})(func(c echo.Context) error { c.Response().Write(data) return nil }) @@ -327,13 +409,13 @@ func BenchmarkCompressJSON(b *testing.B) { schemes := getTestcases() for scheme := range schemes { - b.Run(scheme.String(), func(b *testing.B) { + b.Run(scheme, func(b *testing.B) { e := echo.New() req := httptest.NewRequest(http.MethodGet, "/", nil) - req.Header.Set(echo.HeaderAcceptEncoding, scheme.String()) + req.Header.Set(echo.HeaderAcceptEncoding, scheme) - h := NewWithConfig(Config{Level: BestSpeed, Schemes: []Scheme{scheme}})(func(c echo.Context) error { + h := NewWithConfig(Config{Level: BestSpeed, Schemes: []string{scheme}})(func(c echo.Context) error { c.Response().Write(data) return nil }) diff --git a/http/middleware/compress/gogzip.go b/http/middleware/compress/gogzip.go new file mode 100644 index 00000000..589fbb99 --- /dev/null +++ b/http/middleware/compress/gogzip.go @@ -0,0 +1,55 @@ +package compress + +import ( + "compress/gzip" + "io" + "sync" +) + +type gogzipImpl struct { + pool sync.Pool +} + +func NewGoGzip(level Level) Compression { + gzipLevel := gzip.DefaultCompression + if level == BestCompression { + gzipLevel = gzip.BestCompression + } else if level == BestSpeed { + gzipLevel = gzip.BestSpeed + } + + g := &gogzipImpl{ + pool: sync.Pool{ + New: func() interface{} { + w, err := gzip.NewWriterLevel(io.Discard, gzipLevel) + if err != nil { + return nil + } + return w + }, + }, + } + + return g +} + +func (g *gogzipImpl) Acquire() Compressor { + c := g.pool.Get() + if c == nil { + return nil + } + + x, ok := c.(Compressor) + if !ok { + return nil + } + + x.Reset(io.Discard) + + return x +} + +func (g 
diff --git a/http/middleware/compress/gogzip.go b/http/middleware/compress/gogzip.go
new file mode 100644
index 00000000..589fbb99
--- /dev/null
+++ b/http/middleware/compress/gogzip.go
@@ -0,0 +1,55 @@
+package compress
+
+import (
+ "compress/gzip"
+ "io"
+ "sync"
+)
+
+type gogzipImpl struct {
+ pool sync.Pool
+}
+
+func NewGoGzip(level Level) Compression {
+ gzipLevel := gzip.DefaultCompression
+ if level == BestCompression {
+ gzipLevel = gzip.BestCompression
+ } else if level == BestSpeed {
+ gzipLevel = gzip.BestSpeed
+ }
+
+ g := &gogzipImpl{
+ pool: sync.Pool{
+ New: func() interface{} {
+ w, err := gzip.NewWriterLevel(io.Discard, gzipLevel)
+ if err != nil {
+ return nil
+ }
+ return w
+ },
+ },
+ }
+
+ return g
+}
+
+func (g *gogzipImpl) Acquire() Compressor {
+ c := g.pool.Get()
+ if c == nil {
+ return nil
+ }
+
+ x, ok := c.(Compressor)
+ if !ok {
+ return nil
+ }
+
+ x.Reset(io.Discard)
+
+ return x
+}
+
+func (g *gogzipImpl) Release(c Compressor) {
+ c.Reset(io.Discard)
+ g.pool.Put(c)
+}
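// Editor's note (sketch): NewGoGzip above pools stdlib gzip writers behind the
// middleware's Compression/Compressor interfaces. Direct usage might look like
// this; NewGoGzip, Acquire, Release, Reset and BestSpeed come from this diff,
// while the io.WriteCloser assertion is an assumption based on the pooled
// Compressor wrapping a *gzip.Writer.
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/datarhei/core/v16/http/middleware/compress"
)

func main() {
	pool := compress.NewGoGzip(compress.BestSpeed)

	c := pool.Acquire()
	if c == nil {
		return // the pool could not hand out a writer
	}
	defer pool.Release(c) // Release resets the writer and returns it to the sync.Pool

	wc, ok := c.(io.WriteCloser) // assumption: backed by *gzip.Writer
	if !ok {
		return
	}

	buf := &bytes.Buffer{}
	c.Reset(buf)                     // point the pooled writer at our own sink
	wc.Write([]byte("hello, world")) // compressed into buf
	wc.Close()                       // flush the gzip trailer

	fmt.Println("compressed size:", buf.Len())
}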
diff --git a/http/middleware/compress/gzip.go b/http/middleware/compress/gzip.go
index f2441fe1..2482d4d2 100644
--- a/http/middleware/compress/gzip.go
+++ b/http/middleware/compress/gzip.go
@@ -15,7 +15,7 @@ func NewGzip(level Level) Compression {
gzipLevel := gzip.DefaultCompression
if level == BestCompression {
gzipLevel = gzip.BestCompression
- } else {
+ } else if level == BestSpeed {
gzipLevel = gzip.BestSpeed
}
diff --git a/http/middleware/compress/zstd.go b/http/middleware/compress/zstd.go
index 3eaacb56..970732c6 100644
--- a/http/middleware/compress/zstd.go
+++ b/http/middleware/compress/zstd.go
@@ -15,7 +15,7 @@ func NewZstd(level Level) Compression {
zstdLevel := zstd.SpeedDefault
if level == BestCompression {
zstdLevel = zstd.SpeedBestCompression
- } else {
+ } else if level == BestSpeed {
zstdLevel = zstd.SpeedFastest
}
diff --git a/http/middleware/session/HLS.go b/http/middleware/session/HLS.go
index 6e2ad5c4..fbb714b6 100644
--- a/http/middleware/session/HLS.go
+++ b/http/middleware/session/HLS.go
@@ -29,7 +29,7 @@ func (h *handler) handleHLS(c echo.Context, ctxuser string, data map[string]inte
return next(c)
}
-func (h *handler) handleHLSIngress(c echo.Context, ctxuser string, data map[string]interface{}, next echo.HandlerFunc) error {
+func (h *handler) handleHLSIngress(c echo.Context, _ string, data map[string]interface{}, next echo.HandlerFunc) error {
req := c.Request()
path := req.URL.Path
@@ -97,7 +97,7 @@ func (h *handler) handleHLSIngress(c echo.Context, ctxuser string, data map[stri
return next(c)
}
-func (h *handler) handleHLSEgress(c echo.Context, ctxuser string, data map[string]interface{}, next echo.HandlerFunc) error {
+func (h *handler) handleHLSEgress(c echo.Context, _ string, data map[string]interface{}, next echo.HandlerFunc) error {
req := c.Request()
res := c.Response()
diff --git a/http/server.go b/http/server.go
index 8f5c2ae1..09322e87 100644
--- a/http/server.go
+++ b/http/server.go
@@ -102,12 +102,19 @@ type Config struct {
IAM iam.IAM
IAMSkipper func(ip string) bool
Resources resources.Resources
+ Compress CompressConfig
}
type CorsConfig struct {
Origins []string
}
+type CompressConfig struct {
+ Encoding []string
+ MimeTypes []string
+ MinLength int
+}
+
type server struct {
logger log.Logger
@@ -143,8 +150,10 @@ type server struct {
iam echo.MiddlewareFunc
}
- gzip struct {
+ compress struct {
+ encoding []string
mimetypes []string
+ minLength int
}
filesystems map[string]*filesystem
@@ -375,15 +384,9 @@ func NewServer(config Config) (serverhandler.Server, error) {
IAM: config.IAM,
}, "/api/graph/query")
- s.gzip.mimetypes = []string{
- "text/plain",
- "text/html",
- "text/javascript",
- "application/json",
- //"application/x-mpegurl",
- //"application/vnd.apple.mpegurl",
- "image/svg+xml",
- }
+ s.compress.encoding = config.Compress.Encoding
+ s.compress.mimetypes = config.Compress.MimeTypes
+ s.compress.minLength = config.Compress.MinLength
s.router = echo.New()
s.router.JSONSerializer = &GoJSONSerializer{}
@@ -409,6 +412,13 @@ func NewServer(config Config) (serverhandler.Server, error) {
s.router.Use(s.middleware.iam)
+ s.router.Use(mwcompress.NewWithConfig(mwcompress.Config{
+ Level: mwcompress.BestSpeed,
+ MinLength: config.Compress.MinLength,
+ Schemes: config.Compress.Encoding,
+ ContentTypes: config.Compress.MimeTypes,
+ }))
+
s.router.Use(mwsession.NewWithConfig(mwsession.Config{
HLSIngressCollector: config.Sessions.Collector("hlsingress"),
HLSEgressCollector: config.Sessions.Collector("hls"),
@@ -487,13 +497,6 @@ func (s *server) HTTPStatus() map[int]uint64 {
}
func (s *server) setRoutes() {
- gzipMiddleware := mwcompress.NewWithConfig(mwcompress.Config{
- Skipper: mwcompress.ContentTypeSkipper(nil),
- Level: mwcompress.BestSpeed,
- MinLength: 1000,
- Schemes: []mwcompress.Scheme{mwcompress.GzipScheme},
- })
-
// API router grouo
api := s.router.Group("/api")
@@ -509,7 +512,6 @@ func (s *server) setRoutes() {
// Swagger API documentation router group
doc := s.router.Group("/api/swagger/*")
- doc.Use(gzipMiddleware)
doc.GET("", echoSwagger.WrapHandler)
// Mount filesystems
@@ -528,15 +530,6 @@ func (s *server) setRoutes() {
DefaultContentType: filesystem.DefaultContentType,
}))
- if filesystem.Gzip {
- fs.Use(mwcompress.NewWithConfig(mwcompress.Config{
- Skipper: mwcompress.ContentTypeSkipper(s.gzip.mimetypes),
- Level: mwcompress.BestSpeed,
- MinLength: 1000,
- Schemes: []mwcompress.Scheme{mwcompress.GzipScheme},
- }))
- }
-
if filesystem.Cache != nil {
mwcache := mwcache.NewWithConfig(mwcache.Config{
Cache: filesystem.Cache,
@@ -590,7 +583,7 @@ func (s *server) setRoutes() {
// GraphQL
graphql := api.Group("/graph")
- graphql.Use(gzipMiddleware)
+ //graphql.Use(gzipMiddleware)
graphql.GET("", s.handler.graph.Playground)
graphql.POST("/query", s.handler.graph.Query)
@@ -598,7 +591,7 @@ func (s *server) setRoutes() {
// APIv3 router group
v3 := api.Group("/v3")
- v3.Use(gzipMiddleware)
+ //v3.Use(gzipMiddleware)
s.setRoutesV3(v3)
}
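// Editor's note (sketch): with the hard-coded per-route gzip middleware removed,
// compression is now driven entirely by the new Compress block in the server
// Config. A caller might fill it in like this; CompressConfig, its fields and
// NewServer are taken from this diff, while the remaining Config fields and the
// chosen values are illustrative assumptions.
package main

import (
	"log"

	httpserver "github.com/datarhei/core/v16/http"
)

func main() {
	cfg := httpserver.Config{
		// ... logger, filesystems, IAM, sessions, etc. as before ...
		Compress: httpserver.CompressConfig{
			Encoding:  []string{"gzip"},                          // offered content encodings
			MimeTypes: []string{"text/html", "application/json"}, // only these types are compressed
			MinLength: 1000,                                      // minimum body size in bytes
		},
	}

	server, err := httpserver.NewServer(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = server
}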
diff --git a/io/fs/mem.go b/io/fs/mem.go
index 97b45318..ba102ed1 100644
--- a/io/fs/mem.go
+++ b/io/fs/mem.go
@@ -15,6 +15,7 @@ import (
"github.com/datarhei/core/v16/glob"
"github.com/datarhei/core/v16/log"
+ "github.com/datarhei/core/v16/mem"
)
// MemConfig is the config that is required for creating
@@ -126,37 +127,10 @@ func (f *memFile) free() {
f.data = nil
}
-type fileDataPool struct {
- pool sync.Pool
-}
-
-var pool *fileDataPool = nil
-
-func NewFileDataPool() *fileDataPool {
- p := &fileDataPool{
- pool: sync.Pool{
- New: func() any {
- return &bytes.Buffer{}
- },
- },
- }
-
- return p
-}
-
-func (p *fileDataPool) Get() *bytes.Buffer {
- buf := p.pool.Get().(*bytes.Buffer)
- buf.Reset()
-
- return buf
-}
-
-func (p *fileDataPool) Put(buf *bytes.Buffer) {
- p.pool.Put(buf)
-}
+var pool *mem.BufferPool = nil
func init() {
- pool = NewFileDataPool()
+ pool = mem.NewBufferPool()
}
type memFilesystem struct {
diff --git a/math/rand/rand.go b/math/rand/rand.go
index 5f81ad90..fead355c 100644
--- a/math/rand/rand.go
+++ b/math/rand/rand.go
@@ -21,6 +21,12 @@ var seededRand *rand.Rand = rand.New(rand.NewSource(time.Now().UnixNano()))
var lock sync.Mutex
func StringWithCharset(length int, charset string) string {
+ b := BytesWithCharset(length, charset)
+
+ return string(b)
+}
+
+func BytesWithCharset(length int, charset string) []byte {
lock.Lock()
defer lock.Unlock()
@@ -29,7 +35,7 @@ func StringWithCharset(length int, charset string) string {
b[i] = charset[seededRand.Intn(len(charset))]
}
- return string(b)
+ return b
}
func StringLetters(length int) string {
@@ -47,3 +53,7 @@ func StringAlphanumeric(length int) string {
func String(length int) string {
return StringWithCharset(length, CharsetAll)
}
+
+func Bytes(length int) []byte {
+ return BytesWithCharset(length, CharsetAll)
+}
diff --git a/mem/buffer.go b/mem/buffer.go
new file mode 100644
index 00000000..bcadc871
--- /dev/null
+++ b/mem/buffer.go
@@ -0,0 +1,33 @@
+package mem
+
+import (
+ "bytes"
+ "sync"
+)
+
+type BufferPool struct {
+ pool sync.Pool
+}
+
+func NewBufferPool() *BufferPool {
+ p := &BufferPool{
+ pool: sync.Pool{
+ New: func() any {
+ return &bytes.Buffer{}
+ },
+ },
+ }
+
+ return p
+}
+
+func (p *BufferPool) Get() *bytes.Buffer {
+ buf := p.pool.Get().(*bytes.Buffer)
+ buf.Reset()
+
+ return buf
+}
+
+func (p *BufferPool) Put(buf *bytes.Buffer) {
+ p.pool.Put(buf)
+}
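// Editor's note (sketch): mem.BufferPool is the generalised replacement for the
// private fileDataPool that io/fs/mem.go used to carry. Get returns an already
// reset *bytes.Buffer and Put hands it back to the underlying sync.Pool; the
// API here is taken from the mem/buffer.go hunk above, only the usage itself is
// illustrative.
package main

import (
	"fmt"

	"github.com/datarhei/core/v16/mem"
)

func main() {
	pool := mem.NewBufferPool()

	buf := pool.Get()   // *bytes.Buffer, Reset() has already been called
	defer pool.Put(buf) // return it for reuse once we are done

	buf.WriteString("some file payload")
	fmt.Println("buffered bytes:", buf.Len())
}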
diff --git a/vendor/github.com/99designs/gqlgen/.golangci.yml b/vendor/github.com/99designs/gqlgen/.golangci.yml
index 098727cb..79d6627e 100644
--- a/vendor/github.com/99designs/gqlgen/.golangci.yml
+++ b/vendor/github.com/99designs/gqlgen/.golangci.yml
@@ -106,6 +106,10 @@ issues:
- path: codegen/testserver/.*/resolver\.go
linters:
- gocritic
+ # The interfaces are autogenerated and don't conform to the paramTypeCombine rule
+ - path: _examples/federation/products/graph/entity.resolvers.go
+ linters:
+ - gocritic
# Disable revive.use-any for backwards compatibility
- path: graphql/map.go
text: "use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
@@ -113,3 +117,11 @@
text: "use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
- path: codegen/testserver/singlefile/resolver.go
text: "use-any: since GO 1.18 'interface{}' can be replaced by 'any'"
+ - path: codegen/testserver/generated_test.go
+ linters:
+ - staticcheck
+ text: SA1019
+ - path: plugin/modelgen/models_test.go
+ linters:
+ - staticcheck
+ text: SA1019
diff --git a/vendor/github.com/99designs/gqlgen/CHANGELOG.md b/vendor/github.com/99designs/gqlgen/CHANGELOG.md
index acea9f11..bbcc1722 100644
--- a/vendor/github.com/99designs/gqlgen/CHANGELOG.md
+++ b/vendor/github.com/99designs/gqlgen/CHANGELOG.md
@@ -5,10 +5,2085 @@
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.45...HEAD)
+## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.50...HEAD)
+
+## [v0.17.50](https://github.com/99designs/gqlgen/compare/v0.17.49...v0.17.50) - 2024-09-13
+- a6d5d843 release v0.17.50
+
+- f154d99d Fix Nancy to use Go 1.22
+
+- 6b9e40e8 make rewrite default for resolver layout single-file (#3243)
+
+
1855758d chore(deps): bump dset in /integration in the npm_and_yarn group (#3268) + +Bumps the npm_and_yarn group in /integration with 1 update: [dset](https://github.com/lukeed/dset). + + +Updates `dset` from 3.1.3 to 3.1.4 +- [Release notes](https://github.com/lukeed/dset/releases) +- [Commits](https://github.com/lukeed/dset/compare/v3.1.3...v3.1.4) + +--- +updated-dependencies: +- dependency-name: dset + dependency-type: indirect + dependency-group: npm_and_yarn +... + +
+ +
fda0539e Bump some more module versions (#3262) + +* Bump some more module versions + + +* Update aurora + + +* Avoid upgrade to go 1.23 + + +* downgrade goquery to support pre-Go 1.23 for now + + +* Downgrade moq to support pre-Go 1.23 as well + + +--------- + +
+ +- 59f0d04c Bump golang.org/x/net 0.29 (#3261) + +
cf42b253 chore(deps): bump golang.org/x/text from 0.17.0 to 0.18.0 (#3259) + +Bumps [golang.org/x/text](https://github.com/golang/text) from 0.17.0 to 0.18.0. +- [Release notes](https://github.com/golang/text/releases) +- [Commits](https://github.com/golang/text/compare/v0.17.0...v0.18.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/text + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
b728c12f chore(deps): bump golang.org/x/text from 0.17.0 to 0.18.0 in /_examples (#3256) + +Bumps [golang.org/x/text](https://github.com/golang/text) from 0.17.0 to 0.18.0. +- [Release notes](https://github.com/golang/text/releases) +- [Commits](https://github.com/golang/text/compare/v0.17.0...v0.18.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/text + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
cba40a38 chore(deps-dev): bump vite from 5.4.2 to 5.4.3 in /integration (#3257) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.4.2 to 5.4.3. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.4.3/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
f7bee06f chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3258) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.11.5...v3.11.8) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
81ac627d chore(deps): bump robherley/go-test-action from 0.4.1 to 0.5.0 (#3255) + +Bumps [robherley/go-test-action](https://github.com/robherley/go-test-action) from 0.4.1 to 0.5.0. +- [Release notes](https://github.com/robherley/go-test-action/releases) +- [Commits](https://github.com/robherley/go-test-action/compare/v0.4.1...v0.5.0) + +--- +updated-dependencies: +- dependency-name: robherley/go-test-action + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
86ac6b36 internal/code: `Unalias` element of pointer (#3250) (closes #3247) + +This reverts commit 4c4be0aeaaad758e703724fe4a6575768017ac53. + +* code: `Unalias` element of pointer + +* chore: added comment + +
+ +- 4c4be0ae codegen: Unalias before lookup type (#3247) + +
ab1781b1 codegen: Go 1.23 alias support (#3246) + +* code: added `Unalias` for Go 1.22 + +* codegen: Go 1.23 alias support + +
+ +
814f7c71 chore(deps): bump actions/upload-artifact from 4.3.6 to 4.4.0 (#3235) + +Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.6 to 4.4.0. +- [Release notes](https://github.com/actions/upload-artifact/releases) +- [Commits](https://github.com/actions/upload-artifact/compare/v4.3.6...v4.4.0) + +--- +updated-dependencies: +- dependency-name: actions/upload-artifact + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
1cbbc120 chore(deps): bump github.com/rs/cors from 1.11.0 to 1.11.1 in /_examples (#3236) + +Bumps [github.com/rs/cors](https://github.com/rs/cors) from 1.11.0 to 1.11.1. +- [Commits](https://github.com/rs/cors/compare/v1.11.0...v1.11.1) + +--- +updated-dependencies: +- dependency-name: github.com/rs/cors + dependency-type: direct:production + update-type: version-update:semver-patch +... + +
+ +
2da2ac36 chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3237) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.11.4...v3.11.5) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
0b9bd5ee refactor: don't extract [@goField](https://github.com/goField) twice (#3234) + +We already extract the values in config.Init(). Remove the duplicate logic in the modelgen plugin. + +We leave the reference to GoFieldHook even though it's a noop since it's public. This makes this a non-breaking change. We will remove this during the next breaking release. + +
+ +
18378f90 feat: allow argument directives to be called even if the argument is null (#3233) (closes #3188) + +The existing implementation assumes that if an input argument is null, you don't want to call the directive. This is a very constraining assumption — directives may want to not just mutate an argument but to actually outright set it. + +This is a breaking change as argument directives now need to handle null input values. Added a new config switch: + +call_argument_directives_with_nulls: bool + +to control this new behavior. + +* Run go generate ./... + +
+ +- 3e76e7ee only close websocket once (#3231) + +
256794aa chore(deps-dev): bump vite from 5.4.0 to 5.4.2 in /integration (#3229) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.4.0 to 5.4.2. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.4.2/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
6acc182c Go 1.23 support (#3226) + +* Added support for go 1.23 + +* Added handling for *types.Alias + +* Updated golang ci lint to 1.60.2 + +* Fixed lint issues and ignore SA1019 on generated test files + +* Update coverage.yml + +* Update fmt-and-generate.yml + +* Update integration.yml + +* Update lint.yml + +* Update test.yml + +--------- + +
+ +
f6a82204 chore(deps): bump golang.org/x/tools from 0.23.0 to 0.24.0 (#3219) + +* chore(deps): bump golang.org/x/tools from 0.23.0 to 0.24.0 + +Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.23.0 to 0.24.0. +- [Release notes](https://github.com/golang/tools/releases) +- [Commits](https://github.com/golang/tools/compare/v0.23.0...v0.24.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/tools + dependency-type: direct:production + update-type: version-update:semver-minor +... + + +* _examples fixup + + +--------- + +
+ +
1849e124 chore(deps): bump golang.org/x/text from 0.16.0 to 0.17.0 (#3218) + +Bumps [golang.org/x/text](https://github.com/golang/text) from 0.16.0 to 0.17.0. +- [Release notes](https://github.com/golang/text/releases) +- [Commits](https://github.com/golang/text/compare/v0.16.0...v0.17.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/text + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
2f7772c9 [proposal] Add [@concurrent](https://github.com/concurrent) directive for types (#3203) + +* Issue 3202 + +* Issue 3202 + +* Issue 3202 + +* Make optional concurrent for fields of objects + +* Make optional concurrent for fields of objects + +
+ +
3556475a Fix marshaling interfaces and union types (#3211) + +* Fixed marshaling interfaces and union + +* Fixed marshaling interfaces and union + +
+ +
23abdc56 chore(deps): bump github.com/urfave/cli/v2 from 2.27.3 to 2.27.4 (#3217) + +Bumps [github.com/urfave/cli/v2](https://github.com/urfave/cli) from 2.27.3 to 2.27.4. +- [Release notes](https://github.com/urfave/cli/releases) +- [Changelog](https://github.com/urfave/cli/blob/main/docs/CHANGELOG.md) +- [Commits](https://github.com/urfave/cli/compare/v2.27.3...v2.27.4) + +--- +updated-dependencies: +- dependency-name: github.com/urfave/cli/v2 + dependency-type: direct:production + update-type: version-update:semver-patch +... + +
+ +- bbc354c6 Add local toolchain for matrix + +
3fe8329d chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3215) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.11.2...v3.11.4) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
edca7992 chore(deps-dev): bump vite from 5.3.5 to 5.4.0 in /integration (#3216) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.3.5 to 5.4.0. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +
f0b7ee3f chore(deps): bump actions/upload-artifact from 4.3.5 to 4.3.6 (#3220) + +Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.5 to 4.3.6. +- [Release notes](https://github.com/actions/upload-artifact/releases) +- [Commits](https://github.com/actions/upload-artifact/compare/v4.3.5...v4.3.6) + +--- +updated-dependencies: +- dependency-name: actions/upload-artifact + dependency-type: direct:production + update-type: version-update:semver-patch +... + +
+ +
719b7af3 chore(deps): bump golang.org/x/text from 0.16.0 to 0.17.0 in /_examples (#3221) + +Bumps [golang.org/x/text](https://github.com/golang/text) from 0.16.0 to 0.17.0. +- [Release notes](https://github.com/golang/text/releases) +- [Commits](https://github.com/golang/text/compare/v0.16.0...v0.17.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/text + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
d14fd791 chore(deps): bump actions/upload-artifact from 4.3.4 to 4.3.5 (#3208) + +Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.4 to 4.3.5. +- [Release notes](https://github.com/actions/upload-artifact/releases) +- [Commits](https://github.com/actions/upload-artifact/compare/v4.3.4...v4.3.5) + +--- +updated-dependencies: +- dependency-name: actions/upload-artifact + dependency-type: direct:production + update-type: version-update:semver-patch +... + +
+ +
564e2dc5 chore(deps): bump golangci/golangci-lint-action from 6.0.1 to 6.1.0 (#3207) + +Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 6.0.1 to 6.1.0. +- [Release notes](https://github.com/golangci/golangci-lint-action/releases) +- [Commits](https://github.com/golangci/golangci-lint-action/compare/v6.0.1...v6.1.0) + +--- +updated-dependencies: +- dependency-name: golangci/golangci-lint-action + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
d3d147e6 chore(deps): bump golang.org/x/tools from 0.22.0 to 0.23.0 (#3172) + +* chore(deps): bump golang.org/x/tools from 0.22.0 to 0.23.0 + +Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.22.0 to 0.23.0. +- [Release notes](https://github.com/golang/tools/releases) +- [Commits](https://github.com/golang/tools/compare/v0.22.0...v0.23.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/tools + dependency-type: direct:production + update-type: version-update:semver-minor +... + + + + +--------- + +
+ +
2d7e00b5 chore(deps-dev): bump typescript from 5.5.3 to 5.5.4 in /integration (#3196) + +Bumps [typescript](https://github.com/Microsoft/TypeScript) from 5.5.3 to 5.5.4. +- [Release notes](https://github.com/Microsoft/TypeScript/releases) +- [Changelog](https://github.com/microsoft/TypeScript/blob/main/azure-pipelines.release.yml) +- [Commits](https://github.com/Microsoft/TypeScript/compare/v5.5.3...v5.5.4) + +--- +updated-dependencies: +- dependency-name: typescript + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
5f86c55a chore(deps-dev): bump [@graphql](https://github.com/graphql)-codegen/client-preset in /integration (#3197) + +- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases) +- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/presets/client/CHANGELOG.md) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
552bb4b9 chore(deps-dev): bump vite from 5.3.4 to 5.3.5 in /integration (#3199) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.3.4 to 5.3.5. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.3.5/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
45a29fe0 chore(deps): bump github.com/urfave/cli/v2 from 2.27.2 to 2.27.3 (#3200) + +Bumps [github.com/urfave/cli/v2](https://github.com/urfave/cli) from 2.27.2 to 2.27.3. +- [Release notes](https://github.com/urfave/cli/releases) +- [Changelog](https://github.com/urfave/cli/blob/main/docs/CHANGELOG.md) +- [Commits](https://github.com/urfave/cli/compare/v2.27.2...v2.27.3) + +--- +updated-dependencies: +- dependency-name: github.com/urfave/cli/v2 + dependency-type: direct:production + update-type: version-update:semver-patch +... + +
+ +
3c2443e4 chore(deps): bump golang.org/x/sync from 0.7.0 to 0.8.0 in /_examples (#3206) + +Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.7.0 to 0.8.0. +- [Commits](https://github.com/golang/sync/compare/v0.7.0...v0.8.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/sync + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
52f65d0f chore(deps-dev): bump vitest from 2.0.4 to 2.0.5 in /integration (#3209) + +Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 2.0.4 to 2.0.5. +- [Release notes](https://github.com/vitest-dev/vitest/releases) +- [Commits](https://github.com/vitest-dev/vitest/commits/v2.0.5/packages/vitest) + +--- +updated-dependencies: +- dependency-name: vitest + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
1beac8b7 chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3210) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.8...v3.11.2) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +- 9b031e4d chore: fix typos in comments, tests and unexported vars (#3193) + +- 892c4842 refactor: decrease indentation in api.ReplacePlugin (#3194) + +
d1682f7c chore(deps-dev): bump vite from 5.3.3 to 5.3.4 in /integration (#3190) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.3.3 to 5.3.4. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.3.4/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
cfc9863a chore(deps-dev): bump vitest from 2.0.2 to 2.0.4 in /integration (#3189) + +Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 2.0.2 to 2.0.4. +- [Release notes](https://github.com/vitest-dev/vitest/releases) +- [Commits](https://github.com/vitest-dev/vitest/commits/v2.0.4/packages/vitest) + +--- +updated-dependencies: +- dependency-name: vitest + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
1cc0a17b Revert "feat: allow argument directives to be called even if the argument is …" (#3191) + +This reverts commit 0fb31a3ed2a63552eddcf7c2a6c40aa0d59bd4cc. + +
+ +
0fb31a3e feat: allow argument directives to be called even if the argument is null (#3188) + +The existing implementation assumes that if an input argument is null, you don't want to call the directive. This is a very constraining assumption — directives may want to not just mutate an argument but to actually outright set it. + +This is a breaking change as argument directives now need to handle null input values. Added a new config switch: + +call_argument_directives_with_nulls: bool + +to control this new behavior. + +
+ +
cd82be01 refactor: significantly clean up the federation.gotpl template (#3187) (closes #2991) + +* fix: fix Federation example + +Some configurations weren't working due to a missing resolver. + +* chore: Introduce mechanism for running all example Federation subgraphs + +This enables engineers to more easily run the debugger on the Federation example. Updated README to show how to use it. + +* refactor: significantly clean up the federation.gotpl template + +There were a number of inline structs and inline functions that made it extremely hard to reason about what the code is doing. Split these out into smaller functions with less closures and mutation. + +
+ +
a63f94bb chore(deps-dev): bump vitest from 1.6.0 to 2.0.2 in /integration (#3185) + +Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 1.6.0 to 2.0.2. +- [Release notes](https://github.com/vitest-dev/vitest/releases) +- [Commits](https://github.com/vitest-dev/vitest/commits/v2.0.2/packages/vitest) + +--- +updated-dependencies: +- dependency-name: vitest + dependency-type: direct:development + update-type: version-update:semver-major +... + +
+ +
de315d3d chore: Refactor federation.go to make it easier to read (#3183) (closes #2991) + +* chore: Refactor federation.go + +- Cut functions into smaller functions +- Remove mutation in several locations + + +* Refactor InjectSourcesLate + +Easier to reason about and read this way. + +* Re-run go generate ./... + +* regenerate + + +--------- + +
+ +
4d8d93cd Make cache generic to avoid casting (#3179) + +* Make cache generic to avoid casting + + +* Update handler/handler.go + +--------- + +
+ +
f2cf11e5 chore(deps-dev): bump [@graphql](https://github.com/graphql)-codegen/client-preset in /integration (#3174) + +- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases) +- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/presets/client/CHANGELOG.md) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
fc150db0 chore(deps-dev): bump typescript from 5.5.2 to 5.5.3 in /integration (#3175) + +Bumps [typescript](https://github.com/Microsoft/TypeScript) from 5.5.2 to 5.5.3. +- [Release notes](https://github.com/Microsoft/TypeScript/releases) +- [Changelog](https://github.com/microsoft/TypeScript/blob/main/azure-pipelines.release.yml) +- [Commits](https://github.com/Microsoft/TypeScript/compare/v5.5.2...v5.5.3) + +--- +updated-dependencies: +- dependency-name: typescript + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
60c9f671 chore(deps): bump actions/upload-artifact from 4.3.3 to 4.3.4 (#3176) + +Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4.3.3 to 4.3.4. +- [Release notes](https://github.com/actions/upload-artifact/releases) +- [Commits](https://github.com/actions/upload-artifact/compare/v4.3.3...v4.3.4) + +--- +updated-dependencies: +- dependency-name: actions/upload-artifact + dependency-type: direct:production + update-type: version-update:semver-patch +... + +
+ +
59bdde19 chore(deps-dev): bump vite from 5.3.2 to 5.3.3 in /integration (#3173) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.3.2 to 5.3.3. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.3.3/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
0ca3b19e chore(deps): bump github.com/rs/cors (#3171) + +Bumps the go_modules group with 1 update in the /_examples/websocket-initfunc/server directory: [github.com/rs/cors](https://github.com/rs/cors). + + +Updates `github.com/rs/cors` from 1.9.0 to 1.11.0 +- [Commits](https://github.com/rs/cors/compare/v1.9.0...v1.11.0) + +--- +updated-dependencies: +- dependency-name: github.com/rs/cors + dependency-type: direct:production + dependency-group: go_modules +... + +
+ +
d0e68928 Nulls are now unmarshalled as zero values for primitive types (#3162) + +* Nulls are now unmarshalled as zero values for primitive types + +* Address uint and run gofumpt + + +--------- + +
+ +
dce2e353 chore(deps): bump test-summary/action from 2.3 to 2.4 (#3163) + +Bumps [test-summary/action](https://github.com/test-summary/action) from 2.3 to 2.4. +- [Release notes](https://github.com/test-summary/action/releases) +- [Commits](https://github.com/test-summary/action/compare/v2.3...v2.4) + +--- +updated-dependencies: +- dependency-name: test-summary/action + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
2afa0c22 chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3164) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.6...v3.10.8) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
2aeb1518 chore(deps-dev): bump [@graphql](https://github.com/graphql)-codegen/client-preset in /integration (#3165) + +- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases) +- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/presets/client/CHANGELOG.md) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
28b2f494 chore(deps-dev): bump vite from 5.3.1 to 5.3.2 in /integration (#3166) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.3.1 to 5.3.2. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.3.2/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
f82d604a chore(deps-dev): bump [@graphql](https://github.com/graphql)-codegen/schema-ast in /integration (#3167) + +- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases) +- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/plugins/other/schema-ast/CHANGELOG.md) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +
dd37ea00 chore(deps-dev): bump typescript from 5.4.5 to 5.5.2 in /integration (#3157) + +Bumps [typescript](https://github.com/Microsoft/TypeScript) from 5.4.5 to 5.5.2. +- [Release notes](https://github.com/Microsoft/TypeScript/releases) +- [Changelog](https://github.com/microsoft/TypeScript/blob/main/azure-pipelines.release.yml) +- [Commits](https://github.com/Microsoft/TypeScript/compare/v5.4.5...v5.5.2) + +--- +updated-dependencies: +- dependency-name: typescript + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +
7b9c3223 chore(deps-dev): bump graphql from 16.8.2 to 16.9.0 in /integration (#3158) + +Bumps [graphql](https://github.com/graphql/graphql-js) from 16.8.2 to 16.9.0. +- [Release notes](https://github.com/graphql/graphql-js/releases) +- [Commits](https://github.com/graphql/graphql-js/compare/v16.8.2...v16.9.0) + +--- +updated-dependencies: +- dependency-name: graphql + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +
b822c2c0 chore(deps): bump mikepenz/action-junit-report from 4.3.0 to 4.3.1 (#3159) + +Bumps [mikepenz/action-junit-report](https://github.com/mikepenz/action-junit-report) from 4.3.0 to 4.3.1. +- [Release notes](https://github.com/mikepenz/action-junit-report/releases) +- [Commits](https://github.com/mikepenz/action-junit-report/compare/v4.3.0...v4.3.1) + +--- +updated-dependencies: +- dependency-name: mikepenz/action-junit-report + dependency-type: direct:production + update-type: version-update:semver-patch +... + +
+ +
c1525831 chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3156) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.5...v3.10.6) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +- feab5f51 fix bug: POST Insufficient rigorous judgment leads to invalid SSE (#3153) + +- 7c8bc50d Add failing test as example (#3151) + +
d00ace38 Add prettier test results (#3148) + +* Add prettier test results + +
+ +
641377d7 chore(deps-dev): bump ws in /integration in the npm_and_yarn group (#3147) + +Bumps the npm_and_yarn group in /integration with 1 update: [ws](https://github.com/websockets/ws). + + +Updates `ws` from 8.16.0 to 8.17.1 +- [Release notes](https://github.com/websockets/ws/releases) +- [Commits](https://github.com/websockets/ws/compare/8.16.0...8.17.1) + +--- +updated-dependencies: +- dependency-name: ws + dependency-type: indirect + dependency-group: npm_and_yarn +... + +
+ +- e724bde5 docs: missing 'repeatable' in [@goExtraField](https://github.com/goExtraField) directive (#3150) + +- 85459a32 Fix typo in config field names (#3149) + +- 1422ff25 feat: Change plugin signatures (#2011) + +
04b13fdb chore(deps-dev): bump vite from 5.2.13 to 5.3.1 in /integration (#3144) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.13 to 5.3.1. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.3.1/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +
a1ccf971 chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3143) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.4...v3.10.5) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
8a59a2c4 chore(deps-dev): bump graphql from 16.8.1 to 16.8.2 in /integration (#3142) + +Bumps [graphql](https://github.com/graphql/graphql-js) from 16.8.1 to 16.8.2. +- [Release notes](https://github.com/graphql/graphql-js/releases) +- [Commits](https://github.com/graphql/graphql-js/compare/v16.8.1...v16.8.2) + +--- +updated-dependencies: +- dependency-name: graphql + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
80098c67 chore(deps-dev): bump [@graphql](https://github.com/graphql)-codegen/client-preset in /integration (#3141) + +- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases) +- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/presets/client/CHANGELOG.md) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +
fc90169b chore(deps): bump google.golang.org/protobuf from 1.34.1 to 1.34.2 (#3140) + +Bumps google.golang.org/protobuf from 1.34.1 to 1.34.2. + +--- +updated-dependencies: +- dependency-name: google.golang.org/protobuf + dependency-type: direct:production + update-type: version-update:semver-patch +... + +
+ +- fb67b709 v0.17.49 postrelease bump + + + + + + +## [v0.17.49](https://github.com/99designs/gqlgen/compare/v0.17.48...v0.17.49) - 2024-06-13 +- d093c6e5 release v0.17.49 + +- a4f997f8 refactor: add missed file.Close() and use t.TempDir() (#3137) + +
f813598b #3118 Add token limit option to fix CVE-2023-49559 (#3136) + +* Use ParseQueryWithLmit and add parserTokenLimit to executor + +* add parser token limit test + +* remove failing test + +* move default token limit to const + +--------- + +
+ +
ee1e18c7 chore(deps-dev): bump braces in /integration in the npm_and_yarn group (#3134) + +Bumps the npm_and_yarn group in /integration with 1 update: [braces](https://github.com/micromatch/braces). + + +Updates `braces` from 3.0.2 to 3.0.3 +- [Changelog](https://github.com/micromatch/braces/blob/master/CHANGELOG.md) +- [Commits](https://github.com/micromatch/braces/compare/3.0.2...3.0.3) + +--- +updated-dependencies: +- dependency-name: braces + dependency-type: indirect + dependency-group: npm_and_yarn +... + +
+ +
d6226db6 chore(deps): bump github.com/vektah/gqlparser/v2 from 2.5.12 to 2.5.14 in the go_modules group (#3133) + +* chore(deps): bump github.com/vektah/gqlparser/v2 in the go_modules group + +Bumps the go_modules group with 1 update: [github.com/vektah/gqlparser/v2](https://github.com/vektah/gqlparser). + + +Updates `github.com/vektah/gqlparser/v2` from 2.5.12 to 2.5.14 +- [Release notes](https://github.com/vektah/gqlparser/releases) +- [Commits](https://github.com/vektah/gqlparser/compare/v2.5.12...v2.5.14) + +--- +updated-dependencies: +- dependency-name: github.com/vektah/gqlparser/v2 + dependency-type: direct:production + dependency-group: go_modules +... + + +* Update to v2.5.16 + + +--------- + +
+ +
6daceaf3 Linter update + add revive rules (#3127) + +* Linter update + add revive rules + + +* More revive lints + + +--------- + +
+ +
e6860c35 chore(deps): bump golang.org/x/tools from 0.21.0 to 0.22.0 (#3125) + +Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.21.0 to 0.22.0. +- [Release notes](https://github.com/golang/tools/releases) +- [Commits](https://github.com/golang/tools/compare/v0.21.0...v0.22.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/tools + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
3bad9617 chore(deps): bump golang.org/x/text from 0.15.0 to 0.16.0 (#3124) + +* chore(deps): bump golang.org/x/text from 0.15.0 to 0.16.0 + +Bumps [golang.org/x/text](https://github.com/golang/text) from 0.15.0 to 0.16.0. +- [Release notes](https://github.com/golang/text/releases) +- [Commits](https://github.com/golang/text/compare/v0.15.0...v0.16.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/text + dependency-type: direct:production + update-type: version-update:semver-minor +... + + +* Update examples go mod + + +--------- + +
+ +
4492b3c0 chore(deps-dev): bump vite from 5.2.12 to 5.2.13 in /integration (#3126) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.12 to 5.2.13. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/v5.2.13/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.2.13/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
8ec8d795 chore(deps): bump golang.org/x/text from 0.15.0 to 0.16.0 in /_examples (#3123) + +Bumps [golang.org/x/text](https://github.com/golang/text) from 0.15.0 to 0.16.0. +- [Release notes](https://github.com/golang/text/releases) +- [Commits](https://github.com/golang/text/compare/v0.15.0...v0.16.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/text + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +- d9ba3405 v0.17.48 postrelease bump + + + + + + +## [v0.17.48](https://github.com/99designs/gqlgen/compare/v0.17.47...v0.17.48) - 2024-06-06 +- 621350a1 release v0.17.48 + +
fbf73ee1 chore(deps-dev): bump vite from 5.2.11 to 5.2.12 in /integration (#3117) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.11 to 5.2.12. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.2.12/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
e07134ab add option to omit panic handlers during development (#3114) + +see docs for motivation + +
+ +- 1a7c6090 refactor: fix gocritic lint issues (#3113) + +- 4114515f refactor: use errors.New instead of fmt.Errorf (#3112) + +- 93f6366d Omit gqlgen version in config files used for tests (#3111) + +
dae915d2 Correct dataloader example (#3110) + +Dataloader requires the value and error slice to be of equal length, in order to correctly return the values. + +Link: https://github.com/vikstrous/dataloadgen/blob/7de6ebe3d882737607ce2ba646e8d6ec652b32e3/dataloadgen_test.go#L19-L20 + +
+ +
bd9219dd Go template function to split string into array of strings. (#3108) + +* added new template function to split string + +* StrSplit func to upper + +--------- + +
+ +- 6c83b9ea Remove duplicated return_pointers_in_unmarshalinput explanation (#3109) + +- d2a6bd5f refactor: fix testifylint.go-require lint issues (#3107) + +
b18d0287 testifylint v1.3.0 fixes (#3103) + +* Resolve Merge conflict + + +* Autofixes + + +* Lots more fixes and formatting + + +* Add one more + + +* Apply suggestions from code review + + +--------- + +
+ +- bbb0c959 chore: fix tests, pin golangci-lint version (#3105) + +- 57e88b27 Forgot the examples portion (#3101) + +- ff77f8b2 Some minor test lint (#3102) + +
90f2271e refactor: use t.Log instead of fmt.Print (#3099) + +* refactor: use t.Log instead of fmt.Printf + +* Add back failure context as to what errors happened and where + + +--------- + +
+ +- d7447c69 refactor: rename local variables to match Go codestyle (#3100) + +- 834d832c refactor: avoid panic in tests (#3098) + +- 71845858 Ignore gorilla/websocket 1.5.1 in dependabot (#3097) + +- 4ecfec90 Fix go install gqlgen binary (#3095) + +- 866075cd refactor: simplify with strconv.FormatBool (#3094) + +- ab19907d refactor: UnmarshalID implementation (#3093) + +- a9965fbd refactor: use 'any' instead of 'interface{}' for consistency (#3090) + +
d5c9f896 Embed extra fields config (#3088) + +--------- + +
+ +
0b9e6f9c chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3085) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.3...v3.10.4) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
33aad657 chore(deps-dev): bump [@graphql](https://github.com/graphql)-codegen/client-preset in /integration (#3084) + +- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases) +- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/presets/client/CHANGELOG.md) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +- 58d6978e v0.17.47 postrelease bump + + + + + + +## [v0.17.47](https://github.com/99designs/gqlgen/compare/v0.17.46...v0.17.47) - 2024-05-18 +- a9f2b500 release v0.17.47 + +- 611cbcec Update gqlparser (#3080) + +
3a5827d4 Fix #2856: resolver receive previous implementation on render (#2886) + +* pass previous impl to resolver + +* pass previous only and not default + +
+ +- e0125301 bugfix for [@goField](https://github.com/goField) + [@goExtraField](https://github.com/goExtraField) combination (#3078) + +
e61a7200 Federation: Update docs to use IntrospectAndCompose (#3077) + +`serviceList` now gets a deprecation warning to use IntrospectAndCompose +instead. We update our docs to avoid referring to deprecated services + +
+ +
de31828a Ability to inline extraFields configuration. New [@goExtraField](https://github.com/goExtraField) directive. (#3076) + +--------- + +
+ +- 8b4df636 Go mod tidy (#3075) + +
ae9787cb chore(deps): bump github.com/sosodev/duration from 1.3.0 to 1.3.1 (#3070) + +* chore(deps): bump github.com/sosodev/duration from 1.3.0 to 1.3.1 + +Bumps [github.com/sosodev/duration](https://github.com/sosodev/duration) from 1.3.0 to 1.3.1. +- [Release notes](https://github.com/sosodev/duration/releases) +- [Commits](https://github.com/sosodev/duration/compare/v1.3.0...v1.3.1) + +--- +updated-dependencies: +- dependency-name: github.com/sosodev/duration + dependency-type: direct:production + update-type: version-update:semver-patch +... + + +* go mod tidy examples + + +* Pin gorilla to skip 1.5.1 + + +--------- + +
+ +
32014fdb chore(deps): bump golang.org/x/tools from 0.20.0 to 0.21.0 (#3072) + +Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.20.0 to 0.21.0. +- [Release notes](https://github.com/golang/tools/releases) +- [Commits](https://github.com/golang/tools/compare/v0.20.0...v0.21.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/tools + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
1b5ed7c0 chore(deps-dev): bump urql from 4.0.7 to 4.1.0 in /integration (#3074) + +Bumps [urql](https://github.com/urql-graphql/urql/tree/HEAD/packages/react-urql) from 4.0.7 to 4.1.0. +- [Release notes](https://github.com/urql-graphql/urql/releases) +- [Changelog](https://github.com/urql-graphql/urql/blob/main/packages/react-urql/CHANGELOG.md) + +--- +updated-dependencies: +- dependency-name: urql + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +
77ea79a8 chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3073) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.2...v3.10.3) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
358c7a2b chore(deps): bump google.golang.org/protobuf from 1.34.0 to 1.34.1 (#3071) + +Bumps google.golang.org/protobuf from 1.34.0 to 1.34.1. + +--- +updated-dependencies: +- dependency-name: google.golang.org/protobuf + dependency-type: direct:production + update-type: version-update:semver-patch +... + +
+ +
5c951f4e chore(deps): bump golangci/golangci-lint-action from 5.3.0 to 6.0.1 (#3069) + +Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 5.3.0 to 6.0.1. +- [Release notes](https://github.com/golangci/golangci-lint-action/releases) +- [Commits](https://github.com/golangci/golangci-lint-action/compare/v5.3.0...v6.0.1) + +--- +updated-dependencies: +- dependency-name: golangci/golangci-lint-action + dependency-type: direct:production + update-type: version-update:semver-major +... + +
+ +- 42cae907 chore: remove deprecated errcheck.ignore lint option (#3062) + +- 1a59d58b Fix typo in error message (#3065) + +- 39d3d8d0 refactor: simplify test asserts (#3061) + +- 7421bdfb refactor: compile regex only once (#3063) + +- a4bf3a7e chore: simplify generating examples in release script (#3064) + +- 45f6eb56 v0.17.46 postrelease bump + + + + + + +## [v0.17.46](https://github.com/99designs/gqlgen/compare/v0.17.45...v0.17.46) - 2024-05-07 +- 90af8bf5 release v0.17.46 + +- bf49e56a fix: failed to build _examples/websocket-initfunc/server/server.go (#3055) (#3058) + +
1ee0fa80 chore(deps-dev): bump vite from 5.2.10 to 5.2.11 in /integration (#3047) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.10 to 5.2.11. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.2.11/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
ddd9a6ba chore(deps): bump golang.org/x/text from 0.14.0 to 0.15.0 (#3052) + +Bumps [golang.org/x/text](https://github.com/golang/text) from 0.14.0 to 0.15.0. +- [Release notes](https://github.com/golang/text/releases) +- [Commits](https://github.com/golang/text/compare/v0.14.0...v0.15.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/text + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
36b66607 chore(deps): bump github.com/PuerkitoBio/goquery from 1.9.1 to 1.9.2 (#3051) + +* chore(deps): bump github.com/PuerkitoBio/goquery from 1.9.1 to 1.9.2 + +Bumps [github.com/PuerkitoBio/goquery](https://github.com/PuerkitoBio/goquery) from 1.9.1 to 1.9.2. +- [Release notes](https://github.com/PuerkitoBio/goquery/releases) +- [Commits](https://github.com/PuerkitoBio/goquery/compare/v1.9.1...v1.9.2) + +--- +updated-dependencies: +- dependency-name: github.com/PuerkitoBio/goquery + dependency-type: direct:production + update-type: version-update:semver-patch +... + + +* go mod tidy + + +--------- + +
+ +
ad91bf6c chore(deps-dev): bump vitest from 1.5.2 to 1.6.0 in /integration (#3048) + +Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 1.5.2 to 1.6.0. +- [Release notes](https://github.com/vitest-dev/vitest/releases) +- [Commits](https://github.com/vitest-dev/vitest/commits/v1.6.0/packages/vitest) + +--- +updated-dependencies: +- dependency-name: vitest + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +
a5cb576c chore(deps-dev): bump [@apollo](https://github.com/apollo)/client in /integration (#3049) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.10.1...v3.10.2) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
6b423e51 chore(deps): bump google.golang.org/protobuf from 1.33.0 to 1.34.0 (#3050) + +Bumps google.golang.org/protobuf from 1.33.0 to 1.34.0. + +--- +updated-dependencies: +- dependency-name: google.golang.org/protobuf + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
c34e246b chore(deps): bump golang.org/x/text from 0.14.0 to 0.15.0 in /_examples (#3053) + +Bumps [golang.org/x/text](https://github.com/golang/text) from 0.14.0 to 0.15.0. +- [Release notes](https://github.com/golang/text/releases) +- [Commits](https://github.com/golang/text/compare/v0.14.0...v0.15.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/text + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
a3991df0 chore(deps): bump golangci/golangci-lint-action from 5.0.0 to 5.3.0 (#3054) + +Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 5.0.0 to 5.3.0. +- [Release notes](https://github.com/golangci/golangci-lint-action/releases) +- [Commits](https://github.com/golangci/golangci-lint-action/compare/v5.0.0...v5.3.0) + +--- +updated-dependencies: +- dependency-name: golangci/golangci-lint-action + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +- 769632a1 chore: simplify go generate in examples (#3033) + +- f24ae887 enum values binding v2 (#3014) + +
b3a10547 Add initial cache tests for MapCache and NoCache (#3040) + +* Add initial cache tests for MapCache and NoCache + +* Add edge case testing to MapCache and NoCache + +* Reformat, regenerate + + +--------- + +
+ +- 16854647 chore: lint _examples directory (#3042) + +- 2bb32fe7 chore: remove deprecated build tag (#3041) + +- 4b559b33 Fix codegen config tests: add file closing (#3037) + +- 293991e9 docs: fix links to the docs latest version (#3038) + +- 79dc5e03 refactor: change test asserts to be more idiomatic (#3036) + +- a1989525 chore: remove unnecessary empty lines (#3035) + +- 6998f19f chore: `run.skip-dirs` is deprecated in golangci-lint v1.57 (#3034) + +
835c2d11 Improve federation resolver selection (#3029) + +* Improve federation resolver selection + +Just checking for existence of keys in the representations isn't enough. If the values are null, we should skip the resolver. + +* Run go generate ./... + +* Add test cases + +* Fix linter + +
+ +
9e8e7edd refactor: simplify tests for `api.Generate` (#3031) + +* refactor: simplify tests for Generate + +* Add deleted files to git ignore + + +--------- + +
+ +- 28405ac1 Fix test asserts: reverse expected and actual params (#3027) + +
75326bc7 Bump github.com/sosodev/duration from 1.2.0 to 1.3.0 (#3024) + +* Bump github.com/sosodev/duration from 1.2.0 to 1.3.0 + +Bumps [github.com/sosodev/duration](https://github.com/sosodev/duration) from 1.2.0 to 1.3.0. +- [Release notes](https://github.com/sosodev/duration/releases) +- [Commits](https://github.com/sosodev/duration/compare/v1.2.0...v1.3.0) + +--- +updated-dependencies: +- dependency-name: github.com/sosodev/duration + dependency-type: direct:production + update-type: version-update:semver-minor +... + + +* go mod tidy + + +--------- + +
+ +
bf4406a1 Bump vitest from 1.5.0 to 1.5.2 in /integration (#3021) + +Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 1.5.0 to 1.5.2. +- [Release notes](https://github.com/vitest-dev/vitest/releases) +- [Commits](https://github.com/vitest-dev/vitest/commits/v1.5.2/packages/vitest) + +--- +updated-dependencies: +- dependency-name: vitest + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
1a8ebe9b Bump [@apollo](https://github.com/apollo)/client from 3.9.11 to 3.10.1 in /integration (#3022) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.9.11...v3.10.1) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +
bacaab8e Bump github.com/urfave/cli/v2 from 2.27.1 to 2.27.2 (#3023) + +Bumps [github.com/urfave/cli/v2](https://github.com/urfave/cli) from 2.27.1 to 2.27.2. +- [Release notes](https://github.com/urfave/cli/releases) +- [Changelog](https://github.com/urfave/cli/blob/main/docs/CHANGELOG.md) +- [Commits](https://github.com/urfave/cli/compare/v2.27.1...v2.27.2) + +--- +updated-dependencies: +- dependency-name: github.com/urfave/cli/v2 + dependency-type: direct:production + update-type: version-update:semver-patch +... + +
+ +
3f515543 Bump github.com/rs/cors from 1.10.1 to 1.11.0 in /_examples (#3025) + +Bumps [github.com/rs/cors](https://github.com/rs/cors) from 1.10.1 to 1.11.0. +- [Commits](https://github.com/rs/cors/compare/v1.10.1...v1.11.0) + +--- +updated-dependencies: +- dependency-name: github.com/rs/cors + dependency-type: direct:production + update-type: version-update:semver-minor +... + +
+ +
ced2189d Bump golangci/golangci-lint-action from 4.0.0 to 5.0.0 (#3026) + +Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 4.0.0 to 5.0.0. +- [Release notes](https://github.com/golangci/golangci-lint-action/releases) +- [Commits](https://github.com/golangci/golangci-lint-action/compare/v4.0.0...v5.0.0) + +--- +updated-dependencies: +- dependency-name: golangci/golangci-lint-action + dependency-type: direct:production + update-type: version-update:semver-major +... + +
+ +- ada00f78 chore: remove unused lint.txt (#3017) + +
8bd35429 chore: fix some typos in comments (#3020) + +* chore: fix some typos in comments + + +* Apply suggestions from code review + +--------- + +
+ +
e1ef86e7 Bump vite from 5.2.8 to 5.2.10 in /integration (#3015) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.8 to 5.2.10. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.2.10/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
ecc3f647 Bump [@apollo](https://github.com/apollo)/client from 3.9.10 to 3.9.11 in /integration (#3011) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.9.10...v3.9.11) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
c92b511c Bump typescript from 5.4.4 to 5.4.5 in /integration (#3010) + +Bumps [typescript](https://github.com/Microsoft/TypeScript) from 5.4.4 to 5.4.5. +- [Release notes](https://github.com/Microsoft/TypeScript/releases) +- [Changelog](https://github.com/microsoft/TypeScript/blob/main/azure-pipelines.release.yml) +- [Commits](https://github.com/Microsoft/TypeScript/compare/v5.4.4...v5.4.5) + +--- +updated-dependencies: +- dependency-name: typescript + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
cc2d95a2 Bump vitest from 1.4.0 to 1.5.0 in /integration (#3012) + +Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 1.4.0 to 1.5.0. +- [Release notes](https://github.com/vitest-dev/vitest/releases) +- [Commits](https://github.com/vitest-dev/vitest/commits/v1.5.0/packages/vitest) + +--- +updated-dependencies: +- dependency-name: vitest + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +
c17a4b6f fix: codegen will _ the fieldset parameter if its not needed (#3006) + +* fix: codegen will _ the fieldset parameter if its not needed + +* update generated examples + +
+ +- 0b0f6592 chore: update Automatic Persisted Queries Link (#3005) + +
79aa0ceb Mark ctx as unused when no arguments for FieldContextFunc (#2999) + +* Mark ctx as unused when no arguments for FieldContextFunc + +* Regenerate + + +--------- + +
+ +
f3b34683 Bump urql from 4.0.6 to 4.0.7 in /integration (#2995) (closes #2998) + +* Bump urql from 4.0.6 to 4.0.7 in /integration + +Bumps [urql](https://github.com/urql-graphql/urql/tree/HEAD/packages/react-urql) from 4.0.6 to 4.0.7. +- [Release notes](https://github.com/urql-graphql/urql/releases) +- [Changelog](https://github.com/urql-graphql/urql/blob/main/packages/react-urql/CHANGELOG.md) + +--- +updated-dependencies: +- dependency-name: urql + dependency-type: direct:development + update-type: version-update:semver-patch +... + + + +client. + +--------- + +
+ +
8ab31646 Bump graphql-ws from 5.15.0 to 5.16.0 in /integration (#2986) + +Bumps [graphql-ws](https://github.com/enisdenjo/graphql-ws) from 5.15.0 to 5.16.0. +- [Release notes](https://github.com/enisdenjo/graphql-ws/releases) +- [Changelog](https://github.com/enisdenjo/graphql-ws/blob/master/CHANGELOG.md) +- [Commits](https://github.com/enisdenjo/graphql-ws/compare/v5.15.0...v5.16.0) + +--- +updated-dependencies: +- dependency-name: graphql-ws + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +
45fafedc Bump golang.org/x/tools from 0.19.0 to 0.20.0 (#2996) + +* Bump golang.org/x/tools from 0.19.0 to 0.20.0 + +Bumps [golang.org/x/tools](https://github.com/golang/tools) from 0.19.0 to 0.20.0. +- [Release notes](https://github.com/golang/tools/releases) +- [Commits](https://github.com/golang/tools/compare/v0.19.0...v0.20.0) + +--- +updated-dependencies: +- dependency-name: golang.org/x/tools + dependency-type: direct:production + update-type: version-update:semver-minor +... + + +* Update examples to match root go.mod + + +--------- + +
+ +
4c45be21 Bump vite from 5.2.7 to 5.2.8 in /integration (#2992) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.7 to 5.2.8. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.2.8/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +- 6e5a7758 Update `tools.go` url (#2987) + +
6771a804 Bump [@apollo](https://github.com/apollo)/client from 3.9.9 to 3.9.10 in /integration (#2994) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.9.9...v3.9.10) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
2ce5fcd1 Bump typescript from 5.4.3 to 5.4.4 in /integration (#2993) + +Bumps [typescript](https://github.com/Microsoft/TypeScript) from 5.4.3 to 5.4.4. +- [Release notes](https://github.com/Microsoft/TypeScript/releases) +- [Changelog](https://github.com/microsoft/TypeScript/blob/main/azure-pipelines.release.yml) +- [Commits](https://github.com/Microsoft/TypeScript/compare/v5.4.3...v5.4.4) + +--- +updated-dependencies: +- dependency-name: typescript + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +- 99d7d887 fix: stop loading package dependencies (#2988) + +- d0a1aec2 enum values binding (#2982) + +
6352b800 Bump vite from 5.2.6 to 5.2.7 in /integration (#2984) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.2.6 to 5.2.7. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.2.7/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
2286b0e8 Bump graphql-sse from 2.5.2 to 2.5.3 in /integration (#2985) + +Bumps [graphql-sse](https://github.com/enisdenjo/graphql-sse) from 2.5.2 to 2.5.3. +- [Release notes](https://github.com/enisdenjo/graphql-sse/releases) +- [Changelog](https://github.com/enisdenjo/graphql-sse/blob/master/CHANGELOG.md) +- [Commits](https://github.com/enisdenjo/graphql-sse/compare/v2.5.2...v2.5.3) + +--- +updated-dependencies: +- dependency-name: graphql-sse + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
8ab2c27a Bump [@graphql](https://github.com/graphql)-codegen/client-preset from 4.2.4 to 4.2.5 in /integration (#2983) + +- [Release notes](https://github.com/dotansimha/graphql-code-generator/releases) +- [Changelog](https://github.com/dotansimha/graphql-code-generator/blob/master/packages/presets/client/CHANGELOG.md) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +- 780bf27a Add UintID type binding (#2980) + +
d192a591 Bump [@apollo](https://github.com/apollo)/client from 3.9.7 to 3.9.9 in /integration (#2977) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.9.7...v3.9.9) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
62289425 Bump vite from 5.1.6 to 5.2.6 in /integration (#2978) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.1.6 to 5.2.6. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.2.6/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +
105ec44b Bump typescript from 5.4.2 to 5.4.3 in /integration (#2979) + +Bumps [typescript](https://github.com/Microsoft/TypeScript) from 5.4.2 to 5.4.3. +- [Release notes](https://github.com/Microsoft/TypeScript/releases) +- [Changelog](https://github.com/microsoft/TypeScript/blob/main/azure-pipelines.release.yml) +- [Commits](https://github.com/Microsoft/TypeScript/compare/v5.4.2...v5.4.3) + +--- +updated-dependencies: +- dependency-name: typescript + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +- 0afd63a5 chore: remove repetitive words (#2976) + +
ee526b05 Bump vite from 5.1.5 to 5.1.6 in /integration (#2971) + +Bumps [vite](https://github.com/vitejs/vite/tree/HEAD/packages/vite) from 5.1.5 to 5.1.6. +- [Release notes](https://github.com/vitejs/vite/releases) +- [Changelog](https://github.com/vitejs/vite/blob/main/packages/vite/CHANGELOG.md) +- [Commits](https://github.com/vitejs/vite/commits/v5.1.6/packages/vite) + +--- +updated-dependencies: +- dependency-name: vite + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +
00bf8ef3 Bump vitest from 1.3.1 to 1.4.0 in /integration (#2972) + +Bumps [vitest](https://github.com/vitest-dev/vitest/tree/HEAD/packages/vitest) from 1.3.1 to 1.4.0. +- [Release notes](https://github.com/vitest-dev/vitest/releases) +- [Commits](https://github.com/vitest-dev/vitest/commits/v1.4.0/packages/vitest) + +--- +updated-dependencies: +- dependency-name: vitest + dependency-type: direct:development + update-type: version-update:semver-minor +... + +
+ +
bdbdddf5 Bump [@apollo](https://github.com/apollo)/client from 3.9.6 to 3.9.7 in /integration (#2970) + +- [Release notes](https://github.com/apollographql/apollo-client/releases) +- [Changelog](https://github.com/apollographql/apollo-client/blob/main/CHANGELOG.md) +- [Commits](https://github.com/apollographql/apollo-client/compare/v3.9.6...v3.9.7) + +--- +updated-dependencies: + dependency-type: direct:development + update-type: version-update:semver-patch +... + +
+ +- fa221f64 Update Changelog + +- f897668b v0.17.45 postrelease bump + + + + + ## [v0.17.45](https://github.com/99designs/gqlgen/compare/v0.17.44...v0.17.45) - 2024-03-11 - b6d1a8b9 release v0.17.45 @@ -3573,7 +5648,7 @@ when generating next the context was captured there. Which means later when the returned function from DispatchOperation is called. The responseContext which accumulates the errors is the -tempResponseContext which we no longer have access to read the errors +tempResponseContext which we no longer have access to to read the errors out of it. Instead add a context to next() so that it can be passed through and diff --git a/vendor/github.com/99designs/gqlgen/api/generate.go b/vendor/github.com/99designs/gqlgen/api/generate.go index 9e7b4188..7b83be50 100644 --- a/vendor/github.com/99designs/gqlgen/api/generate.go +++ b/vendor/github.com/99designs/gqlgen/api/generate.go @@ -45,7 +45,11 @@ func Generate(cfg *config.Config, option ...Option) error { } } } - plugins = append([]plugin.Plugin{federation.New(cfg.Federation.Version)}, plugins...) + federationPlugin, err := federation.New(cfg.Federation.Version, cfg) + if err != nil { + return fmt.Errorf("failed to construct the Federation plugin: %w", err) + } + plugins = append([]plugin.Plugin{federationPlugin}, plugins...) } for _, o := range option { @@ -58,6 +62,13 @@ func Generate(cfg *config.Config, option ...Option) error { cfg.Sources = append(cfg.Sources, s) } } + if inj, ok := p.(plugin.EarlySourcesInjector); ok { + s, err := inj.InjectSourcesEarly() + if err != nil { + return fmt.Errorf("%s: %w", p.Name(), err) + } + cfg.Sources = append(cfg.Sources, s...) + } } if err := cfg.LoadSchema(); err != nil { @@ -70,6 +81,13 @@ func Generate(cfg *config.Config, option ...Option) error { cfg.Sources = append(cfg.Sources, s) } } + if inj, ok := p.(plugin.LateSourcesInjector); ok { + s, err := inj.InjectSourcesLate(cfg.Schema) + if err != nil { + return fmt.Errorf("%s: %w", p.Name(), err) + } + cfg.Sources = append(cfg.Sources, s...) 
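The generate.go hunk above adds two optional plugin hooks: plugin.EarlySourcesInjector, consulted before cfg.LoadSchema, and plugin.LateSourcesInjector, consulted after the first parse with the resulting schema. A minimal sketch of a plugin wiring into both follows; the ExtraSchema type, its package, and the schema strings are invented for illustration, only the interface and method names come from the hunk above.

package extraschema

import (
    "github.com/99designs/gqlgen/plugin"
    "github.com/vektah/gqlparser/v2/ast"
)

// ExtraSchema is a hypothetical plugin that contributes additional schema sources.
type ExtraSchema struct{}

func (ExtraSchema) Name() string { return "extraschema" }

// InjectSourcesEarly runs before cfg.LoadSchema, so the directive declared here
// can already be referenced by the project's own schema files.
func (ExtraSchema) InjectSourcesEarly() ([]*ast.Source, error) {
    return []*ast.Source{{
        Name:  "extraschema/early.graphql",
        Input: "directive @audit on FIELD_DEFINITION",
    }}, nil
}

// InjectSourcesLate runs after the schema has been parsed once and may inspect it.
func (ExtraSchema) InjectSourcesLate(schema *ast.Schema) ([]*ast.Source, error) {
    if schema == nil || schema.Query == nil {
        return nil, nil
    }
    return []*ast.Source{{
        Name:  "extraschema/late.graphql",
        Input: "extend type Query { auditLog: [String!]! }",
    }}, nil
}

// Compile-time checks against the interfaces asserted in api.Generate above.
var (
    _ plugin.Plugin               = ExtraSchema{}
    _ plugin.EarlySourcesInjector = ExtraSchema{}
    _ plugin.LateSourcesInjector  = ExtraSchema{}
)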
+ } } // LoadSchema again now we have everything diff --git a/vendor/github.com/99designs/gqlgen/api/option.go b/vendor/github.com/99designs/gqlgen/api/option.go index d376193d..344aa819 100644 --- a/vendor/github.com/99designs/gqlgen/api/option.go +++ b/vendor/github.com/99designs/gqlgen/api/option.go @@ -29,19 +29,20 @@ func PrependPlugin(p plugin.Plugin) Option { // ReplacePlugin replaces any existing plugin with a matching plugin name func ReplacePlugin(p plugin.Plugin) Option { return func(cfg *config.Config, plugins *[]plugin.Plugin) { - if plugins != nil { - found := false - ps := *plugins - for i, o := range ps { - if p.Name() == o.Name() { - ps[i] = p - found = true - } - } - if !found { - ps = append(ps, p) - } - *plugins = ps + if plugins == nil { + return } + found := false + ps := *plugins + for i, o := range ps { + if p.Name() == o.Name() { + ps[i] = p + found = true + } + } + if !found { + ps = append(ps, p) + } + *plugins = ps } } diff --git a/vendor/github.com/99designs/gqlgen/codegen/args.go b/vendor/github.com/99designs/gqlgen/codegen/args.go index 983a3a02..736680b3 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/args.go +++ b/vendor/github.com/99designs/gqlgen/codegen/args.go @@ -18,19 +18,20 @@ type ArgSet struct { type FieldArgument struct { *ast.ArgumentDefinition - TypeReference *config.TypeReference - VarName string // The name of the var in go - Object *Object // A link back to the parent object - Default any // The default value - Directives []*Directive - Value any // value set in Data + TypeReference *config.TypeReference + VarName string // The name of the var in go + Object *Object // A link back to the parent object + Default any // The default value + Directives []*Directive + Value any // value set in Data + CallArgumentDirectivesWithNull bool } -// ImplDirectives get not Builtin and location ARGUMENT_DEFINITION directive +// ImplDirectives get not SkipRuntime and location ARGUMENT_DEFINITION directive func (f *FieldArgument) ImplDirectives() []*Directive { d := make([]*Directive, 0) for i := range f.Directives { - if !f.Directives[i].Builtin && f.Directives[i].IsLocation(ast.LocationArgumentDefinition) { + if !f.Directives[i].SkipRuntime && f.Directives[i].IsLocation(ast.LocationArgumentDefinition) { d = append(d, f.Directives[i]) } } @@ -57,11 +58,12 @@ func (b *builder) buildArg(obj *Object, arg *ast.ArgumentDefinition) (*FieldArgu return nil, err } newArg := FieldArgument{ - ArgumentDefinition: arg, - TypeReference: tr, - Object: obj, - VarName: templates.ToGoPrivate(arg.Name), - Directives: argDirs, + ArgumentDefinition: arg, + TypeReference: tr, + Object: obj, + VarName: templates.ToGoPrivate(arg.Name), + Directives: argDirs, + CallArgumentDirectivesWithNull: b.Config.CallArgumentDirectivesWithNull, } if arg.DefaultValue != nil { diff --git a/vendor/github.com/99designs/gqlgen/codegen/args.gotpl b/vendor/github.com/99designs/gqlgen/codegen/args.gotpl index 7b541ae1..2f3afdf0 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/args.gotpl +++ b/vendor/github.com/99designs/gqlgen/codegen/args.gotpl @@ -2,35 +2,67 @@ func (ec *executionContext) {{ $name }}(ctx context.Context, rawArgs map[string]interface{}) (map[string]interface{}, error) { var err error args := map[string]interface{}{} + {{- range $i, $arg := . 
}} - var arg{{$i}} {{ $arg.TypeReference.GO | ref}} - if tmp, ok := rawArgs[{{$arg.Name|quote}}]; ok { - ctx := graphql.WithPathContext(ctx, graphql.NewPathWithField({{$arg.Name|quote}})) - {{- if $arg.ImplDirectives }} - directive0 := func(ctx context.Context) (interface{}, error) { return ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, tmp) } - {{ template "implDirectives" $arg }} - tmp, err = directive{{$arg.ImplDirectives|len}}(ctx) - if err != nil { - return nil, graphql.ErrorOnPath(ctx, err) - } - if data, ok := tmp.({{ $arg.TypeReference.GO | ref }}) ; ok { - arg{{$i}} = data - {{- if $arg.TypeReference.IsNilable }} - } else if tmp == nil { - arg{{$i}} = nil - {{- end }} - } else { - return nil, graphql.ErrorOnPath(ctx, fmt.Errorf(`unexpected type %T from directive, should be {{ $arg.TypeReference.GO }}`, tmp)) - } - {{- else }} - arg{{$i}}, err = ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, tmp) - if err != nil { - return nil, err - } - {{- end }} + arg{{$i}}, err := ec.{{ $name }}{{$arg.Name | go}}(ctx, rawArgs) + if err != nil { + return nil, err } args[{{$arg.Name|quote}}] = arg{{$i}} {{- end }} return args, nil } + + {{- range $i, $arg := . }} + func (ec *executionContext) {{ $name }}{{$arg.Name | go}}( + ctx context.Context, + rawArgs map[string]interface{}, + ) ({{ $arg.TypeReference.GO | ref}}, error) { + {{- if not .CallArgumentDirectivesWithNull}} + // We won't call the directive if the argument is null. + // Set call_argument_directives_with_null to true to call directives + // even if the argument is null. + _, ok := rawArgs[{{$arg.Name|quote}}] + if !ok { + var zeroVal {{ $arg.TypeReference.GO | ref}} + return zeroVal, nil + } + {{end}} + ctx = graphql.WithPathContext(ctx, graphql.NewPathWithField({{$arg.Name|quote}})) + {{- if $arg.ImplDirectives }} + directive0 := func(ctx context.Context) (interface{}, error) { + tmp, ok := rawArgs[{{$arg.Name|quote}}] + if !ok { + var zeroVal {{ $arg.TypeReference.GO | ref}} + return zeroVal, nil + } + return ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, tmp) + } + {{ template "implDirectives" $arg }} + tmp, err := directive{{$arg.ImplDirectives|len}}(ctx) + if err != nil { + var zeroVal {{ $arg.TypeReference.GO | ref}} + return zeroVal, graphql.ErrorOnPath(ctx, err) + } + if data, ok := tmp.({{ $arg.TypeReference.GO | ref }}) ; ok { + return data, nil + {{- if $arg.TypeReference.IsNilable }} + } else if tmp == nil { + var zeroVal {{ $arg.TypeReference.GO | ref}} + return zeroVal, nil + {{- end }} + } else { + var zeroVal {{ $arg.TypeReference.GO | ref}} + return zeroVal, graphql.ErrorOnPath(ctx, fmt.Errorf(`unexpected type %T from directive, should be {{ $arg.TypeReference.GO }}`, tmp)) + } + {{- else }} + if tmp, ok := rawArgs[{{$arg.Name|quote}}]; ok { + return ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, tmp) + } + + var zeroVal {{ $arg.TypeReference.GO | ref}} + return zeroVal, nil + {{- end }} + } + {{end}} {{ end }} diff --git a/vendor/github.com/99designs/gqlgen/codegen/config/binder.go b/vendor/github.com/99designs/gqlgen/codegen/config/binder.go index 91b6e500..fc7b2edc 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/config/binder.go +++ b/vendor/github.com/99designs/gqlgen/codegen/config/binder.go @@ -36,7 +36,7 @@ func (c *Config) NewBinder() *Binder { } func (b *Binder) TypePosition(typ types.Type) token.Position { - named, isNamed := typ.(*types.Named) + named, isNamed := code.Unalias(typ).(*types.Named) if !isNamed { return token.Position{ Filename: "unknown", @@ -77,10 +77,11 @@ func (b *Binder) 
FindType(pkgName, typeName string) (types.Type, error) { return nil, err } - if fun, isFunc := obj.(*types.Func); isFunc { - return fun.Type().(*types.Signature).Params().At(0).Type(), nil + t := code.Unalias(obj.Type()) + if _, isFunc := obj.(*types.Func); isFunc { + return code.Unalias(t.(*types.Signature).Params().At(0).Type()), nil } - return obj.Type(), nil + return t, nil } func (b *Binder) InstantiateType(orig types.Type, targs []types.Type) (types.Type, error) { @@ -120,7 +121,7 @@ func (b *Binder) DefaultUserObject(name string) (types.Type, error) { return nil, err } - return obj.Type(), nil + return code.Unalias(obj.Type()), nil } func (b *Binder) FindObject(pkgName, typeName string) (types.Object, error) { @@ -193,19 +194,19 @@ func (b *Binder) PointerTo(ref *TypeReference) *TypeReference { // TypeReference is used by args and field types. The Definition can refer to both input and output types. type TypeReference struct { - Definition *ast.Definition - GQL *ast.Type - GO types.Type // Type of the field being bound. Could be a pointer or a value type of Target. - Target types.Type // The actual type that we know how to bind to. May require pointer juggling when traversing to fields. - CastType types.Type // Before calling marshalling functions cast from/to this base type - Marshaler *types.Func // When using external marshalling functions this will point to the Marshal function - Unmarshaler *types.Func // When using external marshalling functions this will point to the Unmarshal function - IsMarshaler bool // Does the type implement graphql.Marshaler and graphql.Unmarshaler - IsOmittable bool // Is the type wrapped with Omittable - IsContext bool // Is the Marshaler/Unmarshaller the context version; applies to either the method or interface variety. - PointersInUmarshalInput bool // Inverse values and pointers in return. - IsRoot bool // Is the type a root level definition such as Query, Mutation or Subscription - EnumValues []EnumValueReference + Definition *ast.Definition + GQL *ast.Type + GO types.Type // Type of the field being bound. Could be a pointer or a value type of Target. + Target types.Type // The actual type that we know how to bind to. May require pointer juggling when traversing to fields. + CastType types.Type // Before calling marshalling functions cast from/to this base type + Marshaler *types.Func // When using external marshalling functions this will point to the Marshal function + Unmarshaler *types.Func // When using external marshalling functions this will point to the Unmarshal function + IsMarshaler bool // Does the type implement graphql.Marshaler and graphql.Unmarshaler + IsOmittable bool // Is the type wrapped with Omittable + IsContext bool // Is the Marshaler/Unmarshaller the context version; applies to either the method or interface variety. + PointersInUnmarshalInput bool // Inverse values and pointers in return. 
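The TypeReference fields above record how a bound Go type marshals: IsMarshaler for types carrying MarshalGQL/UnmarshalGQL methods, IsContext for the MarshalGQLContext/UnmarshalGQLContext variety checked a little further down. A sketch of a type the binder would classify as IsMarshaler, assuming it is bound to a schema scalar in gqlgen.yml; the Money type and its wire format are invented for illustration.

package model

import (
    "encoding/json"
    "fmt"
    "io"
    "strconv"

    "github.com/99designs/gqlgen/graphql"
)

// Money is a hypothetical scalar stored as integer cents. Because it has
// MarshalGQL/UnmarshalGQL methods, the binder sets IsMarshaler on its
// TypeReference and calls these methods instead of external marshal functions.
type Money struct {
    Cents int64
}

// MarshalGQL writes the GraphQL wire representation (a bare integer) of the value.
func (m Money) MarshalGQL(w io.Writer) {
    io.WriteString(w, strconv.FormatInt(m.Cents, 10))
}

// UnmarshalGQL reads the value sent by the client, which may arrive as an
// int64 or as a json.Number depending on the transport.
func (m *Money) UnmarshalGQL(v interface{}) error {
    switch v := v.(type) {
    case int64:
        m.Cents = v
        return nil
    case json.Number:
        c, err := v.Int64()
        if err != nil {
            return err
        }
        m.Cents = c
        return nil
    default:
        return fmt.Errorf("Money must be an integer number of cents, got %T", v)
    }
}

var (
    _ graphql.Marshaler   = Money{}
    _ graphql.Unmarshaler = (*Money)(nil)
)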
+ IsRoot bool // Is the type a root level definition such as Query, Mutation or Subscription + EnumValues []EnumValueReference } func (ref *TypeReference) Elem() *TypeReference { @@ -264,13 +265,13 @@ func (ref *TypeReference) IsPtrToIntf() bool { } func (ref *TypeReference) IsNamed() bool { - _, isSlice := ref.GO.(*types.Named) - return isSlice + _, ok := ref.GO.(*types.Named) + return ok } func (ref *TypeReference) IsStruct() bool { - _, isStruct := ref.GO.Underlying().(*types.Struct) - return isStruct + _, ok := ref.GO.Underlying().(*types.Struct) + return ok } func (ref *TypeReference) IsScalar() bool { @@ -362,6 +363,9 @@ func unwrapOmittable(t types.Type) (types.Type, bool) { } func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret *TypeReference, err error) { + if bindTarget != nil { + bindTarget = code.Unalias(bindTarget) + } if innerType, ok := unwrapOmittable(bindTarget); ok { if schemaType.NonNull { return nil, fmt.Errorf("%s is wrapped with Omittable but non-null", schemaType.Name()) @@ -433,28 +437,28 @@ func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret if err != nil { return nil, err } - + t := code.Unalias(obj.Type()) if values := b.enumValues(def); len(values) > 0 { err = b.enumReference(ref, obj, values) if err != nil { return nil, err } } else if fun, isFunc := obj.(*types.Func); isFunc { - ref.GO = fun.Type().(*types.Signature).Params().At(0).Type() - ref.IsContext = fun.Type().(*types.Signature).Results().At(0).Type().String() == "github.com/99designs/gqlgen/graphql.ContextMarshaler" + ref.GO = code.Unalias(t.(*types.Signature).Params().At(0).Type()) + ref.IsContext = code.Unalias(t.(*types.Signature).Results().At(0).Type()).String() == "github.com/99designs/gqlgen/graphql.ContextMarshaler" ref.Marshaler = fun ref.Unmarshaler = types.NewFunc(0, fun.Pkg(), "Unmarshal"+typeName, nil) - } else if hasMethod(obj.Type(), "MarshalGQLContext") && hasMethod(obj.Type(), "UnmarshalGQLContext") { - ref.GO = obj.Type() + } else if hasMethod(t, "MarshalGQLContext") && hasMethod(t, "UnmarshalGQLContext") { + ref.GO = t ref.IsContext = true ref.IsMarshaler = true - } else if hasMethod(obj.Type(), "MarshalGQL") && hasMethod(obj.Type(), "UnmarshalGQL") { - ref.GO = obj.Type() + } else if hasMethod(t, "MarshalGQL") && hasMethod(t, "UnmarshalGQL") { + ref.GO = t ref.IsMarshaler = true - } else if underlying := basicUnderlying(obj.Type()); def.IsLeafType() && underlying != nil && underlying.Kind() == types.String { + } else if underlying := basicUnderlying(t); def.IsLeafType() && underlying != nil && underlying.Kind() == types.String { // TODO delete before v1. 
Backwards compatibility case for named types wrapping strings (see #595) - ref.GO = obj.Type() + ref.GO = t ref.CastType = underlying underlyingRef, err := b.TypeReference(&ast.Type{NamedType: "String"}, nil) @@ -465,7 +469,7 @@ func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret ref.Marshaler = underlyingRef.Marshaler ref.Unmarshaler = underlyingRef.Unmarshaler } else { - ref.GO = obj.Type() + ref.GO = t } ref.Target = ref.GO @@ -478,7 +482,7 @@ func (b *Binder) TypeReference(schemaType *ast.Type, bindTarget types.Type) (ret ref.GO = bindTarget } - ref.PointersInUmarshalInput = b.cfg.ReturnPointersInUmarshalInput + ref.PointersInUnmarshalInput = b.cfg.ReturnPointersInUnmarshalInput return ref, nil } @@ -516,6 +520,10 @@ func (b *Binder) CopyModifiersFromAst(t *ast.Type, base types.Type) types.Type { } func IsNilable(t types.Type) bool { + // Note that we use types.Unalias rather than code.Unalias here + // because we want to always check the underlying type. + // code.Unalias only unwraps aliases in Go 1.23 + t = types.Unalias(t) if namedType, isNamed := t.(*types.Named); isNamed { return IsNilable(namedType.Underlying()) } @@ -587,10 +595,11 @@ func (b *Binder) enumReference(ref *TypeReference, obj types.Object, values map[ return fmt.Errorf("not all enum values are binded for %v", ref.Definition.Name) } - if fn, ok := obj.Type().(*types.Signature); ok { - ref.GO = fn.Params().At(0).Type() + t := code.Unalias(obj.Type()) + if fn, ok := t.(*types.Signature); ok { + ref.GO = code.Unalias(fn.Params().At(0).Type()) } else { - ref.GO = obj.Type() + ref.GO = t } str, err := b.TypeReference(&ast.Type{NamedType: "String"}, nil) @@ -618,9 +627,10 @@ func (b *Binder) enumReference(ref *TypeReference, obj types.Object, values map[ return err } - if !types.AssignableTo(valueObj.Type(), ref.GO) { + valueTyp := code.Unalias(valueObj.Type()) + if !types.AssignableTo(valueTyp, ref.GO) { return fmt.Errorf("wrong type: %v, for enum value: %v, expected type: %v, of enum: %v", - valueObj.Type(), value.Name, ref.GO, ref.Definition.Name) + valueTyp, value.Name, ref.GO, ref.Definition.Name) } switch valueObj.(type) { diff --git a/vendor/github.com/99designs/gqlgen/codegen/config/config.go b/vendor/github.com/99designs/gqlgen/codegen/config/config.go index 3228756c..761e3524 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/config/config.go +++ b/vendor/github.com/99designs/gqlgen/codegen/config/config.go @@ -42,16 +42,22 @@ type Config struct { OmitRootModels bool `yaml:"omit_root_models,omitempty"` OmitResolverFields bool `yaml:"omit_resolver_fields,omitempty"` OmitPanicHandler bool `yaml:"omit_panic_handler,omitempty"` - StructFieldsAlwaysPointers bool `yaml:"struct_fields_always_pointers,omitempty"` - ReturnPointersInUmarshalInput bool `yaml:"return_pointers_in_unmarshalinput,omitempty"` - ResolversAlwaysReturnPointers bool `yaml:"resolvers_always_return_pointers,omitempty"` - NullableInputOmittable bool `yaml:"nullable_input_omittable,omitempty"` - EnableModelJsonOmitemptyTag *bool `yaml:"enable_model_json_omitempty_tag,omitempty"` - SkipValidation bool `yaml:"skip_validation,omitempty"` - SkipModTidy bool `yaml:"skip_mod_tidy,omitempty"` - Sources []*ast.Source `yaml:"-"` - Packages *code.Packages `yaml:"-"` - Schema *ast.Schema `yaml:"-"` + // If this is set to true, argument directives that + // decorate a field with a null value will still be called. 
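The comment beginning above introduces call_argument_directives_with_null, the option also surfaced in the args.gotpl rewrite earlier and in the gqlgen.yml template later in this diff. A sketch of an ARGUMENT_DEFINITION directive that only becomes useful once it is still invoked for omitted or null arguments; the @defaultString directive, its nullable String argument (hence the *string handling), and the DefaultString name are assumptions for illustration.

package graph

import (
    "context"

    "github.com/99designs/gqlgen/graphql"
)

// DefaultString is a hypothetical @defaultString(to: String!) argument directive.
// next(ctx) yields the unmarshalled argument value. With the option left at its
// default (false) the generated code short-circuits null arguments, so the
// fallback branch below would never run.
func DefaultString(ctx context.Context, obj interface{}, next graphql.Resolver, to string) (interface{}, error) {
    val, err := next(ctx)
    if err != nil {
        return nil, err
    }
    // For a nullable String argument the value is a *string; treat both
    // "argument omitted" and "explicit null" as "use the fallback".
    if s, ok := val.(*string); ok && s != nil {
        return s, nil
    }
    return &to, nil
}

It would additionally need to be declared in the schema (for example directive @defaultString(to: String!) on ARGUMENT_DEFINITION) and wired into the generated DirectiveRoot; both steps are assumed here and not shown in this diff.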
+ // + // This enables argumment directives to not just mutate + // argument values but to set them even if they're null. + CallArgumentDirectivesWithNull bool `yaml:"call_argument_directives_with_null,omitempty"` + StructFieldsAlwaysPointers bool `yaml:"struct_fields_always_pointers,omitempty"` + ReturnPointersInUnmarshalInput bool `yaml:"return_pointers_in_unmarshalinput,omitempty"` + ResolversAlwaysReturnPointers bool `yaml:"resolvers_always_return_pointers,omitempty"` + NullableInputOmittable bool `yaml:"nullable_input_omittable,omitempty"` + EnableModelJsonOmitemptyTag *bool `yaml:"enable_model_json_omitempty_tag,omitempty"` + SkipValidation bool `yaml:"skip_validation,omitempty"` + SkipModTidy bool `yaml:"skip_mod_tidy,omitempty"` + Sources []*ast.Source `yaml:"-"` + Packages *code.Packages `yaml:"-"` + Schema *ast.Schema `yaml:"-"` // Deprecated: use Federation instead. Will be removed next release Federated bool `yaml:"federated,omitempty"` @@ -62,15 +68,15 @@ var cfgFilenames = []string{".gqlgen.yml", "gqlgen.yml", "gqlgen.yaml"} // DefaultConfig creates a copy of the default config func DefaultConfig() *Config { return &Config{ - SchemaFilename: StringList{"schema.graphql"}, - Model: PackageConfig{Filename: "models_gen.go"}, - Exec: ExecConfig{Filename: "generated.go"}, - Directives: map[string]DirectiveConfig{}, - Models: TypeMap{}, - StructFieldsAlwaysPointers: true, - ReturnPointersInUmarshalInput: false, - ResolversAlwaysReturnPointers: true, - NullableInputOmittable: false, + SchemaFilename: StringList{"schema.graphql"}, + Model: PackageConfig{Filename: "models_gen.go"}, + Exec: ExecConfig{Filename: "generated.go"}, + Directives: map[string]DirectiveConfig{}, + Models: TypeMap{}, + StructFieldsAlwaysPointers: true, + ReturnPointersInUnmarshalInput: false, + ResolversAlwaysReturnPointers: true, + NullableInputOmittable: false, } } @@ -320,24 +326,33 @@ func (c *Config) injectTypesFromSchema() error { } } - if schemaType.Kind == ast.Object || schemaType.Kind == ast.InputObject { + if schemaType.Kind == ast.Object || + schemaType.Kind == ast.InputObject || + schemaType.Kind == ast.Interface { for _, field := range schemaType.Fields { if fd := field.Directives.ForName("goField"); fd != nil { forceResolver := c.Models[schemaType.Name].Fields[field.Name].Resolver - fieldName := c.Models[schemaType.Name].Fields[field.Name].FieldName - if ra := fd.Arguments.ForName("forceResolver"); ra != nil { if fr, err := ra.Value.Value(nil); err == nil { forceResolver = fr.(bool) } } + fieldName := c.Models[schemaType.Name].Fields[field.Name].FieldName if na := fd.Arguments.ForName("name"); na != nil { if fr, err := na.Value.Value(nil); err == nil { fieldName = fr.(string) } } + omittable := c.Models[schemaType.Name].Fields[field.Name].Omittable + if arg := fd.Arguments.ForName("omittable"); arg != nil { + if k, err := arg.Value.Value(nil); err == nil { + val := k.(bool) + omittable = &val + } + } + if c.Models[schemaType.Name].Fields == nil { c.Models[schemaType.Name] = TypeMapEntry{ Model: c.Models[schemaType.Name].Model, @@ -349,6 +364,7 @@ func (c *Config) injectTypesFromSchema() error { c.Models[schemaType.Name].Fields[field.Name] = TypeMapField{ FieldName: fieldName, Resolver: forceResolver, + Omittable: omittable, } } } @@ -449,6 +465,7 @@ type TypeMapEntry struct { type TypeMapField struct { Resolver bool `yaml:"resolver"` FieldName string `yaml:"fieldName"` + Omittable *bool `yaml:"omittable"` GeneratedMethod string `yaml:"-"` } @@ -659,6 +676,16 @@ func (tm TypeMap) ForceGenerate(name 
string, forceGenerate bool) { type DirectiveConfig struct { SkipRuntime bool `yaml:"skip_runtime"` + + // If the directive implementation is statically defined, don't provide a hook for it + // in the generated server. This is useful for directives that are implemented + // by plugins or the runtime itself. + // + // The function implemmentation should be provided here as a string. + // + // The function should have the following signature: + // func(ctx context.Context, obj any, next graphql.Resolver[, directive arguments if any]) (res any, err error) + Implementation *string } func inStrSlice(haystack []string, needle string) bool { diff --git a/vendor/github.com/99designs/gqlgen/codegen/config/initialisms.go b/vendor/github.com/99designs/gqlgen/codegen/config/initialisms.go index 25e7331f..432c56e0 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/config/initialisms.go +++ b/vendor/github.com/99designs/gqlgen/codegen/config/initialisms.go @@ -14,7 +14,7 @@ type GoInitialismsConfig struct { Initialisms []string `yaml:"initialisms"` } -// setInitialisms adjustes GetInitialisms based on its settings. +// setInitialisms adjusts GetInitialisms based on its settings. func (i GoInitialismsConfig) setInitialisms() { toUse := i.determineGoInitialisms() templates.GetInitialisms = func() map[string]bool { @@ -22,7 +22,7 @@ func (i GoInitialismsConfig) setInitialisms() { } } -// determineGoInitialisms returns the Go initialims to be used, based on its settings. +// determineGoInitialisms returns the Go initialisms to be used, based on its settings. func (i GoInitialismsConfig) determineGoInitialisms() (initialismsToUse map[string]bool) { if i.ReplaceDefaults { initialismsToUse = make(map[string]bool, len(i.Initialisms)) diff --git a/vendor/github.com/99designs/gqlgen/codegen/data.go b/vendor/github.com/99designs/gqlgen/codegen/data.go index 7110de2f..c5c3fcc6 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/data.go +++ b/vendor/github.com/99designs/gqlgen/codegen/data.go @@ -64,6 +64,30 @@ type builder struct { Directives map[string]*Directive } +// Get only the directives which should have a user provided definition on server instantiation +func (d *Data) UserDirectives() DirectiveList { + res := DirectiveList{} + directives := d.Directives() + for k, directive := range directives { + if directive.Implementation == nil { + res[k] = directive + } + } + return res +} + +// Get only the directives which should have a statically provided definition +func (d *Data) BuiltInDirectives() DirectiveList { + res := DirectiveList{} + directives := d.Directives() + for k, directive := range directives { + if directive.Implementation != nil { + res[k] = directive + } + } + return res +} + // Get only the directives which are defined in the config's sources. 
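The DirectiveConfig above gains an Implementation field: when it is non-nil, the directive body is emitted directly into the generated code (as builtInDirective<Name>, per the directive.go and template hunks below) instead of being exposed as a DirectiveRoot hook. A sketch of registering such a statically implemented directive programmatically before generation; the "traced" name and its body are illustrative, and the schema is assumed to already declare the directive.

package main

import (
    "log"

    "github.com/99designs/gqlgen/api"
    "github.com/99designs/gqlgen/codegen/config"
)

func main() {
    cfg, err := config.LoadConfigFromDefaultLocations()
    if err != nil {
        log.Fatal(err)
    }

    // The body must match the documented signature; it is emitted verbatim into
    // the generated file, so it should only reference packages that file already
    // imports (context and graphql are typically safe).
    impl := `func(ctx context.Context, obj interface{}, next graphql.Resolver) (res interface{}, err error) {
    // statically implemented no-op directive
    return next(ctx)
}`

    // Assumes the schema declares: directive @traced on FIELD_DEFINITION
    cfg.Directives["traced"] = config.DirectiveConfig{Implementation: &impl}

    if err := api.Generate(cfg); err != nil {
        log.Fatal(err)
    }
}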
func (d *Data) Directives() DirectiveList { res := DirectiveList{} @@ -97,7 +121,7 @@ func BuildData(cfg *config.Config, plugins ...any) (*Data, error) { dataDirectives := make(map[string]*Directive) for name, d := range b.Directives { - if !d.Builtin { + if !d.SkipRuntime { dataDirectives[name] = d } } diff --git a/vendor/github.com/99designs/gqlgen/codegen/directive.go b/vendor/github.com/99designs/gqlgen/codegen/directive.go index 30a79c35..077bd9f7 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/directive.go +++ b/vendor/github.com/99designs/gqlgen/codegen/directive.go @@ -7,6 +7,7 @@ import ( "github.com/vektah/gqlparser/v2/ast" + "github.com/99designs/gqlgen/codegen/config" "github.com/99designs/gqlgen/codegen/templates" ) @@ -19,9 +20,10 @@ func (dl DirectiveList) LocationDirectives(location string) DirectiveList { type Directive struct { *ast.DirectiveDefinition - Name string - Args []*FieldArgument - Builtin bool + Name string + Args []*FieldArgument + + config.DirectiveConfig } // IsLocation check location directive @@ -82,7 +84,7 @@ func (b *builder) buildDirectives() (map[string]*Directive, error) { DirectiveDefinition: dir, Name: name, Args: args, - Builtin: b.Config.Directives[name].SkipRuntime, + DirectiveConfig: b.Config.Directives[name], } } @@ -122,7 +124,7 @@ func (b *builder) getDirectives(list ast.DirectiveList) ([]*Directive, error) { Name: d.Name, Args: args, DirectiveDefinition: list[i].Definition, - Builtin: b.Config.Directives[d.Name].SkipRuntime, + DirectiveConfig: b.Config.Directives[d.Name], } } @@ -162,8 +164,12 @@ func (d *Directive) ResolveArgs(obj string, next int) string { return strings.Join(args, ", ") } +func (d *Directive) CallName() string { + return ucFirst(d.Name) +} + func (d *Directive) Declaration() string { - res := ucFirst(d.Name) + " func(ctx context.Context, obj interface{}, next graphql.Resolver" + res := d.CallName() + " func(ctx context.Context, obj interface{}, next graphql.Resolver" for _, arg := range d.Args { res += fmt.Sprintf(", %s %s", templates.ToGoPrivate(arg.Name), templates.CurrentImports.LookupType(arg.TypeReference.GO)) @@ -172,3 +178,23 @@ func (d *Directive) Declaration() string { res += ") (res interface{}, err error)" return res } + +func (d *Directive) IsBuiltIn() bool { + return d.Implementation != nil +} + +func (d *Directive) CallPath() string { + if d.IsBuiltIn() { + return "builtInDirective" + d.CallName() + } + + return "ec.directives." 
+ d.CallName() +} + +func (d *Directive) FunctionImpl() string { + if d.Implementation == nil { + return "" + } + + return d.CallPath() + " = " + *d.Implementation +} diff --git a/vendor/github.com/99designs/gqlgen/codegen/directives.gotpl b/vendor/github.com/99designs/gqlgen/codegen/directives.gotpl index 23bcf0f8..c3fa8abe 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/directives.gotpl +++ b/vendor/github.com/99designs/gqlgen/codegen/directives.gotpl @@ -1,23 +1,29 @@ {{ define "implDirectives" }}{{ $in := .DirectiveObjName }} + {{ $zeroVal := .TypeReference.GO | ref}} {{- range $i, $directive := .ImplDirectives -}} directive{{add $i 1}} := func(ctx context.Context) (interface{}, error) { {{- range $arg := $directive.Args }} {{- if notNil "Value" $arg }} {{ $arg.VarName }}, err := ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, {{ $arg.Value | dump }}) if err != nil{ - return nil, err + var zeroVal {{$zeroVal}} + return zeroVal, err } {{- else if notNil "Default" $arg }} {{ $arg.VarName }}, err := ec.{{ $arg.TypeReference.UnmarshalFunc }}(ctx, {{ $arg.Default | dump }}) if err != nil{ - return nil, err + var zeroVal {{$zeroVal}} + return zeroVal, err } {{- end }} {{- end }} - if ec.directives.{{$directive.Name|ucFirst}} == nil { - return nil, errors.New("directive {{$directive.Name}} is not implemented") - } - return ec.directives.{{$directive.Name|ucFirst}}({{$directive.ResolveArgs $in $i }}) + {{- if not $directive.IsBuiltIn}} + if {{$directive.CallPath}} == nil { + var zeroVal {{$zeroVal}} + return zeroVal, errors.New("directive {{$directive.Name}} is not implemented") + } + {{- end}} + return {{$directive.CallPath}}({{$directive.ResolveArgs $in $i }}) } {{ end -}} {{ end }} @@ -37,10 +43,7 @@ {{- end }} n := next next = func(ctx context.Context) (interface{}, error) { - if ec.directives.{{$directive.Name|ucFirst}} == nil { - return nil, errors.New("directive {{$directive.Name}} is not implemented") - } - return ec.directives.{{$directive.Name|ucFirst}}({{$directive.CallArgs}}) + {{- template "callDirective" $directive -}} } {{- end }} } @@ -57,6 +60,15 @@ return graphql.Null {{end}} +{{define "callDirective"}} + {{- if not .IsBuiltIn}} + if {{.CallPath}} == nil { + return nil, errors.New("directive {{.Name}} is not implemented") + } + {{- end}} + return {{.CallPath}}({{.CallArgs}}) +{{end}} + {{ if .Directives.LocationDirectives "QUERY" }} func (ec *executionContext) _queryMiddleware(ctx context.Context, obj *ast.OperationDefinition, next func(ctx context.Context) (interface{}, error)) graphql.Marshaler { {{ template "queryDirectives" .Directives.LocationDirectives "QUERY" }} @@ -87,10 +99,7 @@ func (ec *executionContext) _subscriptionMiddleware(ctx context.Context, obj *as {{- end }} n := next next = func(ctx context.Context) (interface{}, error) { - if ec.directives.{{$directive.Name|ucFirst}} == nil { - return nil, errors.New("directive {{$directive.Name}} is not implemented") - } - return ec.directives.{{$directive.Name|ucFirst}}({{$directive.CallArgs}}) + {{- template "callDirective" $directive -}} } {{- end }} } @@ -130,10 +139,7 @@ func (ec *executionContext) _subscriptionMiddleware(ctx context.Context, obj *as {{- end }} n := next next = func(ctx context.Context) (interface{}, error) { - if ec.directives.{{$directive.Name|ucFirst}} == nil { - return nil, errors.New("directive {{$directive.Name}} is not implemented") - } - return ec.directives.{{$directive.Name|ucFirst}}({{$directive.CallArgs}}) + {{- template "callDirective" $directive -}} } {{- end }} } diff --git 
a/vendor/github.com/99designs/gqlgen/codegen/field.go b/vendor/github.com/99designs/gqlgen/codegen/field.go index 509f48cd..7f4a5ad1 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/field.go +++ b/vendor/github.com/99designs/gqlgen/codegen/field.go @@ -16,6 +16,7 @@ import ( "github.com/99designs/gqlgen/codegen/config" "github.com/99designs/gqlgen/codegen/templates" + "github.com/99designs/gqlgen/internal/code" ) type Field struct { @@ -144,7 +145,7 @@ func (b *builder) bindField(obj *Object, f *Field) (errret error) { f.GoFieldName = b.Config.Models[obj.Name].Fields[f.Name].FieldName } - target, err := b.findBindTarget(obj.Type.(*types.Named), f.GoFieldName) + target, err := b.findBindTarget(obj.Type, f.GoFieldName) if err != nil { return err } @@ -229,7 +230,7 @@ func (b *builder) bindField(obj *Object, f *Field) (errret error) { } // findBindTarget attempts to match the name to a field or method on a Type -// with the following priorites: +// with the following priorities: // 1. Any Fields with a struct tag (see config.StructTag). Errors if more than one match is found // 2. Any method or field with a matching name. Errors if more than one match is found // 3. Same logic again for embedded fields @@ -380,7 +381,7 @@ func (b *builder) findBindStructEmbedsTarget(strukt *types.Struct, name string) continue } - fieldType := field.Type() + fieldType := code.Unalias(field.Type()) if ptr, ok := fieldType.(*types.Pointer); ok { fieldType = ptr.Elem() } @@ -442,7 +443,7 @@ func (f *Field) ImplDirectives() []*Directive { loc = ast.LocationInputFieldDefinition } for i := range f.Directives { - if !f.Directives[i].Builtin && + if !f.Directives[i].SkipRuntime && (f.Directives[i].IsLocation(loc, ast.LocationObject) || f.Directives[i].IsLocation(loc, ast.LocationInputObject)) { d = append(d, f.Directives[i]) } diff --git a/vendor/github.com/99designs/gqlgen/codegen/generated!.gotpl b/vendor/github.com/99designs/gqlgen/codegen/generated!.gotpl index b343e626..1e5bfbfc 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/generated!.gotpl +++ b/vendor/github.com/99designs/gqlgen/codegen/generated!.gotpl @@ -1,3 +1,4 @@ +{{/* Context object: codegen.Data */}} {{ reserveImport "context" }} {{ reserveImport "fmt" }} {{ reserveImport "io" }} @@ -46,7 +47,7 @@ } type DirectiveRoot struct { - {{ range $directive := .Directives }} + {{ range $directive := .UserDirectives }} {{- $directive.Declaration }} {{ end }} } @@ -93,6 +94,12 @@ {{- end }} {{- end }} +{{ range $directive := .BuiltInDirectives }} + var ( + {{- $directive.FunctionImpl }} + ) +{{ end }} + {{ if eq .Config.Exec.Layout "single-file" }} type executableSchema struct { schema *ast.Schema diff --git a/vendor/github.com/99designs/gqlgen/codegen/input.gotpl b/vendor/github.com/99designs/gqlgen/codegen/input.gotpl index 9240b56c..1b91d8db 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/input.gotpl +++ b/vendor/github.com/99designs/gqlgen/codegen/input.gotpl @@ -1,10 +1,10 @@ {{- range $input := .Inputs }} {{- if not .HasUnmarshal }} {{- $it := "it" }} - {{- if .PointersInUmarshalInput }} + {{- if .PointersInUnmarshalInput }} {{- $it = "&it" }} {{- end }} - func (ec *executionContext) unmarshalInput{{ .Name }}(ctx context.Context, obj interface{}) ({{ if .PointersInUmarshalInput }}*{{ end }}{{.Type | ref}}, error) { + func (ec *executionContext) unmarshalInput{{ .Name }}(ctx context.Context, obj interface{}) ({{ if .PointersInUnmarshalInput }}*{{ end }}{{.Type | ref}}, error) { {{- if $input.IsMap }} it := make(map[string]interface{}, 
len(obj.(map[string]interface{}))) {{- else }} diff --git a/vendor/github.com/99designs/gqlgen/codegen/object.go b/vendor/github.com/99designs/gqlgen/codegen/object.go index eee43849..869d1b36 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/object.go +++ b/vendor/github.com/99designs/gqlgen/codegen/object.go @@ -26,15 +26,15 @@ const ( type Object struct { *ast.Definition - Type types.Type - ResolverInterface types.Type - Root bool - Fields []*Field - Implements []*ast.Definition - DisableConcurrency bool - Stream bool - Directives []*Directive - PointersInUmarshalInput bool + Type types.Type + ResolverInterface types.Type + Root bool + Fields []*Field + Implements []*ast.Definition + DisableConcurrency bool + Stream bool + Directives []*Directive + PointersInUnmarshalInput bool } func (b *builder) buildObject(typ *ast.Definition) (*Object, error) { @@ -44,12 +44,12 @@ func (b *builder) buildObject(typ *ast.Definition) (*Object, error) { } caser := cases.Title(language.English, cases.NoLower) obj := &Object{ - Definition: typ, - Root: b.Config.IsRoot(typ), - DisableConcurrency: typ == b.Schema.Mutation, - Stream: typ == b.Schema.Subscription, - Directives: dirs, - PointersInUmarshalInput: b.Config.ReturnPointersInUmarshalInput, + Definition: typ, + Root: b.Config.IsRoot(typ), + DisableConcurrency: typ == b.Schema.Mutation, + Stream: typ == b.Schema.Subscription, + Directives: dirs, + PointersInUnmarshalInput: b.Config.ReturnPointersInUnmarshalInput, ResolverInterface: types.NewNamed( types.NewTypeName(0, b.Config.Exec.Pkg(), caser.String(typ.Name)+"Resolver", nil), nil, diff --git a/vendor/github.com/99designs/gqlgen/codegen/root_.gotpl b/vendor/github.com/99designs/gqlgen/codegen/root_.gotpl index 0b90ad53..d392ef53 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/root_.gotpl +++ b/vendor/github.com/99designs/gqlgen/codegen/root_.gotpl @@ -1,3 +1,4 @@ +{{/* Context object: codegen.Data */}} {{ reserveImport "context" }} {{ reserveImport "fmt" }} {{ reserveImport "io" }} @@ -45,7 +46,7 @@ type ResolverRoot interface { } type DirectiveRoot struct { -{{ range $directive := .Directives }} +{{ range $directive := .UserDirectives }} {{- $directive.Declaration }} {{ end }} } @@ -67,6 +68,12 @@ type ComplexityRoot struct { {{- end }} } +{{ range $directive := .BuiltInDirectives }} + var ( + {{- $directive.FunctionImpl }} + ) +{{ end }} + type executableSchema struct { schema *ast.Schema resolvers ResolverRoot diff --git a/vendor/github.com/99designs/gqlgen/codegen/templates/templates.go b/vendor/github.com/99designs/gqlgen/codegen/templates/templates.go index 4de30761..9b6e4cfb 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/templates/templates.go +++ b/vendor/github.com/99designs/gqlgen/codegen/templates/templates.go @@ -495,18 +495,18 @@ func wordWalker(str string, f func(*wordInfo)) { if initialisms[upperWord] { // If the uppercase word (string(runes[w:i]) is "ID" or "IP" // AND - // the word is the first two characters of the str + // the word is the first two characters of the current word // AND // that is not the end of the word // AND - // the length of the string is greater than 3 + // the length of the remaining string is greater than 3 // AND // the third rune is an uppercase one // THEN // do NOT count this as an initialism. 
switch upperWord { case "ID", "IP": - if word == str[:2] && !eow && len(str) > 3 && unicode.IsUpper(runes[3]) { + if remainingRunes := runes[w:]; word == string(remainingRunes[:2]) && !eow && len(remainingRunes) > 3 && unicode.IsUpper(remainingRunes[3]) { continue } } @@ -694,7 +694,7 @@ var pkgReplacer = strings.NewReplacer( func TypeIdentifier(t types.Type) string { res := "" for { - switch it := t.(type) { + switch it := code.Unalias(t).(type) { case *types.Pointer: t.Underlying() res += "ᚖ" @@ -771,6 +771,8 @@ var CommonInitialisms = map[string]bool{ "XMPP": true, "XSRF": true, "XSS": true, + "AWS": true, + "GCP": true, } // GetInitialisms returns the initialisms to capitalize in Go names. If unchanged, default initialisms will be returned diff --git a/vendor/github.com/99designs/gqlgen/codegen/type.gotpl b/vendor/github.com/99designs/gqlgen/codegen/type.gotpl index ebebdf14..1898d444 100644 --- a/vendor/github.com/99designs/gqlgen/codegen/type.gotpl +++ b/vendor/github.com/99designs/gqlgen/codegen/type.gotpl @@ -76,9 +76,9 @@ return res, graphql.ErrorOnPath(ctx, err) {{- else }} res, err := ec.unmarshalInput{{ $type.GQL.Name }}(ctx, v) - {{- if and $type.IsNilable (not $type.IsMap) (not $type.PointersInUmarshalInput) }} + {{- if and $type.IsNilable (not $type.IsMap) (not $type.PointersInUnmarshalInput) }} return &res, graphql.ErrorOnPath(ctx, err) - {{- else if and (not $type.IsNilable) $type.PointersInUmarshalInput }} + {{- else if and (not $type.IsNilable) $type.PointersInUnmarshalInput }} return *res, graphql.ErrorOnPath(ctx, err) {{- else }} return res, graphql.ErrorOnPath(ctx, err) diff --git a/vendor/github.com/99designs/gqlgen/graphql/bool.go b/vendor/github.com/99designs/gqlgen/graphql/bool.go index b01f6eb1..d9797a38 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/bool.go +++ b/vendor/github.com/99designs/gqlgen/graphql/bool.go @@ -20,6 +20,8 @@ func UnmarshalBoolean(v any) (bool, error) { return v != 0, nil case bool: return v, nil + case nil: + return false, nil default: return false, fmt.Errorf("%T is not a bool", v) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/cache.go b/vendor/github.com/99designs/gqlgen/graphql/cache.go index ef2dd5a5..8804cfe0 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/cache.go +++ b/vendor/github.com/99designs/gqlgen/graphql/cache.go @@ -3,27 +3,29 @@ package graphql import "context" // Cache is a shared store for APQ and query AST caching -type Cache interface { +type Cache[T any] interface { // Get looks up a key's value from the cache. - Get(ctx context.Context, key string) (value any, ok bool) + Get(ctx context.Context, key string) (value T, ok bool) // Add adds a value to the cache. - Add(ctx context.Context, key string, value any) + Add(ctx context.Context, key string, value T) } // MapCache is the simplest implementation of a cache, because it can not evict it should only be used in tests -type MapCache map[string]any +type MapCache[T any] map[string]T // Get looks up a key's value from the cache. -func (m MapCache) Get(_ context.Context, key string) (value any, ok bool) { +func (m MapCache[T]) Get(_ context.Context, key string) (value T, ok bool) { v, ok := m[key] return v, ok } // Add adds a value to the cache. 
-func (m MapCache) Add(_ context.Context, key string, value any) { m[key] = value } +func (m MapCache[T]) Add(_ context.Context, key string, value T) { m[key] = value } -type NoCache struct{} +type NoCache[T any, T2 *T] struct{} -func (n NoCache) Get(_ context.Context, _ string) (value any, ok bool) { return nil, false } -func (n NoCache) Add(_ context.Context, _ string, _ any) {} +var _ Cache[*string] = (*NoCache[string, *string])(nil) + +func (n NoCache[T, T2]) Get(_ context.Context, _ string) (value T2, ok bool) { return nil, false } +func (n NoCache[T, T2]) Add(_ context.Context, _ string, _ T2) {} diff --git a/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go b/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go index 426ad09b..566b0476 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go +++ b/vendor/github.com/99designs/gqlgen/graphql/executor/executor.go @@ -22,7 +22,7 @@ type Executor struct { errorPresenter graphql.ErrorPresenterFunc recoverFunc graphql.RecoverFunc - queryCache graphql.Cache + queryCache graphql.Cache[*ast.QueryDocument] parserTokenLimit int } @@ -36,7 +36,7 @@ func New(es graphql.ExecutableSchema) *Executor { es: es, errorPresenter: graphql.DefaultErrorPresenter, recoverFunc: graphql.DefaultRecover, - queryCache: graphql.NoCache{}, + queryCache: graphql.NoCache[ast.QueryDocument, *ast.QueryDocument]{}, ext: processExtensions(nil), parserTokenLimit: parserTokenNoLimit, } @@ -84,7 +84,6 @@ func (e *Executor) CreateOperationContext( var err error rc.Variables, err = validator.VariableValues(e.es.Schema(), rc.Operation, params.Variables) - if err != nil { gqlErr, ok := err.(*gqlerror.Error) if ok { @@ -162,7 +161,7 @@ func (e *Executor) PresentRecoveredError(ctx context.Context, err any) error { return e.errorPresenter(ctx, e.recoverFunc(ctx, err)) } -func (e *Executor) SetQueryCache(cache graphql.Cache) { +func (e *Executor) SetQueryCache(cache graphql.Cache[*ast.QueryDocument]) { e.queryCache = cache } @@ -195,7 +194,7 @@ func (e *Executor) parseQuery( stats.Parsing.End = now stats.Validation.Start = now - return doc.(*ast.QueryDocument), nil + return doc, nil } doc, err := parser.ParseQueryWithTokenLimit(&ast.Source{Input: query}, e.parserTokenLimit) diff --git a/vendor/github.com/99designs/gqlgen/graphql/float.go b/vendor/github.com/99designs/gqlgen/graphql/float.go index 465f46af..b140d5bc 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/float.go +++ b/vendor/github.com/99designs/gqlgen/graphql/float.go @@ -28,6 +28,8 @@ func UnmarshalFloat(v any) (float64, error) { return v, nil case json.Number: return strconv.ParseFloat(string(v), 64) + case nil: + return 0, nil default: return 0, fmt.Errorf("%T is not an float", v) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go index 115aaa8a..a4cb32c9 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/extension/apq.go @@ -23,7 +23,7 @@ const ( // hash in the next request. 
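graphql.Cache above is now generic over the stored value type: the executor wants a Cache[*ast.QueryDocument] and APQ a Cache[string], as the surrounding hunks show. A sketch of a custom cache satisfying the new interface; the SyncCache type is invented, only graphql.Cache and the instantiations in the assertions come from this diff.

package cache

import (
    "context"
    "sync"

    "github.com/99designs/gqlgen/graphql"
    "github.com/vektah/gqlparser/v2/ast"
)

// SyncCache is a hypothetical thread-safe cache; since graphql.Cache is generic,
// the value type is fixed at instantiation instead of passing interface{} around.
type SyncCache[T any] struct {
    mu sync.RWMutex
    m  map[string]T
}

func NewSyncCache[T any]() *SyncCache[T] {
    return &SyncCache[T]{m: make(map[string]T)}
}

// Get looks up a key's value from the cache.
func (c *SyncCache[T]) Get(_ context.Context, key string) (T, bool) {
    c.mu.RLock()
    defer c.mu.RUnlock()
    v, ok := c.m[key]
    return v, ok
}

// Add adds a value to the cache.
func (c *SyncCache[T]) Add(_ context.Context, key string, value T) {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.m[key] = value
}

// The executor stores parsed query documents, APQ stores raw query strings.
var (
    _ graphql.Cache[*ast.QueryDocument] = (*SyncCache[*ast.QueryDocument])(nil)
    _ graphql.Cache[string]             = (*SyncCache[string])(nil)
)

A server would install it with srv.SetQueryCache(NewSyncCache[*ast.QueryDocument]()), mirroring the lru.New[*ast.QueryDocument](1000) call that NewDefaultServer uses in the handler/server.go hunk below.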
// see https://github.com/apollographql/apollo-link-persisted-queries type AutomaticPersistedQuery struct { - Cache graphql.Cache + Cache graphql.Cache[string] } type ApqStats struct { @@ -72,14 +72,14 @@ func (a AutomaticPersistedQuery) MutateOperationParameters(ctx context.Context, fullQuery := false if rawParams.Query == "" { + var ok bool // client sent optimistic query hash without query string, get it from the cache - query, ok := a.Cache.Get(ctx, extension.Sha256) + rawParams.Query, ok = a.Cache.Get(ctx, extension.Sha256) if !ok { err := gqlerror.Errorf(errPersistedQueryNotFound) errcode.Set(err, errPersistedQueryNotFoundCode) return err } - rawParams.Query = query.(string) } else { // client sent optimistic query hash with query string, verify and store it if computeQueryHash(rawParams.Query) != extension.Sha256 { diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/lru/lru.go b/vendor/github.com/99designs/gqlgen/graphql/handler/lru/lru.go index 9dc480e9..946022bf 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/lru/lru.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/lru/lru.go @@ -8,26 +8,26 @@ import ( "github.com/99designs/gqlgen/graphql" ) -type LRU struct { - lru *lru.Cache[string, any] +type LRU[T any] struct { + lru *lru.Cache[string, T] } -var _ graphql.Cache = &LRU{} +var _ graphql.Cache[any] = &LRU[any]{} -func New(size int) *LRU { - cache, err := lru.New[string, any](size) +func New[T any](size int) *LRU[T] { + cache, err := lru.New[string, T](size) if err != nil { // An error is only returned for non-positive cache size // and we already checked for that. panic("unexpected error creating cache: " + err.Error()) } - return &LRU{cache} + return &LRU[T]{cache} } -func (l LRU) Get(ctx context.Context, key string) (value any, ok bool) { +func (l LRU[T]) Get(ctx context.Context, key string) (value T, ok bool) { return l.lru.Get(key) } -func (l LRU) Add(ctx context.Context, key string, value any) { +func (l LRU[T]) Add(ctx context.Context, key string, value T) { l.lru.Add(key, value) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/server.go b/vendor/github.com/99designs/gqlgen/graphql/handler/server.go index 54376b13..644bad8d 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/server.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/server.go @@ -8,6 +8,7 @@ import ( "net/http" "time" + "github.com/vektah/gqlparser/v2/ast" "github.com/vektah/gqlparser/v2/gqlerror" "github.com/99designs/gqlgen/graphql" @@ -41,11 +42,11 @@ func NewDefaultServer(es graphql.ExecutableSchema) *Server { srv.AddTransport(transport.POST{}) srv.AddTransport(transport.MultipartForm{}) - srv.SetQueryCache(lru.New(1000)) + srv.SetQueryCache(lru.New[*ast.QueryDocument](1000)) srv.Use(extension.Introspection{}) srv.Use(extension.AutomaticPersistedQuery{ - Cache: lru.New(100), + Cache: lru.New[string](100), }) return srv @@ -63,7 +64,7 @@ func (s *Server) SetRecoverFunc(f graphql.RecoverFunc) { s.exec.SetRecoverFunc(f) } -func (s *Server) SetQueryCache(cache graphql.Cache) { +func (s *Server) SetQueryCache(cache graphql.Cache[*ast.QueryDocument]) { s.exec.SetQueryCache(cache) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_graphql.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_graphql.go index bd511525..0bad1110 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_graphql.go +++ 
b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/http_graphql.go @@ -88,7 +88,6 @@ func cleanupBody(body string) (out string, err error) { // is where query starts. If it is, query is url encoded. if strings.HasPrefix(body, "%7B") { body, err = url.QueryUnescape(body) - if err != nil { return body, err } diff --git a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go index 651ccee4..32e31c7c 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go +++ b/vendor/github.com/99designs/gqlgen/graphql/handler/transport/websocket.go @@ -198,7 +198,7 @@ func (c *wsConnection) init() bool { var ctx context.Context ctx, initAckPayload, err = c.InitFunc(c.ctx, c.initPayload) if err != nil { - c.sendConnectionError(err.Error()) + c.sendConnectionError("%s", err.Error()) c.close(websocket.CloseNormalClosure, "terminated") return false } @@ -239,7 +239,6 @@ func (c *wsConnection) run() { ctx, cancel := context.WithCancel(c.ctx) defer func() { cancel() - c.close(websocket.CloseAbnormalClosure, "unexpected closure") }() // If we're running in graphql-ws mode, create a timer that will trigger a @@ -369,7 +368,7 @@ func (c *wsConnection) closeOnCancel(ctx context.Context) { <-ctx.Done() if r := closeReasonForContext(ctx); r != "" { - c.sendConnectionError(r) + c.sendConnectionError("%s", r) } c.close(websocket.CloseNormalClosure, "terminated") } diff --git a/vendor/github.com/99designs/gqlgen/graphql/int.go b/vendor/github.com/99designs/gqlgen/graphql/int.go index 2a5604e9..41cad3f1 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/int.go +++ b/vendor/github.com/99designs/gqlgen/graphql/int.go @@ -23,6 +23,8 @@ func UnmarshalInt(v any) (int, error) { return int(v), nil case json.Number: return strconv.Atoi(string(v)) + case nil: + return 0, nil default: return 0, fmt.Errorf("%T is not an int", v) } @@ -44,6 +46,8 @@ func UnmarshalInt64(v any) (int64, error) { return v, nil case json.Number: return strconv.ParseInt(string(v), 10, 64) + case nil: + return 0, nil default: return 0, fmt.Errorf("%T is not an int", v) } @@ -73,6 +77,8 @@ func UnmarshalInt32(v any) (int32, error) { return 0, err } return int32(iv), nil + case nil: + return 0, nil default: return 0, fmt.Errorf("%T is not an int", v) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/string.go b/vendor/github.com/99designs/gqlgen/graphql/string.go index 61da5810..6622734e 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/string.go +++ b/vendor/github.com/99designs/gqlgen/graphql/string.go @@ -62,7 +62,7 @@ func UnmarshalString(v any) (string, error) { case bool: return strconv.FormatBool(v), nil case nil: - return "null", nil + return "", nil default: return "", fmt.Errorf("%T is not a string", v) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/uint.go b/vendor/github.com/99designs/gqlgen/graphql/uint.go index ffccaf64..cd5d2355 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/uint.go +++ b/vendor/github.com/99designs/gqlgen/graphql/uint.go @@ -34,6 +34,8 @@ func UnmarshalUint(v any) (uint, error) { case json.Number: u64, err := strconv.ParseUint(string(v), 10, 64) return uint(u64), err + case nil: + return 0, nil default: return 0, fmt.Errorf("%T is not an uint", v) } @@ -63,6 +65,8 @@ func UnmarshalUint64(v any) (uint64, error) { return uint64(v), nil case json.Number: return strconv.ParseUint(string(v), 10, 64) + case nil: + return 0, nil default: return 0, 
fmt.Errorf("%T is not an uint", v) } @@ -100,6 +104,8 @@ func UnmarshalUint32(v any) (uint32, error) { return 0, err } return uint32(iv), nil + case nil: + return 0, nil default: return 0, fmt.Errorf("%T is not an uint", v) } diff --git a/vendor/github.com/99designs/gqlgen/graphql/version.go b/vendor/github.com/99designs/gqlgen/graphql/version.go index 82266736..2031242f 100644 --- a/vendor/github.com/99designs/gqlgen/graphql/version.go +++ b/vendor/github.com/99designs/gqlgen/graphql/version.go @@ -1,3 +1,3 @@ package graphql -const Version = "v0.17.49" +const Version = "v0.17.55" diff --git a/vendor/github.com/99designs/gqlgen/init-templates/gqlgen.yml.gotmpl b/vendor/github.com/99designs/gqlgen/init-templates/gqlgen.yml.gotmpl index 6e97f8bf..648ec2b4 100644 --- a/vendor/github.com/99designs/gqlgen/init-templates/gqlgen.yml.gotmpl +++ b/vendor/github.com/99designs/gqlgen/init-templates/gqlgen.yml.gotmpl @@ -11,6 +11,9 @@ exec: # federation: # filename: graph/federation.go # package: graph +# version: 2 +# options +# computed_requires: true # Where should any generated models go? model: @@ -63,6 +66,13 @@ resolver: # Optional: set to skip running `go mod tidy` when generating server code # skip_mod_tidy: true +# Optional: if this is set to true, argument directives that +# decorate a field with a null value will still be called. +# +# This enables argumment directives to not just mutate +# argument values but to set them even if they're null. +call_argument_directives_with_null: true + # gqlgen will search for any type names in the schema in these go packages # if they match it will use them, otherwise it will generate them. autobind: diff --git a/vendor/github.com/99designs/gqlgen/internal/code/alias.go b/vendor/github.com/99designs/gqlgen/internal/code/alias.go new file mode 100644 index 00000000..f7685801 --- /dev/null +++ b/vendor/github.com/99designs/gqlgen/internal/code/alias.go @@ -0,0 +1,13 @@ +//go:build !go1.23 + +package code + +import ( + "go/types" +) + +// Unalias unwraps an alias type +// TODO: Drop this function when we drop support for go1.22 +func Unalias(t types.Type) types.Type { + return t // No-op +} diff --git a/vendor/github.com/99designs/gqlgen/internal/code/alias_1.23.go b/vendor/github.com/99designs/gqlgen/internal/code/alias_1.23.go new file mode 100644 index 00000000..fa0b216c --- /dev/null +++ b/vendor/github.com/99designs/gqlgen/internal/code/alias_1.23.go @@ -0,0 +1,19 @@ +//go:build go1.23 + +package code + +import ( + "go/types" +) + +// Unalias unwraps an alias type +func Unalias(t types.Type) types.Type { + if p, ok := t.(*types.Pointer); ok { + // If the type come from auto-binding, + // it will be a pointer to an alias type. + // (e.g: `type Cursor = entgql.Cursor[int]`) + // *ent.Cursor is the type we got from auto-binding. 
+ return types.NewPointer(Unalias(p.Elem())) + } + return types.Unalias(t) +} diff --git a/vendor/github.com/99designs/gqlgen/internal/code/compare.go b/vendor/github.com/99designs/gqlgen/internal/code/compare.go index a3f15f18..05fad22d 100644 --- a/vendor/github.com/99designs/gqlgen/internal/code/compare.go +++ b/vendor/github.com/99designs/gqlgen/internal/code/compare.go @@ -8,6 +8,8 @@ import ( // CompatibleTypes isnt a strict comparison, it allows for pointer differences func CompatibleTypes(expected, actual types.Type) error { + // Unwrap any aliases + expected, actual = Unalias(expected), Unalias(actual) // Special case to deal with pointer mismatches { expectedPtr, expectedIsPtr := expected.(*types.Pointer) diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/constants.go b/vendor/github.com/99designs/gqlgen/plugin/federation/constants.go new file mode 100644 index 00000000..8571db1f --- /dev/null +++ b/vendor/github.com/99designs/gqlgen/plugin/federation/constants.go @@ -0,0 +1,185 @@ +package federation + +import ( + "github.com/99designs/gqlgen/codegen/config" + "github.com/vektah/gqlparser/v2/ast" +) + +// The name of the field argument that is injected into the resolver to support @requires. +const fieldArgRequires = "_federationRequires" + +// The name of the scalar type used in the injected field argument to support @requires. +const mapTypeName = "_RequiresMap" + +// The @key directive that defines the key fields for an entity. +const dirNameKey = "key" + +// The @requires directive that defines the required fields for an entity to be resolved. +const dirNameRequires = "requires" + +// The @entityResolver directive allows users to specify entity resolvers as batch lookups +const dirNameEntityResolver = "entityResolver" + +const dirNamePopulateFromRepresentations = "populateFromRepresentations" + +var populateFromRepresentationsImplementation = `func(ctx context.Context, obj any, next graphql.Resolver) (res any, err error) { + fc := graphql.GetFieldContext(ctx) + + // We get the Federation representations argument from the _entities resolver + representations, ok := fc.Parent.Parent.Args["representations"].([]map[string]any) + if !ok { + return nil, errors.New("must be called from within _entities") + } + + // Get the index of the current entity in the representations list. This is + // set by the execution context after the _entities resolver is called. + index := fc.Parent.Index + if index == nil { + return nil, errors.New("couldn't find input index for entity") + } + + if len(representations) < *index { + return nil, errors.New("representation not found") + } + + return representations[*index], nil +}` + +const DirNameEntityReference = "entityReference" + +// The fields arguments must be provided to both key and requires directives. +const DirArgFields = "fields" + +// Tells the code generator what type the directive is referencing +const DirArgType = "type" + +// The file name for Federation directives +const dirGraphQLQFile = "federation/directives.graphql" + +// The file name for Federation entities +const entityGraphQLQFile = "federation/entity.graphql" + +const federationVersion1Schema = ` + directive @key(fields: _FieldSet!) repeatable on OBJECT | INTERFACE + directive @requires(fields: _FieldSet!) on FIELD_DEFINITION + directive @provides(fields: _FieldSet!) 
on FIELD_DEFINITION + directive @extends on OBJECT | INTERFACE + directive @external on FIELD_DEFINITION + scalar _Any + scalar _FieldSet +` + +const federationVersion2Schema = ` + directive @authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM + directive @composeDirective(name: String!) repeatable on SCHEMA + directive @extends on OBJECT | INTERFACE + directive @external on OBJECT | FIELD_DEFINITION + directive @key(fields: FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE + directive @inaccessible on + | ARGUMENT_DEFINITION + | ENUM + | ENUM_VALUE + | FIELD_DEFINITION + | INPUT_FIELD_DEFINITION + | INPUT_OBJECT + | INTERFACE + | OBJECT + | SCALAR + | UNION + directive @interfaceObject on OBJECT + directive @link(import: [String!], url: String!) repeatable on SCHEMA + directive @override(from: String!, label: String) on FIELD_DEFINITION + directive @policy(policies: [[federation__Policy!]!]!) on + | FIELD_DEFINITION + | OBJECT + | INTERFACE + | SCALAR + | ENUM + directive @provides(fields: FieldSet!) on FIELD_DEFINITION + directive @requires(fields: FieldSet!) on FIELD_DEFINITION + directive @requiresScopes(scopes: [[federation__Scope!]!]!) on + | FIELD_DEFINITION + | OBJECT + | INTERFACE + | SCALAR + | ENUM + directive @shareable repeatable on FIELD_DEFINITION | OBJECT + directive @tag(name: String!) repeatable on + | ARGUMENT_DEFINITION + | ENUM + | ENUM_VALUE + | FIELD_DEFINITION + | INPUT_FIELD_DEFINITION + | INPUT_OBJECT + | INTERFACE + | OBJECT + | SCALAR + | UNION + scalar _Any + scalar FieldSet + scalar federation__Policy + scalar federation__Scope +` + +var builtins = config.TypeMap{ + "_Service": { + Model: config.StringList{ + "github.com/99designs/gqlgen/plugin/federation/fedruntime.Service", + }, + }, + "_Entity": { + Model: config.StringList{ + "github.com/99designs/gqlgen/plugin/federation/fedruntime.Entity", + }, + }, + "Entity": { + Model: config.StringList{ + "github.com/99designs/gqlgen/plugin/federation/fedruntime.Entity", + }, + }, + "_Any": { + Model: config.StringList{"github.com/99designs/gqlgen/graphql.Map"}, + }, + "federation__Scope": { + Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"}, + }, + "federation__Policy": { + Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"}, + }, +} + +var dirPopulateFromRepresentations = &ast.DirectiveDefinition{ + Name: dirNamePopulateFromRepresentations, + IsRepeatable: false, + Description: `This is a runtime directive used to implement @requires. It's automatically placed +on the generated _federationRequires argument, and the implementation of it extracts the +correct value from the input representations list.`, + Locations: []ast.DirectiveLocation{ast.LocationArgumentDefinition}, + Position: &ast.Position{Src: &ast.Source{ + Name: dirGraphQLQFile, + }}, +} + +var dirEntityReference = &ast.DirectiveDefinition{ + Name: DirNameEntityReference, + IsRepeatable: false, + Description: `This is a compile-time directive used to implement @requires. 
+It tells the code generator how to generate the model for the scalar.`, + Locations: []ast.DirectiveLocation{ast.LocationScalar}, + Arguments: ast.ArgumentDefinitionList{ + { + Name: DirArgType, + Type: ast.NonNullNamedType("String", nil), + Description: `The name of the entity that the fields selection +set should be validated against.`, + }, + { + Name: DirArgFields, + Type: ast.NonNullNamedType("FieldSet", nil), + Description: "The selection that the scalar should generate into.", + }, + }, + Position: &ast.Position{Src: &ast.Source{ + Name: dirGraphQLQFile, + }}, +} diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/entity.go b/vendor/github.com/99designs/gqlgen/plugin/federation/entity.go index 4e9e1afc..562d86c5 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/federation/entity.go +++ b/vendor/github.com/99designs/gqlgen/plugin/federation/entity.go @@ -22,10 +22,12 @@ type Entity struct { } type EntityResolver struct { - ResolverName string // The resolver name, such as FindUserByID - KeyFields []*KeyField // The fields declared in @key. - InputType types.Type // The Go generated input type for multi entity resolvers - InputTypeName string + ResolverName string // The resolver name, such as FindUserByID + KeyFields []*KeyField // The fields declared in @key. + InputType types.Type // The Go generated input type for multi entity resolvers + InputTypeName string + ReturnType types.Type // The Go generated return type for the entity + ReturnTypeName string } func (e *EntityResolver) LookupInputType() string { @@ -60,7 +62,7 @@ func (e *Entity) isFieldImplicitlyExternal(field *ast.FieldDefinition, federatio if federationVersion != 2 { return false } - // TODO: From the spec, it seems like if an entity is not resolvable then it should not only not have a resolver, but should not appear in the _Entitiy union. + // TODO: From the spec, it seems like if an entity is not resolvable then it should not only not have a resolver, but should not appear in the _Entity union. // The current implementation is a less drastic departure from the previous behavior, but should probably be reviewed. // See https://www.apollographql.com/docs/federation/subgraph-spec/ if e.isResolvable() { @@ -76,7 +78,7 @@ func (e *Entity) isFieldImplicitlyExternal(field *ast.FieldDefinition, federatio // Determine if the entity is resolvable. func (e *Entity) isResolvable() bool { - key := e.Def.Directives.ForName("key") + key := e.Def.Directives.ForName(dirNameKey) if key == nil { // If there is no key directive, the entity is resolvable. return true @@ -102,11 +104,11 @@ func (e *Entity) isKeyField(field *ast.FieldDefinition) bool { // Get the key fields for this entity. 
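Context for the `EntityResolver` additions above (`ReturnType`/`ReturnTypeName` feed the regenerated SDL later in this diff): user-written entity resolvers keep their existing shape. A rough, self-contained sketch of a batch resolver enabled by `@entityResolver(multi: true)`, using stand-in types in place of the gqlgen-generated ones from the upstream readme example further down (`MultiHello` and `ManyMultiHellosByNameInput` are placeholders, not code from this repository):

```go
package resolvers

import "context"

// Stand-ins for the gqlgen-generated types used in the upstream readme
// example (hypothetical; the real ones come from the service's generated
// package).
type MultiHello struct{ Name string }

type ManyMultiHellosByNameInput struct{ Name string }

type entityResolver struct{}

// FindManyMultiHellosByName receives the whole batch of representations in
// one call, which is what @entityResolver(multi: true) generates instead of
// one find-by-key call per representation.
func (r *entityResolver) FindManyMultiHellosByName(ctx context.Context, reps []*ManyMultiHellosByNameInput) ([]*MultiHello, error) {
	out := make([]*MultiHello, 0, len(reps))
	for _, rep := range reps {
		out = append(out, &MultiHello{Name: rep.Name})
	}
	return out, nil
}
```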
func (e *Entity) keyFields() []string { - key := e.Def.Directives.ForName("key") + key := e.Def.Directives.ForName(dirNameKey) if key == nil { return []string{} } - fields := key.Arguments.ForName("fields") + fields := key.Arguments.ForName(DirArgFields) if fields == nil { return []string{} } diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go index fd33255b..f7a4d6be 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go +++ b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.go @@ -2,6 +2,7 @@ package federation import ( _ "embed" + "errors" "fmt" "sort" "strings" @@ -12,7 +13,6 @@ import ( "github.com/99designs/gqlgen/codegen/config" "github.com/99designs/gqlgen/codegen/templates" "github.com/99designs/gqlgen/internal/rewrite" - "github.com/99designs/gqlgen/plugin" "github.com/99designs/gqlgen/plugin/federation/fieldset" ) @@ -22,56 +22,81 @@ var federationTemplate string //go:embed requires.gotpl var explicitRequiresTemplate string -type federation struct { +type Federation struct { Entities []*Entity - Version int - PackageOptions map[string]bool + PackageOptions PackageOptions + + version int + + // true if @requires is used in the schema + usesRequires bool +} + +type PackageOptions struct { + // ExplicitRequires will generate a function in the execution context + // to populate fields using the @required directive into the entity. + // + // You can only set one of ExplicitRequires or ComputedRequires to true. + ExplicitRequires bool + // ComputedRequires generates resolver functions to compute values for + // fields using the @required directive. + ComputedRequires bool } // New returns a federation plugin that injects // federated directives and types into the schema -func New(version int) plugin.Plugin { +func New(version int, cfg *config.Config) (*Federation, error) { if version == 0 { version = 1 } - return &federation{Version: version} + options, err := buildPackageOptions(cfg) + if err != nil { + return nil, fmt.Errorf("invalid federation package options: %w", err) + } + return &Federation{ + version: version, + PackageOptions: options, + }, nil +} + +func buildPackageOptions(cfg *config.Config) (PackageOptions, error) { + packageOptions := cfg.Federation.Options + + explicitRequires := packageOptions["explicit_requires"] + computedRequires := packageOptions["computed_requires"] + if explicitRequires && computedRequires { + return PackageOptions{}, errors.New("only one of explicit_requires or computed_requires can be set to true") + } + + if computedRequires { + if cfg.Federation.Version != 2 { + return PackageOptions{}, errors.New("when using federation.options.computed_requires you must be using Federation 2") + } + + // We rely on injecting a null argument with a directives for fields with @requires, so we need to ensure + // our directive is always called. + if !cfg.CallArgumentDirectivesWithNull { + return PackageOptions{}, errors.New("when using federation.options.computed_requires, call_argument_directives_with_null must be set to true") + } + } + + // We rely on injecting a null argument with a directives for fields with @requires, so we need to ensure + // our directive is always called. 
+ + return PackageOptions{ + ExplicitRequires: explicitRequires, + ComputedRequires: computedRequires, + }, nil } // Name returns the plugin name -func (f *federation) Name() string { +func (f *Federation) Name() string { return "federation" } // MutateConfig mutates the configuration -func (f *federation) MutateConfig(cfg *config.Config) error { - builtins := config.TypeMap{ - "_Service": { - Model: config.StringList{ - "github.com/99designs/gqlgen/plugin/federation/fedruntime.Service", - }, - }, - "_Entity": { - Model: config.StringList{ - "github.com/99designs/gqlgen/plugin/federation/fedruntime.Entity", - }, - }, - "Entity": { - Model: config.StringList{ - "github.com/99designs/gqlgen/plugin/federation/fedruntime.Entity", - }, - }, - "_Any": { - Model: config.StringList{"github.com/99designs/gqlgen/graphql.Map"}, - }, - "federation__Scope": { - Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"}, - }, - "federation__Policy": { - Model: config.StringList{"github.com/99designs/gqlgen/graphql.String"}, - }, - } - +func (f *Federation) MutateConfig(cfg *config.Config) error { for typeName, entry := range builtins { if cfg.Models.Exists(typeName) { return fmt.Errorf("%v already exists which must be reserved when Federation is enabled", typeName) @@ -79,13 +104,14 @@ func (f *federation) MutateConfig(cfg *config.Config) error { cfg.Models[typeName] = entry } cfg.Directives["external"] = config.DirectiveConfig{SkipRuntime: true} - cfg.Directives["requires"] = config.DirectiveConfig{SkipRuntime: true} + cfg.Directives[dirNameRequires] = config.DirectiveConfig{SkipRuntime: true} cfg.Directives["provides"] = config.DirectiveConfig{SkipRuntime: true} - cfg.Directives["key"] = config.DirectiveConfig{SkipRuntime: true} + cfg.Directives[dirNameKey] = config.DirectiveConfig{SkipRuntime: true} cfg.Directives["extends"] = config.DirectiveConfig{SkipRuntime: true} + cfg.Directives[dirNameEntityResolver] = config.DirectiveConfig{SkipRuntime: true} // Federation 2 specific directives - if f.Version == 2 { + if f.version == 2 { cfg.Directives["shareable"] = config.DirectiveConfig{SkipRuntime: true} cfg.Directives["link"] = config.DirectiveConfig{SkipRuntime: true} cfg.Directives["tag"] = config.DirectiveConfig{SkipRuntime: true} @@ -98,95 +124,48 @@ func (f *federation) MutateConfig(cfg *config.Config) error { cfg.Directives["composeDirective"] = config.DirectiveConfig{SkipRuntime: true} } + if f.usesRequires && f.PackageOptions.ComputedRequires { + cfg.Schema.Directives[dirPopulateFromRepresentations.Name] = dirPopulateFromRepresentations + cfg.Directives[dirPopulateFromRepresentations.Name] = config.DirectiveConfig{Implementation: &populateFromRepresentationsImplementation} + + cfg.Schema.Directives[dirEntityReference.Name] = dirEntityReference + cfg.Directives[dirEntityReference.Name] = config.DirectiveConfig{SkipRuntime: true} + + f.addMapType(cfg) + f.mutateSchemaForRequires(cfg.Schema, cfg) + } + return nil } -func (f *federation) InjectSourceEarly() *ast.Source { +func (f *Federation) InjectSourcesEarly() ([]*ast.Source, error) { input := `` // add version-specific changes on key directive, as well as adding the new directives for federation 2 - if f.Version == 1 { - input += ` - directive @key(fields: _FieldSet!) repeatable on OBJECT | INTERFACE - directive @requires(fields: _FieldSet!) on FIELD_DEFINITION - directive @provides(fields: _FieldSet!) 
on FIELD_DEFINITION - directive @extends on OBJECT | INTERFACE - directive @external on FIELD_DEFINITION - scalar _Any - scalar _FieldSet -` - } else if f.Version == 2 { - input += ` - directive @authenticated on FIELD_DEFINITION | OBJECT | INTERFACE | SCALAR | ENUM - directive @composeDirective(name: String!) repeatable on SCHEMA - directive @extends on OBJECT | INTERFACE - directive @external on OBJECT | FIELD_DEFINITION - directive @key(fields: FieldSet!, resolvable: Boolean = true) repeatable on OBJECT | INTERFACE - directive @inaccessible on - | ARGUMENT_DEFINITION - | ENUM - | ENUM_VALUE - | FIELD_DEFINITION - | INPUT_FIELD_DEFINITION - | INPUT_OBJECT - | INTERFACE - | OBJECT - | SCALAR - | UNION - directive @interfaceObject on OBJECT - directive @link(import: [String!], url: String!) repeatable on SCHEMA - directive @override(from: String!, label: String) on FIELD_DEFINITION - directive @policy(policies: [[federation__Policy!]!]!) on - | FIELD_DEFINITION - | OBJECT - | INTERFACE - | SCALAR - | ENUM - directive @provides(fields: FieldSet!) on FIELD_DEFINITION - directive @requires(fields: FieldSet!) on FIELD_DEFINITION - directive @requiresScopes(scopes: [[federation__Scope!]!]!) on - | FIELD_DEFINITION - | OBJECT - | INTERFACE - | SCALAR - | ENUM - directive @shareable repeatable on FIELD_DEFINITION | OBJECT - directive @tag(name: String!) repeatable on - | ARGUMENT_DEFINITION - | ENUM - | ENUM_VALUE - | FIELD_DEFINITION - | INPUT_FIELD_DEFINITION - | INPUT_OBJECT - | INTERFACE - | OBJECT - | SCALAR - | UNION - scalar _Any - scalar FieldSet - scalar federation__Policy - scalar federation__Scope -` + if f.version == 1 { + input += federationVersion1Schema + } else if f.version == 2 { + input += federationVersion2Schema } - return &ast.Source{ - Name: "federation/directives.graphql", + + return []*ast.Source{{ + Name: dirGraphQLQFile, Input: input, BuiltIn: true, - } + }}, nil } // InjectSourceLate creates a GraphQL Entity type with all // the fields that had the @key directive -func (f *federation) InjectSourceLate(schema *ast.Schema) *ast.Source { - f.setEntities(schema) +func (f *Federation) InjectSourcesLate(schema *ast.Schema) ([]*ast.Source, error) { + f.Entities = f.buildEntities(schema, f.version) - var entities, resolvers, entityResolverInputDefinitions string + entities := make([]string, 0) + resolvers := make([]string, 0) + entityResolverInputDefinitions := make([]string, 0) for _, e := range f.Entities { if e.Def.Kind != ast.Interface { - if entities != "" { - entities += " | " - } - entities += e.Name + entities = append(entities, e.Name) } else if len(schema.GetPossibleTypes(e.Def)) == 0 { fmt.Println( "skipping @key field on interface " + e.Def.Name + " as no types implement it", @@ -194,48 +173,33 @@ func (f *federation) InjectSourceLate(schema *ast.Schema) *ast.Source { } for _, r := range e.Resolvers { - if e.Multi { - if entityResolverInputDefinitions != "" { - entityResolverInputDefinitions += "\n\n" - } - entityResolverInputDefinitions += "input " + r.InputTypeName + " {\n" - for _, keyField := range r.KeyFields { - entityResolverInputDefinitions += fmt.Sprintf( - "\t%s: %s\n", - keyField.Field.ToGo(), - keyField.Definition.Type.String(), - ) - } - entityResolverInputDefinitions += "}" - resolvers += fmt.Sprintf("\t%s(reps: [%s]!): [%s]\n", r.ResolverName, r.InputTypeName, e.Name) - } else { - resolverArgs := "" - for _, keyField := range r.KeyFields { - resolverArgs += fmt.Sprintf("%s: %s,", keyField.Field.ToGoPrivate(), keyField.Definition.Type.String()) - } - 
resolvers += fmt.Sprintf("\t%s(%s): %s!\n", r.ResolverName, resolverArgs, e.Name) + resolverSDL, entityResolverInputSDL := buildResolverSDL(r, e.Multi) + resolvers = append(resolvers, resolverSDL) + if entityResolverInputSDL != "" { + entityResolverInputDefinitions = append(entityResolverInputDefinitions, entityResolverInputSDL) } } } var blocks []string - if entities != "" { - entities = `# a union of all types that use the @key directive -union _Entity = ` + entities - blocks = append(blocks, entities) + if len(entities) > 0 { + entitiesSDL := `# a union of all types that use the @key directive +union _Entity = ` + strings.Join(entities, " | ") + blocks = append(blocks, entitiesSDL) } // resolvers can be empty if a service defines only "empty // extend" types. This should be rare. - if resolvers != "" { - if entityResolverInputDefinitions != "" { - blocks = append(blocks, entityResolverInputDefinitions) + if len(resolvers) > 0 { + if len(entityResolverInputDefinitions) > 0 { + inputSDL := strings.Join(entityResolverInputDefinitions, "\n\n") + blocks = append(blocks, inputSDL) } - resolvers = `# fake type to build resolver interfaces for users to implement + resolversSDL := `# fake type to build resolver interfaces for users to implement type Entity { - ` + resolvers + ` +` + strings.Join(resolvers, "\n") + ` }` - blocks = append(blocks, resolvers) + blocks = append(blocks, resolversSDL) } _serviceTypeDef := `type _Service { @@ -259,14 +223,14 @@ type Entity { }` blocks = append(blocks, extendTypeQueryDef) - return &ast.Source{ - Name: "federation/entity.graphql", + return []*ast.Source{{ + Name: entityGraphQLQFile, BuiltIn: true, Input: "\n" + strings.Join(blocks, "\n\n") + "\n", - } + }}, nil } -func (f *federation) GenerateCode(data *codegen.Data) error { +func (f *Federation) GenerateCode(data *codegen.Data) error { // requires imports requiresImports := make(map[string]bool, 0) requiresImports["context"] = true @@ -275,7 +239,11 @@ func (f *federation) GenerateCode(data *codegen.Data) error { requiresEntities := make(map[string]*Entity, 0) // Save package options on f for template use - f.PackageOptions = data.Config.Federation.Options + packageOptions, err := buildPackageOptions(data.Config) + if err != nil { + return fmt.Errorf("invalid federation package options: %w", err) + } + f.PackageOptions = packageOptions if len(f.Entities) > 0 { if data.Objects.ByName("Entity") != nil { @@ -295,18 +263,7 @@ func (f *federation) GenerateCode(data *codegen.Data) error { } for _, r := range e.Resolvers { - // fill in types for key fields - // - for _, keyField := range r.KeyFields { - if len(keyField.Field) == 0 { - fmt.Println( - "skipping @key field " + keyField.Definition.Name + " in " + r.ResolverName + " in " + e.Def.Name, - ) - continue - } - cgField := keyField.Field.TypeReference(obj, data.Objects) - keyField.Type = cgField.TypeReference - } + populateKeyFieldTypes(r, obj, data.Objects, e.Def.Name) } // fill in types for requires fields @@ -348,69 +305,12 @@ func (f *federation) GenerateCode(data *codegen.Data) error { } } - if data.Config.Federation.Options["explicit_requires"] && len(requiresEntities) > 0 { - // check for existing requires functions - type Populator struct { - FuncName string - Exists bool - Comment string - Implementation string - Entity *Entity - } - populators := make([]Populator, 0) - - rewriter, err := rewrite.New(data.Config.Federation.Dir()) - if err != nil { - return err - } - - for name, entity := range requiresEntities { - populator := Populator{ - 
FuncName: fmt.Sprintf("Populate%sRequires", name), - Entity: entity, - } - - populator.Comment = strings.TrimSpace(strings.TrimLeft(rewriter.GetMethodComment("executionContext", populator.FuncName), `\`)) - populator.Implementation = strings.TrimSpace(rewriter.GetMethodBody("executionContext", populator.FuncName)) - - if populator.Implementation == "" { - populator.Exists = false - populator.Implementation = fmt.Sprintf("panic(fmt.Errorf(\"not implemented: %v\"))", populator.FuncName) - } - populators = append(populators, populator) - } - - sort.Slice(populators, func(i, j int) bool { - return populators[i].FuncName < populators[j].FuncName - }) - - requiresFile := data.Config.Federation.Dir() + "/federation.requires.go" - existingImports := rewriter.ExistingImports(requiresFile) - for _, imp := range existingImports { - if imp.Alias == "" { - // import exists in both places, remove - delete(requiresImports, imp.ImportPath) - } - } - - for k := range requiresImports { - existingImports = append(existingImports, rewrite.Import{ImportPath: k}) - } - - // render requires populators - err = templates.Render(templates.Options{ - PackageName: data.Config.Federation.Package, - Filename: requiresFile, - Data: struct { - federation - ExistingImports []rewrite.Import - Populators []Populator - OriginalSource string - }{*f, existingImports, populators, ""}, - GeneratedHeader: false, - Packages: data.Config.Packages, - Template: explicitRequiresTemplate, - }) + if f.PackageOptions.ExplicitRequires && len(requiresEntities) > 0 { + err := f.generateExplicitRequires( + data, + requiresEntities, + requiresImports, + ) if err != nil { return err } @@ -420,7 +320,7 @@ func (f *federation) GenerateCode(data *codegen.Data) error { PackageName: data.Config.Federation.Package, Filename: data.Config.Federation.Filename, Data: struct { - federation + Federation UsePointers bool }{*f, data.Config.ResolversAlwaysReturnPointers}, GeneratedHeader: true, @@ -429,137 +329,227 @@ func (f *federation) GenerateCode(data *codegen.Data) error { }) } -func (f *federation) setEntities(schema *ast.Schema) { +// Fill in types for key fields +func populateKeyFieldTypes( + resolver *EntityResolver, + obj *codegen.Object, + allObjects codegen.Objects, + name string, +) { + for _, keyField := range resolver.KeyFields { + if len(keyField.Field) == 0 { + fmt.Println( + "skipping @key field " + keyField.Definition.Name + " in " + resolver.ResolverName + " in " + name, + ) + continue + } + cgField := keyField.Field.TypeReference(obj, allObjects) + keyField.Type = cgField.TypeReference + } +} + +func (f *Federation) buildEntities(schema *ast.Schema, version int) []*Entity { + entities := make([]*Entity, 0) for _, schemaType := range schema.Types { - keys, ok := isFederatedEntity(schemaType) - if !ok { - continue + entity := f.buildEntity(schemaType, schema, version) + if entity != nil { + entities = append(entities, entity) } - - if (schemaType.Kind == ast.Interface) && (len(schema.GetPossibleTypes(schemaType)) == 0) { - fmt.Printf("@key directive found on unused \"interface %s\". Will be ignored.\n", schemaType.Name) - continue - } - - e := &Entity{ - Name: schemaType.Name, - Def: schemaType, - Resolvers: nil, - Requires: nil, - } - - // Let's process custom entity resolver settings. 
- dir := schemaType.Directives.ForName("entityResolver") - if dir != nil { - if dirArg := dir.Arguments.ForName("multi"); dirArg != nil { - if dirVal, err := dirArg.Value.Value(nil); err == nil { - e.Multi = dirVal.(bool) - } - } - } - - // If our schema has a field with a type defined in - // another service, then we need to define an "empty - // extend" of that type in this service, so this service - // knows what the type is like. But the graphql-server - // will never ask us to actually resolve this "empty - // extend", so we don't require a resolver function for - // it. (Well, it will never ask in practice; it's - // unclear whether the spec guarantees this. See - // https://github.com/apollographql/apollo-server/issues/3852 - // ). Example: - // type MyType { - // myvar: TypeDefinedInOtherService - // } - // // Federation needs this type, but - // // it doesn't need a resolver for it! - // extend TypeDefinedInOtherService @key(fields: "id") { - // id: ID @external - // } - if !e.allFieldsAreExternal(f.Version) { - for _, dir := range keys { - if len(dir.Arguments) > 2 { - panic("More than two arguments provided for @key declaration.") - } - var arg *ast.Argument - - // since keys are able to now have multiple arguments, we need to check both possible for a possible @key(fields="" fields="") - for _, a := range dir.Arguments { - if a.Name == "fields" { - if arg != nil { - panic("More than one `fields` provided for @key declaration.") - } - arg = a - } - } - - keyFieldSet := fieldset.New(arg.Value.Raw, nil) - - keyFields := make([]*KeyField, len(keyFieldSet)) - resolverFields := []string{} - for i, field := range keyFieldSet { - def := field.FieldDefinition(schemaType, schema) - - if def == nil { - panic(fmt.Sprintf("no field for %v", field)) - } - - keyFields[i] = &KeyField{Definition: def, Field: field} - resolverFields = append(resolverFields, keyFields[i].Field.ToGo()) - } - - resolverFieldsToGo := schemaType.Name + "By" + strings.Join(resolverFields, "And") - var resolverName string - if e.Multi { - resolverFieldsToGo += "s" // Pluralize for better API readability - resolverName = fmt.Sprintf("findMany%s", resolverFieldsToGo) - } else { - resolverName = fmt.Sprintf("find%s", resolverFieldsToGo) - } - - e.Resolvers = append(e.Resolvers, &EntityResolver{ - ResolverName: resolverName, - KeyFields: keyFields, - InputTypeName: resolverFieldsToGo + "Input", - }) - } - - e.Requires = []*Requires{} - for _, f := range schemaType.Fields { - dir := f.Directives.ForName("requires") - if dir == nil { - continue - } - if len(dir.Arguments) != 1 || dir.Arguments[0].Name != "fields" { - panic("Exactly one `fields` argument needed for @requires declaration.") - } - requiresFieldSet := fieldset.New(dir.Arguments[0].Value.Raw, nil) - for _, field := range requiresFieldSet { - e.Requires = append(e.Requires, &Requires{ - Name: field.ToGoPrivate(), - Field: field, - }) - } - } - } - f.Entities = append(f.Entities, e) } // make sure order remains stable across multiple builds - sort.Slice(f.Entities, func(i, j int) bool { - return f.Entities[i].Name < f.Entities[j].Name + sort.Slice(entities, func(i, j int) bool { + return entities[i].Name < entities[j].Name }) + + return entities +} + +func (f *Federation) buildEntity( + schemaType *ast.Definition, + schema *ast.Schema, + version int, +) *Entity { + keys, ok := isFederatedEntity(schemaType) + if !ok { + return nil + } + + if (schemaType.Kind == ast.Interface) && (len(schema.GetPossibleTypes(schemaType)) == 0) { + fmt.Printf("@key directive found on 
unused \"interface %s\". Will be ignored.\n", schemaType.Name) + return nil + } + + entity := &Entity{ + Name: schemaType.Name, + Def: schemaType, + Resolvers: nil, + Requires: nil, + Multi: isMultiEntity(schemaType), + } + + // If our schema has a field with a type defined in + // another service, then we need to define an "empty + // extend" of that type in this service, so this service + // knows what the type is like. But the graphql-server + // will never ask us to actually resolve this "empty + // extend", so we don't require a resolver function for + // it. (Well, it will never ask in practice; it's + // unclear whether the spec guarantees this. See + // https://github.com/apollographql/apollo-server/issues/3852 + // ). Example: + // type MyType { + // myvar: TypeDefinedInOtherService + // } + // // Federation needs this type, but + // // it doesn't need a resolver for it! + // extend TypeDefinedInOtherService @key(fields: "id") { + // id: ID @external + // } + if entity.allFieldsAreExternal(version) { + return entity + } + + entity.Resolvers = buildResolvers(schemaType, schema, keys, entity.Multi) + entity.Requires = buildRequires(schemaType) + if len(entity.Requires) > 0 { + f.usesRequires = true + } + + return entity +} + +func isMultiEntity(schemaType *ast.Definition) bool { + dir := schemaType.Directives.ForName(dirNameEntityResolver) + if dir == nil { + return false + } + + if dirArg := dir.Arguments.ForName("multi"); dirArg != nil { + if dirVal, err := dirArg.Value.Value(nil); err == nil { + return dirVal.(bool) + } + } + + return false +} + +func buildResolvers( + schemaType *ast.Definition, + schema *ast.Schema, + keys []*ast.Directive, + multi bool, +) []*EntityResolver { + resolvers := make([]*EntityResolver, 0) + for _, dir := range keys { + if len(dir.Arguments) > 2 { + panic("More than two arguments provided for @key declaration.") + } + keyFields, resolverFields := buildKeyFields( + schemaType, + schema, + dir, + ) + + resolverFieldsToGo := schemaType.Name + "By" + strings.Join(resolverFields, "And") + var resolverName string + if multi { + resolverFieldsToGo += "s" // Pluralize for better API readability + resolverName = fmt.Sprintf("findMany%s", resolverFieldsToGo) + } else { + resolverName = fmt.Sprintf("find%s", resolverFieldsToGo) + } + + resolvers = append(resolvers, &EntityResolver{ + ResolverName: resolverName, + KeyFields: keyFields, + InputTypeName: resolverFieldsToGo + "Input", + ReturnTypeName: schemaType.Name, + }) + } + + return resolvers +} + +func extractFields( + dir *ast.Directive, +) (string, error) { + var arg *ast.Argument + + // since directives are able to now have multiple arguments, we need to check both possible for a possible @key(fields="" fields="") + for _, a := range dir.Arguments { + if a.Name == DirArgFields { + if arg != nil { + return "", errors.New("more than one \"fields\" argument provided for declaration") + } + arg = a + } + } + + return arg.Value.Raw, nil +} + +func buildKeyFields( + schemaType *ast.Definition, + schema *ast.Schema, + dir *ast.Directive, +) ([]*KeyField, []string) { + fieldsRaw, err := extractFields(dir) + if err != nil { + panic("More than one `fields` argument provided for declaration.") + } + + keyFieldSet := fieldset.New(fieldsRaw, nil) + + keyFields := make([]*KeyField, len(keyFieldSet)) + resolverFields := []string{} + for i, field := range keyFieldSet { + def := field.FieldDefinition(schemaType, schema) + + if def == nil { + panic(fmt.Sprintf("no field for %v", field)) + } + + keyFields[i] = 
&KeyField{Definition: def, Field: field} + resolverFields = append(resolverFields, keyFields[i].Field.ToGo()) + } + + return keyFields, resolverFields +} + +func buildRequires(schemaType *ast.Definition) []*Requires { + requires := make([]*Requires, 0) + for _, f := range schemaType.Fields { + dir := f.Directives.ForName(dirNameRequires) + if dir == nil { + continue + } + + fieldsRaw, err := extractFields(dir) + if err != nil { + panic("Exactly one `fields` argument needed for @requires declaration.") + } + requiresFieldSet := fieldset.New(fieldsRaw, nil) + for _, field := range requiresFieldSet { + requires = append(requires, &Requires{ + Name: field.ToGoPrivate(), + Field: field, + }) + } + } + + return requires } func isFederatedEntity(schemaType *ast.Definition) ([]*ast.Directive, bool) { switch schemaType.Kind { case ast.Object: - keys := schemaType.Directives.ForNames("key") + keys := schemaType.Directives.ForNames(dirNameKey) if len(keys) > 0 { return keys, true } case ast.Interface: - keys := schemaType.Directives.ForNames("key") + keys := schemaType.Directives.ForNames(dirNameKey) if len(keys) > 0 { return keys, true } @@ -577,3 +567,146 @@ func isFederatedEntity(schemaType *ast.Definition) ([]*ast.Directive, bool) { } return nil, false } + +func (f *Federation) generateExplicitRequires( + data *codegen.Data, + requiresEntities map[string]*Entity, + requiresImports map[string]bool, +) error { + // check for existing requires functions + type Populator struct { + FuncName string + Exists bool + Comment string + Implementation string + Entity *Entity + } + populators := make([]Populator, 0) + + rewriter, err := rewrite.New(data.Config.Federation.Dir()) + if err != nil { + return err + } + + for name, entity := range requiresEntities { + populator := Populator{ + FuncName: fmt.Sprintf("Populate%sRequires", name), + Entity: entity, + } + + populator.Comment = strings.TrimSpace(strings.TrimLeft(rewriter.GetMethodComment("executionContext", populator.FuncName), `\`)) + populator.Implementation = strings.TrimSpace(rewriter.GetMethodBody("executionContext", populator.FuncName)) + + if populator.Implementation == "" { + populator.Exists = false + populator.Implementation = fmt.Sprintf("panic(fmt.Errorf(\"not implemented: %v\"))", populator.FuncName) + } + populators = append(populators, populator) + } + + sort.Slice(populators, func(i, j int) bool { + return populators[i].FuncName < populators[j].FuncName + }) + + requiresFile := data.Config.Federation.Dir() + "/federation.requires.go" + existingImports := rewriter.ExistingImports(requiresFile) + for _, imp := range existingImports { + if imp.Alias == "" { + // import exists in both places, remove + delete(requiresImports, imp.ImportPath) + } + } + + for k := range requiresImports { + existingImports = append(existingImports, rewrite.Import{ImportPath: k}) + } + + // render requires populators + return templates.Render(templates.Options{ + PackageName: data.Config.Federation.Package, + Filename: requiresFile, + Data: struct { + Federation + ExistingImports []rewrite.Import + Populators []Populator + OriginalSource string + }{*f, existingImports, populators, ""}, + GeneratedHeader: false, + Packages: data.Config.Packages, + Template: explicitRequiresTemplate, + }) +} + +func buildResolverSDL( + resolver *EntityResolver, + multi bool, +) (resolverSDL, entityResolverInputSDL string) { + if multi { + entityResolverInputSDL = buildEntityResolverInputDefinitionSDL(resolver) + resolverSDL := fmt.Sprintf("\t%s(reps: [%s]!): [%s]", 
resolver.ResolverName, resolver.InputTypeName, resolver.ReturnTypeName) + return resolverSDL, entityResolverInputSDL + } + + resolverArgs := "" + for _, keyField := range resolver.KeyFields { + resolverArgs += fmt.Sprintf("%s: %s,", keyField.Field.ToGoPrivate(), keyField.Definition.Type.String()) + } + resolverSDL = fmt.Sprintf("\t%s(%s): %s!", resolver.ResolverName, resolverArgs, resolver.ReturnTypeName) + return resolverSDL, "" +} + +func buildEntityResolverInputDefinitionSDL(resolver *EntityResolver) string { + entityResolverInputDefinition := "input " + resolver.InputTypeName + " {\n" + for _, keyField := range resolver.KeyFields { + entityResolverInputDefinition += fmt.Sprintf( + "\t%s: %s\n", + keyField.Field.ToGo(), + keyField.Definition.Type.String(), + ) + } + return entityResolverInputDefinition + "}" +} + +func (f *Federation) addMapType(cfg *config.Config) { + cfg.Models[mapTypeName] = config.TypeMapEntry{ + Model: config.StringList{"github.com/99designs/gqlgen/graphql.Map"}, + } + cfg.Schema.Types[mapTypeName] = &ast.Definition{ + Kind: ast.Scalar, + Name: mapTypeName, + Description: "Maps an arbitrary GraphQL value to a map[string]any Go type.", + } +} + +func (f *Federation) mutateSchemaForRequires( + schema *ast.Schema, + cfg *config.Config, +) { + for _, schemaType := range schema.Types { + for _, field := range schemaType.Fields { + if dir := field.Directives.ForName(dirNameRequires); dir != nil { + // ensure we always generate a resolver for any @requires field + model := cfg.Models[schemaType.Name] + fieldConfig := model.Fields[field.Name] + fieldConfig.Resolver = true + if model.Fields == nil { + model.Fields = make(map[string]config.TypeMapField) + } + model.Fields[field.Name] = fieldConfig + cfg.Models[schemaType.Name] = model + + requiresArgument := &ast.ArgumentDefinition{ + Name: fieldArgRequires, + Type: ast.NamedType(mapTypeName, nil), + Directives: ast.DirectiveList{ + { + Name: dirNamePopulateFromRepresentations, + Definition: dirPopulateFromRepresentations, + }, + }, + } + field.Arguments = append(field.Arguments, requiresArgument) + } + } + } +} diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl index 119bab5b..fdb40a6a 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl +++ b/vendor/github.com/99designs/gqlgen/plugin/federation/federation.gotpl @@ -36,15 +36,50 @@ func (ec *executionContext) __resolve__service(ctx context.Context) (fedruntime. 
func (ec *executionContext) __resolve_entities(ctx context.Context, representations []map[string]interface{}) []fedruntime.Entity { list := make([]fedruntime.Entity, len(representations)) - repsMap := map[string]struct { - i []int - r []map[string]interface{} - }{} + repsMap := ec.buildRepresentationGroups(ctx, representations) + + switch len(repsMap) { + case 0: + return list + case 1: + for typeName, reps := range repsMap { + ec.resolveEntityGroup(ctx, typeName, reps, list) + } + return list + default: + var g sync.WaitGroup + g.Add(len(repsMap)) + for typeName, reps := range repsMap { + go func(typeName string, reps []EntityWithIndex) { + ec.resolveEntityGroup(ctx, typeName, reps, list) + g.Done() + }(typeName, reps) + } + g.Wait() + return list + } +} + +type EntityWithIndex struct { + // The index in the original representation array + index int + entity EntityRepresentation +} + +// EntityRepresentation is the JSON representation of an entity sent by the Router +// used as the inputs for us to resolve. +// +// We make it a map because we know the top level JSON is always an object. +type EntityRepresentation map[string]any // We group entities by typename so that we can parallelize their resolution. // This is particularly helpful when there are entity groups in multi mode. - buildRepresentationGroups := func(reps []map[string]interface{}) { - for i, rep := range reps { +func (ec *executionContext) buildRepresentationGroups( + ctx context.Context, + representations []map[string]any, +) map[string][]EntityWithIndex { + repsMap := make(map[string][]EntityWithIndex) + for i, rep := range representations { typeName, ok := rep["__typename"].(string) if !ok { // If there is no __typename, we just skip the representation; @@ -53,14 +88,48 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati continue } - _r := repsMap[typeName] - _r.i = append(_r.i, i) - _r.r = append(_r.r, rep) - repsMap[typeName] = _r - } + repsMap[typeName] = append(repsMap[typeName], EntityWithIndex{ + index: i, + entity: rep, + }) } - isMulti := func(typeName string) bool { + return repsMap +} + +func (ec *executionContext) resolveEntityGroup( + ctx context.Context, + typeName string, + reps []EntityWithIndex, + list []fedruntime.Entity, +) { + if isMulti(typeName) { + err := ec.resolveManyEntities(ctx, typeName, reps, list) + if err != nil { + ec.Error(ctx, err) + } + } else { + // if there are multiple entities to resolve, parallelize (similar to + // graphql.FieldSet.Dispatch) + var e sync.WaitGroup + e.Add(len(reps)) + for i, rep := range reps { + i, rep := i, rep + go func(i int, rep EntityWithIndex) { + entity, err := ec.resolveEntity(ctx, typeName, rep.entity) + if err != nil { + ec.Error(ctx, err) + } else { + list[rep.index] = entity + } + e.Done() + }(i, rep) + } + e.Wait() + } +} + +func isMulti(typeName string) bool { switch typeName { {{- range .Entities -}} {{- if .Resolvers -}} @@ -75,7 +144,11 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati } } - resolveEntity := func(ctx context.Context, typeName string, rep map[string]interface{}, idx []int, i int) (err error) { +func (ec *executionContext) resolveEntity( + ctx context.Context, + typeName string, + rep EntityRepresentation, +) (e fedruntime.Entity, err error) { // we need to do our own panic handling, because we may be called in a // goroutine, where the usual panic handling can't catch us defer func () { @@ -90,45 +163,51 @@ func (ec *executionContext) __resolve_entities(ctx 
context.Context, representati case "{{.Def.Name}}": resolverName, err := entityResolverNameFor{{.Def.Name}}(ctx, rep) if err != nil { - return fmt.Errorf(`finding resolver for Entity "{{.Def.Name}}": %w`, err) + return nil, fmt.Errorf(`finding resolver for Entity "{{.Def.Name}}": %w`, err) } switch resolverName { {{ range $i, $resolver := .Resolvers }} case "{{.ResolverName}}": {{- range $j, $keyField := .KeyFields }} - id{{$j}}, err := ec.{{.Type.UnmarshalFunc}}(ctx, rep["{{.Field.Join `"].(map[string]interface{})["`}}"]) + id{{$j}}, err := ec.{{.Type.UnmarshalFunc}}(ctx, rep["{{.Field.Join `"].(map[string]interface{})["`}}"]) if err != nil { - return fmt.Errorf(`unmarshalling param {{$j}} for {{$resolver.ResolverName}}(): %w`, err) + return nil, fmt.Errorf(`unmarshalling param {{$j}} for {{$resolver.ResolverName}}(): %w`, err) } {{- end}} entity, err := ec.resolvers.Entity().{{.ResolverName | go}}(ctx, {{- range $j, $_ := .KeyFields -}} id{{$j}}, {{end}}) if err != nil { - return fmt.Errorf(`resolving Entity "{{$entity.Def.Name}}": %w`, err) + return nil, fmt.Errorf(`resolving Entity "{{$entity.Def.Name}}": %w`, err) } - {{ if and (index $options "explicit_requires") $entity.Requires }} + {{- if $options.ComputedRequires }} + {{/* We don't do anything in this case, computed requires are handled by standard resolvers */}} + {{- else if and $options.ExplicitRequires $entity.Requires }} err = ec.Populate{{$entity.Def.Name}}Requires(ctx, {{- if (not $usePointers) -}}&{{- end -}}entity, rep) if err != nil { - return fmt.Errorf(`populating requires for Entity "{{$entity.Def.Name}}": %w`, err) + return nil, fmt.Errorf(`populating requires for Entity "{{$entity.Def.Name}}": %w`, err) } {{- else }} {{ range $entity.Requires }} entity.{{.Field.JoinGo `.`}}, err = ec.{{.Type.UnmarshalFunc}}(ctx, rep["{{.Field.Join `"].(map[string]interface{})["`}}"]) if err != nil { - return err + return nil, err } {{- end }} {{- end }} - list[idx[i]] = entity - return nil + return entity, nil {{- end }} } {{ end }} {{- end }} } - return fmt.Errorf("%w: %s", ErrUnknownType, typeName) + return nil, fmt.Errorf("%w: %s", ErrUnknownType, typeName) } - resolveManyEntities := func(ctx context.Context, typeName string, reps []map[string]interface{}, idx []int) (err error) { +func (ec *executionContext) resolveManyEntities( + ctx context.Context, + typeName string, + reps []EntityWithIndex, + list []fedruntime.Entity, +) (err error) { // we need to do our own panic handling, because we may be called in a // goroutine, where the usual panic handling can't catch us defer func () { @@ -141,43 +220,43 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati {{ range $_, $entity := .Entities }} {{ if and .Resolvers .Multi -}} case "{{.Def.Name}}": - resolverName, err := entityResolverNameFor{{.Def.Name}}(ctx, reps[0]) + resolverName, err := entityResolverNameFor{{.Def.Name}}(ctx, reps[0].entity) if err != nil { return fmt.Errorf(`finding resolver for Entity "{{.Def.Name}}": %w`, err) } switch resolverName { {{ range $i, $resolver := .Resolvers }} case "{{.ResolverName}}": - _reps := make([]*{{.LookupInputType}}, len(reps)) + typedReps := make([]*{{.LookupInputType}}, len(reps)) for i, rep := range reps { {{ range $i, $keyField := .KeyFields -}} - id{{$i}}, err := ec.{{.Type.UnmarshalFunc}}(ctx, rep["{{.Field.Join `"].(map[string]interface{})["`}}"]) + id{{$i}}, err := ec.{{.Type.UnmarshalFunc}}(ctx, rep.entity["{{.Field.Join `"].(map[string]interface{})["`}}"]) if err != nil { return 
errors.New(fmt.Sprintf("Field %s undefined in schema.", "{{.Definition.Name}}")) } {{end}} - _reps[i] = &{{.LookupInputType}} { + typedReps[i] = &{{.LookupInputType}} { {{ range $i, $keyField := .KeyFields -}} {{$keyField.Field.ToGo}}: id{{$i}}, {{end}} } } - entities, err := ec.resolvers.Entity().{{.ResolverName | go}}(ctx, _reps) + entities, err := ec.resolvers.Entity().{{.ResolverName | go}}(ctx, typedReps) if err != nil { return err } for i, entity := range entities { {{- range $entity.Requires }} - entity.{{.Field.JoinGo `.`}}, err = ec.{{.Type.UnmarshalFunc}}(ctx, reps[i]["{{.Field.Join `"].(map[string]interface{})["`}}"]) + entity.{{.Field.JoinGo `.`}}, err = ec.{{.Type.UnmarshalFunc}}(ctx, reps[i].entity["{{.Field.Join `"].(map[string]interface{})["`}}"]) if err != nil { return err } {{- end}} - list[idx[i]] = entity + list[reps[i].index] = entity } return nil {{ end }} @@ -188,54 +267,6 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati {{- end }} default: return errors.New("unknown type: "+typeName) - } - } - - resolveEntityGroup := func(typeName string, reps []map[string]interface{}, idx []int) { - if isMulti(typeName) { - err := resolveManyEntities(ctx, typeName, reps, idx) - if err != nil { - ec.Error(ctx, err) - } - } else { - // if there are multiple entities to resolve, parallelize (similar to - // graphql.FieldSet.Dispatch) - var e sync.WaitGroup - e.Add(len(reps)) - for i, rep := range reps { - i, rep := i, rep - go func(i int, rep map[string]interface{}) { - err := resolveEntity(ctx, typeName, rep, idx, i) - if err != nil { - ec.Error(ctx, err) - } - e.Done() - }(i, rep) - } - e.Wait() - } - } - buildRepresentationGroups(representations) - - switch len(repsMap) { - case 0: - return list - case 1: - for typeName, reps := range repsMap { - resolveEntityGroup(typeName, reps.r, reps.i) - } - return list - default: - var g sync.WaitGroup - g.Add(len(repsMap)) - for typeName, reps := range repsMap { - go func(typeName string, reps []map[string]interface{}, idx []int) { - resolveEntityGroup(typeName, reps, idx) - g.Done() - }(typeName, reps.r, reps.i) - } - g.Wait() - return list } } @@ -244,13 +275,13 @@ func (ec *executionContext) __resolve_entities(ctx context.Context, representati {{ range $_, $entity := .Entities }} {{- if .Resolvers }} - func entityResolverNameFor{{$entity.Name}}(ctx context.Context, rep map[string]interface{}) (string, error) { + func entityResolverNameFor{{$entity.Name}}(ctx context.Context, rep EntityRepresentation) (string, error) { {{- range .Resolvers }} for { var ( - m map[string]interface{} + m EntityRepresentation val interface{} - ok bool + ok bool ) _ = val // if all of the KeyFields values for this resolver are null, diff --git a/vendor/github.com/99designs/gqlgen/plugin/federation/readme.md b/vendor/github.com/99designs/gqlgen/plugin/federation/readme.md index d5dd0628..e8888c1c 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/federation/readme.md +++ b/vendor/github.com/99designs/gqlgen/plugin/federation/readme.md @@ -18,7 +18,7 @@ TODO(miguel): add details. # Entity resolvers - GetMany entities -The federation plugin implements `GetMany` semantics in which entity resolvers get the entire list of representations that need to be resolved. This functionality is currently optin tho, and to enable it you need to specify the directive `@entityResolver` in the federated entity you want this feature for. E.g. 
+The federation plugin implements `GetMany` semantics in which entity resolvers get the entire list of representations that need to be resolved. This functionality is currently option tho, and to enable it you need to specify the directive `@entityResolver` in the federated entity you want this feature for. E.g. ``` directive @entityResolver(multi: Boolean) on OBJECT @@ -39,4 +39,4 @@ func (r *entityResolver) FindManyMultiHellosByName(ctx context.Context, reps []* ``` **Note:** -If you are using `omit_slice_element_pointers: true` option in your config yaml, your `GetMany` resolver will still generate in the example above the same signature `FindManyMultiHellosByName(ctx context.Context, reps []*generated.ManyMultiHellosByNameInput) ([]*generated.MultiHello, error)`. But all other instances will continue to honor `omit_slice_element_pointers: true` \ No newline at end of file +If you are using `omit_slice_element_pointers: true` option in your config yaml, your `GetMany` resolver will still generate in the example above the same signature `FindManyMultiHellosByName(ctx context.Context, reps []*generated.ManyMultiHellosByNameInput) ([]*generated.MultiHello, error)`. But all other instances will continue to honor `omit_slice_element_pointers: true` diff --git a/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go b/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go index 5f6ce94e..660e3537 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go +++ b/vendor/github.com/99designs/gqlgen/plugin/modelgen/models.go @@ -26,11 +26,6 @@ type ( // DefaultFieldMutateHook is the default hook for the Plugin which applies the GoFieldHook and GoTagFieldHook. func DefaultFieldMutateHook(td *ast.Definition, fd *ast.FieldDefinition, f *Field) (*Field, error) { - var err error - f, err = GoFieldHook(td, fd, f) - if err != nil { - return f, err - } return GoTagFieldHook(td, fd, f) } @@ -337,111 +332,16 @@ func (m *Plugin) generateFields(cfg *config.Config, schemaType *ast.Definition) binder := cfg.NewBinder() fields := make([]*Field, 0) - var omittableType types.Type - for _, field := range schemaType.Fields { - var typ types.Type - fieldDef := cfg.Schema.Types[field.Type.Name()] - - if cfg.Models.UserDefined(field.Type.Name()) { - var err error - typ, err = binder.FindTypeFromName(cfg.Models[field.Type.Name()].Model[0]) - if err != nil { - return nil, err - } - } else { - switch fieldDef.Kind { - case ast.Scalar: - // no user defined model, referencing a default scalar - typ = types.NewNamed( - types.NewTypeName(0, cfg.Model.Pkg(), "string", nil), - nil, - nil, - ) - - case ast.Interface, ast.Union: - // no user defined model, referencing a generated interface type - typ = types.NewNamed( - types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil), - types.NewInterfaceType([]*types.Func{}, []types.Type{}), - nil, - ) - - case ast.Enum: - // no user defined model, must reference a generated enum - typ = types.NewNamed( - types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil), - nil, - nil, - ) - - case ast.Object, ast.InputObject: - // no user defined model, must reference a generated struct - typ = types.NewNamed( - types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil), - types.NewStruct(nil, nil), - nil, - ) - - default: - panic(fmt.Errorf("unknown ast type %s", fieldDef.Kind)) - } + f, err := m.generateField(cfg, binder, schemaType, field) + if err != nil { + return nil, err } - name := 
templates.ToGo(field.Name) - if nameOveride := cfg.Models[schemaType.Name].Fields[field.Name].FieldName; nameOveride != "" { - name = nameOveride - } - - typ = binder.CopyModifiersFromAst(field.Type, typ) - - if cfg.StructFieldsAlwaysPointers { - if isStruct(typ) && (fieldDef.Kind == ast.Object || fieldDef.Kind == ast.InputObject) { - typ = types.NewPointer(typ) - } - } - - f := &Field{ - Name: field.Name, - GoName: name, - Type: typ, - Description: field.Description, - Tag: getStructTagFromField(cfg, field), - Omittable: cfg.NullableInputOmittable && schemaType.Kind == ast.InputObject && !field.Type.NonNull, - } - - if m.FieldHook != nil { - mf, err := m.FieldHook(schemaType, field, f) - if err != nil { - return nil, fmt.Errorf("generror: field %v.%v: %w", schemaType.Name, field.Name, err) - } - f = mf - } - - if f.IsResolver && cfg.OmitResolverFields { + if f == nil { continue } - if f.Omittable { - if schemaType.Kind != ast.InputObject || field.Type.NonNull { - return nil, fmt.Errorf("generror: field %v.%v: omittable is only applicable to nullable input fields", schemaType.Name, field.Name) - } - - var err error - - if omittableType == nil { - omittableType, err = binder.FindTypeFromName("github.com/99designs/gqlgen/graphql.Omittable") - if err != nil { - return nil, err - } - } - - f.Type, err = binder.InstantiateType(omittableType, []types.Type{f.Type}) - if err != nil { - return nil, fmt.Errorf("generror: field %v.%v: %w", schemaType.Name, field.Name, err) - } - } - fields = append(fields, f) } @@ -450,6 +350,123 @@ func (m *Plugin) generateFields(cfg *config.Config, schemaType *ast.Definition) return fields, nil } +func (m *Plugin) generateField( + cfg *config.Config, + binder *config.Binder, + schemaType *ast.Definition, + field *ast.FieldDefinition, +) (*Field, error) { + var omittableType types.Type + var typ types.Type + fieldDef := cfg.Schema.Types[field.Type.Name()] + + if cfg.Models.UserDefined(field.Type.Name()) { + var err error + typ, err = binder.FindTypeFromName(cfg.Models[field.Type.Name()].Model[0]) + if err != nil { + return nil, err + } + } else { + switch fieldDef.Kind { + case ast.Scalar: + // no user defined model, referencing a default scalar + typ = types.NewNamed( + types.NewTypeName(0, cfg.Model.Pkg(), "string", nil), + nil, + nil, + ) + + case ast.Interface, ast.Union: + // no user defined model, referencing a generated interface type + typ = types.NewNamed( + types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil), + types.NewInterfaceType([]*types.Func{}, []types.Type{}), + nil, + ) + + case ast.Enum: + // no user defined model, must reference a generated enum + typ = types.NewNamed( + types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil), + nil, + nil, + ) + + case ast.Object, ast.InputObject: + // no user defined model, must reference a generated struct + typ = types.NewNamed( + types.NewTypeName(0, cfg.Model.Pkg(), templates.ToGo(field.Type.Name()), nil), + types.NewStruct(nil, nil), + nil, + ) + + default: + panic(fmt.Errorf("unknown ast type %s", fieldDef.Kind)) + } + } + + name := templates.ToGo(field.Name) + if nameOverride := cfg.Models[schemaType.Name].Fields[field.Name].FieldName; nameOverride != "" { + name = nameOverride + } + + typ = binder.CopyModifiersFromAst(field.Type, typ) + + if cfg.StructFieldsAlwaysPointers { + if isStruct(typ) && (fieldDef.Kind == ast.Object || fieldDef.Kind == ast.InputObject) { + typ = types.NewPointer(typ) + } + } + + f := &Field{ + Name: field.Name, + GoName: name, + 
Type: typ, + Description: field.Description, + Tag: getStructTagFromField(cfg, field), + Omittable: cfg.NullableInputOmittable && schemaType.Kind == ast.InputObject && !field.Type.NonNull, + IsResolver: cfg.Models[schemaType.Name].Fields[field.Name].Resolver, + } + + if omittable := cfg.Models[schemaType.Name].Fields[field.Name].Omittable; omittable != nil { + f.Omittable = *omittable + } + + if m.FieldHook != nil { + mf, err := m.FieldHook(schemaType, field, f) + if err != nil { + return nil, fmt.Errorf("generror: field %v.%v: %w", schemaType.Name, field.Name, err) + } + f = mf + } + + if f.IsResolver && cfg.OmitResolverFields { + return nil, nil + } + + if f.Omittable { + if schemaType.Kind != ast.InputObject || field.Type.NonNull { + return nil, fmt.Errorf("generror: field %v.%v: omittable is only applicable to nullable input fields", schemaType.Name, field.Name) + } + + var err error + + if omittableType == nil { + omittableType, err = binder.FindTypeFromName("github.com/99designs/gqlgen/graphql.Omittable") + if err != nil { + return nil, err + } + } + + f.Type, err = binder.InstantiateType(omittableType, []types.Type{f.Type}) + if err != nil { + return nil, fmt.Errorf("generror: field %v.%v: %w", schemaType.Name, field.Name, err) + } + } + + return f, nil +} + func getExtraFields(cfg *config.Config, modelName string) []*Field { modelcfg := cfg.Models[modelName] @@ -636,29 +653,9 @@ func removeDuplicateTags(t string) string { return returnTags } -// GoFieldHook applies the goField directive to the generated Field f. +// GoFieldHook is a noop +// TODO: This will be removed in the next breaking release func GoFieldHook(td *ast.Definition, fd *ast.FieldDefinition, f *Field) (*Field, error) { - args := make([]string, 0) - _ = args - for _, goField := range fd.Directives.ForNames("goField") { - if arg := goField.Arguments.ForName("name"); arg != nil { - if k, err := arg.Value.Value(nil); err == nil { - f.GoName = k.(string) - } - } - - if arg := goField.Arguments.ForName("forceResolver"); arg != nil { - if k, err := arg.Value.Value(nil); err == nil { - f.IsResolver = k.(bool) - } - } - - if arg := goField.Arguments.ForName("omittable"); arg != nil { - if k, err := arg.Value.Value(nil); err == nil { - f.Omittable = k.(bool) - } - } - } return f, nil } diff --git a/vendor/github.com/99designs/gqlgen/plugin/plugin.go b/vendor/github.com/99designs/gqlgen/plugin/plugin.go index a5e1ba84..6cc4f6da 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/plugin.go +++ b/vendor/github.com/99designs/gqlgen/plugin/plugin.go @@ -22,11 +22,18 @@ type CodeGenerator interface { } // EarlySourceInjector is used to inject things that are required for user schema files to compile. +// Deprecated: Use EarlySourcesInjector instead type EarlySourceInjector interface { InjectSourceEarly() *ast.Source } +// EarlySourcesInjector is used to inject things that are required for user schema files to compile. +type EarlySourcesInjector interface { + InjectSourcesEarly() ([]*ast.Source, error) +} + // LateSourceInjector is used to inject more sources, after we have loaded the users schema. +// Deprecated: Use LateSourcesInjector instead type LateSourceInjector interface { InjectSourceLate(schema *ast.Schema) *ast.Source } @@ -35,3 +42,8 @@ type LateSourceInjector interface { type ResolverImplementer interface { Implement(prevImplementation string, field *codegen.Field) string } + +// LateSourcesInjector is used to inject more sources, after we have loaded the users schema. 
+type LateSourcesInjector interface { + InjectSourcesLate(schema *ast.Schema) ([]*ast.Source, error) +} diff --git a/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go b/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go index 38138d52..c0e04089 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go +++ b/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.go @@ -53,26 +53,44 @@ func (m *Plugin) GenerateCode(data *codegen.Data) error { func (m *Plugin) generateSingleFile(data *codegen.Data) error { file := File{} - - if _, err := os.Stat(data.Config.Resolver.Filename); err == nil { - // file already exists and we do not support updating resolvers with layout = single so just return - return nil + rewriter, err := rewrite.New(data.Config.Resolver.Dir()) + if err != nil { + return err } for _, o := range data.Objects { if o.HasResolvers() { + caser := cases.Title(language.English, cases.NoLower) + rewriter.MarkStructCopied(templates.LcFirst(o.Name) + templates.UcFirst(data.Config.Resolver.Type)) + rewriter.GetMethodBody(data.Config.Resolver.Type, caser.String(o.Name)) + file.Objects = append(file.Objects, o) } + for _, f := range o.Fields { if !f.IsResolver { continue } - resolver := Resolver{o, f, nil, "", `panic("not implemented")`, nil} - file.Resolvers = append(file.Resolvers, &resolver) + structName := templates.LcFirst(o.Name) + templates.UcFirst(data.Config.Resolver.Type) + comment := strings.TrimSpace(strings.TrimLeft(rewriter.GetMethodComment(structName, f.GoFieldName), `\`)) + implementation := strings.TrimSpace(rewriter.GetMethodBody(structName, f.GoFieldName)) + if implementation != "" { + resolver := Resolver{o, f, rewriter.GetPrevDecl(structName, f.GoFieldName), comment, implementation, nil} + file.Resolvers = append(file.Resolvers, &resolver) + } else { + resolver := Resolver{o, f, nil, "", `panic("not implemented")`, nil} + file.Resolvers = append(file.Resolvers, &resolver) + } } } + if _, err := os.Stat(data.Config.Resolver.Filename); err == nil { + file.name = data.Config.Resolver.Filename + file.imports = rewriter.ExistingImports(file.name) + file.RemainingSource = rewriter.RemainingSource(file.name) + } + resolverBuild := &ResolverBuild{ File: &file, PackageName: data.Config.Resolver.Package, @@ -88,7 +106,7 @@ func (m *Plugin) generateSingleFile(data *codegen.Data) error { return templates.Render(templates.Options{ PackageName: data.Config.Resolver.Package, - FileNotice: `// THIS CODE IS A STARTING POINT ONLY. IT WILL NOT BE UPDATED WITH SCHEMA CHANGES.`, + FileNotice: `// THIS CODE WILL BE UPDATED WITH SCHEMA CHANGES. PREVIOUS IMPLEMENTATION FOR SCHEMA CHANGES WILL BE KEPT IN THE COMMENT SECTION. IMPLEMENTATION FOR UNCHANGED SCHEMA WILL BE KEPT.`, Filename: data.Config.Resolver.Filename, Data: resolverBuild, Packages: data.Config.Packages, diff --git a/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.gotpl b/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.gotpl index c25bd1d5..ad6c1085 100644 --- a/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.gotpl +++ b/vendor/github.com/99designs/gqlgen/plugin/resolvergen/resolver.gotpl @@ -48,5 +48,7 @@ // - When renaming or deleting a resolver the old code will be put in here. You can safely delete // it when you're done. // - You have helper methods in this file. Move them out to keep these resolver files clean. 
+ /* {{ .RemainingSource }} + */ {{ end }} diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md index f1262642..f95a504f 100644 --- a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -1,5 +1,33 @@ # Changelog +## 3.3.0 (2024-08-27) + +### Added + +- #238: Add LessThanEqual and GreaterThanEqual functions (thanks @grosser) +- #213: nil version equality checking (thanks @KnutZuidema) + +### Changed + +- #241: Simplify StrictNewVersion parsing (thanks @grosser) +- Testing support up through Go 1.23 +- Minimum version set to 1.21 as this is what's tested now +- Fuzz testing now supports caching + +## 3.2.1 (2023-04-10) + +### Changed + +- #198: Improved testing around pre-release names +- #200: Improved code scanning with addition of CodeQL +- #201: Testing now includes Go 1.20. Go 1.17 has been dropped +- #202: Migrated Fuzz testing to Go built-in Fuzzing. CI runs daily +- #203: Docs updated for security details + +### Fixed + +- #199: Fixed issue with range transformations + ## 3.2.0 (2022-11-28) ### Added diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile index 0e7b5c71..9ca87a2c 100644 --- a/vendor/github.com/Masterminds/semver/v3/Makefile +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -19,6 +19,7 @@ test-cover: .PHONY: fuzz fuzz: @echo "==> Running Fuzz Tests" + go env GOCACHE go test -fuzz=FuzzNewVersion -fuzztime=15s . go test -fuzz=FuzzStrictNewVersion -fuzztime=15s . go test -fuzz=FuzzNewConstraint -fuzztime=15s . @@ -27,4 +28,4 @@ $(GOLANGCI_LINT): # Install golangci-lint. The configuration for it is in the .golangci.yml # file in the root of the repository echo ${GOPATH} - curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.56.2 diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md index eab8cac3..ed569360 100644 --- a/vendor/github.com/Masterminds/semver/v3/README.md +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -13,12 +13,9 @@ Active](https://masterminds.github.io/stability/active.svg)](https://masterminds [![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) [![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) -If you are looking for a command line tool for version comparisons please see -[vert](https://github.com/Masterminds/vert) which uses this library. - ## Package Versions -Note, import `github.com/github.com/Masterminds/semver/v3` to use the latest version. +Note, import `github.com/Masterminds/semver/v3` to use the latest version. There are three major versions of the `semver` package. @@ -80,12 +77,12 @@ There are two methods for comparing versions. One uses comparison methods on differences to note between these two methods of comparison. 1. When two versions are compared using functions such as `Compare`, `LessThan`, - and others it will follow the specification and always include prereleases + and others it will follow the specification and always include pre-releases within the comparison. 
It will provide an answer that is valid with the comparison section of the spec at https://semver.org/#spec-item-11 2. When constraint checking is used for checks or validation it will follow a different set of rules that are common for ranges with tools like npm/js - and Rust/Cargo. This includes considering prereleases to be invalid if the + and Rust/Cargo. This includes considering pre-releases to be invalid if the ranges does not include one. If you want to have it include pre-releases a simple solution is to include `-0` in your range. 3. Constraint ranges can have some complex rules including the shorthand use of @@ -113,7 +110,7 @@ v, err := semver.NewVersion("1.3") if err != nil { // Handle version not being parsable. } -// Check if the version meets the constraints. The a variable will be true. +// Check if the version meets the constraints. The variable a will be true. a := c.Check(v) ``` @@ -137,20 +134,20 @@ The basic comparisons are: ### Working With Prerelease Versions Pre-releases, for those not familiar with them, are used for software releases -prior to stable or generally available releases. Examples of prereleases include -development, alpha, beta, and release candidate releases. A prerelease may be +prior to stable or generally available releases. Examples of pre-releases include +development, alpha, beta, and release candidate releases. A pre-release may be a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the -order of precedence, prereleases come before their associated releases. In this +order of precedence, pre-releases come before their associated releases. In this example `1.2.3-beta.1 < 1.2.3`. -According to the Semantic Version specification prereleases may not be +According to the Semantic Version specification, pre-releases may not be API compliant with their release counterpart. It says, > A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. -SemVer comparisons using constraints without a prerelease comparator will skip -prerelease versions. For example, `>=1.2.3` will skip prereleases when looking -at a list of releases while `>=1.2.3-0` will evaluate and find prereleases. +SemVer's comparisons using constraints without a pre-release comparator will skip +pre-release versions. For example, `>=1.2.3` will skip pre-releases when looking +at a list of releases while `>=1.2.3-0` will evaluate and find pre-releases. The reason for the `0` as a pre-release version in the example comparison is because pre-releases can only contain ASCII alphanumerics and hyphens (along with @@ -171,6 +168,9 @@ These look like: * `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` +Note that `1.2-1.4.5` without whitespace is parsed completely differently; it's +parsed as a single constraint `1.2.0` with _prerelease_ `1.4.5`. + ### Wildcards In Comparisons The `x`, `X`, and `*` characters can be used as a wildcard character. 
This works diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go index 7c4bed33..ff499fb6 100644 --- a/vendor/github.com/Masterminds/semver/v3/version.go +++ b/vendor/github.com/Masterminds/semver/v3/version.go @@ -83,22 +83,23 @@ func StrictNewVersion(v string) (*Version, error) { original: v, } - // check for prerelease or build metadata - var extra []string - if strings.ContainsAny(parts[2], "-+") { - // Start with the build metadata first as it needs to be on the right - extra = strings.SplitN(parts[2], "+", 2) - if len(extra) > 1 { - // build metadata found - sv.metadata = extra[1] - parts[2] = extra[0] + // Extract build metadata + if strings.Contains(parts[2], "+") { + extra := strings.SplitN(parts[2], "+", 2) + sv.metadata = extra[1] + parts[2] = extra[0] + if err := validateMetadata(sv.metadata); err != nil { + return nil, err } + } - extra = strings.SplitN(parts[2], "-", 2) - if len(extra) > 1 { - // prerelease found - sv.pre = extra[1] - parts[2] = extra[0] + // Extract build prerelease + if strings.Contains(parts[2], "-") { + extra := strings.SplitN(parts[2], "-", 2) + sv.pre = extra[1] + parts[2] = extra[0] + if err := validatePrerelease(sv.pre); err != nil { + return nil, err } } @@ -114,7 +115,7 @@ func StrictNewVersion(v string) (*Version, error) { } } - // Extract the major, minor, and patch elements onto the returned Version + // Extract major, minor, and patch var err error sv.major, err = strconv.ParseUint(parts[0], 10, 64) if err != nil { @@ -131,23 +132,6 @@ func StrictNewVersion(v string) (*Version, error) { return nil, err } - // No prerelease or build metadata found so returning now as a fastpath. - if sv.pre == "" && sv.metadata == "" { - return sv, nil - } - - if sv.pre != "" { - if err = validatePrerelease(sv.pre); err != nil { - return nil, err - } - } - - if sv.metadata != "" { - if err = validateMetadata(sv.metadata); err != nil { - return nil, err - } - } - return sv, nil } @@ -381,15 +365,31 @@ func (v *Version) LessThan(o *Version) bool { return v.Compare(o) < 0 } +// LessThanEqual tests if one version is less or equal than another one. +func (v *Version) LessThanEqual(o *Version) bool { + return v.Compare(o) <= 0 +} + // GreaterThan tests if one version is greater than another one. func (v *Version) GreaterThan(o *Version) bool { return v.Compare(o) > 0 } +// GreaterThanEqual tests if one version is greater or equal than another one. +func (v *Version) GreaterThanEqual(o *Version) bool { + return v.Compare(o) >= 0 +} + // Equal tests if two versions are equal to each other. // Note, versions can be equal with different metadata since metadata // is not considered part of the comparable version. 
func (v *Version) Equal(o *Version) bool { + if v == o { + return true + } + if v == nil || o == nil { + return false + } return v.Compare(o) == 0 } diff --git a/vendor/github.com/adhocore/gronx/README.md b/vendor/github.com/adhocore/gronx/README.md index 0b3b78f5..00682ee7 100644 --- a/vendor/github.com/adhocore/gronx/README.md +++ b/vendor/github.com/adhocore/gronx/README.md @@ -47,6 +47,14 @@ gron.IsDue(expr) // true|false, nil gron.IsDue(expr, time.Date(2021, time.April, 1, 1, 1, 0, 0, time.UTC)) // true|false, nil ``` +> Validity can be checked without instantiation: + +```go +import "github.com/adhocore/gronx" + +gronx.IsValid("* * * * *") // true +``` + ### Batch Due Check If you have multiple cron expressions to check due on same reference time use `BatchDue()`: diff --git a/vendor/github.com/adhocore/gronx/gronx.go b/vendor/github.com/adhocore/gronx/gronx.go index 958d8fb1..82c0baa4 100644 --- a/vendor/github.com/adhocore/gronx/gronx.go +++ b/vendor/github.com/adhocore/gronx/gronx.go @@ -75,7 +75,9 @@ func New() *Gronx { // IsDue checks if cron expression is due for given reference time (or now). // It returns bool or error if any. func (g *Gronx) IsDue(expr string, ref ...time.Time) (bool, error) { - ref = append(ref, time.Now()) + if len(ref) == 0 { + ref = append(ref, time.Now()) + } g.C.SetRef(ref[0]) segs, err := Segments(expr) @@ -157,12 +159,16 @@ func (g *Gronx) SegmentsDue(segs []string) (bool, error) { return true, nil } +// IsValid checks if cron expression is valid. +// It returns bool. +func (g *Gronx) IsValid(expr string) bool { return IsValid(expr) } + // checker for validity var checker = &SegmentChecker{ref: time.Now()} // IsValid checks if cron expression is valid. // It returns bool. -func (g *Gronx) IsValid(expr string) bool { +func IsValid(expr string) bool { segs, err := Segments(expr) if err != nil { return false diff --git a/vendor/github.com/adhocore/gronx/validator.go b/vendor/github.com/adhocore/gronx/validator.go index 62c3a363..d7441d4a 100644 --- a/vendor/github.com/adhocore/gronx/validator.go +++ b/vendor/github.com/adhocore/gronx/validator.go @@ -14,7 +14,7 @@ func inStep(val int, s string, bounds []int) (bool, error) { if err != nil { return false, err } - if step == 0 { + if step <= 0 { return false, errors.New("step can't be 0") } diff --git a/vendor/github.com/agnivade/levenshtein/.travis.yml b/vendor/github.com/agnivade/levenshtein/.travis.yml deleted file mode 100644 index 0873fa98..00000000 --- a/vendor/github.com/agnivade/levenshtein/.travis.yml +++ /dev/null @@ -1,23 +0,0 @@ -language: go - -# See https://travis-ci.community/t/goos-js-goarch-wasm-go-run-fails-panic-newosproc-not-implemented/1651 -#addons: -# chrome: stable - -before_install: -- export GO111MODULE=on - -#install: -#- go get github.com/agnivade/wasmbrowsertest -#- mv $GOPATH/bin/wasmbrowsertest $GOPATH/bin/go_js_wasm_exec -#- export PATH=$GOPATH/bin:$PATH - -go: -- 1.13.x -- 1.14.x -- 1.15.x -- tip - -script: -#- GOOS=js GOARCH=wasm go test -v -- go test -v diff --git a/vendor/github.com/agnivade/levenshtein/Makefile b/vendor/github.com/agnivade/levenshtein/Makefile index 5f6890d6..3bbda319 100644 --- a/vendor/github.com/agnivade/levenshtein/Makefile +++ b/vendor/github.com/agnivade/levenshtein/Makefile @@ -4,12 +4,10 @@ install: go install lint: - gofmt -l -s -w . && go vet . && golint -set_exit_status=1 . + gofmt -l -s -w . && go vet . 
-test: # The first 2 go gets are to support older Go versions - go get github.com/arbovm/levenshtein - go get github.com/dgryski/trifles/leven - GO111MODULE=on go test -race -v -coverprofile=coverage.txt -covermode=atomic +test: + go test -race -v -coverprofile=coverage.txt -covermode=atomic bench: go test -run=XXX -bench=. -benchmem -count=5 diff --git a/vendor/github.com/agnivade/levenshtein/README.md b/vendor/github.com/agnivade/levenshtein/README.md index 13c52a21..34378aab 100644 --- a/vendor/github.com/agnivade/levenshtein/README.md +++ b/vendor/github.com/agnivade/levenshtein/README.md @@ -1,4 +1,4 @@ -levenshtein [![Build Status](https://travis-ci.org/agnivade/levenshtein.svg?branch=master)](https://travis-ci.org/agnivade/levenshtein) [![Go Report Card](https://goreportcard.com/badge/github.com/agnivade/levenshtein)](https://goreportcard.com/report/github.com/agnivade/levenshtein) [![PkgGoDev](https://pkg.go.dev/badge/github.com/agnivade/levenshtein)](https://pkg.go.dev/github.com/agnivade/levenshtein) +levenshtein ![Build Status](https://github.com/agnivade/levenshtein/actions/workflows/ci.yml/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/agnivade/levenshtein)](https://goreportcard.com/report/github.com/agnivade/levenshtein) [![PkgGoDev](https://pkg.go.dev/badge/github.com/agnivade/levenshtein)](https://pkg.go.dev/github.com/agnivade/levenshtein) =========== [Go](http://golang.org) package to calculate the [Levenshtein Distance](http://en.wikipedia.org/wiki/Levenshtein_distance) diff --git a/vendor/github.com/agnivade/levenshtein/levenshtein.go b/vendor/github.com/agnivade/levenshtein/levenshtein.go index f727a66f..861f409d 100644 --- a/vendor/github.com/agnivade/levenshtein/levenshtein.go +++ b/vendor/github.com/agnivade/levenshtein/levenshtein.go @@ -41,6 +41,25 @@ func ComputeDistance(a, b string) int { if len(s1) > len(s2) { s1, s2 = s2, s1 } + + // remove trailing identical runes. + for i := 0; i < len(s1); i++ { + if s1[len(s1)-1-i] != s2[len(s2)-1-i] { + s1 = s1[:len(s1)-i] + s2 = s2[:len(s2)-i] + break + } + } + + // Remove leading identical runes. 
+ for i := 0; i < len(s1); i++ { + if s1[i] != s2[i] { + s1 = s1[i:] + s2 = s2[i:] + break + } + } + lenS1 := len(s1) lenS2 := len(s2) @@ -71,7 +90,7 @@ func ComputeDistance(a, b string) int { for j := 1; j <= lenS1; j++ { current := x[j-1] // match if s2[i-1] != s1[j-1] { - current = min(min(x[j-1]+1, prev+1), x[j]+1) + current = min(x[j-1]+1, prev+1, x[j]+1) } x[j-1] = prev prev = current @@ -80,10 +99,3 @@ func ComputeDistance(a, b string) int { } return int(x[lenS1]) } - -func min(a, b uint16) uint16 { - if a < b { - return a - } - return b -} diff --git a/vendor/github.com/caddyserver/certmagic/account.go b/vendor/github.com/caddyserver/certmagic/account.go index f3b8d44d..0c43ad63 100644 --- a/vendor/github.com/caddyserver/certmagic/account.go +++ b/vendor/github.com/caddyserver/certmagic/account.go @@ -33,6 +33,7 @@ import ( "sync" "github.com/mholt/acmez/v2/acme" + "go.uber.org/zap" ) // getAccount either loads or creates a new account, depending on if @@ -40,8 +41,15 @@ import ( func (am *ACMEIssuer) getAccount(ctx context.Context, ca, email string) (acme.Account, error) { acct, err := am.loadAccount(ctx, ca, email) if errors.Is(err, fs.ErrNotExist) { + am.Logger.Info("creating new account because no account for configured email is known to us", + zap.String("email", email), + zap.String("ca", ca), + zap.Error(err)) return am.newAccount(email) } + am.Logger.Debug("using existing ACME account because key found in storage associated with email", + zap.String("email", email), + zap.String("ca", ca)) return acct, err } @@ -407,6 +415,15 @@ func (am *ACMEIssuer) mostRecentAccountEmail(ctx context.Context, caURL string) return getPrimaryContact(account), true } +func accountRegLockKey(acc acme.Account) string { + key := "register_acme_account" + if len(acc.Contact) == 0 { + return key + } + key += "_" + getPrimaryContact(acc) + return key +} + // getPrimaryContact returns the first contact on the account (if any) // without the scheme. (I guess we assume an email address.) 
func getPrimaryContact(account acme.Account) string { diff --git a/vendor/github.com/caddyserver/certmagic/acmeclient.go b/vendor/github.com/caddyserver/certmagic/acmeclient.go index 031aaa11..c6e1f6ed 100644 --- a/vendor/github.com/caddyserver/certmagic/acmeclient.go +++ b/vendor/github.com/caddyserver/certmagic/acmeclient.go @@ -50,77 +50,123 @@ func (iss *ACMEIssuer) newACMEClientWithAccount(ctx context.Context, useTestCA, return nil, err } - // look up or create the ACME account - var account acme.Account - if iss.AccountKeyPEM != "" { - account, err = iss.GetAccount(ctx, []byte(iss.AccountKeyPEM)) - } else { - account, err = iss.getAccount(ctx, client.Directory, iss.getEmail()) + // we try loading the account from storage before a potential + // lock, and after obtaining the lock as well, to ensure we don't + // repeat work done by another instance or goroutine + getAccount := func() (acme.Account, error) { + // look up or create the ACME account + var account acme.Account + if iss.AccountKeyPEM != "" { + iss.Logger.Info("using configured ACME account") + account, err = iss.GetAccount(ctx, []byte(iss.AccountKeyPEM)) + } else { + account, err = iss.getAccount(ctx, client.Directory, iss.getEmail()) + } + if err != nil { + return acme.Account{}, fmt.Errorf("getting ACME account: %v", err) + } + return account, nil } + + // first try getting the account + account, err := getAccount() if err != nil { - return nil, fmt.Errorf("getting ACME account: %v", err) + return nil, err } // register account if it is new if account.Status == "" { - if iss.NewAccountFunc != nil { - // obtain lock here, since NewAccountFunc calls happen concurrently and they typically read and change the issuer - iss.mu.Lock() - account, err = iss.NewAccountFunc(ctx, iss, account) - iss.mu.Unlock() - if err != nil { - return nil, fmt.Errorf("account pre-registration callback: %v", err) + iss.Logger.Info("ACME account has empty status; registering account with ACME server", + zap.Strings("contact", account.Contact), + zap.String("location", account.Location)) + + // synchronize this so the account is only created once + acctLockKey := accountRegLockKey(account) + err = acquireLock(ctx, iss.config.Storage, acctLockKey) + if err != nil { + return nil, fmt.Errorf("locking account registration: %v", err) + } + defer func() { + if err := releaseLock(ctx, iss.config.Storage, acctLockKey); err != nil { + iss.Logger.Error("failed to unlock account registration lock", zap.Error(err)) } + }() + + // if we're not the only one waiting for this account, then by this point it should already be registered and in storage; reload it + account, err = getAccount() + if err != nil { + return nil, err } - // agree to terms - if interactive { - if !iss.isAgreed() { - var termsURL string - dir, err := client.GetDirectory(ctx) + // if we are the only or first one waiting for this account, then proceed to register it while we have the lock + if account.Status == "" { + if iss.NewAccountFunc != nil { + // obtain lock here, since NewAccountFunc calls happen concurrently and they typically read and change the issuer + iss.mu.Lock() + account, err = iss.NewAccountFunc(ctx, iss, account) + iss.mu.Unlock() if err != nil { - return nil, fmt.Errorf("getting directory: %w", err) + return nil, fmt.Errorf("account pre-registration callback: %v", err) } - if dir.Meta != nil { - termsURL = dir.Meta.TermsOfService - } - if termsURL != "" { - agreed := iss.askUserAgreement(termsURL) - if !agreed { - return nil, fmt.Errorf("user must agree to CA terms") + } + + 
// agree to terms + if interactive { + if !iss.isAgreed() { + var termsURL string + dir, err := client.GetDirectory(ctx) + if err != nil { + return nil, fmt.Errorf("getting directory: %w", err) + } + if dir.Meta != nil { + termsURL = dir.Meta.TermsOfService + } + if termsURL != "" { + agreed := iss.askUserAgreement(termsURL) + if !agreed { + return nil, fmt.Errorf("user must agree to CA terms") + } + iss.mu.Lock() + iss.agreed = agreed + iss.mu.Unlock() } - iss.mu.Lock() - iss.agreed = agreed - iss.mu.Unlock() } + } else { + // can't prompt a user who isn't there; they should + // have reviewed the terms beforehand + iss.mu.Lock() + iss.agreed = true + iss.mu.Unlock() + } + account.TermsOfServiceAgreed = iss.isAgreed() + + // associate account with external binding, if configured + if iss.ExternalAccount != nil { + err := account.SetExternalAccountBinding(ctx, client.Client, *iss.ExternalAccount) + if err != nil { + return nil, err + } + } + + // create account + account, err = client.NewAccount(ctx, account) + if err != nil { + return nil, fmt.Errorf("registering account %v with server: %w", account.Contact, err) + } + iss.Logger.Info("new ACME account registered", + zap.Strings("contact", account.Contact), + zap.String("status", account.Status)) + + // persist the account to storage + err = iss.saveAccount(ctx, client.Directory, account) + if err != nil { + return nil, fmt.Errorf("could not save account %v: %v", account.Contact, err) } } else { - // can't prompt a user who isn't there; they should - // have reviewed the terms beforehand - iss.mu.Lock() - iss.agreed = true - iss.mu.Unlock() - } - account.TermsOfServiceAgreed = iss.isAgreed() - - // associate account with external binding, if configured - if iss.ExternalAccount != nil { - err := account.SetExternalAccountBinding(ctx, client.Client, *iss.ExternalAccount) - if err != nil { - return nil, err - } - } - - // create account - account, err = client.NewAccount(ctx, account) - if err != nil { - return nil, fmt.Errorf("registering account %v with server: %w", account.Contact, err) - } - - // persist the account to storage - err = iss.saveAccount(ctx, client.Directory, account) - if err != nil { - return nil, fmt.Errorf("could not save account %v: %v", account.Contact, err) + iss.Logger.Info("account has already been registered; reloaded", + zap.Strings("contact", account.Contact), + zap.String("status", account.Status), + zap.String("location", account.Location)) } } diff --git a/vendor/github.com/caddyserver/certmagic/acmeissuer.go b/vendor/github.com/caddyserver/certmagic/acmeissuer.go index 87fa5ff5..e010f087 100644 --- a/vendor/github.com/caddyserver/certmagic/acmeissuer.go +++ b/vendor/github.com/caddyserver/certmagic/acmeissuer.go @@ -461,7 +461,7 @@ func (am *ACMEIssuer) doIssue(ctx context.Context, csr *x509.CertificateRequest, // between client and server or some sort of bookkeeping error with regards to the certID // and the server is rejecting the ARI certID. In any case, an invalid certID may cause // orders to fail. So try once without setting it. 
- if !usingTestCA && attempts != 2 { + if !am.config.DisableARI && !usingTestCA && attempts != 2 { if replacing, ok := ctx.Value(ctxKeyARIReplaces).(*x509.Certificate); ok { params.Replaces = replacing } diff --git a/vendor/github.com/caddyserver/certmagic/certificates.go b/vendor/github.com/caddyserver/certmagic/certificates.go index a5147a2d..2965712a 100644 --- a/vendor/github.com/caddyserver/certmagic/certificates.go +++ b/vendor/github.com/caddyserver/certmagic/certificates.go @@ -103,53 +103,54 @@ func (cfg *Config) certNeedsRenewal(leaf *x509.Certificate, ari acme.RenewalInfo logger = zap.NewNop() } - // first check ARI: if it says it's time to renew, it's time to renew - // (notice that we don't strictly require an ARI window to also exist; we presume - // that if a time has been selected, a window does or did exist, even if it didn't - // get stored/encoded for some reason - but also: this allows administrators to - // manually or explicitly schedule a renewal time indepedently of ARI which could - // be useful) - selectedTime := ari.SelectedTime + if !cfg.DisableARI { + // first check ARI: if it says it's time to renew, it's time to renew + // (notice that we don't strictly require an ARI window to also exist; we presume + // that if a time has been selected, a window does or did exist, even if it didn't + // get stored/encoded for some reason - but also: this allows administrators to + // manually or explicitly schedule a renewal time indepedently of ARI which could + // be useful) + selectedTime := ari.SelectedTime - // if, for some reason a random time in the window hasn't been selected yet, but an ARI - // window does exist, we can always improvise one... even if this is called repeatedly, - // a random time is a random time, whether you generate it once or more :D - // (code borrowed from our acme package) - if selectedTime.IsZero() && - (!ari.SuggestedWindow.Start.IsZero() && !ari.SuggestedWindow.End.IsZero()) { - start, end := ari.SuggestedWindow.Start.Unix()+1, ari.SuggestedWindow.End.Unix() - selectedTime = time.Unix(rand.Int63n(end-start)+start, 0).UTC() - logger.Warn("no renewal time had been selected with ARI; chose an ephemeral one for now", - zap.Time("ephemeral_selected_time", selectedTime)) - } - - // if a renewal time has been selected, start with that - if !selectedTime.IsZero() { - // ARI spec recommends an algorithm that renews after the randomly-selected - // time OR just before it if the next waking time would be after it; this - // cutoff can actually be before the start of the renewal window, but the spec - // author says that's OK: https://github.com/aarongable/draft-acme-ari/issues/71 - cutoff := ari.SelectedTime.Add(-cfg.certCache.options.RenewCheckInterval) - if time.Now().After(cutoff) { - logger.Info("certificate needs renewal based on ARI window", - zap.Time("selected_time", selectedTime), - zap.Time("renewal_cutoff", cutoff)) - return true + // if, for some reason a random time in the window hasn't been selected yet, but an ARI + // window does exist, we can always improvise one... 
even if this is called repeatedly, + // a random time is a random time, whether you generate it once or more :D + // (code borrowed from our acme package) + if selectedTime.IsZero() && + (!ari.SuggestedWindow.Start.IsZero() && !ari.SuggestedWindow.End.IsZero()) { + start, end := ari.SuggestedWindow.Start.Unix()+1, ari.SuggestedWindow.End.Unix() + selectedTime = time.Unix(rand.Int63n(end-start)+start, 0).UTC() + logger.Warn("no renewal time had been selected with ARI; chose an ephemeral one for now", + zap.Time("ephemeral_selected_time", selectedTime)) } - // according to ARI, we are not ready to renew; however, we do not rely solely on - // ARI calculations... what if there is a bug in our implementation, or in the - // server's, or the stored metadata? for redundancy, give credence to the expiration - // date; ignore ARI if we are past a "dangerously close" limit, to avoid any - // possibility of a bug in ARI compromising a site's uptime: we should always always - // always give heed to actual validity period - if currentlyInRenewalWindow(leaf.NotBefore, expiration, 1.0/20.0) { - logger.Warn("certificate is in emergency renewal window; superceding ARI", - zap.Duration("remaining", time.Until(expiration)), - zap.Time("renewal_cutoff", cutoff)) - return true - } + // if a renewal time has been selected, start with that + if !selectedTime.IsZero() { + // ARI spec recommends an algorithm that renews after the randomly-selected + // time OR just before it if the next waking time would be after it; this + // cutoff can actually be before the start of the renewal window, but the spec + // author says that's OK: https://github.com/aarongable/draft-acme-ari/issues/71 + cutoff := ari.SelectedTime.Add(-cfg.certCache.options.RenewCheckInterval) + if time.Now().After(cutoff) { + logger.Info("certificate needs renewal based on ARI window", + zap.Time("selected_time", selectedTime), + zap.Time("renewal_cutoff", cutoff)) + return true + } + // according to ARI, we are not ready to renew; however, we do not rely solely on + // ARI calculations... what if there is a bug in our implementation, or in the + // server's, or the stored metadata? for redundancy, give credence to the expiration + // date; ignore ARI if we are past a "dangerously close" limit, to avoid any + // possibility of a bug in ARI compromising a site's uptime: we should always always + // always give heed to actual validity period + if currentlyInRenewalWindow(leaf.NotBefore, expiration, 1.0/20.0) { + logger.Warn("certificate is in emergency renewal window; superceding ARI", + zap.Duration("remaining", time.Until(expiration)), + zap.Time("renewal_cutoff", cutoff)) + return true + } + } } // the normal check, in the absence of ARI, is to determine if we're near enough (or past) @@ -552,6 +553,7 @@ func SubjectIsInternal(subj string) bool { return subj == "localhost" || strings.HasSuffix(subj, ".localhost") || strings.HasSuffix(subj, ".local") || + strings.HasSuffix(subj, ".internal") || strings.HasSuffix(subj, ".home.arpa") || isInternalIP(subj) } diff --git a/vendor/github.com/caddyserver/certmagic/config.go b/vendor/github.com/caddyserver/certmagic/config.go index 5a9cf498..a7771848 100644 --- a/vendor/github.com/caddyserver/certmagic/config.go +++ b/vendor/github.com/caddyserver/certmagic/config.go @@ -149,6 +149,10 @@ type Config struct { // EXPERIMENTAL: Subject to change or removal. SubjectTransformer func(ctx context.Context, domain string) string + // Disables both ARI fetching and the use of ARI for renewal decisions. 
+ // TEMPORARY: Will likely be removed in the future. + DisableARI bool + // Set a logger to enable logging. If not set, // a default logger will be created. Logger *zap.Logger @@ -370,9 +374,11 @@ func (cfg *Config) manageAll(ctx context.Context, domainNames []string, async bo } for _, domainName := range domainNames { + domainName = normalizedName(domainName) + // if on-demand is configured, defer obtain and renew operations if cfg.OnDemand != nil { - cfg.OnDemand.hostAllowlist[normalizedName(domainName)] = struct{}{} + cfg.OnDemand.hostAllowlist[domainName] = struct{}{} continue } @@ -449,7 +455,7 @@ func (cfg *Config) manageOne(ctx context.Context, domainName string, async bool) // ensure ARI is updated before we check whether the cert needs renewing // (we ignore the second return value because we already check if needs renewing anyway) - if cert.ari.NeedsRefresh() { + if !cfg.DisableARI && cert.ari.NeedsRefresh() { cert, _, err = cfg.updateARI(ctx, cert, cfg.Logger) if err != nil { cfg.Logger.Error("updating ARI upon managing", zap.Error(err)) @@ -886,11 +892,13 @@ func (cfg *Config) renewCert(ctx context.Context, name string, force, interactiv // if we're renewing with the same ACME CA as before, have the ACME // client tell the server we are replacing a certificate (but doing // this on the wrong CA, or when the CA doesn't recognize the certID, - // can fail the order) - if acmeData, err := certRes.getACMEData(); err == nil && acmeData.CA != "" { - if acmeIss, ok := issuer.(*ACMEIssuer); ok { - if acmeIss.CA == acmeData.CA { - ctx = context.WithValue(ctx, ctxKeyARIReplaces, leaf) + // can fail the order) -- TODO: change this check to whether we're using the same ACME account, not CA + if !cfg.DisableARI { + if acmeData, err := certRes.getACMEData(); err == nil && acmeData.CA != "" { + if acmeIss, ok := issuer.(*ACMEIssuer); ok { + if acmeIss.CA == acmeData.CA { + ctx = context.WithValue(ctx, ctxKeyARIReplaces, leaf) + } } } } @@ -982,23 +990,26 @@ func (cfg *Config) generateCSR(privateKey crypto.PrivateKey, sans []string, useC csrTemplate := new(x509.CertificateRequest) for _, name := range sans { + // identifiers should be converted to punycode before going into the CSR + // (convert IDNs to ASCII according to RFC 5280 section 7) + normalizedName, err := idna.ToASCII(name) + if err != nil { + return nil, fmt.Errorf("converting identifier '%s' to ASCII: %v", name, err) + } + // TODO: This is a temporary hack to support ZeroSSL API... 
- if useCN && csrTemplate.Subject.CommonName == "" && len(name) <= 64 { - csrTemplate.Subject.CommonName = name + if useCN && csrTemplate.Subject.CommonName == "" && len(normalizedName) <= 64 { + csrTemplate.Subject.CommonName = normalizedName continue } - if ip := net.ParseIP(name); ip != nil { + + if ip := net.ParseIP(normalizedName); ip != nil { csrTemplate.IPAddresses = append(csrTemplate.IPAddresses, ip) - } else if strings.Contains(name, "@") { - csrTemplate.EmailAddresses = append(csrTemplate.EmailAddresses, name) - } else if u, err := url.Parse(name); err == nil && strings.Contains(name, "/") { + } else if strings.Contains(normalizedName, "@") { + csrTemplate.EmailAddresses = append(csrTemplate.EmailAddresses, normalizedName) + } else if u, err := url.Parse(normalizedName); err == nil && strings.Contains(normalizedName, "/") { csrTemplate.URIs = append(csrTemplate.URIs, u) } else { - // convert IDNs to ASCII according to RFC 5280 section 7 - normalizedName, err := idna.ToASCII(name) - if err != nil { - return nil, fmt.Errorf("converting identifier '%s' to ASCII: %v", name, err) - } csrTemplate.DNSNames = append(csrTemplate.DNSNames, normalizedName) } } @@ -1007,6 +1018,16 @@ func (cfg *Config) generateCSR(privateKey crypto.PrivateKey, sans []string, useC csrTemplate.ExtraExtensions = append(csrTemplate.ExtraExtensions, mustStapleExtension) } + // IP addresses aren't printed here because I'm too lazy to marshal them as strings, but + // we at least print the incoming SANs so it should be obvious what became IPs + cfg.Logger.Debug("created CSR", + zap.Strings("identifiers", sans), + zap.Strings("san_dns_names", csrTemplate.DNSNames), + zap.Strings("san_emails", csrTemplate.EmailAddresses), + zap.String("common_name", csrTemplate.Subject.CommonName), + zap.Int("extra_extensions", len(csrTemplate.ExtraExtensions)), + ) + csrDER, err := x509.CreateCertificateRequest(rand.Reader, csrTemplate, privateKey) if err != nil { return nil, err @@ -1244,8 +1265,10 @@ func (cfg *Config) managedCertNeedsRenewal(certRes CertificateResource, emitLogs return 0, nil, true } var ari acme.RenewalInfo - if ariPtr, err := certRes.getARI(); err == nil && ariPtr != nil { - ari = *ariPtr + if !cfg.DisableARI { + if ariPtr, err := certRes.getARI(); err == nil && ariPtr != nil { + ari = *ariPtr + } } remaining := time.Until(expiresAt(certChain[0])) return remaining, certChain[0], cfg.certNeedsRenewal(certChain[0], ari, emitLogs) diff --git a/vendor/github.com/caddyserver/certmagic/filestorage.go b/vendor/github.com/caddyserver/certmagic/filestorage.go index f6f13603..d3df9cf7 100644 --- a/vendor/github.com/caddyserver/certmagic/filestorage.go +++ b/vendor/github.com/caddyserver/certmagic/filestorage.go @@ -27,6 +27,8 @@ import ( "path/filepath" "runtime" "time" + + "github.com/caddyserver/certmagic/internal/atomicfile" ) // FileStorage facilitates forming file paths derived from a root @@ -82,12 +84,30 @@ func (s *FileStorage) Store(_ context.Context, key string, value []byte) error { if err != nil { return err } - return os.WriteFile(filename, value, 0600) + fp, err := atomicfile.New(filename, 0o600) + if err != nil { + return err + } + _, err = fp.Write(value) + if err != nil { + // cancel the write + fp.Cancel() + return err + } + // close, thereby flushing the write + return fp.Close() } // Load retrieves the value at key. 
func (s *FileStorage) Load(_ context.Context, key string) ([]byte, error) { - return os.ReadFile(s.Filename(key)) + // i believe it's possible for the read call to error but still return bytes, in event of something like a shortread? + // therefore, i think it's appropriate to not return any bytes to avoid downstream users of the package erroniously believing that + // bytes read + error is a valid response (it should not be) + xs, err := os.ReadFile(s.Filename(key)) + if err != nil { + return nil, err + } + return xs, nil } // Delete deletes the value at key. diff --git a/vendor/github.com/caddyserver/certmagic/handshake.go b/vendor/github.com/caddyserver/certmagic/handshake.go index fd576995..1ff0ce27 100644 --- a/vendor/github.com/caddyserver/certmagic/handshake.go +++ b/vendor/github.com/caddyserver/certmagic/handshake.go @@ -582,7 +582,7 @@ func (cfg *Config) handshakeMaintenance(ctx context.Context, hello *tls.ClientHe } // Check ARI status - if cert.ari.NeedsRefresh() { + if !cfg.DisableARI && cert.ari.NeedsRefresh() { // we ignore the second return value here because we go on to check renewal status below regardless var err error cert, _, err = cfg.updateARI(ctx, cert, logger) diff --git a/vendor/github.com/caddyserver/certmagic/internal/atomicfile/README b/vendor/github.com/caddyserver/certmagic/internal/atomicfile/README new file mode 100644 index 00000000..17d04ddd --- /dev/null +++ b/vendor/github.com/caddyserver/certmagic/internal/atomicfile/README @@ -0,0 +1,11 @@ +# atomic file + + +this is copied from + +https://github.com/containerd/containerd/blob/main/pkg%2Fatomicfile%2Ffile.go + + +see + +https://github.com/caddyserver/certmagic/issues/296 diff --git a/vendor/github.com/caddyserver/certmagic/internal/atomicfile/file.go b/vendor/github.com/caddyserver/certmagic/internal/atomicfile/file.go new file mode 100644 index 00000000..7b870f7a --- /dev/null +++ b/vendor/github.com/caddyserver/certmagic/internal/atomicfile/file.go @@ -0,0 +1,148 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* +Package atomicfile provides a mechanism (on Unix-like platforms) to present a consistent view of a file to separate +processes even while the file is being written. This is accomplished by writing a temporary file, syncing to disk, and +renaming over the destination file name. + +Partial/inconsistent reads can occur due to: + 1. A process attempting to read the file while it is being written to (both in the case of a new file with a + short/incomplete write or in the case of an existing, updated file where new bytes may be written at the beginning + but old bytes may still be present after). + 2. Concurrent goroutines leading to multiple active writers of the same file. + +The above mechanism explicitly protects against (1) as all writes are to a file with a temporary name. + +There is no explicit protection against multiple, concurrent goroutines attempting to write the same file. 
However, +atomically writing the file should mean only one writer will "win" and a consistent file will be visible. + +Note: atomicfile is partially implemented for Windows. The Windows codepath performs the same operations, however +Windows does not guarantee that a rename operation is atomic; a crash in the middle may leave the destination file +truncated rather than with the expected content. +*/ +package atomicfile + +import ( + "errors" + "fmt" + "io" + "os" + "path/filepath" + "sync" +) + +// File is an io.ReadWriteCloser that can also be Canceled if a change needs to be abandoned. +type File interface { + io.ReadWriteCloser + // Cancel abandons a change to a file. This can be called if a write fails or another error occurs. + Cancel() error +} + +// ErrClosed is returned if Read or Write are called on a closed File. +var ErrClosed = errors.New("file is closed") + +// New returns a new atomic file. On Unix-like platforms, the writer (an io.ReadWriteCloser) is backed by a temporary +// file placed into the same directory as the destination file (using filepath.Dir to split the directory from the +// name). On a call to Close the temporary file is synced to disk and renamed to its final name, hiding any previous +// file by the same name. +// +// Note: Take care to call Close and handle any errors that are returned. Errors returned from Close may indicate that +// the file was not written with its final name. +func New(name string, mode os.FileMode) (File, error) { + return newFile(name, mode) +} + +type atomicFile struct { + name string + f *os.File + closed bool + closedMu sync.RWMutex +} + +func newFile(name string, mode os.FileMode) (File, error) { + dir := filepath.Dir(name) + f, err := os.CreateTemp(dir, "") + if err != nil { + return nil, fmt.Errorf("failed to create temp file: %w", err) + } + if err := f.Chmod(mode); err != nil { + return nil, fmt.Errorf("failed to change temp file permissions: %w", err) + } + return &atomicFile{name: name, f: f}, nil +} + +func (a *atomicFile) Close() (err error) { + a.closedMu.Lock() + defer a.closedMu.Unlock() + + if a.closed { + return nil + } + a.closed = true + + defer func() { + if err != nil { + _ = os.Remove(a.f.Name()) // ignore errors + } + }() + // The order of operations here is: + // 1. sync + // 2. close + // 3. rename + // While the ordering of 2 and 3 is not important on Unix-like operating systems, Windows cannot rename an open + // file. By closing first, we allow the rename operation to succeed. 
+ if err = a.f.Sync(); err != nil { + return fmt.Errorf("failed to sync temp file %q: %w", a.f.Name(), err) + } + if err = a.f.Close(); err != nil { + return fmt.Errorf("failed to close temp file %q: %w", a.f.Name(), err) + } + if err = os.Rename(a.f.Name(), a.name); err != nil { + return fmt.Errorf("failed to rename %q to %q: %w", a.f.Name(), a.name, err) + } + return nil +} + +func (a *atomicFile) Cancel() error { + a.closedMu.Lock() + defer a.closedMu.Unlock() + + if a.closed { + return nil + } + a.closed = true + _ = a.f.Close() // ignore error + return os.Remove(a.f.Name()) +} + +func (a *atomicFile) Read(p []byte) (n int, err error) { + a.closedMu.RLock() + defer a.closedMu.RUnlock() + if a.closed { + return 0, ErrClosed + } + return a.f.Read(p) +} + +func (a *atomicFile) Write(p []byte) (n int, err error) { + a.closedMu.RLock() + defer a.closedMu.RUnlock() + if a.closed { + return 0, ErrClosed + } + return a.f.Write(p) +} diff --git a/vendor/github.com/caddyserver/certmagic/maintain.go b/vendor/github.com/caddyserver/certmagic/maintain.go index 88d36531..dea2cfdf 100644 --- a/vendor/github.com/caddyserver/certmagic/maintain.go +++ b/vendor/github.com/caddyserver/certmagic/maintain.go @@ -136,7 +136,7 @@ func (certCache *Cache) RenewManagedCertificates(ctx context.Context) error { } // ACME-specific: see if if ACME Renewal Info (ARI) window needs refreshing - if cert.ari.NeedsRefresh() { + if !cfg.DisableARI && cert.ari.NeedsRefresh() { configs[cert.hash] = cfg ariQueue = append(ariQueue, cert) } @@ -427,7 +427,7 @@ func (cfg *Config) storageHasNewerARI(ctx context.Context, cert Certificate) (bo // or if the one in storage has a later RetryAfter (though I suppose // it's not guaranteed, typically those will move forward in time) if (!cert.ari.HasWindow() && storedCertData.RenewalInfo.HasWindow()) || - storedCertData.RenewalInfo.RetryAfter.After(*cert.ari.RetryAfter) { + (cert.ari.RetryAfter == nil || storedCertData.RenewalInfo.RetryAfter.After(*cert.ari.RetryAfter)) { return true, *storedCertData.RenewalInfo, nil } return false, acme.RenewalInfo{}, nil @@ -459,6 +459,9 @@ func (cfg *Config) loadStoredACMECertificateMetadata(ctx context.Context, cert C // updated in the cache. The certificate with the updated ARI is returned. If true is // returned, the ARI window or selected time has changed, and the caller should check if // the cert needs to be renewed now, even if there is an error. +// +// This will always try to ARI without checking if it needs to be refreshed. Call +// NeedsRefresh() on the RenewalInfo first, and only call this if that returns true. func (cfg *Config) updateARI(ctx context.Context, cert Certificate, logger *zap.Logger) (updatedCert Certificate, changed bool, err error) { logger = logger.With( zap.Strings("identifiers", cert.Names), @@ -469,6 +472,17 @@ func (cfg *Config) updateARI(ctx context.Context, cert Certificate, logger *zap. 
updatedCert = cert oldARI := cert.ari + // synchronize ARI fetching; see #297 + lockName := "ari_" + cert.ari.UniqueIdentifier + if err := acquireLock(ctx, cfg.Storage, lockName); err != nil { + return cert, false, fmt.Errorf("unable to obtain ARI lock: %v", err) + } + defer func() { + if err := releaseLock(ctx, cfg.Storage, lockName); err != nil { + logger.Error("unable to release ARI lock", zap.Error(err)) + } + }() + // see if the stored value has been refreshed already by another instance gotNewARI, newARI, err := cfg.storageHasNewerARI(ctx, cert) @@ -615,11 +629,11 @@ func CleanStorage(ctx context.Context, storage Storage, opts CleanStorageOptions opts.Logger = opts.Logger.With(zap.Any("storage", storage)) // storage cleaning should be globally exclusive - if err := storage.Lock(ctx, lockName); err != nil { + if err := acquireLock(ctx, storage, lockName); err != nil { return fmt.Errorf("unable to acquire %s lock: %v", lockName, err) } defer func() { - if err := storage.Unlock(ctx, lockName); err != nil { + if err := releaseLock(ctx, storage, lockName); err != nil { opts.Logger.Error("unable to release lock", zap.Error(err)) return } diff --git a/vendor/github.com/caddyserver/certmagic/zerosslissuer.go b/vendor/github.com/caddyserver/certmagic/zerosslissuer.go index 8ee044b4..4a33bfa2 100644 --- a/vendor/github.com/caddyserver/certmagic/zerosslissuer.go +++ b/vendor/github.com/caddyserver/certmagic/zerosslissuer.go @@ -146,7 +146,7 @@ func (iss *ZeroSSLIssuer) Issue(ctx context.Context, csr *x509.CertificateReques // create the CNAME record(s) records := make(map[string]zoneRecord, len(cert.Validation.OtherMethods)) for name, verifyInfo := range cert.Validation.OtherMethods { - zr, err := iss.CNAMEValidation.createRecord(ctx, verifyInfo.CnameValidationP1, "CNAME", verifyInfo.CnameValidationP2) + zr, err := iss.CNAMEValidation.createRecord(ctx, verifyInfo.CnameValidationP1, "CNAME", verifyInfo.CnameValidationP2+".") // see issue #304 if err != nil { return nil, fmt.Errorf("creating CNAME record: %v", err) } diff --git a/vendor/github.com/go-playground/validator/v10/README.md b/vendor/github.com/go-playground/validator/v10/README.md index 9ab0705a..ddd65b07 100644 --- a/vendor/github.com/go-playground/validator/v10/README.md +++ b/vendor/github.com/go-playground/validator/v10/README.md @@ -1,7 +1,7 @@ Package validator ================= [![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) -![Project status](https://img.shields.io/badge/version-10.22.0-green.svg) +![Project status](https://img.shields.io/badge/version-10.22.1-green.svg) [![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) [![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) [![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) diff --git a/vendor/github.com/go-playground/validator/v10/baked_in.go b/vendor/github.com/go-playground/validator/v10/baked_in.go index b6fbaafa..d1a3656a 100644 --- a/vendor/github.com/go-playground/validator/v10/baked_in.go +++ b/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -1828,7 +1828,14 @@ func 
requireCheckFieldValue( return int64(field.Len()) == asInt(value) case reflect.Bool: - return field.Bool() == asBool(value) + return field.Bool() == (value == "true") + + case reflect.Ptr: + if field.IsNil() { + return value == "nil" + } + // Handle non-nil pointers + return requireCheckFieldValue(fl, param, value, defaultNotFoundValue) } // default reflect.String: diff --git a/vendor/github.com/hashicorp/raft/raft.go b/vendor/github.com/hashicorp/raft/raft.go index 183f041a..cbc9a59a 100644 --- a/vendor/github.com/hashicorp/raft/raft.go +++ b/vendor/github.com/hashicorp/raft/raft.go @@ -1749,7 +1749,7 @@ func (r *Raft) requestPreVote(rpc RPC, req *RequestPreVoteRequest) { }() // Check if we have an existing leader [who's not the candidate] and also - var candidate ServerAddress + candidate := r.trans.DecodePeer(req.GetRPCHeader().Addr) candidateID := ServerID(req.ID) // if the Servers list is empty that mean the cluster is very likely trying to bootstrap, @@ -1805,7 +1805,6 @@ func (r *Raft) requestPreVote(rpc RPC, req *RequestPreVoteRequest) { } resp.Granted = true - r.setLastContact() } // installSnapshot is invoked when we get a InstallSnapshot RPC call. diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 05c7359e..684a3085 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -16,6 +16,20 @@ This package provides various compression algorithms. # changelog +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + * Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 @@ -81,7 +95,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in 
https://github.com/klauspost/compress/pull/799 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 @@ -136,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635 * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 @@ -339,7 +353,7 @@ While the release has been extensively tested, it is recommended to testing when * s2: Fix binaries. * Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) @@ -518,7 +532,7 @@ While the release has been extensively tested, it is recommended to testing when * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. * Feb 19, 2016: Handle small payloads faster in level 1-3. * Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. * Feb 14, 2016: Snappy: Merge upstream changes. * Feb 14, 2016: Snappy: Fix aggressive skipping. * Feb 14, 2016: Snappy: Update benchmark. diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 66d1657d..af53fb86 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -861,7 +861,7 @@ func (d *compressor) reset(w io.Writer) { } switch d.compressionLevel.chain { case 0: - // level was NoCompression or ConstantCompresssion. + // level was NoCompression or ConstantCompression. 
d.windowEnd = 0 default: s := d.state diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 2f410d64..0d7b437f 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -298,6 +298,14 @@ const ( huffmanGenericReader ) +// flushMode tells decompressor when to return data +type flushMode uint8 + +const ( + syncFlush flushMode = iota // return data after sync flush block + partialFlush // return data after each block +) + // Decompress state. type decompressor struct { // Input source. @@ -332,6 +340,8 @@ type decompressor struct { nb uint final bool + + flushMode flushMode } func (f *decompressor) nextBlock() { @@ -618,7 +628,10 @@ func (f *decompressor) dataBlock() { } if n == 0 { - f.toRead = f.dict.readFlush() + if f.flushMode == syncFlush { + f.toRead = f.dict.readFlush() + } + f.finishBlock() return } @@ -657,8 +670,12 @@ func (f *decompressor) finishBlock() { if f.dict.availRead() > 0 { f.toRead = f.dict.readFlush() } + f.err = io.EOF + } else if f.flushMode == partialFlush && f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() } + f.step = nextBlock } @@ -789,6 +806,41 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error { return nil } +type ReaderOpt func(*decompressor) + +// WithPartialBlock tells decompressor to return after each block, +// so it can read data written with partial flush +func WithPartialBlock() ReaderOpt { + return func(f *decompressor) { + f.flushMode = partialFlush + } +} + +// WithDict initializes the reader with a preset dictionary +func WithDict(dict []byte) ReaderOpt { + return func(f *decompressor) { + f.dict.init(maxMatchOffset, dict) + } +} + +// NewReaderOpts returns new reader with provided options +func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = nextBlock + f.dict.init(maxMatchOffset, nil) + + for _, opt := range opts { + opt(&f) + } + + return &f +} + // NewReader returns a new ReadCloser that can be used // to read the uncompressed version of r. // If r does not also implement io.ByteReader, @@ -798,15 +850,7 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error { // // The ReadCloser returned by NewReader also implements Resetter. func NewReader(r io.Reader) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = nextBlock - f.dict.init(maxMatchOffset, nil) - return &f + return NewReaderOpts(r) } // NewReaderDict is like NewReader but initializes the reader @@ -817,13 +861,5 @@ func NewReader(r io.Reader) io.ReadCloser { // // The ReadCloser returned by NewReader also implements Resetter. 
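
The new flate reader options introduced in this hunk (NewReaderOpts, WithPartialBlock, WithDict) replace the old one-shot constructors, which NewReader and NewReaderDict now delegate to. A minimal usage sketch, assuming the vendored import path github.com/klauspost/compress/flate; the payload here is made up purely for illustration:

package main

import (
	"bytes"
	"io"
	"log"

	"github.com/klauspost/compress/flate"
)

func main() {
	// Compress something first so the example is self-contained.
	var buf bytes.Buffer
	w, _ := flate.NewWriter(&buf, flate.BestSpeed)
	w.Write([]byte("hello, partial flush"))
	w.Close()

	// WithPartialBlock makes the decompressor return data after each block
	// instead of waiting for a sync flush; WithDict would preset a dictionary.
	r := flate.NewReaderOpts(&buf, flate.WithPartialBlock())
	defer r.Close()

	out, err := io.ReadAll(r)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("decompressed %d bytes: %q", len(out), out)
}
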
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = nextBlock - f.dict.init(maxMatchOffset, dict) - return &f + return NewReaderOpts(r, WithDict(dict)) } diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index cc05d0f7..0c7dd4ff 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -15,7 +15,7 @@ const ( // It is possible, but by no way guaranteed that corrupt data will // return an error. // It is up to the caller to verify integrity of the returned data. -// Use a predefined Scrach to set maximum acceptable output size. +// Use a predefined Scratch to set maximum acceptable output size. func Decompress(b []byte, s *Scratch) ([]byte, error) { s, err := s.prepare(b) if err != nil { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 54bd08b2..0f56b02d 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) continue } // Ensure that all combinations are covered. @@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) + fmt.Fprintf(w, "%d errors, stopping\n", errs) break } } diff --git a/vendor/github.com/klauspost/compress/s2/writer.go b/vendor/github.com/klauspost/compress/s2/writer.go index 0a46f2b9..fd15078f 100644 --- a/vendor/github.com/klauspost/compress/s2/writer.go +++ b/vendor/github.com/klauspost/compress/s2/writer.go @@ -83,11 +83,14 @@ type Writer struct { snappy bool flushOnWrite bool appendIndex bool + bufferCB func([]byte) level uint8 } type result struct { b []byte + // return when writing + ret []byte // Uncompressed start offset startOffset int64 } @@ -146,6 +149,10 @@ func (w *Writer) Reset(writer io.Writer) { for write := range toWrite { // Wait for the data to be available. input := <-write + if input.ret != nil && w.bufferCB != nil { + w.bufferCB(input.ret) + input.ret = nil + } in := input.b if len(in) > 0 { if w.err(nil) == nil { @@ -341,7 +348,8 @@ func (w *Writer) AddSkippableBlock(id uint8, data []byte) (err error) { // but the input buffer cannot be written to by the caller // until Flush or Close has been called when concurrency != 1. // -// If you cannot control that, use the regular Write function. +// Use the WriterBufferDone to receive a callback when the buffer is done +// Processing. // // Note that input is not buffered. // This means that each write will result in discrete blocks being created. @@ -364,6 +372,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) { } if w.concurrency == 1 { _, err := w.writeSync(buf) + if w.bufferCB != nil { + w.bufferCB(buf) + } return err } @@ -378,7 +389,7 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) { hWriter <- result{startOffset: w.uncompWritten, b: magicChunkBytes} } } - + orgBuf := buf for len(buf) > 0 { // Cut input. 
uncompressed := buf @@ -397,6 +408,9 @@ func (w *Writer) EncodeBuffer(buf []byte) (err error) { startOffset: w.uncompWritten, } w.uncompWritten += int64(len(uncompressed)) + if len(buf) == 0 && w.bufferCB != nil { + res.ret = orgBuf + } go func() { race.ReadSlice(uncompressed) @@ -922,7 +936,7 @@ func WriterBetterCompression() WriterOption { } // WriterBestCompression will enable better compression. -// EncodeBetter compresses better than Encode but typically with a +// EncodeBest compresses better than Encode but typically with a // big speed decrease on compression. func WriterBestCompression() WriterOption { return func(w *Writer) error { @@ -941,6 +955,17 @@ func WriterUncompressed() WriterOption { } } +// WriterBufferDone will perform a callback when EncodeBuffer has finished +// writing a buffer to the output and the buffer can safely be reused. +// If the buffer was split into several blocks, it will be sent after the last block. +// Callbacks will not be done concurrently. +func WriterBufferDone(fn func(b []byte)) WriterOption { + return func(w *Writer) error { + w.bufferCB = fn + return nil + } +} + // WriterBlockSize allows to override the default block size. // Blocks will be this size or smaller. // Minimum size is 4KB and maximum size is 4MB. diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 03744fbc..9c28840c 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -598,7 +598,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: - println("Reading table for", tableIndex(i)) + if debugDecoder { + println("Reading table for", tableIndex(i)) + } if seq.fse == nil || seq.fse.preDefined { seq.fse = fseDecoderPool.Get().(*fseDecoder) } diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index a4f5bf91..84a79fde 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -179,9 +179,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -210,12 +210,12 @@ encodeLoop: // Index match start+1 (long) -> s - 1 index0 := s + repOff - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -241,9 +241,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. 
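
The s2 WriterBufferDone option shown above lets callers reuse buffers handed to EncodeBuffer once the writer signals it is done with them. A rough sketch of a buffer pool built on that callback; the pool size and payload are invented for illustration:

package main

import (
	"bytes"
	"log"

	"github.com/klauspost/compress/s2"
)

func main() {
	var dst bytes.Buffer

	// Small pool of reusable input buffers.
	pool := make(chan []byte, 4)
	for i := 0; i < cap(pool); i++ {
		pool <- make([]byte, 64<<10)
	}

	w := s2.NewWriter(&dst, s2.WriterBufferDone(func(b []byte) {
		pool <- b[:cap(b)] // buffer may be reused once the callback fires
	}))

	for i := 0; i < 8; i++ {
		buf := <-pool
		n := copy(buf, []byte("some payload that is cheap to fake here"))
		// EncodeBuffer owns buf until WriterBufferDone returns it.
		if err := w.EncodeBuffer(buf[:n]); err != nil {
			log.Fatal(err)
		}
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
	log.Printf("compressed size: %d", dst.Len())
}
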
@@ -270,11 +270,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -708,9 +708,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -738,12 +738,12 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -772,9 +772,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -801,11 +801,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index a154c18f..d36be7bd 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -138,9 +138,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -166,11 +166,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -798,9 +798,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. 
@@ -826,11 +826,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 72af7ef0..a79c4a52 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -202,7 +202,7 @@ func (e *Encoder) nextBlock(final bool) error { return nil } if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) var n2 int n2, s.err = s.w.Write(s.current) if s.err != nil { @@ -469,6 +469,15 @@ func (e *Encoder) Close() error { // Data compressed with EncodeAll can be decoded with the Decoder, // using either a stream or DecodeAll. func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { if len(src) == 0 { if e.o.fullZero { // Add frame header. @@ -491,13 +500,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() + // Use single segments when above minimum window and below window size. single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 53e160f7..e47af66e 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error { } return err } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 8adabd82..c59f17e0 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) } s.seqSize += ctx.litRemain @@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { return io.ErrUnexpectedEOF } - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) } if ctx.litRemain < 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 5b06174b..f5591fa1 
100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -2896,7 +2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition diff --git a/vendor/github.com/lestrrat-go/strftime/Changes b/vendor/github.com/lestrrat-go/strftime/Changes index b86a84c4..35cef2bf 100644 --- a/vendor/github.com/lestrrat-go/strftime/Changes +++ b/vendor/github.com/lestrrat-go/strftime/Changes @@ -1,6 +1,13 @@ Changes ======= +v1.1.0 - 28 Aug 2024 +[Miscellaneous] + * github.com/pkg/errors has been removed (it has been two years :) + * Updated build/test actions + * Updated minimum required go version to go 1.21 + * Fix week number handling + v1.0.6 - 20 Apr 2022 [Miscellaneous] * Minimum go version is now go 1.13 diff --git a/vendor/github.com/lestrrat-go/strftime/appenders.go b/vendor/github.com/lestrrat-go/strftime/appenders.go index 2941a247..e09a3e5d 100644 --- a/vendor/github.com/lestrrat-go/strftime/appenders.go +++ b/vendor/github.com/lestrrat-go/strftime/appenders.go @@ -36,13 +36,13 @@ var ( secondsNumberZeroPad = StdlibFormat("05") hms = StdlibFormat("15:04:05") tab = Verbatim("\t") - weekNumberSundayOrigin = weeknumberOffset(0) // week number of the year, Sunday first + weekNumberSundayOrigin = weeknumberOffset(true) // week number of the year, Sunday first weekdayMondayOrigin = weekday(1) // monday as the first day, and 01 as the first value weekNumberMondayOriginOneOrigin = AppendFunc(appendWeekNumber) eby = StdlibFormat("_2-Jan-2006") // monday as the first day, and 00 as the first value - weekNumberMondayOrigin = weeknumberOffset(1) // week number of the year, Monday first + weekNumberMondayOrigin = weeknumberOffset(false) // week number of the year, Monday first weekdaySundayOrigin = weekday(0) natReprTime = StdlibFormat("15:04:05") // national representation of the time XXX is this correct? natReprDate = StdlibFormat("01/02/06") // national representation of the date XXX is this correct? 
@@ -243,20 +243,16 @@ func (v weekday) Append(b []byte, t time.Time) []byte { return append(b, byte(n+48)) } -type weeknumberOffset int +type weeknumberOffset bool func (v weeknumberOffset) Append(b []byte, t time.Time) []byte { - yd := t.YearDay() - offset := int(t.Weekday()) - int(v) - if offset < 0 { - offset += 7 + offset := int(t.Weekday()) + if v { + offset = 6 - offset + } else if offset != 0 { + offset = 7 - offset } - - if yd < offset { - return append(b, '0', '0') - } - - n := ((yd - offset) / 7) + 1 + n := (t.YearDay() + offset) / 7 if n < 10 { b = append(b, '0') } diff --git a/vendor/github.com/lestrrat-go/strftime/internal/errors/errors_fmt.go b/vendor/github.com/lestrrat-go/strftime/internal/errors/errors_fmt.go deleted file mode 100644 index 18871c14..00000000 --- a/vendor/github.com/lestrrat-go/strftime/internal/errors/errors_fmt.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build strftime_native_errors -// +build strftime_native_errors - -package errors - -import "fmt" - -func New(s string) error { - return fmt.Errorf(s) -} - -func Errorf(s string, args ...interface{}) error { - return fmt.Errorf(s, args...) -} - -func Wrap(err error, s string) error { - return fmt.Errorf(s+`: %w`, err) -} diff --git a/vendor/github.com/lestrrat-go/strftime/internal/errors/errors_pkg.go b/vendor/github.com/lestrrat-go/strftime/internal/errors/errors_pkg.go deleted file mode 100644 index 35797878..00000000 --- a/vendor/github.com/lestrrat-go/strftime/internal/errors/errors_pkg.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build !strftime_native_errors -// +build !strftime_native_errors - -package errors - -import "github.com/pkg/errors" - -func New(s string) error { - return errors.New(s) -} - -func Errorf(s string, args ...interface{}) error { - return errors.Errorf(s, args...) 
-} - -func Wrap(err error, s string) error { - return errors.Wrap(err, s) -} diff --git a/vendor/github.com/lestrrat-go/strftime/specifications.go b/vendor/github.com/lestrrat-go/strftime/specifications.go index 2b6e11fe..3bd796a9 100644 --- a/vendor/github.com/lestrrat-go/strftime/specifications.go +++ b/vendor/github.com/lestrrat-go/strftime/specifications.go @@ -1,10 +1,9 @@ package strftime import ( + "errors" "fmt" "sync" - - "github.com/lestrrat-go/strftime/internal/errors" ) // because there is no such thing was a sync.RWLocker @@ -124,7 +123,7 @@ func (ds *specificationSet) Lookup(b byte) (Appender, error) { } v, ok := ds.store[b] if !ok { - return nil, errors.Errorf(`lookup failed: '%%%c' was not found in specification set`, b) + return nil, fmt.Errorf(`lookup failed: '%%%c' was not found in specification set`, b) } return v, nil } diff --git a/vendor/github.com/lestrrat-go/strftime/strftime.go b/vendor/github.com/lestrrat-go/strftime/strftime.go index c869491f..3d51ac6d 100644 --- a/vendor/github.com/lestrrat-go/strftime/strftime.go +++ b/vendor/github.com/lestrrat-go/strftime/strftime.go @@ -1,12 +1,12 @@ package strftime import ( + "errors" + "fmt" "io" "strings" "sync" "time" - - "github.com/lestrrat-go/strftime/internal/errors" ) type compileHandler interface { @@ -62,7 +62,7 @@ func compile(handler compileHandler, p string, ds SpecificationSet) error { specification, err := ds.Lookup(p[1]) if err != nil { - return errors.Wrap(err, `pattern compilation failed`) + return fmt.Errorf("pattern compilation failed: %w", err) } handler.handle(specification) @@ -127,14 +127,14 @@ func Format(p string, t time.Time, options ...Option) (string, error) { // TODO: this may be premature optimization ds, err := getSpecificationSetFor(options...) if err != nil { - return "", errors.Wrap(err, `failed to get specification set`) + return "", fmt.Errorf("failed to get specification set: %w", err) } h := getFmtAppendExecutor() defer releasdeFmtAppendExecutor(h) h.t = t if err := compile(h, p, ds); err != nil { - return "", errors.Wrap(err, `failed to compile format`) + return "", fmt.Errorf("failed to compile format: %w", err) } return string(h.dst), nil @@ -152,14 +152,14 @@ func New(p string, options ...Option) (*Strftime, error) { // TODO: this may be premature optimization ds, err := getSpecificationSetFor(options...) if err != nil { - return nil, errors.Wrap(err, `failed to get specification set`) + return nil, fmt.Errorf("failed to get specification set: %w", err) } var h appenderListBuilder h.list = &combiningAppend{} if err := compile(&h, p, ds); err != nil { - return nil, errors.Wrap(err, `failed to compile format`) + return nil, fmt.Errorf("failed to compile format: %w", err) } return &Strftime{ diff --git a/vendor/github.com/mholt/acmez/v2/acme/ari.go b/vendor/github.com/mholt/acmez/v2/acme/ari.go index d096aa92..93401f37 100644 --- a/vendor/github.com/mholt/acmez/v2/acme/ari.go +++ b/vendor/github.com/mholt/acmez/v2/acme/ari.go @@ -140,11 +140,54 @@ func (c *Client) GetRenewalInfo(ctx context.Context, leafCert *x509.Certificate) } var ari RenewalInfo - resp, err := c.httpReq(ctx, http.MethodGet, c.ariEndpoint(certID), nil, &ari) - if err != nil { - return RenewalInfo{}, err + var resp *http.Response + for i := 0; i < 3; i++ { + // backoff between retries; the if is probably not needed, but just for "properness"... 
+ if i > 0 { + select { + case <-ctx.Done(): + return RenewalInfo{}, ctx.Err() + case <-time.After(time.Duration(i*i+1) * time.Second): + } + } + + resp, err = c.httpReq(ctx, http.MethodGet, c.ariEndpoint(certID), nil, &ari) + if err != nil { + if c.Logger != nil { + c.Logger.Warn("error getting ARI response", + zap.Error(err), + zap.Int("attempt", i), + zap.Strings("names", leafCert.DNSNames)) + } + continue + } + + // "If the client receives no response or a malformed response + // (e.g. an end timestamp which is equal to or precedes the start + // timestamp), it SHOULD make its own determination of when to + // renew the certificate, and MAY retry the renewalInfo request + // with appropriate exponential backoff behavior." + // draft-ietf-acme-ari-04 §4.2 + if ari.SuggestedWindow.Start.IsZero() || + ari.SuggestedWindow.End.IsZero() || + ari.SuggestedWindow.Start.Equal(ari.SuggestedWindow.End) || + (ari.SuggestedWindow.End.Unix()-ari.SuggestedWindow.Start.Unix()-1 <= 0) { + if c.Logger != nil { + c.Logger.Debug("invalid ARI window", + zap.Time("start", ari.SuggestedWindow.Start), + zap.Time("end", ari.SuggestedWindow.End), + zap.Strings("names", leafCert.DNSNames)) + } + continue + } + + // valid ARI window + ari.UniqueIdentifier = certID + break + } + if err != nil || resp == nil { + return RenewalInfo{}, fmt.Errorf("could not get a valid ARI response; last error: %v", err) } - ari.UniqueIdentifier = certID // "The server SHOULD include a Retry-After header indicating the polling // interval that the ACME server recommends." draft-ietf-acme-ari-03 §4.2 @@ -161,7 +204,7 @@ func (c *Client) GetRenewalInfo(ctx context.Context, leafCert *x509.Certificate) // time within the suggested window." §4.2 // TODO: It's unclear whether this time should be selected once // or every time the client wakes to check ARI (see step 5 of the - // recommended algorithm); I've inquired here: + // recommended algorithm); I've enquired here: // https://github.com/aarongable/draft-acme-ari/issues/70 // We add 1 to the start time since we are dealing in seconds for // simplicity, but the server may provide sub-second timestamps. diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go index 5f117afa..a70cbea9 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-multipart.go @@ -24,7 +24,6 @@ import ( "encoding/hex" "encoding/xml" "fmt" - "hash/crc32" "io" "net/http" "net/url" @@ -87,7 +86,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj if opts.UserMetadata == nil { opts.UserMetadata = make(map[string]string, 1) } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() } // Initiate a new multipart upload. 
@@ -116,7 +115,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj // CRC32C is ~50% faster on AMD64 @ 30GB/s var crcBytes []byte customHeader := make(http.Header) - crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + crc := opts.AutoChecksum.Hasher() for partNumber <= totalPartsCount { length, rErr := readFull(reader, buf) if rErr == io.EOF && partNumber > 1 { @@ -154,7 +153,7 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj crc.Reset() crc.Write(buf[:length]) cSum := crc.Sum(nil) - customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) + customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) crcBytes = append(crcBytes, cSum...) } @@ -202,12 +201,13 @@ func (c *Client) putObjectMultipartNoStream(ctx context.Context, bucketName, obj sort.Sort(completedParts(complMultipartUpload.Parts)) opts = PutObjectOptions{ ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, } if len(crcBytes) > 0 { // Add hash of hashes. crc.Reset() crc.Write(crcBytes) - opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + opts.UserMetadata = map[string]string{opts.AutoChecksum.Key(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} } uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go index 51226630..eef976c8 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object-streaming.go @@ -22,7 +22,6 @@ import ( "context" "encoding/base64" "fmt" - "hash/crc32" "io" "net/http" "net/url" @@ -109,13 +108,15 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN if err != nil { return UploadInfo{}, err } - + if opts.Checksum.IsSet() { + opts.AutoChecksum = opts.Checksum + } withChecksum := c.trailingHeaderSupport if withChecksum { if opts.UserMetadata == nil { opts.UserMetadata = make(map[string]string, 1) } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() } // Initiate a new multipart upload. uploadID, err := c.newUploadID(ctx, bucketName, objectName, opts) @@ -195,10 +196,10 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN sectionReader := newHook(io.NewSectionReader(reader, readOffset, partSize), opts.Progress) trailer := make(http.Header, 1) if withChecksum { - crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) - trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) + crc := opts.AutoChecksum.Hasher() + trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil))) sectionReader = newHashReaderWrapper(sectionReader, crc, func(hash []byte) { - trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) + trailer.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(hash)) }) } @@ -271,17 +272,18 @@ func (c *Client) putObjectMultipartStreamFromReadAt(ctx context.Context, bucketN opts = PutObjectOptions{ ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, } if withChecksum { // Add hash of hashes. 
- crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + crc := opts.AutoChecksum.Hasher() for _, part := range complMultipartUpload.Parts { - cs, err := base64.StdEncoding.DecodeString(part.ChecksumCRC32C) + cs, err := base64.StdEncoding.DecodeString(part.Checksum(opts.AutoChecksum)) if err == nil { crc.Write(cs) } } - opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} } uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) @@ -304,11 +306,16 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b return UploadInfo{}, err } + if opts.Checksum.IsSet() { + opts.AutoChecksum = opts.Checksum + opts.SendContentMd5 = false + } + if !opts.SendContentMd5 { if opts.UserMetadata == nil { opts.UserMetadata = make(map[string]string, 1) } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() } // Calculate the optimal parts info for a given size. @@ -337,7 +344,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b // CRC32C is ~50% faster on AMD64 @ 30GB/s var crcBytes []byte customHeader := make(http.Header) - crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + crc := opts.AutoChecksum.Hasher() md5Hash := c.md5Hasher() defer md5Hash.Close() @@ -381,7 +388,7 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b crc.Reset() crc.Write(buf[:length]) cSum := crc.Sum(nil) - customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) + customHeader.Set(opts.AutoChecksum.KeyCapitalized(), base64.StdEncoding.EncodeToString(cSum)) crcBytes = append(crcBytes, cSum...) } @@ -433,12 +440,13 @@ func (c *Client) putObjectMultipartStreamOptionalChecksum(ctx context.Context, b opts = PutObjectOptions{ ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, } if len(crcBytes) > 0 { // Add hash of hashes. crc.Reset() crc.Write(crcBytes) - opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} } uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { @@ -462,12 +470,15 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam if err = s3utils.CheckValidObjectName(objectName); err != nil { return UploadInfo{}, err } - + if opts.Checksum.IsSet() { + opts.SendContentMd5 = false + opts.AutoChecksum = opts.Checksum + } if !opts.SendContentMd5 { if opts.UserMetadata == nil { opts.UserMetadata = make(map[string]string, 1) } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() } // Cancel all when an error occurs. @@ -500,7 +511,7 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam // Create checksums // CRC32C is ~50% faster on AMD64 @ 30GB/s var crcBytes []byte - crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + crc := opts.AutoChecksum.Hasher() // Total data read and written to server. should be equal to 'size' at the end of the call. 
var totalUploadedSize int64 @@ -554,11 +565,11 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam // Calculate md5sum. customHeader := make(http.Header) if !opts.SendContentMd5 { - // Add CRC32C instead. + // Add Checksum instead. crc.Reset() crc.Write(buf[:length]) cSum := crc.Sum(nil) - customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) + customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) crcBytes = append(crcBytes, cSum...) } @@ -639,12 +650,13 @@ func (c *Client) putObjectMultipartStreamParallel(ctx context.Context, bucketNam opts = PutObjectOptions{ ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, } if len(crcBytes) > 0 { // Add hash of hashes. crc.Reset() crc.Write(crcBytes) - opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} } uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { @@ -675,6 +687,9 @@ func (c *Client) putObject(ctx context.Context, bucketName, objectName string, r if opts.SendContentMd5 && s3utils.IsGoogleEndpoint(*c.endpointURL) && size < 0 { return UploadInfo{}, errInvalidArgument("MD5Sum cannot be calculated with size '-1'") } + if opts.Checksum.IsSet() { + opts.SendContentMd5 = false + } var readSeeker io.Seeker if size > 0 { @@ -744,17 +759,6 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, // Set headers. customHeader := opts.Header() - // Add CRC when client supports it, MD5 is not set, not Google and we don't add SHA256 to chunks. - addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure) - - if addCrc { - // If user has added checksums, don't add them ourselves. - for k := range opts.UserMetadata { - if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") { - addCrc = false - } - } - } // Populate request metadata. reqMetadata := requestMetadata{ bucketName: bucketName, @@ -765,8 +769,24 @@ func (c *Client) putObjectDo(ctx context.Context, bucketName, objectName string, contentMD5Base64: md5Base64, contentSHA256Hex: sha256Hex, streamSha256: !opts.DisableContentSha256, - addCrc: addCrc, } + // Add CRC when client supports it, MD5 is not set, not Google and we don't add SHA256 to chunks. + addCrc := c.trailingHeaderSupport && md5Base64 == "" && !s3utils.IsGoogleEndpoint(*c.endpointURL) && (opts.DisableContentSha256 || c.secure) + if opts.Checksum.IsSet() { + reqMetadata.addCrc = &opts.Checksum + } else if addCrc { + // If user has added checksums, don't add them ourselves. 
+ for k := range opts.UserMetadata { + if strings.HasPrefix(strings.ToLower(k), "x-amz-checksum-") { + addCrc = false + } + } + if addCrc { + opts.AutoChecksum.SetDefault(ChecksumCRC32C) + reqMetadata.addCrc = &opts.AutoChecksum + } + } + if opts.Internal.SourceVersionID != "" { if opts.Internal.SourceVersionID != nullVersionID { if _, err := uuid.Parse(opts.Internal.SourceVersionID); err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/api-put-object.go b/vendor/github.com/minio/minio-go/v7/api-put-object.go index 6ccb5815..d769648a 100644 --- a/vendor/github.com/minio/minio-go/v7/api-put-object.go +++ b/vendor/github.com/minio/minio-go/v7/api-put-object.go @@ -23,7 +23,6 @@ import ( "encoding/base64" "errors" "fmt" - "hash/crc32" "io" "net/http" "sort" @@ -90,6 +89,18 @@ type PutObjectOptions struct { DisableContentSha256 bool DisableMultipart bool + // AutoChecksum is the type of checksum that will be added if no other checksum is added, + // like MD5 or SHA256 streaming checksum, and it is feasible for the upload type. + // If none is specified CRC32C is used, since it is generally the fastest. + AutoChecksum ChecksumType + + // Checksum will force a checksum of the specific type. + // This requires that the client was created with "TrailingHeaders:true" option, + // and that the destination server supports it. + // Unavailable with V2 signatures & Google endpoints. + // This will disable content MD5 checksums if set. + Checksum ChecksumType + // ConcurrentStreamParts will create NumThreads buffers of PartSize bytes, // fill them serially and upload them in parallel. // This can be used for faster uploads on non-seekable or slow-to-seek input. @@ -236,7 +247,7 @@ func (opts PutObjectOptions) Header() (header http.Header) { } // validate() checks if the UserMetadata map has standard headers or and raises an error if so. -func (opts PutObjectOptions) validate() (err error) { +func (opts PutObjectOptions) validate(c *Client) (err error) { for k, v := range opts.UserMetadata { if !httpguts.ValidHeaderFieldName(k) || isStandardHeader(k) || isSSEHeader(k) || isStorageClassHeader(k) || isMinioHeader(k) { return errInvalidArgument(k + " unsupported user defined metadata name") @@ -251,6 +262,17 @@ func (opts PutObjectOptions) validate() (err error) { if opts.LegalHold != "" && !opts.LegalHold.IsValid() { return errInvalidArgument(opts.LegalHold.String() + " unsupported legal-hold status") } + if opts.Checksum.IsSet() { + switch { + case !c.trailingHeaderSupport: + return errInvalidArgument("Checksum requires Client with TrailingHeaders enabled") + case c.overrideSignerType.IsV2(): + return errInvalidArgument("Checksum cannot be used with v2 signatures") + case s3utils.IsGoogleEndpoint(*c.endpointURL): + return errInvalidArgument("Checksum cannot be used with GCS endpoints") + } + } + return nil } @@ -287,7 +309,7 @@ func (c *Client) PutObject(ctx context.Context, bucketName, objectName string, r return UploadInfo{}, errors.New("object size must be provided with disable multipart upload") } - err = opts.validate() + err = opts.validate(c) if err != nil { return UploadInfo{}, err } @@ -300,6 +322,7 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str if size > int64(maxMultipartPutObjectSize) { return UploadInfo{}, errEntityTooLarge(size, maxMultipartPutObjectSize, bucketName, objectName) } + opts.AutoChecksum.SetDefault(ChecksumCRC32C) // NOTE: Streaming signature is not supported by GCS. 
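
The new Checksum and AutoChecksum options above change how trailing checksums are selected for uploads: AutoChecksum defaults to CRC32C, while Checksum forces a specific type and requires a client created with TrailingHeaders enabled (per the validate() checks in this hunk). A hedged usage sketch; endpoint, credentials, bucket and object names are placeholders:

package main

import (
	"bytes"
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// TrailingHeaders must be enabled for the forced Checksum option.
	client, err := minio.New("play.min.io", &minio.Options{
		Creds:           credentials.NewStaticV4("ACCESS", "SECRET", ""),
		Secure:          true,
		TrailingHeaders: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	data := []byte("hello checksums")
	info, err := client.PutObject(context.Background(), "my-bucket", "my-object",
		bytes.NewReader(data), int64(len(data)),
		minio.PutObjectOptions{
			// Force a SHA-256 trailing checksum instead of the CRC32C
			// default that AutoChecksum would otherwise fall back to.
			Checksum: minio.ChecksumSHA256,
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded, checksum:", info.ChecksumSHA256)
}
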
if s3utils.IsGoogleEndpoint(*c.endpointURL) { @@ -328,7 +351,7 @@ func (c *Client) putObjectCommon(ctx context.Context, bucketName, objectName str return c.putObjectMultipartStreamNoLength(ctx, bucketName, objectName, reader, opts) } - if size < int64(partSize) || opts.DisableMultipart { + if size <= int64(partSize) || opts.DisableMultipart { return c.putObject(ctx, bucketName, objectName, reader, size, opts) } @@ -357,11 +380,15 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam return UploadInfo{}, err } + if opts.Checksum.IsSet() { + opts.SendContentMd5 = false + opts.AutoChecksum = opts.Checksum + } if !opts.SendContentMd5 { if opts.UserMetadata == nil { opts.UserMetadata = make(map[string]string, 1) } - opts.UserMetadata["X-Amz-Checksum-Algorithm"] = "CRC32C" + opts.UserMetadata["X-Amz-Checksum-Algorithm"] = opts.AutoChecksum.String() } // Initiate a new multipart upload. @@ -390,7 +417,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam // CRC32C is ~50% faster on AMD64 @ 30GB/s var crcBytes []byte customHeader := make(http.Header) - crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + crc := opts.AutoChecksum.Hasher() for partNumber <= totalPartsCount { length, rerr := readFull(reader, buf) @@ -413,7 +440,7 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam crc.Reset() crc.Write(buf[:length]) cSum := crc.Sum(nil) - customHeader.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(cSum)) + customHeader.Set(opts.AutoChecksum.Key(), base64.StdEncoding.EncodeToString(cSum)) crcBytes = append(crcBytes, cSum...) } @@ -466,12 +493,13 @@ func (c *Client) putObjectMultipartStreamNoLength(ctx context.Context, bucketNam opts = PutObjectOptions{ ServerSideEncryption: opts.ServerSideEncryption, + AutoChecksum: opts.AutoChecksum, } if len(crcBytes) > 0 { // Add hash of hashes. crc.Reset() crc.Write(crcBytes) - opts.UserMetadata = map[string]string{"X-Amz-Checksum-Crc32c": base64.StdEncoding.EncodeToString(crc.Sum(nil))} + opts.UserMetadata = map[string]string{opts.AutoChecksum.KeyCapitalized(): base64.StdEncoding.EncodeToString(crc.Sum(nil))} } uploadInfo, err := c.completeMultipartUpload(ctx, bucketName, objectName, uploadID, complMultipartUpload, opts) if err != nil { diff --git a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go index eb4da414..6b6559bf 100644 --- a/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go +++ b/vendor/github.com/minio/minio-go/v7/api-putobject-snowball.go @@ -107,7 +107,7 @@ type readSeekCloser interface { // Total size should be < 5TB. // This function blocks until 'objs' is closed and the content has been uploaded. func (c Client) PutObjectsSnowball(ctx context.Context, bucketName string, opts SnowballOptions, objs <-chan SnowballObject) (err error) { - err = opts.Opts.validate() + err = opts.Opts.validate(&c) if err != nil { return err } diff --git a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go index 1527b746..790606c5 100644 --- a/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go +++ b/vendor/github.com/minio/minio-go/v7/api-s3-datatypes.go @@ -340,6 +340,22 @@ type CompletePart struct { ChecksumSHA256 string `xml:"ChecksumSHA256,omitempty"` } +// Checksum will return the checksum for the given type. +// Will return the empty string if not set. 
+func (c CompletePart) Checksum(t ChecksumType) string { + switch { + case t.Is(ChecksumCRC32C): + return c.ChecksumCRC32C + case t.Is(ChecksumCRC32): + return c.ChecksumCRC32 + case t.Is(ChecksumSHA1): + return c.ChecksumSHA1 + case t.Is(ChecksumSHA256): + return c.ChecksumSHA256 + } + return "" +} + // completeMultipartUpload container for completing multipart upload. type completeMultipartUpload struct { XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload" json:"-"` diff --git a/vendor/github.com/minio/minio-go/v7/api.go b/vendor/github.com/minio/minio-go/v7/api.go index 13c493d0..1d6b6650 100644 --- a/vendor/github.com/minio/minio-go/v7/api.go +++ b/vendor/github.com/minio/minio-go/v7/api.go @@ -23,7 +23,6 @@ import ( "encoding/base64" "errors" "fmt" - "hash/crc32" "io" "math/rand" "net" @@ -129,7 +128,7 @@ type Options struct { // Global constants. const ( libraryName = "minio-go" - libraryVersion = "v7.0.75" + libraryVersion = "v7.0.77" ) // User Agent should always following the below style. @@ -471,7 +470,7 @@ type requestMetadata struct { contentMD5Base64 string // carries base64 encoded md5sum contentSHA256Hex string // carries hex encoded sha256sum streamSha256 bool - addCrc bool + addCrc *ChecksumType trailer http.Header // (http.Request).Trailer. Requires v4 signature. } @@ -616,16 +615,16 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ } } - if metadata.addCrc && metadata.contentLength > 0 { + if metadata.addCrc != nil && metadata.contentLength > 0 { if metadata.trailer == nil { metadata.trailer = make(http.Header, 1) } - crc := crc32.New(crc32.MakeTable(crc32.Castagnoli)) + crc := metadata.addCrc.Hasher() metadata.contentBody = newHashReaderWrapper(metadata.contentBody, crc, func(hash []byte) { // Update trailer when done. - metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(hash)) + metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(hash)) }) - metadata.trailer.Set("x-amz-checksum-crc32c", base64.StdEncoding.EncodeToString(crc.Sum(nil))) + metadata.trailer.Set(metadata.addCrc.Key(), base64.StdEncoding.EncodeToString(crc.Sum(nil))) } // Create cancel context to control 'newRetryTimer' go routine. @@ -662,7 +661,7 @@ func (c *Client) executeMethod(ctx context.Context, method string, metadata requ // Initiate the request. res, err = c.do(req) if err != nil { - if isRequestErrorRetryable(err) { + if isRequestErrorRetryable(ctx, err) { // Retry the request continue } diff --git a/vendor/github.com/minio/minio-go/v7/checksum.go b/vendor/github.com/minio/minio-go/v7/checksum.go index a1f6f434..7eb1bf25 100644 --- a/vendor/github.com/minio/minio-go/v7/checksum.go +++ b/vendor/github.com/minio/minio-go/v7/checksum.go @@ -25,6 +25,7 @@ import ( "hash/crc32" "io" "math/bits" + "net/http" ) // ChecksumType contains information about the checksum type. @@ -78,6 +79,11 @@ func (c ChecksumType) Key() string { return "" } +// KeyCapitalized returns the capitalized key as used in HTTP headers. +func (c ChecksumType) KeyCapitalized() string { + return http.CanonicalHeaderKey(c.Key()) +} + // RawByteLen returns the size of the un-encoded checksum. func (c ChecksumType) RawByteLen() int { switch c & checksumMask { @@ -112,6 +118,13 @@ func (c ChecksumType) IsSet() bool { return bits.OnesCount32(uint32(c)) == 1 } +// SetDefault will set the checksum if not already set. 
+func (c *ChecksumType) SetDefault(t ChecksumType) { + if !c.IsSet() { + *c = t + } +} + // String returns the type as a string. // CRC32, CRC32C, SHA1, and SHA256 for valid values. // Empty string for unset and "" if not valid. diff --git a/vendor/github.com/minio/minio-go/v7/functional_tests.go b/vendor/github.com/minio/minio-go/v7/functional_tests.go index 871034bc..780dc899 100644 --- a/vendor/github.com/minio/minio-go/v7/functional_tests.go +++ b/vendor/github.com/minio/minio-go/v7/functional_tests.go @@ -24,7 +24,6 @@ import ( "archive/zip" "bytes" "context" - "crypto/sha1" "crypto/sha256" "encoding/base64" "errors" @@ -84,7 +83,7 @@ func createHTTPTransport() (transport *http.Transport) { return nil } - if mustParseBool(os.Getenv(skipCERTValidation)) { + if mustParseBool(os.Getenv(enableHTTPS)) && mustParseBool(os.Getenv(skipCERTValidation)) { transport.TLSClientConfig.InsecureSkipVerify = true } @@ -166,7 +165,7 @@ func logError(testName, function string, args map[string]interface{}, startTime } } -// log failed test runs +// Log failed test runs, do not call this directly, use logError instead, as that correctly stops the test run func logFailure(testName, function string, args map[string]interface{}, startTime time.Time, alert, message string, err error) { l := baseLogger(testName, function, args, startTime).With( "status", "FAIL", @@ -2199,22 +2198,15 @@ func testPutObjectWithChecksums() { defer cleanupBucket(bucketName, c) tests := []struct { - header string - hasher hash.Hash - - // Checksum values - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string + cs minio.ChecksumType }{ - {header: "x-amz-checksum-crc32", hasher: crc32.NewIEEE()}, - {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli))}, - {header: "x-amz-checksum-sha1", hasher: sha1.New()}, - {header: "x-amz-checksum-sha256", hasher: sha256.New()}, + {cs: minio.ChecksumCRC32C}, + {cs: minio.ChecksumCRC32}, + {cs: minio.ChecksumSHA1}, + {cs: minio.ChecksumSHA256}, } - for i, test := range tests { + for _, test := range tests { bufSize := dataFileMap["datafile-10-kB"] // Save the data @@ -2235,29 +2227,27 @@ func testPutObjectWithChecksums() { logError(testName, function, args, startTime, "", "Read failed", err) return } - h := test.hasher + h := test.cs.Hasher() h.Reset() - // Wrong CRC. - meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + + // Test with Wrong CRC. + meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) args["metadata"] = meta args["range"] = "false" + args["checksum"] = test.cs.String() resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ DisableMultipart: true, UserMetadata: meta, }) if err == nil { - if i == 0 && resp.ChecksumCRC32 == "" { - logIgnored(testName, function, args, startTime, "Checksums does not appear to be supported by backend") - return - } - logError(testName, function, args, startTime, "", "PutObject failed", err) + logError(testName, function, args, startTime, "", "PutObject did not fail on wrong CRC", err) return } // Set correct CRC. 
h.Write(b) - meta[test.header] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) reader.Close() resp, err = c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ @@ -2344,7 +2334,7 @@ func testPutObjectWithChecksums() { } // Test PutObject with custom checksums. -func testPutMultipartObjectWithChecksums() { +func testPutObjectWithTrailingChecksums() { // initialize logging params startTime := time.Now() testName := getFuncName() @@ -2352,7 +2342,7 @@ func testPutMultipartObjectWithChecksums() { args := map[string]interface{}{ "bucketName": "", "objectName": "", - "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress}", + "opts": "minio.PutObjectOptions{UserMetadata: metadata, Progress: progress, TrailChecksum: xxx}", } if !isFullMode() { @@ -2366,9 +2356,201 @@ func testPutMultipartObjectWithChecksums() { // Instantiate new minio client object. c, err := minio.New(os.Getenv(serverEndpoint), &minio.Options{ - Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), - Transport: createHTTPTransport(), - Secure: mustParseBool(os.Getenv(enableHTTPS)), + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + TrailingHeaders: true, + }) + if err != nil { + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) + return + } + + // Enable tracing, write to stderr. + // c.TraceOn(os.Stderr) + + // Set user agent. + c.SetAppInfo("MinIO-go-FunctionalTest", appVersion) + + // Generate a new random bucket name. + bucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") + args["bucketName"] = bucketName + + // Make a new bucket. + err = c.MakeBucket(context.Background(), bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) + if err != nil { + logError(testName, function, args, startTime, "", "Make bucket failed", err) + return + } + + defer cleanupBucket(bucketName, c) + tests := []struct { + cs minio.ChecksumType + }{ + {cs: minio.ChecksumCRC32C}, + {cs: minio.ChecksumCRC32}, + {cs: minio.ChecksumSHA1}, + {cs: minio.ChecksumSHA256}, + } + + for _, test := range tests { + function := "PutObject(bucketName, objectName, reader,size, opts)" + bufSize := dataFileMap["datafile-10-kB"] + + // Save the data + objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") + args["objectName"] = objectName + + cmpChecksum := func(got, want string) { + if want != got { + logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) + return + } + } + + meta := map[string]string{} + reader := getDataReader("datafile-10-kB") + b, err := io.ReadAll(reader) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + h := test.cs.Hasher() + h.Reset() + + // Test with Wrong CRC. 
+ args["metadata"] = meta + args["range"] = "false" + args["checksum"] = test.cs.String() + + resp, err := c.PutObject(context.Background(), bucketName, objectName, bytes.NewReader(b), int64(bufSize), minio.PutObjectOptions{ + DisableMultipart: true, + DisableContentSha256: true, + UserMetadata: meta, + Checksum: test.cs, + }) + if err != nil { + logError(testName, function, args, startTime, "", "PutObject failed", err) + return + } + + h.Write(b) + meta[test.cs.Key()] = base64.StdEncoding.EncodeToString(h.Sum(nil)) + + cmpChecksum(resp.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(resp.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(resp.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(resp.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + + // Read the data back + gopts := minio.GetObjectOptions{Checksum: true} + + function = "GetObject(...)" + r, err := c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + st, err := r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + cmpChecksum(st.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(st.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(st.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(st.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + + if st.Size != int64(bufSize) { + logError(testName, function, args, startTime, "", "Number of bytes returned by PutObject does not match GetObject, expected "+string(bufSize)+" got "+string(st.Size), err) + return + } + + if err := r.Close(); err != nil { + logError(testName, function, args, startTime, "", "Object Close failed", err) + return + } + if err := r.Close(); err == nil { + logError(testName, function, args, startTime, "", "Object already closed, should respond with error", err) + return + } + + function = "GetObject( Range...)" + args["range"] = "true" + err = gopts.SetRange(100, 1000) + if err != nil { + logError(testName, function, args, startTime, "", "SetRange failed", err) + return + } + r, err = c.GetObject(context.Background(), bucketName, objectName, gopts) + if err != nil { + logError(testName, function, args, startTime, "", "GetObject failed", err) + return + } + + b, err = io.ReadAll(r) + if err != nil { + logError(testName, function, args, startTime, "", "Read failed", err) + return + } + st, err = r.Stat() + if err != nil { + logError(testName, function, args, startTime, "", "Stat failed", err) + return + } + + // Range requests should return empty checksums... + cmpChecksum(st.ChecksumSHA256, "") + cmpChecksum(st.ChecksumSHA1, "") + cmpChecksum(st.ChecksumCRC32, "") + cmpChecksum(st.ChecksumCRC32C, "") + + function = "GetObjectAttributes(...)" + s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err) + return + } + cmpChecksum(s.Checksum.ChecksumSHA256, meta["x-amz-checksum-sha256"]) + cmpChecksum(s.Checksum.ChecksumSHA1, meta["x-amz-checksum-sha1"]) + cmpChecksum(s.Checksum.ChecksumCRC32, meta["x-amz-checksum-crc32"]) + cmpChecksum(s.Checksum.ChecksumCRC32C, meta["x-amz-checksum-crc32c"]) + + delete(args, "range") + delete(args, "metadata") + } + + logSuccess(testName, function, args, startTime) +} + +// Test PutObject with custom checksums. 
+func testPutMultipartObjectWithChecksums(trailing bool) { + // initialize logging params + startTime := time.Now() + testName := getFuncName() + function := "PutObject(bucketName, objectName, reader,size, opts)" + args := map[string]interface{}{ + "bucketName": "", + "objectName": "", + "opts": fmt.Sprintf("minio.PutObjectOptions{UserMetadata: metadata, Progress: progress Checksum: %v}", trailing), + } + + if !isFullMode() { + logIgnored(testName, function, args, startTime, "Skipping functional tests for short/quick runs") + return + } + + // Seed random based on current time. + rand.Seed(time.Now().Unix()) + + // Instantiate new minio client object. + c, err := minio.New(os.Getenv(serverEndpoint), + &minio.Options{ + Creds: credentials.NewStaticV4(os.Getenv(accessKey), os.Getenv(secretKey), ""), + Transport: createHTTPTransport(), + Secure: mustParseBool(os.Getenv(enableHTTPS)), + TrailingHeaders: trailing, }) if err != nil { logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) @@ -2419,17 +2601,12 @@ func testPutMultipartObjectWithChecksums() { } defer cleanupBucket(bucketName, c) tests := []struct { - header string - hasher hash.Hash - - // Checksum values - ChecksumCRC32 string - ChecksumCRC32C string - ChecksumSHA1 string - ChecksumSHA256 string + cs minio.ChecksumType }{ - // Currently there is no way to override the checksum type. - {header: "x-amz-checksum-crc32c", hasher: crc32.New(crc32.MakeTable(crc32.Castagnoli)), ChecksumCRC32C: "OpEx0Q==-13"}, + {cs: minio.ChecksumCRC32C}, + {cs: minio.ChecksumCRC32}, + {cs: minio.ChecksumSHA1}, + {cs: minio.ChecksumSHA256}, } for _, test := range tests { @@ -2438,11 +2615,12 @@ func testPutMultipartObjectWithChecksums() { // Save the data objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "") args["objectName"] = objectName + args["checksum"] = test.cs.String() cmpChecksum := func(got, want string) { if want != got { - // logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) - fmt.Printf("want %s, got %s\n", want, got) + logError(testName, function, args, startTime, "", "checksum mismatch", fmt.Errorf("want %s, got %s", want, got)) + //fmt.Printf("want %s, got %s\n", want, got) return } } @@ -2455,26 +2633,57 @@ func testPutMultipartObjectWithChecksums() { return } reader.Close() - h := test.hasher + h := test.cs.Hasher() h.Reset() - test.ChecksumCRC32C = hashMultiPart(b, partSize, test.hasher) + want := hashMultiPart(b, partSize, test.cs.Hasher()) + var cs minio.ChecksumType + rd := io.Reader(io.NopCloser(bytes.NewReader(b))) + if trailing { + cs = test.cs + rd = bytes.NewReader(b) + } // Set correct CRC. 
- - resp, err := c.PutObject(context.Background(), bucketName, objectName, io.NopCloser(bytes.NewReader(b)), int64(bufSize), minio.PutObjectOptions{ + resp, err := c.PutObject(context.Background(), bucketName, objectName, rd, int64(bufSize), minio.PutObjectOptions{ DisableContentSha256: true, DisableMultipart: false, UserMetadata: nil, PartSize: partSize, + AutoChecksum: test.cs, + Checksum: cs, }) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) return } - cmpChecksum(resp.ChecksumSHA256, test.ChecksumSHA256) - cmpChecksum(resp.ChecksumSHA1, test.ChecksumSHA1) - cmpChecksum(resp.ChecksumCRC32, test.ChecksumCRC32) - cmpChecksum(resp.ChecksumCRC32C, test.ChecksumCRC32C) + + switch test.cs { + case minio.ChecksumCRC32C: + cmpChecksum(resp.ChecksumCRC32C, want) + case minio.ChecksumCRC32: + cmpChecksum(resp.ChecksumCRC32, want) + case minio.ChecksumSHA1: + cmpChecksum(resp.ChecksumSHA1, want) + case minio.ChecksumSHA256: + cmpChecksum(resp.ChecksumSHA256, want) + } + + s, err := c.GetObjectAttributes(context.Background(), bucketName, objectName, minio.ObjectAttributesOptions{}) + if err != nil { + logError(testName, function, args, startTime, "", "GetObjectAttributes failed", err) + return + } + want = want[:strings.IndexByte(want, '-')] + switch test.cs { + case minio.ChecksumCRC32C: + cmpChecksum(s.Checksum.ChecksumCRC32C, want) + case minio.ChecksumCRC32: + cmpChecksum(s.Checksum.ChecksumCRC32, want) + case minio.ChecksumSHA1: + cmpChecksum(s.Checksum.ChecksumSHA1, want) + case minio.ChecksumSHA256: + cmpChecksum(s.Checksum.ChecksumSHA256, want) + } // Read the data back gopts := minio.GetObjectOptions{Checksum: true} @@ -2496,18 +2705,17 @@ func testPutMultipartObjectWithChecksums() { // Test part 2 checksum... 
h.Reset() h.Write(b[partSize : 2*partSize]) - got := base64.StdEncoding.EncodeToString(h.Sum(nil)) - if test.ChecksumSHA256 != "" { - cmpChecksum(st.ChecksumSHA256, got) - } - if test.ChecksumSHA1 != "" { - cmpChecksum(st.ChecksumSHA1, got) - } - if test.ChecksumCRC32 != "" { - cmpChecksum(st.ChecksumCRC32, got) - } - if test.ChecksumCRC32C != "" { - cmpChecksum(st.ChecksumCRC32C, got) + want = base64.StdEncoding.EncodeToString(h.Sum(nil)) + + switch test.cs { + case minio.ChecksumCRC32C: + cmpChecksum(st.ChecksumCRC32C, want) + case minio.ChecksumCRC32: + cmpChecksum(st.ChecksumCRC32, want) + case minio.ChecksumSHA1: + cmpChecksum(st.ChecksumSHA1, want) + case minio.ChecksumSHA256: + cmpChecksum(st.ChecksumSHA256, want) } delete(args, "metadata") @@ -2972,6 +3180,7 @@ func testGetObjectAttributes() { testFiles[i].UploadInfo, err = c.PutObject(context.Background(), v.Bucket, v.Object, reader, int64(bufSize), minio.PutObjectOptions{ ContentType: v.ContentType, SendContentMd5: v.SendContentMd5, + Checksum: minio.ChecksumCRC32C, }) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) @@ -3053,7 +3262,7 @@ func testGetObjectAttributes() { test: objectAttributesTestOptions{ TestFileName: "file1", StorageClass: "STANDARD", - HasFullChecksum: false, + HasFullChecksum: true, }, } @@ -3142,9 +3351,10 @@ func testGetObjectAttributesSSECEncryption() { info, err := c.PutObject(context.Background(), bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ ContentType: "content/custom", - SendContentMd5: true, + SendContentMd5: false, ServerSideEncryption: sse, PartSize: uint64(bufSize) / 2, + Checksum: minio.ChecksumCRC32C, }) if err != nil { logError(testName, function, args, startTime, "", "PutObject failed", err) @@ -3164,9 +3374,9 @@ func testGetObjectAttributesSSECEncryption() { ETag: info.ETag, NumberOfParts: 2, ObjectSize: int(info.Size), - HasFullChecksum: false, + HasFullChecksum: true, HasParts: true, - HasPartChecksums: false, + HasPartChecksums: true, }) if err != nil { logError(testName, function, args, startTime, "", "Validating GetObjectsAttributes response failed", err) @@ -5584,18 +5794,12 @@ func testPresignedPostPolicy() { } writer.Close() - transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) - if err != nil { - logError(testName, function, args, startTime, "", "DefaultTransport failed", err) - return - } - httpClient := &http.Client{ // Setting a sensible time out of 30secs to wait for response // headers. Request is pro-actively canceled after 30secs // with no response. Timeout: 30 * time.Second, - Transport: transport, + Transport: createHTTPTransport(), } args["url"] = presignedPostPolicyURL.String() @@ -7509,7 +7713,7 @@ func testFunctional() { return } - transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) + transport := createHTTPTransport() if err != nil { logError(testName, function, args, startTime, "", "DefaultTransport failed", err) return @@ -12440,18 +12644,12 @@ func testFunctionalV2() { return } - transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) - if err != nil { - logError(testName, function, args, startTime, "", "DefaultTransport failed", err) - return - } - httpClient := &http.Client{ // Setting a sensible time out of 30secs to wait for response // headers. Request is pro-actively canceled after 30secs // with no response. 
Timeout: 30 * time.Second, - Transport: transport, + Transport: createHTTPTransport(), } req, err := http.NewRequest(http.MethodHead, presignedHeadURL.String(), nil) @@ -13500,7 +13698,7 @@ func testCors() { Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { - logFailure(testName, function, args, startTime, "", "MinIO client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -13516,7 +13714,7 @@ func testCors() { bucketName = randString(60, rand.NewSource(time.Now().UnixNano()), "minio-go-test-") err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { - logFailure(testName, function, args, startTime, "", "MakeBucket failed", err) + logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } } @@ -13526,7 +13724,7 @@ func testCors() { publicPolicy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:*"],"Resource":["arn:aws:s3:::` + bucketName + `", "arn:aws:s3:::` + bucketName + `/*"]}]}` err = c.SetBucketPolicy(ctx, bucketName, publicPolicy) if err != nil { - logFailure(testName, function, args, startTime, "", "SetBucketPolicy failed", err) + logError(testName, function, args, startTime, "", "SetBucketPolicy failed", err) return } @@ -13540,20 +13738,15 @@ func testCors() { _, err = c.PutObject(ctx, bucketName, objectName, reader, int64(bufSize), minio.PutObjectOptions{ContentType: "binary/octet-stream"}) if err != nil { - logFailure(testName, function, args, startTime, "", "PutObject call failed", err) + logError(testName, function, args, startTime, "", "PutObject call failed", err) return } bucketURL := c.EndpointURL().String() + "/" + bucketName + "/" objectURL := bucketURL + objectName - transport, err := minio.DefaultTransport(mustParseBool(os.Getenv(enableHTTPS))) - if err != nil { - logFailure(testName, function, args, startTime, "", "DefaultTransport failed", err) - return - } httpClient := &http.Client{ Timeout: 30 * time.Second, - Transport: transport, + Transport: createHTTPTransport(), } errStrAccessForbidden := `AccessForbiddenCORSResponse: This CORS request is not allowed. 
This is usually because the evalution of Origin, request method / Access-Control-Request-Method or Access-Control-Request-Headers are not whitelisted` @@ -14156,7 +14349,7 @@ func testCors() { } err = c.SetBucketCors(ctx, bucketName, corsConfig) if err != nil { - logFailure(testName, function, args, startTime, "", "SetBucketCors failed to apply", err) + logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err) return } } @@ -14165,7 +14358,7 @@ func testCors() { if test.method != "" && test.url != "" { req, err := http.NewRequestWithContext(ctx, test.method, test.url, nil) if err != nil { - logFailure(testName, function, args, startTime, "", "HTTP request creation failed", err) + logError(testName, function, args, startTime, "", "HTTP request creation failed", err) return } req.Header.Set("User-Agent", "MinIO-go-FunctionalTest/"+appVersion) @@ -14175,7 +14368,7 @@ func testCors() { } resp, err := httpClient.Do(req) if err != nil { - logFailure(testName, function, args, startTime, "", "HTTP request failed", err) + logError(testName, function, args, startTime, "", "HTTP request failed", err) return } defer resp.Body.Close() @@ -14183,7 +14376,7 @@ func testCors() { // Check returned status code if resp.StatusCode != test.wantStatus { errStr := fmt.Sprintf(" incorrect status code in response, want: %d, got: %d", test.wantStatus, resp.StatusCode) - logFailure(testName, function, args, startTime, "", errStr, nil) + logError(testName, function, args, startTime, "", errStr, nil) return } @@ -14191,12 +14384,12 @@ func testCors() { if test.wantBodyContains != "" { body, err := io.ReadAll(resp.Body) if err != nil { - logFailure(testName, function, args, startTime, "", "Failed to read response body", err) + logError(testName, function, args, startTime, "", "Failed to read response body", err) return } if !strings.Contains(string(body), test.wantBodyContains) { errStr := fmt.Sprintf(" incorrect body in response, want: %s, in got: %s", test.wantBodyContains, string(body)) - logFailure(testName, function, args, startTime, "", errStr, nil) + logError(testName, function, args, startTime, "", errStr, nil) return } } @@ -14213,7 +14406,7 @@ func testCors() { gotVal = strings.ReplaceAll(gotVal, " ", "") if gotVal != v { errStr := fmt.Sprintf(" incorrect header in response, want: %s: '%s', got: '%s'", k, v, gotVal) - logFailure(testName, function, args, startTime, "", errStr, nil) + logError(testName, function, args, startTime, "", errStr, nil) return } } @@ -14241,7 +14434,7 @@ func testCorsSetGetDelete() { Secure: mustParseBool(os.Getenv(enableHTTPS)), }) if err != nil { - logFailure(testName, function, args, startTime, "", "MinIO client object creation failed", err) + logError(testName, function, args, startTime, "", "MinIO client object creation failed", err) return } @@ -14258,7 +14451,7 @@ func testCorsSetGetDelete() { // Make a new bucket. 
err = c.MakeBucket(ctx, bucketName, minio.MakeBucketOptions{Region: "us-east-1"}) if err != nil { - logFailure(testName, function, args, startTime, "", "MakeBucket failed", err) + logError(testName, function, args, startTime, "", "MakeBucket failed", err) return } defer cleanupBucket(bucketName, c) @@ -14284,37 +14477,37 @@ func testCorsSetGetDelete() { corsConfig := cors.NewConfig(corsRules) err = c.SetBucketCors(ctx, bucketName, corsConfig) if err != nil { - logFailure(testName, function, args, startTime, "", "SetBucketCors failed to apply", err) + logError(testName, function, args, startTime, "", "SetBucketCors failed to apply", err) return } // Get the rules and check they match what we set gotCorsConfig, err := c.GetBucketCors(ctx, bucketName) if err != nil { - logFailure(testName, function, args, startTime, "", "GetBucketCors failed", err) + logError(testName, function, args, startTime, "", "GetBucketCors failed", err) return } if !reflect.DeepEqual(corsConfig, gotCorsConfig) { msg := fmt.Sprintf("GetBucketCors returned unexpected rules, expected: %+v, got: %+v", corsConfig, gotCorsConfig) - logFailure(testName, function, args, startTime, "", msg, nil) + logError(testName, function, args, startTime, "", msg, nil) return } // Delete the rules err = c.SetBucketCors(ctx, bucketName, nil) if err != nil { - logFailure(testName, function, args, startTime, "", "SetBucketCors failed to delete", err) + logError(testName, function, args, startTime, "", "SetBucketCors failed to delete", err) return } // Get the rules and check they are now empty gotCorsConfig, err = c.GetBucketCors(ctx, bucketName) if err != nil { - logFailure(testName, function, args, startTime, "", "GetBucketCors failed", err) + logError(testName, function, args, startTime, "", "GetBucketCors failed", err) return } if gotCorsConfig != nil { - logFailure(testName, function, args, startTime, "", "GetBucketCors returned unexpected rules", nil) + logError(testName, function, args, startTime, "", "GetBucketCors returned unexpected rules", nil) return } @@ -14747,7 +14940,9 @@ func main() { testCompose10KSourcesV2() testUserMetadataCopyingV2() testPutObjectWithChecksums() - testPutMultipartObjectWithChecksums() + testPutObjectWithTrailingChecksums() + testPutMultipartObjectWithChecksums(false) + testPutMultipartObjectWithChecksums(true) testPutObject0ByteV2() testPutObjectNoLengthV2() testPutObjectsUnknownV2() diff --git a/vendor/github.com/minio/minio-go/v7/post-policy.go b/vendor/github.com/minio/minio-go/v7/post-policy.go index 3f023704..19687e02 100644 --- a/vendor/github.com/minio/minio-go/v7/post-policy.go +++ b/vendor/github.com/minio/minio-go/v7/post-policy.go @@ -301,6 +301,25 @@ func (p *PostPolicy) SetUserMetadata(key, value string) error { return nil } +// SetUserMetadataStartsWith - Set how an user metadata should starts with. +// Can be retrieved through a HEAD request or an event. +func (p *PostPolicy) SetUserMetadataStartsWith(key, value string) error { + if strings.TrimSpace(key) == "" || key == "" { + return errInvalidArgument("Key is empty") + } + headerName := fmt.Sprintf("x-amz-meta-%s", key) + policyCond := policyCondition{ + matchType: "starts-with", + condition: fmt.Sprintf("$%s", headerName), + value: value, + } + if err := p.addNewPolicy(policyCond); err != nil { + return err + } + p.formData[headerName] = value + return nil +} + // SetChecksum sets the checksum of the request. 
func (p *PostPolicy) SetChecksum(c Checksum) { if c.IsSet() { diff --git a/vendor/github.com/minio/minio-go/v7/retry.go b/vendor/github.com/minio/minio-go/v7/retry.go index 5ddcad89..d15eb590 100644 --- a/vendor/github.com/minio/minio-go/v7/retry.go +++ b/vendor/github.com/minio/minio-go/v7/retry.go @@ -129,9 +129,10 @@ func isHTTPStatusRetryable(httpStatusCode int) (ok bool) { } // For now, all http Do() requests are retriable except some well defined errors -func isRequestErrorRetryable(err error) bool { +func isRequestErrorRetryable(ctx context.Context, err error) bool { if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return false + // Retry if internal timeout in the HTTP call. + return ctx.Err() == nil } if ue, ok := err.(*url.Error); ok { e := ue.Unwrap() diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b1..00000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index 9159de03..00000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -script: - - make check diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e7..00000000 --- a/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
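Editorial note on the retry.go hunk above: isRequestErrorRetryable now takes the caller's context so that a context.Canceled or context.DeadlineExceeded raised by an internal, per-attempt timeout is still retried as long as the caller's own context is alive. A minimal sketch of that rule, with all names below chosen for illustration only:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// shouldRetry mirrors the patched check: context errors are only terminal when
// the caller's own context has been cancelled or has expired.
func shouldRetry(parent context.Context, err error) bool {
	if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
		return parent.Err() == nil
	}
	// Other transport errors keep their existing retry classification.
	return true
}

func main() {
	parent := context.Background()

	// Simulate a single attempt that timed out on its own short deadline.
	attempt, cancel := context.WithTimeout(parent, 10*time.Millisecond)
	defer cancel()
	<-attempt.Done()

	fmt.Println(shouldRetry(parent, attempt.Err())) // true: the parent context is still alive
}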
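Also for context, a sketch of the client-side flow exercised by the reworked checksum functional tests earlier in this diff: the client is created with TrailingHeaders enabled, the checksum type is selected per PutObject call, and the stored value can be read back with GetObject and Checksum: true. Endpoint, credentials and bucket names below are placeholders, not values taken from this patch.

package main

import (
	"context"
	"log"
	"os"
	"strings"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	c, err := minio.New(os.Getenv("S3_ENDPOINT"), &minio.Options{
		Creds:           credentials.NewStaticV4(os.Getenv("S3_ACCESS_KEY"), os.Getenv("S3_SECRET_KEY"), ""),
		Secure:          true,
		TrailingHeaders: true, // required so the checksum can be sent as a trailer
	})
	if err != nil {
		log.Fatal(err)
	}

	body := strings.NewReader("hello checksums")
	info, err := c.PutObject(context.Background(), "mybucket", "hello.txt", body, int64(body.Len()),
		minio.PutObjectOptions{
			DisableMultipart: true,
			Checksum:         minio.ChecksumCRC32C, // checksum type is now selectable
		})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("uploaded, CRC32C:", info.ChecksumCRC32C)

	// Read the stored checksum back from object metadata.
	obj, err := c.GetObject(context.Background(), "mybucket", "hello.txt", minio.GetObjectOptions{Checksum: true})
	if err != nil {
		log.Fatal(err)
	}
	defer obj.Close()

	st, err := obj.Stat()
	if err != nil {
		log.Fatal(err)
	}
	log.Println("stored CRC32C:", st.ChecksumCRC32C)
}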
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile deleted file mode 100644 index ce9d7cde..00000000 --- a/vendor/github.com/pkg/errors/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -PKGS := github.com/pkg/errors -SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) -GO := go - -check: test vet gofmt misspell unconvert staticcheck ineffassign unparam - -test: - $(GO) test $(PKGS) - -vet: | test - $(GO) vet $(PKGS) - -staticcheck: - $(GO) get honnef.co/go/tools/cmd/staticcheck - staticcheck -checks all $(PKGS) - -misspell: - $(GO) get github.com/client9/misspell/cmd/misspell - misspell \ - -locale GB \ - -error \ - *.md *.go - -unconvert: - $(GO) get github.com/mdempsky/unconvert - unconvert -v $(PKGS) - -ineffassign: - $(GO) get github.com/gordonklaus/ineffassign - find $(SRCDIRS) -name '*.go' | xargs ineffassign - -pedantic: check errcheck - -unparam: - $(GO) get mvdan.cc/unparam - unparam ./... - -errcheck: - $(GO) get github.com/kisielk/errcheck - errcheck $(PKGS) - -gofmt: - @echo Checking code is gofmted - @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 54dfdcb1..00000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). 
- -## Roadmap - -With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: - -- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) -- 1.0. Final release. - -## Contributing - -Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. - -Before sending a PR, please discuss your change by raising an issue. - -## License - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932eade..00000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 161aea25..00000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,288 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. 
For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d\n", f, f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. -func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withStack) Unwrap() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. 
-func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. -func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withMessage) Unwrap() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go deleted file mode 100644 index be0d10d0..00000000 --- a/vendor/github.com/pkg/errors/go113.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build go1.13 - -package errors - -import ( - stderrors "errors" -) - -// Is reports whether any error in err's chain matches target. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. -func Is(err, target error) bool { return stderrors.Is(err, target) } - -// As finds the first error in err's chain that matches target, and if so, sets -// target to that error value and returns true. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error matches target if the error's concrete value is assignable to the value -// pointed to by target, or if the error has a method As(interface{}) bool such that -// As(target) returns true. In the latter case, the As method is responsible for -// setting target. 
-// -// As will panic if target is not a non-nil pointer to either a type that implements -// error, or to any interface type. As returns false if err is nil. -func As(err error, target interface{}) bool { return stderrors.As(err, target) } - -// Unwrap returns the result of calling the Unwrap method on err, if err's -// type contains an Unwrap method returning error. -// Otherwise, Unwrap returns nil. -func Unwrap(err error) error { - return stderrors.Unwrap(err) -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 779a8348..00000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,177 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strconv" - "strings" -) - -// Frame represents a program counter inside a stack frame. -// For historical reasons if Frame is interpreted as a uintptr -// its value represents the program counter + 1. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. -func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// name returns the name of this function, if known. -func (f Frame) name() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - return fn.Name() -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - io.WriteString(s, f.name()) - io.WriteString(s, "\n\t") - io.WriteString(s, f.file()) - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - io.WriteString(s, strconv.Itoa(f.line())) - case 'n': - io.WriteString(s, funcname(f.name())) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// MarshalText formats a stacktrace Frame as a text string. The output is the -// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. -func (f Frame) MarshalText() ([]byte, error) { - name := f.name() - if name == "unknown" { - return []byte(name), nil - } - return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). -type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. -// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. 
-func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - io.WriteString(s, "\n") - f.Format(s, verb) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - st.formatSlice(s, verb) - } - case 's': - st.formatSlice(s, verb) - } -} - -// formatSlice will format this StackTrace into the given buffer as a slice of -// Frame, only valid when called with '%s' or '%v'. -func (st StackTrace) formatSlice(s fmt.State, verb rune) { - io.WriteString(s, "[") - for i, f := range st { - if i > 0 { - io.WriteString(s, " ") - } - f.Format(s, verb) - } - io.WriteString(s, "]") -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). -func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go index 8d35f2d8..519db348 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -844,9 +844,7 @@ func (h *histogram) Write(out *dto.Metric) error { }} } - // If exemplars are not configured, the cap will be 0. - // So append is not needed in this case. - if cap(h.nativeExemplars.exemplars) > 0 { + if h.nativeExemplars.isEnabled() { h.nativeExemplars.Lock() his.Exemplars = append(his.Exemplars, h.nativeExemplars.exemplars...) h.nativeExemplars.Unlock() @@ -1658,10 +1656,17 @@ func addAndResetCounts(hot, cold *histogramCounts) { type nativeExemplars struct { sync.Mutex - ttl time.Duration + // Time-to-live for exemplars, it is set to -1 if exemplars are disabled, that is NativeHistogramMaxExemplars is below 0. + // The ttl is used on insertion to remove an exemplar that is older than ttl, if present. + ttl time.Duration + exemplars []*dto.Exemplar } +func (n *nativeExemplars) isEnabled() bool { + return n.ttl != -1 +} + func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { if ttl == 0 { ttl = 5 * time.Minute @@ -1673,6 +1678,7 @@ func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { if maxCount < 0 { maxCount = 0 + ttl = -1 } return nativeExemplars{ @@ -1682,20 +1688,18 @@ func makeNativeExemplars(ttl time.Duration, maxCount int) nativeExemplars { } func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { - if cap(n.exemplars) == 0 { + if !n.isEnabled() { return } n.Lock() defer n.Unlock() - // The index where to insert the new exemplar. - var nIdx int = -1 - // When the number of exemplars has not yet exceeded or // is equal to cap(n.exemplars), then // insert the new exemplar directly. 
if len(n.exemplars) < cap(n.exemplars) { + var nIdx int for nIdx = 0; nIdx < len(n.exemplars); nIdx++ { if *e.Value < *n.exemplars[nIdx].Value { break @@ -1705,17 +1709,46 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { return } + if len(n.exemplars) == 1 { + // When the number of exemplars is 1, then + // replace the existing exemplar with the new exemplar. + n.exemplars[0] = e + return + } + // From this point on, the number of exemplars is greater than 1. + // When the number of exemplars exceeds the limit, remove one exemplar. var ( - rIdx int // The index where to remove the old exemplar. + ot = time.Time{} // Oldest timestamp seen. Initial value doesn't matter as we replace it due to otIdx == -1 in the loop. + otIdx = -1 // Index of the exemplar with the oldest timestamp. - ot = time.Now() // Oldest timestamp seen. - otIdx = -1 // Index of the exemplar with the oldest timestamp. + md = -1.0 // Logarithm of the delta of the closest pair of exemplars. - md = -1.0 // Logarithm of the delta of the closest pair of exemplars. - mdIdx = -1 // Index of the older exemplar within the closest pair. - cLog float64 // Logarithm of the current exemplar. - pLog float64 // Logarithm of the previous exemplar. + // The insertion point of the new exemplar in the exemplars slice after insertion. + // This is calculated purely based on the order of the exemplars by value. + // nIdx == len(n.exemplars) means the new exemplar is to be inserted after the end. + nIdx = -1 + + // rIdx is ultimately the index for the exemplar that we are replacing with the new exemplar. + // The aim is to keep a good spread of exemplars by value and not let them bunch up too much. + // It is calculated in 3 steps: + // 1. First we set rIdx to the index of the older exemplar within the closest pair by value. + // That is the following will be true (on log scale): + // either the exemplar pair on index (rIdx-1, rIdx) or (rIdx, rIdx+1) will have + // the closest values to each other from all pairs. + // For example, suppose the values are distributed like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // Or like this: + // |-----------x-------------x----------------x----x-----| + // ^--rIdx as this is older. + // 2. If there is an exemplar that expired, then we simple reset rIdx to that index. + // 3. We check if by inserting the new exemplar we would create a closer pair at + // (nIdx-1, nIdx) or (nIdx, nIdx+1) and set rIdx to nIdx-1 or nIdx accordingly to + // keep the spread of exemplars by value; otherwise we keep rIdx as it is. + rIdx = -1 + cLog float64 // Logarithm of the current exemplar. + pLog float64 // Logarithm of the previous exemplar. ) for i, exemplar := range n.exemplars { @@ -1726,7 +1759,7 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { } // Find the index at which to insert new the exemplar. - if *e.Value <= *exemplar.Value && nIdx == -1 { + if nIdx == -1 && *e.Value <= *exemplar.Value { nIdx = i } @@ -1738,11 +1771,13 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { } diff := math.Abs(cLog - pLog) if md == -1 || diff < md { + // The closest exemplar pair is at index: i-1, i. + // Choose the exemplar with the older timestamp for replacement. 
md = diff if n.exemplars[i].Timestamp.AsTime().Before(n.exemplars[i-1].Timestamp.AsTime()) { - mdIdx = i + rIdx = i } else { - mdIdx = i - 1 + rIdx = i - 1 } } @@ -1753,8 +1788,12 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { if nIdx == -1 { nIdx = len(n.exemplars) } + // Here, we have the following relationships: + // n.exemplars[nIdx-1].Value < e.Value (if nIdx > 0) + // e.Value <= n.exemplars[nIdx].Value (if nIdx < len(n.exemplars)) if otIdx != -1 && e.Timestamp.AsTime().Sub(ot) > n.ttl { + // If the oldest exemplar has expired, then replace it with the new exemplar. rIdx = otIdx } else { // In the previous for loop, when calculating the closest pair of exemplars, @@ -1764,23 +1803,26 @@ func (n *nativeExemplars) addExemplar(e *dto.Exemplar) { if nIdx > 0 { diff := math.Abs(elog - math.Log(n.exemplars[nIdx-1].GetValue())) if diff < md { + // The value we are about to insert is closer to the previous exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-n-----------x----------------x----x-----| + // nIdx-1--^ ^--new exemplar value + // Do not make the spread worse, replace nIdx-1 and not rIdx. md = diff - mdIdx = nIdx - if n.exemplars[nIdx-1].Timestamp.AsTime().Before(e.Timestamp.AsTime()) { - mdIdx = nIdx - 1 - } + rIdx = nIdx - 1 } } if nIdx < len(n.exemplars) { diff := math.Abs(math.Log(n.exemplars[nIdx].GetValue()) - elog) if diff < md { - mdIdx = nIdx - if n.exemplars[nIdx].Timestamp.AsTime().Before(e.Timestamp.AsTime()) { - mdIdx = nIdx - } + // The value we are about to insert is closer to the next exemplar at the insertion point than what we calculated before in rIdx. + // v--rIdx + // |-----------x-----------n-x----------------x----x-----| + // new exemplar value--^ ^--nIdx + // Do not make the spread worse, replace nIdx-1 and not rIdx. + rIdx = nIdx } } - rIdx = mdIdx } // Adjust the slice according to rIdx and nIdx. diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index efbc3ea8..62a4e7ad 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -140,6 +140,8 @@ func (c *processCollector) Describe(ch chan<- *Desc) { ch <- c.maxVsize ch <- c.rss ch <- c.startTime + ch <- c.inBytes + ch <- c.outBytes } // Collect returns the current state of all metrics of the collector. 
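The rewritten addExemplar above keeps exemplars sorted by value and, once the slice is full, evicts either an exemplar older than the TTL or the older member of the pair whose values are closest on a log scale. A simplified, self-contained sketch of that selection rule; it deliberately omits the extra adjustment the real code makes around the new exemplar's insertion point:

package main

import (
	"fmt"
	"math"
	"time"
)

type exemplar struct {
	value float64
	ts    time.Time
}

// replaceIndex returns the index to overwrite with a new exemplar.
// exs must be sorted by value and non-empty; a negative ttl disables expiry.
func replaceIndex(exs []exemplar, now time.Time, ttl time.Duration) int {
	// An expired exemplar is replaced outright.
	if ttl >= 0 {
		for i, e := range exs {
			if now.Sub(e.ts) > ttl {
				return i
			}
		}
	}
	// Otherwise find the closest pair in log space and evict its older member,
	// which keeps the remaining exemplars spread out by value.
	best, bestIdx := math.Inf(1), 0
	for i := 1; i < len(exs); i++ {
		d := math.Abs(math.Log(exs[i].value) - math.Log(exs[i-1].value))
		if d < best {
			best = d
			if exs[i].ts.Before(exs[i-1].ts) {
				bestIdx = i
			} else {
				bestIdx = i - 1
			}
		}
	}
	return bestIdx
}

func main() {
	now := time.Now()
	exs := []exemplar{
		{value: 1, ts: now.Add(-3 * time.Minute)},
		{value: 1.1, ts: now.Add(-1 * time.Minute)},
		{value: 100, ts: now.Add(-2 * time.Minute)},
	}
	// 0: the older half of the closest pair (1, 1.1) is evicted.
	fmt.Println(replaceIndex(exs, now, 5*time.Minute))
}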
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 2e0b9a86..e598e66e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -203,8 +203,10 @@ func HandlerForTransactional(reg prometheus.TransactionalGatherer, opts HandlerO defer closeWriter() - rsp.Header().Set(contentEncodingHeader, encodingHeader) - + // Set Content-Encoding only when data is compressed + if encodingHeader != string(Identity) { + rsp.Header().Set(contentEncodingHeader, encodingHeader) + } enc := expfmt.NewEncoder(w, contentType) // handleError handles the error according to opts.ErrorHandling diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index 25cfaa21..1448439b 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -45,7 +45,7 @@ func ResponseFormat(h http.Header) Format { mediatype, params, err := mime.ParseMediaType(ct) if err != nil { - return fmtUnknown + return FmtUnknown } const textType = "text/plain" @@ -53,21 +53,21 @@ func ResponseFormat(h http.Header) Format { switch mediatype { case ProtoType: if p, ok := params["proto"]; ok && p != ProtoProtocol { - return fmtUnknown + return FmtUnknown } if e, ok := params["encoding"]; ok && e != "delimited" { - return fmtUnknown + return FmtUnknown } - return fmtProtoDelim + return FmtProtoDelim case textType: if v, ok := params["version"]; ok && v != TextVersion { - return fmtUnknown + return FmtUnknown } - return fmtText + return FmtText } - return fmtUnknown + return FmtUnknown } // NewDecoder returns a new decoder based on the given input format. 
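The promhttp hunk above stops advertising an encoding that was never applied: Content-Encoding is only set when the response body is actually compressed. A stand-alone illustration of that behaviour with plain net/http (handler, route and payload are illustrative, not the library's API):

package main

import (
	"compress/gzip"
	"io"
	"log"
	"net/http"
	"strings"
)

func metricsHandler(w http.ResponseWriter, r *http.Request) {
	var out io.Writer = w
	if strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
		// Header is set only on the compressed path, never for identity responses.
		w.Header().Set("Content-Encoding", "gzip")
		gz := gzip.NewWriter(w)
		defer gz.Close()
		out = gz
	}
	io.WriteString(out, "metric_total 1\n")
}

func main() {
	http.HandleFunc("/metrics", metricsHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}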
diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index ff5ef7a9..cf0c150c 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -77,18 +77,18 @@ func Negotiate(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NegotiateIncludingOpenMetrics works like Negotiate but includes @@ -110,26 +110,26 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { switch ac.Params["encoding"] { case "delimited": - return fmtProtoDelim + escapingScheme + return FmtProtoDelim + escapingScheme case "text": - return fmtProtoText + escapingScheme + return FmtProtoText + escapingScheme case "compact-text": - return fmtProtoCompact + escapingScheme + return FmtProtoCompact + escapingScheme } } if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { - return fmtText + escapingScheme + return FmtText + escapingScheme } if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") { switch ver { case OpenMetricsVersion_1_0_0: - return fmtOpenMetrics_1_0_0 + escapingScheme + return FmtOpenMetrics_1_0_0 + escapingScheme default: - return fmtOpenMetrics_0_0_1 + escapingScheme + return FmtOpenMetrics_0_0_1 + escapingScheme } } } - return fmtText + escapingScheme + return FmtText + escapingScheme } // NewEncoder returns a new encoder based on content type negotiation. All diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 051b38cd..d942af8e 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -32,24 +32,31 @@ type Format string // it on the wire, new content-type strings will have to be agreed upon and // added here. const ( - TextVersion = "0.0.4" - ProtoType = `application/vnd.google.protobuf` - ProtoProtocol = `io.prometheus.client.MetricFamily` - protoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" OpenMetricsType = `application/openmetrics-text` OpenMetricsVersion_0_0_1 = "0.0.1" OpenMetricsVersion_1_0_0 = "1.0.0" - // The Content-Type values for the different wire protocols. Note that these - // values are now unexported. If code was relying on comparisons to these - // constants, instead use FormatType(). 
- fmtUnknown Format = `` - fmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` - fmtProtoDelim Format = protoFmt + ` encoding=delimited` - fmtProtoText Format = protoFmt + ` encoding=text` - fmtProtoCompact Format = protoFmt + ` encoding=compact-text` - fmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` - fmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` + // The Content-Type values for the different wire protocols. Do not do direct + // comparisons to these constants, instead use the comparison functions. + // Deprecated: Use expfmt.NewFormat(expfmt.TypeUnknown) instead. + FmtUnknown Format = `` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeTextPlain) instead. + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoDelim) instead. + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoText) instead. + FmtProtoText Format = ProtoFmt + ` encoding=text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeProtoCompact) instead. + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8` + // Deprecated: Use expfmt.NewFormat(expfmt.TypeOpenMetrics) instead. + FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8` ) const ( @@ -79,17 +86,17 @@ const ( func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: - return fmtProtoCompact + return FmtProtoCompact case TypeProtoDelim: - return fmtProtoDelim + return FmtProtoDelim case TypeProtoText: - return fmtProtoText + return FmtProtoText case TypeTextPlain: - return fmtText + return FmtText case TypeOpenMetrics: - return fmtOpenMetrics_1_0_0 + return FmtOpenMetrics_1_0_0 default: - return fmtUnknown + return FmtUnknown } } @@ -97,12 +104,35 @@ func NewFormat(t FormatType) Format { // specified version number. func NewOpenMetricsFormat(version string) (Format, error) { if version == OpenMetricsVersion_0_0_1 { - return fmtOpenMetrics_0_0_1, nil + return FmtOpenMetrics_0_0_1, nil } if version == OpenMetricsVersion_1_0_0 { - return fmtOpenMetrics_1_0_0, nil + return FmtOpenMetrics_1_0_0, nil } - return fmtUnknown, fmt.Errorf("unknown open metrics version string") + return FmtUnknown, fmt.Errorf("unknown open metrics version string") +} + +// WithEscapingScheme returns a copy of Format with the specified escaping +// scheme appended to the end. If an escaping scheme already exists it is +// removed. +func (f Format) WithEscapingScheme(s model.EscapingScheme) Format { + var terms []string + for _, p := range strings.Split(string(f), ";") { + toks := strings.Split(p, "=") + if len(toks) != 2 { + trimmed := strings.TrimSpace(p) + if len(trimmed) > 0 { + terms = append(terms, trimmed) + } + continue + } + key := strings.TrimSpace(toks[0]) + if key != model.EscapingKey { + terms = append(terms, strings.TrimSpace(p)) + } + } + terms = append(terms, model.EscapingKey+"="+s.String()) + return Format(strings.Join(terms, "; ")) } // FormatType deduces an overall FormatType for the given format. 
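A short sketch (again, not part of the diff) of the new WithEscapingScheme helper added above, combined with the non-deprecated NewFormat constructor; model.UnderscoreEscaping is just an assumed choice of scheme:

```go
package main

import (
	"fmt"

	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	// Build the format the non-deprecated way, then pin an escaping scheme;
	// WithEscapingScheme replaces any existing "escaping" term in the format.
	f := expfmt.NewFormat(expfmt.TypeOpenMetrics).WithEscapingScheme(model.UnderscoreEscaping)

	// Prints something like:
	// application/openmetrics-text; version=1.0.0; charset=utf-8; escaping=underscores
	fmt.Println(f)
}
```
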
diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 353c5e93..11c8ff4b 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -477,7 +477,7 @@ func writeOpenMetricsNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces, quoted. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go index f9b8265a..4b86434b 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_create.go +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -354,7 +354,7 @@ func writeNameAndLabelPairs( if name != "" { // If the name does not pass the legacy validity check, we must put the // metric name inside the braces. - if !model.IsValidLegacyMetricName(model.LabelValue(name)) { + if !model.IsValidLegacyMetricName(name) { metricInsideBraces = true err := w.WriteByte(separator) written++ @@ -498,7 +498,7 @@ func writeInt(w enhancedWriter, i int64) (int, error) { // writeName writes a string as-is if it complies with the legacy naming // scheme, or escapes it in double quotes if not. func writeName(w enhancedWriter, name string) (int, error) { - if model.IsValidLegacyMetricName(model.LabelValue(name)) { + if model.IsValidLegacyMetricName(name) { return w.WriteString(name) } var written int diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 26490211..f085a923 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -22,9 +22,9 @@ import ( "math" "strconv" "strings" + "unicode/utf8" dto "github.com/prometheus/client_model/go" - "google.golang.org/protobuf/proto" "github.com/prometheus/common/model" @@ -60,6 +60,7 @@ type TextParser struct { currentMF *dto.MetricFamily currentMetric *dto.Metric currentLabelPair *dto.LabelPair + currentLabelPairs []*dto.LabelPair // Temporarily stores label pairs while parsing a metric line. // The remaining member variables are only used for summaries/histograms. currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' @@ -74,6 +75,9 @@ type TextParser struct { // count and sum of that summary/histogram. currentIsSummaryCount, currentIsSummarySum bool currentIsHistogramCount, currentIsHistogramSum bool + // These indicate if the metric name from the current line being parsed is inside + // braces and if that metric name was found respectively. + currentMetricIsInsideBraces, currentMetricInsideBracesIsPresent bool } // TextToMetricFamilies reads 'in' as the simple and flat text-based exchange @@ -137,12 +141,15 @@ func (p *TextParser) reset(in io.Reader) { } p.currentQuantile = math.NaN() p.currentBucket = math.NaN() + p.currentMF = nil } // startOfLine represents the state where the next byte read from p.buf is the // start of a line (or whitespace leading up to it). 
func (p *TextParser) startOfLine() stateFn { p.lineCount++ + p.currentMetricIsInsideBraces = false + p.currentMetricInsideBracesIsPresent = false if p.skipBlankTab(); p.err != nil { // This is the only place that we expect to see io.EOF, // which is not an error but the signal that we are done. @@ -158,6 +165,9 @@ func (p *TextParser) startOfLine() stateFn { return p.startComment case '\n': return p.startOfLine // Empty line, start the next one. + case '{': + p.currentMetricIsInsideBraces = true + return p.readingLabels } return p.readingMetricName } @@ -275,6 +285,8 @@ func (p *TextParser) startLabelName() stateFn { return nil // Unexpected end of input. } if p.currentByte == '}' { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } @@ -287,6 +299,45 @@ func (p *TextParser) startLabelName() stateFn { p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) return nil } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + if p.currentMetricIsInsideBraces { + if p.currentMetricInsideBracesIsPresent { + p.parseError(fmt.Sprintf("multiple metric names for metric %q", p.currentMF.GetName())) + return nil + } + switch p.currentByte { + case ',': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetricInsideBracesIsPresent = true + return p.startLabelName + case '}': + p.setOrCreateCurrentMF() + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of metric name %q", p.currentByte)) + return nil + } + } + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + p.currentLabelPairs = nil + return nil + } p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) @@ -296,23 +347,17 @@ func (p *TextParser) startLabelName() stateFn { // labels to 'real' labels. if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { - p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) - } - if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { - return nil // Unexpected end of input. - } - if p.currentByte != '=' { - p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) - return nil + p.currentLabelPairs = append(p.currentLabelPairs, p.currentLabelPair) } // Check for duplicate label names. 
labels := make(map[string]struct{}) - for _, l := range p.currentMetric.Label { + for _, l := range p.currentLabelPairs { lName := l.GetName() if _, exists := labels[lName]; !exists { labels[lName] = struct{}{} } else { p.parseError(fmt.Sprintf("duplicate label names for metric %q", p.currentMF.GetName())) + p.currentLabelPairs = nil return nil } } @@ -345,6 +390,7 @@ func (p *TextParser) startLabelValue() stateFn { if p.currentQuantile, p.err = parseFloat(p.currentLabelPair.GetValue()); p.err != nil { // Create a more helpful error message. p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } else { @@ -371,12 +417,19 @@ func (p *TextParser) startLabelValue() stateFn { return p.startLabelName case '}': + if p.currentMF == nil { + p.parseError("invalid metric name") + return nil + } + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPairs...) + p.currentLabelPairs = nil if p.skipBlankTab(); p.err != nil { return nil // Unexpected end of input. } return p.readingValue default: p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.GetValue())) + p.currentLabelPairs = nil return nil } } @@ -585,6 +638,8 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.WriteByte(p.currentByte) case 'n': p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) return @@ -610,13 +665,45 @@ func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { // but not into p.currentToken. func (p *TextParser) readTokenAsMetricName() { p.currentToken.Reset() + // A UTF-8 metric name must be quoted and may have escaped characters. + quoted := false + escaped := false if !isValidMetricNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("metric name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + if !isValidMetricNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == ' ') { return } } @@ -628,13 +715,45 @@ func (p *TextParser) readTokenAsMetricName() { // but not into p.currentToken. func (p *TextParser) readTokenAsLabelName() { p.currentToken.Reset() + // A UTF-8 label name must be quoted and may have escaped characters. 
+ quoted := false + escaped := false if !isValidLabelNameStart(p.currentByte) { return } - for { - p.currentToken.WriteByte(p.currentByte) + for p.err == nil { + if escaped { + switch p.currentByte { + case '\\': + p.currentToken.WriteByte(p.currentByte) + case 'n': + p.currentToken.WriteByte('\n') + case '"': + p.currentToken.WriteByte('"') + default: + p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + return + } + escaped = false + } else { + switch p.currentByte { + case '"': + quoted = !quoted + if !quoted { + p.currentByte, p.err = p.buf.ReadByte() + return + } + case '\n': + p.parseError(fmt.Sprintf("label name %q contains unescaped new-line", p.currentToken.String())) + return + case '\\': + escaped = true + default: + p.currentToken.WriteByte(p.currentByte) + } + } p.currentByte, p.err = p.buf.ReadByte() - if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + if !isValidLabelNameContinuation(p.currentByte, quoted) || (!quoted && p.currentByte == '=') { return } } @@ -660,6 +779,7 @@ func (p *TextParser) readTokenAsLabelValue() { p.currentToken.WriteByte('\n') default: p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte)) + p.currentLabelPairs = nil return } escaped = false @@ -718,19 +838,19 @@ func (p *TextParser) setOrCreateCurrentMF() { } func isValidLabelNameStart(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == '"' } -func isValidLabelNameContinuation(b byte) bool { - return isValidLabelNameStart(b) || (b >= '0' && b <= '9') +func isValidLabelNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameStart(b) || (b >= '0' && b <= '9') || (quoted && utf8.ValidString(string(b))) } func isValidMetricNameStart(b byte) bool { return isValidLabelNameStart(b) || b == ':' } -func isValidMetricNameContinuation(b byte) bool { - return isValidLabelNameContinuation(b) || b == ':' +func isValidMetricNameContinuation(b byte, quoted bool) bool { + return isValidLabelNameContinuation(b, quoted) || b == ':' } func isBlankOrTab(b byte) bool { diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go index 3317ce22..73b7aa3e 100644 --- a/vendor/github.com/prometheus/common/model/labels.go +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -97,26 +97,35 @@ var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") // therewith. type LabelName string -// IsValid returns true iff name matches the pattern of LabelNameRE for legacy -// names, and iff it's valid UTF-8 if NameValidationScheme is set to -// UTF8Validation. For the legacy matching, it does not use LabelNameRE for the -// check but a much faster hardcoded implementation. +// IsValid returns true iff the name matches the pattern of LabelNameRE when +// NameValidationScheme is set to LegacyValidation, or valid UTF-8 if +// NameValidationScheme is set to UTF8Validation. 
func (ln LabelName) IsValid() bool { if len(ln) == 0 { return false } switch NameValidationScheme { case LegacyValidation: - for i, b := range ln { - if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { - return false - } - } + return ln.IsValidLegacy() case UTF8Validation: return utf8.ValidString(string(ln)) default: panic(fmt.Sprintf("Invalid name validation scheme requested: %d", NameValidationScheme)) } +} + +// IsValidLegacy returns true iff name matches the pattern of LabelNameRE for +// legacy names. It does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValidLegacy() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } return true } diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go index 481c47b4..abb2c900 100644 --- a/vendor/github.com/prometheus/common/model/labelset_string.go +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -11,8 +11,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//go:build go1.21 - package model import ( diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go deleted file mode 100644 index c4212685..00000000 --- a/vendor/github.com/prometheus/common/model/labelset_string_go120.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2024 The Prometheus Authors -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:build !go1.21 - -package model - -import ( - "fmt" - "sort" - "strings" -) - -// String was optimized using functions not available for go 1.20 -// or lower. We keep the old implementation for compatibility with client_golang. -// Once client golang drops support for go 1.20 (scheduled for August 2024), this -// file can be removed. -func (l LabelSet) String() string { - labelNames := make([]string, 0, len(l)) - for name := range l { - labelNames = append(labelNames, string(name)) - } - sort.Strings(labelNames) - lstrs := make([]string, 0, len(l)) - for _, name := range labelNames { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) - } - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index eb865e5a..f50966bc 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -34,10 +34,13 @@ var ( // goroutines are started. NameValidationScheme = LegacyValidation - // NameEscapingScheme defines the default way that names will be - // escaped when presented to systems that do not support UTF-8 names. 
If the - // Content-Type "escaping" term is specified, that will override this value. - NameEscapingScheme = ValueEncodingEscaping + // NameEscapingScheme defines the default way that names will be escaped when + // presented to systems that do not support UTF-8 names. If the Content-Type + // "escaping" term is specified, that will override this value. + // NameEscapingScheme should not be set to the NoEscaping value. That string + // is used in content negotiation to indicate that a system supports UTF-8 and + // has that feature enabled. + NameEscapingScheme = UnderscoreEscaping ) // ValidationScheme is a Go enum for determining how metric and label names will @@ -161,7 +164,7 @@ func (m Metric) FastFingerprint() Fingerprint { func IsValidMetricName(n LabelValue) bool { switch NameValidationScheme { case LegacyValidation: - return IsValidLegacyMetricName(n) + return IsValidLegacyMetricName(string(n)) case UTF8Validation: if len(n) == 0 { return false @@ -176,7 +179,7 @@ func IsValidMetricName(n LabelValue) bool { // legacy validation scheme regardless of the value of NameValidationScheme. // This function, however, does not use MetricNameRE for the check but a much // faster hardcoded implementation. -func IsValidLegacyMetricName(n LabelValue) bool { +func IsValidLegacyMetricName(n string) bool { if len(n) == 0 { return false } @@ -208,7 +211,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF } // If the name is nil, copy as-is, don't try to escape. - if v.Name == nil || IsValidLegacyMetricName(LabelValue(v.GetName())) { + if v.Name == nil || IsValidLegacyMetricName(v.GetName()) { out.Name = v.Name } else { out.Name = proto.String(EscapeName(v.GetName(), scheme)) @@ -230,7 +233,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF for _, l := range m.Label { if l.GetName() == MetricNameLabel { - if l.Value == nil || IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.Value == nil || IsValidLegacyMetricName(l.GetValue()) { escaped.Label = append(escaped.Label, l) continue } @@ -240,7 +243,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF }) continue } - if l.Name == nil || IsValidLegacyMetricName(LabelValue(l.GetName())) { + if l.Name == nil || IsValidLegacyMetricName(l.GetName()) { escaped.Label = append(escaped.Label, l) continue } @@ -256,10 +259,10 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF func metricNeedsEscaping(m *dto.Metric) bool { for _, l := range m.Label { - if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(LabelValue(l.GetValue())) { + if l.GetName() == MetricNameLabel && !IsValidLegacyMetricName(l.GetValue()) { return true } - if !IsValidLegacyMetricName(LabelValue(l.GetName())) { + if !IsValidLegacyMetricName(l.GetName()) { return true } } @@ -283,7 +286,7 @@ func EscapeName(name string, scheme EscapingScheme) string { case NoEscaping: return name case UnderscoreEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } for i, b := range name { @@ -309,7 +312,7 @@ func EscapeName(name string, scheme EscapingScheme) string { } return escaped.String() case ValueEncodingEscaping: - if IsValidLegacyMetricName(LabelValue(name)) { + if IsValidLegacyMetricName(name) { return name } escaped.WriteString("U__") @@ -452,6 +455,6 @@ func ToEscapingScheme(s string) (EscapingScheme, error) { case EscapeValues: return ValueEncodingEscaping, nil default: - return NoEscaping, 
fmt.Errorf("unknown format scheme " + s) + return NoEscaping, fmt.Errorf("unknown format scheme %s", s) } } diff --git a/vendor/github.com/rs/xid/.gitignore b/vendor/github.com/rs/xid/.gitignore new file mode 100644 index 00000000..81be9277 --- /dev/null +++ b/vendor/github.com/rs/xid/.gitignore @@ -0,0 +1,3 @@ +/.idea +/.vscode +.DS_Store \ No newline at end of file diff --git a/vendor/github.com/rs/xid/README.md b/vendor/github.com/rs/xid/README.md index 974e67d2..1bf45bd1 100644 --- a/vendor/github.com/rs/xid/README.md +++ b/vendor/github.com/rs/xid/README.md @@ -4,7 +4,7 @@ Package xid is a globally unique id generator library, ready to safely be used directly in your server code. -Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization (base64) to make it shorter when transported as a string: +Xid uses the Mongo Object ID algorithm to generate globally unique ids with a different serialization ([base32hex](https://datatracker.ietf.org/doc/html/rfc4648#page-10)) to make it shorter when transported as a string: https://docs.mongodb.org/manual/reference/object-id/ - 4-byte value representing the seconds since the Unix epoch, @@ -13,7 +13,7 @@ https://docs.mongodb.org/manual/reference/object-id/ - 3-byte counter, starting with a random value. The binary representation of the id is compatible with Mongo 12 bytes Object IDs. -The string representation is using base32 hex (w/o padding) for better space efficiency +The string representation is using [base32hex](https://datatracker.ietf.org/doc/html/rfc4648#page-10) (w/o padding) for better space efficiency when stored in that form (20 bytes). The hex variant of base32 is used to retain the sortable property of the id. @@ -71,8 +71,10 @@ References: - Java port by [0xShamil](https://github.com/0xShamil/): https://github.com/0xShamil/java-xid - Dart port by [Peter Bwire](https://github.com/pitabwire): https://pub.dev/packages/xid - PostgreSQL port by [Rasmus Holm](https://github.com/crholm): https://github.com/modfin/pg-xid -- Swift port by [Uditha Atukorala](https://github.com/uditha-atukorala): https://github.com/uditha-atukorala/swift-xid -- C++ port by [Uditha Atukorala](https://github.com/uditha-atukorala): https://github.com/uditha-atukorala/libxid +- Swift port by [Uditha Atukorala](https://github.com/uatuko): https://github.com/uatuko/swift-xid +- C++ port by [Uditha Atukorala](https://github.com/uatuko): https://github.com/uatuko/libxid +- Typescript & Javascript port by [Yiwen AI](https://github.com/yiwen-ai): https://github.com/yiwen-ai/xid-ts +- Gleam port by [Alexandre Del Vecchio](https://github.com/defgenx): https://github.com/defgenx/gxid ## Install diff --git a/vendor/github.com/rs/xid/hostid_darwin.go b/vendor/github.com/rs/xid/hostid_darwin.go index 08351ff7..17351563 100644 --- a/vendor/github.com/rs/xid/hostid_darwin.go +++ b/vendor/github.com/rs/xid/hostid_darwin.go @@ -2,8 +2,33 @@ package xid -import "syscall" +import ( + "errors" + "os/exec" + "strings" +) func readPlatformMachineID() (string, error) { - return syscall.Sysctl("kern.uuid") + ioreg, err := exec.LookPath("ioreg") + if err != nil { + return "", err + } + + cmd := exec.Command(ioreg, "-rd1", "-c", "IOPlatformExpertDevice") + out, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + + for _, line := range strings.Split(string(out), "\n") { + if strings.Contains(line, "IOPlatformUUID") { + parts := strings.SplitAfter(line, `" = "`) + if len(parts) == 2 { + uuid := strings.TrimRight(parts[1], `"`) + 
return strings.ToLower(uuid), nil + } + } + } + + return "", errors.New("cannot find host id") } diff --git a/vendor/github.com/rs/xid/hostid_windows.go b/vendor/github.com/rs/xid/hostid_windows.go index ec2593ee..a4d98ab0 100644 --- a/vendor/github.com/rs/xid/hostid_windows.go +++ b/vendor/github.com/rs/xid/hostid_windows.go @@ -11,11 +11,17 @@ import ( func readPlatformMachineID() (string, error) { // source: https://github.com/shirou/gopsutil/blob/master/host/host_syscall.go var h syscall.Handle - err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, syscall.StringToUTF16Ptr(`SOFTWARE\Microsoft\Cryptography`), 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h) + + regKeyCryptoPtr, err := syscall.UTF16PtrFromString(`SOFTWARE\Microsoft\Cryptography`) + if err != nil { + return "", fmt.Errorf(`error reading registry key "SOFTWARE\Microsoft\Cryptography": %w`, err) + } + + err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, regKeyCryptoPtr, 0, syscall.KEY_READ|syscall.KEY_WOW64_64KEY, &h) if err != nil { return "", err } - defer syscall.RegCloseKey(h) + defer func() { _ = syscall.RegCloseKey(h) }() const syscallRegBufLen = 74 // len(`{`) + len(`abcdefgh-1234-456789012-123345456671` * 2) + len(`}`) // 2 == bytes/UTF16 const uuidLen = 36 @@ -23,9 +29,15 @@ func readPlatformMachineID() (string, error) { var regBuf [syscallRegBufLen]uint16 bufLen := uint32(syscallRegBufLen) var valType uint32 - err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(`MachineGuid`), nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + + mGuidPtr, err := syscall.UTF16PtrFromString(`MachineGuid`) if err != nil { - return "", err + return "", fmt.Errorf("error reading machine GUID: %w", err) + } + + err = syscall.RegQueryValueEx(h, mGuidPtr, nil, &valType, (*byte)(unsafe.Pointer(®Buf[0])), &bufLen) + if err != nil { + return "", fmt.Errorf("error parsing ") } hostID := syscall.UTF16ToString(regBuf[:]) diff --git a/vendor/github.com/rs/xid/id.go b/vendor/github.com/rs/xid/id.go index fcd7a041..e88984d9 100644 --- a/vendor/github.com/rs/xid/id.go +++ b/vendor/github.com/rs/xid/id.go @@ -54,7 +54,6 @@ import ( "sort" "sync/atomic" "time" - "unsafe" ) // Code inspired from mgo/bson ObjectId @@ -172,7 +171,7 @@ func FromString(id string) (ID, error) { func (id ID) String() string { text := make([]byte, encodedLen) encode(text, id[:]) - return *(*string)(unsafe.Pointer(&text)) + return string(text) } // Encode encodes the id using base32 encoding, writing 20 bytes to dst and return it. 
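The xid hunks above and below only change how the machine id is read and how String() builds its result; the public API is untouched. A hedged usage sketch for reference (the printed id is the README's example value, not a fixed output):

```go
package main

import (
	"fmt"

	"github.com/rs/xid"
)

func main() {
	id := xid.New()
	// String() now uses a plain []byte-to-string conversion instead of an
	// unsafe pointer cast; the encoded output is unchanged.
	fmt.Println(id.String()) // 20-char base32hex string, e.g. "9m4e2mr0ui3e8a215n4g"
}
```
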
@@ -206,23 +205,23 @@ func encode(dst, id []byte) { dst[19] = encoding[(id[11]<<4)&0x1F] dst[18] = encoding[(id[11]>>1)&0x1F] - dst[17] = encoding[(id[11]>>6)&0x1F|(id[10]<<2)&0x1F] + dst[17] = encoding[(id[11]>>6)|(id[10]<<2)&0x1F] dst[16] = encoding[id[10]>>3] dst[15] = encoding[id[9]&0x1F] dst[14] = encoding[(id[9]>>5)|(id[8]<<3)&0x1F] dst[13] = encoding[(id[8]>>2)&0x1F] dst[12] = encoding[id[8]>>7|(id[7]<<1)&0x1F] - dst[11] = encoding[(id[7]>>4)&0x1F|(id[6]<<4)&0x1F] + dst[11] = encoding[(id[7]>>4)|(id[6]<<4)&0x1F] dst[10] = encoding[(id[6]>>1)&0x1F] - dst[9] = encoding[(id[6]>>6)&0x1F|(id[5]<<2)&0x1F] + dst[9] = encoding[(id[6]>>6)|(id[5]<<2)&0x1F] dst[8] = encoding[id[5]>>3] dst[7] = encoding[id[4]&0x1F] dst[6] = encoding[id[4]>>5|(id[3]<<3)&0x1F] dst[5] = encoding[(id[3]>>2)&0x1F] dst[4] = encoding[id[3]>>7|(id[2]<<1)&0x1F] - dst[3] = encoding[(id[2]>>4)&0x1F|(id[1]<<4)&0x1F] + dst[3] = encoding[(id[2]>>4)|(id[1]<<4)&0x1F] dst[2] = encoding[(id[1]>>1)&0x1F] - dst[1] = encoding[(id[1]>>6)&0x1F|(id[0]<<2)&0x1F] + dst[1] = encoding[(id[1]>>6)|(id[0]<<2)&0x1F] dst[0] = encoding[id[0]>>3] } diff --git a/vendor/github.com/tklauser/numcpus/.cirrus.yml b/vendor/github.com/tklauser/numcpus/.cirrus.yml index 33e6595c..b3091efd 100644 --- a/vendor/github.com/tklauser/numcpus/.cirrus.yml +++ b/vendor/github.com/tklauser/numcpus/.cirrus.yml @@ -1,10 +1,10 @@ env: CIRRUS_CLONE_DEPTH: 1 - GO_VERSION: go1.22.2 + GO_VERSION: go1.23.0 freebsd_13_task: freebsd_instance: - image_family: freebsd-13-2 + image_family: freebsd-13-3 install_script: | pkg install -y go GOBIN=$PWD/bin go install golang.org/dl/${GO_VERSION}@latest diff --git a/vendor/github.com/tklauser/numcpus/numcpus.go b/vendor/github.com/tklauser/numcpus/numcpus.go index af59983e..de206f06 100644 --- a/vendor/github.com/tklauser/numcpus/numcpus.go +++ b/vendor/github.com/tklauser/numcpus/numcpus.go @@ -73,3 +73,26 @@ func GetPossible() (int, error) { func GetPresent() (int, error) { return getPresent() } + +// ListOffline returns the list of offline CPUs. See [GetOffline] for details on +// when a CPU is considered offline. +func ListOffline() ([]int, error) { + return listOffline() +} + +// ListOnline returns the list of CPUs that are online and being scheduled. +func ListOnline() ([]int, error) { + return listOnline() +} + +// ListPossible returns the list of possible CPUs. See [GetPossible] for +// details on when a CPU is considered possible. +func ListPossible() ([]int, error) { + return listPossible() +} + +// ListPresent returns the list of present CPUs. See [GetPresent] for +// details on when a CPU is considered present. 
+func ListPresent() ([]int, error) { + return listPresent() +} diff --git a/vendor/github.com/tklauser/numcpus/numcpus_linux.go b/vendor/github.com/tklauser/numcpus/numcpus_linux.go index 7e75cb06..7b991da4 100644 --- a/vendor/github.com/tklauser/numcpus/numcpus_linux.go +++ b/vendor/github.com/tklauser/numcpus/numcpus_linux.go @@ -15,6 +15,7 @@ package numcpus import ( + "fmt" "os" "path/filepath" "strconv" @@ -23,7 +24,14 @@ import ( "golang.org/x/sys/unix" ) -const sysfsCPUBasePath = "/sys/devices/system/cpu" +const ( + sysfsCPUBasePath = "/sys/devices/system/cpu" + + offline = "offline" + online = "online" + possible = "possible" + present = "present" +) func getFromCPUAffinity() (int, error) { var cpuSet unix.CPUSet @@ -33,19 +41,26 @@ func getFromCPUAffinity() (int, error) { return cpuSet.Count(), nil } -func readCPURange(file string) (int, error) { +func readCPURangeWith[T any](file string, f func(cpus string) (T, error)) (T, error) { + var zero T buf, err := os.ReadFile(filepath.Join(sysfsCPUBasePath, file)) if err != nil { - return 0, err + return zero, err } - return parseCPURange(strings.Trim(string(buf), "\n ")) + return f(strings.Trim(string(buf), "\n ")) } -func parseCPURange(cpus string) (int, error) { +func countCPURange(cpus string) (int, error) { + // Treat empty file as valid. This might be the case if there are no offline CPUs in which + // case /sys/devices/system/cpu/offline is empty. + if cpus == "" { + return 0, nil + } + n := int(0) for _, cpuRange := range strings.Split(cpus, ",") { - if len(cpuRange) == 0 { - continue + if cpuRange == "" { + return 0, fmt.Errorf("empty CPU range in CPU string %q", cpus) } from, to, found := strings.Cut(cpuRange, "-") first, err := strconv.ParseUint(from, 10, 32) @@ -60,11 +75,49 @@ func parseCPURange(cpus string) (int, error) { if err != nil { return 0, err } + if last < first { + return 0, fmt.Errorf("last CPU in range (%d) less than first (%d)", last, first) + } n += int(last - first + 1) } return n, nil } +func listCPURange(cpus string) ([]int, error) { + // See comment in countCPURange. 
+ if cpus == "" { + return []int{}, nil + } + + list := []int{} + for _, cpuRange := range strings.Split(cpus, ",") { + if cpuRange == "" { + return nil, fmt.Errorf("empty CPU range in CPU string %q", cpus) + } + from, to, found := strings.Cut(cpuRange, "-") + first, err := strconv.ParseUint(from, 10, 32) + if err != nil { + return nil, err + } + if !found { + // range containing a single element + list = append(list, int(first)) + continue + } + last, err := strconv.ParseUint(to, 10, 32) + if err != nil { + return nil, err + } + if last < first { + return nil, fmt.Errorf("last CPU in range (%d) less than first (%d)", last, first) + } + for cpu := int(first); cpu <= int(last); cpu++ { + list = append(list, cpu) + } + } + return list, nil +} + func getConfigured() (int, error) { d, err := os.Open(sysfsCPUBasePath) if err != nil { @@ -100,20 +153,36 @@ func getKernelMax() (int, error) { } func getOffline() (int, error) { - return readCPURange("offline") + return readCPURangeWith(offline, countCPURange) } func getOnline() (int, error) { if n, err := getFromCPUAffinity(); err == nil { return n, nil } - return readCPURange("online") + return readCPURangeWith(online, countCPURange) } func getPossible() (int, error) { - return readCPURange("possible") + return readCPURangeWith(possible, countCPURange) } func getPresent() (int, error) { - return readCPURange("present") + return readCPURangeWith(present, countCPURange) +} + +func listOffline() ([]int, error) { + return readCPURangeWith(offline, listCPURange) +} + +func listOnline() ([]int, error) { + return readCPURangeWith(online, listCPURange) +} + +func listPossible() ([]int, error) { + return readCPURangeWith(possible, listCPURange) +} + +func listPresent() ([]int, error) { + return readCPURangeWith(present, listCPURange) } diff --git a/vendor/github.com/tklauser/numcpus/numcpus_list_unsupported.go b/vendor/github.com/tklauser/numcpus/numcpus_list_unsupported.go new file mode 100644 index 00000000..af4efeac --- /dev/null +++ b/vendor/github.com/tklauser/numcpus/numcpus_list_unsupported.go @@ -0,0 +1,33 @@ +// Copyright 2024 Tobias Klauser +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +//go:build !linux + +package numcpus + +func listOffline() ([]int, error) { + return nil, ErrNotSupported +} + +func listOnline() ([]int, error) { + return nil, ErrNotSupported +} + +func listPossible() ([]int, error) { + return nil, ErrNotSupported +} + +func listPresent() ([]int, error) { + return nil, ErrNotSupported +} diff --git a/vendor/github.com/urfave/cli/v2/godoc-current.txt b/vendor/github.com/urfave/cli/v2/godoc-current.txt index 4b620fee..3e29faab 100644 --- a/vendor/github.com/urfave/cli/v2/godoc-current.txt +++ b/vendor/github.com/urfave/cli/v2/godoc-current.txt @@ -35,7 +35,7 @@ var AppHelpTemplate = `NAME: {{template "helpNameTemplate" .}} USAGE: - {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} VERSION: {{.Version}}{{end}}{{end}}{{if .Description}} @@ -136,7 +136,7 @@ var SubcommandHelpTemplate = `NAME: {{template "helpNameTemplate" .}} USAGE: - {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Description}} + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}{{if .Description}} DESCRIPTION: {{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}} diff --git a/vendor/github.com/urfave/cli/v2/help.go b/vendor/github.com/urfave/cli/v2/help.go index 640e2904..874be941 100644 --- a/vendor/github.com/urfave/cli/v2/help.go +++ b/vendor/github.com/urfave/cli/v2/help.go @@ -248,7 +248,6 @@ func ShowCommandHelpAndExit(c *Context, command string, code int) { // ShowCommandHelp prints help for the given command func ShowCommandHelp(ctx *Context, command string) error { - commands := ctx.App.Commands if ctx.Command.Subcommands != nil { commands = ctx.Command.Subcommands @@ -337,7 +336,6 @@ func ShowCommandCompletions(ctx *Context, command string) { DefaultCompleteWithFlags(c)(ctx) } } - } // printHelpCustom is the default implementation of HelpPrinterCustom. @@ -345,7 +343,6 @@ func ShowCommandCompletions(ctx *Context, command string) { // The customFuncs map will be combined with a default template.FuncMap to // allow using arbitrary functions in template rendering. func printHelpCustom(out io.Writer, templ string, data interface{}, customFuncs map[string]interface{}) { - const maxLineLength = 10000 funcMap := template.FuncMap{ @@ -450,6 +447,15 @@ func checkShellCompleteFlag(a *App, arguments []string) (bool, []string) { return false, arguments } + for _, arg := range arguments { + // If arguments include "--", shell completion is disabled + // because after "--" only positional arguments are accepted. 
+ // https://unix.stackexchange.com/a/11382 + if arg == "--" { + return false, arguments + } + } + return true, arguments[:pos] } @@ -499,7 +505,6 @@ func wrap(input string, offset int, wrapAt int) string { ss = append(ss, wrapped) } else { ss = append(ss, padding+wrapped) - } } diff --git a/vendor/github.com/urfave/cli/v2/template.go b/vendor/github.com/urfave/cli/v2/template.go index 5748f4c2..8abc5ba4 100644 --- a/vendor/github.com/urfave/cli/v2/template.go +++ b/vendor/github.com/urfave/cli/v2/template.go @@ -1,7 +1,7 @@ package cli var helpNameTemplate = `{{$v := offset .HelpName 6}}{{wrap .HelpName 3}}{{if .Usage}} - {{wrap .Usage $v}}{{end}}` -var usageTemplate = `{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}}{{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}` +var usageTemplate = `{{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}}{{if .VisibleFlags}} [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}` var descriptionTemplate = `{{wrap .Description 3}}` var authorsTemplate = `{{with $length := len .Authors}}{{if ne 1 $length}}S{{end}}{{end}}: {{range $index, $author := .Authors}}{{if $index}} @@ -35,7 +35,7 @@ var AppHelpTemplate = `NAME: {{template "helpNameTemplate" .}} USAGE: - {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}{{if .Version}}{{if not .HideVersion}} VERSION: {{.Version}}{{end}}{{end}}{{if .Description}} @@ -83,7 +83,7 @@ var SubcommandHelpTemplate = `NAME: {{template "helpNameTemplate" .}} USAGE: - {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}{{if .Args}}[arguments...]{{end}}{{end}}{{end}}{{if .Description}} + {{if .UsageText}}{{wrap .UsageText 3}}{{else}}{{.HelpName}} {{if .VisibleFlags}}command [command options]{{end}}{{if .ArgsUsage}} {{.ArgsUsage}}{{else}}{{if .Args}} [arguments...]{{end}}{{end}}{{end}}{{if .Description}} DESCRIPTION: {{template "descriptionTemplate" .}}{{end}}{{if .VisibleCommands}} diff --git a/vendor/github.com/vektah/gqlparser/v2/ast/document.go b/vendor/github.com/vektah/gqlparser/v2/ast/document.go index a3a9e98d..7881f349 100644 --- a/vendor/github.com/vektah/gqlparser/v2/ast/document.go +++ b/vendor/github.com/vektah/gqlparser/v2/ast/document.go @@ -26,9 +26,10 @@ func (d *SchemaDocument) Merge(other *SchemaDocument) { } type Schema struct { - Query *Definition - Mutation *Definition - Subscription *Definition + Query *Definition + Mutation *Definition + Subscription *Definition + SchemaDirectives DirectiveList Types map[string]*Definition Directives map[string]*DirectiveDefinition diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go index 24d4f3db..daa86448 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go +++ 
b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fields_on_correct_type.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "fmt" @@ -11,8 +11,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("FieldsOnCorrectType", func(observers *Events, addError AddErrFunc) { +var FieldsOnCorrectTypeRule = Rule{ + Name: "FieldsOnCorrectType", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnField(func(walker *Walker, field *ast.Field) { if field.ObjectDefinition == nil || field.Definition != nil { return @@ -27,14 +28,18 @@ func init() { } addError( - Message(message), + Message("%s", message), At(field.Position), ) }) - }) + }, } -// Go through all of the implementations of type, as well as the interfaces +func init() { + AddRule(FieldsOnCorrectTypeRule.Name, FieldsOnCorrectTypeRule.RuleFunc) +} + +// Go through all the implementations of type, as well as the interfaces // that they implement. If any of those types include the provided field, // suggest them, sorted by how often the type is referenced, starting // with Interfaces. diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go index 81ef861b..ccbffbf6 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/fragments_on_composite_types.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "fmt" @@ -9,8 +9,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("FragmentsOnCompositeTypes", func(observers *Events, addError AddErrFunc) { +var FragmentsOnCompositeTypesRule = Rule{ + Name: "FragmentsOnCompositeTypes", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnInlineFragment(func(walker *Walker, inlineFragment *ast.InlineFragment) { fragmentType := walker.Schema.Types[inlineFragment.TypeCondition] if fragmentType == nil || fragmentType.IsCompositeType() { @@ -20,7 +21,7 @@ func init() { message := fmt.Sprintf(`Fragment cannot condition on non composite type "%s".`, inlineFragment.TypeCondition) addError( - Message(message), + Message("%s", message), At(inlineFragment.Position), ) }) @@ -33,9 +34,13 @@ func init() { message := fmt.Sprintf(`Fragment "%s" cannot condition on non composite type "%s".`, fragment.Name, fragment.TypeCondition) addError( - Message(message), + Message("%s", message), At(fragment.Position), ) }) - }) + }, +} + +func init() { + AddRule(FragmentsOnCompositeTypesRule.Name, FragmentsOnCompositeTypesRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go index c187dabf..2659aeb5 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_argument_names.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("KnownArgumentNames", func(observers *Events, addError AddErrFunc) { +var KnownArgumentNamesRule = Rule{ + Name: "KnownArgumentNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { // A GraphQL field is only valid if all supplied arguments are defined by that field. 
observers.OnField(func(walker *Walker, field *ast.Field) { if field.Definition == nil || field.ObjectDefinition == nil { @@ -55,5 +56,9 @@ func init() { ) } }) - }) + }, +} + +func init() { + AddRule(KnownArgumentNamesRule.Name, KnownArgumentNamesRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go index f7bae811..b68fa5e8 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_directives.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("KnownDirectives", func(observers *Events, addError AddErrFunc) { +var KnownDirectivesRule = Rule{ + Name: "KnownDirectives", + RuleFunc: func(observers *Events, addError AddErrFunc) { type mayNotBeUsedDirective struct { Name string Line int @@ -45,5 +46,9 @@ func init() { seen[tmp] = true } }) - }) + }, +} + +func init() { + AddRule(KnownDirectivesRule.Name, KnownDirectivesRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go index 3afd9c1c..77a82f67 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_fragment_names.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("KnownFragmentNames", func(observers *Events, addError AddErrFunc) { +var KnownFragmentNamesRule = Rule{ + Name: "KnownFragmentNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnFragmentSpread(func(walker *Walker, fragmentSpread *ast.FragmentSpread) { if fragmentSpread.Definition == nil { addError( @@ -17,5 +18,9 @@ func init() { ) } }) - }) + }, +} + +func init() { + AddRule(KnownFragmentNamesRule.Name, KnownFragmentNamesRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go index 60bc0d52..76962c3d 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_root_type.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "fmt" @@ -9,8 +9,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("KnownRootType", func(observers *Events, addError AddErrFunc) { +var KnownRootTypeRule = Rule{ + Name: "KnownRootType", + RuleFunc: func(observers *Events, addError AddErrFunc) { // A query's root must be a valid type. Surprisingly, this isn't // checked anywhere else! 
observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { @@ -33,5 +34,9 @@ func init() { At(operation.Position)) } }) - }) + }, +} + +func init() { + AddRule(KnownRootTypeRule.Name, KnownRootTypeRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go index 902939d3..717019fb 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/known_type_names.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("KnownTypeNames", func(observers *Events, addError AddErrFunc) { +var KnownTypeNamesRule = Rule{ + Name: "KnownTypeNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnVariable(func(walker *Walker, variable *ast.VariableDefinition) { typeName := variable.Type.Name() typdef := walker.Schema.Types[typeName] @@ -57,5 +58,9 @@ func init() { At(fragment.Position), ) }) - }) + }, +} + +func init() { + AddRule(KnownTypeNamesRule.Name, KnownTypeNamesRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go index fe8bb203..ba71f141 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/lone_anonymous_operation.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("LoneAnonymousOperation", func(observers *Events, addError AddErrFunc) { +var LoneAnonymousOperationRule = Rule{ + Name: "LoneAnonymousOperation", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { if operation.Name == "" && len(walker.Document.Operations) > 1 { addError( @@ -17,5 +18,9 @@ func init() { ) } }) - }) + }, +} + +func init() { + AddRule(LoneAnonymousOperationRule.Name, LoneAnonymousOperationRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go index a953174f..c80def7c 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_fragment_cycles.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "fmt" @@ -10,8 +10,9 @@ import ( . 
"github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("NoFragmentCycles", func(observers *Events, addError AddErrFunc) { +var NoFragmentCyclesRule = Rule{ + Name: "NoFragmentCycles", + RuleFunc: func(observers *Events, addError AddErrFunc) { visitedFrags := make(map[string]bool) observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) { @@ -67,7 +68,11 @@ func init() { recursive(fragment) }) - }) + }, +} + +func init() { + AddRule(NoFragmentCyclesRule.Name, NoFragmentCyclesRule.RuleFunc) } func getFragmentSpreads(node ast.SelectionSet) []*ast.FragmentSpread { diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go index 46c18d12..84ed5fa7 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_undefined_variables.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("NoUndefinedVariables", func(observers *Events, addError AddErrFunc) { +var NoUndefinedVariablesRule = Rule{ + Name: "NoUndefinedVariables", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnValue(func(walker *Walker, value *ast.Value) { if walker.CurrentOperation == nil || value.Kind != ast.Variable || value.VariableDefinition != nil { return @@ -26,5 +27,9 @@ func init() { ) } }) - }) + }, +} + +func init() { + AddRule(NoUndefinedVariablesRule.Name, NoUndefinedVariablesRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go index 59d9c15c..e95bbe69 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_fragments.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("NoUnusedFragments", func(observers *Events, addError AddErrFunc) { +var NoUnusedFragmentsRule = Rule{ + Name: "NoUnusedFragments", + RuleFunc: func(observers *Events, addError AddErrFunc) { inFragmentDefinition := false fragmentNameUsed := make(map[string]bool) @@ -27,5 +28,9 @@ func init() { ) } }) - }) + }, +} + +func init() { + AddRule(NoUnusedFragmentsRule.Name, NoUnusedFragmentsRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go index d3088109..425df205 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/no_unused_variables.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . 
"github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("NoUnusedVariables", func(observers *Events, addError AddErrFunc) { +var NoUnusedVariablesRule = Rule{ + Name: "NoUnusedVariables", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { for _, varDef := range operation.VariableDefinitions { if varDef.Used { @@ -28,5 +29,9 @@ func init() { } } }) - }) + }, +} + +func init() { + AddRule(NoUnusedVariablesRule.Name, NoUnusedVariablesRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go index eaa2035e..d2c66aac 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/overlapping_fields_can_be_merged.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "bytes" @@ -11,8 +11,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("OverlappingFieldsCanBeMerged", func(observers *Events, addError AddErrFunc) { +var OverlappingFieldsCanBeMergedRule = Rule{ + Name: "OverlappingFieldsCanBeMerged", + RuleFunc: func(observers *Events, addError AddErrFunc) { /** * Algorithm: * @@ -41,7 +42,7 @@ func init() { * * D) When comparing "between" a set of fields and a referenced fragment, first * a comparison is made between each field in the original set of fields and - * each field in the the referenced set of fields. + * each field in the referenced set of fields. * * E) Also, if any fragment is referenced in the referenced selection set, * then a comparison is made "between" the original set of fields and the @@ -104,7 +105,11 @@ func init() { conflict.addFieldsConflictMessage(addError) } }) - }) + }, +} + +func init() { + AddRule(OverlappingFieldsCanBeMergedRule.Name, OverlappingFieldsCanBeMergedRule.RuleFunc) } type pairSet struct { diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go index 244e5f20..01255e9b 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/possible_fragment_spreads.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . 
"github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("PossibleFragmentSpreads", func(observers *Events, addError AddErrFunc) { +var PossibleFragmentSpreadsRule = Rule{ + Name: "PossibleFragmentSpreads", + RuleFunc: func(observers *Events, addError AddErrFunc) { validate := func(walker *Walker, parentDef *ast.Definition, fragmentName string, emitError func()) { if parentDef == nil { return @@ -65,5 +66,9 @@ func init() { ) }) }) - }) + }, +} + +func init() { + AddRule(PossibleFragmentSpreadsRule.Name, PossibleFragmentSpreadsRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go index ab79163b..37428d44 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/provided_required_arguments.go @@ -1,14 +1,14 @@ -package validator +package rules import ( - "github.com/vektah/gqlparser/v2/ast" - //nolint:revive // Validator rules each use dot imports for convenience. + "github.com/vektah/gqlparser/v2/ast" . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("ProvidedRequiredArguments", func(observers *Events, addError AddErrFunc) { +var ProvidedRequiredArgumentsRule = Rule{ + Name: "ProvidedRequiredArguments", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnField(func(walker *Walker, field *ast.Field) { if field.Definition == nil { return @@ -60,5 +60,9 @@ func init() { ) } }) - }) + }, +} + +func init() { + AddRule(ProvidedRequiredArgumentsRule.Name, ProvidedRequiredArgumentsRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go index 605ab9e8..5628ce2c 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/scalar_leafs.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("ScalarLeafs", func(observers *Events, addError AddErrFunc) { +var ScalarLeafsRule = Rule{ + Name: "ScalarLeafs", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnField(func(walker *Walker, field *ast.Field) { if field.Definition == nil { return @@ -34,5 +35,9 @@ func init() { ) } }) - }) + }, +} + +func init() { + AddRule(ScalarLeafsRule.Name, ScalarLeafsRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go index 7d4c6843..94a4b304 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/single_field_subscriptions.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "strconv" @@ -10,8 +10,9 @@ import ( . 
"github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("SingleFieldSubscriptions", func(observers *Events, addError AddErrFunc) { +var SingleFieldSubscriptionsRule = Rule{ + Name: "SingleFieldSubscriptions", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { if walker.Schema.Subscription == nil || operation.Operation != ast.Subscription { return @@ -40,7 +41,11 @@ func init() { } } }) - }) + }, +} + +func init() { + AddRule(SingleFieldSubscriptionsRule.Name, SingleFieldSubscriptionsRule.RuleFunc) } type topField struct { diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go index e977d638..d0c52909 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_argument_names.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("UniqueArgumentNames", func(observers *Events, addError AddErrFunc) { +var UniqueArgumentNamesRule = Rule{ + Name: "UniqueArgumentNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnField(func(walker *Walker, field *ast.Field) { checkUniqueArgs(field.Arguments, addError) }) @@ -16,7 +17,11 @@ func init() { observers.OnDirective(func(walker *Walker, directive *ast.Directive) { checkUniqueArgs(directive.Arguments, addError) }) - }) + }, +} + +func init() { + AddRule(UniqueArgumentNamesRule.Name, UniqueArgumentNamesRule.RuleFunc) } func checkUniqueArgs(args ast.ArgumentList, addError AddErrFunc) { diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go index 47971ee1..4cab38bd 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_directives_per_location.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("UniqueDirectivesPerLocation", func(observers *Events, addError AddErrFunc) { +var UniqueDirectivesPerLocationRule = Rule{ + Name: "UniqueDirectivesPerLocation", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnDirectiveList(func(walker *Walker, directives []*ast.Directive) { seen := map[string]bool{} @@ -22,5 +23,9 @@ func init() { seen[dir.Name] = true } }) - }) + }, +} + +func init() { + AddRule(UniqueDirectivesPerLocationRule.Name, UniqueDirectivesPerLocationRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go index 2c44a437..3d2c7289 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_fragment_names.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . 
"github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("UniqueFragmentNames", func(observers *Events, addError AddErrFunc) { +var UniqueFragmentNamesRule = Rule{ + Name: "UniqueFragmentNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { seenFragments := map[string]bool{} observers.OnFragment(func(walker *Walker, fragment *ast.FragmentDefinition) { @@ -20,5 +21,9 @@ func init() { } seenFragments[fragment.Name] = true }) - }) + }, +} + +func init() { + AddRule(UniqueFragmentNamesRule.Name, UniqueFragmentNamesRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go index c5fce8ff..3ccbbfe0 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_input_field_names.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("UniqueInputFieldNames", func(observers *Events, addError AddErrFunc) { +var UniqueInputFieldNamesRule = Rule{ + Name: "UniqueInputFieldNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnValue(func(walker *Walker, value *ast.Value) { if value.Kind != ast.ObjectValue { return @@ -25,5 +26,9 @@ func init() { seen[field.Name] = true } }) - }) + }, +} + +func init() { + AddRule(UniqueInputFieldNamesRule.Name, UniqueInputFieldNamesRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go index 49ffbe47..401b0ad3 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_operation_names.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("UniqueOperationNames", func(observers *Events, addError AddErrFunc) { +var UniqueOperationNamesRule = Rule{ + Name: "UniqueOperationNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { seen := map[string]bool{} observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { @@ -20,5 +21,9 @@ func init() { } seen[operation.Name] = true }) - }) + }, +} + +func init() { + AddRule(UniqueOperationNamesRule.Name, UniqueOperationNamesRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go index c93948c1..f0e4a200 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/unique_variable_names.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . 
"github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("UniqueVariableNames", func(observers *Events, addError AddErrFunc) { +var UniqueVariableNamesRule = Rule{ + Name: "UniqueVariableNames", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { seen := map[string]int{} for _, def := range operation.VariableDefinitions { @@ -22,5 +23,9 @@ func init() { seen[def.Variable]++ } }) - }) + }, +} + +func init() { + AddRule(UniqueVariableNamesRule.Name, UniqueVariableNamesRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go index 914e428e..7784fe0c 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/values_of_correct_type.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "errors" @@ -11,8 +11,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("ValuesOfCorrectType", func(observers *Events, addError AddErrFunc) { +var ValuesOfCorrectTypeRule = Rule{ + Name: "ValuesOfCorrectType", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnValue(func(walker *Walker, value *ast.Value) { if value.Definition == nil || value.ExpectedType == nil { return @@ -134,7 +135,11 @@ func init() { panic(fmt.Errorf("unhandled %T", value)) } }) - }) + }, +} + +func init() { + AddRule(ValuesOfCorrectTypeRule.Name, ValuesOfCorrectTypeRule.RuleFunc) } func unexpectedTypeMessage(addError AddErrFunc, v *ast.Value) { diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go index d16ee021..59b2e6f7 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_are_input_types.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . "github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("VariablesAreInputTypes", func(observers *Events, addError AddErrFunc) { +var VariablesAreInputTypesRule = Rule{ + Name: "VariablesAreInputTypes", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnOperation(func(walker *Walker, operation *ast.OperationDefinition) { for _, def := range operation.VariableDefinitions { if def.Definition == nil { @@ -26,5 +27,9 @@ func init() { } } }) - }) + }, +} + +func init() { + AddRule(VariablesAreInputTypesRule.Name, VariablesAreInputTypesRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go index e3fd6fbb..75b0f7f7 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/rules/variables_in_allowed_position.go @@ -1,4 +1,4 @@ -package validator +package rules import ( "github.com/vektah/gqlparser/v2/ast" @@ -7,8 +7,9 @@ import ( . 
"github.com/vektah/gqlparser/v2/validator" ) -func init() { - AddRule("VariablesInAllowedPosition", func(observers *Events, addError AddErrFunc) { +var VariablesInAllowedPositionRule = Rule{ + Name: "VariablesInAllowedPosition", + RuleFunc: func(observers *Events, addError AddErrFunc) { observers.OnValue(func(walker *Walker, value *ast.Value) { if value.Kind != ast.Variable || value.ExpectedType == nil || value.VariableDefinition == nil || walker.CurrentOperation == nil { return @@ -36,5 +37,9 @@ func init() { ) } }) - }) + }, +} + +func init() { + AddRule(VariablesInAllowedPositionRule.Name, VariablesInAllowedPositionRule.RuleFunc) } diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/schema.go b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go index d8590284..9f9ddde4 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/schema.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/schema.go @@ -122,6 +122,10 @@ func ValidateSchemaDocument(sd *SchemaDocument) (*Schema, error) { schema.Subscription = def } } + if err := validateDirectives(&schema, sd.Schema[0].Directives, LocationSchema, nil); err != nil { + return nil, err + } + schema.SchemaDirectives = append(schema.SchemaDirectives, sd.Schema[0].Directives...) } for _, ext := range sd.SchemaExtension { @@ -139,6 +143,10 @@ func ValidateSchemaDocument(sd *SchemaDocument) (*Schema, error) { schema.Subscription = def } } + if err := validateDirectives(&schema, ext.Directives, LocationSchema, nil); err != nil { + return nil, err + } + schema.SchemaDirectives = append(schema.SchemaDirectives, ext.Directives...) } if err := validateTypeDefinitions(&schema); err != nil { diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/validator.go b/vendor/github.com/vektah/gqlparser/v2/validator/validator.go index b4f37ce2..36564b23 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/validator.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/validator.go @@ -8,22 +8,26 @@ import ( type AddErrFunc func(options ...ErrorOption) -type ruleFunc func(observers *Events, addError AddErrFunc) +type RuleFunc func(observers *Events, addError AddErrFunc) -type rule struct { - name string - rule ruleFunc +type Rule struct { + Name string + RuleFunc RuleFunc } -var rules []rule +var specifiedRules []Rule -// addRule to rule set. +// AddRule adds rule to the rule set. // f is called once each time `Validate` is executed. 
-func AddRule(name string, f ruleFunc) { - rules = append(rules, rule{name: name, rule: f}) +func AddRule(name string, ruleFunc RuleFunc) { + specifiedRules = append(specifiedRules, Rule{Name: name, RuleFunc: ruleFunc}) } -func Validate(schema *Schema, doc *QueryDocument) gqlerror.List { +func Validate(schema *Schema, doc *QueryDocument, rules ...Rule) gqlerror.List { + if rules == nil { + rules = specifiedRules + } + var errs gqlerror.List if schema == nil { errs = append(errs, gqlerror.Errorf("cannot validate as Schema is nil")) @@ -37,9 +41,9 @@ func Validate(schema *Schema, doc *QueryDocument) gqlerror.List { observers := &Events{} for i := range rules { rule := rules[i] - rule.rule(observers, func(options ...ErrorOption) { + rule.RuleFunc(observers, func(options ...ErrorOption) { err := &gqlerror.Error{ - Rule: rule.name, + Rule: rule.Name, } for _, o := range options { o(err) diff --git a/vendor/github.com/vektah/gqlparser/v2/validator/vars.go b/vendor/github.com/vektah/gqlparser/v2/validator/vars.go index c386b6b9..f2934caf 100644 --- a/vendor/github.com/vektah/gqlparser/v2/validator/vars.go +++ b/vendor/github.com/vektah/gqlparser/v2/validator/vars.go @@ -180,7 +180,7 @@ func (v *varValidator) validateVarType(typ *ast.Type, val reflect.Value) (reflec return val, gqlerror.ErrorPathf(v.path, "cannot use %s as %s", kind.String(), typ.NamedType) case ast.InputObject: if val.Kind() != reflect.Map { - return val, gqlerror.ErrorPathf(v.path, "must be a %s", def.Name) + return val, gqlerror.ErrorPathf(v.path, "must be a %s, not a %s", def.Name, val.Kind()) } // check for unknown fields diff --git a/vendor/go.etcd.io/bbolt/.go-version b/vendor/go.etcd.io/bbolt/.go-version index f124bfa1..013173af 100644 --- a/vendor/go.etcd.io/bbolt/.go-version +++ b/vendor/go.etcd.io/bbolt/.go-version @@ -1 +1 @@ -1.21.9 +1.22.6 diff --git a/vendor/go.etcd.io/bbolt/Makefile b/vendor/go.etcd.io/bbolt/Makefile index 18154c63..21407797 100644 --- a/vendor/go.etcd.io/bbolt/Makefile +++ b/vendor/go.etcd.io/bbolt/Makefile @@ -41,6 +41,15 @@ coverage: TEST_FREELIST_TYPE=array go test -v -timeout 30m \ -coverprofile cover-freelist-array.out -covermode atomic +BOLT_CMD=bbolt + +build: + go build -o bin/${BOLT_CMD} ./cmd/${BOLT_CMD} + +.PHONY: clean +clean: # Clean binaries + rm -f ./bin/${BOLT_CMD} + .PHONY: gofail-enable gofail-enable: install-gofail gofail enable . 
@@ -61,3 +70,7 @@ test-failpoint: @echo "[failpoint] array freelist test" TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint +.PHONY: test-robustness # Running robustness tests requires root permission +test-robustness: + go test -v ${TESTFLAGS} ./tests/dmflakey -test.root + go test -v ${TESTFLAGS} ./tests/robustness -test.root diff --git a/vendor/go.etcd.io/bbolt/db.go b/vendor/go.etcd.io/bbolt/db.go index 4175bdf3..822798e4 100644 --- a/vendor/go.etcd.io/bbolt/db.go +++ b/vendor/go.etcd.io/bbolt/db.go @@ -524,7 +524,7 @@ func (db *DB) munmap() error { // gofail: var unmapError string // return errors.New(unmapError) if err := munmap(db); err != nil { - return fmt.Errorf("unmap error: " + err.Error()) + return fmt.Errorf("unmap error: %v", err.Error()) } return nil @@ -571,7 +571,7 @@ func (db *DB) munlock(fileSize int) error { // gofail: var munlockError string // return errors.New(munlockError) if err := munlock(db, fileSize); err != nil { - return fmt.Errorf("munlock error: " + err.Error()) + return fmt.Errorf("munlock error: %v", err.Error()) } return nil } @@ -580,7 +580,7 @@ func (db *DB) mlock(fileSize int) error { // gofail: var mlockError string // return errors.New(mlockError) if err := mlock(db, fileSize); err != nil { - return fmt.Errorf("mlock error: " + err.Error()) + return fmt.Errorf("mlock error: %v", err.Error()) } return nil } @@ -1159,6 +1159,8 @@ func (db *DB) grow(sz int) error { // https://github.com/boltdb/bolt/issues/284 if !db.NoGrowSync && !db.readOnly { if runtime.GOOS != "windows" { + // gofail: var resizeFileError string + // return errors.New(resizeFileError) if err := db.file.Truncate(int64(sz)); err != nil { return fmt.Errorf("file resize error: %s", err) } diff --git a/vendor/go.etcd.io/bbolt/freelist.go b/vendor/go.etcd.io/bbolt/freelist.go index 61d43f81..dffc7bc7 100644 --- a/vendor/go.etcd.io/bbolt/freelist.go +++ b/vendor/go.etcd.io/bbolt/freelist.go @@ -252,6 +252,14 @@ func (f *freelist) rollback(txid txid) { } // Remove pages from pending list and mark as free if allocated by txid. delete(f.pending, txid) + + // Remove pgids which are allocated by this txid + for pgid, tid := range f.allocs { + if tid == txid { + delete(f.allocs, pgid) + } + } + f.mergeSpans(m) } diff --git a/vendor/go.etcd.io/bbolt/tx.go b/vendor/go.etcd.io/bbolt/tx.go index 2fac8c0a..766395de 100644 --- a/vendor/go.etcd.io/bbolt/tx.go +++ b/vendor/go.etcd.io/bbolt/tx.go @@ -1,6 +1,7 @@ package bbolt import ( + "errors" "fmt" "io" "os" @@ -185,6 +186,10 @@ func (tx *Tx) Commit() error { // If the high water mark has moved up then attempt to grow the database. if tx.meta.pgid > opgid { + _ = errors.New("") + // gofail: var lackOfDiskSpace string + // tx.rollback() + // return errors.New(lackOfDiskSpace) if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { tx.rollback() return err @@ -470,6 +475,7 @@ func (tx *Tx) write() error { // Ignore file sync if flag is set on DB. 
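A side note on the fmt.Errorf changes in db.go above: the old calls concatenated err.Error() into the format string, so any percent sign inside the underlying error message would be parsed as a formatting verb, while passing the error as an argument to %v leaves the message intact. A small self-contained illustration of the difference:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("disk is 90% full")

	// Concatenation feeds the message through the format parser, so the
	// "% f" inside it is read as a verb with a missing operand.
	fmt.Println(fmt.Errorf("unmap error: " + err.Error()))

	// Using a verb and passing the error as an argument keeps the message intact.
	fmt.Println(fmt.Errorf("unmap error: %v", err))
}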
if !tx.db.NoSync || IgnoreNoSync { + // gofail: var beforeSyncDataPages struct{} if err := fdatasync(tx.db); err != nil { return err } @@ -507,6 +513,7 @@ func (tx *Tx) writeMeta() error { return err } if !tx.db.NoSync || IgnoreNoSync { + // gofail: var beforeSyncMetaPage struct{} if err := fdatasync(tx.db); err != nil { return err } diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go index 3b974754..f9057fd2 100644 --- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go @@ -25,15 +25,18 @@ package runtime import ( "errors" - "math" cg "go.uber.org/automaxprocs/internal/cgroups" ) // CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process -// to a valid GOMAXPROCS value. -func CPUQuotaToGOMAXPROCS(minValue int) (int, CPUQuotaStatus, error) { - cgroups, err := newQueryer() +// to a valid GOMAXPROCS value. The quota is converted from float to int using round. +// If round == nil, DefaultRoundFunc is used. +func CPUQuotaToGOMAXPROCS(minValue int, round func(v float64) int) (int, CPUQuotaStatus, error) { + if round == nil { + round = DefaultRoundFunc + } + cgroups, err := _newQueryer() if err != nil { return -1, CPUQuotaUndefined, err } @@ -43,7 +46,7 @@ func CPUQuotaToGOMAXPROCS(minValue int) (int, CPUQuotaStatus, error) { return -1, CPUQuotaUndefined, err } - maxProcs := int(math.Floor(quota)) + maxProcs := round(quota) if minValue > 0 && maxProcs < minValue { return minValue, CPUQuotaMinUsed, nil } @@ -57,6 +60,7 @@ type queryer interface { var ( _newCgroups2 = cg.NewCGroups2ForCurrentProcess _newCgroups = cg.NewCGroupsForCurrentProcess + _newQueryer = newQueryer ) func newQueryer() (queryer, error) { diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go index 69225544..e7470150 100644 --- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go @@ -26,6 +26,6 @@ package runtime // CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process // to a valid GOMAXPROCS value. This is Linux-specific and not supported in the // current OS. -func CPUQuotaToGOMAXPROCS(_ int) (int, CPUQuotaStatus, error) { +func CPUQuotaToGOMAXPROCS(_ int, _ func(v float64) int) (int, CPUQuotaStatus, error) { return -1, CPUQuotaUndefined, nil } diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go index df6eacf0..f8a2834a 100644 --- a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go +++ b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go @@ -20,6 +20,8 @@ package runtime +import "math" + // CPUQuotaStatus presents the status of how CPU quota is used type CPUQuotaStatus int @@ -31,3 +33,8 @@ const ( // CPUQuotaMinUsed is returned when CPU quota is smaller than the min value CPUQuotaMinUsed ) + +// DefaultRoundFunc is the default function to convert CPU quota from float to int. It rounds the value down (floor). 
+func DefaultRoundFunc(v float64) int { + return int(math.Floor(v)) +} diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go index 98176d64..e561fe60 100644 --- a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go +++ b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go @@ -37,9 +37,10 @@ func currentMaxProcs() int { } type config struct { - printf func(string, ...interface{}) - procs func(int) (int, iruntime.CPUQuotaStatus, error) - minGOMAXPROCS int + printf func(string, ...interface{}) + procs func(int, func(v float64) int) (int, iruntime.CPUQuotaStatus, error) + minGOMAXPROCS int + roundQuotaFunc func(v float64) int } func (c *config) log(fmt string, args ...interface{}) { @@ -71,6 +72,13 @@ func Min(n int) Option { }) } +// RoundQuotaFunc sets the function that will be used to covert the CPU quota from float to int. +func RoundQuotaFunc(rf func(v float64) int) Option { + return optionFunc(func(cfg *config) { + cfg.roundQuotaFunc = rf + }) +} + type optionFunc func(*config) func (of optionFunc) apply(cfg *config) { of(cfg) } @@ -82,8 +90,9 @@ func (of optionFunc) apply(cfg *config) { of(cfg) } // configured CPU quota. func Set(opts ...Option) (func(), error) { cfg := &config{ - procs: iruntime.CPUQuotaToGOMAXPROCS, - minGOMAXPROCS: 1, + procs: iruntime.CPUQuotaToGOMAXPROCS, + roundQuotaFunc: iruntime.DefaultRoundFunc, + minGOMAXPROCS: 1, } for _, o := range opts { o.apply(cfg) @@ -102,7 +111,7 @@ func Set(opts ...Option) (func(), error) { return undoNoop, nil } - maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS) + maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS, cfg.roundQuotaFunc) if err != nil { return undoNoop, err } diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/vendor/go.uber.org/automaxprocs/maxprocs/version.go index 108a9553..cc7fc5ae 100644 --- a/vendor/go.uber.org/automaxprocs/maxprocs/version.go +++ b/vendor/go.uber.org/automaxprocs/maxprocs/version.go @@ -21,4 +21,4 @@ package maxprocs // Version is the current package version. -const Version = "1.5.2" +const Version = "1.6.0" diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s index 6713acca..c3895478 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -1,243 +1,2791 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run blamka_amd64.go -out ../blamka_amd64.s -pkg argon2. DO NOT EDIT. 
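The automaxprocs hunks above (v1.6.0) make the quota-to-GOMAXPROCS rounding pluggable: CPUQuotaToGOMAXPROCS now takes a rounding function, DefaultRoundFunc preserves the previous floor behaviour, and the new RoundQuotaFunc option exposes the choice to callers of maxprocs.Set. A minimal sketch of the option in use; the ceil-based rounding is only an illustrative choice:

package main

import (
	"log"
	"math"

	"go.uber.org/automaxprocs/maxprocs"
)

func main() {
	// Round fractional CPU quotas up, so a 2.5-CPU limit yields GOMAXPROCS=3
	// instead of the default floor of 2.
	undo, err := maxprocs.Set(
		maxprocs.Logger(log.Printf),
		maxprocs.RoundQuotaFunc(func(v float64) int { return int(math.Ceil(v)) }),
	)
	defer undo()
	if err != nil {
		log.Printf("maxprocs: %v", err)
	}
}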
//go:build amd64 && gc && !purego #include "textflag.h" -DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 - -#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v6, t1; \ - PUNPCKLQDQ v6, t2; \ - PUNPCKHQDQ v7, v6; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ v7, t2; \ - MOVO t1, v7; \ - MOVO v2, t1; \ - PUNPCKHQDQ t2, v7; \ - PUNPCKLQDQ v3, t2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v3 - -#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ - MOVO v4, t1; \ - MOVO v5, v4; \ - MOVO t1, v5; \ - MOVO v2, t1; \ - PUNPCKLQDQ v2, t2; \ - PUNPCKHQDQ v3, v2; \ - PUNPCKHQDQ t2, v2; \ - PUNPCKLQDQ v3, t2; \ - MOVO t1, v3; \ - MOVO v6, t1; \ - PUNPCKHQDQ t2, v3; \ - PUNPCKLQDQ v7, t2; \ - PUNPCKHQDQ t2, v6; \ - PUNPCKLQDQ t1, t2; \ - PUNPCKHQDQ t2, v7 - -#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFD $0xB1, v6, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - PSHUFB c40, v2; \ - MOVO v0, t0; \ - PMULULQ v2, t0; \ - PADDQ v2, v0; \ - PADDQ t0, v0; \ - PADDQ t0, v0; \ - PXOR v0, v6; \ - PSHUFB c48, v6; \ - MOVO v4, t0; \ - PMULULQ v6, t0; \ - PADDQ v6, v4; \ - PADDQ t0, v4; \ - PADDQ t0, v4; \ - PXOR v4, v2; \ - MOVO v2, t0; \ - PADDQ v2, t0; \ - PSRLQ $63, v2; \ - PXOR t0, v2; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFD $0xB1, v7, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - PSHUFB c40, v3; \ - MOVO v1, t0; \ - PMULULQ v3, t0; \ - PADDQ v3, v1; \ - PADDQ t0, v1; \ - PADDQ t0, v1; \ - PXOR v1, v7; \ - PSHUFB c48, v7; \ - MOVO v5, t0; \ - PMULULQ v7, t0; \ - PADDQ v7, v5; \ - PADDQ t0, v5; \ - PADDQ t0, v5; \ - PXOR v5, v3; \ - MOVO v3, t0; \ - PADDQ v3, t0; \ - PSRLQ $63, v3; \ - PXOR t0, v3 - -#define LOAD_MSG_0(block, off) \ - MOVOU 8*(off+0)(block), X0; \ - MOVOU 8*(off+2)(block), X1; \ - MOVOU 8*(off+4)(block), X2; \ - MOVOU 8*(off+6)(block), X3; \ - MOVOU 8*(off+8)(block), X4; \ - MOVOU 8*(off+10)(block), X5; \ - MOVOU 8*(off+12)(block), X6; \ - MOVOU 8*(off+14)(block), X7 - -#define STORE_MSG_0(block, off) \ - MOVOU X0, 8*(off+0)(block); \ - MOVOU X1, 8*(off+2)(block); \ - MOVOU X2, 8*(off+4)(block); \ - MOVOU X3, 8*(off+6)(block); \ - MOVOU X4, 8*(off+8)(block); \ - MOVOU X5, 8*(off+10)(block); \ - MOVOU X6, 8*(off+12)(block); \ - MOVOU X7, 8*(off+14)(block) - -#define LOAD_MSG_1(block, off) \ - MOVOU 8*off+0*8(block), X0; \ - MOVOU 8*off+16*8(block), X1; \ - MOVOU 8*off+32*8(block), X2; \ - MOVOU 8*off+48*8(block), X3; \ - MOVOU 8*off+64*8(block), X4; \ - MOVOU 8*off+80*8(block), X5; \ - MOVOU 8*off+96*8(block), X6; \ - MOVOU 8*off+112*8(block), X7 - -#define STORE_MSG_1(block, off) \ - MOVOU X0, 8*off+0*8(block); \ - MOVOU X1, 8*off+16*8(block); \ - MOVOU X2, 8*off+32*8(block); \ - MOVOU X3, 8*off+48*8(block); \ - MOVOU X4, 8*off+64*8(block); \ - MOVOU X5, 8*off+80*8(block); \ - MOVOU X6, 8*off+96*8(block); \ - MOVOU X7, 8*off+112*8(block) - -#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ - LOAD_MSG_0(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, 
t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_0(block, off) - -#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ - LOAD_MSG_1(block, off); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ - HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ - SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ - STORE_MSG_1(block, off) - // func blamkaSSE4(b *block) -TEXT ·blamkaSSE4(SB), 4, $0-8 - MOVQ b+0(FP), AX - - MOVOU ·c40<>(SB), X10 - MOVOU ·c48<>(SB), X11 - - BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) - BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) - - BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) - BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) +// Requires: SSE2, SSSE3 +TEXT ·blamkaSSE4(SB), NOSPLIT, $0-8 + MOVQ b+0(FP), AX + MOVOU ·c40<>+0(SB), X10 + MOVOU ·c48<>+0(SB), X11 + MOVOU (AX), X0 + MOVOU 16(AX), X1 + MOVOU 32(AX), X2 + MOVOU 48(AX), X3 + MOVOU 64(AX), X4 + MOVOU 80(AX), X5 + MOVOU 96(AX), X6 + MOVOU 112(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + 
MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 16(AX) + MOVOU X2, 32(AX) + MOVOU X3, 48(AX) + MOVOU X4, 64(AX) + MOVOU X5, 80(AX) + MOVOU X6, 96(AX) + MOVOU X7, 112(AX) + MOVOU 128(AX), X0 + MOVOU 144(AX), X1 + MOVOU 160(AX), X2 + MOVOU 176(AX), X3 + MOVOU 192(AX), X4 + MOVOU 208(AX), X5 + MOVOU 224(AX), X6 + MOVOU 240(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 128(AX) + MOVOU X1, 144(AX) + MOVOU X2, 160(AX) + MOVOU X3, 176(AX) + MOVOU X4, 192(AX) + MOVOU X5, 208(AX) + MOVOU X6, 224(AX) + MOVOU X7, 240(AX) + MOVOU 256(AX), X0 + MOVOU 272(AX), X1 + MOVOU 288(AX), X2 + MOVOU 304(AX), X3 + MOVOU 320(AX), X4 + MOVOU 336(AX), X5 + 
MOVOU 352(AX), X6 + MOVOU 368(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 256(AX) + MOVOU X1, 272(AX) + MOVOU X2, 288(AX) + MOVOU X3, 304(AX) + MOVOU X4, 320(AX) + MOVOU X5, 336(AX) + MOVOU X6, 352(AX) + MOVOU X7, 368(AX) + MOVOU 384(AX), X0 + MOVOU 400(AX), X1 + MOVOU 416(AX), X2 + MOVOU 432(AX), X3 + MOVOU 448(AX), X4 + MOVOU 464(AX), X5 + MOVOU 480(AX), X6 + MOVOU 496(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, 
X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 384(AX) + MOVOU X1, 400(AX) + MOVOU X2, 416(AX) + MOVOU X3, 432(AX) + MOVOU X4, 448(AX) + MOVOU X5, 464(AX) + MOVOU X6, 480(AX) + MOVOU X7, 496(AX) + MOVOU 512(AX), X0 + MOVOU 528(AX), X1 + MOVOU 544(AX), X2 + MOVOU 560(AX), X3 + MOVOU 576(AX), X4 + MOVOU 592(AX), X5 + MOVOU 608(AX), X6 + MOVOU 624(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + 
MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 512(AX) + MOVOU X1, 528(AX) + MOVOU X2, 544(AX) + MOVOU X3, 560(AX) + MOVOU X4, 576(AX) + MOVOU X5, 592(AX) + MOVOU X6, 608(AX) + MOVOU X7, 624(AX) + MOVOU 640(AX), X0 + MOVOU 656(AX), X1 + MOVOU 672(AX), X2 + MOVOU 688(AX), X3 + MOVOU 704(AX), X4 + MOVOU 720(AX), X5 + MOVOU 736(AX), X6 + MOVOU 752(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + 
PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 640(AX) + MOVOU X1, 656(AX) + MOVOU X2, 672(AX) + MOVOU X3, 688(AX) + MOVOU X4, 704(AX) + MOVOU X5, 720(AX) + MOVOU X6, 736(AX) + MOVOU X7, 752(AX) + MOVOU 768(AX), X0 + MOVOU 784(AX), X1 + MOVOU 800(AX), X2 + MOVOU 816(AX), X3 + MOVOU 832(AX), X4 + MOVOU 848(AX), X5 + MOVOU 864(AX), X6 + MOVOU 880(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 768(AX) + MOVOU X1, 784(AX) + MOVOU X2, 800(AX) + MOVOU X3, 816(AX) + MOVOU X4, 832(AX) + MOVOU X5, 848(AX) + MOVOU X6, 864(AX) + MOVOU X7, 880(AX) + MOVOU 896(AX), X0 + MOVOU 912(AX), X1 + MOVOU 928(AX), X2 + MOVOU 944(AX), X3 + MOVOU 960(AX), X4 + MOVOU 976(AX), X5 + MOVOU 992(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + 
PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 896(AX) + MOVOU X1, 912(AX) + MOVOU X2, 928(AX) + MOVOU X3, 944(AX) + MOVOU X4, 960(AX) + MOVOU X5, 976(AX) + MOVOU X6, 992(AX) + MOVOU X7, 1008(AX) + MOVOU (AX), X0 + MOVOU 128(AX), X1 + MOVOU 256(AX), X2 + MOVOU 384(AX), X3 + MOVOU 512(AX), X4 + MOVOU 640(AX), X5 + MOVOU 768(AX), X6 + MOVOU 896(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + 
PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, (AX) + MOVOU X1, 128(AX) + MOVOU X2, 256(AX) + MOVOU X3, 384(AX) + MOVOU X4, 512(AX) + MOVOU X5, 640(AX) + MOVOU X6, 768(AX) + MOVOU X7, 896(AX) + MOVOU 16(AX), X0 + MOVOU 144(AX), X1 + MOVOU 272(AX), X2 + MOVOU 400(AX), X3 + MOVOU 528(AX), X4 + MOVOU 656(AX), X5 + MOVOU 784(AX), X6 + MOVOU 912(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, 
X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 16(AX) + MOVOU X1, 144(AX) + MOVOU X2, 272(AX) + MOVOU X3, 400(AX) + MOVOU X4, 528(AX) + MOVOU X5, 656(AX) + MOVOU X6, 784(AX) + MOVOU X7, 912(AX) + MOVOU 32(AX), X0 + MOVOU 160(AX), X1 + MOVOU 288(AX), X2 + MOVOU 416(AX), X3 + MOVOU 544(AX), X4 + MOVOU 672(AX), X5 + MOVOU 800(AX), X6 + MOVOU 928(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 32(AX) + MOVOU X1, 160(AX) + MOVOU X2, 288(AX) + MOVOU X3, 416(AX) + MOVOU X4, 544(AX) + MOVOU X5, 672(AX) + MOVOU X6, 800(AX) + MOVOU X7, 928(AX) + MOVOU 48(AX), X0 + MOVOU 176(AX), X1 + MOVOU 304(AX), X2 + MOVOU 432(AX), X3 + MOVOU 560(AX), X4 + MOVOU 688(AX), X5 + MOVOU 816(AX), X6 + MOVOU 944(AX), X7 
+ MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 48(AX) + MOVOU X1, 176(AX) + MOVOU X2, 304(AX) + MOVOU X3, 432(AX) + MOVOU X4, 560(AX) + MOVOU X5, 688(AX) + MOVOU X6, 816(AX) + MOVOU X7, 944(AX) + MOVOU 64(AX), X0 + MOVOU 192(AX), X1 + MOVOU 320(AX), X2 + MOVOU 448(AX), X3 + MOVOU 576(AX), X4 + MOVOU 704(AX), X5 + MOVOU 832(AX), X6 + MOVOU 960(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB 
X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 64(AX) + MOVOU X1, 192(AX) + MOVOU X2, 320(AX) + MOVOU X3, 448(AX) + MOVOU X4, 576(AX) + MOVOU X5, 704(AX) + MOVOU X6, 832(AX) + MOVOU X7, 960(AX) + MOVOU 80(AX), X0 + MOVOU 208(AX), X1 + MOVOU 336(AX), X2 + MOVOU 464(AX), X3 + MOVOU 592(AX), X4 + MOVOU 720(AX), X5 + MOVOU 848(AX), X6 + MOVOU 976(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 
+ PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 80(AX) + MOVOU X1, 208(AX) + MOVOU X2, 336(AX) + MOVOU X3, 464(AX) + MOVOU X4, 592(AX) + MOVOU X5, 720(AX) + MOVOU X6, 848(AX) + MOVOU X7, 976(AX) + MOVOU 96(AX), X0 + MOVOU 224(AX), X1 + MOVOU 352(AX), X2 + MOVOU 480(AX), X3 + MOVOU 608(AX), X4 + MOVOU 736(AX), X5 + MOVOU 864(AX), X6 + MOVOU 992(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + 
PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 96(AX) + MOVOU X1, 224(AX) + MOVOU X2, 352(AX) + MOVOU X3, 480(AX) + MOVOU X4, 608(AX) + MOVOU X5, 736(AX) + MOVOU X6, 864(AX) + MOVOU X7, 992(AX) + MOVOU 112(AX), X0 + MOVOU 240(AX), X1 + MOVOU 368(AX), X2 + MOVOU 496(AX), X3 + MOVOU 624(AX), X4 + MOVOU 752(AX), X5 + MOVOU 880(AX), X6 + MOVOU 1008(AX), X7 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFD $0xb1, X6, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + PSHUFB X10, X2 + MOVO X0, X8 + PMULULQ X2, X8 + PADDQ X2, X0 + PADDQ X8, X0 + PADDQ X8, X0 + PXOR X0, X6 + PSHUFB X11, X6 + MOVO X4, X8 + PMULULQ X6, X8 + PADDQ X6, X4 + PADDQ X8, X4 + PADDQ X8, X4 + PXOR X4, X2 + MOVO X2, X8 + PADDQ X2, X8 + PSRLQ $0x3f, X2 + PXOR X8, X2 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFD $0xb1, X7, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + PSHUFB X10, X3 + MOVO X1, X8 + PMULULQ X3, X8 + PADDQ X3, X1 + PADDQ X8, X1 + PADDQ X8, X1 + PXOR X1, X7 + PSHUFB X11, X7 + MOVO X5, X8 + PMULULQ X7, X8 + PADDQ X7, X5 + PADDQ X8, X5 + PADDQ X8, X5 + PXOR X5, X3 + MOVO X3, X8 + PADDQ X3, X8 + PSRLQ $0x3f, X3 + PXOR X8, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU X0, 112(AX) + MOVOU X1, 240(AX) + MOVOU X2, 368(AX) + MOVOU X3, 496(AX) + MOVOU X4, 624(AX) + MOVOU X5, 752(AX) + MOVOU X6, 880(AX) + MOVOU X7, 1008(AX) RET -// func mixBlocksSSE2(out, a, b, c *block) -TEXT ·mixBlocksSSE2(SB), 4, $0-32 +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 + +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), RODATA|NOPTR, $16 + +// func mixBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·mixBlocksSSE2(SB), NOSPLIT, $0-32 MOVQ out+0(FP), DX MOVQ a+8(FP), AX MOVQ b+16(FP), BX MOVQ c+24(FP), CX - MOVQ $128, DI + MOVQ $0x00000080, DI loop: - 
MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 PXOR X1, X0 PXOR X2, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, DI + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI JA loop RET -// func xorBlocksSSE2(out, a, b, c *block) -TEXT ·xorBlocksSSE2(SB), 4, $0-32 +// func xorBlocksSSE2(out *block, a *block, b *block, c *block) +// Requires: SSE2 +TEXT ·xorBlocksSSE2(SB), NOSPLIT, $0-32 MOVQ out+0(FP), DX MOVQ a+8(FP), AX MOVQ b+16(FP), BX MOVQ c+24(FP), CX - MOVQ $128, DI + MOVQ $0x00000080, DI loop: - MOVOU 0(AX), X0 - MOVOU 0(BX), X1 - MOVOU 0(CX), X2 - MOVOU 0(DX), X3 + MOVOU (AX), X0 + MOVOU (BX), X1 + MOVOU (CX), X2 + MOVOU (DX), X3 PXOR X1, X0 PXOR X2, X0 PXOR X3, X0 - MOVOU X0, 0(DX) - ADDQ $16, AX - ADDQ $16, BX - ADDQ $16, CX - ADDQ $16, DX - SUBQ $2, DI + MOVOU X0, (DX) + ADDQ $0x10, AX + ADDQ $0x10, BX + ADDQ $0x10, CX + ADDQ $0x10, DX + SUBQ $0x02, DI JA loop RET diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 9ae8206c..f75162e0 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -1,722 +1,4517 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. +// Code generated by command: go run blake2bAVX2_amd64_asm.go -out ../../blake2bAVX2_amd64.s -pkg blake2b. DO NOT EDIT. //go:build amd64 && gc && !purego #include "textflag.h" -DATA ·AVX2_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX2_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -DATA ·AVX2_iv0<>+0x10(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX2_iv0<>+0x18(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX2_iv0<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_iv1<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX2_iv1<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -DATA ·AVX2_iv1<>+0x10(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX2_iv1<>+0x18(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX2_iv1<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -DATA ·AVX2_c40<>+0x10(SB)/8, $0x0201000706050403 -DATA ·AVX2_c40<>+0x18(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX2_c40<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX2_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -DATA ·AVX2_c48<>+0x10(SB)/8, $0x0100070605040302 -DATA ·AVX2_c48<>+0x18(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX2_c48<>(SB), (NOPTR+RODATA), $32 - -DATA ·AVX_iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908 -DATA ·AVX_iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b -GLOBL ·AVX_iv0<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b -DATA ·AVX_iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1 -GLOBL ·AVX_iv1<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv2<>+0x00(SB)/8, $0x510e527fade682d1 -DATA ·AVX_iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f -GLOBL ·AVX_iv2<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b -DATA ·AVX_iv3<>+0x08(SB)/8, $0x5be0cd19137e2179 -GLOBL ·AVX_iv3<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c40<>+0x00(SB)/8, $0x0201000706050403 -DATA ·AVX_c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b -GLOBL ·AVX_c40<>(SB), (NOPTR+RODATA), $16 - -DATA ·AVX_c48<>+0x00(SB)/8, $0x0100070605040302 -DATA ·AVX_c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a -GLOBL ·AVX_c48<>(SB), (NOPTR+RODATA), $16 - -#define VPERMQ_0x39_Y1_Y1 BYTE 
$0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x39 -#define VPERMQ_0x93_Y1_Y1 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xc9; BYTE $0x93 -#define VPERMQ_0x4E_Y2_Y2 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xd2; BYTE $0x4e -#define VPERMQ_0x93_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x93 -#define VPERMQ_0x39_Y3_Y3 BYTE $0xc4; BYTE $0xe3; BYTE $0xfd; BYTE $0x00; BYTE $0xdb; BYTE $0x39 - -#define ROUND_AVX2(m0, m1, m2, m3, t, c40, c48) \ - VPADDQ m0, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m1, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y1_Y1; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y3_Y3; \ - VPADDQ m2, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFD $-79, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPSHUFB c40, Y1, Y1; \ - VPADDQ m3, Y0, Y0; \ - VPADDQ Y1, Y0, Y0; \ - VPXOR Y0, Y3, Y3; \ - VPSHUFB c48, Y3, Y3; \ - VPADDQ Y3, Y2, Y2; \ - VPXOR Y2, Y1, Y1; \ - VPADDQ Y1, Y1, t; \ - VPSRLQ $63, Y1, Y1; \ - VPXOR t, Y1, Y1; \ - VPERMQ_0x39_Y3_Y3; \ - VPERMQ_0x4E_Y2_Y2; \ - VPERMQ_0x93_Y1_Y1 - -#define VMOVQ_SI_X11_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x1E -#define VMOVQ_SI_X12_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x26 -#define VMOVQ_SI_X13_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x2E -#define VMOVQ_SI_X14_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x36 -#define VMOVQ_SI_X15_0 BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x3E - -#define VMOVQ_SI_X11(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x5E; BYTE $n -#define VMOVQ_SI_X12(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x66; BYTE $n -#define VMOVQ_SI_X13(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x6E; BYTE $n -#define VMOVQ_SI_X14(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x76; BYTE $n -#define VMOVQ_SI_X15(n) BYTE $0xC5; BYTE $0x7A; BYTE $0x7E; BYTE $0x7E; BYTE $n - -#define VPINSRQ_1_SI_X11_0 BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x1E; BYTE $0x01 -#define VPINSRQ_1_SI_X12_0 BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x26; BYTE $0x01 -#define VPINSRQ_1_SI_X13_0 BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x2E; BYTE $0x01 -#define VPINSRQ_1_SI_X14_0 BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x36; BYTE $0x01 -#define VPINSRQ_1_SI_X15_0 BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x3E; BYTE $0x01 - -#define VPINSRQ_1_SI_X11(n) BYTE $0xC4; BYTE $0x63; BYTE $0xA1; BYTE $0x22; BYTE $0x5E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X12(n) BYTE $0xC4; BYTE $0x63; BYTE $0x99; BYTE $0x22; BYTE $0x66; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X13(n) BYTE $0xC4; BYTE $0x63; BYTE $0x91; BYTE $0x22; BYTE $0x6E; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X14(n) BYTE $0xC4; BYTE $0x63; BYTE $0x89; BYTE $0x22; BYTE $0x76; BYTE $n; BYTE $0x01 -#define VPINSRQ_1_SI_X15(n) BYTE $0xC4; BYTE $0x63; BYTE $0x81; BYTE $0x22; BYTE $0x7E; BYTE $n; BYTE $0x01 - -#define VMOVQ_R8_X15 BYTE $0xC4; BYTE $0x41; BYTE $0xF9; BYTE $0x6E; BYTE $0xF8 -#define VPINSRQ_1_R9_X15 BYTE $0xC4; BYTE $0x43; BYTE $0x81; BYTE $0x22; BYTE $0xF9; BYTE $0x01 - -// load msg: Y12 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y12(i0, i1, i2, i3) \ - VMOVQ_SI_X12(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - 
VPINSRQ_1_SI_X12(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y12, Y12 - -// load msg: Y13 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y13(i0, i1, i2, i3) \ - VMOVQ_SI_X13(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X13(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y13, Y13 - -// load msg: Y14 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y14(i0, i1, i2, i3) \ - VMOVQ_SI_X14(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X14(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y14, Y14 - -// load msg: Y15 = (i0, i1, i2, i3) -// i0, i1, i2, i3 must not be 0 -#define LOAD_MSG_AVX2_Y15(i0, i1, i2, i3) \ - VMOVQ_SI_X15(i0*8); \ - VMOVQ_SI_X11(i2*8); \ - VPINSRQ_1_SI_X15(i1*8); \ - VPINSRQ_1_SI_X11(i3*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() \ - VMOVQ_SI_X12_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X12(2*8); \ - VPINSRQ_1_SI_X11(6*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(1, 3, 5, 7); \ - LOAD_MSG_AVX2_Y14(8, 10, 12, 14); \ - LOAD_MSG_AVX2_Y15(9, 11, 13, 15) - -#define LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() \ - LOAD_MSG_AVX2_Y12(14, 4, 9, 13); \ - LOAD_MSG_AVX2_Y13(10, 8, 15, 6); \ - VMOVQ_SI_X11(11*8); \ - VPSHUFD $0x4E, 0*8(SI), X14; \ - VPINSRQ_1_SI_X11(5*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(12, 2, 7, 3) - -#define LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() \ - VMOVQ_SI_X11(5*8); \ - VMOVDQU 11*8(SI), X12; \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - VMOVQ_SI_X13(8*8); \ - VMOVQ_SI_X11(2*8); \ - VPINSRQ_1_SI_X13_0; \ - VPINSRQ_1_SI_X11(13*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(10, 3, 7, 9); \ - LOAD_MSG_AVX2_Y15(14, 6, 1, 4) - -#define LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() \ - LOAD_MSG_AVX2_Y12(7, 3, 13, 11); \ - LOAD_MSG_AVX2_Y13(9, 1, 12, 14); \ - LOAD_MSG_AVX2_Y14(2, 5, 4, 15); \ - VMOVQ_SI_X15(6*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X15(10*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() \ - LOAD_MSG_AVX2_Y12(9, 5, 2, 10); \ - VMOVQ_SI_X13_0; \ - VMOVQ_SI_X11(4*8); \ - VPINSRQ_1_SI_X13(7*8); \ - VPINSRQ_1_SI_X11(15*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(14, 11, 6, 3); \ - LOAD_MSG_AVX2_Y15(1, 12, 8, 13) - -#define LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() \ - VMOVQ_SI_X12(2*8); \ - VMOVQ_SI_X11_0; \ - VPINSRQ_1_SI_X12(6*8); \ - VPINSRQ_1_SI_X11(8*8); \ - VINSERTI128 $1, X11, Y12, Y12; \ - LOAD_MSG_AVX2_Y13(12, 10, 11, 3); \ - LOAD_MSG_AVX2_Y14(4, 7, 15, 1); \ - LOAD_MSG_AVX2_Y15(13, 5, 14, 9) - -#define LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() \ - LOAD_MSG_AVX2_Y12(12, 1, 14, 4); \ - LOAD_MSG_AVX2_Y13(5, 15, 13, 10); \ - VMOVQ_SI_X14_0; \ - VPSHUFD $0x4E, 8*8(SI), X11; \ - VPINSRQ_1_SI_X14(6*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - LOAD_MSG_AVX2_Y15(7, 3, 2, 11) - -#define LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() \ - LOAD_MSG_AVX2_Y12(13, 7, 12, 3); \ - LOAD_MSG_AVX2_Y13(11, 14, 1, 9); \ - LOAD_MSG_AVX2_Y14(5, 15, 8, 2); \ - VMOVQ_SI_X15_0; \ - VMOVQ_SI_X11(6*8); \ - VPINSRQ_1_SI_X15(4*8); \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() \ - VMOVQ_SI_X12(6*8); \ - VMOVQ_SI_X11(11*8); \ - VPINSRQ_1_SI_X12(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y12, Y12; \ - 
LOAD_MSG_AVX2_Y13(15, 9, 3, 8); \ - VMOVQ_SI_X11(1*8); \ - VMOVDQU 12*8(SI), X14; \ - VPINSRQ_1_SI_X11(10*8); \ - VINSERTI128 $1, X11, Y14, Y14; \ - VMOVQ_SI_X15(2*8); \ - VMOVDQU 4*8(SI), X11; \ - VPINSRQ_1_SI_X15(7*8); \ - VINSERTI128 $1, X11, Y15, Y15 - -#define LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() \ - LOAD_MSG_AVX2_Y12(10, 8, 7, 1); \ - VMOVQ_SI_X13(2*8); \ - VPSHUFD $0x4E, 5*8(SI), X11; \ - VPINSRQ_1_SI_X13(4*8); \ - VINSERTI128 $1, X11, Y13, Y13; \ - LOAD_MSG_AVX2_Y14(15, 9, 3, 13); \ - VMOVQ_SI_X15(11*8); \ - VMOVQ_SI_X11(12*8); \ - VPINSRQ_1_SI_X15(14*8); \ - VPINSRQ_1_SI_X11_0; \ - VINSERTI128 $1, X11, Y15, Y15 - // func hashBlocksAVX2(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte) -TEXT ·hashBlocksAVX2(SB), 4, $320-48 // frame size = 288 + 32 byte alignment - MOVQ h+0(FP), AX - MOVQ c+8(FP), BX - MOVQ flag+16(FP), CX - MOVQ blocks_base+24(FP), SI - MOVQ blocks_len+32(FP), DI - - MOVQ SP, DX - ADDQ $31, DX - ANDQ $~31, DX - - MOVQ CX, 16(DX) - XORQ CX, CX - MOVQ CX, 24(DX) - - VMOVDQU ·AVX2_c40<>(SB), Y4 - VMOVDQU ·AVX2_c48<>(SB), Y5 - - VMOVDQU 0(AX), Y8 +// Requires: AVX, AVX2 +TEXT ·hashBlocksAVX2(SB), NOSPLIT, $320-48 + MOVQ h+0(FP), AX + MOVQ c+8(FP), BX + MOVQ flag+16(FP), CX + MOVQ blocks_base+24(FP), SI + MOVQ blocks_len+32(FP), DI + MOVQ SP, DX + ADDQ $+31, DX + ANDQ $-32, DX + MOVQ CX, 16(DX) + XORQ CX, CX + MOVQ CX, 24(DX) + VMOVDQU ·AVX2_c40<>+0(SB), Y4 + VMOVDQU ·AVX2_c48<>+0(SB), Y5 + VMOVDQU (AX), Y8 VMOVDQU 32(AX), Y9 - VMOVDQU ·AVX2_iv0<>(SB), Y6 - VMOVDQU ·AVX2_iv1<>(SB), Y7 - - MOVQ 0(BX), R8 - MOVQ 8(BX), R9 - MOVQ R9, 8(DX) + VMOVDQU ·AVX2_iv0<>+0(SB), Y6 + VMOVDQU ·AVX2_iv1<>+0(SB), Y7 + MOVQ (BX), R8 + MOVQ 8(BX), R9 + MOVQ R9, 8(DX) loop: - ADDQ $128, R8 - MOVQ R8, 0(DX) - CMPQ R8, $128 + ADDQ $0x80, R8 + MOVQ R8, (DX) + CMPQ R8, $0x80 JGE noinc INCQ R9 MOVQ R9, 8(DX) noinc: - VMOVDQA Y8, Y0 - VMOVDQA Y9, Y1 - VMOVDQA Y6, Y2 - VPXOR 0(DX), Y7, Y3 - - LOAD_MSG_AVX2_0_2_4_6_1_3_5_7_8_10_12_14_9_11_13_15() - VMOVDQA Y12, 32(DX) - VMOVDQA Y13, 64(DX) - VMOVDQA Y14, 96(DX) - VMOVDQA Y15, 128(DX) - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_14_4_9_13_10_8_15_6_1_0_11_5_12_2_7_3() - VMOVDQA Y12, 160(DX) - VMOVDQA Y13, 192(DX) - VMOVDQA Y14, 224(DX) - VMOVDQA Y15, 256(DX) - - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_11_12_5_15_8_0_2_13_10_3_7_9_14_6_1_4() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_7_3_13_11_9_1_12_14_2_5_4_15_6_10_0_8() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_9_5_2_10_0_7_4_15_14_11_6_3_1_12_8_13() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_2_6_0_8_12_10_11_3_4_7_15_1_13_5_14_9() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_12_1_14_4_5_15_13_10_0_6_9_8_7_3_2_11() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_13_7_12_3_11_14_1_9_5_15_8_2_0_4_6_10() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_6_14_11_0_15_9_3_8_12_13_1_10_2_7_4_5() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - LOAD_MSG_AVX2_10_8_7_1_2_4_6_5_15_9_3_13_11_14_12_0() - ROUND_AVX2(Y12, Y13, Y14, Y15, Y10, Y4, Y5) - - ROUND_AVX2(32(DX), 64(DX), 96(DX), 128(DX), Y10, Y4, Y5) - ROUND_AVX2(160(DX), 192(DX), 224(DX), 256(DX), Y10, Y4, Y5) - - VPXOR Y0, Y8, Y8 - VPXOR Y1, Y9, Y9 - VPXOR Y2, Y8, Y8 - VPXOR Y3, Y9, Y9 - - LEAQ 128(SI), SI - SUBQ $128, DI - JNE loop - - MOVQ R8, 0(BX) - MOVQ R9, 8(BX) - - VMOVDQU Y8, 0(AX) - VMOVDQU Y9, 32(AX) + VMOVDQA Y8, Y0 + VMOVDQA Y9, Y1 + VMOVDQA Y6, Y2 + VPXOR (DX), Y7, Y3 + BYTE 
$0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x26 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 32(DX) + VMOVDQA Y13, 64(DX) + VMOVDQA Y14, 96(DX) + VMOVDQA Y15, 128(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x48 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x40 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + VPSHUFD $0x4e, (SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x28 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE 
$0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x10 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VMOVDQA Y12, 160(DX) + VMOVDQA Y13, 192(DX) + VMOVDQA Y14, 224(DX) + VMOVDQA Y15, 256(DX) + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x28 + VMOVDQU 88(SI), X12 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x40 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x2e + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x50 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x38 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + 
VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x70 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x48 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x2e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x20 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x78 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE 
$0x70 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x58 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x08 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x60 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x68 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x10 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x1e + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x30 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x50 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x20 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x78 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x08 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x28 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + 
BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x60 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x70 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x08 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x20 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x68 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x36 + VPSHUFD $0x4e, 64(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x30 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x38 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x10 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x18 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x58 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x68 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x60 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x38 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x18 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x58 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + 
BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x48 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x76 + BYTE $0x28 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x40 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x89 + BYTE $0x22 + BYTE $0x76 + BYTE $0x78 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x10 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x3e + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x30 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x20 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x93 + VPADDQ Y14, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y15, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xdb + BYTE $0x39 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xd2 + BYTE $0x4e + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x93 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x66 + BYTE $0x30 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x58 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x99 + BYTE $0x22 + BYTE $0x66 + BYTE $0x70 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x1e + BYTE $0x01 + VINSERTI128 $0x01, X11, Y12, Y12 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x6e + BYTE $0x78 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x18 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x91 + BYTE $0x22 + BYTE $0x6e + BYTE $0x48 + BYTE $0x01 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x40 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y13, Y13 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x5e + BYTE $0x08 + VMOVDQU 96(SI), X14 + BYTE $0xc4 + BYTE $0x63 + BYTE $0xa1 + BYTE $0x22 + BYTE $0x5e + BYTE $0x50 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y14, Y14 + BYTE $0xc5 + BYTE $0x7a + BYTE $0x7e + BYTE $0x7e + BYTE $0x10 + VMOVDQU 32(SI), X11 + BYTE $0xc4 + BYTE $0x63 + BYTE $0x81 + BYTE $0x22 + BYTE $0x7e + BYTE $0x38 + BYTE $0x01 + VINSERTI128 $0x01, X11, Y15, Y15 + VPADDQ Y12, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFD $-79, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPSHUFB Y4, Y1, Y1 + VPADDQ Y13, Y0, Y0 + VPADDQ Y1, Y0, Y0 + VPXOR Y0, Y3, Y3 + VPSHUFB Y5, Y3, Y3 + VPADDQ Y3, Y2, Y2 + VPXOR Y2, Y1, Y1 + VPADDQ Y1, Y1, Y10 + VPSRLQ $0x3f, Y1, Y1 + VPXOR Y10, Y1, Y1 + BYTE $0xc4 + BYTE $0xe3 + BYTE $0xfd + BYTE $0x00 + BYTE $0xc9 + BYTE $0x39 + BYTE $0xc4 + BYTE 
 […regenerated hashBlocksAVX2 body continues: the remaining rounds are emitted as fully expanded VPADDQ/VPXOR/VPSHUFD/VPSHUFB/VPSRLQ sequences, with the VPERMQ/VMOVQ/VPINSRQ forms spelled out as BYTE directives…]
+	VPXOR Y0, Y8, Y8
+	VPXOR Y1, Y9, Y9
+	VPXOR Y2, Y8, Y8
+	VPXOR Y3, Y9, Y9
+	LEAQ 128(SI), SI
+	SUBQ $0x80, DI
+	JNE loop
+	MOVQ R8, (BX)
+	MOVQ R9, 8(BX)
+	VMOVDQU Y8, (AX)
+	VMOVDQU Y9, 32(AX)
 	VZEROUPPER
-	RET
 […removed hand-written helpers, interleaved in this hunk with the new constants below: the VPUNPCKLQDQ_*/VPUNPCKHQDQ_* BYTE-encoding #defines and the SHUFFLE_AVX, SHUFFLE_AVX_INV, HALF_ROUND_AVX and LOAD_MSG_AVX* macros…]
+DATA ·AVX2_c40<>+0(SB)/8, $0x0201000706050403
+DATA ·AVX2_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b
+DATA ·AVX2_c40<>+16(SB)/8, $0x0201000706050403
+DATA ·AVX2_c40<>+24(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·AVX2_c40<>(SB), RODATA|NOPTR, $32
+
+DATA ·AVX2_c48<>+0(SB)/8, $0x0100070605040302
+DATA ·AVX2_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a
+DATA ·AVX2_c48<>+16(SB)/8, $0x0100070605040302
+DATA ·AVX2_c48<>+24(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·AVX2_c48<>(SB), RODATA|NOPTR, $32
+
+DATA ·AVX2_iv0<>+0(SB)/8, $0x6a09e667f3bcc908
+DATA ·AVX2_iv0<>+8(SB)/8, $0xbb67ae8584caa73b
+DATA ·AVX2_iv0<>+16(SB)/8, $0x3c6ef372fe94f82b
+DATA ·AVX2_iv0<>+24(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·AVX2_iv0<>(SB), RODATA|NOPTR, $32
+
+DATA ·AVX2_iv1<>+0(SB)/8, $0x510e527fade682d1
+DATA ·AVX2_iv1<>+8(SB)/8, $0x9b05688c2b3e6c1f
+DATA ·AVX2_iv1<>+16(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·AVX2_iv1<>+24(SB)/8, $0x5be0cd19137e2179
+GLOBL ·AVX2_iv1<>(SB), RODATA|NOPTR, $32
 
 // func hashBlocksAVX(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksAVX(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
 […old hand-scheduled prologue removed…]
+// Requires: AVX, SSE2
+TEXT ·hashBlocksAVX(SB), NOSPLIT, $288-48
+	MOVQ h+0(FP), AX
+	MOVQ c+8(FP), BX
+	MOVQ flag+16(FP), CX
+	MOVQ blocks_base+24(FP), SI
+	MOVQ blocks_len+32(FP), DI
+	MOVQ SP, R10
+	ADDQ $0x0f, R10
+	ANDQ $-16, R10
+	VMOVDQU ·AVX_c40<>+0(SB), X0
+	VMOVDQU ·AVX_c48<>+0(SB), X1
 	VMOVDQA X0, X8
 	VMOVDQA X1, X9
+	VMOVDQU ·AVX_iv3<>+0(SB), X0
+	VMOVDQA X0, (R10)
+	XORQ CX, (R10)
+	VMOVDQU (AX), X10
 	VMOVDQU 16(AX), X11
 	VMOVDQU 32(AX), X2
 	VMOVDQU 48(AX), X3
+	MOVQ (BX), R8
+	MOVQ 8(BX), R9

loop:
-	ADDQ $128, R8
-	CMPQ R8, $128
+	ADDQ $0x80, R8
+	CMPQ R8, $0x80
 	JGE  noinc
 	INCQ R9

noinc:
 […regenerated hashBlocksAVX body: the LOAD_MSG_AVX*/HALF_ROUND_AVX/SHUFFLE_AVX macro invocations are replaced by the equivalent expanded VPADDQ/VPXOR/VPSHUFD/VPSHUFB/VPSRLQ/VPUNPCK* sequences, with the VMOVQ/VPINSRQ message loads emitted as BYTE directives…]
 	VMOVDQU 32(AX), X14
 	VMOVDQU 48(AX), X15
 	VPXOR X0, X10, X10
@@ -729,16 +4524,36 @@ noinc:
 	VPXOR X7, X15, X3
 	VMOVDQU X2, 32(AX)
 	VMOVDQU X3, 48(AX)
-
-	LEAQ 128(SI), SI
-	SUBQ $128, DI
-	JNE  loop
-
-	VMOVDQU X10, 0(AX)
+	LEAQ 128(SI), SI
+	SUBQ $0x80, DI
+	JNE loop
+	VMOVDQU X10, (AX)
 	VMOVDQU X11, 16(AX)
-
-	MOVQ R8, 0(BX)
-	MOVQ R9, 8(BX)
+	MOVQ R8, (BX)
+	MOVQ R9, 8(BX)
 	VZEROUPPER
-	RET
+
+DATA ·AVX_c40<>+0(SB)/8, $0x0201000706050403
+DATA ·AVX_c40<>+8(SB)/8, $0x0a09080f0e0d0c0b
+GLOBL ·AVX_c40<>(SB), RODATA|NOPTR, $16
+
+DATA ·AVX_c48<>+0(SB)/8, $0x0100070605040302
+DATA ·AVX_c48<>+8(SB)/8, $0x09080f0e0d0c0b0a
+GLOBL ·AVX_c48<>(SB), RODATA|NOPTR, $16
+
+DATA ·AVX_iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b
+DATA ·AVX_iv3<>+8(SB)/8, $0x5be0cd19137e2179
+GLOBL ·AVX_iv3<>(SB), RODATA|NOPTR, $16
+
+DATA ·AVX_iv0<>+0(SB)/8, $0x6a09e667f3bcc908
+DATA ·AVX_iv0<>+8(SB)/8, $0xbb67ae8584caa73b
+GLOBL ·AVX_iv0<>(SB), RODATA|NOPTR, $16
+
+DATA ·AVX_iv1<>+0(SB)/8, $0x3c6ef372fe94f82b
+DATA ·AVX_iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1
+GLOBL ·AVX_iv1<>(SB), RODATA|NOPTR, $16
+
+DATA ·AVX_iv2<>+0(SB)/8, $0x510e527fade682d1
+DATA ·AVX_iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f
+GLOBL ·AVX_iv2<>(SB), RODATA|NOPTR, $16
diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
index adfac00c..9a0ce212 100644
--- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
+++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s
@@ -1,278 +1,1441 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
+// Code generated by command: go run blake2b_amd64_asm.go -out ../../blake2b_amd64.s -pkg blake2b. DO NOT EDIT.
 
 //go:build amd64 && gc && !purego
 
 #include "textflag.h"
 
-DATA ·iv0<>+0x00(SB)/8, $0x6a09e667f3bcc908
-DATA ·iv0<>+0x08(SB)/8, $0xbb67ae8584caa73b
-GLOBL ·iv0<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv1<>+0x00(SB)/8, $0x3c6ef372fe94f82b
-DATA ·iv1<>+0x08(SB)/8, $0xa54ff53a5f1d36f1
-GLOBL ·iv1<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv2<>+0x00(SB)/8, $0x510e527fade682d1
-DATA ·iv2<>+0x08(SB)/8, $0x9b05688c2b3e6c1f
-GLOBL ·iv2<>(SB), (NOPTR+RODATA), $16
-
-DATA ·iv3<>+0x00(SB)/8, $0x1f83d9abfb41bd6b
-DATA ·iv3<>+0x08(SB)/8, $0x5be0cd19137e2179
-GLOBL ·iv3<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c40<>+0x00(SB)/8, $0x0201000706050403
-DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b
-GLOBL ·c40<>(SB), (NOPTR+RODATA), $16
-
-DATA ·c48<>+0x00(SB)/8, $0x0100070605040302
-DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a
-GLOBL ·c48<>(SB), (NOPTR+RODATA), $16
 […removed hand-written macros: SHUFFLE, SHUFFLE_INV, HALF_ROUND and LOAD_MSG…]
 
 // func hashBlocksSSE4(h *[8]uint64, c *[2]uint64, flag uint64, blocks []byte)
-TEXT ·hashBlocksSSE4(SB), 4, $288-48 // frame size = 272 + 16 byte alignment
 […old hand-scheduled prologue removed…]
+// Requires: SSE2, SSE4.1, SSSE3
+TEXT ·hashBlocksSSE4(SB), NOSPLIT, $288-48
+	MOVQ h+0(FP), AX
+	MOVQ c+8(FP), BX
+	MOVQ flag+16(FP), CX
+	MOVQ blocks_base+24(FP), SI
+	MOVQ blocks_len+32(FP), DI
+	MOVQ SP, R10
+	ADDQ $0x0f, R10
+	ANDQ $-16, R10
+	MOVOU ·iv3<>+0(SB), X0
+	MOVO X0, (R10)
+	XORQ CX, (R10)
+	MOVOU ·c40<>+0(SB), X13
+	MOVOU ·c48<>+0(SB), X14
+	MOVOU (AX), X12
 	MOVOU 16(AX), X15
-
-	MOVQ 0(BX), R8
-	MOVQ 8(BX), R9
+	MOVQ (BX), R8
+	MOVQ 8(BX), R9

loop:
-	ADDQ $128, R8
-	CMPQ R8, $128
+	ADDQ $0x80, R8
+	CMPQ R8, $0x80
 	JGE  noinc
 	INCQ R9

noinc:
 […removed macro-based round sequence (MOVQ R8, X8 / PINSRQ $1, R9, X8, the LOAD_MSG/HALF_ROUND/SHUFFLE invocations and the old epilogue) and added in its place the fully expanded SSE4.1 instruction stream: MOVQ/PINSRQ message loads, PADDQ/PXOR/PSHUFD/PSHUFB/PSRLQ mixing and PUNPCKLQDQ/PUNPCKHQDQ state shuffles; the expanded stream continues below…]
X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 80(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 72(SI), X9 + MOVQ 112(SI), X10 + PINSRQ $0x01, 48(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 32(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 56(SI), X8 + PINSRQ $0x01, 24(SI), X8 + MOVQ 104(SI), X9 + PINSRQ $0x01, 88(SI), X9 + MOVQ 72(SI), X10 + PINSRQ $0x01, 8(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, 112(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 16(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 32(SI), X9 + PINSRQ $0x01, 120(SI), X9 + MOVQ 48(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ (SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 72(SI), X8 + PINSRQ $0x01, 40(SI), X8 + MOVQ 16(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 120(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, 
X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 112(SI), X8 + PINSRQ $0x01, 88(SI), X8 + MOVQ 48(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 8(SI), X10 + PINSRQ $0x01, 96(SI), X10 + MOVQ 64(SI), X11 + PINSRQ $0x01, 104(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 16(SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ (SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 96(SI), X10 + PINSRQ $0x01, 80(SI), X10 + MOVQ 88(SI), X11 + PINSRQ $0x01, 24(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 32(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 120(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 104(SI), X10 + PINSRQ $0x01, 40(SI), X10 + MOVQ 112(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 96(SI), X8 + PINSRQ $0x01, 8(SI), X8 + MOVQ 112(SI), X9 + PINSRQ $0x01, 32(SI), X9 + MOVQ 40(SI), X10 + PINSRQ $0x01, 120(SI), X10 + 
MOVQ 104(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ (SI), X8 + PINSRQ $0x01, 48(SI), X8 + MOVQ 72(SI), X9 + PINSRQ $0x01, 64(SI), X9 + MOVQ 56(SI), X10 + PINSRQ $0x01, 24(SI), X10 + MOVQ 16(SI), X11 + PINSRQ $0x01, 88(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 104(SI), X8 + PINSRQ $0x01, 56(SI), X8 + MOVQ 96(SI), X9 + PINSRQ $0x01, 24(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 8(SI), X11 + PINSRQ $0x01, 72(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 40(SI), X8 + PINSRQ $0x01, 120(SI), X8 + MOVQ 64(SI), X9 + PINSRQ $0x01, 16(SI), X9 + MOVQ (SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 80(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 
+ MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 48(SI), X8 + PINSRQ $0x01, 112(SI), X8 + MOVQ 88(SI), X9 + PINSRQ $0x01, (SI), X9 + MOVQ 120(SI), X10 + PINSRQ $0x01, 72(SI), X10 + MOVQ 24(SI), X11 + PINSRQ $0x01, 64(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 96(SI), X8 + PINSRQ $0x01, 104(SI), X8 + MOVQ 8(SI), X9 + PINSRQ $0x01, 80(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 56(SI), X10 + MOVQ 32(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVQ 80(SI), X8 + PINSRQ $0x01, 64(SI), X8 + MOVQ 56(SI), X9 + PINSRQ $0x01, 8(SI), X9 + MOVQ 16(SI), X10 + PINSRQ $0x01, 32(SI), X10 + MOVQ 48(SI), X11 + PINSRQ $0x01, 40(SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + MOVQ 120(SI), X8 + PINSRQ $0x01, 72(SI), X8 + MOVQ 24(SI), X9 + PINSRQ $0x01, 104(SI), X9 + MOVQ 88(SI), X10 + PINSRQ $0x01, 112(SI), X10 + MOVQ 96(SI), X11 + PINSRQ $0x01, (SI), X11 + PADDQ X8, X0 + PADDQ X9, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, 
X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ X10, X0 + PADDQ X11, X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 16(R10), X0 + PADDQ 32(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 48(R10), X0 + PADDQ 64(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 80(R10), X0 + PADDQ 96(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 112(R10), X0 + PADDQ 128(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + PADDQ 144(R10), X0 + PADDQ 160(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 176(R10), X0 + PADDQ 192(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X6, X8 + PUNPCKLQDQ X6, X9 + PUNPCKHQDQ X7, X6 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X7, X9 + MOVO X8, X7 + MOVO X2, X8 + PUNPCKHQDQ X9, X7 + PUNPCKLQDQ X3, X9 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X3 + PADDQ 208(R10), X0 + PADDQ 224(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFD $0xb1, X6, X6 + PSHUFD $0xb1, X7, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + PSHUFB X13, X2 + PSHUFB X13, X3 + PADDQ 240(R10), X0 + PADDQ 256(R10), X1 + PADDQ X2, X0 + PADDQ X3, X1 + PXOR X0, X6 + PXOR X1, X7 + PSHUFB X14, X6 + PSHUFB X14, X7 + PADDQ X6, X4 + PADDQ X7, X5 + PXOR X4, X2 + PXOR X5, X3 + MOVOU X2, X11 + 
PADDQ X2, X11 + PSRLQ $0x3f, X2 + PXOR X11, X2 + MOVOU X3, X11 + PADDQ X3, X11 + PSRLQ $0x3f, X3 + PXOR X11, X3 + MOVO X4, X8 + MOVO X5, X4 + MOVO X8, X5 + MOVO X2, X8 + PUNPCKLQDQ X2, X9 + PUNPCKHQDQ X3, X2 + PUNPCKHQDQ X9, X2 + PUNPCKLQDQ X3, X9 + MOVO X8, X3 + MOVO X6, X8 + PUNPCKHQDQ X9, X3 + PUNPCKLQDQ X7, X9 + PUNPCKHQDQ X9, X6 + PUNPCKLQDQ X8, X9 + PUNPCKHQDQ X9, X7 + MOVOU 32(AX), X10 + MOVOU 48(AX), X11 + PXOR X0, X12 + PXOR X1, X15 + PXOR X2, X10 + PXOR X3, X11 + PXOR X4, X12 + PXOR X5, X15 + PXOR X6, X10 + PXOR X7, X11 + MOVOU X10, 32(AX) + MOVOU X11, 48(AX) + LEAQ 128(SI), SI + SUBQ $0x80, DI + JNE loop + MOVOU X12, (AX) + MOVOU X15, 16(AX) + MOVQ R8, (BX) + MOVQ R9, 8(BX) RET + +DATA ·iv3<>+0(SB)/8, $0x1f83d9abfb41bd6b +DATA ·iv3<>+8(SB)/8, $0x5be0cd19137e2179 +GLOBL ·iv3<>(SB), RODATA|NOPTR, $16 + +DATA ·c40<>+0(SB)/8, $0x0201000706050403 +DATA ·c40<>+8(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), RODATA|NOPTR, $16 + +DATA ·c48<>+0(SB)/8, $0x0100070605040302 +DATA ·c48<>+8(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), RODATA|NOPTR, $16 + +DATA ·iv0<>+0(SB)/8, $0x6a09e667f3bcc908 +DATA ·iv0<>+8(SB)/8, $0xbb67ae8584caa73b +GLOBL ·iv0<>(SB), RODATA|NOPTR, $16 + +DATA ·iv1<>+0(SB)/8, $0x3c6ef372fe94f82b +DATA ·iv1<>+8(SB)/8, $0xa54ff53a5f1d36f1 +GLOBL ·iv1<>(SB), RODATA|NOPTR, $16 + +DATA ·iv2<>+0(SB)/8, $0x510e527fade682d1 +DATA ·iv2<>+8(SB)/8, $0x9b05688c2b3e6c1f +GLOBL ·iv2<>(SB), RODATA|NOPTR, $16 diff --git a/vendor/golang.org/x/crypto/sha3/shake.go b/vendor/golang.org/x/crypto/sha3/shake.go index 1ea9275b..a01ef435 100644 --- a/vendor/golang.org/x/crypto/sha3/shake.go +++ b/vendor/golang.org/x/crypto/sha3/shake.go @@ -85,9 +85,9 @@ func newCShake(N, S []byte, rate, outputLen int, dsbyte byte) ShakeHash { // leftEncode returns max 9 bytes c.initBlock = make([]byte, 0, 9*2+len(N)+len(S)) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(N)*8))...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(N))*8)...) c.initBlock = append(c.initBlock, N...) - c.initBlock = append(c.initBlock, leftEncode(uint64(len(S)*8))...) + c.initBlock = append(c.initBlock, leftEncode(uint64(len(S))*8)...) c.initBlock = append(c.initBlock, S...) c.Write(bytepad(c.initBlock, c.rate)) return &c diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go new file mode 100644 index 00000000..de58dfb8 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config.go @@ -0,0 +1,122 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "math" + "net/http" + "time" +) + +// http2Config is a package-internal version of net/http.HTTP2Config. +// +// http.HTTP2Config was added in Go 1.24. +// When running with a version of net/http that includes HTTP2Config, +// we merge the configuration with the fields in Transport or Server +// to produce an http2Config. +// +// Zero valued fields in http2Config are interpreted as in the +// net/http.HTTPConfig documentation. +// +// Precedence order for reconciling configurations is: +// +// - Use the net/http.{Server,Transport}.HTTP2Config value, when non-zero. +// - Otherwise use the http2.{Server.Transport} value. +// - If the resulting value is zero or out of range, use a default. 
+type http2Config struct { + MaxConcurrentStreams uint32 + MaxDecoderHeaderTableSize uint32 + MaxEncoderHeaderTableSize uint32 + MaxReadFrameSize uint32 + MaxUploadBufferPerConnection int32 + MaxUploadBufferPerStream int32 + SendPingTimeout time.Duration + PingTimeout time.Duration + WriteByteTimeout time.Duration + PermitProhibitedCipherSuites bool + CountError func(errType string) +} + +// configFromServer merges configuration settings from +// net/http.Server.HTTP2Config and http2.Server. +func configFromServer(h1 *http.Server, h2 *Server) http2Config { + conf := http2Config{ + MaxConcurrentStreams: h2.MaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + MaxUploadBufferPerConnection: h2.MaxUploadBufferPerConnection, + MaxUploadBufferPerStream: h2.MaxUploadBufferPerStream, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, + CountError: h2.CountError, + } + fillNetHTTPServerConfig(&conf, h1) + setConfigDefaults(&conf, true) + return conf +} + +// configFromServer merges configuration settings from h2 and h2.t1.HTTP2 +// (the net/http Transport). +func configFromTransport(h2 *Transport) http2Config { + conf := http2Config{ + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, + } + + // Unlike most config fields, where out-of-range values revert to the default, + // Transport.MaxReadFrameSize clips. + if conf.MaxReadFrameSize < minMaxFrameSize { + conf.MaxReadFrameSize = minMaxFrameSize + } else if conf.MaxReadFrameSize > maxFrameSize { + conf.MaxReadFrameSize = maxFrameSize + } + + if h2.t1 != nil { + fillNetHTTPTransportConfig(&conf, h2.t1) + } + setConfigDefaults(&conf, false) + return conf +} + +func setDefault[T ~int | ~int32 | ~uint32 | ~int64](v *T, minval, maxval, defval T) { + if *v < minval || *v > maxval { + *v = defval + } +} + +func setConfigDefaults(conf *http2Config, server bool) { + setDefault(&conf.MaxConcurrentStreams, 1, math.MaxUint32, defaultMaxStreams) + setDefault(&conf.MaxEncoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + setDefault(&conf.MaxDecoderHeaderTableSize, 1, math.MaxUint32, initialHeaderTableSize) + if server { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerConnection, initialWindowSize, math.MaxInt32, transportDefaultConnFlow) + } + if server { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, 1<<20) + } else { + setDefault(&conf.MaxUploadBufferPerStream, 1, math.MaxInt32, transportDefaultStreamFlow) + } + setDefault(&conf.MaxReadFrameSize, minMaxFrameSize, maxFrameSize, defaultMaxReadFrameSize) + setDefault(&conf.PingTimeout, 1, math.MaxInt64, 15*time.Second) +} + +// adjustHTTP1MaxHeaderSize converts a limit in bytes on the size of an HTTP/1 header +// to an HTTP/2 MAX_HEADER_LIST_SIZE value. +func adjustHTTP1MaxHeaderSize(n int64) int64 { + // http2's count is in a slightly different unit and includes 32 bytes per pair. + // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. 
+ const perFieldOverhead = 32 // per http2 spec + const typicalHeaders = 10 // conservative + return n + typicalHeaders*perFieldOverhead +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go new file mode 100644 index 00000000..e3784123 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go124.go @@ -0,0 +1,61 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.24 + +package http2 + +import "net/http" + +// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { + fillNetHTTPConfig(conf, srv.HTTP2) +} + +// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2. +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { + fillNetHTTPConfig(conf, tr.HTTP2) +} + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go new file mode 100644 index 00000000..060fd6c6 --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_pre_go124.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.24 + +package http2 + +import "net/http" + +// Pre-Go 1.24 fallback. +// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. + +func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} + +func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 003e649f..7688c356 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -19,8 +19,9 @@ import ( "bufio" "context" "crypto/tls" + "errors" "fmt" - "io" + "net" "net/http" "os" "sort" @@ -237,13 +238,19 @@ func (cw closeWaiter) Wait() { // Its buffered writer is lazily allocated as needed, to minimize // idle memory usage with many connections. 
type bufferedWriter struct { - _ incomparable - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered + _ incomparable + group synctestGroupInterface // immutable + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} +func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { + return &bufferedWriter{ + group: group, + conn: conn, + byteTimeout: timeout, + } } // bufWriterPoolBufferSize is the size of bufio.Writer's @@ -270,7 +277,7 @@ func (w *bufferedWriter) Available() int { func (w *bufferedWriter) Write(p []byte) (n int, err error) { if w.bw == nil { bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) + bw.Reset((*bufferedWriterTimeoutWriter)(w)) w.bw = bw } return w.bw.Write(p) @@ -288,6 +295,38 @@ func (w *bufferedWriter) Flush() error { return err } +type bufferedWriterTimeoutWriter bufferedWriter + +func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { + return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) +} + +// writeWithByteTimeout writes to conn. +// If more than timeout passes without any bytes being written to the connection, +// the write fails. +func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { + if timeout <= 0 { + return conn.Write(p) + } + for { + var now time.Time + if group == nil { + now = time.Now() + } else { + now = group.Now() + } + conn.SetWriteDeadline(now.Add(timeout)) + nn, err := conn.Write(p[n:]) + n += nn + if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { + // Either we finished the write, made no progress, or hit the deadline. + // Whichever it is, we're done now. + conn.SetWriteDeadline(time.Time{}) + return n, err + } + } +} + func mustUint31(v int32) uint32 { if v < 0 || v > 2147483647 { panic("out of range") diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 6c349f3e..617b4a47 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -29,6 +29,7 @@ import ( "bufio" "bytes" "context" + "crypto/rand" "crypto/tls" "errors" "fmt" @@ -52,10 +53,14 @@ import ( ) const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + prefaceTimeout = 10 * time.Second + firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway + handlerChunkWriteSize = 4 << 10 + defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? + + // maxQueuedControlFrames is the maximum number of control frames like + // SETTINGS, PING and RST_STREAM that will be queued for writing before + // the connection is closed to prevent memory exhaustion attacks. maxQueuedControlFrames = 10000 ) @@ -127,6 +132,22 @@ type Server struct { // If zero or negative, there is no timeout. IdleTimeout time.Duration + // ReadIdleTimeout is the timeout after which a health check using a ping + // frame will be carried out if no frame is received on the connection. + // If zero, no health check is performed. 
+ ReadIdleTimeout time.Duration + + // PingTimeout is the timeout after which the connection will be closed + // if a response to a ping is not received. + // If zero, a default of 15 seconds is used. + PingTimeout time.Duration + + // WriteByteTimeout is the timeout after which a connection will be + // closed if no data can be written to it. The timeout begins when data is + // available to write, and is extended whenever any bytes are written. + // If zero or negative, there is no timeout. + WriteByteTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow // control window for each connections. The HTTP/2 spec does not // allow this to be smaller than 65535 or larger than 2^32-1. @@ -189,57 +210,6 @@ func (s *Server) afterFunc(d time.Duration, f func()) timer { return timeTimer{time.AfterFunc(d, f)} } -func (s *Server) initialConnRecvWindowSize() int32 { - if s.MaxUploadBufferPerConnection >= initialWindowSize { - return s.MaxUploadBufferPerConnection - } - return 1 << 20 -} - -func (s *Server) initialStreamRecvWindowSize() int32 { - if s.MaxUploadBufferPerStream > 0 { - return s.MaxUploadBufferPerStream - } - return 1 << 20 -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -func (s *Server) maxDecoderHeaderTableSize() uint32 { - if v := s.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (s *Server) maxEncoderHeaderTableSize() uint32 { - if v := s.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -// maxQueuedControlFrames is the maximum number of control frames like -// SETTINGS, PING and RST_STREAM that will be queued for writing before -// the connection is closed to prevent memory exhaustion attacks. -func (s *Server) maxQueuedControlFrames() int { - // TODO: if anybody asks, add a Server field, and remember to define the - // behavior of negative values. 
- return maxQueuedControlFrames -} - type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} @@ -440,13 +410,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon baseCtx, cancel := serverConnBaseContext(c, opts) defer cancel() + http1srv := opts.baseConfig() + conf := configFromServer(http1srv, s) sc := &serverConn{ srv: s, - hs: opts.baseConfig(), + hs: http1srv, conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), + bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -456,9 +428,12 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way doneServing: make(chan struct{}), clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), + advMaxStreams: conf.MaxConcurrentStreams, initialStreamSendWindowSize: initialWindowSize, + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxFrameSize: initialMaxFrameSize, + pingTimeout: conf.PingTimeout, + countErrorFunc: conf.CountError, serveG: newGoroutineLock(), pushEnabled: true, sawClientPreface: opts.SawClientPreface, @@ -491,15 +466,15 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon sc.flow.add(initialWindowSize) sc.inflow.init(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - sc.hpackEncoder.SetMaxDynamicTableSizeLimit(s.maxEncoderHeaderTableSize()) + sc.hpackEncoder.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) fr := NewFramer(sc.bw, c) - if s.CountError != nil { - fr.countError = s.CountError + if conf.CountError != nil { + fr.countError = conf.CountError } - fr.ReadMetaHeaders = hpack.NewDecoder(s.maxDecoderHeaderTableSize(), nil) + fr.ReadMetaHeaders = hpack.NewDecoder(conf.MaxDecoderHeaderTableSize, nil) fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) + fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) sc.framer = fr if tc, ok := c.(connectionStater); ok { @@ -532,7 +507,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon // So for now, do nothing here again. } - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { + if !conf.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { // "Endpoints MAY choose to generate a connection error // (Section 5.4.1) of type INADEQUATE_SECURITY if one of // the prohibited cipher suites are negotiated." 
@@ -569,7 +544,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon opts.UpgradeRequest = nil } - sc.serve() + sc.serve(conf) } func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { @@ -609,6 +584,7 @@ type serverConn struct { tlsState *tls.ConnectionState // shared by all handlers, like net/http remoteAddrStr string writeSched WriteScheduler + countErrorFunc func(errType string) // Everything following is owned by the serve loop; use serveG.check(): serveG goroutineLock // used to verify funcs are on serve() @@ -628,6 +604,7 @@ type serverConn struct { streams map[uint32]*stream unstartedHandlers []unstartedHandler initialStreamSendWindowSize int32 + initialStreamRecvWindowSize int32 maxFrameSize int32 peerMaxHeaderListSize uint32 // zero means unknown (default) canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case @@ -638,9 +615,14 @@ type serverConn struct { inGoAway bool // we've started to or sent GOAWAY inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop needToSendGoAway bool // we need to schedule a GOAWAY frame write + pingSent bool + sentPingData [8]byte goAwayCode ErrCode shutdownTimer timer // nil until used idleTimer timer // nil if unused + readIdleTimeout time.Duration + pingTimeout time.Duration + readIdleTimer timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -655,11 +637,7 @@ func (sc *serverConn) maxHeaderListSize() uint32 { if n <= 0 { n = http.DefaultMaxHeaderBytes } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) + return uint32(adjustHTTP1MaxHeaderSize(int64(n))) } func (sc *serverConn) curOpenStreams() uint32 { @@ -923,7 +901,7 @@ func (sc *serverConn) notePanic() { } } -func (sc *serverConn) serve() { +func (sc *serverConn) serve(conf http2Config) { sc.serveG.check() defer sc.notePanic() defer sc.conn.Close() @@ -937,18 +915,18 @@ func (sc *serverConn) serve() { sc.writeFrame(FrameWriteRequest{ write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, + {SettingMaxFrameSize, conf.MaxReadFrameSize}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - {SettingHeaderTableSize, sc.srv.maxDecoderHeaderTableSize()}, - {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, + {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize}, + {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)}, }, }) sc.unackedSettings++ // Each connection starts with initialWindowSize inflow tokens. // If a higher value is configured, we add more tokens. 
- if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + if diff := conf.MaxUploadBufferPerConnection - initialWindowSize; diff > 0 { sc.sendWindowUpdate(nil, int(diff)) } @@ -968,11 +946,18 @@ func (sc *serverConn) serve() { defer sc.idleTimer.Stop() } + if conf.SendPingTimeout > 0 { + sc.readIdleTimeout = conf.SendPingTimeout + sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + defer sc.readIdleTimer.Stop() + } + go sc.readFrames() // closed by defer sc.conn.Close above settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() + lastFrameTime := sc.srv.now() loopNum := 0 for { loopNum++ @@ -986,6 +971,7 @@ func (sc *serverConn) serve() { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: + lastFrameTime = sc.srv.now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1017,6 +1003,8 @@ func (sc *serverConn) serve() { case idleTimerMsg: sc.vlogf("connection is idle") sc.goAway(ErrCodeNo) + case readIdleTimerMsg: + sc.handlePingTimer(lastFrameTime) case shutdownTimerMsg: sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) return @@ -1039,7 +1027,7 @@ func (sc *serverConn) serve() { // If the peer is causing us to generate a lot of control frames, // but not reading them from us, assume they are trying to make us // run out of memory. - if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() { + if sc.queuedControlFrames > maxQueuedControlFrames { sc.vlogf("http2: too many control frames in send queue, closing connection") return } @@ -1055,12 +1043,39 @@ func (sc *serverConn) serve() { } } +func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { + if sc.pingSent { + sc.vlogf("timeout waiting for PING response") + sc.conn.Close() + return + } + + pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) + now := sc.srv.now() + if pingAt.After(now) { + // We received frames since arming the ping timer. + // Reset it for the next possible timeout. + sc.readIdleTimer.Reset(pingAt.Sub(now)) + return + } + + sc.pingSent = true + // Ignore crypto/rand.Read errors: It generally can't fail, and worse case if it does + // is we send a PING frame containing 0s. + _, _ = rand.Read(sc.sentPingData[:]) + sc.writeFrame(FrameWriteRequest{ + write: &writePing{data: sc.sentPingData}, + }) + sc.readIdleTimer.Reset(sc.pingTimeout) +} + type serverMessage int // Message values sent to serveMsgCh. 
var ( settingsTimerMsg = new(serverMessage) idleTimerMsg = new(serverMessage) + readIdleTimerMsg = new(serverMessage) shutdownTimerMsg = new(serverMessage) gracefulShutdownMsg = new(serverMessage) handlerDoneMsg = new(serverMessage) @@ -1068,6 +1083,7 @@ var ( func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onReadIdleTimer() { sc.sendServeMsg(readIdleTimerMsg) } func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } func (sc *serverConn) sendServeMsg(msg interface{}) { @@ -1320,6 +1336,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { sc.writingFrame = false sc.writingFrameAsync = false + if res.err != nil { + sc.conn.Close() + } + wr := res.wr if writeEndsStream(wr.write) { @@ -1594,6 +1614,11 @@ func (sc *serverConn) processFrame(f Frame) error { func (sc *serverConn) processPing(f *PingFrame) error { sc.serveG.check() if f.IsAck() { + if sc.pingSent && sc.sentPingData == f.Data { + // This is a response to a PING we sent. + sc.pingSent = false + sc.readIdleTimer.Reset(sc.readIdleTimeout) + } // 6.7 PING: " An endpoint MUST NOT respond to PING frames // containing this flag." return nil @@ -2160,7 +2185,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter st.flow.add(sc.initialStreamSendWindowSize) - st.inflow.init(sc.srv.initialStreamRecvWindowSize()) + st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } @@ -3301,7 +3326,7 @@ func (sc *serverConn) countError(name string, err error) error { if sc == nil || sc.srv == nil { return err } - f := sc.srv.CountError + f := sc.countErrorFunc if f == nil { return err } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 61f511f9..0c5f64aa 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -25,7 +25,6 @@ import ( "net/http" "net/http/httptrace" "net/textproto" - "os" "sort" "strconv" "strings" @@ -227,40 +226,26 @@ func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (co } func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { + n := int64(t.MaxHeaderListSize) + if t.t1 != nil && t.t1.MaxResponseHeaderBytes != 0 { + n = t.t1.MaxResponseHeaderBytes + if n > 0 { + n = adjustHTTP1MaxHeaderSize(n) + } + } + if n <= 0 { return 10 << 20 } - if t.MaxHeaderListSize == 0xffffffff { + if n >= 0xffffffff { return 0 } - return t.MaxHeaderListSize -} - -func (t *Transport) maxFrameReadSize() uint32 { - if t.MaxReadFrameSize == 0 { - return 0 // use the default provided by the peer - } - if t.MaxReadFrameSize < minMaxFrameSize { - return minMaxFrameSize - } - if t.MaxReadFrameSize > maxFrameSize { - return maxFrameSize - } - return t.MaxReadFrameSize + return uint32(n) } func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -func (t *Transport) pingTimeout() time.Duration { - if t.PingTimeout == 0 { - return 15 * time.Second - } - return t.PingTimeout - -} - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. // It returns an error if t1 has already been HTTP/2-enabled. 
// @@ -370,11 +355,14 @@ type ClientConn struct { lastActive time.Time lastIdle time.Time // time last idle // Settings from peer: (also guarded by wmu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - peerMaxHeaderListSize uint64 - peerMaxHeaderTableSize uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + peerMaxHeaderTableSize uint32 + initialWindowSize uint32 + initialStreamRecvWindowSize int32 + readIdleTimeout time.Duration + pingTimeout time.Duration // reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests. // Write to reqHeaderMu to lock it, read from it to unlock. @@ -499,6 +487,7 @@ func (cs *clientStream) closeReqBodyLocked() { } type stickyErrWriter struct { + group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -508,22 +497,9 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - for { - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Now().Add(sew.timeout)) - } - nn, err := sew.conn.Write(p[n:]) - n += nn - if n < len(p) && nn > 0 && errors.Is(err, os.ErrDeadlineExceeded) { - // Keep extending the deadline so long as we're making progress. - continue - } - if sew.timeout != 0 { - sew.conn.SetWriteDeadline(time.Time{}) - } - *sew.err = err - return n, err - } + n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + *sew.err = err + return n, err } // noCachedConnError is the concrete type of ErrNoCachedConn, which @@ -758,44 +734,36 @@ func (t *Transport) expectContinueTimeout() time.Duration { return t.t1.ExpectContinueTimeout } -func (t *Transport) maxDecoderHeaderTableSize() uint32 { - if v := t.MaxDecoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - -func (t *Transport) maxEncoderHeaderTableSize() uint32 { - if v := t.MaxEncoderHeaderTableSize; v > 0 { - return v - } - return initialHeaderTableSize -} - func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { return t.newClientConn(c, t.disableKeepAlives()) } func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { + conf := configFromTransport(t) cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), - reqHeaderMu: make(chan struct{}, 1), + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, + maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. 
+ streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + readIdleTimeout: conf.SendPingTimeout, + pingTimeout: conf.PingTimeout, + pings: make(map[[8]byte]chan struct{}), + reqHeaderMu: make(chan struct{}, 1), } + var group synctestGroupInterface if t.transportTestHooks != nil { t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn + group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -807,24 +775,23 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ + group: group, conn: c, - timeout: t.WriteByteTimeout, + timeout: conf.WriteByteTimeout, err: &cc.werr, }) cc.br = bufio.NewReader(c) cc.fr = NewFramer(cc.bw, cc.br) - if t.maxFrameReadSize() != 0 { - cc.fr.SetMaxReadFrameSize(t.maxFrameReadSize()) - } + cc.fr.SetMaxReadFrameSize(conf.MaxReadFrameSize) if t.CountError != nil { cc.fr.countError = t.CountError } - maxHeaderTableSize := t.maxDecoderHeaderTableSize() + maxHeaderTableSize := conf.MaxDecoderHeaderTableSize cc.fr.ReadMetaHeaders = hpack.NewDecoder(maxHeaderTableSize, nil) cc.fr.MaxHeaderListSize = t.maxHeaderListSize() cc.henc = hpack.NewEncoder(&cc.hbuf) - cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize()) + cc.henc.SetMaxDynamicTableSizeLimit(conf.MaxEncoderHeaderTableSize) cc.peerMaxHeaderTableSize = initialHeaderTableSize if cs, ok := c.(connectionStater); ok { @@ -834,11 +801,9 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialSettings := []Setting{ {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxFrameReadSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: max}) + {ID: SettingInitialWindowSize, Val: uint32(cc.initialStreamRecvWindowSize)}, } + initialSettings = append(initialSettings, Setting{ID: SettingMaxFrameSize, Val: conf.MaxReadFrameSize}) if max := t.maxHeaderListSize(); max != 0 { initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) } @@ -848,8 +813,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro cc.bw.Write(clientPreface) cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.init(transportDefaultConnFlow + initialWindowSize) + cc.fr.WriteWindowUpdate(0, uint32(conf.MaxUploadBufferPerConnection)) + cc.inflow.init(conf.MaxUploadBufferPerConnection + initialWindowSize) cc.bw.Flush() if cc.werr != nil { cc.Close() @@ -867,7 +832,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro } func (cc *ClientConn) healthCheck() { - pingTimeout := cc.t.pingTimeout() + pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. 
ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) @@ -2199,7 +2164,7 @@ type resAndError struct { func (cc *ClientConn) addStreamLocked(cs *clientStream) { cs.flow.add(int32(cc.initialWindowSize)) cs.flow.setConnFlow(&cc.flow) - cs.inflow.init(transportDefaultStreamFlow) + cs.inflow.init(cc.initialStreamRecvWindowSize) cs.ID = cc.nextStreamID cc.nextStreamID += 2 cc.streams[cs.ID] = cs @@ -2345,7 +2310,7 @@ func (cc *ClientConn) countReadFrameError(err error) { func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false - readIdleTimeout := cc.t.ReadIdleTimeout + readIdleTimeout := cc.readIdleTimeout var t timer if readIdleTimeout != 0 { t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 33f61398..6ff6bee7 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -131,6 +131,16 @@ func (se StreamError) writeFrame(ctx writeContext) error { func (se StreamError) staysWithinBuffer(max int) bool { return frameHeaderLen+4 <= max } +type writePing struct { + data [8]byte +} + +func (w writePing) writeFrame(ctx writeContext) error { + return ctx.Framer().WritePing(false, w.data) +} + +func (w writePing) staysWithinBuffer(max int) bool { return frameHeaderLen+len(w.data) <= max } + type writePingAck struct{ pf *PingFrame } func (w writePingAck) writeFrame(ctx writeContext) error { diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go index ec07aab0..02609d5b 100644 --- a/vendor/golang.org/x/sys/cpu/cpu.go +++ b/vendor/golang.org/x/sys/cpu/cpu.go @@ -201,6 +201,25 @@ var S390X struct { _ CacheLinePad } +// RISCV64 contains the supported CPU features and performance characteristics for riscv64 +// platforms. The booleans in RISCV64, with the exception of HasFastMisaligned, indicate +// the presence of RISC-V extensions. +// +// It is safe to assume that all the RV64G extensions are supported and so they are omitted from +// this structure. As riscv64 Go programs require at least RV64G, the code that populates +// this structure cannot run successfully if some of the RV64G extensions are missing. +// The struct is padded to avoid false sharing. +var RISCV64 struct { + _ CacheLinePad + HasFastMisaligned bool // Fast misaligned accesses + HasC bool // Compressed instruction-set extension + HasV bool // Vector extension compatible with RVV 1.0 + HasZba bool // Address generation instructions extension + HasZbb bool // Basic bit-manipulation extension + HasZbs bool // Single-bit instructions extension + _ CacheLinePad +} + func init() { archInit() initOptions() diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go index cd63e733..7d902b68 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x +//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64 package cpu diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go new file mode 100644 index 00000000..cb4a0c57 --- /dev/null +++ b/vendor/golang.org/x/sys/cpu/cpu_linux_riscv64.go @@ -0,0 +1,137 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cpu + +import ( + "syscall" + "unsafe" +) + +// RISC-V extension discovery code for Linux. The approach here is to first try the riscv_hwprobe +// syscall falling back to HWCAP to check for the C extension if riscv_hwprobe is not available. +// +// A note on detection of the Vector extension using HWCAP. +// +// Support for the Vector extension version 1.0 was added to the Linux kernel in release 6.5. +// Support for the riscv_hwprobe syscall was added in 6.4. It follows that if the riscv_hwprobe +// syscall is not available then neither is the Vector extension (which needs kernel support). +// The riscv_hwprobe syscall should then be all we need to detect the Vector extension. +// However, some RISC-V board manufacturers ship boards with an older kernel on top of which +// they have back-ported various versions of the Vector extension patches but not the riscv_hwprobe +// patches. These kernels advertise support for the Vector extension using HWCAP. Falling +// back to HWCAP to detect the Vector extension, if riscv_hwprobe is not available, or simply not +// bothering with riscv_hwprobe at all and just using HWCAP may then seem like an attractive option. +// +// Unfortunately, simply checking the 'V' bit in AT_HWCAP will not work as this bit is used by +// RISC-V board and cloud instance providers to mean different things. The Lichee Pi 4A board +// and the Scaleway RV1 cloud instances use the 'V' bit to advertise their support for the unratified +// 0.7.1 version of the Vector Specification. The Banana Pi BPI-F3 and the CanMV-K230 board use +// it to advertise support for 1.0 of the Vector extension. Versions 0.7.1 and 1.0 of the Vector +// extension are binary incompatible. HWCAP can then not be used in isolation to populate the +// HasV field as this field indicates that the underlying CPU is compatible with RVV 1.0. +// +// There is a way at runtime to distinguish between versions 0.7.1 and 1.0 of the Vector +// specification by issuing a RVV 1.0 vsetvli instruction and checking the vill bit of the vtype +// register. This check would allow us to safely detect version 1.0 of the Vector extension +// with HWCAP, if riscv_hwprobe were not available. However, the check cannot +// be added until the assembler supports the Vector instructions. +// +// Note the riscv_hwprobe syscall does not suffer from these ambiguities by design as all of the +// extensions it advertises support for are explicitly versioned. It's also worth noting that +// the riscv_hwprobe syscall is the only way to detect multi-letter RISC-V extensions, e.g., Zba. +// These cannot be detected using HWCAP and so riscv_hwprobe must be used to detect the majority +// of RISC-V extensions. +// +// Please see https://docs.kernel.org/arch/riscv/hwprobe.html for more information. 
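The comment block above explains how the new cpu_linux_riscv64.go probes RISC-V extensions through riscv_hwprobe and falls back to HWCAP only for the C extension. For orientation, a consumer of the vendored golang.org/x/sys/cpu package reads the result through the RISCV64 booleans introduced in this change; the short program below is an illustrative sketch, not part of the patch.

    package main

    import (
        "fmt"

        "golang.org/x/sys/cpu"
    )

    func main() {
        // Populated by doinit() on linux/riscv64; the fields stay false elsewhere.
        fmt.Println("RVV 1.0 vector extension:", cpu.RISCV64.HasV)
        fmt.Println("Zba/Zbb/Zbs:", cpu.RISCV64.HasZba, cpu.RISCV64.HasZbb, cpu.RISCV64.HasZbs)
        fmt.Println("fast misaligned accesses:", cpu.RISCV64.HasFastMisaligned)
    }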
+ +// golang.org/x/sys/cpu is not allowed to depend on golang.org/x/sys/unix so we must +// reproduce the constants, types and functions needed to make the riscv_hwprobe syscall +// here. + +const ( + // Copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. + riscv_HWPROBE_KEY_IMA_EXT_0 = 0x4 + riscv_HWPROBE_IMA_C = 0x2 + riscv_HWPROBE_IMA_V = 0x4 + riscv_HWPROBE_EXT_ZBA = 0x8 + riscv_HWPROBE_EXT_ZBB = 0x10 + riscv_HWPROBE_EXT_ZBS = 0x20 + riscv_HWPROBE_KEY_CPUPERF_0 = 0x5 + riscv_HWPROBE_MISALIGNED_FAST = 0x3 + riscv_HWPROBE_MISALIGNED_MASK = 0x7 +) + +const ( + // sys_RISCV_HWPROBE is copied from golang.org/x/sys/unix/zsysnum_linux_riscv64.go. + sys_RISCV_HWPROBE = 258 +) + +// riscvHWProbePairs is copied from golang.org/x/sys/unix/ztypes_linux_riscv64.go. +type riscvHWProbePairs struct { + key int64 + value uint64 +} + +const ( + // CPU features + hwcap_RISCV_ISA_C = 1 << ('C' - 'A') +) + +func doinit() { + // A slice of key/value pair structures is passed to the RISCVHWProbe syscall. The key + // field should be initialised with one of the key constants defined above, e.g., + // RISCV_HWPROBE_KEY_IMA_EXT_0. The syscall will set the value field to the appropriate value. + // If the kernel does not recognise a key it will set the key field to -1 and the value field to 0. + + pairs := []riscvHWProbePairs{ + {riscv_HWPROBE_KEY_IMA_EXT_0, 0}, + {riscv_HWPROBE_KEY_CPUPERF_0, 0}, + } + + // This call only indicates that extensions are supported if they are implemented on all cores. + if riscvHWProbe(pairs, 0) { + if pairs[0].key != -1 { + v := uint(pairs[0].value) + RISCV64.HasC = isSet(v, riscv_HWPROBE_IMA_C) + RISCV64.HasV = isSet(v, riscv_HWPROBE_IMA_V) + RISCV64.HasZba = isSet(v, riscv_HWPROBE_EXT_ZBA) + RISCV64.HasZbb = isSet(v, riscv_HWPROBE_EXT_ZBB) + RISCV64.HasZbs = isSet(v, riscv_HWPROBE_EXT_ZBS) + } + if pairs[1].key != -1 { + v := pairs[1].value & riscv_HWPROBE_MISALIGNED_MASK + RISCV64.HasFastMisaligned = v == riscv_HWPROBE_MISALIGNED_FAST + } + } + + // Let's double check with HWCAP if the C extension does not appear to be supported. + // This may happen if we're running on a kernel older than 6.4. + + if !RISCV64.HasC { + RISCV64.HasC = isSet(hwCap, hwcap_RISCV_ISA_C) + } +} + +func isSet(hwc uint, value uint) bool { + return hwc&value != 0 +} + +// riscvHWProbe is a simplified version of the generated wrapper function found in +// golang.org/x/sys/unix/zsyscall_linux_riscv64.go. We simplify it by removing the +// cpuCount and cpus parameters which we do not need. We always want to pass 0 for +// these parameters here so the kernel only reports the extensions that are present +// on all cores. 
+func riscvHWProbe(pairs []riscvHWProbePairs, flags uint) bool { + var _zero uintptr + var p0 unsafe.Pointer + if len(pairs) > 0 { + p0 = unsafe.Pointer(&pairs[0]) + } else { + p0 = unsafe.Pointer(&_zero) + } + + _, _, e1 := syscall.Syscall6(sys_RISCV_HWPROBE, uintptr(p0), uintptr(len(pairs)), uintptr(0), uintptr(0), uintptr(flags), 0) + return e1 == 0 +} diff --git a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go index 7f0c79c0..aca3199c 100644 --- a/vendor/golang.org/x/sys/cpu/cpu_riscv64.go +++ b/vendor/golang.org/x/sys/cpu/cpu_riscv64.go @@ -8,4 +8,13 @@ package cpu const cacheLineSize = 64 -func initOptions() {} +func initOptions() { + options = []option{ + {Name: "fastmisaligned", Feature: &RISCV64.HasFastMisaligned}, + {Name: "c", Feature: &RISCV64.HasC}, + {Name: "v", Feature: &RISCV64.HasV}, + {Name: "zba", Feature: &RISCV64.HasZba}, + {Name: "zbb", Feature: &RISCV64.HasZbb}, + {Name: "zbs", Feature: &RISCV64.HasZbs}, + } +} diff --git a/vendor/golang.org/x/sys/unix/README.md b/vendor/golang.org/x/sys/unix/README.md index 7d3c060e..6e08a76a 100644 --- a/vendor/golang.org/x/sys/unix/README.md +++ b/vendor/golang.org/x/sys/unix/README.md @@ -156,7 +156,7 @@ from the generated architecture-specific files listed below, and merge these into a common file for each OS. The merge is performed in the following steps: -1. Construct the set of common code that is idential in all architecture-specific files. +1. Construct the set of common code that is identical in all architecture-specific files. 2. Write this common code to the merged file. 3. Remove the common code from all architecture-specific files. diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index d07dd09e..ac54ecab 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -552,6 +552,7 @@ ccflags="$@" $2 !~ /^RTC_VL_(ACCURACY|BACKUP|DATA)/ && $2 ~ /^(NETLINK|NLM|NLMSG|NLA|IFA|IFAN|RT|RTC|RTCF|RTN|RTPROT|RTNH|ARPHRD|ETH_P|NETNSA)_/ || $2 ~ /^SOCK_|SK_DIAG_|SKNLGRP_$/ || + $2 ~ /^(CONNECT|SAE)_/ || $2 ~ /^FIORDCHK$/ || $2 ~ /^SIOC/ || $2 ~ /^TIOC/ || @@ -655,7 +656,7 @@ errors=$( signals=$( echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print $2 }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort ) @@ -665,7 +666,7 @@ echo '#include ' | $CC -x c - -E -dM $ccflags | sort >_error.grep echo '#include ' | $CC -x c - -E -dM $ccflags | awk '$1=="#define" && $2 ~ /^SIG[A-Z0-9]+$/ { print "^\t" $2 "[ \t]*=" }' | - grep -v 'SIGSTKSIZE\|SIGSTKSZ\|SIGRT\|SIGMAX64' | + grep -E -v '(SIGSTKSIZE|SIGSTKSZ|SIGRT|SIGMAX64)' | sort >_signal.grep echo '// mkerrors.sh' "$@" diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 67ce6cef..6f15ba1e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -360,7 +360,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. 
for err == ERESTART { r, err = wait4(Pid_t(pid), &status, options, rusage) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 2d15200a..099867de 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -566,6 +566,43 @@ func PthreadFchdir(fd int) (err error) { return pthread_fchdir_np(fd) } +// Connectx calls connectx(2) to initiate a connection on a socket. +// +// srcIf, srcAddr, and dstAddr are filled into a [SaEndpoints] struct and passed as the endpoints argument. +// +// - srcIf is the optional source interface index. 0 means unspecified. +// - srcAddr is the optional source address. nil means unspecified. +// - dstAddr is the destination address. +// +// On success, Connectx returns the number of bytes enqueued for transmission. +func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocID, flags uint32, iov []Iovec, connid *SaeConnID) (n uintptr, err error) { + endpoints := SaEndpoints{ + Srcif: srcIf, + } + + if srcAddr != nil { + addrp, addrlen, err := srcAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Srcaddr = (*RawSockaddr)(addrp) + endpoints.Srcaddrlen = uint32(addrlen) + } + + if dstAddr != nil { + addrp, addrlen, err := dstAddr.sockaddr() + if err != nil { + return 0, err + } + endpoints.Dstaddr = (*RawSockaddr)(addrp) + endpoints.Dstaddrlen = uint32(addrlen) + } + + err = connectx(fd, &endpoints, associd, flags, iov, &n, connid) + return +} + +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_hurd.go b/vendor/golang.org/x/sys/unix/syscall_hurd.go index ba46651f..a6a2d2fc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_hurd.go +++ b/vendor/golang.org/x/sys/unix/syscall_hurd.go @@ -11,6 +11,7 @@ package unix int ioctl(int, unsigned long int, uintptr_t); */ import "C" +import "unsafe" func ioctl(fd int, req uint, arg uintptr) (err error) { r0, er := C.ioctl(C.int(fd), C.ulong(req), C.uintptr_t(arg)) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 3f1d3d4c..f08abd43 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1295,6 +1295,48 @@ func GetsockoptTCPInfo(fd, level, opt int) (*TCPInfo, error) { return &value, err } +// GetsockoptTCPCCVegasInfo returns algorithm specific congestion control information for a socket using the "vegas" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCVegasInfo(fd, level, opt int) (*TCPVegasInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPVegasInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCDCTCPInfo returns algorithm specific congestion control information for a socket using the "dctp" +// algorithm. 
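The Connectx wrapper added to syscall_darwin.go above documents its parameters (srcIf, srcAddr, dstAddr, associd, flags, iov, connid) but ships without a usage example. A hedged sketch of a plain connect through it, using only identifiers that appear in this patch (Connectx, SAE_ASSOCID_ANY) plus placeholder address values; this is illustrative, not code from the patch.

    //go:build darwin

    package main

    import (
        "log"

        "golang.org/x/sys/unix"
    )

    func connectxExample() {
        fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM, 0)
        if err != nil {
            log.Fatal(err)
        }
        defer unix.Close(fd)

        dst := &unix.SockaddrInet4{Port: 8080, Addr: [4]byte{127, 0, 0, 1}} // placeholder endpoint
        // srcIf=0 and srcAddr=nil leave the source unspecified; a nil iov and
        // nil connid request an ordinary connect with no data enqueued.
        n, err := unix.Connectx(fd, 0, nil, dst, unix.SAE_ASSOCID_ANY, 0, nil, nil)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("connectx enqueued %d bytes", n)
    }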
+// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCDCTCPInfo(fd, level, opt int) (*TCPDCTCPInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPDCTCPInfo)(unsafe.Pointer(&value[0])) + return out, err +} + +// GetsockoptTCPCCBBRInfo returns algorithm specific congestion control information for a socket using the "bbr" +// algorithm. +// +// The socket's congestion control algorighm can be retrieved via [GetsockoptString] with the [TCP_CONGESTION] option: +// +// algo, err := unix.GetsockoptString(fd, unix.IPPROTO_TCP, unix.TCP_CONGESTION) +func GetsockoptTCPCCBBRInfo(fd, level, opt int) (*TCPBBRInfo, error) { + var value [SizeofTCPCCInfo / 4]uint32 // ensure proper alignment + vallen := _Socklen(SizeofTCPCCInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + out := (*TCPBBRInfo)(unsafe.Pointer(&value[0])) + return out, err +} + // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. func GetsockoptString(fd, level, opt int) (string, error) { @@ -1959,7 +2001,26 @@ func Getpgrp() (pid int) { //sysnb Getpid() (pid int) //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) -//sys Getrandom(buf []byte, flags int) (n int, err error) + +func Getrandom(buf []byte, flags int) (n int, err error) { + vdsoRet, supported := vgetrandom(buf, uint32(flags)) + if supported { + if vdsoRet < 0 { + return 0, errnoErr(syscall.Errno(-vdsoRet)) + } + return vdsoRet, nil + } + var p *byte + if len(buf) > 0 { + p = &buf[0] + } + r, _, e := Syscall(SYS_GETRANDOM, uintptr(unsafe.Pointer(p)), uintptr(len(buf)), uintptr(flags)) + if e != 0 { + return 0, errnoErr(e) + } + return int(r), nil +} + //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index cf2ee6c7..745e5c7e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -182,3 +182,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go index 3d0e9845..dd2262a4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_loong64.go @@ -214,3 +214,5 @@ func KexecFileLoad(kernelFd int, initrdFd int, cmdline string, flags int) error } return kexecFileLoad(kernelFd, initrdFd, cmdlineLen, cmdline, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go index 6f5a2889..8cf3670b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go @@ -187,3 +187,5 @@ func RISCVHWProbe(pairs []RISCVHWProbePairs, set *CPUSet, flags uint) (err error } return 
riscvHWProbe(pairs, setSize, set, flags) } + +const SYS_FSTATAT = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/vgetrandom_linux.go b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go new file mode 100644 index 00000000..07ac8e09 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/vgetrandom_linux.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build linux && go1.24 + +package unix + +import _ "unsafe" + +//go:linkname vgetrandom runtime.vgetrandom +//go:noescape +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go similarity index 56% rename from vendor/golang.org/x/tools/internal/versions/toolchain_go119.go rename to vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go index f65beed9..297e97bc 100644 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go119.go +++ b/vendor/golang.org/x/sys/unix/vgetrandom_unsupported.go @@ -2,13 +2,10 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.19 -// +build go1.19 +//go:build !linux || !go1.24 -package versions +package unix -func init() { - if Compare(toolchain, Go1_19) < 0 { - toolchain = Go1_19 - } +func vgetrandom(p []byte, flags uint32) (ret int, supported bool) { + return -1, false } diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 4308ac17..d73c4652 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index c8068a7a..4a55a400 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -237,6 +237,9 @@ const ( CLOCK_UPTIME_RAW_APPROX = 0x9 CLONE_NOFOLLOW = 0x1 CLONE_NOOWNERCOPY = 0x2 + CONNECT_DATA_AUTHENTICATED = 0x4 + CONNECT_DATA_IDEMPOTENT = 0x2 + CONNECT_RESUME_ON_READ_WRITE = 0x1 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 @@ -1265,6 +1268,10 @@ const ( RTV_SSTHRESH = 0x20 RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 + SAE_ASSOCID_ALL = 0xffffffff + SAE_ASSOCID_ANY = 0x0 + SAE_CONNID_ALL = 0xffffffff + SAE_CONNID_ANY = 0x0 SCM_CREDS = 0x3 SCM_RIGHTS = 0x1 SCM_TIMESTAMP = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 01a70b24..de3b4624 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -495,6 +495,7 @@ const ( BPF_F_TEST_REG_INVARIANTS = 0x80 BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TEST_RUN_ON_CPU = 0x1 + BPF_F_TEST_SKB_CHECKSUM_COMPLETE = 0x4 BPF_F_TEST_STATE_FREQ = 0x8 BPF_F_TEST_XDP_LIVE_FRAMES = 0x2 BPF_F_XDP_DEV_BOUND_ONLY = 0x40 
@@ -1922,6 +1923,7 @@ const ( MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 MNT_ID_REQ_SIZE_VER0 = 0x18 + MNT_ID_REQ_SIZE_VER1 = 0x20 MODULE_INIT_COMPRESSED_FILE = 0x4 MODULE_INIT_IGNORE_MODVERSIONS = 0x1 MODULE_INIT_IGNORE_VERMAGIC = 0x2 @@ -2187,7 +2189,7 @@ const ( NFT_REG_SIZE = 0x10 NFT_REJECT_ICMPX_MAX = 0x3 NFT_RT_MAX = 0x4 - NFT_SECMARK_CTX_MAXLEN = 0x100 + NFT_SECMARK_CTX_MAXLEN = 0x1000 NFT_SET_MAXNAMELEN = 0x100 NFT_SOCKET_MAX = 0x3 NFT_TABLE_F_MASK = 0x7 @@ -2356,9 +2358,11 @@ const ( PERF_MEM_LVLNUM_IO = 0xa PERF_MEM_LVLNUM_L1 = 0x1 PERF_MEM_LVLNUM_L2 = 0x2 + PERF_MEM_LVLNUM_L2_MHB = 0x5 PERF_MEM_LVLNUM_L3 = 0x3 PERF_MEM_LVLNUM_L4 = 0x4 PERF_MEM_LVLNUM_LFB = 0xc + PERF_MEM_LVLNUM_MSC = 0x6 PERF_MEM_LVLNUM_NA = 0xf PERF_MEM_LVLNUM_PMEM = 0xe PERF_MEM_LVLNUM_RAM = 0xd @@ -2431,6 +2435,7 @@ const ( PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROCFS_IOCTL_MAGIC = 'f' PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 @@ -2933,11 +2938,12 @@ const ( RUSAGE_SELF = 0x0 RUSAGE_THREAD = 0x1 RWF_APPEND = 0x10 + RWF_ATOMIC = 0x40 RWF_DSYNC = 0x2 RWF_HIPRI = 0x1 RWF_NOAPPEND = 0x20 RWF_NOWAIT = 0x8 - RWF_SUPPORTED = 0x3f + RWF_SUPPORTED = 0x7f RWF_SYNC = 0x4 RWF_WRITE_LIFE_NOT_SET = 0x0 SCHED_BATCH = 0x3 @@ -3210,6 +3216,7 @@ const ( STATX_ATTR_MOUNT_ROOT = 0x2000 STATX_ATTR_NODUMP = 0x40 STATX_ATTR_VERITY = 0x100000 + STATX_ATTR_WRITE_ATOMIC = 0x400000 STATX_BASIC_STATS = 0x7ff STATX_BLOCKS = 0x400 STATX_BTIME = 0x800 @@ -3226,6 +3233,7 @@ const ( STATX_SUBVOL = 0x8000 STATX_TYPE = 0x1 STATX_UID = 0x8 + STATX_WRITE_ATOMIC = 0x10000 STATX__RESERVED = 0x80000000 SYNC_FILE_RANGE_WAIT_AFTER = 0x4 SYNC_FILE_RANGE_WAIT_BEFORE = 0x1 @@ -3624,6 +3632,7 @@ const ( XDP_UMEM_PGOFF_COMPLETION_RING = 0x180000000 XDP_UMEM_PGOFF_FILL_RING = 0x100000000 XDP_UMEM_REG = 0x4 + XDP_UMEM_TX_METADATA_LEN = 0x4 XDP_UMEM_TX_SW_CSUM = 0x2 XDP_UMEM_UNALIGNED_CHUNK_FLAG = 0x1 XDP_USE_NEED_WAKEUP = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 684a5168..8aa6d77c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 61d74b59..da428f42 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -153,9 +153,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index a28c9e3e..bf45bfec 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 
NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index ab5d1fe8..71c67162 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index c523090e..9476628f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -154,9 +154,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 01e6ea78..b9e85f3c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 7aa610b1..a48b68a7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 92af771b..ea00e852 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index b27ef5e6..91c64687 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x20 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 237a2cef..8cbf38d6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 4a5c555a..a2df7341 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index a02fb49a..24791379 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -152,9 +152,14 @@ const ( NL3 = 0x300 NLDLY = 0x300 NOFLSH = 0x80000000 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x4 ONLCR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index e26a7c61..d265f146 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -150,9 +150,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index c48f7c21..3f2d6443 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -150,9 +150,14 @@ const ( NFDBITS = 
0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x8008b705 NS_GET_NSTYPE = 0xb703 NS_GET_OWNER_UID = 0xb704 NS_GET_PARENT = 0xb702 + NS_GET_PID_FROM_PIDNS = 0x8004b706 + NS_GET_PID_IN_PIDNS = 0x8004b708 + NS_GET_TGID_FROM_PIDNS = 0x8004b707 + NS_GET_TGID_IN_PIDNS = 0x8004b709 NS_GET_USERNS = 0xb701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index ad4b9aac..5d8b727a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -155,9 +155,14 @@ const ( NFDBITS = 0x40 NLDLY = 0x100 NOFLSH = 0x80 + NS_GET_MNTNS_ID = 0x4008b705 NS_GET_NSTYPE = 0x2000b703 NS_GET_OWNER_UID = 0x2000b704 NS_GET_PARENT = 0x2000b702 + NS_GET_PID_FROM_PIDNS = 0x4004b706 + NS_GET_PID_IN_PIDNS = 0x4004b708 + NS_GET_TGID_FROM_PIDNS = 0x4004b707 + NS_GET_TGID_IN_PIDNS = 0x4004b709 NS_GET_USERNS = 0x2000b701 OLCUC = 0x2 ONLCR = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go index da08b2ab..1ec2b140 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_zos_s390x.go @@ -581,6 +581,8 @@ const ( AT_EMPTY_PATH = 0x1000 AT_REMOVEDIR = 0x200 RENAME_NOREPLACE = 1 << 0 + ST_RDONLY = 1 + ST_NOSUID = 2 ) const ( diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index b622533e..24b346e1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index cfe6646b..ebd21310 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT 
libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 13f624f6..824b9c2d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -841,6 +841,26 @@ var libc_pthread_fchdir_np_trampoline_addr uintptr // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) { + var _p0 unsafe.Pointer + if len(iov) > 0 { + _p0 = unsafe.Pointer(&iov[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall9(libc_connectx_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(endpoints)), uintptr(associd), uintptr(flags), uintptr(_p0), uintptr(len(iov)), uintptr(unsafe.Pointer(n)), uintptr(unsafe.Pointer(connid)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +var libc_connectx_trampoline_addr uintptr + +//go:cgo_import_dynamic libc_connectx connectx "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) { _, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index fe222b75..4f178a22 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -248,6 +248,11 @@ TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0 GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8 DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB) +TEXT libc_connectx_trampoline<>(SB),NOSPLIT,$0-0 + JMP libc_connectx(SB) +GLOBL ·libc_connectx_trampoline_addr(SB), RODATA, $8 +DATA ·libc_connectx_trampoline_addr(SB)/8, $libc_connectx_trampoline<>(SB) + TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0 JMP libc_sendfile(SB) GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go index 1bc1a5ad..af30da55 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go @@ -971,23 +971,6 @@ func Getpriority(which int, who int) (prio int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getrandom(buf []byte, flags int) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall(SYS_GETRANDOM, uintptr(_p0), uintptr(len(buf)), uintptr(flags)) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index d3e38f68..f485dbf4 100644 --- 
a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -341,6 +341,7 @@ const ( SYS_STATX = 332 SYS_IO_PGETEVENTS = 333 SYS_RSEQ = 334 + SYS_URETPROBE = 335 SYS_PIDFD_SEND_SIGNAL = 424 SYS_IO_URING_SETUP = 425 SYS_IO_URING_ENTER = 426 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 6c778c23..1893e2fe 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -85,7 +85,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 37281cf5..16a4017d 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -84,6 +84,8 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 + SYS_NEWFSTATAT = 79 + SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 SYS_FDATASYNC = 83 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 9889f6a5..a5459e76 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -84,7 +84,7 @@ const ( SYS_SPLICE = 76 SYS_TEE = 77 SYS_READLINKAT = 78 - SYS_FSTATAT = 79 + SYS_NEWFSTATAT = 79 SYS_FSTAT = 80 SYS_SYNC = 81 SYS_FSYNC = 82 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index 091d107f..d003c3d4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 28ff4ef7..0d45a941 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -306,6 +306,19 @@ type XVSockPgen struct { type _Socklen uint32 +type SaeAssocID uint32 + +type SaeConnID uint32 + +type SaEndpoints struct { + Srcif uint32 + Srcaddr *RawSockaddr + Srcaddrlen uint32 + Dstaddr *RawSockaddr + Dstaddrlen uint32 + _ [4]byte +} + type Xucred struct { Version uint32 Uid uint32 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 6cbd094a..51e13eb0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -625,6 +625,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index 7c03b6ee..d002d8ef 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -630,6 +630,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index 422107ee..3f863d89 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -616,6 +616,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index 505a12ac..61c72931 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -610,6 +610,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go index cc986c79..b5d17414 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_riscv64.go @@ -612,6 +612,7 @@ const ( POLLRDNORM = 0x40 POLLWRBAND = 0x100 POLLWRNORM = 0x4 + POLLRDHUP = 0x4000 ) type CapRights struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 7f1961b9..3a69e454 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -87,31 +87,35 @@ type StatxTimestamp struct { } type Statx_t struct { - Mask uint32 - Blksize uint32 - Attributes uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Mode uint16 - _ [1]uint16 - Ino uint64 - Size uint64 - Blocks uint64 - Attributes_mask uint64 - Atime StatxTimestamp - Btime StatxTimestamp - Ctime StatxTimestamp - Mtime StatxTimestamp - Rdev_major uint32 - Rdev_minor uint32 - Dev_major uint32 - Dev_minor uint32 - Mnt_id uint64 - Dio_mem_align uint32 - Dio_offset_align uint32 - Subvol uint64 - _ [11]uint64 + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + Mnt_id uint64 + Dio_mem_align uint32 + Dio_offset_align uint32 + Subvol uint64 + Atomic_write_unit_min uint32 + Atomic_write_unit_max uint32 + Atomic_write_segments_max uint32 + _ [1]uint32 + _ [9]uint64 } type Fsid struct { @@ -516,6 +520,29 @@ type TCPInfo struct { Total_rto_time uint32 } +type TCPVegasInfo struct { + Enabled uint32 + Rttcnt uint32 + Rtt uint32 + Minrtt uint32 +} + +type TCPDCTCPInfo struct { + Enabled uint16 + Ce_state uint16 + Alpha uint32 + Ab_ecn uint32 + Ab_tot uint32 +} + +type TCPBBRInfo struct { + Bw_lo uint32 + Bw_hi uint32 + Min_rtt uint32 + Pacing_gain uint32 + Cwnd_gain uint32 +} + type CanFilter struct { Id uint32 Mask uint32 @@ -557,6 +584,7 @@ const ( SizeofICMPv6Filter = 0x20 SizeofUcred = 0xc SizeofTCPInfo = 0xf8 + SizeofTCPCCInfo = 0x14 SizeofCanFilter = 0x8 SizeofTCPRepairOpt = 0x8 ) @@ -2486,7 +2514,7 @@ type XDPMmapOffsets struct { type XDPUmemReg struct { Addr uint64 Len uint64 - Chunk_size uint32 + Size uint32 Headroom uint32 Flags uint32 Tx_metadata_len uint32 @@ -3766,7 +3794,7 @@ const ( ETHTOOL_MSG_PSE_GET = 0x24 ETHTOOL_MSG_PSE_SET = 0x25 ETHTOOL_MSG_RSS_GET = 0x26 - ETHTOOL_MSG_USER_MAX = 0x2b + ETHTOOL_MSG_USER_MAX = 0x2c ETHTOOL_MSG_KERNEL_NONE = 0x0 
ETHTOOL_MSG_STRSET_GET_REPLY = 0x1 ETHTOOL_MSG_LINKINFO_GET_REPLY = 0x2 @@ -3806,7 +3834,7 @@ const ( ETHTOOL_MSG_MODULE_NTF = 0x24 ETHTOOL_MSG_PSE_GET_REPLY = 0x25 ETHTOOL_MSG_RSS_GET_REPLY = 0x26 - ETHTOOL_MSG_KERNEL_MAX = 0x2b + ETHTOOL_MSG_KERNEL_MAX = 0x2c ETHTOOL_FLAG_COMPACT_BITSETS = 0x1 ETHTOOL_FLAG_OMIT_REPLY = 0x2 ETHTOOL_FLAG_STATS = 0x4 @@ -3951,7 +3979,7 @@ const ( ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL = 0x17 ETHTOOL_A_COALESCE_USE_CQE_MODE_TX = 0x18 ETHTOOL_A_COALESCE_USE_CQE_MODE_RX = 0x19 - ETHTOOL_A_COALESCE_MAX = 0x1c + ETHTOOL_A_COALESCE_MAX = 0x1e ETHTOOL_A_PAUSE_UNSPEC = 0x0 ETHTOOL_A_PAUSE_HEADER = 0x1 ETHTOOL_A_PAUSE_AUTONEG = 0x2 @@ -4609,7 +4637,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x14a + NL80211_ATTR_MAX = 0x14c NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_MATCH_SETS = 0x85 @@ -5213,7 +5241,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x20 + NL80211_FREQUENCY_ATTR_MAX = 0x21 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 15adc041..ad05b51a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -727,6 +727,37 @@ const ( RISCV_HWPROBE_EXT_ZBA = 0x8 RISCV_HWPROBE_EXT_ZBB = 0x10 RISCV_HWPROBE_EXT_ZBS = 0x20 + RISCV_HWPROBE_EXT_ZICBOZ = 0x40 + RISCV_HWPROBE_EXT_ZBC = 0x80 + RISCV_HWPROBE_EXT_ZBKB = 0x100 + RISCV_HWPROBE_EXT_ZBKC = 0x200 + RISCV_HWPROBE_EXT_ZBKX = 0x400 + RISCV_HWPROBE_EXT_ZKND = 0x800 + RISCV_HWPROBE_EXT_ZKNE = 0x1000 + RISCV_HWPROBE_EXT_ZKNH = 0x2000 + RISCV_HWPROBE_EXT_ZKSED = 0x4000 + RISCV_HWPROBE_EXT_ZKSH = 0x8000 + RISCV_HWPROBE_EXT_ZKT = 0x10000 + RISCV_HWPROBE_EXT_ZVBB = 0x20000 + RISCV_HWPROBE_EXT_ZVBC = 0x40000 + RISCV_HWPROBE_EXT_ZVKB = 0x80000 + RISCV_HWPROBE_EXT_ZVKG = 0x100000 + RISCV_HWPROBE_EXT_ZVKNED = 0x200000 + RISCV_HWPROBE_EXT_ZVKNHA = 0x400000 + RISCV_HWPROBE_EXT_ZVKNHB = 0x800000 + RISCV_HWPROBE_EXT_ZVKSED = 0x1000000 + RISCV_HWPROBE_EXT_ZVKSH = 0x2000000 + RISCV_HWPROBE_EXT_ZVKT = 0x4000000 + RISCV_HWPROBE_EXT_ZFH = 0x8000000 + RISCV_HWPROBE_EXT_ZFHMIN = 0x10000000 + RISCV_HWPROBE_EXT_ZIHINTNTL = 0x20000000 + RISCV_HWPROBE_EXT_ZVFH = 0x40000000 + RISCV_HWPROBE_EXT_ZVFHMIN = 0x80000000 + RISCV_HWPROBE_EXT_ZFA = 0x100000000 + RISCV_HWPROBE_EXT_ZTSO = 0x200000000 + RISCV_HWPROBE_EXT_ZACAS = 0x400000000 + RISCV_HWPROBE_EXT_ZICOND = 0x800000000 + RISCV_HWPROBE_EXT_ZIHINTPAUSE = 0x1000000000 RISCV_HWPROBE_KEY_CPUPERF_0 = 0x5 RISCV_HWPROBE_MISALIGNED_UNKNOWN = 0x0 RISCV_HWPROBE_MISALIGNED_EMULATED = 0x1 @@ -734,4 +765,6 @@ const ( RISCV_HWPROBE_MISALIGNED_FAST = 0x3 RISCV_HWPROBE_MISALIGNED_UNSUPPORTED = 0x4 RISCV_HWPROBE_MISALIGNED_MASK = 0x7 + RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE = 0x6 + RISCV_HWPROBE_WHICH_CPUS = 0x1 ) diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 115341fb..4e613cf6 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -65,7 +65,7 @@ func LoadDLL(name string) (dll *DLL, err error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load 
operation failes. +// MustLoadDLL is like LoadDLL but panics if load operation fails. func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 1fa34fd1..5cee9a31 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -313,6 +313,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetConsoleMode(console Handle, mode uint32) (err error) = kernel32.SetConsoleMode //sys GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) = kernel32.GetConsoleScreenBufferInfo //sys setConsoleCursorPosition(console Handle, position uint32) (err error) = kernel32.SetConsoleCursorPosition +//sys GetConsoleCP() (cp uint32, err error) = kernel32.GetConsoleCP +//sys GetConsoleOutputCP() (cp uint32, err error) = kernel32.GetConsoleOutputCP +//sys SetConsoleCP(cp uint32) (err error) = kernel32.SetConsoleCP +//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP //sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW //sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW //sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 3f03b3d5..7b97a154 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1060,6 +1060,7 @@ const ( SIO_GET_EXTENSION_FUNCTION_POINTER = IOC_INOUT | IOC_WS2 | 6 SIO_KEEPALIVE_VALS = IOC_IN | IOC_VENDOR | 4 SIO_UDP_CONNRESET = IOC_IN | IOC_VENDOR | 12 + SIO_UDP_NETRESET = IOC_IN | IOC_VENDOR | 15 // cf. 
http://support.microsoft.com/default.aspx?scid=kb;en-us;257460 diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 9bb979a3..4c2e1bdc 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -247,7 +247,9 @@ var ( procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetConsoleCP = modkernel32.NewProc("GetConsoleCP") procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procGetConsoleOutputCP = modkernel32.NewProc("GetConsoleOutputCP") procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") @@ -347,8 +349,10 @@ var ( procSetCommMask = modkernel32.NewProc("SetCommMask") procSetCommState = modkernel32.NewProc("SetCommState") procSetCommTimeouts = modkernel32.NewProc("SetCommTimeouts") + procSetConsoleCP = modkernel32.NewProc("SetConsoleCP") procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procSetConsoleOutputCP = modkernel32.NewProc("SetConsoleOutputCP") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") @@ -2162,6 +2166,15 @@ func GetComputerName(buf *uint16, n *uint32) (err error) { return } +func GetConsoleCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleMode(console Handle, mode *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(mode)), 0) if r1 == 0 { @@ -2170,6 +2183,15 @@ func GetConsoleMode(console Handle, mode *uint32) (err error) { return } +func GetConsoleOutputCP() (cp uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetConsoleOutputCP.Addr(), 0, 0, 0, 0) + cp = uint32(r0) + if cp == 0 { + err = errnoErr(e1) + } + return +} + func GetConsoleScreenBufferInfo(console Handle, info *ConsoleScreenBufferInfo) (err error) { r1, _, e1 := syscall.Syscall(procGetConsoleScreenBufferInfo.Addr(), 2, uintptr(console), uintptr(unsafe.Pointer(info)), 0) if r1 == 0 { @@ -3038,6 +3060,14 @@ func SetCommTimeouts(handle Handle, timeouts *CommTimeouts) (err error) { return } +func SetConsoleCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func setConsoleCursorPosition(console Handle, position uint32) (err error) { r1, _, e1 := syscall.Syscall(procSetConsoleCursorPosition.Addr(), 2, uintptr(console), uintptr(position), 0) if r1 == 0 { @@ -3054,6 +3084,14 @@ func SetConsoleMode(console Handle, mode uint32) (err error) { return } +func SetConsoleOutputCP(cp uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetConsoleOutputCP.Addr(), 1, uintptr(cp), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetCurrentDirectory(path *uint16) (err error) { r1, _, e1 := syscall.Syscall(procSetCurrentDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) if r1 == 0 { diff 
--git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 8f6c7f49..93a798ab 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 { // bursts of at most b tokens. func NewLimiter(r Limit, b int) *Limiter { return &Limiter{ - limit: r, - burst: b, + limit: r, + burst: b, + tokens: float64(b), } } @@ -344,18 +345,6 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) tokens: n, timeToAct: t, } - } else if lim.limit == 0 { - var ok bool - if lim.burst >= n { - ok = true - lim.burst -= n - } - return Reservation{ - ok: ok, - lim: lim, - tokens: lim.burst, - timeToAct: t, - } } t, tokens := lim.advance(t) diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go index 6bdcf70a..ca71e3e1 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -7,13 +7,5 @@ package astutil import "go/ast" // Unparen returns e with any enclosing parentheses stripped. -// TODO(adonovan): use go1.22's ast.Unparen. -func Unparen(e ast.Expr) ast.Expr { - for { - p, ok := e.(*ast.ParenExpr) - if !ok { - return e - } - e = p.X - } -} +// Deprecated: use [ast.Unparen]. +func Unparen(e ast.Expr) ast.Expr { return ast.Unparen(e) } diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 3531ac8f..f1931d10 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -64,7 +64,7 @@ graph using the Imports fields. The Load function can be configured by passing a pointer to a Config as the first argument. A nil Config is equivalent to the zero Config, which -causes Load to run in LoadFiles mode, collecting minimal information. +causes Load to run in [LoadFiles] mode, collecting minimal information. See the documentation for type Config for details. As noted earlier, the Config.Mode controls the amount of detail @@ -72,14 +72,14 @@ reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) -uninterpreted to [Load], so that it can interpret them +uninterpreted to Load, so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. # The driver protocol -[Load] may be used to load Go packages even in Go projects that use +Load may be used to load Go packages even in Go projects that use alternative build systems, by installing an appropriate "driver" program for the build system and specifying its location in the GOPACKAGESDRIVER environment variable. @@ -97,6 +97,15 @@ JSON-encoded [DriverRequest] message providing additional information is written to the driver's standard input. The driver must write a JSON-encoded [DriverResponse] message to its standard output. (This message differs from the JSON schema produced by 'go list'.) + +The value of the PWD environment variable seen by the driver process +is the preferred name of its working directory. (The working directory +may have other aliases due to symbolic links; see the comment on the +Dir field of [exec.Cmd] for related information.) 
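// Illustrative sketch, not part of the vendored diff: with the x/time/rate
// change above (tokens initialized to float64(b) in NewLimiter), a freshly
// constructed limiter starts with a full bucket and permits an immediate
// burst of up to b events before throttling kicks in.
package main

import (
	"fmt"

	"golang.org/x/time/rate"
)

func main() {
	lim := rate.NewLimiter(rate.Limit(1), 3) // 1 event/sec, burst of 3
	for i := 0; i < 4; i++ {
		// The first three calls drain the initially full bucket; the fourth
		// is rejected until tokens accrue again.
		fmt.Println(i, lim.Allow())
	}
}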
+When the driver process emits in its response the name of a file +that is a descendant of this directory, it must use an absolute path +that has the value of PWD as a prefix, to ensure that the returned +filenames satisfy the original query. */ package packages // import "golang.org/x/tools/go/packages" diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index c2b4b711..8f7afcb5 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -82,7 +82,7 @@ type DriverResponse struct { type driver func(cfg *Config, patterns ...string) (*DriverResponse, error) // findExternalDriver returns the file path of a tool that supplies -// the build system package structure, or "" if not found." +// the build system package structure, or "" if not found. // If GOPACKAGESDRIVER is set in the environment findExternalTool returns its // value, otherwise it searches for a binary named gopackagesdriver on the PATH. func findExternalDriver(cfg *Config) driver { diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 5c080d21..5fcad6ea 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -9,49 +9,46 @@ import ( "strings" ) -var allModes = []LoadMode{ - NeedName, - NeedFiles, - NeedCompiledGoFiles, - NeedImports, - NeedDeps, - NeedExportFile, - NeedTypes, - NeedSyntax, - NeedTypesInfo, - NeedTypesSizes, +var modes = [...]struct { + mode LoadMode + name string +}{ + {NeedName, "NeedName"}, + {NeedFiles, "NeedFiles"}, + {NeedCompiledGoFiles, "NeedCompiledGoFiles"}, + {NeedImports, "NeedImports"}, + {NeedDeps, "NeedDeps"}, + {NeedExportFile, "NeedExportFile"}, + {NeedTypes, "NeedTypes"}, + {NeedSyntax, "NeedSyntax"}, + {NeedTypesInfo, "NeedTypesInfo"}, + {NeedTypesSizes, "NeedTypesSizes"}, + {NeedModule, "NeedModule"}, + {NeedEmbedFiles, "NeedEmbedFiles"}, + {NeedEmbedPatterns, "NeedEmbedPatterns"}, } -var modeStrings = []string{ - "NeedName", - "NeedFiles", - "NeedCompiledGoFiles", - "NeedImports", - "NeedDeps", - "NeedExportFile", - "NeedTypes", - "NeedSyntax", - "NeedTypesInfo", - "NeedTypesSizes", -} - -func (mod LoadMode) String() string { - m := mod - if m == 0 { +func (mode LoadMode) String() string { + if mode == 0 { return "LoadMode(0)" } var out []string - for i, x := range allModes { - if x > m { - break - } - if (m & x) != 0 { - out = append(out, modeStrings[i]) - m = m ^ x + // named bits + for _, item := range modes { + if (mode & item.mode) != 0 { + mode ^= item.mode + out = append(out, item.name) } } - if m != 0 { - out = append(out, "Unknown") + // unnamed residue + if mode != 0 { + if out == nil { + return fmt.Sprintf("LoadMode(%#x)", int(mode)) + } + out = append(out, fmt.Sprintf("%#x", int(mode))) } - return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) + if len(out) == 1 { + return out[0] + } + return "(" + strings.Join(out, "|") + ")" } diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 0b6bfaff..f227f1ba 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -46,10 +46,10 @@ import ( // // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: -// - https://github.com/golang/go/issues/56633 -// - 
https://github.com/golang/go/issues/56677 -// - https://github.com/golang/go/issues/58726 -// - https://github.com/golang/go/issues/63517 +// - https://github.com/golang/go/issues/56633 +// - https://github.com/golang/go/issues/56677 +// - https://github.com/golang/go/issues/58726 +// - https://github.com/golang/go/issues/63517 type LoadMode int const ( @@ -103,25 +103,37 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. NeedEmbedPatterns + + // Be sure to update loadmode_string.go when adding new items! ) const ( + // LoadFiles loads the name and file names for the initial packages. + // // Deprecated: LoadFiles exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles + // LoadImports loads the name, file names, and import mapping for the initial packages. + // // Deprecated: LoadImports exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadImports = LoadFiles | NeedImports + // LoadTypes loads exported type information for the initial packages. + // // Deprecated: LoadTypes exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadTypes = LoadImports | NeedTypes | NeedTypesSizes + // LoadSyntax loads typed syntax for the initial packages. + // // Deprecated: LoadSyntax exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo + // LoadAllSyntax loads typed syntax for the initial packages and all dependencies. + // // Deprecated: LoadAllSyntax exists for historical compatibility // and should not be used. Please directly specify the needed fields using the Need values. LoadAllSyntax = LoadSyntax | NeedDeps @@ -236,14 +248,13 @@ type Config struct { // Load loads and returns the Go packages named by the given patterns. // -// Config specifies loading options; -// nil behaves the same as an empty Config. +// The cfg parameter specifies loading options; nil behaves the same as an empty [Config]. // // The [Config.Mode] field is a set of bits that determine what kinds // of information should be computed and returned. Modes that require // more information tend to be slower. See [LoadMode] for details // and important caveats. Its zero value is equivalent to -// NeedName | NeedFiles | NeedCompiledGoFiles. +// [NeedName] | [NeedFiles] | [NeedCompiledGoFiles]. // // Each call to Load returns a new set of [Package] instances. // The Packages and their Imports form a directed acyclic graph. @@ -260,7 +271,7 @@ type Config struct { // Errors associated with a particular package are recorded in the // corresponding Package's Errors list, and do not cause Load to // return an error. Clients may need to handle such errors before -// proceeding with further analysis. The PrintErrors function is +// proceeding with further analysis. The [PrintErrors] function is // provided for convenient display of all errors. func Load(cfg *Config, patterns ...string) ([]*Package, error) { ld := newLoader(cfg) @@ -763,6 +774,7 @@ func newLoader(cfg *Config) *loader { // because we load source if export data is missing. if ld.ParseFile == nil { ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) { + // We implicitly promise to keep doing ast.Object resolution. 
:( const mode = parser.AllErrors | parser.ParseComments return parser.ParseFile(fset, filename, src, mode) } diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index 9ada1777..a70b727f 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -228,7 +228,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { // Reject obviously non-viable cases. switch obj := obj.(type) { case *types.TypeName: - if _, ok := aliases.Unalias(obj.Type()).(*types.TypeParam); !ok { + if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok { // With the exception of type parameters, only package-level type names // have a path. return "", fmt.Errorf("no path for %v", obj) @@ -280,7 +280,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { path = append(path, opType) T := o.Type() - if alias, ok := T.(*aliases.Alias); ok { + if alias, ok := T.(*types.Alias); ok { if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam, nil); r != nil { return Path(r), nil } @@ -320,7 +320,7 @@ func (enc *Encoder) For(obj types.Object) (Path, error) { } // Inspect declared methods of defined types. - if T, ok := aliases.Unalias(o.Type()).(*types.Named); ok { + if T, ok := types.Unalias(o.Type()).(*types.Named); ok { path = append(path, opType) // The method index here is always with respect // to the underlying go/types data structures, @@ -449,8 +449,8 @@ func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) { // nil, it will be allocated as necessary. func find(obj types.Object, T types.Type, path []byte, seen map[*types.TypeName]bool) []byte { switch T := T.(type) { - case *aliases.Alias: - return find(obj, aliases.Unalias(T), path, seen) + case *types.Alias: + return find(obj, types.Unalias(T), path, seen) case *types.Basic, *types.Named: // Named types belonging to pkg were handled already, // so T must belong to another package. No path. @@ -626,7 +626,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { // Inv: t != nil, obj == nil - t = aliases.Unalias(t) + t = types.Unalias(t) switch code { case opElem: hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map @@ -664,7 +664,7 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { t = named.Underlying() case opRhs: - if alias, ok := t.(*aliases.Alias); ok { + if alias, ok := t.(*types.Alias); ok { t = aliases.Rhs(alias) } else if false && aliases.Enabled() { // The Enabled check is too expensive, so for now we diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go new file mode 100644 index 00000000..75438035 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import ( + "go/ast" + "go/types" + + "golang.org/x/tools/internal/typeparams" +) + +// Callee returns the named target of a function call, if any: +// a function, method, builtin, or variable. +// +// Functions and methods may potentially have type parameters. +func Callee(info *types.Info, call *ast.CallExpr) types.Object { + fun := ast.Unparen(call.Fun) + + // Look through type instantiation if necessary. 
+ isInstance := false + switch fun.(type) { + case *ast.IndexExpr, *ast.IndexListExpr: + // When extracting the callee from an *IndexExpr, we need to check that + // it is a *types.Func and not a *types.Var. + // Example: Don't match a slice m within the expression `m[0]()`. + isInstance = true + fun, _, _, _ = typeparams.UnpackIndexExpr(fun) + } + + var obj types.Object + switch fun := fun.(type) { + case *ast.Ident: + obj = info.Uses[fun] // type, var, builtin, or declared func + case *ast.SelectorExpr: + if sel, ok := info.Selections[fun]; ok { + obj = sel.Obj() // method or field + } else { + obj = info.Uses[fun.Sel] // qualified identifier? + } + } + if _, ok := obj.(*types.TypeName); ok { + return nil // T(x) is a conversion, not a call + } + // A Func is required to match instantiations. + if _, ok := obj.(*types.Func); isInstance && !ok { + return nil // Was not a Func. + } + return obj +} + +// StaticCallee returns the target (function or method) of a static function +// call, if any. It returns nil for calls to builtins. +// +// Note: for calls of instantiated functions and methods, StaticCallee returns +// the corresponding generic function or method on the generic type. +func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { + if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { + return f + } + return nil +} + +func interfaceMethod(f *types.Func) bool { + recv := f.Type().(*types.Signature).Recv() + return recv != nil && types.IsInterface(recv.Type()) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go new file mode 100644 index 00000000..b81ce0c3 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/imports.go @@ -0,0 +1,30 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +import "go/types" + +// Dependencies returns all dependencies of the specified packages. +// +// Dependent packages appear in topological order: if package P imports +// package Q, Q appears earlier than P in the result. +// The algorithm follows import statements in the order they +// appear in the source code, so the result is a total order. +func Dependencies(pkgs ...*types.Package) []*types.Package { + var result []*types.Package + seen := make(map[*types.Package]bool) + var visit func(pkgs []*types.Package) + visit = func(pkgs []*types.Package) { + for _, p := range pkgs { + if !seen[p] { + seen[p] = true + visit(p.Imports()) + result = append(result, p) + } + } + } + visit(pkgs) + return result +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go new file mode 100644 index 00000000..8d824f71 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -0,0 +1,517 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeutil defines various utilities for types, such as Map, +// a mapping from types.Type to any values. +package typeutil // import "golang.org/x/tools/go/types/typeutil" + +import ( + "bytes" + "fmt" + "go/types" + "reflect" + + "golang.org/x/tools/internal/typeparams" +) + +// Map is a hash-table-based mapping from types (types.Type) to +// arbitrary any values. The concrete types that implement +// the Type interface are pointers. 
Since they are not canonicalized, +// == cannot be used to check for equivalence, and thus we cannot +// simply use a Go map. +// +// Just as with map[K]V, a nil *Map is a valid empty map. +// +// Not thread-safe. +type Map struct { + hasher Hasher // shared by many Maps + table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused + length int // number of map entries +} + +// entry is an entry (key/value association) in a hash bucket. +type entry struct { + key types.Type + value any +} + +// SetHasher sets the hasher used by Map. +// +// All Hashers are functionally equivalent but contain internal state +// used to cache the results of hashing previously seen types. +// +// A single Hasher created by MakeHasher() may be shared among many +// Maps. This is recommended if the instances have many keys in +// common, as it will amortize the cost of hash computation. +// +// A Hasher may grow without bound as new types are seen. Even when a +// type is deleted from the map, the Hasher never shrinks, since other +// types in the map may reference the deleted type indirectly. +// +// Hashers are not thread-safe, and read-only operations such as +// Map.Lookup require updates to the hasher, so a full Mutex lock (not a +// read-lock) is require around all Map operations if a shared +// hasher is accessed from multiple threads. +// +// If SetHasher is not called, the Map will create a private hasher at +// the first call to Insert. +func (m *Map) SetHasher(hasher Hasher) { + m.hasher = hasher +} + +// Delete removes the entry with the given key, if any. +// It returns true if the entry was found. +func (m *Map) Delete(key types.Type) bool { + if m != nil && m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + for i, e := range bucket { + if e.key != nil && types.Identical(key, e.key) { + // We can't compact the bucket as it + // would disturb iterators. + bucket[i] = entry{} + m.length-- + return true + } + } + } + return false +} + +// At returns the map entry for the given key. +// The result is nil if the entry is not present. +func (m *Map) At(key types.Type) any { + if m != nil && m.table != nil { + for _, e := range m.table[m.hasher.Hash(key)] { + if e.key != nil && types.Identical(key, e.key) { + return e.value + } + } + } + return nil +} + +// Set sets the map entry for key to val, +// and returns the previous entry, if any. +func (m *Map) Set(key types.Type, value any) (prev any) { + if m.table != nil { + hash := m.hasher.Hash(key) + bucket := m.table[hash] + var hole *entry + for i, e := range bucket { + if e.key == nil { + hole = &bucket[i] + } else if types.Identical(key, e.key) { + prev = e.value + bucket[i].value = value + return + } + } + + if hole != nil { + *hole = entry{key, value} // overwrite deleted entry + } else { + m.table[hash] = append(bucket, entry{key, value}) + } + } else { + if m.hasher.memo == nil { + m.hasher = MakeHasher() + } + hash := m.hasher.Hash(key) + m.table = map[uint32][]entry{hash: {entry{key, value}}} + } + + m.length++ + return +} + +// Len returns the number of map entries. +func (m *Map) Len() int { + if m != nil { + return m.length + } + return 0 +} + +// Iterate calls function f on each entry in the map in unspecified order. 
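// Illustrative sketch of the typeutil.Map API vendored above (assumed typical
// usage, not part of the diff): keys are compared with types.Identical, so two
// structurally identical types that are distinct pointers share one entry.
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map // the zero value is ready to use
	k1 := types.NewSlice(types.Typ[types.Int])
	k2 := types.NewSlice(types.Typ[types.Int]) // identical type, different pointer
	m.Set(k1, "slice of int")
	fmt.Println(m.At(k2)) // "slice of int": lookup is by type identity, not pointer
	fmt.Println(m.Len())  // 1
}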
+// +// If f should mutate the map, Iterate provides the same guarantees as +// Go maps: if f deletes a map entry that Iterate has not yet reached, +// f will not be invoked for it, but if f inserts a map entry that +// Iterate has not yet reached, whether or not f will be invoked for +// it is unspecified. +func (m *Map) Iterate(f func(key types.Type, value any)) { + if m != nil { + for _, bucket := range m.table { + for _, e := range bucket { + if e.key != nil { + f(e.key, e.value) + } + } + } + } +} + +// Keys returns a new slice containing the set of map keys. +// The order is unspecified. +func (m *Map) Keys() []types.Type { + keys := make([]types.Type, 0, m.Len()) + m.Iterate(func(key types.Type, _ any) { + keys = append(keys, key) + }) + return keys +} + +func (m *Map) toString(values bool) string { + if m == nil { + return "{}" + } + var buf bytes.Buffer + fmt.Fprint(&buf, "{") + sep := "" + m.Iterate(func(key types.Type, value any) { + fmt.Fprint(&buf, sep) + sep = ", " + fmt.Fprint(&buf, key) + if values { + fmt.Fprintf(&buf, ": %q", value) + } + }) + fmt.Fprint(&buf, "}") + return buf.String() +} + +// String returns a string representation of the map's entries. +// Values are printed using fmt.Sprintf("%v", v). +// Order is unspecified. +func (m *Map) String() string { + return m.toString(true) +} + +// KeysString returns a string representation of the map's key set. +// Order is unspecified. +func (m *Map) KeysString() string { + return m.toString(false) +} + +//////////////////////////////////////////////////////////////////////// +// Hasher + +// A Hasher maps each type to its hash value. +// For efficiency, a hasher uses memoization; thus its memory +// footprint grows monotonically over time. +// Hashers are not thread-safe. +// Hashers have reference semantics. +// Call MakeHasher to create a Hasher. +type Hasher struct { + memo map[types.Type]uint32 + + // ptrMap records pointer identity. + ptrMap map[any]uint32 + + // sigTParams holds type parameters from the signature being hashed. + // Signatures are considered identical modulo renaming of type parameters, so + // within the scope of a signature type the identity of the signature's type + // parameters is just their index. + // + // Since the language does not currently support referring to uninstantiated + // generic types or functions, and instantiated signatures do not have type + // parameter lists, we should never encounter a second non-empty type + // parameter list when hashing a generic signature. + sigTParams *types.TypeParamList +} + +// MakeHasher returns a new Hasher instance. +func MakeHasher() Hasher { + return Hasher{ + memo: make(map[types.Type]uint32), + ptrMap: make(map[any]uint32), + sigTParams: nil, + } +} + +// Hash computes a hash value for the given type t such that +// Identical(t, t') => Hash(t) == Hash(t'). +func (h Hasher) Hash(t types.Type) uint32 { + hash, ok := h.memo[t] + if !ok { + hash = h.hashFor(t) + h.memo[t] = hash + } + return hash +} + +// hashString computes the Fowler–Noll–Vo hash of s. +func hashString(s string) uint32 { + var h uint32 + for i := 0; i < len(s); i++ { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +// hashFor computes the hash of t. +func (h Hasher) hashFor(t types.Type) uint32 { + // See Identical for rationale. 
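// Illustrative aside (assumed typical usage of the Hasher vendored above, not
// part of the diff): a Hasher memoizes hash values and guarantees that
// identical types hash equally, which is what makes typeutil.Map buckets work.
package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	h := typeutil.MakeHasher()
	t1 := types.NewMap(types.Typ[types.String], types.Typ[types.Int])
	t2 := types.NewMap(types.Typ[types.String], types.Typ[types.Int])
	// Identical(t1, t2) implies Hash(t1) == Hash(t2).
	fmt.Println(types.Identical(t1, t2), h.Hash(t1) == h.Hash(t2)) // true true
}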
+ switch t := t.(type) { + case *types.Basic: + return uint32(t.Kind()) + + case *types.Alias: + return h.Hash(types.Unalias(t)) + + case *types.Array: + return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) + + case *types.Slice: + return 9049 + 2*h.Hash(t.Elem()) + + case *types.Struct: + var hash uint32 = 9059 + for i, n := 0, t.NumFields(); i < n; i++ { + f := t.Field(i) + if f.Anonymous() { + hash += 8861 + } + hash += hashString(t.Tag(i)) + hash += hashString(f.Name()) // (ignore f.Pkg) + hash += h.Hash(f.Type()) + } + return hash + + case *types.Pointer: + return 9067 + 2*h.Hash(t.Elem()) + + case *types.Signature: + var hash uint32 = 9091 + if t.Variadic() { + hash *= 8863 + } + + // Use a separate hasher for types inside of the signature, where type + // parameter identity is modified to be (index, constraint). We must use a + // new memo for this hasher as type identity may be affected by this + // masking. For example, in func[T any](*T), the identity of *T depends on + // whether we are mapping the argument in isolation, or recursively as part + // of hashing the signature. + // + // We should never encounter a generic signature while hashing another + // generic signature, but defensively set sigTParams only if h.mask is + // unset. + tparams := t.TypeParams() + if h.sigTParams == nil && tparams.Len() != 0 { + h = Hasher{ + // There may be something more efficient than discarding the existing + // memo, but it would require detecting whether types are 'tainted' by + // references to type parameters. + memo: make(map[types.Type]uint32), + // Re-using ptrMap ensures that pointer identity is preserved in this + // hasher. + ptrMap: h.ptrMap, + sigTParams: tparams, + } + } + + for i := 0; i < tparams.Len(); i++ { + tparam := tparams.At(i) + hash += 7 * h.Hash(tparam.Constraint()) + } + + return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) + + case *types.Union: + return h.hashUnion(t) + + case *types.Interface: + // Interfaces are identical if they have the same set of methods, with + // identical names and types, and they have the same set of type + // restrictions. See go/types.identical for more details. + var hash uint32 = 9103 + + // Hash methods. + for i, n := 0, t.NumMethods(); i < n; i++ { + // Method order is not significant. + // Ignore m.Pkg(). + m := t.Method(i) + // Use shallow hash on method signature to + // avoid anonymous interface cycles. + hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type()) + } + + // Hash type restrictions. + terms, err := typeparams.InterfaceTermSet(t) + // if err != nil t has invalid type restrictions. + if err == nil { + hash += h.hashTermSet(terms) + } + + return hash + + case *types.Map: + return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) + + case *types.Chan: + return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) + + case *types.Named: + hash := h.hashPtr(t.Obj()) + targs := t.TypeArgs() + for i := 0; i < targs.Len(); i++ { + targ := targs.At(i) + hash += 2 * h.Hash(targ) + } + return hash + + case *types.TypeParam: + return h.hashTypeParam(t) + + case *types.Tuple: + return h.hashTuple(t) + } + + panic(fmt.Sprintf("%T: %v", t, t)) +} + +func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { + // See go/types.identicalTypes for rationale. + n := tuple.Len() + hash := 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 3 * h.Hash(tuple.At(i).Type()) + } + return hash +} + +func (h Hasher) hashUnion(t *types.Union) uint32 { + // Hash type restrictions. 
+ terms, err := typeparams.UnionTermSet(t) + // if err != nil t has invalid type restrictions. Fall back on a non-zero + // hash. + if err != nil { + return 9151 + } + return h.hashTermSet(terms) +} + +func (h Hasher) hashTermSet(terms []*types.Term) uint32 { + hash := 9157 + 2*uint32(len(terms)) + for _, term := range terms { + // term order is not significant. + termHash := h.Hash(term.Type()) + if term.Tilde() { + termHash *= 9161 + } + hash += 3 * termHash + } + return hash +} + +// hashTypeParam returns a hash of the type parameter t, with a hash value +// depending on whether t is contained in h.sigTParams. +// +// If h.sigTParams is set and contains t, then we are in the process of hashing +// a signature, and the hash value of t must depend only on t's index and +// constraint: signatures are considered identical modulo type parameter +// renaming. To avoid infinite recursion, we only hash the type parameter +// index, and rely on types.Identical to handle signatures where constraints +// are not identical. +// +// Otherwise the hash of t depends only on t's pointer identity. +func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 { + if h.sigTParams != nil { + i := t.Index() + if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) { + return 9173 + 3*uint32(i) + } + } + return h.hashPtr(t.Obj()) +} + +// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that +// pointers values are not dependent on the GC. +func (h Hasher) hashPtr(ptr any) uint32 { + if hash, ok := h.ptrMap[ptr]; ok { + return hash + } + hash := uint32(reflect.ValueOf(ptr).Pointer()) + h.ptrMap[ptr] = hash + return hash +} + +// shallowHash computes a hash of t without looking at any of its +// element Types, to avoid potential anonymous cycles in the types of +// interface methods. +// +// When an unnamed non-empty interface type appears anywhere among the +// arguments or results of an interface method, there is a potential +// for endless recursion. Consider: +// +// type X interface { m() []*interface { X } } +// +// The problem is that the Methods of the interface in m's result type +// include m itself; there is no mention of the named type X that +// might help us break the cycle. +// (See comment in go/types.identical, case *Interface, for more.) +func (h Hasher) shallowHash(t types.Type) uint32 { + // t is the type of an interface method (Signature), + // its params or results (Tuples), or their immediate + // elements (mostly Slice, Pointer, Basic, Named), + // so there's no need to optimize anything else. + switch t := t.(type) { + case *types.Alias: + return h.shallowHash(types.Unalias(t)) + + case *types.Signature: + var hash uint32 = 604171 + if t.Variadic() { + hash *= 971767 + } + // The Signature/Tuple recursion is always finite + // and invariably shallow. 
+ return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results()) + + case *types.Tuple: + n := t.Len() + hash := 9137 + 2*uint32(n) + for i := 0; i < n; i++ { + hash += 53471161 * h.shallowHash(t.At(i).Type()) + } + return hash + + case *types.Basic: + return 45212177 * uint32(t.Kind()) + + case *types.Array: + return 1524181 + 2*uint32(t.Len()) + + case *types.Slice: + return 2690201 + + case *types.Struct: + return 3326489 + + case *types.Pointer: + return 4393139 + + case *types.Union: + return 562448657 + + case *types.Interface: + return 2124679 // no recursion here + + case *types.Map: + return 9109 + + case *types.Chan: + return 9127 + + case *types.Named: + return h.hashPtr(t.Obj()) + + case *types.TypeParam: + return h.hashPtr(t.Obj()) + } + panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go new file mode 100644 index 00000000..f7666028 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go @@ -0,0 +1,71 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements a cache of method sets. + +package typeutil + +import ( + "go/types" + "sync" +) + +// A MethodSetCache records the method set of each type T for which +// MethodSet(T) is called so that repeat queries are fast. +// The zero value is a ready-to-use cache instance. +type MethodSetCache struct { + mu sync.Mutex + named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N + others map[types.Type]*types.MethodSet // all other types +} + +// MethodSet returns the method set of type T. It is thread-safe. +// +// If cache is nil, this function is equivalent to types.NewMethodSet(T). +// Utility functions can thus expose an optional *MethodSetCache +// parameter to clients that care about performance. +func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { + if cache == nil { + return types.NewMethodSet(T) + } + cache.mu.Lock() + defer cache.mu.Unlock() + + switch T := types.Unalias(T).(type) { + case *types.Named: + return cache.lookupNamed(T).value + + case *types.Pointer: + if N, ok := types.Unalias(T.Elem()).(*types.Named); ok { + return cache.lookupNamed(N).pointer + } + } + + // all other types + // (The map uses pointer equivalence, not type identity.) + mset := cache.others[T] + if mset == nil { + mset = types.NewMethodSet(T) + if cache.others == nil { + cache.others = make(map[types.Type]*types.MethodSet) + } + cache.others[T] = mset + } + return mset +} + +func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { + if cache.named == nil { + cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) + } + // Avoid recomputing mset(*T) for each distinct Pointer + // instance whose underlying type is a named type. + msets, ok := cache.named[named] + if !ok { + msets.value = types.NewMethodSet(named) + msets.pointer = types.NewMethodSet(types.NewPointer(named)) + cache.named[named] = msets + } + return msets +} diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go new file mode 100644 index 00000000..9dda6a25 --- /dev/null +++ b/vendor/golang.org/x/tools/go/types/typeutil/ui.go @@ -0,0 +1,53 @@ +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeutil + +// This file defines utilities for user interfaces that display types. + +import ( + "go/types" +) + +// IntuitiveMethodSet returns the intuitive method set of a type T, +// which is the set of methods you can call on an addressable value of +// that type. +// +// The result always contains MethodSet(T), and is exactly MethodSet(T) +// for interface types and for pointer-to-concrete types. +// For all other concrete types T, the result additionally +// contains each method belonging to *T if there is no identically +// named method on T itself. +// +// This corresponds to user intuition about method sets; +// this function is intended only for user interfaces. +// +// The order of the result is as for types.MethodSet(T). +func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { + isPointerToConcrete := func(T types.Type) bool { + ptr, ok := types.Unalias(T).(*types.Pointer) + return ok && !types.IsInterface(ptr.Elem()) + } + + var result []*types.Selection + mset := msets.MethodSet(T) + if types.IsInterface(T) || isPointerToConcrete(T) { + for i, n := 0, mset.Len(); i < n; i++ { + result = append(result, mset.At(i)) + } + } else { + // T is some other concrete type. + // Report methods of T and *T, preferring those of T. + pmset := msets.MethodSet(types.NewPointer(T)) + for i, n := 0, pmset.Len(); i < n; i++ { + meth := pmset.At(i) + if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { + meth = m + } + result = append(result, meth) + } + + } + return result +} diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases.go b/vendor/golang.org/x/tools/internal/aliases/aliases.go index c24c2eee..b9425f5a 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases.go @@ -22,11 +22,17 @@ import ( // GODEBUG=gotypesalias=... by invoking the type checker. The Enabled // function is expensive and should be called once per task (e.g. // package import), not once per call to NewAlias. -func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type) *types.TypeName { +// +// Precondition: enabled || len(tparams)==0. +// If materialized aliases are disabled, there must not be any type parameters. +func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName { if enabled { tname := types.NewTypeName(pos, pkg, name, nil) - newAlias(tname, rhs) + SetTypeParams(types.NewAlias(tname, rhs), tparams) return tname } + if len(tparams) > 0 { + panic("cannot create an alias with type parameters when gotypesalias is not enabled") + } return types.NewTypeName(pos, pkg, name, rhs) } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go deleted file mode 100644 index 6652f7db..00000000 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go121.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package aliases - -import ( - "go/types" -) - -// Alias is a placeholder for a go/types.Alias for <=1.21. -// It will never be created by go/types. 
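// Illustrative aside (plain go/types, go1.22+; not part of the diff): the
// migration above swaps the internal aliases.Alias/aliases.Unalias wrappers
// for the standard types.Alias and types.Unalias, which resolve an alias
// chain to its right-hand-side type.
package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	tname := types.NewTypeName(token.NoPos, nil, "MyInt", nil)
	alias := types.NewAlias(tname, types.Typ[types.Int]) // MyInt = int
	fmt.Println(types.Unalias(alias) == types.Typ[types.Int]) // true: the alias resolves to int
	fmt.Println(tname.IsAlias())                              // true
}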
-type Alias struct{} - -func (*Alias) String() string { panic("unreachable") } -func (*Alias) Underlying() types.Type { panic("unreachable") } -func (*Alias) Obj() *types.TypeName { panic("unreachable") } -func Rhs(alias *Alias) types.Type { panic("unreachable") } -func TypeParams(alias *Alias) *types.TypeParamList { panic("unreachable") } -func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { panic("unreachable") } -func TypeArgs(alias *Alias) *types.TypeList { panic("unreachable") } -func Origin(alias *Alias) *Alias { panic("unreachable") } - -// Unalias returns the type t for go <=1.21. -func Unalias(t types.Type) types.Type { return t } - -func newAlias(name *types.TypeName, rhs types.Type) *Alias { panic("unreachable") } - -// Enabled reports whether [NewAlias] should create [types.Alias] types. -// -// Before go1.22, this function always returns false. -func Enabled() bool { return false } diff --git a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go index 3ef1afeb..7716a333 100644 --- a/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go +++ b/vendor/golang.org/x/tools/internal/aliases/aliases_go122.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build go1.22 -// +build go1.22 - package aliases import ( @@ -14,22 +11,19 @@ import ( "go/types" ) -// Alias is an alias of types.Alias. -type Alias = types.Alias - // Rhs returns the type on the right-hand side of the alias declaration. -func Rhs(alias *Alias) types.Type { +func Rhs(alias *types.Alias) types.Type { if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok { return alias.Rhs() // go1.23+ } // go1.22's Alias didn't have the Rhs method, // so Unalias is the best we can do. - return Unalias(alias) + return types.Unalias(alias) } // TypeParams returns the type parameter list of the alias. -func TypeParams(alias *Alias) *types.TypeParamList { +func TypeParams(alias *types.Alias) *types.TypeParamList { if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok { return alias.TypeParams() // go1.23+ } @@ -37,7 +31,7 @@ func TypeParams(alias *Alias) *types.TypeParamList { } // SetTypeParams sets the type parameters of the alias type. -func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { +func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) { if alias, ok := any(alias).(interface { SetTypeParams(tparams []*types.TypeParam) }); ok { @@ -48,7 +42,7 @@ func SetTypeParams(alias *Alias, tparams []*types.TypeParam) { } // TypeArgs returns the type arguments used to instantiate the Alias type. -func TypeArgs(alias *Alias) *types.TypeList { +func TypeArgs(alias *types.Alias) *types.TypeList { if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok { return alias.TypeArgs() // go1.23+ } @@ -57,26 +51,13 @@ func TypeArgs(alias *Alias) *types.TypeList { // Origin returns the generic Alias type of which alias is an instance. // If alias is not an instance of a generic alias, Origin returns alias. -func Origin(alias *Alias) *Alias { +func Origin(alias *types.Alias) *types.Alias { if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok { return alias.Origin() // go1.23+ } return alias // not an instance of a generic alias (go1.22) } -// Unalias is a wrapper of types.Unalias. -func Unalias(t types.Type) types.Type { return types.Unalias(t) } - -// newAlias is an internal alias around types.NewAlias. 
-// Direct usage is discouraged as the moment. -// Try to use NewAlias instead. -func newAlias(tname *types.TypeName, rhs types.Type) *Alias { - a := types.NewAlias(tname, rhs) - // TODO(go.dev/issue/65455): Remove kludgy workaround to set a.actual as a side-effect. - Unalias(a) - return a -} - // Enabled reports whether [NewAlias] should create [types.Alias] types. // // This function is expensive! Call it sparingly. @@ -92,7 +73,7 @@ func Enabled() bool { // many tests. Therefore any attempt to cache the result // is just incorrect. fset := token.NewFileSet() - f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", 0) + f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution) pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil) _, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias) return enabled diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go index d98b0db2..d79a605e 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go @@ -87,64 +87,3 @@ func chanDir(d int) types.ChanDir { return 0 } } - -var predeclOnce sync.Once -var predecl []types.Type // initialized lazily - -func predeclared() []types.Type { - predeclOnce.Do(func() { - // initialize lazily to be sure that all - // elements have been initialized before - predecl = []types.Type{ // basic types - types.Typ[types.Bool], - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Uint], - types.Typ[types.Uint8], - types.Typ[types.Uint16], - types.Typ[types.Uint32], - types.Typ[types.Uint64], - types.Typ[types.Uintptr], - types.Typ[types.Float32], - types.Typ[types.Float64], - types.Typ[types.Complex64], - types.Typ[types.Complex128], - types.Typ[types.String], - - // basic type aliases - types.Universe.Lookup("byte").Type(), - types.Universe.Lookup("rune").Type(), - - // error - types.Universe.Lookup("error").Type(), - - // untyped types - types.Typ[types.UntypedBool], - types.Typ[types.UntypedInt], - types.Typ[types.UntypedRune], - types.Typ[types.UntypedFloat], - types.Typ[types.UntypedComplex], - types.Typ[types.UntypedString], - types.Typ[types.UntypedNil], - - // package unsafe - types.Typ[types.UnsafePointer], - - // invalid type - types.Typ[types.Invalid], // only appears in packages with errors - - // used internally by gc; never used by this package or in .a files - anyType{}, - } - predecl = append(predecl, additionalPredeclared()...) - }) - return predecl -} - -type anyType struct{} - -func (t anyType) Underlying() types.Type { return t } -func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index 39df9112..e6c5d51f 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -232,14 +232,19 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func // Select appropriate importer. if len(data) > 0 { switch data[0] { - case 'v', 'c', 'd': // binary, till go1.10 + case 'v', 'c', 'd': + // binary: emitted by cmd/compile till go1.10; obsolete. 
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - case 'i': // indexed, till go1.19 + case 'i': + // indexed: emitted by cmd/compile till go1.19; + // now used only for serializing go/types. + // See https://github.com/golang/go/issues/69491. _, pkg, err := IImportData(fset, packages, data[1:], id) return pkg, err - case 'u': // unified, from go1.20 + case 'u': + // unified: emitted by cmd/compile since go1.20. _, pkg, err := UImportData(fset, packages, data[1:size], id) return pkg, err diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index deeb67f3..1e19fbed 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -2,9 +2,227 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Indexed binary package export. -// This file was derived from $GOROOT/src/cmd/compile/internal/gc/iexport.go; -// see that file for specification of the format. +// Indexed package export. +// +// The indexed export data format is an evolution of the previous +// binary export data format. Its chief contribution is introducing an +// index table, which allows efficient random access of individual +// declarations and inline function bodies. In turn, this allows +// avoiding unnecessary work for compilation units that import large +// packages. +// +// +// The top-level data format is structured as: +// +// Header struct { +// Tag byte // 'i' +// Version uvarint +// StringSize uvarint +// DataSize uvarint +// } +// +// Strings [StringSize]byte +// Data [DataSize]byte +// +// MainIndex []struct{ +// PkgPath stringOff +// PkgName stringOff +// PkgHeight uvarint +// +// Decls []struct{ +// Name stringOff +// Offset declOff +// } +// } +// +// Fingerprint [8]byte +// +// uvarint means a uint64 written out using uvarint encoding. +// +// []T means a uvarint followed by that many T objects. In other +// words: +// +// Len uvarint +// Elems [Len]T +// +// stringOff means a uvarint that indicates an offset within the +// Strings section. At that offset is another uvarint, followed by +// that many bytes, which form the string value. +// +// declOff means a uvarint that indicates an offset within the Data +// section where the associated declaration can be found. +// +// +// There are five kinds of declarations, distinguished by their first +// byte: +// +// type Var struct { +// Tag byte // 'V' +// Pos Pos +// Type typeOff +// } +// +// type Func struct { +// Tag byte // 'F' or 'G' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'G' +// Signature Signature +// } +// +// type Const struct { +// Tag byte // 'C' +// Pos Pos +// Value Value +// } +// +// type Type struct { +// Tag byte // 'T' or 'U' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'U' +// Underlying typeOff +// +// Methods []struct{ // omitted if Underlying is an interface type +// Pos Pos +// Name stringOff +// Recv Param +// Signature Signature +// } +// } +// +// type Alias struct { +// Tag byte // 'A' or 'B' +// Pos Pos +// TypeParams []typeOff // only present if Tag == 'B' +// Type typeOff +// } +// +// // "Automatic" declaration of each typeparam +// type TypeParam struct { +// Tag byte // 'P' +// Pos Pos +// Implicit bool +// Constraint typeOff +// } +// +// typeOff means a uvarint that either indicates a predeclared type, +// or an offset into the Data section. 
If the uvarint is less than +// predeclReserved, then it indicates the index into the predeclared +// types list (see predeclared in bexport.go for order). Otherwise, +// subtracting predeclReserved yields the offset of a type descriptor. +// +// Value means a type, kind, and type-specific value. See +// (*exportWriter).value for details. +// +// +// There are twelve kinds of type descriptors, distinguished by an itag: +// +// type DefinedType struct { +// Tag itag // definedType +// Name stringOff +// PkgPath stringOff +// } +// +// type PointerType struct { +// Tag itag // pointerType +// Elem typeOff +// } +// +// type SliceType struct { +// Tag itag // sliceType +// Elem typeOff +// } +// +// type ArrayType struct { +// Tag itag // arrayType +// Len uint64 +// Elem typeOff +// } +// +// type ChanType struct { +// Tag itag // chanType +// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv +// Elem typeOff +// } +// +// type MapType struct { +// Tag itag // mapType +// Key typeOff +// Elem typeOff +// } +// +// type FuncType struct { +// Tag itag // signatureType +// PkgPath stringOff +// Signature Signature +// } +// +// type StructType struct { +// Tag itag // structType +// PkgPath stringOff +// Fields []struct { +// Pos Pos +// Name stringOff +// Type typeOff +// Embedded bool +// Note stringOff +// } +// } +// +// type InterfaceType struct { +// Tag itag // interfaceType +// PkgPath stringOff +// Embeddeds []struct { +// Pos Pos +// Type typeOff +// } +// Methods []struct { +// Pos Pos +// Name stringOff +// Signature Signature +// } +// } +// +// // Reference to a type param declaration +// type TypeParamType struct { +// Tag itag // typeParamType +// Name stringOff +// PkgPath stringOff +// } +// +// // Instantiation of a generic type (like List[T2] or List[int]) +// type InstanceType struct { +// Tag itag // instanceType +// Pos pos +// TypeArgs []typeOff +// BaseType typeOff +// } +// +// type UnionType struct { +// Tag itag // interfaceType +// Terms []struct { +// tilde bool +// Type typeOff +// } +// } +// +// +// +// type Signature struct { +// Params []Param +// Results []Param +// Variadic bool // omitted if Results is empty +// } +// +// type Param struct { +// Pos Pos +// Name stringOff +// Type typOff +// } +// +// +// Pos encodes a file:line:column triple, incorporating a simple delta +// encoding scheme within a data object. See exportWriter.pos for +// details. package gcimporter @@ -24,7 +242,6 @@ import ( "golang.org/x/tools/go/types/objectpath" "golang.org/x/tools/internal/aliases" - "golang.org/x/tools/internal/tokeninternal" ) // IExportShallow encodes "shallow" export data for the specified package. @@ -223,7 +440,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) // Sort the set of needed offsets. Duplicates are harmless. 
sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] }) - lines := tokeninternal.GetLines(file) // byte offset of each line start + lines := file.Lines() // byte offset of each line start w.uint64(uint64(len(lines))) // Rather than record the entire array of line start offsets, @@ -507,13 +724,13 @@ func (p *iexporter) doDecl(obj types.Object) { case *types.TypeName: t := obj.Type() - if tparam, ok := aliases.Unalias(t).(*types.TypeParam); ok { + if tparam, ok := types.Unalias(t).(*types.TypeParam); ok { w.tag(typeParamTag) w.pos(obj.Pos()) constraint := tparam.Constraint() if p.version >= iexportVersionGo1_18 { implicit := false - if iface, _ := aliases.Unalias(constraint).(*types.Interface); iface != nil { + if iface, _ := types.Unalias(constraint).(*types.Interface); iface != nil { implicit = iface.IsImplicit() } w.bool(implicit) @@ -523,9 +740,22 @@ func (p *iexporter) doDecl(obj types.Object) { } if obj.IsAlias() { - w.tag(aliasTag) + alias, materialized := t.(*types.Alias) // may fail when aliases are not enabled + + var tparams *types.TypeParamList + if materialized { + tparams = aliases.TypeParams(alias) + } + if tparams.Len() == 0 { + w.tag(aliasTag) + } else { + w.tag(genericAliasTag) + } w.pos(obj.Pos()) - if alias, ok := t.(*aliases.Alias); ok { + if tparams.Len() > 0 { + w.tparamList(obj.Name(), tparams, obj.Pkg()) + } + if materialized { // Preserve materialized aliases, // even of non-exported types. t = aliases.Rhs(alias) @@ -744,8 +974,14 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { }() } switch t := t.(type) { - case *aliases.Alias: - // TODO(adonovan): support parameterized aliases, following *types.Named. + case *types.Alias: + if targs := aliases.TypeArgs(t); targs.Len() > 0 { + w.startType(instanceType) + w.pos(t.Obj().Pos()) + w.typeList(targs, pkg) + w.typ(aliases.Origin(t), pkg) + return + } w.startType(aliasType) w.qualifiedType(t.Obj()) @@ -854,7 +1090,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) { for i := 0; i < n; i++ { ft := t.EmbeddedType(i) tPkg := pkg - if named, _ := aliases.Unalias(ft).(*types.Named); named != nil { + if named, _ := types.Unalias(ft).(*types.Named); named != nil { w.pos(named.Obj().Pos()) } else { w.pos(token.NoPos) diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index 136aa036..21908a15 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Indexed package import. -// See cmd/compile/internal/gc/iexport.go for the export data format. +// See iexport.go for the export data format. // This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. 
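// Illustrative aside (standard library only, go1.21+; not part of the diff):
// the exporter above now reads line-start offsets via (*token.File).Lines
// instead of the internal tokeninternal.GetLines helper; the method returns
// the byte offset at which each line begins.
package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "x.go", "package p\n\nvar X = 1\n", 0)
	if err != nil {
		panic(err)
	}
	tf := fset.File(f.Pos())
	fmt.Println(tf.Lines()) // [0 10 11]: byte offset of each line start
}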
@@ -53,6 +53,7 @@ const ( iexportVersionPosCol = 1 iexportVersionGo1_18 = 2 iexportVersionGenerics = 2 + iexportVersion = iexportVersionGenerics iexportVersionCurrent = 2 ) @@ -540,7 +541,7 @@ func canReuse(def *types.Named, rhs types.Type) bool { if def == nil { return true } - iface, _ := aliases.Unalias(rhs).(*types.Interface) + iface, _ := types.Unalias(rhs).(*types.Interface) if iface == nil { return true } @@ -562,14 +563,14 @@ func (r *importReader) obj(name string) { pos := r.pos() switch tag { - case aliasTag: + case aliasTag, genericAliasTag: + var tparams []*types.TypeParam + if tag == genericAliasTag { + tparams = r.tparamList() + } typ := r.typ() - // TODO(adonovan): support generic aliases: - // if tag == genericAliasTag { - // tparams := r.tparamList() - // alias.SetTypeParams(tparams) - // } - r.declare(aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ)) + obj := aliases.NewAlias(r.p.aliases, pos, r.currPkg, name, typ, tparams) + r.declare(obj) case constTag: typ, val := r.value() @@ -615,7 +616,7 @@ func (r *importReader) obj(name string) { if targs.Len() > 0 { rparams = make([]*types.TypeParam, targs.Len()) for i := range rparams { - rparams[i] = aliases.Unalias(targs.At(i)).(*types.TypeParam) + rparams[i] = types.Unalias(targs.At(i)).(*types.TypeParam) } } msig := r.signature(recv, rparams, nil) @@ -645,7 +646,7 @@ func (r *importReader) obj(name string) { } constraint := r.typ() if implicit { - iface, _ := aliases.Unalias(constraint).(*types.Interface) + iface, _ := types.Unalias(constraint).(*types.Interface) if iface == nil { errorf("non-interface constraint marked implicit") } @@ -852,7 +853,7 @@ func (r *importReader) typ() types.Type { } func isInterface(t types.Type) bool { - _, ok := aliases.Unalias(t).(*types.Interface) + _, ok := types.Unalias(t).(*types.Interface) return ok } @@ -862,7 +863,7 @@ func (r *importReader) string() string { return r.p.stringAt(r.uint64()) } func (r *importReader) doType(base *types.Named) (res types.Type) { k := r.kind() if debug { - r.p.trace("importing type %d (base: %s)", k, base) + r.p.trace("importing type %d (base: %v)", k, base) r.p.indent++ defer func() { r.p.indent-- @@ -959,7 +960,7 @@ func (r *importReader) doType(base *types.Named) (res types.Type) { methods[i] = method } - typ := newInterface(methods, embeddeds) + typ := types.NewInterfaceType(methods, embeddeds) r.p.interfaceList = append(r.p.interfaceList, typ) return typ @@ -1051,7 +1052,7 @@ func (r *importReader) tparamList() []*types.TypeParam { for i := range xs { // Note: the standard library importer is tolerant of nil types here, // though would panic in SetTypeParams. - xs[i] = aliases.Unalias(r.typ()).(*types.TypeParam) + xs[i] = types.Unalias(r.typ()).(*types.TypeParam) } return xs } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go deleted file mode 100644 index 8b163e3d..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface10.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !go1.11 -// +build !go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - named := make([]*types.Named, len(embeddeds)) - for i, e := range embeddeds { - var ok bool - named[i], ok = e.(*types.Named) - if !ok { - panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11") - } - } - return types.NewInterface(methods, named) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go b/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go deleted file mode 100644 index 49984f40..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/newInterface11.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.11 -// +build go1.11 - -package gcimporter - -import "go/types" - -func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface { - return types.NewInterfaceType(methods, embeddeds) -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go new file mode 100644 index 00000000..907c8557 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/predeclared.go @@ -0,0 +1,91 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "go/types" + "sync" +) + +// predecl is a cache for the predeclared types in types.Universe. +// +// Cache a distinct result based on the runtime value of any. +// The pointer value of the any type varies based on GODEBUG settings. 
+var predeclMu sync.Mutex +var predecl map[types.Type][]types.Type + +func predeclared() []types.Type { + anyt := types.Universe.Lookup("any").Type() + + predeclMu.Lock() + defer predeclMu.Unlock() + + if pre, ok := predecl[anyt]; ok { + return pre + } + + if predecl == nil { + predecl = make(map[types.Type][]types.Type) + } + + decls := []types.Type{ // basic types + types.Typ[types.Bool], + types.Typ[types.Int], + types.Typ[types.Int8], + types.Typ[types.Int16], + types.Typ[types.Int32], + types.Typ[types.Int64], + types.Typ[types.Uint], + types.Typ[types.Uint8], + types.Typ[types.Uint16], + types.Typ[types.Uint32], + types.Typ[types.Uint64], + types.Typ[types.Uintptr], + types.Typ[types.Float32], + types.Typ[types.Float64], + types.Typ[types.Complex64], + types.Typ[types.Complex128], + types.Typ[types.String], + + // basic type aliases + types.Universe.Lookup("byte").Type(), + types.Universe.Lookup("rune").Type(), + + // error + types.Universe.Lookup("error").Type(), + + // untyped types + types.Typ[types.UntypedBool], + types.Typ[types.UntypedInt], + types.Typ[types.UntypedRune], + types.Typ[types.UntypedFloat], + types.Typ[types.UntypedComplex], + types.Typ[types.UntypedString], + types.Typ[types.UntypedNil], + + // package unsafe + types.Typ[types.UnsafePointer], + + // invalid type + types.Typ[types.Invalid], // only appears in packages with errors + + // used internally by gc; never used by this package or in .a files + anyType{}, + + // comparable + types.Universe.Lookup("comparable").Type(), + + // any + anyt, + } + + predecl[anyt] = decls + return decls +} + +type anyType struct{} + +func (t anyType) Underlying() types.Type { return t } +func (t anyType) String() string { return "any" } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go b/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go deleted file mode 100644 index 0cd3b91b..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/support_go118.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package gcimporter - -import "go/types" - -const iexportVersion = iexportVersionGenerics - -// additionalPredeclared returns additional predeclared types in go.1.18. -func additionalPredeclared() []types.Type { - return []types.Type{ - // comparable - types.Universe.Lookup("comparable").Type(), - - // any - types.Universe.Lookup("any").Type(), - } -} - -// See cmd/compile/internal/types.SplitVargenSuffix. -func splitVargenSuffix(name string) (base, suffix string) { - i := len(name) - for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { - i-- - } - const dot = "·" - if i >= len(dot) && name[i-len(dot):i] == dot { - i -= len(dot) - return name[:i], name[i:] - } - return name, "" -} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go deleted file mode 100644 index 38b624ca..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_no.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build !goexperiment.unified -// +build !goexperiment.unified - -package gcimporter - -const unifiedIR = false diff --git a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go deleted file mode 100644 index b5118d0b..00000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/unified_yes.go +++ /dev/null @@ -1,10 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build goexperiment.unified -// +build goexperiment.unified - -package gcimporter - -const unifiedIR = true diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 2c077068..1db40861 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -52,8 +52,7 @@ func (pr *pkgReader) later(fn func()) { // See cmd/compile/internal/noder.derivedInfo. type derivedInfo struct { - idx pkgbits.Index - needed bool + idx pkgbits.Index } // See cmd/compile/internal/noder.typeInfo. @@ -110,13 +109,17 @@ func readUnifiedPackage(fset *token.FileSet, ctxt *types.Context, imports map[st r := pr.newReader(pkgbits.RelocMeta, pkgbits.PublicRootIdx, pkgbits.SyncPublic) pkg := r.pkg() - r.Bool() // has init + if r.Version().Has(pkgbits.HasInit) { + r.Bool() + } for i, n := 0, r.Len(); i < n; i++ { // As if r.obj(), but avoiding the Scope.Lookup call, // to avoid eager loading of imports. r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } r.p.objIdx(r.Reloc(pkgbits.RelocObj)) assert(r.Len() == 0) } @@ -165,7 +168,7 @@ type readerDict struct { // tparams is a slice of the constructed TypeParams for the element. tparams []*types.TypeParam - // devived is a slice of types derived from tparams, which may be + // derived is a slice of types derived from tparams, which may be // instantiated while reading the current element. derived []derivedInfo derivedTypes []types.Type // lazily instantiated from derived @@ -471,7 +474,9 @@ func (r *reader) param() *types.Var { func (r *reader) obj() (types.Object, []types.Type) { r.Sync(pkgbits.SyncObject) - assert(!r.Bool()) + if r.Version().Has(pkgbits.DerivedFuncInstance) { + assert(!r.Bool()) + } pkg, name := r.p.objIdx(r.Reloc(pkgbits.RelocObj)) obj := pkgScope(pkg).Lookup(name) @@ -525,8 +530,12 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjAlias: pos := r.pos() + var tparams []*types.TypeParam + if r.Version().Has(pkgbits.AliasTypeParamNames) { + tparams = r.typeParamNames() + } typ := r.typ() - declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ)) + declare(aliases.NewAlias(r.p.aliases, pos, objPkg, objName, typ, tparams)) case pkgbits.ObjConst: pos := r.pos() @@ -553,7 +562,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { // If the underlying type is an interface, we need to // duplicate its methods so we can replace the receiver // parameter's type (#49906). 
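The receiver-duplication workaround for golang/go#49906 noted here rebuilds each explicit interface method so its receiver points at the defined type rather than at the interface itself. A rough standalone sketch of that rewrite, using the predeclared error interface and a hypothetical MyErr type; it is an illustration of the idea, not the vendored code path.

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	pkg := types.NewPackage("example.com/demo", "demo")

	// The method as it appears on the interface: Error() string.
	errIface := types.Universe.Lookup("error").Type().Underlying().(*types.Interface)
	fn := errIface.ExplicitMethod(0)
	sig := fn.Type().(*types.Signature)

	// A defined type whose underlying type is that interface (hypothetical).
	named := types.NewNamed(types.NewTypeName(token.NoPos, pkg, "MyErr", nil), errIface, nil)

	// Rebuild the method with the receiver re-pointed at the defined type.
	recv := types.NewVar(token.NoPos, pkg, "", named)
	rebuilt := types.NewFunc(token.NoPos, pkg, fn.Name(),
		types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic()))
	fmt.Println(rebuilt) // prints something like: func (example.com/demo.MyErr).Error() string
}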
- if iface, ok := aliases.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { + if iface, ok := types.Unalias(underlying).(*types.Interface); ok && iface.NumExplicitMethods() != 0 { methods := make([]*types.Func, iface.NumExplicitMethods()) for i := range methods { fn := iface.ExplicitMethod(i) @@ -632,7 +641,10 @@ func (pr *pkgReader) objDictIdx(idx pkgbits.Index) *readerDict { dict.derived = make([]derivedInfo, r.Len()) dict.derivedTypes = make([]types.Type, len(dict.derived)) for i := range dict.derived { - dict.derived[i] = derivedInfo{r.Reloc(pkgbits.RelocType), r.Bool()} + dict.derived[i] = derivedInfo{idx: r.Reloc(pkgbits.RelocType)} + if r.Version().Has(pkgbits.DerivedInfoNeeded) { + assert(!r.Bool()) + } } pr.retireReader(r) @@ -726,3 +738,17 @@ func pkgScope(pkg *types.Package) *types.Scope { } return types.Universe } + +// See cmd/compile/internal/types.SplitVargenSuffix. +func splitVargenSuffix(name string) (base, suffix string) { + i := len(name) + for i > 0 && name[i-1] >= '0' && name[i-1] <= '9' { + i-- + } + const dot = "·" + if i >= len(dot) && name[i-len(dot):i] == dot { + i -= len(dot) + return name[:i], name[i:] + } + return name, "" +} diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index 2e59ff85..e333efc8 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -16,7 +16,6 @@ import ( "os" "os/exec" "path/filepath" - "reflect" "regexp" "runtime" "strconv" @@ -250,16 +249,13 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error { cmd.Stdout = stdout cmd.Stderr = stderr - // cmd.WaitDelay was added only in go1.20 (see #50436). - if waitDelay := reflect.ValueOf(cmd).Elem().FieldByName("WaitDelay"); waitDelay.IsValid() { - // https://go.dev/issue/59541: don't wait forever copying stderr - // after the command has exited. - // After CL 484741 we copy stdout manually, so we we'll stop reading that as - // soon as ctx is done. However, we also don't want to wait around forever - // for stderr. Give a much-longer-than-reasonable delay and then assume that - // something has wedged in the kernel or runtime. - waitDelay.Set(reflect.ValueOf(30 * time.Second)) - } + // https://go.dev/issue/59541: don't wait forever copying stderr + // after the command has exited. + // After CL 484741 we copy stdout manually, so we we'll stop reading that as + // soon as ctx is done. However, we also don't want to wait around forever + // for stderr. Give a much-longer-than-reasonable delay and then assume that + // something has wedged in the kernel or runtime. + cmd.WaitDelay = 30 * time.Second // The cwd gets resolved to the real path. 
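Dropping the reflection shim in invoke.go above is possible because exec.Cmd.WaitDelay has been an ordinary field since go1.20 and the module now targets go1.22+. A minimal sketch of the same pattern outside gocommand; the go env invocation is only an example and assumes go is on PATH.

package main

import (
	"context"
	"fmt"
	"os/exec"
	"time"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	cmd := exec.CommandContext(ctx, "go", "env", "GOMODCACHE")
	// Bound how long Wait keeps draining output after the process exits or
	// the context is cancelled, instead of blocking indefinitely.
	cmd.WaitDelay = 30 * time.Second

	out, err := cmd.Output()
	fmt.Printf("%s err=%v\n", out, err)
}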
On Darwin, where // /tmp is a symlink, this breaks anything that expects the diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index dc7d50a7..c1510817 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -131,7 +131,7 @@ func parseOtherFiles(ctx context.Context, fset *token.FileSet, srcDir, filename continue } - f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, 0) + f, err := parser.ParseFile(fset, filepath.Join(srcDir, fi.Name()), nil, parser.SkipObjectResolution) if err != nil { continue } @@ -1620,6 +1620,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl } fullFile := filepath.Join(dir, fi.Name()) + // Legacy ast.Object resolution is needed here. f, err := parser.ParseFile(fset, fullFile, nil, 0) if err != nil { env.logf("error parsing %v: %v", fullFile, err) diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index f8346552..ff6b59a5 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -86,7 +86,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e // Don't use parse() -- we don't care about fragments or statement lists // here, and we need to work with unparseable files. fileSet := token.NewFileSet() - parserMode := parser.Mode(0) + parserMode := parser.SkipObjectResolution if opt.Comments { parserMode |= parser.ParseComments } @@ -165,7 +165,7 @@ func formatFile(fset *token.FileSet, file *ast.File, src []byte, adjust func(ori // parse parses src, which was read from filename, // as a Go source file or statement list. func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast.File, func(orig, src []byte) []byte, error) { - parserMode := parser.Mode(0) + var parserMode parser.Mode // legacy ast.Object resolution is required here if opt.Comments { parserMode |= parser.ParseComments } diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 91221fda..8555e3f8 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -245,7 +245,10 @@ func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleRe // 2. Use this to separate module cache scanning from other scanning. func gomodcacheForEnv(goenv map[string]string) string { if gmc := goenv["GOMODCACHE"]; gmc != "" { - return gmc + // golang/go#67156: ensure that the module cache is clean, since it is + // assumed as a prefix to directories scanned by gopathwalk, which are + // themselves clean. 
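Two of the mod.go fixes here are about path hygiene: GOMODCACHE is cleaned before being used as a scan root, and (in scanDirForPackage just below) the root is only stripped as a prefix when the directory really lies under it, separator included. A small sketch on a Unix-style path; the user path and the tools version in it are made up.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

func main() {
	gmc := "/home/user/go/pkg/mod/" // as an environment might hand it over
	root := filepath.Clean(gmc)     // "/home/user/go/pkg/mod"

	dir := filepath.Join(root, "golang.org/x/tools@v0.24.0/gopls")

	// Only strip the root when dir is actually below it.
	if prefix := root + string(filepath.Separator); strings.HasPrefix(dir, prefix) {
		fmt.Println(filepath.ToSlash(dir[len(prefix):])) // golang.org/x/tools@v0.24.0/gopls
	}
}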
+ return filepath.Clean(gmc) } gopaths := filepath.SplitList(goenv["GOPATH"]) if len(gopaths) == 0 { @@ -740,8 +743,8 @@ func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { subdir := "" - if dir != root.Path { - subdir = dir[len(root.Path)+len("/"):] + if prefix := root.Path + string(filepath.Separator); strings.HasPrefix(dir, prefix) { + subdir = dir[len(prefix):] } importPath := filepath.ToSlash(subdir) if strings.HasPrefix(importPath, "vendor/") { diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go index b92e8e6e..f6cb37c5 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go @@ -21,7 +21,7 @@ import ( // export data. type PkgDecoder struct { // version is the file format version. - version uint32 + version Version // sync indicates whether the file uses sync markers. sync bool @@ -68,8 +68,6 @@ func (pr *PkgDecoder) SyncMarkers() bool { return pr.sync } // NewPkgDecoder returns a PkgDecoder initialized to read the Unified // IR export data from input. pkgPath is the package path for the // compilation unit that produced the export data. -// -// TODO(mdempsky): Remove pkgPath parameter; unneeded since CL 391014. func NewPkgDecoder(pkgPath, input string) PkgDecoder { pr := PkgDecoder{ pkgPath: pkgPath, @@ -80,14 +78,15 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { r := strings.NewReader(input) - assert(binary.Read(r, binary.LittleEndian, &pr.version) == nil) + var ver uint32 + assert(binary.Read(r, binary.LittleEndian, &ver) == nil) + pr.version = Version(ver) - switch pr.version { - default: - panic(fmt.Errorf("unsupported version: %v", pr.version)) - case 0: - // no flags - case 1: + if pr.version >= numVersions { + panic(fmt.Errorf("cannot decode %q, export data version %d is greater than maximum supported version %d", pkgPath, pr.version, numVersions-1)) + } + + if pr.version.Has(Flags) { var flags uint32 assert(binary.Read(r, binary.LittleEndian, &flags) == nil) pr.sync = flags&flagSyncMarkers != 0 @@ -102,7 +101,9 @@ func NewPkgDecoder(pkgPath, input string) PkgDecoder { assert(err == nil) pr.elemData = input[pos:] - assert(len(pr.elemData)-8 == int(pr.elemEnds[len(pr.elemEnds)-1])) + + const fingerprintSize = 8 + assert(len(pr.elemData)-fingerprintSize == int(pr.elemEnds[len(pr.elemEnds)-1])) return pr } @@ -136,7 +137,7 @@ func (pr *PkgDecoder) AbsIdx(k RelocKind, idx Index) int { absIdx += int(pr.elemEndsEnds[k-1]) } if absIdx >= int(pr.elemEndsEnds[k]) { - errorf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) + panicf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds) } return absIdx } @@ -193,9 +194,7 @@ func (pr *PkgDecoder) NewDecoderRaw(k RelocKind, idx Index) Decoder { Idx: idx, } - // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved. - r.Data = *strings.NewReader(pr.DataIdx(k, idx)) - + r.Data.Reset(pr.DataIdx(k, idx)) r.Sync(SyncRelocs) r.Relocs = make([]RelocEnt, r.Len()) for i := range r.Relocs { @@ -244,7 +243,7 @@ type Decoder struct { func (r *Decoder) checkErr(err error) { if err != nil { - errorf("unexpected decoding error: %w", err) + panicf("unexpected decoding error: %w", err) } } @@ -515,3 +514,6 @@ func (pr *PkgDecoder) PeekObj(idx Index) (string, string, CodeObj) { return path, name, tag } + +// Version reports the version of the bitstream. 
+func (w *Decoder) Version() Version { return w.common.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go index 6482617a..c17a1239 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/encoder.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/encoder.go @@ -12,18 +12,15 @@ import ( "io" "math/big" "runtime" + "strings" ) -// currentVersion is the current version number. -// -// - v0: initial prototype -// -// - v1: adds the flags uint32 word -const currentVersion uint32 = 1 - // A PkgEncoder provides methods for encoding a package's Unified IR // export data. type PkgEncoder struct { + // version of the bitstream. + version Version + // elems holds the bitstream for previously encoded elements. elems [numRelocs][]string @@ -47,8 +44,9 @@ func (pw *PkgEncoder) SyncMarkers() bool { return pw.syncFrames >= 0 } // export data files, but can help diagnosing desync errors in // higher-level Unified IR reader/writer code. If syncFrames is // negative, then sync markers are omitted entirely. -func NewPkgEncoder(syncFrames int) PkgEncoder { +func NewPkgEncoder(version Version, syncFrames int) PkgEncoder { return PkgEncoder{ + version: version, stringsIdx: make(map[string]Index), syncFrames: syncFrames, } @@ -64,13 +62,15 @@ func (pw *PkgEncoder) DumpTo(out0 io.Writer) (fingerprint [8]byte) { assert(binary.Write(out, binary.LittleEndian, x) == nil) } - writeUint32(currentVersion) + writeUint32(uint32(pw.version)) - var flags uint32 - if pw.SyncMarkers() { - flags |= flagSyncMarkers + if pw.version.Has(Flags) { + var flags uint32 + if pw.SyncMarkers() { + flags |= flagSyncMarkers + } + writeUint32(flags) } - writeUint32(flags) // Write elemEndsEnds. var sum uint32 @@ -159,7 +159,7 @@ type Encoder struct { // Flush finalizes the element's bitstream and returns its Index. func (w *Encoder) Flush() Index { - var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved + var sb strings.Builder // Backup the data so we write the relocations at the front. var tmp bytes.Buffer @@ -189,7 +189,7 @@ func (w *Encoder) Flush() Index { func (w *Encoder) checkErr(err error) { if err != nil { - errorf("unexpected encoding error: %v", err) + panicf("unexpected encoding error: %v", err) } } @@ -320,8 +320,14 @@ func (w *Encoder) Code(c Code) { // section (if not already present), and then writing a relocation // into the element bitstream. func (w *Encoder) String(s string) { + w.StringRef(w.p.StringIdx(s)) +} + +// StringRef writes a reference to the given index, which must be a +// previously encoded string value. +func (w *Encoder) StringRef(idx Index) { w.Sync(SyncString) - w.Reloc(RelocString, w.p.StringIdx(s)) + w.Reloc(RelocString, idx) } // Strings encodes and writes a variable-length slice of strings into @@ -348,7 +354,7 @@ func (w *Encoder) Value(val constant.Value) { func (w *Encoder) scalar(val constant.Value) { switch v := constant.Val(val).(type) { default: - errorf("unhandled %v (%v)", val, val.Kind()) + panicf("unhandled %v (%v)", val, val.Kind()) case bool: w.Code(ValBool) w.Bool(v) @@ -381,3 +387,6 @@ func (w *Encoder) bigFloat(v *big.Float) { b := v.Append(nil, 'p', -1) w.String(string(b)) // TODO: More efficient encoding. } + +// Version reports the version of the bitstream. 
+func (w *Encoder) Version() Version { return w.p.version } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go deleted file mode 100644 index 5294f6a6..00000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go1.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.7 -// +build !go1.7 - -// TODO(mdempsky): Remove after #44505 is resolved - -package pkgbits - -import "runtime" - -func walkFrames(pcs []uintptr, visit frameVisitor) { - for _, pc := range pcs { - fn := runtime.FuncForPC(pc) - file, line := fn.FileLine(pc) - - visit(file, line, fn.Name(), pc-fn.Entry()) - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go b/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go deleted file mode 100644 index 2324ae7a..00000000 --- a/vendor/golang.org/x/tools/internal/pkgbits/frames_go17.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.7 -// +build go1.7 - -package pkgbits - -import "runtime" - -// walkFrames calls visit for each call frame represented by pcs. -// -// pcs should be a slice of PCs, as returned by runtime.Callers. -func walkFrames(pcs []uintptr, visit frameVisitor) { - if len(pcs) == 0 { - return - } - - frames := runtime.CallersFrames(pcs) - for { - frame, more := frames.Next() - visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) - if !more { - return - } - } -} diff --git a/vendor/golang.org/x/tools/internal/pkgbits/support.go b/vendor/golang.org/x/tools/internal/pkgbits/support.go index ad26d3b2..50534a29 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/support.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/support.go @@ -12,6 +12,6 @@ func assert(b bool) { } } -func errorf(format string, args ...interface{}) { +func panicf(format string, args ...any) { panic(fmt.Errorf(format, args...)) } diff --git a/vendor/golang.org/x/tools/internal/pkgbits/sync.go b/vendor/golang.org/x/tools/internal/pkgbits/sync.go index 5bd51ef7..1520b73a 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/sync.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/sync.go @@ -6,6 +6,7 @@ package pkgbits import ( "fmt" + "runtime" "strings" ) @@ -23,6 +24,24 @@ func fmtFrames(pcs ...uintptr) []string { type frameVisitor func(file string, line int, name string, offset uintptr) +// walkFrames calls visit for each call frame represented by pcs. +// +// pcs should be a slice of PCs, as returned by runtime.Callers. +func walkFrames(pcs []uintptr, visit frameVisitor) { + if len(pcs) == 0 { + return + } + + frames := runtime.CallersFrames(pcs) + for { + frame, more := frames.Next() + visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry) + if !more { + return + } + } +} + // SyncMarker is an enum type that represents markers that may be // written to export data to ensure the reader and writer stay // synchronized. 
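walkFrames now lives unconditionally in sync.go because the pre-go1.7 fallback is irrelevant on this toolchain. The loop it uses is the standard runtime.CallersFrames iteration; a self-contained sketch of that pattern, unrelated to pkgbits itself.

package main

import (
	"fmt"
	"runtime"
)

func main() {
	pcs := make([]uintptr, 16)
	n := runtime.Callers(1, pcs) // skip runtime.Callers itself

	frames := runtime.CallersFrames(pcs[:n])
	for {
		frame, more := frames.Next()
		fmt.Printf("%s:%d %s +0x%x\n", frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
		if !more {
			break
		}
	}
}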
@@ -110,4 +129,8 @@ const ( SyncStmtsEnd SyncLabel SyncOptLabel + + SyncMultiExpr + SyncRType + SyncConvRTTI ) diff --git a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go index 4a5b0ca5..582ad56d 100644 --- a/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go +++ b/vendor/golang.org/x/tools/internal/pkgbits/syncmarker_string.go @@ -74,11 +74,14 @@ func _() { _ = x[SyncStmtsEnd-64] _ = x[SyncLabel-65] _ = x[SyncOptLabel-66] + _ = x[SyncMultiExpr-67] + _ = x[SyncRType-68] + _ = x[SyncConvRTTI-69] } -const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabel" +const _SyncMarker_name = "EOFBoolInt64Uint64StringValueValRelocsRelocUseRelocPublicPosPosBaseObjectObject1PkgPkgDefMethodTypeTypeIdxTypeParamNamesSignatureParamsParamCodeObjSymLocalIdentSelectorPrivateFuncExtVarExtTypeExtPragmaExprListExprsExprExprTypeAssignOpFuncLitCompLitDeclFuncBodyOpenScopeCloseScopeCloseAnotherScopeDeclNamesDeclNameStmtsBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtUseObjLocalAddLocalLinknameStmt1StmtsEndLabelOptLabelMultiExprRTypeConvRTTI" -var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458} +var _SyncMarker_index = [...]uint16{0, 3, 7, 12, 18, 24, 29, 32, 38, 43, 51, 57, 60, 67, 73, 80, 83, 89, 95, 99, 106, 120, 129, 135, 140, 147, 150, 160, 168, 175, 182, 188, 195, 201, 209, 214, 218, 226, 232, 234, 241, 248, 252, 260, 269, 279, 296, 305, 313, 318, 327, 333, 340, 350, 359, 369, 379, 389, 394, 405, 416, 424, 432, 437, 445, 450, 458, 467, 472, 480} func (i SyncMarker) String() string { i -= 1 diff --git a/vendor/golang.org/x/tools/internal/pkgbits/version.go b/vendor/golang.org/x/tools/internal/pkgbits/version.go new file mode 100644 index 00000000..53af9df2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/pkgbits/version.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pkgbits + +// Version indicates a version of a unified IR bitstream. +// Each Version indicates the addition, removal, or change of +// new data in the bitstream. +// +// These are serialized to disk and the interpretation remains fixed. +type Version uint32 + +const ( + // V0: initial prototype. + // + // All data that is not assigned a Field is in version V0 + // and has not been deprecated. + V0 Version = iota + + // V1: adds the Flags uint32 word + V1 + + // V2: removes unused legacy fields and supports type parameters for aliases. 
+ // - remove the legacy "has init" bool from the public root + // - remove obj's "derived func instance" bool + // - add a TypeParamNames field to ObjAlias + // - remove derived info "needed" bool + V2 + + numVersions = iota +) + +// Field denotes a unit of data in the serialized unified IR bitstream. +// It is conceptually a like field in a structure. +// +// We only really need Fields when the data may or may not be present +// in a stream based on the Version of the bitstream. +// +// Unlike much of pkgbits, Fields are not serialized and +// can change values as needed. +type Field int + +const ( + // Flags in a uint32 in the header of a bitstream + // that is used to indicate whether optional features are enabled. + Flags Field = iota + + // Deprecated: HasInit was a bool indicating whether a package + // has any init functions. + HasInit + + // Deprecated: DerivedFuncInstance was a bool indicating + // whether an object was a function instance. + DerivedFuncInstance + + // ObjAlias has a list of TypeParamNames. + AliasTypeParamNames + + // Deprecated: DerivedInfoNeeded was a bool indicating + // whether a type was a derived type. + DerivedInfoNeeded + + numFields = iota +) + +// introduced is the version a field was added. +var introduced = [numFields]Version{ + Flags: V1, + AliasTypeParamNames: V2, +} + +// removed is the version a field was removed in or 0 for fields +// that have not yet been deprecated. +// (So removed[f]-1 is the last version it is included in.) +var removed = [numFields]Version{ + HasInit: V2, + DerivedFuncInstance: V2, + DerivedInfoNeeded: V2, +} + +// Has reports whether field f is present in a bitstream at version v. +func (v Version) Has(f Field) bool { + return introduced[f] <= v && (v < removed[f] || removed[f] == V0) +} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index a928acf2..cdaac9ab 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -951,7 +951,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParseSessionState", Func, 21}, {"QUICClient", Func, 21}, {"QUICConfig", Type, 21}, - {"QUICConfig.EnableStoreSessionEvent", Field, 23}, + {"QUICConfig.EnableSessionEvents", Field, 23}, {"QUICConfig.TLSConfig", Field, 21}, {"QUICConn", Type, 21}, {"QUICEncryptionLevel", Type, 21}, diff --git a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go b/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go deleted file mode 100644 index ff9437a3..00000000 --- a/vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// package tokeninternal provides access to some internal features of the token -// package. -package tokeninternal - -import ( - "fmt" - "go/token" - "sort" - "sync" - "unsafe" -) - -// GetLines returns the table of line-start offsets from a token.File. -func GetLines(file *token.File) []int { - // token.File has a Lines method on Go 1.21 and later. - if file, ok := (interface{})(file).(interface{ Lines() []int }); ok { - return file.Lines() - } - - // This declaration must match that of token.File. - // This creates a risk of dependency skew. 
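To see what Version.Has buys the reader code earlier in this diff (the r.Version().Has(pkgbits.HasInit) style checks), here is a condensed, standalone copy of the scheme with a tiny driver; it mirrors version.go rather than adding anything new, and the authoritative definitions remain the vendored ones.

package main

import "fmt"

type Version uint32

const (
	V0 Version = iota
	V1
	V2
)

type Field int

const (
	Flags Field = iota
	HasInit
	DerivedFuncInstance
	AliasTypeParamNames
	DerivedInfoNeeded
	numFields = iota
)

var introduced = [numFields]Version{Flags: V1, AliasTypeParamNames: V2}
var removed = [numFields]Version{HasInit: V2, DerivedFuncInstance: V2, DerivedInfoNeeded: V2}

// Has reports whether a bitstream written at version v carries field f.
func (v Version) Has(f Field) bool {
	return introduced[f] <= v && (v < removed[f] || removed[f] == V0)
}

func main() {
	for _, v := range []Version{V0, V1, V2} {
		fmt.Printf("V%d: Flags=%t HasInit=%t AliasTypeParamNames=%t\n",
			v, v.Has(Flags), v.Has(HasInit), v.Has(AliasTypeParamNames))
	}
}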
- // For now we check that the size of the two - // declarations is the same, on the (fragile) assumption - // that future changes would add fields. - type tokenFile119 struct { - _ string - _ int - _ int - mu sync.Mutex // we're not complete monsters - lines []int - _ []struct{} - } - - if unsafe.Sizeof(*file) != unsafe.Sizeof(tokenFile119{}) { - panic("unexpected token.File size") - } - var ptr *tokenFile119 - type uP = unsafe.Pointer - *(*uP)(uP(&ptr)) = uP(file) - ptr.mu.Lock() - defer ptr.mu.Unlock() - return ptr.lines -} - -// AddExistingFiles adds the specified files to the FileSet if they -// are not already present. It panics if any pair of files in the -// resulting FileSet would overlap. -func AddExistingFiles(fset *token.FileSet, files []*token.File) { - // Punch through the FileSet encapsulation. - type tokenFileSet struct { - // This type remained essentially consistent from go1.16 to go1.21. - mutex sync.RWMutex - base int - files []*token.File - _ *token.File // changed to atomic.Pointer[token.File] in go1.19 - } - - // If the size of token.FileSet changes, this will fail to compile. - const delta = int64(unsafe.Sizeof(tokenFileSet{})) - int64(unsafe.Sizeof(token.FileSet{})) - var _ [-delta * delta]int - - type uP = unsafe.Pointer - var ptr *tokenFileSet - *(*uP)(uP(&ptr)) = uP(fset) - ptr.mutex.Lock() - defer ptr.mutex.Unlock() - - // Merge and sort. - newFiles := append(ptr.files, files...) - sort.Slice(newFiles, func(i, j int) bool { - return newFiles[i].Base() < newFiles[j].Base() - }) - - // Reject overlapping files. - // Discard adjacent identical files. - out := newFiles[:0] - for i, file := range newFiles { - if i > 0 { - prev := newFiles[i-1] - if file == prev { - continue - } - if prev.Base()+prev.Size()+1 > file.Base() { - panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)", - prev.Name(), prev.Base(), prev.Base()+prev.Size(), - file.Name(), file.Base(), file.Base()+file.Size())) - } - } - out = append(out, file) - } - newFiles = out - - ptr.files = newFiles - - // Advance FileSet.Base(). - if len(newFiles) > 0 { - last := newFiles[len(newFiles)-1] - newBase := last.Base() + last.Size() + 1 - if ptr.base < newBase { - ptr.base = newBase - } - } -} - -// FileSetFor returns a new FileSet containing a sequence of new Files with -// the same base, size, and line as the input files, for use in APIs that -// require a FileSet. -// -// Precondition: the input files must be non-overlapping, and sorted in order -// of their Base. -func FileSetFor(files ...*token.File) *token.FileSet { - fset := token.NewFileSet() - for _, f := range files { - f2 := fset.AddFile(f.Name(), f.Base(), f.Size()) - lines := GetLines(f) - f2.SetLines(lines) - } - return fset -} - -// CloneFileSet creates a new FileSet holding all files in fset. It does not -// create copies of the token.Files in fset: they are added to the resulting -// FileSet unmodified. -func CloneFileSet(fset *token.FileSet) *token.FileSet { - var files []*token.File - fset.Iterate(func(f *token.File) bool { - files = append(files, f) - return true - }) - newFileSet := token.NewFileSet() - AddExistingFiles(newFileSet, files) - return newFileSet -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go new file mode 100644 index 00000000..0b84acc5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -0,0 +1,140 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package typeparams contains common utilities for writing tools that +// interact with generic Go code, as introduced with Go 1.18. It +// supplements the standard library APIs. Notably, the StructuralTerms +// API computes a minimal representation of the structural +// restrictions on a type parameter. +// +// An external version of these APIs is available in the +// golang.org/x/exp/typeparams module. +package typeparams + +import ( + "go/ast" + "go/token" + "go/types" +) + +// UnpackIndexExpr extracts data from AST nodes that represent index +// expressions. +// +// For an ast.IndexExpr, the resulting indices slice will contain exactly one +// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable +// number of index expressions. +// +// For nodes that don't represent index expressions, the first return value of +// UnpackIndexExpr will be nil. +func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) { + switch e := n.(type) { + case *ast.IndexExpr: + return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack + case *ast.IndexListExpr: + return e.X, e.Lbrack, e.Indices, e.Rbrack + } + return nil, token.NoPos, nil, token.NoPos +} + +// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on +// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0 +// will panic. +func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr { + switch len(indices) { + case 0: + panic("empty indices") + case 1: + return &ast.IndexExpr{ + X: x, + Lbrack: lbrack, + Index: indices[0], + Rbrack: rbrack, + } + default: + return &ast.IndexListExpr{ + X: x, + Lbrack: lbrack, + Indices: indices, + Rbrack: rbrack, + } + } +} + +// IsTypeParam reports whether t is a type parameter (or an alias of one). +func IsTypeParam(t types.Type) bool { + _, ok := types.Unalias(t).(*types.TypeParam) + return ok +} + +// GenericAssignableTo is a generalization of types.AssignableTo that +// implements the following rule for uninstantiated generic types: +// +// If V and T are generic named types, then V is considered assignable to T if, +// for every possible instantiation of V[A_1, ..., A_N], the instantiation +// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. +// +// If T has structural constraints, they must be satisfied by V. +// +// For example, consider the following type declarations: +// +// type Interface[T any] interface { +// Accept(T) +// } +// +// type Container[T any] struct { +// Element T +// } +// +// func (c Container[T]) Accept(t T) { c.Element = t } +// +// In this case, GenericAssignableTo reports that instantiations of Container +// are assignable to the corresponding instantiation of Interface. +func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { + V = types.Unalias(V) + T = types.Unalias(T) + + // If V and T are not both named, or do not have matching non-empty type + // parameter lists, fall back on types.AssignableTo. 
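UnpackIndexExpr above smooths over the ast.IndexExpr / ast.IndexListExpr split that generics introduced in the parser. A small self-contained sketch of the same type switch; the helper name unpack and the sample expressions are ours, not part of the vendored API.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

func unpack(n ast.Node) (x ast.Expr, indices []ast.Expr) {
	switch e := n.(type) {
	case *ast.IndexExpr: // one index: m[k], v[0], G[int]
		return e.X, []ast.Expr{e.Index}
	case *ast.IndexListExpr: // several indices: G[int, string]
		return e.X, e.Indices
	}
	return nil, nil
}

func main() {
	for _, src := range []string{"m[k]", "G[int, string]"} {
		expr, err := parser.ParseExpr(src)
		if err != nil {
			panic(err)
		}
		x, indices := unpack(expr)
		fmt.Printf("%-15s base=%T indices=%d\n", src, x, len(indices))
	}
}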
+ + VN, Vnamed := V.(*types.Named) + TN, Tnamed := T.(*types.Named) + if !Vnamed || !Tnamed { + return types.AssignableTo(V, T) + } + + vtparams := VN.TypeParams() + ttparams := TN.TypeParams() + if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { + return types.AssignableTo(V, T) + } + + // V and T have the same (non-zero) number of type params. Instantiate both + // with the type parameters of V. This must always succeed for V, and will + // succeed for T if and only if the type set of each type parameter of V is a + // subset of the type set of the corresponding type parameter of T, meaning + // that every instantiation of V corresponds to a valid instantiation of T. + + // Minor optimization: ensure we share a context across the two + // instantiations below. + if ctxt == nil { + ctxt = types.NewContext() + } + + var targs []types.Type + for i := 0; i < vtparams.Len(); i++ { + targs = append(targs, vtparams.At(i)) + } + + vinst, err := types.Instantiate(ctxt, V, targs, true) + if err != nil { + panic("type parameters should satisfy their own constraints") + } + + tinst, err := types.Instantiate(ctxt, T, targs, true) + if err != nil { + return false + } + + return types.AssignableTo(vinst, tinst) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go new file mode 100644 index 00000000..6e83c6fb --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -0,0 +1,150 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "fmt" + "go/types" +) + +// CoreType returns the core type of T or nil if T does not have a core type. +// +// See https://go.dev/ref/spec#Core_types for the definition of a core type. +func CoreType(T types.Type) types.Type { + U := T.Underlying() + if _, ok := U.(*types.Interface); !ok { + return U // for non-interface types, + } + + terms, err := NormalTerms(U) + if len(terms) == 0 || err != nil { + // len(terms) -> empty type set of interface. + // err != nil => U is invalid, exceeds complexity bounds, or has an empty type set. + return nil // no core type. + } + + U = terms[0].Type().Underlying() + var identical int // i in [0,identical) => Identical(U, terms[i].Type().Underlying()) + for identical = 1; identical < len(terms); identical++ { + if !types.Identical(U, terms[identical].Type().Underlying()) { + break + } + } + + if identical == len(terms) { + // https://go.dev/ref/spec#Core_types + // "There is a single type U which is the underlying type of all types in the type set of T" + return U + } + ch, ok := U.(*types.Chan) + if !ok { + return nil // no core type as identical < len(terms) and U is not a channel. + } + // https://go.dev/ref/spec#Core_types + // "the type chan E if T contains only bidirectional channels, or the type chan<- E or + // <-chan E depending on the direction of the directional channels present." + for chans := identical; chans < len(terms); chans++ { + curr, ok := terms[chans].Type().Underlying().(*types.Chan) + if !ok { + return nil + } + if !types.Identical(ch.Elem(), curr.Elem()) { + return nil // channel elements are not identical. + } + if ch.Dir() == types.SendRecv { + // ch is bidirectional. We can safely always use curr's direction. 
+ ch = curr + } else if curr.Dir() != types.SendRecv && ch.Dir() != curr.Dir() { + // ch and curr are not bidirectional and not the same direction. + return nil + } + } + return ch +} + +// NormalTerms returns a slice of terms representing the normalized structural +// type restrictions of a type, if any. +// +// For all types other than *types.TypeParam, *types.Interface, and +// *types.Union, this is just a single term with Tilde() == false and +// Type() == typ. For *types.TypeParam, *types.Interface, and *types.Union, see +// below. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration type +// T[P interface{~int; m()}] int the structural restriction of the type +// parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// NormalTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, NormalTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the type is +// invalid, exceeds complexity bounds, or has an empty type set. In the latter +// case, NormalTerms returns ErrEmptyTypeSet. +// +// NormalTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func NormalTerms(typ types.Type) ([]*types.Term, error) { + switch typ := typ.Underlying().(type) { + case *types.TypeParam: + return StructuralTerms(typ) + case *types.Union: + return UnionTermSet(typ) + case *types.Interface: + return InterfaceTermSet(typ) + default: + return []*types.Term{types.NewTerm(false, typ)}, nil + } +} + +// Deref returns the type of the variable pointed to by t, +// if t's core type is a pointer; otherwise it returns t. +// +// Do not assume that Deref(T)==T implies T is not a pointer: +// consider "type T *T", for example. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. +func Deref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + return t +} + +// MustDeref returns the type of the variable pointed to by t. +// It panics if t's core type is not a pointer. +// +// TODO(adonovan): ideally this would live in typesinternal, but that +// creates an import cycle. Move there when we melt this package down. 
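CoreType and NormalTerms above reproduce the spec's core-type computation for tools. The effect is easiest to see through the type checker itself: indexing a value of type parameter S below is legal only because every term of its constraint shares the underlying type []int. A self-contained check; the package name p and all identifiers in the snippet are ours.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
)

const src = `package p

type IntSlices interface{ ~[]int }

func First[S IntSlices](s S) int { return s[0] } // indexing needs a core type
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := (&types.Config{}).Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err) // would fail if the constraint had no core type
	}

	first := pkg.Scope().Lookup("First").(*types.Func)
	tparam := first.Type().(*types.Signature).TypeParams().At(0)
	fmt.Println(tparam.Constraint().Underlying()) // interface{~[]int}
}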
+func MustDeref(t types.Type) types.Type { + if ptr, ok := CoreType(t).(*types.Pointer); ok { + return ptr.Elem() + } + panic(fmt.Sprintf("%v is not a pointer", t)) +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go new file mode 100644 index 00000000..35810826 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/free.go @@ -0,0 +1,118 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "go/types" +) + +// Free is a memoization of the set of free type parameters within a +// type. It makes a sequence of calls to [Free.Has] for overlapping +// types more efficient. The zero value is ready for use. +// +// NOTE: Adapted from go/types/infer.go. If it is later exported, factor. +type Free struct { + seen map[types.Type]bool +} + +// Has reports whether the specified type has a free type parameter. +func (w *Free) Has(typ types.Type) (res bool) { + // detect cycles + if x, ok := w.seen[typ]; ok { + return x + } + if w.seen == nil { + w.seen = make(map[types.Type]bool) + } + w.seen[typ] = false + defer func() { + w.seen[typ] = res + }() + + switch t := typ.(type) { + case nil, *types.Basic: // TODO(gri) should nil be handled here? + break + + case *types.Alias: + return w.Has(types.Unalias(t)) + + case *types.Array: + return w.Has(t.Elem()) + + case *types.Slice: + return w.Has(t.Elem()) + + case *types.Struct: + for i, n := 0, t.NumFields(); i < n; i++ { + if w.Has(t.Field(i).Type()) { + return true + } + } + + case *types.Pointer: + return w.Has(t.Elem()) + + case *types.Tuple: + n := t.Len() + for i := 0; i < n; i++ { + if w.Has(t.At(i).Type()) { + return true + } + } + + case *types.Signature: + // t.tparams may not be nil if we are looking at a signature + // of a generic function type (or an interface method) that is + // part of the type we're testing. We don't care about these type + // parameters. + // Similarly, the receiver of a method may declare (rather than + // use) type parameters, we don't care about those either. + // Thus, we only need to look at the input and result parameters. + return w.Has(t.Params()) || w.Has(t.Results()) + + case *types.Interface: + for i, n := 0, t.NumMethods(); i < n; i++ { + if w.Has(t.Method(i).Type()) { + return true + } + } + terms, err := InterfaceTermSet(t) + if err != nil { + return false // ill typed + } + for _, term := range terms { + if w.Has(term.Type()) { + return true + } + } + + case *types.Map: + return w.Has(t.Key()) || w.Has(t.Elem()) + + case *types.Chan: + return w.Has(t.Elem()) + + case *types.Named: + args := t.TypeArgs() + // TODO(taking): this does not match go/types/infer.go. Check with rfindley. + if params := t.TypeParams(); params.Len() > args.Len() { + return true + } + for i, n := 0, args.Len(); i < n; i++ { + if w.Has(args.At(i)) { + return true + } + } + return w.Has(t.Underlying()) // recurse for types local to parameterized functions + + case *types.TypeParam: + return true + + default: + panic(t) // unreachable + } + + return false +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go new file mode 100644 index 00000000..93c80fdc --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go @@ -0,0 +1,218 @@ +// Copyright 2021 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typeparams + +import ( + "errors" + "fmt" + "go/types" + "os" + "strings" +) + +//go:generate go run copytermlist.go + +const debug = false + +var ErrEmptyTypeSet = errors.New("empty type set") + +// StructuralTerms returns a slice of terms representing the normalized +// structural type restrictions of a type parameter, if any. +// +// Structural type restrictions of a type parameter are created via +// non-interface types embedded in its constraint interface (directly, or via a +// chain of interface embeddings). For example, in the declaration +// +// type T[P interface{~int; m()}] int +// +// the structural restriction of the type parameter P is ~int. +// +// With interface embedding and unions, the specification of structural type +// restrictions may be arbitrarily complex. For example, consider the +// following: +// +// type A interface{ ~string|~[]byte } +// +// type B interface{ int|string } +// +// type C interface { ~string|~int } +// +// type T[P interface{ A|B; C }] int +// +// In this example, the structural type restriction of P is ~string|int: A|B +// expands to ~string|~[]byte|int|string, which reduces to ~string|~[]byte|int, +// which when intersected with C (~string|~int) yields ~string|int. +// +// StructuralTerms computes these expansions and reductions, producing a +// "normalized" form of the embeddings. A structural restriction is normalized +// if it is a single union containing no interface terms, and is minimal in the +// sense that removing any term changes the set of types satisfying the +// constraint. It is left as a proof for the reader that, modulo sorting, there +// is exactly one such normalized form. +// +// Because the minimal representation always takes this form, StructuralTerms +// returns a slice of tilde terms corresponding to the terms of the union in +// the normalized structural restriction. An error is returned if the +// constraint interface is invalid, exceeds complexity bounds, or has an empty +// type set. In the latter case, StructuralTerms returns ErrEmptyTypeSet. +// +// StructuralTerms makes no guarantees about the order of terms, except that it +// is deterministic. +func StructuralTerms(tparam *types.TypeParam) ([]*types.Term, error) { + constraint := tparam.Constraint() + if constraint == nil { + return nil, fmt.Errorf("%s has nil constraint", tparam) + } + iface, _ := constraint.Underlying().(*types.Interface) + if iface == nil { + return nil, fmt.Errorf("constraint is %T, not *types.Interface", constraint.Underlying()) + } + return InterfaceTermSet(iface) +} + +// InterfaceTermSet computes the normalized terms for a constraint interface, +// returning an error if the term set cannot be computed or is empty. In the +// latter case, the error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. +func InterfaceTermSet(iface *types.Interface) ([]*types.Term, error) { + return computeTermSet(iface) +} + +// UnionTermSet computes the normalized terms for a union, returning an error +// if the term set cannot be computed or is empty. In the latter case, the +// error will be ErrEmptyTypeSet. +// +// See the documentation of StructuralTerms for more information on +// normalization. 
+func UnionTermSet(union *types.Union) ([]*types.Term, error) { + return computeTermSet(union) +} + +func computeTermSet(typ types.Type) ([]*types.Term, error) { + tset, err := computeTermSetInternal(typ, make(map[types.Type]*termSet), 0) + if err != nil { + return nil, err + } + if tset.terms.isEmpty() { + return nil, ErrEmptyTypeSet + } + if tset.terms.isAll() { + return nil, nil + } + var terms []*types.Term + for _, term := range tset.terms { + terms = append(terms, types.NewTerm(term.tilde, term.typ)) + } + return terms, nil +} + +// A termSet holds the normalized set of terms for a given type. +// +// The name termSet is intentionally distinct from 'type set': a type set is +// all types that implement a type (and includes method restrictions), whereas +// a term set just represents the structural restrictions on a type. +type termSet struct { + complete bool + terms termlist +} + +func indentf(depth int, format string, args ...interface{}) { + fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...) +} + +func computeTermSetInternal(t types.Type, seen map[types.Type]*termSet, depth int) (res *termSet, err error) { + if t == nil { + panic("nil type") + } + + if debug { + indentf(depth, "%s", t.String()) + defer func() { + if err != nil { + indentf(depth, "=> %s", err) + } else { + indentf(depth, "=> %s", res.terms.String()) + } + }() + } + + const maxTermCount = 100 + if tset, ok := seen[t]; ok { + if !tset.complete { + return nil, fmt.Errorf("cycle detected in the declaration of %s", t) + } + return tset, nil + } + + // Mark the current type as seen to avoid infinite recursion. + tset := new(termSet) + defer func() { + tset.complete = true + }() + seen[t] = tset + + switch u := t.Underlying().(type) { + case *types.Interface: + // The term set of an interface is the intersection of the term sets of its + // embedded types. + tset.terms = allTermlist + for i := 0; i < u.NumEmbeddeds(); i++ { + embedded := u.EmbeddedType(i) + if _, ok := embedded.Underlying().(*types.TypeParam); ok { + return nil, fmt.Errorf("invalid embedded type %T", embedded) + } + tset2, err := computeTermSetInternal(embedded, seen, depth+1) + if err != nil { + return nil, err + } + tset.terms = tset.terms.intersect(tset2.terms) + } + case *types.Union: + // The term set of a union is the union of term sets of its terms. + tset.terms = nil + for i := 0; i < u.Len(); i++ { + t := u.Term(i) + var terms termlist + switch t.Type().Underlying().(type) { + case *types.Interface: + tset2, err := computeTermSetInternal(t.Type(), seen, depth+1) + if err != nil { + return nil, err + } + terms = tset2.terms + case *types.TypeParam, *types.Union: + // A stand-alone type parameter or union is not permitted as union + // term. + return nil, fmt.Errorf("invalid union term %T", t) + default: + if t.Type() == types.Typ[types.Invalid] { + continue + } + terms = termlist{{t.Tilde(), t.Type()}} + } + tset.terms = tset.terms.union(terms) + if len(tset.terms) > maxTermCount { + return nil, fmt.Errorf("exceeded max term count %d", maxTermCount) + } + } + case *types.TypeParam: + panic("unreachable") + default: + // For all other types, the term set is just a single non-tilde term + // holding the type itself. + if u != types.Typ[types.Invalid] { + tset.terms = termlist{{false, t}} + } + } + return tset, nil +} + +// under is a facade for the go/types internal function of the same name. It is +// used by typeterm.go. 
+func under(t types.Type) types.Type { + return t.Underlying() +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go new file mode 100644 index 00000000..cbd12f80 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go @@ -0,0 +1,163 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import ( + "bytes" + "go/types" +) + +// A termlist represents the type set represented by the union +// t1 ∪ y2 ∪ ... tn of the type sets of the terms t1 to tn. +// A termlist is in normal form if all terms are disjoint. +// termlist operations don't require the operands to be in +// normal form. +type termlist []*term + +// allTermlist represents the set of all types. +// It is in normal form. +var allTermlist = termlist{new(term)} + +// String prints the termlist exactly (without normalization). +func (xl termlist) String() string { + if len(xl) == 0 { + return "∅" + } + var buf bytes.Buffer + for i, x := range xl { + if i > 0 { + buf.WriteString(" | ") + } + buf.WriteString(x.String()) + } + return buf.String() +} + +// isEmpty reports whether the termlist xl represents the empty set of types. +func (xl termlist) isEmpty() bool { + // If there's a non-nil term, the entire list is not empty. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil { + return false + } + } + return true +} + +// isAll reports whether the termlist xl represents the set of all types. +func (xl termlist) isAll() bool { + // If there's a 𝓤 term, the entire list is 𝓤. + // If the termlist is in normal form, this requires at most + // one iteration. + for _, x := range xl { + if x != nil && x.typ == nil { + return true + } + } + return false +} + +// norm returns the normal form of xl. +func (xl termlist) norm() termlist { + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + used := make([]bool, len(xl)) + var rl termlist + for i, xi := range xl { + if xi == nil || used[i] { + continue + } + for j := i + 1; j < len(xl); j++ { + xj := xl[j] + if xj == nil || used[j] { + continue + } + if u1, u2 := xi.union(xj); u2 == nil { + // If we encounter a 𝓤 term, the entire list is 𝓤. + // Exit early. + // (Note that this is not just an optimization; + // if we continue, we may end up with a 𝓤 term + // and other terms and the result would not be + // in normal form.) + if u1.typ == nil { + return allTermlist + } + xi = u1 + used[j] = true // xj is now unioned into xi - ignore it in future iterations + } + } + rl = append(rl, xi) + } + return rl +} + +// union returns the union xl ∪ yl. +func (xl termlist) union(yl termlist) termlist { + return append(xl, yl...).norm() +} + +// intersect returns the intersection xl ∩ yl. +func (xl termlist) intersect(yl termlist) termlist { + if xl.isEmpty() || yl.isEmpty() { + return nil + } + + // Quadratic algorithm, but good enough for now. + // TODO(gri) fix asymptotic performance + var rl termlist + for _, x := range xl { + for _, y := range yl { + if r := x.intersect(y); r != nil { + rl = append(rl, r) + } + } + } + return rl.norm() +} + +// equal reports whether xl and yl represent the same type set. 
+func (xl termlist) equal(yl termlist) bool { + // TODO(gri) this should be more efficient + return xl.subsetOf(yl) && yl.subsetOf(xl) +} + +// includes reports whether t ∈ xl. +func (xl termlist) includes(t types.Type) bool { + for _, x := range xl { + if x.includes(t) { + return true + } + } + return false +} + +// supersetOf reports whether y ⊆ xl. +func (xl termlist) supersetOf(y *term) bool { + for _, x := range xl { + if y.subsetOf(x) { + return true + } + } + return false +} + +// subsetOf reports whether xl ⊆ yl. +func (xl termlist) subsetOf(yl termlist) bool { + if yl.isEmpty() { + return xl.isEmpty() + } + + // each term x of xl must be a subset of yl + for _, x := range xl { + if !yl.supersetOf(x) { + return false // x is not a subset yl + } + } + return true +} diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go new file mode 100644 index 00000000..7350bb70 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go @@ -0,0 +1,169 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by copytermlist.go DO NOT EDIT. + +package typeparams + +import "go/types" + +// A term describes elementary type sets: +// +// ∅: (*term)(nil) == ∅ // set of no types (empty set) +// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse) +// T: &term{false, T} == {T} // set of type T +// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t +type term struct { + tilde bool // valid if typ != nil + typ types.Type +} + +func (x *term) String() string { + switch { + case x == nil: + return "∅" + case x.typ == nil: + return "𝓤" + case x.tilde: + return "~" + x.typ.String() + default: + return x.typ.String() + } +} + +// equal reports whether x and y represent the same type set. +func (x *term) equal(y *term) bool { + // easy cases + switch { + case x == nil || y == nil: + return x == y + case x.typ == nil || y.typ == nil: + return x.typ == y.typ + } + // ∅ ⊂ x, y ⊂ 𝓤 + + return x.tilde == y.tilde && types.Identical(x.typ, y.typ) +} + +// union returns the union x ∪ y: zero, one, or two non-nil terms. +func (x *term) union(y *term) (_, _ *term) { + // easy cases + switch { + case x == nil && y == nil: + return nil, nil // ∅ ∪ ∅ == ∅ + case x == nil: + return y, nil // ∅ ∪ y == y + case y == nil: + return x, nil // x ∪ ∅ == x + case x.typ == nil: + return x, nil // 𝓤 ∪ y == 𝓤 + case y.typ == nil: + return y, nil // x ∪ 𝓤 == 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return x, y // x ∪ y == (x, y) if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∪ ~t == ~t + // ~t ∪ T == ~t + // T ∪ ~t == ~t + // T ∪ T == T + if x.tilde || !y.tilde { + return x, nil + } + return y, nil +} + +// intersect returns the intersection x ∩ y. +func (x *term) intersect(y *term) *term { + // easy cases + switch { + case x == nil || y == nil: + return nil // ∅ ∩ y == ∅ and ∩ ∅ == ∅ + case x.typ == nil: + return y // 𝓤 ∩ y == y + case y.typ == nil: + return x // x ∩ 𝓤 == x + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return nil // x ∩ y == ∅ if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ∩ ~t == ~t + // ~t ∩ T == T + // T ∩ ~t == T + // T ∩ T == T + if !x.tilde || y.tilde { + return x + } + return y +} + +// includes reports whether t ∈ x. 
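The term and termlist types above model the tilde-terms that go/types exposes through types.Union; the normalization code reduces exactly these. A short sketch of the public API those term sets come from, using the ~string|int example from the StructuralTerms documentation.

package main

import (
	"fmt"
	"go/types"
)

func main() {
	u := types.NewUnion([]*types.Term{
		types.NewTerm(true, types.Typ[types.String]), // ~string
		types.NewTerm(false, types.Typ[types.Int]),   // int
	})
	for i := 0; i < u.Len(); i++ {
		t := u.Term(i)
		fmt.Printf("tilde=%v type=%s\n", t.Tilde(), t.Type())
	}
}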
+func (x *term) includes(t types.Type) bool { + // easy cases + switch { + case x == nil: + return false // t ∈ ∅ == false + case x.typ == nil: + return true // t ∈ 𝓤 == true + } + // ∅ ⊂ x ⊂ 𝓤 + + u := t + if x.tilde { + u = under(u) + } + return types.Identical(x.typ, u) +} + +// subsetOf reports whether x ⊆ y. +func (x *term) subsetOf(y *term) bool { + // easy cases + switch { + case x == nil: + return true // ∅ ⊆ y == true + case y == nil: + return false // x ⊆ ∅ == false since x != ∅ + case y.typ == nil: + return true // x ⊆ 𝓤 == true + case x.typ == nil: + return false // 𝓤 ⊆ y == false since y != 𝓤 + } + // ∅ ⊂ x, y ⊂ 𝓤 + + if x.disjoint(y) { + return false // x ⊆ y == false if x ∩ y == ∅ + } + // x.typ == y.typ + + // ~t ⊆ ~t == true + // ~t ⊆ T == false + // T ⊆ ~t == true + // T ⊆ T == true + return !x.tilde || y.tilde +} + +// disjoint reports whether x ∩ y == ∅. +// x.typ and y.typ must not be nil. +func (x *term) disjoint(y *term) bool { + if debug && (x.typ == nil || y.typ == nil) { + panic("invalid argument(s)") + } + ux := x.typ + if y.tilde { + ux = under(ux) + } + uy := y.typ + if x.tilde { + uy = under(uy) + } + return !types.Identical(ux, uy) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/element.go b/vendor/golang.org/x/tools/internal/typesinternal/element.go new file mode 100644 index 00000000..4957f021 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/element.go @@ -0,0 +1,133 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "fmt" + "go/types" + + "golang.org/x/tools/go/types/typeutil" +) + +// ForEachElement calls f for type T and each type reachable from its +// type through reflection. It does this by recursively stripping off +// type constructors; in addition, for each named type N, the type *N +// is added to the result as it may have additional methods. +// +// The caller must provide an initially empty set used to de-duplicate +// identical types, potentially across multiple calls to ForEachElement. +// (Its final value holds all the elements seen, matching the arguments +// passed to f.) +// +// TODO(adonovan): share/harmonize with go/callgraph/rta. +func ForEachElement(rtypes *typeutil.Map, msets *typeutil.MethodSetCache, T types.Type, f func(types.Type)) { + var visit func(T types.Type, skip bool) + visit = func(T types.Type, skip bool) { + if !skip { + if seen, _ := rtypes.Set(T, true).(bool); seen { + return // de-dup + } + + f(T) // notify caller of new element type + } + + // Recursion over signatures of each method. + tmset := msets.MethodSet(T) + for i := 0; i < tmset.Len(); i++ { + sig := tmset.At(i).Type().(*types.Signature) + // It is tempting to call visit(sig, false) + // but, as noted in golang.org/cl/65450043, + // the Signature.Recv field is ignored by + // types.Identical and typeutil.Map, which + // is confusing at best. + // + // More importantly, the true signature rtype + // reachable from a method using reflection + // has no receiver but an extra ordinary parameter. + // For the Read method of io.Reader we want: + // func(Reader, []byte) (int, error) + // but here sig is: + // func([]byte) (int, error) + // with .Recv = Reader (though it is hard to + // notice because it doesn't affect Signature.String + // or types.Identical). 
+ // + // TODO(adonovan): construct and visit the correct + // non-method signature with an extra parameter + // (though since unnamed func types have no methods + // there is essentially no actual demand for this). + // + // TODO(adonovan): document whether or not it is + // safe to skip non-exported methods (as RTA does). + visit(sig.Params(), true) // skip the Tuple + visit(sig.Results(), true) // skip the Tuple + } + + switch T := T.(type) { + case *types.Alias: + visit(types.Unalias(T), skip) // emulates the pre-Alias behavior + + case *types.Basic: + // nop + + case *types.Interface: + // nop---handled by recursion over method set. + + case *types.Pointer: + visit(T.Elem(), false) + + case *types.Slice: + visit(T.Elem(), false) + + case *types.Chan: + visit(T.Elem(), false) + + case *types.Map: + visit(T.Key(), false) + visit(T.Elem(), false) + + case *types.Signature: + if T.Recv() != nil { + panic(fmt.Sprintf("Signature %s has Recv %s", T, T.Recv())) + } + visit(T.Params(), true) // skip the Tuple + visit(T.Results(), true) // skip the Tuple + + case *types.Named: + // A pointer-to-named type can be derived from a named + // type via reflection. It may have methods too. + visit(types.NewPointer(T), false) + + // Consider 'type T struct{S}' where S has methods. + // Reflection provides no way to get from T to struct{S}, + // only to S, so the method set of struct{S} is unwanted, + // so set 'skip' flag during recursion. + visit(T.Underlying(), true) // skip the unnamed type + + case *types.Array: + visit(T.Elem(), false) + + case *types.Struct: + for i, n := 0, T.NumFields(); i < n; i++ { + // TODO(adonovan): document whether or not + // it is safe to skip non-exported fields. + visit(T.Field(i).Type(), false) + } + + case *types.Tuple: + for i, n := 0, T.Len(); i < n; i++ { + visit(T.At(i).Type(), false) + } + + case *types.TypeParam, *types.Union: + // forEachReachable must not be called on parameterized types. + panic(T) + + default: + panic(T) + } + } + visit(T, false) +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index 834e0538..131caab2 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -838,7 +838,7 @@ const ( // InvalidCap occurs when an argument to the cap built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -859,7 +859,7 @@ const ( // InvalidCopy occurs when the arguments are not of slice type or do not // have compatible type. // - // See https://golang.org/ref/spec#Appendingand_copying_slices for more + // See https://golang.org/ref/spec#Appending_and_copying_slices for more // information on the type requirements for the copy built-in. // // Example: @@ -897,7 +897,7 @@ const ( // InvalidLen occurs when an argument to the len built-in function is not of // supported type. // - // See https://golang.org/ref/spec#Lengthand_capacity for information on + // See https://golang.org/ref/spec#Length_and_capacity for information on // which underlying types are supported as arguments to cap and len. // // Example: @@ -914,7 +914,7 @@ const ( // InvalidMake occurs when make is called with an unsupported type argument. 
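A minimal sketch of driving the new ForEachElement helper, assuming code that lives inside the x/tools module (typesinternal is internal and cannot be imported from outside); the type []*int is chosen only to keep the example self-contained:

    package main

    import (
        "fmt"
        "go/types"

        "golang.org/x/tools/go/types/typeutil"
        "golang.org/x/tools/internal/typesinternal"
    )

    func main() {
        var rtypes typeutil.Map           // de-duplicates element types, possibly across calls
        var msets typeutil.MethodSetCache // caches method sets during the walk

        T := types.NewSlice(types.NewPointer(types.Typ[types.Int])) // []*int
        typesinternal.ForEachElement(&rtypes, &msets, T, func(t types.Type) {
            fmt.Println(t) // reports []*int, then *int, then int
        })
    }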
// - // See https://golang.org/ref/spec#Makingslices_maps_and_channels for + // See https://golang.org/ref/spec#Making_slices_maps_and_channels for // information on the types that may be created using make. // // Example: diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go index fea7c8b7..ba6f4f4e 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -6,8 +6,6 @@ package typesinternal import ( "go/types" - - "golang.org/x/tools/internal/aliases" ) // ReceiverNamed returns the named type (if any) associated with the @@ -15,11 +13,11 @@ import ( // It also reports whether a Pointer was present. func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { t := recv.Type() - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { isPtr = true t = ptr.Elem() } - named, _ = aliases.Unalias(t).(*types.Named) + named, _ = types.Unalias(t).(*types.Named) return } @@ -36,7 +34,7 @@ func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { // indirection from the type, regardless of named types (analogous to // a LOAD instruction). func Unpointer(t types.Type) types.Type { - if ptr, ok := aliases.Unalias(t).(*types.Pointer); ok { + if ptr, ok := types.Unalias(t).(*types.Pointer); ok { return ptr.Elem() } return t diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain.go b/vendor/golang.org/x/tools/internal/versions/toolchain.go deleted file mode 100644 index 377bf7a5..00000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package versions - -// toolchain is maximum version (<1.22) that the go toolchain used -// to build the current tool is known to support. -// -// When a tool is built with >=1.22, the value of toolchain is unused. -// -// x/tools does not support building with go <1.18. So we take this -// as the minimum possible maximum. -var toolchain string = Go1_18 diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go deleted file mode 100644 index 1a9efa12..00000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go120.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.20 -// +build go1.20 - -package versions - -func init() { - if Compare(toolchain, Go1_20) < 0 { - toolchain = Go1_20 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go b/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go deleted file mode 100644 index b7ef216d..00000000 --- a/vendor/golang.org/x/tools/internal/versions/toolchain_go121.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.21 -// +build go1.21 - -package versions - -func init() { - if Compare(toolchain, Go1_21) < 0 { - toolchain = Go1_21 - } -} diff --git a/vendor/golang.org/x/tools/internal/versions/types.go b/vendor/golang.org/x/tools/internal/versions/types.go index 562eef21..f0bb0d15 100644 --- a/vendor/golang.org/x/tools/internal/versions/types.go +++ b/vendor/golang.org/x/tools/internal/versions/types.go @@ -5,15 +5,34 @@ package versions import ( + "go/ast" "go/types" ) -// GoVersion returns the Go version of the type package. -// It returns zero if no version can be determined. -func GoVersion(pkg *types.Package) string { - // TODO(taking): x/tools can call GoVersion() [from 1.21] after 1.25. - if pkg, ok := any(pkg).(interface{ GoVersion() string }); ok { - return pkg.GoVersion() +// FileVersion returns a file's Go version. +// The reported version is an unknown Future version if a +// version cannot be determined. +func FileVersion(info *types.Info, file *ast.File) string { + // In tools built with Go >= 1.22, the Go version of a file + // follow a cascades of sources: + // 1) types.Info.FileVersion, which follows the cascade: + // 1.a) file version (ast.File.GoVersion), + // 1.b) the package version (types.Config.GoVersion), or + // 2) is some unknown Future version. + // + // File versions require a valid package version to be provided to types + // in Config.GoVersion. Config.GoVersion is either from the package's module + // or the toolchain (go run). This value should be provided by go/packages + // or unitchecker.Config.GoVersion. + if v := info.FileVersions[file]; IsValid(v) { + return v } - return "" + // Note: we could instead return runtime.Version() [if valid]. + // This would act as a max version on what a tool can support. + return Future +} + +// InitFileVersions initializes info to record Go versions for Go files. +func InitFileVersions(info *types.Info) { + info.FileVersions = make(map[*ast.File]string) } diff --git a/vendor/golang.org/x/tools/internal/versions/types_go121.go b/vendor/golang.org/x/tools/internal/versions/types_go121.go deleted file mode 100644 index b4345d33..00000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go121.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.22 -// +build !go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersion returns a language version (<=1.21) derived from runtime.Version() -// or an unknown future version. -func FileVersion(info *types.Info, file *ast.File) string { - // In x/tools built with Go <= 1.21, we do not have Info.FileVersions - // available. We use a go version derived from the toolchain used to - // compile the tool by default. - // This will be <= go1.21. We take this as the maximum version that - // this tool can support. - // - // There are no features currently in x/tools that need to tell fine grained - // differences for versions <1.22. - return toolchain -} - -// InitFileVersions is a noop when compiled with this Go version. -func InitFileVersions(*types.Info) {} diff --git a/vendor/golang.org/x/tools/internal/versions/types_go122.go b/vendor/golang.org/x/tools/internal/versions/types_go122.go deleted file mode 100644 index aac5db62..00000000 --- a/vendor/golang.org/x/tools/internal/versions/types_go122.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. 
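The FileVersion/InitFileVersions pair above replaces the per-toolchain fallbacks removed below, which are no longer needed once a Go 1.22+ toolchain can be assumed. A sketch of the intended call pattern, again assuming code inside x/tools (the versions package is internal) and an arbitrary GoVersion value:

    package main

    import (
        "fmt"
        "go/ast"
        "go/parser"
        "go/token"
        "go/types"

        "golang.org/x/tools/internal/versions"
    )

    func main() {
        fset := token.NewFileSet()
        file, err := parser.ParseFile(fset, "p.go", "package p; func f() {}", 0)
        if err != nil {
            panic(err)
        }

        info := &types.Info{}
        versions.InitFileVersions(info) // allocate info.FileVersions before type checking

        conf := types.Config{GoVersion: "go1.22"} // package-level version fed to go/types
        if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
            panic(err)
        }

        // Prints "go1.22" here; versions.Future when no valid version can be determined.
        fmt.Println(versions.FileVersion(info, file))
    }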
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 -// +build go1.22 - -package versions - -import ( - "go/ast" - "go/types" -) - -// FileVersion returns a file's Go version. -// The reported version is an unknown Future version if a -// version cannot be determined. -func FileVersion(info *types.Info, file *ast.File) string { - // In tools built with Go >= 1.22, the Go version of a file - // follow a cascades of sources: - // 1) types.Info.FileVersion, which follows the cascade: - // 1.a) file version (ast.File.GoVersion), - // 1.b) the package version (types.Config.GoVersion), or - // 2) is some unknown Future version. - // - // File versions require a valid package version to be provided to types - // in Config.GoVersion. Config.GoVersion is either from the package's module - // or the toolchain (go run). This value should be provided by go/packages - // or unitchecker.Config.GoVersion. - if v := info.FileVersions[file]; IsValid(v) { - return v - } - // Note: we could instead return runtime.Version() [if valid]. - // This would act as a max version on what a tool can support. - return Future -} - -// InitFileVersions initializes info to record Go versions for Go files. -func InitFileVersions(info *types.Info) { - info.FileVersions = make(map[*ast.File]string) -} diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go index 8401be8c..024ffebd 100644 --- a/vendor/google.golang.org/protobuf/internal/descopts/options.go +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -9,7 +9,7 @@ // dependency on the descriptor proto package). package descopts -import pref "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // These variables are set by the init function in descriptor.pb.go via logic // in internal/filetype. In other words, so long as the descriptor proto package @@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect" // // Each variable is populated with a nil pointer to the options struct. 
var ( - File pref.ProtoMessage - Enum pref.ProtoMessage - EnumValue pref.ProtoMessage - Message pref.ProtoMessage - Field pref.ProtoMessage - Oneof pref.ProtoMessage - ExtensionRange pref.ProtoMessage - Service pref.ProtoMessage - Method pref.ProtoMessage + File protoreflect.ProtoMessage + Enum protoreflect.ProtoMessage + EnumValue protoreflect.ProtoMessage + Message protoreflect.ProtoMessage + Field protoreflect.ProtoMessage + Oneof protoreflect.ProtoMessage + ExtensionRange protoreflect.ProtoMessage + Service protoreflect.ProtoMessage + Method protoreflect.ProtoMessage ) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index df53ff40..fa790e0f 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -258,6 +258,7 @@ type ( StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto IsWeak bool // promoted from google.protobuf.FieldOptions + IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields Enum protoreflect.EnumDescriptor @@ -351,6 +352,7 @@ func (fd *Field) IsPacked() bool { } func (fd *Field) IsExtension() bool { return false } func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } func (fd *Field) MapKey() protoreflect.FieldDescriptor { @@ -425,6 +427,7 @@ type ( Extendee protoreflect.MessageDescriptor Cardinality protoreflect.Cardinality Kind protoreflect.Kind + IsLazy bool EditionFeatures EditionFeatures } ExtensionL2 struct { @@ -465,6 +468,7 @@ func (xd *Extension) IsPacked() bool { } func (xd *Extension) IsExtension() bool { return true } func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy } func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated } func (xd *Extension) IsMap() bool { return false } func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go index 8a57d60b..d2f54949 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) + case genid.FieldOptions_Lazy_field_number: + xd.L1.IsLazy = protowire.DecodeBool(v) } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index e56c91a8..67a51b32 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) { fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) case genid.FieldOptions_Weak_field_number: fd.L1.IsWeak = protowire.DecodeBool(v) + 
case genid.FieldOptions_Lazy_field_number: + fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v) } diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go index 11f5f356..fd4d0c83 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go @@ -68,7 +68,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures { v, m := protowire.ConsumeBytes(b) b = b[m:] switch num { - case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number: + case genid.FeatureSet_Go_ext_number: parent = unmarshalGoFeature(v, parent) } } diff --git a/vendor/google.golang.org/protobuf/internal/genid/doc.go b/vendor/google.golang.org/protobuf/internal/genid/doc.go index 45ccd012..d9b9d916 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/doc.go +++ b/vendor/google.golang.org/protobuf/internal/genid/doc.go @@ -6,6 +6,6 @@ // and the well-known types. package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" const GoogleProtobuf_package protoreflect.FullName = "google.protobuf" diff --git a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go index 9a652a2b..7f67cbb6 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go +++ b/vendor/google.golang.org/protobuf/internal/genid/go_features_gen.go @@ -12,20 +12,25 @@ import ( const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto" -// Names for google.protobuf.GoFeatures. +// Names for pb.GoFeatures. const ( GoFeatures_message_name protoreflect.Name = "GoFeatures" - GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures" + GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures" ) -// Field names for google.protobuf.GoFeatures. +// Field names for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum" - GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum" + GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum" ) -// Field numbers for google.protobuf.GoFeatures. +// Field numbers for pb.GoFeatures. const ( GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1 ) + +// Extension numbers +const ( + FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002 +) diff --git a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go index 8f9ea02f..bef5a25f 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/map_entry.go +++ b/vendor/google.golang.org/protobuf/internal/genid/map_entry.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field names and numbers for synthetic map entry messages. 
const ( diff --git a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go index 429384b8..9404270d 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/wrappers.go +++ b/vendor/google.golang.org/protobuf/internal/genid/wrappers.go @@ -4,7 +4,7 @@ package genid -import protoreflect "google.golang.org/protobuf/reflect/protoreflect" +import "google.golang.org/protobuf/reflect/protoreflect" // Generic field name and number for messages in wrappers.proto. const ( diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go index 4bb0a7a2..0d5b546e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -67,7 +67,6 @@ type lazyExtensionValue struct { xi *extensionFieldInfo value protoreflect.Value b []byte - fn func() protoreflect.Value } type ExtensionField struct { @@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() { } f.lazy.value = val } else { - f.lazy.value = f.lazy.fn() + panic("No support for lazy fns for ExtensionField") } f.lazy.xi = nil - f.lazy.fn = nil f.lazy.b = nil atomic.StoreUint32(&f.lazy.atomicOnce, 1) } @@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value) f.lazy = nil } -// SetLazy sets the type and a value that is to be lazily evaluated upon first use. -// This must not be called concurrently. -func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) { - f.typ = t - f.lazy = &lazyExtensionValue{fn: fn} -} - // Value returns the value of the extension field. // This may be called concurrently. func (f *ExtensionField) Value() protoreflect.Value { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 78ee47e4..7c1f66c8 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si if err != nil { return out, err } + if cf.funcs.isInit == nil { + out.initialized = true + } vi.Set(vw) return out, nil } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 6b2fdbb7..78be9df3 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { if mi.methods.Merge == nil { mi.methods.Merge = mi.merge } + if mi.methods.Equal == nil { + mi.methods.Equal = equal + } } // getUnknownBytes returns a *[]byte for the unknown fields. diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go deleted file mode 100644 index 145c577b..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "reflect" - - "google.golang.org/protobuf/encoding/protowire" -) - -func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { - v := p.v.Elem().Int() - return f.tagsize + protowire.SizeVarint(uint64(v)) -} - -func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - v := p.v.Elem().Int() - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(v)) - return b, nil -} - -func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - p.v.Elem().SetInt(int64(v)) - out.n = n - return out, nil -} - -func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(src.v.Elem()) -} - -var coderEnum = pointerCoderFuncs{ - size: sizeEnum, - marshal: appendEnum, - unmarshal: consumeEnum, - merge: mergeEnum, -} - -func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - if p.v.Elem().Int() == 0 { - return 0 - } - return sizeEnum(p, f, opts) -} - -func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - if p.v.Elem().Int() == 0 { - return b, nil - } - return appendEnum(b, p, f, opts) -} - -func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if src.v.Elem().Int() != 0 { - dst.v.Elem().Set(src.v.Elem()) - } -} - -var coderEnumNoZero = pointerCoderFuncs{ - size: sizeEnumNoZero, - marshal: appendEnumNoZero, - unmarshal: consumeEnum, - merge: mergeEnumNoZero, -} - -func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - return sizeEnum(pointer{p.v.Elem()}, f, opts) -} - -func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - return appendEnum(b, pointer{p.v.Elem()}, f, opts) -} - -func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - if wtyp != protowire.VarintType { - return out, errUnknown - } - if p.v.Elem().IsNil() { - p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) - } - return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) -} - -func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - if !src.v.Elem().IsNil() { - v := reflect.New(dst.v.Type().Elem().Elem()) - v.Elem().Set(src.v.Elem().Elem()) - dst.v.Elem().Set(v) - } -} - -var coderEnumPtr = pointerCoderFuncs{ - size: sizeEnumPtr, - marshal: appendEnumPtr, - unmarshal: consumeEnumPtr, - merge: mergeEnumPtr, -} - -func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize - } - return size -} - -func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - for i, llen := 0, s.Len(); i < llen; i++ { - b = protowire.AppendVarint(b, f.wiretag) - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { - s := p.v.Elem() - if wtyp == protowire.BytesType { - b, n := protowire.ConsumeBytes(b) - if n 
< 0 { - return out, errDecode - } - for len(b) > 0 { - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - b = b[n:] - } - out.n = n - return out, nil - } - if wtyp != protowire.VarintType { - return out, errUnknown - } - v, n := protowire.ConsumeVarint(b) - if n < 0 { - return out, errDecode - } - rv := reflect.New(s.Type().Elem()).Elem() - rv.SetInt(int64(v)) - s.Set(reflect.Append(s, rv)) - out.n = n - return out, nil -} - -func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { - dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) -} - -var coderEnumSlice = pointerCoderFuncs{ - size: sizeEnumSlice, - marshal: appendEnumSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} - -func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return 0 - } - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - return f.tagsize + protowire.SizeBytes(n) -} - -func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - s := p.v.Elem() - llen := s.Len() - if llen == 0 { - return b, nil - } - b = protowire.AppendVarint(b, f.wiretag) - n := 0 - for i := 0; i < llen; i++ { - n += protowire.SizeVarint(uint64(s.Index(i).Int())) - } - b = protowire.AppendVarint(b, uint64(n)) - for i := 0; i < llen; i++ { - b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) - } - return b, nil -} - -var coderEnumPackedSlice = pointerCoderFuncs{ - size: sizeEnumPackedSlice, - marshal: appendEnumPackedSlice, - unmarshal: consumeEnumSlice, - merge: mergeEnumSlice, -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go index 757642e2..077712c2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl // When using unsafe pointers, we can just treat enum values as int32s. diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go index e06ece55..f72ddd88 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value { return protoreflect.ValueOfString(v.Convert(stringType).String()) } func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value { - // pref.Value.String never panics, so we go through an interface + // protoreflect.Value.String never panics, so we go through an interface // conversion here to check the type. 
s := v.Interface().(string) if c.goType.Kind() == reflect.Slice && s == "" { diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go index febd2122..6254f5de 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/encode.go +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -10,7 +10,7 @@ import ( "sync/atomic" "google.golang.org/protobuf/internal/flags" - proto "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/proto" piface "google.golang.org/protobuf/runtime/protoiface" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/equal.go b/vendor/google.golang.org/protobuf/internal/impl/equal.go new file mode 100644 index 00000000..9f6c32a7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/equal.go @@ -0,0 +1,224 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "bytes" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func equal(in protoiface.EqualInput) protoiface.EqualOutput { + return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)} +} + +// equalMessage is a fast-path variant of protoreflect.equalMessage. +// It takes advantage of the internal messageState type to avoid +// unnecessary allocations, type assertions. +func equalMessage(mx, my protoreflect.Message) bool { + if mx == nil || my == nil { + return mx == my + } + if mx.Descriptor() != my.Descriptor() { + return false + } + + msx, ok := mx.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + msy, ok := my.(*messageState) + if !ok { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + + mi := msx.messageInfo() + miy := msy.messageInfo() + if mi != miy { + return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my)) + } + mi.init() + // Compares regular fields + // Modified Message.Range code that compares two messages of the same type + // while going over the fields. + for _, ri := range mi.rangeInfos { + var fd protoreflect.FieldDescriptor + var vx, vy protoreflect.Value + + switch ri := ri.(type) { + case *fieldInfo: + hx := ri.has(msx.pointer()) + hy := ri.has(msy.pointer()) + if hx != hy { + return false + } + if !hx { + continue + } + fd = ri.fieldDesc + vx = ri.get(msx.pointer()) + vy = ri.get(msy.pointer()) + case *oneofInfo: + fnx := ri.which(msx.pointer()) + fny := ri.which(msy.pointer()) + if fnx != fny { + return false + } + if fnx <= 0 { + continue + } + fi := mi.fields[fnx] + fd = fi.fieldDesc + vx = fi.get(msx.pointer()) + vy = fi.get(msy.pointer()) + } + + if !equalValue(fd, vx, vy) { + return false + } + } + + // Compare extensions. + // This is more complicated because mx or my could have empty/nil extension maps, + // however some populated extension map values are equal to nil extension maps. 
+ emx := mi.extensionMap(msx.pointer()) + emy := mi.extensionMap(msy.pointer()) + if emx != nil { + for k, x := range *emx { + xd := x.Type().TypeDescriptor() + xv := x.Value() + var y ExtensionField + ok := false + if emy != nil { + y, ok = (*emy)[k] + } + // We need to treat empty lists as equal to nil values + if emy == nil || !ok { + if xd.IsList() && xv.List().Len() == 0 { + continue + } + return false + } + + if !equalValue(xd, xv, y.Value()) { + return false + } + } + } + if emy != nil { + // emy may have extensions emx does not have, need to check them as well + for k, y := range *emy { + if emx != nil { + // emx has the field, so we already checked it + if _, ok := (*emx)[k]; ok { + continue + } + } + // Empty lists are equal to nil + if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 { + continue + } + + // Cant be equal if the extension is populated + return false + } + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool { + // slow path + if fd.Kind() != protoreflect.MessageKind { + return vx.Equal(vy) + } + + // fast path special cases + if fd.IsMap() { + if fd.MapValue().Kind() == protoreflect.MessageKind { + return equalMessageMap(vx.Map(), vy.Map()) + } + return vx.Equal(vy) + } + + if fd.IsList() { + return equalMessageList(vx.List(), vy.List()) + } + + return equalMessage(vx.Message(), vy.Message()) +} + +// Mostly copied from protoreflect.equalMap. +// This variant only works for messages as map types. +// All other map types should be handled via Value.Equal. +func equalMessageMap(mx, my protoreflect.Map) bool { + if mx.Len() != my.Len() { + return false + } + equal := true + mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool { + if !my.Has(k) { + equal = false + return false + } + vy := my.Get(k) + equal = equalMessage(vx.Message(), vy.Message()) + return equal + }) + return equal +} + +// Mostly copied from protoreflect.equalList. +// The only change is the usage of equalImpl instead of protoreflect.equalValue. +func equalMessageList(lx, ly protoreflect.List) bool { + if lx.Len() != ly.Len() { + return false + } + for i := 0; i < lx.Len(); i++ { + // We only operate on messages here since equalImpl will not call us in any other case. + if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) { + return false + } + } + return true +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +// Copied from protoreflect.equalUnknown. +func equalUnknown(x, y protoreflect.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + my := make(map[protoreflect.FieldNumber]protoreflect.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) 
+ y = y[n:] + } + if len(mx) != len(my) { + return false + } + + for k, v1 := range mx { + if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) { + return false + } + } + + return true +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go index 6e8677ee..b6849d66 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -160,6 +160,7 @@ func (x placeholderExtension) HasPresence() bool func (x placeholderExtension) HasOptionalKeyword() bool { return false } func (x placeholderExtension) IsExtension() bool { return true } func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsLazy() bool { return false } func (x placeholderExtension) IsPacked() bool { return false } func (x placeholderExtension) IsList() bool { return false } func (x placeholderExtension) IsMap() bool { return false } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index 019399d4..741b5ed2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -30,8 +30,8 @@ type MessageInfo struct { // Desc is the underlying message descriptor type and must be populated. Desc protoreflect.MessageDescriptor - // Exporter must be provided in a purego environment in order to provide - // access to unexported fields. + // Deprecated: Exporter will be removed the next time we bump + // protoimpl.GenVersion. See https://github.com/golang/protobuf/issues/1640 Exporter exporter // OneofWrappers is list of pointers to oneof wrapper struct types. diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go deleted file mode 100644 index da685e8a..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go +++ /dev/null @@ -1,215 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package impl - -import ( - "fmt" - "reflect" - "sync" -) - -const UnsafeEnabled = false - -// Pointer is an opaque pointer type. -type Pointer any - -// offset represents the offset to a struct field, accessible from a pointer. -// The offset is the field index into a struct. -type offset struct { - index int - export exporter -} - -// offsetOf returns a field offset for the struct field. -func offsetOf(f reflect.StructField, x exporter) offset { - if len(f.Index) != 1 { - panic("embedded structs are not supported") - } - if f.PkgPath == "" { - return offset{index: f.Index[0]} // field is already exported - } - if x == nil { - panic("exporter must be provided for unexported field") - } - return offset{index: f.Index[0], export: x} -} - -// IsValid reports whether the offset is valid. -func (f offset) IsValid() bool { return f.index >= 0 } - -// invalidOffset is an invalid field offset. -var invalidOffset = offset{index: -1} - -// zeroOffset is a noop when calling pointer.Apply. -var zeroOffset = offset{index: 0} - -// pointer is an abstract representation of a pointer to a struct or field. -type pointer struct{ v reflect.Value } - -// pointerOf returns p as a pointer. 
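The new internal/impl/equal.go above, together with the proto/equal.go change further down, lets proto.Equal dispatch to a fast-path Equal exposed through protoiface.Methods whenever both operands provide one, as generated messages now do; observable behaviour is unchanged. A quick check using a well-known type:

    package main

    import (
        "fmt"

        "google.golang.org/protobuf/proto"
        "google.golang.org/protobuf/types/known/timestamppb"
    )

    func main() {
        a := &timestamppb.Timestamp{Seconds: 42}
        b := &timestamppb.Timestamp{Seconds: 42}

        // Both operands carry the fast-path Equal in their Methods struct,
        // so the comparison avoids the generic reflection-based path.
        fmt.Println(proto.Equal(a, b)) // true
    }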
-func pointerOf(p Pointer) pointer { - return pointerOfIface(p) -} - -// pointerOfValue returns v as a pointer. -func pointerOfValue(v reflect.Value) pointer { - return pointer{v: v} -} - -// pointerOfIface returns the pointer portion of an interface. -func pointerOfIface(v any) pointer { - return pointer{v: reflect.ValueOf(v)} -} - -// IsNil reports whether the pointer is nil. -func (p pointer) IsNil() bool { - return p.v.IsNil() -} - -// Apply adds an offset to the pointer to derive a new pointer -// to a specified field. The current pointer must be pointing at a struct. -func (p pointer) Apply(f offset) pointer { - if f.export != nil { - if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { - return pointer{v: v} - } - } - return pointer{v: p.v.Elem().Field(f.index).Addr()} -} - -// AsValueOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) -func (p pointer) AsValueOf(t reflect.Type) reflect.Value { - if got := p.v.Type().Elem(); got != t { - panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) - } - return p.v -} - -// AsIfaceOf treats p as a pointer to an object of type t and returns the value. -// It is equivalent to p.AsValueOf(t).Interface() -func (p pointer) AsIfaceOf(t reflect.Type) any { - return p.AsValueOf(t).Interface() -} - -func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } -func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } -func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } -func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } -func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } -func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } -func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } -func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } -func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } -func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } -func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } -func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } -func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } -func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } -func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } -func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } -func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } -func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } -func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } -func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } -func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } -func (p pointer) String() *string { return p.v.Interface().(*string) } -func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } -func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } -func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } -func (p pointer) BytesPtr() **[]byte { return p.v.Interface().(**[]byte) } -func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } -func (p pointer) Extensions() 
*map[int32]ExtensionField { - return p.v.Interface().(*map[int32]ExtensionField) -} - -func (p pointer) Elem() pointer { - return pointer{v: p.v.Elem()} -} - -// PointerSlice copies []*T from p as a new []pointer. -// This behavior differs from the implementation in pointer_unsafe.go. -func (p pointer) PointerSlice() []pointer { - // TODO: reconsider this - if p.v.IsNil() { - return nil - } - n := p.v.Elem().Len() - s := make([]pointer, n) - for i := 0; i < n; i++ { - s[i] = pointer{v: p.v.Elem().Index(i)} - } - return s -} - -// AppendPointerSlice appends v to p, which must be a []*T. -func (p pointer) AppendPointerSlice(v pointer) { - sp := p.v.Elem() - sp.Set(reflect.Append(sp, v.v)) -} - -// SetPointer sets *p to v. -func (p pointer) SetPointer(v pointer) { - p.v.Elem().Set(v.v) -} - -func growSlice(p pointer, addCap int) { - // TODO: Once we only support Go 1.20 and newer, use reflect.Grow. - in := p.v.Elem() - out := reflect.MakeSlice(in.Type(), in.Len(), in.Len()+addCap) - reflect.Copy(out, in) - p.v.Elem().Set(out) -} - -func (p pointer) growBoolSlice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint32Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growInt64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growUint64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat64Slice(addCap int) { - growSlice(p, addCap) -} - -func (p pointer) growFloat32Slice(addCap int) { - growSlice(p, addCap) -} - -func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } -func (ms *messageState) pointer() pointer { panic("not supported") } -func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } -func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } - -type atomicNilMessage struct { - once sync.Once - m messageReflectWrapper -} - -func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { - m.once.Do(func() { - m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) - m.m.mi = mi - }) - return &m.m -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 5f20ca5d..79e18666 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine -// +build !purego,!appengine - package impl import ( diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go deleted file mode 100644 index a1f6f333..00000000 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build purego || appengine -// +build purego appengine - -package strs - -import pref "google.golang.org/protobuf/reflect/protoreflect" - -func UnsafeString(b []byte) string { - return string(b) -} - -func UnsafeBytes(s string) []byte { - return []byte(s) -} - -type Builder struct{} - -func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { - return prefix.Append(name) -} - -func (*Builder) MakeString(b []byte) string { - return string(b) -} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go index a008acd0..832a7988 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && !go1.21 -// +build !purego,!appengine,!go1.21 +//go:build !go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go index 60166f2b..1ffddf68 100644 --- a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe_go121.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !purego && !appengine && go1.21 -// +build !purego,!appengine,go1.21 +//go:build go1.21 package strs diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index dbbf1f68..fb8e15e8 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -51,8 +51,8 @@ import ( // 10. Send out the CL for review and submit it. const ( Major = 1 - Minor = 34 - Patch = 2 + Minor = 35 + Patch = 1 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go index 1a0be1b0..c36d4a9c 100644 --- a/vendor/google.golang.org/protobuf/proto/equal.go +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -8,6 +8,7 @@ import ( "reflect" "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" ) // Equal reports whether two messages are equal, @@ -51,6 +52,14 @@ func Equal(x, y Message) bool { if mx.IsValid() != my.IsValid() { return false } + + // Only one of the messages needs to implement the fast-path for it to work. + pmx := protoMethods(mx) + pmy := protoMethods(my) + if pmx != nil && pmy != nil && pmx.Equal != nil && pmy.Equal != nil { + return pmx.Equal(protoiface.EqualInput{MessageA: mx, MessageB: my}).Equal + } + vx := protoreflect.ValueOfMessage(mx) vy := protoreflect.ValueOfMessage(my) return vx.Equal(vy) diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go index d248f292..78445d11 100644 --- a/vendor/google.golang.org/protobuf/proto/extension.go +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -39,6 +39,48 @@ func ClearExtension(m Message, xt protoreflect.ExtensionType) { // If the field is unpopulated, it returns the default value for // scalars and an immutable, empty value for lists or messages. 
// It panics if xt does not extend m. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// GetExtension, then the call should be followed immediately by a +// type assertion to the expected output value. For example: +// +// mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage) +// +// This pattern enables static analysis tools to verify that the asserted type +// matches the Go type associated with the extension field and +// also enables a possible future migration to a type-safe extension API. +// +// Since singular messages are the most common extension type, the pattern of +// calling HasExtension followed by GetExtension may be simplified to: +// +// if mm := proto.GetExtension(m, foopb.E_MyExtension).(*foopb.MyMessage); mm != nil { +// ... // make use of mm +// } +// +// The mm variable is non-nil if and only if HasExtension reports true. func GetExtension(m Message, xt protoreflect.ExtensionType) any { // Treat nil message interface as an empty message; return the default. if m == nil { @@ -51,6 +93,35 @@ func GetExtension(m Message, xt protoreflect.ExtensionType) any { // SetExtension stores the value of an extension field. // It panics if m is invalid, xt does not extend m, or if type of v // is invalid for the specified extension field. +// +// The type of the value is dependent on the field type of the extension. +// For extensions generated by protoc-gen-go, the Go type is as follows: +// +// ╔═══════════════════╤═════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═══════════════════╪═════════════════════════╣ +// ║ bool │ bool ║ +// ║ int32 │ int32, sint32, sfixed32 ║ +// ║ int64 │ int64, sint64, sfixed64 ║ +// ║ uint32 │ uint32, fixed32 ║ +// ║ uint64 │ uint64, fixed64 ║ +// ║ float32 │ float ║ +// ║ float64 │ double ║ +// ║ string │ string ║ +// ║ []byte │ bytes ║ +// ║ protoreflect.Enum │ enum ║ +// ║ proto.Message │ message, group ║ +// ╚═══════════════════╧═════════════════════════╝ +// +// The protoreflect.Enum and proto.Message types are the concrete Go type +// associated with the named enum or message. Repeated fields are represented +// using a Go slice of the base element type. +// +// If a generated extension descriptor variable is directly passed to +// SetExtension (e.g., foopb.E_MyExtension), then the value should be a +// concrete type that matches the expected Go type for the extension descriptor +// so that static analysis tools can verify type correctness. +// This also enables a possible future migration to a type-safe extension API. 
func SetExtension(m Message, xt protoreflect.ExtensionType, v any) { xd := xt.TypeDescriptor() pv := xt.ValueOf(v) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go index d5d5af6e..742cb518 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -23,6 +23,7 @@ type ( Unmarshal func(unmarshalInput) (unmarshalOutput, error) Merge func(mergeInput) mergeOutput CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + Equal func(equalInput) equalOutput } supportFlags = uint64 sizeInput = struct { @@ -75,4 +76,13 @@ type ( checkInitializedOutput = struct { pragma.NoUnkeyedLiterals } + equalInput = struct { + pragma.NoUnkeyedLiterals + MessageA Message + MessageB Message + } + equalOutput = struct { + pragma.NoUnkeyedLiterals + Equal bool + } ) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go deleted file mode 100644 index 75f83a2a..00000000 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2018 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build purego || appengine -// +build purego appengine - -package protoreflect - -import "google.golang.org/protobuf/internal/pragma" - -type valueType int - -const ( - nilType valueType = iota - boolType - int32Type - int64Type - uint32Type - uint64Type - float32Type - float64Type - stringType - bytesType - enumType - ifaceType -) - -// value is a union where only one type can be represented at a time. -// This uses a distinct field for each type. This is type safe in Go, but -// occupies more memory than necessary (72B). -type value struct { - pragma.DoNotCompare // 0B - - typ valueType // 8B - num uint64 // 8B - str string // 16B - bin []byte // 24B - iface any // 16B -} - -func valueOfString(v string) Value { - return Value{typ: stringType, str: v} -} -func valueOfBytes(v []byte) Value { - return Value{typ: bytesType, bin: v} -} -func valueOfIface(v any) Value { - return Value{typ: ifaceType, iface: v} -} - -func (v Value) getString() string { - return v.str -} -func (v Value) getBytes() []byte { - return v.bin -} -func (v Value) getIface() any { - return v.iface -} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go index 7f3583ea..0015fcb3 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go120.go @@ -2,8 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
 
-//go:build !purego && !appengine && !go1.21
-// +build !purego,!appengine,!go1.21
+//go:build !go1.21
 
 package protoreflect
 
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
index f7d38699..479527b5 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe_go121.go
@@ -2,8 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-//go:build !purego && !appengine && go1.21
-// +build !purego,!appengine,go1.21
+//go:build go1.21
 
 package protoreflect
 
diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
index 44cf467d..24615656 100644
--- a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
+++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go
@@ -39,6 +39,9 @@ type Methods = struct {
 
 	// CheckInitialized returns an error if any required fields in the message are not set.
 	CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error)
+
+	// Equal compares two messages and returns EqualOutput.Equal == true if they are equal.
+	Equal func(EqualInput) EqualOutput
 }
 
 // SupportFlags indicate support for optional features.
@@ -166,3 +169,18 @@ type CheckInitializedInput = struct {
 type CheckInitializedOutput = struct {
 	pragma.NoUnkeyedLiterals
 }
+
+// EqualInput is input to the Equal method.
+type EqualInput = struct {
+	pragma.NoUnkeyedLiterals
+
+	MessageA protoreflect.Message
+	MessageB protoreflect.Message
+}
+
+// EqualOutput is output from the Equal method.
+type EqualOutput = struct {
+	pragma.NoUnkeyedLiterals
+
+	Equal bool
+}
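The Equal entries added to protoreflect and protoiface above form an optional fast path for message comparison. A minimal sketch of how that plumbing can be probed is shown below, assuming this vendored protobuf release (v1.35.x); ordinary application code should simply call proto.Equal, which can take advantage of the fast path internally.

package example

import (
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/runtime/protoiface"
)

// messagesEqual prefers the optional Equal fast path exposed through
// ProtoMethods and falls back to the generic reflection-based proto.Equal.
func messagesEqual(a, b proto.Message) bool {
	ma, mb := a.ProtoReflect(), b.ProtoReflect()
	// ProtoMethods returns a value identical to *protoiface.Methods; both it
	// and its Equal field may be nil when no fast path is implemented.
	if methods := ma.ProtoMethods(); methods != nil && methods.Equal != nil {
		out := methods.Equal(protoiface.EqualInput{MessageA: ma, MessageB: mb})
		return out.Equal
	}
	return proto.Equal(a, b)
}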
diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
index 83a5a645..0d20722d 100644
--- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go
@@ -254,11 +254,9 @@ func (x *Timestamp) check() uint {
 
 func (x *Timestamp) Reset() {
 	*x = Timestamp{}
-	if protoimpl.UnsafeEnabled {
-		mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
-		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
-		ms.StoreMessageInfo(mi)
-	}
+	mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
 }
 
 func (x *Timestamp) String() string {
@@ -269,7 +267,7 @@ func (*Timestamp) ProtoMessage() {}
 
 func (x *Timestamp) ProtoReflect() protoreflect.Message {
 	mi := &file_google_protobuf_timestamp_proto_msgTypes[0]
-	if protoimpl.UnsafeEnabled && x != nil {
+	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
 			ms.StoreMessageInfo(mi)
@@ -348,20 +346,6 @@ func file_google_protobuf_timestamp_proto_init() {
 	if File_google_protobuf_timestamp_proto != nil {
 		return
 	}
-	if !protoimpl.UnsafeEnabled {
-		file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v any, i int) any {
-			switch v := v.(*Timestamp); i {
-			case 0:
-				return &v.state
-			case 1:
-				return &v.sizeCache
-			case 2:
-				return &v.unknownFields
-			default:
-				return nil
-			}
-		}
-	}
 	type x struct{}
 	out := protoimpl.TypeBuilder{
 		File: protoimpl.DescBuilder{
diff --git a/vendor/modules.txt b/vendor/modules.txt
index b7461958..b4966dc5 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,5 +1,5 @@
-# github.com/99designs/gqlgen v0.17.49
-## explicit; go 1.20
+# github.com/99designs/gqlgen v0.17.55
+## explicit; go 1.22.5
 github.com/99designs/gqlgen
 github.com/99designs/gqlgen/api
 github.com/99designs/gqlgen/codegen
@@ -27,14 +27,14 @@ github.com/99designs/gqlgen/plugin/servergen
 # github.com/KyleBanks/depth v1.2.1
 ## explicit
 github.com/KyleBanks/depth
-# github.com/Masterminds/semver/v3 v3.2.1
-## explicit; go 1.18
+# github.com/Masterminds/semver/v3 v3.3.0
+## explicit; go 1.21
 github.com/Masterminds/semver/v3
-# github.com/adhocore/gronx v1.19.0
+# github.com/adhocore/gronx v1.19.1
 ## explicit; go 1.13
 github.com/adhocore/gronx
-# github.com/agnivade/levenshtein v1.1.1
-## explicit; go 1.13
+# github.com/agnivade/levenshtein v1.2.0
+## explicit; go 1.21
 github.com/agnivade/levenshtein
 # github.com/andybalholm/brotli v1.1.0
 ## explicit; go 1.13
@@ -55,9 +55,10 @@ github.com/beorn7/perks/quantile
 # github.com/boltdb/bolt v1.3.1
 ## explicit
 github.com/boltdb/bolt
-# github.com/caddyserver/certmagic v0.21.3
+# github.com/caddyserver/certmagic v0.21.4
 ## explicit; go 1.21.0
 github.com/caddyserver/certmagic
+github.com/caddyserver/certmagic/internal/atomicfile
 # github.com/caddyserver/zerossl v0.1.3
 ## explicit; go 1.21.0
 github.com/caddyserver/zerossl
@@ -158,7 +159,7 @@ github.com/go-playground/locales/currency
 # github.com/go-playground/universal-translator v0.18.1
 ## explicit; go 1.18
 github.com/go-playground/universal-translator
-# github.com/go-playground/validator/v10 v10.22.0
+# github.com/go-playground/validator/v10 v10.22.1
 ## explicit; go 1.18
 github.com/go-playground/validator/v10
 # github.com/gobwas/glob v0.2.3
@@ -219,7 +220,7 @@ github.com/hashicorp/golang-lru/simplelru
 github.com/hashicorp/golang-lru/v2
 github.com/hashicorp/golang-lru/v2/internal
 github.com/hashicorp/golang-lru/v2/simplelru
-# github.com/hashicorp/raft v1.7.0
+# github.com/hashicorp/raft v1.7.1
 ## explicit; go 1.20
 github.com/hashicorp/raft
 # github.com/hashicorp/raft-boltdb/v2 v2.3.0
@@ -238,8 +239,8 @@ github.com/joho/godotenv/autoload
 # github.com/josharian/intern v1.0.0
 ## explicit; go 1.5
 github.com/josharian/intern
-# github.com/klauspost/compress v1.17.9
-## explicit; go 1.20
+# github.com/klauspost/compress v1.17.10
+## explicit; go 1.21
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
@@ -267,17 +268,16 @@ github.com/labstack/gommon/log
 ## explicit; go 1.18
 github.com/leodido/go-urn
 github.com/leodido/go-urn/scim/schema
-# github.com/lestrrat-go/strftime v1.0.6
-## explicit; go 1.13
+# github.com/lestrrat-go/strftime v1.1.0
+## explicit; go 1.21
 github.com/lestrrat-go/strftime
-github.com/lestrrat-go/strftime/internal/errors
 # github.com/libdns/libdns v0.2.2
 ## explicit; go 1.18
 github.com/libdns/libdns
 # github.com/lithammer/shortuuid/v4 v4.0.0
 ## explicit; go 1.13
 github.com/lithammer/shortuuid/v4
-# github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae
+# github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683
 ## explicit; go 1.16
 github.com/lufia/plan9stats
 # github.com/mailru/easyjson v0.7.7
@@ -291,8 +291,8 @@ github.com/mattn/go-colorable
 # github.com/mattn/go-isatty v0.0.20
 ## explicit; go 1.15
 github.com/mattn/go-isatty
-# github.com/mholt/acmez/v2 v2.0.2
-## explicit; go 1.20
+# github.com/mholt/acmez/v2 v2.0.3
+## explicit; go 1.21.0
 github.com/mholt/acmez/v2
 github.com/mholt/acmez/v2/acme
 # github.com/miekg/dns v1.1.62
@@ -301,7 +301,7 @@ github.com/miekg/dns
 # github.com/minio/md5-simd v1.1.2
 ## explicit; go 1.14
 github.com/minio/md5-simd
-# github.com/minio/minio-go/v7 v7.0.75
+# github.com/minio/minio-go/v7 v7.0.77
 ## explicit; go 1.21
 github.com/minio/minio-go/v7
 github.com/minio/minio-go/v7/pkg/cors
@@ -321,9 +321,6 @@ github.com/mitchellh/mapstructure
 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 ## explicit
 github.com/munnerz/goautoneg
-# github.com/pkg/errors v0.9.1
-## explicit
-github.com/pkg/errors
 # github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
 ## explicit
 github.com/pmezard/go-difflib/difflib
@@ -333,7 +330,7 @@ github.com/power-devops/perfstat
 # github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
 ## explicit
 github.com/prep/average
-# github.com/prometheus/client_golang v1.20.0
+# github.com/prometheus/client_golang v1.20.4
 ## explicit; go 1.20
 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil
 github.com/prometheus/client_golang/internal/github.com/golang/gddo/httputil/header
@@ -343,8 +340,8 @@ github.com/prometheus/client_golang/prometheus/promhttp
 # github.com/prometheus/client_model v0.6.1
 ## explicit; go 1.19
 github.com/prometheus/client_model/go
-# github.com/prometheus/common v0.55.0
-## explicit; go 1.20
+# github.com/prometheus/common v0.60.0
+## explicit; go 1.21
 github.com/prometheus/common/expfmt
 github.com/prometheus/common/model
 # github.com/prometheus/procfs v0.15.1
@@ -355,8 +352,8 @@ github.com/prometheus/procfs/internal/util
 # github.com/puzpuzpuz/xsync/v3 v3.4.0
 ## explicit; go 1.18
 github.com/puzpuzpuz/xsync/v3
-# github.com/rs/xid v1.5.0
-## explicit; go 1.12
+# github.com/rs/xid v1.6.0
+## explicit; go 1.16
 github.com/rs/xid
 # github.com/russross/blackfriday/v2 v2.1.0
 ## explicit
@@ -392,10 +389,10 @@ github.com/swaggo/swag
 # github.com/tklauser/go-sysconf v0.3.14
 ## explicit; go 1.18
 github.com/tklauser/go-sysconf
-# github.com/tklauser/numcpus v0.8.0
+# github.com/tklauser/numcpus v0.9.0
 ## explicit; go 1.18
 github.com/tklauser/numcpus
-# github.com/urfave/cli/v2 v2.27.2
+# github.com/urfave/cli/v2 v2.27.4
 ## explicit; go 1.18
 github.com/urfave/cli/v2
 # github.com/valyala/bytebufferpool v1.0.0
@@ -404,7 +401,7 @@ github.com/valyala/bytebufferpool
 # github.com/valyala/fasttemplate v1.2.2
 ## explicit; go 1.12
 github.com/valyala/fasttemplate
-# github.com/vektah/gqlparser/v2 v2.5.16
+# github.com/vektah/gqlparser/v2 v2.5.17
 ## explicit; go 1.19
 github.com/vektah/gqlparser/v2
 github.com/vektah/gqlparser/v2/ast
@@ -422,8 +419,8 @@ github.com/xeipuuv/gojsonreference
 # github.com/xeipuuv/gojsonschema v1.2.0
 ## explicit
 github.com/xeipuuv/gojsonschema
-# github.com/xrash/smetrics v0.0.0-20240312152122-5f08fbb34913
-## explicit; go 1.15.0
+# github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1
+## explicit; go 1.15
 github.com/xrash/smetrics
 # github.com/yusufpapurcu/wmi v1.2.4
 ## explicit; go 1.16
@@ -440,11 +437,11 @@ github.com/zeebo/blake3/internal/alg/hash/hash_avx2
 github.com/zeebo/blake3/internal/alg/hash/hash_pure
 github.com/zeebo/blake3/internal/consts
 github.com/zeebo/blake3/internal/utils
-# go.etcd.io/bbolt v1.3.10
-## explicit; go 1.21
+# go.etcd.io/bbolt v1.3.11
+## explicit; go 1.22
 go.etcd.io/bbolt
-# go.uber.org/automaxprocs v1.5.3
-## explicit; go 1.18
+# go.uber.org/automaxprocs v1.6.0
+## explicit; go 1.20
 go.uber.org/automaxprocs/internal/cgroups
 go.uber.org/automaxprocs/internal/runtime
 go.uber.org/automaxprocs/maxprocs
@@ -462,7 +459,7 @@ go.uber.org/zap/internal/exit
 go.uber.org/zap/internal/pool
 go.uber.org/zap/internal/stacktrace
 go.uber.org/zap/zapcore
-# golang.org/x/crypto v0.26.0
+# golang.org/x/crypto v0.28.0
 ## explicit; go 1.20
 golang.org/x/crypto/acme
 golang.org/x/crypto/acme/autocert
@@ -474,12 +471,12 @@ golang.org/x/crypto/ocsp
 golang.org/x/crypto/pbkdf2
 golang.org/x/crypto/scrypt
 golang.org/x/crypto/sha3
-# golang.org/x/mod v0.20.0
-## explicit; go 1.18
+# golang.org/x/mod v0.21.0
+## explicit; go 1.22.0
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.28.0
+# golang.org/x/net v0.30.0
 ## explicit; go 1.18
 golang.org/x/net/bpf
 golang.org/x/net/html
@@ -497,13 +494,13 @@ golang.org/x/net/publicsuffix
 # golang.org/x/sync v0.8.0
 ## explicit; go 1.18
 golang.org/x/sync/errgroup
-# golang.org/x/sys v0.24.0
+# golang.org/x/sys v0.26.0
 ## explicit; go 1.18
 golang.org/x/sys/cpu
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-# golang.org/x/text v0.17.0
+# golang.org/x/text v0.19.0
 ## explicit; go 1.18
 golang.org/x/text/cases
 golang.org/x/text/internal
@@ -515,11 +512,11 @@ golang.org/x/text/secure/bidirule
 golang.org/x/text/transform
 golang.org/x/text/unicode/bidi
 golang.org/x/text/unicode/norm
-# golang.org/x/time v0.6.0
+# golang.org/x/time v0.7.0
 ## explicit; go 1.18
 golang.org/x/time/rate
-# golang.org/x/tools v0.24.0
-## explicit; go 1.19
+# golang.org/x/tools v0.26.0
+## explicit; go 1.22.0
 golang.org/x/tools/go/ast/astutil
 golang.org/x/tools/go/buildutil
 golang.org/x/tools/go/gcexportdata
@@ -527,6 +524,7 @@ golang.org/x/tools/go/internal/cgo
 golang.org/x/tools/go/loader
 golang.org/x/tools/go/packages
 golang.org/x/tools/go/types/objectpath
+golang.org/x/tools/go/types/typeutil
 golang.org/x/tools/imports
 golang.org/x/tools/internal/aliases
 golang.org/x/tools/internal/event
@@ -540,11 +538,11 @@ golang.org/x/tools/internal/imports
 golang.org/x/tools/internal/packagesinternal
 golang.org/x/tools/internal/pkgbits
 golang.org/x/tools/internal/stdlib
-golang.org/x/tools/internal/tokeninternal
+golang.org/x/tools/internal/typeparams
 golang.org/x/tools/internal/typesinternal
 golang.org/x/tools/internal/versions
-# google.golang.org/protobuf v1.34.2
-## explicit; go 1.20
+# google.golang.org/protobuf v1.35.1
+## explicit; go 1.21
 google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/prototext
 google.golang.org/protobuf/encoding/protowire