chore: upgrade coredns version (#550)

This commit is contained in:
naison
2025-04-19 10:06:56 +08:00
committed by GitHub
parent c42e3475f9
commit c9f1ce6522
1701 changed files with 235209 additions and 29271 deletions

152
go.mod
View File

@@ -4,22 +4,23 @@ go 1.23.2
require (
github.com/cilium/ipam v0.0.0-20230509084518-fd66eae7909b
github.com/containerd/containerd v1.7.14
github.com/containerd/containerd v1.7.27
github.com/containernetworking/cni v1.1.2
github.com/coredns/caddy v1.1.1
github.com/coredns/coredns v1.11.2
github.com/coredns/caddy v1.1.2-0.20241029205200-8de985351a98
github.com/coredns/coredns v1.12.1
github.com/distribution/reference v0.6.0
github.com/docker/cli v27.5.1+incompatible
github.com/docker/docker v27.5.1+incompatible
github.com/docker/go-connections v0.5.0
github.com/docker/go-units v0.5.0
github.com/docker/libcontainer v2.2.1+incompatible
github.com/envoyproxy/go-control-plane v0.13.1
github.com/envoyproxy/go-control-plane v0.13.4
github.com/envoyproxy/go-control-plane/envoy v1.32.4
github.com/fsnotify/fsnotify v1.8.0
github.com/gliderlabs/ssh v0.3.8
github.com/google/gopacket v1.1.19
github.com/google/uuid v1.6.0
github.com/hashicorp/go-version v1.6.0
github.com/hashicorp/go-version v1.7.0
github.com/hpcloud/tail v1.0.0
github.com/jcmturner/gofork v1.7.6
github.com/jcmturner/gokrb5/v8 v8.4.4
@@ -27,7 +28,7 @@ require (
github.com/libp2p/go-netroute v0.2.1
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de
github.com/mattbaird/jsonpatch v0.0.0-20240118010651-0ba75a80ca38
github.com/miekg/dns v1.1.58
github.com/miekg/dns v1.1.64
github.com/moby/term v0.5.2
github.com/opencontainers/image-spec v1.1.1
github.com/pkg/errors v0.9.1
@@ -40,16 +41,16 @@ require (
github.com/syncthing/syncthing v1.29.2
github.com/thejerf/suture/v4 v4.0.6
go.uber.org/automaxprocs v1.6.0
golang.org/x/crypto v0.36.0
golang.org/x/net v0.38.0
golang.org/x/crypto v0.37.0
golang.org/x/net v0.39.0
golang.org/x/oauth2 v0.28.0
golang.org/x/sys v0.31.0
golang.org/x/term v0.30.0
golang.org/x/text v0.23.0
golang.org/x/sys v0.32.0
golang.org/x/term v0.31.0
golang.org/x/text v0.24.0
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
golang.zx2c4.com/wireguard v0.0.0-20220920152132-bb719d3a6e2c
golang.zx2c4.com/wireguard/windows v0.5.3
google.golang.org/grpc v1.69.4
google.golang.org/grpc v1.71.1
google.golang.org/protobuf v1.36.6
gopkg.in/natefinch/lumberjack.v2 v2.2.1
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987
@@ -69,18 +70,19 @@ require (
)
require (
cel.dev/expr v0.18.0 // indirect
cloud.google.com/go/auth v0.9.5 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
cloud.google.com/go/compute/metadata v0.5.2 // indirect
cel.dev/expr v0.19.1 // indirect
cloud.google.com/go/auth v0.15.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
cloud.google.com/go/compute/metadata v0.6.0 // indirect
dario.cat/mergo v1.0.1 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
github.com/Azure/go-autorest/autorest v0.11.30 // indirect
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 // indirect
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 // indirect
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
@@ -88,27 +90,46 @@ require (
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c // indirect
github.com/DataDog/appsec-internal-go v1.5.0 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.0 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.0 // indirect
github.com/DataDog/appsec-internal-go v1.9.0 // indirect
github.com/DataDog/datadog-agent/pkg/obfuscate v0.58.0 // indirect
github.com/DataDog/datadog-agent/pkg/proto v0.58.0 // indirect
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.58.0 // indirect
github.com/DataDog/datadog-agent/pkg/trace v0.58.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/log v0.58.0 // indirect
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.58.0 // indirect
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
github.com/DataDog/go-libddwaf/v2 v2.4.2 // indirect
github.com/DataDog/go-sqllexer v0.0.11 // indirect
github.com/DataDog/go-libddwaf/v3 v3.5.1 // indirect
github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20241206090539-a14610dc22b6 // indirect
github.com/DataDog/go-sqllexer v0.0.14 // indirect
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
github.com/DataDog/sketches-go v1.4.4 // indirect
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 // indirect
github.com/DataDog/sketches-go v1.4.5 // indirect
github.com/MakeNowJust/heredoc v1.0.0 // indirect
github.com/Masterminds/goutils v1.1.1 // indirect
github.com/Masterminds/semver/v3 v3.3.0 // indirect
github.com/Masterminds/sprig/v3 v3.3.0 // indirect
github.com/Masterminds/squirrel v1.5.4 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/Microsoft/hcsshim v0.12.2 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/Microsoft/hcsshim v0.12.9 // indirect
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa // indirect
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
github.com/antonmedv/expr v1.15.5 // indirect
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/aws/aws-sdk-go v1.55.5 // indirect
github.com/aws/aws-sdk-go v1.55.6 // indirect
github.com/aws/aws-sdk-go-v2 v1.36.3 // indirect
github.com/aws/aws-sdk-go-v2/config v1.29.9 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.62 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.2 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 // indirect
github.com/aws/smithy-go v1.22.2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bits-and-blooms/bitset v1.13.0 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
@@ -116,11 +137,12 @@ require (
github.com/calmh/xdr v1.2.0 // indirect
github.com/ccding/go-stun v0.1.5 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/chai2010/gettext-go v1.0.2 // indirect
github.com/chmduquesne/rollinghash v4.0.0+incompatible // indirect
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 // indirect
github.com/cilium/ebpf v0.16.0 // indirect
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 // indirect
@@ -135,15 +157,19 @@ require (
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 // indirect
github.com/docker/go-metrics v0.0.1 // indirect
github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 // indirect
github.com/ebitengine/purego v0.8.2 // indirect
github.com/emicklei/go-restful/v3 v3.12.2 // indirect
github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/evanphx/json-patch v5.9.11+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
github.com/expr-lang/expr v1.17.2 // indirect
github.com/farsightsec/golang-framestream v0.3.0 // indirect
github.com/fatih/camelcase v1.0.0 // indirect
github.com/fatih/color v1.18.0 // indirect
@@ -168,8 +194,8 @@ require (
github.com/gobwas/glob v0.2.3 // indirect
github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang-jwt/jwt/v4 v4.5.0 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/btree v1.1.3 // indirect
@@ -178,10 +204,10 @@ require (
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806 // indirect
github.com/google/pprof v0.0.0-20250202011525-fc3143867406 // indirect
github.com/google/s2a-go v0.1.8 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
github.com/googleapis/gax-go/v2 v2.13.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/gax-go/v2 v2.14.1 // indirect
github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/gosuri/uitable v0.0.4 // indirect
@@ -191,6 +217,9 @@ require (
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect
github.com/hashicorp/go-sockaddr v1.0.2 // indirect
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
github.com/hdevalence/ed25519consensus v0.2.0 // indirect
@@ -230,13 +259,14 @@ require (
github.com/mitchellh/copystructure v1.2.0 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/spdystream v0.5.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect
github.com/moby/sys/symlink v0.2.0 // indirect
github.com/moby/sys/user v0.1.0 // indirect
github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
@@ -249,12 +279,12 @@ require (
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 // indirect
github.com/openzipkin/zipkin-go v0.4.2 // indirect
github.com/openzipkin/zipkin-go v0.4.3 // indirect
github.com/oschwald/geoip2-golang v1.11.0 // indirect
github.com/oschwald/maxminddb-golang v1.13.1 // indirect
github.com/outcaste-io/ristretto v0.2.3 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/philhofer/fwd v1.1.2 // indirect
github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 // indirect
github.com/pierrec/lz4/v4 v4.1.22 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
@@ -263,13 +293,16 @@ require (
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.63.0 // indirect
github.com/prometheus/procfs v0.16.0 // indirect
github.com/quic-go/quic-go v0.49.0 // indirect
github.com/quic-go/quic-go v0.50.1 // indirect
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rubenv/sql-migrate v1.7.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
github.com/shirou/gopsutil/v3 v3.24.4 // indirect
github.com/shirou/gopsutil/v4 v4.25.1 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
github.com/stretchr/objx v0.5.2 // indirect
@@ -278,7 +311,7 @@ require (
github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect
github.com/tailscale/netlink v1.1.1-0.20240822203006-4d49adab4de7 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tinylib/msgp v1.1.9 // indirect
github.com/tinylib/msgp v1.2.1 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.9.0 // indirect
github.com/ulikunitz/xz v0.5.12 // indirect
@@ -290,20 +323,24 @@ require (
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/etcd/api/v3 v3.5.17 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.17 // indirect
go.etcd.io/etcd/client/v3 v3.5.17 // indirect
go.opencensus.io v0.24.0 // indirect
go.etcd.io/etcd/api/v3 v3.5.20 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.20 // indirect
go.etcd.io/etcd/client/v3 v3.5.20 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect
go.opentelemetry.io/otel v1.34.0 // indirect
go.opentelemetry.io/collector/component v0.104.0 // indirect
go.opentelemetry.io/collector/config/configtelemetry v0.104.0 // indirect
go.opentelemetry.io/collector/pdata v1.11.0 // indirect
go.opentelemetry.io/collector/pdata/pprofile v0.104.0 // indirect
go.opentelemetry.io/collector/semconv v0.104.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 // indirect
go.opentelemetry.io/otel v1.35.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.34.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.32.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.32.0 // indirect
go.opentelemetry.io/otel/metric v1.34.0 // indirect
go.opentelemetry.io/otel/sdk v1.34.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
go.opentelemetry.io/otel/metric v1.35.0 // indirect
go.opentelemetry.io/otel/sdk v1.35.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
go.opentelemetry.io/otel/trace v1.35.0 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
go.uber.org/atomic v1.11.0 // indirect
go.uber.org/mock v0.5.0 // indirect
@@ -313,18 +350,19 @@ require (
go4.org/netipx v0.0.0-20231129151722-fdeea329fbba // indirect
golang.org/x/exp v0.0.0-20250207012021-f9890c6ad9f3 // indirect
golang.org/x/mod v0.23.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/time v0.11.0 // indirect
golang.org/x/tools v0.30.0 // indirect
golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
google.golang.org/api v0.199.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect
gopkg.in/DataDog/dd-trace-go.v1 v1.62.0 // indirect
google.golang.org/api v0.227.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250409194420-de1ac958c67a // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e // indirect
gopkg.in/DataDog/dd-trace-go.v1 v1.72.2 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/fsnotify.v1 v1.4.7 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

426
go.sum
View File

@@ -1,34 +1,32 @@
cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo=
cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go/auth v0.9.5 h1:4CTn43Eynw40aFVr3GpPqsQponx2jv0BQpjvajsbbzw=
cloud.google.com/go/auth v0.9.5/go.mod h1:Xo0n7n66eHyOWWCnitop6870Ilwo3PiZyodVkkH1xWM=
cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY=
cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc=
cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4=
cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps=
cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8=
cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M=
cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc=
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU=
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc=
github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw=
github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs=
github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA=
github.com/Azure/go-autorest/autorest v0.11.30 h1:iaZ1RGz/ALZtN5eq4Nr1SOFSlf2E4pDI3Tcsl+dZPVE=
github.com/Azure/go-autorest/autorest v0.11.30/go.mod h1:t1kpPIOpIVX7annvothKvb0stsrXa37i7b+xpmBW8Fs=
github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk=
github.com/Azure/go-autorest/autorest/adal v0.9.23 h1:Yepx8CvFxwNKpH6ja7RZ+sKX+DWYNldbLiALMC3BTz8=
github.com/Azure/go-autorest/autorest/adal v0.9.23/go.mod h1:5pcMqFkdPhviJdlEy3kC/v1ZLnQl0MH6XA5YCcMhy4c=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12 h1:wkAZRgT/pn8HhFyzfe9UnqOjJYqlembgCTi72Bm/xKk=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.12/go.mod h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM=
github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc=
github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0=
github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw=
@@ -49,24 +47,36 @@ github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c h1:pxW6RcqyfI9/k
github.com/BurntSushi/toml v1.4.1-0.20240526193622-a339e1f7089c/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU=
github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU=
github.com/DataDog/appsec-internal-go v1.5.0 h1:8kS5zSx5T49uZ8dZTdT19QVAvC/B8ByyZdhQKYQWHno=
github.com/DataDog/appsec-internal-go v1.5.0/go.mod h1:pEp8gjfNLtEOmz+iZqC8bXhu0h4k7NUsW/qiQb34k1U=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.0 h1:J2iRNRgtKsLq3L55NJzZMQqTqbm8+ps8iKCwjkCph9E=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.0/go.mod h1:AVPQWekk3h9AOC7+plBlNB68Sy6UIGFoMMVUDeSoNoI=
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.0 h1:AoE7kw6PRbhVTER+Y6mROgy/5H9nwy58Ta9FHWiIOro=
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.0/go.mod h1:JhAilx32dkIgoDkFXquCTfaWDsAOfe+vfBaxbiZoPI0=
github.com/DataDog/appsec-internal-go v1.9.0 h1:cGOneFsg0JTRzWl5U2+og5dbtyW3N8XaYwc5nXe39Vw=
github.com/DataDog/appsec-internal-go v1.9.0/go.mod h1:wW0cRfWBo4C044jHGwYiyh5moQV2x0AhnwqMuiX7O/g=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.58.0 h1:nOrRNCHyriM/EjptMrttFOQhRSmvfagESdpyknb5VPg=
github.com/DataDog/datadog-agent/pkg/obfuscate v0.58.0/go.mod h1:MfDvphBMmEMwE3a30h27AtPO7OzmvdoVTiGY1alEmo4=
github.com/DataDog/datadog-agent/pkg/proto v0.58.0 h1:JX2Q0C5QnKcYqnYHWUcP0z7R0WB8iiQz3aWn+kT5DEc=
github.com/DataDog/datadog-agent/pkg/proto v0.58.0/go.mod h1:0wLYojGxRZZFQ+SBbFjay9Igg0zbP88l03TfZaVZ6Dc=
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.58.0 h1:5hGO0Z8ih0bRojuq+1ZwLFtdgsfO3TqIjbwJAH12sOQ=
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.58.0/go.mod h1:jN5BsZI+VilHJV1Wac/efGxS4TPtXa1Lh9SiUyv93F4=
github.com/DataDog/datadog-agent/pkg/trace v0.58.0 h1:4AjohoBWWN0nNaeD/0SDZ8lRTYmnJ48CqREevUfSets=
github.com/DataDog/datadog-agent/pkg/trace v0.58.0/go.mod h1:MFnhDW22V5M78MxR7nv7abWaGc/B4L42uHH1KcIKxZs=
github.com/DataDog/datadog-agent/pkg/util/log v0.58.0 h1:2MENBnHNw2Vx/ebKRyOPMqvzWOUps2Ol2o/j8uMvN4U=
github.com/DataDog/datadog-agent/pkg/util/log v0.58.0/go.mod h1:1KdlfcwhqtYHS1szAunsgSfvgoiVsf3mAJc+WvNTnIE=
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.58.0 h1:Jkf91q3tuIer4Hv9CLJIYjlmcelAsoJRMmkHyz+p1Dc=
github.com/DataDog/datadog-agent/pkg/util/scrubber v0.58.0/go.mod h1:krOxbYZc4KKE7bdEDu10lLSQBjdeSFS/XDSclsaSf1Y=
github.com/DataDog/datadog-go/v5 v5.5.0 h1:G5KHeB8pWBNXT4Jtw0zAkhdxEAWSpWH00geHI6LDrKU=
github.com/DataDog/datadog-go/v5 v5.5.0/go.mod h1:K9kcYBlxkcPP8tvvjZZKs/m1edNAUFzBbdpTUKfCsuw=
github.com/DataDog/go-libddwaf/v2 v2.4.2 h1:ilquGKUmN9/Ty0sIxiEyznVRxP3hKfmH15Y1SMq5gjA=
github.com/DataDog/go-libddwaf/v2 v2.4.2/go.mod h1:gsCdoijYQfj8ce/T2bEDNPZFIYnmHluAgVDpuQOWMZE=
github.com/DataDog/go-sqllexer v0.0.11 h1:OfPBjmayreblOXreszbrOTICNZ3qWrA6Bg4sypvxpbw=
github.com/DataDog/go-sqllexer v0.0.11/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
github.com/DataDog/go-libddwaf/v3 v3.5.1 h1:GWA4ln4DlLxiXm+X7HA/oj0ZLcdCwOS81KQitegRTyY=
github.com/DataDog/go-libddwaf/v3 v3.5.1/go.mod h1:n98d9nZ1gzenRSk53wz8l6d34ikxS+hs62A31Fqmyi4=
github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20241206090539-a14610dc22b6 h1:bpitH5JbjBhfcTG+H2RkkiUXpYa8xSuIPnyNtTaSPog=
github.com/DataDog/go-runtime-metrics-internal v0.0.4-0.20241206090539-a14610dc22b6/go.mod h1:quaQJ+wPN41xEC458FCpTwyROZm3MzmTZ8q8XOXQiPs=
github.com/DataDog/go-sqllexer v0.0.14 h1:xUQh2tLr/95LGxDzLmttLgTo/1gzFeOyuwrQa/Iig4Q=
github.com/DataDog/go-sqllexer v0.0.14/go.mod h1:KwkYhpFEVIq+BfobkTC1vfqm4gTi65skV/DpDBXtexc=
github.com/DataDog/go-tuf v1.1.0-0.5.2 h1:4CagiIekonLSfL8GMHRHcHudo1fQnxELS9g4tiAupQ4=
github.com/DataDog/go-tuf v1.1.0-0.5.2/go.mod h1:zBcq6f654iVqmkk8n2Cx81E1JnNTMOAx1UEO/wZR+P0=
github.com/DataDog/gostackparse v0.7.0 h1:i7dLkXHvYzHV308hnkvVGDL3BR4FWl7IsXNPz/IGQh4=
github.com/DataDog/gostackparse v0.7.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM=
github.com/DataDog/sketches-go v1.4.4 h1:dF52vzXRFSPOj2IjXSWLvXq3jubL4CI69kwYjJ1w5Z8=
github.com/DataDog/sketches-go v1.4.4/go.mod h1:XR0ns2RtEEF09mDKXiKZiQg+nfZStrq1ZuL1eezeZe0=
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0 h1:fKv05WFWHCXQmUTehW1eEZvXJP65Qv00W4V01B1EqSA=
github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes v0.20.0/go.mod h1:dvIWN9pA2zWNTw5rhDWZgzZnhcfpH++d+8d1SWW6xkY=
github.com/DataDog/sketches-go v1.4.5 h1:ki7VfeNz7IcNafq7yI/j5U/YCkO3LJiMDtXz9OMQbyE=
github.com/DataDog/sketches-go v1.4.5/go.mod h1:7Y8GN8Jf66DLyDhc94zuWA3uHEt/7ttt8jHOBWWrSOg=
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
@@ -78,10 +88,10 @@ github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSC
github.com/Masterminds/squirrel v1.5.4 h1:uUcX/aBc8O7Fg9kaISIUsHXdKuqehiXAMQTYX8afzqM=
github.com/Masterminds/squirrel v1.5.4/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
github.com/Microsoft/hcsshim v0.12.2 h1:AcXy+yfRvrx20g9v7qYaJv5Rh+8GaHOS6b8G6Wx/nKs=
github.com/Microsoft/hcsshim v0.12.2/go.mod h1:RZV12pcHCXQ42XnlQ3pz6FZfmrC1C+R4gaOHhRNML1g=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d h1:hi6J4K6DKrR4/ljxn6SF6nURyu785wKMuQcjt7H3VCQ=
github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -90,21 +100,49 @@ github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa h1:LHTHcTQiSGT7V
github.com/alexbrainman/sspi v0.0.0-20231016080023-1a75b4708caa/go.mod h1:cEWa1LVoE5KvSD9ONXsZrj0z6KqySlCCNKHlLzbqAt4=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/antonmedv/expr v1.15.5 h1:y0Iz3cEwmpRz5/r3w4qQR0MfIqJGdGM1zbhD/v0G5Vg=
github.com/antonmedv/expr v1.15.5/go.mod h1:0E/6TxnOlRNp81GMzX9QfDPAmHo2Phg00y4JUv1ihsE=
github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU=
github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
github.com/aws/aws-sdk-go-v2/config v1.29.9 h1:Kg+fAYNaJeGXp1vmjtidss8O2uXIsXwaRqsQJKXVr+0=
github.com/aws/aws-sdk-go-v2/config v1.29.9/go.mod h1:oU3jj2O53kgOU4TXq/yipt6ryiooYjlkqqVaZk7gY/U=
github.com/aws/aws-sdk-go-v2/credentials v1.17.62 h1:fvtQY3zFzYJ9CfixuAQ96IxDrBajbBWGqjNTCa79ocU=
github.com/aws/aws-sdk-go-v2/credentials v1.17.62/go.mod h1:ElETBxIQqcxej++Cs8GyPBbgMys5DgQPTwo7cUPDKt8=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.2 h1:vlYXbindmagyVA3RS2SPd47eKZ00GZZQcr+etTviHtc=
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.35.2/go.mod h1:yGhDiLKguA3iFJYxbrQkQiNzuy+ddxesSZYWVeeEH5Q=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.1 h1:8JdC7Gr9NROg1Rusk25IcZeTO59zLxsKgE0gkh5O6h0=
github.com/aws/aws-sdk-go-v2/service/sso v1.25.1/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1 h1:KwuLovgQPcdjNMfFt9OhUd9a2OwcOKhxfvF4glTzLuA=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.29.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17 h1:PZV5W8yk4OtH1JAuhV2PXwwO9v5G5Aoj+eMCn4T+1Kc=
github.com/aws/aws-sdk-go-v2/service/sts v1.33.17/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/beorn7/perks v0.0.0-20150223135152-b965b613227f/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENUpMkpg42fw=
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
@@ -128,9 +166,6 @@ github.com/ccding/go-stun v0.1.5 h1:qEM367nnezmj7dv+SdT52prv5x6HUTG3nlrjX5aitlo=
github.com/ccding/go-stun v0.1.5/go.mod h1:cCZjJ1J3WFSJV6Wj8Y9Di8JMTsEXh6uv2eNmLzKaUeM=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -141,28 +176,28 @@ github.com/chmduquesne/rollinghash v4.0.0+incompatible/go.mod h1:Uc2I36RRfTAf7Dg
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk=
github.com/cilium/ebpf v0.15.0/go.mod h1:DHp1WyrLeiBh19Cf/tfiSMhqheEiK8fXFZ4No0P1Hso=
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575 h1:kHaBemcxl8o/pQ5VM1c8PVE1PubbNx3mjUr09OqWGCs=
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo=
github.com/cilium/ebpf v0.16.0 h1:+BiEnHL6Z7lXnlGUsXQPPAE7+kenAd4ES8MQ5min0Ok=
github.com/cilium/ebpf v0.16.0/go.mod h1:L7u2Blt2jMM/vLAVgjxluxtBKlz3/GWjB0dMOEngfwE=
github.com/cilium/ipam v0.0.0-20230509084518-fd66eae7909b h1:yTDNdo6hd8ABJYeLUvmqKAYj3jaRzLYx3UwM+/jSeBY=
github.com/cilium/ipam v0.0.0-20230509084518-fd66eae7909b/go.mod h1:Ascfar4FtgB+K+mwqbZpSb3WVZ5sPFIarg+iAOXNZqI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ=
github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 h1:QVw89YDxXxEe+l8gU8ETbOasdwEV+avkR75ZzsVV9WI=
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/containerd/containerd v1.7.14 h1:H/XLzbnGuenZEGK+v0RkwTdv2u1QFAruMe5N0GNPJwA=
github.com/containerd/containerd v1.7.14/go.mod h1:YMC9Qt5yzNqXx/fO4j/5yYVIHXSRrlB3H7sxkUTvspg=
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3 h1:boJj011Hh+874zpIySeApCX4GeOjPl9qhRF3QuIZq+Q=
github.com/cncf/xds/go v0.0.0-20241223141626-cff3c89139a3/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/containerd/containerd v1.7.27 h1:yFyEyojddO3MIGVER2xJLWoCIn+Up4GaHFquP7hsFII=
github.com/containerd/containerd v1.7.27/go.mod h1:xZmPnl75Vc+BLGt4MIfu6bp+fy03gdHAn9bz+FreFR0=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/containernetworking/cni v1.1.2 h1:wtRGZVv7olUHMOqouPpn3cXJWpJgM6+EUl31EQbXALQ=
github.com/containernetworking/cni v1.1.2/go.mod h1:sDpYKmGVENF3s6uvMvGgldDWeG8dMxakj/u+i9ht9vw=
github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0=
github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
github.com/coredns/coredns v1.11.2 h1:HnCGNxSolDRge1fhQD1/N7AzYfStLMtqVAmuUe1jo1I=
github.com/coredns/coredns v1.11.2/go.mod h1:EqOuX/f6iSRMG18JBwkS0Ern3iV9ImS+hZHgVuwGt+0=
github.com/coredns/caddy v1.1.2-0.20241029205200-8de985351a98 h1:c+Epklw9xk6BZ1OFBPWLA2PcL8QalKvl3if8CP9x8uw=
github.com/coredns/caddy v1.1.2-0.20241029205200-8de985351a98/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
github.com/coredns/coredns v1.12.1 h1:haptbGscSbdWU46xrjdPj1vp3wvH1Z2FgCSQKEdgN5s=
github.com/coredns/coredns v1.12.1/go.mod h1:V26ngiKdNvAiEre5PTAvklrvTjnNjl6lakq1nbE/NbU=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6 h1:8h5+bWd7R6AYUslN6c6iuZWTKsKxUFDlpnmilO6R2n0=
github.com/coreos/go-iptables v0.7.1-0.20240112124308-65c67c9f46e6/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
@@ -184,8 +219,9 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8Yc
github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa h1:h8TfIT1xc8FWbwwpmHn1J5i43Y0uZP97GqasGCzSRJk=
github.com/dblohm7/wingoes v0.0.0-20240119213807-a09d6be7affa/go.mod h1:Nx87SkVqTKd8UtT+xu7sM/l+LgXs6c0aHrlKusR+2EQ=
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA=
github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y=
github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U=
@@ -212,8 +248,8 @@ github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c/go.mod h1:CADgU4DSXK
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32 h1:EHZfspsnLAz8Hzccd67D5abwLiqoqym2jz/jOS39mCk=
github.com/docker/go-events v0.0.0-20250114142523-c867878c5e32/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
@@ -227,18 +263,20 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/dvsekhvalnov/jose2go v0.0.0-20170216131308-f21a8cedbbae/go.mod h1:7BvyPhdbLxMXIYTFPLsyJRFMsKmOZnQmzh6Gb+uquuM=
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4 h1:8EXxF+tCLqaVk8AOC29zl2mnhQjwyLxxOTuhUazWRsg=
github.com/eapache/queue/v2 v2.0.0-20230407133247-75960ed334e4/go.mod h1:I5sHm0Y0T1u5YjlyqC5GVArM7aNZRUYtTjmJ8mPJFds=
github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I=
github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU=
github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE=
github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM=
github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0=
github.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=
github.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -246,10 +284,13 @@ github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjT
github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f h1:Wl78ApPPB2Wvf/TIe2xdyJxTlb6obmF18d8QdkxNDu4=
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f/go.mod h1:OSYXu++VVOHnXeitef/D8n/6y4QV8uLHSFXX4NeXMGc=
github.com/expr-lang/expr v1.17.2 h1:o0A99O/Px+/DTjEnQiodAgOIK9PPxL8DtXhBRKC+Iso=
github.com/expr-lang/expr v1.17.2/go.mod h1:8/vRC7+7HBzESEqt5kKpYXxrxkr31SaO8r40VO/1IT4=
github.com/farsightsec/golang-framestream v0.3.0 h1:/spFQHucTle/ZIPkYqrfshQqPe2VQEzesH243TjIwqA=
github.com/farsightsec/golang-framestream v0.3.0/go.mod h1:eNde4IQyEiA5br02AouhEHCu3p3UzrCdFR4LuQHklMI=
github.com/fatih/camelcase v1.0.0 h1:hxNvNX/xYBp0ovncs8WyWZrOrpBNub/JfaMvbURyft8=
github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
@@ -322,14 +363,12 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg=
github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -340,9 +379,7 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
@@ -355,13 +392,11 @@ github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519
github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
@@ -376,18 +411,17 @@ github.com/google/nftables v0.2.1-0.20240414091927-5e242ec57806/go.mod h1:Beg6V6
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20250202011525-fc3143867406 h1:wlQI2cYY0BsWmmPPAnxfQ8SDW0S3Jasn+4B8kXFxprg=
github.com/google/pprof v0.0.0-20250202011525-fc3143867406/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
github.com/gorilla/handlers v1.5.2 h1:cLTUSsNkgcwhgRqvCNmdbRWG0A3N4F+M2nWKdScwyEE=
github.com/gorilla/handlers v1.5.2/go.mod h1:dX+xVpaxdSw+q0Qek8SSsl3dfMk3jNddUkMzo0GtH0w=
github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@@ -415,13 +449,21 @@ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMW
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 h1:UpiO20jno/eV1eVZcxqWnUohyKRe1g8FPV/xH1s/2qs=
github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7/go.mod h1:QmrqtbKuxxSWTN3ETMPuB+VtEiBJ/A9XhoYGv8E1uD8=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.1/go.mod h1:gKOamz3EwoIoJq7mlMIRBpVTAUn8qPCrEclOKKWhD3U=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=
github.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=
github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc=
github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw=
github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU=
@@ -520,6 +562,7 @@ github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhn
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/lithammer/dedent v1.1.0 h1:VNzHMVCBNG1j0fh3OrsFRkVUwStdDArbgBWoPAffktY=
github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0=
github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@@ -529,8 +572,10 @@ github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4
github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mattbaird/jsonpatch v0.0.0-20240118010651-0ba75a80ca38 h1:hQWBtNqRYrI7CWIaUSXXtNKR90KzcUA5uiuxFVWw7sU=
github.com/mattbaird/jsonpatch v0.0.0-20240118010651-0ba75a80ca38/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@@ -547,24 +592,27 @@ github.com/mdlayher/netlink v1.7.2/go.mod h1:xraEF7uJbxLhc5fpHL4cPe221LI2bdttWlU
github.com/mdlayher/socket v0.5.0 h1:ilICZmJcQz70vrWVes1MFera4jGiWNocSkykwwoy3XI=
github.com/mdlayher/socket v0.5.0/go.mod h1:WkcBFfvyG8QENs5+hfQPl1X6Jpd2yeLIYgrGFmJiJxI=
github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4=
github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY=
github.com/miekg/dns v1.1.64 h1:wuZgD9wwCE6XMT05UU/mlSko71eRSXEAm2EbjQXLKnQ=
github.com/miekg/dns v1.1.64/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75 h1:cUVxyR+UfmdEAZGJ8IiKld1O0dbGotEnkMolG5hfMSY=
github.com/miscreant/miscreant.go v0.0.0-20200214223636-26d376326b75/go.mod h1:pBbZyGwC5i16IBkjVKoy/sznA8jPD/K9iedwe1ESE6w=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db h1:62I3jR2EmQ4l5rM/4FEfDWcRD+abF5XlKShorW5LRoQ=
github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0dey0ia/Uv7NcFFVbCLtqEBQbrT4OCwCSKTEv6enCw=
github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c h1:cqn374mizHuIWj+OSJCajGr/phAmuMug9qIX3l9CflE=
github.com/mitchellh/mapstructure v1.5.1-0.20231216201459-8508981c8b6c/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
@@ -573,12 +621,12 @@ github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkV
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/sys/symlink v0.2.0 h1:tk1rOM+Ljp0nFmfOIBtlV3rTDlWOwFRhjEeAhZB0nZc=
github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
@@ -635,8 +683,8 @@ github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 h1:uhcF5Jd7rP9DVEL10Siffyepr6SvlKbUsjH5JpNCRi8=
github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0/go.mod h1:+oCZ5GXXr7KPI/DNOQORPTq5AWHfALJj9c72b0+YsEY=
github.com/openzipkin/zipkin-go v0.4.2 h1:zjqfqHjUpPmB3c1GlCvvgsM1G4LkvqQbBDueDOCg/jA=
github.com/openzipkin/zipkin-go v0.4.2/go.mod h1:ZeVkFjuuBiSy13y8vpSDCjMi9GoI3hPpCJSBx/EYFhY=
github.com/openzipkin/zipkin-go v0.4.3 h1:9EGwpqkgnwdEIJ+Od7QVSEIH+ocmm5nPat0G7sjsSdg=
github.com/openzipkin/zipkin-go v0.4.3/go.mod h1:M9wCJZFWCo2RiY+o1eBCEMe0Dp2S5LDHcMZmk3RmK7c=
github.com/oschwald/geoip2-golang v1.11.0 h1:hNENhCn1Uyzhf9PTmquXENiWS6AlxAEnBII6r8krA3w=
github.com/oschwald/geoip2-golang v1.11.0/go.mod h1:P9zG+54KPEFOliZ29i7SeYZ/GM6tfEL+rgSn03hYuUo=
github.com/oschwald/maxminddb-golang v1.13.1 h1:G3wwjdN9JmIK2o/ermkHM+98oX5fS+k5MbwsmL4MRQE=
@@ -650,8 +698,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+v
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 h1:jYi87L8j62qkXzaYHAQAhEapgukhenIMZRBKTNRLHJ4=
github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -663,6 +711,8 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY=
@@ -680,7 +730,6 @@ github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuF
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
@@ -694,8 +743,8 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.16.0 h1:xh6oHhKwnOJKMYiYBDWmkHqQPyiY40sny36Cmx2bbsM=
github.com/prometheus/procfs v0.16.0/go.mod h1:8veyXUu3nGP7oaCxhX6yeaM5u4stL2FeMXnCqhDthZg=
github.com/quic-go/quic-go v0.49.0 h1:w5iJHXwHxs1QxyBv1EHKuC50GX5to8mJAxvtnttJp94=
github.com/quic-go/quic-go v0.49.0/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s=
github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q=
github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 h1:EaDatTxkdHG+U3Bk4EUr+DZ7fOGwTfezUiUJMaIcaho=
@@ -706,8 +755,10 @@ github.com/redis/go-redis/v9 v9.1.0 h1:137FnGdk+EQdCbye1FW+qOEcY5S+SpY9T0Niuqvtf
github.com/redis/go-redis/v9 v9.1.0/go.mod h1:urWj3He21Dj5k4TK1y59xH8Uj6ATueP8AH1cY3lZl4c=
github.com/regclient/regclient v0.8.0 h1:xNAMDlADcyMvFAlGXoqDOxlSUBG4mqWBFgjQqVTP8Og=
github.com/regclient/regclient v0.8.0/go.mod h1:h9+Y6dBvqBkdlrj6EIhbTOv0xUuIFl7CdI1bZvEB42g=
github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052 h1:Qp27Idfgi6ACvFQat5+VJvlYToylpM/hcyLBI3WaKPA=
github.com/richardartoul/molecule v1.0.1-0.20221107223329-32cfee06a052/go.mod h1:uvX/8buq8uVeiZiFht+0lqSLBHF+uGV8BrTv8W/SIwk=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3 h1:4+LEVOB87y175cLJC/mbsgKmoDOjrBldtXvioEy96WY=
github.com/richardartoul/molecule v1.0.1-0.20240531184615-7ca0df43c0b3/go.mod h1:vl5+MqJ1nBINuSsUI2mGgH79UweUT/B5Fy8857PqyyI=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
@@ -717,14 +768,23 @@ github.com/rubenv/sql-migrate v1.7.1 h1:f/o0WgfO/GqNuVg+6801K/KW3WdDSupzSjDYODmi
github.com/rubenv/sql-migrate v1.7.1/go.mod h1:Ob2Psprc0/3ggbM6wCzyYVFFuc6FyZrb2AS+ezLDFb4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=
github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=
github.com/schollz/progressbar/v3 v3.14.2 h1:EducH6uNLIWsr560zSV1KrTeUb/wZGAHqyMFIEa99ks=
github.com/schollz/progressbar/v3 v3.14.2/go.mod h1:aQAZQnhF4JGFtRJiw/eobaXpsqpVQAftEQ+hLGXaRc4=
github.com/secure-systems-lab/go-securesystemslib v0.8.0 h1:mr5An6X45Kb2nddcFlbmfHkLguCE9laoZCUzEEpIZXA=
github.com/secure-systems-lab/go-securesystemslib v0.8.0/go.mod h1:UH2VZVuJfCYR8WgMlCU1uFsOUU+KeyrTWcSS73NBOzU=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU=
github.com/shirou/gopsutil/v3 v3.24.4/go.mod h1:lTd2mdiOspcqLgAnr9/nGi71NkeMpWKdmhuxm9GusH8=
github.com/shirou/gopsutil/v4 v4.25.1 h1:QSWkTc+fu9LTAWfkZwZ6j8MSUk4A2LV7rbH0ZqmLjXs=
github.com/shirou/gopsutil/v4 v4.25.1/go.mod h1:RoUCUpndaJFtT+2zsZzzmhvbfGoDCJ7nFXKJf8GqJbI=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
@@ -770,6 +830,8 @@ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1F
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8=
@@ -786,10 +848,12 @@ github.com/thejerf/suture/v4 v4.0.6 h1:QsuCEsCqb03xF9tPAsWAj8QOAJBgQI1c0VqJNaing
github.com/thejerf/suture/v4 v4.0.6/go.mod h1:gu9Y4dXNUWFrByqRt30Rm9/UZ0wzRSt9AJS6xu/ZGxU=
github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU=
github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k=
github.com/tinylib/msgp v1.2.1 h1:6ypy2qcCznxpP4hpORzhtXyTqrBs7cfM9MCCWY8zsmU=
github.com/tinylib/msgp v1.2.1/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo=
github.com/tklauser/numcpus v0.9.0/go.mod h1:SN6Nq1O3VychhC1npsWostA+oW+VOQTxZrS604NSRyI=
github.com/ulikunitz/xz v0.5.12 h1:37Nm15o69RwBkXM0J6A5OlE67RZTfzUxTj8fB3dfcsc=
@@ -799,6 +863,10 @@ github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1Y
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0 h1:okhMind4q9H1OxF44gNegWkiP4H/gsTFLalHFa4OOUI=
github.com/vitrun/qart v0.0.0-20160531060029-bf64b92db6b0/go.mod h1:TTbGUfE+cXXceWtbTHq6lqcTvYPBKLNejBEbnUsQJtU=
github.com/vmihailenco/msgpack/v4 v4.3.12 h1:07s4sz9IReOgdikxLTKNbBdqDMLsjPKXwvCazn8G65U=
github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
github.com/vmihailenco/tagparser v0.1.2 h1:gnjoVuB/kljJ5wICEEOpx98oXMWPLj22G67Vbd1qPqc=
github.com/vmihailenco/tagparser v0.1.2/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -816,24 +884,32 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/etcd/api/v3 v3.5.17 h1:cQB8eb8bxwuxOilBpMJAEo8fAONyrdXTHUNcMd8yT1w=
go.etcd.io/etcd/api/v3 v3.5.17/go.mod h1:d1hvkRuXkts6PmaYk2Vrgqbv7H4ADfAKhyJqHNLJCB4=
go.etcd.io/etcd/client/pkg/v3 v3.5.17 h1:XxnDXAWq2pnxqx76ljWwiQ9jylbpC4rvkAeRVOUKKVw=
go.etcd.io/etcd/client/pkg/v3 v3.5.17/go.mod h1:4DqK1TKacp/86nJk4FLQqo6Mn2vvQFBmruW3pP14H/w=
go.etcd.io/etcd/client/v3 v3.5.17 h1:o48sINNeWz5+pjy/Z0+HKpj/xSnBkuVhVvXkjEXbqZY=
go.etcd.io/etcd/client/v3 v3.5.17/go.mod h1:j2d4eXTHWkT2ClBgnnEPm/Wuu7jsqku41v9DZ3OtjQo=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.etcd.io/etcd/api/v3 v3.5.20 h1:aKfz3nPZECWoZJXMSH9y6h2adXjtOHaHTGEVCuCmaz0=
go.etcd.io/etcd/api/v3 v3.5.20/go.mod h1:QqKGViq4KTgOG43dr/uH0vmGWIaoJY3ggFi6ZH0TH/U=
go.etcd.io/etcd/client/pkg/v3 v3.5.20 h1:sZIAtra+xCo56gdf6BR62to/hiie5Bwl7hQIqMzVTEM=
go.etcd.io/etcd/client/pkg/v3 v3.5.20/go.mod h1:qaOi1k4ZA9lVLejXNvyPABrVEe7VymMF2433yyRQ7O0=
go.etcd.io/etcd/client/v3 v3.5.20 h1:jMT2MwQEhyvhQg49Cec+1ZHJzfUf6ZgcmV0GjPv0tIQ=
go.etcd.io/etcd/client/v3 v3.5.20/go.mod h1:J5lbzYRMUR20YolS5UjlqqMcu3/wdEvG5VNBhzyo3m0=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/collector/component v0.104.0 h1:jqu/X9rnv8ha0RNZ1a9+x7OU49KwSMsPbOuIEykHuQE=
go.opentelemetry.io/collector/component v0.104.0/go.mod h1:1C7C0hMVSbXyY1ycCmaMUAR9fVwpgyiNQqxXtEWhVpw=
go.opentelemetry.io/collector/config/configtelemetry v0.104.0 h1:eHv98XIhapZA8MgTiipvi+FDOXoFhCYOwyKReOt+E4E=
go.opentelemetry.io/collector/config/configtelemetry v0.104.0/go.mod h1:WxWKNVAQJg/Io1nA3xLgn/DWLE/W1QOB2+/Js3ACi40=
go.opentelemetry.io/collector/pdata v1.11.0 h1:rzYyV1zfTQQz1DI9hCiaKyyaczqawN75XO9mdXmR/hE=
go.opentelemetry.io/collector/pdata v1.11.0/go.mod h1:IHxHsp+Jq/xfjORQMDJjSH6jvedOSTOyu3nbxqhWSYE=
go.opentelemetry.io/collector/pdata/pprofile v0.104.0 h1:MYOIHvPlKEJbWLiBKFQWGD0xd2u22xGVLt4jPbdxP4Y=
go.opentelemetry.io/collector/pdata/pprofile v0.104.0/go.mod h1:7WpyHk2wJZRx70CGkBio8klrYTTXASbyIhf+rH4FKnA=
go.opentelemetry.io/collector/semconv v0.104.0 h1:dUvajnh+AYJLEW/XOPk0T0BlwltSdi3vrjO7nSOos3k=
go.opentelemetry.io/collector/semconv v0.104.0/go.mod h1:yMVUCNoQPZVq/IPfrHrnntZTWsLf5YGZ7qwKulIl5hw=
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0 h1:UW0+QyeyBVhn+COBec3nGhfnFe5lwB0ic1JBVjzhk0w=
go.opentelemetry.io/contrib/bridges/prometheus v0.57.0/go.mod h1:ppciCHRLsyCio54qbzQv0E4Jyth/fLWDTJYfvWpcSVk=
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0 h1:jmTVJ86dP60C01K3slFQa2NQ/Aoi7zA+wy7vMOKD9H4=
go.opentelemetry.io/contrib/exporters/autoexport v0.57.0/go.mod h1:EJBheUMttD/lABFyLXhce47Wr6DPWYReCzaZiXadH7g=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0 h1:WzNab7hOOLzdDF/EoWCt4glhrbMPVMOO5JYTmpz36Ls=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.8.0/go.mod h1:hKvJwTzJdp90Vh7p6q/9PAOd55dI6WA6sWj62a/JvSs=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.8.0 h1:S+LdBGiQXtJdowoJoQPEtI52syEP/JYBUpjO49EQhV8=
@@ -858,16 +934,16 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0 h1:cC2yDI3IQd0Udsu
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.32.0/go.mod h1:2PD5Ex6z8CFzDbTdOlwyNIUywRr1DN0ospafJM1wJ+s=
go.opentelemetry.io/otel/log v0.8.0 h1:egZ8vV5atrUWUbnSsHn6vB8R21G2wrKqNiDt3iWertk=
go.opentelemetry.io/otel/log v0.8.0/go.mod h1:M9qvDdUTRCopJcGRKg57+JSQ9LgLBrwwfC32epk5NX8=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE=
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY=
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg=
go.opentelemetry.io/otel/sdk/log v0.8.0 h1:zg7GUYXqxk1jnGF/dTdLPrK06xJdrXgqgFLnI4Crxvs=
go.opentelemetry.io/otel/sdk/log v0.8.0/go.mod h1:50iXr0UVwQrYS45KbruFrEt4LvAdCaWWgIrsN3ZQggo=
go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o=
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w=
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
@@ -895,21 +971,17 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/exp v0.0.0-20250207012021-f9890c6ad9f3 h1:qNgPs5exUA+G0C96DrPwNrvLSj7GT/9D+3WMWUcUg34=
golang.org/x/exp v0.0.0-20250207012021-f9890c6ad9f3/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
@@ -922,12 +994,8 @@ golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -936,7 +1004,6 @@ golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
@@ -951,9 +1018,8 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -969,9 +1035,9 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180926160741-c2ed4eda69e7/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1013,12 +1079,15 @@ golang.org/x/sys v0.4.1-0.20230131160137-e7d7f63158de/go.mod h1:oPkhp1MJrh7nUepC
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1026,11 +1095,12 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -1041,15 +1111,11 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@@ -1078,41 +1144,29 @@ golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0=
gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
google.golang.org/api v0.199.0 h1:aWUXClp+VFJmqE0JPvpZOK3LDQMyFKYIow4etYd9qxs=
google.golang.org/api v0.199.0/go.mod h1:ohG4qSztDJmZdjK/Ar6MhbAmb/Rpi4JHOqagsh90K28=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f h1:gap6+3Gk41EItBuyi4XX/bp4oqJ3UwuIMl25yGinuAA=
google.golang.org/genproto/googleapis/api v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:Ic02D47M+zbarjYYUlK57y316f2MoN0gjAwI3f2S95o=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50=
google.golang.org/api v0.227.0 h1:QvIHF9IuyG6d6ReE+BNd11kIB8hZvjN8Z5xY5t21zYc=
google.golang.org/api v0.227.0/go.mod h1:EIpaG6MbTgQarWF5xJvX0eOJPK9n/5D4Bynb9j2HXvQ=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto/googleapis/api v0.0.0-20250409194420-de1ac958c67a h1:OQ7sHVzkx6L57dQpzUS4ckfWJ51KDH74XHTDe23xWAs=
google.golang.org/genproto/googleapis/api v0.0.0-20250409194420-de1ac958c67a/go.mod h1:2R6XrVC8Oc08GlNh8ujEpc7HkLiEZ16QeY7FxIs20ac=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e h1:ztQaXfzEXTmCBvbtWYRhJxW+0iJcz2qXfd38/e9l7bA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250414145226-207652e42e2e/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
google.golang.org/grpc v1.71.1 h1:ffsFWr7ygTUscGPI0KKK6TLrGz0476KUvvsbqWK0rPI=
google.golang.org/grpc v1.71.1/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/DataDog/dd-trace-go.v1 v1.62.0 h1:jeZxE4ZlfAc+R0zO5TEmJBwOLet3NThsOfYJeSQg1x0=
gopkg.in/DataDog/dd-trace-go.v1 v1.62.0/go.mod h1:YTvYkk3PTsfw0OWrRFxV/IQ5Gy4nZ5TRvxTAP3JcIzs=
gopkg.in/DataDog/dd-trace-go.v1 v1.72.2 h1:SLcih9LB+I1l76Wd7aUSpzISemewzjq6djntMnBnzkA=
gopkg.in/DataDog/dd-trace-go.v1 v1.72.2/go.mod h1:XqDhDqsLpThFnJc4z0FvAEItISIAUka+RHwmQ6EfN1U=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII=
@@ -1152,10 +1206,6 @@ gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987 h1:TU8z2Lh3Bbq77w0t1eG8yRlL
gvisor.dev/gvisor v0.0.0-20240722211153-64c016c92987/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU=
helm.sh/helm/v4 v4.0.0-20250324191910-0199b748aaea h1:uNr8UUVq+hMK/QwZZUBZePTxyjIewt2NlLB2rH40MOY=
helm.sh/helm/v4 v4.0.0-20250324191910-0199b748aaea/go.mod h1:TBh0GxYZ04+lOrSGPO13UtvASGPofyB+4ZsAprbmrok=
honnef.co/go/gotraceui v0.2.0 h1:dmNsfQ9Vl3GwbiVD7Z8d/osC6WtGGrasyrC2suc4ZIQ=
honnef.co/go/gotraceui v0.2.0/go.mod h1:qHo4/W75cA3bX0QQoSvDjbJa4R8mAyyFjbWAj63XElc=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY=
@@ -1180,6 +1230,26 @@ k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI=
k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e h1:KqK5c/ghOm8xkHYhlodbp6i6+r+ChV2vuAuVRdFbLro=
k8s.io/utils v0.0.0-20250321185631-1f6e0b77f77e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
lukechampine.com/uint128 v1.3.0 h1:cDdUVfRwDUDovz610ABgFD17nXD4/uDgVHl2sC3+sbo=
lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
modernc.org/cc/v3 v3.41.0 h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=
modernc.org/cc/v3 v3.41.0/go.mod h1:Ni4zjJYJ04CDOhG7dn640WGfwBzfE0ecX8TyMB0Fv0Y=
modernc.org/ccgo/v3 v3.16.15 h1:KbDR3ZAVU+wiLyMESPtbtE/Add4elztFyfsWoNTgxS0=
modernc.org/ccgo/v3 v3.16.15/go.mod h1:yT7B+/E2m43tmMOT51GMoM98/MtHIcQQSleGnddkUNI=
modernc.org/libc v1.37.6 h1:orZH3c5wmhIQFTXF+Nt+eeauyd+ZIt2BX6ARe+kD+aw=
modernc.org/libc v1.37.6/go.mod h1:YAXkAZ8ktnkCKaN9sw/UDeUVkGYJ/YquGO4FTi5nmHE=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sqlite v1.28.0 h1:Zx+LyDDmXczNnEQdvPuEfcFVA2ZPyaD7UCZDjef3BHQ=
modernc.org/sqlite v1.28.0/go.mod h1:Qxpazz0zH8Z1xCFyi5GSL3FzbtZ3fvbjmywNogldEW0=
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
oras.land/oras-go/v2 v2.5.0 h1:o8Me9kLY74Vp5uw07QXPiitjsw7qNXi8Twd+19Zf02c=
oras.land/oras-go/v2 v2.5.0/go.mod h1:z4eisnLP530vwIOUOJeBIj0aGI0L1C3d53atvCBqZHg=
sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU=

View File

@@ -1,5 +1,124 @@
# Changelog
## [0.15.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.1...auth/v0.15.0) (2025-02-19)
### Features
* **auth:** Add hard-bound token request to compute token provider. ([#11588](https://github.com/googleapis/google-cloud-go/issues/11588)) ([0e608bb](https://github.com/googleapis/google-cloud-go/commit/0e608bb5ac3d694c8ad36ca4340071d3a2c78699))
## [0.14.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.14.0...auth/v0.14.1) (2025-01-24)
### Documentation
* **auth:** Add warning about externally-provided credentials ([#11462](https://github.com/googleapis/google-cloud-go/issues/11462)) ([49fb6ff](https://github.com/googleapis/google-cloud-go/commit/49fb6ff4d754895f82c9c4d502fc7547d3b5a941))
## [0.14.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.13.0...auth/v0.14.0) (2025-01-08)
### Features
* **auth:** Add universe domain support to idtoken ([#11059](https://github.com/googleapis/google-cloud-go/issues/11059)) ([72add7e](https://github.com/googleapis/google-cloud-go/commit/72add7e9f8f455af695e8ef79212a4bd3122fb3a))
### Bug Fixes
* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
* **auth:** Fix copy of delegates in impersonate.NewIDTokenCredentials ([#11386](https://github.com/googleapis/google-cloud-go/issues/11386)) ([ff7ef8e](https://github.com/googleapis/google-cloud-go/commit/ff7ef8e7ade7171bce3e4f30ff10a2e9f6c27ca0)), refs [#11379](https://github.com/googleapis/google-cloud-go/issues/11379)
* **auth:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
## [0.13.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.1...auth/v0.13.0) (2024-12-13)
### Features
* **auth:** Add logging support ([#11079](https://github.com/googleapis/google-cloud-go/issues/11079)) ([c80e31d](https://github.com/googleapis/google-cloud-go/commit/c80e31df5ecb33a810be3dfb9d9e27ac531aa91d))
* **auth:** Pass logger from auth layer to metadata package ([#11288](https://github.com/googleapis/google-cloud-go/issues/11288)) ([b552efd](https://github.com/googleapis/google-cloud-go/commit/b552efd6ab34e5dfded18438e0fbfd925805614f))
### Bug Fixes
* **auth:** Check compute cred type before non-default flag for DP ([#11255](https://github.com/googleapis/google-cloud-go/issues/11255)) ([4347ca1](https://github.com/googleapis/google-cloud-go/commit/4347ca141892be8ae813399b4b437662a103bc90))
## [0.12.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.12.0...auth/v0.12.1) (2024-12-10)
### Bug Fixes
* **auth:** Correct typo in link ([#11160](https://github.com/googleapis/google-cloud-go/issues/11160)) ([af6fb46](https://github.com/googleapis/google-cloud-go/commit/af6fb46d7cd694ddbe8c9d63bc4cdcd62b9fb2c1))
## [0.12.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.11.0...auth/v0.12.0) (2024-12-04)
### Features
* **auth:** Add support for providing custom certificate URL ([#11006](https://github.com/googleapis/google-cloud-go/issues/11006)) ([ebf3657](https://github.com/googleapis/google-cloud-go/commit/ebf36579724afb375d3974cf1da38f703e3b7dbc)), refs [#11005](https://github.com/googleapis/google-cloud-go/issues/11005)
### Bug Fixes
* **auth:** Ensure endpoints are present in Validator ([#11209](https://github.com/googleapis/google-cloud-go/issues/11209)) ([106cd53](https://github.com/googleapis/google-cloud-go/commit/106cd53309facaef1b8ea78376179f523f6912b9)), refs [#11006](https://github.com/googleapis/google-cloud-go/issues/11006) [#11190](https://github.com/googleapis/google-cloud-go/issues/11190) [#11189](https://github.com/googleapis/google-cloud-go/issues/11189) [#11188](https://github.com/googleapis/google-cloud-go/issues/11188)
## [0.11.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.2...auth/v0.11.0) (2024-11-21)
### Features
* **auth:** Add universe domain support to mTLS ([#11159](https://github.com/googleapis/google-cloud-go/issues/11159)) ([117748b](https://github.com/googleapis/google-cloud-go/commit/117748ba1cfd4ae62a6a4feb7e30951cb2bc9344))
## [0.10.2](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.1...auth/v0.10.2) (2024-11-12)
### Bug Fixes
* **auth:** Restore use of grpc.Dial ([#11118](https://github.com/googleapis/google-cloud-go/issues/11118)) ([2456b94](https://github.com/googleapis/google-cloud-go/commit/2456b943b7b8aaabd4d8bfb7572c0f477ae0db45)), refs [#7556](https://github.com/googleapis/google-cloud-go/issues/7556)
## [0.10.1](https://github.com/googleapis/google-cloud-go/compare/auth/v0.10.0...auth/v0.10.1) (2024-11-06)
### Bug Fixes
* **auth:** Restore Application Default Credentials support to idtoken ([#11083](https://github.com/googleapis/google-cloud-go/issues/11083)) ([8771f2e](https://github.com/googleapis/google-cloud-go/commit/8771f2ea9807ab822083808e0678392edff3b4f2))
* **auth:** Skip impersonate universe domain check if empty ([#11086](https://github.com/googleapis/google-cloud-go/issues/11086)) ([87159c1](https://github.com/googleapis/google-cloud-go/commit/87159c1059d4a18d1367ce62746a838a94964ab6))
## [0.10.0](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.9...auth/v0.10.0) (2024-10-30)
### Features
* **auth:** Add universe domain support to credentials/impersonate ([#10953](https://github.com/googleapis/google-cloud-go/issues/10953)) ([e06cb64](https://github.com/googleapis/google-cloud-go/commit/e06cb6499f7eda3aef08ab18ff197016f667684b))
## [0.9.9](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.8...auth/v0.9.9) (2024-10-22)
### Bug Fixes
* **auth:** Fallback cert lookups for missing files ([#11013](https://github.com/googleapis/google-cloud-go/issues/11013)) ([bd76695](https://github.com/googleapis/google-cloud-go/commit/bd766957ec238b7c40ddbabb369e612dc9b07313)), refs [#10844](https://github.com/googleapis/google-cloud-go/issues/10844)
* **auth:** Replace MDS endpoint universe_domain with universe-domain ([#11000](https://github.com/googleapis/google-cloud-go/issues/11000)) ([6a1586f](https://github.com/googleapis/google-cloud-go/commit/6a1586f2ce9974684affaea84e7b629313b4d114))
## [0.9.8](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.7...auth/v0.9.8) (2024-10-09)
### Bug Fixes
* **auth:** Restore OpenTelemetry handling in transports ([#10968](https://github.com/googleapis/google-cloud-go/issues/10968)) ([08c6d04](https://github.com/googleapis/google-cloud-go/commit/08c6d04901c1a20e219b2d86df41dbaa6d7d7b55)), refs [#10962](https://github.com/googleapis/google-cloud-go/issues/10962)
* **auth:** Try talk to plaintext S2A if credentials can not be found for mTLS-S2A ([#10941](https://github.com/googleapis/google-cloud-go/issues/10941)) ([0f0bf2d](https://github.com/googleapis/google-cloud-go/commit/0f0bf2d18c97dd8b65bcf0099f0802b5631c6287))
## [0.9.7](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.6...auth/v0.9.7) (2024-10-01)
### Bug Fixes
* **auth:** Restore support for non-default service accounts for DirectPath ([#10937](https://github.com/googleapis/google-cloud-go/issues/10937)) ([a38650e](https://github.com/googleapis/google-cloud-go/commit/a38650edbf420223077498cafa537aec74b37aad)), refs [#10907](https://github.com/googleapis/google-cloud-go/issues/10907)
## [0.9.6](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.5...auth/v0.9.6) (2024-09-30)
### Bug Fixes
* **auth:** Make aws credentials provider retrieve fresh credentials ([#10920](https://github.com/googleapis/google-cloud-go/issues/10920)) ([250fbf8](https://github.com/googleapis/google-cloud-go/commit/250fbf87d858d865e399a241b7e537c4ff0c3dd8))
## [0.9.5](https://github.com/googleapis/google-cloud-go/compare/auth/v0.9.4...auth/v0.9.5) (2024-09-25)

View File

@@ -24,6 +24,7 @@ import (
"encoding/json"
"errors"
"fmt"
"log/slog"
"net/http"
"net/url"
"strings"
@@ -32,6 +33,7 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/jwt"
"github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -227,9 +229,7 @@ type CredentialsOptions struct {
UniverseDomainProvider CredentialsPropertyProvider
}
// NewCredentials returns new [Credentials] from the provided options. Most users
// will want to build this object a function from the
// [cloud.google.com/go/auth/credentials] package.
// NewCredentials returns new [Credentials] from the provided options.
func NewCredentials(opts *CredentialsOptions) *Credentials {
creds := &Credentials{
TokenProvider: opts.TokenProvider,
@@ -242,8 +242,8 @@ func NewCredentials(opts *CredentialsOptions) *Credentials {
return creds
}
// CachedTokenProviderOptions provided options for configuring a
// CachedTokenProvider.
// CachedTokenProviderOptions provides options for configuring a cached
// [TokenProvider].
type CachedTokenProviderOptions struct {
// DisableAutoRefresh makes the TokenProvider always return the same token,
// even if it is expired. The default is false. Optional.
@@ -253,7 +253,7 @@ type CachedTokenProviderOptions struct {
// seconds. Optional.
ExpireEarly time.Duration
// DisableAsyncRefresh configures a synchronous workflow that refreshes
// stale tokens while blocking. The default is false. Optional.
// tokens in a blocking manner. The default is false. Optional.
DisableAsyncRefresh bool
}
@@ -280,12 +280,7 @@ func (ctpo *CachedTokenProviderOptions) blockingRefresh() bool {
// NewCachedTokenProvider wraps a [TokenProvider] to cache the tokens returned
// by the underlying provider. By default it will refresh tokens asynchronously
// (non-blocking mode) within a window that starts 3 minutes and 45 seconds
// before they expire. The asynchronous (non-blocking) refresh can be changed to
// a synchronous (blocking) refresh using the
// CachedTokenProviderOptions.DisableAsyncRefresh option. The time-before-expiry
// duration can be configured using the CachedTokenProviderOptions.ExpireEarly
// option.
// a few minutes before they expire.
func NewCachedTokenProvider(tp TokenProvider, opts *CachedTokenProviderOptions) TokenProvider {
if ctp, ok := tp.(*cachedTokenProvider); ok {
return ctp
@@ -345,13 +340,14 @@ func (c *cachedTokenProvider) tokenState() tokenState {
c.mu.Lock()
defer c.mu.Unlock()
t := c.cachedToken
now := timeNow()
if t == nil || t.Value == "" {
return invalid
} else if t.Expiry.IsZero() {
return fresh
} else if timeNow().After(t.Expiry.Round(0)) {
} else if now.After(t.Expiry.Round(0)) {
return invalid
} else if timeNow().After(t.Expiry.Round(0).Add(-c.expireEarly)) {
} else if now.After(t.Expiry.Round(0).Add(-c.expireEarly)) {
return stale
}
return fresh
@@ -496,6 +492,11 @@ type Options2LO struct {
// UseIDToken requests that the token returned be an ID token if one is
// returned from the server. Optional.
UseIDToken bool
// Logger is used for debug logging. If provided, logging will be enabled
// at the loggers configured level. By default logging is disabled unless
// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
// logger will be used. Optional.
Logger *slog.Logger
}
func (o *Options2LO) client() *http.Client {
@@ -526,12 +527,13 @@ func New2LOTokenProvider(opts *Options2LO) (TokenProvider, error) {
if err := opts.validate(); err != nil {
return nil, err
}
return tokenProvider2LO{opts: opts, Client: opts.client()}, nil
return tokenProvider2LO{opts: opts, Client: opts.client(), logger: internallog.New(opts.Logger)}, nil
}
type tokenProvider2LO struct {
opts *Options2LO
Client *http.Client
logger *slog.Logger
}
func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) {
@@ -566,10 +568,12 @@ func (tp tokenProvider2LO) Token(ctx context.Context) (*Token, error) {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
tp.logger.DebugContext(ctx, "2LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
resp, body, err := internal.DoRequest(tp.Client, req)
if err != nil {
return nil, fmt.Errorf("auth: cannot fetch token: %w", err)
}
tp.logger.DebugContext(ctx, "2LO token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
return nil, &Error{
Response: resp,

View File

@@ -37,8 +37,12 @@ var (
// computeTokenProvider creates a [cloud.google.com/go/auth.TokenProvider] that
// uses the metadata service to retrieve tokens.
func computeTokenProvider(opts *DetectOptions) auth.TokenProvider {
return auth.NewCachedTokenProvider(computeProvider{scopes: opts.Scopes}, &auth.CachedTokenProviderOptions{
func computeTokenProvider(opts *DetectOptions, client *metadata.Client) auth.TokenProvider {
return auth.NewCachedTokenProvider(&computeProvider{
scopes: opts.Scopes,
client: client,
tokenBindingType: opts.TokenBindingType,
}, &auth.CachedTokenProviderOptions{
ExpireEarly: opts.EarlyTokenRefresh,
DisableAsyncRefresh: opts.DisableAsyncRefresh,
})
@@ -47,6 +51,8 @@ func computeTokenProvider(opts *DetectOptions) auth.TokenProvider {
// computeProvider fetches tokens from the google cloud metadata service.
type computeProvider struct {
scopes []string
client *metadata.Client
tokenBindingType TokenBindingType
}
type metadataTokenResp struct {
@@ -55,17 +61,27 @@ type metadataTokenResp struct {
TokenType string `json:"token_type"`
}
func (cs computeProvider) Token(ctx context.Context) (*auth.Token, error) {
func (cs *computeProvider) Token(ctx context.Context) (*auth.Token, error) {
tokenURI, err := url.Parse(computeTokenURI)
if err != nil {
return nil, err
}
if len(cs.scopes) > 0 {
hasScopes := len(cs.scopes) > 0
if hasScopes || cs.tokenBindingType != NoBinding {
v := url.Values{}
if hasScopes {
v.Set("scopes", strings.Join(cs.scopes, ","))
}
switch cs.tokenBindingType {
case MTLSHardBinding:
v.Set("transport", "mtls")
v.Set("binding-enforcement", "on")
case ALTSHardBinding:
v.Set("transport", "alts")
}
tokenURI.RawQuery = v.Encode()
}
tokenJSON, err := metadata.GetWithContext(ctx, tokenURI.String())
tokenJSON, err := cs.client.GetWithContext(ctx, tokenURI.String())
if err != nil {
return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
}

View File

@@ -19,6 +19,7 @@ import (
"encoding/json"
"errors"
"fmt"
"log/slog"
"net/http"
"os"
"time"
@@ -27,6 +28,7 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
"cloud.google.com/go/compute/metadata"
"github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -49,6 +51,23 @@ var (
allowOnGCECheck = true
)
// TokenBindingType specifies the type of binding used when requesting a token
// whether to request a hard-bound token using mTLS or an instance identity
// bound token using ALTS.
type TokenBindingType int
const (
// NoBinding specifies that requested tokens are not required to have a
// binding. This is the default option.
NoBinding TokenBindingType = iota
// MTLSHardBinding specifies that a hard-bound token should be requested
// using an mTLS with S2A channel.
MTLSHardBinding
// ALTSHardBinding specifies that an instance identity bound token should
// be requested using an ALTS channel.
ALTSHardBinding
)
// OnGCE reports whether this process is running in Google Cloud.
func OnGCE() bool {
// TODO(codyoss): once all libs use this auth lib move metadata check here
@@ -96,12 +115,17 @@ func DetectDefault(opts *DetectOptions) (*auth.Credentials, error) {
}
if OnGCE() {
metadataClient := metadata.NewWithOptions(&metadata.Options{
Logger: opts.logger(),
})
return auth.NewCredentials(&auth.CredentialsOptions{
TokenProvider: computeTokenProvider(opts),
TokenProvider: computeTokenProvider(opts, metadataClient),
ProjectIDProvider: auth.CredentialsPropertyFunc(func(ctx context.Context) (string, error) {
return metadata.ProjectIDWithContext(ctx)
return metadataClient.ProjectIDWithContext(ctx)
}),
UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{},
UniverseDomainProvider: &internal.ComputeUniverseDomainProvider{
MetadataClient: metadataClient,
},
}), nil
}
@@ -114,6 +138,10 @@ type DetectOptions struct {
// https://www.googleapis.com/auth/cloud-platform. Required if Audience is
// not provided.
Scopes []string
// TokenBindingType specifies the type of binding used when requesting a
// token whether to request a hard-bound token using mTLS or an instance
// identity bound token using ALTS. Optional.
TokenBindingType TokenBindingType
// Audience that credentials tokens should have. Only applicable for 2LO
// flows with service accounts. If specified, scopes should not be provided.
Audience string
@@ -142,10 +170,26 @@ type DetectOptions struct {
// CredentialsFile overrides detection logic and sources a credential file
// from the provided filepath. If provided, CredentialsJSON must not be.
// Optional.
//
// Important: If you accept a credential configuration (credential
// JSON/File/Stream) from an external source for authentication to Google
// Cloud Platform, you must validate it before providing it to any Google
// API or library. Providing an unvalidated credential configuration to
// Google APIs can compromise the security of your systems and data. For
// more information, refer to [Validate credential configurations from
// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
CredentialsFile string
// CredentialsJSON overrides detection logic and uses the JSON bytes as the
// source for the credential. If provided, CredentialsFile must not be.
// Optional.
//
// Important: If you accept a credential configuration (credential
// JSON/File/Stream) from an external source for authentication to Google
// Cloud Platform, you must validate it before providing it to any Google
// API or library. Providing an unvalidated credential configuration to
// Google APIs can compromise the security of your systems and data. For
// more information, refer to [Validate credential configurations from
// external sources](https://cloud.google.com/docs/authentication/external/externally-sourced-credentials).
CredentialsJSON []byte
// UseSelfSignedJWT directs service account based credentials to create a
// self-signed JWT with the private key found in the file, skipping any
@@ -158,6 +202,11 @@ type DetectOptions struct {
// The default value is "googleapis.com". This option is ignored for
// authentication flows that do not support universe domain. Optional.
UniverseDomain string
// Logger is used for debug logging. If provided, logging will be enabled
// at the loggers configured level. By default logging is disabled unless
// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
// logger will be used. Optional.
Logger *slog.Logger
}
func (o *DetectOptions) validate() error {
@@ -193,6 +242,10 @@ func (o *DetectOptions) client() *http.Client {
return internal.DefaultClient()
}
func (o *DetectOptions) logger() *slog.Logger {
return internallog.New(o.Logger)
}
func readCredentialsFile(filename string, opts *DetectOptions) (*auth.Credentials, error) {
b, err := os.ReadFile(filename)
if err != nil {
@@ -253,6 +306,7 @@ func clientCredConfigFromJSON(b []byte, opts *DetectOptions) *auth.Options3LO {
AuthURL: c.AuthURI,
TokenURL: c.TokenURI,
Client: opts.client(),
Logger: opts.logger(),
EarlyTokenExpiry: opts.EarlyTokenRefresh,
AuthHandlerOpts: handleOpts,
// TODO(codyoss): refactor this out. We need to add in auto-detection

View File

@@ -141,6 +141,7 @@ func handleServiceAccount(f *credsfile.ServiceAccountFile, opts *DetectOptions)
TokenURL: f.TokenURL,
Subject: opts.Subject,
Client: opts.client(),
Logger: opts.logger(),
}
if opts2LO.TokenURL == "" {
opts2LO.TokenURL = jwtTokenURL
@@ -159,6 +160,7 @@ func handleUserCredential(f *credsfile.UserCredentialsFile, opts *DetectOptions)
EarlyTokenExpiry: opts.EarlyTokenRefresh,
RefreshToken: f.RefreshToken,
Client: opts.client(),
Logger: opts.logger(),
}
return auth.New3LOTokenProvider(opts3LO)
}
@@ -177,6 +179,7 @@ func handleExternalAccount(f *credsfile.ExternalAccountFile, opts *DetectOptions
Scopes: opts.scopes(),
WorkforcePoolUserProject: f.WorkforcePoolUserProject,
Client: opts.client(),
Logger: opts.logger(),
IsDefaultClient: opts.Client == nil,
}
if f.ServiceAccountImpersonation != nil {
@@ -195,6 +198,7 @@ func handleExternalAccountAuthorizedUser(f *credsfile.ExternalAccountAuthorizedU
ClientSecret: f.ClientSecret,
Scopes: opts.scopes(),
Client: opts.client(),
Logger: opts.logger(),
}
return externalaccountuser.NewTokenProvider(externalOpts)
}
@@ -214,6 +218,7 @@ func handleImpersonatedServiceAccount(f *credsfile.ImpersonatedServiceAccountFil
Tp: tp,
Delegates: f.Delegates,
Client: opts.client(),
Logger: opts.logger(),
})
}
@@ -221,5 +226,6 @@ func handleGDCHServiceAccount(f *credsfile.GDCHServiceAccountFile, opts *DetectO
return gdch.NewTokenProvider(f, &gdch.Options{
STSAudience: opts.STSAudience,
Client: opts.client(),
Logger: opts.logger(),
})
}

View File

@@ -23,6 +23,7 @@ import (
"encoding/json"
"errors"
"fmt"
"log/slog"
"net/http"
"net/url"
"os"
@@ -32,6 +33,7 @@ import (
"time"
"cloud.google.com/go/auth/internal"
"github.com/googleapis/gax-go/v2/internallog"
)
var (
@@ -87,6 +89,7 @@ type awsSubjectProvider struct {
reqOpts *RequestOptions
Client *http.Client
logger *slog.Logger
}
func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error) {
@@ -94,7 +97,6 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error)
if sp.RegionalCredVerificationURL == "" {
sp.RegionalCredVerificationURL = defaultRegionalCredentialVerificationURL
}
if sp.requestSigner == nil {
headers := make(map[string]string)
if sp.shouldUseMetadataServer() {
awsSessionToken, err := sp.getAWSSessionToken(ctx)
@@ -118,7 +120,6 @@ func (sp *awsSubjectProvider) subjectToken(ctx context.Context) (string, error)
RegionName: sp.region,
AwsSecurityCredentials: awsSecurityCredentials,
}
}
// Generate the signed request to AWS STS GetCallerIdentity API.
// Use the required regional endpoint. Otherwise, the request will fail.
@@ -194,10 +195,12 @@ func (sp *awsSubjectProvider) getAWSSessionToken(ctx context.Context) (string, e
}
req.Header.Set(awsIMDSv2SessionTTLHeader, awsIMDSv2SessionTTL)
sp.logger.DebugContext(ctx, "aws session token request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return "", err
}
sp.logger.DebugContext(ctx, "aws session token response", "response", internallog.HTTPResponse(resp, body))
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("credentials: unable to retrieve AWS session token: %s", body)
}
@@ -227,10 +230,12 @@ func (sp *awsSubjectProvider) getRegion(ctx context.Context, headers map[string]
for name, value := range headers {
req.Header.Add(name, value)
}
sp.logger.DebugContext(ctx, "aws region request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return "", err
}
sp.logger.DebugContext(ctx, "aws region response", "response", internallog.HTTPResponse(resp, body))
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("credentials: unable to retrieve AWS region - %s", body)
}
@@ -285,10 +290,12 @@ func (sp *awsSubjectProvider) getMetadataSecurityCredentials(ctx context.Context
for name, value := range headers {
req.Header.Add(name, value)
}
sp.logger.DebugContext(ctx, "aws security credential request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return result, err
}
sp.logger.DebugContext(ctx, "aws security credential response", "response", internallog.HTTPResponse(resp, body))
if resp.StatusCode != http.StatusOK {
return result, fmt.Errorf("credentials: unable to retrieve AWS security credentials - %s", body)
}
@@ -310,10 +317,12 @@ func (sp *awsSubjectProvider) getMetadataRoleName(ctx context.Context, headers m
req.Header.Add(name, value)
}
sp.logger.DebugContext(ctx, "aws metadata role request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return "", err
}
sp.logger.DebugContext(ctx, "aws metadata role response", "response", internallog.HTTPResponse(resp, body))
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("credentials: unable to retrieve AWS role name - %s", body)
}

View File

@@ -18,6 +18,7 @@ import (
"context"
"errors"
"fmt"
"log/slog"
"net/http"
"regexp"
"strconv"
@@ -28,6 +29,7 @@ import (
"cloud.google.com/go/auth/credentials/internal/impersonate"
"cloud.google.com/go/auth/credentials/internal/stsexchange"
"cloud.google.com/go/auth/internal/credsfile"
"github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -104,6 +106,11 @@ type Options struct {
// This is important for X509 credentials which should create a new client if the default was used
// but should respect a client explicitly passed in by the user.
IsDefaultClient bool
// Logger is used for debug logging. If provided, logging will be enabled
// at the loggers configured level. By default logging is disabled unless
// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
// logger will be used. Optional.
Logger *slog.Logger
}
// SubjectTokenProvider can be used to supply a subject token to exchange for a
@@ -224,6 +231,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
return nil, err
}
opts.resolveTokenURL()
logger := internallog.New(opts.Logger)
stp, err := newSubjectTokenProvider(opts)
if err != nil {
return nil, err
@@ -238,6 +246,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
client: client,
opts: opts,
stp: stp,
logger: logger,
}
if opts.ServiceAccountImpersonationURL == "" {
@@ -254,6 +263,7 @@ func NewTokenProvider(opts *Options) (auth.TokenProvider, error) {
Scopes: scopes,
Tp: auth.NewCachedTokenProvider(tp, nil),
TokenLifetimeSeconds: opts.ServiceAccountImpersonationLifetimeSeconds,
Logger: logger,
})
if err != nil {
return nil, err
@@ -269,6 +279,7 @@ type subjectTokenProvider interface {
// tokenProvider is the provider that handles external credentials. It is used to retrieve Tokens.
type tokenProvider struct {
client *http.Client
logger *slog.Logger
opts *Options
stp subjectTokenProvider
}
@@ -310,6 +321,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
Authentication: clientAuth,
Headers: header,
ExtraOpts: options,
Logger: tp.logger,
})
if err != nil {
return nil, err
@@ -330,12 +342,14 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
// newSubjectTokenProvider determines the type of credsfile.CredentialSource needed to create a
// subjectTokenProvider
func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
logger := internallog.New(o.Logger)
reqOpts := &RequestOptions{Audience: o.Audience, SubjectTokenType: o.SubjectTokenType}
if o.AwsSecurityCredentialsProvider != nil {
return &awsSubjectProvider{
securityCredentialsProvider: o.AwsSecurityCredentialsProvider,
TargetResource: o.Audience,
reqOpts: reqOpts,
logger: logger,
}, nil
} else if o.SubjectTokenProvider != nil {
return &programmaticProvider{stp: o.SubjectTokenProvider, opts: reqOpts}, nil
@@ -352,6 +366,7 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
CredVerificationURL: o.CredentialSource.URL,
TargetResource: o.Audience,
Client: o.Client,
logger: logger,
}
if o.CredentialSource.IMDSv2SessionTokenURL != "" {
awsProvider.IMDSv2SessionTokenURL = o.CredentialSource.IMDSv2SessionTokenURL
@@ -362,7 +377,13 @@ func newSubjectTokenProvider(o *Options) (subjectTokenProvider, error) {
} else if o.CredentialSource.File != "" {
return &fileSubjectProvider{File: o.CredentialSource.File, Format: o.CredentialSource.Format}, nil
} else if o.CredentialSource.URL != "" {
return &urlSubjectProvider{URL: o.CredentialSource.URL, Headers: o.CredentialSource.Headers, Format: o.CredentialSource.Format, Client: o.Client}, nil
return &urlSubjectProvider{
URL: o.CredentialSource.URL,
Headers: o.CredentialSource.Headers,
Format: o.CredentialSource.Format,
Client: o.Client,
Logger: logger,
}, nil
} else if o.CredentialSource.Executable != nil {
ec := o.CredentialSource.Executable
if ec.Command == "" {

View File

@@ -19,10 +19,12 @@ import (
"encoding/json"
"errors"
"fmt"
"log/slog"
"net/http"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
"github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -38,6 +40,7 @@ type urlSubjectProvider struct {
Headers map[string]string
Format *credsfile.Format
Client *http.Client
Logger *slog.Logger
}
func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error) {
@@ -49,10 +52,12 @@ func (sp *urlSubjectProvider) subjectToken(ctx context.Context) (string, error)
for key, val := range sp.Headers {
req.Header.Add(key, val)
}
sp.Logger.DebugContext(ctx, "url subject token request", "request", internallog.HTTPRequest(req, nil))
resp, body, err := internal.DoRequest(sp.Client, req)
if err != nil {
return "", fmt.Errorf("credentials: invalid response when retrieving subject token: %w", err)
}
sp.Logger.DebugContext(ctx, "url subject token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
return "", fmt.Errorf("credentials: status code %d: %s", c, body)
}

View File

@@ -17,12 +17,14 @@ package externalaccountuser
import (
"context"
"errors"
"log/slog"
"net/http"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/credentials/internal/stsexchange"
"cloud.google.com/go/auth/internal"
"github.com/googleapis/gax-go/v2/internallog"
)
// Options stores the configuration for fetching tokens with external authorized
@@ -51,6 +53,8 @@ type Options struct {
// Client for token request.
Client *http.Client
// Logger for logging.
Logger *slog.Logger
}
func (c *Options) validate() bool {
@@ -90,6 +94,7 @@ func (tp *tokenProvider) Token(ctx context.Context) (*auth.Token, error) {
RefreshToken: opts.RefreshToken,
Authentication: clientAuth,
Headers: headers,
Logger: internallog.New(tp.o.Logger),
})
if err != nil {
return nil, err

View File

@@ -16,12 +16,13 @@ package gdch
import (
"context"
"crypto/rsa"
"crypto"
"crypto/tls"
"crypto/x509"
"encoding/json"
"errors"
"fmt"
"log/slog"
"net/http"
"net/url"
"os"
@@ -32,6 +33,7 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/credsfile"
"cloud.google.com/go/auth/internal/jwt"
"github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -51,6 +53,7 @@ var (
type Options struct {
STSAudience string
Client *http.Client
Logger *slog.Logger
}
// NewTokenProvider returns a [cloud.google.com/go/auth.TokenProvider] from a
@@ -62,7 +65,7 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok
if o.STSAudience == "" {
return nil, errors.New("credentials: STSAudience must be set for the GDCH auth flows")
}
pk, err := internal.ParseKey([]byte(f.PrivateKey))
signer, err := internal.ParseKey([]byte(f.PrivateKey))
if err != nil {
return nil, err
}
@@ -75,10 +78,11 @@ func NewTokenProvider(f *credsfile.GDCHServiceAccountFile, o *Options) (auth.Tok
serviceIdentity: fmt.Sprintf("system:serviceaccount:%s:%s", f.Project, f.Name),
tokenURL: f.TokenURL,
aud: o.STSAudience,
pk: pk,
signer: signer,
pkID: f.PrivateKeyID,
certPool: certPool,
client: o.Client,
logger: internallog.New(o.Logger),
}
return tp, nil
}
@@ -97,11 +101,12 @@ type gdchProvider struct {
serviceIdentity string
tokenURL string
aud string
pk *rsa.PrivateKey
signer crypto.Signer
pkID string
certPool *x509.CertPool
client *http.Client
logger *slog.Logger
}
func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
@@ -120,7 +125,7 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
Type: jwt.HeaderType,
KeyID: string(g.pkID),
}
payload, err := jwt.EncodeJWS(&h, &claims, g.pk)
payload, err := jwt.EncodeJWS(&h, &claims, g.signer)
if err != nil {
return nil, err
}
@@ -136,10 +141,12 @@ func (g gdchProvider) Token(ctx context.Context) (*auth.Token, error) {
return nil, err
}
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
g.logger.DebugContext(ctx, "gdch token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
resp, body, err := internal.DoRequest(g.client, req)
if err != nil {
return nil, fmt.Errorf("credentials: cannot fetch token: %w", err)
}
g.logger.DebugContext(ctx, "gdch token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
return nil, &auth.Error{
Response: resp,

View File

@@ -0,0 +1,105 @@
// Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package impersonate
import (
"bytes"
"context"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"strings"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
"github.com/googleapis/gax-go/v2/internallog"
)
var (
universeDomainPlaceholder = "UNIVERSE_DOMAIN"
iamCredentialsUniverseDomainEndpoint = "https://iamcredentials.UNIVERSE_DOMAIN"
)
// IDTokenIAMOptions provides configuration for [IDTokenIAMOptions.Token].
type IDTokenIAMOptions struct {
// Client is required.
Client *http.Client
// Logger is required.
Logger *slog.Logger
UniverseDomain auth.CredentialsPropertyProvider
ServiceAccountEmail string
GenerateIDTokenRequest
}
// GenerateIDTokenRequest holds the request to the IAM generateIdToken RPC.
type GenerateIDTokenRequest struct {
Audience string `json:"audience"`
IncludeEmail bool `json:"includeEmail"`
// Delegates are the ordered, fully-qualified resource name for service
// accounts in a delegation chain. Each service account must be granted
// roles/iam.serviceAccountTokenCreator on the next service account in the
// chain. The delegates must have the following format:
// projects/-/serviceAccounts/{ACCOUNT_EMAIL_OR_UNIQUEID}. The - wildcard
// character is required; replacing it with a project ID is invalid.
// Optional.
Delegates []string `json:"delegates,omitempty"`
}
// GenerateIDTokenResponse holds the response from the IAM generateIdToken RPC.
type GenerateIDTokenResponse struct {
Token string `json:"token"`
}
// Token call IAM generateIdToken with the configuration provided in [IDTokenIAMOptions].
func (o IDTokenIAMOptions) Token(ctx context.Context) (*auth.Token, error) {
universeDomain, err := o.UniverseDomain.GetProperty(ctx)
if err != nil {
return nil, err
}
endpoint := strings.Replace(iamCredentialsUniverseDomainEndpoint, universeDomainPlaceholder, universeDomain, 1)
url := fmt.Sprintf("%s/v1/%s:generateIdToken", endpoint, internal.FormatIAMServiceAccountResource(o.ServiceAccountEmail))
bodyBytes, err := json.Marshal(o.GenerateIDTokenRequest)
if err != nil {
return nil, fmt.Errorf("impersonate: unable to marshal request: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewReader(bodyBytes))
if err != nil {
return nil, fmt.Errorf("impersonate: unable to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
o.Logger.DebugContext(ctx, "impersonated idtoken request", "request", internallog.HTTPRequest(req, bodyBytes))
resp, body, err := internal.DoRequest(o.Client, req)
if err != nil {
return nil, fmt.Errorf("impersonate: unable to generate ID token: %w", err)
}
o.Logger.DebugContext(ctx, "impersonated idtoken response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < 200 || c > 299 {
return nil, fmt.Errorf("impersonate: status code %d: %s", c, body)
}
var tokenResp GenerateIDTokenResponse
if err := json.Unmarshal(body, &tokenResp); err != nil {
return nil, fmt.Errorf("impersonate: unable to parse response: %w", err)
}
return &auth.Token{
Value: tokenResp.Token,
// Generated ID tokens are good for one hour.
Expiry: time.Now().Add(1 * time.Hour),
}, nil
}

View File

@@ -20,11 +20,13 @@ import (
"encoding/json"
"errors"
"fmt"
"log/slog"
"net/http"
"time"
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
"github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -74,6 +76,11 @@ type Options struct {
// Client configures the underlying client used to make network requests
// when fetching tokens. Required.
Client *http.Client
// Logger is used for debug logging. If provided, logging will be enabled
// at the loggers configured level. By default logging is disabled unless
// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
// logger will be used. Optional.
Logger *slog.Logger
}
func (o *Options) validate() error {
@@ -88,6 +95,7 @@ func (o *Options) validate() error {
// Token performs the exchange to get a temporary service account token to allow access to GCP.
func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
logger := internallog.New(o.Logger)
lifetime := defaultTokenLifetime
if o.TokenLifetimeSeconds != 0 {
lifetime = fmt.Sprintf("%ds", o.TokenLifetimeSeconds)
@@ -109,10 +117,12 @@ func (o *Options) Token(ctx context.Context) (*auth.Token, error) {
if err := setAuthHeader(ctx, o.Tp, req); err != nil {
return nil, err
}
logger.DebugContext(ctx, "impersonated token request", "request", internallog.HTTPRequest(req, b))
resp, body, err := internal.DoRequest(o.Client, req)
if err != nil {
return nil, fmt.Errorf("credentials: unable to generate access token: %w", err)
}
logger.DebugContext(ctx, "impersonated token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c >= http.StatusMultipleChoices {
return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
}

View File

@@ -19,6 +19,7 @@ import (
"encoding/base64"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"net/url"
"strconv"
@@ -26,6 +27,7 @@ import (
"cloud.google.com/go/auth"
"cloud.google.com/go/auth/internal"
"github.com/googleapis/gax-go/v2/internallog"
)
const (
@@ -40,6 +42,7 @@ const (
// Options stores the configuration for making an sts exchange request.
type Options struct {
Client *http.Client
Logger *slog.Logger
Endpoint string
Request *TokenRequest
Authentication ClientAuthentication
@@ -80,6 +83,7 @@ func ExchangeToken(ctx context.Context, opts *Options) (*TokenResponse, error) {
func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenResponse, error) {
opts.Authentication.InjectAuthentication(data, opts.Headers)
encodedData := data.Encode()
logger := internallog.New(opts.Logger)
req, err := http.NewRequestWithContext(ctx, "POST", opts.Endpoint, strings.NewReader(encodedData))
if err != nil {
@@ -93,10 +97,12 @@ func doRequest(ctx context.Context, opts *Options, data url.Values) (*TokenRespo
}
req.Header.Set("Content-Length", strconv.Itoa(len(encodedData)))
logger.DebugContext(ctx, "sts token request", "request", internallog.HTTPRequest(req, []byte(encodedData)))
resp, body, err := internal.DoRequest(opts.Client, req)
if err != nil {
return nil, fmt.Errorf("credentials: invalid response from Secure Token Server: %w", err)
}
logger.DebugContext(ctx, "sts token response", "response", internallog.HTTPResponse(resp, body))
if c := resp.StatusCode; c < http.StatusOK || c > http.StatusMultipleChoices {
return nil, fmt.Errorf("credentials: status code %d: %s", c, body)
}

View File

@@ -16,9 +16,10 @@ package credentials
import (
"context"
"crypto/rsa"
"crypto"
"errors"
"fmt"
"log/slog"
"strings"
"time"
@@ -39,7 +40,7 @@ func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions
if len(opts.scopes()) == 0 && opts.Audience == "" {
return nil, errors.New("credentials: both scopes and audience are empty")
}
pk, err := internal.ParseKey([]byte(f.PrivateKey))
signer, err := internal.ParseKey([]byte(f.PrivateKey))
if err != nil {
return nil, fmt.Errorf("credentials: could not parse key: %w", err)
}
@@ -47,8 +48,9 @@ func configureSelfSignedJWT(f *credsfile.ServiceAccountFile, opts *DetectOptions
email: f.ClientEmail,
audience: opts.Audience,
scopes: opts.scopes(),
pk: pk,
signer: signer,
pkID: f.PrivateKeyID,
logger: opts.logger(),
}, nil
}
@@ -56,8 +58,9 @@ type selfSignedTokenProvider struct {
email string
audience string
scopes []string
pk *rsa.PrivateKey
signer crypto.Signer
pkID string
logger *slog.Logger
}
func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) {
@@ -77,9 +80,10 @@ func (tp *selfSignedTokenProvider) Token(context.Context) (*auth.Token, error) {
Type: jwt.HeaderType,
KeyID: string(tp.pkID),
}
msg, err := jwt.EncodeJWS(h, c, tp.pk)
tok, err := jwt.EncodeJWS(h, c, tp.signer)
if err != nil {
return nil, fmt.Errorf("credentials: could not encode JWT: %w", err)
}
return &auth.Token{Value: msg, Type: internal.TokenTypeBearer, Expiry: exp}, nil
tp.logger.Debug("created self-signed JWT", "token", tok)
return &auth.Token{Value: tok, Type: internal.TokenTypeBearer, Expiry: exp}, nil
}

View File

@@ -20,12 +20,14 @@ import (
"crypto/tls"
"errors"
"fmt"
"log/slog"
"net/http"
"cloud.google.com/go/auth"
detect "cloud.google.com/go/auth/credentials"
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
"github.com/googleapis/gax-go/v2/internallog"
)
// ClientCertProvider is a function that returns a TLS client certificate to be
@@ -69,6 +71,11 @@ type Options struct {
// configured for the client, which will be compared to the universe domain
// that is separately configured for the credentials.
UniverseDomain string
// Logger is used for debug logging. If provided, logging will be enabled
// at the loggers configured level. By default logging is disabled unless
// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
// logger will be used. Optional.
Logger *slog.Logger
// InternalOptions are NOT meant to be set directly by consumers of this
// package, they should only be set by generated client code.
@@ -101,6 +108,10 @@ func (o *Options) client() *http.Client {
return nil
}
func (o *Options) logger() *slog.Logger {
return internallog.New(o.Logger)
}
func (o *Options) resolveDetectOptions() *detect.DetectOptions {
io := o.InternalOptions
// soft-clone these so we are not updating a ref the user holds and may reuse
@@ -125,6 +136,9 @@ func (o *Options) resolveDetectOptions() *detect.DetectOptions {
do.Client = transport.DefaultHTTPClientWithTLS(tlsConfig)
do.TokenURL = detect.GoogleMTLSTokenURL
}
if do.Logger == nil {
do.Logger = o.logger()
}
return do
}
@@ -147,14 +161,21 @@ type InternalOptions struct {
// service.
DefaultScopes []string
// SkipValidation bypasses validation on Options. It should only be used
// internally for clients that needs more control over their transport.
// internally for clients that need more control over their transport.
SkipValidation bool
// SkipUniverseDomainValidation skips the verification that the universe
// domain configured for the client matches the universe domain configured
// for the credentials. It should only be used internally for clients that
// need more control over their transport. The default is false.
SkipUniverseDomainValidation bool
}
// AddAuthorizationMiddleware adds a middleware to the provided client's
// transport that sets the Authorization header with the value produced by the
// provided [cloud.google.com/go/auth.Credentials]. An error is returned only
// if client or creds is nil.
//
// This function does not support setting a universe domain value on the client.
func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) error {
if client == nil || creds == nil {
return fmt.Errorf("httptransport: client and tp must not be nil")
@@ -173,7 +194,6 @@ func AddAuthorizationMiddleware(client *http.Client, creds *auth.Credentials) er
client.Transport = &authTransport{
creds: creds,
base: base,
// TODO(quartzmo): Somehow set clientUniverseDomain from impersonate calls.
}
return nil
}
@@ -191,6 +211,7 @@ func NewClient(opts *Options) (*http.Client, error) {
ClientCertProvider: opts.ClientCertProvider,
Client: opts.client(),
UniverseDomain: opts.UniverseDomain,
Logger: opts.logger(),
}
if io := opts.InternalOptions; io != nil {
tOpts.DefaultEndpointTemplate = io.DefaultEndpointTemplate

View File

@@ -1,93 +0,0 @@
// Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package httptransport
import (
"encoding/binary"
"encoding/hex"
"fmt"
"net/http"
"strconv"
"strings"
"go.opencensus.io/trace"
"go.opencensus.io/trace/propagation"
)
const (
httpHeaderMaxSize = 200
cloudTraceHeader = `X-Cloud-Trace-Context`
)
// asserts the httpFormat fulfills this foreign interface
var _ propagation.HTTPFormat = (*httpFormat)(nil)
// httpFormat implements propagation.httpFormat to propagate
// traces in HTTP headers for Google Cloud Platform and Cloud Trace.
type httpFormat struct{}
// SpanContextFromRequest extracts a Cloud Trace span context from incoming requests.
func (f *httpFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) {
h := req.Header.Get(cloudTraceHeader)
// See https://cloud.google.com/trace/docs/faq for the header HTTPFormat.
// Return if the header is empty or missing, or if the header is unreasonably
// large, to avoid making unnecessary copies of a large string.
if h == "" || len(h) > httpHeaderMaxSize {
return trace.SpanContext{}, false
}
// Parse the trace id field.
slash := strings.Index(h, `/`)
if slash == -1 {
return trace.SpanContext{}, false
}
tid, h := h[:slash], h[slash+1:]
buf, err := hex.DecodeString(tid)
if err != nil {
return trace.SpanContext{}, false
}
copy(sc.TraceID[:], buf)
// Parse the span id field.
spanstr := h
semicolon := strings.Index(h, `;`)
if semicolon != -1 {
spanstr, h = h[:semicolon], h[semicolon+1:]
}
sid, err := strconv.ParseUint(spanstr, 10, 64)
if err != nil {
return trace.SpanContext{}, false
}
binary.BigEndian.PutUint64(sc.SpanID[:], sid)
// Parse the options field, options field is optional.
if !strings.HasPrefix(h, "o=") {
return sc, true
}
o, err := strconv.ParseUint(h[2:], 10, 32)
if err != nil {
return trace.SpanContext{}, false
}
sc.TraceOptions = trace.TraceOptions(o)
return sc, true
}
// SpanContextToRequest modifies the given request to include a Cloud Trace header.
func (f *httpFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) {
sid := binary.BigEndian.Uint64(sc.SpanID[:])
header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions))
req.Header.Set(cloudTraceHeader, header)
}

View File

@@ -27,7 +27,7 @@ import (
"cloud.google.com/go/auth/internal"
"cloud.google.com/go/auth/internal/transport"
"cloud.google.com/go/auth/internal/transport/cert"
"go.opencensus.io/plugin/ochttp"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"golang.org/x/net/http2"
)
@@ -42,7 +42,7 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
headers: headers,
}
var trans http.RoundTripper = ht
trans = addOCTransport(trans, opts)
trans = addOpenTelemetryTransport(trans, opts)
switch {
case opts.DisableAuthentication:
// Do nothing.
@@ -82,11 +82,16 @@ func newTransport(base http.RoundTripper, opts *Options) (http.RoundTripper, err
headers.Set(quotaProjectHeaderKey, qp)
}
}
var skipUD bool
if iOpts := opts.InternalOptions; iOpts != nil {
skipUD = iOpts.SkipUniverseDomainValidation
}
creds.TokenProvider = auth.NewCachedTokenProvider(creds.TokenProvider, nil)
trans = &authTransport{
base: trans,
creds: creds,
clientUniverseDomain: opts.UniverseDomain,
skipUniverseDomainValidation: skipUD,
}
}
return trans, nil
@@ -163,20 +168,18 @@ func (t *headerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
return rt.RoundTrip(&newReq)
}
func addOCTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
func addOpenTelemetryTransport(trans http.RoundTripper, opts *Options) http.RoundTripper {
if opts.DisableTelemetry {
return trans
}
return &ochttp.Transport{
Base: trans,
Propagation: &httpFormat{},
}
return otelhttp.NewTransport(trans)
}
type authTransport struct {
creds *auth.Credentials
base http.RoundTripper
clientUniverseDomain string
skipUniverseDomainValidation bool
}
// getClientUniverseDomain returns the default service domain for a given Cloud
@@ -215,7 +218,7 @@ func (t *authTransport) RoundTrip(req *http.Request) (*http.Response, error) {
if err != nil {
return nil, err
}
if token.MetadataString("auth.google.tokenSource") != "compute-metadata" {
if !t.skipUniverseDomainValidation && token.MetadataString("auth.google.tokenSource") != "compute-metadata" {
credentialsUniverseDomain, err := t.creds.UniverseDomain(req.Context())
if err != nil {
return nil, err

View File

@@ -16,7 +16,7 @@ package internal
import (
"context"
"crypto/rsa"
"crypto"
"crypto/x509"
"encoding/json"
"encoding/pem"
@@ -72,25 +72,27 @@ func DefaultClient() *http.Client {
}
// ParseKey converts the binary contents of a private key file
// to an *rsa.PrivateKey. It detects whether the private key is in a
// to an crypto.Signer. It detects whether the private key is in a
// PEM container or not. If so, it extracts the the private key
// from PEM container before conversion. It only supports PEM
// containers with no passphrase.
func ParseKey(key []byte) (*rsa.PrivateKey, error) {
func ParseKey(key []byte) (crypto.Signer, error) {
block, _ := pem.Decode(key)
if block != nil {
key = block.Bytes
}
parsedKey, err := x509.ParsePKCS8PrivateKey(key)
var parsedKey crypto.PrivateKey
var err error
parsedKey, err = x509.ParsePKCS8PrivateKey(key)
if err != nil {
parsedKey, err = x509.ParsePKCS1PrivateKey(key)
if err != nil {
return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8: %w", err)
}
}
parsed, ok := parsedKey.(*rsa.PrivateKey)
parsed, ok := parsedKey.(crypto.Signer)
if !ok {
return nil, errors.New("private key is invalid")
return nil, errors.New("private key is not a signer")
}
return parsed, nil
}
@@ -179,6 +181,7 @@ func (p StaticProperty) GetProperty(context.Context) (string, error) {
// ComputeUniverseDomainProvider fetches the credentials universe domain from
// the google cloud metadata service.
type ComputeUniverseDomainProvider struct {
MetadataClient *metadata.Client
universeDomainOnce sync.Once
universeDomain string
universeDomainErr error
@@ -188,7 +191,7 @@ type ComputeUniverseDomainProvider struct {
// metadata service.
func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string, error) {
c.universeDomainOnce.Do(func() {
c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx)
c.universeDomain, c.universeDomainErr = getMetadataUniverseDomain(ctx, c.MetadataClient)
})
if c.universeDomainErr != nil {
return "", c.universeDomainErr
@@ -197,14 +200,14 @@ func (c *ComputeUniverseDomainProvider) GetProperty(ctx context.Context) (string
}
// httpGetMetadataUniverseDomain is a package var for unit test substitution.
var httpGetMetadataUniverseDomain = func(ctx context.Context) (string, error) {
var httpGetMetadataUniverseDomain = func(ctx context.Context, client *metadata.Client) (string, error) {
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
defer cancel()
return metadata.GetWithContext(ctx, "universe/universe_domain")
return client.GetWithContext(ctx, "universe/universe-domain")
}
func getMetadataUniverseDomain(ctx context.Context) (string, error) {
universeDomain, err := httpGetMetadataUniverseDomain(ctx)
func getMetadataUniverseDomain(ctx context.Context, client *metadata.Client) (string, error) {
universeDomain, err := httpGetMetadataUniverseDomain(ctx, client)
if err == nil {
return universeDomain, nil
}
@@ -214,3 +217,9 @@ func getMetadataUniverseDomain(ctx context.Context) (string, error) {
}
return "", err
}
// FormatIAMServiceAccountResource sets a service account name in an IAM resource
// name.
func FormatIAMServiceAccountResource(name string) string {
return fmt.Sprintf("projects/-/serviceAccounts/%s", name)
}

View File

@@ -111,7 +111,7 @@ func (c *Claims) encode() (string, error) {
}
// EncodeJWS encodes the data using the provided key as a JSON web signature.
func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) {
func EncodeJWS(header *Header, c *Claims, signer crypto.Signer) (string, error) {
head, err := header.encode()
if err != nil {
return "", err
@@ -123,7 +123,7 @@ func EncodeJWS(header *Header, c *Claims, key *rsa.PrivateKey) (string, error) {
ss := fmt.Sprintf("%s.%s", head, claims)
h := sha256.New()
h.Write([]byte(ss))
sig, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, h.Sum(nil))
sig, err := signer.Sign(rand.Reader, h.Sum(nil), crypto.SHA256)
if err != nil {
return "", err
}

View File

@@ -20,6 +20,7 @@ import (
"crypto/x509"
"errors"
"log"
"log/slog"
"net"
"net/http"
"net/url"
@@ -51,8 +52,14 @@ const (
mtlsMDSKey = "/run/google-mds-mtls/client.key"
)
var (
errUniverseNotSupportedMTLS = errors.New("mTLS is not supported in any universe other than googleapis.com")
// Type represents the type of transport used.
type Type int
const (
// TransportTypeUnknown represents an unknown transport type and is the default option.
TransportTypeUnknown Type = iota
// TransportTypeMTLSS2A represents the mTLS transport type using S2A.
TransportTypeMTLSS2A
)
// Options is a struct that is duplicated information from the individual
@@ -60,13 +67,14 @@ var (
// fields on httptransport.Options and grpctransport.Options.
type Options struct {
Endpoint string
DefaultMTLSEndpoint string
DefaultEndpointTemplate string
DefaultMTLSEndpoint string
ClientCertProvider cert.Provider
Client *http.Client
UniverseDomain string
EnableDirectPath bool
EnableDirectPathXds bool
Logger *slog.Logger
}
// getUniverseDomain returns the default service domain for a given Cloud
@@ -94,6 +102,16 @@ func (o *Options) defaultEndpoint() string {
return strings.Replace(o.DefaultEndpointTemplate, universeDomainPlaceholder, o.getUniverseDomain(), 1)
}
// defaultMTLSEndpoint returns the DefaultMTLSEndpointTemplate merged with the
// universe domain if the DefaultMTLSEndpointTemplate is set, otherwise returns an
// empty string.
func (o *Options) defaultMTLSEndpoint() string {
if o.DefaultMTLSEndpoint == "" {
return ""
}
return strings.Replace(o.DefaultMTLSEndpoint, universeDomainPlaceholder, o.getUniverseDomain(), 1)
}
// mergedEndpoint merges a user-provided Endpoint of format host[:port] with the
// default endpoint.
func (o *Options) mergedEndpoint() (string, error) {
@@ -112,13 +130,20 @@ func fixScheme(baseURL string) string {
return baseURL
}
// GRPCTransportCredentials embeds interface TransportCredentials with additional data.
type GRPCTransportCredentials struct {
credentials.TransportCredentials
Endpoint string
TransportType Type
}
// GetGRPCTransportCredsAndEndpoint returns an instance of
// [google.golang.org/grpc/credentials.TransportCredentials], and the
// corresponding endpoint to use for GRPC client.
func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCredentials, string, error) {
// corresponding endpoint and transport type to use for GRPC client.
func GetGRPCTransportCredsAndEndpoint(opts *Options) (*GRPCTransportCredentials, error) {
config, err := getTransportConfig(opts)
if err != nil {
return nil, "", err
return nil, err
}
defaultTransportCreds := credentials.NewTLS(&tls.Config{
@@ -133,12 +158,16 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede
transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
if err != nil {
log.Printf("Loading MTLS MDS credentials failed: %v", err)
return defaultTransportCreds, config.endpoint, nil
if config.s2aAddress != "" {
s2aAddr = config.s2aAddress
} else {
return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
}
}
} else if config.s2aAddress != "" {
s2aAddr = config.s2aAddress
} else {
return defaultTransportCreds, config.endpoint, nil
return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
}
var fallbackOpts *s2a.FallbackOptions
@@ -156,9 +185,9 @@ func GetGRPCTransportCredsAndEndpoint(opts *Options) (credentials.TransportCrede
})
if err != nil {
// Use default if we cannot initialize S2A client transport credentials.
return defaultTransportCreds, config.endpoint, nil
return &GRPCTransportCredentials{defaultTransportCreds, config.endpoint, TransportTypeUnknown}, nil
}
return s2aTransportCreds, config.s2aMTLSEndpoint, nil
return &GRPCTransportCredentials{s2aTransportCreds, config.s2aMTLSEndpoint, TransportTypeMTLSS2A}, nil
}
// GetHTTPTransportConfig returns a client certificate source and a function for
@@ -177,8 +206,12 @@ func GetHTTPTransportConfig(opts *Options) (cert.Provider, func(context.Context,
transportCredsForS2A, err = loadMTLSMDSTransportCreds(mtlsMDSRoot, mtlsMDSKey)
if err != nil {
log.Printf("Loading MTLS MDS credentials failed: %v", err)
if config.s2aAddress != "" {
s2aAddr = config.s2aAddress
} else {
return config.clientCertSource, nil, nil
}
}
} else if config.s2aAddress != "" {
s2aAddr = config.s2aAddress
} else {
@@ -248,12 +281,9 @@ func getTransportConfig(opts *Options) (*transportConfig, error) {
if !shouldUseS2A(clientCertSource, opts) {
return &defaultTransportConfig, nil
}
if !opts.isUniverseDomainGDU() {
return nil, errUniverseNotSupportedMTLS
}
s2aAddress := GetS2AAddress()
mtlsS2AAddress := GetMTLSS2AAddress()
s2aAddress := GetS2AAddress(opts.Logger)
mtlsS2AAddress := GetMTLSS2AAddress(opts.Logger)
if s2aAddress == "" && mtlsS2AAddress == "" {
return &defaultTransportConfig, nil
}
@@ -262,7 +292,7 @@ func getTransportConfig(opts *Options) (*transportConfig, error) {
endpoint: endpoint,
s2aAddress: s2aAddress,
mtlsS2AAddress: mtlsS2AAddress,
s2aMTLSEndpoint: opts.DefaultMTLSEndpoint,
s2aMTLSEndpoint: opts.defaultMTLSEndpoint(),
}, nil
}
@@ -308,24 +338,23 @@ type transportConfig struct {
// getEndpoint returns the endpoint for the service, taking into account the
// user-provided endpoint override "settings.Endpoint".
//
// If no endpoint override is specified, we will either return the default endpoint or
// the default mTLS endpoint if a client certificate is available.
// If no endpoint override is specified, we will either return the default
// endpoint or the default mTLS endpoint if a client certificate is available.
//
// You can override the default endpoint choice (mtls vs. regular) by setting the
// GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
// You can override the default endpoint choice (mTLS vs. regular) by setting
// the GOOGLE_API_USE_MTLS_ENDPOINT environment variable.
//
// If the endpoint override is an address (host:port) rather than full base
// URL (ex. https://...), then the user-provided address will be merged into
// the default endpoint. For example, WithEndpoint("myhost:8000") and
// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return "https://myhost:8080/bar/baz"
// DefaultEndpointTemplate("https://UNIVERSE_DOMAIN/bar/baz") will return
// "https://myhost:8080/bar/baz". Note that this does not apply to the mTLS
// endpoint.
func getEndpoint(opts *Options, clientCertSource cert.Provider) (string, error) {
if opts.Endpoint == "" {
mtlsMode := getMTLSMode()
if mtlsMode == mTLSModeAlways || (clientCertSource != nil && mtlsMode == mTLSModeAuto) {
if !opts.isUniverseDomainGDU() {
return "", errUniverseNotSupportedMTLS
}
return opts.DefaultMTLSEndpoint, nil
return opts.defaultMTLSEndpoint(), nil
}
return opts.defaultEndpoint(), nil
}

View File

@@ -16,7 +16,6 @@ package cert
import (
"crypto/tls"
"errors"
"github.com/googleapis/enterprise-certificate-proxy/client"
)
@@ -37,11 +36,10 @@ type ecpSource struct {
func NewEnterpriseCertificateProxyProvider(configFilePath string) (Provider, error) {
key, err := client.Cred(configFilePath)
if err != nil {
if errors.Is(err, client.ErrCredUnavailable) {
// TODO(codyoss): once this is fixed upstream can handle this error a
// little better here. But be safe for now and assume unavailable.
return nil, errSourceUnavailable
}
return nil, err
}
return (&ecpSource{
key: key,

View File

@@ -82,11 +82,8 @@ func (s *workloadSource) getClientCertificate(info *tls.CertificateRequestInfo)
func getCertAndKeyFiles(configFilePath string) (string, string, error) {
jsonFile, err := os.Open(configFilePath)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return "", "", errSourceUnavailable
}
return "", "", err
}
byteValue, err := io.ReadAll(jsonFile)
if err != nil {

View File

@@ -19,6 +19,7 @@ import (
"encoding/json"
"fmt"
"log"
"log/slog"
"os"
"strconv"
"sync"
@@ -39,8 +40,8 @@ var (
// GetS2AAddress returns the S2A address to be reached via plaintext connection.
// Returns empty string if not set or invalid.
func GetS2AAddress() string {
getMetadataMTLSAutoConfig()
func GetS2AAddress(logger *slog.Logger) string {
getMetadataMTLSAutoConfig(logger)
if !mtlsConfiguration.valid() {
return ""
}
@@ -49,8 +50,8 @@ func GetS2AAddress() string {
// GetMTLSS2AAddress returns the S2A address to be reached via MTLS connection.
// Returns empty string if not set or invalid.
func GetMTLSS2AAddress() string {
getMetadataMTLSAutoConfig()
func GetMTLSS2AAddress(logger *slog.Logger) string {
getMetadataMTLSAutoConfig(logger)
if !mtlsConfiguration.valid() {
return ""
}
@@ -74,22 +75,25 @@ type s2aAddresses struct {
MTLSAddress string `json:"mtls_address"`
}
func getMetadataMTLSAutoConfig() {
func getMetadataMTLSAutoConfig(logger *slog.Logger) {
var err error
mtlsOnce.Do(func() {
mtlsConfiguration, err = queryConfig()
mtlsConfiguration, err = queryConfig(logger)
if err != nil {
log.Printf("Getting MTLS config failed: %v", err)
}
})
}
var httpGetMetadataMTLSConfig = func() (string, error) {
return metadata.GetWithContext(context.Background(), configEndpointSuffix)
var httpGetMetadataMTLSConfig = func(logger *slog.Logger) (string, error) {
metadataClient := metadata.NewWithOptions(&metadata.Options{
Logger: logger,
})
return metadataClient.GetWithContext(context.Background(), configEndpointSuffix)
}
func queryConfig() (*mtlsConfig, error) {
resp, err := httpGetMetadataMTLSConfig()
func queryConfig(logger *slog.Logger) (*mtlsConfig, error) {
resp, err := httpGetMetadataMTLSConfig(logger)
if err != nil {
return nil, fmt.Errorf("querying MTLS config from MDS endpoint failed: %w", err)
}

View File

@@ -37,6 +37,7 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt
}
newDo := &credentials.DetectOptions{
// Simple types
TokenBindingType: oldDo.TokenBindingType,
Audience: oldDo.Audience,
Subject: oldDo.Subject,
EarlyTokenRefresh: oldDo.EarlyTokenRefresh,
@@ -46,9 +47,10 @@ func CloneDetectOptions(oldDo *credentials.DetectOptions) *credentials.DetectOpt
UseSelfSignedJWT: oldDo.UseSelfSignedJWT,
UniverseDomain: oldDo.UniverseDomain,
// These fields are are pointer types that we just want to use exactly
// as the user set, copy the ref
// These fields are pointer types that we just want to use exactly as
// the user set, copy the ref
Client: oldDo.Client,
Logger: oldDo.Logger,
AuthHandlerOptions: oldDo.AuthHandlerOptions,
}

View File

@@ -1,5 +1,26 @@
# Changelog
## [0.2.7](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.6...auth/oauth2adapt/v0.2.7) (2025-01-09)
### Bug Fixes
* **auth/oauth2adapt:** Update golang.org/x/net to v0.33.0 ([e9b0b69](https://github.com/googleapis/google-cloud-go/commit/e9b0b69644ea5b276cacff0a707e8a5e87efafc9))
## [0.2.6](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.5...auth/oauth2adapt/v0.2.6) (2024-11-21)
### Bug Fixes
* **auth/oauth2adapt:** Copy map in tokenSourceAdapter.Token ([#11164](https://github.com/googleapis/google-cloud-go/issues/11164)) ([8cb0cbc](https://github.com/googleapis/google-cloud-go/commit/8cb0cbccdc32886dfb3af49fee04012937d114d2)), refs [#11161](https://github.com/googleapis/google-cloud-go/issues/11161)
## [0.2.5](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.4...auth/oauth2adapt/v0.2.5) (2024-10-30)
### Bug Fixes
* **auth/oauth2adapt:** Convert token metadata where possible ([#11062](https://github.com/googleapis/google-cloud-go/issues/11062)) ([34bf1c1](https://github.com/googleapis/google-cloud-go/commit/34bf1c164465d66745c0cfdf7cd10a8e2da92e52))
## [0.2.4](https://github.com/googleapis/google-cloud-go/compare/auth/oauth2adapt/v0.2.3...auth/oauth2adapt/v0.2.4) (2024-08-08)

View File

@@ -26,6 +26,13 @@ import (
"golang.org/x/oauth2/google"
)
const (
oauth2TokenSourceKey = "oauth2.google.tokenSource"
oauth2ServiceAccountKey = "oauth2.google.serviceAccount"
authTokenSourceKey = "auth.google.tokenSource"
authServiceAccountKey = "auth.google.serviceAccount"
)
// TokenProviderFromTokenSource converts any [golang.org/x/oauth2.TokenSource]
// into a [cloud.google.com/go/auth.TokenProvider].
func TokenProviderFromTokenSource(ts oauth2.TokenSource) auth.TokenProvider {
@@ -47,10 +54,21 @@ func (tp *tokenProviderAdapter) Token(context.Context) (*auth.Token, error) {
}
return nil, err
}
// Preserve compute token metadata, for both types of tokens.
metadata := map[string]interface{}{}
if val, ok := tok.Extra(oauth2TokenSourceKey).(string); ok {
metadata[authTokenSourceKey] = val
metadata[oauth2TokenSourceKey] = val
}
if val, ok := tok.Extra(oauth2ServiceAccountKey).(string); ok {
metadata[authServiceAccountKey] = val
metadata[oauth2ServiceAccountKey] = val
}
return &auth.Token{
Value: tok.AccessToken,
Type: tok.Type(),
Expiry: tok.Expiry,
Metadata: metadata,
}, nil
}
@@ -76,11 +94,29 @@ func (ts *tokenSourceAdapter) Token() (*oauth2.Token, error) {
}
return nil, err
}
return &oauth2.Token{
tok2 := &oauth2.Token{
AccessToken: tok.Value,
TokenType: tok.Type,
Expiry: tok.Expiry,
}, nil
}
// Preserve token metadata.
m := tok.Metadata
if m != nil {
// Copy map to avoid concurrent map writes error (#11161).
metadata := make(map[string]interface{}, len(m)+2)
for k, v := range m {
metadata[k] = v
}
// Append compute token metadata in converted form.
if val, ok := metadata[authTokenSourceKey].(string); ok && val != "" {
metadata[oauth2TokenSourceKey] = val
}
if val, ok := metadata[authServiceAccountKey].(string); ok && val != "" {
metadata[oauth2ServiceAccountKey] = val
}
tok2 = tok2.WithExtra(metadata)
}
return tok2, nil
}
// AuthCredentialsFromOauth2Credentials converts a [golang.org/x/oauth2/google.Credentials]

View File

@@ -20,6 +20,7 @@ import (
"encoding/json"
"errors"
"fmt"
"log/slog"
"mime"
"net/http"
"net/url"
@@ -28,6 +29,7 @@ import (
"time"
"cloud.google.com/go/auth/internal"
"github.com/googleapis/gax-go/v2/internallog"
)
// AuthorizationHandler is a 3-legged-OAuth helper that prompts the user for
@@ -69,6 +71,11 @@ type Options3LO struct {
// AuthHandlerOpts provides a set of options for doing a
// 3-legged OAuth2 flow with a custom [AuthorizationHandler]. Optional.
AuthHandlerOpts *AuthorizationHandlerOptions
// Logger is used for debug logging. If provided, logging will be enabled
// at the loggers configured level. By default logging is disabled unless
// enabled by setting GOOGLE_SDK_GO_LOGGING_LEVEL in which case a default
// logger will be used. Optional.
Logger *slog.Logger
}
func (o *Options3LO) validate() error {
@@ -96,6 +103,10 @@ func (o *Options3LO) validate() error {
return nil
}
func (o *Options3LO) logger() *slog.Logger {
return internallog.New(o.Logger)
}
// PKCEOptions holds parameters to support PKCE.
type PKCEOptions struct {
// Challenge is the un-padded, base64-url-encoded string of the encrypted code verifier.
@@ -293,12 +304,15 @@ func fetchToken(ctx context.Context, o *Options3LO, v url.Values) (*Token, strin
if o.AuthStyle == StyleInHeader {
req.SetBasicAuth(url.QueryEscape(o.ClientID), url.QueryEscape(o.ClientSecret))
}
logger := o.logger()
logger.DebugContext(ctx, "3LO token request", "request", internallog.HTTPRequest(req, []byte(v.Encode())))
// Make request
resp, body, err := internal.DoRequest(o.client(), req)
if err != nil {
return nil, refreshToken, err
}
logger.DebugContext(ctx, "3LO token response", "response", internallog.HTTPResponse(resp, body))
failureStatus := resp.StatusCode < 200 || resp.StatusCode > 299
tokError := &Error{
Response: resp,

View File

@@ -1,5 +1,12 @@
# Changes
## [0.6.0](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.2...compute/metadata/v0.6.0) (2024-12-13)
### Features
* **compute/metadata:** Add debug logging ([#11078](https://github.com/googleapis/google-cloud-go/issues/11078)) ([a816814](https://github.com/googleapis/google-cloud-go/commit/a81681463906e4473570a2f426eb0dc2de64e53f))
## [0.5.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.5.1...compute/metadata/v0.5.2) (2024-09-20)

149
vendor/cloud.google.com/go/compute/metadata/log.go generated vendored Normal file
View File

@@ -0,0 +1,149 @@
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package metadata
import (
"bytes"
"context"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"strings"
)
// Code below this point is copied from github.com/googleapis/gax-go/v2/internallog
// to avoid the dependency. The compute/metadata module is used by too many
// non-client library modules that can't justify the dependency.
// The handler returned if logging is not enabled.
type noOpHandler struct{}
func (h noOpHandler) Enabled(_ context.Context, _ slog.Level) bool {
return false
}
func (h noOpHandler) Handle(_ context.Context, _ slog.Record) error {
return nil
}
func (h noOpHandler) WithAttrs(_ []slog.Attr) slog.Handler {
return h
}
func (h noOpHandler) WithGroup(_ string) slog.Handler {
return h
}
// httpRequest returns a lazily evaluated [slog.LogValuer] for a
// [http.Request] and the associated body.
func httpRequest(req *http.Request, body []byte) slog.LogValuer {
	return &request{req: req, payload: body}
}

// request pairs an outgoing HTTP request with the payload that will be sent,
// deferring all formatting work until the record is actually logged.
type request struct {
	req     *http.Request
	payload []byte
}

// LogValue assembles the request group value: method, url, joined headers
// (when present), and the payload (when present and representable).
func (r *request) LogValue() slog.Value {
	// A nil receiver or request produces an empty value rather than panicking.
	if r == nil || r.req == nil {
		return slog.Value{}
	}
	attrs := []slog.Attr{
		slog.String("method", r.req.Method),
		slog.String("url", r.req.URL.String()),
	}
	var headers []slog.Attr
	for name, values := range r.req.Header {
		headers = append(headers, slog.String(name, strings.Join(values, ",")))
	}
	if len(headers) > 0 {
		attrs = append(attrs, slog.Any("headers", headers))
	}
	if len(r.payload) > 0 {
		if attr, ok := processPayload(r.payload); ok {
			attrs = append(attrs, attr)
		}
	}
	return slog.GroupValue(attrs...)
}
// httpResponse returns a lazily evaluated [slog.LogValuer] for a
// [http.Response] and the associated body.
func httpResponse(resp *http.Response, body []byte) slog.LogValuer {
	return &response{
		resp:    resp,
		payload: body,
	}
}

// response pairs a received HTTP response with its already-read body,
// deferring all formatting work until the record is actually logged.
type response struct {
	resp    *http.Response
	payload []byte
}

// LogValue assembles the response group value: status code, joined headers
// (when present), and the payload (when present and representable).
func (r *response) LogValue() slog.Value {
	// Also guard r.resp against nil: this mirrors (*request).LogValue and
	// avoids a nil-pointer dereference when the value is evaluated without
	// an actual response.
	if r == nil || r.resp == nil {
		return slog.Value{}
	}
	var groupValueAttrs []slog.Attr
	groupValueAttrs = append(groupValueAttrs, slog.String("status", fmt.Sprint(r.resp.StatusCode)))

	var headerAttr []slog.Attr
	for k, val := range r.resp.Header {
		headerAttr = append(headerAttr, slog.String(k, strings.Join(val, ",")))
	}
	if len(headerAttr) > 0 {
		groupValueAttrs = append(groupValueAttrs, slog.Any("headers", headerAttr))
	}

	if len(r.payload) > 0 {
		if attr, ok := processPayload(r.payload); ok {
			groupValueAttrs = append(groupValueAttrs, attr)
		}
	}
	return slog.GroupValue(groupValueAttrs...)
}
// processPayload converts a request/response body into a loggable slog.Attr.
// JSON objects and arrays are unmarshalled so they log as structured values;
// any other non-empty payload is logged as a (compacted, when possible)
// string. The boolean result reports whether a payload attribute was built.
func processPayload(payload []byte) (slog.Attr, bool) {
	// Guard against an empty body: callers check len > 0, but indexing
	// payload[0] unconditionally would panic if that ever changed.
	if len(payload) == 0 {
		return slog.Attr{}, false
	}
	switch payload[0] {
	case '{':
		// JSON object
		var m map[string]any
		if err := json.Unmarshal(payload, &m); err == nil {
			return slog.Any("payload", m), true
		}
	case '[':
		// JSON array
		var a []any
		if err := json.Unmarshal(payload, &a); err == nil {
			return slog.Any("payload", a), true
		}
	default:
		// Everything else
		buf := &bytes.Buffer{}
		if err := json.Compact(buf, payload); err != nil {
			// Write raw payload in case of error
			buf.Write(payload)
		}
		return slog.String("payload", buf.String()), true
	}
	// Looked like JSON but failed to parse: emit no payload attribute.
	return slog.Attr{}, false
}

View File

@@ -24,6 +24,7 @@ import (
"encoding/json"
"fmt"
"io"
"log/slog"
"net"
"net/http"
"net/url"
@@ -60,7 +61,10 @@ var (
instID = &cachedValue{k: "instance/id", trim: true}
)
var defaultClient = &Client{hc: newDefaultHTTPClient()}
var defaultClient = &Client{
hc: newDefaultHTTPClient(),
logger: slog.New(noOpHandler{}),
}
func newDefaultHTTPClient() *http.Client {
return &http.Client{
@@ -409,16 +413,41 @@ func strsContains(ss []string, s string) bool {
// A Client provides metadata.
type Client struct {
hc *http.Client
logger *slog.Logger
}
// Options for configuring a [Client].
type Options struct {
// Client is the HTTP client used to make requests. Optional.
Client *http.Client
// Logger is used to log information about HTTP request and responses.
// If not provided, nothing will be logged. Optional.
Logger *slog.Logger
}
// NewClient returns a Client that can be used to fetch metadata.
// Returns the client that uses the specified http.Client for HTTP requests.
// If nil is specified, returns the default client.
func NewClient(c *http.Client) *Client {
if c == nil {
return NewWithOptions(&Options{
Client: c,
})
}
// NewWithOptions returns a Client that is configured with the provided Options.
func NewWithOptions(opts *Options) *Client {
if opts == nil {
return defaultClient
}
return &Client{hc: c}
client := opts.Client
if client == nil {
client = newDefaultHTTPClient()
}
logger := opts.Logger
if logger == nil {
logger = slog.New(noOpHandler{})
}
return &Client{hc: client, logger: logger}
}
// getETag returns a value from the metadata service as well as the associated ETag.
@@ -448,12 +477,21 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
req.Header.Set("User-Agent", userAgent)
var res *http.Response
var reqErr error
var body []byte
retryer := newRetryer()
for {
c.logger.DebugContext(ctx, "metadata request", "request", httpRequest(req, nil))
res, reqErr = c.hc.Do(req)
var code int
if res != nil {
code = res.StatusCode
body, err = io.ReadAll(res.Body)
if err != nil {
res.Body.Close()
return "", "", err
}
c.logger.DebugContext(ctx, "metadata response", "response", httpResponse(res, body))
res.Body.Close()
}
if delay, shouldRetry := retryer.Retry(code, reqErr); shouldRetry {
if res != nil && res.Body != nil {
@@ -469,18 +507,13 @@ func (c *Client) getETag(ctx context.Context, suffix string) (value, etag string
if reqErr != nil {
return "", "", reqErr
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
all, err := io.ReadAll(res.Body)
if err != nil {
return "", "", err
}
if res.StatusCode != 200 {
return "", "", &Error{Code: res.StatusCode, Message: string(all)}
return "", "", &Error{Code: res.StatusCode, Message: string(body)}
}
return string(all), res.Header.Get("Etag"), nil
return string(body), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.

View File

@@ -190,7 +190,7 @@ func buildCanonicalizedResource(accountName, uri string, keyType SharedKeyType)
}
}
return string(cr.Bytes()), nil
return cr.String(), nil
}
func getCanonicalizedAccountName(accountName string) string {
@@ -289,7 +289,7 @@ func buildCanonicalizedHeader(headers http.Header) string {
ch.WriteRune('\n')
}
return strings.TrimSuffix(string(ch.Bytes()), "\n")
return strings.TrimSuffix(ch.String(), "\n")
}
func createAuthorizationHeader(accountName string, accountKey []byte, canonicalizedString string, keyType SharedKeyType) string {

View File

@@ -19,7 +19,7 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/url"
"strings"
@@ -318,11 +318,11 @@ func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) {
if err == nil && resp.Body != nil {
// copy the body and close it so callers don't have to
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
b, err := io.ReadAll(resp.Body)
if err != nil {
return resp, err
}
resp.Body = ioutil.NopCloser(bytes.NewReader(b))
resp.Body = io.NopCloser(bytes.NewReader(b))
}
return resp, err
}
@@ -459,12 +459,12 @@ func (pt *pollingTrackerBase) updateRawBody() error {
pt.rawBody = map[string]interface{}{}
if pt.resp.ContentLength != 0 {
defer pt.resp.Body.Close()
b, err := ioutil.ReadAll(pt.resp.Body)
b, err := io.ReadAll(pt.resp.Body)
if err != nil {
return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body")
}
// put the body back so it's available to other callers
pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
pt.resp.Body = io.NopCloser(bytes.NewReader(b))
// observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty
if len(b) == 0 {
return nil
@@ -516,11 +516,11 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() {
re := respErr{}
defer pt.resp.Body.Close()
var b []byte
if b, err = ioutil.ReadAll(pt.resp.Body); err != nil {
if b, err = io.ReadAll(pt.resp.Body); err != nil {
goto Default
}
// put the body back so it's available to other callers
pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
pt.resp.Body = io.NopCloser(bytes.NewReader(b))
if len(b) == 0 {
goto Default
}

View File

@@ -21,7 +21,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"io"
"log"
"os"
"strings"
@@ -325,7 +325,7 @@ func GetSettingsFromFile() (FileSettings, error) {
return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set")
}
contents, err := ioutil.ReadFile(fileLocation)
contents, err := os.ReadFile(fileLocation)
if err != nil {
return s, err
}
@@ -488,7 +488,7 @@ func decode(b []byte) ([]byte, error) {
}
return []byte(string(utf16.Decode(u16))), nil
}
return ioutil.ReadAll(reader)
return io.ReadAll(reader)
}
func (settings FileSettings) getResourceForToken(baseURI string) (string, error) {
@@ -636,7 +636,7 @@ func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincip
if err != nil {
return nil, err
}
certData, err := ioutil.ReadFile(ccc.CertificatePath)
certData, err := os.ReadFile(ccc.CertificatePath)
if err != nil {
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err)
}
@@ -653,7 +653,7 @@ func (ccc ClientCertificateConfig) MultiTenantServicePrincipalToken() (*adal.Mul
if err != nil {
return nil, err
}
certData, err := ioutil.ReadFile(ccc.CertificatePath)
certData, err := os.ReadFile(ccc.CertificatePath)
if err != nil {
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err)
}

View File

@@ -20,7 +20,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"regexp"
"strconv"
@@ -333,7 +333,7 @@ func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator {
// Copy and replace the Body in case it does not contain an error object.
// This will leave the Body available to the caller.
b, decodeErr := autorest.CopyAndDecode(encodedAs, resp.Body, &e)
resp.Body = ioutil.NopCloser(&b)
resp.Body = io.NopCloser(&b)
if decodeErr != nil {
return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b, decodeErr)
}

View File

@@ -17,7 +17,6 @@ package azure
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"strings"
)
@@ -315,7 +314,7 @@ func EnvironmentFromName(name string) (Environment, error) {
// This function is particularly useful in the Hybrid Cloud model, where one must define their own
// endpoints.
func EnvironmentFromFile(location string) (unmarshaled Environment, err error) {
fileContents, err := ioutil.ReadFile(location)
fileContents, err := os.ReadFile(location)
if err != nil {
return
}

View File

@@ -3,7 +3,7 @@ package azure
import (
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"strings"
@@ -236,7 +236,7 @@ func retrieveMetadataEnvironment(endpoint string) (environment environmentMetada
return environment, err
}
defer response.Body.Close()
jsonResponse, err := ioutil.ReadAll(response.Body)
jsonResponse, err := io.ReadAll(response.Body)
if err != nil {
return environment, err
}

View File

@@ -20,7 +20,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"net/http"
"strings"
@@ -106,14 +105,14 @@ func (li LoggingInspector) WithInspection() PrepareDecorator {
defer r.Body.Close()
r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body))
r.Body = io.NopCloser(io.TeeReader(r.Body, &body))
if err := r.Write(&b); err != nil {
return nil, fmt.Errorf("Failed to write response: %v", err)
}
li.Logger.Printf(requestFormat, b.String())
r.Body = ioutil.NopCloser(&body)
r.Body = io.NopCloser(&body)
return p.Prepare(r)
})
}
@@ -129,14 +128,14 @@ func (li LoggingInspector) ByInspecting() RespondDecorator {
return ResponderFunc(func(resp *http.Response) error {
var body, b bytes.Buffer
defer resp.Body.Close()
resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body))
resp.Body = io.NopCloser(io.TeeReader(resp.Body, &body))
if err := resp.Write(&b); err != nil {
return fmt.Errorf("Failed to write response: %v", err)
}
li.Logger.Printf(responseFormat, b.String())
resp.Body = ioutil.NopCloser(&body)
resp.Body = io.NopCloser(&body)
return r.Respond(resp)
})
}

View File

@@ -21,7 +21,6 @@ import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
@@ -268,7 +267,7 @@ func WithBytes(input *[]byte) PrepareDecorator {
}
r.ContentLength = int64(len(*input))
r.Body = ioutil.NopCloser(bytes.NewReader(*input))
r.Body = io.NopCloser(bytes.NewReader(*input))
}
return r, err
})
@@ -296,7 +295,7 @@ func WithFormData(v url.Values) PrepareDecorator {
setHeader(r, http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost)
r.ContentLength = int64(len(s))
r.Body = ioutil.NopCloser(strings.NewReader(s))
r.Body = io.NopCloser(strings.NewReader(s))
}
return r, err
})
@@ -331,7 +330,7 @@ func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDec
return r, err
}
setHeader(r, http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType())
r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes()))
r.Body = io.NopCloser(bytes.NewReader(body.Bytes()))
r.ContentLength = int64(body.Len())
return r, err
}
@@ -346,11 +345,11 @@ func WithFile(f io.ReadCloser) PrepareDecorator {
return PreparerFunc(func(r *http.Request) (*http.Request, error) {
r, err := p.Prepare(r)
if err == nil {
b, err := ioutil.ReadAll(f)
b, err := io.ReadAll(f)
if err != nil {
return r, err
}
r.Body = ioutil.NopCloser(bytes.NewReader(b))
r.Body = io.NopCloser(bytes.NewReader(b))
r.ContentLength = int64(len(b))
}
return r, err
@@ -396,7 +395,7 @@ func WithString(v string) PrepareDecorator {
r, err := p.Prepare(r)
if err == nil {
r.ContentLength = int64(len(v))
r.Body = ioutil.NopCloser(strings.NewReader(v))
r.Body = io.NopCloser(strings.NewReader(v))
}
return r, err
})
@@ -413,7 +412,7 @@ func WithJSON(v interface{}) PrepareDecorator {
b, err := json.Marshal(v)
if err == nil {
r.ContentLength = int64(len(b))
r.Body = ioutil.NopCloser(bytes.NewReader(b))
r.Body = io.NopCloser(bytes.NewReader(b))
}
}
return r, err
@@ -436,7 +435,7 @@ func WithXML(v interface{}) PrepareDecorator {
r.ContentLength = int64(len(bytesWithHeader))
setHeader(r, headerContentLength, fmt.Sprintf("%d", len(bytesWithHeader)))
r.Body = ioutil.NopCloser(bytes.NewReader(bytesWithHeader))
r.Body = io.NopCloser(bytes.NewReader(bytesWithHeader))
}
}
return r, err

View File

@@ -20,7 +20,6 @@ import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
)
@@ -111,7 +110,7 @@ func ByDiscardingBody() RespondDecorator {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil && resp != nil && resp.Body != nil {
if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil {
if _, err := io.Copy(io.Discard, resp.Body); err != nil {
return fmt.Errorf("Error discarding the response body: %v", err)
}
}
@@ -160,7 +159,7 @@ func ByUnmarshallingBytes(v *[]byte) RespondDecorator {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil {
bytes, errInner := ioutil.ReadAll(resp.Body)
bytes, errInner := io.ReadAll(resp.Body)
if errInner != nil {
err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
} else {
@@ -179,7 +178,7 @@ func ByUnmarshallingJSON(v interface{}) RespondDecorator {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil {
b, errInner := ioutil.ReadAll(resp.Body)
b, errInner := io.ReadAll(resp.Body)
// Some responses might include a BOM, remove for successful unmarshalling
b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf"))
if errInner != nil {
@@ -203,7 +202,7 @@ func ByUnmarshallingXML(v interface{}) RespondDecorator {
return ResponderFunc(func(resp *http.Response) error {
err := r.Respond(resp)
if err == nil {
b, errInner := ioutil.ReadAll(resp.Body)
b, errInner := io.ReadAll(resp.Body)
if errInner != nil {
err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner)
} else {
@@ -232,9 +231,9 @@ func WithErrorUnlessStatusCode(codes ...int) RespondDecorator {
resp.Status)
if resp.Body != nil {
defer resp.Body.Close()
b, _ := ioutil.ReadAll(resp.Body)
b, _ := io.ReadAll(resp.Body)
derr.ServiceError = b
resp.Body = ioutil.NopCloser(bytes.NewReader(b))
resp.Body = io.NopCloser(bytes.NewReader(b))
}
err = derr
}

View File

@@ -17,7 +17,6 @@ package autorest
import (
"bytes"
"io"
"io/ioutil"
"net/http"
)
@@ -41,12 +40,12 @@ func (rr *RetriableRequest) prepareFromByteReader() (err error) {
return err
}
} else {
b, err = ioutil.ReadAll(rr.req.Body)
b, err = io.ReadAll(rr.req.Body)
if err != nil {
return err
}
}
rr.br = bytes.NewReader(b)
rr.req.Body = ioutil.NopCloser(rr.br)
rr.req.Body = io.NopCloser(rr.br)
return err
}

View File

@@ -19,7 +19,7 @@ package autorest
import (
"bytes"
"io/ioutil"
"io"
"net/http"
)
@@ -33,10 +33,10 @@ type RetriableRequest struct {
func (rr *RetriableRequest) Prepare() (err error) {
// preserve the request body; this is to support retry logic as
// the underlying transport will always close the request body
if rr.req.Body != nil {
if rr.req.Body != nil && rr.req.Body != http.NoBody {
if rr.br != nil {
_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
rr.req.Body = ioutil.NopCloser(rr.br)
rr.req.Body = io.NopCloser(rr.br)
}
if err != nil {
return err

View File

@@ -20,7 +20,6 @@ package autorest
import (
"bytes"
"io"
"io/ioutil"
"net/http"
)
@@ -35,12 +34,12 @@ type RetriableRequest struct {
func (rr *RetriableRequest) Prepare() (err error) {
// preserve the request body; this is to support retry logic as
// the underlying transport will always close the request body
if rr.req.Body != nil {
if rr.req.Body != nil && rr.req.Body != http.NoBody {
if rr.rc != nil {
rr.req.Body = rr.rc
} else if rr.br != nil {
_, err = rr.br.Seek(0, io.SeekStart)
rr.req.Body = ioutil.NopCloser(rr.br)
rr.req.Body = io.NopCloser(rr.br)
}
if err != nil {
return err

View File

@@ -20,7 +20,6 @@ import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
@@ -217,7 +216,7 @@ func IsTemporaryNetworkError(err error) bool {
// DrainResponseBody reads the response body then closes it.
func DrainResponseBody(resp *http.Response) error {
if resp != nil && resp.Body != nil {
_, err := io.Copy(ioutil.Discard, resp.Body)
_, err := io.Copy(io.Discard, resp.Body)
resp.Body.Close()
return err
}

View File

@@ -32,6 +32,8 @@ const (
EnvTraceRateLimit = "DD_APPSEC_TRACE_RATE_LIMIT"
// EnvRules is the env var used to provide a path to a local security rule file
EnvRules = "DD_APPSEC_RULES"
// EnvRASPEnabled is the env var used to enable/disable RASP functionalities for ASM
EnvRASPEnabled = "DD_APPSEC_RASP_ENABLED"
)
// Configuration constants and default values
@@ -39,9 +41,9 @@ const (
// DefaultAPISecSampleRate is the default rate at which API Security schemas are extracted from requests
DefaultAPISecSampleRate = .1
// DefaultObfuscatorKeyRegex is the default regexp used to obfuscate keys
DefaultObfuscatorKeyRegex = `(?i)(?:p(?:ass)?w(?:or)?d|pass(?:_?phrase)?|secret|(?:api_?|private_?|public_?)key)|token|consumer_?(?:id|key|secret)|sign(?:ed|ature)|bearer|authorization`
DefaultObfuscatorKeyRegex = `(?i)pass|pw(?:or)?d|secret|(?:api|private|public|access)[_-]?key|token|consumer[_-]?(?:id|key|secret)|sign(?:ed|ature)|bearer|authorization|jsessionid|phpsessid|asp\.net[_-]sessionid|sid|jwt`
// DefaultObfuscatorValueRegex is the default regexp used to obfuscate values
DefaultObfuscatorValueRegex = `(?i)(?:p(?:ass)?w(?:or)?d|pass(?:_?phrase)?|secret|(?:api_?|private_?|public_?|access_?|secret_?)key(?:_?id)?|token|consumer_?(?:id|key|secret)|sign(?:ed|ature)?|auth(?:entication|orization)?)(?:\s*=[^;]|"\s*:\s*"[^"]+")|bearer\s+[a-z0-9\._\-]+|token:[a-z0-9]{13}|gh[opsu]_[0-9a-zA-Z]{36}|ey[I-L][\w=-]+\.ey[I-L][\w=-]+(?:\.[\w.+\/=-]+)?|[\-]{5}BEGIN[a-z\s]+PRIVATE\sKEY[\-]{5}[^\-]+[\-]{5}END[a-z\s]+PRIVATE\sKEY|ssh-rsa\s*[a-z0-9\/\.+]{100,}`
DefaultObfuscatorValueRegex = `(?i)(?:p(?:ass)?w(?:or)?d|pass(?:[_-]?phrase)?|secret(?:[_-]?key)?|(?:(?:api|private|public|access)[_-]?)key(?:[_-]?id)?|(?:(?:auth|access|id|refresh)[_-]?)?token|consumer[_-]?(?:id|key|secret)|sign(?:ed|ature)?|auth(?:entication|orization)?|jsessionid|phpsessid|asp\.net(?:[_-]|-)sessionid|sid|jwt)(?:\s*=[^;]|"\s*:\s*"[^"]+")|bearer\s+[a-z0-9\._\-]+|token:[a-z0-9]{13}|gh[opsu]_[0-9a-zA-Z]{36}|ey[I-L][\w=-]+\.ey[I-L][\w=-]+(?:\.[\w.+\/=-]+)?|[\-]{5}BEGIN[a-z\s]+PRIVATE\sKEY[\-]{5}[^\-]+[\-]{5}END[a-z\s]+PRIVATE\sKEY|ssh-rsa\s*[a-z0-9\/\.+]{100,}`
// DefaultWAFTimeout is the default time limit past which a WAF run will timeout
DefaultWAFTimeout = time.Millisecond
// DefaultTraceRate is the default limit (trace/sec) past which ASM traces are sampled out
@@ -65,24 +67,10 @@ type ObfuscatorConfig struct {
// NewAPISecConfig creates and returns a new API Security configuration by reading the env
func NewAPISecConfig() APISecConfig {
return APISecConfig{
Enabled: apiSecurityEnabled(),
Enabled: boolEnv(EnvAPISecEnabled, true),
SampleRate: readAPISecuritySampleRate(),
}
}
func apiSecurityEnabled() bool {
enabled := true
str, set := os.LookupEnv(EnvAPISecEnabled)
if set {
var err error
enabled, err = strconv.ParseBool(str)
if err != nil {
logEnvVarParsingError(EnvAPISecEnabled, str, err, enabled)
}
}
return enabled
}
func readAPISecuritySampleRate() float64 {
value := os.Getenv(EnvAPISecSampleRate)
rate, err := strconv.ParseFloat(value, 64)
@@ -99,6 +87,12 @@ func readAPISecuritySampleRate() float64 {
return rate
}
// RASPEnabled reports whether RASP functionalities are enabled for ASM.
// It reads DD_APPSEC_RASP_ENABLED and defaults to enabled when the
// variable is unset or unparsable.
func RASPEnabled() bool {
	const enabledByDefault = true
	return boolEnv(EnvRASPEnabled, enabledByDefault)
}
// NewObfuscatorConfig creates and returns a new WAF obfuscator configuration by reading the env
func NewObfuscatorConfig() ObfuscatorConfig {
keyRE := readObfuscatorConfigRegexp(EnvObfuscatorKey, DefaultObfuscatorKeyRegex)
@@ -194,3 +188,16 @@ func logEnvVarParsingError(name, value string, err error, defaultValue any) {
func logUnexpectedEnvVarValue(name string, value any, reason string, defaultValue any) {
log.Debug("appsec: unexpected configuration value of %s=%v: %s. Using default value %v.", name, value, reason, defaultValue)
}
// boolEnv reads the boolean environment variable key, returning def when the
// variable is unset or cannot be parsed (the parse failure is logged).
func boolEnv(key string, def bool) bool {
	raw, found := os.LookupEnv(key)
	if !found {
		return def
	}
	parsed, err := strconv.ParseBool(raw)
	if err != nil {
		logEnvVarParsingError(key, raw, err, def)
		return def
	}
	return parsed
}

View File

@@ -7,14 +7,8 @@ package appsec
import _ "embed" // Blank import comment for golint compliance
// StaticRecommendedRules holds the recommended AppSec security rules (v1.11.0)
// Source: https://github.com/DataDog/appsec-event-rules/blob/1.11.0/build/recommended.json
// StaticRecommendedRules holds the recommended AppSec security rules (v1.13.2)
// Source: https://github.com/DataDog/appsec-event-rules/blob/1.13.2/build/recommended.json
//
//go:embed rules.json
var StaticRecommendedRules string
// StaticProcessors holds the default processors and scanners used for API Security
// Not part of the recommended security rules
//
//go:embed processors.json
var StaticProcessors string

View File

@@ -1,208 +0,0 @@
{
"processors": [
{
"id": "processor-001",
"generator": "extract_schema",
"conditions": [
{
"operator": "equals",
"parameters": {
"inputs": [
{
"address": "waf.context.processor",
"key_path": [
"extract-schema"
]
}
],
"type": "boolean",
"value": true
}
}
],
"parameters": {
"mappings": [
{
"inputs": [
{
"address": "server.request.body"
}
],
"output": "_dd.appsec.s.req.body"
},
{
"inputs": [
{
"address": "server.request.headers.no_cookies"
}
],
"output": "_dd.appsec.s.req.headers"
},
{
"inputs": [
{
"address": "server.request.query"
}
],
"output": "_dd.appsec.s.req.query"
},
{
"inputs": [
{
"address": "server.request.path_params"
}
],
"output": "_dd.appsec.s.req.params"
},
{
"inputs": [
{
"address": "server.request.cookies"
}
],
"output": "_dd.appsec.s.req.cookies"
},
{
"inputs": [
{
"address": "server.response.headers.no_cookies"
}
],
"output": "_dd.appsec.s.res.headers"
},
{
"inputs": [
{
"address": "server.response.body"
}
],
"output": "_dd.appsec.s.res.body"
}
],
"scanners": [
{
"tags": {
"category": "pii"
}
}
]
},
"evaluate": false,
"output": true
}
],
"scanners": [
{
"id": "d962f7ddb3f55041e39195a60ff79d4814a7c331",
"name": "US Passport Scanner",
"key": {
"operator": "match_regex",
"parameters": {
"regex": "passport",
"options": {
"case_sensitive": false,
"min_length": 8
}
}
},
"value": {
"operator": "match_regex",
"parameters": {
"regex": "\\b[0-9A-Z]{9}\\b|\\b[0-9]{6}[A-Z][0-9]{2}\\b",
"options": {
"case_sensitive": false,
"min_length": 8
}
}
},
"tags": {
"type": "passport_number",
"category": "pii"
}
},
{
"id": "ac6d683cbac77f6e399a14990793dd8fd0fca333",
"name": "US Vehicle Identification Number Scanner",
"key": {
"operator": "match_regex",
"parameters": {
"regex": "vehicle[_\\s-]*identification[_\\s-]*number|vin",
"options": {
"case_sensitive": false,
"min_length": 3
}
}
},
"value": {
"operator": "match_regex",
"parameters": {
"regex": "\\b[A-HJ-NPR-Z0-9]{17}\\b",
"options": {
"case_sensitive": false,
"min_length": 17
}
}
},
"tags": {
"type": "vin",
"category": "pii"
}
},
{
"id": "de0899e0cbaaa812bb624cf04c912071012f616d",
"name": "UK National Insurance Number Scanner",
"key": {
"operator": "match_regex",
"parameters": {
"regex": "national[\\s_]?(?:insurance(?:\\s+number)?)?|NIN|NI[\\s_]?number|insurance[\\s_]?number",
"options": {
"case_sensitive": false,
"min_length": 3
}
}
},
"value": {
"operator": "match_regex",
"parameters": {
"regex": "\\b[A-Z]{2}\\d{6}[A-Z]?\\b",
"options": {
"case_sensitive": false,
"min_length": 8
}
}
},
"tags": {
"type": "uk_nin",
"category": "pii"
}
},
{
"id": "450239afc250a19799b6c03dc0e16fd6a4b2a1af",
"name": "Canadian Social Insurance Number Scanner",
"key": {
"operator": "match_regex",
"parameters": {
"regex": "social[\\s_]?(?:insurance(?:\\s+number)?)?|SIN|Canadian[\\s_]?(?:social[\\s_]?(?:insurance)?|insurance[\\s_]?number)?",
"options": {
"case_sensitive": false,
"min_length": 3
}
}
},
"value": {
"operator": "match_regex",
"parameters": {
"regex": "\\b\\d{3}-\\d{3}-\\d{3}\\b",
"options": {
"case_sensitive": false,
"min_length": 11
}
}
},
"tags": {
"type": "canadian_sin",
"category": "pii"
}
}
]
}

View File

@@ -18,16 +18,9 @@ func DefaultRuleset() ([]byte, error) {
// DefaultRulesetMap returns the unmarshaled default recommended security rules
// for AppSec, with the default API Security processors/scanners document
// merged in at the top level.
func DefaultRulesetMap() (map[string]any, error) {
	var (
		rules      map[string]any
		processors map[string]any
	)
	if err := json.Unmarshal([]byte(StaticRecommendedRules), &rules); err != nil {
		return nil, err
	}
	if err := json.Unmarshal([]byte(StaticProcessors), &processors); err != nil {
		return nil, err
	}
	// Top-level keys of the processors document override/extend the rules map.
	for key, value := range processors {
		rules[key] = value
	}
	return rules, nil
}

View File

@@ -1,7 +1,7 @@
{
"version": "2.2",
"metadata": {
"rules_version": "1.11.0"
"rules_version": "1.13.2"
},
"rules": [
{
@@ -1921,7 +1921,6 @@
"$ifs",
"$oldpwd",
"$ostype",
"$path",
"$pwd",
"dev/fd/",
"dev/null",
@@ -5849,7 +5848,8 @@
"/website.php",
"/stats.php",
"/assets/plugins/mp3_id/mp3_id.php",
"/siteminderagent/forms/smpwservices.fcc"
"/siteminderagent/forms/smpwservices.fcc",
"/eval-stdin.php"
]
}
}
@@ -6236,6 +6236,200 @@
],
"transformers": []
},
{
"id": "rasp-930-100",
"name": "Local file inclusion exploit",
"tags": {
"type": "lfi",
"category": "vulnerability_trigger",
"cwe": "22",
"capec": "1000/255/153/126",
"confidence": "0",
"module": "rasp"
},
"conditions": [
{
"parameters": {
"resource": [
{
"address": "server.io.fs.file"
}
],
"params": [
{
"address": "server.request.query"
},
{
"address": "server.request.body"
},
{
"address": "server.request.path_params"
},
{
"address": "grpc.server.request.message"
},
{
"address": "graphql.server.all_resolvers"
},
{
"address": "graphql.server.resolver"
}
]
},
"operator": "lfi_detector"
}
],
"transformers": [],
"on_match": [
"stack_trace"
]
},
{
"id": "rasp-932-100",
"name": "Command injection exploit",
"tags": {
"type": "command_injection",
"category": "vulnerability_trigger",
"cwe": "77",
"capec": "1000/152/248/88",
"confidence": "0",
"module": "rasp"
},
"conditions": [
{
"parameters": {
"resource": [
{
"address": "server.sys.shell.cmd"
}
],
"params": [
{
"address": "server.request.query"
},
{
"address": "server.request.body"
},
{
"address": "server.request.path_params"
},
{
"address": "grpc.server.request.message"
},
{
"address": "graphql.server.all_resolvers"
},
{
"address": "graphql.server.resolver"
}
]
},
"operator": "shi_detector"
}
],
"transformers": [],
"on_match": [
"stack_trace"
]
},
{
"id": "rasp-934-100",
"name": "Server-side request forgery exploit",
"tags": {
"type": "ssrf",
"category": "vulnerability_trigger",
"cwe": "918",
"capec": "1000/225/115/664",
"confidence": "0",
"module": "rasp"
},
"conditions": [
{
"parameters": {
"resource": [
{
"address": "server.io.net.url"
}
],
"params": [
{
"address": "server.request.query"
},
{
"address": "server.request.body"
},
{
"address": "server.request.path_params"
},
{
"address": "grpc.server.request.message"
},
{
"address": "graphql.server.all_resolvers"
},
{
"address": "graphql.server.resolver"
}
]
},
"operator": "ssrf_detector"
}
],
"transformers": [],
"on_match": [
"stack_trace"
]
},
{
"id": "rasp-942-100",
"name": "SQL injection exploit",
"tags": {
"type": "sql_injection",
"category": "vulnerability_trigger",
"cwe": "89",
"capec": "1000/152/248/66",
"confidence": "0",
"module": "rasp"
},
"conditions": [
{
"parameters": {
"resource": [
{
"address": "server.db.statement"
}
],
"params": [
{
"address": "server.request.query"
},
{
"address": "server.request.body"
},
{
"address": "server.request.path_params"
},
{
"address": "graphql.server.all_resolvers"
},
{
"address": "graphql.server.resolver"
}
],
"db_type": [
{
"address": "server.db.system"
}
]
},
"operator": "sqli_detector@v2"
}
],
"transformers": [],
"on_match": [
"stack_trace"
]
},
{
"id": "sqr-000-001",
"name": "SSRF: Try to access the credential manager of the main cloud services",
@@ -8239,6 +8433,57 @@
}
],
"processors": [
{
"id": "http-endpoint-fingerprint",
"generator": "http_endpoint_fingerprint",
"conditions": [
{
"operator": "exists",
"parameters": {
"inputs": [
{
"address": "waf.context.event"
},
{
"address": "server.business_logic.users.login.failure"
},
{
"address": "server.business_logic.users.login.success"
}
]
}
}
],
"parameters": {
"mappings": [
{
"method": [
{
"address": "server.request.method"
}
],
"uri_raw": [
{
"address": "server.request.uri.raw"
}
],
"body": [
{
"address": "server.request.body"
}
],
"query": [
{
"address": "server.request.query"
}
],
"output": "_dd.appsec.fp.http.endpoint"
}
]
},
"evaluate": false,
"output": true
},
{
"id": "extract-content",
"generator": "extract_schema",
@@ -8388,9 +8633,155 @@
},
"evaluate": false,
"output": true
},
{
"id": "http-header-fingerprint",
"generator": "http_header_fingerprint",
"conditions": [
{
"operator": "exists",
"parameters": {
"inputs": [
{
"address": "waf.context.event"
},
{
"address": "server.business_logic.users.login.failure"
},
{
"address": "server.business_logic.users.login.success"
}
]
}
}
],
"parameters": {
"mappings": [
{
"headers": [
{
"address": "server.request.headers.no_cookies"
}
],
"output": "_dd.appsec.fp.http.header"
}
]
},
"evaluate": false,
"output": true
},
{
"id": "http-network-fingerprint",
"generator": "http_network_fingerprint",
"conditions": [
{
"operator": "exists",
"parameters": {
"inputs": [
{
"address": "waf.context.event"
},
{
"address": "server.business_logic.users.login.failure"
},
{
"address": "server.business_logic.users.login.success"
}
]
}
}
],
"parameters": {
"mappings": [
{
"headers": [
{
"address": "server.request.headers.no_cookies"
}
],
"output": "_dd.appsec.fp.http.network"
}
]
},
"evaluate": false,
"output": true
},
{
"id": "session-fingerprint",
"generator": "session_fingerprint",
"conditions": [
{
"operator": "exists",
"parameters": {
"inputs": [
{
"address": "waf.context.event"
},
{
"address": "server.business_logic.users.login.failure"
},
{
"address": "server.business_logic.users.login.success"
}
]
}
}
],
"parameters": {
"mappings": [
{
"cookies": [
{
"address": "server.request.cookies"
}
],
"session_id": [
{
"address": "usr.session_id"
}
],
"user_id": [
{
"address": "usr.id"
}
],
"output": "_dd.appsec.fp.session"
}
]
},
"evaluate": false,
"output": true
}
],
"scanners": [
{
"id": "406f8606-52c4-4663-8db9-df70f9e8766c",
"name": "ZIP Code",
"key": {
"operator": "match_regex",
"parameters": {
"regex": "\\b(?:zip|postal)\\b",
"options": {
"case_sensitive": false,
"min_length": 3
}
}
},
"value": {
"operator": "match_regex",
"parameters": {
"regex": "^[0-9]{5}(?:-[0-9]{4})?$",
"options": {
"case_sensitive": true,
"min_length": 5
}
}
},
"tags": {
"type": "zipcode",
"category": "address"
}
},
{
"id": "JU1sRk3mSzqSUJn6GrVn7g",
"name": "American Express Card Scanner (4+4+4+3 digits)",
@@ -9157,6 +9548,34 @@
"category": "payment"
}
},
{
"id": "18b608bd7a764bff5b2344c0",
"name": "Phone number",
"key": {
"operator": "match_regex",
"parameters": {
"regex": "\\bphone|number|mobile\\b",
"options": {
"case_sensitive": false,
"min_length": 3
}
}
},
"value": {
"operator": "match_regex",
"parameters": {
"regex": "^(?:\\(\\+\\d{1,3}\\)|\\+\\d{1,3}|00\\d{1,3})?[-\\s\\.]?(?:\\(\\d{3}\\)[-\\s\\.]?)?(?:\\d[-\\s\\.]?){6,10}$",
"options": {
"case_sensitive": false,
"min_length": 6
}
}
},
"tags": {
"type": "phone",
"category": "pii"
}
},
{
"id": "de0899e0cbaaa812bb624cf04c912071012f616d-mod",
"name": "UK National Insurance Number Scanner",

View File

@@ -0,0 +1,126 @@
package httpsec
import (
"net"
"net/textproto"
"strings"
"github.com/DataDog/appsec-internal-go/netip"
)
const (
// RemoteIPTag is the tag name used for the remote HTTP request IP address.
RemoteIPTag = "network.client.ip"
// ClientIPTag is the tag name used for the client IP deduced from the HTTP
// request headers with ClientIP().
ClientIPTag = "http.client_ip"
)
// ClientIPTags returns the resulting Datadog span tags `http.client_ip`
// containing the client IP and `network.client.ip` containing the remote IP.
// The tags are present only if a valid ip address has been returned by
// ClientIP().
func ClientIPTags(remoteIP, clientIP netip.Addr) (tags map[string]string) {
remoteIPValid := remoteIP.IsValid()
clientIPValid := clientIP.IsValid()
if !remoteIPValid && !clientIPValid {
return nil
}
tags = make(map[string]string, 2)
if remoteIPValid {
tags[RemoteIPTag] = remoteIP.String()
}
if clientIPValid {
tags[ClientIPTag] = clientIP.String()
}
return tags
}
// ClientIP returns the first public IP address found in the given headers. If
// none is present, it returns the first valid IP address present, possibly
// being a local IP address. The remote address, when valid, is used as fallback
// when no IP address has been found at all.
//
// hdrs is the request header map; when hasCanonicalHeaders is true the
// monitored header names are canonicalized (textproto MIME form) before the
// lookup. Header values are treated as comma-separated lists of addresses.
func ClientIP(hdrs map[string][]string, hasCanonicalHeaders bool, remoteAddr string, monitoredHeaders []string) (remoteIP, clientIP netip.Addr) {
	// Walk IP-related headers
	var foundIP netip.Addr
headersLoop:
	for _, headerName := range monitoredHeaders {
		if hasCanonicalHeaders {
			headerName = textproto.CanonicalMIMEHeaderKey(headerName)
		}
		headerValues, exists := hdrs[headerName]
		if !exists {
			continue // this monitored header is not present
		}
		// Assuming a list of comma-separated IP addresses, split them and build
		// the list of values to try to parse as IP addresses
		var ips []string
		for _, ip := range headerValues {
			ips = append(ips, strings.Split(ip, ",")...)
		}
		// Look for the first valid or global IP address in the comma-separated list
		for _, ipstr := range ips {
			ip := parseIP(strings.TrimSpace(ipstr))
			if !ip.IsValid() {
				continue
			}
			// Replace foundIP if still not valid in order to keep the oldest
			if !foundIP.IsValid() {
				foundIP = ip
			}
			// A global (public) address wins outright; stop scanning all headers.
			if isGlobal(ip) {
				foundIP = ip
				break headersLoop
			}
		}
	}
	// Decide which IP address is the client one by starting with the remote IP
	if ip := parseIP(remoteAddr); ip.IsValid() {
		remoteIP = ip
		clientIP = ip
	}
	// The IP address found in the headers supersedes a private remote IP address.
	// Note the && binds tighter than ||: the header IP is used either when the
	// remote IP is not global, or when the header IP itself is global.
	if foundIP.IsValid() && !isGlobal(remoteIP) || isGlobal(foundIP) {
		clientIP = foundIP
	}
	return remoteIP, clientIP
}
func parseIP(s string) netip.Addr {
if ip, err := netip.ParseAddr(s); err == nil {
return ip
}
if h, _, err := net.SplitHostPort(s); err == nil {
if ip, err := netip.ParseAddr(h); err == nil {
return ip
}
}
return netip.Addr{}
}
var ipv6SpecialNetworks = [...]netip.Prefix{
netip.MustParsePrefix("fec0::/10"), // site local
}
func isGlobal(ip netip.Addr) bool {
// IsPrivate also checks for ipv6 ULA.
// We care to check for these addresses are not considered public, hence not global.
// See https://www.rfc-editor.org/rfc/rfc4193.txt for more details.
isGlobal := ip.IsValid() && !ip.IsPrivate() && !ip.IsLoopback() && !ip.IsLinkLocalUnicast()
if !isGlobal || !ip.Is6() {
return isGlobal
}
for _, n := range ipv6SpecialNetworks {
if n.Contains(ip) {
return false
}
}
return isGlobal
}

View File

@@ -0,0 +1,31 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2022 Datadog, Inc.
package netip
import "net/netip"
// Addr wraps a netip.Addr value
type Addr = netip.Addr
// Prefix wraps a netip.Prefix value
type Prefix = netip.Prefix
var (
// ParseAddr wraps the netip.ParseAddr function
ParseAddr = netip.ParseAddr
// MustParsePrefix wraps the netip.MustParsePrefix function
MustParsePrefix = netip.MustParsePrefix
// MustParseAddr wraps the netip.MustParseAddr function
MustParseAddr = netip.MustParseAddr
// AddrFrom16 wraps the netIP.AddrFrom16 function
AddrFrom16 = netip.AddrFrom16
)
// IPv4 wraps the netip.AddrFrom4 function
func IPv4(a, b, c, d byte) Addr {
e := [4]byte{a, b, c, d}
return netip.AddrFrom4(e)
}

View File

@@ -5,9 +5,67 @@
package obfuscate
import (
"strings"
)
// creditCard maintains credit card obfuscation state and processing.
type creditCard struct {
	// luhn enables Luhn checksum validation of candidate card numbers.
	luhn bool
}

// newCCObfuscator builds a credit card obfuscator from the given configuration.
func newCCObfuscator(config *CreditCardsConfig) *creditCard {
	obf := creditCard{luhn: config.Luhn}
	return &obf
}
// ObfuscateCreditCardNumber obfuscates any "credit card like" numbers in value
// for keys not in the allow-list, replacing the whole value with "?".
func (o *Obfuscator) ObfuscateCreditCardNumber(key, val string) string {
	if isCardNumberSafeKey(key) {
		return val
	}
	if o.ccObfuscator.IsCardNumber(val) {
		return "?"
	}
	return val
}

// isCardNumberSafeKey reports whether key is known to never hold a credit card
// number: either an allow-listed well-known tag or an internal "_"-prefixed tag.
func isCardNumberSafeKey(key string) bool {
	if strings.HasPrefix(key, "_") {
		return true
	}
	switch key {
	case "_sample_rate",
		"_sampling_priority_v1",
		"account_id",
		"aws_account",
		"error",
		"error.msg",
		"error.type",
		"error.stack",
		"env",
		"graphql.field",
		"graphql.query",
		"graphql.type",
		"graphql.operation.name",
		"grpc.code",
		"grpc.method",
		"grpc.request",
		"http.status_code",
		"http.method",
		"runtime-id",
		"out.host",
		"out.port",
		"sampling.priority",
		"span.type",
		"span.name",
		"service.name",
		"service",
		"sql.query",
		"version":
		// these tags are known to not be credit card numbers
		return true
	}
	return false
}
// IsCardNumber checks if b could be a credit card number by checking the digit count and IIN prefix.
// If validateLuhn is true, the Luhn checksum is also applied to potential candidates.
func IsCardNumber(b string, validateLuhn bool) (ok bool) {
func (cc *creditCard) IsCardNumber(b string) (ok bool) {
//
// Just credit card numbers for now, based on:
// • https://baymard.com/checkout-usability/credit-card-patterns
@@ -28,7 +86,7 @@ func IsCardNumber(b string, validateLuhn bool) (ok bool) {
count := 0 // counts digits encountered
foundPrefix := false // reports whether we've detected a valid prefix
recdigit := func(_ byte) {} // callback on each found digit; no-op by default (we only need this for Luhn)
if validateLuhn {
if cc.luhn {
// we need Luhn checksum validation, so we have to take additional action
// and record all digits found
buf := make([]byte, 0, len(b))

View File

@@ -0,0 +1,168 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package obfuscate
import (
"net"
"net/netip"
"regexp"
"strings"
)
// QuantizePeerIPAddresses quantizes a comma separated list of hosts. Each entry
// which is an IP address is replaced using quantizeIP. Duplicate entries
// post-quantization are collapsed into a single unique value. Entries which
// are not IP addresses are left unchanged. Comma-separated host lists are
// common for peer tags like peer.cassandra.contact.points,
// peer.couchbase.seed.nodes, peer.kafka.bootstrap.servers.
func QuantizePeerIPAddresses(raw string) string {
	var (
		out  []string
		seen = make(map[string]bool)
	)
	for _, entry := range strings.Split(raw, ",") {
		quantized := quantizeIP(entry)
		if seen[quantized] {
			continue
		}
		seen[quantized] = true
		out = append(out, quantized)
	}
	return strings.Join(out, ",")
}
// protocolRegex captures a leading URL scheme such as "http://" or "file:///".
var protocolRegex = regexp.MustCompile(`((?:dnspoll|ftp|file|http|https):/{2,3}).*`)

// allowedIPAddresses lists well-known addresses that carry meaning and are
// therefore never quantized.
var allowedIPAddresses = map[string]bool{
	// localhost
	"127.0.0.1": true,
	"::1":       true,
	// link-local cloud provider metadata server addresses
	"169.254.169.254": true,
	"fd00:ec2::254":   true,
	// ECS task metadata
	"169.254.170.2": true,
}

// splitPrefix splits a recognized prefix off raw — either the "ip-" prefix of
// AWS EC2 hostnames (e.g. ip-10-123-4-567.ec2.internal) or a URL scheme — and
// returns the prefix together with the remainder of the string.
func splitPrefix(raw string) (prefix, after string) {
	if rest, ok := strings.CutPrefix(raw, "ip-"); ok {
		return "ip-", rest
	}
	if m := protocolRegex.FindStringSubmatch(raw); len(m) >= 2 {
		prefix = m[1]
	}
	return prefix, raw[len(prefix):]
}
// quantizeIP quantizes the IP address in the provided string, only if it
// exactly matches an IP with an optional port (after stripping a recognized
// prefix). Strings that are not IP addresses, and allow-listed addresses, are
// returned unchanged.
func quantizeIP(raw string) string {
	prefix, rest := splitPrefix(raw)
	host, port, suffix := parseIPAndPort(rest)
	if host == "" {
		// not an IP address
		return raw
	}
	if allowedIPAddresses[host] {
		return raw
	}
	// Ports are much lower cardinality than IP addresses and tend to identify
	// a protocol (e.g. 443 is HTTPS), so keeping the original port is both
	// safe and useful.
	out := prefix + "blocked-ip-address"
	if port != "" {
		out += ":" + port
	}
	return out + suffix
}
// parseIPAndPort returns (host, port, suffix) when input starts with a valid
// IP address with an optional port; otherwise all three results are empty.
// suffix carries any trailing characters after the address (e.g. a hostname
// remainder).
func parseIPAndPort(input string) (host, port, suffix string) {
	h, p, err := net.SplitHostPort(input)
	if err != nil {
		// no port component: treat the whole input as the host
		h, p = input, ""
	}
	ok, end := isParseableIP(h)
	if !ok {
		return "", "", ""
	}
	return h[:end], p, h[end:]
}
// isParseableIP reports whether s starts with a parseable IP address and, when
// it does, returns the index just past the address (len(s) for a full-string
// match). On failure it returns (false, -1).
func isParseableIP(s string) (parsed bool, lastIndex int) {
	if len(s) == 0 {
		return false, -1
	}
	// Must start with a hex digit, or IPv6 can have a preceding ':'
	switch s[0] {
	case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
		'a', 'b', 'c', 'd', 'e', 'f',
		'A', 'B', 'C', 'D', 'E', 'F',
		':':
	default:
		return false, -1
	}
	// Scan until a character decides the address family: '.', '_' or '-' means
	// IPv4 with that byte as octet separator; ':' means IPv6; '%' rejects.
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '.', '_', '-':
			return parseIPv4(s, s[i])
		case ':':
			// IPv6: must parse as a complete address (no trailing characters)
			if _, err := netip.ParseAddr(s); err == nil {
				return true, len(s)
			}
			return false, -1
		case '%':
			// Assume that this was trying to be an IPv6 address with
			// a zone specifier, but the address is missing.
			return false, -1
		}
	}
	return false, -1
}
// parseIPv4 reports whether s begins with an IPv4 address whose octets are
// separated by sep, and returns the index just past that address (len(s) when
// the whole string is the address). Modified from netip to accept alternate
// separators besides '.', and to return true when s is an IPv4 address with
// trailing characters.
//
// It returns (false, -1) when s does not start with a well-formed address:
// octets with leading zeros or values above 255, doubled/leading/trailing
// separators, or fewer than four octets.
func parseIPv4(s string, sep byte) (parsed bool, lastIndex int) {
	var (
		val    int // value of the octet currently being scanned
		pos    int // number of completed octets
		digLen int // number of digits in the current octet
	)
	for i := 0; i < len(s); i++ {
		switch c := s[i]; {
		case c >= '0' && c <= '9':
			if digLen == 1 && val == 0 {
				// leading zero (e.g. "01") is rejected, as in net/netip
				return false, -1
			}
			val = val*10 + int(c-'0')
			digLen++
			if val > 255 {
				return false, -1
			}
		case c == sep:
			// .1.2.3 / 1.2.3. / 1..2.3 are malformed
			if i == 0 || i == len(s)-1 || s[i-1] == sep {
				return false, -1
			}
			// 1.2.3.4.5: four octets already seen, the address ends here
			if pos == 3 {
				return true, i
			}
			pos++
			val, digLen = 0, 0
		default:
			// a complete address followed by other characters (e.g. ":port")
			if pos == 3 && digLen > 0 {
				return true, i
			}
			return false, -1
		}
	}
	if pos < 3 {
		return false, -1
	}
	return true, len(s)
}

View File

@@ -22,6 +22,11 @@ func (o *Obfuscator) ObfuscateElasticSearchString(cmd string) string {
return obfuscateJSONString(cmd, o.es)
}
// ObfuscateOpenSearchString obfuscates the given OpenSearch JSON query using
// the obfuscator's OpenSearch JSON configuration. When that obfuscator is nil
// it is treated as disabled by obfuscateJSONString.
func (o *Obfuscator) ObfuscateOpenSearchString(cmd string) string {
	return obfuscateJSONString(cmd, o.openSearch)
}
// obfuscateJSONString obfuscates the given span's tag using the given obfuscator. If the obfuscator is
// nil it is considered disabled.
func obfuscateJSONString(cmd string, obfuscator *jsonObfuscator) string {

View File

@@ -533,9 +533,7 @@ func stateNul(s *scanner, c byte) int {
// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
//
//nolint:revive // TODO(APM) Fix revive linter
func stateError(s *scanner, c byte) int {
func stateError(_ *scanner, _ byte) int {
return scanError
}

View File

@@ -15,8 +15,9 @@ package obfuscate
import (
"bytes"
"github.com/DataDog/datadog-go/v5/statsd"
"go.uber.org/atomic"
"github.com/DataDog/datadog-go/v5/statsd"
)
// Obfuscator quantizes and obfuscates spans. The obfuscator is not safe for
@@ -24,9 +25,11 @@ import (
type Obfuscator struct {
opts *Config
es *jsonObfuscator // nil if disabled
openSearch *jsonObfuscator // nil if disabled
mongo *jsonObfuscator // nil if disabled
sqlExecPlan *jsonObfuscator // nil if disabled
sqlExecPlanNormalize *jsonObfuscator // nil if disabled
ccObfuscator *creditCard // nil if disabled
// sqlLiteralEscapes reports whether we should treat escape characters literally or as escape characters.
// Different SQL engines behave in different ways and the tokenizer needs to be generic.
sqlLiteralEscapes *atomic.Bool
@@ -69,6 +72,9 @@ type Config struct {
// ES holds the obfuscation configuration for ElasticSearch bodies.
ES JSONConfig
// OpenSearch holds the obfuscation configuration for OpenSearch bodies.
OpenSearch JSONConfig
// Mongo holds the obfuscation configuration for MongoDB queries.
Mongo JSONConfig
@@ -88,6 +94,9 @@ type Config struct {
// Memcached holds the obfuscation settings for Memcached commands.
Memcached MemcachedConfig
// CreditCard holds the obfuscation settings for obfuscation of CC numbers in meta.
CreditCard CreditCardsConfig
// Statsd specifies the statsd client to use for reporting metrics.
Statsd StatsClient
@@ -107,6 +116,7 @@ type ObfuscationMode string
// ObfuscationMode valid values
const (
	// NormalizeOnly is the mode selecting normalization without obfuscation.
	NormalizeOnly = ObfuscationMode("normalize_only")
	// ObfuscateOnly is the mode selecting obfuscation without normalization.
	ObfuscateOnly = ObfuscationMode("obfuscate_only")
	// ObfuscateAndNormalize is the mode selecting both obfuscation and normalization.
	ObfuscateAndNormalize = ObfuscationMode("obfuscate_and_normalize")
)
@@ -145,12 +155,12 @@ type SQLConfig struct {
// ObfuscationMode specifies the obfuscation mode to use for go-sqllexer pkg.
// When specified, obfuscator will attempt to use go-sqllexer pkg to obfuscate (and normalize) SQL queries.
// Valid values are "obfuscate_only", "obfuscate_and_normalize"
// Valid values are "normalize_only", "obfuscate_only", "obfuscate_and_normalize"
ObfuscationMode ObfuscationMode `json:"obfuscation_mode" yaml:"obfuscation_mode"`
// RemoveSpaceBetweenParentheses specifies whether to remove spaces between parentheses.
// By default, spaces are inserted between parentheses during normalization.
// This option is only valid when ObfuscationMode is "obfuscate_and_normalize".
// This option is only valid when ObfuscationMode is "normalize_only" or "obfuscate_and_normalize".
RemoveSpaceBetweenParentheses bool `json:"remove_space_between_parentheses" yaml:"remove_space_between_parentheses"`
// KeepNull specifies whether to disable obfuscate NULL value with ?.
@@ -167,12 +177,12 @@ type SQLConfig struct {
// KeepTrailingSemicolon specifies whether to keep trailing semicolon.
// By default, trailing semicolon is removed during normalization.
// This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize".
// This option is only valid when ObfuscationMode is "normalize_only" or "obfuscate_and_normalize".
KeepTrailingSemicolon bool `json:"keep_trailing_semicolon" yaml:"keep_trailing_semicolon"`
// KeepIdentifierQuotation specifies whether to keep identifier quotation, e.g. "my_table" or [my_table].
// By default, identifier quotation is removed during normalization.
// This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize".
// This option is only valid when ObfuscationMode is "normalize_only" or "obfuscate_and_normalize".
KeepIdentifierQuotation bool `json:"keep_identifier_quotation" yaml:"keep_identifier_quotation"`
// Cache reports whether the obfuscator should use a LRU look-up cache for SQL obfuscations.
@@ -239,6 +249,18 @@ type JSONConfig struct {
ObfuscateSQLValues []string `mapstructure:"obfuscate_sql_values"`
}
// CreditCardsConfig holds the configuration for credit card obfuscation in
// (Meta) tags.
type CreditCardsConfig struct {
	// Enabled specifies whether this feature should be enabled.
	Enabled bool `mapstructure:"enabled"`
	// Luhn specifies whether Luhn checksum validation should be enabled.
	// https://dev.to/shiraazm/goluhn-a-simple-library-for-generating-calculating-and-verifying-luhn-numbers-588j
	// It reduces false positives, but increases the CPU time roughly threefold.
	Luhn bool `mapstructure:"luhn"`
}
// NewObfuscator creates a new obfuscator
func NewObfuscator(cfg Config) *Obfuscator {
if cfg.Logger == nil {
@@ -253,6 +275,9 @@ func NewObfuscator(cfg Config) *Obfuscator {
if cfg.ES.Enabled {
o.es = newJSONObfuscator(&cfg.ES, &o)
}
if cfg.OpenSearch.Enabled {
o.openSearch = newJSONObfuscator(&cfg.OpenSearch, &o)
}
if cfg.Mongo.Enabled {
o.mongo = newJSONObfuscator(&cfg.Mongo, &o)
}
@@ -262,6 +287,9 @@ func NewObfuscator(cfg Config) *Obfuscator {
if cfg.SQLExecPlanNormalize.Enabled {
o.sqlExecPlanNormalize = newJSONObfuscator(&cfg.SQLExecPlanNormalize, &o)
}
if cfg.CreditCard.Enabled {
o.ccObfuscator = newCCObfuscator(&cfg.CreditCard)
}
if cfg.Statsd == nil {
cfg.Statsd = &statsd.NoOpClient{}
}

View File

@@ -245,10 +245,8 @@ func obfuscateRedisCmd(out *strings.Builder, cmd string, args ...string) {
out.WriteString(strings.Join(args, " "))
}
// removeAllRedisArgs will take in a command and obfuscate all arguments following
// RemoveAllRedisArgs will take in a command and obfuscate all arguments following
// the command, regardless of if the command is valid Redis or not
//
//nolint:revive // TODO(APM) Fix revive linter
func (*Obfuscator) RemoveAllRedisArgs(rediscmd string) string {
fullCmd := strings.Fields(rediscmd)
if len(fullCmd) == 0 {

View File

@@ -426,17 +426,22 @@ func (o *Obfuscator) ObfuscateSQLExecPlan(jsonPlan string, normalize bool) (stri
// ObfuscateWithSQLLexer obfuscates the given SQL query using the go-sqllexer package.
// If ObfuscationMode is set to ObfuscateOnly, the query will be obfuscated without normalizing it.
func (o *Obfuscator) ObfuscateWithSQLLexer(in string, opts *SQLConfig) (*ObfuscatedQuery, error) {
if opts.ObfuscationMode != ObfuscateOnly && opts.ObfuscationMode != ObfuscateAndNormalize {
if opts.ObfuscationMode != NormalizeOnly && opts.ObfuscationMode != ObfuscateOnly && opts.ObfuscationMode != ObfuscateAndNormalize {
return nil, fmt.Errorf("invalid obfuscation mode: %s", opts.ObfuscationMode)
}
obfuscator := sqllexer.NewObfuscator(
var obfuscator *sqllexer.Obfuscator
if opts.ObfuscationMode == ObfuscateOnly || opts.ObfuscationMode == ObfuscateAndNormalize {
obfuscator = sqllexer.NewObfuscator(
sqllexer.WithReplaceDigits(opts.ReplaceDigits),
sqllexer.WithDollarQuotedFunc(opts.DollarQuotedFunc),
sqllexer.WithReplacePositionalParameter(!opts.KeepPositionalParameter),
sqllexer.WithReplaceBoolean(!opts.KeepBoolean),
sqllexer.WithReplaceNull(!opts.KeepNull),
)
}
if opts.ObfuscationMode == ObfuscateOnly {
// Obfuscate the query without normalizing it.
out := obfuscator.Obfuscate(in, sqllexer.WithDBMS(sqllexer.DBMSType(opts.DBMS)))
@@ -461,12 +466,22 @@ func (o *Obfuscator) ObfuscateWithSQLLexer(in string, opts *SQLConfig) (*Obfusca
sqllexer.WithKeepTrailingSemicolon(opts.KeepTrailingSemicolon),
sqllexer.WithKeepIdentifierQuotation(opts.KeepIdentifierQuotation),
)
out, statementMetadata, err := sqllexer.ObfuscateAndNormalize(
var out string
var statementMetadata *sqllexer.StatementMetadata
var err error
if opts.ObfuscationMode == NormalizeOnly {
// Normalize the query without obfuscating it.
out, statementMetadata, err = normalizer.Normalize(in, sqllexer.WithDBMS(sqllexer.DBMSType(opts.DBMS)))
} else {
out, statementMetadata, err = sqllexer.ObfuscateAndNormalize(
in,
obfuscator,
normalizer,
sqllexer.WithDBMS(sqllexer.DBMSType(opts.DBMS)),
)
}
if err != nil {
return nil, err
}

View File

@@ -610,9 +610,9 @@ func (tkn *SQLTokenizer) scanIdentifier() (TokenKind, []byte) {
return ID, t
}
//nolint:revive // TODO(APM) Fix revive linter
func (tkn *SQLTokenizer) scanVariableIdentifier(prefix rune) (TokenKind, []byte) {
func (tkn *SQLTokenizer) scanVariableIdentifier(_ rune) (TokenKind, []byte) {
for tkn.advance(); tkn.lastChar != ')' && tkn.lastChar != EndChar; tkn.advance() {
continue
}
tkn.advance()
if !isLetter(tkn.lastChar) {
@@ -623,8 +623,7 @@ func (tkn *SQLTokenizer) scanVariableIdentifier(prefix rune) (TokenKind, []byte)
return Variable, tkn.bytes()
}
//nolint:revive // TODO(APM) Fix revive linter
func (tkn *SQLTokenizer) scanFormatParameter(prefix rune) (TokenKind, []byte) {
func (tkn *SQLTokenizer) scanFormatParameter(_ rune) (TokenKind, []byte) {
tkn.advance()
return Variable, tkn.bytes()
}
@@ -677,8 +676,7 @@ func (tkn *SQLTokenizer) scanDollarQuotedString() (TokenKind, []byte) {
return DollarQuotedString, buf.Bytes()
}
//nolint:revive // TODO(APM) Fix revive linter
func (tkn *SQLTokenizer) scanPreparedStatement(prefix rune) (TokenKind, []byte) {
func (tkn *SQLTokenizer) scanPreparedStatement(_ rune) (TokenKind, []byte) {
// a prepared statement expect a digit identifier like $1
if !isDigit(tkn.lastChar) {
tkn.setErr(`prepared statements must start with digits, got "%c" (%d)`, tkn.lastChar, tkn.lastChar)
@@ -695,8 +693,7 @@ func (tkn *SQLTokenizer) scanPreparedStatement(prefix rune) (TokenKind, []byte)
return PreparedStatement, buff
}
//nolint:revive // TODO(APM) Fix revive linter
func (tkn *SQLTokenizer) scanEscapeSequence(braces rune) (TokenKind, []byte) {
func (tkn *SQLTokenizer) scanEscapeSequence(_ rune) (TokenKind, []byte) {
for tkn.lastChar != '}' && tkn.lastChar != EndChar {
tkn.advance()
}
@@ -825,8 +822,7 @@ func (tkn *SQLTokenizer) scanString(delim rune, kind TokenKind) (TokenKind, []by
return kind, buf.Bytes()
}
//nolint:revive // TODO(APM) Fix revive linter
func (tkn *SQLTokenizer) scanCommentType1(prefix string) (TokenKind, []byte) {
func (tkn *SQLTokenizer) scanCommentType1(_ string) (TokenKind, []byte) {
for tkn.lastChar != EndChar {
if tkn.lastChar == '\n' {
tkn.advance()

View File

@@ -0,0 +1,200 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016-present Datadog, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,240 @@
// protoc -I. -I$GOPATH/src --gogofaster_out=. span.proto tracer_payload.proto agent_payload.proto
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.0
// protoc v5.26.1
// source: datadog/trace/agent_payload.proto
package trace
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
// Compile-time guards: the build fails here if the linked protobuf runtime
// is older than this generated code requires, or too new to still support it.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// AgentPayload represents payload the agent sends to the intake.
type AgentPayload struct {
	// Internal protobuf runtime bookkeeping; managed by protoimpl, never
	// touched by hand-written code.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// hostName specifies hostname of where the agent is running.
	HostName string `protobuf:"bytes,1,opt,name=hostName,proto3" json:"hostName,omitempty"`
	// env specifies `env` set in agent configuration.
	Env string `protobuf:"bytes,2,opt,name=env,proto3" json:"env,omitempty"`
	// tracerPayloads specifies list of the payloads received from tracers.
	TracerPayloads []*TracerPayload `protobuf:"bytes,5,rep,name=tracerPayloads,proto3" json:"tracerPayloads,omitempty"`
	// tags specifies tags common in all `tracerPayloads`.
	Tags map[string]string `protobuf:"bytes,6,rep,name=tags,proto3" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// agentVersion specifies version of the agent.
	AgentVersion string `protobuf:"bytes,7,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"`
	// targetTPS holds `TargetTPS` value in AgentConfig.
	TargetTPS float64 `protobuf:"fixed64,8,opt,name=targetTPS,proto3" json:"targetTPS,omitempty"`
	// errorTPS holds `ErrorTPS` value in AgentConfig.
	ErrorTPS float64 `protobuf:"fixed64,9,opt,name=errorTPS,proto3" json:"errorTPS,omitempty"`
	// rareSamplerEnabled holds `RareSamplerEnabled` value in AgentConfig
	RareSamplerEnabled bool `protobuf:"varint,10,opt,name=rareSamplerEnabled,proto3" json:"rareSamplerEnabled,omitempty"`
}
// Reset zeroes the message in place and, on the unsafe fast path, stores the
// message-info pointer back into the freshly cleared state.
func (x *AgentPayload) Reset() {
	*x = AgentPayload{}
	if protoimpl.UnsafeEnabled {
		mi := &file_datadog_trace_agent_payload_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}
// String renders the message in the protobuf runtime's text form.
func (x *AgentPayload) String() string {
	return protoimpl.X.MessageStringOf(x)
}
// ProtoMessage marks *AgentPayload as a protobuf message type.
func (*AgentPayload) ProtoMessage() {}
// ProtoReflect returns the reflective view of the message. On the unsafe
// fast path it lazily caches the message info in the value's state the
// first time it is seen; otherwise it falls back to the generic wrapper.
func (x *AgentPayload) ProtoReflect() protoreflect.Message {
	mi := &file_datadog_trace_agent_payload_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}
// Descriptor returns the gzip-compressed FileDescriptorProto for this file
// and the index path of AgentPayload within it.
//
// Deprecated: Use AgentPayload.ProtoReflect.Descriptor instead.
func (*AgentPayload) Descriptor() ([]byte, []int) {
	return file_datadog_trace_agent_payload_proto_rawDescGZIP(), []int{0}
}
// GetHostName returns the hostName field. It is safe on a nil receiver,
// in which case it returns the empty string.
func (x *AgentPayload) GetHostName() string {
	if x == nil {
		return ""
	}
	return x.HostName
}
// GetEnv returns the env field, or "" when the receiver is nil.
func (x *AgentPayload) GetEnv() string {
	if x == nil {
		return ""
	}
	return x.Env
}
// GetTracerPayloads returns the tracerPayloads field, or nil when the
// receiver is nil.
func (x *AgentPayload) GetTracerPayloads() []*TracerPayload {
	if x == nil {
		return nil
	}
	return x.TracerPayloads
}
// GetTags returns the tags map, or nil when the receiver is nil.
func (x *AgentPayload) GetTags() map[string]string {
	if x == nil {
		return nil
	}
	return x.Tags
}
// GetAgentVersion returns the agentVersion field, or "" when the receiver
// is nil.
func (x *AgentPayload) GetAgentVersion() string {
	if x == nil {
		return ""
	}
	return x.AgentVersion
}
// GetTargetTPS returns the targetTPS field, or 0 when the receiver is nil.
func (x *AgentPayload) GetTargetTPS() float64 {
	if x == nil {
		return 0
	}
	return x.TargetTPS
}
// GetErrorTPS returns the errorTPS field, or 0 when the receiver is nil.
func (x *AgentPayload) GetErrorTPS() float64 {
	if x == nil {
		return 0
	}
	return x.ErrorTPS
}
// GetRareSamplerEnabled returns the rareSamplerEnabled flag, or false when
// the receiver is nil.
func (x *AgentPayload) GetRareSamplerEnabled() bool {
	if x == nil {
		return false
	}
	return x.RareSamplerEnabled
}
// File_datadog_trace_agent_payload_proto is the compiled descriptor for
// datadog/trace/agent_payload.proto; it is populated by the init function
// below.
var File_datadog_trace_agent_payload_proto protoreflect.FileDescriptor

// file_datadog_trace_agent_payload_proto_rawDesc holds the wire-format
// FileDescriptorProto bytes for this file (generator output — do not edit).
var file_datadog_trace_agent_payload_proto_rawDesc = []byte{
	0x0a, 0x21, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f,
	0x61, 0x67, 0x65, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61,
	0x63, 0x65, 0x1a, 0x22, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63,
	0x65, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
	0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x84, 0x03, 0x0a, 0x0c, 0x41, 0x67, 0x65, 0x6e, 0x74,
	0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e,
	0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x4e,
	0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
	0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x44, 0x0a, 0x0e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50,
	0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e,
	0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x72,
	0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x0e, 0x74, 0x72, 0x61,
	0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73, 0x12, 0x39, 0x0a, 0x04, 0x74,
	0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x64, 0x61, 0x74, 0x61,
	0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x50,
	0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
	0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56,
	0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67,
	0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x61,
	0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x74,
	0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x72, 0x72, 0x6f,
	0x72, 0x54, 0x50, 0x53, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x65, 0x72, 0x72, 0x6f,
	0x72, 0x54, 0x50, 0x53, 0x12, 0x2e, 0x0a, 0x12, 0x72, 0x61, 0x72, 0x65, 0x53, 0x61, 0x6d, 0x70,
	0x6c, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08,
	0x52, 0x12, 0x72, 0x61, 0x72, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x61,
	0x62, 0x6c, 0x65, 0x64, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72,
	0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
	0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x16, 0x5a,
	0x14, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f,
	0x74, 0x72, 0x61, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
// rawDescOnce guards the one-time, in-place gzip compression of
// rawDescData performed by rawDescGZIP below.
var (
	file_datadog_trace_agent_payload_proto_rawDescOnce sync.Once
	file_datadog_trace_agent_payload_proto_rawDescData = file_datadog_trace_agent_payload_proto_rawDesc
)
// file_datadog_trace_agent_payload_proto_rawDescGZIP compresses the raw
// file descriptor exactly once (replacing the cached copy with its gzipped
// form) and returns the compressed bytes on every call.
func file_datadog_trace_agent_payload_proto_rawDescGZIP() []byte {
	file_datadog_trace_agent_payload_proto_rawDescOnce.Do(func() {
		file_datadog_trace_agent_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_datadog_trace_agent_payload_proto_rawDescData)
	})
	return file_datadog_trace_agent_payload_proto_rawDescData
}
// Type tables consumed by the protoimpl.TypeBuilder in the init function:
// per-message runtime info, the Go types for each descriptor index, and the
// dependency index sub-lists (see the trailing [x:y] comments).
var file_datadog_trace_agent_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_datadog_trace_agent_payload_proto_goTypes = []interface{}{
	(*AgentPayload)(nil),  // 0: datadog.trace.AgentPayload
	nil,                   // 1: datadog.trace.AgentPayload.TagsEntry
	(*TracerPayload)(nil), // 2: datadog.trace.TracerPayload
}
var file_datadog_trace_agent_payload_proto_depIdxs = []int32{
	2, // 0: datadog.trace.AgentPayload.tracerPayloads:type_name -> datadog.trace.TracerPayload
	1, // 1: datadog.trace.AgentPayload.tags:type_name -> datadog.trace.AgentPayload.TagsEntry
	2, // [2:2] is the sub-list for method output_type
	2, // [2:2] is the sub-list for method input_type
	2, // [2:2] is the sub-list for extension type_name
	2, // [2:2] is the sub-list for extension extendee
	0, // [0:2] is the sub-list for field type_name
}
func init() { file_datadog_trace_agent_payload_proto_init() }

// file_datadog_trace_agent_payload_proto_init registers this file's types
// with the protobuf runtime. It is idempotent, first initializes the
// tracer_payload file it depends on, and releases the raw tables once the
// runtime has consumed them.
func file_datadog_trace_agent_payload_proto_init() {
	// Already built — nothing to do.
	if File_datadog_trace_agent_payload_proto != nil {
		return
	}
	file_datadog_trace_tracer_payload_proto_init()
	// Without the unsafe fast path, the runtime needs an exporter to reach
	// the unexported bookkeeping fields of AgentPayload.
	if !protoimpl.UnsafeEnabled {
		file_datadog_trace_agent_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*AgentPayload); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_datadog_trace_agent_payload_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   2,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_datadog_trace_agent_payload_proto_goTypes,
		DependencyIndexes: file_datadog_trace_agent_payload_proto_depIdxs,
		MessageInfos:      file_datadog_trace_agent_payload_proto_msgTypes,
	}.Build()
	File_datadog_trace_agent_payload_proto = out.File
	// The builder owns the data now; drop the package-level references so
	// the tables can be collected.
	file_datadog_trace_agent_payload_proto_rawDesc = nil
	file_datadog_trace_agent_payload_proto_goTypes = nil
	file_datadog_trace_agent_payload_proto_depIdxs = nil
}

View File

@@ -0,0 +1,200 @@
package trace
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// MarshalMsg implements msgp.Marshaler
//
// It appends the message to b as a fixed 8-entry msgpack map. The byte
// literals below are the pre-encoded map header and field-name keys
// (e.g. 0x88 = map of 8, 0xa8 "HostName"), emitted by the msgp generator.
func (z *AgentPayload) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 8
	// string "HostName"
	o = append(o, 0x88, 0xa8, 0x48, 0x6f, 0x73, 0x74, 0x4e, 0x61, 0x6d, 0x65)
	o = msgp.AppendString(o, z.HostName)
	// string "Env"
	o = append(o, 0xa3, 0x45, 0x6e, 0x76)
	o = msgp.AppendString(o, z.Env)
	// string "TracerPayloads"
	o = append(o, 0xae, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x73)
	o = msgp.AppendArrayHeader(o, uint32(len(z.TracerPayloads)))
	for za0001 := range z.TracerPayloads {
		// nil payloads are encoded explicitly so the decoder can round-trip them.
		if z.TracerPayloads[za0001] == nil {
			o = msgp.AppendNil(o)
		} else {
			o, err = z.TracerPayloads[za0001].MarshalMsg(o)
			if err != nil {
				err = msgp.WrapError(err, "TracerPayloads", za0001)
				return
			}
		}
	}
	// string "Tags"
	o = append(o, 0xa4, 0x54, 0x61, 0x67, 0x73)
	o = msgp.AppendMapHeader(o, uint32(len(z.Tags)))
	for za0002, za0003 := range z.Tags {
		o = msgp.AppendString(o, za0002)
		o = msgp.AppendString(o, za0003)
	}
	// string "AgentVersion"
	o = append(o, 0xac, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
	o = msgp.AppendString(o, z.AgentVersion)
	// string "TargetTPS"
	o = append(o, 0xa9, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x50, 0x53)
	o = msgp.AppendFloat64(o, z.TargetTPS)
	// string "ErrorTPS"
	o = append(o, 0xa8, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x54, 0x50, 0x53)
	o = msgp.AppendFloat64(o, z.ErrorTPS)
	// string "RareSamplerEnabled"
	o = append(o, 0xb2, 0x52, 0x61, 0x72, 0x65, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
	o = msgp.AppendBool(o, z.RareSamplerEnabled)
	return
}
// UnmarshalMsg implements msgp.Unmarshaler
//
// It decodes the key/value map written by MarshalMsg, matching fields by
// name and skipping unknown keys. o is the input remaining after the
// decoded message.
func (z *AgentPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	// Read the map header to learn how many key/value pairs follow.
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "HostName":
			z.HostName, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "HostName")
				return
			}
		case "Env":
			z.Env, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Env")
				return
			}
		case "TracerPayloads":
			var zb0002 uint32
			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "TracerPayloads")
				return
			}
			// Reuse the existing backing array when its capacity suffices.
			if cap(z.TracerPayloads) >= int(zb0002) {
				z.TracerPayloads = (z.TracerPayloads)[:zb0002]
			} else {
				z.TracerPayloads = make([]*TracerPayload, zb0002)
			}
			for za0001 := range z.TracerPayloads {
				if msgp.IsNil(bts) {
					bts, err = msgp.ReadNilBytes(bts)
					if err != nil {
						return
					}
					z.TracerPayloads[za0001] = nil
				} else {
					if z.TracerPayloads[za0001] == nil {
						z.TracerPayloads[za0001] = new(TracerPayload)
					}
					bts, err = z.TracerPayloads[za0001].UnmarshalMsg(bts)
					if err != nil {
						err = msgp.WrapError(err, "TracerPayloads", za0001)
						return
					}
				}
			}
		case "Tags":
			var zb0003 uint32
			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Tags")
				return
			}
			// Reuse an existing map after emptying it, or allocate one
			// pre-sized for the incoming entry count.
			if z.Tags == nil {
				z.Tags = make(map[string]string, zb0003)
			} else if len(z.Tags) > 0 {
				for key := range z.Tags {
					delete(z.Tags, key)
				}
			}
			for zb0003 > 0 {
				var za0002 string
				var za0003 string
				zb0003--
				za0002, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Tags")
					return
				}
				za0003, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Tags", za0002)
					return
				}
				z.Tags[za0002] = za0003
			}
		case "AgentVersion":
			z.AgentVersion, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "AgentVersion")
				return
			}
		case "TargetTPS":
			z.TargetTPS, bts, err = msgp.ReadFloat64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "TargetTPS")
				return
			}
		case "ErrorTPS":
			z.ErrorTPS, bts, err = msgp.ReadFloat64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "ErrorTPS")
				return
			}
		case "RareSamplerEnabled":
			z.RareSamplerEnabled, bts, err = msgp.ReadBoolBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "RareSamplerEnabled")
				return
			}
		default:
			// Unknown field: skip its value so decoding can continue.
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
//
// The integer literals are the fixed overheads emitted by MarshalMsg: the
// 1-byte map header plus each pre-encoded key (key-length prefix + key
// bytes, e.g. 9 for "HostName", 4 for "Env").
func (z *AgentPayload) Msgsize() (s int) {
	s = 1 + 9 + msgp.StringPrefixSize + len(z.HostName) + 4 + msgp.StringPrefixSize + len(z.Env) + 15 + msgp.ArrayHeaderSize
	for za0001 := range z.TracerPayloads {
		if z.TracerPayloads[za0001] == nil {
			s += msgp.NilSize
		} else {
			s += z.TracerPayloads[za0001].Msgsize()
		}
	}
	s += 5 + msgp.MapHeaderSize
	if z.Tags != nil {
		for za0002, za0003 := range z.Tags {
			_ = za0003
			s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003)
		}
	}
	s += 13 + msgp.StringPrefixSize + len(z.AgentVersion) + 10 + msgp.Float64Size + 9 + msgp.Float64Size + 19 + msgp.BoolSize
	return
}

View File

@@ -0,0 +1,523 @@
// Code generated by protoc-gen-go-vtproto. DO NOT EDIT.
// protoc-gen-go-vtproto version: v0.4.0
// source: datadog/trace/agent_payload.proto
package trace
import (
binary "encoding/binary"
fmt "fmt"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
io "io"
math "math"
)
// Compile-time guards: fail the build if the linked protobuf runtime is
// outside the version range this vtproto-generated code supports.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// MarshalVT encodes the message into a freshly allocated buffer sized by
// SizeVT and returns the encoded bytes. A nil receiver yields (nil, nil).
func (m *AgentPayload) MarshalVT() (dAtA []byte, err error) {
	if m == nil {
		return nil, nil
	}
	buf := make([]byte, m.SizeVT())
	n, err := m.MarshalToSizedBufferVT(buf)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
// MarshalToVT encodes the message into the caller-supplied buffer, trimming
// it to the exact encoded size so the end-anchored writer fills it fully.
func (m *AgentPayload) MarshalToVT(dAtA []byte) (int, error) {
	return m.MarshalToSizedBufferVT(dAtA[:m.SizeVT()])
}
// MarshalToSizedBufferVT writes the protobuf wire encoding of m into dAtA,
// filling the buffer backwards from its end (highest field number first)
// and returning the number of bytes written. dAtA must be exactly SizeVT
// bytes for the result to start at offset 0.
func (m *AgentPayload) MarshalToSizedBufferVT(dAtA []byte) (int, error) {
	if m == nil {
		return 0, nil
	}
	// i is the write cursor; it moves toward 0 as bytes are prepended.
	i := len(dAtA)
	_ = i
	var l int
	_ = l
	if m.unknownFields != nil {
		i -= len(m.unknownFields)
		copy(dAtA[i:], m.unknownFields)
	}
	// Field 10 (bool, tag byte 0x50), written only when true.
	if m.RareSamplerEnabled {
		i--
		if m.RareSamplerEnabled {
			dAtA[i] = 1
		} else {
			dAtA[i] = 0
		}
		i--
		dAtA[i] = 0x50
	}
	// Fields 9 and 8: fixed64 doubles (tag bytes 0x49, 0x41).
	if m.ErrorTPS != 0 {
		i -= 8
		binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.ErrorTPS))))
		i--
		dAtA[i] = 0x49
	}
	if m.TargetTPS != 0 {
		i -= 8
		binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.TargetTPS))))
		i--
		dAtA[i] = 0x41
	}
	if len(m.AgentVersion) > 0 {
		i -= len(m.AgentVersion)
		copy(dAtA[i:], m.AgentVersion)
		i = encodeVarint(dAtA, i, uint64(len(m.AgentVersion)))
		i--
		dAtA[i] = 0x3a
	}
	// Field 6: map entries are nested messages with key=1 (0xa) and
	// value=2 (0x12) sub-fields.
	if len(m.Tags) > 0 {
		for k := range m.Tags {
			v := m.Tags[k]
			baseI := i
			i -= len(v)
			copy(dAtA[i:], v)
			i = encodeVarint(dAtA, i, uint64(len(v)))
			i--
			dAtA[i] = 0x12
			i -= len(k)
			copy(dAtA[i:], k)
			i = encodeVarint(dAtA, i, uint64(len(k)))
			i--
			dAtA[i] = 0xa
			i = encodeVarint(dAtA, i, uint64(baseI-i))
			i--
			dAtA[i] = 0x32
		}
	}
	// Field 5: repeated messages, emitted in reverse to preserve order
	// when read back front-to-back.
	if len(m.TracerPayloads) > 0 {
		for iNdEx := len(m.TracerPayloads) - 1; iNdEx >= 0; iNdEx-- {
			size, err := m.TracerPayloads[iNdEx].MarshalToSizedBufferVT(dAtA[:i])
			if err != nil {
				return 0, err
			}
			i -= size
			i = encodeVarint(dAtA, i, uint64(size))
			i--
			dAtA[i] = 0x2a
		}
	}
	if len(m.Env) > 0 {
		i -= len(m.Env)
		copy(dAtA[i:], m.Env)
		i = encodeVarint(dAtA, i, uint64(len(m.Env)))
		i--
		dAtA[i] = 0x12
	}
	if len(m.HostName) > 0 {
		i -= len(m.HostName)
		copy(dAtA[i:], m.HostName)
		i = encodeVarint(dAtA, i, uint64(len(m.HostName)))
		i--
		dAtA[i] = 0xa
	}
	return len(dAtA) - i, nil
}
// SizeVT returns the exact protobuf wire size of m; zero-valued scalar
// fields and empty strings/collections contribute nothing, matching the
// encoder's skip-if-zero behavior. A nil receiver sizes to 0.
func (m *AgentPayload) SizeVT() (n int) {
	if m == nil {
		return 0
	}
	var l int
	_ = l
	l = len(m.HostName)
	if l > 0 {
		// 1 tag byte + length-varint + payload.
		n += 1 + l + sov(uint64(l))
	}
	l = len(m.Env)
	if l > 0 {
		n += 1 + l + sov(uint64(l))
	}
	if len(m.TracerPayloads) > 0 {
		for _, e := range m.TracerPayloads {
			l = e.SizeVT()
			n += 1 + l + sov(uint64(l))
		}
	}
	if len(m.Tags) > 0 {
		// Each map entry is a nested message: tag + entry-length varint +
		// (key tag/len/bytes + value tag/len/bytes).
		for k, v := range m.Tags {
			_ = k
			_ = v
			mapEntrySize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v)))
			n += mapEntrySize + 1 + sov(uint64(mapEntrySize))
		}
	}
	l = len(m.AgentVersion)
	if l > 0 {
		n += 1 + l + sov(uint64(l))
	}
	// fixed64 fields: 1 tag byte + 8 data bytes.
	if m.TargetTPS != 0 {
		n += 9
	}
	if m.ErrorTPS != 0 {
		n += 9
	}
	// bool: 1 tag byte + 1 data byte.
	if m.RareSamplerEnabled {
		n += 2
	}
	n += len(m.unknownFields)
	return n
}
func (m *AgentPayload) UnmarshalVT(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: AgentPayload: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: AgentPayload: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field HostName", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLength
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.HostName = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLength
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Env = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 5:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field TracerPayloads", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLength
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.TracerPayloads = append(m.TracerPayloads, &TracerPayload{})
if err := m.TracerPayloads[len(m.TracerPayloads)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
case 6:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLength
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Tags == nil {
m.Tags = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLength
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey < 0 {
return ErrInvalidLength
}
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLength
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue < 0 {
return ErrInvalidLength
}
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skip(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLength
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
m.Tags[mapkey] = mapvalue
iNdEx = postIndex
case 7:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field AgentVersion", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLength
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLength
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.AgentVersion = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 8:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field TargetTPS", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.TargetTPS = float64(math.Float64frombits(v))
case 9:
if wireType != 1 {
return fmt.Errorf("proto: wrong wireType = %d for field ErrorTPS", wireType)
}
var v uint64
if (iNdEx + 8) > l {
return io.ErrUnexpectedEOF
}
v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:]))
iNdEx += 8
m.ErrorTPS = float64(math.Float64frombits(v))
case 10:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field RareSamplerEnabled", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflow
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
m.RareSamplerEnabled = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skip(dAtA[iNdEx:])
if err != nil {
return err
}
if (skippy < 0) || (iNdEx+skippy) < 0 {
return ErrInvalidLength
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}

View File

@@ -0,0 +1,275 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
// Package trace defines the types and functions to encode/decode traces.
package trace
import (
"bytes"
"errors"
"math"
"strings"
"unicode/utf8"
"github.com/tinylib/msgp/msgp"
)
// repairUTF8 returns s with every byte sequence that is not valid UTF-8
// replaced by the Unicode replacement character (U+FFFD).
func repairUTF8(s string) string {
	var (
		rd  = strings.NewReader(s)
		buf bytes.Buffer
	)
	buf.Grow(len(s))
	for {
		// By contract, ReadRune never fails on malformed UTF-8: it yields the
		// replacement rune instead. The only expected error is io.EOF, so any
		// error simply terminates the copy with whatever was built so far.
		r, _, err := rd.ReadRune()
		if err != nil {
			break
		}
		buf.WriteRune(r)
	}
	return buf.String()
}
// parseStringBytes reads the next value in the msgpack payload and converts
// either a BinType or a StrType into a valid UTF-8 Go string, returning the
// string and the remaining bytes.
func parseStringBytes(bts []byte) (string, []byte, error) {
	if msgp.IsNil(bts) {
		rest, err := msgp.ReadNilBytes(bts)
		return "", rest, err
	}
	var (
		raw []byte
		err error
	)
	// Peek at the wire type without decoding the value.
	switch t := msgp.NextType(bts); t {
	case msgp.BinType:
		raw, bts, err = msgp.ReadBytesZC(bts)
	case msgp.StrType:
		raw, bts, err = msgp.ReadStringZC(bts)
	default:
		return "", bts, msgp.TypeError{Encoded: t, Method: msgp.StrType}
	}
	if err != nil {
		return "", bts, err
	}
	if !utf8.Valid(raw) {
		// Replace any invalid byte sequences rather than propagating them.
		return repairUTF8(msgp.UnsafeString(raw)), bts, nil
	}
	return string(raw), bts, nil
}
// parseFloat64Bytes decodes a float64, also accepting int64 or uint64 wire
// values; this is required because the encoder may shrink integral values to
// save payload bytes.
func parseFloat64Bytes(bts []byte) (float64, []byte, error) {
	if msgp.IsNil(bts) {
		rest, err := msgp.ReadNilBytes(bts)
		return 0, rest, err
	}
	// Peek at the wire type without decoding the value.
	switch t := msgp.NextType(bts); t {
	case msgp.IntType:
		i, rest, err := msgp.ReadInt64Bytes(bts)
		if err != nil {
			return 0, rest, err
		}
		return float64(i), rest, nil
	case msgp.UintType:
		u, rest, err := msgp.ReadUint64Bytes(bts)
		if err != nil {
			return 0, rest, err
		}
		return float64(u), rest, nil
	case msgp.Float64Type:
		f, rest, err := msgp.ReadFloat64Bytes(bts)
		if err != nil {
			return 0, rest, err
		}
		return f, rest, nil
	default:
		return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.Float64Type}
	}
}
// cast to int64 values that are int64 but that are sent in uint64
// over the wire. Set to 0 if they overflow the MaxInt64 size. This
// cast should be used ONLY while decoding int64 values that are
// sent as uint64 to reduce the payload size, otherwise the approach
// is not correct in the general sense.
func castInt64(v uint64) (int64, bool) {
if v > math.MaxInt64 {
return 0, false
}
return int64(v), true
}
// parseInt64Bytes decodes an int64, also accepting a uint64 wire value; this
// is required because the encoder may pick the unsigned representation to
// shrink the payload.
func parseInt64Bytes(bts []byte) (int64, []byte, error) {
	if msgp.IsNil(bts) {
		rest, err := msgp.ReadNilBytes(bts)
		return 0, rest, err
	}
	// Peek at the wire type without decoding the value.
	switch t := msgp.NextType(bts); t {
	case msgp.IntType:
		i, rest, err := msgp.ReadInt64Bytes(bts)
		if err != nil {
			return 0, rest, err
		}
		return i, rest, nil
	case msgp.UintType:
		u, rest, err := msgp.ReadUint64Bytes(bts)
		if err != nil {
			return 0, rest, err
		}
		// force-cast; reject values that cannot fit in an int64
		i, ok := castInt64(u)
		if !ok {
			return 0, rest, errors.New("found uint64, overflows int64")
		}
		return i, rest, nil
	default:
		return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.IntType}
	}
}
// parseUint64Bytes decodes a uint64, also accepting an int64 wire value; this
// is required because the language used for the encoding side may lack
// unsigned types. An example is early Java (and therefore the JRuby
// interpreter), which encodes uint64 as int64:
// http://docs.oracle.com/javase/tutorial/java/nutsandbolts/datatypes.html
func parseUint64Bytes(bts []byte) (uint64, []byte, error) {
	if msgp.IsNil(bts) {
		rest, err := msgp.ReadNilBytes(bts)
		return 0, rest, err
	}
	// Peek at the wire type without decoding the value.
	switch t := msgp.NextType(bts); t {
	case msgp.UintType:
		u, rest, err := msgp.ReadUint64Bytes(bts)
		if err != nil {
			return 0, rest, err
		}
		return u, rest, nil
	case msgp.IntType:
		i, rest, err := msgp.ReadInt64Bytes(bts)
		if err != nil {
			return 0, rest, err
		}
		// By contract the encoded value is unsigned; reinterpret the bits.
		return uint64(i), rest, nil
	default:
		return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.IntType}
	}
}
// cast to int32 values that are int32 but that are sent in uint32
// over the wire. Set to 0 if they overflow the MaxInt32 size. This
// cast should be used ONLY while decoding int32 values that are
// sent as uint32 to reduce the payload size, otherwise the approach
// is not correct in the general sense.
func castInt32(v uint32) (int32, bool) {
if v > math.MaxInt32 {
return 0, false
}
return int32(v), true
}
// parseInt32Bytes decodes an int32, also accepting a uint32 wire value; this
// is required because the encoder may pick the unsigned representation to
// shrink the payload.
func parseInt32Bytes(bts []byte) (int32, []byte, error) {
	if msgp.IsNil(bts) {
		rest, err := msgp.ReadNilBytes(bts)
		return 0, rest, err
	}
	// Peek at the wire type without decoding the value.
	switch t := msgp.NextType(bts); t {
	case msgp.IntType:
		i, rest, err := msgp.ReadInt32Bytes(bts)
		if err != nil {
			return 0, rest, err
		}
		return i, rest, nil
	case msgp.UintType:
		u, rest, err := msgp.ReadUint32Bytes(bts)
		if err != nil {
			return 0, rest, err
		}
		// force-cast; reject values that cannot fit in an int32
		i, ok := castInt32(u)
		if !ok {
			return 0, rest, errors.New("found uint32, overflows int32")
		}
		return i, rest, nil
	default:
		return 0, bts, msgp.TypeError{Encoded: t, Method: msgp.IntType}
	}
}
// parseBytes reads the next BinType in the msgpack payload and returns a copy
// of its contents that does not alias the input buffer.
//
//nolint:unused // potentially useful; was used with prior proto definitions
func parseBytes(bts []byte) ([]byte, []byte, error) {
	if msgp.IsNil(bts) {
		rest, err := msgp.ReadNilBytes(bts)
		return nil, rest, err
	}
	// Peek at the wire type without decoding the value.
	t := msgp.NextType(bts)
	if t != msgp.BinType {
		return nil, bts, msgp.TypeError{Encoded: t, Method: msgp.BinType}
	}
	view, rest, err := msgp.ReadBytesZC(bts)
	if err != nil {
		return nil, rest, err
	}
	// ReadBytesZC returns a view into bts; copy it so the caller owns the data.
	out := make([]byte, len(view))
	copy(out, view)
	return out, rest, nil
}

View File

@@ -0,0 +1,223 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package trace
import (
"errors"
"fmt"
"github.com/tinylib/msgp/msgp"
)
// dictionaryString reads a uint32 index from the front of bts and returns the
// string at that index in dict, together with the remaining bytes.
func dictionaryString(bts []byte, dict []string) (string, []byte, error) {
	ui, rest, err := msgp.ReadUint32Bytes(bts)
	if err != nil {
		return "", rest, err
	}
	idx := int(ui)
	if idx >= len(dict) {
		return "", rest, fmt.Errorf("dictionary index %d out of range", idx)
	}
	return dict[idx], rest, nil
}
// UnmarshalMsgDictionary decodes a trace using the specification from the v0.5 endpoint.
// For details, see the documentation for endpoint v0.5 in pkg/trace/api/version.go
func (t *Traces) UnmarshalMsgDictionary(bts []byte) error {
	var err error
	// The outer payload is an array: [string dictionary, traces].
	if _, bts, err = safeReadHeaderBytes(bts, msgp.ReadArrayHeaderBytes); err != nil {
		return err
	}
	// read dictionary; spans refer to strings by index into it
	var sz uint32
	if sz, bts, err = safeReadHeaderBytes(bts, msgp.ReadArrayHeaderBytes); err != nil {
		return err
	}
	dict := make([]string, sz)
	for i := range dict {
		var str string
		str, bts, err = parseStringBytes(bts)
		if err != nil {
			return err
		}
		dict[i] = str
	}
	// read traces
	sz, bts, err = safeReadHeaderBytes(bts, msgp.ReadArrayHeaderBytes)
	if err != nil {
		return err
	}
	// Reuse the existing backing array when it is large enough, to avoid
	// reallocating on repeated decodes into the same Traces value.
	if cap(*t) >= int(sz) {
		*t = (*t)[:sz]
	} else {
		*t = make(Traces, sz)
	}
	for i := range *t {
		sz, bts, err = safeReadHeaderBytes(bts, msgp.ReadArrayHeaderBytes)
		if err != nil {
			return err
		}
		// Same capacity-reuse strategy for each individual trace.
		if cap((*t)[i]) >= int(sz) {
			(*t)[i] = (*t)[i][:sz]
		} else {
			(*t)[i] = make(Trace, sz)
		}
		for j := range (*t)[i] {
			// A reused slot may still hold a *Span from a previous decode;
			// only allocate when the slot is nil.
			if (*t)[i][j] == nil {
				(*t)[i][j] = new(Span)
			}
			if bts, err = (*t)[i][j].UnmarshalMsgDictionary(bts, dict); err != nil {
				return err
			}
		}
	}
	return nil
}
// spanPropertyCount specifies the number of top-level properties that a span
// has.
const spanPropertyCount = 12

// UnmarshalMsgDictionary decodes a span from bts, looking up strings
// in the given dictionary dict. For details, see the documentation for endpoint v0.5
// in pkg/trace/api/version.go
func (z *Span) UnmarshalMsgDictionary(bts []byte, dict []string) ([]byte, error) {
	var (
		sz  uint32
		err error
	)
	sz, bts, err = safeReadHeaderBytes(bts, msgp.ReadArrayHeaderBytes)
	if err != nil {
		return bts, err
	}
	// The v0.5 format encodes a span as a fixed-size array, one element per
	// property, in the exact order decoded below.
	if sz != spanPropertyCount {
		return bts, errors.New("encoded span needs exactly 12 elements in array")
	}
	// Service (0)
	z.Service, bts, err = dictionaryString(bts, dict)
	if err != nil {
		return bts, err
	}
	// Name (1)
	z.Name, bts, err = dictionaryString(bts, dict)
	if err != nil {
		return bts, err
	}
	// Resource (2)
	z.Resource, bts, err = dictionaryString(bts, dict)
	if err != nil {
		return bts, err
	}
	// TraceID (3)
	z.TraceID, bts, err = parseUint64Bytes(bts)
	if err != nil {
		return bts, err
	}
	// SpanID (4)
	z.SpanID, bts, err = parseUint64Bytes(bts)
	if err != nil {
		return bts, err
	}
	// ParentID (5)
	z.ParentID, bts, err = parseUint64Bytes(bts)
	if err != nil {
		return bts, err
	}
	// Start (6)
	z.Start, bts, err = parseInt64Bytes(bts)
	if err != nil {
		return bts, err
	}
	// Duration (7)
	z.Duration, bts, err = parseInt64Bytes(bts)
	if err != nil {
		return bts, err
	}
	// Error (8)
	z.Error, bts, err = parseInt32Bytes(bts)
	if err != nil {
		return bts, err
	}
	// Meta (9): allocate lazily, or clear-and-reuse an existing map to avoid
	// reallocating buckets when decoding into a recycled Span.
	sz, bts, err = safeReadHeaderBytes(bts, msgp.ReadMapHeaderBytes)
	if err != nil {
		return bts, err
	}
	if z.Meta == nil && sz > 0 {
		z.Meta = make(map[string]string, sz)
	} else if len(z.Meta) > 0 {
		for key := range z.Meta {
			delete(z.Meta, key)
		}
	}
	for sz > 0 {
		sz--
		var key, val string
		key, bts, err = dictionaryString(bts, dict)
		if err != nil {
			return bts, err
		}
		val, bts, err = dictionaryString(bts, dict)
		if err != nil {
			return bts, err
		}
		z.Meta[key] = val
	}
	// Metrics (10): same lazy-allocate / clear-and-reuse strategy as Meta.
	sz, bts, err = safeReadHeaderBytes(bts, msgp.ReadMapHeaderBytes)
	if err != nil {
		return bts, err
	}
	if z.Metrics == nil && sz > 0 {
		z.Metrics = make(map[string]float64, sz)
	} else if len(z.Metrics) > 0 {
		for key := range z.Metrics {
			delete(z.Metrics, key)
		}
	}
	for sz > 0 {
		sz--
		var (
			key string
			val float64
		)
		key, bts, err = dictionaryString(bts, dict)
		if err != nil {
			return bts, err
		}
		val, bts, err = parseFloat64Bytes(bts)
		if err != nil {
			return bts, err
		}
		z.Metrics[key] = val
	}
	// Type (11)
	z.Type, bts, err = dictionaryString(bts, dict)
	if err != nil {
		return bts, err
	}
	return bts, nil
}
// safeReadHeaderBytes wraps msgp header readers (typically ReadArrayHeaderBytes and ReadMapHeaderBytes).
// It enforces the dictionary max size of 25MB and protects the caller from making unbounded allocations through `make(any, sz)`.
func safeReadHeaderBytes(b []byte, read func([]byte) (uint32, []byte, error)) (uint32, []byte, error) {
sz, bts, err := read(b)
if err != nil {
return 0, nil, err
}
if sz > 25*1e6 {
// Dictionary can't be larger than 25 MB
return 0, nil, errors.New("too long payload")
}
return sz, bts, err
}

View File

@@ -0,0 +1,448 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.0
// protoc v5.26.1
// source: datadog/trace/span.proto
package trace
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
// Compile-time guards that this generated code and the linked protoimpl
// runtime agree on a supported protobuf API version.
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)
// SpanLink describes a causal relationship from the enclosing span to another
// span, possibly in a different trace.
type SpanLink struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// @gotags: json:"trace_id" msg:"trace_id"
	TraceID uint64 `protobuf:"varint,1,opt,name=traceID,proto3" json:"trace_id" msg:"trace_id"` // Required.
	// @gotags: json:"trace_id_high" msg:"trace_id_high,omitempty"
	TraceIDHigh uint64 `protobuf:"varint,2,opt,name=traceID_high,json=traceIDHigh,proto3" json:"trace_id_high" msg:"trace_id_high,omitempty"` // Optional. The high 64 bits of a referenced trace id.
	// @gotags: json:"span_id" msg:"span_id"
	SpanID uint64 `protobuf:"varint,3,opt,name=spanID,proto3" json:"span_id" msg:"span_id"` // Required.
	// @gotags: msg:"attributes,omitempty"
	Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"attributes,omitempty"` // Optional. Simple mapping of keys to string values.
	// @gotags: msg:"tracestate,omitempty"
	Tracestate string `protobuf:"bytes,5,opt,name=tracestate,proto3" json:"tracestate,omitempty" msg:"tracestate,omitempty"` // Optional. W3C tracestate.
	// @gotags: msg:"flags,omitempty"
	Flags uint32 `protobuf:"varint,6,opt,name=flags,proto3" json:"flags,omitempty" msg:"flags,omitempty"` // Optional. W3C trace flags. If set, the high bit (bit 31) must be set.
}
// Reset restores the message to its zero value.
func (x *SpanLink) Reset() {
	*x = SpanLink{}
	if protoimpl.UnsafeEnabled {
		mi := &file_datadog_trace_span_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a compact, human-readable form of the message.
func (x *SpanLink) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*SpanLink) ProtoMessage() {}

// ProtoReflect returns the protobuf reflection view of the message.
func (x *SpanLink) ProtoReflect() protoreflect.Message {
	mi := &file_datadog_trace_span_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use SpanLink.ProtoReflect.Descriptor instead.
func (*SpanLink) Descriptor() ([]byte, []int) {
	return file_datadog_trace_span_proto_rawDescGZIP(), []int{0}
}

// The getters below return the field value, or the zero value when the
// receiver is nil, making access on absent messages safe.

func (x *SpanLink) GetTraceID() uint64 {
	if x != nil {
		return x.TraceID
	}
	return 0
}

func (x *SpanLink) GetTraceIDHigh() uint64 {
	if x != nil {
		return x.TraceIDHigh
	}
	return 0
}

func (x *SpanLink) GetSpanID() uint64 {
	if x != nil {
		return x.SpanID
	}
	return 0
}

func (x *SpanLink) GetAttributes() map[string]string {
	if x != nil {
		return x.Attributes
	}
	return nil
}

func (x *SpanLink) GetTracestate() string {
	if x != nil {
		return x.Tracestate
	}
	return ""
}

func (x *SpanLink) GetFlags() uint32 {
	if x != nil {
		return x.Flags
	}
	return 0
}
// Span is a single unit of work within a trace, as defined by the Datadog
// trace payload format.
type Span struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// service is the name of the service with which this span is associated.
	// @gotags: json:"service" msg:"service"
	Service string `protobuf:"bytes,1,opt,name=service,proto3" json:"service" msg:"service"`
	// name is the operation name of this span.
	// @gotags: json:"name" msg:"name"
	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name" msg:"name"`
	// resource is the resource name of this span, also sometimes called the endpoint (for web spans).
	// @gotags: json:"resource" msg:"resource"
	Resource string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource" msg:"resource"`
	// traceID is the ID of the trace to which this span belongs.
	// @gotags: json:"trace_id" msg:"trace_id"
	TraceID uint64 `protobuf:"varint,4,opt,name=traceID,proto3" json:"trace_id" msg:"trace_id"`
	// spanID is the ID of this span.
	// @gotags: json:"span_id" msg:"span_id"
	SpanID uint64 `protobuf:"varint,5,opt,name=spanID,proto3" json:"span_id" msg:"span_id"`
	// parentID is the ID of this span's parent, or zero if this span has no parent.
	// @gotags: json:"parent_id" msg:"parent_id"
	ParentID uint64 `protobuf:"varint,6,opt,name=parentID,proto3" json:"parent_id" msg:"parent_id"`
	// start is the number of nanoseconds between the Unix epoch and the beginning of this span.
	// @gotags: json:"start" msg:"start"
	Start int64 `protobuf:"varint,7,opt,name=start,proto3" json:"start" msg:"start"`
	// duration is the time length of this span in nanoseconds.
	// @gotags: json:"duration" msg:"duration"
	Duration int64 `protobuf:"varint,8,opt,name=duration,proto3" json:"duration" msg:"duration"`
	// error is 1 if there is an error associated with this span, or 0 if there is not.
	// @gotags: json:"error" msg:"error"
	Error int32 `protobuf:"varint,9,opt,name=error,proto3" json:"error" msg:"error"`
	// meta is a mapping from tag name to tag value for string-valued tags.
	// @gotags: json:"meta,omitempty" msg:"meta,omitempty"
	Meta map[string]string `protobuf:"bytes,10,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta,omitempty"`
	// metrics is a mapping from tag name to tag value for numeric-valued tags.
	// @gotags: json:"metrics,omitempty" msg:"metrics,omitempty"
	Metrics map[string]float64 `protobuf:"bytes,11,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed64,2,opt,name=value,proto3" msg:"metrics,omitempty"`
	// type is the type of the service with which this span is associated. Example values: web, db, lambda.
	// @gotags: json:"type" msg:"type"
	Type string `protobuf:"bytes,12,opt,name=type,proto3" json:"type" msg:"type"`
	// meta_struct is a registry of structured "other" data used by, e.g., AppSec.
	// @gotags: json:"meta_struct,omitempty" msg:"meta_struct,omitempty"
	MetaStruct map[string][]byte `protobuf:"bytes,13,rep,name=meta_struct,json=metaStruct,proto3" json:"meta_struct,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"meta_struct,omitempty"`
	// span_links represents a collection of links, where each link defines a causal relationship between two spans.
	// @gotags: json:"span_links,omitempty" msg:"span_links,omitempty"
	SpanLinks []*SpanLink `protobuf:"bytes,14,rep,name=spanLinks,proto3" json:"span_links,omitempty" msg:"span_links,omitempty"`
}
// Reset restores the message to its zero value.
func (x *Span) Reset() {
	*x = Span{}
	if protoimpl.UnsafeEnabled {
		mi := &file_datadog_trace_span_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String returns a compact, human-readable form of the message.
func (x *Span) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*Span) ProtoMessage() {}

// ProtoReflect returns the protobuf reflection view of the message.
func (x *Span) ProtoReflect() protoreflect.Message {
	mi := &file_datadog_trace_span_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use Span.ProtoReflect.Descriptor instead.
func (*Span) Descriptor() ([]byte, []int) {
	return file_datadog_trace_span_proto_rawDescGZIP(), []int{1}
}

// The getters below return the field value, or the zero value when the
// receiver is nil, making access on absent messages safe.

func (x *Span) GetService() string {
	if x != nil {
		return x.Service
	}
	return ""
}

func (x *Span) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

func (x *Span) GetResource() string {
	if x != nil {
		return x.Resource
	}
	return ""
}

func (x *Span) GetTraceID() uint64 {
	if x != nil {
		return x.TraceID
	}
	return 0
}

func (x *Span) GetSpanID() uint64 {
	if x != nil {
		return x.SpanID
	}
	return 0
}

func (x *Span) GetParentID() uint64 {
	if x != nil {
		return x.ParentID
	}
	return 0
}

func (x *Span) GetStart() int64 {
	if x != nil {
		return x.Start
	}
	return 0
}

func (x *Span) GetDuration() int64 {
	if x != nil {
		return x.Duration
	}
	return 0
}

func (x *Span) GetError() int32 {
	if x != nil {
		return x.Error
	}
	return 0
}

func (x *Span) GetMeta() map[string]string {
	if x != nil {
		return x.Meta
	}
	return nil
}

func (x *Span) GetMetrics() map[string]float64 {
	if x != nil {
		return x.Metrics
	}
	return nil
}

func (x *Span) GetType() string {
	if x != nil {
		return x.Type
	}
	return ""
}

func (x *Span) GetMetaStruct() map[string][]byte {
	if x != nil {
		return x.MetaStruct
	}
	return nil
}

func (x *Span) GetSpanLinks() []*SpanLink {
	if x != nil {
		return x.SpanLinks
	}
	return nil
}
// File_datadog_trace_span_proto holds the runtime file descriptor for
// datadog/trace/span.proto, populated by file_datadog_trace_span_proto_init.
var File_datadog_trace_span_proto protoreflect.FileDescriptor

// file_datadog_trace_span_proto_rawDesc is the serialized FileDescriptorProto
// for datadog/trace/span.proto, emitted by protoc-gen-go.
var file_datadog_trace_span_proto_rawDesc = []byte{
	0x0a, 0x18, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f,
	0x73, 0x70, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x61, 0x74, 0x61,
	0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x22, 0x9d, 0x02, 0x0a, 0x08, 0x53, 0x70,
	0x61, 0x6e, 0x4c, 0x69, 0x6e, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49,
	0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44,
	0x12, 0x21, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x5f, 0x68, 0x69, 0x67, 0x68,
	0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x48,
	0x69, 0x67, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x18, 0x03, 0x20,
	0x01, 0x28, 0x04, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x47, 0x0a, 0x0a, 0x61,
	0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32,
	0x27, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e,
	0x53, 0x70, 0x61, 0x6e, 0x4c, 0x69, 0x6e, 0x6b, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75,
	0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62,
	0x75, 0x74, 0x65, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x73, 0x74, 0x61,
	0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x73,
	0x74, 0x61, 0x74, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x18, 0x06, 0x20,
	0x01, 0x28, 0x0d, 0x52, 0x05, 0x66, 0x6c, 0x61, 0x67, 0x73, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74,
	0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
	0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
	0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
	0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x9a, 0x05, 0x0a, 0x04, 0x53, 0x70,
	0x61, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20,
	0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04,
	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65,
	0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07,
	0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x74,
	0x72, 0x61, 0x63, 0x65, 0x49, 0x44, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44,
	0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x73, 0x70, 0x61, 0x6e, 0x49, 0x44, 0x12, 0x1a,
	0x0a, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04,
	0x52, 0x08, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x49, 0x44, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74,
	0x61, 0x72, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74,
	0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01,
	0x28, 0x03, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05,
	0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x65, 0x72, 0x72,
	0x6f, 0x72, 0x12, 0x31, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b,
	0x32, 0x1d, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
	0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
	0x04, 0x6d, 0x65, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73,
	0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67,
	0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4d, 0x65, 0x74, 0x72,
	0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63,
	0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52,
	0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x74,
	0x72, 0x75, 0x63, 0x74, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x64, 0x61, 0x74,
	0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e,
	0x4d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52,
	0x0a, 0x6d, 0x65, 0x74, 0x61, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x12, 0x35, 0x0a, 0x09, 0x73,
	0x70, 0x61, 0x6e, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17,
	0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x53,
	0x70, 0x61, 0x6e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x4c, 0x69, 0x6e,
	0x6b, 0x73, 0x1a, 0x37, 0x0a, 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12,
	0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65,
	0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
	0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3a, 0x0a, 0x0c, 0x4d,
	0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b,
	0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a,
	0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61,
	0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x61, 0x53,
	0x74, 0x72, 0x75, 0x63, 0x74, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65,
	0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05,
	0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c,
	0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x16, 0x5a, 0x14, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72,
	0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x62, 0x06,
	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_datadog_trace_span_proto_rawDescOnce sync.Once
	file_datadog_trace_span_proto_rawDescData = file_datadog_trace_span_proto_rawDesc
)

// file_datadog_trace_span_proto_rawDescGZIP lazily gzip-compresses the raw
// descriptor exactly once and returns the compressed bytes; the compressed
// form replaces the raw data after the first call.
func file_datadog_trace_span_proto_rawDescGZIP() []byte {
	file_datadog_trace_span_proto_rawDescOnce.Do(func() {
		file_datadog_trace_span_proto_rawDescData = protoimpl.X.CompressGZIP(file_datadog_trace_span_proto_rawDescData)
	})
	return file_datadog_trace_span_proto_rawDescData
}
// file_datadog_trace_span_proto_msgTypes holds per-message runtime metadata:
// indices 0-1 are SpanLink and Span; 2-5 are the synthetic map-entry types.
var file_datadog_trace_span_proto_msgTypes = make([]protoimpl.MessageInfo, 6)

// file_datadog_trace_span_proto_goTypes maps message indices to Go types.
var file_datadog_trace_span_proto_goTypes = []interface{}{
	(*SpanLink)(nil), // 0: datadog.trace.SpanLink
	(*Span)(nil),     // 1: datadog.trace.Span
	nil,              // 2: datadog.trace.SpanLink.AttributesEntry
	nil,              // 3: datadog.trace.Span.MetaEntry
	nil,              // 4: datadog.trace.Span.MetricsEntry
	nil,              // 5: datadog.trace.Span.MetaStructEntry
}

// file_datadog_trace_span_proto_depIdxs records field-to-type dependencies as
// indices into goTypes, followed by the method/extension sub-list boundaries.
var file_datadog_trace_span_proto_depIdxs = []int32{
	2, // 0: datadog.trace.SpanLink.attributes:type_name -> datadog.trace.SpanLink.AttributesEntry
	3, // 1: datadog.trace.Span.meta:type_name -> datadog.trace.Span.MetaEntry
	4, // 2: datadog.trace.Span.metrics:type_name -> datadog.trace.Span.MetricsEntry
	5, // 3: datadog.trace.Span.meta_struct:type_name -> datadog.trace.Span.MetaStructEntry
	0, // 4: datadog.trace.Span.spanLinks:type_name -> datadog.trace.SpanLink
	5, // [5:5] is the sub-list for method output_type
	5, // [5:5] is the sub-list for method input_type
	5, // [5:5] is the sub-list for extension type_name
	5, // [5:5] is the sub-list for extension extendee
	0, // [0:5] is the sub-list for field type_name
}

func init() { file_datadog_trace_span_proto_init() }

// file_datadog_trace_span_proto_init registers the file's messages with the
// protobuf runtime; it is idempotent and returns immediately once the file
// descriptor has been built.
func file_datadog_trace_span_proto_init() {
	if File_datadog_trace_span_proto != nil {
		return
	}
	// Without unsafe access, the runtime needs exporters to reach the
	// unexported bookkeeping fields of each message struct.
	if !protoimpl.UnsafeEnabled {
		file_datadog_trace_span_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*SpanLink); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_datadog_trace_span_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*Span); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_datadog_trace_span_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   6,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_datadog_trace_span_proto_goTypes,
		DependencyIndexes: file_datadog_trace_span_proto_depIdxs,
		MessageInfos:      file_datadog_trace_span_proto_msgTypes,
	}.Build()
	File_datadog_trace_span_proto = out.File
	// Allow the raw tables to be garbage-collected once the runtime owns them.
	file_datadog_trace_span_proto_rawDesc = nil
	file_datadog_trace_span_proto_goTypes = nil
	file_datadog_trace_span_proto_depIdxs = nil
}

View File

@@ -0,0 +1,577 @@
package trace
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// MarshalMsg implements msgp.Marshaler.
//
// The encoder emits a msgpack map whose size is computed up front: fields
// with omitempty semantics (Meta, Metrics, MetaStruct, SpanLinks) are
// dropped from the header count when empty, and the corresponding bit in
// zb0001Mask records which ones to skip while writing.
func (z *Span) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// omitempty: check for empty values
	zb0001Len := uint32(14)
	var zb0001Mask uint16 /* 14 bits */
	if z.Meta == nil {
		zb0001Len--
		zb0001Mask |= 0x200
	}
	if z.Metrics == nil {
		zb0001Len--
		zb0001Mask |= 0x400
	}
	if z.MetaStruct == nil {
		zb0001Len--
		zb0001Mask |= 0x1000
	}
	if z.SpanLinks == nil {
		zb0001Len--
		zb0001Mask |= 0x2000
	}
	// variable map header, size zb0001Len
	o = append(o, 0x80|uint8(zb0001Len))
	if zb0001Len == 0 {
		return
	}
	// string "service"
	o = append(o, 0xa7, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65)
	o = msgp.AppendString(o, z.Service)
	// string "name"
	o = append(o, 0xa4, 0x6e, 0x61, 0x6d, 0x65)
	o = msgp.AppendString(o, z.Name)
	// string "resource"
	o = append(o, 0xa8, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65)
	o = msgp.AppendString(o, z.Resource)
	// string "trace_id"
	o = append(o, 0xa8, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64)
	o = msgp.AppendUint64(o, z.TraceID)
	// string "span_id"
	o = append(o, 0xa7, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64)
	o = msgp.AppendUint64(o, z.SpanID)
	// string "parent_id"
	o = append(o, 0xa9, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64)
	o = msgp.AppendUint64(o, z.ParentID)
	// string "start"
	o = append(o, 0xa5, 0x73, 0x74, 0x61, 0x72, 0x74)
	o = msgp.AppendInt64(o, z.Start)
	// string "duration"
	o = append(o, 0xa8, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e)
	o = msgp.AppendInt64(o, z.Duration)
	// string "error"
	o = append(o, 0xa5, 0x65, 0x72, 0x72, 0x6f, 0x72)
	o = msgp.AppendInt32(o, z.Error)
	if (zb0001Mask & 0x200) == 0 { // if not empty
		// string "meta"
		o = append(o, 0xa4, 0x6d, 0x65, 0x74, 0x61)
		o = msgp.AppendMapHeader(o, uint32(len(z.Meta)))
		for za0001, za0002 := range z.Meta {
			o = msgp.AppendString(o, za0001)
			o = msgp.AppendString(o, za0002)
		}
	}
	if (zb0001Mask & 0x400) == 0 { // if not empty
		// string "metrics"
		o = append(o, 0xa7, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73)
		o = msgp.AppendMapHeader(o, uint32(len(z.Metrics)))
		for za0003, za0004 := range z.Metrics {
			o = msgp.AppendString(o, za0003)
			o = msgp.AppendFloat64(o, za0004)
		}
	}
	// string "type"
	o = append(o, 0xa4, 0x74, 0x79, 0x70, 0x65)
	o = msgp.AppendString(o, z.Type)
	if (zb0001Mask & 0x1000) == 0 { // if not empty
		// string "meta_struct"
		o = append(o, 0xab, 0x6d, 0x65, 0x74, 0x61, 0x5f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74)
		o = msgp.AppendMapHeader(o, uint32(len(z.MetaStruct)))
		for za0005, za0006 := range z.MetaStruct {
			o = msgp.AppendString(o, za0005)
			o = msgp.AppendBytes(o, za0006)
		}
	}
	if (zb0001Mask & 0x2000) == 0 { // if not empty
		// string "span_links"
		o = append(o, 0xaa, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73)
		o = msgp.AppendArrayHeader(o, uint32(len(z.SpanLinks)))
		for za0007 := range z.SpanLinks {
			if z.SpanLinks[za0007] == nil {
				o = msgp.AppendNil(o)
			} else {
				o, err = z.SpanLinks[za0007].MarshalMsg(o)
				if err != nil {
					err = msgp.WrapError(err, "SpanLinks", za0007)
					return
				}
			}
		}
	}
	return
}
// UnmarshalMsg implements msgp.Unmarshaler.
//
// It decodes a msgpack map into z, tolerating explicit nil values for most
// fields (a nil resets the field to its zero value) and skipping unknown
// keys. Scalar fields go through the file-local parse*Bytes helpers, which
// are more permissive than the stock msgp readers.
//
// Fix: the "name" and "resource" cases previously wrapped their parse errors
// with the label "Service" (copy-paste error); they now report "Name" and
// "Resource" respectively so decode failures point at the right field.
func (z *Span) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "service":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				z.Service = ""
				break
			}
			z.Service, bts, err = parseStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Service")
				return
			}
		case "name":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				z.Name = ""
				break
			}
			z.Name, bts, err = parseStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Name")
				return
			}
		case "resource":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				z.Resource = ""
				break
			}
			z.Resource, bts, err = parseStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Resource")
				return
			}
		case "trace_id":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				z.TraceID = 0
				break
			}
			z.TraceID, bts, err = parseUint64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "TraceID")
				return
			}
		case "span_id":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				z.SpanID = 0
				break
			}
			z.SpanID, bts, err = parseUint64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "SpanID")
				return
			}
		case "parent_id":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				z.ParentID = 0
				break
			}
			z.ParentID, bts, err = parseUint64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "ParentID")
				return
			}
		case "start":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				z.Start = 0
				break
			}
			z.Start, bts, err = parseInt64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Start")
				return
			}
		case "duration":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				z.Duration = 0
				break
			}
			z.Duration, bts, err = parseInt64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Duration")
				return
			}
		case "error":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				z.Error = 0
				break
			}
			z.Error, bts, err = parseInt32Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Error")
				return
			}
		case "meta":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				z.Meta = nil
				break
			}
			var zb0002 uint32
			zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Meta")
				return
			}
			// Reuse an existing map (after clearing) to avoid reallocation.
			if z.Meta == nil && zb0002 > 0 {
				z.Meta = make(map[string]string, zb0002)
			} else if len(z.Meta) > 0 {
				for key := range z.Meta {
					delete(z.Meta, key)
				}
			}
			for zb0002 > 0 {
				var za0001 string
				var za0002 string
				zb0002--
				za0001, bts, err = parseStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Meta")
					return
				}
				za0002, bts, err = parseStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Meta", za0001)
					return
				}
				z.Meta[za0001] = za0002
			}
		case "metrics":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				z.Metrics = nil
				break
			}
			var zb0003 uint32
			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Metrics")
				return
			}
			if z.Metrics == nil && zb0003 > 0 {
				z.Metrics = make(map[string]float64, zb0003)
			} else if len(z.Metrics) > 0 {
				for key := range z.Metrics {
					delete(z.Metrics, key)
				}
			}
			for zb0003 > 0 {
				var za0003 string
				var za0004 float64
				zb0003--
				za0003, bts, err = parseStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Metrics")
					return
				}
				za0004, bts, err = parseFloat64Bytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Metrics", za0003)
					return
				}
				z.Metrics[za0003] = za0004
			}
		case "type":
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				z.Type = ""
				break
			}
			z.Type, bts, err = parseStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Type")
				return
			}
		case "meta_struct":
			// NOTE(review): unlike the other fields, this case has no IsNil
			// guard; an explicit nil here surfaces as a map-header error.
			// Left as-is to preserve existing wire behavior.
			var zb0004 uint32
			zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "MetaStruct")
				return
			}
			if z.MetaStruct == nil {
				z.MetaStruct = make(map[string][]byte, zb0004)
			} else if len(z.MetaStruct) > 0 {
				for key := range z.MetaStruct {
					delete(z.MetaStruct, key)
				}
			}
			for zb0004 > 0 {
				var za0005 string
				var za0006 []byte
				zb0004--
				za0005, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "MetaStruct")
					return
				}
				za0006, bts, err = msgp.ReadBytesBytes(bts, za0006)
				if err != nil {
					err = msgp.WrapError(err, "MetaStruct", za0005)
					return
				}
				z.MetaStruct[za0005] = za0006
			}
		case "span_links":
			var zb0005 uint32
			zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "SpanLinks")
				return
			}
			// Reuse slice capacity when possible.
			if cap(z.SpanLinks) >= int(zb0005) {
				z.SpanLinks = (z.SpanLinks)[:zb0005]
			} else {
				z.SpanLinks = make([]*SpanLink, zb0005)
			}
			for za0007 := range z.SpanLinks {
				if msgp.IsNil(bts) {
					bts, err = msgp.ReadNilBytes(bts)
					if err != nil {
						return
					}
					z.SpanLinks[za0007] = nil
				} else {
					if z.SpanLinks[za0007] == nil {
						z.SpanLinks[za0007] = new(SpanLink)
					}
					bts, err = z.SpanLinks[za0007].UnmarshalMsg(bts)
					if err != nil {
						err = msgp.WrapError(err, "SpanLinks", za0007)
						return
					}
				}
			}
		default:
			// Unknown key: skip its value and continue.
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by
// the serialized message. The leading constant sums are the msgpack key
// lengths (key prefix byte + key bytes) for each fixed field; map/array
// headers and per-entry sizes are added for the variable-length fields.
// SpanLinks' header is always counted even when the field is omitted by
// MarshalMsg, which only makes the estimate more conservative.
func (z *Span) Msgsize() (s int) {
	s = 1 + 8 + msgp.StringPrefixSize + len(z.Service) + 5 + msgp.StringPrefixSize + len(z.Name) + 9 + msgp.StringPrefixSize + len(z.Resource) + 9 + msgp.Uint64Size + 8 + msgp.Uint64Size + 10 + msgp.Uint64Size + 6 + msgp.Int64Size + 9 + msgp.Int64Size + 6 + msgp.Int32Size + 5 + msgp.MapHeaderSize
	if z.Meta != nil {
		for za0001, za0002 := range z.Meta {
			_ = za0002
			s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
		}
	}
	s += 8 + msgp.MapHeaderSize
	if z.Metrics != nil {
		for za0003, za0004 := range z.Metrics {
			_ = za0004
			s += msgp.StringPrefixSize + len(za0003) + msgp.Float64Size
		}
	}
	s += 5 + msgp.StringPrefixSize + len(z.Type) + 12 + msgp.MapHeaderSize
	if z.MetaStruct != nil {
		for za0005, za0006 := range z.MetaStruct {
			_ = za0006
			s += msgp.StringPrefixSize + len(za0005) + msgp.BytesPrefixSize + len(za0006)
		}
	}
	s += 11 + msgp.ArrayHeaderSize
	for za0007 := range z.SpanLinks {
		if z.SpanLinks[za0007] == nil {
			s += msgp.NilSize
		} else {
			s += z.SpanLinks[za0007].Msgsize()
		}
	}
	return
}
// MarshalMsg implements msgp.Marshaler.
//
// TraceID and SpanID are always written; TraceIDHigh, Attributes, Tracestate
// and Flags carry omitempty semantics, tracked via the zb0001Mask bits so
// the map header size matches the fields actually emitted.
func (z *SpanLink) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// omitempty: check for empty values
	zb0001Len := uint32(6)
	var zb0001Mask uint8 /* 6 bits */
	if z.TraceIDHigh == 0 {
		zb0001Len--
		zb0001Mask |= 0x2
	}
	if z.Attributes == nil {
		zb0001Len--
		zb0001Mask |= 0x8
	}
	if z.Tracestate == "" {
		zb0001Len--
		zb0001Mask |= 0x10
	}
	if z.Flags == 0 {
		zb0001Len--
		zb0001Mask |= 0x20
	}
	// variable map header, size zb0001Len
	o = append(o, 0x80|uint8(zb0001Len))
	if zb0001Len == 0 {
		return
	}
	// string "trace_id"
	o = append(o, 0xa8, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64)
	o = msgp.AppendUint64(o, z.TraceID)
	if (zb0001Mask & 0x2) == 0 { // if not empty
		// string "trace_id_high"
		o = append(o, 0xad, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x68, 0x69, 0x67, 0x68)
		o = msgp.AppendUint64(o, z.TraceIDHigh)
	}
	// string "span_id"
	o = append(o, 0xa7, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64)
	o = msgp.AppendUint64(o, z.SpanID)
	if (zb0001Mask & 0x8) == 0 { // if not empty
		// string "attributes"
		o = append(o, 0xaa, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73)
		o = msgp.AppendMapHeader(o, uint32(len(z.Attributes)))
		for za0001, za0002 := range z.Attributes {
			o = msgp.AppendString(o, za0001)
			o = msgp.AppendString(o, za0002)
		}
	}
	if (zb0001Mask & 0x10) == 0 { // if not empty
		// string "tracestate"
		o = append(o, 0xaa, 0x74, 0x72, 0x61, 0x63, 0x65, 0x73, 0x74, 0x61, 0x74, 0x65)
		o = msgp.AppendString(o, z.Tracestate)
	}
	if (zb0001Mask & 0x20) == 0 { // if not empty
		// string "flags"
		o = append(o, 0xa5, 0x66, 0x6c, 0x61, 0x67, 0x73)
		o = msgp.AppendUint32(o, z.Flags)
	}
	return
}

// UnmarshalMsg implements msgp.Unmarshaler.
//
// Unlike Span.UnmarshalMsg, SpanLink fields use the stock msgp readers (no
// nil-tolerant parse*Bytes helpers). Unknown keys are skipped.
func (z *SpanLink) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "trace_id":
			z.TraceID, bts, err = msgp.ReadUint64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "TraceID")
				return
			}
		case "trace_id_high":
			z.TraceIDHigh, bts, err = msgp.ReadUint64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "TraceIDHigh")
				return
			}
		case "span_id":
			z.SpanID, bts, err = msgp.ReadUint64Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "SpanID")
				return
			}
		case "attributes":
			var zb0002 uint32
			zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Attributes")
				return
			}
			// Reuse an existing map (after clearing) to avoid reallocation.
			if z.Attributes == nil {
				z.Attributes = make(map[string]string, zb0002)
			} else if len(z.Attributes) > 0 {
				for key := range z.Attributes {
					delete(z.Attributes, key)
				}
			}
			for zb0002 > 0 {
				var za0001 string
				var za0002 string
				zb0002--
				za0001, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Attributes")
					return
				}
				za0002, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Attributes", za0001)
					return
				}
				z.Attributes[za0001] = za0002
			}
		case "tracestate":
			z.Tracestate, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Tracestate")
				return
			}
		case "flags":
			z.Flags, bts, err = msgp.ReadUint32Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Flags")
				return
			}
		default:
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}

// Msgsize returns an upper bound estimate of the number of bytes occupied by
// the serialized message. Constants are msgpack key sizes; omitted-when-empty
// fields are still counted, keeping the bound conservative.
func (z *SpanLink) Msgsize() (s int) {
	s = 1 + 9 + msgp.Uint64Size + 14 + msgp.Uint64Size + 8 + msgp.Uint64Size + 11 + msgp.MapHeaderSize
	if z.Attributes != nil {
		for za0001, za0002 := range z.Attributes {
			_ = za0002
			s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
		}
	}
	s += 11 + msgp.StringPrefixSize + len(z.Tracestate) + 6 + msgp.Uint32Size
	return
}

View File

@@ -0,0 +1,53 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package trace
// spanCopiedFields records the fields that are copied in ShallowCopy.
// This should match exactly the fields set in (*Span).ShallowCopy.
// This is used by tests to enforce the correctness of ShallowCopy.
var spanCopiedFields = map[string]struct{}{
	"Service":    {},
	"Name":       {},
	"Resource":   {},
	"TraceID":    {},
	"SpanID":     {},
	"ParentID":   {},
	"Start":      {},
	"Duration":   {},
	"Error":      {},
	"Meta":       {},
	"Metrics":    {},
	"Type":       {},
	"MetaStruct": {},
	"SpanLinks":  {},
}

// ShallowCopy returns a shallow copy of the copy-able portion of a Span. These
// are the public fields which will have a Get* method for them. Completeness
// is enforced by tests against spanCopiedFields above (NOTE(review): the
// original comment referred to "the init function above", which does not
// exist in this file). Instead of using pkg/proto/utils.ProtoCopier, which
// incurs heavy reflection cost for every copy at runtime, the field list is
// spelled out explicitly. Map and slice fields (Meta, Metrics, MetaStruct,
// SpanLinks) are shared with the original, not cloned.
func (s *Span) ShallowCopy() *Span {
	// A nil receiver yields an empty, usable Span rather than panicking.
	if s == nil {
		return &Span{}
	}
	return &Span{
		Service:    s.Service,
		Name:       s.Name,
		Resource:   s.Resource,
		TraceID:    s.TraceID,
		SpanID:     s.SpanID,
		ParentID:   s.ParentID,
		Start:      s.Start,
		Duration:   s.Duration,
		Error:      s.Error,
		Meta:       s.Meta,
		Metrics:    s.Metrics,
		Type:       s.Type,
		MetaStruct: s.MetaStruct,
		SpanLinks:  s.SpanLinks,
	}
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,837 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.0
// protoc v5.26.1
// source: datadog/trace/stats.proto
package trace
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// Trilean is an expanded boolean type that is meant to differentiate between being unset and false.
type Trilean int32

const (
	Trilean_NOT_SET Trilean = 0
	Trilean_TRUE    Trilean = 1
	Trilean_FALSE   Trilean = 2
)

// Enum value maps for Trilean.
var (
	Trilean_name = map[int32]string{
		0: "NOT_SET",
		1: "TRUE",
		2: "FALSE",
	}
	Trilean_value = map[string]int32{
		"NOT_SET": 0,
		"TRUE":    1,
		"FALSE":   2,
	}
)

// Enum returns a new pointer holding x (protobuf optional-field helper).
func (x Trilean) Enum() *Trilean {
	p := new(Trilean)
	*p = x
	return p
}

func (x Trilean) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

func (Trilean) Descriptor() protoreflect.EnumDescriptor {
	return file_datadog_trace_stats_proto_enumTypes[0].Descriptor()
}

func (Trilean) Type() protoreflect.EnumType {
	return &file_datadog_trace_stats_proto_enumTypes[0]
}

func (x Trilean) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use Trilean.Descriptor instead.
func (Trilean) EnumDescriptor() ([]byte, []int) {
	return file_datadog_trace_stats_proto_rawDescGZIP(), []int{0}
}
// TraceRootFlag is a deprecated tri-state flag (see Trilean for the current
// equivalent); all of its values carry the DEPRECATED_ prefix.
type TraceRootFlag int32

const (
	TraceRootFlag_DEPRECATED_NOT_SET TraceRootFlag = 0
	TraceRootFlag_DEPRECATED_TRUE    TraceRootFlag = 1
	TraceRootFlag_DEPRECATED_FALSE   TraceRootFlag = 2
)

// Enum value maps for TraceRootFlag.
var (
	TraceRootFlag_name = map[int32]string{
		0: "DEPRECATED_NOT_SET",
		1: "DEPRECATED_TRUE",
		2: "DEPRECATED_FALSE",
	}
	TraceRootFlag_value = map[string]int32{
		"DEPRECATED_NOT_SET": 0,
		"DEPRECATED_TRUE":    1,
		"DEPRECATED_FALSE":   2,
	}
)

// Enum returns a new pointer holding x (protobuf optional-field helper).
func (x TraceRootFlag) Enum() *TraceRootFlag {
	p := new(TraceRootFlag)
	*p = x
	return p
}

func (x TraceRootFlag) String() string {
	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
}

func (TraceRootFlag) Descriptor() protoreflect.EnumDescriptor {
	return file_datadog_trace_stats_proto_enumTypes[1].Descriptor()
}

func (TraceRootFlag) Type() protoreflect.EnumType {
	return &file_datadog_trace_stats_proto_enumTypes[1]
}

func (x TraceRootFlag) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(x)
}

// Deprecated: Use TraceRootFlag.Descriptor instead.
func (TraceRootFlag) EnumDescriptor() ([]byte, []int) {
	return file_datadog_trace_stats_proto_rawDescGZIP(), []int{1}
}
// StatsPayload is the payload used to send stats from the agent to the backend.
type StatsPayload struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	AgentHostname string `protobuf:"bytes,1,opt,name=agentHostname,proto3" json:"agentHostname,omitempty"`
	AgentEnv      string `protobuf:"bytes,2,opt,name=agentEnv,proto3" json:"agentEnv,omitempty"`
	// @gotags: json:"stats,omitempty" msg:"Stats,omitempty"
	Stats          []*ClientStatsPayload `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats,omitempty" msg:"Stats,omitempty"`
	AgentVersion   string                `protobuf:"bytes,4,opt,name=agentVersion,proto3" json:"agentVersion,omitempty"`
	ClientComputed bool                  `protobuf:"varint,5,opt,name=clientComputed,proto3" json:"clientComputed,omitempty"`
	// splitPayload indicates if the payload is actually one of several payloads split out from a larger payload.
	// This field can be used in the backend to signal if re-aggregation is necessary.
	SplitPayload bool `protobuf:"varint,6,opt,name=splitPayload,proto3" json:"splitPayload,omitempty"`
}

func (x *StatsPayload) Reset() {
	*x = StatsPayload{}
	if protoimpl.UnsafeEnabled {
		mi := &file_datadog_trace_stats_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *StatsPayload) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*StatsPayload) ProtoMessage() {}

func (x *StatsPayload) ProtoReflect() protoreflect.Message {
	mi := &file_datadog_trace_stats_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use StatsPayload.ProtoReflect.Descriptor instead.
func (*StatsPayload) Descriptor() ([]byte, []int) {
	return file_datadog_trace_stats_proto_rawDescGZIP(), []int{0}
}

// Nil-safe generated getters: each returns the field's zero value on a nil
// receiver.
func (x *StatsPayload) GetAgentHostname() string {
	if x != nil {
		return x.AgentHostname
	}
	return ""
}

func (x *StatsPayload) GetAgentEnv() string {
	if x != nil {
		return x.AgentEnv
	}
	return ""
}

func (x *StatsPayload) GetStats() []*ClientStatsPayload {
	if x != nil {
		return x.Stats
	}
	return nil
}

func (x *StatsPayload) GetAgentVersion() string {
	if x != nil {
		return x.AgentVersion
	}
	return ""
}

func (x *StatsPayload) GetClientComputed() bool {
	if x != nil {
		return x.ClientComputed
	}
	return false
}

func (x *StatsPayload) GetSplitPayload() bool {
	if x != nil {
		return x.SplitPayload
	}
	return false
}
// ClientStatsPayload is the first layer of span stats aggregation. It is also
// the payload sent by tracers to the agent when stats in tracer are enabled.
type ClientStatsPayload struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Hostname is the tracer hostname. It's extracted from spans with "_dd.hostname" meta
	// or set by tracer stats payload when hostname reporting is enabled.
	Hostname string `protobuf:"bytes,1,opt,name=hostname,proto3" json:"hostname,omitempty"`
	Env      string `protobuf:"bytes,2,opt,name=env,proto3" json:"env,omitempty"`         // env tag set on spans or in the tracers, used for aggregation
	Version  string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"` // version tag set on spans or in the tracers, used for aggregation
	// @gotags: json:"stats,omitempty" msg:"Stats,omitempty"
	Stats         []*ClientStatsBucket `protobuf:"bytes,4,rep,name=stats,proto3" json:"stats,omitempty" msg:"Stats,omitempty"`
	Lang          string               `protobuf:"bytes,5,opt,name=lang,proto3" json:"lang,omitempty"`                   // informative field not used for aggregation
	TracerVersion string               `protobuf:"bytes,6,opt,name=tracerVersion,proto3" json:"tracerVersion,omitempty"` // informative field not used for aggregation
	RuntimeID     string               `protobuf:"bytes,7,opt,name=runtimeID,proto3" json:"runtimeID,omitempty"`         // used on stats payloads sent by the tracer to identify uniquely a message
	Sequence      uint64               `protobuf:"varint,8,opt,name=sequence,proto3" json:"sequence,omitempty"`          // used on stats payloads sent by the tracer to identify uniquely a message
	// AgentAggregation is set by the agent on tracer payloads modified by the agent aggregation layer
	// characterizes counts only and distributions only payloads
	AgentAggregation string `protobuf:"bytes,9,opt,name=agentAggregation,proto3" json:"agentAggregation,omitempty"`
	// Service is the main service of the tracer.
	// It is part of unified tagging: https://docs.datadoghq.com/getting_started/tagging/unified_service_tagging
	Service string `protobuf:"bytes,10,opt,name=service,proto3" json:"service,omitempty"`
	// ContainerID specifies the origin container ID. It is meant to be populated by the client and may
	// be enhanced by the agent to ensure it is unique.
	ContainerID string `protobuf:"bytes,11,opt,name=containerID,proto3" json:"containerID,omitempty"`
	// Tags specifies a set of tags obtained from the orchestrator (where applicable) using the specified containerID.
	// This field should be left empty by the client. It only applies to some specific environment.
	Tags []string `protobuf:"bytes,12,rep,name=tags,proto3" json:"tags,omitempty"`
	// The git commit SHA is obtained from a trace, where it may be set through a tracer <-> source code integration.
	GitCommitSha string `protobuf:"bytes,13,opt,name=git_commit_sha,json=gitCommitSha,proto3" json:"git_commit_sha,omitempty"`
	// The image tag is obtained from a container's set of tags.
	ImageTag string `protobuf:"bytes,14,opt,name=image_tag,json=imageTag,proto3" json:"image_tag,omitempty"`
}

func (x *ClientStatsPayload) Reset() {
	*x = ClientStatsPayload{}
	if protoimpl.UnsafeEnabled {
		mi := &file_datadog_trace_stats_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ClientStatsPayload) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ClientStatsPayload) ProtoMessage() {}

func (x *ClientStatsPayload) ProtoReflect() protoreflect.Message {
	mi := &file_datadog_trace_stats_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientStatsPayload.ProtoReflect.Descriptor instead.
func (*ClientStatsPayload) Descriptor() ([]byte, []int) {
	return file_datadog_trace_stats_proto_rawDescGZIP(), []int{1}
}

// Nil-safe generated getters: each returns the field's zero value on a nil
// receiver.
func (x *ClientStatsPayload) GetHostname() string {
	if x != nil {
		return x.Hostname
	}
	return ""
}

func (x *ClientStatsPayload) GetEnv() string {
	if x != nil {
		return x.Env
	}
	return ""
}

func (x *ClientStatsPayload) GetVersion() string {
	if x != nil {
		return x.Version
	}
	return ""
}

func (x *ClientStatsPayload) GetStats() []*ClientStatsBucket {
	if x != nil {
		return x.Stats
	}
	return nil
}

func (x *ClientStatsPayload) GetLang() string {
	if x != nil {
		return x.Lang
	}
	return ""
}

func (x *ClientStatsPayload) GetTracerVersion() string {
	if x != nil {
		return x.TracerVersion
	}
	return ""
}

func (x *ClientStatsPayload) GetRuntimeID() string {
	if x != nil {
		return x.RuntimeID
	}
	return ""
}

func (x *ClientStatsPayload) GetSequence() uint64 {
	if x != nil {
		return x.Sequence
	}
	return 0
}

func (x *ClientStatsPayload) GetAgentAggregation() string {
	if x != nil {
		return x.AgentAggregation
	}
	return ""
}

func (x *ClientStatsPayload) GetService() string {
	if x != nil {
		return x.Service
	}
	return ""
}

func (x *ClientStatsPayload) GetContainerID() string {
	if x != nil {
		return x.ContainerID
	}
	return ""
}

func (x *ClientStatsPayload) GetTags() []string {
	if x != nil {
		return x.Tags
	}
	return nil
}

func (x *ClientStatsPayload) GetGitCommitSha() string {
	if x != nil {
		return x.GitCommitSha
	}
	return ""
}

func (x *ClientStatsPayload) GetImageTag() string {
	if x != nil {
		return x.ImageTag
	}
	return ""
}
// ClientStatsBucket is a time bucket containing aggregated stats.
type ClientStatsBucket struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Start    uint64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"`       // bucket start in nanoseconds
	Duration uint64 `protobuf:"varint,2,opt,name=duration,proto3" json:"duration,omitempty"` // bucket duration in nanoseconds
	// @gotags: json:"stats,omitempty" msg:"Stats,omitempty"
	Stats []*ClientGroupedStats `protobuf:"bytes,3,rep,name=stats,proto3" json:"stats,omitempty" msg:"Stats,omitempty"`
	// AgentTimeShift is the shift applied by the agent stats aggregator on bucket start
	// when the received bucket start is outside of the agent aggregation window
	AgentTimeShift int64 `protobuf:"varint,4,opt,name=agentTimeShift,proto3" json:"agentTimeShift,omitempty"`
}

func (x *ClientStatsBucket) Reset() {
	*x = ClientStatsBucket{}
	if protoimpl.UnsafeEnabled {
		mi := &file_datadog_trace_stats_proto_msgTypes[2]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ClientStatsBucket) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ClientStatsBucket) ProtoMessage() {}

func (x *ClientStatsBucket) ProtoReflect() protoreflect.Message {
	mi := &file_datadog_trace_stats_proto_msgTypes[2]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientStatsBucket.ProtoReflect.Descriptor instead.
func (*ClientStatsBucket) Descriptor() ([]byte, []int) {
	return file_datadog_trace_stats_proto_rawDescGZIP(), []int{2}
}

// Nil-safe generated getters: each returns the field's zero value on a nil
// receiver.
func (x *ClientStatsBucket) GetStart() uint64 {
	if x != nil {
		return x.Start
	}
	return 0
}

func (x *ClientStatsBucket) GetDuration() uint64 {
	if x != nil {
		return x.Duration
	}
	return 0
}

func (x *ClientStatsBucket) GetStats() []*ClientGroupedStats {
	if x != nil {
		return x.Stats
	}
	return nil
}

func (x *ClientStatsBucket) GetAgentTimeShift() int64 {
	if x != nil {
		return x.AgentTimeShift
	}
	return 0
}
// ClientGroupedStats aggregate stats on spans grouped by service, name, resource, status_code, type
type ClientGroupedStats struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	Service        string `protobuf:"bytes,1,opt,name=service,proto3" json:"service,omitempty"`
	Name           string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
	Resource       string `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
	HTTPStatusCode uint32 `protobuf:"varint,4,opt,name=HTTP_status_code,json=HTTPStatusCode,proto3" json:"HTTP_status_code,omitempty"`
	Type           string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"`
	DBType         string `protobuf:"bytes,6,opt,name=DB_type,json=DBType,proto3" json:"DB_type,omitempty"`      // db_type might be used in the future to help in the obfuscation step
	Hits           uint64 `protobuf:"varint,7,opt,name=hits,proto3" json:"hits,omitempty"`                       // count of all spans aggregated in the groupedstats
	Errors         uint64 `protobuf:"varint,8,opt,name=errors,proto3" json:"errors,omitempty"`                   // count of error spans aggregated in the groupedstats
	Duration       uint64 `protobuf:"varint,9,opt,name=duration,proto3" json:"duration,omitempty"`               // total duration in nanoseconds of spans aggregated in the bucket
	OkSummary      []byte `protobuf:"bytes,10,opt,name=okSummary,proto3" json:"okSummary,omitempty"`             // ddsketch summary of ok spans latencies encoded in protobuf
	ErrorSummary   []byte `protobuf:"bytes,11,opt,name=errorSummary,proto3" json:"errorSummary,omitempty"`       // ddsketch summary of error spans latencies encoded in protobuf
	Synthetics     bool   `protobuf:"varint,12,opt,name=synthetics,proto3" json:"synthetics,omitempty"`          // set to true on spans generated by synthetics traffic
	TopLevelHits   uint64 `protobuf:"varint,13,opt,name=topLevelHits,proto3" json:"topLevelHits,omitempty"`      // count of top level spans aggregated in the groupedstats
	SpanKind       string `protobuf:"bytes,15,opt,name=span_kind,json=spanKind,proto3" json:"span_kind,omitempty"` // value of the span.kind tag on the span
	// peer_tags are supplementary tags that further describe a peer entity
	// E.g., `grpc.target` to describe the name of a gRPC peer, or `db.hostname` to describe the name of peer DB
	PeerTags    []string `protobuf:"bytes,16,rep,name=peer_tags,json=peerTags,proto3" json:"peer_tags,omitempty"`
	IsTraceRoot Trilean  `protobuf:"varint,17,opt,name=is_trace_root,json=isTraceRoot,proto3,enum=datadog.trace.Trilean" json:"is_trace_root,omitempty"` // this field's value is equal to span's ParentID == 0.
}

func (x *ClientGroupedStats) Reset() {
	*x = ClientGroupedStats{}
	if protoimpl.UnsafeEnabled {
		mi := &file_datadog_trace_stats_proto_msgTypes[3]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

func (x *ClientGroupedStats) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*ClientGroupedStats) ProtoMessage() {}

func (x *ClientGroupedStats) ProtoReflect() protoreflect.Message {
	mi := &file_datadog_trace_stats_proto_msgTypes[3]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use ClientGroupedStats.ProtoReflect.Descriptor instead.
func (*ClientGroupedStats) Descriptor() ([]byte, []int) {
	return file_datadog_trace_stats_proto_rawDescGZIP(), []int{3}
}

// Nil-safe generated getters: each returns the field's zero value on a nil
// receiver.
func (x *ClientGroupedStats) GetService() string {
	if x != nil {
		return x.Service
	}
	return ""
}

func (x *ClientGroupedStats) GetName() string {
	if x != nil {
		return x.Name
	}
	return ""
}

func (x *ClientGroupedStats) GetResource() string {
	if x != nil {
		return x.Resource
	}
	return ""
}

func (x *ClientGroupedStats) GetHTTPStatusCode() uint32 {
	if x != nil {
		return x.HTTPStatusCode
	}
	return 0
}

func (x *ClientGroupedStats) GetType() string {
	if x != nil {
		return x.Type
	}
	return ""
}

func (x *ClientGroupedStats) GetDBType() string {
	if x != nil {
		return x.DBType
	}
	return ""
}

func (x *ClientGroupedStats) GetHits() uint64 {
	if x != nil {
		return x.Hits
	}
	return 0
}

func (x *ClientGroupedStats) GetErrors() uint64 {
	if x != nil {
		return x.Errors
	}
	return 0
}

func (x *ClientGroupedStats) GetDuration() uint64 {
	if x != nil {
		return x.Duration
	}
	return 0
}

func (x *ClientGroupedStats) GetOkSummary() []byte {
	if x != nil {
		return x.OkSummary
	}
	return nil
}

func (x *ClientGroupedStats) GetErrorSummary() []byte {
	if x != nil {
		return x.ErrorSummary
	}
	return nil
}

func (x *ClientGroupedStats) GetSynthetics() bool {
	if x != nil {
		return x.Synthetics
	}
	return false
}

func (x *ClientGroupedStats) GetTopLevelHits() uint64 {
	if x != nil {
		return x.TopLevelHits
	}
	return 0
}

func (x *ClientGroupedStats) GetSpanKind() string {
	if x != nil {
		return x.SpanKind
	}
	return ""
}

func (x *ClientGroupedStats) GetPeerTags() []string {
	if x != nil {
		return x.PeerTags
	}
	return nil
}

func (x *ClientGroupedStats) GetIsTraceRoot() Trilean {
	if x != nil {
		return x.IsTraceRoot
	}
	return Trilean_NOT_SET
}
// File_datadog_trace_stats_proto is the file descriptor for
// datadog/trace/stats.proto, populated by file_datadog_trace_stats_proto_init.
var File_datadog_trace_stats_proto protoreflect.FileDescriptor

// file_datadog_trace_stats_proto_rawDesc is the serialized FileDescriptorProto
// emitted by protoc; it is consumed (and released) during init.
var file_datadog_trace_stats_proto_rawDesc = []byte{
	0x0a, 0x19, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f,
	0x73, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x61, 0x74,
	0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x22, 0xf9, 0x01, 0x0a, 0x0c, 0x53,
	0x74, 0x61, 0x74, 0x73, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x61,
	0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x0d, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x48, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d,
	0x65, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x76, 0x18, 0x02, 0x20,
	0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x45, 0x6e, 0x76, 0x12, 0x37, 0x0a,
	0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64,
	0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x69,
	0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52,
	0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x56,
	0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x67,
	0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x26, 0x0a, 0x0e, 0x63, 0x6c,
	0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01,
	0x28, 0x08, 0x52, 0x0e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6d, 0x70, 0x75, 0x74,
	0x65, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f,
	0x61, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x50,
	0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xc7, 0x03, 0x0a, 0x12, 0x43, 0x6c, 0x69, 0x65, 0x6e,
	0x74, 0x53, 0x74, 0x61, 0x74, 0x73, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x1a, 0x0a,
	0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
	0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76,
	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x18, 0x0a, 0x07, 0x76,
	0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65,
	0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x04,
	0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74,
	0x72, 0x61, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73,
	0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x12, 0x0a,
	0x04, 0x6c, 0x61, 0x6e, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6c, 0x61, 0x6e,
	0x67, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69,
	0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72,
	0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x75, 0x6e, 0x74, 0x69,
	0x6d, 0x65, 0x49, 0x44, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x74,
	0x69, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63,
	0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63,
	0x65, 0x12, 0x2a, 0x0a, 0x10, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67,
	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x67, 0x65,
	0x6e, 0x74, 0x41, 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a,
	0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
	0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61,
	0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f,
	0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67,
	0x73, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x24, 0x0a,
	0x0e, 0x67, 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x18,
	0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x67, 0x69, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
	0x53, 0x68, 0x61, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x61, 0x67,
	0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x54, 0x61, 0x67,
	0x22, 0xa6, 0x01, 0x0a, 0x11, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x73,
	0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18,
	0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x1a, 0x0a, 0x08,
	0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08,
	0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74,
	0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f,
	0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x47, 0x72,
	0x6f, 0x75, 0x70, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74,
	0x73, 0x12, 0x26, 0x0a, 0x0e, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x53, 0x68,
	0x69, 0x66, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x61, 0x67, 0x65, 0x6e, 0x74,
	0x54, 0x69, 0x6d, 0x65, 0x53, 0x68, 0x69, 0x66, 0x74, 0x22, 0xff, 0x03, 0x0a, 0x12, 0x43, 0x6c,
	0x69, 0x65, 0x6e, 0x74, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x65, 0x64, 0x53, 0x74, 0x61, 0x74, 0x73,
	0x12, 0x18, 0x0a, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
	0x09, 0x52, 0x07, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61,
	0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a,
	0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
	0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x48, 0x54,
	0x54, 0x50, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x04,
	0x20, 0x01, 0x28, 0x0d, 0x52, 0x0e, 0x48, 0x54, 0x54, 0x50, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
	0x43, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x44, 0x42, 0x5f, 0x74,
	0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x44, 0x42, 0x54, 0x79, 0x70,
	0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x69, 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52,
	0x04, 0x68, 0x69, 0x74, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18,
	0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, 0x1a, 0x0a,
	0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52,
	0x08, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x6b, 0x53,
	0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x09, 0x6f, 0x6b,
	0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x22, 0x0a, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72,
	0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x65,
	0x72, 0x72, 0x6f, 0x72, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x73,
	0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52,
	0x0a, 0x73, 0x79, 0x6e, 0x74, 0x68, 0x65, 0x74, 0x69, 0x63, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x74,
	0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x48, 0x69, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28,
	0x04, 0x52, 0x0c, 0x74, 0x6f, 0x70, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x48, 0x69, 0x74, 0x73, 0x12,
	0x1b, 0x0a, 0x09, 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x0f, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x08, 0x73, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x1b, 0x0a, 0x09,
	0x70, 0x65, 0x65, 0x72, 0x5f, 0x74, 0x61, 0x67, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x09, 0x52,
	0x08, 0x70, 0x65, 0x65, 0x72, 0x54, 0x61, 0x67, 0x73, 0x12, 0x3a, 0x0a, 0x0d, 0x69, 0x73, 0x5f,
	0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0e,
	0x32, 0x16, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65,
	0x2e, 0x54, 0x72, 0x69, 0x6c, 0x65, 0x61, 0x6e, 0x52, 0x0b, 0x69, 0x73, 0x54, 0x72, 0x61, 0x63,
	0x65, 0x52, 0x6f, 0x6f, 0x74, 0x4a, 0x04, 0x08, 0x0e, 0x10, 0x0f, 0x2a, 0x2b, 0x0a, 0x07, 0x54,
	0x72, 0x69, 0x6c, 0x65, 0x61, 0x6e, 0x12, 0x0b, 0x0a, 0x07, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45,
	0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x54, 0x52, 0x55, 0x45, 0x10, 0x01, 0x12, 0x09, 0x0a,
	0x05, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x02, 0x2a, 0x52, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x63,
	0x65, 0x52, 0x6f, 0x6f, 0x74, 0x46, 0x6c, 0x61, 0x67, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x45, 0x50,
	0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x45, 0x54, 0x10,
	0x00, 0x12, 0x13, 0x0a, 0x0f, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43, 0x41, 0x54, 0x45, 0x44, 0x5f,
	0x54, 0x52, 0x55, 0x45, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x44, 0x45, 0x50, 0x52, 0x45, 0x43,
	0x41, 0x54, 0x45, 0x44, 0x5f, 0x46, 0x41, 0x4c, 0x53, 0x45, 0x10, 0x02, 0x42, 0x16, 0x5a, 0x14,
	0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f, 0x74,
	0x72, 0x61, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_datadog_trace_stats_proto_rawDescOnce sync.Once
	file_datadog_trace_stats_proto_rawDescData = file_datadog_trace_stats_proto_rawDesc
)

// file_datadog_trace_stats_proto_rawDescGZIP gzip-compresses the raw
// descriptor exactly once and returns the cached compressed form
// (used by the deprecated Descriptor methods).
func file_datadog_trace_stats_proto_rawDescGZIP() []byte {
	file_datadog_trace_stats_proto_rawDescOnce.Do(func() {
		file_datadog_trace_stats_proto_rawDescData = protoimpl.X.CompressGZIP(file_datadog_trace_stats_proto_rawDescData)
	})
	return file_datadog_trace_stats_proto_rawDescData
}
// Runtime type tables: one EnumInfo per enum and one MessageInfo per
// message declared in stats.proto.
var file_datadog_trace_stats_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
var file_datadog_trace_stats_proto_msgTypes = make([]protoimpl.MessageInfo, 4)

// file_datadog_trace_stats_proto_goTypes maps descriptor indexes to Go types.
var file_datadog_trace_stats_proto_goTypes = []interface{}{
	(Trilean)(0),               // 0: datadog.trace.Trilean
	(TraceRootFlag)(0),         // 1: datadog.trace.TraceRootFlag
	(*StatsPayload)(nil),       // 2: datadog.trace.StatsPayload
	(*ClientStatsPayload)(nil), // 3: datadog.trace.ClientStatsPayload
	(*ClientStatsBucket)(nil),  // 4: datadog.trace.ClientStatsBucket
	(*ClientGroupedStats)(nil), // 5: datadog.trace.ClientGroupedStats
}

// file_datadog_trace_stats_proto_depIdxs links each typed field to its
// index in goTypes; the tail encodes the method/extension sub-lists.
var file_datadog_trace_stats_proto_depIdxs = []int32{
	3, // 0: datadog.trace.StatsPayload.stats:type_name -> datadog.trace.ClientStatsPayload
	4, // 1: datadog.trace.ClientStatsPayload.stats:type_name -> datadog.trace.ClientStatsBucket
	5, // 2: datadog.trace.ClientStatsBucket.stats:type_name -> datadog.trace.ClientGroupedStats
	0, // 3: datadog.trace.ClientGroupedStats.is_trace_root:type_name -> datadog.trace.Trilean
	4, // [4:4] is the sub-list for method output_type
	4, // [4:4] is the sub-list for method input_type
	4, // [4:4] is the sub-list for extension type_name
	4, // [4:4] is the sub-list for extension extendee
	0, // [0:4] is the sub-list for field type_name
}
// init registers the stats.proto descriptors and types at program start.
func init() { file_datadog_trace_stats_proto_init() }

// file_datadog_trace_stats_proto_init builds the file's type information
// exactly once; later calls are no-ops. When the unsafe fast path is
// unavailable, exporter closures are installed so the runtime can reach
// each message's unexported bookkeeping fields.
func file_datadog_trace_stats_proto_init() {
	if File_datadog_trace_stats_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_datadog_trace_stats_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*StatsPayload); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_datadog_trace_stats_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ClientStatsPayload); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_datadog_trace_stats_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ClientStatsBucket); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_datadog_trace_stats_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*ClientGroupedStats); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_datadog_trace_stats_proto_rawDesc,
			NumEnums:      2,
			NumMessages:   4,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_datadog_trace_stats_proto_goTypes,
		DependencyIndexes: file_datadog_trace_stats_proto_depIdxs,
		EnumInfos:         file_datadog_trace_stats_proto_enumTypes,
		MessageInfos:      file_datadog_trace_stats_proto_msgTypes,
	}.Build()
	File_datadog_trace_stats_proto = out.File
	// Release the build-time tables; the runtime now owns the data.
	file_datadog_trace_stats_proto_rawDesc = nil
	file_datadog_trace_stats_proto_goTypes = nil
	file_datadog_trace_stats_proto_depIdxs = nil
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,52 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package trace
//go:generate go run github.com/tinylib/msgp -file=span.pb.go -o span_gen.go -io=false
//go:generate go run github.com/tinylib/msgp -file=tracer_payload.pb.go -o tracer_payload_gen.go -io=false
//go:generate go run github.com/tinylib/msgp -io=false
// Trace is a collection of spans with the same trace ID.
type Trace []*Span

// Traces is a list of traces. This model matters as this is what we unpack from msgp.
type Traces []Trace
// RemoveChunk removes the chunk at index i from the payload.
//
// Removal is O(1): the last chunk is moved into slot i and the slice is
// shortened by one, so chunk order is not preserved. Out-of-range
// indexes are silently ignored.
func (p *TracerPayload) RemoveChunk(i int) {
	n := len(p.Chunks)
	if i < 0 || i >= n {
		return
	}
	p.Chunks[i] = p.Chunks[n-1]
	p.Chunks = p.Chunks[:n-1]
}
// Cut splits off and returns a new TracerPayload holding chunks
// [0, i-1]; chunks [i, n-1] stay in the original payload `p`.
//
// The payload-level metadata (container ID, language, versions, env,
// hostname, tags, ...) is copied into the returned payload. The index
// is clamped to [0, len(p.Chunks)], and both payloads keep sharing the
// original backing array of the chunk slice.
func (p *TracerPayload) Cut(i int) *TracerPayload {
	switch {
	case i < 0:
		i = 0
	case i > len(p.Chunks):
		i = len(p.Chunks)
	}
	head := TracerPayload{
		ContainerID:     p.GetContainerID(),
		LanguageName:    p.GetLanguageName(),
		LanguageVersion: p.GetLanguageVersion(),
		TracerVersion:   p.GetTracerVersion(),
		RuntimeID:       p.GetRuntimeID(),
		Env:             p.GetEnv(),
		Hostname:        p.GetHostname(),
		AppVersion:      p.GetAppVersion(),
		Tags:            p.GetTags(),
	}
	head.Chunks, p.Chunks = p.Chunks[:i], p.Chunks[i:]
	return &head
}

View File

@@ -0,0 +1,158 @@
package trace
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// MarshalMsg implements msgp.Marshaler.
// The trace is encoded as a msgpack array; nil span entries are encoded
// as msgpack nil so they round-trip through UnmarshalMsg.
func (z Trace) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	o = msgp.AppendArrayHeader(o, uint32(len(z)))
	for za0001 := range z {
		if z[za0001] == nil {
			o = msgp.AppendNil(o)
		} else {
			o, err = z[za0001].MarshalMsg(o)
			if err != nil {
				err = msgp.WrapError(err, za0001)
				return
			}
		}
	}
	return
}
// UnmarshalMsg implements msgp.Unmarshaler.
// The existing backing array is reused when its capacity suffices;
// msgpack nil entries decode to nil *Span.
func (z *Trace) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var zb0002 uint32
	zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if cap((*z)) >= int(zb0002) {
		(*z) = (*z)[:zb0002]
	} else {
		(*z) = make(Trace, zb0002)
	}
	for zb0001 := range *z {
		if msgp.IsNil(bts) {
			bts, err = msgp.ReadNilBytes(bts)
			if err != nil {
				return
			}
			(*z)[zb0001] = nil
		} else {
			if (*z)[zb0001] == nil {
				(*z)[zb0001] = new(Span)
			}
			bts, err = (*z)[zb0001].UnmarshalMsg(bts)
			if err != nil {
				err = msgp.WrapError(err, zb0001)
				return
			}
		}
	}
	o = bts
	return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message.
func (z Trace) Msgsize() (s int) {
	s = msgp.ArrayHeaderSize
	for zb0003 := range z {
		if z[zb0003] == nil {
			s += msgp.NilSize
		} else {
			s += z[zb0003].Msgsize()
		}
	}
	return
}
// MarshalMsg implements msgp.Marshaler.
// Traces encode as a msgpack array of arrays of spans; nil spans are
// encoded as msgpack nil.
func (z Traces) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	o = msgp.AppendArrayHeader(o, uint32(len(z)))
	for za0001 := range z {
		o = msgp.AppendArrayHeader(o, uint32(len(z[za0001])))
		for za0002 := range z[za0001] {
			if z[za0001][za0002] == nil {
				o = msgp.AppendNil(o)
			} else {
				o, err = z[za0001][za0002].MarshalMsg(o)
				if err != nil {
					err = msgp.WrapError(err, za0001, za0002)
					return
				}
			}
		}
	}
	return
}
// UnmarshalMsg implements msgp.Unmarshaler.
// Outer and inner backing arrays are reused when their capacity
// suffices; msgpack nil entries decode to nil *Span.
func (z *Traces) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var zb0003 uint32
	zb0003, bts, err = msgp.ReadArrayHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	if cap((*z)) >= int(zb0003) {
		(*z) = (*z)[:zb0003]
	} else {
		(*z) = make(Traces, zb0003)
	}
	for zb0001 := range *z {
		var zb0004 uint32
		zb0004, bts, err = msgp.ReadArrayHeaderBytes(bts)
		if err != nil {
			err = msgp.WrapError(err, zb0001)
			return
		}
		if cap((*z)[zb0001]) >= int(zb0004) {
			(*z)[zb0001] = ((*z)[zb0001])[:zb0004]
		} else {
			(*z)[zb0001] = make(Trace, zb0004)
		}
		for zb0002 := range (*z)[zb0001] {
			if msgp.IsNil(bts) {
				bts, err = msgp.ReadNilBytes(bts)
				if err != nil {
					return
				}
				(*z)[zb0001][zb0002] = nil
			} else {
				if (*z)[zb0001][zb0002] == nil {
					(*z)[zb0001][zb0002] = new(Span)
				}
				bts, err = (*z)[zb0001][zb0002].UnmarshalMsg(bts)
				if err != nil {
					err = msgp.WrapError(err, zb0001, zb0002)
					return
				}
			}
		}
	}
	o = bts
	return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message.
func (z Traces) Msgsize() (s int) {
	s = msgp.ArrayHeaderSize
	for zb0005 := range z {
		s += msgp.ArrayHeaderSize
		for zb0006 := range z[zb0005] {
			if z[zb0005][zb0006] == nil {
				s += msgp.NilSize
			} else {
				s += z[zb0005][zb0006].Msgsize()
			}
		}
	}
	return
}

View File

@@ -0,0 +1,391 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.0
// protoc v5.26.1
// source: datadog/trace/tracer_payload.proto
package trace
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
)
const (
	// Verify that this generated code is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
	// Verify that runtime/protoimpl is sufficiently up-to-date.
	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
)

// TraceChunk represents a list of spans with the same trace ID. In other words, a chunk of a trace.
type TraceChunk struct {
	// Internal protobuf bookkeeping; managed by the protoimpl runtime.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// priority specifies sampling priority of the trace.
	// @gotags: json:"priority" msg:"priority"
	Priority int32 `protobuf:"varint,1,opt,name=priority,proto3" json:"priority" msg:"priority"`
	// origin specifies origin product ("lambda", "rum", etc.) of the trace.
	// @gotags: json:"origin" msg:"origin"
	Origin string `protobuf:"bytes,2,opt,name=origin,proto3" json:"origin" msg:"origin"`
	// spans specifies list of containing spans.
	// @gotags: json:"spans" msg:"spans"
	Spans []*Span `protobuf:"bytes,3,rep,name=spans,proto3" json:"spans" msg:"spans"`
	// tags specifies tags common in all `spans`.
	// @gotags: json:"tags" msg:"tags"
	Tags map[string]string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"tags"`
	// droppedTrace specifies whether the trace was dropped by samplers or not.
	// @gotags: json:"dropped_trace" msg:"dropped_trace"
	DroppedTrace bool `protobuf:"varint,5,opt,name=droppedTrace,proto3" json:"dropped_trace" msg:"dropped_trace"`
}
// Reset restores x to the zero TraceChunk. Under the protoimpl unsafe
// fast path the message-info pointer is stored up front.
func (x *TraceChunk) Reset() {
	*x = TraceChunk{}
	if protoimpl.UnsafeEnabled {
		mi := &file_datadog_trace_tracer_payload_proto_msgTypes[0]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the standard protobuf text format.
func (x *TraceChunk) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks TraceChunk as a protobuf message.
func (*TraceChunk) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info on first use when the unsafe path is enabled.
func (x *TraceChunk) ProtoReflect() protoreflect.Message {
	mi := &file_datadog_trace_tracer_payload_proto_msgTypes[0]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TraceChunk.ProtoReflect.Descriptor instead.
func (*TraceChunk) Descriptor() ([]byte, []int) {
	return file_datadog_trace_tracer_payload_proto_rawDescGZIP(), []int{0}
}
// Generated accessors. Each getter is safe to call on a nil receiver
// and returns the field's zero value in that case.

func (x *TraceChunk) GetPriority() int32 {
	if x != nil {
		return x.Priority
	}
	return 0
}

func (x *TraceChunk) GetOrigin() string {
	if x != nil {
		return x.Origin
	}
	return ""
}

func (x *TraceChunk) GetSpans() []*Span {
	if x != nil {
		return x.Spans
	}
	return nil
}

func (x *TraceChunk) GetTags() map[string]string {
	if x != nil {
		return x.Tags
	}
	return nil
}

func (x *TraceChunk) GetDroppedTrace() bool {
	if x != nil {
		return x.DroppedTrace
	}
	return false
}
// TracerPayload represents a payload the trace agent receives from tracers.
type TracerPayload struct {
	// Internal protobuf bookkeeping; managed by the protoimpl runtime.
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields
	// containerID specifies the ID of the container where the tracer is running on.
	// @gotags: json:"container_id" msg:"container_id"
	ContainerID string `protobuf:"bytes,1,opt,name=containerID,proto3" json:"container_id" msg:"container_id"`
	// languageName specifies language of the tracer.
	// @gotags: json:"language_name" msg:"language_name"
	LanguageName string `protobuf:"bytes,2,opt,name=languageName,proto3" json:"language_name" msg:"language_name"`
	// languageVersion specifies language version of the tracer.
	// @gotags: json:"language_version" msg:"language_version"
	LanguageVersion string `protobuf:"bytes,3,opt,name=languageVersion,proto3" json:"language_version" msg:"language_version"`
	// tracerVersion specifies version of the tracer.
	// @gotags: json:"tracer_version" msg:"tracer_version"
	TracerVersion string `protobuf:"bytes,4,opt,name=tracerVersion,proto3" json:"tracer_version" msg:"tracer_version"`
	// runtimeID specifies V4 UUID representation of a tracer session.
	// @gotags: json:"runtime_id" msg:"runtime_id"
	RuntimeID string `protobuf:"bytes,5,opt,name=runtimeID,proto3" json:"runtime_id" msg:"runtime_id"`
	// chunks specifies list of containing trace chunks.
	// @gotags: json:"chunks" msg:"chunks"
	Chunks []*TraceChunk `protobuf:"bytes,6,rep,name=chunks,proto3" json:"chunks" msg:"chunks"`
	// tags specifies tags common in all `chunks`.
	// @gotags: json:"tags" msg:"tags"
	Tags map[string]string `protobuf:"bytes,7,rep,name=tags,proto3" json:"tags" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3" msg:"tags"`
	// env specifies `env` tag that set with the tracer.
	// @gotags: json:"env" msg:"env"
	Env string `protobuf:"bytes,8,opt,name=env,proto3" json:"env" msg:"env"`
	// hostname specifies hostname of where the tracer is running.
	// @gotags: json:"hostname" msg:"hostname"
	Hostname string `protobuf:"bytes,9,opt,name=hostname,proto3" json:"hostname" msg:"hostname"`
	// version specifies `version` tag that set with the tracer.
	// @gotags: json:"app_version" msg:"app_version"
	AppVersion string `protobuf:"bytes,10,opt,name=appVersion,proto3" json:"app_version" msg:"app_version"`
}
// Reset restores x to the zero TracerPayload. Under the protoimpl
// unsafe fast path the message-info pointer is stored up front.
func (x *TracerPayload) Reset() {
	*x = TracerPayload{}
	if protoimpl.UnsafeEnabled {
		mi := &file_datadog_trace_tracer_payload_proto_msgTypes[1]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message in the standard protobuf text format.
func (x *TracerPayload) String() string {
	return protoimpl.X.MessageStringOf(x)
}

// ProtoMessage marks TracerPayload as a protobuf message.
func (*TracerPayload) ProtoMessage() {}

// ProtoReflect returns the reflective view of the message, lazily
// caching the message info on first use when the unsafe path is enabled.
func (x *TracerPayload) ProtoReflect() protoreflect.Message {
	mi := &file_datadog_trace_tracer_payload_proto_msgTypes[1]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use TracerPayload.ProtoReflect.Descriptor instead.
func (*TracerPayload) Descriptor() ([]byte, []int) {
	return file_datadog_trace_tracer_payload_proto_rawDescGZIP(), []int{1}
}
// Generated accessors. Each getter is safe to call on a nil receiver
// and returns the field's zero value in that case.

func (x *TracerPayload) GetContainerID() string {
	if x != nil {
		return x.ContainerID
	}
	return ""
}

func (x *TracerPayload) GetLanguageName() string {
	if x != nil {
		return x.LanguageName
	}
	return ""
}

func (x *TracerPayload) GetLanguageVersion() string {
	if x != nil {
		return x.LanguageVersion
	}
	return ""
}

func (x *TracerPayload) GetTracerVersion() string {
	if x != nil {
		return x.TracerVersion
	}
	return ""
}

func (x *TracerPayload) GetRuntimeID() string {
	if x != nil {
		return x.RuntimeID
	}
	return ""
}

func (x *TracerPayload) GetChunks() []*TraceChunk {
	if x != nil {
		return x.Chunks
	}
	return nil
}

func (x *TracerPayload) GetTags() map[string]string {
	if x != nil {
		return x.Tags
	}
	return nil
}

func (x *TracerPayload) GetEnv() string {
	if x != nil {
		return x.Env
	}
	return ""
}

func (x *TracerPayload) GetHostname() string {
	if x != nil {
		return x.Hostname
	}
	return ""
}

func (x *TracerPayload) GetAppVersion() string {
	if x != nil {
		return x.AppVersion
	}
	return ""
}
// File_datadog_trace_tracer_payload_proto is the file descriptor for
// datadog/trace/tracer_payload.proto, populated by
// file_datadog_trace_tracer_payload_proto_init.
var File_datadog_trace_tracer_payload_proto protoreflect.FileDescriptor

// file_datadog_trace_tracer_payload_proto_rawDesc is the serialized
// FileDescriptorProto emitted by protoc; consumed (and released) during init.
var file_datadog_trace_tracer_payload_proto_rawDesc = []byte{
	0x0a, 0x22, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f,
	0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x70,
	0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72,
	0x61, 0x63, 0x65, 0x1a, 0x18, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2f, 0x74, 0x72, 0x61,
	0x63, 0x65, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x81, 0x02,
	0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x1a, 0x0a, 0x08,
	0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08,
	0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67,
	0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e,
	0x12, 0x29, 0x0a, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32,
	0x13, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e,
	0x53, 0x70, 0x61, 0x6e, 0x52, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x04, 0x74,
	0x61, 0x67, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x64, 0x61, 0x74, 0x61,
	0x64, 0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43,
	0x68, 0x75, 0x6e, 0x6b, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04,
	0x74, 0x61, 0x67, 0x73, 0x12, 0x22, 0x0a, 0x0c, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x54,
	0x72, 0x61, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x64, 0x72, 0x6f, 0x70,
	0x70, 0x65, 0x64, 0x54, 0x72, 0x61, 0x63, 0x65, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73,
	0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
	0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38,
	0x01, 0x22, 0xb9, 0x03, 0x0a, 0x0d, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50, 0x61, 0x79, 0x6c,
	0x6f, 0x61, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
	0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69,
	0x6e, 0x65, 0x72, 0x49, 0x44, 0x12, 0x22, 0x0a, 0x0c, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67,
	0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6c, 0x61, 0x6e,
	0x67, 0x75, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x0f, 0x6c, 0x61, 0x6e,
	0x67, 0x75, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x0f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x56, 0x65, 0x72, 0x73,
	0x69, 0x6f, 0x6e, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x56, 0x65, 0x72,
	0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63,
	0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x72, 0x75, 0x6e,
	0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x72, 0x75,
	0x6e, 0x74, 0x69, 0x6d, 0x65, 0x49, 0x44, 0x12, 0x31, 0x0a, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b,
	0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64, 0x6f,
	0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x68, 0x75,
	0x6e, 0x6b, 0x52, 0x06, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73, 0x12, 0x3a, 0x0a, 0x04, 0x74, 0x61,
	0x67, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x64,
	0x6f, 0x67, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x72, 0x50,
	0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79,
	0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x6e, 0x76, 0x18, 0x08, 0x20,
	0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x6e, 0x76, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74,
	0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74,
	0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72, 0x73, 0x69,
	0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x70, 0x70, 0x56, 0x65, 0x72,
	0x73, 0x69, 0x6f, 0x6e, 0x1a, 0x37, 0x0a, 0x09, 0x54, 0x61, 0x67, 0x73, 0x45, 0x6e, 0x74, 0x72,
	0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03,
	0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
	0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x16, 0x5a,
	0x14, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x62, 0x67, 0x6f, 0x2f,
	0x74, 0x72, 0x61, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}

var (
	file_datadog_trace_tracer_payload_proto_rawDescOnce sync.Once
	file_datadog_trace_tracer_payload_proto_rawDescData = file_datadog_trace_tracer_payload_proto_rawDesc
)

// file_datadog_trace_tracer_payload_proto_rawDescGZIP gzip-compresses
// the raw descriptor exactly once and returns the cached compressed
// form (used by the deprecated Descriptor methods).
func file_datadog_trace_tracer_payload_proto_rawDescGZIP() []byte {
	file_datadog_trace_tracer_payload_proto_rawDescOnce.Do(func() {
		file_datadog_trace_tracer_payload_proto_rawDescData = protoimpl.X.CompressGZIP(file_datadog_trace_tracer_payload_proto_rawDescData)
	})
	return file_datadog_trace_tracer_payload_proto_rawDescData
}
// Runtime type tables consumed by the TypeBuilder in
// file_datadog_trace_tracer_payload_proto_init: per-message runtime info,
// the Go types backing each descriptor index, and the dependency index list.
var file_datadog_trace_tracer_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
var file_datadog_trace_tracer_payload_proto_goTypes = []interface{}{
	(*TraceChunk)(nil),    // 0: datadog.trace.TraceChunk
	(*TracerPayload)(nil), // 1: datadog.trace.TracerPayload
	nil,                   // 2: datadog.trace.TraceChunk.TagsEntry
	nil,                   // 3: datadog.trace.TracerPayload.TagsEntry
	(*Span)(nil),          // 4: datadog.trace.Span
}

// depIdxs maps each message-typed field to an index in goTypes above; the
// trailing bracketed entries delimit the method/extension sub-lists (all
// empty for this file).
var file_datadog_trace_tracer_payload_proto_depIdxs = []int32{
	4, // 0: datadog.trace.TraceChunk.spans:type_name -> datadog.trace.Span
	2, // 1: datadog.trace.TraceChunk.tags:type_name -> datadog.trace.TraceChunk.TagsEntry
	0, // 2: datadog.trace.TracerPayload.chunks:type_name -> datadog.trace.TraceChunk
	3, // 3: datadog.trace.TracerPayload.tags:type_name -> datadog.trace.TracerPayload.TagsEntry
	4, // [4:4] is the sub-list for method output_type
	4, // [4:4] is the sub-list for method input_type
	4, // [4:4] is the sub-list for extension type_name
	4, // [4:4] is the sub-list for extension extendee
	0, // [0:4] is the sub-list for field type_name
}
func init() { file_datadog_trace_tracer_payload_proto_init() }

// file_datadog_trace_tracer_payload_proto_init registers the raw descriptor
// and Go types with the protobuf runtime. It is idempotent: once
// File_datadog_trace_tracer_payload_proto is set, subsequent calls return
// immediately.
func file_datadog_trace_tracer_payload_proto_init() {
	if File_datadog_trace_tracer_payload_proto != nil {
		return
	}
	// The Span message lives in a separate descriptor file that TraceChunk
	// references, so ensure it is initialized first.
	file_datadog_trace_span_proto_init()
	if !protoimpl.UnsafeEnabled {
		// Without unsafe access, the runtime needs exporter functions to reach
		// the unexported bookkeeping fields of each generated message struct.
		file_datadog_trace_tracer_payload_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TraceChunk); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_datadog_trace_tracer_payload_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*TracerPayload); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_datadog_trace_tracer_payload_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   4,
			NumExtensions: 0,
			NumServices:   0,
		},
		GoTypes:           file_datadog_trace_tracer_payload_proto_goTypes,
		DependencyIndexes: file_datadog_trace_tracer_payload_proto_depIdxs,
		MessageInfos:      file_datadog_trace_tracer_payload_proto_msgTypes,
	}.Build()
	File_datadog_trace_tracer_payload_proto = out.File
	// Release the one-time build inputs so they can be garbage collected.
	file_datadog_trace_tracer_payload_proto_rawDesc = nil
	file_datadog_trace_tracer_payload_proto_goTypes = nil
	file_datadog_trace_tracer_payload_proto_depIdxs = nil
}

View File

@@ -0,0 +1,384 @@
package trace
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// MarshalMsg implements msgp.Marshaler.
// It appends the msgpack encoding of z (a 5-entry map) to b and returns the
// extended slice. The hex literals are the pre-encoded map header and key
// strings (e.g. 0x85 = fixmap of 5, 0xa8 "priority").
func (z *TraceChunk) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 5
	// string "priority"
	o = append(o, 0x85, 0xa8, 0x70, 0x72, 0x69, 0x6f, 0x72, 0x69, 0x74, 0x79)
	o = msgp.AppendInt32(o, z.Priority)
	// string "origin"
	o = append(o, 0xa6, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e)
	o = msgp.AppendString(o, z.Origin)
	// string "spans"
	o = append(o, 0xa5, 0x73, 0x70, 0x61, 0x6e, 0x73)
	o = msgp.AppendArrayHeader(o, uint32(len(z.Spans)))
	for za0001 := range z.Spans {
		// nil span pointers are encoded as msgpack nil, not dropped
		if z.Spans[za0001] == nil {
			o = msgp.AppendNil(o)
		} else {
			o, err = z.Spans[za0001].MarshalMsg(o)
			if err != nil {
				err = msgp.WrapError(err, "Spans", za0001)
				return
			}
		}
	}
	// string "tags"
	o = append(o, 0xa4, 0x74, 0x61, 0x67, 0x73)
	o = msgp.AppendMapHeader(o, uint32(len(z.Tags)))
	for za0002, za0003 := range z.Tags {
		o = msgp.AppendString(o, za0002)
		o = msgp.AppendString(o, za0003)
	}
	// string "dropped_trace"
	o = append(o, 0xad, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x65)
	o = msgp.AppendBool(o, z.DroppedTrace)
	return
}
// UnmarshalMsg implements msgp.Unmarshaler.
// It decodes one msgpack map from bts into z and returns the remaining bytes.
// Unknown keys are skipped (forward compatible); keys absent from the input
// leave the corresponding fields of z unchanged.
func (z *TraceChunk) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "priority":
			z.Priority, bts, err = msgp.ReadInt32Bytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Priority")
				return
			}
		case "origin":
			z.Origin, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Origin")
				return
			}
		case "spans":
			var zb0002 uint32
			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Spans")
				return
			}
			// Reuse the existing backing array when it is large enough.
			if cap(z.Spans) >= int(zb0002) {
				z.Spans = (z.Spans)[:zb0002]
			} else {
				z.Spans = make([]*Span, zb0002)
			}
			for za0001 := range z.Spans {
				if msgp.IsNil(bts) {
					bts, err = msgp.ReadNilBytes(bts)
					if err != nil {
						return
					}
					z.Spans[za0001] = nil
				} else {
					// Reuse a previously allocated Span where possible.
					if z.Spans[za0001] == nil {
						z.Spans[za0001] = new(Span)
					}
					bts, err = z.Spans[za0001].UnmarshalMsg(bts)
					if err != nil {
						err = msgp.WrapError(err, "Spans", za0001)
						return
					}
				}
			}
		case "tags":
			var zb0003 uint32
			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Tags")
				return
			}
			// Reuse the map when present, clearing stale entries first.
			if z.Tags == nil {
				z.Tags = make(map[string]string, zb0003)
			} else if len(z.Tags) > 0 {
				for key := range z.Tags {
					delete(z.Tags, key)
				}
			}
			for zb0003 > 0 {
				var za0002 string
				var za0003 string
				zb0003--
				za0002, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Tags")
					return
				}
				za0003, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Tags", za0002)
					return
				}
				z.Tags[za0002] = za0003
			}
		case "dropped_trace":
			z.DroppedTrace, bts, err = msgp.ReadBoolBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "DroppedTrace")
				return
			}
		default:
			// Unknown key: skip its value and continue.
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by
// the serialized message. The integer literals account for the map header and
// the encoded key strings (e.g. 9 = len("priority")+1-byte str header).
func (z *TraceChunk) Msgsize() (s int) {
	s = 1 + 9 + msgp.Int32Size + 7 + msgp.StringPrefixSize + len(z.Origin) + 6 + msgp.ArrayHeaderSize
	for za0001 := range z.Spans {
		if z.Spans[za0001] == nil {
			s += msgp.NilSize
		} else {
			s += z.Spans[za0001].Msgsize()
		}
	}
	s += 5 + msgp.MapHeaderSize
	if z.Tags != nil {
		for za0002, za0003 := range z.Tags {
			_ = za0003
			s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003)
		}
	}
	s += 14 + msgp.BoolSize
	return
}
// MarshalMsg implements msgp.Marshaler.
// It appends the msgpack encoding of z (a 10-entry map) to b and returns the
// extended slice. The hex literals are the pre-encoded map header and key
// strings (e.g. 0x8a = fixmap of 10, 0xac "container_id").
func (z *TracerPayload) MarshalMsg(b []byte) (o []byte, err error) {
	o = msgp.Require(b, z.Msgsize())
	// map header, size 10
	// string "container_id"
	o = append(o, 0x8a, 0xac, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64)
	o = msgp.AppendString(o, z.ContainerID)
	// string "language_name"
	o = append(o, 0xad, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65)
	o = msgp.AppendString(o, z.LanguageName)
	// string "language_version"
	o = append(o, 0xb0, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
	o = msgp.AppendString(o, z.LanguageVersion)
	// string "tracer_version"
	o = append(o, 0xae, 0x74, 0x72, 0x61, 0x63, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
	o = msgp.AppendString(o, z.TracerVersion)
	// string "runtime_id"
	o = append(o, 0xaa, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x69, 0x64)
	o = msgp.AppendString(o, z.RuntimeID)
	// string "chunks"
	o = append(o, 0xa6, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x73)
	o = msgp.AppendArrayHeader(o, uint32(len(z.Chunks)))
	for za0001 := range z.Chunks {
		// nil chunk pointers are encoded as msgpack nil, not dropped
		if z.Chunks[za0001] == nil {
			o = msgp.AppendNil(o)
		} else {
			o, err = z.Chunks[za0001].MarshalMsg(o)
			if err != nil {
				err = msgp.WrapError(err, "Chunks", za0001)
				return
			}
		}
	}
	// string "tags"
	o = append(o, 0xa4, 0x74, 0x61, 0x67, 0x73)
	o = msgp.AppendMapHeader(o, uint32(len(z.Tags)))
	for za0002, za0003 := range z.Tags {
		o = msgp.AppendString(o, za0002)
		o = msgp.AppendString(o, za0003)
	}
	// string "env"
	o = append(o, 0xa3, 0x65, 0x6e, 0x76)
	o = msgp.AppendString(o, z.Env)
	// string "hostname"
	o = append(o, 0xa8, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65)
	o = msgp.AppendString(o, z.Hostname)
	// string "app_version"
	o = append(o, 0xab, 0x61, 0x70, 0x70, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e)
	o = msgp.AppendString(o, z.AppVersion)
	return
}
// UnmarshalMsg implements msgp.Unmarshaler.
// It decodes one msgpack map from bts into z and returns the remaining bytes.
// Unknown keys are skipped (forward compatible); keys absent from the input
// leave the corresponding fields of z unchanged.
func (z *TracerPayload) UnmarshalMsg(bts []byte) (o []byte, err error) {
	var field []byte
	_ = field
	var zb0001 uint32
	zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
	if err != nil {
		err = msgp.WrapError(err)
		return
	}
	for zb0001 > 0 {
		zb0001--
		field, bts, err = msgp.ReadMapKeyZC(bts)
		if err != nil {
			err = msgp.WrapError(err)
			return
		}
		switch msgp.UnsafeString(field) {
		case "container_id":
			z.ContainerID, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "ContainerID")
				return
			}
		case "language_name":
			z.LanguageName, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "LanguageName")
				return
			}
		case "language_version":
			z.LanguageVersion, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "LanguageVersion")
				return
			}
		case "tracer_version":
			z.TracerVersion, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "TracerVersion")
				return
			}
		case "runtime_id":
			z.RuntimeID, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "RuntimeID")
				return
			}
		case "chunks":
			var zb0002 uint32
			zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Chunks")
				return
			}
			// Reuse the existing backing array when it is large enough.
			if cap(z.Chunks) >= int(zb0002) {
				z.Chunks = (z.Chunks)[:zb0002]
			} else {
				z.Chunks = make([]*TraceChunk, zb0002)
			}
			for za0001 := range z.Chunks {
				if msgp.IsNil(bts) {
					bts, err = msgp.ReadNilBytes(bts)
					if err != nil {
						return
					}
					z.Chunks[za0001] = nil
				} else {
					// Reuse a previously allocated TraceChunk where possible.
					if z.Chunks[za0001] == nil {
						z.Chunks[za0001] = new(TraceChunk)
					}
					bts, err = z.Chunks[za0001].UnmarshalMsg(bts)
					if err != nil {
						err = msgp.WrapError(err, "Chunks", za0001)
						return
					}
				}
			}
		case "tags":
			var zb0003 uint32
			zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Tags")
				return
			}
			// Reuse the map when present, clearing stale entries first.
			if z.Tags == nil {
				z.Tags = make(map[string]string, zb0003)
			} else if len(z.Tags) > 0 {
				for key := range z.Tags {
					delete(z.Tags, key)
				}
			}
			for zb0003 > 0 {
				var za0002 string
				var za0003 string
				zb0003--
				za0002, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Tags")
					return
				}
				za0003, bts, err = msgp.ReadStringBytes(bts)
				if err != nil {
					err = msgp.WrapError(err, "Tags", za0002)
					return
				}
				z.Tags[za0002] = za0003
			}
		case "env":
			z.Env, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Env")
				return
			}
		case "hostname":
			z.Hostname, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "Hostname")
				return
			}
		case "app_version":
			z.AppVersion, bts, err = msgp.ReadStringBytes(bts)
			if err != nil {
				err = msgp.WrapError(err, "AppVersion")
				return
			}
		default:
			// Unknown key: skip its value and continue.
			bts, err = msgp.Skip(bts)
			if err != nil {
				err = msgp.WrapError(err)
				return
			}
		}
	}
	o = bts
	return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by
// the serialized message. The integer literals account for the map header and
// the encoded key strings (e.g. 13 = len("container_id")+1-byte str header).
func (z *TracerPayload) Msgsize() (s int) {
	s = 1 + 13 + msgp.StringPrefixSize + len(z.ContainerID) + 14 + msgp.StringPrefixSize + len(z.LanguageName) + 17 + msgp.StringPrefixSize + len(z.LanguageVersion) + 15 + msgp.StringPrefixSize + len(z.TracerVersion) + 11 + msgp.StringPrefixSize + len(z.RuntimeID) + 7 + msgp.ArrayHeaderSize
	for za0001 := range z.Chunks {
		if z.Chunks[za0001] == nil {
			s += msgp.NilSize
		} else {
			s += z.Chunks[za0001].Msgsize()
		}
	}
	s += 5 + msgp.MapHeaderSize
	if z.Tags != nil {
		for za0002, za0003 := range z.Tags {
			_ = za0003
			s += msgp.StringPrefixSize + len(za0002) + msgp.StringPrefixSize + len(za0003)
		}
	}
	s += 4 + msgp.StringPrefixSize + len(z.Env) + 9 + msgp.StringPrefixSize + len(z.Hostname) + 12 + msgp.StringPrefixSize + len(z.AppVersion)
	return
}

View File

@@ -0,0 +1,35 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package trace
// traceChunkCopiedFields records the fields that are copied in ShallowCopy.
// Keys are the Go field names of TraceChunk.
// This should match exactly the fields set in (*TraceChunk).ShallowCopy.
// This is used by tests to enforce the correctness of ShallowCopy.
var traceChunkCopiedFields = map[string]struct{}{
	"Priority":     {},
	"Origin":       {},
	"Spans":        {},
	"Tags":         {},
	"DroppedTrace": {},
}
// ShallowCopy returns a shallow copy of the copy-able portion of a TraceChunk:
// the public fields which have Get* accessors. Completeness is enforced by
// tests via traceChunkCopiedFields. Instead of using pkg/proto/utils.ProtoCopier,
// which incurs heavy reflection cost for every copy at runtime, the field list
// is checked once so each copy stays a plain struct literal.
func (t *TraceChunk) ShallowCopy() *TraceChunk {
	if t == nil {
		return nil
	}
	dup := TraceChunk{
		Priority:     t.Priority,
		Origin:       t.Origin,
		Spans:        t.Spans,
		Tags:         t.Tags,
		DroppedTrace: t.DroppedTrace,
	}
	return &dup
}

File diff suppressed because it is too large Load Diff

View File

@@ -17,12 +17,17 @@ var validProducts = map[string]struct{}{
ProductCWSDD: {},
ProductCWSCustom: {},
ProductCWSProfiles: {},
ProductCSMSideScanning: {},
ProductASM: {},
ProductASMFeatures: {},
ProductASMDD: {},
ProductASMData: {},
ProductAPMTracing: {},
ProductSDSRules: {},
ProductSDSAgentConfig: {},
ProductLiveDebugging: {},
ProductContainerAutoscalingSettings: {},
ProductContainerAutoscalingValues: {},
ProductTesting1: {},
ProductTesting2: {},
}
@@ -50,6 +55,8 @@ const (
ProductCWSCustom = "CWS_CUSTOM"
// ProductCWSProfiles is the cloud workload security profile product
ProductCWSProfiles = "CWS_SECURITY_PROFILES"
// ProductCSMSideScanning is the side scanning product
ProductCSMSideScanning = "CSM_SIDE_SCANNING"
// ProductASM is the ASM product used by customers to issue rules configurations
ProductASM = "ASM"
// ProductASMFeatures is the ASM product used form ASM activation through remote config
@@ -60,8 +67,16 @@ const (
ProductASMData = "ASM_DATA"
// ProductAPMTracing is the apm tracing product
ProductAPMTracing = "APM_TRACING"
// ProductSDSRules is the SDS definitions product
ProductSDSRules = "SDS_RULES_DD"
// ProductSDSAgentConfig is the user SDS configurations product.
ProductSDSAgentConfig = "SDS_AGENT_CONFIG"
// ProductLiveDebugging is the dynamic instrumentation product
ProductLiveDebugging = "LIVE_DEBUGGING"
// ProductContainerAutoscalingSettings receives definition of container autoscaling
ProductContainerAutoscalingSettings = "CONTAINER_AUTOSCALING_SETTINGS"
// ProductContainerAutoscalingValues receives values for container autoscaling
ProductContainerAutoscalingValues = "CONTAINER_AUTOSCALING_VALUES"
// ProductTesting1 is a product used for testing remote config
ProductTesting1 = "TESTING1"
// ProductTesting2 is a product used for testing remote config

View File

@@ -0,0 +1,200 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2016-present Datadog, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,70 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
// Package config contains the configuration for the trace-agent.
package config
import (
"net/http"
"sync"
"time"
)
// TODO(gbbr): Perhaps this is not the best place for this structure.
// ResetClient wraps (http.Client).Do and resets the underlying connections at the
// configured interval
type ResetClient struct {
httpClientFactory func() *http.Client
resetInterval time.Duration
mu sync.RWMutex
httpClient *http.Client
lastReset time.Time
}
// NewResetClient returns an initialized Client resetting connections at the passed resetInterval ("0"
// means that no reset is performed).
// The underlying http.Client used will be created using the passed http client factory.
func NewResetClient(resetInterval time.Duration, httpClientFactory func() *http.Client) *ResetClient {
return &ResetClient{
httpClientFactory: httpClientFactory,
resetInterval: resetInterval,
httpClient: httpClientFactory(),
lastReset: time.Now(),
}
}
// Do wraps (http.Client).Do. Thread safe.
func (c *ResetClient) Do(req *http.Request) (*http.Response, error) {
c.checkReset()
c.mu.RLock()
httpClient := c.httpClient
c.mu.RUnlock()
return httpClient.Do(req)
}
// checkReset checks whether a client reset should be performed, and performs it
// if so
func (c *ResetClient) checkReset() {
if c.resetInterval == 0 {
return
}
c.mu.Lock()
defer c.mu.Unlock()
if time.Since(c.lastReset) < c.resetInterval {
return
}
c.lastReset = time.Now()
// Close idle connections on underlying client. Safe to do while other goroutines use the client.
// This is a best effort: if other goroutine(s) are currently using the client,
// the related open connection(s) will remain open until the client is GC'ed
c.httpClient.CloseIdleConnections()
c.httpClient = c.httpClientFactory()
}

View File

@@ -0,0 +1,621 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package config
import (
"crypto/tls"
"errors"
"net"
"net/http"
"net/url"
"os"
"regexp"
"time"
"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes"
"github.com/DataDog/datadog-agent/pkg/obfuscate"
"github.com/DataDog/datadog-agent/pkg/remoteconfig/state"
"github.com/DataDog/datadog-agent/pkg/trace/log"
"github.com/DataDog/datadog-agent/pkg/trace/traceutil"
)
// ServiceName specifies the service name used in the operating system
// (e.g. for service registration and process identification).
const ServiceName = "datadog-trace-agent"

// ErrMissingAPIKey is returned when the config could not be validated due to missing API key.
var ErrMissingAPIKey = errors.New("you must specify an API Key, either via a configuration file or the DD_API_KEY env var")
// Endpoint specifies an endpoint that the trace agent will write data (traces, stats & services) to.
type Endpoint struct {
	// APIKey authenticates writes to the endpoint; it is excluded from JSON
	// marshaling so it cannot leak into serialized configuration.
	APIKey string `json:"-"` // never marshal this
	// Host is the endpoint host to write to.
	Host string
	// NoProxy will be set to true when the proxy setting for the trace API endpoint
	// needs to be ignored (e.g. it is part of the "no_proxy" list in the yaml settings).
	NoProxy bool
}

// TelemetryEndpointPrefix specifies the prefix of the telemetry endpoint URL.
const TelemetryEndpointPrefix = "https://instrumentation-telemetry-intake."
// OTLP holds the configuration for the OpenTelemetry receiver.
type OTLP struct {
	// BindHost specifies the host to bind the receiver to.
	BindHost string `mapstructure:"-"`
	// GRPCPort specifies the port to use for the gRPC receiver.
	// If unset (or 0), the receiver will be off.
	GRPCPort int `mapstructure:"grpc_port"`
	// SpanNameRemappings is the map of datadog span names and preferred name to map to. This can be used to
	// automatically map Datadog Span Operation Names to an updated value. All entries should be key/value pairs.
	SpanNameRemappings map[string]string `mapstructure:"span_name_remappings"`
	// SpanNameAsResourceName specifies whether the OpenTelemetry span's name should be
	// used as the Datadog span's operation name. By default (when this is false), the
	// operation name is deduced from a combination between the instrumentation scope
	// name and the span kind.
	//
	// For context, the OpenTelemetry 'Span Name' is equivalent to the Datadog 'resource name'.
	// The Datadog Span's Operation Name equivalent in OpenTelemetry does not exist, but the span's
	// kind comes close.
	SpanNameAsResourceName bool `mapstructure:"span_name_as_resource_name"`
	// MaxRequestBytes specifies the maximum number of bytes that will be read
	// from an incoming HTTP request.
	MaxRequestBytes int64 `mapstructure:"-"`
	// ProbabilisticSampling specifies the percentage of traces to ingest. Exceptions are made for errors
	// and rare traces (outliers) if "RareSamplerEnabled" is true. Invalid values are equivalent to 100.
	// If spans have the "sampling.priority" attribute set, probabilistic sampling is skipped and the user's
	// decision is followed.
	ProbabilisticSampling float64
	// AttributesTranslator specifies an OTLP to Datadog attributes translator.
	AttributesTranslator *attributes.Translator `mapstructure:"-"`
}
// ObfuscationConfig holds the configuration for obfuscating sensitive data
// for various span types. It is converted into an obfuscate.Config by Export.
type ObfuscationConfig struct {
	// ES holds the obfuscation configuration for ElasticSearch bodies.
	ES obfuscate.JSONConfig `mapstructure:"elasticsearch"`
	// OpenSearch holds the obfuscation configuration for OpenSearch bodies.
	OpenSearch obfuscate.JSONConfig `mapstructure:"opensearch"`
	// Mongo holds the obfuscation configuration for MongoDB queries.
	Mongo obfuscate.JSONConfig `mapstructure:"mongodb"`
	// SQLExecPlan holds the obfuscation configuration for SQL Exec Plans. This is strictly for safety related obfuscation,
	// not normalization. Normalization of exec plans is configured in SQLExecPlanNormalize.
	SQLExecPlan obfuscate.JSONConfig `mapstructure:"sql_exec_plan"`
	// SQLExecPlanNormalize holds the normalization configuration for SQL Exec Plans.
	SQLExecPlanNormalize obfuscate.JSONConfig `mapstructure:"sql_exec_plan_normalize"`
	// HTTP holds the obfuscation settings for HTTP URLs.
	HTTP obfuscate.HTTPConfig `mapstructure:"http"`
	// RemoveStackTraces specifies whether stack traces should be removed.
	// More specifically "error.stack" tag values will be cleared.
	RemoveStackTraces bool `mapstructure:"remove_stack_traces"`
	// Redis holds the configuration for obfuscating the "redis.raw_command" tag
	// for spans of type "redis".
	Redis obfuscate.RedisConfig `mapstructure:"redis"`
	// Memcached holds the configuration for obfuscating the "memcached.command" tag
	// for spans of type "memcached".
	Memcached obfuscate.MemcachedConfig `mapstructure:"memcached"`
	// CreditCards holds the configuration for obfuscating credit cards.
	CreditCards obfuscate.CreditCardsConfig `mapstructure:"credit_cards"`
}
// Export returns an obfuscate.Config matching o. The SQL portion is derived
// from conf's feature flags; everything else is copied field-for-field from o.
// A debug-level logger is wired in so obfuscator diagnostics land in the
// trace-agent log.
func (o *ObfuscationConfig) Export(conf *AgentConfig) obfuscate.Config {
	sqlCfg := obfuscate.SQLConfig{
		TableNames:       conf.HasFeature("table_names"),
		ReplaceDigits:    conf.HasFeature("quantize_sql_tables") || conf.HasFeature("replace_sql_digits"),
		KeepSQLAlias:     conf.HasFeature("keep_sql_alias"),
		DollarQuotedFunc: conf.HasFeature("dollar_quoted_func"),
		Cache:            conf.HasFeature("sql_cache"),
	}
	return obfuscate.Config{
		SQL:                  sqlCfg,
		ES:                   o.ES,
		OpenSearch:           o.OpenSearch,
		Mongo:                o.Mongo,
		SQLExecPlan:          o.SQLExecPlan,
		SQLExecPlanNormalize: o.SQLExecPlanNormalize,
		HTTP:                 o.HTTP,
		Redis:                o.Redis,
		Memcached:            o.Memcached,
		CreditCard:           o.CreditCards,
		Logger:               new(debugLogger),
	}
}
// debugLogger adapts the package-level log to the logger interface expected
// by the obfuscate package (see Export).
type debugLogger struct{}

// Debugf forwards formatted messages to the package debug log.
func (debugLogger) Debugf(format string, params ...interface{}) {
	log.Debugf(format, params...)
}
// Enablable can represent any option that has an "enabled" boolean sub-field.
type Enablable struct {
	// Enabled reports whether the option is turned on.
	Enabled bool `mapstructure:"enabled"`
}

// TelemetryConfig holds Instrumentation telemetry Endpoints information.
type TelemetryConfig struct {
	// Enabled reports whether instrumentation telemetry forwarding is on.
	Enabled bool `mapstructure:"enabled"`
	// Endpoints is the set of hosts and API keys telemetry is forwarded to.
	Endpoints []*Endpoint
}

// ReplaceRule specifies a replace rule.
type ReplaceRule struct {
	// Name specifies the name of the tag that the replace rule addresses. However,
	// some exceptions apply such as:
	// • "resource.name" will target the resource
	// • "*" will target all tags and the resource
	Name string `mapstructure:"name"`
	// Pattern specifies the regexp pattern to be used when replacing. It must compile.
	Pattern string `mapstructure:"pattern"`
	// Re holds the compiled Pattern and is only used internally.
	Re *regexp.Regexp `mapstructure:"-"`
	// Repl specifies the replacement string to be used when Pattern matches.
	Repl string `mapstructure:"repl"`
}

// WriterConfig specifies configuration for an API writer.
type WriterConfig struct {
	// ConnectionLimit specifies the maximum number of concurrent outgoing
	// connections allowed for the sender.
	ConnectionLimit int `mapstructure:"connection_limit"`
	// QueueSize specifies the maximum number or payloads allowed to be queued
	// in the sender.
	QueueSize int `mapstructure:"queue_size"`
	// FlushPeriodSeconds specifies the frequency at which the writer's buffer
	// will be flushed to the sender, in seconds. Fractions are permitted.
	FlushPeriodSeconds float64 `mapstructure:"flush_period_seconds"`
}
// FargateOrchestratorName is a Fargate orchestrator name.
type FargateOrchestratorName string

const (
	// OrchestratorECS represents AWS ECS
	OrchestratorECS FargateOrchestratorName = "ECS"
	// OrchestratorEKS represents AWS EKS
	OrchestratorEKS FargateOrchestratorName = "EKS"
	// OrchestratorUnknown is used when we cannot retrieve the orchestrator
	OrchestratorUnknown FargateOrchestratorName = "Unknown"
)

// ProfilingProxyConfig holds the settings used when proxying profiling
// payloads to the Datadog intake.
type ProfilingProxyConfig struct {
	// DDURL is the main intake URL to forward profiles to.
	DDURL string
	// AdditionalEndpoints is a map of additional profiling intake URLs to API keys.
	AdditionalEndpoints map[string][]string
}
// EVPProxy contains the settings for the EVPProxy proxy.
type EVPProxy struct {
	// Enabled reports whether EVPProxy is enabled (true by default).
	Enabled bool
	// DDURL is the Datadog site to forward payloads to (defaults to the Site setting if not set).
	DDURL string
	// APIKey is the main API Key (defaults to the main API key).
	APIKey string `json:"-"` // Never marshal this field
	// ApplicationKey to be used for requests with the X-Datadog-NeedsAppKey set (defaults to the top-level Application Key).
	ApplicationKey string `json:"-"` // Never marshal this field
	// AdditionalEndpoints is a map of additional Datadog sites to API keys.
	AdditionalEndpoints map[string][]string
	// MaxPayloadSize indicates the size at which payloads will be rejected, in bytes.
	MaxPayloadSize int64
	// ReceiverTimeout indicates the maximum time an EVPProxy request can take. Value in seconds.
	ReceiverTimeout int
}

// InstallSignatureConfig contains the information on how the agent was installed
// and a unique identifier that distinguishes this agent from others.
type InstallSignatureConfig struct {
	// Found reports whether an install signature was present on disk.
	Found bool `json:"-"`
	InstallID   string `json:"install_id"`
	InstallType string `json:"install_type"`
	InstallTime int64  `json:"install_time"`
}

// DebuggerProxyConfig contains the settings used when proxying Live Debugger
// payloads to the Datadog intake.
type DebuggerProxyConfig struct {
	// DDURL is the main intake URL to forward payloads to.
	DDURL string
	// APIKey is the API key used for the main intake.
	APIKey string `json:"-"` // Never marshal this field
	// AdditionalEndpoints is a map of additional Datadog sites to API keys.
	AdditionalEndpoints map[string][]string `json:"-"` // Never marshal this field
}

// SymDBProxyConfig contains the settings used when proxying Symbol Database
// payloads to the Datadog intake.
type SymDBProxyConfig struct {
	// DDURL is the main intake URL to forward payloads to.
	DDURL string
	// APIKey is the API key used for the main intake.
	APIKey string `json:"-"` // Never marshal this field
	// AdditionalEndpoints is a map of additional Datadog endpoints to API keys.
	AdditionalEndpoints map[string][]string `json:"-"` // Never marshal this field
}
// AgentConfig handles the interpretation of the configuration (with default
// behaviors) in one place. It is also a simple structure to share across all
// the Agent components, with 100% safe and reliable values.
// It is exposed with expvar, so make sure to exclude any sensible field
// from JSON encoding. Use New() to create an instance.
type AgentConfig struct {
	// Features is the set of enabled feature flags; see HasFeature/AllFeatures.
	Features map[string]struct{}

	Enabled      bool
	AgentVersion string
	GitCommit    string
	Site         string // the intake site to use (e.g. "datadoghq.com")

	// FargateOrchestrator specifies the name of the Fargate orchestrator. e.g. "ECS", "EKS", "Unknown"
	FargateOrchestrator FargateOrchestratorName

	// Global
	Hostname   string
	DefaultEnv string // the traces will default to this environment
	ConfigPath string // the source of this config, if any

	// Endpoints specifies the set of hosts and API keys where traces and stats
	// will be uploaded to. The first endpoint is the main configuration endpoint;
	// any following ones are read from the 'additional_endpoints' parts of the
	// configuration file, if present.
	Endpoints []*Endpoint

	// Concentrator
	BucketInterval         time.Duration // the size of our pre-aggregation per bucket
	ExtraAggregators       []string      // DEPRECATED
	PeerTagsAggregation    bool          // enables/disables stats aggregation for peer entity tags, used by Concentrator and ClientStatsAggregator
	ComputeStatsBySpanKind bool          // enables/disables the computing of stats based on a span's `span.kind` field
	PeerTags               []string      // additional tags to use for peer entity stats aggregation

	// Sampler configuration
	ExtraSampleRate float64
	TargetTPS       float64
	ErrorTPS        float64
	MaxEPS          float64
	MaxRemoteTPS    float64

	// Rare Sampler configuration
	RareSamplerEnabled        bool
	RareSamplerTPS            int
	RareSamplerCooldownPeriod time.Duration
	RareSamplerCardinality    int

	// Probabilistic Sampler configuration
	ProbabilisticSamplerEnabled            bool
	ProbabilisticSamplerHashSeed           uint32
	ProbabilisticSamplerSamplingPercentage float32

	// Receiver
	ReceiverEnabled bool   // specifies whether Receiver listeners are enabled. Unless OTLPReceiver is used, this should always be true.
	ReceiverHost    string
	ReceiverPort    int
	ReceiverSocket  string // if not empty, UDS will be enabled on unix://<receiver_socket>
	ConnectionLimit int    // for rate-limiting, how many unique connections to allow in a lease period (30s)
	ReceiverTimeout int
	MaxRequestBytes int64 // specifies the maximum allowed request size for incoming trace payloads
	TraceBuffer     int   // specifies the number of traces to buffer before blocking.
	Decoders        int   // specifies the number of traces that can be concurrently decoded.
	MaxConnections  int   // specifies the maximum number of concurrent incoming connections allowed.
	DecoderTimeout  int   // specifies the maximum time in milliseconds that the decoders will wait for a turn to accept a payload before returning 429

	WindowsPipeName        string
	PipeBufferSize         int
	PipeSecurityDescriptor string

	GUIPort string // the port of the Datadog Agent GUI (for control access)

	// Writers
	SynchronousFlushing     bool // Mode where traces are only submitted when FlushAsync is called, used for Serverless Extension
	StatsWriter             *WriterConfig
	TraceWriter             *WriterConfig
	ConnectionResetInterval time.Duration // frequency at which outgoing connections are reset. 0 means no reset is performed
	// MaxSenderRetries is the maximum number of retries that a sender will perform
	// before giving up. Note that the sender may not perform all MaxSenderRetries if
	// the agent is under load and the outgoing payload queue is full. In that
	// case, the sender will drop failed payloads when it is unable to enqueue
	// them for another retry.
	MaxSenderRetries int
	// HTTP client used in writer connections. If nil, default client values will be used.
	HTTPClientFunc func() *http.Client `json:"-"`

	// internal telemetry
	StatsdEnabled  bool
	StatsdHost     string
	StatsdPort     int
	StatsdPipeName string // for Windows Pipes
	StatsdSocket   string // for UDS Sockets

	// logging
	LogFilePath string

	// watchdog
	MaxMemory        float64       // MaxMemory is the threshold (bytes allocated) above which program panics and exits, to be restarted
	MaxCPU           float64       // MaxCPU is the max UserAvg CPU the program should consume
	WatchdogInterval time.Duration // WatchdogInterval is the delay between 2 watchdog checks

	// http/s proxying
	ProxyURL          *url.URL
	SkipSSLValidation bool

	// filtering
	Ignore map[string][]string

	// ReplaceTags is used to filter out sensitive information from tag values.
	// It maps tag keys to a set of replacements. Only supported in A6.
	ReplaceTags []*ReplaceRule

	// GlobalTags list metadata that will be added to all spans
	GlobalTags map[string]string

	// transaction analytics
	AnalyzedRateByServiceLegacy map[string]float64
	AnalyzedSpansByService      map[string]map[string]float64

	// infrastructure agent binary
	DDAgentBin string

	// Obfuscation holds sensitive data obfuscator's configuration.
	Obfuscation *ObfuscationConfig

	// MaxResourceLen the maximum length the resource can have
	MaxResourceLen int

	// RequireTags specifies a list of tags which must be present on the root span in order for a trace to be accepted.
	RequireTags []*Tag

	// RejectTags specifies a list of tags which must be absent on the root span in order for a trace to be accepted.
	RejectTags []*Tag

	// RequireTagsRegex specifies a list of regexp for tags which must be present on the root span in order for a trace to be accepted.
	RequireTagsRegex []*TagRegex

	// RejectTagsRegex specifies a list of regexp for tags which must be absent on the root span in order for a trace to be accepted.
	RejectTagsRegex []*TagRegex

	// OTLPReceiver holds the configuration for OpenTelemetry receiver.
	OTLPReceiver *OTLP

	// ProfilingProxy specifies settings for the profiling proxy.
	ProfilingProxy ProfilingProxyConfig

	// Telemetry settings
	TelemetryConfig *TelemetryConfig

	// EVPProxy contains the settings for the EVPProxy proxy.
	EVPProxy EVPProxy

	// DebuggerProxy contains the settings for the Live Debugger proxy.
	DebuggerProxy DebuggerProxyConfig

	// DebuggerDiagnosticsProxy contains the settings for the Live Debugger diagnostics proxy.
	DebuggerDiagnosticsProxy DebuggerProxyConfig

	// SymDBProxy contains the settings for the Symbol Database proxy.
	SymDBProxy SymDBProxyConfig

	// Proxy specifies a function to return a proxy for a given Request.
	// See (net/http.Transport).Proxy for more details.
	Proxy func(*http.Request) (*url.URL, error) `json:"-"`

	// MaxCatalogEntries specifies the maximum number of services to be added to the priority sampler's
	// catalog. If not set (0) it will default to 5000.
	MaxCatalogEntries int

	// RemoteConfigClient retrieves sampling updates from the remote config backend
	RemoteConfigClient RemoteClient `json:"-"`

	// ContainerTags, when set, resolves the tags for a given container ID.
	ContainerTags func(cid string) ([]string, error) `json:"-"`

	// ContainerProcRoot is the root dir for `proc` info
	ContainerProcRoot string

	// DebugServerPort defines the port used by the debug server
	DebugServerPort int

	// InstallSignature identifies how and when this agent was installed.
	InstallSignature InstallSignatureConfig

	// LambdaFunctionName is the name of the current Lambda function, when running there.
	LambdaFunctionName string
}
// RemoteClient is used to receive APM sampling updates from a remote source.
// This is an interface around the client provided by pkg/config/remote to allow for easier testing.
type RemoteClient interface {
	// Close shuts the client down.
	Close()
	// Start begins polling for updates.
	Start()
	// Subscribe registers a callback for updates to the given product.
	Subscribe(string, func(update map[string]state.RawConfig, applyStateCallback func(string, state.ApplyStatus)))
	// UpdateApplyStatus reports the application status of a config path.
	UpdateApplyStatus(cfgPath string, status state.ApplyStatus)
}

// Tag represents a key/value pair.
type Tag struct {
	K, V string
}

// TagRegex represents a key/value regex pattern pair.
type TagRegex struct {
	K string
	V *regexp.Regexp
}
// New returns a configuration with the default values.
// Callers typically mutate the result before use; every value below is a
// safe production default.
func New() *AgentConfig {
	return &AgentConfig{
		Enabled:             true,
		DefaultEnv:          "none",
		Endpoints:           []*Endpoint{{Host: "https://trace.agent.datadoghq.com"}},
		FargateOrchestrator: OrchestratorUnknown,
		Site:                "datadoghq.com",
		MaxCatalogEntries:   5000,

		BucketInterval: time.Duration(10) * time.Second,

		ExtraSampleRate: 1.0,
		TargetTPS:       10,
		ErrorTPS:        10,
		MaxEPS:          200,
		MaxRemoteTPS:    100,

		RareSamplerEnabled:        false,
		RareSamplerTPS:            5,
		RareSamplerCooldownPeriod: 5 * time.Minute,
		RareSamplerCardinality:    200,

		ReceiverEnabled: true,
		ReceiverHost:    "localhost",
		ReceiverPort:    8126,
		MaxRequestBytes: 25 * 1024 * 1024, // 25MB
		PipeBufferSize:  1_000_000,
		// Windows pipe security descriptor: allow GENERIC_ALL to Everyone.
		PipeSecurityDescriptor: "D:AI(A;;GA;;;WD)",
		GUIPort:                "5002",

		StatsWriter:             new(WriterConfig),
		TraceWriter:             new(WriterConfig),
		ConnectionResetInterval: 0, // disabled
		MaxSenderRetries:        4,

		StatsdHost:    "localhost",
		StatsdPort:    8125,
		StatsdEnabled: true,

		LambdaFunctionName: os.Getenv("AWS_LAMBDA_FUNCTION_NAME"),

		MaxMemory:        5e8, // 500 Mb, should rarely go above 50 Mb
		MaxCPU:           0.5, // 50%, well behaving agents keep below 5%
		WatchdogInterval: 10 * time.Second,

		Ignore:                      make(map[string][]string),
		AnalyzedRateByServiceLegacy: make(map[string]float64),
		AnalyzedSpansByService:      make(map[string]map[string]float64),
		Obfuscation:                 &ObfuscationConfig{},
		MaxResourceLen:              5000,

		GlobalTags: computeGlobalTags(),

		Proxy:         http.ProxyFromEnvironment,
		OTLPReceiver:  &OTLP{},
		ContainerTags: noopContainerTagsFunc,
		TelemetryConfig: &TelemetryConfig{
			Endpoints: []*Endpoint{{Host: TelemetryEndpointPrefix + "datadoghq.com"}},
		},
		EVPProxy: EVPProxy{
			Enabled:        true,
			MaxPayloadSize: 5 * 1024 * 1024,
		},

		Features: make(map[string]struct{}),
	}
}
// computeGlobalTags returns the tag set applied to all spans: the Azure App
// Services tags when running in that environment, an empty map otherwise.
func computeGlobalTags() map[string]string {
	if !inAzureAppServices() {
		return make(map[string]string)
	}
	return traceutil.GetAppServicesTags()
}
// ErrContainerTagsFuncNotDefined is returned when the containerTags function is not defined.
var ErrContainerTagsFuncNotDefined = errors.New("containerTags function not defined")

// noopContainerTagsFunc is the default ContainerTags implementation; it
// ignores the container ID and always reports that no resolver is configured.
func noopContainerTagsFunc(string) ([]string, error) {
	return nil, ErrContainerTagsFuncNotDefined
}
// APIKey returns the first (main) endpoint's API key, or the empty string
// when no endpoints are configured.
func (c *AgentConfig) APIKey() string {
	if len(c.Endpoints) > 0 {
		return c.Endpoints[0].APIKey
	}
	return ""
}
// NewHTTPClient returns a new http.Client to be used for outgoing connections to the
// Datadog API. Clients are periodically recreated according to
// c.ConnectionResetInterval. A configured HTTPClientFunc takes precedence;
// otherwise a default client (10s timeout, transport from NewHTTPTransport)
// is built on each reset.
func (c *AgentConfig) NewHTTPClient() *ResetClient {
	factory := c.HTTPClientFunc
	if factory == nil {
		factory = func() *http.Client {
			return &http.Client{
				Timeout:   10 * time.Second,
				Transport: c.NewHTTPTransport(),
			}
		}
	}
	return NewResetClient(c.ConnectionResetInterval, factory)
}
// NewHTTPTransport returns a new http.Transport to be used for outgoing connections to
// the Datadog API. TLS verification honors c.SkipSSLValidation and requests
// are routed through c.Proxy.
func (c *AgentConfig) NewHTTPTransport() *http.Transport {
	dialer := &net.Dialer{
		Timeout:   30 * time.Second,
		KeepAlive: 30 * time.Second,
		DualStack: true,
	}
	// Remaining field values mirror http.DefaultTransport (go1.12).
	return &http.Transport{
		TLSClientConfig:       &tls.Config{InsecureSkipVerify: c.SkipSSLValidation},
		Proxy:                 c.Proxy,
		DialContext:           dialer.DialContext,
		MaxIdleConns:          100,
		IdleConnTimeout:       30 * time.Second,
		TLSHandshakeTimeout:   10 * time.Second,
		ExpectContinueTimeout: 1 * time.Second,
	}
}
// HasFeature returns true if the agent has the given feature flag.
func (c *AgentConfig) HasFeature(feat string) bool {
	_, found := c.Features[feat]
	return found
}

// AllFeatures returns a slice of all the feature flags the agent has.
func (c *AgentConfig) AllFeatures() []string {
	all := make([]string, 0, len(c.Features))
	for feat := range c.Features {
		all = append(all, feat)
	}
	return all
}
// ConfiguredPeerTags returns the set of peer tags that should be used
// for aggregation based on the various config values and the base set of tags.
// It returns nil when peer tag aggregation is disabled.
func (c *AgentConfig) ConfiguredPeerTags() []string {
	if c.PeerTagsAggregation {
		return preparePeerTags(append(basePeerTags, c.PeerTags...))
	}
	return nil
}
// inAzureAppServices reports whether the process appears to run inside Azure
// App Services, detected through platform environment variables (one set on
// Linux, one on Windows).
func inAzureAppServices() bool {
	for _, key := range []string{"WEBSITE_STACK", "WEBSITE_APPSERVICEAPPLOGS_TRACE_ENABLED"} {
		if _, ok := os.LookupEnv(key); ok {
			return true
		}
	}
	return false
}

View File

@@ -0,0 +1,55 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package config
import (
_ "embed" //nolint:revive
"sort"
"strings"
"github.com/DataDog/datadog-agent/pkg/util/log"
"gopkg.in/ini.v1"
)
// peerTagFile holds the embedded contents of peer_tags.ini, the generated
// list of peer tag precursors per aggregation key.
//
//go:embed peer_tags.ini
var peerTagFile []byte

// basePeerTags is the base set of peer tag precursors (tags from which peer tags
// are derived) we aggregate on when peer tag aggregation is enabled.
// It is computed once at package init from the embedded INI; if parsing
// fails it falls back to just {"_dd.base_service"}.
var basePeerTags = func() []string {
	var precursors []string = []string{"_dd.base_service"}

	cfg, err := ini.Load(peerTagFile)
	if err != nil {
		log.Error("Error loading file for peer tags: ", err)
		return precursors
	}

	// Each key's value is a comma-separated list of precursor tag names.
	peerTags := cfg.Section("dd.apm.peer.tags").Keys()
	for _, t := range peerTags {
		ps := strings.Split(t.Value(), ",")
		precursors = append(precursors, ps...)
	}
	sort.Strings(precursors)

	return precursors
}()
// preparePeerTags deduplicates and sorts the given peer tag precursors,
// returning nil for an empty input.
func preparePeerTags(tags []string) []string {
	if len(tags) == 0 {
		return nil
	}
	seen := make(map[string]struct{}, len(tags))
	deduped := make([]string, 0, len(tags))
	for _, tag := range tags {
		if _, dup := seen[tag]; dup {
			continue
		}
		seen[tag] = struct{}{}
		deduped = append(deduped, tag)
	}
	sort.Strings(deduped)
	return deduped
}

View File

@@ -0,0 +1,18 @@
# Generated - DO NOT EDIT
# Source: https://github.com/DataDog/semantic-core/
[dd.apm.peer.tags]
peer.aws.dynamodb.table = "tablename"
peer.aws.kinesis.stream = "streamname"
peer.aws.s3.bucket = "bucketname,aws.s3.bucket"
peer.aws.sqs.queue = "queuename"
peer.cassandra.contact.points = "db.cassandra.contact.points"
peer.couchbase.seed.nodes = "db.couchbase.seed.nodes"
peer.db.name = "db.name,mongodb.db,db.instance,cassandra.keyspace,db.namespace"
peer.db.system = "db.system"
peer.hostname = "peer.hostname,hostname,net.peer.name,db.hostname,network.destination.name,grpc.host,http.host,server.address,http.server_name,out.host,dns.hostname"
peer.kafka.bootstrap.servers = "messaging.kafka.bootstrap.servers"
peer.messaging.destination = "topicname,messaging.destination,messaging.destination.name,messaging.rabbitmq.exchange,amqp.destination,amqp.queue,amqp.exchange,msmq.queue.path,aws.queue.name"
peer.messaging.system = "messaging.system"
peer.rpc.service = "rpc.service"
peer.rpc.system = "rpc.system"
peer.service = "peer.service"

View File

@@ -0,0 +1,97 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
//go:build test
package log
import (
"bytes"
"fmt"
"sync"
)
// Compile-time check that *buflogger satisfies the Logger interface.
var _ Logger = (*buflogger)(nil)

// NewBufferLogger creates a new Logger which outputs everything to the given buffer.
// It is synchronised for concurrent use; as such, it is not optimal for use outside
// testing environments.
func NewBufferLogger(out *bytes.Buffer) Logger {
	return &buflogger{buf: out}
}

// buflogger is a Logger implementation that writes into an in-memory buffer.
type buflogger struct {
	// mu serializes concurrent writes to buf.
	mu  sync.Mutex
	buf *bytes.Buffer
}
// logWithLevel appends "[LEVEL] message" to the buffer, holding the mutex for
// the duration of the write.
func (b *buflogger) logWithLevel(lvl string, msg string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	fmt.Fprintf(b.buf, "[%s] %s", lvl, msg)
}
// Trace implements Logger.
func (b *buflogger) Trace(v ...interface{}) {
	b.logWithLevel("TRACE", fmt.Sprint(v...))
}

// Tracef implements Logger.
func (b *buflogger) Tracef(format string, params ...interface{}) {
	b.logWithLevel("TRACE", fmt.Sprintf(format, params...))
}

// Debug implements Logger.
func (b *buflogger) Debug(v ...interface{}) {
	b.logWithLevel("DEBUG", fmt.Sprint(v...))
}

// Debugf implements Logger.
func (b *buflogger) Debugf(format string, params ...interface{}) {
	b.logWithLevel("DEBUG", fmt.Sprintf(format, params...))
}

// Info implements Logger.
func (b *buflogger) Info(v ...interface{}) {
	b.logWithLevel("INFO", fmt.Sprint(v...))
}

// Infof implements Logger.
func (b *buflogger) Infof(format string, params ...interface{}) {
	b.logWithLevel("INFO", fmt.Sprintf(format, params...))
}

// Warn implements Logger. The returned error is always nil.
func (b *buflogger) Warn(v ...interface{}) error {
	b.logWithLevel("WARN", fmt.Sprint(v...))
	return nil
}

// Warnf implements Logger. The returned error is always nil.
func (b *buflogger) Warnf(format string, params ...interface{}) error {
	b.logWithLevel("WARN", fmt.Sprintf(format, params...))
	return nil
}

// Error implements Logger. The returned error is always nil.
func (b *buflogger) Error(v ...interface{}) error {
	b.logWithLevel("ERROR", fmt.Sprint(v...))
	return nil
}

// Errorf implements Logger. The returned error is always nil.
func (b *buflogger) Errorf(format string, params ...interface{}) error {
	b.logWithLevel("ERROR", fmt.Sprintf(format, params...))
	return nil
}

// Critical implements Logger. The returned error is always nil.
func (b *buflogger) Critical(v ...interface{}) error {
	b.logWithLevel("CRITICAL", fmt.Sprint(v...))
	return nil
}

// Criticalf implements Logger. The returned error is always nil.
func (b *buflogger) Criticalf(format string, params ...interface{}) error {
	b.logWithLevel("CRITICAL", fmt.Sprintf(format, params...))
	return nil
}

// Flush implements Logger. Writes go straight to the buffer, so this is a no-op.
func (b *buflogger) Flush() {}

View File

@@ -0,0 +1,196 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
// Package log implements the trace-agent logger.
package log
import (
"sync"
)
var (
	// mu guards logger: readers take the read lock, SetLogger the write lock.
	mu sync.RWMutex
	// logger is the active Logger; it stays NoopLogger until SetLogger is called.
	logger Logger = NoopLogger
)
// SetLogger sets l as the default Logger and returns the old logger.
func SetLogger(l Logger) Logger {
	mu.Lock()
	defer mu.Unlock()
	previous := logger
	logger = l
	return previous
}

// IsSet returns whether the logger has been set up.
func IsSet() bool {
	mu.Lock()
	defer mu.Unlock()
	return logger != NoopLogger
}
// Logger implements the core logger interface. Leveled formatting methods
// mirror the leveled plain methods; Warn and above return an error so
// implementations can surface delivery failures (the built-in ones never do).
type Logger interface {
	Trace(v ...interface{})
	Tracef(format string, params ...interface{})
	Debug(v ...interface{})
	Debugf(format string, params ...interface{})
	Info(v ...interface{})
	Infof(format string, params ...interface{})
	Warn(v ...interface{}) error
	Warnf(format string, params ...interface{}) error
	Error(v ...interface{}) error
	Errorf(format string, params ...interface{}) error
	Critical(v ...interface{}) error
	Criticalf(format string, params ...interface{}) error
	// Flush flushes any buffered messages.
	Flush()
}
// Trace formats message using the default formats for its operands
// and writes to log with level = Trace
func Trace(v ...interface{}) {
	mu.RLock()
	defer mu.RUnlock()
	logger.Trace(v...)
}

// Tracef formats message according to format specifier
// and writes to log with level = Trace.
func Tracef(format string, params ...interface{}) {
	mu.RLock()
	defer mu.RUnlock()
	logger.Tracef(format, params...)
}

// Debug formats message using the default formats for its operands
// and writes to log with level = Debug
func Debug(v ...interface{}) {
	mu.RLock()
	defer mu.RUnlock()
	logger.Debug(v...)
}

// Debugf formats message according to format specifier
// and writes to log with level = Debug.
func Debugf(format string, params ...interface{}) {
	mu.RLock()
	defer mu.RUnlock()
	logger.Debugf(format, params...)
}

// Info formats message using the default formats for its operands
// and writes to log with level = Info
func Info(v ...interface{}) {
	mu.RLock()
	defer mu.RUnlock()
	logger.Info(v...)
}

// Infof formats message according to format specifier
// and writes to log with level = Info.
func Infof(format string, params ...interface{}) {
	mu.RLock()
	defer mu.RUnlock()
	logger.Infof(format, params...)
}

// Warn formats message using the default formats for its operands
// and writes to log with level = Warn
func Warn(v ...interface{}) {
	mu.RLock()
	defer mu.RUnlock()
	logger.Warn(v...) //nolint:errcheck
}

// Warnf formats message according to format specifier
// and writes to log with level = Warn.
func Warnf(format string, params ...interface{}) {
	mu.RLock()
	defer mu.RUnlock()
	logger.Warnf(format, params...) //nolint:errcheck
}

// Error formats message using the default formats for its operands
// and writes to log with level = Error
func Error(v ...interface{}) {
	mu.RLock()
	defer mu.RUnlock()
	logger.Error(v...) //nolint:errcheck
}

// Errorf formats message according to format specifier
// and writes to log with level = Error.
func Errorf(format string, params ...interface{}) {
	mu.RLock()
	defer mu.RUnlock()
	logger.Errorf(format, params...) //nolint:errcheck
}

// Critical formats message using the default formats for its operands
// and writes to log with level = Critical
func Critical(v ...interface{}) {
	mu.RLock()
	defer mu.RUnlock()
	logger.Critical(v...) //nolint:errcheck
}

// Criticalf formats message according to format specifier
// and writes to log with level = Critical.
func Criticalf(format string, params ...interface{}) {
	mu.RLock()
	defer mu.RUnlock()
	logger.Criticalf(format, params...) //nolint:errcheck
}

// Flush flushes all the messages in the logger.
func Flush() {
	mu.RLock()
	defer mu.RUnlock()
	logger.Flush()
}
// NoopLogger is a logger which has no effect upon calling. It is the default
// logger until SetLogger is called.
var NoopLogger = noopLogger{}

// noopLogger discards every message; error-returning methods always return nil.
type noopLogger struct{}

// Trace implements Logger.
func (noopLogger) Trace(_ ...interface{}) {}

// Tracef implements Logger.
func (noopLogger) Tracef(_ string, _ ...interface{}) {}

// Debug implements Logger.
func (noopLogger) Debug(_ ...interface{}) {}

// Debugf implements Logger.
func (noopLogger) Debugf(_ string, _ ...interface{}) {}

// Info implements Logger.
func (noopLogger) Info(_ ...interface{}) {}

// Infof implements Logger.
func (noopLogger) Infof(_ string, _ ...interface{}) {}

// Warn implements Logger.
func (noopLogger) Warn(_ ...interface{}) error { return nil }

// Warnf implements Logger.
func (noopLogger) Warnf(_ string, _ ...interface{}) error { return nil }

// Error implements Logger.
func (noopLogger) Error(_ ...interface{}) error { return nil }

// Errorf implements Logger.
func (noopLogger) Errorf(_ string, _ ...interface{}) error { return nil }

// Critical implements Logger.
func (noopLogger) Critical(_ ...interface{}) error { return nil }

// Criticalf implements Logger.
func (noopLogger) Criticalf(_ string, _ ...interface{}) error { return nil }

// Flush implements Logger.
func (noopLogger) Flush() {}

View File

@@ -0,0 +1,63 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package log
import (
"time"
"go.uber.org/atomic"
)
// NewThrottled returns a new throttled logger. The returned logger will allow up to n calls in
// a time period of length d.
func NewThrottled(n int, d time.Duration) *ThrottledLogger {
	return &ThrottledLogger{
		n: uint64(n),
		c: atomic.NewUint64(0),
		d: d,
	}
}

// ThrottledLogger limits the number of log calls during a time window. To create a new logger
// use NewThrottled.
type ThrottledLogger struct {
	n uint64         // number of log calls allowed during interval d
	c *atomic.Uint64 // number of log calls performed during an interval d
	d time.Duration
}
// loggerFunc is the shape of the package-level Errorf/Warnf helpers that
// ThrottledLogger delegates to.
type loggerFunc func(format string, params ...interface{})

// log invokes logFunc unless the rate limit for the current window has been
// reached. The atomic counter is incremented on every call; the first call of
// a window arms a timer that zeroes the counter after d. When the count first
// exceeds n a single "pausing" notice is emitted, and further calls are
// dropped until the reset fires.
func (tl *ThrottledLogger) log(logFunc loggerFunc, format string, params ...interface{}) {
	c := tl.c.Inc() - 1
	if c == 0 {
		// first call, trigger the reset
		time.AfterFunc(tl.d, func() { tl.c.Store(0) })
	}
	if c >= tl.n {
		if c == tl.n {
			// Emit the pause notice exactly once per window.
			logFunc("Too many similar messages, pausing up to %s...", tl.d)
		}
		return
	}
	logFunc(format, params...)
}
// Error logs the message at the error level.
func (tl *ThrottledLogger) Error(format string, params ...interface{}) {
	tl.log(Errorf, format, params...)
}

// Warn logs the message at the warning level.
func (tl *ThrottledLogger) Warn(format string, params ...interface{}) {
	tl.log(Warnf, format, params...)
}

// Write implements io.Writer: each write is logged (throttled) at the error
// level. It always reports the full length written and a nil error.
func (tl *ThrottledLogger) Write(p []byte) (n int, err error) {
	tl.Error(string(p))
	return len(p), nil
}

View File

@@ -0,0 +1,130 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
// Package stats contains the logic to process APM stats.
package stats
import (
"hash/fnv"
"sort"
"strconv"
"strings"
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
"github.com/DataDog/datadog-agent/pkg/trace/log"
)
const (
	// tagStatusCode is the span tag/metric key holding the HTTP status code.
	tagStatusCode = "http.status_code"
	// tagSynthetics is the origin prefix marking synthetics traffic.
	tagSynthetics = "synthetics"
	// tagSpanKind is the span tag holding the span kind.
	tagSpanKind = "span.kind"
	// tagBaseService is the span tag holding the base service name.
	tagBaseService = "_dd.base_service"
)
// Aggregation contains all the dimension on which we aggregate statistics.
type Aggregation struct {
	BucketsAggregationKey
	PayloadAggregationKey
}

// BucketsAggregationKey specifies the key by which a bucket is aggregated.
type BucketsAggregationKey struct {
	Service    string
	Name       string
	Resource   string
	Type       string
	SpanKind   string
	StatusCode uint32
	Synthetics bool
	// PeerTagsHash is the hash of the span's sorted peer tags (see peerTagsHash);
	// 0 means no peer tags.
	PeerTagsHash uint64
	// IsTraceRoot records whether the span is its trace's root span.
	IsTraceRoot pb.Trilean
}

// PayloadAggregationKey specifies the key by which a payload is aggregated.
type PayloadAggregationKey struct {
	Env          string
	Hostname     string
	Version      string
	ContainerID  string
	GitCommitSha string
	ImageTag     string
}
// getStatusCode extracts the HTTP status code for a span, preferring the
// numeric metric over the string meta tag. It returns 0 when no code is
// present or the meta value does not parse as an unsigned 32-bit integer.
func getStatusCode(meta map[string]string, metrics map[string]float64) uint32 {
	if code, ok := metrics[tagStatusCode]; ok {
		// only 7.39.0+, for lesser versions, always use Meta
		return uint32(code)
	}
	raw := meta[tagStatusCode]
	if raw == "" {
		return 0
	}
	parsed, err := strconv.ParseUint(raw, 10, 32)
	if err != nil {
		log.Debugf("Invalid status code %s. Using 0.", raw)
		return 0
	}
	return uint32(parsed)
}
// NewAggregationFromSpan creates a new aggregation from the provided span and env.
// The span is synthetics traffic when its origin carries the synthetics
// prefix, and a trace root when it has no parent.
func NewAggregationFromSpan(s *StatSpan, origin string, aggKey PayloadAggregationKey) Aggregation {
	root := pb.Trilean_FALSE
	if s.parentID == 0 {
		root = pb.Trilean_TRUE
	}
	return Aggregation{
		PayloadAggregationKey: aggKey,
		BucketsAggregationKey: BucketsAggregationKey{
			Resource:     s.resource,
			Service:      s.service,
			Name:         s.name,
			SpanKind:     s.spanKind,
			Type:         s.typ,
			StatusCode:   s.statusCode,
			Synthetics:   strings.HasPrefix(origin, tagSynthetics),
			IsTraceRoot:  root,
			PeerTagsHash: peerTagsHash(s.matchingPeerTags),
		},
	}
}
// peerTagsHash returns a 64-bit FNV-1a hash over the given tags, joined with
// a NUL separator. Tags are sorted first (in place, only when needed) so the
// result is order-independent; an empty or nil slice hashes to 0.
func peerTagsHash(tags []string) uint64 {
	if len(tags) == 0 {
		return 0
	}
	if !sort.StringsAreSorted(tags) {
		sort.Strings(tags)
	}
	var joined []byte
	for i, tag := range tags {
		if i > 0 {
			joined = append(joined, 0)
		}
		joined = append(joined, tag...)
	}
	hasher := fnv.New64a()
	hasher.Write(joined) //nolint:errcheck // fnv.Write never fails
	return hasher.Sum64()
}
// NewAggregationFromGroup gets the Aggregation key of grouped stats.
// Only the bucket-level key can be derived from a ClientGroupedStats; the
// payload-level key is left as its zero value.
func NewAggregationFromGroup(g *pb.ClientGroupedStats) Aggregation {
	return Aggregation{
		BucketsAggregationKey: BucketsAggregationKey{
			Resource:     g.Resource,
			Service:      g.Service,
			Name:         g.Name,
			SpanKind:     g.SpanKind,
			StatusCode:   g.HTTPStatusCode,
			Synthetics:   g.Synthetics,
			PeerTagsHash: peerTagsHash(g.PeerTags),
			IsTraceRoot:  g.IsTraceRoot,
		},
	}
}

View File

@@ -0,0 +1,438 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package stats
import (
"time"
"github.com/DataDog/datadog-agent/pkg/trace/version"
"github.com/DataDog/sketches-go/ddsketch"
"github.com/DataDog/sketches-go/ddsketch/mapping"
"github.com/DataDog/sketches-go/ddsketch/pb/sketchpb"
"github.com/DataDog/sketches-go/ddsketch/store"
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
"github.com/DataDog/datadog-agent/pkg/trace/config"
"github.com/DataDog/datadog-agent/pkg/trace/log"
"github.com/DataDog/datadog-agent/pkg/trace/watchdog"
"github.com/DataDog/datadog-go/v5/statsd"
"google.golang.org/protobuf/proto"
)
const (
	// bucketDuration is the width of one aggregation bucket.
	bucketDuration = 2 * time.Second
	// clientBucketDuration is the bucket duration advertised in flushed payloads.
	clientBucketDuration = 10 * time.Second
	// oldestBucketStart bounds how far in the past a bucket may start before it is flushed.
	oldestBucketStart = 20 * time.Second
)
var (
	// ddsketchMapping is the canonical logarithmic index mapping every sketch is
	// normalized to before merging (see normalizeSketch). The construction error
	// is discarded; relativeAccuracy is a fixed valid constant.
	ddsketchMapping, _ = mapping.NewLogarithmicMapping(relativeAccuracy)
)
// ClientStatsAggregator aggregates client stats payloads on buckets of bucketDuration
// If a single payload is received on a bucket, this Aggregator is a passthrough.
// If two or more payloads collide, their counts will be aggregated into one bucket.
// Multiple payloads will be sent:
// - Original payloads with their distributions will be sent with counts zeroed.
// - A single payload with the bucket aggregated counts will be sent.
// This and the aggregator timestamp alignment ensure that all counts will have at most one point per second per agent for a specific granularity.
// While distributions are not tied to the agent.
type ClientStatsAggregator struct {
	In            chan *pb.ClientStatsPayload // incoming client payloads to aggregate
	writer        Writer                      // sink receiving flushed payloads
	buckets       map[int64]*bucket           // buckets used to aggregate client stats
	conf          *config.AgentConfig
	flushTicker   *time.Ticker // drives periodic flushing (1s period, see NewClientStatsAggregator)
	oldestTs      time.Time    // start of the oldest bucket still accepting data
	agentEnv      string
	agentHostname string
	agentVersion  string
	exit          chan struct{} // closed by Stop to request shutdown
	done          chan struct{} // closed by the run loop after the final flush
	statsd        statsd.ClientInterface
}
// NewClientStatsAggregator initializes a new aggregator ready to be started
func NewClientStatsAggregator(conf *config.AgentConfig, writer Writer, statsd statsd.ClientInterface) *ClientStatsAggregator {
	agg := &ClientStatsAggregator{
		In:          make(chan *pb.ClientStatsPayload, 10),
		buckets:     make(map[int64]*bucket, 20),
		flushTicker: time.NewTicker(time.Second),
		// Accept data as far back as the oldest allowed bucket start.
		oldestTs:      alignAggTs(time.Now().Add(bucketDuration - oldestBucketStart)),
		conf:          conf,
		writer:        writer,
		agentEnv:      conf.DefaultEnv,
		agentHostname: conf.Hostname,
		agentVersion:  conf.AgentVersion,
		exit:          make(chan struct{}),
		done:          make(chan struct{}),
		statsd:        statsd,
	}
	return agg
}
// Start starts the aggregator.
// It launches the run loop goroutine, which consumes payloads from In,
// flushes buckets on every tick, and drains everything on exit.
func (a *ClientStatsAggregator) Start() {
	go func() {
		defer watchdog.LogOnPanic(a.statsd)
		for {
			select {
			case payload := <-a.In:
				a.add(time.Now(), payload)
			case tick := <-a.flushTicker.C:
				a.flushOnTime(tick)
			case <-a.exit:
				// Final drain before signaling completion to Stop.
				a.flushAll()
				close(a.done)
				return
			}
		}
	}()
}
// Stop stops the aggregator. Calling Stop twice will panic.
func (a *ClientStatsAggregator) Stop() {
	close(a.exit) // signals the run loop to flush everything and close a.done
	a.flushTicker.Stop()
	<-a.done // wait for the final flush to complete
}
// flushOnTime flushes all buckets up to flushTs, except the last one.
func (a *ClientStatsAggregator) flushOnTime(now time.Time) {
	cutoff := alignAggTs(now.Add(bucketDuration - oldestBucketStart))
	// Walk bucket starts from the oldest retained one up to the cutoff.
	for ts := a.oldestTs; ts.Before(cutoff); ts = ts.Add(bucketDuration) {
		key := ts.Unix()
		if b, ok := a.buckets[key]; ok {
			a.flush(b.aggregationToPayloads())
			delete(a.buckets, key)
		}
	}
	a.oldestTs = cutoff
}
// flushAll unconditionally flushes every remaining bucket; used on shutdown.
func (a *ClientStatsAggregator) flushAll() {
	for _, b := range a.buckets {
		a.flush(b.aggregationToPayloads())
	}
}
// getAggregationBucketTime returns unix time at which we aggregate the bucket.
// We timeshift payloads older than a.oldestTs to a.oldestTs.
// Payloads in the future are timeshifted to the latest bucket.
func (a *ClientStatsAggregator) getAggregationBucketTime(now, bs time.Time) time.Time {
	switch {
	case bs.Before(a.oldestTs):
		return a.oldestTs
	case bs.After(now):
		return alignAggTs(now)
	default:
		return alignAggTs(bs)
	}
}
// add takes a new ClientStatsPayload and aggregates its stats in the internal buckets.
func (a *ClientStatsAggregator) add(now time.Time, p *pb.ClientStatsPayload) {
	// populate container tags data on the payload
	a.setVersionDataFromContainerTags(p)
	// compute the PayloadAggregationKey, common for all buckets within the payload
	payloadAggKey := newPayloadAggregationKey(p.Env, p.Hostname, p.Version, p.ContainerID, p.GitCommitSha, p.ImageTag)
	for _, clientBucket := range p.Stats {
		clientBucketStart := time.Unix(0, int64(clientBucket.Start))
		// Clamp the client's bucket start into [a.oldestTs, now].
		ts := a.getAggregationBucketTime(now, clientBucketStart)
		b, ok := a.buckets[ts.Unix()]
		if !ok {
			b = &bucket{
				ts:  ts,
				agg: make(map[PayloadAggregationKey]map[BucketsAggregationKey]*aggregatedStats),
			}
			a.buckets[ts.Unix()] = b
		}
		b.aggregateStatsBucket(clientBucket, payloadAggKey)
	}
}
// flush hands the given payloads to the writer, stamped with the agent's
// identity and marked as client-computed. Empty slices are dropped so no
// empty StatsPayload is ever written.
func (a *ClientStatsAggregator) flush(p []*pb.ClientStatsPayload) {
	if len(p) == 0 {
		return
	}
	a.writer.Write(&pb.StatsPayload{
		Stats:          p,
		AgentEnv:       a.agentEnv,
		AgentHostname:  a.agentHostname,
		AgentVersion:   a.agentVersion,
		ClientComputed: true,
	})
}
// setVersionDataFromContainerTags fills in the payload's ImageTag and
// GitCommitSha from the container tags resolved for its ContainerID,
// never overriding values the client already set.
func (a *ClientStatsAggregator) setVersionDataFromContainerTags(p *pb.ClientStatsPayload) {
	// No need to go any further if we already have the information in the payload.
	if p.ImageTag != "" && p.GitCommitSha != "" {
		return
	}
	if p.ContainerID != "" {
		gitCommitSha, imageTag, err := version.GetVersionDataFromContainerTags(p.ContainerID, a.conf)
		if err != nil {
			// Use log.Errorf, not log.Error: the message contains format verbs
			// (%s, %v) that a non-formatting variant would not interpolate.
			log.Errorf("Client stats aggregator is unable to resolve container ID (%s) to container tags: %v", p.ContainerID, err)
		} else {
			// Only override if the payload's original values were empty strings.
			if p.ImageTag == "" {
				p.ImageTag = imageTag
			}
			if p.GitCommitSha == "" {
				p.GitCommitSha = gitCommitSha
			}
		}
	}
}
// alignAggTs aligns time to the aggregator timestamps.
// Timestamps from the aggregator are never aligned with concentrator timestamps.
// This ensures that all counts sent by a same agent host are never on the same second.
// aggregator timestamps: 2ks+1s (1s, 3s, 5s, 7s, 9s, 11s)
// concentrator timestamps: 10ks (0s, 10s, 20s ..)
func alignAggTs(t time.Time) time.Time {
	// Truncate to the 2s bucket boundary, then shift by 1s off the grid.
	return t.Truncate(bucketDuration).Add(time.Second)
}
// bucket holds all aggregated stats that share the same aligned timestamp.
type bucket struct {
	// ts is the timestamp attached to the payload
	ts time.Time
	// agg contains the aggregated Hits/Errors/Duration counts
	agg map[PayloadAggregationKey]map[BucketsAggregationKey]*aggregatedStats
}
// aggregateStatsBucket takes a ClientStatsBucket and a PayloadAggregationKey, and aggregates all counts
// and distributions from the ClientGroupedStats inside the bucket.
func (b *bucket) aggregateStatsBucket(sb *pb.ClientStatsBucket, payloadAggKey PayloadAggregationKey) {
	payloadAgg, ok := b.agg[payloadAggKey]
	if !ok {
		payloadAgg = make(map[BucketsAggregationKey]*aggregatedStats, len(sb.Stats))
		b.agg[payloadAggKey] = payloadAgg
	}
	for _, gs := range sb.Stats {
		if gs == nil {
			continue
		}
		aggKey := newBucketAggregationKey(gs)
		agg, ok := payloadAgg[aggKey]
		if !ok {
			// First occurrence of this key: keep the encoded sketches as-is so a
			// single-payload bucket never pays the decode/re-encode cost.
			agg = &aggregatedStats{
				hits:               gs.Hits,
				topLevelHits:       gs.TopLevelHits,
				errors:             gs.Errors,
				duration:           gs.Duration,
				peerTags:           gs.PeerTags,
				okDistributionRaw:  gs.OkSummary,    // store encoded version only
				errDistributionRaw: gs.ErrorSummary, // store encoded version only
			}
			payloadAgg[aggKey] = agg
			continue
		}
		// aggregate counts
		agg.hits += gs.Hits
		agg.topLevelHits += gs.TopLevelHits
		agg.errors += gs.Errors
		agg.duration += gs.Duration
		// Decode, if needed, the raw ddsketches from the first payload that reached the bucket.
		// Note: log.Errorf (not log.Error) below — the messages contain a %v format verb.
		if agg.okDistributionRaw != nil {
			sketch, err := decodeSketch(agg.okDistributionRaw)
			if err != nil {
				log.Errorf("Unable to decode OK distribution ddsketch: %v", err)
			} else {
				agg.okDistribution = normalizeSketch(sketch)
			}
			agg.okDistributionRaw = nil
		}
		if agg.errDistributionRaw != nil {
			sketch, err := decodeSketch(agg.errDistributionRaw)
			if err != nil {
				log.Errorf("Unable to decode Error distribution ddsketch: %v", err)
			} else {
				agg.errDistribution = normalizeSketch(sketch)
			}
			agg.errDistributionRaw = nil
		}
		// aggregate distributions
		if sketch, err := mergeSketch(agg.okDistribution, gs.OkSummary); err == nil {
			agg.okDistribution = sketch
		} else {
			log.Errorf("Unable to merge OK distribution ddsketch: %v", err)
		}
		if sketch, err := mergeSketch(agg.errDistribution, gs.ErrorSummary); err == nil {
			agg.errDistribution = sketch
		} else {
			log.Errorf("Unable to merge Error distribution ddsketch: %v", err)
		}
	}
}
// aggregationToPayloads converts the contents of the bucket into ClientStatsPayloads
func (b *bucket) aggregationToPayloads() []*pb.ClientStatsPayload {
	res := make([]*pb.ClientStatsPayload, 0, len(b.agg))
	for payloadKey, aggrStats := range b.agg {
		// One payload per PayloadAggregationKey, carrying a single bucket with
		// all grouped stats that share that key.
		groupedStats := make([]*pb.ClientGroupedStats, 0, len(aggrStats))
		for aggrKey, stats := range aggrStats {
			gs, err := exporGroupedStats(aggrKey, stats)
			if err != nil {
				log.Errorf("Dropping stats bucket due to encoding error: %v.", err)
				continue
			}
			groupedStats = append(groupedStats, gs)
		}
		clientBuckets := []*pb.ClientStatsBucket{
			{
				Start:    uint64(b.ts.UnixNano()),
				Duration: uint64(clientBucketDuration.Nanoseconds()),
				Stats:    groupedStats,
			}}
		// NOTE(review): payloadKey.ContainerID is not copied onto the payload
		// here — confirm this omission is intentional.
		res = append(res, &pb.ClientStatsPayload{
			Hostname:     payloadKey.Hostname,
			Env:          payloadKey.Env,
			Version:      payloadKey.Version,
			ImageTag:     payloadKey.ImageTag,
			GitCommitSha: payloadKey.GitCommitSha,
			Stats:        clientBuckets,
		})
	}
	return res
}
// exporGroupedStats converts one aggregated entry back into pb.ClientGroupedStats.
// (The name is missing a "t" — kept as-is since it is called elsewhere in this package.)
// if the raw sketches are still present (only one payload received), we use them directly.
// Otherwise the aggregated DDSketches are serialized.
func exporGroupedStats(aggrKey BucketsAggregationKey, stats *aggregatedStats) (*pb.ClientGroupedStats, error) {
	okSummary := stats.okDistributionRaw
	errSummary := stats.errDistributionRaw
	var err error
	if stats.okDistribution != nil {
		// A decoded sketch exists, meaning several payloads were merged:
		// re-serialize it, replacing the (stale) raw bytes.
		msg := stats.okDistribution.ToProto()
		okSummary, err = proto.Marshal(msg)
		if err != nil {
			return &pb.ClientGroupedStats{}, err
		}
	}
	if stats.errDistribution != nil {
		msg := stats.errDistribution.ToProto()
		errSummary, err = proto.Marshal(msg)
		if err != nil {
			return &pb.ClientGroupedStats{}, err
		}
	}
	return &pb.ClientGroupedStats{
		Service:        aggrKey.Service,
		Name:           aggrKey.Name,
		SpanKind:       aggrKey.SpanKind,
		Resource:       aggrKey.Resource,
		HTTPStatusCode: aggrKey.StatusCode,
		Type:           aggrKey.Type,
		Synthetics:     aggrKey.Synthetics,
		IsTraceRoot:    aggrKey.IsTraceRoot,
		PeerTags:       stats.peerTags,
		TopLevelHits:   stats.topLevelHits,
		Hits:           stats.hits,
		Errors:         stats.errors,
		Duration:       stats.duration,
		OkSummary:      okSummary,
		ErrorSummary:   errSummary,
	}, nil
}
// newPayloadAggregationKey assembles a PayloadAggregationKey from its parts.
func newPayloadAggregationKey(env, hostname, version, cid string, gitCommitSha string, imageTag string) PayloadAggregationKey {
	key := PayloadAggregationKey{
		Env:          env,
		Hostname:     hostname,
		Version:      version,
		ContainerID:  cid,
		GitCommitSha: gitCommitSha,
		ImageTag:     imageTag,
	}
	return key
}
// newBucketAggregationKey derives the BucketsAggregationKey from grouped
// stats; the peer-tags hash is computed only when peer tags are present.
func newBucketAggregationKey(b *pb.ClientGroupedStats) BucketsAggregationKey {
	key := BucketsAggregationKey{
		Service:     b.Service,
		Name:        b.Name,
		Resource:    b.Resource,
		Type:        b.Type,
		SpanKind:    b.SpanKind,
		StatusCode:  b.HTTPStatusCode,
		Synthetics:  b.Synthetics,
		IsTraceRoot: b.IsTraceRoot,
	}
	if tags := b.GetPeerTags(); len(tags) > 0 {
		key.PeerTagsHash = peerTagsHash(tags)
	}
	return key
}
// aggregatedStats holds aggregated counts and distributions
type aggregatedStats struct {
	// aggregated counts
	hits, topLevelHits, errors, duration uint64
	// peerTags are the peer tags shared by all stats under this key
	peerTags []string
	// aggregated DDSketches
	okDistribution, errDistribution *ddsketch.DDSketch
	// raw (encoded) DDSketches. Only present if a single payload is received on the active bucket,
	// allowing the bucket to not decode the sketch. If a second payload matches the bucket,
	// sketches will be decoded and stored in the okDistribution and errDistribution fields.
	okDistributionRaw, errDistributionRaw []byte
}
// mergeSketch take an existing DDSketch, and merges a second one, decoding its contents
// A nil raw input is a no-op returning s1 unchanged; decode failures also
// return s1 alongside the error.
func mergeSketch(s1 *ddsketch.DDSketch, raw []byte) (*ddsketch.DDSketch, error) {
	if raw == nil {
		return s1, nil
	}
	s2, err := decodeSketch(raw)
	if err != nil {
		return s1, err
	}
	// Normalize to the canonical mapping so the two sketches can be merged.
	s2 = normalizeSketch(s2)
	if s1 == nil {
		return s2, nil
	}
	if err = s1.MergeWith(s2); err != nil {
		return nil, err
	}
	return s1, nil
}
// normalizeSketch returns s re-mapped onto the canonical ddsketchMapping,
// leaving it untouched when it already uses that mapping.
func normalizeSketch(s *ddsketch.DDSketch) *ddsketch.DDSketch {
	if s.IndexMapping.Equals(ddsketchMapping) {
		// already normalized
		return s
	}
	return s.ChangeMapping(ddsketchMapping, store.NewCollapsingLowestDenseStore(maxNumBins), store.NewCollapsingLowestDenseStore(maxNumBins), 1)
}
// decodeSketch unmarshals a protobuf-encoded DDSketch.
// A nil or empty input decodes to a nil sketch with no error.
func decodeSketch(data []byte) (*ddsketch.DDSketch, error) {
	if len(data) == 0 {
		return nil, nil
	}
	var decoded sketchpb.DDSketch
	if err := proto.Unmarshal(data, &decoded); err != nil {
		return nil, err
	}
	return ddsketch.FromProto(&decoded)
}

View File

@@ -0,0 +1,182 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package stats
import (
"sync"
"time"
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
"github.com/DataDog/datadog-agent/pkg/trace/config"
"github.com/DataDog/datadog-agent/pkg/trace/log"
"github.com/DataDog/datadog-agent/pkg/trace/traceutil"
"github.com/DataDog/datadog-agent/pkg/trace/watchdog"
"github.com/DataDog/datadog-go/v5/statsd"
)
// defaultBufferLen represents the default buffer length; the number of bucket size
// units used by the concentrator. Two buckets are retained: the current one and
// the previous one, to tolerate slightly late spans.
const defaultBufferLen = 2
// Writer is an interface for something that can Write Stats Payloads
type Writer interface {
	// Write this payload
	Write(*pb.StatsPayload)
}
// Concentrator produces time bucketed statistics from a stream of raw traces.
// https://en.wikipedia.org/wiki/Knelson_concentrator
// Gets an imperial shitton of traces, and outputs pre-computed data structures
// allowing to find the gold (stats) amongst the traces.
type Concentrator struct {
	Writer           Writer            // sink receiving flushed stats payloads
	spanConcentrator *SpanConcentrator // does the actual per-span bucketing
	// bucket duration in nanoseconds
	bsize int64
	exit   chan struct{}  // closed by Stop to end the Run loop
	exitWG sync.WaitGroup // lets Stop wait for Run to finish
	// agent identity stamped onto every flushed payload
	agentEnv      string
	agentHostname string
	agentVersion  string
	statsd        statsd.ClientInterface
	peerTagKeys   []string // configured peer tag keys to look for on spans
}
// NewConcentrator initializes a new concentrator ready to be started
func NewConcentrator(conf *config.AgentConfig, writer Writer, now time.Time, statsd statsd.ClientInterface) *Concentrator {
	interval := conf.BucketInterval.Nanoseconds()
	return &Concentrator{
		spanConcentrator: NewSpanConcentrator(&SpanConcentratorConfig{
			ComputeStatsBySpanKind: conf.ComputeStatsBySpanKind,
			BucketInterval:         interval,
		}, now),
		Writer:        writer,
		exit:          make(chan struct{}),
		agentEnv:      conf.DefaultEnv,
		agentHostname: conf.Hostname,
		agentVersion:  conf.AgentVersion,
		statsd:        statsd,
		bsize:         interval,
		peerTagKeys:   conf.ConfiguredPeerTags(),
	}
}
// Start starts the concentrator.
func (c *Concentrator) Start() {
	c.exitWG.Add(1)
	go func() {
		defer watchdog.LogOnPanic(c.statsd)
		defer c.exitWG.Done()
		c.Run()
	}()
}
// Run runs the main loop of the concentrator goroutine. Traces are received
// through `Add`, this loop only deals with flushing.
func (c *Concentrator) Run() {
	// flush with the same period as stats buckets
	ticker := time.NewTicker(time.Duration(c.bsize) * time.Nanosecond)
	defer ticker.Stop()
	log.Debug("Starting concentrator")
	for {
		select {
		case <-c.exit:
			// Force-flush everything that remains before exiting.
			log.Info("Exiting concentrator, computing remaining stats")
			c.Writer.Write(c.Flush(true))
			return
		case <-ticker.C:
			c.Writer.Write(c.Flush(false))
		}
	}
}
// Stop stops the main Run loop.
func (c *Concentrator) Stop() {
	close(c.exit)   // signal Run to perform a final forced flush and return
	c.exitWG.Wait() // wait until the Run goroutine has exited
}
// Input specifies a set of traces originating from a certain payload.
type Input struct {
	Traces        []traceutil.ProcessedTrace
	ContainerID   string   // container ID shared by all traces in this input, if any
	ContainerTags []string // container tags associated with ContainerID
}
// NewStatsInput allocates a stats input for an incoming trace payload
func NewStatsInput(numChunks int, containerID string, clientComputedStats bool, conf *config.AgentConfig) Input {
	// The client already computed stats for this payload: nothing to do here.
	if clientComputedStats {
		return Input{}
	}
	input := Input{Traces: make([]traceutil.ProcessedTrace, 0, numChunks)}
	_, cidEnabled := conf.Features["enable_cid_stats"]
	_, cidDisabled := conf.Features["disable_cid_stats"]
	onFargate := conf.FargateOrchestrator != config.OrchestratorUnknown
	// only allow the ContainerID stats dimension if we're in a Fargate instance or it's
	// been explicitly enabled and it's not prohibited by the disable_cid_stats feature flag.
	if (cidEnabled || onFargate) && !cidDisabled {
		input.ContainerID = containerID
	}
	return input
}
// Add applies the given input to the concentrator.
func (c *Concentrator) Add(t Input) {
	for i := range t.Traces {
		c.addNow(&t.Traces[i], t.ContainerID, t.ContainerTags)
	}
}
// addNow adds the given input into the concentrator.
// Callers must guard! (presumably against concurrent invocation — the span
// concentrator locks internally, but verify before calling from new sites)
func (c *Concentrator) addNow(pt *traceutil.ProcessedTrace, containerID string, containerTags []string) {
	// Fall back to the agent's hostname/env when the tracer did not set them.
	hostname := pt.TracerHostname
	if hostname == "" {
		hostname = c.agentHostname
	}
	env := pt.TracerEnv
	if env == "" {
		env = c.agentEnv
	}
	weight := weight(pt.Root)
	aggKey := PayloadAggregationKey{
		Env:          env,
		Hostname:     hostname,
		Version:      pt.AppVersion,
		ContainerID:  containerID,
		GitCommitSha: pt.GitCommitSha,
		ImageTag:     pt.ImageTag,
	}
	for _, s := range pt.TraceChunk.Spans {
		// NewStatSpanFromPB filters out spans that are not stats-eligible.
		statSpan, ok := c.spanConcentrator.NewStatSpanFromPB(s, c.peerTagKeys)
		if ok {
			c.spanConcentrator.addSpan(statSpan, aggKey, containerID, containerTags, pt.TraceChunk.Origin, weight)
		}
	}
}
// Flush deletes and returns complete statistic buckets.
// The force boolean guarantees flushing all buckets if set to true.
func (c *Concentrator) Flush(force bool) *pb.StatsPayload {
	return c.flushNow(time.Now().UnixNano(), force)
}
// flushNow flushes the span concentrator at the given timestamp (ns) and wraps
// the result in a StatsPayload stamped with the agent's identity.
func (c *Concentrator) flushNow(now int64, force bool) *pb.StatsPayload {
	sb := c.spanConcentrator.Flush(now, force)
	return &pb.StatsPayload{Stats: sb, AgentHostname: c.agentHostname, AgentEnv: c.agentEnv, AgentVersion: c.agentVersion}
}
// alignTs returns the provided timestamp truncated to the bucket size.
// It gives us the start time of the time bucket in which such timestamp falls.
func alignTs(ts int64, bsize int64) int64 {
return ts - ts%bsize
}

View File

@@ -0,0 +1,149 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package stats
import (
"slices"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
semconv "go.opentelemetry.io/collector/semconv/v1.17.0"
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
"github.com/DataDog/datadog-agent/pkg/trace/config"
"github.com/DataDog/datadog-agent/pkg/trace/traceutil"
)
// chunkKey is used to group TraceChunks
// Spans that agree on all of these fields are placed into the same chunk.
type chunkKey struct {
	traceIDUInt64 uint64 // lower 64 bits of the OTel trace ID
	env           string
	version       string
	hostname      string
	cid           string // container ID (or k8s pod UID), may be empty
}
// OTLPTracesToConcentratorInputs converts eligible OTLP spans to Concentrator.Input.
// The converted Inputs only have the minimal number of fields for APM stats calculation and are only meant
// to be used in Concentrator.Add(). Do not use them for other purposes.
func OTLPTracesToConcentratorInputs(
	traces ptrace.Traces,
	conf *config.AgentConfig,
	containerTagKeys []string,
	peerTagKeys []string,
) []Input {
	spanByID, resByID, scopeByID := traceutil.IndexOTelSpans(traces)
	topLevelByKind := conf.HasFeature("enable_otlp_compute_top_level_by_span_kind")
	topLevelSpans := traceutil.GetTopLevelOTelSpans(spanByID, resByID, topLevelByKind)
	// Resource names configured to be ignored are skipped entirely.
	ignoreResNames := make(map[string]struct{})
	for _, resName := range conf.Ignore["resource"] {
		ignoreResNames[resName] = struct{}{}
	}
	// Group converted spans into TraceChunks keyed by (trace ID, env, version, hostname, cid).
	chunks := make(map[chunkKey]*pb.TraceChunk)
	containerTagsByID := make(map[string][]string)
	for spanID, otelspan := range spanByID {
		otelres := resByID[spanID]
		if _, exists := ignoreResNames[traceutil.GetOTelResource(otelspan, otelres)]; exists {
			continue
		}
		// TODO(songy23): use AttributeDeploymentEnvironmentName once collector version upgrade is unblocked
		env := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, "deployment.environment.name", semconv.AttributeDeploymentEnvironment)
		hostname := traceutil.GetOTelHostname(otelspan, otelres, conf.OTLPReceiver.AttributesTranslator, conf.Hostname)
		version := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, semconv.AttributeServiceVersion)
		cid := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, true, semconv.AttributeContainerID, semconv.AttributeK8SPodUID)
		var ctags []string
		if cid != "" {
			ctags = traceutil.GetOTelContainerTags(otelres.Attributes(), containerTagKeys)
			if ctags != nil {
				// Make sure container tags are sorted per APM stats intake requirement
				if !slices.IsSorted(ctags) {
					slices.Sort(ctags)
				}
				containerTagsByID[cid] = ctags
			}
		}
		ckey := chunkKey{
			traceIDUInt64: traceutil.OTelTraceIDToUint64(otelspan.TraceID()),
			env:           env,
			version:       version,
			hostname:      hostname,
			cid:           cid,
		}
		chunk, ok := chunks[ckey]
		if !ok {
			chunk = &pb.TraceChunk{}
			chunks[ckey] = chunk
		}
		_, isTop := topLevelSpans[spanID]
		chunk.Spans = append(chunk.Spans, otelSpanToDDSpan(otelspan, otelres, scopeByID[spanID], isTop, topLevelByKind, conf, peerTagKeys))
	}
	// Emit one Input per chunk, attaching the container tags collected above.
	inputs := make([]Input, 0, len(chunks))
	for ckey, chunk := range chunks {
		pt := traceutil.ProcessedTrace{
			TraceChunk:     chunk,
			Root:           traceutil.GetRoot(chunk.Spans),
			TracerEnv:      ckey.env,
			AppVersion:     ckey.version,
			TracerHostname: ckey.hostname,
		}
		inputs = append(inputs, Input{
			Traces:        []traceutil.ProcessedTrace{pt},
			ContainerID:   ckey.cid,
			ContainerTags: containerTagsByID[ckey.cid],
		})
	}
	return inputs
}
// otelSpanToDDSpan converts an OTel span to a DD span.
// The converted DD span only has the minimal number of fields for APM stats calculation and is only meant
// to be used in OTLPTracesToConcentratorInputs. Do not use them for other purposes.
// TODO(OTEL-1726): use the same function here and in pkg/trace/api/otlp.go
func otelSpanToDDSpan(
	otelspan ptrace.Span,
	otelres pcommon.Resource,
	lib pcommon.InstrumentationScope,
	isTopLevel, topLevelByKind bool,
	conf *config.AgentConfig,
	peerTagKeys []string,
) *pb.Span {
	ddspan := &pb.Span{
		Service:  traceutil.GetOTelService(otelspan, otelres, true),
		Name:     traceutil.GetOTelOperationName(otelspan, otelres, lib, conf.OTLPReceiver.SpanNameAsResourceName, conf.OTLPReceiver.SpanNameRemappings, true),
		Resource: traceutil.GetOTelResource(otelspan, otelres),
		TraceID:  traceutil.OTelTraceIDToUint64(otelspan.TraceID()),
		SpanID:   traceutil.OTelSpanIDToUint64(otelspan.SpanID()),
		ParentID: traceutil.OTelSpanIDToUint64(otelspan.ParentSpanID()),
		Start:    int64(otelspan.StartTimestamp()),
		Duration: int64(otelspan.EndTimestamp()) - int64(otelspan.StartTimestamp()),
		Type:     traceutil.GetOTelSpanType(otelspan, otelres),
	}
	spanKind := otelspan.Kind()
	traceutil.SetMeta(ddspan, "span.kind", traceutil.OTelSpanKindName(spanKind))
	// Only record a status code metric when one is actually present.
	code := traceutil.GetOTelStatusCode(otelspan)
	if code != 0 {
		traceutil.SetMetric(ddspan, tagStatusCode, float64(code))
	}
	if otelspan.Status().Code() == ptrace.StatusCodeError {
		ddspan.Error = 1
	}
	if isTopLevel {
		traceutil.SetTopLevel(ddspan, true)
	}
	if isMeasured := traceutil.GetOTelAttrVal(otelspan.Attributes(), false, "_dd.measured"); isMeasured == "1" {
		traceutil.SetMeasured(ddspan, true)
	} else if topLevelByKind && (spanKind == ptrace.SpanKindClient || spanKind == ptrace.SpanKindProducer) {
		// When enable_otlp_compute_top_level_by_span_kind is true, compute stats for client-side spans
		traceutil.SetMeasured(ddspan, true)
	}
	// Copy over any configured peer tags found on the span or its resource.
	for _, peerTagKey := range peerTagKeys {
		if peerTagVal := traceutil.GetOTelAttrValInResAndSpanAttrs(otelspan, otelres, false, peerTagKey); peerTagVal != "" {
			traceutil.SetMeta(ddspan, peerTagKey, peerTagVal)
		}
	}
	return ddspan
}

View File

@@ -0,0 +1,253 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package stats
import (
"slices"
"strings"
"sync"
"time"
"github.com/DataDog/datadog-agent/pkg/obfuscate"
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
"github.com/DataDog/datadog-agent/pkg/trace/log"
"github.com/DataDog/datadog-agent/pkg/trace/traceutil"
)
// SpanConcentratorConfig exposes configuration options for a SpanConcentrator
type SpanConcentratorConfig struct {
	// ComputeStatsBySpanKind enables/disables the computing of stats based on a span's `span.kind` field
	ComputeStatsBySpanKind bool
	// BucketInterval the size of our pre-aggregation per bucket (nanoseconds)
	BucketInterval int64
}
// StatSpan holds all the required fields from a span needed to calculate stats
type StatSpan struct {
	service  string
	resource string
	name     string
	typ      string
	error    int32
	parentID uint64
	start    int64 // span start, nanoseconds
	duration int64 // span duration, nanoseconds
	//Fields below this are derived on creation
	spanKind         string
	statusCode       uint32 // HTTP status code derived via getStatusCode
	isTopLevel       bool
	matchingPeerTags []string // "key:value" peer tags relevant for aggregation
}
// matchingPeerTags returns the "key:value" peer tags present in meta,
// restricted to the keys relevant for this span's kind and base service.
// IP-like values are quantized to limit tag cardinality.
func matchingPeerTags(meta map[string]string, peerTagKeys []string) []string {
	if len(peerTagKeys) == 0 {
		return nil
	}
	var matched []string
	for _, key := range peerTagKeysToAggregateForSpan(meta[tagSpanKind], meta[tagBaseService], peerTagKeys) {
		value, ok := meta[key]
		if !ok || value == "" {
			continue
		}
		matched = append(matched, key+":"+obfuscate.QuantizePeerIPAddresses(value))
	}
	return matched
}
// peerTagKeysToAggregateForSpan returns the set of peerTagKeys to use for stats aggregation for the given
// span.kind and _dd.base_service
func peerTagKeysToAggregateForSpan(spanKind string, baseService string, peerTagKeys []string) []string {
	if len(peerTagKeys) == 0 {
		return nil
	}
	switch strings.ToLower(spanKind) {
	case "client", "producer", "consumer":
		// Peer-facing spans aggregate on the full configured key set.
		return peerTagKeys
	case "", "internal":
		if baseService != "" {
			// it's a service override on an internal span so it comes from custom instrumentation and does not represent
			// a client|producer|consumer span which is talking to a peer entity
			// in this case only the base service tag is relevant for stats aggregation
			return []string{tagBaseService}
		}
	}
	return nil
}
// SpanConcentrator produces time bucketed statistics from a stream of raw spans.
type SpanConcentrator struct {
	// computeStatsBySpanKind mirrors SpanConcentratorConfig.ComputeStatsBySpanKind
	computeStatsBySpanKind bool
	// bucket duration in nanoseconds
	bsize int64
	// Timestamp of the oldest time bucket for which we allow data.
	// Any ingested stats older than it get added to this bucket.
	oldestTs int64
	// bufferLen is the number of 10s stats bucket we keep in memory before flushing them.
	// It means that we can compute stats only for the last `bufferLen * bsize` and that we
	// wait such time before flushing the stats.
	// This only applies to past buckets. Stats buckets in the future are allowed with no restriction.
	bufferLen int
	// mu protects the buckets field
	mu      sync.Mutex
	buckets map[int64]*RawBucket
}
// NewSpanConcentrator builds a new SpanConcentrator object
func NewSpanConcentrator(cfg *SpanConcentratorConfig, now time.Time) *SpanConcentrator {
	// The zero-valued mutex is ready for use; only the map needs allocation.
	return &SpanConcentrator{
		computeStatsBySpanKind: cfg.ComputeStatsBySpanKind,
		bsize:                  cfg.BucketInterval,
		oldestTs:               alignTs(now.UnixNano(), cfg.BucketInterval),
		bufferLen:              defaultBufferLen,
		buckets:                make(map[int64]*RawBucket),
	}
}
// NewStatSpanFromPB is a helper version of NewStatSpan that builds a StatSpan from a pb.Span.
func (sc *SpanConcentrator) NewStatSpanFromPB(s *pb.Span, peerTags []string) (statSpan *StatSpan, ok bool) {
	return sc.NewStatSpan(s.Service, s.Resource, s.Name, s.Type, s.ParentID, s.Start, s.Duration, s.Error, s.Meta, s.Metrics, peerTags)
}
// NewStatSpan builds a StatSpan from the required fields for stats calculation
// peerTags is the configured list of peer tags to look for
// returns (nil,false) if the provided fields indicate a span should not have stats calculated
func (sc *SpanConcentrator) NewStatSpan(
	service, resource, name string,
	typ string,
	parentID uint64,
	start, duration int64,
	error int32,
	meta map[string]string,
	metrics map[string]float64,
	peerTags []string,
) (statSpan *StatSpan, ok bool) {
	if meta == nil {
		meta = make(map[string]string)
	}
	if metrics == nil {
		metrics = make(map[string]float64)
	}
	// A span gets stats if it is top-level, explicitly measured, or has an
	// eligible span.kind (when that feature is enabled) — unless it is a
	// partial snapshot, which is always excluded.
	eligibleSpanKind := sc.computeStatsBySpanKind && computeStatsForSpanKind(meta["span.kind"])
	isTopLevel := traceutil.HasTopLevelMetrics(metrics)
	if !(isTopLevel || traceutil.IsMeasuredMetrics(metrics) || eligibleSpanKind) {
		return nil, false
	}
	if traceutil.IsPartialSnapshotMetrics(metrics) {
		return nil, false
	}
	return &StatSpan{
		service:          service,
		resource:         resource,
		name:             name,
		typ:              typ,
		error:            error,
		parentID:         parentID,
		start:            start,
		duration:         duration,
		spanKind:         meta[tagSpanKind],
		statusCode:       getStatusCode(meta, metrics),
		isTopLevel:       isTopLevel,
		matchingPeerTags: matchingPeerTags(meta, peerTags),
	}, true
}
// KindsComputed is the list of span kinds that will have stats computed on them
// when computeStatsByKind is enabled in the concentrator.
var KindsComputed = []string{
	"server",
	"consumer",
	"client",
	"producer",
}

// computeStatsForSpanKind returns true if the span.kind value makes the span
// eligible for stats computation. The comparison is case-insensitive.
func computeStatsForSpanKind(kind string) bool {
	return slices.Contains(KindsComputed, strings.ToLower(kind))
}
// addSpan routes a single stat span into the raw bucket covering the span's
// end time, creating the bucket (and recording container tags) if needed.
// weight scales the span's contribution to the counts.
func (sc *SpanConcentrator) addSpan(s *StatSpan, aggKey PayloadAggregationKey, containerID string, containerTags []string, origin string, weight float64) {
	sc.mu.Lock()
	defer sc.mu.Unlock()
	// Buckets are keyed by the span's end time aligned to the bucket size.
	end := s.start + s.duration
	btime := end - end%sc.bsize
	// If too far in the past, count in the oldest-allowed time bucket instead.
	if btime < sc.oldestTs {
		btime = sc.oldestTs
	}
	b, ok := sc.buckets[btime]
	if !ok {
		b = NewRawBucket(uint64(btime), uint64(sc.bsize))
		if containerID != "" && len(containerTags) > 0 {
			b.containerTagsByID[containerID] = containerTags
		}
		sc.buckets[btime] = b
	}
	b.HandleSpan(s, weight, origin, aggKey)
}
// AddSpan to the SpanConcentrator, appending the new data to the appropriate internal bucket.
// It is the exported variant of addSpan with a fixed weight of 1.
func (sc *SpanConcentrator) AddSpan(s *StatSpan, aggKey PayloadAggregationKey, containerID string, containerTags []string, origin string) {
	sc.addSpan(s, aggKey, containerID, containerTags, origin, 1)
}
// Flush deletes and returns complete ClientStatsPayloads.
// The force boolean guarantees flushing all buckets if set to true.
func (sc *SpanConcentrator) Flush(now int64, force bool) []*pb.ClientStatsPayload {
	m := make(map[PayloadAggregationKey][]*pb.ClientStatsBucket)
	containerTagsByID := make(map[string][]string)
	sc.mu.Lock()
	for ts, srb := range sc.buckets {
		// Always keep `bufferLen` buckets (default is 2: current + previous one).
		// This is a trade-off: we accept slightly late traces (clock skew and stuff)
		// but we delay flushing by at most `bufferLen` buckets.
		//
		// This delay might result in not flushing stats payload (data loss)
		// if the agent stops while the latest buckets aren't old enough to be flushed.
		// The "force" boolean skips the delay and flushes all buckets, typically on agent shutdown.
		if !force && ts > now-int64(sc.bufferLen)*sc.bsize {
			log.Tracef("Bucket %d is not old enough to be flushed, keeping it", ts)
			continue
		}
		log.Debugf("Flushing bucket %d", ts)
		// Group exported buckets by payload key and carry over container tags.
		for k, b := range srb.Export() {
			m[k] = append(m[k], b)
			if ctags, ok := srb.containerTagsByID[k.ContainerID]; ok {
				containerTagsByID[k.ContainerID] = ctags
			}
		}
		delete(sc.buckets, ts)
	}
	// After flushing, update the oldest timestamp allowed to prevent having stats for
	// an already-flushed bucket.
	newOldestTs := alignTs(now, sc.bsize) - int64(sc.bufferLen-1)*sc.bsize
	if newOldestTs > sc.oldestTs {
		log.Debugf("Update oldestTs to %d", newOldestTs)
		sc.oldestTs = newOldestTs
	}
	sc.mu.Unlock()
	// Materialize one ClientStatsPayload per payload aggregation key.
	sb := make([]*pb.ClientStatsPayload, 0, len(m))
	for k, s := range m {
		p := &pb.ClientStatsPayload{
			Env:          k.Env,
			Hostname:     k.Hostname,
			ContainerID:  k.ContainerID,
			Version:      k.Version,
			GitCommitSha: k.GitCommitSha,
			ImageTag:     k.ImageTag,
			Stats:        s,
			Tags:         containerTagsByID[k.ContainerID],
		}
		sb = append(sb, p)
	}
	return sb
}

View File

@@ -0,0 +1,206 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package stats
import (
"math"
"math/rand"
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
"github.com/DataDog/datadog-agent/pkg/trace/log"
"github.com/golang/protobuf/proto"
"github.com/DataDog/sketches-go/ddsketch"
)
// Parameters shared by the ok/error latency DDSketches stored in groupedStats.
const (
	// relativeAccuracy is the value accuracy we have on the percentiles. For example, we can
	// say that p99 is 100ms +- 1ms
	relativeAccuracy = 0.01
	// maxNumBins is the maximum number of bins of the ddSketch we use to store percentiles.
	// It can affect relative accuracy, but in practice, 2048 bins is enough to have 1% relative accuracy from
	// 80 micro second to 1 year: http://www.vldb.org/pvldb/vol12/p2195-masson.pdf
	maxNumBins = 2048
)
// Most "algorithm" stuff here is tested with stats_test.go as what is important
// is that the final data, the one with send after a call to Export(), is correct.

// groupedStats accumulates weighted hit/error/duration counters and latency
// distributions for a single Aggregation key within one stats bucket.
type groupedStats struct {
	// using float64 here to avoid the accumulation of rounding issues.
	hits         float64 // weighted count of spans seen
	topLevelHits float64 // weighted count of top-level spans seen
	errors       float64 // weighted count of spans with a non-zero error field
	duration     float64 // weighted sum of span durations (nanoseconds)

	okDistribution  *ddsketch.DDSketch // latency sketch for non-error spans
	errDistribution *ddsketch.DDSketch // latency sketch for error spans

	peerTags []string // peer tags captured from the first span aggregated into this group
}
// round converts f to an integer, uniformly choosing between the lower and
// upper approximations so that the expected value of the result equals f.
func round(f float64) uint64 {
	n := uint64(f)
	if frac := f - float64(n); rand.Float64() < frac {
		return n + 1
	}
	return n
}
// export serializes both latency sketches and combines them with the
// aggregation key and the accumulated counters into a ClientGroupedStats.
// Counters are randomly rounded to integers via round.
func (s *groupedStats) export(a Aggregation) (*pb.ClientGroupedStats, error) {
	okSummary, err := proto.Marshal(s.okDistribution.ToProto())
	if err != nil {
		return &pb.ClientGroupedStats{}, err
	}
	errSummary, err := proto.Marshal(s.errDistribution.ToProto())
	if err != nil {
		return &pb.ClientGroupedStats{}, err
	}
	return &pb.ClientGroupedStats{
		Service:        a.Service,
		Name:           a.Name,
		Resource:       a.Resource,
		HTTPStatusCode: a.StatusCode,
		Type:           a.Type,
		Hits:           round(s.hits),
		Errors:         round(s.errors),
		Duration:       round(s.duration),
		TopLevelHits:   round(s.topLevelHits),
		OkSummary:      okSummary,
		ErrorSummary:   errSummary,
		Synthetics:     a.Synthetics,
		SpanKind:       a.SpanKind,
		PeerTags:       s.peerTags,
		IsTraceRoot:    a.IsTraceRoot,
	}, nil
}
// newGroupedStats returns a groupedStats with freshly initialized ok/error
// latency sketches. Sketch creation errors are logged; the sketch is then nil.
func newGroupedStats() *groupedStats {
	newSketch := func() *ddsketch.DDSketch {
		s, err := ddsketch.LogCollapsingLowestDenseDDSketch(relativeAccuracy, maxNumBins)
		if err != nil {
			log.Errorf("Error when creating ddsketch: %v", err)
		}
		return s
	}
	return &groupedStats{
		okDistribution:  newSketch(),
		errDistribution: newSketch(),
	}
}
// RawBucket is used to compute span data and aggregate it
// within a time-framed bucket. This should not be used outside
// the agent, use ClientStatsBucket for this.
type RawBucket struct {
	// This should really have no public fields. At all.

	start    uint64 // timestamp of start in our format
	duration uint64 // duration of a bucket in nanoseconds

	// this should really remain private as it's subject to refactoring
	data map[Aggregation]*groupedStats // accumulated stats, keyed by aggregation

	containerTagsByID map[string][]string // a map from container ID to container tags
}
// NewRawBucket opens a new calculation bucket for time ts and initializes it properly
func NewRawBucket(ts, d uint64) *RawBucket {
	// The only non-initialized value is the Duration which should be set by whoever closes that bucket
	return &RawBucket{
		start:             ts,
		duration:          d,
		data:              map[Aggregation]*groupedStats{},
		containerTagsByID: map[string][]string{},
	}
}
// Export transforms a RawBucket into a ClientStatsBucket, typically used
// before communicating data to the API, as RawBucket is the internal
// type while ClientStatsBucket is the public, shared one.
func (sb *RawBucket) Export() map[PayloadAggregationKey]*pb.ClientStatsBucket {
	out := make(map[PayloadAggregationKey]*pb.ClientStatsBucket)
	for agg, gs := range sb.data {
		stats, err := gs.export(agg)
		if err != nil {
			log.Errorf("Dropping stats bucket due to encoding error: %v.", err)
			continue
		}
		// Regroup per payload key: several aggregations share one bucket.
		key := PayloadAggregationKey{
			Hostname:     agg.Hostname,
			Version:      agg.Version,
			Env:          agg.Env,
			ContainerID:  agg.ContainerID,
			GitCommitSha: agg.GitCommitSha,
			ImageTag:     agg.ImageTag,
		}
		bucket, exists := out[key]
		if !exists {
			bucket = &pb.ClientStatsBucket{
				Start:    sb.start,
				Duration: sb.duration,
			}
		}
		bucket.Stats = append(bucket.Stats, stats)
		out[key] = bucket
	}
	return out
}
// HandleSpan adds the span to this bucket stats, aggregated with the finest grain matching given aggregators
func (sb *RawBucket) HandleSpan(s *StatSpan, weight float64, origin string, aggKey PayloadAggregationKey) {
	// An empty Env is a programming error upstream; fail loudly.
	if aggKey.Env == "" {
		panic("env should never be empty")
	}
	sb.add(s, weight, NewAggregationFromSpan(s, origin, aggKey))
}
// add folds one span into the groupedStats for aggr, creating the group on
// first use. Counters are incremented by weight; the span duration is added
// to the ok or error latency sketch depending on the span's error field.
func (sb *RawBucket) add(s *StatSpan, weight float64, aggr Aggregation) {
	gs, ok := sb.data[aggr]
	if !ok {
		gs = newGroupedStats()
		gs.peerTags = s.matchingPeerTags
		sb.data[aggr] = gs
	}
	if s.isTopLevel {
		gs.topLevelHits += weight
	}
	gs.hits += weight
	if s.error != 0 {
		gs.errors += weight
	}
	gs.duration += float64(s.duration) * weight
	// alter resolution of duration distro
	trundur := nsTimestampToFloat(s.duration)
	if s.error != 0 {
		if err := gs.errDistribution.Add(trundur); err != nil {
			log.Debugf("Error adding error distribution stats: %v", err)
		}
		return
	}
	if err := gs.okDistribution.Add(trundur); err != nil {
		log.Debugf("Error adding distribution stats: %v", err)
	}
}
// nsTimestampToFloat converts a nanosec timestamp into a float nanosecond
// timestamp truncated to a fixed precision: the IEEE-754 mantissa is masked
// down to 9 explicit bits (~10 bits of precision counting the implicit one),
// so any value is accurate to roughly 1/1024 of its magnitude.
// https://en.wikipedia.org/wiki/Double-precision_floating-point_format
func nsTimestampToFloat(ns int64) float64 {
	// Keep the sign bit, the 11 exponent bits, and the top 9 mantissa bits.
	const precisionMask = 0xfffff80000000000
	bits := math.Float64bits(float64(ns))
	return math.Float64frombits(bits & precisionMask)
}

View File

@@ -0,0 +1,24 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package stats
import pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
// keySamplingRateGlobal is a metric key holding the global sampling rate.
const keySamplingRateGlobal = "_sample_rate"

// weight returns the weight of the span as defined for sampling, i.e. the
// inverse of the sampling rate. A missing or out-of-range rate (<= 0 or > 1)
// yields a neutral weight of 1.
func weight(s *pb.Span) float64 {
	if s == nil {
		return 1
	}
	if rate, ok := s.Metrics[keySamplingRateGlobal]; ok && rate > 0.0 && rate <= 1.0 {
		return 1.0 / rate
	}
	return 1
}

View File

@@ -0,0 +1,166 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package traceutil
import (
"fmt"
"os"
"runtime"
"strings"
)
const (
	// Azure App Service tag keys attached to traces.
	aasInstanceID       = "aas.environment.instance_id"
	aasInstanceName     = "aas.environment.instance_name"
	aasOperatingSystem  = "aas.environment.os"
	aasRuntime          = "aas.environment.runtime"
	aasExtensionVersion = "aas.environment.extension_version"
	aasFunctionRuntime  = "aas.environment.function_runtime"
	aasResourceGroup    = "aas.resource.group"
	aasResourceID       = "aas.resource.id"
	aasSiteKind         = "aas.site.kind"
	aasSiteName         = "aas.site.name"
	aasSiteType         = "aas.site.type"
	aasSubscriptionID   = "aas.subscription.id"

	// Human-readable runtime/framework names reported in aas.environment.runtime.
	dotnetFramework    = ".NET"
	nodeFramework      = "Node.js"
	javaFramework      = "Java"
	pythonFramework    = "Python"
	phpFramework       = "PHP"
	goFramework        = "Go"
	containerFramework = "Container"

	// unknown is the fallback value when an env var or runtime cannot be determined.
	unknown = "unknown"
	// appService is the default site kind/type value.
	appService = "app"
)
// GetAppServicesTags returns the env vars pulled from the Azure App Service instance.
// In some cases we will need to add extra tags for function apps.
func GetAppServicesTags() map[string]string {
siteName := os.Getenv("WEBSITE_SITE_NAME")
ownerName := os.Getenv("WEBSITE_OWNER_NAME")
resourceGroup := os.Getenv("WEBSITE_RESOURCE_GROUP")
instanceID := getEnvOrUnknown("WEBSITE_INSTANCE_ID")
computerName := getEnvOrUnknown("COMPUTERNAME")
extensionVersion := os.Getenv("DD_AAS_EXTENSION_VERSION")
// Windows and linux environments provide the OS differently
// We should grab it from GO's builtin runtime pkg
websiteOS := runtime.GOOS
currentRuntime := getRuntime(websiteOS)
subscriptionID := parseAzureSubscriptionID(ownerName)
resourceID := compileAzureResourceID(subscriptionID, resourceGroup, siteName)
tags := map[string]string{
aasInstanceID: instanceID,
aasInstanceName: computerName,
aasOperatingSystem: websiteOS,
aasRuntime: currentRuntime,
aasResourceGroup: resourceGroup,
aasResourceID: resourceID,
aasSiteKind: appService,
aasSiteName: siteName,
aasSiteType: appService,
aasSubscriptionID: subscriptionID,
}
// Remove the Java and .NET logic once non-universal extensions are deprecated
if websiteOS == "windows" {
if extensionVersion != "" {
tags[aasExtensionVersion] = extensionVersion
} else if val := os.Getenv("DD_AAS_JAVA_EXTENSION_VERSION"); val != "" {
tags[aasExtensionVersion] = val
} else if val := os.Getenv("DD_AAS_DOTNET_EXTENSION_VERSION"); val != "" {
tags[aasExtensionVersion] = val
}
}
// Function Apps require a different runtime and kind
if rt, ok := os.LookupEnv("FUNCTIONS_WORKER_RUNTIME"); ok {
tags[aasRuntime] = rt
tags[aasFunctionRuntime] = os.Getenv("FUNCTIONS_EXTENSION_VERSION")
tags[aasSiteKind] = "functionapp"
}
return tags
}
// getEnvOrUnknown reads env, returning the sentinel "unknown" when it is unset.
func getEnvOrUnknown(env string) string {
	val, ok := os.LookupEnv(env)
	if !ok {
		return unknown
	}
	return val
}
// getRuntime maps the host OS to the App Service runtime detection routine
// for that platform; unrecognized platforms report "unknown".
func getRuntime(websiteOS string) string {
	switch websiteOS {
	case "windows":
		return getWindowsRuntime()
	case "linux", "darwin":
		return getLinuxRuntime()
	}
	return unknown
}
// getWindowsRuntime infers the runtime of a Windows App Service from env vars.
func getWindowsRuntime() string {
	switch {
	case os.Getenv("WEBSITE_STACK") == "JAVA":
		return javaFramework
	case os.Getenv("WEBSITE_NODE_DEFAULT_VERSION") != "":
		return nodeFramework
	default:
		// FIXME: Windows AAS only supports Java, Node, and .NET so we can infer this
		// Needs to be inferred because no other env vars give us context on the runtime
		return dotnetFramework
	}
}
func getLinuxRuntime() (rt string) {
rt = unknown
switch os.Getenv("WEBSITE_STACK") {
case "DOCKER":
rt = containerFramework
case "":
if val := os.Getenv("DOCKER_SERVER_VERSION"); val != "" {
rt = containerFramework
}
case "NODE":
rt = nodeFramework
case "PYTHON":
rt = pythonFramework
case "JAVA", "TOMCAT":
rt = javaFramework
case "DOTNETCORE":
rt = dotnetFramework
case "PHP":
rt = phpFramework
}
return rt
}
// parseAzureSubscriptionID extracts the subscription ID from WEBSITE_OWNER_NAME,
// whose format is "<subscription>+<rest>". It returns "" when no '+' is present.
func parseAzureSubscriptionID(subID string) string {
	if before, _, found := strings.Cut(subID, "+"); found {
		return before
	}
	return ""
}
// compileAzureResourceID builds the full Azure resource ID for a site.
// It returns "" unless all three components are non-empty.
func compileAzureResourceID(subID, resourceGroup, siteName string) string {
	if subID == "" || resourceGroup == "" || siteName == "" {
		return ""
	}
	return fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/microsoft.web/sites/%s",
		subID, resourceGroup, siteName)
}

View File

@@ -0,0 +1,8 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
// Package traceutil contains functions for extracting and processing traces. It should
// only import payload and nothing else.
package traceutil

View File

@@ -0,0 +1,356 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package traceutil
import (
"errors"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
const (
	// DefaultSpanName is the default name we assign a span if it's missing and we have no reasonable fallback
	DefaultSpanName = "unnamed_operation"
	// DefaultServiceName is the default name we assign a service if it's missing and we have no reasonable fallback
	DefaultServiceName = "unnamed-service"
)

// Maximum lengths enforced by the Normalize* functions below.
const (
	// MaxNameLen the maximum length a name can have
	MaxNameLen = 100
	// MaxServiceLen the maximum length a service can have
	MaxServiceLen = 100
	// MaxResourceLen the maximum length a resource can have
	MaxResourceLen = 5000
)

// Sentinel errors returned by the Normalize* functions to describe why an
// input was modified or replaced.
var (
	// ErrEmpty specifies that the passed input was empty.
	ErrEmpty = errors.New("empty")
	// ErrTooLong signifies that the input was too long.
	ErrTooLong = errors.New("too long")
	// ErrInvalid signifies that the input was invalid.
	ErrInvalid = errors.New("invalid")
)
// NormalizeName normalizes a span name and returns an error describing the reason
// (if any) why the name was modified: ErrEmpty for a missing name, ErrTooLong
// when it was truncated to MaxNameLen, ErrInvalid when parsing failed and the
// default span name was substituted.
func NormalizeName(name string) (string, error) {
	if name == "" {
		return DefaultSpanName, ErrEmpty
	}
	var truncErr error
	if len(name) > MaxNameLen {
		name = TruncateUTF8(name, MaxNameLen)
		truncErr = ErrTooLong
	}
	normalized, ok := normMetricNameParse(name)
	if !ok {
		return DefaultSpanName, ErrInvalid
	}
	return normalized, truncErr
}
// NormalizeService normalizes a span service and returns an error describing the reason
// (if any) why the name was modified. Empty or invalid services are replaced by
// a language-specific fallback; overlong ones are truncated to MaxServiceLen.
func NormalizeService(svc string, lang string) (string, error) {
	if svc == "" {
		return fallbackService(lang), ErrEmpty
	}
	var truncErr error
	if len(svc) > MaxServiceLen {
		svc = TruncateUTF8(svc, MaxServiceLen)
		truncErr = ErrTooLong
	}
	// We are normalizing just the tag value.
	normalized := NormalizeTagValue(svc)
	if normalized == "" {
		return fallbackService(lang), ErrInvalid
	}
	return normalized, truncErr
}
// NormalizePeerService normalizes a span's peer.service and returns an error describing
// the reason (if any) why the name was modified. Unlike NormalizeService, an
// empty or invalid value yields "" rather than a fallback name.
func NormalizePeerService(svc string) (string, error) {
	if svc == "" {
		return "", nil
	}
	var truncErr error
	if len(svc) > MaxServiceLen {
		svc = TruncateUTF8(svc, MaxServiceLen)
		truncErr = ErrTooLong
	}
	// We are normalizing just the tag value.
	normalized := NormalizeTagValue(svc)
	if normalized == "" {
		return "", ErrInvalid
	}
	return normalized, truncErr
}
// fallbackServiceNames is a cache of default service names to use
// when the span's service is unset or invalid.
var fallbackServiceNames sync.Map

// fallbackService returns the fallback service name ("unnamed-<lang>-service")
// for a service belonging to language lang, caching the built string so it is
// only constructed once per language.
func fallbackService(lang string) string {
	if lang == "" {
		return DefaultServiceName
	}
	if cached, ok := fallbackServiceNames.Load(lang); ok {
		return cached.(string)
	}
	var b strings.Builder
	b.WriteString("unnamed-")
	b.WriteString(lang)
	b.WriteString("-service")
	name := b.String()
	fallbackServiceNames.Store(lang, name)
	return name
}
// maxTagLength is the maximum number of characters a normalized tag keeps;
// normalize stops processing once this many characters have been accepted.
const maxTagLength = 200

// NormalizeTag applies some normalization to ensure the full tag_key:tag_value string matches the backend requirements.
func NormalizeTag(v string) string {
	// allowDigitStartChar=true: the key:value form tolerates a trimmed start.
	return normalize(v, true)
}

// NormalizeTagValue applies some normalization to ensure the tag value matches the backend requirements.
// It should be used for cases where we have just the tag_value as the input (instead of tag_key:tag_value).
func NormalizeTagValue(v string) string {
	return normalize(v, false)
}
// normalize lower-cases v and rewrites it so it only contains characters the
// backend accepts in tags (letters, digits, ':', '.', '/', '-'). Runs of
// illegal characters are collapsed into a single underscore, and the result is
// capped at maxTagLength accepted characters. When allowDigitStartChar is
// true, leading characters that cannot start a tag are trimmed instead of
// replaced. The cut bookkeeping below is order-sensitive; left as-is.
func normalize(v string, allowDigitStartChar bool) string {
	// Fast path: Check if the tag is valid and only contains ASCII characters,
	// if yes return it as-is right away. For most use-cases this reduces CPU usage.
	if isNormalizedASCIITag(v, allowDigitStartChar) {
		return v
	}
	// the algorithm works by creating a set of cuts marking start and end offsets in v
	// that have to be replaced with underscore (_)
	if len(v) == 0 {
		return ""
	}
	var (
		trim  int      // start character (if trimming)
		cuts  [][2]int // sections to discard: (start, end) pairs
		chars int      // number of characters processed
	)
	var (
		i    int  // current byte
		r    rune // current rune
		jump int  // tracks how many bytes the for range advances on its next iteration
	)
	tag := []byte(v)
	for i, r = range v {
		jump = utf8.RuneLen(r) // next i will be i+jump
		if r == utf8.RuneError {
			// On invalid UTF-8, the for range advances only 1 byte (see: https://golang.org/ref/spec#For_range (point 2)).
			// However, utf8.RuneError is equivalent to unicode.ReplacementChar so we should rely on utf8.DecodeRune to tell
			// us whether this is an actual error or just a unicode.ReplacementChar that was present in the string.
			_, width := utf8.DecodeRune(tag[i:])
			jump = width
		}
		// fast path; all letters (and colons) are ok
		switch {
		case r >= 'a' && r <= 'z' || r == ':':
			chars++
			goto end
		case r >= 'A' && r <= 'Z':
			// lower-case in place: ASCII upper and lower have the same width
			tag[i] += 'a' - 'A'
			chars++
			goto end
		}
		if unicode.IsUpper(r) {
			// lowercase this character
			if low := unicode.ToLower(r); utf8.RuneLen(r) == utf8.RuneLen(low) {
				// but only if the width of the lowercased character is the same;
				// there are some rare edge-cases where this is not the case, such
				// as \u017F (ſ)
				utf8.EncodeRune(tag[i:], low)
				r = low
			}
		}
		switch {
		case unicode.IsLetter(r):
			chars++
		// If it's not a unicode letter, and it's the first char, and digits are allowed for the start char,
		// we should goto end because the remaining cases are not valid for a start char.
		case allowDigitStartChar && chars == 0:
			trim = i + jump
			goto end
		case unicode.IsDigit(r) || r == '.' || r == '/' || r == '-':
			chars++
		default:
			// illegal character
			chars++
			if n := len(cuts); n > 0 && cuts[n-1][1] >= i {
				// merge intersecting cuts
				cuts[n-1][1] += jump
			} else {
				// start a new cut
				cuts = append(cuts, [2]int{i, i + jump})
			}
		}
	end:
		if i+jump >= 2*maxTagLength {
			// bail early if the tag contains a lot of non-letter/digit characters.
			// If a tag is test🍣🍣[...]🍣, then it's unlikely to be a properly formatted tag
			break
		}
		if chars >= maxTagLength {
			// we've reached the maximum
			break
		}
	}
	tag = tag[trim : i+jump] // trim start and end
	if len(cuts) == 0 {
		// tag was ok, return it as it is
		return string(tag)
	}
	// Apply the recorded cuts: each cut becomes a single '_', and offsets of
	// later cuts are shifted left by the bytes already removed (delta).
	delta := trim // cut offsets delta
	for _, cut := range cuts {
		// start and end of cut, including delta from previous cuts:
		start, end := cut[0]-delta, cut[1]-delta
		if end >= len(tag) {
			// this cut includes the end of the string; discard it
			// completely and finish the loop.
			tag = tag[:start]
			break
		}
		// replace the beginning of the cut with '_'
		tag[start] = '_'
		if end-start == 1 {
			// nothing to discard
			continue
		}
		// discard remaining characters in the cut
		copy(tag[start+1:], tag[end:])
		// shorten the slice
		tag = tag[:len(tag)-(end-start)+1]
		// count the new delta for future cuts
		delta += cut[1] - cut[0] - 1
	}
	return string(tag)
}
// This code is borrowed from dd-go metric normalization

// isAlpha reports whether b is an ASCII letter (fast path, no unicode tables).
func isAlpha(b byte) bool {
	switch {
	case b >= 'a' && b <= 'z':
		return true
	case b >= 'A' && b <= 'Z':
		return true
	}
	return false
}

// isAlphaNum reports whether b is an ASCII letter or digit.
func isAlphaNum(b byte) bool {
	if b >= '0' && b <= '9' {
		return true
	}
	return isAlpha(b)
}
// normMetricNameParse normalizes metric names with a parser instead of using
// garbage-creating string replacement routines. It keeps ASCII alphanumerics,
// collapses any other character into '_' (never doubled, never after '.'),
// turns '_' immediately before '.' into '.', strips leading non-letters and a
// trailing '_'. Returns false for an empty, overlong, or letterless name.
func normMetricNameParse(name string) (string, bool) {
	if name == "" || len(name) > MaxNameLen {
		return name, false
	}

	var i, ptr int
	// Stack-allocated output buffer; the result can never exceed MaxNameLen.
	var resa [MaxNameLen]byte
	res := resa[:0]

	// skip non-alphabetic characters
	for ; i < len(name) && !isAlpha(name[i]); i++ {
		continue
	}

	// if there were no alphabetic characters it wasn't valid
	if i == len(name) {
		return "", false
	}

	for ; i < len(name); i++ {
		switch {
		case isAlphaNum(name[i]):
			res = append(res, name[i])
			ptr++
		case name[i] == '.':
			// we skipped all non-alpha chars up front so we have seen at least one
			switch res[ptr-1] {
			// overwrite underscores that happen before periods
			case '_':
				res[ptr-1] = '.'
			default:
				res = append(res, '.')
				ptr++
			}
		default:
			// we skipped all non-alpha chars up front so we have seen at least one
			switch res[ptr-1] {
			// no double underscores, no underscores after periods
			case '.', '_':
			default:
				res = append(res, '_')
				ptr++
			}
		}
	}

	// Drop a single trailing underscore left by a run of illegal characters.
	if res[ptr-1] == '_' {
		res = res[:ptr-1]
	}

	return string(res), true
}
// isNormalizedASCIITag reports whether tag is already in normalized form:
// within maxTagLength, composed only of valid ASCII tag characters, with
// every '_' followed by a valid (non-underscore) character. When
// checkValidStartChar is true, the first byte must also be a valid start
// character. Used as the fast path of normalize.
func isNormalizedASCIITag(tag string, checkValidStartChar bool) bool {
	switch {
	case len(tag) == 0:
		return true
	case len(tag) > maxTagLength:
		return false
	}
	start := 0
	if checkValidStartChar {
		if !isValidASCIIStartChar(tag[0]) {
			return false
		}
		start = 1
	}
	for pos := start; pos < len(tag); pos++ {
		c := tag[pos]
		// TODO: Attempt to optimize this check using SIMD/vectorization.
		if isValidASCIITagChar(c) {
			continue
		}
		if c != '_' {
			return false
		}
		// an underscore is only okay if followed by a valid non-underscore character
		pos++
		if pos == len(tag) || !isValidASCIITagChar(tag[pos]) {
			return false
		}
	}
	return true
}
// isValidASCIIStartChar reports whether c may start a normalized tag:
// a lowercase ASCII letter or ':'.
func isValidASCIIStartChar(c byte) bool {
	if c >= 'a' && c <= 'z' {
		return true
	}
	return c == ':'
}

// isValidASCIITagChar reports whether c may appear anywhere in a normalized
// tag: a valid start character, a digit, '.', '/', or '-'.
func isValidASCIITagChar(c byte) bool {
	switch {
	case isValidASCIIStartChar(c):
		return true
	case c >= '0' && c <= '9':
		return true
	case c == '.' || c == '/' || c == '-':
		return true
	}
	return false
}

View File

@@ -0,0 +1,329 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package traceutil
import (
"context"
"encoding/binary"
"strings"
"github.com/DataDog/datadog-agent/pkg/trace/log"
"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes"
"github.com/DataDog/opentelemetry-mapping-go/pkg/otlp/attributes/source"
"go.opentelemetry.io/collector/pdata/pcommon"
"go.opentelemetry.io/collector/pdata/ptrace"
semconv117 "go.opentelemetry.io/collector/semconv/v1.17.0"
semconv "go.opentelemetry.io/collector/semconv/v1.6.1"
"go.opentelemetry.io/otel/attribute"
)
// Util functions for converting OTel semantics to DD semantics.
// TODO(OTEL-1726): reuse the same mapping code for ReceiveResourceSpans and Concentrator

var (
	// SignalTypeSet is the OTel attribute set for traces; it is passed to the
	// attributes translator to identify the signal being resolved.
	SignalTypeSet = attribute.NewSet(attribute.String("signal", "traces"))
)
// IndexOTelSpans iterates over the input OTel spans and returns 3 maps:
// OTel spans indexed by span ID, OTel resources indexed by span ID, OTel instrumentation scopes indexed by span ID.
// Skips spans with invalid trace ID or span ID. If there are multiple spans with the same (non-zero) span ID, the last one wins.
func IndexOTelSpans(traces ptrace.Traces) (map[pcommon.SpanID]ptrace.Span, map[pcommon.SpanID]pcommon.Resource, map[pcommon.SpanID]pcommon.InstrumentationScope) {
	spanByID := map[pcommon.SpanID]ptrace.Span{}
	resByID := map[pcommon.SpanID]pcommon.Resource{}
	scopeByID := map[pcommon.SpanID]pcommon.InstrumentationScope{}
	resSpans := traces.ResourceSpans()
	for i := 0; i < resSpans.Len(); i++ {
		rs := resSpans.At(i)
		resource := rs.Resource()
		scopeSpans := rs.ScopeSpans()
		for j := 0; j < scopeSpans.Len(); j++ {
			ss := scopeSpans.At(j)
			spans := ss.Spans()
			for k := 0; k < spans.Len(); k++ {
				span := spans.At(k)
				if span.TraceID().IsEmpty() || span.SpanID().IsEmpty() {
					continue
				}
				id := span.SpanID()
				spanByID[id] = span
				resByID[id] = resource
				scopeByID[id] = ss.Scope()
			}
		}
	}
	return spanByID, resByID, scopeByID
}
// GetTopLevelOTelSpans returns the span IDs of the top level OTel spans.
// A span is top level when it is a root, when topLevelByKind is set and its
// kind is server/consumer, when its parent is missing from the chunk, or when
// its parent belongs to a different service.
func GetTopLevelOTelSpans(spanByID map[pcommon.SpanID]ptrace.Span, resByID map[pcommon.SpanID]pcommon.Resource, topLevelByKind bool) map[pcommon.SpanID]struct{} {
	topLevel := make(map[pcommon.SpanID]struct{})
	for spanID, span := range spanByID {
		if span.ParentSpanID().IsEmpty() {
			// case 1: root span
			topLevel[spanID] = struct{}{}
			continue
		}
		if topLevelByKind {
			// New behavior for computing top level OTel spans, see computeTopLevelAndMeasured in pkg/trace/api/otlp.go
			if kind := span.Kind(); kind == ptrace.SpanKindServer || kind == ptrace.SpanKindConsumer {
				// span is a server-side span, mark as top level
				topLevel[spanID] = struct{}{}
			}
			continue
		}
		// Otherwise, fall back to old behavior in ComputeTopLevel
		parent, found := spanByID[span.ParentSpanID()]
		if !found {
			// case 2: parent span not in the same chunk, presumably it belongs to another service
			topLevel[spanID] = struct{}{}
			continue
		}
		childSvc := GetOTelService(span, resByID[spanID], true)
		parentSvc := GetOTelService(parent, resByID[parent.SpanID()], true)
		if childSvc != parentSvc {
			// case 3: parent is not in the same service
			topLevel[spanID] = struct{}{}
		}
	}
	return topLevel
}
// GetOTelAttrVal returns the matched value as a string in the input map with the given keys.
// If there are multiple keys present, the first matched one is returned.
// If normalize is true, normalize the return value with NormalizeTagValue.
func GetOTelAttrVal(attrs pcommon.Map, normalize bool, keys ...string) string {
	val := ""
	for _, key := range keys {
		if attrval, exists := attrs.Get(key); exists {
			val = attrval.AsString()
			// Stop at the first present key so that earlier keys take
			// precedence, as documented. The previous implementation kept
			// scanning and effectively returned the LAST matching key,
			// contradicting both this doc comment and callers that pass the
			// preferred (newer-convention) key first.
			break
		}
	}
	if normalize {
		val = NormalizeTagValue(val)
	}
	return val
}
// GetOTelAttrValInResAndSpanAttrs returns the matched value as a string in the OTel resource attributes and span attributes with the given keys.
// If there are multiple keys present, the first matched one is returned.
// If the key is present in both resource attributes and span attributes, resource attributes take precedence.
// If normalize is true, normalize the return value with NormalizeTagValue.
func GetOTelAttrValInResAndSpanAttrs(span ptrace.Span, res pcommon.Resource, normalize bool, keys ...string) string {
	val := GetOTelAttrVal(res.Attributes(), normalize, keys...)
	if val == "" {
		val = GetOTelAttrVal(span.Attributes(), normalize, keys...)
	}
	return val
}
// GetOTelSpanType returns the DD span type based on OTel span kind and attributes.
// An explicit "span.type" attribute wins; otherwise the type is inferred from
// the span kind (and, for clients, the db.system attribute).
func GetOTelSpanType(span ptrace.Span, res pcommon.Resource) string {
	if typ := GetOTelAttrValInResAndSpanAttrs(span, res, false, "span.type"); typ != "" {
		return typ
	}
	switch span.Kind() {
	case ptrace.SpanKindServer:
		return "web"
	case ptrace.SpanKindClient:
		switch db := GetOTelAttrValInResAndSpanAttrs(span, res, true, semconv.AttributeDBSystem); {
		case db == "redis" || db == "memcached":
			return "cache"
		case db != "":
			return "db"
		default:
			return "http"
		}
	default:
		return "custom"
	}
}
// GetOTelService returns the DD service name based on OTel span and resource attributes.
// A missing service.name yields the sentinel "otlpresourcenoservicename"; when
// normalize is set, the result is additionally passed through NormalizeService.
func GetOTelService(span ptrace.Span, res pcommon.Resource, normalize bool) string {
	// No need to normalize with NormalizeTagValue since we will do NormalizeService later
	svc := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeServiceName)
	if svc == "" {
		svc = "otlpresourcenoservicename"
	}
	if !normalize {
		return svc
	}
	newsvc, err := NormalizeService(svc, "")
	switch err {
	case ErrTooLong:
		log.Debugf("Fixing malformed trace. Service is too long (reason:service_truncate), truncating span.service to length=%d: %s", MaxServiceLen, svc)
	case ErrInvalid:
		log.Debugf("Fixing malformed trace. Service is invalid (reason:service_invalid), replacing invalid span.service=%s with fallback span.service=%s", svc, newsvc)
	}
	return newsvc
}
// GetOTelResource returns the DD resource name based on OTel span and resource attributes.
// Precedence: explicit "resource.name" attribute, then HTTP method (+route),
// then messaging operation (+destination), then RPC method (+service), then
// the span name. The result is capped at MaxResourceLen.
func GetOTelResource(span ptrace.Span, res pcommon.Resource) (resName string) {
	resName = GetOTelAttrValInResAndSpanAttrs(span, res, false, "resource.name")
	if resName == "" {
		if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, "http.request.method", semconv.AttributeHTTPMethod); m != "" {
			// use the HTTP method + route (if available)
			resName = m
			if route := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeHTTPRoute); route != "" {
				resName = resName + " " + route
			}
		} else if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeMessagingOperation); m != "" {
			resName = m
			// use the messaging operation
			if dest := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeMessagingDestination, semconv117.AttributeMessagingDestinationName); dest != "" {
				resName = resName + " " + dest
			}
		} else if m := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeRPCMethod); m != "" {
			resName = m
			// use the RPC method
			// BUG FIX: the original tested `m != ""` here, but m is the RPC
			// method and is already known to be non-empty, so an absent RPC
			// service appended an empty string with a trailing space. Test the
			// service value itself, matching the "...and service if available"
			// intent and the route/destination branches above.
			if svc := GetOTelAttrValInResAndSpanAttrs(span, res, false, semconv.AttributeRPCService); svc != "" {
				// ...and service if available
				resName = resName + " " + svc
			}
		} else {
			resName = span.Name()
		}
	}
	if len(resName) > MaxResourceLen {
		resName = resName[:MaxResourceLen]
	}
	return
}
// GetOTelOperationName returns the DD operation name based on OTel span and resource attributes and given configs.
// Precedence: explicit "operation.name" attribute; otherwise either the span
// name (spanNameAsResourceName) or "<scope>.<kind>" / "opentelemetry.<kind>".
// spanNameRemappings is applied afterwards, and the result is optionally
// passed through NormalizeName.
func GetOTelOperationName(
	span ptrace.Span,
	res pcommon.Resource,
	lib pcommon.InstrumentationScope,
	spanNameAsResourceName bool,
	spanNameRemappings map[string]string,
	normalize bool) string {
	// No need to normalize with NormalizeTagValue since we will do NormalizeName later
	name := GetOTelAttrValInResAndSpanAttrs(span, res, false, "operation.name")
	if name == "" {
		if spanNameAsResourceName {
			name = span.Name()
		} else {
			name = strings.ToLower(span.Kind().String())
			if lib.Name() != "" {
				name = lib.Name() + "." + name
			} else {
				name = "opentelemetry." + name
			}
		}
	}
	if v, ok := spanNameRemappings[name]; ok {
		name = v
	}
	if normalize {
		normalizeName, err := NormalizeName(name)
		switch err {
		case ErrEmpty:
			log.Debugf("Fixing malformed trace. Name is empty (reason:span_name_empty), setting span.name=%s", normalizeName)
		case ErrTooLong:
			// BUG FIX: this log line previously reported MaxServiceLen, but
			// NormalizeName truncates span names to MaxNameLen (the values
			// happen to be equal today, yet the wrong constant would silently
			// log incorrect information if they ever diverge).
			log.Debugf("Fixing malformed trace. Name is too long (reason:span_name_truncate), truncating span.name to length=%d", MaxNameLen)
		case ErrInvalid:
			log.Debugf("Fixing malformed trace. Name is invalid (reason:span_name_invalid), setting span.name=%s", normalizeName)
		}
		name = normalizeName
	}
	return name
}
// GetOTelHostname returns the DD hostname based on OTel span and resource attributes.
// It first asks the attributes translator, then falls back to the "_dd.hostname"
// attribute, then to fallbackHost. Non-hostname sources (serverless) yield "".
func GetOTelHostname(span ptrace.Span, res pcommon.Resource, tr *attributes.Translator, fallbackHost string) string {
	src, ok := tr.ResourceToSource(context.Background(), res, SignalTypeSet)
	if !ok {
		v := GetOTelAttrValInResAndSpanAttrs(span, res, false, "_dd.hostname")
		if v == "" {
			// fallback hostname from Agent conf.Hostname
			return fallbackHost
		}
		src = source.Source{Kind: source.HostnameKind, Identifier: v}
	}
	if src.Kind == source.HostnameKind {
		return src.Identifier
	}
	// We are not on a hostname (serverless), hence the hostname is empty
	return ""
}
// GetOTelStatusCode returns the DD status code of the OTel span, preferring
// the stable "http.response.status_code" attribute over the deprecated
// semconv key, and 0 when neither is set.
func GetOTelStatusCode(span ptrace.Span) uint32 {
	attrs := span.Attributes()
	for _, key := range []string{"http.response.status_code", semconv.AttributeHTTPStatusCode} {
		if code, ok := attrs.Get(key); ok {
			return uint32(code.Int())
		}
	}
	return 0
}
// GetOTelContainerTags returns a list of DD container tags in the OTel resource attributes.
// Tags are always normalized.
func GetOTelContainerTags(rattrs pcommon.Map, tagKeys []string) []string {
	var tags []string
	tagsMap := attributes.ContainerTagsFromResourceAttributes(rattrs)
	for _, key := range tagKeys {
		mappedKey, hasMapping := attributes.ContainerMappings[key]
		if hasMapping {
			// If the key has a mapping in ContainerMappings, use the mapped key
			if val, ok := tagsMap[mappedKey]; ok {
				tags = append(tags, NormalizeTag(mappedKey+":"+val))
			}
			continue
		}
		// Otherwise populate as additional container tags
		if val := GetOTelAttrVal(rattrs, false, key); val != "" {
			tags = append(tags, NormalizeTag(key+":"+val))
		}
	}
	return tags
}
// OTelTraceIDToUint64 converts an OTel trace ID to an uint64 by taking the
// lower (last) 8 bytes of the 16-byte ID, interpreted big-endian.
func OTelTraceIDToUint64(b [16]byte) uint64 {
	return binary.BigEndian.Uint64(b[8:16])
}
// OTelSpanIDToUint64 converts an OTel span ID to an uint64, interpreting the
// 8 bytes as a big-endian integer.
func OTelSpanIDToUint64(b [8]byte) uint64 {
	return binary.BigEndian.Uint64(b[0:8])
}
// spanKindNames maps each OTel span kind to its Datadog span.kind tag value.
var spanKindNames = map[ptrace.SpanKind]string{
	ptrace.SpanKindUnspecified: "unspecified",
	ptrace.SpanKindInternal:    "internal",
	ptrace.SpanKindServer:      "server",
	ptrace.SpanKindClient:      "client",
	ptrace.SpanKindProducer:    "producer",
	ptrace.SpanKindConsumer:    "consumer",
}

// OTelSpanKindName converts the given SpanKind to a valid Datadog span kind name.
// Unknown kinds map to "unspecified".
func OTelSpanKindName(k ptrace.SpanKind) string {
	if name, ok := spanKindNames[k]; ok {
		return name
	}
	return "unspecified"
}

View File

@@ -0,0 +1,53 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016-present Datadog, Inc.
package traceutil
import (
pb "github.com/DataDog/datadog-agent/pkg/proto/pbgo/trace"
)
// ProcessedTrace represents a trace being processed in the agent.
type ProcessedTrace struct {
	TraceChunk             *pb.TraceChunk // the chunk of spans being processed
	Root                   *pb.Span       // root span of the chunk (cloned by Clone) — presumably precomputed by the caller; confirm at call sites
	TracerEnv              string         // env reported by the tracer
	AppVersion             string         // application version tag
	TracerHostname         string         // hostname reported by the tracer
	ClientDroppedP0sWeight float64        // weight of P0 chunks dropped client-side — TODO confirm exact semantics at producer
	GitCommitSha           string         // git commit SHA tag
	ImageTag               string         // container image tag
}
// Clone creates a copy of ProcessedTrace, cloning p, p.TraceChunk, and p.Root. This means it is
// safe to modify the returned ProcessedTrace's (pt's) fields along with fields in
// pt.TraceChunk and fields in pt.Root.
//
// The most important consequence of this is that the TraceChunk's Spans field can be assigned,
// *BUT* the Spans value itself should not be modified. i.e. This is ok:
//
//	pt2 := pt.Clone()
//	pt2.TraceChunk.Spans = make([]*pb.Span)
//
// but this is NOT ok:
//
//	pt2 := pt.Clone()
//	pt2.TraceChunk.Spans[0] = &pb.Span{} // This will be visible in pt.
func (pt *ProcessedTrace) Clone() *ProcessedTrace {
	if pt == nil {
		return nil
	}
	// Copy all scalar fields, then replace the two pointer fields with
	// shallow copies so the clone owns its own TraceChunk and Root structs.
	clone := *pt
	if pt.TraceChunk != nil {
		clone.TraceChunk = pt.TraceChunk.ShallowCopy()
	}
	if pt.Root != nil {
		clone.Root = pt.Root.ShallowCopy()
	}
	return &clone
}

Some files were not shown because too many files have changed in this diff Show More