diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 0000000..fcd1496 --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1,13 @@ +# These are supported funding model platforms + +github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] +patreon: # Replace with a single Patreon username +open_collective: monibuca +ko_fi: # Replace with a single Ko-fi username +tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel +community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry +liberapay: # Replace with a single Liberapay username +issuehunt: # Replace with a single IssueHunt username +otechie: # Replace with a single Otechie username +lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry +custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml new file mode 100644 index 0000000..714748a --- /dev/null +++ b/.github/workflows/go.yml @@ -0,0 +1,96 @@ +name: Go + +on: + push: + branches: + - v5 +env: + dest: bin +jobs: + + build: + runs-on: ubuntu-latest + steps: + + - uses: actions/checkout@v2 + with: + fetch-depth: 1 + + - name: Set up Env + run: echo "version=${GITHUB_REF:11}" >> $GITHUB_ENV + - name: Set beta + if: contains(env.version, 'beta') + run: echo "dest=beta" >> $GITHUB_ENV + - name: Set up Go + uses: actions/setup-go@v2 + with: + go-version: 1.23.4 + + - name: Cache Go modules + uses: actions/cache@v1 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-go- + + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v2 + if: success() && startsWith(github.ref, 'refs/tags/') + with: + version: v1.8.3 + args: release --rm-dist + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + # - name: Checkout m7s-import + # uses: actions/checkout@v3 + # with: + # 
repository: langhuihui/m7s-import + # path: m7s-import + # persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal access token. + # fetch-depth: 0 + + # - name: Add bin to m7s-import + # if: success() && startsWith(github.ref, 'refs/tags/') + # run: | + # cd m7s-import + # mkdir -p apps/m7s-website/src/public/bin + # cp ../dist/m7s_${{ env.version }}_windows_amd64.tar.gz apps/m7s-website/src/public/bin/m7s_windows_amd64.tar.gz + # cp ../dist/m7s_${{ env.version }}_darwin_amd64.tar.gz apps/m7s-website/src/public/bin/m7s_darwin_amd64.tar.gz + # cp ../dist/m7s_${{ env.version }}_darwin_arm64.tar.gz apps/m7s-website/src/public/bin/m7s_darwin_arm64.tar.gz + # cp ../dist/m7s_${{ env.version }}_linux_amd64.tar.gz apps/m7s-website/src/public/bin/m7s_linux_amd64.tar.gz + # cp ../dist/m7s_${{ env.version }}_linux_arm64.tar.gz apps/m7s-website/src/public/bin/m7s_linux_arm64.tar.gz + # ls apps/m7s-website/src/public/bin + - name: copy + if: success() && startsWith(github.ref, 'refs/tags/') + run: | + mkdir -p bin + cp dist/m7s_${{ env.version }}_windows_amd64.tar.gz bin/m7s_windows_amd64.tar.gz + cp dist/m7s_${{ env.version }}_darwin_amd64.tar.gz bin/m7s_darwin_amd64.tar.gz + cp dist/m7s_${{ env.version }}_darwin_arm64.tar.gz bin/m7s_darwin_arm64.tar.gz + cp dist/m7s_${{ env.version }}_linux_amd64.tar.gz bin/m7s_linux_amd64.tar.gz + cp dist/m7s_${{ env.version }}_linux_arm64.tar.gz bin/m7s_linux_arm64.tar.gz + ls bin + - uses: jakejarvis/s3-sync-action@master + # with: + # args: --acl public-read --follow-symlinks --delete + env: + AWS_S3_ENDPOINT: https://${{ secrets.R2_DOMAIN }} + AWS_ACCESS_KEY_ID: ${{ secrets.R2_KEY }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.R2_SECRET }} + AWS_S3_BUCKET: monibuca + SOURCE_DIR: 'bin' + DEST_DIR: ${{ env.dest }} + - name: docker build + if: success() && startsWith(github.ref, 'refs/tags/') + run: | + tar -zxvf bin/m7s_linux_amd64.tar.gz + mv m7s monibuca_linux + docker login -u langhuihui -p ${{ 
secrets.DOCKER_PASSWORD }} + docker build -t langhuihui/monibuca:v5 . + docker push langhuihui/monibuca:v5 + - name: docker push + if: success() && !contains(env.version, 'beta') + run: | + docker tag langhuihui/monibuca:latest langhuihui/monibuca:${{ env.version }} + docker push langhuihui/monibuca:${{ env.version }} \ No newline at end of file diff --git a/api.go b/api.go index 6e08200..062e72a 100644 --- a/api.go +++ b/api.go @@ -740,6 +740,28 @@ func (s *Server) AddPullProxy(ctx context.Context, req *pb.PullProxyInfo) (res * Description: req.Description, StreamPath: req.StreamPath, } + if device.Type == "" { + var u *url.URL + u, err = url.Parse(req.PullURL) + if err != nil { + s.Error("parse pull url failed", "error", err) + return + } + switch u.Scheme { + case "srt", "rtsp", "rtmp": + device.Type = u.Scheme + default: + ext := filepath.Ext(u.Path) + switch ext { + case ".m3u8": + device.Type = "hls" + case ".flv": + device.Type = "flv" + case ".mp4": + device.Type = "mp4" + } + } + } defaults.SetDefaults(&device.Pull) defaults.SetDefaults(&device.Record) device.URL = req.PullURL @@ -763,11 +785,36 @@ func (s *Server) UpdatePullProxy(ctx context.Context, req *pb.PullProxyInfo) (re return } target := &PullProxy{} - s.DB.First(target, req.ID) + err = s.DB.First(target, req.ID).Error + if err != nil { + return + } target.Name = req.Name target.URL = req.PullURL target.ParentID = uint(req.ParentID) target.Type = req.Type + if target.Type == "" { + var u *url.URL + u, err = url.Parse(req.PullURL) + if err != nil { + s.Error("parse pull url failed", "error", err) + return + } + switch u.Scheme { + case "srt", "rtsp", "rtmp": + target.Type = u.Scheme + default: + ext := filepath.Ext(u.Path) + switch ext { + case ".m3u8": + target.Type = "hls" + case ".flv": + target.Type = "flv" + case ".mp4": + target.Type = "mp4" + } + } + } target.PullOnStart = req.PullOnStart target.StopOnIdle = req.StopOnIdle target.Audio = req.Audio @@ -777,6 +824,24 @@ func (s *Server) 
UpdatePullProxy(ctx context.Context, req *pb.PullProxyInfo) (re target.RTT = time.Duration(int(req.Rtt)) * time.Millisecond target.StreamPath = req.StreamPath s.DB.Save(target) + s.PullProxies.Call(func() error { + if device, ok := s.PullProxies.Get(uint(req.ID)); ok { + if target.URL != device.URL || device.Audio != target.Audio || device.StreamPath != target.StreamPath || device.Record.FilePath != target.Record.FilePath || device.Record.Fragment != target.Record.Fragment { + device.Stop(task.ErrStopByUser) + device.WaitStopped() + s.PullProxies.Add(target) + return nil + } + if device.PullOnStart != target.PullOnStart && target.PullOnStart && device.Handler != nil && device.Status == PullProxyStatusOnline { + device.Handler.Pull() + } + device.Name = target.Name + device.PullOnStart = target.PullOnStart + device.StopOnIdle = target.StopOnIdle + device.Description = target.Description + } + return nil + }) res = &pb.SuccessResponse{} return } @@ -951,6 +1016,30 @@ func (s *Server) AddPushProxy(ctx context.Context, req *pb.PushProxyInfo) (res * Description: req.Description, StreamPath: req.StreamPath, } + + if device.Type == "" { + var u *url.URL + u, err = url.Parse(req.PushURL) + if err != nil { + s.Error("parse push url failed", "error", err) + return + } + switch u.Scheme { + case "srt", "rtsp", "rtmp": + device.Type = u.Scheme + default: + ext := filepath.Ext(u.Path) + switch ext { + case ".m3u8": + device.Type = "hls" + case ".flv": + device.Type = "flv" + case ".mp4": + device.Type = "mp4" + } + } + } + defaults.SetDefaults(&device.Push) device.URL = req.PushURL device.Audio = req.Audio @@ -975,6 +1064,28 @@ func (s *Server) UpdatePushProxy(ctx context.Context, req *pb.PushProxyInfo) (re target.URL = req.PushURL target.ParentID = uint(req.ParentID) target.Type = req.Type + if target.Type == "" { + var u *url.URL + u, err = url.Parse(req.PushURL) + if err != nil { + s.Error("parse push url failed", "error", err) + return + } + switch u.Scheme { + case "srt", 
"rtsp", "rtmp": + target.Type = u.Scheme + default: + ext := filepath.Ext(u.Path) + switch ext { + case ".m3u8": + target.Type = "hls" + case ".flv": + target.Type = "flv" + case ".mp4": + target.Type = "mp4" + } + } + } target.PushOnStart = req.PushOnStart target.Audio = req.Audio target.Description = req.Description diff --git a/go.mod b/go.mod index f44941f..15b0ab1 100644 --- a/go.mod +++ b/go.mod @@ -129,6 +129,7 @@ require ( github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect github.com/gorilla/websocket v1.5.1 + github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd github.com/onsi/ginkgo/v2 v2.9.5 // indirect github.com/phsym/console-slog v0.3.1 github.com/prometheus/client_golang v1.20.4 diff --git a/go.sum b/go.sum index c000afe..aa39dd7 100644 --- a/go.sum +++ b/go.sum @@ -128,6 +128,8 @@ github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/husanpao/ip v0.0.0-20220711082147-73160bb611a8 h1:4Jk58quTZmzJcTrLlbB5L1Q6qXu49EIjCReWxcBFWKo= github.com/husanpao/ip v0.0.0-20220711082147-73160bb611a8/go.mod h1:medl9/CfYoQlqAXtAARmMW5dAX2UOdwwkhaszYPk0AM= +github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd h1:EVX1s+XNss9jkRW9K6XGJn2jL2lB1h5H804oKPsxOec= +github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/icholy/digest v0.1.22 h1:dRIwCjtAcXch57ei+F0HSb5hmprL873+q7PoVojdMzM= github.com/icholy/digest v0.1.22/go.mod h1:uLAeDdWKIWNFMH0wqbwchbTQOmJWhzSnL7zmqSPqEEc= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= diff --git a/goreleaser.yml b/goreleaser.yml new file mode 100644 index 0000000..2cf62be --- /dev/null +++ b/goreleaser.yml @@ -0,0 +1,24 @@ +project_name: m7s +archives: + - + files: + - 
favicon.ico +builds: + - id: "all" + main: ./example/default/main.go + env: + - CGO_ENABLED=0 + tags: + - sqlite + ldflags: + - -s -w -X main.version={{.Tag}} + goos: + - linux + - windows + - darwin + goarch: + - arm64 + - amd64 + hooks: + pre: + - go mod tidy \ No newline at end of file diff --git a/plugin/debug/index.go b/plugin/debug/index.go index a4a7287..976ffe8 100644 --- a/plugin/debug/index.go +++ b/plugin/debug/index.go @@ -1,23 +1,33 @@ package plugin_debug import ( - myproc "github.com/cloudwego/goref/pkg/proc" - "github.com/go-delve/delve/pkg/config" - "github.com/go-delve/delve/service/debugger" + "context" + "fmt" "io" - "m7s.live/v5" "net/http" "net/http/pprof" "os" + "runtime" runtimePPROF "runtime/pprof" + "sort" "strings" "time" + + myproc "github.com/cloudwego/goref/pkg/proc" + "github.com/go-delve/delve/pkg/config" + "github.com/go-delve/delve/service/debugger" + "google.golang.org/protobuf/types/known/emptypb" + "m7s.live/v5" + "m7s.live/v5/plugin/debug/pb" + debug "m7s.live/v5/plugin/debug/pkg" + "m7s.live/v5/plugin/debug/pkg/profile" ) -var _ = m7s.InstallPlugin[DebugPlugin]() +var _ = m7s.InstallPlugin[DebugPlugin](&pb.Api_ServiceDesc, pb.RegisterApiHandler) var conf, _ = config.LoadConfig() type DebugPlugin struct { + pb.UnimplementedApiServer m7s.Plugin ProfileDuration time.Duration `default:"10s" desc:"profile持续时间"` Profile string `desc:"采集profile存储文件"` @@ -114,3 +124,170 @@ func (p *DebugPlugin) Grf(w http.ResponseWriter, r *http.Request) { } w.Write([]byte("ok")) } + +func (p *DebugPlugin) GetHeap(ctx context.Context, empty *emptypb.Empty) (*pb.HeapResponse, error) { + // 创建临时文件用于存储堆信息 + f, err := os.CreateTemp("", "heap") + if err != nil { + return nil, err + } + defer os.Remove(f.Name()) + defer f.Close() + + // 获取堆信息 + runtime.GC() + if err := runtimePPROF.WriteHeapProfile(f); err != nil { + return nil, err + } + + // 读取堆信息 + f.Seek(0, 0) + prof, err := profile.Parse(f) + if err != nil { + return nil, err + } + + // 准备响应数据 + resp 
:= &pb.HeapResponse{ + Data: &pb.HeapData{ + Stats: &pb.HeapStats{}, + Objects: make([]*pb.HeapObject, 0), + Edges: make([]*pb.HeapEdge, 0), + }, + } + + // 创建类型映射用于聚合统计 + typeMap := make(map[string]*pb.HeapObject) + var totalSize int64 + + // 处理每个样本 + for _, sample := range prof.Sample { + size := sample.Value[1] // 内存大小 + if size == 0 { + continue + } + + // 获取分配类型信息 + var typeName string + if len(sample.Location) > 0 && len(sample.Location[0].Line) > 0 { + if fn := sample.Location[0].Line[0].Function; fn != nil { + typeName = fn.Name + } + } + + // 创建或更新堆对象 + obj, exists := typeMap[typeName] + if !exists { + obj = &pb.HeapObject{ + Type: typeName, + Address: fmt.Sprintf("%p", sample), + Refs: make([]string, 0), + } + typeMap[typeName] = obj + resp.Data.Objects = append(resp.Data.Objects, obj) + } + + obj.Count++ + obj.Size += size + totalSize += size + + // 构建引用关系 + for i := 1; i < len(sample.Location); i++ { + loc := sample.Location[i] + if len(loc.Line) == 0 || loc.Line[0].Function == nil { + continue + } + + callerName := loc.Line[0].Function.Name + // 跳过系统函数 + if callerName == "" || strings.HasPrefix(callerName, "runtime.") { + continue + } + + // 添加边 + edge := &pb.HeapEdge{ + From: callerName, + To: typeName, + FieldName: callerName, + } + resp.Data.Edges = append(resp.Data.Edges, edge) + + // 将调用者添加到引用列表 + if !contains(obj.Refs, callerName) { + obj.Refs = append(obj.Refs, callerName) + } + } + } + + // 计算百分比 + for _, obj := range resp.Data.Objects { + if totalSize > 0 { + obj.SizePerc = float64(obj.Size) / float64(totalSize) * 100 + } + } + + // 按大小排序 + sort.Slice(resp.Data.Objects, func(i, j int) bool { + return resp.Data.Objects[i].Size > resp.Data.Objects[j].Size + }) + + // 获取运行时内存统计 + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + + // 填充内存统计信息 + resp.Data.Stats.Alloc = ms.Alloc + resp.Data.Stats.TotalAlloc = ms.TotalAlloc + resp.Data.Stats.Sys = ms.Sys + resp.Data.Stats.NumGC = ms.NumGC + resp.Data.Stats.HeapAlloc = ms.HeapAlloc + 
resp.Data.Stats.HeapSys = ms.HeapSys + resp.Data.Stats.HeapIdle = ms.HeapIdle + resp.Data.Stats.HeapInuse = ms.HeapInuse + resp.Data.Stats.HeapReleased = ms.HeapReleased + resp.Data.Stats.HeapObjects = ms.HeapObjects + resp.Data.Stats.GcCPUFraction = ms.GCCPUFraction + + return resp, nil +} + +// 辅助函数:检查字符串切片是否包含特定字符串 +func contains(slice []string, str string) bool { + for _, s := range slice { + if s == str { + return true + } + } + return false +} + +func (p *DebugPlugin) GetHeapGraph(ctx context.Context, empty *emptypb.Empty) (*pb.HeapGraphResponse, error) { + // 创建临时文件用于存储堆信息 + f, err := os.CreateTemp("", "heap") + if err != nil { + return nil, err + } + defer os.Remove(f.Name()) + defer f.Close() + + // 获取堆信息 + runtime.GC() + if err := runtimePPROF.WriteHeapProfile(f); err != nil { + return nil, err + } + + // 读取堆信息 + f.Seek(0, 0) + profile, err := profile.Parse(f) + if err != nil { + return nil, err + } + // Generate dot graph. + dot, err := debug.GetDotGraph(profile) + if err != nil { + return nil, err + } + return &pb.HeapGraphResponse{ + Data: dot, + }, nil +} diff --git a/plugin/debug/pb/debug.pb.go b/plugin/debug/pb/debug.pb.go new file mode 100644 index 0000000..bfe8e90 --- /dev/null +++ b/plugin/debug/pb/debug.pb.go @@ -0,0 +1,709 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.19.1 +// source: debug.proto + +package pb + +import ( + _ "google.golang.org/genproto/googleapis/api/annotations" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + emptypb "google.golang.org/protobuf/types/known/emptypb" + _ "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type HeapObject struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Count int64 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"` + Size int64 `protobuf:"varint,3,opt,name=size,proto3" json:"size,omitempty"` + SizePerc float64 `protobuf:"fixed64,4,opt,name=sizePerc,proto3" json:"sizePerc,omitempty"` + Address string `protobuf:"bytes,5,opt,name=address,proto3" json:"address,omitempty"` + Refs []string `protobuf:"bytes,6,rep,name=refs,proto3" json:"refs,omitempty"` +} + +func (x *HeapObject) Reset() { + *x = HeapObject{} + if protoimpl.UnsafeEnabled { + mi := &file_debug_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeapObject) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeapObject) ProtoMessage() {} + +func (x *HeapObject) ProtoReflect() protoreflect.Message { + mi := &file_debug_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeapObject.ProtoReflect.Descriptor instead. 
+func (*HeapObject) Descriptor() ([]byte, []int) { + return file_debug_proto_rawDescGZIP(), []int{0} +} + +func (x *HeapObject) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *HeapObject) GetCount() int64 { + if x != nil { + return x.Count + } + return 0 +} + +func (x *HeapObject) GetSize() int64 { + if x != nil { + return x.Size + } + return 0 +} + +func (x *HeapObject) GetSizePerc() float64 { + if x != nil { + return x.SizePerc + } + return 0 +} + +func (x *HeapObject) GetAddress() string { + if x != nil { + return x.Address + } + return "" +} + +func (x *HeapObject) GetRefs() []string { + if x != nil { + return x.Refs + } + return nil +} + +type HeapStats struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Alloc uint64 `protobuf:"varint,1,opt,name=alloc,proto3" json:"alloc,omitempty"` + TotalAlloc uint64 `protobuf:"varint,2,opt,name=totalAlloc,proto3" json:"totalAlloc,omitempty"` + Sys uint64 `protobuf:"varint,3,opt,name=sys,proto3" json:"sys,omitempty"` + NumGC uint32 `protobuf:"varint,4,opt,name=numGC,proto3" json:"numGC,omitempty"` + HeapAlloc uint64 `protobuf:"varint,5,opt,name=heapAlloc,proto3" json:"heapAlloc,omitempty"` + HeapSys uint64 `protobuf:"varint,6,opt,name=heapSys,proto3" json:"heapSys,omitempty"` + HeapIdle uint64 `protobuf:"varint,7,opt,name=heapIdle,proto3" json:"heapIdle,omitempty"` + HeapInuse uint64 `protobuf:"varint,8,opt,name=heapInuse,proto3" json:"heapInuse,omitempty"` + HeapReleased uint64 `protobuf:"varint,9,opt,name=heapReleased,proto3" json:"heapReleased,omitempty"` + HeapObjects uint64 `protobuf:"varint,10,opt,name=heapObjects,proto3" json:"heapObjects,omitempty"` + GcCPUFraction float64 `protobuf:"fixed64,11,opt,name=gcCPUFraction,proto3" json:"gcCPUFraction,omitempty"` +} + +func (x *HeapStats) Reset() { + *x = HeapStats{} + if protoimpl.UnsafeEnabled { + mi := &file_debug_proto_msgTypes[1] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeapStats) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeapStats) ProtoMessage() {} + +func (x *HeapStats) ProtoReflect() protoreflect.Message { + mi := &file_debug_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeapStats.ProtoReflect.Descriptor instead. +func (*HeapStats) Descriptor() ([]byte, []int) { + return file_debug_proto_rawDescGZIP(), []int{1} +} + +func (x *HeapStats) GetAlloc() uint64 { + if x != nil { + return x.Alloc + } + return 0 +} + +func (x *HeapStats) GetTotalAlloc() uint64 { + if x != nil { + return x.TotalAlloc + } + return 0 +} + +func (x *HeapStats) GetSys() uint64 { + if x != nil { + return x.Sys + } + return 0 +} + +func (x *HeapStats) GetNumGC() uint32 { + if x != nil { + return x.NumGC + } + return 0 +} + +func (x *HeapStats) GetHeapAlloc() uint64 { + if x != nil { + return x.HeapAlloc + } + return 0 +} + +func (x *HeapStats) GetHeapSys() uint64 { + if x != nil { + return x.HeapSys + } + return 0 +} + +func (x *HeapStats) GetHeapIdle() uint64 { + if x != nil { + return x.HeapIdle + } + return 0 +} + +func (x *HeapStats) GetHeapInuse() uint64 { + if x != nil { + return x.HeapInuse + } + return 0 +} + +func (x *HeapStats) GetHeapReleased() uint64 { + if x != nil { + return x.HeapReleased + } + return 0 +} + +func (x *HeapStats) GetHeapObjects() uint64 { + if x != nil { + return x.HeapObjects + } + return 0 +} + +func (x *HeapStats) GetGcCPUFraction() float64 { + if x != nil { + return x.GcCPUFraction + } + return 0 +} + +type HeapData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Stats *HeapStats `protobuf:"bytes,1,opt,name=stats,proto3" 
json:"stats,omitempty"` + Objects []*HeapObject `protobuf:"bytes,2,rep,name=objects,proto3" json:"objects,omitempty"` + Edges []*HeapEdge `protobuf:"bytes,3,rep,name=edges,proto3" json:"edges,omitempty"` +} + +func (x *HeapData) Reset() { + *x = HeapData{} + if protoimpl.UnsafeEnabled { + mi := &file_debug_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeapData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeapData) ProtoMessage() {} + +func (x *HeapData) ProtoReflect() protoreflect.Message { + mi := &file_debug_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeapData.ProtoReflect.Descriptor instead. +func (*HeapData) Descriptor() ([]byte, []int) { + return file_debug_proto_rawDescGZIP(), []int{2} +} + +func (x *HeapData) GetStats() *HeapStats { + if x != nil { + return x.Stats + } + return nil +} + +func (x *HeapData) GetObjects() []*HeapObject { + if x != nil { + return x.Objects + } + return nil +} + +func (x *HeapData) GetEdges() []*HeapEdge { + if x != nil { + return x.Edges + } + return nil +} + +type HeapEdge struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + From string `protobuf:"bytes,1,opt,name=from,proto3" json:"from,omitempty"` + To string `protobuf:"bytes,2,opt,name=to,proto3" json:"to,omitempty"` + FieldName string `protobuf:"bytes,3,opt,name=fieldName,proto3" json:"fieldName,omitempty"` +} + +func (x *HeapEdge) Reset() { + *x = HeapEdge{} + if protoimpl.UnsafeEnabled { + mi := &file_debug_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeapEdge) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*HeapEdge) ProtoMessage() {} + +func (x *HeapEdge) ProtoReflect() protoreflect.Message { + mi := &file_debug_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeapEdge.ProtoReflect.Descriptor instead. +func (*HeapEdge) Descriptor() ([]byte, []int) { + return file_debug_proto_rawDescGZIP(), []int{3} +} + +func (x *HeapEdge) GetFrom() string { + if x != nil { + return x.From + } + return "" +} + +func (x *HeapEdge) GetTo() string { + if x != nil { + return x.To + } + return "" +} + +func (x *HeapEdge) GetFieldName() string { + if x != nil { + return x.FieldName + } + return "" +} + +type HeapResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Data *HeapData `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *HeapResponse) Reset() { + *x = HeapResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_debug_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeapResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeapResponse) ProtoMessage() {} + +func (x *HeapResponse) ProtoReflect() protoreflect.Message { + mi := &file_debug_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeapResponse.ProtoReflect.Descriptor instead. 
+func (*HeapResponse) Descriptor() ([]byte, []int) { + return file_debug_proto_rawDescGZIP(), []int{4} +} + +func (x *HeapResponse) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *HeapResponse) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *HeapResponse) GetData() *HeapData { + if x != nil { + return x.Data + } + return nil +} + +type HeapGraphResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Data string `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` +} + +func (x *HeapGraphResponse) Reset() { + *x = HeapGraphResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_debug_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeapGraphResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeapGraphResponse) ProtoMessage() {} + +func (x *HeapGraphResponse) ProtoReflect() protoreflect.Message { + mi := &file_debug_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeapGraphResponse.ProtoReflect.Descriptor instead. 
+func (*HeapGraphResponse) Descriptor() ([]byte, []int) { + return file_debug_proto_rawDescGZIP(), []int{5} +} + +func (x *HeapGraphResponse) GetCode() uint32 { + if x != nil { + return x.Code + } + return 0 +} + +func (x *HeapGraphResponse) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *HeapGraphResponse) GetData() string { + if x != nil { + return x.Data + } + return "" +} + +var File_debug_proto protoreflect.FileDescriptor + +var file_debug_proto_rawDesc = []byte{ + 0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x64, + 0x65, 0x62, 0x75, 0x67, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, + 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x94, 0x01, 0x0a, 0x0a, 0x48, 0x65, 0x61, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x73, 0x69, 0x7a, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x73, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x72, 0x63, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x08, 0x73, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x72, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, + 0x72, 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, + 0x65, 0x73, 
0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x65, 0x66, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x04, 0x72, 0x65, 0x66, 0x73, 0x22, 0xc7, 0x02, 0x0a, 0x09, 0x48, 0x65, 0x61, 0x70, + 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x12, 0x1e, 0x0a, 0x0a, 0x74, + 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x12, 0x10, 0x0a, 0x03, 0x73, + 0x79, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x73, 0x79, 0x73, 0x12, 0x14, 0x0a, + 0x05, 0x6e, 0x75, 0x6d, 0x47, 0x43, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6e, 0x75, + 0x6d, 0x47, 0x43, 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x70, 0x41, 0x6c, 0x6c, 0x6f, 0x63, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x68, 0x65, 0x61, 0x70, 0x41, 0x6c, 0x6c, 0x6f, + 0x63, 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x70, 0x53, 0x79, 0x73, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x07, 0x68, 0x65, 0x61, 0x70, 0x53, 0x79, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x68, + 0x65, 0x61, 0x70, 0x49, 0x64, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x68, + 0x65, 0x61, 0x70, 0x49, 0x64, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x70, 0x49, + 0x6e, 0x75, 0x73, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x68, 0x65, 0x61, 0x70, + 0x49, 0x6e, 0x75, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x70, 0x52, 0x65, 0x6c, + 0x65, 0x61, 0x73, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0c, 0x68, 0x65, 0x61, + 0x70, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x68, 0x65, 0x61, + 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, + 0x68, 0x65, 0x61, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x67, + 0x63, 0x43, 0x50, 0x55, 0x46, 0x72, 0x61, 0x63, 
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x0d, 0x67, 0x63, 0x43, 0x50, 0x55, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x86, 0x01, 0x0a, 0x08, 0x48, 0x65, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x12, 0x26, + 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x07, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, + 0x48, 0x65, 0x61, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x64, 0x67, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x70, 0x45, + 0x64, 0x67, 0x65, 0x52, 0x05, 0x65, 0x64, 0x67, 0x65, 0x73, 0x22, 0x4c, 0x0a, 0x08, 0x48, 0x65, + 0x61, 0x70, 0x45, 0x64, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x12, 0x0e, 0x0a, 0x02, 0x74, 0x6f, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x74, 0x6f, 0x12, 0x1c, 0x0a, 0x09, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x61, 0x0a, 0x0c, 0x48, 0x65, 0x61, 0x70, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, 0x48, 
0x65, 0x61, + 0x70, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x55, 0x0a, 0x11, 0x48, + 0x65, 0x61, 0x70, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, + 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, + 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, + 0x74, 0x61, 0x32, 0xb7, 0x01, 0x0a, 0x03, 0x61, 0x70, 0x69, 0x12, 0x4f, 0x0a, 0x07, 0x47, 0x65, + 0x74, 0x48, 0x65, 0x61, 0x70, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, + 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x12, 0x0f, 0x2f, 0x64, 0x65, 0x62, + 0x75, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x65, 0x61, 0x70, 0x12, 0x5f, 0x0a, 0x0c, 0x47, + 0x65, 0x74, 0x48, 0x65, 0x61, 0x70, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x18, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x70, + 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x17, 0x12, 0x15, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x68, 0x65, 0x61, 0x70, 0x2f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x42, 0x1d, 0x5a, 0x1b, + 0x6d, 0x37, 0x73, 0x2e, 0x6c, 0x69, 0x76, 0x65, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + 
+var ( + file_debug_proto_rawDescOnce sync.Once + file_debug_proto_rawDescData = file_debug_proto_rawDesc +) + +func file_debug_proto_rawDescGZIP() []byte { + file_debug_proto_rawDescOnce.Do(func() { + file_debug_proto_rawDescData = protoimpl.X.CompressGZIP(file_debug_proto_rawDescData) + }) + return file_debug_proto_rawDescData +} + +var file_debug_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_debug_proto_goTypes = []interface{}{ + (*HeapObject)(nil), // 0: debug.HeapObject + (*HeapStats)(nil), // 1: debug.HeapStats + (*HeapData)(nil), // 2: debug.HeapData + (*HeapEdge)(nil), // 3: debug.HeapEdge + (*HeapResponse)(nil), // 4: debug.HeapResponse + (*HeapGraphResponse)(nil), // 5: debug.HeapGraphResponse + (*emptypb.Empty)(nil), // 6: google.protobuf.Empty +} +var file_debug_proto_depIdxs = []int32{ + 1, // 0: debug.HeapData.stats:type_name -> debug.HeapStats + 0, // 1: debug.HeapData.objects:type_name -> debug.HeapObject + 3, // 2: debug.HeapData.edges:type_name -> debug.HeapEdge + 2, // 3: debug.HeapResponse.data:type_name -> debug.HeapData + 6, // 4: debug.api.GetHeap:input_type -> google.protobuf.Empty + 6, // 5: debug.api.GetHeapGraph:input_type -> google.protobuf.Empty + 4, // 6: debug.api.GetHeap:output_type -> debug.HeapResponse + 5, // 7: debug.api.GetHeapGraph:output_type -> debug.HeapGraphResponse + 6, // [6:8] is the sub-list for method output_type + 4, // [4:6] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_debug_proto_init() } +func file_debug_proto_init() { + if File_debug_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_debug_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeapObject); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + 
} + } + file_debug_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeapStats); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debug_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeapData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debug_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeapEdge); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debug_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeapResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_debug_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeapGraphResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_debug_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_debug_proto_goTypes, + DependencyIndexes: file_debug_proto_depIdxs, + MessageInfos: file_debug_proto_msgTypes, + }.Build() + File_debug_proto = out.File + file_debug_proto_rawDesc = nil + file_debug_proto_goTypes = nil + file_debug_proto_depIdxs = nil +} diff --git a/plugin/debug/pb/debug.pb.gw.go b/plugin/debug/pb/debug.pb.gw.go new file mode 100644 index 0000000..65bbb15 --- /dev/null +++ b/plugin/debug/pb/debug.pb.gw.go @@ -0,0 +1,225 @@ +// Code generated by protoc-gen-grpc-gateway. 
DO NOT EDIT. +// source: debug.proto + +/* +Package pb is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package pb + +import ( + "context" + "io" + "net/http" + + "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" + "github.com/grpc-ecosystem/grpc-gateway/v2/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/types/known/emptypb" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = metadata.Join + +func request_Api_GetHeap_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq emptypb.Empty + var metadata runtime.ServerMetadata + + msg, err := client.GetHeap(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Api_GetHeap_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq emptypb.Empty + var metadata runtime.ServerMetadata + + msg, err := server.GetHeap(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Api_GetHeapGraph_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq emptypb.Empty + var metadata runtime.ServerMetadata + + msg, err := client.GetHeapGraph(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Api_GetHeapGraph_0(ctx context.Context, marshaler 
runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq emptypb.Empty + var metadata runtime.ServerMetadata + + msg, err := server.GetHeapGraph(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterApiHandlerServer registers the http handlers for service Api to "mux". +// UnaryRPC :call ApiServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterApiHandlerFromEndpoint instead. +func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ApiServer) error { + + mux.Handle("GET", pattern_Api_GetHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetHeap", runtime.WithHTTPPathPattern("/debug/api/heap")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Api_GetHeap_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_Api_GetHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Api_GetHeapGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetHeapGraph", runtime.WithHTTPPathPattern("/debug/api/heap/graph")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Api_GetHeapGraph_0(annotatedContext, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_Api_GetHeapGraph_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterApiHandlerFromEndpoint is same as RegisterApiHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterApiHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.DialContext(ctx, endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterApiHandler(ctx, mux, conn) +} + +// RegisterApiHandler registers the http handlers for service Api to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterApiHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterApiHandlerClient(ctx, mux, NewApiClient(conn)) +} + +// RegisterApiHandlerClient registers the http handlers for service Api +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ApiClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ApiClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "ApiClient" to call the correct interceptors. 
+func RegisterApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ApiClient) error { + + mux.Handle("GET", pattern_Api_GetHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetHeap", runtime.WithHTTPPathPattern("/debug/api/heap")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Api_GetHeap_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_Api_GetHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Api_GetHeapGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + var err error + var annotatedContext context.Context + annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetHeapGraph", runtime.WithHTTPPathPattern("/debug/api/heap/graph")) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Api_GetHeapGraph_0(annotatedContext, inboundMarshaler, client, req, pathParams) + annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md) + if err != nil { + runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err) + return + } + + forward_Api_GetHeapGraph_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Api_GetHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"debug", "api", "heap"}, "")) + + pattern_Api_GetHeapGraph_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"debug", "api", "heap", "graph"}, "")) +) + +var ( + forward_Api_GetHeap_0 = runtime.ForwardResponseMessage + + forward_Api_GetHeapGraph_0 = runtime.ForwardResponseMessage +) diff --git a/plugin/debug/pb/debug.proto b/plugin/debug/pb/debug.proto new file mode 100644 index 0000000..1d1b765 --- /dev/null +++ b/plugin/debug/pb/debug.proto @@ -0,0 +1,66 @@ +syntax = "proto3"; +import "google/api/annotations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/timestamp.proto"; +package debug; +option go_package="m7s.live/v5/plugin/debug/pb"; + +service api { + rpc GetHeap (google.protobuf.Empty) returns (HeapResponse) { + option (google.api.http) = { + get: "/debug/api/heap" + }; + } + rpc GetHeapGraph 
(google.protobuf.Empty) returns (HeapGraphResponse) { + option (google.api.http) = { + get: "/debug/api/heap/graph" + }; + } +} + +message HeapObject { + string type = 1; + int64 count = 2; + int64 size = 3; + double sizePerc = 4; + string address = 5; + repeated string refs = 6; +} + +message HeapStats { + uint64 alloc = 1; + uint64 totalAlloc = 2; + uint64 sys = 3; + uint32 numGC = 4; + uint64 heapAlloc = 5; + uint64 heapSys = 6; + uint64 heapIdle = 7; + uint64 heapInuse = 8; + uint64 heapReleased = 9; + uint64 heapObjects = 10; + double gcCPUFraction = 11; +} + +message HeapData { + HeapStats stats = 1; + repeated HeapObject objects = 2; + repeated HeapEdge edges = 3; +} + +message HeapEdge { + string from = 1; + string to = 2; + string fieldName = 3; +} + +message HeapResponse { + uint32 code = 1; + string message = 2; + HeapData data = 3; +} + +message HeapGraphResponse { + uint32 code = 1; + string message = 2; + string data = 3; +} diff --git a/plugin/debug/pb/debug_grpc.pb.go b/plugin/debug/pb/debug_grpc.pb.go new file mode 100644 index 0000000..2cccf1f --- /dev/null +++ b/plugin/debug/pb/debug_grpc.pb.go @@ -0,0 +1,142 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.19.1 +// source: debug.proto + +package pb + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ApiClient is the client API for Api service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
+type ApiClient interface { + GetHeap(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HeapResponse, error) + GetHeapGraph(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HeapGraphResponse, error) +} + +type apiClient struct { + cc grpc.ClientConnInterface +} + +func NewApiClient(cc grpc.ClientConnInterface) ApiClient { + return &apiClient{cc} +} + +func (c *apiClient) GetHeap(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HeapResponse, error) { + out := new(HeapResponse) + err := c.cc.Invoke(ctx, "/debug.api/GetHeap", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *apiClient) GetHeapGraph(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HeapGraphResponse, error) { + out := new(HeapGraphResponse) + err := c.cc.Invoke(ctx, "/debug.api/GetHeapGraph", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ApiServer is the server API for Api service. +// All implementations must embed UnimplementedApiServer +// for forward compatibility +type ApiServer interface { + GetHeap(context.Context, *emptypb.Empty) (*HeapResponse, error) + GetHeapGraph(context.Context, *emptypb.Empty) (*HeapGraphResponse, error) + mustEmbedUnimplementedApiServer() +} + +// UnimplementedApiServer must be embedded to have forward compatible implementations. +type UnimplementedApiServer struct { +} + +func (UnimplementedApiServer) GetHeap(context.Context, *emptypb.Empty) (*HeapResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetHeap not implemented") +} +func (UnimplementedApiServer) GetHeapGraph(context.Context, *emptypb.Empty) (*HeapGraphResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetHeapGraph not implemented") +} +func (UnimplementedApiServer) mustEmbedUnimplementedApiServer() {} + +// UnsafeApiServer may be embedded to opt out of forward compatibility for this service. 
+// Use of this interface is not recommended, as added methods to ApiServer will +// result in compilation errors. +type UnsafeApiServer interface { + mustEmbedUnimplementedApiServer() +} + +func RegisterApiServer(s grpc.ServiceRegistrar, srv ApiServer) { + s.RegisterService(&Api_ServiceDesc, srv) +} + +func _Api_GetHeap_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ApiServer).GetHeap(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/debug.api/GetHeap", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ApiServer).GetHeap(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _Api_GetHeapGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ApiServer).GetHeapGraph(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/debug.api/GetHeapGraph", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ApiServer).GetHeapGraph(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// Api_ServiceDesc is the grpc.ServiceDesc for Api service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var Api_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "debug.api", + HandlerType: (*ApiServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetHeap", + Handler: _Api_GetHeap_Handler, + }, + { + MethodName: "GetHeapGraph", + Handler: _Api_GetHeapGraph_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "debug.proto", +} diff --git a/plugin/debug/pkg/index.go b/plugin/debug/pkg/index.go new file mode 100644 index 0000000..79a1ff2 --- /dev/null +++ b/plugin/debug/pkg/index.go @@ -0,0 +1,17 @@ +package debug + +import ( + "bytes" + + "m7s.live/v5/plugin/debug/pkg/internal/graph" + "m7s.live/v5/plugin/debug/pkg/internal/report" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +func GetDotGraph(profile *profile.Profile) (string, error) { + rpt := report.NewDefault(profile, report.Options{}) + g, config := report.GetDOT(rpt) + dot := &bytes.Buffer{} + graph.ComposeDot(dot, g, &graph.DotAttributes{}, config) + return dot.String(), nil +} diff --git a/plugin/debug/pkg/internal/binutils/addr2liner.go b/plugin/debug/pkg/internal/binutils/addr2liner.go new file mode 100644 index 0000000..e0a9c13 --- /dev/null +++ b/plugin/debug/pkg/internal/binutils/addr2liner.go @@ -0,0 +1,238 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package binutils + +import ( + "bufio" + "fmt" + "io" + "os/exec" + "strconv" + "strings" + "sync" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" +) + +const ( + defaultAddr2line = "addr2line" + + // addr2line may produce multiple lines of output. We + // use this sentinel to identify the end of the output. + sentinel = ^uint64(0) +) + +// addr2Liner is a connection to an addr2line command for obtaining +// address and line number information from a binary. +type addr2Liner struct { + mu sync.Mutex + rw lineReaderWriter + base uint64 + + // nm holds an addr2Liner using nm tool. Certain versions of addr2line + // produce incomplete names due to + // https://sourceware.org/bugzilla/show_bug.cgi?id=17541. As a workaround, + // the names from nm are used when they look more complete. See addrInfo() + // code below for the exact heuristic. + nm *addr2LinerNM +} + +// lineReaderWriter is an interface to abstract the I/O to an addr2line +// process. It writes a line of input to the job, and reads its output +// one line at a time. +type lineReaderWriter interface { + write(string) error + readLine() (string, error) + close() +} + +type addr2LinerJob struct { + cmd *exec.Cmd + in io.WriteCloser + out *bufio.Reader +} + +func (a *addr2LinerJob) write(s string) error { + _, err := fmt.Fprint(a.in, s+"\n") + return err +} + +func (a *addr2LinerJob) readLine() (string, error) { + s, err := a.out.ReadString('\n') + if err != nil { + return "", err + } + return strings.TrimSpace(s), nil +} + +// close releases any resources used by the addr2liner object. +func (a *addr2LinerJob) close() { + a.in.Close() + a.cmd.Wait() +} + +// newAddr2Liner starts the given addr2liner command reporting +// information about the given executable file. If file is a shared +// library, base should be the address at which it was mapped in the +// program under consideration. 
+func newAddr2Liner(cmd, file string, base uint64) (*addr2Liner, error) { + if cmd == "" { + cmd = defaultAddr2line + } + + j := &addr2LinerJob{ + cmd: exec.Command(cmd, "-aif", "-e", file), + } + + var err error + if j.in, err = j.cmd.StdinPipe(); err != nil { + return nil, err + } + + outPipe, err := j.cmd.StdoutPipe() + if err != nil { + return nil, err + } + + j.out = bufio.NewReader(outPipe) + if err := j.cmd.Start(); err != nil { + return nil, err + } + + a := &addr2Liner{ + rw: j, + base: base, + } + + return a, nil +} + +// readFrame parses the addr2line output for a single address. It +// returns a populated plugin.Frame and whether it has reached the end of the +// data. +func (d *addr2Liner) readFrame() (plugin.Frame, bool) { + funcname, err := d.rw.readLine() + if err != nil { + return plugin.Frame{}, true + } + if strings.HasPrefix(funcname, "0x") { + // If addr2line returns a hex address we can assume it is the + // sentinel. Read and ignore next two lines of output from + // addr2line + d.rw.readLine() + d.rw.readLine() + return plugin.Frame{}, true + } + + fileline, err := d.rw.readLine() + if err != nil { + return plugin.Frame{}, true + } + + linenumber := 0 + + if funcname == "??" { + funcname = "" + } + + if fileline == "??:0" { + fileline = "" + } else { + if i := strings.LastIndex(fileline, ":"); i >= 0 { + // Remove discriminator, if present + if disc := strings.Index(fileline, " (discriminator"); disc > 0 { + fileline = fileline[:disc] + } + // If we cannot parse a number after the last ":", keep it as + // part of the filename. 
+ if line, err := strconv.Atoi(fileline[i+1:]); err == nil { + linenumber = line + fileline = fileline[:i] + } + } + } + + return plugin.Frame{ + Func: funcname, + File: fileline, + Line: linenumber}, false +} + +func (d *addr2Liner) rawAddrInfo(addr uint64) ([]plugin.Frame, error) { + d.mu.Lock() + defer d.mu.Unlock() + + if err := d.rw.write(fmt.Sprintf("%x", addr-d.base)); err != nil { + return nil, err + } + + if err := d.rw.write(fmt.Sprintf("%x", sentinel)); err != nil { + return nil, err + } + + resp, err := d.rw.readLine() + if err != nil { + return nil, err + } + + if !strings.HasPrefix(resp, "0x") { + return nil, fmt.Errorf("unexpected addr2line output: %s", resp) + } + + var stack []plugin.Frame + for { + frame, end := d.readFrame() + if end { + break + } + + if frame != (plugin.Frame{}) { + stack = append(stack, frame) + } + } + return stack, err +} + +// addrInfo returns the stack frame information for a specific program +// address. It returns nil if the address could not be identified. +func (d *addr2Liner) addrInfo(addr uint64) ([]plugin.Frame, error) { + stack, err := d.rawAddrInfo(addr) + if err != nil { + return nil, err + } + + // Certain versions of addr2line produce incomplete names due to + // https://sourceware.org/bugzilla/show_bug.cgi?id=17541. Attempt to replace + // the name with a better one from nm. + if len(stack) > 0 && d.nm != nil { + nm, err := d.nm.addrInfo(addr) + if err == nil && len(nm) > 0 { + // Last entry in frame list should match since it is non-inlined. As a + // simple heuristic, we only switch to the nm-based name if it is longer + // by 2 or more characters. We consider nm names that are longer by 1 + // character insignificant to avoid replacing foo with _foo on MacOS (for + // unknown reasons read2line produces the former and nm produces the + // latter on MacOS even though both tools are asked to produce mangled + // names). 
+ nmName := nm[len(nm)-1].Func + a2lName := stack[len(stack)-1].Func + if len(nmName) > len(a2lName)+1 { + stack[len(stack)-1].Func = nmName + } + } + } + + return stack, nil +} diff --git a/plugin/debug/pkg/internal/binutils/addr2liner_llvm.go b/plugin/debug/pkg/internal/binutils/addr2liner_llvm.go new file mode 100644 index 0000000..2a54c64 --- /dev/null +++ b/plugin/debug/pkg/internal/binutils/addr2liner_llvm.go @@ -0,0 +1,184 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package binutils + +import ( + "bufio" + "encoding/json" + "fmt" + "io" + "os/exec" + "strconv" + "strings" + "sync" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" +) + +const ( + defaultLLVMSymbolizer = "llvm-symbolizer" +) + +// llvmSymbolizer is a connection to an llvm-symbolizer command for +// obtaining address and line number information from a binary. +type llvmSymbolizer struct { + sync.Mutex + filename string + rw lineReaderWriter + base uint64 + isData bool +} + +type llvmSymbolizerJob struct { + cmd *exec.Cmd + in io.WriteCloser + out *bufio.Reader + // llvm-symbolizer requires the symbol type, CODE or DATA, for symbolization. 
+ symType string +} + +func (a *llvmSymbolizerJob) write(s string) error { + _, err := fmt.Fprintln(a.in, a.symType, s) + return err +} + +func (a *llvmSymbolizerJob) readLine() (string, error) { + s, err := a.out.ReadString('\n') + if err != nil { + return "", err + } + return strings.TrimSpace(s), nil +} + +// close releases any resources used by the llvmSymbolizer object. +func (a *llvmSymbolizerJob) close() { + a.in.Close() + a.cmd.Wait() +} + +// newLLVMSymbolizer starts the given llvmSymbolizer command reporting +// information about the given executable file. If file is a shared +// library, base should be the address at which it was mapped in the +// program under consideration. +func newLLVMSymbolizer(cmd, file string, base uint64, isData bool) (*llvmSymbolizer, error) { + if cmd == "" { + cmd = defaultLLVMSymbolizer + } + + j := &llvmSymbolizerJob{ + cmd: exec.Command(cmd, "--inlining", "-demangle=false", "--output-style=JSON"), + symType: "CODE", + } + if isData { + j.symType = "DATA" + } + + var err error + if j.in, err = j.cmd.StdinPipe(); err != nil { + return nil, err + } + + outPipe, err := j.cmd.StdoutPipe() + if err != nil { + return nil, err + } + + j.out = bufio.NewReader(outPipe) + if err := j.cmd.Start(); err != nil { + return nil, err + } + + a := &llvmSymbolizer{ + filename: file, + rw: j, + base: base, + isData: isData, + } + + return a, nil +} + +// readDataFrames parses the llvm-symbolizer DATA output for a single address. It +// returns a populated plugin.Frame array with a single entry. 
+func (d *llvmSymbolizer) readDataFrames() ([]plugin.Frame, error) { + line, err := d.rw.readLine() + if err != nil { + return nil, err + } + var frame struct { + Address string `json:"Address"` + ModuleName string `json:"ModuleName"` + Data struct { + Start string `json:"Start"` + Size string `json:"Size"` + Name string `json:"Name"` + } `json:"Data"` + } + if err := json.Unmarshal([]byte(line), &frame); err != nil { + return nil, err + } + // Match non-JSON output behaviour of stuffing the start/size into the filename of a single frame, + // with the size being a decimal value. + size, err := strconv.ParseInt(frame.Data.Size, 0, 0) + if err != nil { + return nil, err + } + var stack []plugin.Frame + stack = append(stack, plugin.Frame{Func: frame.Data.Name, File: fmt.Sprintf("%s %d", frame.Data.Start, size)}) + return stack, nil +} + +// readCodeFrames parses the llvm-symbolizer CODE output for a single address. It +// returns a populated plugin.Frame array. +func (d *llvmSymbolizer) readCodeFrames() ([]plugin.Frame, error) { + line, err := d.rw.readLine() + if err != nil { + return nil, err + } + var frame struct { + Address string `json:"Address"` + ModuleName string `json:"ModuleName"` + Symbol []struct { + Line int `json:"Line"` + Column int `json:"Column"` + FunctionName string `json:"FunctionName"` + FileName string `json:"FileName"` + StartLine int `json:"StartLine"` + } `json:"Symbol"` + } + if err := json.Unmarshal([]byte(line), &frame); err != nil { + return nil, err + } + var stack []plugin.Frame + for _, s := range frame.Symbol { + stack = append(stack, plugin.Frame{Func: s.FunctionName, File: s.FileName, Line: s.Line, Column: s.Column, StartLine: s.StartLine}) + } + return stack, nil +} + +// addrInfo returns the stack frame information for a specific program +// address. It returns nil if the address could not be identified. 
+func (d *llvmSymbolizer) addrInfo(addr uint64) ([]plugin.Frame, error) { + d.Lock() + defer d.Unlock() + + if err := d.rw.write(fmt.Sprintf("%s 0x%x", d.filename, addr-d.base)); err != nil { + return nil, err + } + if d.isData { + return d.readDataFrames() + } + return d.readCodeFrames() +} diff --git a/plugin/debug/pkg/internal/binutils/addr2liner_nm.go b/plugin/debug/pkg/internal/binutils/addr2liner_nm.go new file mode 100644 index 0000000..b916ed6 --- /dev/null +++ b/plugin/debug/pkg/internal/binutils/addr2liner_nm.go @@ -0,0 +1,144 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package binutils + +import ( + "bufio" + "bytes" + "io" + "os/exec" + "strconv" + "strings" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" +) + +const ( + defaultNM = "nm" +) + +// addr2LinerNM is a connection to an nm command for obtaining symbol +// information from a binary. +type addr2LinerNM struct { + m []symbolInfo // Sorted list of symbol addresses from binary. +} + +type symbolInfo struct { + address uint64 + size uint64 + name string + symType string +} + +// isData returns if the symbol has a known data object symbol type. +func (s *symbolInfo) isData() bool { + // The following symbol types are taken from https://linux.die.net/man/1/nm: + // Lowercase letter means local symbol, uppercase denotes a global symbol. + // - b or B: the symbol is in the uninitialized data section, e.g. 
.bss; + // - d or D: the symbol is in the initialized data section; + // - r or R: the symbol is in a read only data section; + // - v or V: the symbol is a weak object; + // - W: the symbol is a weak symbol that has not been specifically tagged as a + // weak object symbol. Experiments with some binaries, showed these to be + // mostly data objects. + return strings.ContainsAny(s.symType, "bBdDrRvVW") +} + +// newAddr2LinerNM starts the given nm command reporting information about the +// given executable file. If file is a shared library, base should be the +// address at which it was mapped in the program under consideration. +func newAddr2LinerNM(cmd, file string, base uint64) (*addr2LinerNM, error) { + if cmd == "" { + cmd = defaultNM + } + var b bytes.Buffer + c := exec.Command(cmd, "--numeric-sort", "--print-size", "--format=posix", file) + c.Stdout = &b + if err := c.Run(); err != nil { + return nil, err + } + return parseAddr2LinerNM(base, &b) +} + +func parseAddr2LinerNM(base uint64, nm io.Reader) (*addr2LinerNM, error) { + a := &addr2LinerNM{ + m: []symbolInfo{}, + } + + // Parse nm output and populate symbol map. + // Skip lines we fail to parse. + buf := bufio.NewReader(nm) + for { + line, err := buf.ReadString('\n') + if line == "" && err != nil { + if err == io.EOF { + break + } + return nil, err + } + line = strings.TrimSpace(line) + fields := strings.Split(line, " ") + if len(fields) != 4 { + continue + } + address, err := strconv.ParseUint(fields[2], 16, 64) + if err != nil { + continue + } + size, err := strconv.ParseUint(fields[3], 16, 64) + if err != nil { + continue + } + a.m = append(a.m, symbolInfo{ + address: address + base, + size: size, + name: fields[0], + symType: fields[1], + }) + } + + return a, nil +} + +// addrInfo returns the stack frame information for a specific program +// address. It returns nil if the address could not be identified. 
+func (a *addr2LinerNM) addrInfo(addr uint64) ([]plugin.Frame, error) { + if len(a.m) == 0 || addr < a.m[0].address || addr >= (a.m[len(a.m)-1].address+a.m[len(a.m)-1].size) { + return nil, nil + } + + // Binary search. Search until low, high are separated by 1. + low, high := 0, len(a.m) + for low+1 < high { + mid := (low + high) / 2 + v := a.m[mid].address + if addr == v { + low = mid + break + } else if addr > v { + low = mid + } else { + high = mid + } + } + + // Address is between a.m[low] and a.m[high]. Pick low, as it represents + // [low, high). For data symbols, we use a strict check that the address is in + // the [start, start + size) range of a.m[low]. + if a.m[low].isData() && addr >= (a.m[low].address+a.m[low].size) { + return nil, nil + } + return []plugin.Frame{{Func: a.m[low].name}}, nil +} diff --git a/plugin/debug/pkg/internal/binutils/binutils.go b/plugin/debug/pkg/internal/binutils/binutils.go new file mode 100644 index 0000000..2fc31ac --- /dev/null +++ b/plugin/debug/pkg/internal/binutils/binutils.go @@ -0,0 +1,736 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package binutils provides access to the GNU binutils. 
package binutils

import (
	"debug/elf"
	"debug/macho"
	"debug/pe"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"runtime"
	"strconv"
	"strings"
	"sync"

	"m7s.live/v5/plugin/debug/pkg/internal/elfexec"
	"m7s.live/v5/plugin/debug/pkg/internal/plugin"
)

// A Binutils implements plugin.ObjTool by invoking the GNU binutils.
type Binutils struct {
	mu  sync.Mutex // mu guards rep.
	rep *binrep    // Current immutable tool configuration; replaced wholesale on mutation.
}

var (
	// objdumpLLVMVerRE matches the version line of `llvm-objdump --version`,
	// capturing either major.minor.patch or the literal "trunk".
	objdumpLLVMVerRE = regexp.MustCompile(`LLVM version (?:(\d*)\.(\d*)\.(\d*)|.*(trunk).*)`)

	// Defined for testing
	elfOpen = elf.Open
)

// binrep is an immutable representation for Binutils. It is atomically
// replaced on every mutation to provide thread-safe access.
type binrep struct {
	// Commands to invoke.
	llvmSymbolizer      string
	llvmSymbolizerFound bool
	addr2line           string
	addr2lineFound      bool
	nm                  string
	nmFound             bool
	objdump             string
	objdumpFound        bool
	isLLVMObjdump       bool

	// if fast, perform symbolization using nm (symbol names only),
	// instead of file-line detail from the slower addr2line.
	fast bool
}

// get returns the current representation for bu, initializing it if necessary.
// Tool discovery is deferred to first use so constructing a Binutils is cheap.
func (bu *Binutils) get() *binrep {
	bu.mu.Lock()
	r := bu.rep
	if r == nil {
		r = &binrep{}
		initTools(r, "")
		bu.rep = r
	}
	bu.mu.Unlock()
	return r
}

// update modifies the rep for bu via the supplied function.
// Copy-on-write: fn mutates a private copy, which is then published under the
// lock, so readers holding an old *binrep are never mutated underneath.
func (bu *Binutils) update(fn func(r *binrep)) {
	r := &binrep{}
	bu.mu.Lock()
	defer bu.mu.Unlock()
	if bu.rep == nil {
		initTools(r, "")
	} else {
		*r = *bu.rep
	}
	fn(r)
	bu.rep = r
}

// String returns string representation of the binutils state for debug logging.
func (bu *Binutils) String() string {
	r := bu.get()
	var llvmSymbolizer, addr2line, nm, objdump string
	// Report only tools that were actually located; missing tools print as "".
	if r.llvmSymbolizerFound {
		llvmSymbolizer = r.llvmSymbolizer
	}
	if r.addr2lineFound {
		addr2line = r.addr2line
	}
	if r.nmFound {
		nm = r.nm
	}
	if r.objdumpFound {
		objdump = r.objdump
	}
	return fmt.Sprintf("llvm-symbolizer=%q addr2line=%q nm=%q objdump=%q fast=%t",
		llvmSymbolizer, addr2line, nm, objdump, r.fast)
}

// SetFastSymbolization sets a toggle that makes binutils use fast
// symbolization (using nm), which is much faster than addr2line but
// provides only symbol name information (no file/line).
func (bu *Binutils) SetFastSymbolization(fast bool) {
	bu.update(func(r *binrep) { r.fast = fast })
}

// SetTools processes the contents of the tools option. It
// expects a set of entries separated by commas; each entry is a pair
// of the form t:path, where cmd will be used to look only for the
// tool named t. If t is not specified, the path is searched for all
// tools.
func (bu *Binutils) SetTools(config string) {
	bu.update(func(r *binrep) { initTools(r, config) })
}

// initTools locates each external tool, honoring any per-tool path overrides
// from config (see SetTools for the accepted syntax).
func initTools(b *binrep, config string) {
	// paths collect paths per tool; Key "" contains the default.
	paths := make(map[string][]string)
	for _, t := range strings.Split(config, ",") {
		name, path := "", t
		if ct := strings.SplitN(t, ":", 2); len(ct) == 2 {
			name, path = ct[0], ct[1]
		}
		paths[name] = append(paths[name], path)
	}

	defaultPath := paths[""]
	b.llvmSymbolizer, b.llvmSymbolizerFound = chooseExe([]string{"llvm-symbolizer"}, []string{}, append(paths["llvm-symbolizer"], defaultPath...))
	b.addr2line, b.addr2lineFound = chooseExe([]string{"addr2line"}, []string{"gaddr2line"}, append(paths["addr2line"], defaultPath...))
	// The "-n" option is supported by LLVM since 2011. The output of llvm-nm
	// and GNU nm with "-n" option is interchangeable for our purposes, so we do
	// not need to differentiate them.
	b.nm, b.nmFound = chooseExe([]string{"llvm-nm", "nm"}, []string{"gnm"}, append(paths["nm"], defaultPath...))
	b.objdump, b.objdumpFound, b.isLLVMObjdump = findObjdump(append(paths["objdump"], defaultPath...))
}

// findObjdump finds and returns path to preferred objdump binary.
// Order of preference is: llvm-objdump, objdump.
// On MacOS only, also looks for gobjdump with least preference.
// Accepts a list of paths and returns:
// a string with path to the preferred objdump binary if found,
// or an empty string if not found;
// a boolean if any acceptable objdump was found;
// a boolean indicating if it is an LLVM objdump.
func findObjdump(paths []string) (string, bool, bool) {
	objdumpNames := []string{"llvm-objdump", "objdump"}
	if runtime.GOOS == "darwin" {
		objdumpNames = append(objdumpNames, "gobjdump")
	}

	for _, objdumpName := range objdumpNames {
		if objdump, objdumpFound := findExe(objdumpName, paths); objdumpFound {
			// Probe the binary's --version output to decide whether it is an
			// acceptable LLVM or GNU objdump; skip binaries that fail to run.
			cmdOut, err := exec.Command(objdump, "--version").Output()
			if err != nil {
				continue
			}
			if isLLVMObjdump(string(cmdOut)) {
				return objdump, true, true
			}
			if isBuObjdump(string(cmdOut)) {
				return objdump, true, false
			}
		}
	}
	return "", false, false
}

// chooseExe finds and returns path to preferred binary. names is a list of
// names to search on both Linux and OSX. osxNames is a list of names specific
// to OSX. names always has a higher priority than osxNames. The order of
// the name within each list decides its priority (e.g. the first name has a
// higher priority than the second name in the list).
//
// It returns a string with path to the binary and a boolean indicating if any
// acceptable binary was found.
func chooseExe(names, osxNames []string, paths []string) (string, bool) {
	if runtime.GOOS == "darwin" {
		names = append(names, osxNames...)
	}
	for _, name := range names {
		if binary, found := findExe(name, paths); found {
			return binary, true
		}
	}
	return "", false
}

// isLLVMObjdump accepts the output of an `objdump --version` invocation
// and returns a boolean indicating if the binary that produced it is an
// LLVM objdump of an acceptable version.
func isLLVMObjdump(output string) bool {
	fields := objdumpLLVMVerRE.FindStringSubmatch(output)
	if len(fields) != 5 {
		return false
	}
	// A "trunk" build is assumed to be newer than any released version.
	if fields[4] == "trunk" {
		return true
	}
	verMajor, err := strconv.Atoi(fields[1])
	if err != nil {
		return false
	}
	verPatch, err := strconv.Atoi(fields[3])
	if err != nil {
		return false
	}
	if runtime.GOOS == "linux" && verMajor >= 8 {
		// Ensure LLVM objdump is at least version 8.0 on Linux.
		// Some flags, like --demangle, and double dashes for options are
		// not supported by previous versions.
		return true
	}
	if runtime.GOOS == "darwin" {
		// Ensure LLVM objdump is at least version 10.0.1 on MacOS.
		return verMajor > 10 || (verMajor == 10 && verPatch >= 1)
	}
	return false
}

// isBuObjdump accepts the output of an `objdump --version` invocation
// and returns a boolean indicating if the binary that produced it is a
// GNU binutils objdump. No version check is performed.
func isBuObjdump(output string) bool {
	return strings.Contains(output, "GNU objdump")
}

// findExe looks for an executable command on a set of paths.
// If it cannot find it, returns cmd unchanged together with found=false.
func findExe(cmd string, paths []string) (string, bool) {
	for _, p := range paths {
		cp := filepath.Join(p, cmd)
		if c, err := exec.LookPath(cp); err == nil {
			return c, true
		}
	}
	return cmd, false
}

// Disasm returns the assembly instructions for the specified address range
// of a binary.
func (bu *Binutils) Disasm(file string, start, end uint64, intelSyntax bool) ([]plugin.Inst, error) {
	b := bu.get()
	if !b.objdumpFound {
		return nil, errors.New("cannot disasm: no objdump tool available")
	}
	args := []string{"--disassemble", "--demangle", "--no-show-raw-insn",
		"--line-numbers", fmt.Sprintf("--start-address=%#x", start),
		fmt.Sprintf("--stop-address=%#x", end)}

	if intelSyntax {
		// LLVM and GNU objdump spell the Intel-syntax flag differently.
		if b.isLLVMObjdump {
			args = append(args, "--x86-asm-syntax=intel")
		} else {
			args = append(args, "-M", "intel")
		}
	}

	args = append(args, file)
	cmd := exec.Command(b.objdump, args...)
	out, err := cmd.Output()
	if err != nil {
		return nil, fmt.Errorf("%v: %v", cmd.Args, err)
	}

	return disassemble(out)
}

// Open satisfies the plugin.ObjTool interface.
func (bu *Binutils) Open(name string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) {
	b := bu.get()

	// Make sure file is a supported executable.
	// This uses magic numbers, mainly to provide better error messages but
	// it should also help speed.

	if _, err := os.Stat(name); err != nil {
		// For testing, do not require file name to exist.
		if strings.Contains(b.addr2line, "testdata/") {
			return &fileAddr2Line{file: file{b: b, name: name}}, nil
		}
		return nil, err
	}

	// Read the first 4 bytes of the file.

	f, err := os.Open(name)
	if err != nil {
		return nil, fmt.Errorf("error opening %s: %v", name, err)
	}
	defer f.Close()

	var header [4]byte
	if _, err = io.ReadFull(f, header[:]); err != nil {
		return nil, fmt.Errorf("error reading magic number from %s: %v", name, err)
	}

	elfMagic := string(header[:])

	// Match against supported file types.
	if elfMagic == elf.ELFMAG {
		f, err := b.openELF(name, start, limit, offset, relocationSymbol)
		if err != nil {
			return nil, fmt.Errorf("error reading ELF file %s: %v", name, err)
		}
		return f, nil
	}

	// Mach-O magic numbers can be big or little endian.
	machoMagicLittle := binary.LittleEndian.Uint32(header[:])
	machoMagicBig := binary.BigEndian.Uint32(header[:])

	if machoMagicLittle == macho.Magic32 || machoMagicLittle == macho.Magic64 ||
		machoMagicBig == macho.Magic32 || machoMagicBig == macho.Magic64 {
		f, err := b.openMachO(name, start, limit, offset)
		if err != nil {
			return nil, fmt.Errorf("error reading Mach-O file %s: %v", name, err)
		}
		return f, nil
	}
	if machoMagicLittle == macho.MagicFat || machoMagicBig == macho.MagicFat {
		f, err := b.openFatMachO(name, start, limit, offset)
		if err != nil {
			return nil, fmt.Errorf("error reading fat Mach-O file %s: %v", name, err)
		}
		return f, nil
	}

	// PE files begin with the two-byte DOS stub magic "MZ".
	peMagic := string(header[:2])
	if peMagic == "MZ" {
		f, err := b.openPE(name, start, limit, offset)
		if err != nil {
			return nil, fmt.Errorf("error reading PE file %s: %v", name, err)
		}
		return f, nil
	}

	return nil, fmt.Errorf("unrecognized binary format: %s", name)
}

// openMachOCommon builds the ObjFile for a single (non-fat) Mach-O image,
// choosing the nm-based or addr2line-based implementation per configuration.
func (b *binrep) openMachOCommon(name string, of *macho.File, start, limit, offset uint64) (plugin.ObjFile, error) {

	// Subtract the load address of the __TEXT section. Usually 0 for shared
	// libraries or 0x100000000 for executables. You can check this value by
	// running `objdump -private-headers `.
+ + textSegment := of.Segment("__TEXT") + if textSegment == nil { + return nil, fmt.Errorf("could not identify base for %s: no __TEXT segment", name) + } + if textSegment.Addr > start { + return nil, fmt.Errorf("could not identify base for %s: __TEXT segment address (0x%x) > mapping start address (0x%x)", + name, textSegment.Addr, start) + } + + base := start - textSegment.Addr + + if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) { + return &fileNM{file: file{b: b, name: name, base: base}}, nil + } + return &fileAddr2Line{file: file{b: b, name: name, base: base}}, nil +} + +func (b *binrep) openFatMachO(name string, start, limit, offset uint64) (plugin.ObjFile, error) { + of, err := macho.OpenFat(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer of.Close() + + if len(of.Arches) == 0 { + return nil, fmt.Errorf("empty fat Mach-O file: %s", name) + } + + var arch macho.Cpu + // Use the host architecture. + // TODO: This is not ideal because the host architecture may not be the one + // that was profiled. E.g. an amd64 host can profile a 386 program. 
+ switch runtime.GOARCH { + case "386": + arch = macho.Cpu386 + case "amd64", "amd64p32": + arch = macho.CpuAmd64 + case "arm", "armbe", "arm64", "arm64be": + arch = macho.CpuArm + case "ppc": + arch = macho.CpuPpc + case "ppc64", "ppc64le": + arch = macho.CpuPpc64 + default: + return nil, fmt.Errorf("unsupported host architecture for %s: %s", name, runtime.GOARCH) + } + for i := range of.Arches { + if of.Arches[i].Cpu == arch { + return b.openMachOCommon(name, of.Arches[i].File, start, limit, offset) + } + } + return nil, fmt.Errorf("architecture not found in %s: %s", name, runtime.GOARCH) +} + +func (b *binrep) openMachO(name string, start, limit, offset uint64) (plugin.ObjFile, error) { + of, err := macho.Open(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer of.Close() + + return b.openMachOCommon(name, of, start, limit, offset) +} + +func (b *binrep) openELF(name string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) { + ef, err := elfOpen(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer ef.Close() + + buildID := "" + if id, err := elfexec.GetBuildID(ef); err == nil { + buildID = fmt.Sprintf("%x", id) + } + + var ( + kernelOffset *uint64 + pageAligned = func(addr uint64) bool { return addr%4096 == 0 } + ) + if strings.Contains(name, "vmlinux") || !pageAligned(start) || !pageAligned(limit) || !pageAligned(offset) { + // Reading all Symbols is expensive, and we only rarely need it so + // we don't want to do it every time. But if _stext happens to be + // page-aligned but isn't the same as Vaddr, we would symbolize + // wrong. So if the name the addresses aren't page aligned, or if + // the name is "vmlinux" we read _stext. 
We can be wrong if: (1) + // someone passes a kernel path that doesn't contain "vmlinux" AND + // (2) _stext is page-aligned AND (3) _stext is not at Vaddr + symbols, err := ef.Symbols() + if err != nil && err != elf.ErrNoSymbols { + return nil, err + } + + // The kernel relocation symbol (the mapping start address) can be either + // _text or _stext. When profiles are generated by `perf`, which one was used is + // distinguished by the mapping name for the kernel image: + // '[kernel.kallsyms]_text' or '[kernel.kallsyms]_stext', respectively. If we haven't + // been able to parse it from the mapping, we default to _stext. + if relocationSymbol == "" { + relocationSymbol = "_stext" + } + for _, s := range symbols { + if s.Name == relocationSymbol { + kernelOffset = &s.Value + break + } + } + } + + // Check that we can compute a base for the binary. This may not be the + // correct base value, so we don't save it. We delay computing the actual base + // value until we have a sample address for this mapping, so that we can + // correctly identify the associated program segment that is needed to compute + // the base. 
+ if _, err := elfexec.GetBase(&ef.FileHeader, elfexec.FindTextProgHeader(ef), kernelOffset, start, limit, offset); err != nil { + return nil, fmt.Errorf("could not identify base for %s: %v", name, err) + } + + if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) { + return &fileNM{file: file{ + b: b, + name: name, + buildID: buildID, + m: &elfMapping{start: start, limit: limit, offset: offset, kernelOffset: kernelOffset}, + }}, nil + } + return &fileAddr2Line{file: file{ + b: b, + name: name, + buildID: buildID, + m: &elfMapping{start: start, limit: limit, offset: offset, kernelOffset: kernelOffset}, + }}, nil +} + +func (b *binrep) openPE(name string, start, limit, offset uint64) (plugin.ObjFile, error) { + pf, err := pe.Open(name) + if err != nil { + return nil, fmt.Errorf("error parsing %s: %v", name, err) + } + defer pf.Close() + + var imageBase uint64 + switch h := pf.OptionalHeader.(type) { + case *pe.OptionalHeader32: + imageBase = uint64(h.ImageBase) + case *pe.OptionalHeader64: + imageBase = uint64(h.ImageBase) + default: + return nil, fmt.Errorf("unknown OptionalHeader %T", pf.OptionalHeader) + } + + var base uint64 + if start > 0 { + base = start - imageBase + } + if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) { + return &fileNM{file: file{b: b, name: name, base: base}}, nil + } + return &fileAddr2Line{file: file{b: b, name: name, base: base}}, nil +} + +// elfMapping stores the parameters of a runtime mapping that are needed to +// identify the ELF segment associated with a mapping. +type elfMapping struct { + // Runtime mapping parameters. + start, limit, offset uint64 + // Offset of kernel relocation symbol. Only defined for kernel images, nil otherwise. + kernelOffset *uint64 +} + +// findProgramHeader returns the program segment that matches the current +// mapping and the given address, or an error if it cannot find a unique program +// header. 
func (m *elfMapping) findProgramHeader(ef *elf.File, addr uint64) (*elf.ProgHeader, error) {
	// For user space executables, we try to find the actual program segment that
	// is associated with the given mapping. Skip this search if limit <= start.
	// We cannot use just a check on the start address of the mapping to tell if
	// it's a kernel / .ko module mapping, because with quipper address remapping
	// enabled, the address would be in the lower half of the address space.

	// The limit >= 1<<63 check treats mappings in the upper half of the address
	// space as kernel mappings as well.
	if m.kernelOffset != nil || m.start >= m.limit || m.limit >= (uint64(1)<<63) {
		// For the kernel, find the program segment that includes the .text section.
		return elfexec.FindTextProgHeader(ef), nil
	}

	// Fetch all the loadable segments.
	var phdrs []elf.ProgHeader
	for i := range ef.Progs {
		if ef.Progs[i].Type == elf.PT_LOAD {
			phdrs = append(phdrs, ef.Progs[i].ProgHeader)
		}
	}
	// Some ELF files don't contain any loadable program segments, e.g. .ko
	// kernel modules. It's not an error to have no header in such cases.
	if len(phdrs) == 0 {
		return nil, nil
	}
	// Get all program headers associated with the mapping.
	headers := elfexec.ProgramHeadersForMapping(phdrs, m.offset, m.limit-m.start)
	if len(headers) == 0 {
		return nil, errors.New("no program header matches mapping info")
	}
	if len(headers) == 1 {
		return headers[0], nil
	}

	// Use the file offset corresponding to the address to symbolize, to narrow
	// down the header.
	return elfexec.HeaderForFileOffset(headers, addr-m.start+m.offset)
}

// file implements the binutils.ObjFile interface.
type file struct {
	b       *binrep // Tool configuration used to symbolize this file.
	name    string  // Path of the object file on disk.
	buildID string  // ELF build ID, empty for non-ELF files.

	baseOnce sync.Once // Ensures the base, baseErr and isData are computed once.
	base     uint64
	baseErr  error // Any eventual error while computing the base.
	isData   bool
	// Mapping information. Relevant only for ELF files, nil otherwise.
+ m *elfMapping +} + +// computeBase computes the relocation base for the given binary file only if +// the elfMapping field is set. It populates the base and isData fields and +// returns an error. +func (f *file) computeBase(addr uint64) error { + if f == nil || f.m == nil { + return nil + } + if addr < f.m.start || addr >= f.m.limit { + return fmt.Errorf("specified address %x is outside the mapping range [%x, %x] for file %q", addr, f.m.start, f.m.limit, f.name) + } + ef, err := elfOpen(f.name) + if err != nil { + return fmt.Errorf("error parsing %s: %v", f.name, err) + } + defer ef.Close() + + ph, err := f.m.findProgramHeader(ef, addr) + if err != nil { + return fmt.Errorf("failed to find program header for file %q, ELF mapping %#v, address %x: %v", f.name, *f.m, addr, err) + } + + base, err := elfexec.GetBase(&ef.FileHeader, ph, f.m.kernelOffset, f.m.start, f.m.limit, f.m.offset) + if err != nil { + return err + } + f.base = base + f.isData = ph != nil && ph.Flags&elf.PF_X == 0 + return nil +} + +func (f *file) Name() string { + return f.name +} + +func (f *file) ObjAddr(addr uint64) (uint64, error) { + f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) }) + if f.baseErr != nil { + return 0, f.baseErr + } + return addr - f.base, nil +} + +func (f *file) BuildID() string { + return f.buildID +} + +func (f *file) SourceLine(addr uint64) ([]plugin.Frame, error) { + f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) }) + if f.baseErr != nil { + return nil, f.baseErr + } + return nil, nil +} + +func (f *file) Close() error { + return nil +} + +func (f *file) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) { + // Get from nm a list of symbols sorted by address. 
+ cmd := exec.Command(f.b.nm, "-n", f.name) + out, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("%v: %v", cmd.Args, err) + } + + return findSymbols(out, f.name, r, addr) +} + +// fileNM implements the binutils.ObjFile interface, using 'nm' to map +// addresses to symbols (without file/line number information). It is +// faster than fileAddr2Line. +type fileNM struct { + file + addr2linernm *addr2LinerNM +} + +func (f *fileNM) SourceLine(addr uint64) ([]plugin.Frame, error) { + f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) }) + if f.baseErr != nil { + return nil, f.baseErr + } + if f.addr2linernm == nil { + addr2liner, err := newAddr2LinerNM(f.b.nm, f.name, f.base) + if err != nil { + return nil, err + } + f.addr2linernm = addr2liner + } + return f.addr2linernm.addrInfo(addr) +} + +// fileAddr2Line implements the binutils.ObjFile interface, using +// llvm-symbolizer, if that's available, or addr2line to map addresses to +// symbols (with file/line number information). It can be slow for large +// binaries with debug information. 
+type fileAddr2Line struct { + once sync.Once + file + addr2liner *addr2Liner + llvmSymbolizer *llvmSymbolizer + isData bool +} + +func (f *fileAddr2Line) SourceLine(addr uint64) ([]plugin.Frame, error) { + f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) }) + if f.baseErr != nil { + return nil, f.baseErr + } + f.once.Do(f.init) + if f.llvmSymbolizer != nil { + return f.llvmSymbolizer.addrInfo(addr) + } + if f.addr2liner != nil { + return f.addr2liner.addrInfo(addr) + } + return nil, fmt.Errorf("could not find local addr2liner") +} + +func (f *fileAddr2Line) init() { + if llvmSymbolizer, err := newLLVMSymbolizer(f.b.llvmSymbolizer, f.name, f.base, f.isData); err == nil { + f.llvmSymbolizer = llvmSymbolizer + return + } + + if addr2liner, err := newAddr2Liner(f.b.addr2line, f.name, f.base); err == nil { + f.addr2liner = addr2liner + + // When addr2line encounters some gcc compiled binaries, it + // drops interesting parts of names in anonymous namespaces. + // Fallback to NM for better function names. + if nm, err := newAddr2LinerNM(f.b.nm, f.name, f.base); err == nil { + f.addr2liner.nm = nm + } + } +} + +func (f *fileAddr2Line) Close() error { + if f.llvmSymbolizer != nil { + f.llvmSymbolizer.rw.close() + f.llvmSymbolizer = nil + } + if f.addr2liner != nil { + f.addr2liner.rw.close() + f.addr2liner = nil + } + return nil +} diff --git a/plugin/debug/pkg/internal/binutils/binutils_test.go b/plugin/debug/pkg/internal/binutils/binutils_test.go new file mode 100644 index 0000000..a41baac --- /dev/null +++ b/plugin/debug/pkg/internal/binutils/binutils_test.go @@ -0,0 +1,978 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package binutils + +import ( + "bytes" + "debug/elf" + "encoding/binary" + "errors" + "fmt" + "math" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strings" + "testing" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" +) + +var testAddrMap = map[int]string{ + 1000: "_Z3fooid.clone2", + 2000: "_ZNSaIiEC1Ev.clone18", + 3000: "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", +} + +func functionName(level int) (name string) { + if name = testAddrMap[level]; name != "" { + return name + } + return fmt.Sprintf("fun%d", level) +} + +func TestAddr2Liner(t *testing.T) { + const offset = 0x500 + + a := addr2Liner{rw: &mockAddr2liner{}, base: offset} + for i := 1; i < 8; i++ { + addr := i*0x1000 + offset + s, err := a.addrInfo(uint64(addr)) + if err != nil { + t.Fatalf("addrInfo(%#x): %v", addr, err) + } + if len(s) != i { + t.Fatalf("addrInfo(%#x): got len==%d, want %d", addr, len(s), i) + } + for l, f := range s { + level := (len(s) - l) * 1000 + want := plugin.Frame{Func: functionName(level), File: fmt.Sprintf("file%d", level), Line: level} + + if f != want { + t.Errorf("AddrInfo(%#x)[%d]: = %+v, want %+v", addr, l, f, want) + } + } + } + s, err := a.addrInfo(0xFFFF) + if err != nil { + t.Fatalf("addrInfo(0xFFFF): %v", err) + } + if len(s) != 0 { + t.Fatalf("AddrInfo(0xFFFF): got len==%d, want 0", len(s)) + } + a.rw.close() +} + +type mockAddr2liner struct { + output []string +} + +func (a *mockAddr2liner) write(s string) error { + var lines []string + switch s { + case "1000": + lines = []string{"_Z3fooid.clone2", "file1000:1000"} + case 
"2000": + lines = []string{"_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"} + case "3000": + lines = []string{"_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"} + case "4000": + lines = []string{"fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"} + case "5000": + lines = []string{"fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"} + case "6000": + lines = []string{"fun6000", "file6000:6000", "fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"} + case "7000": + lines = []string{"fun7000", "file7000:7000", "fun6000", "file6000:6000", "fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"} + case "8000": + lines = []string{"fun8000", "file8000:8000", "fun7000", "file7000:7000", "fun6000", "file6000:6000", "fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"} + case "9000": + lines = []string{"fun9000", "file9000:9000", "fun8000", "file8000:8000", "fun7000", "file7000:7000", "fun6000", "file6000:6000", "fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"} + default: + lines = []string{"??", "??:0"} + } + a.output = 
append(a.output, "0x"+s)
	a.output = append(a.output, lines...)
	return nil
}

// readLine pops and returns the next queued line of fake addr2line output.
func (a *mockAddr2liner) readLine() (string, error) {
	if len(a.output) == 0 {
		return "", fmt.Errorf("end of file")
	}
	next := a.output[0]
	a.output = a.output[1:]
	return next, nil
}

// close is a no-op; the mock holds no external resources.
func (a *mockAddr2liner) close() {
}

// TestAddr2LinerLookup exercises symbol lookup against hardcoded nm output,
// covering odd and even symbol counts and the various nm symbol-type codes.
func TestAddr2LinerLookup(t *testing.T) {
	for _, tc := range []struct {
		desc             string
		nmOutput         string
		wantSymbolized   map[uint64]string
		wantUnsymbolized []uint64
	}{
		{
			desc: "odd symbol count",
			nmOutput: `
0x1000 T 1000 100
0x2000 T 2000 120
0x3000 T 3000 130
`,
			wantSymbolized: map[uint64]string{
				0x1000: "0x1000",
				0x1001: "0x1000",
				0x1FFF: "0x1000",
				0x2000: "0x2000",
				0x2001: "0x2000",
				0x3000: "0x3000",
				0x312f: "0x3000",
			},
			wantUnsymbolized: []uint64{0x0fff, 0x3130},
		},
		{
			desc: "even symbol count",
			nmOutput: `
0x1000 T 1000 100
0x2000 T 2000 120
0x3000 T 3000 130
0x4000 T 4000 140
`,
			wantSymbolized: map[uint64]string{
				0x1000: "0x1000",
				0x1001: "0x1000",
				0x1FFF: "0x1000",
				0x2000: "0x2000",
				0x2fff: "0x2000",
				0x3000: "0x3000",
				0x3fff: "0x3000",
				0x4000: "0x4000",
				0x413f: "0x4000",
			},
			wantUnsymbolized: []uint64{0x0fff, 0x4140},
		},
		{
			desc: "different symbol types",
			nmOutput: `
absolute_0x100 a 100
absolute_0x200 A 200
text_0x1000 t 1000 100
bss_0x2000 b 2000 120
data_0x3000 d 3000 130
rodata_0x4000 r 4000 140
weak_0x5000 v 5000 150
text_0x6000 T 6000 160
bss_0x7000 B 7000 170
data_0x8000 D 8000 180
rodata_0x9000 R 9000 190
weak_0xa000 V a000 1a0
weak_0xb000 W b000 1b0
`,
			wantSymbolized: map[uint64]string{
				0x1000: "text_0x1000",
				0x1FFF: "text_0x1000",
				0x2000: "bss_0x2000",
				0x211f: "bss_0x2000",
				0x3000: "data_0x3000",
				0x312f: "data_0x3000",
				0x4000: "rodata_0x4000",
				0x413f: "rodata_0x4000",
				0x5000: "weak_0x5000",
				0x514f: "weak_0x5000",
				0x6000: "text_0x6000",
				0x6fff: "text_0x6000",
				0x7000: "bss_0x7000",
				0x716f: "bss_0x7000",
				0x8000: "data_0x8000",
				0x817f: "data_0x8000",
				0x9000: "rodata_0x9000",
				0x918f: "rodata_0x9000",
				0xa000: "weak_0xa000",
				0xa19f: "weak_0xa000",
				0xb000: "weak_0xb000",
				0xb1af: "weak_0xb000",
			},
			wantUnsymbolized: []uint64{0x100, 0x200, 0x0fff, 0x2120, 0x3130, 0x4140, 0x5150, 0x7170, 0x8180, 0x9190, 0xa1a0, 0xb1b0},
		},
	} {
		t.Run(tc.desc, func(t *testing.T) {
			a, err := parseAddr2LinerNM(0, bytes.NewBufferString(tc.nmOutput))
			if err != nil {
				t.Fatalf("nm parse error: %v", err)
			}
			// Addresses inside a known symbol must resolve to that symbol.
			for address, want := range tc.wantSymbolized {
				if got, _ := a.addrInfo(address); !checkAddress(got, address, want) {
					t.Errorf("%x: got %v, want %s", address, got, want)
				}
			}
			// Addresses outside every symbol must not resolve at all.
			for _, unknown := range tc.wantUnsymbolized {
				if got, _ := a.addrInfo(unknown); got != nil {
					t.Errorf("%x: got %v, want nil", unknown, got)
				}
			}
		})
	}
}

// checkAddress reports whether got is exactly one frame whose function name
// equals want. The address parameter is unused by the check itself.
func checkAddress(got []plugin.Frame, address uint64, want string) bool {
	if len(got) != 1 {
		return false
	}
	return got[0].Func == want
}

func TestSetTools(t *testing.T) {
	// Test that multiple calls work.
	bu := &Binutils{}
	bu.SetTools("")
	bu.SetTools("")
}

func TestSetFastSymbolization(t *testing.T) {
	// Test that multiple calls work.
+ bu := &Binutils{} + bu.SetFastSymbolization(true) + bu.SetFastSymbolization(false) +} + +func skipUnlessLinuxAmd64(t *testing.T) { + if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" { + t.Skip("This test only works on x86-64 Linux") + } +} + +func skipUnlessDarwinAmd64(t *testing.T) { + if runtime.GOOS != "darwin" || runtime.GOARCH != "amd64" { + t.Skip("This test only works on x86-64 macOS") + } +} + +func skipUnlessWindowsAmd64(t *testing.T) { + if runtime.GOOS != "windows" || runtime.GOARCH != "amd64" { + t.Skip("This test only works on x86-64 Windows") + } +} + +func testDisasm(t *testing.T, intelSyntax bool) { + _, llvmObjdump, buObjdump := findObjdump([]string{""}) + if !(llvmObjdump || buObjdump) { + t.Skip("cannot disasm: no objdump tool available") + } + + bu := &Binutils{} + var testexe string + switch runtime.GOOS { + case "linux": + testexe = "exe_linux_64" + case "darwin": + testexe = "exe_mac_64" + case "windows": + testexe = "exe_windows_64.exe" + default: + t.Skipf("unsupported OS %q", runtime.GOOS) + } + + insts, err := bu.Disasm(filepath.Join("testdata", testexe), 0, math.MaxUint64, intelSyntax) + if err != nil { + t.Fatalf("Disasm: unexpected error %v", err) + } + mainCount := 0 + for _, x := range insts { + // macOS symbols have a leading underscore. 
+ if x.Function == "main" || x.Function == "_main" { + mainCount++ + } + } + if mainCount == 0 { + t.Error("Disasm: found no main instructions") + } +} + +func TestDisasm(t *testing.T) { + if (runtime.GOOS != "linux" && runtime.GOOS != "darwin" && runtime.GOOS != "windows") || runtime.GOARCH != "amd64" { + t.Skip("This test only works on x86-64 Linux, macOS or Windows") + } + testDisasm(t, false) +} + +func TestDisasmIntelSyntax(t *testing.T) { + if (runtime.GOOS != "linux" && runtime.GOOS != "darwin" && runtime.GOOS != "windows") || runtime.GOARCH != "amd64" { + t.Skip("This test only works on x86_64 Linux, macOS or Windows as it tests Intel asm syntax") + } + testDisasm(t, true) +} + +func findSymbol(syms []*plugin.Sym, name string) *plugin.Sym { + for _, s := range syms { + for _, n := range s.Name { + if n == name { + return s + } + } + } + return nil +} + +func TestObjFile(t *testing.T) { + // If this test fails, check the address for main function in testdata/exe_linux_64 + // using the command 'nm -n '. Update the hardcoded addresses below to match + // the addresses from the output. + skipUnlessLinuxAmd64(t) + for _, tc := range []struct { + desc string + start, limit, offset uint64 + addr uint64 + }{ + {"fixed load address", 0x400000, 0x4006fc, 0, 0x40052d}, + // True user-mode ASLR binaries are ET_DYN rather than ET_EXEC so this case + // is a bit artificial except that it approximates the + // vmlinux-with-kernel-ASLR case where the binary *is* ET_EXEC. 
+ {"simulated ASLR address", 0x500000, 0x5006fc, 0, 0x50052d}, + } { + t.Run(tc.desc, func(t *testing.T) { + bu := &Binutils{} + f, err := bu.Open(filepath.Join("testdata", "exe_linux_64"), tc.start, tc.limit, tc.offset, "") + if err != nil { + t.Fatalf("Open: unexpected error %v", err) + } + defer f.Close() + syms, err := f.Symbols(regexp.MustCompile("main"), 0) + if err != nil { + t.Fatalf("Symbols: unexpected error %v", err) + } + + m := findSymbol(syms, "main") + if m == nil { + t.Fatalf("Symbols: did not find main") + } + addr, err := f.ObjAddr(tc.addr) + if err != nil { + t.Fatalf("ObjAddr(%x) failed: %v", tc.addr, err) + } + if addr != m.Start { + t.Errorf("ObjAddr(%x) got %x, want %x", tc.addr, addr, m.Start) + } + gotFrames, err := f.SourceLine(tc.addr) + if err != nil { + t.Fatalf("SourceLine: unexpected error %v", err) + } + wantFrames := []plugin.Frame{ + {Func: "main", File: "/tmp/hello.c", Line: 3, StartLine: 3}, + } + if !reflect.DeepEqual(gotFrames, wantFrames) { + t.Fatalf("SourceLine for main: got %v; want %v\n", gotFrames, wantFrames) + } + }) + } +} + +func TestMachoFiles(t *testing.T) { + // If this test fails, check the address for main function in testdata/exe_mac_64 + // and testdata/lib_mac_64 using addr2line or gaddr2line. Update the + // hardcoded addresses below to match the addresses from the output. + skipUnlessDarwinAmd64(t) + + // Load `file`, pretending it was mapped at `start`. Then get the symbol + // table. Check that it contains the symbol `sym` and that the address + // `addr` gives the `expected` stack trace. 
+ for _, tc := range []struct { + desc string + file string + start, limit, offset uint64 + addr uint64 + sym string + expected []plugin.Frame + }{ + {"normal mapping", "exe_mac_64", 0x100000000, math.MaxUint64, 0, + 0x100000f50, "_main", + []plugin.Frame{ + {Func: "main", File: "/tmp/hello.c", Line: 3, StartLine: 3}, + }}, + {"other mapping", "exe_mac_64", 0x200000000, math.MaxUint64, 0, + 0x200000f50, "_main", + []plugin.Frame{ + {Func: "main", File: "/tmp/hello.c", Line: 3, StartLine: 3}, + }}, + {"lib normal mapping", "lib_mac_64", 0, math.MaxUint64, 0, + 0xfa0, "_bar", + []plugin.Frame{ + {Func: "bar", File: "/tmp/lib.c", Line: 5, StartLine: 5}, + }}, + } { + t.Run(tc.desc, func(t *testing.T) { + bu := &Binutils{} + f, err := bu.Open(filepath.Join("testdata", tc.file), tc.start, tc.limit, tc.offset, "") + if err != nil { + t.Fatalf("Open: unexpected error %v", err) + } + t.Logf("binutils: %v", bu) + if runtime.GOOS == "darwin" && !bu.rep.addr2lineFound && !bu.rep.llvmSymbolizerFound { + // On macOS, user needs to install gaddr2line or llvm-symbolizer with + // Homebrew, skip the test when the environment doesn't have it + // installed. 
+ t.Skip("couldn't find addr2line or gaddr2line") + } + defer f.Close() + syms, err := f.Symbols(nil, 0) + if err != nil { + t.Fatalf("Symbols: unexpected error %v", err) + } + + m := findSymbol(syms, tc.sym) + if m == nil { + t.Fatalf("Symbols: could not find symbol %v", tc.sym) + } + gotFrames, err := f.SourceLine(tc.addr) + if err != nil { + t.Fatalf("SourceLine: unexpected error %v", err) + } + if !reflect.DeepEqual(gotFrames, tc.expected) { + t.Fatalf("SourceLine for main: got %v; want %v\n", gotFrames, tc.expected) + } + }) + } +} + +func TestLLVMSymbolizer(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skip("testtdata/llvm-symbolizer has only been tested on linux") + } + + cmd := filepath.Join("testdata", "fake-llvm-symbolizer") + for _, c := range []struct { + addr uint64 + isData bool + frames []plugin.Frame + }{ + {0x10, false, []plugin.Frame{ + {Func: "Inlined_0x10", File: "foo.h", Line: 0, Column: 0, StartLine: 0}, + {Func: "Func_0x10", File: "foo.c", Line: 2, Column: 1, StartLine: 2}, + }}, + {0x20, true, []plugin.Frame{ + {Func: "foo_0x20", File: "0x20 8"}, + }}, + } { + desc := fmt.Sprintf("Code %x", c.addr) + if c.isData { + desc = fmt.Sprintf("Data %x", c.addr) + } + t.Run(desc, func(t *testing.T) { + symbolizer, err := newLLVMSymbolizer(cmd, "foo", 0, c.isData) + if err != nil { + t.Fatalf("newLLVMSymbolizer: unexpected error %v", err) + } + defer symbolizer.rw.close() + + frames, err := symbolizer.addrInfo(c.addr) + if err != nil { + t.Fatalf("LLVM: unexpected error %v", err) + } + if !reflect.DeepEqual(frames, c.frames) { + t.Errorf("LLVM: expect %v; got %v\n", c.frames, frames) + } + }) + } +} + +func TestPEFile(t *testing.T) { + // If this test fails, check the address for main function in testdata/exe_windows_64.exe + // using the command 'nm -n '. Update the hardcoded addresses below to match + // the addresses from the output. 
+ skipUnlessWindowsAmd64(t) + for _, tc := range []struct { + desc string + start, limit, offset uint64 + addr uint64 + }{ + {"fake mapping", 0, math.MaxUint64, 0, 0x140001594}, + {"fixed load address", 0x140000000, 0x140002000, 0, 0x140001594}, + {"simulated ASLR address", 0x150000000, 0x150002000, 0, 0x150001594}, + } { + t.Run(tc.desc, func(t *testing.T) { + bu := &Binutils{} + f, err := bu.Open(filepath.Join("testdata", "exe_windows_64.exe"), tc.start, tc.limit, tc.offset, "") + if err != nil { + t.Fatalf("Open: unexpected error %v", err) + } + defer f.Close() + syms, err := f.Symbols(regexp.MustCompile("main"), 0) + if err != nil { + t.Fatalf("Symbols: unexpected error %v", err) + } + + m := findSymbol(syms, "main") + if m == nil { + t.Fatalf("Symbols: did not find main") + } + addr, err := f.ObjAddr(tc.addr) + if err != nil { + t.Fatalf("ObjAddr(%x) failed: %v", tc.addr, err) + } + if addr != m.Start { + t.Errorf("ObjAddr(%x) got %x, want %x", tc.addr, addr, m.Start) + } + gotFrames, err := f.SourceLine(tc.addr) + if err != nil { + t.Fatalf("SourceLine: unexpected error %v", err) + } + wantFrames := []plugin.Frame{ + {Func: "main", File: "hello.c", Line: 3, Column: 12, StartLine: 3}, + } + if !reflect.DeepEqual(gotFrames, wantFrames) { + t.Fatalf("SourceLine for main: got %v; want %v\n", gotFrames, wantFrames) + } + }) + } +} + +func TestOpenMalformedELF(t *testing.T) { + // Test that opening a malformed ELF file will report an error containing + // the word "ELF". + bu := &Binutils{} + _, err := bu.Open(filepath.Join("testdata", "malformed_elf"), 0, 0, 0, "") + if err == nil { + t.Fatalf("Open: unexpected success") + } + + if !strings.Contains(err.Error(), "ELF") { + t.Errorf("Open: got %v, want error containing 'ELF'", err) + } +} + +func TestOpenMalformedMachO(t *testing.T) { + // Test that opening a malformed Mach-O file will report an error containing + // the word "Mach-O". 
+ bu := &Binutils{} + _, err := bu.Open(filepath.Join("testdata", "malformed_macho"), 0, 0, 0, "") + if err == nil { + t.Fatalf("Open: unexpected success") + } + + if !strings.Contains(err.Error(), "Mach-O") { + t.Errorf("Open: got %v, want error containing 'Mach-O'", err) + } +} + +func TestObjdumpVersionChecks(t *testing.T) { + // Test that the objdump version strings are parsed properly. + type testcase struct { + desc string + os string + ver string + want bool + } + + for _, tc := range []testcase{ + { + desc: "Valid Apple LLVM version string with usable version", + os: "darwin", + ver: "Apple LLVM version 11.0.3 (clang-1103.0.32.62)\nOptimized build.", + want: true, + }, + { + desc: "Valid Apple LLVM version string with unusable version", + os: "darwin", + ver: "Apple LLVM version 10.0.0 (clang-1000.11.45.5)\nOptimized build.", + want: false, + }, + { + desc: "Invalid Apple LLVM version string with usable version", + os: "darwin", + ver: "Apple LLVM versions 11.0.3 (clang-1103.0.32.62)\nOptimized build.", + want: false, + }, + { + desc: "Valid LLVM version string with usable version", + os: "linux", + ver: "LLVM (http://llvm.org/):\nLLVM version 9.0.1\n\nOptimized build.", + want: true, + }, + { + desc: "Valid LLVM version string with unusable version", + os: "linux", + ver: "LLVM (http://llvm.org/):\nLLVM version 6.0.1\n\nOptimized build.", + want: false, + }, + { + desc: "Invalid LLVM version string with usable version", + os: "linux", + ver: "LLVM (http://llvm.org/):\nLLVM versions 9.0.1\n\nOptimized build.", + want: false, + }, + { + desc: "Valid LLVM objdump version string with trunk", + os: runtime.GOOS, + ver: "LLVM (http://llvm.org/):\nLLVM version custom-trunk 124ffeb592a00bfe\nOptimized build.", + want: true, + }, + { + desc: "Invalid LLVM objdump version string with trunk", + os: runtime.GOOS, + ver: "LLVM (http://llvm.org/):\nLLVM version custom-trank 124ffeb592a00bfe\nOptimized build.", + want: false, + }, + { + desc: "Invalid LLVM objdump 
version string with trunk", + os: runtime.GOOS, + ver: "LLVM (http://llvm.org/):\nllvm version custom-trunk 124ffeb592a00bfe\nOptimized build.", + want: false, + }, + } { + if runtime.GOOS == tc.os { + if got := isLLVMObjdump(tc.ver); got != tc.want { + t.Errorf("%v: got %v, want %v", tc.desc, got, tc.want) + } + } + } + for _, tc := range []testcase{ + { + desc: "Valid GNU objdump version string", + ver: "GNU objdump (GNU Binutils) 2.34\nCopyright (C) 2020 Free Software Foundation, Inc.", + want: true, + }, + { + desc: "Invalid GNU objdump version string", + ver: "GNU nm (GNU Binutils) 2.34\nCopyright (C) 2020 Free Software Foundation, Inc.", + want: false, + }, + } { + if got := isBuObjdump(tc.ver); got != tc.want { + t.Errorf("%v: got %v, want %v", tc.desc, got, tc.want) + } + } +} + +func TestComputeBase(t *testing.T) { + realELFOpen := elfOpen + defer func() { + elfOpen = realELFOpen + }() + + tinyExecFile := &elf.File{ + FileHeader: elf.FileHeader{Type: elf.ET_EXEC}, + Progs: []*elf.Prog{ + {ProgHeader: elf.ProgHeader{Type: elf.PT_PHDR, Flags: elf.PF_R | elf.PF_X, Off: 0x40, Vaddr: 0x400040, Paddr: 0x400040, Filesz: 0x1f8, Memsz: 0x1f8, Align: 8}}, + {ProgHeader: elf.ProgHeader{Type: elf.PT_INTERP, Flags: elf.PF_R, Off: 0x238, Vaddr: 0x400238, Paddr: 0x400238, Filesz: 0x1c, Memsz: 0x1c, Align: 1}}, + {ProgHeader: elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}}, + {ProgHeader: elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x1f0, Memsz: 0x1f0, Align: 0x200000}}, + }, + } + tinyBadBSSExecFile := &elf.File{ + FileHeader: elf.FileHeader{Type: elf.ET_EXEC}, + Progs: []*elf.Prog{ + {ProgHeader: elf.ProgHeader{Type: elf.PT_PHDR, Flags: elf.PF_R | elf.PF_X, Off: 0x40, Vaddr: 0x400040, Paddr: 0x400040, Filesz: 0x1f8, Memsz: 0x1f8, Align: 8}}, + {ProgHeader: elf.ProgHeader{Type: elf.PT_INTERP, Flags: elf.PF_R, 
Off: 0x238, Vaddr: 0x400238, Paddr: 0x400238, Filesz: 0x1c, Memsz: 0x1c, Align: 1}}, + {ProgHeader: elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}}, + {ProgHeader: elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x100, Memsz: 0x1f0, Align: 0x200000}}, + {ProgHeader: elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xd80, Vaddr: 0x400d80, Paddr: 0x400d80, Filesz: 0x90, Memsz: 0x90, Align: 0x200000}}, + }, + } + + for _, tc := range []struct { + desc string + file *elf.File + openErr error + mapping *elfMapping + addr uint64 + wantError bool + wantBase uint64 + wantIsData bool + }{ + { + desc: "no elf mapping, no error", + mapping: nil, + addr: 0x1000, + wantBase: 0, + wantIsData: false, + }, + { + desc: "address outside mapping bounds means error", + file: &elf.File{}, + mapping: &elfMapping{start: 0x2000, limit: 0x5000, offset: 0x1000}, + addr: 0x1000, + wantError: true, + }, + { + desc: "elf.Open failing means error", + file: &elf.File{FileHeader: elf.FileHeader{Type: elf.ET_EXEC}}, + openErr: errors.New("elf.Open failed"), + mapping: &elfMapping{start: 0x2000, limit: 0x5000, offset: 0x1000}, + addr: 0x4000, + wantError: true, + }, + { + desc: "no loadable segments, no error", + file: &elf.File{FileHeader: elf.FileHeader{Type: elf.ET_EXEC}}, + mapping: &elfMapping{start: 0x2000, limit: 0x5000, offset: 0x1000}, + addr: 0x4000, + wantBase: 0, + wantIsData: false, + }, + { + desc: "unsupported executable type, Get Base returns error", + file: &elf.File{FileHeader: elf.FileHeader{Type: elf.ET_NONE}}, + mapping: &elfMapping{start: 0x2000, limit: 0x5000, offset: 0x1000}, + addr: 0x4000, + wantError: true, + }, + { + desc: "tiny file select executable segment by offset", + file: tinyExecFile, + mapping: &elfMapping{start: 0x5000000, limit: 0x5001000, offset: 0x0}, + addr: 0x5000c00, + wantBase: 
0x5000000, + wantIsData: false, + }, + { + desc: "tiny file select data segment by offset", + file: tinyExecFile, + mapping: &elfMapping{start: 0x5200000, limit: 0x5201000, offset: 0x0}, + addr: 0x5200c80, + wantBase: 0x5000000, + wantIsData: true, + }, + { + desc: "tiny file offset outside any segment means error", + file: tinyExecFile, + mapping: &elfMapping{start: 0x5200000, limit: 0x5201000, offset: 0x0}, + addr: 0x5200e70, + wantError: true, + }, + { + desc: "tiny file with bad BSS segment selects data segment by offset in initialized section", + file: tinyBadBSSExecFile, + mapping: &elfMapping{start: 0x5200000, limit: 0x5201000, offset: 0x0}, + addr: 0x5200d79, + wantBase: 0x5000000, + wantIsData: true, + }, + { + desc: "tiny file with bad BSS segment with offset in uninitialized section means error", + file: tinyBadBSSExecFile, + mapping: &elfMapping{start: 0x5200000, limit: 0x5201000, offset: 0x0}, + addr: 0x5200d80, + wantError: true, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + elfOpen = func(_ string) (*elf.File, error) { + return tc.file, tc.openErr + } + f := file{m: tc.mapping} + err := f.computeBase(tc.addr) + if (err != nil) != tc.wantError { + t.Errorf("got error %v, want any error=%v", err, tc.wantError) + } + if err != nil { + return + } + if f.base != tc.wantBase { + t.Errorf("got base %x, want %x", f.base, tc.wantBase) + } + if f.isData != tc.wantIsData { + t.Errorf("got isData %v, want %v", f.isData, tc.wantIsData) + } + }) + } +} + +func TestELFObjAddr(t *testing.T) { + // The exe_linux_64 has two loadable program headers: + // LOAD 0x0000000000000000 0x0000000000400000 0x0000000000400000 + // 0x00000000000006fc 0x00000000000006fc R E 0x200000 + // LOAD 0x0000000000000e10 0x0000000000600e10 0x0000000000600e10 + // 0x0000000000000230 0x0000000000000238 RW 0x200000 + name := filepath.Join("testdata", "exe_linux_64") + + for _, tc := range []struct { + desc string + start, limit, offset uint64 + wantOpenError bool + addr uint64 + 
wantObjAddr uint64 + wantAddrError bool + }{ + {"exec mapping, good address", 0x5400000, 0x5401000, 0, false, 0x5400400, 0x400400, false}, + {"exec mapping, address outside segment", 0x5400000, 0x5401000, 0, false, 0x5400800, 0, true}, + {"short data mapping, good address", 0x5600e00, 0x5602000, 0xe00, false, 0x5600e10, 0x600e10, false}, + {"short data mapping, address outside segment", 0x5600e00, 0x5602000, 0xe00, false, 0x5600e00, 0x600e00, false}, + {"page aligned data mapping, good address", 0x5600000, 0x5602000, 0, false, 0x5601000, 0x601000, false}, + {"page aligned data mapping, address outside segment", 0x5600000, 0x5602000, 0, false, 0x5601048, 0, true}, + {"bad file offset, no matching segment", 0x5600000, 0x5602000, 0x2000, false, 0x5600e10, 0, true}, + {"large mapping size, match by sample offset", 0x5600000, 0x5603000, 0, false, 0x5600e10, 0x600e10, false}, + } { + t.Run(tc.desc, func(t *testing.T) { + b := binrep{} + o, err := b.openELF(name, tc.start, tc.limit, tc.offset, "") + if (err != nil) != tc.wantOpenError { + t.Errorf("openELF got error %v, want any error=%v", err, tc.wantOpenError) + } + if err != nil { + return + } + got, err := o.ObjAddr(tc.addr) + if (err != nil) != tc.wantAddrError { + t.Errorf("ObjAddr got error %v, want any error=%v", err, tc.wantAddrError) + } + if err != nil { + return + } + if got != tc.wantObjAddr { + t.Errorf("got ObjAddr %x; want %x\n", got, tc.wantObjAddr) + } + }) + } +} + +type buf struct { + data []byte +} + +// write appends a null-terminated string and returns its starting index. +func (b *buf) write(s string) uint32 { + res := uint32(len(b.data)) + b.data = append(b.data, s...) + b.data = append(b.data, '\x00') + return res +} + +// fakeELFFile generates a minimal valid ELF file, with fake .head.text and +// .text sections, and their corresponding _text and _stext start symbols, +// mimicking a kernel vmlinux image. 
+func fakeELFFile(t *testing.T) *elf.File { + var ( + sizeHeader64 = binary.Size(elf.Header64{}) + sizeProg64 = binary.Size(elf.Prog64{}) + sizeSection64 = binary.Size(elf.Section64{}) + ) + + const ( + textAddr = 0xffff000010080000 + stextAddr = 0xffff000010081000 + ) + + // Generate magic to identify as an ELF file. + var ident [16]uint8 + ident[0] = '\x7f' + ident[1] = 'E' + ident[2] = 'L' + ident[3] = 'F' + ident[elf.EI_CLASS] = uint8(elf.ELFCLASS64) + ident[elf.EI_DATA] = uint8(elf.ELFDATA2LSB) + ident[elf.EI_VERSION] = uint8(elf.EV_CURRENT) + ident[elf.EI_OSABI] = uint8(elf.ELFOSABI_NONE) + + // A single program header, containing code and starting at the _text address. + progs := []elf.Prog64{{ + Type: uint32(elf.PT_LOAD), Flags: uint32(elf.PF_R | elf.PF_X), Off: 0x10000, Vaddr: textAddr, Paddr: textAddr, Filesz: 0x1234567, Memsz: 0x1234567, Align: 0x10000}} + + symNames := buf{} + syms := []elf.Sym64{ + {}, // first symbol empty by convention + {Name: symNames.write("_text"), Info: 0, Other: 0, Shndx: 0, Value: textAddr, Size: 0}, + {Name: symNames.write("_stext"), Info: 0, Other: 0, Shndx: 0, Value: stextAddr, Size: 0}, + } + + const numSections = 5 + // We'll write `textSize` zero bytes as contents of the .head.text and .text sections. + const textSize = 16 + // Offset of section contents in the byte stream -- after header, program headers, and section headers. 
+ sectionsStart := uint64(sizeHeader64 + len(progs)*sizeProg64 + numSections*sizeSection64) + + secNames := buf{} + sections := [numSections]elf.Section64{ + {Name: secNames.write(".head.text"), Type: uint32(elf.SHT_PROGBITS), Flags: uint64(elf.SHF_ALLOC | elf.SHF_EXECINSTR), Addr: textAddr, Off: sectionsStart, Size: textSize, Link: 0, Info: 0, Addralign: 2048, Entsize: 0}, + {Name: secNames.write(".text"), Type: uint32(elf.SHT_PROGBITS), Flags: uint64(elf.SHF_ALLOC | elf.SHF_EXECINSTR), Addr: stextAddr, Off: sectionsStart + textSize, Size: textSize, Link: 0, Info: 0, Addralign: 2048, Entsize: 0}, + {Name: secNames.write(".symtab"), Type: uint32(elf.SHT_SYMTAB), Flags: 0, Addr: 0, Off: sectionsStart + 2*textSize, Size: uint64(len(syms) * elf.Sym64Size), Link: 3 /*index of .strtab*/, Info: 0, Addralign: 8, Entsize: elf.Sym64Size}, + {Name: secNames.write(".strtab"), Type: uint32(elf.SHT_STRTAB), Flags: 0, Addr: 0, Off: sectionsStart + 2*textSize + uint64(len(syms)*elf.Sym64Size), Size: uint64(len(symNames.data)), Link: 0, Info: 0, Addralign: 1, Entsize: 0}, + {Name: secNames.write(".shstrtab"), Type: uint32(elf.SHT_STRTAB), Flags: 0, Addr: 0, Off: sectionsStart + 2*textSize + uint64(len(syms)*elf.Sym64Size+len(symNames.data)), Size: uint64(len(secNames.data)), Link: 0, Info: 0, Addralign: 1, Entsize: 0}, + } + + hdr := elf.Header64{ + Ident: ident, + Type: uint16(elf.ET_DYN), + Machine: uint16(elf.EM_AARCH64), + Version: uint32(elf.EV_CURRENT), + Entry: textAddr, + Phoff: uint64(sizeHeader64), + Shoff: uint64(sizeHeader64 + len(progs)*sizeProg64), + Flags: 0, + Ehsize: uint16(sizeHeader64), + Phentsize: uint16(sizeProg64), + Phnum: uint16(len(progs)), + Shentsize: uint16(sizeSection64), + Shnum: uint16(len(sections)), + Shstrndx: 4, // index of .shstrtab + } + + // Serialize all headers and sections into a single binary stream. 
+ var data bytes.Buffer + for i, b := range []interface{}{hdr, progs, sections, [textSize]byte{}, [textSize]byte{}, syms, symNames.data, secNames.data} { + err := binary.Write(&data, binary.LittleEndian, b) + if err != nil { + t.Fatalf("Write(%v) got err %v, want nil", i, err) + } + } + + // ... and parse it as and ELF file. + ef, err := elf.NewFile(bytes.NewReader(data.Bytes())) + if err != nil { + t.Fatalf("elf.NewFile got err %v, want nil", err) + } + return ef +} + +func TestELFKernelOffset(t *testing.T) { + realELFOpen := elfOpen + defer func() { + elfOpen = realELFOpen + }() + + wantAddr := uint64(0xffff000010082000) + elfOpen = func(_ string) (*elf.File, error) { + return fakeELFFile(t), nil + } + + for _, tc := range []struct { + name string + relocationSymbol string + start uint64 + }{ + {"text", "_text", 0xffff000020080000}, + {"stext", "_stext", 0xffff000020081000}, + } { + + b := binrep{} + o, err := b.openELF("vmlinux", tc.start, 0xffffffffffffffff, tc.start, tc.relocationSymbol) + if err != nil { + t.Errorf("%v: openELF got error %v, want nil", tc.name, err) + continue + } + + addr, err := o.ObjAddr(0xffff000020082000) + if err != nil { + t.Errorf("%v: ObjAddr got err %v, want nil", tc.name, err) + continue + } + if addr != wantAddr { + t.Errorf("%v: ObjAddr got %x, want %x", tc.name, addr, wantAddr) + } + + } +} diff --git a/plugin/debug/pkg/internal/binutils/disasm.go b/plugin/debug/pkg/internal/binutils/disasm.go new file mode 100644 index 0000000..701f669 --- /dev/null +++ b/plugin/debug/pkg/internal/binutils/disasm.go @@ -0,0 +1,180 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package binutils + +import ( + "bytes" + "io" + "regexp" + "strconv" + "strings" + + "github.com/ianlancetaylor/demangle" + "m7s.live/v5/plugin/debug/pkg/internal/plugin" +) + +var ( + nmOutputRE = regexp.MustCompile(`^\s*([[:xdigit:]]+)\s+(.)\s+(.*)`) + objdumpAsmOutputRE = regexp.MustCompile(`^\s*([[:xdigit:]]+):\s+(.*)`) + objdumpOutputFileLine = regexp.MustCompile(`^;?\s?(.*):([0-9]+)`) + objdumpOutputFunction = regexp.MustCompile(`^;?\s?(\S.*)\(\):`) + objdumpOutputFunctionLLVM = regexp.MustCompile(`^([[:xdigit:]]+)?\s?(.*):`) +) + +func findSymbols(syms []byte, file string, r *regexp.Regexp, address uint64) ([]*plugin.Sym, error) { + // Collect all symbols from the nm output, grouping names mapped to + // the same address into a single symbol. + + // The symbols to return. + var symbols []*plugin.Sym + + // The current group of symbol names, and the address they are all at. + names, start := []string{}, uint64(0) + + buf := bytes.NewBuffer(syms) + + for { + symAddr, name, err := nextSymbol(buf) + if err == io.EOF { + // Done. If there was an unfinished group, append it. + if len(names) != 0 { + if match := matchSymbol(names, start, symAddr-1, r, address); match != nil { + symbols = append(symbols, &plugin.Sym{Name: match, File: file, Start: start, End: symAddr - 1}) + } + } + + // And return the symbols. + return symbols, nil + } + + if err != nil { + // There was some kind of serious error reading nm's output. + return nil, err + } + + // If this symbol is at the same address as the current group, add it to the group. 
+ if symAddr == start { + names = append(names, name) + continue + } + + // Otherwise append the current group to the list of symbols. + if match := matchSymbol(names, start, symAddr-1, r, address); match != nil { + symbols = append(symbols, &plugin.Sym{Name: match, File: file, Start: start, End: symAddr - 1}) + } + + // And start a new group. + names, start = []string{name}, symAddr + } +} + +// matchSymbol checks if a symbol is to be selected by checking its +// name to the regexp and optionally its address. It returns the name(s) +// to be used for the matched symbol, or nil if no match +func matchSymbol(names []string, start, end uint64, r *regexp.Regexp, address uint64) []string { + if address != 0 && address >= start && address <= end { + return names + } + for _, name := range names { + if r == nil || r.MatchString(name) { + return []string{name} + } + + // Match all possible demangled versions of the name. + for _, o := range [][]demangle.Option{ + {demangle.NoClones}, + {demangle.NoParams, demangle.NoEnclosingParams}, + {demangle.NoParams, demangle.NoEnclosingParams, demangle.NoTemplateParams}, + } { + if demangled, err := demangle.ToString(name, o...); err == nil && r.MatchString(demangled) { + return []string{demangled} + } + } + } + return nil +} + +// disassemble parses the output of the objdump command and returns +// the assembly instructions in a slice. 
+func disassemble(asm []byte) ([]plugin.Inst, error) { + buf := bytes.NewBuffer(asm) + function, file, line := "", "", 0 + var assembly []plugin.Inst + for { + input, err := buf.ReadString('\n') + if err != nil { + if err != io.EOF { + return nil, err + } + if input == "" { + break + } + } + input = strings.TrimSpace(input) + + if fields := objdumpAsmOutputRE.FindStringSubmatch(input); len(fields) == 3 { + if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil { + assembly = append(assembly, + plugin.Inst{ + Addr: address, + Text: fields[2], + Function: function, + File: file, + Line: line, + }) + continue + } + } + if fields := objdumpOutputFileLine.FindStringSubmatch(input); len(fields) == 3 { + if l, err := strconv.ParseUint(fields[2], 10, 32); err == nil { + file, line = fields[1], int(l) + } + continue + } + if fields := objdumpOutputFunction.FindStringSubmatch(input); len(fields) == 2 { + function = fields[1] + continue + } else { + if fields := objdumpOutputFunctionLLVM.FindStringSubmatch(input); len(fields) == 3 { + function = fields[2] + continue + } + } + // Reset on unrecognized lines. + function, file, line = "", "", 0 + } + + return assembly, nil +} + +// nextSymbol parses the nm output to find the next symbol listed. +// Skips over any output it cannot recognize. 
+func nextSymbol(buf *bytes.Buffer) (uint64, string, error) {
+	for {
+		line, err := buf.ReadString('\n')
+		if err != nil {
+			// Stop on a real error, or on EOF with nothing left to
+			// parse; a final line without a trailing newline is still
+			// examined below.
+			if err != io.EOF || line == "" {
+				return 0, "", err
+			}
+		}
+		line = strings.TrimSpace(line)
+
+		// fields[1] is parsed as a hex address and fields[3] returned
+		// as the symbol name; fields[2] is unused here.
+		if fields := nmOutputRE.FindStringSubmatch(line); len(fields) == 4 {
+			if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil {
+				return address, fields[3], nil
+			}
+		}
+	}
+}
diff --git a/plugin/debug/pkg/internal/binutils/disasm_test.go b/plugin/debug/pkg/internal/binutils/disasm_test.go
new file mode 100644
index 0000000..b837f82
--- /dev/null
+++ b/plugin/debug/pkg/internal/binutils/disasm_test.go
@@ -0,0 +1,160 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package binutils
+
+import (
+	"fmt"
+	"regexp"
+	"testing"
+
+	"m7s.live/v5/plugin/debug/pkg/internal/plugin"
+)
+
+// TestFindSymbols tests the FindSymbols routine using a hardcoded nm output.
+func TestFindSymbols(t *testing.T) {
+	type testcase struct {
+		query, syms string
+		want        []plugin.Sym
+	}
+
+	// Symbols sharing an address form one group; each group spans up to
+	// the next listed address.
+	testsyms := `0000000000001000 t lineA001
+0000000000001000 t lineA002
+0000000000001000 t line1000
+0000000000002000 t line200A
+0000000000002000 t line2000
+0000000000002000 t line200B
+0000000000003000 t line3000
+0000000000003000 t _ZNK4DumbclEPKc
+0000000000003000 t lineB00C
+0000000000003000 t line300D
+0000000000004000 t _the_end
+`
+	testcases := []testcase{
+		{
+			"line.*[AC]",
+			testsyms,
+			[]plugin.Sym{
+				{Name: []string{"lineA001"}, File: "object.o", Start: 0x1000, End: 0x1FFF},
+				{Name: []string{"line200A"}, File: "object.o", Start: 0x2000, End: 0x2FFF},
+				{Name: []string{"lineB00C"}, File: "object.o", Start: 0x3000, End: 0x3FFF},
+			},
+		},
+		{
+			// The query matches only after demangling _ZNK4DumbclEPKc.
+			"Dumb::operator",
+			testsyms,
+			[]plugin.Sym{
+				{Name: []string{"Dumb::operator()(char const*) const"}, File: "object.o", Start: 0x3000, End: 0x3FFF},
+			},
+		},
+	}
+
+	for _, tc := range testcases {
+		syms, err := findSymbols([]byte(tc.syms), "object.o", regexp.MustCompile(tc.query), 0)
+		if err != nil {
+			t.Fatalf("%q: findSymbols: %v", tc.query, err)
+		}
+		if err := checkSymbol(syms, tc.want); err != nil {
+			t.Errorf("%q: %v", tc.query, err)
+		}
+	}
+}
+
+// checkSymbol reports the first field-by-field mismatch between got and
+// want, or nil when the slices are equivalent.
+func checkSymbol(got []*plugin.Sym, want []plugin.Sym) error {
+	if len(got) != len(want) {
+		return fmt.Errorf("unexpected number of symbols %d (want %d)", len(got), len(want))
+	}
+
+	for i, g := range got {
+		w := want[i]
+		if len(g.Name) != len(w.Name) {
+			return fmt.Errorf("names, got %d, want %d", len(g.Name), len(w.Name))
+		}
+		for n := range g.Name {
+			if g.Name[n] != w.Name[n] {
+				return fmt.Errorf("name %d, got %q, want %q", n, g.Name[n], w.Name[n])
+			}
+		}
+		if g.File != w.File {
+			return fmt.Errorf("filename, got %q, want %q", g.File, w.File)
+		}
+		if g.Start != w.Start {
+			return fmt.Errorf("start address, got %#x, want %#x", g.Start, w.Start)
+		}
+		if g.End != w.End {
+			return fmt.Errorf("end address, got %#x, want %#x", g.End, w.End)
+		}
+	}
+	return nil
+}
+
+// TestFunctionAssembly tests the FunctionAssembly routine by using a
+// fake objdump script.
+func TestFunctionAssembly(t *testing.T) {
+	type testcase struct {
+		s    plugin.Sym
+		asm  string
+		want []plugin.Inst
+	}
+	testcases := []testcase{
+		{
+			plugin.Sym{Name: []string{"symbol1"}, Start: 0x1000, End: 0x1FFF},
+			" 1000: instruction one\n 1001: instruction two\n 1002: instruction three\n 1003: instruction four",
+			[]plugin.Inst{
+				{Addr: 0x1000, Text: "instruction one"},
+				{Addr: 0x1001, Text: "instruction two"},
+				{Addr: 0x1002, Text: "instruction three"},
+				{Addr: 0x1003, Text: "instruction four"},
+			},
+		},
+		{
+			plugin.Sym{Name: []string{"symbol2"}, Start: 0x2000, End: 0x2FFF},
+			" 2000: instruction one\n 2001: instruction two",
+			[]plugin.Inst{
+				{Addr: 0x2000, Text: "instruction one"},
+				{Addr: 0x2001, Text: "instruction two"},
+			},
+		},
+		{
+			plugin.Sym{Name: []string{"_main"}, Start: 0x30000, End: 0x3FFF},
+			"_main:\n; /tmp/hello.c:3\n30001: push %rbp",
+			[]plugin.Inst{
+				{Addr: 0x30001, Text: "push %rbp", Function: "_main", File: "/tmp/hello.c", Line: 3},
+			},
+		},
+		{
+			plugin.Sym{Name: []string{"main"}, Start: 0x4000, End: 0x4FFF},
+			// "<main>" restored here: the GNU objdump function-header
+			// line must carry the bracketed name for the parser to set
+			// Function to "main" as this case expects.
+			"000000000040052d <main>:\nmain():\n/tmp/hello.c:3\n40001: push %rbp",
+			[]plugin.Inst{
+				{Addr: 0x40001, Text: "push %rbp", Function: "main", File: "/tmp/hello.c", Line: 3},
+			},
+		},
+	}
+
+	for _, tc := range testcases {
+		insts, err := disassemble([]byte(tc.asm))
+		if err != nil {
+			t.Fatalf("FunctionAssembly: %v", err)
+		}
+
+		if len(insts) != len(tc.want) {
+			t.Errorf("Unexpected number of assembly instructions %d (want %d)\n", len(insts), len(tc.want))
+		}
+		for i := range insts {
+			if insts[i] != tc.want[i] {
+				t.Errorf("Expected symbol %v, got %v\n", tc.want[i], insts[i])
+			}
+		}
+	}
+}
diff --git a/plugin/debug/pkg/internal/binutils/testdata/build_binaries.go b/plugin/debug/pkg/internal/binutils/testdata/build_binaries.go
new file mode 100644
index 0000000..695250b
--- /dev/null
+++ b/plugin/debug/pkg/internal/binutils/testdata/build_binaries.go
@@ -0,0 +1,94 @@
+// Copyright 2019 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// This is a script that generates the test executables for MacOS and Linux
+// in this directory. It should be needed very rarely to run this script.
+// It is mostly provided as a future reference on how the original binary
+// set was created.
+
+// When a new executable is generated, hardcoded addresses in the
+// functions TestObjFile, TestMachoFiles, TestPEFile in binutils_test.go must be updated.
+package main
+
+import (
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"runtime"
+)
+
+// main rebuilds the testdata binaries for the host OS: a debug-enabled
+// executable from hello.c on every platform, plus a dynamic library from
+// lib.c on darwin. -ffile-prefix-map keeps embedded source paths stable
+// across build machines.
+func main() {
+	wd, err := os.Getwd()
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	switch runtime.GOOS {
+	case "linux":
+		// Drop stale artifacts before rebuilding.
+		if err := removeGlob("exe_linux_64*"); err != nil {
+			log.Fatal(err)
+		}
+
+		out, err := exec.Command("cc", "-g", "-ffile-prefix-map="+wd+"="+"/tmp", "-o", "exe_linux_64", "hello.c").CombinedOutput()
+		log.Println(string(out))
+		if err != nil {
+			log.Fatal(err)
+		}
+
+	case "darwin":
+		if err := removeGlob("exe_mac_64*", "lib_mac_64"); err != nil {
+			log.Fatal(err)
+		}
+
+		out, err := exec.Command("clang", "-g", "-ffile-prefix-map="+wd+"="+"/tmp", "-o", "exe_mac_64", "hello.c").CombinedOutput()
+		log.Println(string(out))
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		out, err = exec.Command("clang", "-g", "-ffile-prefix-map="+wd+"="+"/tmp", "-o", "lib_mac_64", "-dynamiclib", "lib.c").CombinedOutput()
+		log.Println(string(out))
+		if err != nil {
+			log.Fatal(err)
+		}
+
+	case "windows":
+		// Many gcc environments may create binaries that trigger false-positives
+		// in antiviruses. MSYS2 with gcc 10.2.0 is a working environment for
+		// compiling. To setup the environment follow the guide at
+		// https://www.msys2.org/ and install gcc with `pacman -S gcc`.
+		out, err := exec.Command("gcc", "-g", "-ffile-prefix-map="+wd+"=", "-o", "exe_windows_64.exe", "hello.c").CombinedOutput()
+		log.Println(string(out))
+		if err != nil {
+			log.Fatal(err)
+		}
+		log.Println("Please verify that exe_windows_64.exe does not trigger any antivirus on `virustotal.com`.")
+	default:
+		log.Fatalf("Unsupported OS %q", runtime.GOOS)
+	}
+}
+
+// removeGlob deletes every file matching the given glob patterns.
+// Individual os.Remove failures are deliberately ignored — presumably
+// best-effort cleanup of stale artifacts; only a malformed pattern is
+// reported as an error. TODO confirm the ignore is intentional.
+func removeGlob(globs ...string) error {
+	for _, glob := range globs {
+		matches, err := filepath.Glob(glob)
+		if err != nil {
+			return err
+		}
+		for _, p := range matches {
+			os.Remove(p)
+		}
+	}
+	return nil
+}
diff --git a/plugin/debug/pkg/internal/binutils/testdata/exe_linux_64 b/plugin/debug/pkg/internal/binutils/testdata/exe_linux_64
new file mode 100755
index 0000000..d86dc7c
Binary files /dev/null and b/plugin/debug/pkg/internal/binutils/testdata/exe_linux_64 differ
diff --git a/plugin/debug/pkg/internal/binutils/testdata/exe_mac_64 b/plugin/debug/pkg/internal/binutils/testdata/exe_mac_64
new file mode 100755
index 0000000..dba1ae1
Binary files /dev/null and b/plugin/debug/pkg/internal/binutils/testdata/exe_mac_64 differ
diff --git a/plugin/debug/pkg/internal/binutils/testdata/exe_mac_64.dSYM/Contents/Info.plist b/plugin/debug/pkg/internal/binutils/testdata/exe_mac_64.dSYM/Contents/Info.plist
new file mode 100644
index 0000000..41ce537
--- /dev/null
+++ b/plugin/debug/pkg/internal/binutils/testdata/exe_mac_64.dSYM/Contents/Info.plist
@@ -0,0 +1,20 @@
+
+
+
+
+	CFBundleDevelopmentRegion
+	English
+	CFBundleIdentifier
+	com.apple.xcode.dsym.exe_mac_64
+	CFBundleInfoDictionaryVersion
+	6.0
+	CFBundlePackageType
+	dSYM
+	CFBundleSignature
+	????
+	CFBundleShortVersionString
+	1.0
+	CFBundleVersion
+	1
+
+
diff --git a/plugin/debug/pkg/internal/binutils/testdata/exe_mac_64.dSYM/Contents/Resources/DWARF/exe_mac_64 b/plugin/debug/pkg/internal/binutils/testdata/exe_mac_64.dSYM/Contents/Resources/DWARF/exe_mac_64
new file mode 100644
index 0000000..2cb0e3b
Binary files /dev/null and b/plugin/debug/pkg/internal/binutils/testdata/exe_mac_64.dSYM/Contents/Resources/DWARF/exe_mac_64 differ
diff --git a/plugin/debug/pkg/internal/binutils/testdata/exe_windows_64.exe b/plugin/debug/pkg/internal/binutils/testdata/exe_windows_64.exe
new file mode 100644
index 0000000..291e324
Binary files /dev/null and b/plugin/debug/pkg/internal/binutils/testdata/exe_windows_64.exe differ
diff --git a/plugin/debug/pkg/internal/binutils/testdata/fake-llvm-symbolizer b/plugin/debug/pkg/internal/binutils/testdata/fake-llvm-symbolizer
new file mode 100755
index 0000000..491a2cd
--- /dev/null
+++ b/plugin/debug/pkg/internal/binutils/testdata/fake-llvm-symbolizer
@@ -0,0 +1,39 @@
+#!/bin/sh
+#
+# Copyright 2014 Google Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Fake llvm-symbolizer to use in tests
+
+set -f
+IFS=" "
+
+while read line; do
+  # line has form:
+  #   KIND filename 0xaddr
+  # (KIND is CODE or DATA; comment repaired — the three fields below
+  # are split out of each input line.)
+  # Emit dummy output that matches llvm-symbolizer JSON output format.
+  set -- ${line}
+  kind=$1
+  fname=$2
+  addr=$3
+  case ${kind} in
+  CODE)
+    echo "{\"Address\":\"${addr}\",\"ModuleName\":\"${fname}\",\"Symbol\":[{\"Column\":0,\"FileName\":\"${fname}.h\",\"FunctionName\":\"Inlined_${addr}\",\"Line\":0,\"StartLine\":0},{\"Column\":1,\"FileName\":\"${fname}.c\",\"FunctionName\":\"Func_${addr}\",\"Line\":2,\"StartLine\":2}]}"
+    ;;
+  DATA)
+    echo "{\"Address\":\"${addr}\",\"ModuleName\":\"${fname}\",\"Data\":{\"Name\":\"${fname}_${addr}\",\"Size\":\"0x8\",\"Start\":\"${addr}\"}}"
+    ;;
+  *) exit 1;;
+  esac
+done
diff --git a/plugin/debug/pkg/internal/binutils/testdata/hello.c b/plugin/debug/pkg/internal/binutils/testdata/hello.c
new file mode 100644
index 0000000..aed773b
--- /dev/null
+++ b/plugin/debug/pkg/internal/binutils/testdata/hello.c
@@ -0,0 +1,6 @@
+#include <stdio.h>
+
+int main() {
+	printf("Hello, world!\n");
+	return 0;
+}
diff --git a/plugin/debug/pkg/internal/binutils/testdata/lib.c b/plugin/debug/pkg/internal/binutils/testdata/lib.c
new file mode 100644
index 0000000..f6207b8
--- /dev/null
+++ b/plugin/debug/pkg/internal/binutils/testdata/lib.c
@@ -0,0 +1,7 @@
+int foo() {
+	return 1;
+}
+
+int bar() {
+	return 2;
+}
diff --git a/plugin/debug/pkg/internal/binutils/testdata/lib_mac_64 b/plugin/debug/pkg/internal/binutils/testdata/lib_mac_64
new file mode 100755
index 0000000..933a3f6
Binary files /dev/null and b/plugin/debug/pkg/internal/binutils/testdata/lib_mac_64 differ
diff --git a/plugin/debug/pkg/internal/binutils/testdata/lib_mac_64.dSYM/Contents/Info.plist b/plugin/debug/pkg/internal/binutils/testdata/lib_mac_64.dSYM/Contents/Info.plist
new file mode 100644
index 0000000..409e4cf
--- /dev/null
+++ b/plugin/debug/pkg/internal/binutils/testdata/lib_mac_64.dSYM/Contents/Info.plist
@@ -0,0 +1,20 @@
+
+
+
+
+	CFBundleDevelopmentRegion
+	English
+	CFBundleIdentifier
+	com.apple.xcode.dsym.lib_mac_64
+	CFBundleInfoDictionaryVersion
+	6.0
+	CFBundlePackageType
+	dSYM
+	CFBundleSignature
+	????
+ CFBundleShortVersionString + 1.0 + CFBundleVersion + 1 + + diff --git a/plugin/debug/pkg/internal/binutils/testdata/lib_mac_64.dSYM/Contents/Resources/DWARF/lib_mac_64 b/plugin/debug/pkg/internal/binutils/testdata/lib_mac_64.dSYM/Contents/Resources/DWARF/lib_mac_64 new file mode 100644 index 0000000..e466c13 Binary files /dev/null and b/plugin/debug/pkg/internal/binutils/testdata/lib_mac_64.dSYM/Contents/Resources/DWARF/lib_mac_64 differ diff --git a/plugin/debug/pkg/internal/binutils/testdata/malformed_elf b/plugin/debug/pkg/internal/binutils/testdata/malformed_elf new file mode 100644 index 0000000..f0b503b --- /dev/null +++ b/plugin/debug/pkg/internal/binutils/testdata/malformed_elf @@ -0,0 +1 @@ +ELF \ No newline at end of file diff --git a/plugin/debug/pkg/internal/binutils/testdata/malformed_macho b/plugin/debug/pkg/internal/binutils/testdata/malformed_macho new file mode 100644 index 0000000..b01ddf6 --- /dev/null +++ b/plugin/debug/pkg/internal/binutils/testdata/malformed_macho @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/plugin/debug/pkg/internal/driver/cli.go b/plugin/debug/pkg/internal/driver/cli.go new file mode 100644 index 0000000..8b1fa24 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/cli.go @@ -0,0 +1,360 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package driver + +import ( + "errors" + "fmt" + "os" + + "m7s.live/v5/plugin/debug/pkg/internal/binutils" + "m7s.live/v5/plugin/debug/pkg/internal/plugin" +) + +type source struct { + Sources []string + ExecName string + BuildID string + Base []string + DiffBase bool + Normalize bool + + Seconds int + Timeout int + Symbolize string + HTTPHostport string + HTTPDisableBrowser bool + Comment string +} + +// parseFlags parses the command lines through the specified flags package +// and returns the source of the profile and optionally the command +// for the kind of report to generate (nil for interactive use). +func parseFlags(o *plugin.Options) (*source, []string, error) { + flag := o.Flagset + // Comparisons. + flagDiffBase := flag.StringList("diff_base", "", "Source of base profile for comparison") + flagBase := flag.StringList("base", "", "Source of base profile for profile subtraction") + // Source options. + flagSymbolize := flag.String("symbolize", "", "Options for profile symbolization") + flagBuildID := flag.String("buildid", "", "Override build id for first mapping") + flagTimeout := flag.Int("timeout", -1, "Timeout in seconds for fetching a profile") + flagAddComment := flag.String("add_comment", "", "Annotation string to record in the profile") + // CPU profile options + flagSeconds := flag.Int("seconds", -1, "Length of time for dynamic profiles") + // Heap profile options + flagInUseSpace := flag.Bool("inuse_space", false, "Display in-use memory size") + flagInUseObjects := flag.Bool("inuse_objects", false, "Display in-use object counts") + flagAllocSpace := flag.Bool("alloc_space", false, "Display allocated memory size") + flagAllocObjects := flag.Bool("alloc_objects", false, "Display allocated object counts") + // Contention profile options + flagTotalDelay := flag.Bool("total_delay", false, "Display total delay at each region") + flagContentions := flag.Bool("contentions", false, "Display number of delays at each region") + flagMeanDelay := 
flag.Bool("mean_delay", false, "Display mean delay at each region") + flagTools := flag.String("tools", os.Getenv("PPROF_TOOLS"), "Path for object tool pathnames") + + flagHTTP := flag.String("http", "", "Present interactive web UI at the specified http host:port") + flagNoBrowser := flag.Bool("no_browser", false, "Skip opening a browser for the interactive web UI") + + // Flags that set configuration properties. + cfg := currentConfig() + configFlagSetter := installConfigFlags(flag, &cfg) + + flagCommands := make(map[string]*bool) + flagParamCommands := make(map[string]*string) + for name, cmd := range pprofCommands { + if cmd.hasParam { + flagParamCommands[name] = flag.String(name, "", "Generate a report in "+name+" format, matching regexp") + } else { + flagCommands[name] = flag.Bool(name, false, "Generate a report in "+name+" format") + } + } + + args := flag.Parse(func() { + o.UI.Print(usageMsgHdr + + usage(true) + + usageMsgSrc + + flag.ExtraUsage() + + usageMsgVars) + }) + if len(args) == 0 { + return nil, nil, errors.New("no profile source specified") + } + + var execName string + // Recognize first argument as an executable or buildid override. + if len(args) > 1 { + arg0 := args[0] + if file, err := o.Obj.Open(arg0, 0, ^uint64(0), 0, ""); err == nil { + file.Close() + execName = arg0 + args = args[1:] + } + } + + // Apply any specified flags to cfg. 
+ if err := configFlagSetter(); err != nil { + return nil, nil, err + } + + cmd, err := outputFormat(flagCommands, flagParamCommands) + if err != nil { + return nil, nil, err + } + if cmd != nil && *flagHTTP != "" { + return nil, nil, errors.New("-http is not compatible with an output format on the command line") + } + + if *flagNoBrowser && *flagHTTP == "" { + return nil, nil, errors.New("-no_browser only makes sense with -http") + } + + si := cfg.SampleIndex + si = sampleIndex(flagTotalDelay, si, "delay", "-total_delay", o.UI) + si = sampleIndex(flagMeanDelay, si, "delay", "-mean_delay", o.UI) + si = sampleIndex(flagContentions, si, "contentions", "-contentions", o.UI) + si = sampleIndex(flagInUseSpace, si, "inuse_space", "-inuse_space", o.UI) + si = sampleIndex(flagInUseObjects, si, "inuse_objects", "-inuse_objects", o.UI) + si = sampleIndex(flagAllocSpace, si, "alloc_space", "-alloc_space", o.UI) + si = sampleIndex(flagAllocObjects, si, "alloc_objects", "-alloc_objects", o.UI) + cfg.SampleIndex = si + + if *flagMeanDelay { + cfg.Mean = true + } + + source := &source{ + Sources: args, + ExecName: execName, + BuildID: *flagBuildID, + Seconds: *flagSeconds, + Timeout: *flagTimeout, + Symbolize: *flagSymbolize, + HTTPHostport: *flagHTTP, + HTTPDisableBrowser: *flagNoBrowser, + Comment: *flagAddComment, + } + + if err := source.addBaseProfiles(*flagBase, *flagDiffBase); err != nil { + return nil, nil, err + } + + normalize := cfg.Normalize + if normalize && len(source.Base) == 0 { + return nil, nil, errors.New("must have base profile to normalize by") + } + source.Normalize = normalize + + if bu, ok := o.Obj.(*binutils.Binutils); ok { + bu.SetTools(*flagTools) + } + + setCurrentConfig(cfg) + return source, cmd, nil +} + +// addBaseProfiles adds the list of base profiles or diff base profiles to +// the source. This function will return an error if both base and diff base +// profiles are specified. 
+func (source *source) addBaseProfiles(flagBase, flagDiffBase []*string) error { + base, diffBase := dropEmpty(flagBase), dropEmpty(flagDiffBase) + if len(base) > 0 && len(diffBase) > 0 { + return errors.New("-base and -diff_base flags cannot both be specified") + } + + source.Base = base + if len(diffBase) > 0 { + source.Base, source.DiffBase = diffBase, true + } + return nil +} + +// dropEmpty list takes a slice of string pointers, and outputs a slice of +// non-empty strings associated with the flag. +func dropEmpty(list []*string) []string { + var l []string + for _, s := range list { + if *s != "" { + l = append(l, *s) + } + } + return l +} + +// installConfigFlags creates command line flags for configuration +// fields and returns a function which can be called after flags have +// been parsed to copy any flags specified on the command line to +// *cfg. +func installConfigFlags(flag plugin.FlagSet, cfg *config) func() error { + // List of functions for setting the different parts of a config. + var setters []func() + var err error // Holds any errors encountered while running setters. + + for _, field := range configFields { + n := field.name + help := configHelp[n] + var setter func() + switch ptr := cfg.fieldPtr(field).(type) { + case *bool: + f := flag.Bool(n, *ptr, help) + setter = func() { *ptr = *f } + case *int: + f := flag.Int(n, *ptr, help) + setter = func() { *ptr = *f } + case *float64: + f := flag.Float64(n, *ptr, help) + setter = func() { *ptr = *f } + case *string: + if len(field.choices) == 0 { + f := flag.String(n, *ptr, help) + setter = func() { *ptr = *f } + } else { + // Make a separate flag per possible choice. + // Set all flags to initially false so we can + // identify conflicts. 
+ bools := make(map[string]*bool) + for _, choice := range field.choices { + bools[choice] = flag.Bool(choice, false, configHelp[choice]) + } + setter = func() { + var set []string + for k, v := range bools { + if *v { + set = append(set, k) + } + } + switch len(set) { + case 0: + // Leave as default value. + case 1: + *ptr = set[0] + default: + err = fmt.Errorf("conflicting options set: %v", set) + } + } + } + } + setters = append(setters, setter) + } + + return func() error { + // Apply the setter for every flag. + for _, setter := range setters { + setter() + if err != nil { + return err + } + } + return nil + } +} + +func sampleIndex(flag *bool, si string, sampleType, option string, ui plugin.UI) string { + if *flag { + if si == "" { + return sampleType + } + ui.PrintErr("Multiple value selections, ignoring ", option) + } + return si +} + +func outputFormat(bcmd map[string]*bool, acmd map[string]*string) (cmd []string, err error) { + for n, b := range bcmd { + if *b { + if cmd != nil { + return nil, errors.New("must set at most one output format") + } + cmd = []string{n} + } + } + for n, s := range acmd { + if *s != "" { + if cmd != nil { + return nil, errors.New("must set at most one output format") + } + cmd = []string{n, *s} + } + } + return cmd, nil +} + +var usageMsgHdr = `usage: + +Produce output in the specified format. + + pprof [options] [binary] ... + +Omit the format to get an interactive shell whose commands can be used +to generate various views of a profile + + pprof [options] [binary] ... + +Omit the format and provide the "-http" flag to get an interactive web +interface at the specified host:port that can be used to navigate through +various views of a profile. + + pprof -http [host]:[port] [options] [binary] ... 
+ +Details: +` + +var usageMsgSrc = "\n\n" + + " Source options:\n" + + " -seconds Duration for time-based profile collection\n" + + " -timeout Timeout in seconds for profile collection\n" + + " -buildid Override build id for main binary\n" + + " -add_comment Free-form annotation to add to the profile\n" + + " Displayed on some reports or with pprof -comments\n" + + " -diff_base source Source of base profile for comparison\n" + + " -base source Source of base profile for profile subtraction\n" + + " profile.pb.gz Profile in compressed protobuf format\n" + + " legacy_profile Profile in legacy pprof format\n" + + " http://host/profile URL for profile handler to retrieve\n" + + " -symbolize= Controls source of symbol information\n" + + " none Do not attempt symbolization\n" + + " local Examine only local binaries\n" + + " fastlocal Only get function names from local binaries\n" + + " remote Do not examine local binaries\n" + + " force Force re-symbolization\n" + + " Binary Local path or build id of binary for symbolization\n" + +var usageMsgVars = "\n\n" + + " Misc options:\n" + + " -http Provide web interface at host:port.\n" + + " Host is optional and 'localhost' by default.\n" + + " Port is optional and a randomly available port by default.\n" + + " -no_browser Skip opening a browser for the interactive web UI.\n" + + " -tools Search path for object tools\n" + + "\n" + + " Legacy convenience options:\n" + + " -inuse_space Same as -sample_index=inuse_space\n" + + " -inuse_objects Same as -sample_index=inuse_objects\n" + + " -alloc_space Same as -sample_index=alloc_space\n" + + " -alloc_objects Same as -sample_index=alloc_objects\n" + + " -total_delay Same as -sample_index=delay\n" + + " -contentions Same as -sample_index=contentions\n" + + " -mean_delay Same as -mean -sample_index=delay\n" + + "\n" + + " Environment Variables:\n" + + " PPROF_TMPDIR Location for saved profiles (default $HOME/pprof)\n" + + " PPROF_TOOLS Search path for object-level tools\n" + + " 
PPROF_BINARY_PATH Search path for local binary files\n" + + " default: $HOME/pprof/binaries\n" + + " searches $buildid/$name, $buildid/*, $path/$buildid,\n" + + " ${buildid:0:2}/${buildid:2}.debug, $name, $path,\n" + + " ${name}.debug, $dir/.debug/${name}.debug,\n" + + " usr/lib/debug/$dir/${name}.debug\n" + + " * On Windows, %USERPROFILE% is used instead of $HOME" diff --git a/plugin/debug/pkg/internal/driver/commands.go b/plugin/debug/pkg/internal/driver/commands.go new file mode 100644 index 0000000..dc45ab1 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/commands.go @@ -0,0 +1,461 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "runtime" + "sort" + "strings" + "time" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/internal/report" +) + +// commands describes the commands accepted by pprof. +type commands map[string]*command + +// command describes the actions for a pprof command. Includes a +// function for command-line completion, the report format to use +// during report generation, any postprocessing functions, and whether +// the command expects a regexp parameter (typically a function name). 
+type command struct { + format int // report format to generate + postProcess PostProcessor // postprocessing to run on report + visualizer PostProcessor // display output using some callback + hasParam bool // collect a parameter from the CLI + description string // single-line description text saying what the command does + usage string // multi-line help text saying how the command is used +} + +// help returns a help string for a command. +func (c *command) help(name string) string { + message := c.description + "\n" + if c.usage != "" { + message += " Usage:\n" + lines := strings.Split(c.usage, "\n") + for _, line := range lines { + message += fmt.Sprintf(" %s\n", line) + } + } + return message + "\n" +} + +// AddCommand adds an additional command to the set of commands +// accepted by pprof. This enables extensions to add new commands for +// specialized visualization formats. If the command specified already +// exists, it is overwritten. +func AddCommand(cmd string, format int, post PostProcessor, desc, usage string) { + pprofCommands[cmd] = &command{format, post, nil, false, desc, usage} +} + +// SetVariableDefault sets the default value for a pprof +// variable. This enables extensions to set their own defaults. +func SetVariableDefault(variable, value string) { + configure(variable, value) +} + +// PostProcessor is a function that applies post-processing to the report output +type PostProcessor func(input io.Reader, output io.Writer, ui plugin.UI) error + +// interactiveMode is true if pprof is running on interactive mode, reading +// commands from its shell. +var interactiveMode = false + +// pprofCommands are the report generation commands recognized by pprof. +var pprofCommands = commands{ + // Commands that require no post-processing. 
	"comments": {report.Comments, nil, nil, false, "Output all profile comments", ""},
	"disasm":   {report.Dis, nil, nil, true, "Output assembly listings annotated with samples", listHelp("disasm", true)},
	"dot":      {report.Dot, nil, nil, false, "Outputs a graph in DOT format", reportHelp("dot", false, true)},
	"list":     {report.List, nil, nil, true, "Output annotated source for functions matching regexp", listHelp("list", false)},
	"peek":     {report.Tree, nil, nil, true, "Output callers/callees of functions matching regexp", "peek func_regex\nDisplay callers and callees of functions matching func_regex."},
	"raw":      {report.Raw, nil, nil, false, "Outputs a text representation of the raw profile", ""},
	"tags":     {report.Tags, nil, nil, false, "Outputs all tags in the profile", "tags [tag_regex]* [-ignore_regex]* [>file]\nList tags with key:value matching tag_regex and exclude ignore_regex."},
	"text":     {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("text", true, true)},
	"top":      {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("top", true, true)},
	"traces":   {report.Traces, nil, nil, false, "Outputs all profile samples in text form", ""},
	"tree":     {report.Tree, nil, nil, false, "Outputs a text rendering of call graph", reportHelp("tree", true, true)},

	// Save binary formats to a file
	"callgrind": {report.Callgrind, nil, awayFromTTY("callgraph.out"), false, "Outputs a graph in callgrind format", reportHelp("callgrind", false, true)},
	"proto":     {report.Proto, nil, awayFromTTY("pb.gz"), false, "Outputs the profile in compressed protobuf format", ""},
	"topproto":  {report.TopProto, nil, awayFromTTY("pb.gz"), false, "Outputs top entries in compressed protobuf format", ""},

	// Generate report in DOT format and postprocess with dot
	"gif": {report.Dot, invokeDot("gif"), awayFromTTY("gif"), false, "Outputs a graph image in GIF format", reportHelp("gif", false, true)},
	"pdf": {report.Dot, invokeDot("pdf"), awayFromTTY("pdf"), false, "Outputs a graph in PDF format", reportHelp("pdf", false, true)},
	"png": {report.Dot, invokeDot("png"), awayFromTTY("png"), false, "Outputs a graph image in PNG format", reportHelp("png", false, true)},
	"ps":  {report.Dot, invokeDot("ps"), awayFromTTY("ps"), false, "Outputs a graph in PS format", reportHelp("ps", false, true)},

	// Save SVG output into a file
	"svg": {report.Dot, massageDotSVG(), awayFromTTY("svg"), false, "Outputs a graph in SVG format", reportHelp("svg", false, true)},

	// Visualize postprocessed dot output
	"eog":    {report.Dot, invokeDot("svg"), invokeVisualizer("svg", []string{"eog"}), false, "Visualize graph through eog", reportHelp("eog", false, false)},
	"evince": {report.Dot, invokeDot("pdf"), invokeVisualizer("pdf", []string{"evince"}), false, "Visualize graph through evince", reportHelp("evince", false, false)},
	"gv":     {report.Dot, invokeDot("ps"), invokeVisualizer("ps", []string{"gv --noantialias"}), false, "Visualize graph through gv", reportHelp("gv", false, false)},
	"web":    {report.Dot, massageDotSVG(), invokeVisualizer("svg", browsers()), false, "Visualize graph through web browser", reportHelp("web", false, false)},

	// Visualize callgrind output
	"kcachegrind": {report.Callgrind, nil, invokeVisualizer("grind", kcachegrind), false, "Visualize report in KCachegrind", reportHelp("kcachegrind", false, false)},

	// Visualize HTML directly generated by report.
	"weblist": {report.WebList, nil, invokeVisualizer("html", browsers()), true, "Display annotated source in a web browser", listHelp("weblist", false)},
}

// configHelp contains help text per configuration parameter. Keys are the
// JSON/variable names declared in the config struct (see config.go); values
// are the strings shown by the `help` command and the usage text.
var configHelp = map[string]string{
	// Filename for file-based output formats, stdout by default.
	"output": helpText("Output filename for file-based outputs"),

	// Comparisons.
	"drop_negative": helpText(
		"Ignore negative differences",
		"Do not show any locations with values <0."),

	// Graph handling options.
	"call_tree": helpText(
		"Create a context-sensitive call tree",
		"Treat locations reached through different paths as separate."),

	// Display options.
	"relative_percentages": helpText(
		"Show percentages relative to focused subgraph",
		"If unset, percentages are relative to full graph before focusing",
		"to facilitate comparison with original graph."),
	"unit": helpText(
		"Measurement units to display",
		"Scale the sample values to this unit.",
		"For time-based profiles, use seconds, milliseconds, nanoseconds, etc.",
		"For memory profiles, use megabytes, kilobytes, bytes, etc.",
		"Using auto will scale each value independently to the most natural unit."),
	"compact_labels": "Show minimal headers",
	"source_path":    "Search path for source files",
	"trim_path":      "Path to trim from source paths before search",
	"intel_syntax": helpText(
		"Show assembly in Intel syntax",
		"Only applicable to commands `disasm` and `weblist`"),

	// Filtering options
	"nodecount": helpText(
		"Max number of nodes to show",
		"Uses heuristics to limit the number of locations to be displayed.",
		"On graphs, dotted edges represent paths through nodes that have been removed."),
	"nodefraction": "Hide nodes below *total",
	"edgefraction": "Hide edges below *total",
	"trim": helpText(
		"Honor nodefraction/edgefraction/nodecount defaults",
		"Set to false to get the full profile, without any trimming."),
	"focus": helpText(
		"Restricts to samples going through a node matching regexp",
		"Discard samples that do not include a node matching this regexp.",
		"Matching includes the function name, filename or object name."),
	"ignore": helpText(
		"Skips paths going through any nodes matching regexp",
		"If set, discard samples that include a node matching this regexp.",
		"Matching includes the function name, filename or object name."),
	"prune_from": helpText(
		"Drops any functions below the matched frame.",
		"If set, any frames matching the specified regexp and any frames",
		"below it will be dropped from each sample."),
	"hide": helpText(
		"Skips nodes matching regexp",
		"Discard nodes that match this location.",
		"Other nodes from samples that include this location will be shown.",
		"Matching includes the function name, filename or object name."),
	"show": helpText(
		"Only show nodes matching regexp",
		"If set, only show nodes that match this location.",
		"Matching includes the function name, filename or object name."),
	"show_from": helpText(
		"Drops functions above the highest matched frame.",
		"If set, all frames above the highest match are dropped from every sample.",
		"Matching includes the function name, filename or object name."),
	"tagroot": helpText(
		"Adds pseudo stack frames for labels key/value pairs at the callstack root.",
		"A comma-separated list of label keys.",
		"The first key creates frames at the new root."),
	"tagleaf": helpText(
		"Adds pseudo stack frames for labels key/value pairs at the callstack leaf.",
		"A comma-separated list of label keys.",
		"The last key creates frames at the new leaf."),
	"tagfocus": helpText(
		"Restricts to samples with tags in range or matched by regexp",
		"Use name=value syntax to limit the matching to a specific tag.",
		"Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:",
		"String tag filter examples: foo, foo.*bar, mytag=foo.*bar"),
	"tagignore": helpText(
		"Discard samples with tags in range or matched by regexp",
		"Use name=value syntax to limit the matching to a specific tag.",
		"Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:",
		"String tag filter examples: foo, foo.*bar, mytag=foo.*bar"),
	"tagshow": helpText(
		"Only consider tags matching this regexp",
		"Discard tags that do not match this regexp"),
	"taghide": helpText(
		"Skip tags matching this regexp",
		"Discard tags that match this regexp"),
	// Heap profile options
	"divide_by": helpText(
		"Ratio to divide all samples before visualization",
		"Divide all samples values by a constant, eg the number of processors or jobs."),
	"mean": helpText(
		"Average sample value over first value (count)",
		"For memory profiles, report average memory per allocation.",
		"For time-based profiles, report average time per event."),
	"sample_index": helpText(
		"Sample value to report (0-based index or name)",
		"Profiles contain multiple values per sample.",
		"Use sample_index=i to select the ith value (starting at 0)."),
	"normalize": helpText(
		"Scales profile based on the base profile."),

	// Data sorting criteria
	"flat": helpText("Sort entries based on own weight"),
	"cum":  helpText("Sort entries based on cumulative weight"),

	// Output granularity
	"functions": helpText(
		"Aggregate at the function level.",
		"Ignores the filename where the function was defined."),
	"filefunctions": helpText(
		"Aggregate at the function level.",
		"Takes into account the filename where the function was defined."),
	"files": "Aggregate at the file level.",
	"lines": "Aggregate at the source code line level.",
	"addresses": helpText(
		"Aggregate at the address level.",
		"Includes functions' addresses in the output."),
	"noinlines": helpText(
		"Ignore inlines.",
		"Attributes inlined functions to their first out-of-line caller."),
	"showcolumns": helpText(
		"Show column numbers at the source code line level."),
}

// helpText joins its arguments with newlines and appends a final newline,
// producing a multi-line help string whose first line is the short summary.
func helpText(s ...string) string {
	return strings.Join(s, "\n") + "\n"
}

// usage returns a string describing the pprof commands and configuration
// options. If commandLine is set, the output reflects command-line usage.
+func usage(commandLine bool) string { + var prefix string + if commandLine { + prefix = "-" + } + fmtHelp := func(c, d string) string { + return fmt.Sprintf(" %-16s %s", c, strings.SplitN(d, "\n", 2)[0]) + } + + var commands []string + for name, cmd := range pprofCommands { + commands = append(commands, fmtHelp(prefix+name, cmd.description)) + } + sort.Strings(commands) + + var help string + if commandLine { + help = " Output formats (select at most one):\n" + } else { + help = " Commands:\n" + commands = append(commands, fmtHelp("o/options", "List options and their current values")) + commands = append(commands, fmtHelp("q/quit/exit/^D", "Exit pprof")) + } + + help = help + strings.Join(commands, "\n") + "\n\n" + + " Options:\n" + + // Print help for configuration options after sorting them. + // Collect choices for multi-choice options print them together. + var variables []string + var radioStrings []string + for _, f := range configFields { + if len(f.choices) == 0 { + variables = append(variables, fmtHelp(prefix+f.name, configHelp[f.name])) + continue + } + // Format help for for this group. 
+ s := []string{fmtHelp(f.name, "")} + for _, choice := range f.choices { + s = append(s, " "+fmtHelp(prefix+choice, configHelp[choice])) + } + radioStrings = append(radioStrings, strings.Join(s, "\n")) + } + sort.Strings(variables) + sort.Strings(radioStrings) + return help + strings.Join(variables, "\n") + "\n\n" + + " Option groups (only set one per group):\n" + + strings.Join(radioStrings, "\n") +} + +func reportHelp(c string, cum, redirect bool) string { + h := []string{ + c + " [n] [focus_regex]* [-ignore_regex]*", + "Include up to n samples", + "Include samples matching focus_regex, and exclude ignore_regex.", + } + if cum { + h[0] += " [-cum]" + h = append(h, "-cum sorts the output by cumulative weight") + } + if redirect { + h[0] += " >f" + h = append(h, "Optionally save the report on the file f") + } + return strings.Join(h, "\n") +} + +func listHelp(c string, redirect bool) string { + h := []string{ + c + " [-focus_regex]* [-ignore_regex]*", + "Include functions matching func_regex, or including the address specified.", + "Include samples matching focus_regex, and exclude ignore_regex.", + } + if redirect { + h[0] += " >f" + h = append(h, "Optionally save the report on the file f") + } + return strings.Join(h, "\n") +} + +// browsers returns a list of commands to attempt for web visualization. +func browsers() []string { + var cmds []string + if userBrowser := os.Getenv("BROWSER"); userBrowser != "" { + cmds = append(cmds, userBrowser) + } + switch runtime.GOOS { + case "darwin": + cmds = append(cmds, "/usr/bin/open") + case "windows": + cmds = append(cmds, "cmd /c start") + default: + // Commands opening browsers are prioritized over xdg-open, so browser() + // command can be used on linux to open the .svg file generated by the -web + // command (the .svg file includes embedded javascript so is best viewed in + // a browser). + cmds = append(cmds, []string{"chrome", "google-chrome", "chromium", "firefox", "sensible-browser"}...) 
+ if os.Getenv("DISPLAY") != "" { + // xdg-open is only for use in a desktop environment. + cmds = append(cmds, "xdg-open") + } + } + return cmds +} + +var kcachegrind = []string{"kcachegrind"} + +// awayFromTTY saves the output in a file if it would otherwise go to +// the terminal screen. This is used to avoid dumping binary data on +// the screen. +func awayFromTTY(format string) PostProcessor { + return func(input io.Reader, output io.Writer, ui plugin.UI) error { + if output == os.Stdout && (ui.IsTerminal() || interactiveMode) { + tempFile, err := newTempFile("", "profile", "."+format) + if err != nil { + return err + } + ui.PrintErr("Generating report in ", tempFile.Name()) + output = tempFile + } + _, err := io.Copy(output, input) + return err + } +} + +func invokeDot(format string) PostProcessor { + return func(input io.Reader, output io.Writer, ui plugin.UI) error { + cmd := exec.Command("dot", "-T"+format) + cmd.Stdin, cmd.Stdout, cmd.Stderr = input, output, os.Stderr + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to execute dot. Is Graphviz installed? Error: %v", err) + } + return nil + } +} + +// massageDotSVG invokes the dot tool to generate an SVG image and alters +// the image to have panning capabilities when viewed in a browser. 
+func massageDotSVG() PostProcessor { + generateSVG := invokeDot("svg") + return func(input io.Reader, output io.Writer, ui plugin.UI) error { + baseSVG := new(bytes.Buffer) + if err := generateSVG(input, baseSVG, ui); err != nil { + return err + } + _, err := output.Write([]byte(massageSVG(baseSVG.String()))) + return err + } +} + +func invokeVisualizer(suffix string, visualizers []string) PostProcessor { + return func(input io.Reader, output io.Writer, ui plugin.UI) error { + tempFile, err := newTempFile(os.TempDir(), "pprof", "."+suffix) + if err != nil { + return err + } + deferDeleteTempFile(tempFile.Name()) + if _, err := io.Copy(tempFile, input); err != nil { + return err + } + tempFile.Close() + // Try visualizers until one is successful + for _, v := range visualizers { + // Separate command and arguments for exec.Command. + args := strings.Split(v, " ") + if len(args) == 0 { + continue + } + viewer := exec.Command(args[0], append(args[1:], tempFile.Name())...) + viewer.Stderr = os.Stderr + if err = viewer.Start(); err == nil { + // Wait for a second so that the visualizer has a chance to + // open the input file. This needs to be done even if we're + // waiting for the visualizer as it can be just a wrapper that + // spawns a browser tab and returns right away. + defer func(t <-chan time.Time) { + <-t + }(time.After(time.Second)) + // On interactive mode, let the visualizer run in the background + // so other commands can be issued. + if !interactiveMode { + return viewer.Wait() + } + return nil + } + } + return err + } +} + +// stringToBool is a custom parser for bools. We avoid using strconv.ParseBool +// to remain compatible with old pprof behavior (e.g., treating "" as true). 
func stringToBool(s string) (bool, error) {
	switch strings.ToLower(s) {
	case "true", "t", "yes", "y", "1", "":
		return true, nil
	case "false", "f", "no", "n", "0":
		return false, nil
	default:
		return false, fmt.Errorf(`illegal value "%s" for bool variable`, s)
	}
}
diff --git a/plugin/debug/pkg/internal/driver/config.go b/plugin/debug/pkg/internal/driver/config.go
new file mode 100644
index 0000000..090230e
--- /dev/null
+++ b/plugin/debug/pkg/internal/driver/config.go
@@ -0,0 +1,373 @@
package driver

import (
	"fmt"
	"net/url"
	"reflect"
	"strconv"
	"strings"
	"sync"
)

// config holds settings for a single named config.
// The JSON tag name for a field is used both for JSON encoding and as
// a named variable. Fields tagged `json:"-"` are transient: they are not
// saved in settings (see the notSaved map in init below).
type config struct {
	// Filename for file-based output formats, stdout by default.
	Output string `json:"-"`

	// Display options.
	CallTree            bool    `json:"call_tree,omitempty"`
	RelativePercentages bool    `json:"relative_percentages,omitempty"`
	Unit                string  `json:"unit,omitempty"`
	CompactLabels       bool    `json:"compact_labels,omitempty"`
	SourcePath          string  `json:"-"`
	TrimPath            string  `json:"-"`
	IntelSyntax         bool    `json:"intel_syntax,omitempty"`
	Mean                bool    `json:"mean,omitempty"`
	SampleIndex         string  `json:"-"`
	DivideBy            float64 `json:"-"`
	Normalize           bool    `json:"normalize,omitempty"`
	Sort                string  `json:"sort,omitempty"`

	// Label pseudo stack frame generation options
	TagRoot string `json:"tagroot,omitempty"`
	TagLeaf string `json:"tagleaf,omitempty"`

	// Filtering options
	DropNegative bool    `json:"drop_negative,omitempty"`
	NodeCount    int     `json:"nodecount,omitempty"`
	NodeFraction float64 `json:"nodefraction,omitempty"`
	EdgeFraction float64 `json:"edgefraction,omitempty"`
	Trim         bool    `json:"trim,omitempty"`
	Focus        string  `json:"focus,omitempty"`
	Ignore       string  `json:"ignore,omitempty"`
	PruneFrom    string  `json:"prune_from,omitempty"`
	Hide         string  `json:"hide,omitempty"`
	Show         string  `json:"show,omitempty"`
	ShowFrom     string  `json:"show_from,omitempty"`
	TagFocus     string  `json:"tagfocus,omitempty"`
	TagIgnore    string  `json:"tagignore,omitempty"`
	TagShow      string  `json:"tagshow,omitempty"`
	TagHide      string  `json:"taghide,omitempty"`
	NoInlines    bool    `json:"noinlines,omitempty"`
	ShowColumns  bool    `json:"showcolumns,omitempty"`

	// Output granularity
	Granularity string `json:"granularity,omitempty"`
}

// defaultConfig returns the default configuration values; it is unaffected by
// flags and interactive assignments.
func defaultConfig() config {
	return config{
		Unit:         "minimum",
		NodeCount:    -1, // -1 means "use the per-command default" (see applyCommandOverrides)
		NodeFraction: 0.005,
		EdgeFraction: 0.001,
		Trim:         true,
		DivideBy:     1.0,
		Sort:         "flat",
		Granularity:  "", // Default depends on the display format
	}
}

// currentCfg holds the current configuration values; it is affected by
// flags and interactive assignments. Guarded by currentMu.
var currentCfg = defaultConfig()
var currentMu sync.Mutex

// currentConfig returns a copy of the current configuration.
func currentConfig() config {
	currentMu.Lock()
	defer currentMu.Unlock()
	return currentCfg
}

// setCurrentConfig replaces the current configuration.
func setCurrentConfig(cfg config) {
	currentMu.Lock()
	defer currentMu.Unlock()
	currentCfg = cfg
}

// configField contains metadata for a single configuration field.
type configField struct {
	name         string              // JSON field name/key in variables
	urlparam     string              // URL parameter name
	saved        bool                // Is field saved in settings?
	field        reflect.StructField // Field in config
	choices      []string            // Names of variables in group
	defaultValue string              // Default value for this field.
}

var (
	configFields []configField // Precomputed metadata per config field

	// configFieldMap holds an entry for every config field as well as an
	// entry for every valid choice for a multi-choice field.
	configFieldMap map[string]configField
)

// init builds configFields/configFieldMap by reflecting over the config
// struct and combining the JSON tags with the tables below.
func init() {
	// Config names for fields that are not saved in settings and therefore
	// do not have a JSON name.
	notSaved := map[string]string{
		// Not saved in settings, but present in URLs.
		"SampleIndex": "sample_index",

		// Following fields are also not placed in URLs.
		"Output":     "output",
		"SourcePath": "source_path",
		"TrimPath":   "trim_path",
		"DivideBy":   "divide_by",
	}

	// choices holds the list of allowed values for config fields that can
	// take on one of a bounded set of values.
	choices := map[string][]string{
		"sort":        {"cum", "flat"},
		"granularity": {"functions", "filefunctions", "files", "lines", "addresses"},
	}

	// urlparam holds the mapping from a config field name to the URL
	// parameter used to hold that config field. If no entry is present for
	// a name, the corresponding field is not saved in URLs.
	urlparam := map[string]string{
		"drop_negative":        "dropneg",
		"call_tree":            "calltree",
		"relative_percentages": "rel",
		"unit":                 "unit",
		"compact_labels":       "compact",
		"intel_syntax":         "intel",
		"nodecount":            "n",
		"nodefraction":         "nf",
		"edgefraction":         "ef",
		"trim":                 "trim",
		"focus":                "f",
		"ignore":               "i",
		"prune_from":           "prunefrom",
		"hide":                 "h",
		"show":                 "s",
		"show_from":            "sf",
		"tagfocus":             "tf",
		"tagignore":            "ti",
		"tagshow":              "ts",
		"taghide":              "th",
		"mean":                 "mean",
		"sample_index":         "si",
		"normalize":            "norm",
		"sort":                 "sort",
		"granularity":          "g",
		"noinlines":            "noinlines",
		"showcolumns":          "showcolumns",
	}

	def := defaultConfig()
	configFieldMap = map[string]configField{}
	t := reflect.TypeOf(config{})
	for i, n := 0, t.NumField(); i < n; i++ {
		field := t.Field(i)
		js := strings.Split(field.Tag.Get("json"), ",")
		if len(js) == 0 {
			continue
		}
		// Get the configuration name for this field.
		name := js[0]
		if name == "-" {
			name = notSaved[field.Name]
			if name == "" {
				// Not a configurable field.
				continue
			}
		}
		f := configField{
			name:     name,
			urlparam: urlparam[name],
			saved:    (name == js[0]),
			field:    field,
			choices:  choices[name],
		}
		f.defaultValue = def.get(f)
		configFields = append(configFields, f)
		configFieldMap[f.name] = f
		// Multi-choice values are also looked up by choice name.
		for _, choice := range f.choices {
			configFieldMap[choice] = f
		}
	}
}

// fieldPtr returns a pointer to the field identified by f in *cfg.
func (cfg *config) fieldPtr(f configField) interface{} {
	// reflect.ValueOf: converts to reflect.Value
	// Elem: dereferences cfg to make *cfg
	// FieldByIndex: fetches the field
	// Addr: takes address of field
	// Interface: converts back from reflect.Value to a regular value
	return reflect.ValueOf(cfg).Elem().FieldByIndex(f.field.Index).Addr().Interface()
}

// get returns the value of field f in cfg, formatted as a string.
func (cfg *config) get(f configField) string {
	switch ptr := cfg.fieldPtr(f).(type) {
	case *string:
		return *ptr
	case *int:
		return fmt.Sprint(*ptr)
	case *float64:
		return fmt.Sprint(*ptr)
	case *bool:
		return fmt.Sprint(*ptr)
	}
	panic(fmt.Sprintf("unsupported config field type %v", f.field.Type))
}

// set sets the value of field f in cfg to value, parsing it according to
// the field's Go type. For multi-choice string fields the value must be
// one of the allowed choices.
func (cfg *config) set(f configField, value string) error {
	switch ptr := cfg.fieldPtr(f).(type) {
	case *string:
		if len(f.choices) > 0 {
			// Verify that value is one of the allowed choices.
			for _, choice := range f.choices {
				if choice == value {
					*ptr = value
					return nil
				}
			}
			return fmt.Errorf("invalid %q value %q", f.name, value)
		}
		*ptr = value
	case *int:
		v, err := strconv.Atoi(value)
		if err != nil {
			return err
		}
		*ptr = v
	case *float64:
		v, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return err
		}
		*ptr = v
	case *bool:
		v, err := stringToBool(value)
		if err != nil {
			return err
		}
		*ptr = v
	default:
		panic(fmt.Sprintf("unsupported config field type %v", f.field.Type))
	}
	return nil
}

// isConfigurable returns true if name is either the name of a config field, or
// a valid value for a multi-choice config field.
func isConfigurable(name string) bool {
	_, ok := configFieldMap[name]
	return ok
}

// isBoolConfig returns true if name is either name of a boolean config field,
// or a valid value for a multi-choice config field.
func isBoolConfig(name string) bool {
	f, ok := configFieldMap[name]
	if !ok {
		return false
	}
	if name != f.name {
		return true // name must be one possible value for the field
	}
	var cfg config
	_, ok = cfg.fieldPtr(f).(*bool)
	return ok
}

// completeConfig returns the list of configurable names starting with prefix.
func completeConfig(prefix string) []string {
	var result []string
	for v := range configFieldMap {
		if strings.HasPrefix(v, prefix) {
			result = append(result, v)
		}
	}
	return result
}

// configure stores the name=value mapping into the current config, correctly
// handling the case when name identifies a particular choice in a field.
func configure(name, value string) error {
	currentMu.Lock()
	defer currentMu.Unlock()
	f, ok := configFieldMap[name]
	if !ok {
		return fmt.Errorf("unknown config field %q", name)
	}
	if f.name == name {
		return currentCfg.set(f, value)
	}
	// name must be one of the choices. If value is true, set field-value
	// to name.
	if v, err := strconv.ParseBool(value); v && err == nil {
		return currentCfg.set(f, name)
	}
	return fmt.Errorf("unknown config field %q", name)
}

// resetTransient sets all transient fields in *cfg to their currently
// configured values.
func (cfg *config) resetTransient() {
	current := currentConfig()
	cfg.Output = current.Output
	cfg.SourcePath = current.SourcePath
	cfg.TrimPath = current.TrimPath
	cfg.DivideBy = current.DivideBy
	cfg.SampleIndex = current.SampleIndex
}

// applyURL updates *cfg based on params.
func (cfg *config) applyURL(params url.Values) error {
	for _, f := range configFields {
		var value string
		if f.urlparam != "" {
			value = params.Get(f.urlparam)
		}
		if value == "" {
			continue
		}
		if err := cfg.set(f, value); err != nil {
			return fmt.Errorf("error setting config field %s: %v", f.name, err)
		}
	}
	return nil
}

// makeURL returns a URL based on initialURL that contains the config contents
// as parameters. The second result is true iff a parameter value was changed.
func (cfg *config) makeURL(initialURL url.URL) (url.URL, bool) {
	q := initialURL.Query()
	changed := false
	for _, f := range configFields {
		if f.urlparam == "" || !f.saved {
			continue
		}
		v := cfg.get(f)
		if v == f.defaultValue {
			v = "" // The URL form of the default value is the empty string.
		} else if f.field.Type.Kind() == reflect.Bool {
			// Shorten bool values to "f" or "t"
			v = v[:1]
		}
		if q.Get(f.urlparam) == v {
			continue
		}
		changed = true
		if v == "" {
			q.Del(f.urlparam)
		} else {
			q.Set(f.urlparam, v)
		}
	}
	if changed {
		initialURL.RawQuery = q.Encode()
	}
	return initialURL, changed
}
diff --git a/plugin/debug/pkg/internal/driver/driver.go b/plugin/debug/pkg/internal/driver/driver.go
new file mode 100644
index 0000000..af51e62
--- /dev/null
+++ b/plugin/debug/pkg/internal/driver/driver.go
@@ -0,0 +1,408 @@
// Copyright 2014 Google Inc. All Rights Reserved.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package driver implements the core pprof functionality. It can be +// parameterized with a flag implementation, fetch and symbolize +// mechanisms. +package driver + +import ( + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "regexp" + "strings" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/internal/report" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +// PProf acquires a profile, and symbolizes it using a profile +// manager. Then it generates a report formatted according to the +// options selected through the flags package. +func PProf(eo *plugin.Options) error { + // Remove any temporary files created during pprof processing. + defer cleanupTempFiles() + + o := setDefaults(eo) + + src, cmd, err := parseFlags(o) + if err != nil { + return err + } + + p, err := fetchProfiles(src, o) + if err != nil { + return err + } + + if cmd != nil { + return generateReport(p, cmd, currentConfig(), o) + } + + if src.HTTPHostport != "" { + return serveWebInterface(src.HTTPHostport, p, o, src.HTTPDisableBrowser) + } + return interactive(p, o) +} + +// generateRawReport is allowed to modify p. +func generateRawReport(p *profile.Profile, cmd []string, cfg config, o *plugin.Options) (*command, *report.Report, error) { + // Identify units of numeric tags in profile. 
+ numLabelUnits := identifyNumLabelUnits(p, o.UI) + + // Get report output format + c := pprofCommands[cmd[0]] + if c == nil { + panic("unexpected nil command") + } + + cfg = applyCommandOverrides(cmd[0], c.format, cfg) + + // Create label pseudo nodes before filtering, in case the filters use + // the generated nodes. + generateTagRootsLeaves(p, cfg, o.UI) + + // Delay focus after configuring report to get percentages on all samples. + relative := cfg.RelativePercentages + if relative { + if err := applyFocus(p, numLabelUnits, cfg, o.UI); err != nil { + return nil, nil, err + } + } + ropt, err := reportOptions(p, numLabelUnits, cfg) + if err != nil { + return nil, nil, err + } + ropt.OutputFormat = c.format + if len(cmd) == 2 { + s, err := regexp.Compile(cmd[1]) + if err != nil { + return nil, nil, fmt.Errorf("parsing argument regexp %s: %v", cmd[1], err) + } + ropt.Symbol = s + } + + rpt := report.New(p, ropt) + if !relative { + if err := applyFocus(p, numLabelUnits, cfg, o.UI); err != nil { + return nil, nil, err + } + } + if err := aggregate(p, cfg); err != nil { + return nil, nil, err + } + + return c, rpt, nil +} + +// generateReport is allowed to modify p. +func generateReport(p *profile.Profile, cmd []string, cfg config, o *plugin.Options) error { + c, rpt, err := generateRawReport(p, cmd, cfg, o) + if err != nil { + return err + } + + // Generate the report. + dst := new(bytes.Buffer) + switch rpt.OutputFormat() { + case report.WebList: + // We need template expansion, so generate here instead of in report. + err = printWebList(dst, rpt, o.Obj) + default: + err = report.Generate(dst, rpt, o.Obj) + } + if err != nil { + return err + } + src := dst + + // If necessary, perform any data post-processing. + if c.postProcess != nil { + dst = new(bytes.Buffer) + if err := c.postProcess(src, dst, o.UI); err != nil { + return err + } + src = dst + } + + // If no output is specified, use default visualizer. 
+ output := cfg.Output + if output == "" { + if c.visualizer != nil { + return c.visualizer(src, os.Stdout, o.UI) + } + _, err := src.WriteTo(os.Stdout) + return err + } + + // Output to specified file. + o.UI.PrintErr("Generating report in ", output) + out, err := o.Writer.Open(output) + if err != nil { + return err + } + if _, err := src.WriteTo(out); err != nil { + out.Close() + return err + } + return out.Close() +} + +func printWebList(dst io.Writer, rpt *report.Report, obj plugin.ObjTool) error { + listing, err := report.MakeWebList(rpt, obj, -1) + if err != nil { + return err + } + legend := report.ProfileLabels(rpt) + return renderHTML(dst, "sourcelisting", rpt, nil, legend, webArgs{ + Standalone: true, + Listing: listing, + }) +} + +func applyCommandOverrides(cmd string, outputFormat int, cfg config) config { + // Some report types override the trim flag to false below. This is to make + // sure the default heuristics of excluding insignificant nodes and edges + // from the call graph do not apply. One example where it is important is + // annotated source or disassembly listing. Those reports run on a specific + // function (or functions), but the trimming is applied before the function + // data is selected. So, with trimming enabled, the report could end up + // showing no data if the specified function is "uninteresting" as far as the + // trimming is concerned. + trim := cfg.Trim + + switch cmd { + case "disasm": + trim = false + cfg.Granularity = "addresses" + // Force the 'noinlines' mode so that source locations for a given address + // collapse and there is only one for the given address. Without this + // cumulative metrics would be double-counted when annotating the assembly. + // This is because the merge is done by address and in case of an inlined + // stack each of the inlined entries is a separate callgraph node. 
+ cfg.NoInlines = true + case "weblist": + trim = false + cfg.Granularity = "addresses" + cfg.NoInlines = false // Need inline info to support call expansion + case "peek": + trim = false + case "list": + trim = false + cfg.Granularity = "lines" + // Do not force 'noinlines' to be false so that specifying + // "-list foo -noinlines" is supported and works as expected. + case "text", "top", "topproto": + if cfg.NodeCount == -1 { + cfg.NodeCount = 0 + } + default: + if cfg.NodeCount == -1 { + cfg.NodeCount = 80 + } + } + + switch outputFormat { + case report.Proto, report.Raw, report.Callgrind: + trim = false + cfg.Granularity = "addresses" + } + + if !trim { + cfg.NodeCount = 0 + cfg.NodeFraction = 0 + cfg.EdgeFraction = 0 + } + return cfg +} + +// generateTagRootsLeaves generates extra nodes from the tagroot and tagleaf options. +func generateTagRootsLeaves(prof *profile.Profile, cfg config, ui plugin.UI) { + tagRootLabelKeys := dropEmptyStrings(strings.Split(cfg.TagRoot, ",")) + tagLeafLabelKeys := dropEmptyStrings(strings.Split(cfg.TagLeaf, ",")) + rootm, leafm := addLabelNodes(prof, tagRootLabelKeys, tagLeafLabelKeys, cfg.Unit) + warnNoMatches(cfg.TagRoot == "" || rootm, "TagRoot", ui) + warnNoMatches(cfg.TagLeaf == "" || leafm, "TagLeaf", ui) +} + +// dropEmptyStrings filters a slice to only non-empty strings +func dropEmptyStrings(in []string) (out []string) { + for _, s := range in { + if s != "" { + out = append(out, s) + } + } + return +} + +func aggregate(prof *profile.Profile, cfg config) error { + var function, filename, linenumber, address bool + inlines := !cfg.NoInlines + switch cfg.Granularity { + case "": + function = true // Default granularity is "functions" + case "addresses": + if inlines { + return nil + } + function = true + filename = true + linenumber = true + address = true + case "lines": + function = true + filename = true + linenumber = true + case "files": + filename = true + case "functions": + function = true + case "filefunctions": + 
// reportOptions translates the current pprof configuration cfg into a
// report.Options for profile p. numLabelUnits maps numeric label keys to
// their units (as computed by identifyNumLabelUnits). It returns an error
// when the selected sample index is invalid or when cfg.DivideBy is zero.
func reportOptions(p *profile.Profile, numLabelUnits map[string]string, cfg config) (*report.Options, error) {
	si, mean := cfg.SampleIndex, cfg.Mean
	// Resolve the sample index into extractor functions and the value type.
	value, meanDiv, sample, err := sampleFormat(p, si, mean)
	if err != nil {
		return nil, err
	}

	// When reporting means, prefix the sample type so output labels it.
	stype := sample.Type
	if mean {
		stype = "mean_" + stype
	}

	// Ratio below is 1/DivideBy; reject a zero divisor up front.
	if cfg.DivideBy == 0 {
		return nil, fmt.Errorf("zero divisor specified")
	}

	// Collect the active filter expressions as "name=value" strings so the
	// report can display which filters shaped it. Empty values are skipped.
	var filters []string
	addFilter := func(k string, v string) {
		if v != "" {
			filters = append(filters, k+"="+v)
		}
	}
	addFilter("focus", cfg.Focus)
	addFilter("ignore", cfg.Ignore)
	addFilter("hide", cfg.Hide)
	addFilter("show", cfg.Show)
	addFilter("show_from", cfg.ShowFrom)
	addFilter("tagfocus", cfg.TagFocus)
	addFilter("tagignore", cfg.TagIgnore)
	addFilter("tagshow", cfg.TagShow)
	addFilter("taghide", cfg.TagHide)

	ropt := &report.Options{
		CumSort:      cfg.Sort == "cum",
		CallTree:     cfg.CallTree,
		DropNegative: cfg.DropNegative,

		CompactLabels: cfg.CompactLabels,
		Ratio:         1 / cfg.DivideBy,

		NodeCount:    cfg.NodeCount,
		NodeFraction: cfg.NodeFraction,
		EdgeFraction: cfg.EdgeFraction,

		ActiveFilters: filters,
		NumLabelUnits: numLabelUnits,

		SampleValue:       value,
		SampleMeanDivisor: meanDiv,
		SampleType:        stype,
		SampleUnit:        sample.Unit,

		OutputUnit: cfg.Unit,

		SourcePath: cfg.SourcePath,
		TrimPath:   cfg.TrimPath,

		IntelSyntax: cfg.IntelSyntax,
	}

	// Title the report after the main binary when the profile names one.
	if len(p.Mapping) > 0 && p.Mapping[0].File != "" {
		ropt.Title = filepath.Base(p.Mapping[0].File)
	}

	return ropt, nil
}
// sampleFormat returns a function to extract values out of a profile.Sample,
// and the type/units of those values. sampleIndex names the sample type to
// report (resolved via SampleIndexByName). When mean is true, meanDiv is a
// second extractor for index 0 — presumably the sample count used as the
// divisor when averaging; TODO confirm against SampleType conventions.
// An error is returned for an empty profile or an unknown sample index.
func sampleFormat(p *profile.Profile, sampleIndex string, mean bool) (value, meanDiv sampleValueFunc, v *profile.ValueType, err error) {
	if len(p.SampleType) == 0 {
		return nil, nil, nil, fmt.Errorf("profile has no samples")
	}
	index, err := p.SampleIndexByName(sampleIndex)
	if err != nil {
		return nil, nil, nil, err
	}
	value = valueExtractor(index)
	if mean {
		meanDiv = valueExtractor(0)
	}
	v = p.SampleType[index]
	return
}
// newCopy returns a new copy of the profile by re-parsing the bytes that
// makeProfileCopier pre-serialized with WriteUncompressed. A parse failure
// here means those bytes were corrupted in memory — a programmer bug, not
// a user input error — so it panics rather than returning an error.
func (c profileCopier) newCopy() *profile.Profile {
	p, err := profile.ParseUncompressed([]byte(c))
	if err != nil {
		panic(err)
	}
	return p
}
// applyFocus filters samples based on the focus/ignore options.
// Filtering is applied in place on prof; ui receives a warning for every
// configured expression that matched no samples.
func applyFocus(prof *profile.Profile, numLabelUnits map[string]string, cfg config, ui plugin.UI) error {
	// Each compile* helper is a no-op when the err it is handed is already
	// non-nil, so the chain below threads one err through all options and
	// checks it a single time afterwards.
	focus, err := compileRegexOption("focus", cfg.Focus, nil)
	ignore, err := compileRegexOption("ignore", cfg.Ignore, err)
	hide, err := compileRegexOption("hide", cfg.Hide, err)
	show, err := compileRegexOption("show", cfg.Show, err)
	showfrom, err := compileRegexOption("show_from", cfg.ShowFrom, err)
	tagfocus, err := compileTagFilter("tagfocus", cfg.TagFocus, numLabelUnits, ui, err)
	tagignore, err := compileTagFilter("tagignore", cfg.TagIgnore, numLabelUnits, ui, err)
	prunefrom, err := compileRegexOption("prune_from", cfg.PruneFrom, err)
	if err != nil {
		return err
	}

	// Sample-name filters; each returned bool reports whether the
	// corresponding filter matched anything.
	fm, im, hm, hnm := prof.FilterSamplesByName(focus, ignore, hide, show)
	warnNoMatches(focus == nil || fm, "Focus", ui)
	warnNoMatches(ignore == nil || im, "Ignore", ui)
	warnNoMatches(hide == nil || hm, "Hide", ui)
	warnNoMatches(show == nil || hnm, "Show", ui)

	sfm := prof.ShowFrom(showfrom)
	warnNoMatches(showfrom == nil || sfm, "ShowFrom", ui)

	tfm, tim := prof.FilterSamplesByTag(tagfocus, tagignore)
	warnNoMatches(tagfocus == nil || tfm, "TagFocus", ui)
	warnNoMatches(tagignore == nil || tim, "TagIgnore", ui)

	// NOTE(review): tagshow/taghide are compiled after the first err check,
	// so a bad expression here is only reported via the final return —
	// after FilterTagsByName has already run with a nil regexp.
	tagshow, err := compileRegexOption("tagshow", cfg.TagShow, err)
	taghide, err := compileRegexOption("taghide", cfg.TagHide, err)
	tns, tnh := prof.FilterTagsByName(tagshow, taghide)
	warnNoMatches(tagshow == nil || tns, "TagShow", ui)
	warnNoMatches(taghide == nil || tnh, "TagHide", ui)

	if prunefrom != nil {
		prof.PruneFrom(prunefrom)
	}
	return err
}
// compileRegexOption compiles value as the regular expression for the
// option called name. It deliberately passes through when value is empty
// or when a previous call in the chain already failed (err != nil), which
// lets callers thread a single err through several options and test it
// once. On a compile failure the returned error names the option.
func compileRegexOption(name, value string, err error) (*regexp.Regexp, error) {
	if err != nil || value == "" {
		return nil, err
	}
	compiled, compileErr := regexp.Compile(value)
	if compileErr != nil {
		return nil, fmt.Errorf("parsing %s regexp: %v", name, compileErr)
	}
	return compiled, nil
}
// parseTagFilterRange returns a function to checks if a value is
// contained on the range described by a string. It can recognize
// strings of the form:
// "32kb" -- matches values == 32kb
// ":64kb" -- matches values <= 64kb
// "4mb:" -- matches values >= 4mb
// "12kb:64mb" -- matches values between 12kb and 64mb (both included).
// It returns nil when filter does not have one of those shapes.
func parseTagFilterRange(filter string) func(int64, string) bool {
	// At most two "<number><unit?>" tokens are extracted; their position
	// relative to the ':' separators is established below by comparing
	// the full filter string against the raw matches.
	ranges := tagFilterRangeRx.FindAllStringSubmatch(filter, 2)
	if len(ranges) == 0 {
		return nil // No ranges were identified
	}
	// The regexp only matches digit runs, so ParseInt can fail here only
	// on int64 overflow — treated as a programmer/input bug worth a panic.
	v, err := strconv.ParseInt(ranges[0][1], 10, 64)
	if err != nil {
		panic(fmt.Errorf("failed to parse int %s: %v", ranges[0][1], err))
	}
	// Normalize the bound into its own unit (e.g. "32kb" stays in kb).
	scaledValue, unit := measurement.Scale(v, ranges[0][2], ranges[0][2])
	if len(ranges) == 1 {
		// One bound: decide ==, >= or <= by where the ':' sits.
		switch match := ranges[0][0]; filter {
		case match:
			return func(v int64, u string) bool {
				sv, su := measurement.Scale(v, u, unit)
				return su == unit && sv == scaledValue
			}
		case match + ":":
			return func(v int64, u string) bool {
				sv, su := measurement.Scale(v, u, unit)
				return su == unit && sv >= scaledValue
			}
		case ":" + match:
			return func(v int64, u string) bool {
				sv, su := measurement.Scale(v, u, unit)
				return su == unit && sv <= scaledValue
			}
		}
		return nil
	}
	// Two bounds: the filter must be exactly "<lo>:<hi>".
	if filter != ranges[0][0]+":"+ranges[1][0] {
		return nil
	}
	if v, err = strconv.ParseInt(ranges[1][1], 10, 64); err != nil {
		panic(fmt.Errorf("failed to parse int %s: %v", ranges[1][1], err))
	}
	// The upper bound is scaled into the lower bound's unit; mismatched
	// units make the range unusable.
	scaledValue2, unit2 := measurement.Scale(v, ranges[1][2], unit)
	if unit != unit2 {
		return nil
	}
	return func(v int64, u string) bool {
		sv, su := measurement.Scale(v, u, unit)
		return su == unit && sv >= scaledValue && sv <= scaledValue2
	}
}
samples") + } +} diff --git a/plugin/debug/pkg/internal/driver/driver_test.go b/plugin/debug/pkg/internal/driver/driver_test.go new file mode 100644 index 0000000..5f0b26c --- /dev/null +++ b/plugin/debug/pkg/internal/driver/driver_test.go @@ -0,0 +1,1755 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bytes" + "flag" + "fmt" + "net" + _ "net/http/pprof" + "os" + "reflect" + "regexp" + "runtime" + "strconv" + "strings" + "testing" + "time" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/internal/proftest" + "m7s.live/v5/plugin/debug/pkg/internal/symbolz" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +var updateFlag = flag.Bool("update", false, "Update the golden files") + +func TestParse(t *testing.T) { + // Override weblist command to collect output in buffer + pprofCommands["weblist"].postProcess = nil + + // Our mockObjTool.Open will always return success, causing + // driver.locateBinaries to "find" the binaries below in a non-existent + // directory. As a workaround, point the search path to the fake + // directory containing out fake binaries. 
+ savePath := os.Getenv("PPROF_BINARY_PATH") + os.Setenv("PPROF_BINARY_PATH", "/path/to") + defer os.Setenv("PPROF_BINARY_PATH", savePath) + testcase := []struct { + flags, source string + }{ + {"text,functions,flat", "cpu"}, + {"text,functions,noinlines,flat", "cpu"}, + {"text,filefunctions,noinlines,flat", "cpu"}, + {"text,addresses,noinlines,flat", "cpu"}, + {"tree,addresses,flat,nodecount=4", "cpusmall"}, + {"text,functions,flat,nodecount=5,call_tree", "unknown"}, + {"text,alloc_objects,flat", "heap_alloc"}, + {"text,files,flat", "heap"}, + {"text,files,flat,focus=[12]00,taghide=[X3]00", "heap"}, + {"text,inuse_objects,flat", "heap"}, + {"text,lines,cum,hide=line[X3]0", "cpu"}, + {"text,lines,cum,show=[12]00", "cpu"}, + {"text,lines,cum,hide=line[X3]0,focus=[12]00", "cpu"}, + {"topproto,lines,cum,hide=mangled[X3]0", "cpu"}, + {"topproto,lines", "cpu"}, + {"tree,lines,cum,focus=[24]00", "heap"}, + {"tree,relative_percentages,cum,focus=[24]00", "heap"}, + {"tree,lines,cum,show_from=line2", "cpu"}, + {"callgrind", "cpu"}, + {"callgrind,call_tree", "cpu"}, + {"callgrind", "heap"}, + {"dot,functions,flat", "cpu"}, + {"dot,functions,flat,call_tree", "cpu"}, + {"dot,lines,flat,focus=[12]00", "heap"}, + {"dot,unit=minimum", "heap_sizetags"}, + {"dot,addresses,flat,ignore=[X3]002,focus=[X1]000", "contention"}, + {"dot,files,cum", "contention"}, + {"comments,add_comment=some-comment", "cpu"}, + {"comments", "heap"}, + {"tags", "cpu"}, + {"tags,tagignore=tag[13],tagfocus=key[12]", "cpu"}, + {"tags", "heap"}, + {"tags,unit=bytes", "heap"}, + {"traces", "cpu"}, + {"traces,addresses", "cpu"}, + {"traces", "heap_tags"}, + {"dot,alloc_space,flat,focus=[234]00", "heap_alloc"}, + {"dot,alloc_space,flat,tagshow=[2]00", "heap_alloc"}, + {"dot,alloc_space,flat,hide=line.*1?23?", "heap_alloc"}, + {"dot,inuse_space,flat,tagfocus=1mb:2gb", "heap"}, + {"dot,inuse_space,flat,tagfocus=30kb:,tagignore=1mb:2mb", "heap"}, + {"disasm=line[13],addresses,flat", "cpu"}, + {"peek=line.*01", 
"cpu"}, + {"weblist=line(1000|3000)$,addresses,flat", "cpu"}, + {"tags,tagfocus=400kb:", "heap_request"}, + {"tags,tagfocus=+400kb:", "heap_request"}, + {"dot", "long_name_funcs"}, + {"text", "long_name_funcs"}, + } + + baseConfig := currentConfig() + defer setCurrentConfig(baseConfig) + for _, tc := range testcase { + t.Run(tc.flags+":"+tc.source, func(t *testing.T) { + // Reset config before processing + setCurrentConfig(baseConfig) + + testUI := &proftest.TestUI{T: t, AllowRx: "Generating report in|Ignoring local file|expression matched no samples|Interpreted .* as range, not regexp"} + + f := baseFlags() + f.args = []string{tc.source} + + flags := strings.Split(tc.flags, ",") + + // Encode profile into a protobuf and decode it again. + protoTempFile, err := os.CreateTemp("", "profile_proto") + if err != nil { + t.Errorf("cannot create tempfile: %v", err) + } + defer os.Remove(protoTempFile.Name()) + defer protoTempFile.Close() + f.strings["output"] = protoTempFile.Name() + + if flags[0] == "topproto" { + f.bools["proto"] = false + f.bools["topproto"] = true + f.bools["addresses"] = true + } + + // First pprof invocation to save the profile into a profile.proto. + // Pass in flag set hen setting defaults, because otherwise default + // transport will try to add flags to the default flag set. 
+ o1 := setDefaults(&plugin.Options{Flagset: f}) + o1.Fetch = testFetcher{} + o1.Sym = testSymbolizer{} + o1.UI = testUI + if err := PProf(o1); err != nil { + t.Fatalf("%s %q: %v", tc.source, tc.flags, err) + } + // Reset config after the proto invocation + setCurrentConfig(baseConfig) + + // Read the profile from the encoded protobuf + outputTempFile, err := os.CreateTemp("", "profile_output") + if err != nil { + t.Errorf("cannot create tempfile: %v", err) + } + defer os.Remove(outputTempFile.Name()) + defer outputTempFile.Close() + + f = baseFlags() + f.strings["output"] = outputTempFile.Name() + f.args = []string{protoTempFile.Name()} + + delete(f.bools, "proto") + addFlags(&f, flags) + solution := solutionFilename(tc.source, &f) + // Apply the flags for the second pprof run, and identify name of + // the file containing expected results + if flags[0] == "topproto" { + addFlags(&f, flags) + solution = solutionFilename(tc.source, &f) + delete(f.bools, "topproto") + f.bools["text"] = true + } + + // Second pprof invocation to read the profile from profile.proto + // and generate a report. + // Pass in flag set hen setting defaults, because otherwise default + // transport will try to add flags to the default flag set. + o2 := setDefaults(&plugin.Options{Flagset: f}) + o2.Sym = testSymbolizeDemangler{} + o2.Obj = new(mockObjTool) + o2.UI = testUI + + if err := PProf(o2); err != nil { + t.Errorf("%s: %v", tc.source, err) + } + b, err := os.ReadFile(outputTempFile.Name()) + if err != nil { + t.Errorf("Failed to read profile %s: %v", outputTempFile.Name(), err) + } + + // Read data file with expected solution + solution = "testdata/" + solution + sbuf, err := os.ReadFile(solution) + if err != nil { + t.Fatalf("reading solution file %s: %v", solution, err) + } + if runtime.GOOS == "windows" { + if flags[0] == "dot" { + // The .dot test has the paths inside strings, so \ must be escaped. 
// removeScripts removes <script>...</script> pairs from its input so that
// SVG golden-file comparisons ignore the embedded, version-dependent
// JavaScript. It recurses to strip every pair. Note: a script starting at
// byte offset 0 is intentionally left alone (the begin > 0 check), since
// the generated SVGs never start with a script tag.
func removeScripts(in []byte) []byte {
	// The markers were lost from this vendored copy ("" / undeclared
	// endMarker); restore them. "<script" (no '>') also matches tags
	// carrying attributes.
	beginMarker := []byte("<script")
	endMarker := []byte("</script>")

	if begin := bytes.Index(in, beginMarker); begin > 0 {
		if end := bytes.Index(in[begin:], endMarker); end > 0 {
			in = append(in[:begin], removeScripts(in[begin+end+len(endMarker):])...)
		}
	}
	return in
}
// testFlags implements the plugin.FlagSet interface. Each map holds the
// flag values, keyed by flag name, that the accessor methods hand back in
// place of real command-line parsing.
type testFlags struct {
	bools       map[string]bool     // boolean flags (e.g. "proto", "trim")
	ints        map[string]int      // integer flags (e.g. "nodecount")
	floats      map[string]float64  // float flags (e.g. "nodefraction")
	strings     map[string]string   // string flags (e.g. "unit", "output")
	args        []string            // positional arguments returned by Parse
	stringLists map[string][]string // repeatable string flags
}
// baseFlags returns a fresh testFlags pre-populated with the flag values
// every TestParse invocation starts from; individual test cases then layer
// their own flags on top via addFlags. The values here presumably mirror
// pprof's real defaults (nodecount, node/edge fractions) — confirm against
// the config defaults if they drift.
func baseFlags() testFlags {
	return testFlags{
		bools: map[string]bool{
			"proto":          true,
			"trim":           true,
			"compact_labels": true,
		},
		ints: map[string]int{
			"nodecount": 20,
		},
		floats: map[string]float64{
			"nodefraction": 0.05,
			"edgefraction": 0.01,
			"divide_by":    1.0,
		},
		strings: map[string]string{
			// "minimum" keeps output units un-normalized in golden files.
			"unit": "minimum",
		},
	}
}
// testFetchSymbols emulates the symbolz symbol-lookup endpoint for the two
// fake profile sources. post is a '+'-separated list of addresses; the
// reply maps each address to its offset from testStart (port 8000) or from
// testStart+testOffset (port 8001). Addresses that fall in the other
// source's range get a "wrong_source_" marker so tests can detect
// cross-source symbolization.
func testFetchSymbols(source, post string) ([]byte, error) {
	var buf bytes.Buffer

	switch source {
	case testSourceURL(8000) + "symbolz":
		for _, address := range strings.Split(post, "+") {
			// Parse error ignored deliberately: a garbage address yields
			// a == 0 and is reported via the wrong_source branch below.
			a, _ := strconv.ParseInt(address, 0, 64)
			fmt.Fprintf(&buf, "%v\t", address)
			if a-testStart > testOffset {
				fmt.Fprintf(&buf, "wrong_source_%v_", address)
				continue
			}
			fmt.Fprintf(&buf, "%#x\n", a-testStart)
		}
		return buf.Bytes(), nil
	case testSourceURL(8001) + "symbolz":
		for _, address := range strings.Split(post, "+") {
			a, _ := strconv.ParseInt(address, 0, 64)
			fmt.Fprintf(&buf, "%v\t", address)
			if a-testStart < testOffset {
				fmt.Fprintf(&buf, "wrong_source_%v_", address)
				continue
			}
			fmt.Fprintf(&buf, "%#x\n", a-testStart-testOffset)
		}
		return buf.Bytes(), nil
	default:
		return nil, fmt.Errorf("unexpected source: %s", source)
	}
}
// fakeDemangler translates the mangled symbol names used by the test
// profiles into their demangled forms. Names it does not recognize are
// returned unchanged.
func fakeDemangler(name string) string {
	demangled := map[string]string{
		"mangled1000":   "line1000",
		"mangled2000":   "line2000",
		"mangled2001":   "line2001",
		"mangled3000":   "line3000",
		"mangled3001":   "line3001",
		"mangled3002":   "line3002",
		"mangledNEW":    "operator new",
		"mangledMALLOC": "malloc",
	}
	if out, ok := demangled[name]; ok {
		return out
	}
	return name
}
10e9, + SampleType: []*profile.ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*profile.Sample{ + { + Location: []*profile.Location{longNameFuncsL[0], longNameFuncsL[1], longNameFuncsL[2]}, + Value: []int64{1000, 1000}, + }, + { + Location: []*profile.Location{longNameFuncsL[0], longNameFuncsL[1]}, + Value: []int64{100, 100}, + }, + { + Location: []*profile.Location{longNameFuncsL[2]}, + Value: []int64{10, 10}, + }, + }, + Location: longNameFuncsL, + Function: longNameFuncsF, + Mapping: longNameFuncsM, + } +} + +func cpuProfile() *profile.Profile { + var cpuM = []*profile.Mapping{ + { + ID: 1, + Start: 0x1000, + Limit: 0x4000, + File: "/path/to/testbinary", + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + HasInlineFrames: true, + }, + } + + var cpuF = []*profile.Function{ + {ID: 1, Name: "mangled1000", SystemName: "mangled1000", Filename: "testdata/file1000.src"}, + {ID: 2, Name: "mangled2000", SystemName: "mangled2000", Filename: "testdata/file2000.src"}, + {ID: 3, Name: "mangled2001", SystemName: "mangled2001", Filename: "testdata/file2000.src"}, + {ID: 4, Name: "mangled3000", SystemName: "mangled3000", Filename: "testdata/file3000.src"}, + {ID: 5, Name: "mangled3001", SystemName: "mangled3001", Filename: "testdata/file3000.src"}, + {ID: 6, Name: "mangled3002", SystemName: "mangled3002", Filename: "testdata/file3000.src"}, + } + + var cpuL = []*profile.Location{ + { + ID: 1000, + Mapping: cpuM[0], + Address: 0x1000, + Line: []profile.Line{ + {Function: cpuF[0], Line: 1}, + }, + }, + { + ID: 2000, + Mapping: cpuM[0], + Address: 0x2000, + Line: []profile.Line{ + {Function: cpuF[2], Line: 9}, + {Function: cpuF[1], Line: 4}, + }, + }, + { + ID: 3000, + Mapping: cpuM[0], + Address: 0x3000, + Line: []profile.Line{ + {Function: cpuF[5], Line: 2}, + {Function: cpuF[4], Line: 5}, + {Function: cpuF[3], Line: 6}, + }, + }, + { + ID: 3001, + Mapping: cpuM[0], + Address: 0x3001, + Line: 
[]profile.Line{ + {Function: cpuF[4], Line: 8}, + {Function: cpuF[3], Line: 9}, + }, + }, + { + ID: 3002, + Mapping: cpuM[0], + Address: 0x3002, + Line: []profile.Line{ + {Function: cpuF[5], Line: 5}, + {Function: cpuF[3], Line: 9}, + }, + }, + } + + return &profile.Profile{ + PeriodType: &profile.ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*profile.ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*profile.Sample{ + { + Location: []*profile.Location{cpuL[0], cpuL[1], cpuL[2]}, + Value: []int64{1000, 1000}, + Label: map[string][]string{ + "key1": {"tag1"}, + "key2": {"tag1"}, + }, + }, + { + Location: []*profile.Location{cpuL[0], cpuL[3]}, + Value: []int64{100, 100}, + Label: map[string][]string{ + "key1": {"tag2"}, + "key3": {"tag2"}, + }, + }, + { + Location: []*profile.Location{cpuL[1], cpuL[4]}, + Value: []int64{10, 10}, + Label: map[string][]string{ + "key1": {"tag3"}, + "key2": {"tag2"}, + }, + }, + { + Location: []*profile.Location{cpuL[2]}, + Value: []int64{10, 10}, + Label: map[string][]string{ + "key1": {"tag4"}, + "key2": {"tag1"}, + }, + }, + }, + Location: cpuL, + Function: cpuF, + Mapping: cpuM, + } +} + +func cpuProfileSmall() *profile.Profile { + var cpuM = []*profile.Mapping{ + { + ID: 1, + Start: 0x1000, + Limit: 0x4000, + File: "/path/to/testbinary", + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + HasInlineFrames: true, + }, + } + + var cpuL = []*profile.Location{ + { + ID: 1000, + Mapping: cpuM[0], + Address: 0x1000, + }, + { + ID: 2000, + Mapping: cpuM[0], + Address: 0x2000, + }, + { + ID: 3000, + Mapping: cpuM[0], + Address: 0x3000, + }, + { + ID: 4000, + Mapping: cpuM[0], + Address: 0x4000, + }, + { + ID: 5000, + Mapping: cpuM[0], + Address: 0x5000, + }, + } + + return &profile.Profile{ + PeriodType: &profile.ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: 
[]*profile.ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*profile.Sample{ + { + Location: []*profile.Location{cpuL[0], cpuL[1], cpuL[2]}, + Value: []int64{1000, 1000}, + }, + { + Location: []*profile.Location{cpuL[3], cpuL[1], cpuL[4]}, + Value: []int64{1000, 1000}, + }, + { + Location: []*profile.Location{cpuL[2]}, + Value: []int64{1000, 1000}, + }, + { + Location: []*profile.Location{cpuL[4]}, + Value: []int64{1000, 1000}, + }, + }, + Location: cpuL, + Function: nil, + Mapping: cpuM, + } +} + +func heapProfile() *profile.Profile { + var heapM = []*profile.Mapping{ + { + ID: 1, + BuildID: "buildid", + Start: 0x1000, + Limit: 0x4000, + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + HasInlineFrames: true, + }, + } + + var heapF = []*profile.Function{ + {ID: 1, Name: "pruneme", SystemName: "pruneme", Filename: "prune.h"}, + {ID: 2, Name: "mangled1000", SystemName: "mangled1000", Filename: "testdata/file1000.src"}, + {ID: 3, Name: "mangled2000", SystemName: "mangled2000", Filename: "testdata/file2000.src"}, + {ID: 4, Name: "mangled2001", SystemName: "mangled2001", Filename: "testdata/file2000.src"}, + {ID: 5, Name: "mangled3000", SystemName: "mangled3000", Filename: "testdata/file3000.src"}, + {ID: 6, Name: "mangled3001", SystemName: "mangled3001", Filename: "testdata/file3000.src"}, + {ID: 7, Name: "mangled3002", SystemName: "mangled3002", Filename: "testdata/file3000.src"}, + {ID: 8, Name: "mangledMALLOC", SystemName: "mangledMALLOC", Filename: "malloc.h"}, + {ID: 9, Name: "mangledNEW", SystemName: "mangledNEW", Filename: "new.h"}, + } + + var heapL = []*profile.Location{ + { + ID: 1000, + Mapping: heapM[0], + Address: 0x1000, + Line: []profile.Line{ + {Function: heapF[0], Line: 100}, + {Function: heapF[7], Line: 100}, + {Function: heapF[1], Line: 1}, + }, + }, + { + ID: 2000, + Mapping: heapM[0], + Address: 0x2000, + Line: []profile.Line{ + {Function: heapF[8], Line: 100}, + {Function: 
heapF[3], Line: 2}, + {Function: heapF[2], Line: 3}, + }, + }, + { + ID: 3000, + Mapping: heapM[0], + Address: 0x3000, + Line: []profile.Line{ + {Function: heapF[8], Line: 100}, + {Function: heapF[6], Line: 3}, + {Function: heapF[5], Line: 2}, + {Function: heapF[4], Line: 4}, + }, + }, + { + ID: 3001, + Mapping: heapM[0], + Address: 0x3001, + Line: []profile.Line{ + {Function: heapF[0], Line: 100}, + {Function: heapF[8], Line: 100}, + {Function: heapF[5], Line: 2}, + {Function: heapF[4], Line: 4}, + }, + }, + { + ID: 3002, + Mapping: heapM[0], + Address: 0x3002, + Line: []profile.Line{ + {Function: heapF[6], Line: 3}, + {Function: heapF[4], Line: 4}, + }, + }, + } + + return &profile.Profile{ + Comments: []string{"comment", "#hidden comment"}, + PeriodType: &profile.ValueType{Type: "allocations", Unit: "bytes"}, + Period: 524288, + SampleType: []*profile.ValueType{ + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + }, + Sample: []*profile.Sample{ + { + Location: []*profile.Location{heapL[0], heapL[1], heapL[2]}, + Value: []int64{10, 1024000}, + NumLabel: map[string][]int64{"bytes": {102400}}, + }, + { + Location: []*profile.Location{heapL[0], heapL[3]}, + Value: []int64{20, 4096000}, + NumLabel: map[string][]int64{"bytes": {204800}}, + }, + { + Location: []*profile.Location{heapL[1], heapL[4]}, + Value: []int64{40, 65536000}, + NumLabel: map[string][]int64{"bytes": {1638400}}, + }, + { + Location: []*profile.Location{heapL[2]}, + Value: []int64{80, 32768000}, + NumLabel: map[string][]int64{"bytes": {409600}}, + }, + }, + DropFrames: ".*operator new.*|malloc", + Location: heapL, + Function: heapF, + Mapping: heapM, + } +} + +func contentionProfile() *profile.Profile { + var contentionM = []*profile.Mapping{ + { + ID: 1, + BuildID: "buildid-contention", + Start: 0x1000, + Limit: 0x4000, + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + HasInlineFrames: true, + }, + } + + var contentionF = []*profile.Function{ + 
{ID: 1, Name: "mangled1000", SystemName: "mangled1000", Filename: "testdata/file1000.src"}, + {ID: 2, Name: "mangled2000", SystemName: "mangled2000", Filename: "testdata/file2000.src"}, + {ID: 3, Name: "mangled2001", SystemName: "mangled2001", Filename: "testdata/file2000.src"}, + {ID: 4, Name: "mangled3000", SystemName: "mangled3000", Filename: "testdata/file3000.src"}, + {ID: 5, Name: "mangled3001", SystemName: "mangled3001", Filename: "testdata/file3000.src"}, + {ID: 6, Name: "mangled3002", SystemName: "mangled3002", Filename: "testdata/file3000.src"}, + } + + var contentionL = []*profile.Location{ + { + ID: 1000, + Mapping: contentionM[0], + Address: 0x1000, + Line: []profile.Line{ + {Function: contentionF[0], Line: 1}, + }, + }, + { + ID: 2000, + Mapping: contentionM[0], + Address: 0x2000, + Line: []profile.Line{ + {Function: contentionF[2], Line: 2}, + {Function: contentionF[1], Line: 3}, + }, + }, + { + ID: 3000, + Mapping: contentionM[0], + Address: 0x3000, + Line: []profile.Line{ + {Function: contentionF[5], Line: 2}, + {Function: contentionF[4], Line: 3}, + {Function: contentionF[3], Line: 5}, + }, + }, + { + ID: 3001, + Mapping: contentionM[0], + Address: 0x3001, + Line: []profile.Line{ + {Function: contentionF[4], Line: 3}, + {Function: contentionF[3], Line: 5}, + }, + }, + { + ID: 3002, + Mapping: contentionM[0], + Address: 0x3002, + Line: []profile.Line{ + {Function: contentionF[5], Line: 4}, + {Function: contentionF[3], Line: 3}, + }, + }, + } + + return &profile.Profile{ + PeriodType: &profile.ValueType{Type: "contentions", Unit: "count"}, + Period: 524288, + SampleType: []*profile.ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + Sample: []*profile.Sample{ + { + Location: []*profile.Location{contentionL[0], contentionL[1], contentionL[2]}, + Value: []int64{10, 10240000}, + }, + { + Location: []*profile.Location{contentionL[0], contentionL[3]}, + Value: []int64{20, 40960000}, + }, + { + Location: 
[]*profile.Location{contentionL[1], contentionL[4]}, + Value: []int64{40, 65536000}, + }, + { + Location: []*profile.Location{contentionL[2]}, + Value: []int64{80, 32768000}, + }, + }, + Location: contentionL, + Function: contentionF, + Mapping: contentionM, + Comments: []string{"Comment #1", "Comment #2"}, + } +} + +func symzProfile() *profile.Profile { + var symzM = []*profile.Mapping{ + { + ID: 1, + Start: testStart, + Limit: 0x4000, + File: "/path/to/testbinary", + }, + } + + var symzL = []*profile.Location{ + {ID: 1, Mapping: symzM[0], Address: testStart}, + {ID: 2, Mapping: symzM[0], Address: testStart + 0x1000}, + {ID: 3, Mapping: symzM[0], Address: testStart + 0x2000}, + } + + return &profile.Profile{ + PeriodType: &profile.ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*profile.ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*profile.Sample{ + { + Location: []*profile.Location{symzL[0], symzL[1], symzL[2]}, + Value: []int64{1, 1}, + }, + }, + Location: symzL, + Mapping: symzM, + } +} + +func largeProfile(tb testing.TB) *profile.Profile { + tb.Helper() + input := proftest.LargeProfile(tb) + prof, err := profile.Parse(bytes.NewBuffer(input)) + if err != nil { + tb.Fatal(err) + } + return prof +} + +var autoCompleteTests = []struct { + in string + out string +}{ + {"", ""}, + {"xyz", "xyz"}, // no match + {"dis", "disasm"}, // single match + {"t", "t"}, // many matches + {"top abc", "top abc"}, // no function name match + {"top mangledM", "top mangledMALLOC"}, // single function name match + {"top cmd cmd mangledM", "top cmd cmd mangledMALLOC"}, + {"top mangled", "top mangled"}, // many function name matches + {"cmd mangledM", "cmd mangledM"}, // invalid command + {"top mangledM cmd", "top mangledM cmd"}, // cursor misplaced + {"top edMA", "top mangledMALLOC"}, // single infix function name match + {"top -mangledM", "top -mangledMALLOC"}, // ignore sign 
handled + {"lin", "lines"}, // single variable match + {"EdGeF", "edgefraction"}, // single capitalized match + {"help dis", "help disasm"}, // help command match + {"help relative_perc", "help relative_percentages"}, // help variable match + {"help coMpa", "help compact_labels"}, // help variable capitalized match +} + +func TestAutoComplete(t *testing.T) { + complete := newCompleter(functionNames(heapProfile())) + + for _, test := range autoCompleteTests { + if out := complete(test.in); out != test.out { + t.Errorf("autoComplete(%s) = %s; want %s", test.in, out, test.out) + } + } +} + +func TestTagFilter(t *testing.T) { + var tagFilterTests = []struct { + desc, value string + tags map[string][]string + want bool + }{ + { + "1 key with 1 matching value", + "tag2", + map[string][]string{"value1": {"tag1", "tag2"}}, + true, + }, + { + "1 key with no matching values", + "tag3", + map[string][]string{"value1": {"tag1", "tag2"}}, + false, + }, + { + "two keys, each with value matching different one value in list", + "tag1,tag3", + map[string][]string{"value1": {"tag1", "tag2"}, "value2": {"tag3"}}, + true, + }, + {"two keys, all value matching different regex value in list", + "t..[12],t..3", + map[string][]string{"value1": {"tag1", "tag2"}, "value2": {"tag3"}}, + true, + }, + { + "one key, not all values in list matched", + "tag2,tag3", + map[string][]string{"value1": {"tag1", "tag2"}}, + false, + }, + { + "key specified, list of tags where all tags in list matched", + "key1=tag1,tag2", + map[string][]string{"key1": {"tag1", "tag2"}}, + true, + }, + {"key specified, list of tag values where not all are matched", + "key1=tag1,tag2", + map[string][]string{"key1": {"tag1"}}, + true, + }, + { + "key included for regex matching, list of values where all values in list matched", + "key1:tag1,tag2", + map[string][]string{"key1": {"tag1", "tag2"}}, + true, + }, + { + "key included for regex matching, list of values where not only second value matched", + "key1:tag1,tag2", + 
map[string][]string{"key1": {"tag2"}}, + false, + }, + { + "key included for regex matching, list of values where not only first value matched", + "key1:tag1,tag2", + map[string][]string{"key1": {"tag1"}}, + false, + }, + } + for _, test := range tagFilterTests { + t.Run(test.desc, func(t *testing.T) { + filter, err := compileTagFilter(test.desc, test.value, nil, &proftest.TestUI{T: t}, nil) + if err != nil { + t.Fatalf("tagFilter %s:%v", test.desc, err) + } + s := profile.Sample{ + Label: test.tags, + } + if got := filter(&s); got != test.want { + t.Errorf("tagFilter %s: got %v, want %v", test.desc, got, test.want) + } + }) + } +} + +func TestIdentifyNumLabelUnits(t *testing.T) { + var tagFilterTests = []struct { + desc string + tagVals []map[string][]int64 + tagUnits []map[string][]string + wantUnits map[string]string + allowedRx string + wantIgnoreErrCount int + }{ + { + "Multiple keys, no units for all keys", + []map[string][]int64{{"keyA": {131072}, "keyB": {128}}}, + []map[string][]string{{"keyA": {}, "keyB": {""}}}, + map[string]string{"keyA": "keyA", "keyB": "keyB"}, + "", + 0, + }, + { + "Multiple keys, different units for each key", + []map[string][]int64{{"keyA": {131072}, "keyB": {128}}}, + []map[string][]string{{"keyA": {"bytes"}, "keyB": {"kilobytes"}}}, + map[string]string{"keyA": "bytes", "keyB": "kilobytes"}, + "", + 0, + }, + { + "Multiple keys with multiple values, different units for each key", + []map[string][]int64{{"keyC": {131072, 1}, "keyD": {128, 252}}}, + []map[string][]string{{"keyC": {"bytes", "bytes"}, "keyD": {"kilobytes", "kilobytes"}}}, + map[string]string{"keyC": "bytes", "keyD": "kilobytes"}, + "", + 0, + }, + { + "Multiple keys with multiple values, some units missing", + []map[string][]int64{{"key1": {131072, 1}, "A": {128, 252}, "key3": {128}, "key4": {1}}, {"key3": {128}, "key4": {1}}}, + []map[string][]string{{"key1": {"", "bytes"}, "A": {"kilobytes", ""}, "key3": {""}, "key4": {"hour"}}, {"key3": {"seconds"}, "key4": {""}}}, 
+ map[string]string{"key1": "bytes", "A": "kilobytes", "key3": "seconds", "key4": "hour"}, + "", + 0, + }, + { + "One key with three units in same sample", + []map[string][]int64{{"key": {8, 8, 16}}}, + []map[string][]string{{"key": {"bytes", "megabytes", "kilobytes"}}}, + map[string]string{"key": "bytes"}, + `(For tag key used unit bytes, also encountered unit\(s\) kilobytes, megabytes)`, + 1, + }, + { + "One key with four units in same sample", + []map[string][]int64{{"key": {8, 8, 16, 32}}}, + []map[string][]string{{"key": {"bytes", "kilobytes", "a", "megabytes"}}}, + map[string]string{"key": "bytes"}, + `(For tag key used unit bytes, also encountered unit\(s\) a, kilobytes, megabytes)`, + 1, + }, + { + "One key with two units in same sample", + []map[string][]int64{{"key": {8, 8}}}, + []map[string][]string{{"key": {"bytes", "seconds"}}}, + map[string]string{"key": "bytes"}, + `(For tag key used unit bytes, also encountered unit\(s\) seconds)`, + 1, + }, + { + "One key with different units in different samples", + []map[string][]int64{{"key1": {8}}, {"key1": {8}}, {"key1": {8}}}, + []map[string][]string{{"key1": {"bytes"}}, {"key1": {"kilobytes"}}, {"key1": {"megabytes"}}}, + map[string]string{"key1": "bytes"}, + `(For tag key1 used unit bytes, also encountered unit\(s\) kilobytes, megabytes)`, + 1, + }, + { + "Key alignment, unit not specified", + []map[string][]int64{{"alignment": {8}}}, + []map[string][]string{nil}, + map[string]string{"alignment": "bytes"}, + "", + 0, + }, + { + "Key request, unit not specified", + []map[string][]int64{{"request": {8}}, {"request": {8, 8}}}, + []map[string][]string{nil, nil}, + map[string]string{"request": "bytes"}, + "", + 0, + }, + { + "Check units not over-written for keys with default units", + []map[string][]int64{{ + "alignment": {8}, + "request": {8}, + "bytes": {8}, + }}, + []map[string][]string{{ + "alignment": {"seconds"}, + "request": {"minutes"}, + "bytes": {"hours"}, + }}, + map[string]string{ + "alignment": 
"seconds", + "request": "minutes", + "bytes": "hours", + }, + "", + 0, + }, + } + for _, test := range tagFilterTests { + t.Run(test.desc, func(t *testing.T) { + p := profile.Profile{Sample: make([]*profile.Sample, len(test.tagVals))} + for i, numLabel := range test.tagVals { + s := profile.Sample{ + NumLabel: numLabel, + NumUnit: test.tagUnits[i], + } + p.Sample[i] = &s + } + testUI := &proftest.TestUI{T: t, AllowRx: test.allowedRx} + units := identifyNumLabelUnits(&p, testUI) + if !reflect.DeepEqual(test.wantUnits, units) { + t.Errorf("got %v units, want %v", units, test.wantUnits) + } + if got, want := testUI.NumAllowRxMatches, test.wantIgnoreErrCount; want != got { + t.Errorf("got %d errors logged, want %d errors logged", got, want) + } + }) + } +} + +func TestNumericTagFilter(t *testing.T) { + var tagFilterTests = []struct { + desc, value string + tags map[string][]int64 + identifiedUnits map[string]string + want bool + }{ + { + "Match when unit conversion required", + "128kb", + map[string][]int64{"key1": {131072}, "key2": {128}}, + map[string]string{"key1": "bytes", "key2": "kilobytes"}, + true, + }, + { + "Match only when values equal after unit conversion", + "512kb", + map[string][]int64{"key1": {512}, "key2": {128}}, + map[string]string{"key1": "bytes", "key2": "kilobytes"}, + false, + }, + { + "Match when values and units initially equal", + "10bytes", + map[string][]int64{"key1": {10}, "key2": {128}}, + map[string]string{"key1": "bytes", "key2": "kilobytes"}, + true, + }, + { + "Match range without lower bound, no unit conversion required", + ":10bytes", + map[string][]int64{"key1": {8}}, + map[string]string{"key1": "bytes"}, + true, + }, + { + "Match range without lower bound, unit conversion required", + ":10kb", + map[string][]int64{"key1": {8}}, + map[string]string{"key1": "bytes"}, + true, + }, + { + "Match range without upper bound, unit conversion required", + "10b:", + map[string][]int64{"key1": {8}}, + map[string]string{"key1": "kilobytes"}, + 
true, + }, + { + "Match range without upper bound, no unit conversion required", + "10b:", + map[string][]int64{"key1": {12}}, + map[string]string{"key1": "bytes"}, + true, + }, + { + "Don't match range without upper bound, no unit conversion required", + "10b:", + map[string][]int64{"key1": {8}}, + map[string]string{"key1": "bytes"}, + false, + }, + { + "Multiple keys with different units, don't match range without upper bound", + "10kb:", + map[string][]int64{"key1": {8}}, + map[string]string{"key1": "bytes", "key2": "kilobytes"}, + false, + }, + { + "Match range without upper bound, unit conversion required", + "10b:", + map[string][]int64{"key1": {8}}, + map[string]string{"key1": "kilobytes"}, + true, + }, + { + "Don't match range without lower bound, no unit conversion required", + ":10b", + map[string][]int64{"key1": {12}}, + map[string]string{"key1": "bytes"}, + false, + }, + { + "Match specific key, key present, one of two values match", + "bytes=5b", + map[string][]int64{"bytes": {10, 5}}, + map[string]string{"bytes": "bytes"}, + true, + }, + { + "Match specific key, key present and value matches", + "bytes=1024b", + map[string][]int64{"bytes": {1024}}, + map[string]string{"bytes": "kilobytes"}, + false, + }, + { + "Match specific key, matching key present and value matches, also non-matching key", + "bytes=1024b", + map[string][]int64{"bytes": {1024}, "key2": {5}}, + map[string]string{"bytes": "bytes", "key2": "bytes"}, + true, + }, + { + "Match specific key and range of values, value matches", + "bytes=512b:1024b", + map[string][]int64{"bytes": {780}}, + map[string]string{"bytes": "bytes"}, + true, + }, + { + "Match specific key and range of values, value too large", + "key1=1kb:2kb", + map[string][]int64{"key1": {4096}}, + map[string]string{"key1": "bytes"}, + false, + }, + { + "Match specific key and range of values, value too small", + "key1=1kb:2kb", + map[string][]int64{"key1": {256}}, + map[string]string{"key1": "bytes"}, + false, + }, + { + "Match 
specific key and value, unit conversion required", + "bytes=1024b", + map[string][]int64{"bytes": {1}}, + map[string]string{"bytes": "kilobytes"}, + true, + }, + { + "Match specific key and value, key does not appear", + "key2=256bytes", + map[string][]int64{"key1": {256}}, + map[string]string{"key1": "bytes"}, + false, + }, + { + "Match negative key and range of values, value matches", + "bytes=-512b:-128b", + map[string][]int64{"bytes": {-256}}, + map[string]string{"bytes": "bytes"}, + true, + }, + { + "Match negative key and range of values, value outside range", + "bytes=-512b:-128b", + map[string][]int64{"bytes": {-2048}}, + map[string]string{"bytes": "bytes"}, + false, + }, + { + "Match exact value, unitless tag", + "pid=123", + map[string][]int64{"pid": {123}}, + nil, + true, + }, + { + "Match range, unitless tag", + "pid=123:123", + map[string][]int64{"pid": {123}}, + nil, + true, + }, + { + "Don't match range, unitless tag", + "pid=124:124", + map[string][]int64{"pid": {123}}, + nil, + false, + }, + { + "Match range without upper bound, unitless tag", + "pid=100:", + map[string][]int64{"pid": {123}}, + nil, + true, + }, + { + "Don't match range without upper bound, unitless tag", + "pid=200:", + map[string][]int64{"pid": {123}}, + nil, + false, + }, + { + "Match range without lower bound, unitless tag", + "pid=:200", + map[string][]int64{"pid": {123}}, + nil, + true, + }, + { + "Don't match range without lower bound, unitless tag", + "pid=:100", + map[string][]int64{"pid": {123}}, + nil, + false, + }, + } + for _, test := range tagFilterTests { + t.Run(test.desc, func(t *testing.T) { + wantErrMsg := strings.Join([]string{"(", test.desc, ":Interpreted '", test.value[strings.Index(test.value, "=")+1:], "' as range, not regexp", ")"}, "") + filter, err := compileTagFilter(test.desc, test.value, test.identifiedUnits, &proftest.TestUI{T: t, + AllowRx: wantErrMsg}, nil) + if err != nil { + t.Fatalf("%v", err) + } + s := profile.Sample{ + NumLabel: test.tags, + } 
+ if got := filter(&s); got != test.want { + t.Fatalf("got %v, want %v", got, test.want) + } + }) + } +} + +// TestOptionsHaveHelp tests that a help message is supplied for every +// selectable option. +func TestOptionsHaveHelp(t *testing.T) { + for _, f := range configFields { + // Check all choices if this is a group, else check f.name. + names := f.choices + if len(names) == 0 { + names = []string{f.name} + } + for _, name := range names { + if _, ok := configHelp[name]; !ok { + t.Errorf("missing help message for %q", name) + } + } + } +} + +type testSymbolzMergeFetcher struct{} + +func (testSymbolzMergeFetcher) Fetch(s string, d, t time.Duration) (*profile.Profile, string, error) { + var p *profile.Profile + switch s { + case testSourceURL(8000) + "symbolz": + p = symzProfile() + case testSourceURL(8001) + "symbolz": + p = symzProfile() + p.Mapping[0].Start += testOffset + p.Mapping[0].Limit += testOffset + for i := range p.Location { + p.Location[i].Address += testOffset + } + default: + return nil, "", fmt.Errorf("unexpected source: %s", s) + } + return p, s, nil +} + +func TestSymbolzAfterMerge(t *testing.T) { + baseConfig := currentConfig() + defer setCurrentConfig(baseConfig) + + f := baseFlags() + f.args = []string{ + testSourceURL(8000) + "symbolz", + testSourceURL(8001) + "symbolz", + } + + o := setDefaults(nil) + o.Flagset = f + o.Obj = new(mockObjTool) + src, cmd, err := parseFlags(o) + if err != nil { + t.Fatalf("parseFlags: %v", err) + } + + if len(cmd) != 1 || cmd[0] != "proto" { + t.Fatalf("parseFlags returned command %v, want [proto]", cmd) + } + + o.Fetch = testSymbolzMergeFetcher{} + o.Sym = testSymbolzSymbolizer{} + p, err := fetchProfiles(src, o) + if err != nil { + t.Fatalf("fetchProfiles: %v", err) + } + if len(p.Location) != 3 { + t.Errorf("Got %d locations after merge, want %d", len(p.Location), 3) + } + for i, l := range p.Location { + if len(l.Line) != 1 { + t.Errorf("Number of lines for symbolz %#x in iteration %d, got %d, want %d", 
l.Address, i, len(l.Line), 1) + continue + } + address := l.Address - l.Mapping.Start + if got, want := l.Line[0].Function.Name, fmt.Sprintf("%#x", address); got != want { + t.Errorf("symbolz %#x, got %s, want %s", address, got, want) + } + } +} + +func TestProfileCopier(t *testing.T) { + type testCase struct { + name string + prof *profile.Profile + } + for _, c := range []testCase{ + {"cpu", cpuProfile()}, + {"heap", heapProfile()}, + {"contention", contentionProfile()}, + {"symbolz", symzProfile()}, + {"long_name_funcs", longNameFuncsProfile()}, + {"large", largeProfile(t)}, + } { + t.Run(c.name, func(t *testing.T) { + copier := makeProfileCopier(c.prof) + + // Muck with one copy to check that fresh copies are unaffected + tmp := copier.newCopy() + tmp.Sample = tmp.Sample[:0] + + // Get new copy and check it is same as the original. + want := c.prof.String() + got := copier.newCopy().String() + if got != want { + t.Errorf("New copy is not same as original profile") + diff, err := proftest.Diff([]byte(want), []byte(got)) + if err != nil { + t.Fatalf("Diff: %v", err) + } + t.Logf("Diff:\n%s\n", string(diff)) + } + }) + } +} + +type mockObjTool struct{} + +func (*mockObjTool) Open(file string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) { + return &mockFile{file, "abcdef", 0}, nil +} + +func (m *mockObjTool) Disasm(file string, start, end uint64, intelSyntax bool) ([]plugin.Inst, error) { + const fn1 = "line1000" + const fn3 = "line3000" + const file1 = "testdata/file1000.src" + const file3 = "testdata/file3000.src" + data := []plugin.Inst{ + {Addr: 0x1000, Text: "instruction one", Function: fn1, File: file1, Line: 1}, + {Addr: 0x1001, Text: "instruction two", Function: fn1, File: file1, Line: 1}, + {Addr: 0x1002, Text: "instruction three", Function: fn1, File: file1, Line: 2}, + {Addr: 0x1003, Text: "instruction four", Function: fn1, File: file1, Line: 1}, + {Addr: 0x3000, Text: "instruction one", Function: fn3, File: file3}, + 
{Addr: 0x3001, Text: "instruction two", Function: fn3, File: file3}, + {Addr: 0x3002, Text: "instruction three", Function: fn3, File: file3}, + {Addr: 0x3003, Text: "instruction four", Function: fn3, File: file3}, + {Addr: 0x3004, Text: "instruction five", Function: fn3, File: file3}, + } + var result []plugin.Inst + for _, inst := range data { + if inst.Addr >= start && inst.Addr <= end { + result = append(result, inst) + } + } + return result, nil +} + +type mockFile struct { + name, buildID string + base uint64 +} + +// Name returns the underlying file name, if available +func (m *mockFile) Name() string { + return m.name +} + +// ObjAddr returns the objdump address corresponding to a runtime address. +func (m *mockFile) ObjAddr(addr uint64) (uint64, error) { + return addr - m.base, nil +} + +// BuildID returns the GNU build ID of the file, or an empty string. +func (m *mockFile) BuildID() string { + return m.buildID +} + +// SourceLine reports the source line information for a given +// address in the file. Due to inlining, the source line information +// is in general a list of positions representing a call stack, +// with the leaf function first. +func (*mockFile) SourceLine(addr uint64) ([]plugin.Frame, error) { + // Return enough data to support the SourceLine() calls needed for + // weblist on cpuProfile() contents. + frame := func(fn, file string, num int) plugin.Frame { + // Reuse the same num for line number and column number. 
+ return plugin.Frame{Func: fn, File: file, Line: num, Column: num} + } + switch addr { + case 0x1000: + return []plugin.Frame{ + frame("mangled1000", "testdata/file1000.src", 1), + }, nil + case 0x1001: + return []plugin.Frame{ + frame("mangled1000", "testdata/file1000.src", 1), + }, nil + case 0x1002: + return []plugin.Frame{ + frame("mangled1000", "testdata/file1000.src", 2), + }, nil + case 0x1003: + return []plugin.Frame{ + frame("mangled1000", "testdata/file1000.src", 1), + }, nil + case 0x2000: + return []plugin.Frame{ + frame("mangled2001", "testdata/file2000.src", 9), + frame("mangled2000", "testdata/file2000.src", 4), + }, nil + case 0x3000: + return []plugin.Frame{ + frame("mangled3002", "testdata/file3000.src", 2), + frame("mangled3001", "testdata/file3000.src", 5), + frame("mangled3000", "testdata/file3000.src", 6), + }, nil + case 0x3001: + return []plugin.Frame{ + frame("mangled3001", "testdata/file3000.src", 8), + frame("mangled3000", "testdata/file3000.src", 9), + }, nil + case 0x3002: + return []plugin.Frame{ + frame("mangled3002", "testdata/file3000.src", 5), + frame("mangled3000", "testdata/file3000.src", 9), + }, nil + } + + return nil, nil +} + +// Symbols returns a list of symbols in the object file. +// If r is not nil, Symbols restricts the list to symbols +// with names matching the regular expression. +// If addr is not zero, Symbols restricts the list to symbols +// containing that address. +func (m *mockFile) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) { + switch r.String() { + case "line[13]": + return []*plugin.Sym{ + { + Name: []string{"line1000"}, File: m.name, + Start: 0x1000, End: 0x1003, + }, + { + Name: []string{"line3000"}, File: m.name, + Start: 0x3000, End: 0x3004, + }, + }, nil + } + return nil, fmt.Errorf("unimplemented") +} + +// Close closes the file, releasing associated resources. 
+func (*mockFile) Close() error { + return nil +} diff --git a/plugin/debug/pkg/internal/driver/fetch.go b/plugin/debug/pkg/internal/driver/fetch.go new file mode 100644 index 0000000..c29365e --- /dev/null +++ b/plugin/debug/pkg/internal/driver/fetch.go @@ -0,0 +1,622 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bytes" + "fmt" + "io" + "net/http" + "net/url" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "m7s.live/v5/plugin/debug/pkg/internal/measurement" + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +// fetchProfiles fetches and symbolizes the profiles specified by s. +// It will merge all the profiles it is able to retrieve, even if +// there are some failures. It will return an error if it is unable to +// fetch any profiles. 
+func fetchProfiles(s *source, o *plugin.Options) (*profile.Profile, error) { + sources := make([]profileSource, 0, len(s.Sources)) + for _, src := range s.Sources { + sources = append(sources, profileSource{ + addr: src, + source: s, + }) + } + + bases := make([]profileSource, 0, len(s.Base)) + for _, src := range s.Base { + bases = append(bases, profileSource{ + addr: src, + source: s, + }) + } + + p, pbase, m, mbase, save, err := grabSourcesAndBases(sources, bases, o.Fetch, o.Obj, o.UI, o.HTTPTransport) + if err != nil { + return nil, err + } + + if pbase != nil { + if s.DiffBase { + pbase.SetLabel("pprof::base", []string{"true"}) + } + if s.Normalize { + err := p.Normalize(pbase) + if err != nil { + return nil, err + } + } + pbase.Scale(-1) + p, m, err = combineProfiles([]*profile.Profile{p, pbase}, []plugin.MappingSources{m, mbase}) + if err != nil { + return nil, err + } + } + + // Symbolize the merged profile. + if err := o.Sym.Symbolize(s.Symbolize, m, p); err != nil { + return nil, err + } + p.RemoveUninteresting() + unsourceMappings(p) + + if s.Comment != "" { + p.Comments = append(p.Comments, s.Comment) + } + + // Save a copy of the merged profile if there is at least one remote source. + if save { + dir, err := setTmpDir(o.UI) + if err != nil { + return nil, err + } + + prefix := "pprof." + if len(p.Mapping) > 0 && p.Mapping[0].File != "" { + prefix += filepath.Base(p.Mapping[0].File) + "." + } + for _, s := range p.SampleType { + prefix += s.Type + "." 
+ } + + tempFile, err := newTempFile(dir, prefix, ".pb.gz") + if err == nil { + if err = p.Write(tempFile); err == nil { + o.UI.PrintErr("Saved profile in ", tempFile.Name()) + } + } + if err != nil { + o.UI.PrintErr("Could not save profile: ", err) + } + } + + if err := p.CheckValid(); err != nil { + return nil, err + } + + return p, nil +} + +func grabSourcesAndBases(sources, bases []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (*profile.Profile, *profile.Profile, plugin.MappingSources, plugin.MappingSources, bool, error) { + wg := sync.WaitGroup{} + wg.Add(2) + var psrc, pbase *profile.Profile + var msrc, mbase plugin.MappingSources + var savesrc, savebase bool + var errsrc, errbase error + var countsrc, countbase int + go func() { + defer wg.Done() + psrc, msrc, savesrc, countsrc, errsrc = chunkedGrab(sources, fetch, obj, ui, tr) + }() + go func() { + defer wg.Done() + pbase, mbase, savebase, countbase, errbase = chunkedGrab(bases, fetch, obj, ui, tr) + }() + wg.Wait() + save := savesrc || savebase + + if errsrc != nil { + return nil, nil, nil, nil, false, fmt.Errorf("problem fetching source profiles: %v", errsrc) + } + if errbase != nil { + return nil, nil, nil, nil, false, fmt.Errorf("problem fetching base profiles: %v,", errbase) + } + if countsrc == 0 { + return nil, nil, nil, nil, false, fmt.Errorf("failed to fetch any source profiles") + } + if countbase == 0 && len(bases) > 0 { + return nil, nil, nil, nil, false, fmt.Errorf("failed to fetch any base profiles") + } + if want, got := len(sources), countsrc; want != got { + ui.PrintErr(fmt.Sprintf("Fetched %d source profiles out of %d", got, want)) + } + if want, got := len(bases), countbase; want != got { + ui.PrintErr(fmt.Sprintf("Fetched %d base profiles out of %d", got, want)) + } + + return psrc, pbase, msrc, mbase, save, nil +} + +// chunkedGrab fetches the profiles described in source and merges them into +// a single profile. 
It fetches a chunk of profiles concurrently, with a maximum +// chunk size to limit its memory usage. +func chunkedGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (*profile.Profile, plugin.MappingSources, bool, int, error) { + const chunkSize = 128 + + var p *profile.Profile + var msrc plugin.MappingSources + var save bool + var count int + + for start := 0; start < len(sources); start += chunkSize { + end := start + chunkSize + if end > len(sources) { + end = len(sources) + } + chunkP, chunkMsrc, chunkSave, chunkCount, chunkErr := concurrentGrab(sources[start:end], fetch, obj, ui, tr) + switch { + case chunkErr != nil: + return nil, nil, false, 0, chunkErr + case chunkP == nil: + continue + case p == nil: + p, msrc, save, count = chunkP, chunkMsrc, chunkSave, chunkCount + default: + p, msrc, chunkErr = combineProfiles([]*profile.Profile{p, chunkP}, []plugin.MappingSources{msrc, chunkMsrc}) + if chunkErr != nil { + return nil, nil, false, 0, chunkErr + } + if chunkSave { + save = true + } + count += chunkCount + } + } + + return p, msrc, save, count, nil +} + +// concurrentGrab fetches multiple profiles concurrently +func concurrentGrab(sources []profileSource, fetch plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (*profile.Profile, plugin.MappingSources, bool, int, error) { + wg := sync.WaitGroup{} + wg.Add(len(sources)) + for i := range sources { + go func(s *profileSource) { + defer wg.Done() + s.p, s.msrc, s.remote, s.err = grabProfile(s.source, s.addr, fetch, obj, ui, tr) + }(&sources[i]) + } + wg.Wait() + + var save bool + profiles := make([]*profile.Profile, 0, len(sources)) + msrcs := make([]plugin.MappingSources, 0, len(sources)) + for i := range sources { + s := &sources[i] + if err := s.err; err != nil { + ui.PrintErr(s.addr + ": " + err.Error()) + continue + } + save = save || s.remote + profiles = append(profiles, s.p) + msrcs = append(msrcs, s.msrc) + *s = 
profileSource{} + } + + if len(profiles) == 0 { + return nil, nil, false, 0, nil + } + + p, msrc, err := combineProfiles(profiles, msrcs) + if err != nil { + return nil, nil, false, 0, err + } + return p, msrc, save, len(profiles), nil +} + +func combineProfiles(profiles []*profile.Profile, msrcs []plugin.MappingSources) (*profile.Profile, plugin.MappingSources, error) { + // Merge profiles. + // + // The merge call below only treats exactly matching sample type lists as + // compatible and will fail otherwise. Make the profiles' sample types + // compatible for the merge, see CompatibilizeSampleTypes() doc for details. + if err := profile.CompatibilizeSampleTypes(profiles); err != nil { + return nil, nil, err + } + if err := measurement.ScaleProfiles(profiles); err != nil { + return nil, nil, err + } + + // Avoid expensive work for the common case of a single profile/src. + if len(profiles) == 1 && len(msrcs) == 1 { + return profiles[0], msrcs[0], nil + } + + p, err := profile.Merge(profiles) + if err != nil { + return nil, nil, err + } + + // Combine mapping sources. + msrc := make(plugin.MappingSources) + for _, ms := range msrcs { + for m, s := range ms { + msrc[m] = append(msrc[m], s...) + } + } + return p, msrc, nil +} + +type profileSource struct { + addr string + source *source + + p *profile.Profile + msrc plugin.MappingSources + remote bool + err error +} + +func homeEnv() string { + switch runtime.GOOS { + case "windows": + return "USERPROFILE" + case "plan9": + return "home" + default: + return "HOME" + } +} + +// setTmpDir prepares the directory to use to save profiles retrieved +// remotely. It is selected from PPROF_TMPDIR, defaults to $HOME/pprof, and, if +// $HOME is not set, falls back to os.TempDir(). 
+func setTmpDir(ui plugin.UI) (string, error) { + var dirs []string + if profileDir := os.Getenv("PPROF_TMPDIR"); profileDir != "" { + dirs = append(dirs, profileDir) + } + if homeDir := os.Getenv(homeEnv()); homeDir != "" { + dirs = append(dirs, filepath.Join(homeDir, "pprof")) + } + dirs = append(dirs, os.TempDir()) + for _, tmpDir := range dirs { + if err := os.MkdirAll(tmpDir, 0755); err != nil { + ui.PrintErr("Could not use temp dir ", tmpDir, ": ", err.Error()) + continue + } + return tmpDir, nil + } + return "", fmt.Errorf("failed to identify temp dir") +} + +const testSourceAddress = "pproftest.local" + +// grabProfile fetches a profile. Returns the profile, sources for the +// profile mappings, a bool indicating if the profile was fetched +// remotely, and an error. +func grabProfile(s *source, source string, fetcher plugin.Fetcher, obj plugin.ObjTool, ui plugin.UI, tr http.RoundTripper) (p *profile.Profile, msrc plugin.MappingSources, remote bool, err error) { + var src string + duration, timeout := time.Duration(s.Seconds)*time.Second, time.Duration(s.Timeout)*time.Second + if fetcher != nil { + p, src, err = fetcher.Fetch(source, duration, timeout) + if err != nil { + return + } + } + if err != nil || p == nil { + // Fetch the profile over HTTP or from a file. + p, src, err = fetch(source, duration, timeout, ui, tr) + if err != nil { + return + } + } + + if err = p.CheckValid(); err != nil { + return + } + + // Update the binary locations from command line and paths. + locateBinaries(p, s, obj, ui) + + // Collect the source URL for all mappings. + if src != "" { + msrc = collectMappingSources(p, src) + remote = true + if strings.HasPrefix(src, "http://"+testSourceAddress) { + // Treat test inputs as local to avoid saving + // testcase profiles during driver testing. + remote = false + } + } + return +} + +// collectMappingSources saves the mapping sources of a profile. 
+func collectMappingSources(p *profile.Profile, source string) plugin.MappingSources { + ms := plugin.MappingSources{} + for _, m := range p.Mapping { + src := struct { + Source string + Start uint64 + }{ + source, m.Start, + } + key := m.BuildID + if key == "" { + key = m.File + } + if key == "" { + // If there is no build id or source file, use the source as the + // mapping file. This will enable remote symbolization for this + // mapping, in particular for Go profiles on the legacy format. + // The source is reset back to empty string by unsourceMapping + // which is called after symbolization is finished. + m.File = source + key = source + } + ms[key] = append(ms[key], src) + } + return ms +} + +// unsourceMappings iterates over the mappings in a profile and replaces file +// set to the remote source URL by collectMappingSources back to empty string. +func unsourceMappings(p *profile.Profile) { + for _, m := range p.Mapping { + if m.BuildID == "" && filepath.VolumeName(m.File) == "" { + if u, err := url.Parse(m.File); err == nil && u.IsAbs() { + m.File = "" + } + } + } +} + +// locateBinaries searches for binary files listed in the profile and, if found, +// updates the profile accordingly. 
+func locateBinaries(p *profile.Profile, s *source, obj plugin.ObjTool, ui plugin.UI) { + // Construct search path to examine + searchPath := os.Getenv("PPROF_BINARY_PATH") + if searchPath == "" { + // Use $HOME/pprof/binaries as default directory for local symbolization binaries + searchPath = filepath.Join(os.Getenv(homeEnv()), "pprof", "binaries") + } +mapping: + for _, m := range p.Mapping { + var noVolumeFile string + var baseName string + var dirName string + if m.File != "" { + noVolumeFile = strings.TrimPrefix(m.File, filepath.VolumeName(m.File)) + baseName = filepath.Base(m.File) + dirName = filepath.Dir(noVolumeFile) + } + + for _, path := range filepath.SplitList(searchPath) { + var fileNames []string + if m.BuildID != "" { + fileNames = []string{filepath.Join(path, m.BuildID, baseName)} + if matches, err := filepath.Glob(filepath.Join(path, m.BuildID, "*")); err == nil { + fileNames = append(fileNames, matches...) + } + fileNames = append(fileNames, filepath.Join(path, noVolumeFile, m.BuildID)) // perf path format + // Llvm buildid protocol: the first two characters of the build id + // are used as directory, and the remaining part is in the filename. + // e.g. `/ab/cdef0123456.debug` + fileNames = append(fileNames, filepath.Join(path, m.BuildID[:2], m.BuildID[2:]+".debug")) + } + if m.File != "" { + // Try both the basename and the full path, to support the same directory + // structure as the perf symfs option. 
+ fileNames = append(fileNames, filepath.Join(path, baseName)) + fileNames = append(fileNames, filepath.Join(path, noVolumeFile)) + // Other locations: use the same search paths as GDB, according to + // https://sourceware.org/gdb/onlinedocs/gdb/Separate-Debug-Files.html + fileNames = append(fileNames, filepath.Join(path, noVolumeFile+".debug")) + fileNames = append(fileNames, filepath.Join(path, dirName, ".debug", baseName+".debug")) + fileNames = append(fileNames, filepath.Join(path, "usr", "lib", "debug", dirName, baseName+".debug")) + } + for _, name := range fileNames { + if f, err := obj.Open(name, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol); err == nil { + defer f.Close() + fileBuildID := f.BuildID() + if m.BuildID != "" && m.BuildID != fileBuildID { + ui.PrintErr("Ignoring local file " + name + ": build-id mismatch (" + m.BuildID + " != " + fileBuildID + ")") + } else { + // Explicitly do not update KernelRelocationSymbol -- + // the new local file name is most likely missing it. + m.File = name + continue mapping + } + } + } + } + } + if len(p.Mapping) == 0 { + // If there are no mappings, add a fake mapping to attempt symbolization. + // This is useful for some profiles generated by the golang runtime, which + // do not include any mappings. Symbolization with a fake mapping will only + // be successful against a non-PIE binary. + m := &profile.Mapping{ID: 1} + p.Mapping = []*profile.Mapping{m} + for _, l := range p.Location { + l.Mapping = m + } + } + // If configured, apply executable filename override and (maybe, see below) + // build ID override from source. Assume the executable is the first mapping. + if execName, buildID := s.ExecName, s.BuildID; execName != "" || buildID != "" { + m := p.Mapping[0] + if execName != "" { + // Explicitly do not update KernelRelocationSymbol -- + // the source override is most likely missing it. 
+ m.File = execName + } + // Only apply the build ID override if the build ID in the main mapping is + // missing. Overwriting the build ID in case it's present is very likely a + // wrong thing to do so we refuse to do that. + if buildID != "" && m.BuildID == "" { + m.BuildID = buildID + } + } +} + +// fetch fetches a profile from source, within the timeout specified, +// producing messages through the ui. It returns the profile and the +// url of the actual source of the profile for remote profiles. +func fetch(source string, duration, timeout time.Duration, ui plugin.UI, tr http.RoundTripper) (p *profile.Profile, src string, err error) { + var f io.ReadCloser + + // First determine whether the source is a file, if not, it will be treated as a URL. + if _, err = os.Stat(source); err == nil { + if isPerfFile(source) { + f, err = convertPerfData(source, ui) + } else { + f, err = os.Open(source) + } + } else { + sourceURL, timeout := adjustURL(source, duration, timeout) + if sourceURL != "" { + ui.Print("Fetching profile over HTTP from " + sourceURL) + if duration > 0 { + ui.Print(fmt.Sprintf("Please wait... (%v)", duration)) + } + f, err = fetchURL(sourceURL, timeout, tr) + src = sourceURL + } + } + if err == nil { + defer f.Close() + p, err = profile.Parse(f) + } + return +} + +// fetchURL fetches a profile from a URL using HTTP. 
+func fetchURL(source string, timeout time.Duration, tr http.RoundTripper) (io.ReadCloser, error) { + client := &http.Client{ + Transport: tr, + Timeout: timeout + 5*time.Second, + } + resp, err := client.Get(source) + if err != nil { + return nil, fmt.Errorf("http fetch: %v", err) + } + if resp.StatusCode != http.StatusOK { + defer resp.Body.Close() + return nil, statusCodeError(resp) + } + + return resp.Body, nil +} + +func statusCodeError(resp *http.Response) error { + if resp.Header.Get("X-Go-Pprof") != "" && strings.Contains(resp.Header.Get("Content-Type"), "text/plain") { + // error is from pprof endpoint + if body, err := io.ReadAll(resp.Body); err == nil { + return fmt.Errorf("server response: %s - %s", resp.Status, body) + } + } + return fmt.Errorf("server response: %s", resp.Status) +} + +// isPerfFile checks if a file is in perf.data format. It also returns false +// if it encounters an error during the check. +func isPerfFile(path string) bool { + sourceFile, openErr := os.Open(path) + if openErr != nil { + return false + } + defer sourceFile.Close() + + // If the file is the output of a perf record command, it should begin + // with the string PERFILE2. + perfHeader := []byte("PERFILE2") + actualHeader := make([]byte, len(perfHeader)) + if _, readErr := sourceFile.Read(actualHeader); readErr != nil { + return false + } + return bytes.Equal(actualHeader, perfHeader) +} + +// convertPerfData converts the file at path which should be in perf.data format +// using the perf_to_profile tool and returns the file containing the +// profile.proto formatted data. +func convertPerfData(perfPath string, ui plugin.UI) (*os.File, error) { + ui.Print(fmt.Sprintf( + "Converting %s to a profile.proto... 
(May take a few minutes)", + perfPath)) + profile, err := newTempFile(os.TempDir(), "pprof_", ".pb.gz") + if err != nil { + return nil, err + } + deferDeleteTempFile(profile.Name()) + cmd := exec.Command("perf_to_profile", "-i", perfPath, "-o", profile.Name(), "-f") + cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr + if err := cmd.Run(); err != nil { + profile.Close() + return nil, fmt.Errorf("failed to convert perf.data file. Try github.com/google/perf_data_converter: %v", err) + } + return profile, nil +} + +// adjustURL validates if a profile source is a URL and returns an +// cleaned up URL and the timeout to use for retrieval over HTTP. +// If the source cannot be recognized as a URL it returns an empty string. +func adjustURL(source string, duration, timeout time.Duration) (string, time.Duration) { + u, err := url.Parse(source) + if err != nil || (u.Host == "" && u.Scheme != "" && u.Scheme != "file") { + // Try adding http:// to catch sources of the form hostname:port/path. + // url.Parse treats "hostname" as the scheme. + u, err = url.Parse("http://" + source) + } + if err != nil || u.Host == "" { + return "", 0 + } + + // Apply duration/timeout overrides to URL. + values := u.Query() + if duration > 0 { + values.Set("seconds", fmt.Sprint(int(duration.Seconds()))) + } else { + if urlSeconds := values.Get("seconds"); urlSeconds != "" { + if us, err := strconv.ParseInt(urlSeconds, 10, 32); err == nil { + duration = time.Duration(us) * time.Second + } + } + } + if timeout <= 0 { + if duration > 0 { + timeout = duration + duration/2 + } else { + timeout = 60 * time.Second + } + } + u.RawQuery = values.Encode() + return u.String(), timeout +} diff --git a/plugin/debug/pkg/internal/driver/fetch_test.go b/plugin/debug/pkg/internal/driver/fetch_test.go new file mode 100644 index 0000000..7727895 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/fetch_test.go @@ -0,0 +1,836 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "encoding/pem" + "fmt" + "math/big" + "net" + "net/http" + "os" + "path/filepath" + "reflect" + "regexp" + "runtime" + "strings" + "testing" + "time" + + "m7s.live/v5/plugin/debug/pkg/internal/binutils" + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/internal/proftest" + "m7s.live/v5/plugin/debug/pkg/internal/symbolizer" + "m7s.live/v5/plugin/debug/pkg/internal/transport" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +func TestSymbolizationPath(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("test assumes Unix paths") + } + + // Save environment variables to restore after test + saveHome := os.Getenv(homeEnv()) + savePath := os.Getenv("PPROF_BINARY_PATH") + + tempdir, err := os.MkdirTemp("", "home") + if err != nil { + t.Fatal("creating temp dir: ", err) + } + defer os.RemoveAll(tempdir) + os.MkdirAll(filepath.Join(tempdir, "pprof", "binaries", "abcde10001"), 0700) + os.Create(filepath.Join(tempdir, "pprof", "binaries", "abcde10001", "binary")) + + os.MkdirAll(filepath.Join(tempdir, "pprof", "binaries", "fg"), 0700) + os.Create(filepath.Join(tempdir, "pprof", "binaries", "fg", "hij10001.debug")) + + obj := testObj{tempdir} + os.Setenv(homeEnv(), tempdir) + for _, tc := range []struct { + env, file, buildID, want string + msgCount int + }{ + {"", 
"/usr/bin/binary", "", "/usr/bin/binary", 0}, + {"", "/usr/bin/binary", "fedcb10000", "/usr/bin/binary", 0}, + {"/usr", "/bin/binary", "", "/usr/bin/binary", 0}, + {"", "/prod/path/binary", "abcde10001", filepath.Join(tempdir, "pprof/binaries/abcde10001/binary"), 0}, + {"/alternate/architecture", "/usr/bin/binary", "", "/alternate/architecture/binary", 0}, + {"/alternate/architecture", "/usr/bin/binary", "abcde10001", "/alternate/architecture/binary", 0}, + {"", "", "fghij10001", filepath.Join(tempdir, "pprof/binaries/fg/hij10001.debug"), 0}, + {"/nowhere:/alternate/architecture", "/usr/bin/binary", "fedcb10000", "/usr/bin/binary", 1}, + {"/nowhere:/alternate/architecture", "/usr/bin/binary", "abcde10002", "/usr/bin/binary", 1}, + } { + os.Setenv("PPROF_BINARY_PATH", tc.env) + p := &profile.Profile{ + Mapping: []*profile.Mapping{ + { + File: tc.file, + BuildID: tc.buildID, + }, + }, + } + s := &source{} + locateBinaries(p, s, obj, &proftest.TestUI{T: t, Ignore: tc.msgCount}) + if file := p.Mapping[0].File; file != tc.want { + t.Errorf("%s:%s:%s, want %s, got %s", tc.env, tc.file, tc.buildID, tc.want, file) + } + } + os.Setenv(homeEnv(), saveHome) + os.Setenv("PPROF_BINARY_PATH", savePath) +} + +func TestCollectMappingSources(t *testing.T) { + const startAddress uint64 = 0x40000 + const url = "http://example.com" + for _, tc := range []struct { + file, buildID string + want plugin.MappingSources + }{ + {"/usr/bin/binary", "buildId", mappingSources("buildId", url, startAddress)}, + {"/usr/bin/binary", "", mappingSources("/usr/bin/binary", url, startAddress)}, + {"", "", mappingSources(url, url, startAddress)}, + } { + p := &profile.Profile{ + Mapping: []*profile.Mapping{ + { + File: tc.file, + BuildID: tc.buildID, + Start: startAddress, + }, + }, + } + got := collectMappingSources(p, url) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:%s, want %v, got %v", tc.file, tc.buildID, tc.want, got) + } + } +} + +func TestUnsourceMappings(t *testing.T) { + for _, tc 
:= range []struct { + os, file, buildID, want string + }{ + {"any", "/usr/bin/binary", "buildId", "/usr/bin/binary"}, + {"any", "http://example.com", "", ""}, + {"windows", `C:\example.exe`, "", `C:\example.exe`}, + {"windows", `c:/example.exe`, "", `c:/example.exe`}, + } { + t.Run(tc.file+"-"+tc.os, func(t *testing.T) { + if tc.os != "any" && tc.os != runtime.GOOS { + t.Skipf("%s only test", tc.os) + } + + p := &profile.Profile{ + Mapping: []*profile.Mapping{ + { + File: tc.file, + BuildID: tc.buildID, + }, + }, + } + unsourceMappings(p) + if got := p.Mapping[0].File; got != tc.want { + t.Errorf("%s:%s, want %s, got %s", tc.file, tc.buildID, tc.want, got) + } + }) + } +} + +type testObj struct { + home string +} + +func (o testObj) Open(file string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) { + switch file { + case "/alternate/architecture/binary": + return testFile{file, "abcde10001"}, nil + case "/usr/bin/binary": + return testFile{file, "fedcb10000"}, nil + case filepath.Join(o.home, "pprof/binaries/abcde10001/binary"): + return testFile{file, "abcde10001"}, nil + case filepath.Join(o.home, "pprof/binaries/fg/hij10001.debug"): + return testFile{file, "fghij10001"}, nil + } + return nil, fmt.Errorf("not found: %s", file) +} +func (testObj) Demangler(_ string) func(names []string) (map[string]string, error) { + return func(names []string) (map[string]string, error) { return nil, nil } +} +func (testObj) Disasm(file string, start, end uint64, intelSyntax bool) ([]plugin.Inst, error) { + return nil, nil +} + +type testFile struct{ name, buildID string } + +func (f testFile) Name() string { return f.name } +func (testFile) ObjAddr(addr uint64) (uint64, error) { return addr, nil } +func (f testFile) BuildID() string { return f.buildID } +func (testFile) SourceLine(addr uint64) ([]plugin.Frame, error) { return nil, nil } +func (testFile) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) { return nil, nil } +func 
(testFile) Close() error { return nil } + +func TestFetch(t *testing.T) { + const path = "testdata/" + type testcase struct { + source, execName string + wantErr bool + } + ts := []testcase{ + {path + "go.crc32.cpu", "", false}, + {path + "go.nomappings.crash", "/bin/gotest.exe", false}, + {"http://localhost/profile?file=cppbench.cpu", "", false}, + {"./missing", "", true}, + } + // Test that paths with a colon character are recognized as file paths + // if the file exists, rather than as a URL. We have to skip this test + // on Windows since the colon char is not allowed in Windows paths. + if runtime.GOOS != "windows" { + src := filepath.Join(path, "go.crc32.cpu") + dst := filepath.Join(t.TempDir(), "go.crc32.cpu_2023-11-11_01:02:03") + data, err := os.ReadFile(src) + if err != nil { + t.Fatalf("read src file %s failed: %#v", src, err) + } + err = os.WriteFile(dst, data, 0644) + if err != nil { + t.Fatalf("create dst file %s failed: %#v", dst, err) + } + ts = append(ts, testcase{dst, "", false}) + } + for _, tc := range ts { + t.Run(tc.source, func(t *testing.T) { + p, _, _, err := grabProfile(&source{ExecName: tc.execName}, tc.source, nil, testObj{}, &proftest.TestUI{T: t}, &httpTransport{}) + if tc.wantErr { + if err == nil { + t.Fatal("got no error, want an error") + } + return + } + if err != nil { + t.Fatalf("got error %v, want no error", err) + } + if len(p.Sample) == 0 { + t.Error("got zero samples, want non-zero") + } + if e := tc.execName; e != "" { + switch { + case len(p.Mapping) == 0 || p.Mapping[0] == nil: + t.Errorf("got no mappings, want mapping[0].execName == %s", e) + case p.Mapping[0].File != e: + t.Errorf("got mapping[0].execName == %s, want %s", p.Mapping[0].File, e) + } + } + }) + } +} + +func TestFetchWithBase(t *testing.T) { + baseConfig := currentConfig() + defer setCurrentConfig(baseConfig) + + type WantSample struct { + values []int64 + labels map[string][]string + } + + const path = "testdata/" + type testcase struct { + desc string + 
sources []string + bases []string + diffBases []string + normalize bool + wantSamples []WantSample + wantParseErrorMsg string + wantFetchErrorMsg string + } + + testcases := []testcase{ + { + "not normalized base is same as source", + []string{path + "cppbench.contention"}, + []string{path + "cppbench.contention"}, + nil, + false, + nil, + "", + "", + }, + { + "not normalized base is same as source", + []string{path + "cppbench.contention"}, + []string{path + "cppbench.contention"}, + nil, + false, + nil, + "", + "", + }, + { + "not normalized single source, multiple base (all profiles same)", + []string{path + "cppbench.contention"}, + []string{path + "cppbench.contention", path + "cppbench.contention"}, + nil, + false, + []WantSample{ + { + values: []int64{-2700, -608881724}, + labels: map[string][]string{}, + }, + { + values: []int64{-100, -23992}, + labels: map[string][]string{}, + }, + { + values: []int64{-200, -179943}, + labels: map[string][]string{}, + }, + { + values: []int64{-100, -17778444}, + labels: map[string][]string{}, + }, + { + values: []int64{-100, -75976}, + labels: map[string][]string{}, + }, + { + values: []int64{-300, -63568134}, + labels: map[string][]string{}, + }, + }, + "", + "", + }, + { + "not normalized, different base and source", + []string{path + "cppbench.contention"}, + []string{path + "cppbench.small.contention"}, + nil, + false, + []WantSample{ + { + values: []int64{1700, 608878600}, + labels: map[string][]string{}, + }, + { + values: []int64{100, 23992}, + labels: map[string][]string{}, + }, + { + values: []int64{200, 179943}, + labels: map[string][]string{}, + }, + { + values: []int64{100, 17778444}, + labels: map[string][]string{}, + }, + { + values: []int64{100, 75976}, + labels: map[string][]string{}, + }, + { + values: []int64{300, 63568134}, + labels: map[string][]string{}, + }, + }, + "", + "", + }, + { + "normalized base is same as source", + []string{path + "cppbench.contention"}, + []string{path + 
"cppbench.contention"}, + nil, + true, + nil, + "", + "", + }, + { + "normalized single source, multiple base (all profiles same)", + []string{path + "cppbench.contention"}, + []string{path + "cppbench.contention", path + "cppbench.contention"}, + nil, + true, + nil, + "", + "", + }, + { + "normalized different base and source", + []string{path + "cppbench.contention"}, + []string{path + "cppbench.small.contention"}, + nil, + true, + []WantSample{ + { + values: []int64{-229, -369}, + labels: map[string][]string{}, + }, + { + values: []int64{29, 0}, + labels: map[string][]string{}, + }, + { + values: []int64{57, 1}, + labels: map[string][]string{}, + }, + { + values: []int64{29, 80}, + labels: map[string][]string{}, + }, + { + values: []int64{29, 0}, + labels: map[string][]string{}, + }, + { + values: []int64{86, 288}, + labels: map[string][]string{}, + }, + }, + "", + "", + }, + { + "not normalized diff base is same as source", + []string{path + "cppbench.contention"}, + nil, + []string{path + "cppbench.contention"}, + false, + []WantSample{ + { + values: []int64{2700, 608881724}, + labels: map[string][]string{}, + }, + { + values: []int64{100, 23992}, + labels: map[string][]string{}, + }, + { + values: []int64{200, 179943}, + labels: map[string][]string{}, + }, + { + values: []int64{100, 17778444}, + labels: map[string][]string{}, + }, + { + values: []int64{100, 75976}, + labels: map[string][]string{}, + }, + { + values: []int64{300, 63568134}, + labels: map[string][]string{}, + }, + { + values: []int64{-2700, -608881724}, + labels: map[string][]string{"pprof::base": {"true"}}, + }, + { + values: []int64{-100, -23992}, + labels: map[string][]string{"pprof::base": {"true"}}, + }, + { + values: []int64{-200, -179943}, + labels: map[string][]string{"pprof::base": {"true"}}, + }, + { + values: []int64{-100, -17778444}, + labels: map[string][]string{"pprof::base": {"true"}}, + }, + { + values: []int64{-100, -75976}, + labels: map[string][]string{"pprof::base": 
{"true"}}, + }, + { + values: []int64{-300, -63568134}, + labels: map[string][]string{"pprof::base": {"true"}}, + }, + }, + "", + "", + }, + { + "diff_base and base both specified", + []string{path + "cppbench.contention"}, + []string{path + "cppbench.contention"}, + []string{path + "cppbench.contention"}, + false, + nil, + "-base and -diff_base flags cannot both be specified", + "", + }, + { + "input profiles with different sample types (non empty intersection)", + []string{path + "cppbench.cpu", path + "cppbench.cpu_no_samples_type"}, + []string{path + "cppbench.cpu", path + "cppbench.cpu_no_samples_type"}, + nil, + false, + nil, + "", + "", + }, + { + "input profiles with different sample types (empty intersection)", + []string{path + "cppbench.cpu", path + "cppbench.contention"}, + []string{path + "cppbench.cpu", path + "cppbench.contention"}, + nil, + false, + nil, + "", + "problem fetching source profiles: profiles have empty common sample type list", + }, + } + + for _, tc := range testcases { + t.Run(tc.desc, func(t *testing.T) { + setCurrentConfig(baseConfig) + f := testFlags{ + stringLists: map[string][]string{ + "base": tc.bases, + "diff_base": tc.diffBases, + }, + bools: map[string]bool{ + "normalize": tc.normalize, + }, + } + f.args = tc.sources + + o := setDefaults(&plugin.Options{ + UI: &proftest.TestUI{T: t, AllowRx: "Local symbolization failed|Some binary filenames not available"}, + Flagset: f, + HTTPTransport: transport.New(nil), + }) + src, _, err := parseFlags(o) + + if tc.wantParseErrorMsg != "" { + if err == nil { + t.Fatalf("got nil, want error %q", tc.wantParseErrorMsg) + } + + if gotErrMsg := err.Error(); gotErrMsg != tc.wantParseErrorMsg { + t.Fatalf("got error %q, want error %q", gotErrMsg, tc.wantParseErrorMsg) + } + return + } + + if err != nil { + t.Fatalf("got error %q, want no error", err) + } + + p, err := fetchProfiles(src, o) + + if tc.wantFetchErrorMsg != "" { + if err == nil { + t.Fatalf("got nil, want error %q", 
tc.wantFetchErrorMsg) + } + + if gotErrMsg := err.Error(); gotErrMsg != tc.wantFetchErrorMsg { + t.Fatalf("got error %q, want error %q", gotErrMsg, tc.wantFetchErrorMsg) + } + return + } + + if err != nil { + t.Fatalf("got error %q, want no error", err) + } + + if got, want := len(p.Sample), len(tc.wantSamples); got != want { + t.Fatalf("got %d samples want %d", got, want) + } + + for i, sample := range p.Sample { + if !reflect.DeepEqual(tc.wantSamples[i].values, sample.Value) { + t.Errorf("for sample %d got values %v, want %v", i, sample.Value, tc.wantSamples[i]) + } + if !reflect.DeepEqual(tc.wantSamples[i].labels, sample.Label) { + t.Errorf("for sample %d got labels %v, want %v", i, sample.Label, tc.wantSamples[i].labels) + } + } + }) + } +} + +// mappingSources creates MappingSources map with a single item. +func mappingSources(key, source string, start uint64) plugin.MappingSources { + return plugin.MappingSources{ + key: []struct { + Source string + Start uint64 + }{ + {Source: source, Start: start}, + }, + } +} + +type httpTransport struct{} + +func (tr *httpTransport) RoundTrip(req *http.Request) (*http.Response, error) { + values := req.URL.Query() + file := values.Get("file") + + if file == "" { + return nil, fmt.Errorf("want .../file?profile, got %s", req.URL.String()) + } + + t := &http.Transport{} + t.RegisterProtocol("file", http.NewFileTransport(http.Dir("testdata/"))) + + c := &http.Client{Transport: t} + return c.Get("file:///" + file) +} + +func closedError() string { + if runtime.GOOS == "plan9" { + return "listen hungup" + } + return "use of closed" +} + +func TestHTTPSInsecure(t *testing.T) { + if runtime.GOOS == "nacl" || runtime.GOOS == "js" { + t.Skip("test assumes tcp available") + } + saveHome := os.Getenv(homeEnv()) + tempdir, err := os.MkdirTemp("", "home") + if err != nil { + t.Fatal("creating temp dir: ", err) + } + defer os.RemoveAll(tempdir) + + // pprof writes to $HOME/pprof by default which is not necessarily + // writeable (e.g. 
on a Debian buildd) so set $HOME to something we + // know we can write to for the duration of the test. + os.Setenv(homeEnv(), tempdir) + defer os.Setenv(homeEnv(), saveHome) + + baseConfig := currentConfig() + defer setCurrentConfig(baseConfig) + + tlsCert, _, _ := selfSignedCert(t, "") + tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}} + + l, err := tls.Listen("tcp", "localhost:0", tlsConfig) + if err != nil { + t.Fatalf("net.Listen: got error %v, want no error", err) + } + + donec := make(chan error, 1) + go func(donec chan<- error) { + donec <- http.Serve(l, nil) + }(donec) + defer func() { + if got, want := <-donec, closedError(); !strings.Contains(got.Error(), want) { + t.Fatalf("Serve got error %v, want %q", got, want) + } + }() + defer l.Close() + + outputTempFile, err := os.CreateTemp("", "profile_output") + if err != nil { + t.Fatalf("Failed to create tempfile: %v", err) + } + defer os.Remove(outputTempFile.Name()) + defer outputTempFile.Close() + + address := "https+insecure://" + l.Addr().String() + "/debug/pprof/goroutine" + s := &source{ + Sources: []string{address}, + Timeout: 10, + Symbolize: "remote", + } + o := &plugin.Options{ + Obj: &binutils.Binutils{}, + UI: &proftest.TestUI{T: t, AllowRx: "Saved profile in"}, + HTTPTransport: transport.New(nil), + } + o.Sym = &symbolizer.Symbolizer{Obj: o.Obj, UI: o.UI} + p, err := fetchProfiles(s, o) + if err != nil { + t.Fatal(err) + } + if len(p.SampleType) == 0 { + t.Fatalf("fetchProfiles(%s) got empty profile: len(p.SampleType)==0", address) + } + if len(p.Function) == 0 { + t.Fatalf("fetchProfiles(%s) got non-symbolized profile: len(p.Function)==0", address) + } + if err := checkProfileHasFunction(p, "TestHTTPSInsecure"); err != nil { + t.Fatalf("fetchProfiles(%s) %v", address, err) + } +} + +func TestHTTPSWithServerCertFetch(t *testing.T) { + if runtime.GOOS == "nacl" || runtime.GOOS == "js" { + t.Skip("test assumes tcp available") + } + saveHome := os.Getenv(homeEnv()) + tempdir, 
err := os.MkdirTemp("", "home") + if err != nil { + t.Fatal("creating temp dir: ", err) + } + defer os.RemoveAll(tempdir) + + // pprof writes to $HOME/pprof by default which is not necessarily + // writeable (e.g. on a Debian buildd) so set $HOME to something we + // know we can write to for the duration of the test. + os.Setenv(homeEnv(), tempdir) + defer os.Setenv(homeEnv(), saveHome) + + baseConfig := currentConfig() + defer setCurrentConfig(baseConfig) + + cert, certBytes, keyBytes := selfSignedCert(t, "localhost") + cas := x509.NewCertPool() + cas.AppendCertsFromPEM(certBytes) + + tlsConfig := &tls.Config{ + RootCAs: cas, + Certificates: []tls.Certificate{cert}, + ClientAuth: tls.RequireAndVerifyClientCert, + ClientCAs: cas, + } + + l, err := tls.Listen("tcp", "localhost:0", tlsConfig) + if err != nil { + t.Fatalf("net.Listen: got error %v, want no error", err) + } + + donec := make(chan error, 1) + go func(donec chan<- error) { + donec <- http.Serve(l, nil) + }(donec) + defer func() { + if got, want := <-donec, closedError(); !strings.Contains(got.Error(), want) { + t.Fatalf("Serve got error %v, want %q", got, want) + } + }() + defer l.Close() + + outputTempFile, err := os.CreateTemp("", "profile_output") + if err != nil { + t.Fatalf("Failed to create tempfile: %v", err) + } + defer os.Remove(outputTempFile.Name()) + defer outputTempFile.Close() + + // Get port from the address, so request to the server can be made using + // the host name specified in certificates. 
+ _, portStr, err := net.SplitHostPort(l.Addr().String()) + if err != nil { + t.Fatalf("cannot get port from URL: %v", err) + } + address := "https://" + "localhost:" + portStr + "/debug/pprof/goroutine" + s := &source{ + Sources: []string{address}, + Timeout: 10, + Symbolize: "remote", + } + + certTempFile, err := os.CreateTemp("", "cert_output") + if err != nil { + t.Errorf("cannot create cert tempfile: %v", err) + } + defer os.Remove(certTempFile.Name()) + defer certTempFile.Close() + certTempFile.Write(certBytes) + + keyTempFile, err := os.CreateTemp("", "key_output") + if err != nil { + t.Errorf("cannot create key tempfile: %v", err) + } + defer os.Remove(keyTempFile.Name()) + defer keyTempFile.Close() + keyTempFile.Write(keyBytes) + + f := &testFlags{ + strings: map[string]string{ + "tls_cert": certTempFile.Name(), + "tls_key": keyTempFile.Name(), + "tls_ca": certTempFile.Name(), + }, + } + o := &plugin.Options{ + Obj: &binutils.Binutils{}, + UI: &proftest.TestUI{T: t, AllowRx: "Saved profile in"}, + Flagset: f, + HTTPTransport: transport.New(f), + } + + o.Sym = &symbolizer.Symbolizer{Obj: o.Obj, UI: o.UI, Transport: o.HTTPTransport} + p, err := fetchProfiles(s, o) + if err != nil { + t.Fatal(err) + } + if len(p.SampleType) == 0 { + t.Fatalf("fetchProfiles(%s) got empty profile: len(p.SampleType)==0", address) + } + if len(p.Function) == 0 { + t.Fatalf("fetchProfiles(%s) got non-symbolized profile: len(p.Function)==0", address) + } + if err := checkProfileHasFunction(p, "TestHTTPSWithServerCertFetch"); err != nil { + t.Fatalf("fetchProfiles(%s) %v", address, err) + } +} + +func checkProfileHasFunction(p *profile.Profile, fname string) error { + for _, f := range p.Function { + if strings.Contains(f.Name, fname) { + return nil + } + } + return fmt.Errorf("got %s, want function %q", p.String(), fname) +} + +// selfSignedCert generates a self-signed certificate, and returns the +// generated certificate, and byte arrays containing the certificate and +// key 
associated with the certificate. +func selfSignedCert(t *testing.T, host string) (tls.Certificate, []byte, []byte) { + privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("failed to generate private key: %v", err) + } + b, err := x509.MarshalECPrivateKey(privKey) + if err != nil { + t.Fatalf("failed to marshal private key: %v", err) + } + bk := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: b}) + + tmpl := x509.Certificate{ + SerialNumber: big.NewInt(1), + NotBefore: time.Now(), + NotAfter: time.Now().Add(10 * time.Minute), + IsCA: true, + DNSNames: []string{host}, + } + + b, err = x509.CreateCertificate(rand.Reader, &tmpl, &tmpl, privKey.Public(), privKey) + if err != nil { + t.Fatalf("failed to create cert: %v", err) + } + bc := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: b}) + + cert, err := tls.X509KeyPair(bc, bk) + if err != nil { + t.Fatalf("failed to create TLS key pair: %v", err) + } + return cert, bc, bk +} diff --git a/plugin/debug/pkg/internal/driver/flags.go b/plugin/debug/pkg/internal/driver/flags.go new file mode 100644 index 0000000..5390319 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/flags.go @@ -0,0 +1,71 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "flag" + "strings" +) + +// GoFlags implements the plugin.FlagSet interface. 
+type GoFlags struct { + UsageMsgs []string +} + +// Bool implements the plugin.FlagSet interface. +func (*GoFlags) Bool(o string, d bool, c string) *bool { + return flag.Bool(o, d, c) +} + +// Int implements the plugin.FlagSet interface. +func (*GoFlags) Int(o string, d int, c string) *int { + return flag.Int(o, d, c) +} + +// Float64 implements the plugin.FlagSet interface. +func (*GoFlags) Float64(o string, d float64, c string) *float64 { + return flag.Float64(o, d, c) +} + +// String implements the plugin.FlagSet interface. +func (*GoFlags) String(o, d, c string) *string { + return flag.String(o, d, c) +} + +// StringList implements the plugin.FlagSet interface. +func (*GoFlags) StringList(o, d, c string) *[]*string { + return &[]*string{flag.String(o, d, c)} +} + +// ExtraUsage implements the plugin.FlagSet interface. +func (f *GoFlags) ExtraUsage() string { + return strings.Join(f.UsageMsgs, "\n") +} + +// AddExtraUsage implements the plugin.FlagSet interface. +func (f *GoFlags) AddExtraUsage(eu string) { + f.UsageMsgs = append(f.UsageMsgs, eu) +} + +// Parse implements the plugin.FlagSet interface. 
+func (*GoFlags) Parse(usage func()) []string { + flag.Usage = usage + flag.Parse() + args := flag.Args() + if len(args) == 0 { + usage() + } + return args +} diff --git a/plugin/debug/pkg/internal/driver/html/common.css b/plugin/debug/pkg/internal/driver/html/common.css new file mode 100644 index 0000000..0a897ce --- /dev/null +++ b/plugin/debug/pkg/internal/driver/html/common.css @@ -0,0 +1,279 @@ +* { + margin: 0; + padding: 0; + box-sizing: border-box; +} +html, body { + height: 100%; +} +body { + font-family: 'Roboto', -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; + font-size: 13px; + line-height: 1.4; + display: flex; + flex-direction: column; +} +a { + color: #2a66d9; +} +.header { + display: flex; + align-items: center; + height: 44px; + min-height: 44px; + background-color: #eee; + color: #212121; + padding: 0 1rem; +} +.header > div { + margin: 0 0.125em; +} +.header .title h1 { + font-size: 1.75em; + margin-right: 1rem; + margin-bottom: 4px; +} +.header .title a { + color: #212121; + text-decoration: none; +} +.header .title a:hover { + text-decoration: underline; +} +.header .description { + width: 100%; + text-align: right; + white-space: nowrap; +} +@media screen and (max-width: 799px) { + .header input { + display: none; + } +} +#detailsbox { + display: none; + position: fixed; + top: 40px; + right: 20px; + background-color: #ffffff; + box-shadow: 0 1px 5px rgba(0,0,0,.3); + line-height: 24px; + padding: 1em; + text-align: left; +} +.header input { + background: white url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24' style='pointer-events:none;display:block;width:100%25;height:100%25;fill:%23757575'%3E%3Cpath d='M15.5 14h-.79l-.28-.27C15.41 12.59 16 11.11 16 9.5 16 5.91 13.09 3 9.5 3S3 5.91 3 9.5 5.91 16 9.5 16c1.61.0 3.09-.59 4.23-1.57l.27.28v.79l5 4.99L20.49 19l-4.99-5zm-6 0C7.01 14 5 11.99 5 9.5S7.01 5 9.5 5 14 7.01 14 9.5 
11.99 14 9.5 14z'/%3E%3C/svg%3E") no-repeat 4px center/20px 20px; + border: 1px solid #d1d2d3; + border-radius: 2px 0 0 2px; + padding: 0.25em; + padding-left: 28px; + margin-left: 1em; + font-family: 'Roboto', 'Noto', sans-serif; + font-size: 1em; + line-height: 24px; + color: #212121; +} +.downArrow { + border-top: .36em solid #ccc; + border-left: .36em solid transparent; + border-right: .36em solid transparent; + margin-bottom: .05em; + margin-left: .5em; + transition: border-top-color 200ms; +} +.menu-item { + height: 100%; + text-transform: uppercase; + font-family: 'Roboto Medium', -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; + position: relative; +} +.menu-item .menu-name:hover { + opacity: 0.75; +} +.menu-item .menu-name:hover .downArrow { + border-top-color: #666; +} +.menu-name { + height: 100%; + padding: 0 0.5em; + display: flex; + align-items: center; + justify-content: center; +} +.menu-name a { + text-decoration: none; + color: #212121; +} +.submenu { + display: none; + margin-top: -4px; + min-width: 10em; + position: absolute; + left: 0px; + background-color: white; + box-shadow: 0 1px 5px rgba(0,0,0,.3); + font-size: 100%; + text-transform: none; + white-space: nowrap; +} +.menu-item, .submenu { + user-select: none; + -moz-user-select: none; + -ms-user-select: none; + -webkit-user-select: none; +} +.submenu hr { + border: 0; + border-top: 2px solid #eee; +} +.submenu a { + display: block; + padding: .5em 1em; + text-decoration: none; +} +.submenu a:hover, .submenu a.active { + color: white; + background-color: #6b82d6; +} +.submenu a.disabled { + color: gray; + pointer-events: none; +} +.menu-check-mark { + position: absolute; + left: 2px; +} +.menu-delete-btn { + position: absolute; + right: 2px; +} + +.help { + padding-left: 1em; +} + +{{/* Used to disable events when a modal dialog is displayed */}} +#dialog-overlay { + display: none; + position: fixed; + 
left: 0px; + top: 0px; + width: 100%; + height: 100%; + background-color: rgba(1,1,1,0.1); +} + +.dialog { + {{/* Displayed centered horizontally near the top */}} + display: none; + position: fixed; + margin: 0px; + top: 60px; + left: 50%; + transform: translateX(-50%); + font-size: 125%; + background-color: #ffffff; + box-shadow: 0 1px 5px rgba(0,0,0,.3); +} +.dialog-header { + font-size: 120%; + border-bottom: 1px solid #CCCCCC; + width: 100%; + text-align: center; + background: #EEEEEE; + user-select: none; +} +.dialog-footer { + border-top: 1px solid #CCCCCC; + width: 100%; + text-align: right; + padding: 10px; +} +.dialog-error { + margin: 10px; + color: red; +} +.dialog input { + margin: 10px; + font-size: inherit; +} +.dialog button { + margin-left: 10px; + font-size: inherit; +} +#save-dialog, #delete-dialog { + width: 50%; + max-width: 20em; +} +#delete-prompt { + padding: 10px; +} + +#content { + overflow-y: scroll; + padding: 1em; +} +#top { + overflow-y: scroll; +} +#graph { + overflow: hidden; +} +#graph svg { + width: 100%; + height: auto; + padding: 10px; +} +#content.source .filename { + margin-top: 0; + margin-bottom: 1em; + font-size: 120%; +} +#content.source pre { + margin-bottom: 3em; +} +table { + border-spacing: 0px; + width: 100%; + padding-bottom: 1em; + white-space: nowrap; +} +table thead { + font-family: 'Roboto Medium', -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji', 'Segoe UI Symbol'; +} +table tr th { + position: sticky; + top: 0; + background-color: #ddd; + text-align: right; + padding: .3em .5em; +} +table tr td { + padding: .3em .5em; + text-align: right; +} +#top table tr th:nth-child(6), +#top table tr th:nth-child(7), +#top table tr td:nth-child(6), +#top table tr td:nth-child(7) { + text-align: left; +} +#top table tr td:nth-child(6) { + width: 100%; + text-overflow: ellipsis; + overflow: hidden; + white-space: nowrap; +} +#flathdr1, #flathdr2, #cumhdr1, 
#cumhdr2, #namehdr { + cursor: ns-resize; +} +.hilite { + background-color: #ebf5fb; + font-weight: bold; +} +/* stacking order */ +.boxtext { z-index: 2; } /* flame graph box text */ +#current-details { z-index: 2; } /* flame graph current box info */ +#detailsbox { z-index: 3; } /* profile details */ +.submenu { z-index: 4; } +.dialog { z-index: 5; } diff --git a/plugin/debug/pkg/internal/driver/html/common.js b/plugin/debug/pkg/internal/driver/html/common.js new file mode 100644 index 0000000..4a2067e --- /dev/null +++ b/plugin/debug/pkg/internal/driver/html/common.js @@ -0,0 +1,714 @@ +// Make svg pannable and zoomable. +// Call clickHandler(t) if a click event is caught by the pan event handlers. +function initPanAndZoom(svg, clickHandler) { + 'use strict'; + + // Current mouse/touch handling mode + const IDLE = 0; + const MOUSEPAN = 1; + const TOUCHPAN = 2; + const TOUCHZOOM = 3; + let mode = IDLE; + + // State needed to implement zooming. + let currentScale = 1.0; + const initWidth = svg.viewBox.baseVal.width; + const initHeight = svg.viewBox.baseVal.height; + + // State needed to implement panning. + let panLastX = 0; // Last event X coordinate + let panLastY = 0; // Last event Y coordinate + let moved = false; // Have we seen significant movement + let touchid = null; // Current touch identifier + + // State needed for pinch zooming + let touchid2 = null; // Second id for pinch zooming + let initGap = 1.0; // Starting gap between two touches + let initScale = 1.0; // currentScale when pinch zoom started + let centerPoint = null; // Center point for scaling + + // Convert event coordinates to svg coordinates. + function toSvg(x, y) { + const p = svg.createSVGPoint(); + p.x = x; + p.y = y; + let m = svg.getCTM(); + if (m == null) m = svg.getScreenCTM(); // Firefox workaround. + return p.matrixTransform(m.inverse()); + } + + // Change the scaling for the svg to s, keeping the point denoted + // by u (in svg coordinates]) fixed at the same screen location. 
+ function rescale(s, u) { + // Limit to a good range. + if (s < 0.2) s = 0.2; + if (s > 10.0) s = 10.0; + + currentScale = s; + + // svg.viewBox defines the visible portion of the user coordinate + // system. So to magnify by s, divide the visible portion by s, + // which will then be stretched to fit the viewport. + const vb = svg.viewBox; + const w1 = vb.baseVal.width; + const w2 = initWidth / s; + const h1 = vb.baseVal.height; + const h2 = initHeight / s; + vb.baseVal.width = w2; + vb.baseVal.height = h2; + + // We also want to adjust vb.baseVal.x so that u.x remains at same + // screen X coordinate. In other words, want to change it from x1 to x2 + // so that: + // (u.x - x1) / w1 = (u.x - x2) / w2 + // Simplifying that, we get + // (u.x - x1) * (w2 / w1) = u.x - x2 + // x2 = u.x - (u.x - x1) * (w2 / w1) + vb.baseVal.x = u.x - (u.x - vb.baseVal.x) * (w2 / w1); + vb.baseVal.y = u.y - (u.y - vb.baseVal.y) * (h2 / h1); + } + + function handleWheel(e) { + if (e.deltaY == 0) return; + // Change scale factor by 1.1 or 1/1.1 + rescale(currentScale * (e.deltaY < 0 ? 1.1 : (1/1.1)), + toSvg(e.offsetX, e.offsetY)); + } + + function setMode(m) { + mode = m; + touchid = null; + touchid2 = null; + } + + function panStart(x, y) { + moved = false; + panLastX = x; + panLastY = y; + } + + function panMove(x, y) { + let dx = x - panLastX; + let dy = y - panLastY; + if (Math.abs(dx) <= 2 && Math.abs(dy) <= 2) return; // Ignore tiny moves + + moved = true; + panLastX = x; + panLastY = y; + + // Firefox workaround: get dimensions from parentNode. + const swidth = svg.clientWidth || svg.parentNode.clientWidth; + const sheight = svg.clientHeight || svg.parentNode.clientHeight; + + // Convert deltas from screen space to svg space. 
+ dx *= (svg.viewBox.baseVal.width / swidth); + dy *= (svg.viewBox.baseVal.height / sheight); + + svg.viewBox.baseVal.x -= dx; + svg.viewBox.baseVal.y -= dy; + } + + function handleScanStart(e) { + if (e.button != 0) return; // Do not catch right-clicks etc. + setMode(MOUSEPAN); + panStart(e.clientX, e.clientY); + e.preventDefault(); + svg.addEventListener('mousemove', handleScanMove); + } + + function handleScanMove(e) { + if (e.buttons == 0) { + // Missed an end event, perhaps because mouse moved outside window. + setMode(IDLE); + svg.removeEventListener('mousemove', handleScanMove); + return; + } + if (mode == MOUSEPAN) panMove(e.clientX, e.clientY); + } + + function handleScanEnd(e) { + if (mode == MOUSEPAN) panMove(e.clientX, e.clientY); + setMode(IDLE); + svg.removeEventListener('mousemove', handleScanMove); + if (!moved) clickHandler(e.target); + } + + // Find touch object with specified identifier. + function findTouch(tlist, id) { + for (const t of tlist) { + if (t.identifier == id) return t; + } + return null; + } + + // Return distance between two touch points + function touchGap(t1, t2) { + const dx = t1.clientX - t2.clientX; + const dy = t1.clientY - t2.clientY; + return Math.hypot(dx, dy); + } + + function handleTouchStart(e) { + if (mode == IDLE && e.changedTouches.length == 1) { + // Start touch based panning + const t = e.changedTouches[0]; + setMode(TOUCHPAN); + touchid = t.identifier; + panStart(t.clientX, t.clientY); + e.preventDefault(); + } else if (mode == TOUCHPAN && e.touches.length == 2) { + // Start pinch zooming + setMode(TOUCHZOOM); + const t1 = e.touches[0]; + const t2 = e.touches[1]; + touchid = t1.identifier; + touchid2 = t2.identifier; + initScale = currentScale; + initGap = touchGap(t1, t2); + centerPoint = toSvg((t1.clientX + t2.clientX) / 2, + (t1.clientY + t2.clientY) / 2); + e.preventDefault(); + } + } + + function handleTouchMove(e) { + if (mode == TOUCHPAN) { + const t = findTouch(e.changedTouches, touchid); + if (t == null) 
return; + if (e.touches.length != 1) { + setMode(IDLE); + return; + } + panMove(t.clientX, t.clientY); + e.preventDefault(); + } else if (mode == TOUCHZOOM) { + // Get two touches; new gap; rescale to ratio. + const t1 = findTouch(e.touches, touchid); + const t2 = findTouch(e.touches, touchid2); + if (t1 == null || t2 == null) return; + const gap = touchGap(t1, t2); + rescale(initScale * gap / initGap, centerPoint); + e.preventDefault(); + } + } + + function handleTouchEnd(e) { + if (mode == TOUCHPAN) { + const t = findTouch(e.changedTouches, touchid); + if (t == null) return; + panMove(t.clientX, t.clientY); + setMode(IDLE); + e.preventDefault(); + if (!moved) clickHandler(t.target); + } else if (mode == TOUCHZOOM) { + setMode(IDLE); + e.preventDefault(); + } + } + + svg.addEventListener('mousedown', handleScanStart); + svg.addEventListener('mouseup', handleScanEnd); + svg.addEventListener('touchstart', handleTouchStart); + svg.addEventListener('touchmove', handleTouchMove); + svg.addEventListener('touchend', handleTouchEnd); + svg.addEventListener('wheel', handleWheel, true); +} + +function initMenus() { + 'use strict'; + + let activeMenu = null; + let activeMenuHdr = null; + + function cancelActiveMenu() { + if (activeMenu == null) return; + activeMenu.style.display = 'none'; + activeMenu = null; + activeMenuHdr = null; + } + + // Set click handlers on every menu header. + for (const menu of document.getElementsByClassName('submenu')) { + const hdr = menu.parentElement; + if (hdr == null) return; + if (hdr.classList.contains('disabled')) return; + function showMenu(e) { + // menu is a child of hdr, so this event can fire for clicks + // inside menu. Ignore such clicks. 
+ if (e.target.parentElement != hdr) return; + activeMenu = menu; + activeMenuHdr = hdr; + menu.style.display = 'block'; + } + hdr.addEventListener('mousedown', showMenu); + hdr.addEventListener('touchstart', showMenu); + } + + // If there is an active menu and a down event outside, retract the menu. + for (const t of ['mousedown', 'touchstart']) { + document.addEventListener(t, (e) => { + // Note: to avoid unnecessary flicker, if the down event is inside + // the active menu header, do not retract the menu. + if (activeMenuHdr != e.target.closest('.menu-item')) { + cancelActiveMenu(); + } + }, { passive: true, capture: true }); + } + + // If there is an active menu and an up event inside, retract the menu. + document.addEventListener('mouseup', (e) => { + if (activeMenu == e.target.closest('.submenu')) { + cancelActiveMenu(); + } + }, { passive: true, capture: true }); +} + +function sendURL(method, url, done) { + fetch(url.toString(), {method: method}) + .then((response) => { done(response.ok); }) + .catch((error) => { done(false); }); +} + +// Initialize handlers for saving/loading configurations. +function initConfigManager() { + 'use strict'; + + // Initialize various elements. 
+ function elem(id) { + const result = document.getElementById(id); + if (!result) console.warn('element ' + id + ' not found'); + return result; + } + const overlay = elem('dialog-overlay'); + const saveDialog = elem('save-dialog'); + const saveInput = elem('save-name'); + const saveError = elem('save-error'); + const delDialog = elem('delete-dialog'); + const delPrompt = elem('delete-prompt'); + const delError = elem('delete-error'); + + let currentDialog = null; + let currentDeleteTarget = null; + + function showDialog(dialog) { + if (currentDialog != null) { + overlay.style.display = 'none'; + currentDialog.style.display = 'none'; + } + currentDialog = dialog; + if (dialog != null) { + overlay.style.display = 'block'; + dialog.style.display = 'block'; + } + } + + function cancelDialog(e) { + showDialog(null); + } + + // Show dialog for saving the current config. + function showSaveDialog(e) { + saveError.innerText = ''; + showDialog(saveDialog); + saveInput.focus(); + } + + // Commit save config. + function commitSave(e) { + const name = saveInput.value; + const url = new URL(document.URL); + // Set path relative to existing path. 
+ url.pathname = new URL('./saveconfig', document.URL).pathname; + url.searchParams.set('config', name); + saveError.innerText = ''; + sendURL('POST', url, (ok) => { + if (!ok) { + saveError.innerText = 'Save failed'; + } else { + showDialog(null); + location.reload(); // Reload to show updated config menu + } + }); + } + + function handleSaveInputKey(e) { + if (e.key === 'Enter') commitSave(e); + } + + function deleteConfig(e, elem) { + e.preventDefault(); + const config = elem.dataset.config; + delPrompt.innerText = 'Delete ' + config + '?'; + currentDeleteTarget = elem; + showDialog(delDialog); + } + + function commitDelete(e, elem) { + if (!currentDeleteTarget) return; + const config = currentDeleteTarget.dataset.config; + const url = new URL('./deleteconfig', document.URL); + url.searchParams.set('config', config); + delError.innerText = ''; + sendURL('DELETE', url, (ok) => { + if (!ok) { + delError.innerText = 'Delete failed'; + return; + } + showDialog(null); + // Remove menu entry for this config. + if (currentDeleteTarget && currentDeleteTarget.parentElement) { + currentDeleteTarget.parentElement.remove(); + } + }); + } + + // Bind event on elem to fn. + function bind(event, elem, fn) { + if (elem == null) return; + elem.addEventListener(event, fn); + if (event == 'click') { + // Also enable via touch. + elem.addEventListener('touchstart', fn); + } + } + + bind('click', elem('save-config'), showSaveDialog); + bind('click', elem('save-cancel'), cancelDialog); + bind('click', elem('save-confirm'), commitSave); + bind('keydown', saveInput, handleSaveInputKey); + + bind('click', elem('delete-cancel'), cancelDialog); + bind('click', elem('delete-confirm'), commitDelete); + + // Activate deletion button for all config entries in menu. 
+ for (const del of Array.from(document.getElementsByClassName('menu-delete-btn'))) { + bind('click', del, (e) => { + deleteConfig(e, del); + }); + } +} + +// options if present can contain: +// hiliter: function(Number, Boolean): Boolean +// Overridable mechanism for highlighting/unhighlighting specified node. +// current: function() Map[Number,Boolean] +// Overridable mechanism for fetching set of currently selected nodes. +function viewer(baseUrl, nodes, options) { + 'use strict'; + + // Elements + const search = document.getElementById('search'); + const graph0 = document.getElementById('graph0'); + const svg = (graph0 == null ? null : graph0.parentElement); + const toptable = document.getElementById('toptable'); + + let regexpActive = false; + let selected = new Map(); + let origFill = new Map(); + let searchAlarm = null; + let buttonsEnabled = true; + + // Return current selection. + function getSelection() { + if (selected.size > 0) { + return selected; + } else if (options && options.current) { + return options.current(); + } + return new Map(); + } + + function handleDetails(e) { + e.preventDefault(); + const detailsText = document.getElementById('detailsbox'); + if (detailsText != null) { + if (detailsText.style.display === 'block') { + detailsText.style.display = 'none'; + } else { + detailsText.style.display = 'block'; + } + } + } + + function handleKey(e) { + if (e.keyCode != 13) return; + setHrefParams(window.location, function (params) { + params.set('f', search.value); + }); + e.preventDefault(); + } + + function handleSearch() { + // Delay expensive processing so a flurry of key strokes is handled once. 
+ if (searchAlarm != null) { + clearTimeout(searchAlarm); + } + searchAlarm = setTimeout(selectMatching, 300); + + regexpActive = true; + updateButtons(); + } + + function selectMatching() { + searchAlarm = null; + let re = null; + if (search.value != '') { + try { + re = new RegExp(search.value); + } catch (e) { + // TODO: Display error state in search box + return; + } + } + + function match(text) { + return re != null && re.test(text); + } + + // drop currently selected items that do not match re. + selected.forEach(function(v, n) { + if (!match(nodes[n])) { + unselect(n); + } + }) + + // add matching items that are not currently selected. + if (nodes) { + for (let n = 0; n < nodes.length; n++) { + if (!selected.has(n) && match(nodes[n])) { + select(n); + } + } + } + + updateButtons(); + } + + function toggleSvgSelect(elem) { + // Walk up to immediate child of graph0 + while (elem != null && elem.parentElement != graph0) { + elem = elem.parentElement; + } + if (!elem) return; + + // Disable regexp mode. + regexpActive = false; + + const n = nodeId(elem); + if (n < 0) return; + if (selected.has(n)) { + unselect(n); + } else { + select(n); + } + updateButtons(); + } + + function unselect(n) { + if (setNodeHighlight(n, false)) selected.delete(n); + } + + function select(n, elem) { + if (setNodeHighlight(n, true)) selected.set(n, true); + } + + function nodeId(elem) { + const id = elem.id; + if (!id) return -1; + if (!id.startsWith('node')) return -1; + const n = parseInt(id.slice(4), 10); + if (isNaN(n)) return -1; + if (n < 0 || n >= nodes.length) return -1; + return n; + } + + // Change highlighting of node (returns true if node was found). + function setNodeHighlight(n, set) { + if (options && options.hiliter) return options.hiliter(n, set); + + const elem = document.getElementById('node' + n); + if (!elem) return false; + + // Handle table row highlighting. 
+ if (elem.nodeName == 'TR') { + elem.classList.toggle('hilite', set); + return true; + } + + // Handle svg element highlighting. + const p = findPolygon(elem); + if (p != null) { + if (set) { + origFill.set(p, p.style.fill); + p.style.fill = '#ccccff'; + } else if (origFill.has(p)) { + p.style.fill = origFill.get(p); + } + } + + return true; + } + + function findPolygon(elem) { + if (elem.localName == 'polygon') return elem; + for (const c of elem.children) { + const p = findPolygon(c); + if (p != null) return p; + } + return null; + } + + function setSampleIndexLink(si) { + const elem = document.getElementById('sampletype-' + si); + if (elem != null) { + setHrefParams(elem, function (params) { + params.set("si", si); + }); + } + } + + // Update id's href to reflect current selection whenever it is + // liable to be followed. + function makeSearchLinkDynamic(id) { + const elem = document.getElementById(id); + if (elem == null) return; + + // Most links copy current selection into the 'f' parameter, + // but Refine menu links are different. + let param = 'f'; + if (id == 'ignore') param = 'i'; + if (id == 'hide') param = 'h'; + if (id == 'show') param = 's'; + if (id == 'show-from') param = 'sf'; + + // We update on mouseenter so middle-click/right-click work properly. + elem.addEventListener('mouseenter', updater); + elem.addEventListener('touchstart', updater); + + function updater() { + // The selection can be in one of two modes: regexp-based or + // list-based. Construct regular expression depending on mode. + let re = regexpActive + ? search.value + : Array.from(getSelection().keys()).map(key => pprofQuoteMeta(nodes[key])).join('|'); + + setHrefParams(elem, function (params) { + if (re != '') { + // For focus/show/show-from, forget old parameter. For others, add to re. 
+ if (param != 'f' && param != 's' && param != 'sf' && params.has(param)) { + const old = params.get(param); + if (old != '') { + re += '|' + old; + } + } + params.set(param, re); + } else { + params.delete(param); + } + }); + } + } + + function setHrefParams(elem, paramSetter) { + let url = new URL(elem.href); + url.hash = ''; + + // Copy params from this page's URL. + const params = url.searchParams; + for (const p of new URLSearchParams(window.location.search)) { + params.set(p[0], p[1]); + } + + // Give the params to the setter to modify. + paramSetter(params); + + elem.href = url.toString(); + } + + function handleTopClick(e) { + // Walk back until we find TR and then get the Name column (index 5) + let elem = e.target; + while (elem != null && elem.nodeName != 'TR') { + elem = elem.parentElement; + } + if (elem == null || elem.children.length < 6) return; + + e.preventDefault(); + const tr = elem; + const td = elem.children[5]; + if (td.nodeName != 'TD') return; + const name = td.innerText; + const index = nodes.indexOf(name); + if (index < 0) return; + + // Disable regexp mode. 
+ regexpActive = false; + + if (selected.has(index)) { + unselect(index, elem); + } else { + select(index, elem); + } + updateButtons(); + } + + function updateButtons() { + const enable = (search.value != '' || getSelection().size != 0); + if (buttonsEnabled == enable) return; + buttonsEnabled = enable; + for (const id of ['focus', 'ignore', 'hide', 'show', 'show-from']) { + const link = document.getElementById(id); + if (link != null) { + link.classList.toggle('disabled', !enable); + } + } + } + + // Initialize button states + updateButtons(); + + // Setup event handlers + initMenus(); + if (svg != null) { + initPanAndZoom(svg, toggleSvgSelect); + } + if (toptable != null) { + toptable.addEventListener('mousedown', handleTopClick); + toptable.addEventListener('touchstart', handleTopClick); + } + + const ids = ['topbtn', 'graphbtn', + 'flamegraph', + 'peek', 'list', + 'disasm', 'focus', 'ignore', 'hide', 'show', 'show-from']; + ids.forEach(makeSearchLinkDynamic); + + const sampleIDs = [{{range .SampleTypes}}'{{.}}', {{end}}]; + sampleIDs.forEach(setSampleIndexLink); + + // Bind action to button with specified id. + function addAction(id, action) { + const btn = document.getElementById(id); + if (btn != null) { + btn.addEventListener('click', action); + btn.addEventListener('touchstart', action); + } + } + + addAction('details', handleDetails); + initConfigManager(); + + search.addEventListener('input', handleSearch); + search.addEventListener('keydown', handleKey); + + // Give initial focus to main container so it can be scrolled using keys. + const main = document.getElementById('bodycontainer'); + if (main) { + main.focus(); + } +} + +// convert a string to a regexp that matches exactly that string. 
+function pprofQuoteMeta(str) { + return '^' + str.replace(/([\\\.?+*\[\](){}|^$])/g, '\\$1') + '$'; +} diff --git a/plugin/debug/pkg/internal/driver/html/graph.css b/plugin/debug/pkg/internal/driver/html/graph.css new file mode 100644 index 0000000..c756ddf --- /dev/null +++ b/plugin/debug/pkg/internal/driver/html/graph.css @@ -0,0 +1,7 @@ +#graph { + cursor: grab; +} + +#graph:active { + cursor: grabbing; +} diff --git a/plugin/debug/pkg/internal/driver/html/graph.html b/plugin/debug/pkg/internal/driver/html/graph.html new file mode 100644 index 0000000..d17a0ea --- /dev/null +++ b/plugin/debug/pkg/internal/driver/html/graph.html @@ -0,0 +1,17 @@ + + + + + {{.Title}} + {{template "css" .}} + {{template "graph_css" .}} + + + {{template "header" .}} +
+ {{.HTMLBody}} +
+ {{template "script" .}} + + + diff --git a/plugin/debug/pkg/internal/driver/html/header.html b/plugin/debug/pkg/internal/driver/html/header.html new file mode 100644 index 0000000..5405a0b --- /dev/null +++ b/plugin/debug/pkg/internal/driver/html/header.html @@ -0,0 +1,119 @@ +
+
+

pprof

+
+ + + + {{$sampleLen := len .SampleTypes}} + {{if gt $sampleLen 1}} + + {{end}} + + + + + + + +
+ +
+ +
+ {{.Title}} +
+ {{range .Legend}}
{{.}}
{{end}} +
+
+ + {{if .DocURL}} + + {{end}} +
+ +
+ +
+
Save options as
+ + {{range .Configs}}{{if .UserConfig}} + + +
+ +
+
Delete config
+
+ +
+ +
{{range .Errors}}
{{.}}
{{end}}
diff --git a/plugin/debug/pkg/internal/driver/html/plaintext.html b/plugin/debug/pkg/internal/driver/html/plaintext.html new file mode 100644 index 0000000..9791cc7 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/html/plaintext.html @@ -0,0 +1,18 @@ + + + + + {{.Title}} + {{template "css" .}} + + + {{template "header" .}} +
+
+      {{.TextBody}}
+    
+
+ {{template "script" .}} + + + diff --git a/plugin/debug/pkg/internal/driver/html/source.html b/plugin/debug/pkg/internal/driver/html/source.html new file mode 100644 index 0000000..b676ce2 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/html/source.html @@ -0,0 +1,72 @@ + + + + + {{.Title}} + {{if not .Standalone}}{{template "css" .}}{{end}} + {{template "weblistcss" .}} + {{template "weblistjs" .}} + +{{"\n" -}} + {{/* emit different header in standalone mode */ -}} + {{if .Standalone}}{{"\n" -}} +
{{"" -}} + {{range $i, $e := .Legend -}} + {{if $i}}
{{"\n"}}{{end}}{{. -}} + {{end}}
Total: {{.Listing.Total -}} +
{{"" -}} + {{else -}} + {{template "header" .}} +
{{"" -}} + {{end -}} + + {{range .Listing.Files -}} + {{range .Funcs -}} +

{{.Name}}

{{"" -}} +

{{.File}}

{{"\n" -}} +
{{"\n" -}}
+        {{printf "  Total:  %10s %10s (flat, cum) %s" .Flat .Cumulative .Percent -}}
+        {{range .Lines -}}{{"\n" -}}
+          {{/* source line */ -}}
+          {{printf " %6d" .Line}}{{" " -}}
+          
+            {{- printf "  %10s %10s %8s  %s " .Flat .Cumulative "" .SrcLine -}}
+          {{"" -}}
+
+          {{if .Instructions -}}
+            {{/* instructions for this source line */ -}}
+            {{"" -}}
+            {{range .Instructions -}}
+              {{/* separate when we hit a new basic block */ -}}
+              {{if .NewBlock -}}{{printf " %8s %28s\n" "" "⋮"}}{{end -}}
+
+              {{/* inlined calls leading to this instruction */ -}}
+              {{range .InlinedCalls -}}
+                {{printf " %8s %10s %10s %8s  " "" "" "" "" -}}
+                {{.SrcLine}}{{" " -}}
+                {{.FileBase}}:{{.Line}}{{"\n" -}}
+              {{end -}}
+
+              {{if not .Synthetic -}}
+                {{/* disassembled instruction */ -}}
+                {{printf " %8s %10s %10s %8x: %s " "" .Flat .Cumulative .Address .Disasm -}}
+                {{.FileLine}}{{"\n" -}}
+              {{end -}}
+            {{end -}}
+            {{"" -}}
+          {{end -}}
+          {{/* end of line */ -}}
+        {{end}}{{"\n" -}}
+      
{{"\n" -}} + {{/* end of function */ -}} + {{end -}} + {{/* end of file */ -}} + {{end -}} + + {{if not .Standalone}}{{"\n " -}} +
{{"\n" -}} + {{template "script" .}}{{"\n" -}} + {{"" -}} + {{end}} + + diff --git a/plugin/debug/pkg/internal/driver/html/stacks.css b/plugin/debug/pkg/internal/driver/html/stacks.css new file mode 100644 index 0000000..1df4f71 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/html/stacks.css @@ -0,0 +1,89 @@ +body { + overflow: hidden; /* Want scrollbar not here, but in #stack-holder */ +} +/* Scrollable container for flame graph */ +#stack-holder { + width: 100%; + flex-grow: 1; + overflow-y: auto; + background: #eee; /* Light grey gives better contrast with boxes */ + position: relative; /* Allows absolute positioning of child boxes */ +} +/* Flame graph */ +#stack-chart { + width: 100%; + position: relative; /* Allows absolute positioning of child boxes */ +} +/* Holder for current frame details. */ +#current-details { + position: relative; + background: #eee; /* Light grey gives better contrast with boxes */ + font-size: 12pt; + padding: 0 4px; + width: 100%; +} +/* Shows details of frame that is under the mouse */ +#current-details-left { + float: left; + max-width: 60%; + white-space: nowrap; + overflow-x: hidden; +} +#current-details-right { + float: right; + max-width: 40%; + white-space: nowrap; + overflow-x: hidden; +} +/* Background of a single flame-graph frame */ +.boxbg { + border-width: 0px; + position: absolute; + overflow: hidden; + box-sizing: border-box; + background: #d8d8d8; +} +.positive { position: absolute; background: #caa; } +.negative { position: absolute; background: #aca; } +/* Not-inlined frames are visually separated from their caller. 
*/ +.not-inlined { + border-top: 1px solid black; +} +/* Function name */ +.boxtext { + position: absolute; + width: 100%; + padding-left: 2px; + line-height: 18px; + cursor: default; + font-family: "Google Sans", Arial, sans-serif; + font-size: 12pt; + z-index: 2; +} +/* Box highlighting via shadows to avoid size changes */ +.hilite { box-shadow: 0px 0px 0px 2px #000; z-index: 1; } +.hilite2 { box-shadow: 0px 0px 0px 2px #000; z-index: 1; } +/* Gap left between callers and callees */ +.separator { + position: absolute; + text-align: center; + font-size: 12pt; + font-weight: bold; +} +/* Right-click menu */ +#action-menu { + max-width: 15em; +} +/* Right-click menu title */ +#action-title { + display: block; + padding: 0.5em 1em; + background: #888; + text-overflow: ellipsis; + overflow: hidden; +} +/* Internal canvas used to measure text size when picking fonts */ +#textsizer { + position: absolute; + bottom: -100px; +} diff --git a/plugin/debug/pkg/internal/driver/html/stacks.html b/plugin/debug/pkg/internal/driver/html/stacks.html new file mode 100644 index 0000000..a4e4077 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/html/stacks.html @@ -0,0 +1,36 @@ + + + + + {{.Title}} + {{template "css" .}} + {{template "stacks_css"}} + + + {{template "header" .}} +
+
+
 
+
+
+
+
+ + {{template "script" .}} + {{template "stacks_js"}} + + + diff --git a/plugin/debug/pkg/internal/driver/html/stacks.js b/plugin/debug/pkg/internal/driver/html/stacks.js new file mode 100644 index 0000000..7db0699 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/html/stacks.js @@ -0,0 +1,638 @@ +// stackViewer displays a flame-graph like view (extended to show callers). +// stacks - report.StackSet +// nodes - List of names for each source in report.StackSet +function stackViewer(stacks, nodes) { + 'use strict'; + + // Constants used in rendering. + const ROW = 20; + const PADDING = 2; + const MIN_WIDTH = 4; + const MIN_TEXT_WIDTH = 16; + const TEXT_MARGIN = 2; + const FONT_SIZE = 12; + const MIN_FONT_SIZE = 8; + + // Fields + let pivots = []; // Indices of currently selected data.Sources entries. + let matches = new Set(); // Indices of sources that match search + let elems = new Map(); // Mapping from source index to display elements + let displayList = []; // List of boxes to display. + let actionMenuOn = false; // Is action menu visible? + let actionTarget = null; // Box on which action menu is operating. + let diff = false; // Are we displaying a diff? + let shown = 0; // How many profile values are being displayed? + + for (const stack of stacks.Stacks) { + if (stack.Value < 0) { + diff = true; + break; + } + } + + // Setup to allow measuring text width. + const textSizer = document.createElement('canvas'); + textSizer.id = 'textsizer'; + const textContext = textSizer.getContext('2d'); + + // Get DOM elements. 
+ const chart = find('stack-chart'); + const search = find('search'); + const actions = find('action-menu'); + const actionTitle = find('action-title'); + const leftDetailBox = find('current-details-left'); + const rightDetailBox = find('current-details-right'); + + window.addEventListener('resize', render); + window.addEventListener('popstate', render); + search.addEventListener('keydown', handleSearchKey); + + // Withdraw action menu when clicking outside, or when item selected. + document.addEventListener('mousedown', (e) => { + if (!actions.contains(e.target)) { + hideActionMenu(); + } + }); + actions.addEventListener('click', hideActionMenu); + + // Initialize menus and other general UI elements. + viewer(new URL(window.location.href), nodes, { + hiliter: (n, on) => { return hilite(n, on); }, + current: () => { + let r = new Map(); + if (pivots.length == 1 && pivots[0] == 0) { + // Not pivoting + } else { + for (let p of pivots) { + r.set(p, true); + } + } + return r; + }}); + + render(); + clearDetails(); + + // Helper functions follow: + + // hilite changes the highlighting of elements corresponding to specified src. + function hilite(src, on) { + if (on) { + matches.add(src); + } else { + matches.delete(src); + } + toggleClass(src, 'hilite', on); + return true; + } + + // Display action menu (triggered by right-click on a frame) + function showActionMenu(e, box) { + if (box.src == 0) return; // No action menu for root + e.preventDefault(); // Disable browser context menu + const src = stacks.Sources[box.src]; + actionTitle.innerText = src.Display[src.Display.length-1]; + const menu = actions; + menu.style.display = 'block'; + // Compute position so menu stays visible and near the mouse. + const x = Math.min(e.clientX - 10, document.body.clientWidth - menu.clientWidth); + const y = Math.min(e.clientY - 10, document.body.clientHeight - menu.clientHeight); + menu.style.left = x + 'px'; + menu.style.top = y + 'px'; + // Set menu links to operate on clicked box. 
+ setHrefParam('action-source', 'f', box.src); + setHrefParam('action-source-tab', 'f', box.src); + setHrefParam('action-focus', 'f', box.src); + setHrefParam('action-ignore', 'i', box.src); + setHrefParam('action-hide', 'h', box.src); + setHrefParam('action-showfrom', 'sf', box.src); + toggleClass(box.src, 'hilite2', true); + actionTarget = box; + actionMenuOn = true; + } + + function hideActionMenu() { + actions.style.display = 'none'; + actionMenuOn = false; + if (actionTarget != null) { + toggleClass(actionTarget.src, 'hilite2', false); + } + } + + // setHrefParam updates the specified parameter in the href of an + // element to make it operate on the specified src. + function setHrefParam(id, param, src) { + const elem = document.getElementById(id); + if (!elem) return; + + let url = new URL(elem.href); + url.hash = ''; + + // Copy params from this page's URL. + const params = url.searchParams; + for (const p of new URLSearchParams(window.location.search)) { + params.set(p[0], p[1]); + } + + // Update params to include src. + // When `pprof` is invoked with `-lines`, FullName will be suffixed with `:`, + // which we need to remove. + let v = pprofQuoteMeta(stacks.Sources[src].FullName.replace(/:[0-9]+$/, '')); + if (param != 'f' && param != 'sf') { // old f,sf values are overwritten + // Add new source to current parameter value. + const old = params.get(param); + if (old && old != '') { + v += '|' + old; + } + } + params.set(param, v); + + elem.href = url.toString(); + } + + // Capture Enter key in the search box to make it pivot instead of focus. + function handleSearchKey(e) { + if (e.key != 'Enter') return; + e.stopImmediatePropagation(); // Disable normal enter key handling + const val = search.value; + try { + new RegExp(search.value); + } catch (error) { + return; // TODO: Display error state in search box + } + switchPivots(val); + } + + function switchPivots(regexp) { + // Switch URL without hitting the server. 
+ const url = new URL(document.URL); + if (regexp === '' || regexp === '^$') { + url.searchParams.delete('p'); // Not pivoting + } else { + url.searchParams.set('p', regexp); + } + history.pushState('', '', url.toString()); // Makes back-button work + matches = new Set(); + search.value = ''; + render(); + } + + function handleEnter(box, div) { + if (actionMenuOn) return; + const src = stacks.Sources[box.src]; + div.title = details(box) + ' │ ' + src.FullName + (src.Inlined ? "\n(inlined)" : ""); + leftDetailBox.innerText = src.FullName + (src.Inlined ? " (inlined)" : ""); + let timing = summary(box.sumpos, box.sumneg); + if (box.self != 0) { + timing = "self " + unitText(box.self) + " │ " + timing; + } + rightDetailBox.innerText = timing; + // Highlight all boxes that have the same source as box. + toggleClass(box.src, 'hilite2', true); + } + + function handleLeave(box) { + if (actionMenuOn) return; + clearDetails(); + toggleClass(box.src, 'hilite2', false); + } + + function clearDetails() { + leftDetailBox.innerText = ''; + rightDetailBox.innerText = percentText(shown); + } + + // Return list of sources that match the regexp given by the 'p' URL parameter. + function urlPivots() { + const pivots = []; + const params = (new URL(document.URL)).searchParams; + const val = params.get('p'); + if (val !== null && val != '') { + try { + const re = new RegExp(val); + for (let i = 0; i < stacks.Sources.length; i++) { + const src = stacks.Sources[i]; + if (re.test(src.UniqueName) || re.test(src.FileName)) { + pivots.push(i); + } + } + } catch (error) {} + } + if (pivots.length == 0) { + pivots.push(0); + } + return pivots; + } + + // render re-generates the stack display. + function render() { + pivots = urlPivots(); + + // Get places where pivots occur. 
+ let places = []; + for (let pivot of pivots) { + const src = stacks.Sources[pivot]; + for (let p of src.Places) { + places.push(p); + } + } + + const width = chart.clientWidth; + elems.clear(); + actionTarget = null; + const [pos, neg] = totalValue(places); + const total = pos + neg; + const xscale = (width-2*PADDING) / total; // Converts from profile value to X pixels + const x = PADDING; + const y = 0; + + // Show summary for pivots if we are actually pivoting. + const showPivotSummary = !(pivots.length == 1 && pivots[0] == 0); + + shown = pos + neg; + displayList.length = 0; + renderStacks(0, xscale, x, y, places, +1); // Callees + renderStacks(0, xscale, x, y-ROW, places, -1); // Callers (ROW left for separator) + display(xscale, pos, neg, displayList, showPivotSummary); + } + + // renderStacks creates boxes with top-left at x,y with children drawn as + // nested stacks (below or above based on the sign of direction). + // Returns the largest y coordinate filled. + function renderStacks(depth, xscale, x, y, places, direction) { + // Example: suppose we are drawing the following stacks: + // a->b->c + // a->b->d + // a->e->f + // After rendering a, we will call renderStacks, with places pointing to + // the preceding stacks. + // + // We first group all places with the same leading entry. In this example + // we get [b->c, b->d] and [e->f]. We render the two groups side-by-side. + const groups = partitionPlaces(places); + for (const g of groups) { + renderGroup(depth, xscale, x, y, g, direction); + x += groupWidth(xscale, g); + } + } + + // Some of the types used below: + // + // // Group represents a displayed (sub)tree. 
+ // interface Group { + // name: string; // Full name of source + // src: number; // Index in stacks.Sources + // self: number; // Contribution as leaf (may be < 0 for diffs) + // sumpos: number; // Sum of |self| of positive nodes in tree (>= 0) + // sumneg: number; // Sum of |self| of negative nodes in tree (>= 0) + // places: Place[]; // Stack slots that contributed to this group + // } + // + // // Box is a rendered item. + // interface Box { + // x: number; // X coordinate of top-left + // y: number; // Y coordinate of top-left + // width: number; // Width of box to display + // src: number; // Index in stacks.Sources + // sumpos: number; // From corresponding Group + // sumneg: number; // From corresponding Group + // self: number; // From corresponding Group + // }; + + function groupWidth(xscale, g) { + return xscale * (g.sumpos + g.sumneg); + } + + function renderGroup(depth, xscale, x, y, g, direction) { + // Skip if not wide enough. + const width = groupWidth(xscale, g); + if (width < MIN_WIDTH) return; + + // Draw the box for g.src (except for selected element in upwards direction + // since that duplicates the box we added in downwards direction). + if (depth != 0 || direction > 0) { + const box = { + x: x, + y: y, + width: width, + src: g.src, + sumpos: g.sumpos, + sumneg: g.sumneg, + self: g.self, + }; + displayList.push(box); + if (direction > 0) { + // Leave gap on left hand side to indicate self contribution. + x += xscale*Math.abs(g.self); + } + } + y += direction * ROW; + + // Find child or parent stacks. + const next = []; + for (const place of g.places) { + const stack = stacks.Stacks[place.Stack]; + const nextSlot = place.Pos + direction; + if (nextSlot >= 0 && nextSlot < stack.Sources.length) { + next.push({Stack: place.Stack, Pos: nextSlot}); + } + } + renderStacks(depth+1, xscale, x, y, next, direction); + } + + // partitionPlaces partitions a set of places into groups where each group + // contains places with the same source. 
If a stack occurs multiple times + // in places, only the outer-most occurrence is kept. + function partitionPlaces(places) { + // Find outer-most slot per stack (used later to elide duplicate stacks). + const stackMap = new Map(); // Map from stack index to outer-most slot# + for (const place of places) { + const prevSlot = stackMap.get(place.Stack); + if (prevSlot && prevSlot <= place.Pos) { + // We already have a higher slot in this stack. + } else { + stackMap.set(place.Stack, place.Pos); + } + } + + // Now partition the stacks. + const groups = []; // Array of Group {name, src, sum, self, places} + const groupMap = new Map(); // Map from Source to Group + for (const place of places) { + if (stackMap.get(place.Stack) != place.Pos) { + continue; + } + + const stack = stacks.Stacks[place.Stack]; + const src = stack.Sources[place.Pos]; + let group = groupMap.get(src); + if (!group) { + const name = stacks.Sources[src].FullName; + group = {name: name, src: src, sumpos: 0, sumneg: 0, self: 0, places: []}; + groupMap.set(src, group); + groups.push(group); + } + if (stack.Value < 0) { + group.sumneg += -stack.Value; + } else { + group.sumpos += stack.Value; + } + group.self += (place.Pos == stack.Sources.length-1) ? stack.Value : 0; + group.places.push(place); + } + + // Order by decreasing cost (makes it easier to spot heavy functions). + // Though alphabetical ordering is a potential alternative that will make + // profile comparisons easier. + groups.sort(function(a, b) { + return (b.sumpos + b.sumneg) - (a.sumpos + a.sumneg); + }); + + return groups; + } + + function display(xscale, posTotal, negTotal, list, showPivotSummary) { + // Sort boxes so that text selection follows a predictable order. + list.sort(function(a, b) { + if (a.y != b.y) return a.y - b.y; + return a.x - b.x; + }); + + // Adjust Y coordinates so that zero is at top. + let adjust = (list.length > 0) ? 
list[0].y : 0; + + const divs = []; + for (const box of list) { + box.y -= adjust; + divs.push(drawBox(xscale, box)); + } + if (showPivotSummary) { + divs.push(drawSep(-adjust, posTotal, negTotal)); + } + + const h = (list.length > 0 ? list[list.length-1].y : 0) + 4*ROW; + chart.style.height = h+'px'; + chart.replaceChildren(...divs); + } + + function drawBox(xscale, box) { + const srcIndex = box.src; + const src = stacks.Sources[srcIndex]; + + function makeRect(cl, x, y, w, h) { + const r = document.createElement('div'); + r.style.left = x+'px'; + r.style.top = y+'px'; + r.style.width = w+'px'; + r.style.height = h+'px'; + r.classList.add(cl); + return r; + } + + // Background + const w = box.width - 1; // Leave 1px gap + const r = makeRect('boxbg', box.x, box.y, w, ROW); + if (!diff) r.style.background = makeColor(src.Color); + addElem(srcIndex, r); + if (!src.Inlined) { + r.classList.add('not-inlined'); + } + + // Positive/negative indicator for diff mode. + if (diff) { + const delta = box.sumpos - box.sumneg; + const partWidth = xscale * Math.abs(delta); + if (partWidth >= MIN_WIDTH) { + r.appendChild(makeRect((delta < 0 ? 'negative' : 'positive'), + 0, 0, partWidth, ROW-1)); + } + } + + // Label + if (box.width >= MIN_TEXT_WIDTH) { + const t = document.createElement('div'); + t.classList.add('boxtext'); + fitText(t, box.width-2*TEXT_MARGIN, src.Display); + r.appendChild(t); + } + + onClick(r, () => { switchPivots(pprofQuoteMeta(src.UniqueName)); }); + r.addEventListener('mouseenter', () => { handleEnter(box, r); }); + r.addEventListener('mouseleave', () => { handleLeave(box); }); + r.addEventListener('contextmenu', (e) => { showActionMenu(e, box); }); + return r; + } + + // Handle clicks, but only if the mouse did not move during the click. + function onClick(target, handler) { + // Disable click if mouse moves more than threshold pixels since mousedown. 
+ const threshold = 3; + let [x, y] = [-1, -1]; + target.addEventListener('mousedown', (e) => { + [x, y] = [e.clientX, e.clientY]; + }); + target.addEventListener('click', (e) => { + if (Math.abs(e.clientX - x) <= threshold && + Math.abs(e.clientY - y) <= threshold) { + handler(); + } + }); + } + + function drawSep(y, posTotal, negTotal) { + const m = document.createElement('div'); + m.innerText = summary(posTotal, negTotal); + m.style.top = (y-ROW) + 'px'; + m.style.left = PADDING + 'px'; + m.style.width = (chart.clientWidth - PADDING*2) + 'px'; + m.classList.add('separator'); + return m; + } + + // addElem registers an element that belongs to the specified src. + function addElem(src, elem) { + let list = elems.get(src); + if (!list) { + list = []; + elems.set(src, list); + } + list.push(elem); + elem.classList.toggle('hilite', matches.has(src)); + } + + // Adds or removes cl from classList of all elements for the specified source. + function toggleClass(src, cl, value) { + const list = elems.get(src); + if (list) { + for (const elem of list) { + elem.classList.toggle(cl, value); + } + } + } + + // fitText sets text and font-size clipped to the specified width w. + function fitText(t, avail, textList) { + // Find first entry in textList that fits. + let width = avail; + textContext.font = FONT_SIZE + 'pt Arial'; + for (let i = 0; i < textList.length; i++) { + let text = textList[i]; + width = textContext.measureText(text).width; + if (width <= avail) { + t.innerText = text; + return; + } + } + + // Try to fit by dropping font size. + let text = textList[textList.length-1]; + const fs = Math.max(MIN_FONT_SIZE, FONT_SIZE * (avail / width)); + t.style.fontSize = fs + 'pt'; + t.innerText = text; + } + + // totalValue returns the positive and negative sums of the Values of stacks + // listed in places. 
+ function totalValue(places) { + const seen = new Set(); + let pos = 0; + let neg = 0; + for (const place of places) { + if (seen.has(place.Stack)) continue; // Do not double-count stacks + seen.add(place.Stack); + const stack = stacks.Stacks[place.Stack]; + if (stack.Value < 0) { + neg += -stack.Value; + } else { + pos += stack.Value; + } + } + return [pos, neg]; + } + + function summary(pos, neg) { + // Examples: + // 6s (10%) + // 12s (20%) 🠆 18s (30%) + return diff ? diffText(neg, pos) : percentText(pos); + } + + function details(box) { + // Examples: + // 6s (10%) + // 6s (10%) │ self 3s (5%) + // 6s (10%) │ 12s (20%) 🠆 18s (30%) + let result = percentText(box.sumpos - box.sumneg); + if (box.self != 0) { + result += " │ self " + unitText(box.self); + } + if (diff && box.sumpos > 0 && box.sumneg > 0) { + result += " │ " + diffText(box.sumneg, box.sumpos); + } + return result; + } + + // diffText returns text that displays from and to alongside their percentages. + // E.g., 9s (45%) 🠆 10s (50%) + function diffText(from, to) { + return percentText(from) + " 🠆 " + percentText(to); + } + + // percentText returns text that displays v in appropriate units alongside its + // percentange. + function percentText(v) { + function percent(v, total) { + return Number(((100.0 * v) / total).toFixed(1)) + '%'; + } + return unitText(v) + " (" + percent(v, stacks.Total) + ")"; + } + + // unitText returns a formatted string to display for value. + function unitText(value) { + return pprofUnitText(value*stacks.Scale, stacks.Unit); + } + + function find(name) { + const elem = document.getElementById(name); + if (!elem) { + throw 'element not found: ' + name + } + return elem; + } + + function makeColor(index) { + // Rotate hue around a circle. Multiple by phi to spread things + // out better. Use 50% saturation to make subdued colors, and + // 80% lightness to have good contrast with black foreground text. 
+ const PHI = 1.618033988; + const hue = (index+1) * PHI * 2 * Math.PI; // +1 to avoid 0 + const hsl = `hsl(${hue}rad 50% 80%)`; + return hsl; + } +} + +// pprofUnitText returns a formatted string to display for value in the specified unit. +function pprofUnitText(value, unit) { + const sign = (value < 0) ? "-" : ""; + let v = Math.abs(value); + // Rescale to appropriate display unit. + let list = null; + for (const def of pprofUnitDefs) { + if (def.DefaultUnit.CanonicalName == unit) { + list = def.Units; + v *= def.DefaultUnit.Factor; + break; + } + } + if (list) { + // Stop just before entry that is too large. + for (let i = 0; i < list.length; i++) { + if (i == list.length-1 || list[i+1].Factor > v) { + v /= list[i].Factor; + unit = list[i].CanonicalName; + break; + } + } + } + return sign + Number(v.toFixed(2)) + unit; +} diff --git a/plugin/debug/pkg/internal/driver/html/top.html b/plugin/debug/pkg/internal/driver/html/top.html new file mode 100644 index 0000000..86d9fcb --- /dev/null +++ b/plugin/debug/pkg/internal/driver/html/top.html @@ -0,0 +1,114 @@ + + + + + {{.Title}} + {{template "css" .}} + + + + {{template "header" .}} +
+ + + + + + + + + + + + + +
FlatFlat%Sum%CumCum%NameInlined?
+
+ {{template "script" .}} + + + diff --git a/plugin/debug/pkg/internal/driver/interactive.go b/plugin/debug/pkg/internal/driver/interactive.go new file mode 100644 index 0000000..e8f3b7b --- /dev/null +++ b/plugin/debug/pkg/internal/driver/interactive.go @@ -0,0 +1,422 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "fmt" + "io" + "regexp" + "sort" + "strconv" + "strings" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/internal/report" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +var commentStart = "//:" // Sentinel for comments on options +var tailDigitsRE = regexp.MustCompile("[0-9]+$") + +// interactive starts a shell to read pprof commands. +func interactive(p *profile.Profile, o *plugin.Options) error { + // Enter command processing loop. + o.UI.SetAutoComplete(newCompleter(functionNames(p))) + configure("compact_labels", "true") + configHelp["sample_index"] += fmt.Sprintf("Or use sample_index=name, with name in %v.\n", sampleTypes(p)) + + // Do not wait for the visualizer to complete, to allow multiple + // graphs to be visualized simultaneously. 
+ interactiveMode = true + shortcuts := profileShortcuts(p) + + copier := makeProfileCopier(p) + greetings(p, o.UI) + for { + input, err := o.UI.ReadLine("(pprof) ") + if err != nil { + if err != io.EOF { + return err + } + if input == "" { + return nil + } + } + + for _, input := range shortcuts.expand(input) { + // Process assignments of the form variable=value + if s := strings.SplitN(input, "=", 2); len(s) > 0 { + name := strings.TrimSpace(s[0]) + var value string + if len(s) == 2 { + value = s[1] + if comment := strings.LastIndex(value, commentStart); comment != -1 { + value = value[:comment] + } + value = strings.TrimSpace(value) + } + if isConfigurable(name) { + // All non-bool options require inputs + if len(s) == 1 && !isBoolConfig(name) { + o.UI.PrintErr(fmt.Errorf("please specify a value, e.g. %s=", name)) + continue + } + if name == "sample_index" { + // Error check sample_index=xxx to ensure xxx is a valid sample type. + index, err := p.SampleIndexByName(value) + if err != nil { + o.UI.PrintErr(err) + continue + } + if index < 0 || index >= len(p.SampleType) { + o.UI.PrintErr(fmt.Errorf("invalid sample_index %q", value)) + continue + } + value = p.SampleType[index].Type + } + if err := configure(name, value); err != nil { + o.UI.PrintErr(err) + } + continue + } + } + + tokens := strings.Fields(input) + if len(tokens) == 0 { + continue + } + + switch tokens[0] { + case "o", "options": + printCurrentOptions(p, o.UI) + continue + case "exit", "quit", "q": + return nil + case "help": + commandHelp(strings.Join(tokens[1:], " "), o.UI) + continue + } + + args, cfg, err := parseCommandLine(tokens) + if err == nil { + err = generateReportWrapper(copier.newCopy(), args, cfg, o) + } + + if err != nil { + o.UI.PrintErr(err) + } + } + } +} + +var generateReportWrapper = generateReport // For testing purposes. + +// greetings prints a brief welcome and some overall profile +// information before accepting interactive commands. 
func greetings(p *profile.Profile, ui plugin.UI) {
	numLabelUnits := identifyNumLabelUnits(p, ui)
	ropt, err := reportOptions(p, numLabelUnits, currentConfig())
	if err == nil {
		rpt := report.New(p, ropt)
		ui.Print(strings.Join(report.ProfileLabels(rpt), "\n"))
		// With multiple sample types, a zero total under the default type is
		// usually a sign the user wants a different sample_index.
		if rpt.Total() == 0 && len(p.SampleType) > 1 {
			ui.Print(`No samples were found with the default sample value type.`)
			ui.Print(`Try "sample_index" command to analyze different sample values.`, "\n")
		}
	}
	ui.Print(`Entering interactive mode (type "help" for commands, "o" for options)`)
}

// shortcuts represents composite commands that expand into a sequence
// of other commands.
type shortcuts map[string][]string

// expand returns the command sequence registered for input, or input
// itself (as a one-element slice) when no shortcut matches.
func (a shortcuts) expand(input string) []string {
	input = strings.TrimSpace(input)
	if a != nil {
		if r, ok := a[input]; ok {
			return r
		}
	}
	return []string{input}
}

var pprofShortcuts = shortcuts{
	":": []string{"focus=", "ignore=", "hide=", "tagfocus=", "tagignore="},
}

// profileShortcuts creates macros for convenience and backward compatibility.
func profileShortcuts(p *profile.Profile) shortcuts {
	s := pprofShortcuts
	// Add shortcuts for sample types
	for _, st := range p.SampleType {
		command := fmt.Sprintf("sample_index=%s", st.Type)
		s[st.Type] = []string{command}
		s["total_"+st.Type] = []string{"mean=0", command}
		s["mean_"+st.Type] = []string{"mean=1", command}
	}
	return s
}

// sampleTypes returns the names of p's sample types, in profile order.
func sampleTypes(p *profile.Profile) []string {
	types := make([]string, len(p.SampleType))
	for i, t := range p.SampleType {
		types[i] = t.Type
	}
	return types
}

// printCurrentOptions prints each configurable option with its current
// value, followed by a trailing comment listing the allowed values
// where those are known.
func printCurrentOptions(p *profile.Profile, ui plugin.UI) {
	var args []string
	current := currentConfig()
	for _, f := range configFields {
		n := f.name
		v := current.get(f)
		comment := ""
		switch {
		case len(f.choices) > 0:
			values := append([]string{}, f.choices...)
			sort.Strings(values)
			comment = "[" + strings.Join(values, " | ") + "]"
		case n == "sample_index":
			st := sampleTypes(p)
			if v == "" {
				// Apply default (last sample index).
				v = st[len(st)-1]
			}
			// Add comments for all sample types in profile.
			comment = "[" + strings.Join(st, " | ") + "]"
		case n == "source_path":
			// source_path is environment-specific; omit it from the listing.
			continue
		case n == "nodecount" && v == "-1":
			comment = "default"
		case v == "":
			// Add quotes for empty values.
			v = `""`
		}
		if n == "granularity" && v == "" {
			v = "(default)"
		}
		if comment != "" {
			comment = commentStart + " " + comment
		}
		args = append(args, fmt.Sprintf(" %-25s = %-20s %s", n, v, comment))
	}
	sort.Strings(args)
	ui.Print(strings.Join(args, "\n"))
}

// parseCommandLine parses a command and returns the pprof command to
// execute and the configuration to use for the report.
func parseCommandLine(input []string) ([]string, config, error) {
	cmd, args := input[:1], input[1:]
	name := cmd[0]

	c := pprofCommands[name]
	if c == nil {
		// Attempt splitting digits on abbreviated commands (eg top10)
		if d := tailDigitsRE.FindString(name); d != "" && d != name {
			name = name[:len(name)-len(d)]
			cmd[0], args = name, append([]string{d}, args...)
			c = pprofCommands[name]
		}
	}
	if c == nil {
		// Not a command; if it names a config option, suggest the
		// assignment form instead of failing opaquely.
		if _, ok := configHelp[name]; ok {
			value := ""
			if len(args) > 0 {
				value = args[0]
			}
			return nil, config{}, fmt.Errorf("did you mean: %s=%s", name, value)
		}
		return nil, config{}, fmt.Errorf("unrecognized command: %q", name)
	}

	if c.hasParam {
		if len(args) == 0 {
			return nil, config{}, fmt.Errorf("command %s requires an argument", name)
		}
		cmd = append(cmd, args[0])
		args = args[1:]
	}

	// Copy config since options set in the command line should not persist.
	vcopy := currentConfig()

	var focus, ignore string
	for i := 0; i < len(args); i++ {
		t := args[i]
		// A bare integer argument sets the node count.
		if n, err := strconv.ParseInt(t, 10, 32); err == nil {
			vcopy.NodeCount = int(n)
			continue
		}
		switch t[0] {
		case '>':
			// ">file" or "> file" redirects output.
			outputFile := t[1:]
			if outputFile == "" {
				i++
				if i >= len(args) {
					return nil, config{}, fmt.Errorf("unexpected end of line after >")
				}
				outputFile = args[i]
			}
			vcopy.Output = outputFile
		case '-':
			if t == "--cum" || t == "-cum" {
				vcopy.Sort = "cum"
				continue
			}
			// "-regex" accumulates into the ignore expression.
			ignore = catRegex(ignore, t[1:])
		default:
			// Anything else accumulates into the focus expression.
			focus = catRegex(focus, t)
		}
	}

	// The "tags" command interprets focus/ignore as tag filters;
	// every other command applies them to function names.
	if name == "tags" {
		if focus != "" {
			vcopy.TagFocus = focus
		}
		if ignore != "" {
			vcopy.TagIgnore = ignore
		}
	} else {
		if focus != "" {
			vcopy.Focus = focus
		}
		if ignore != "" {
			vcopy.Ignore = ignore
		}
	}
	if vcopy.NodeCount == -1 && (name == "text" || name == "top") {
		vcopy.NodeCount = 10
	}

	return cmd, vcopy, nil
}

// catRegex joins two regexps into an alternation; if either is empty
// the other is returned unchanged.
func catRegex(a, b string) string {
	if a != "" && b != "" {
		return a + "|" + b
	}
	return a + b
}

// commandHelp displays help and usage information for all Commands
// and Variables or a specific Command or Variable.
func commandHelp(args string, ui plugin.UI) {
	if args == "" {
		help := usage(false)
		// NOTE(review): this help text may have lost a "<cmd|option>"
		// placeholder to markup stripping — verify against upstream pprof.
		help = help + `
  :   Clear focus/ignore/hide/tagfocus/tagignore

  type "help " for more information
`

		ui.Print(help)
		return
	}

	if c := pprofCommands[args]; c != nil {
		ui.Print(c.help(args))
		return
	}

	if help, ok := configHelp[args]; ok {
		ui.Print(help + "\n")
		return
	}

	ui.PrintErr("Unknown command: " + args)
}

// newCompleter creates an autocompletion function for a set of commands.
+// newCompleter creates an autocompletion function for a set of commands.
+// The returned closure takes a partially typed input line and returns the
+// completed line, or the input unchanged when there is no unique completion.
+func newCompleter(fns []string) func(string) string {
+	return func(line string) string {
+		switch tokens := strings.Fields(line); len(tokens) {
+		case 0:
+			// Nothing to complete
+		case 1:
+			// Single token -- complete command name
+			if match := matchVariableOrCommand(tokens[0]); match != "" {
+				return match
+			}
+		case 2:
+			// "help <partial>" completes the argument as a command/variable name.
+			if tokens[0] == "help" {
+				if match := matchVariableOrCommand(tokens[1]); match != "" {
+					return tokens[0] + " " + match
+				}
+				return line
+			}
+			fallthrough
+		default:
+			// Multiple tokens -- complete using functions, except for tags
+			if cmd := pprofCommands[tokens[0]]; cmd != nil && tokens[0] != "tags" {
+				lastTokenIdx := len(tokens) - 1
+				lastToken := tokens[lastTokenIdx]
+				// A leading "-" marks an ignore pattern; preserve it and
+				// complete only the remainder of the token.
+				if strings.HasPrefix(lastToken, "-") {
+					lastToken = "-" + functionCompleter(lastToken[1:], fns)
+				} else {
+					lastToken = functionCompleter(lastToken, fns)
+				}
+				return strings.Join(append(tokens[:lastTokenIdx], lastToken), " ")
+			}
+		}
+		return line
+	}
+}
+
+// matchVariableOrCommand attempts to match a string token to the prefix of a Command.
+// It returns the full name only when the prefix is unambiguous (exactly one
+// command or config option matches); otherwise it returns "".
+func matchVariableOrCommand(token string) string {
+	token = strings.ToLower(token)
+	var matches []string
+	for cmd := range pprofCommands {
+		if strings.HasPrefix(cmd, token) {
+			matches = append(matches, cmd)
+		}
+	}
+	// Config option names compete with command names for completion.
+	matches = append(matches, completeConfig(token)...)
+	if len(matches) == 1 {
+		return matches[0]
+	}
+	return ""
+}
+
+// functionCompleter replaces provided substring with a function
+// name retrieved from a profile if a single match exists. Otherwise,
+// it returns unchanged substring. It defaults to no-op if the profile
+// is not specified.
+func functionCompleter(substring string, fns []string) string {
+	found := ""
+	for _, fName := range fns {
+		if strings.Contains(fName, substring) {
+			// A second match makes the substring ambiguous; give up early
+			// and return the input unchanged.
+			if found != "" {
+				return substring
+			}
+			found = fName
+		}
+	}
+	if found != "" {
+		return found
+	}
+	return substring
+}
+
+// functionNames returns the names of all functions in the profile, in the
+// order they appear in p.Function, for use as completion candidates.
+func functionNames(p *profile.Profile) []string {
+	var fns []string
+	for _, fn := range p.Function {
+		fns = append(fns, fn.Name)
+	}
+	return fns
+}
diff --git a/plugin/debug/pkg/internal/driver/interactive_test.go b/plugin/debug/pkg/internal/driver/interactive_test.go
new file mode 100644
index 0000000..97ae709
--- /dev/null
+++ b/plugin/debug/pkg/internal/driver/interactive_test.go
@@ -0,0 +1,270 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package driver + +import ( + "fmt" + "math/rand" + "strings" + "testing" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/internal/proftest" + "m7s.live/v5/plugin/debug/pkg/internal/report" + "m7s.live/v5/plugin/debug/pkg/internal/transport" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +func TestShell(t *testing.T) { + p := &profile.Profile{} + generateReportWrapper = checkValue + defer func() { generateReportWrapper = generateReport }() + + // Use test commands and variables to exercise interactive processing + var savedCommands commands + savedCommands, pprofCommands = pprofCommands, testCommands + defer func() { pprofCommands = savedCommands }() + + savedConfig := currentConfig() + defer setCurrentConfig(savedConfig) + + shortcuts1, scScript1 := makeShortcuts(interleave(script, 2), 1) + shortcuts2, scScript2 := makeShortcuts(interleave(script, 1), 2) + + var testcases = []struct { + name string + input []string + shortcuts shortcuts + allowRx string + numAllowRxMatches int + propagateError bool + }{ + {"Random interleave of independent scripts 1", interleave(script, 0), pprofShortcuts, "", 0, false}, + {"Random interleave of independent scripts 2", interleave(script, 1), pprofShortcuts, "", 0, false}, + {"Random interleave of independent scripts with shortcuts 1", scScript1, shortcuts1, "", 0, false}, + {"Random interleave of independent scripts with shortcuts 2", scScript2, shortcuts2, "", 0, false}, + {"Group with invalid value", []string{"sort=this"}, pprofShortcuts, `invalid "sort" value`, 1, false}, + {"No special value provided for the option", []string{"sample_index"}, pprofShortcuts, `please specify a value, e.g. sample_index=`, 1, false}, + {"No string value provided for the option", []string{"focus"}, pprofShortcuts, `please specify a value, e.g. focus=`, 1, false}, + {"No float value provided for the option", []string{"divide_by"}, pprofShortcuts, `please specify a value, e.g. 
divide_by=`, 1, false}, + {"Helpful input format reminder", []string{"sample_index 0"}, pprofShortcuts, `did you mean: sample_index=0`, 1, false}, + {"Verify propagation of IO errors", []string{"**error**"}, pprofShortcuts, "", 0, true}, + } + + o := setDefaults(&plugin.Options{HTTPTransport: transport.New(nil)}) + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + setCurrentConfig(savedConfig) + pprofShortcuts = tc.shortcuts + ui := &proftest.TestUI{ + T: t, + Input: tc.input, + AllowRx: tc.allowRx, + } + o.UI = ui + + err := interactive(p, o) + if (tc.propagateError && err == nil) || (!tc.propagateError && err != nil) { + t.Errorf("%s: %v", tc.name, err) + } + + // Confirm error message written out once. + if tc.numAllowRxMatches != ui.NumAllowRxMatches { + t.Errorf("want error message to be printed %d time(s), got %d", + tc.numAllowRxMatches, ui.NumAllowRxMatches) + } + }) + } +} + +var testCommands = commands{ + "check": &command{report.Raw, nil, nil, true, "", ""}, +} + +// script contains sequences of commands to be executed for testing. Commands +// are split by semicolon and interleaved randomly, so they must be +// independent from each other. +var script = []string{ + "call_tree=true;call_tree=false;check call_tree=false;call_tree=yes;check call_tree=true", + "mean=1;check mean=true;mean=n;check mean=false", + "nodecount=-1;nodecount=-2;check nodecount=-2;nodecount=999999;check nodecount=999999", + "nodefraction=-1;nodefraction=-2.5;check nodefraction=-2.5;nodefraction=0.0001;check nodefraction=0.0001", + "focus=one;focus=two;check focus=two", + "flat=true;check sort=flat;cum=1;check sort=cum", +} + +func makeShortcuts(input []string, seed int64) (shortcuts, []string) { + rand := rand.New(rand.NewSource(seed)) + + s := shortcuts{} + var output, chunk []string + for _, l := range input { + chunk = append(chunk, l) + switch rand.Intn(3) { + case 0: + // Create a macro for commands in 'chunk'. 
+ macro := fmt.Sprintf("alias%d", len(s)) + s[macro] = chunk + output = append(output, macro) + chunk = nil + case 1: + // Append commands in 'chunk' by themselves. + output = append(output, chunk...) + chunk = nil + case 2: + // Accumulate commands into 'chunk' + } + } + output = append(output, chunk...) + return s, output +} + +func checkValue(p *profile.Profile, cmd []string, cfg config, o *plugin.Options) error { + if len(cmd) != 2 { + return fmt.Errorf("expected len(cmd)==2, got %v", cmd) + } + + input := cmd[1] + args := strings.SplitN(input, "=", 2) + if len(args) == 0 { + return fmt.Errorf("unexpected empty input") + } + name, value := args[0], "" + if len(args) == 2 { + value = args[1] + } + + f, ok := configFieldMap[name] + if !ok { + return fmt.Errorf("Could not find variable named %s", name) + } + + if got := cfg.get(f); got != value { + return fmt.Errorf("Variable %s, want %s, got %s", name, value, got) + } + return nil +} + +func interleave(input []string, seed int64) []string { + var inputs [][]string + for _, s := range input { + inputs = append(inputs, strings.Split(s, ";")) + } + rand := rand.New(rand.NewSource(seed)) + var output []string + for len(inputs) > 0 { + next := rand.Intn(len(inputs)) + output = append(output, inputs[next][0]) + if tail := inputs[next][1:]; len(tail) > 0 { + inputs[next] = tail + } else { + inputs = append(inputs[:next], inputs[next+1:]...) 
+ } + } + return output +} + +func TestInteractiveCommands(t *testing.T) { + type interactiveTestcase struct { + input string + want map[string]string + } + + testcases := []interactiveTestcase{ + { + "top 10 --cum focus1 -ignore focus2", + map[string]string{ + "nodecount": "10", + "sort": "cum", + "focus": "focus1|focus2", + "ignore": "ignore", + }, + }, + { + "top10 --cum focus1 -ignore focus2", + map[string]string{ + "nodecount": "10", + "sort": "cum", + "focus": "focus1|focus2", + "ignore": "ignore", + }, + }, + { + "dot", + map[string]string{ + "nodecount": "80", + "sort": "flat", + }, + }, + { + "tags -ignore1 -ignore2 focus1 >out", + map[string]string{ + "nodecount": "80", + "sort": "flat", + "output": "out", + "tagfocus": "focus1", + "tagignore": "ignore1|ignore2", + }, + }, + { + "weblist find -test", + map[string]string{ + "granularity": "addresses", + "noinlines": "false", + "nodecount": "0", + "sort": "flat", + "ignore": "test", + }, + }, + { + "callgrind fun -ignore >out", + map[string]string{ + "granularity": "addresses", + "nodecount": "0", + "sort": "flat", + "output": "out", + }, + }, + { + "999", + nil, // Error + }, + } + + for _, tc := range testcases { + cmd, cfg, err := parseCommandLine(strings.Fields(tc.input)) + if tc.want == nil && err != nil { + // Error expected + continue + } + if err != nil { + t.Errorf("failed on %q: %v", tc.input, err) + continue + } + + // Get report output format + c := pprofCommands[cmd[0]] + if c == nil { + t.Fatalf("unexpected nil command") + } + cfg = applyCommandOverrides(cmd[0], c.format, cfg) + + for n, want := range tc.want { + if got := cfg.get(configFieldMap[n]); got != want { + t.Errorf("failed on %q, cmd=%q, %s got %s, want %s", tc.input, cmd, n, got, want) + } + } + } +} diff --git a/plugin/debug/pkg/internal/driver/options.go b/plugin/debug/pkg/internal/driver/options.go new file mode 100644 index 0000000..f84d0fe --- /dev/null +++ b/plugin/debug/pkg/internal/driver/options.go @@ -0,0 +1,100 @@ +// 
Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" + + "m7s.live/v5/plugin/debug/pkg/internal/binutils" + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/internal/symbolizer" + "m7s.live/v5/plugin/debug/pkg/internal/transport" +) + +// setDefaults returns a new plugin.Options with zero fields sets to +// sensible defaults. +func setDefaults(o *plugin.Options) *plugin.Options { + d := &plugin.Options{} + if o != nil { + *d = *o + } + if d.Writer == nil { + d.Writer = oswriter{} + } + if d.Flagset == nil { + d.Flagset = &GoFlags{} + } + if d.Obj == nil { + d.Obj = &binutils.Binutils{} + } + if d.UI == nil { + d.UI = &stdUI{r: bufio.NewReader(os.Stdin)} + } + if d.HTTPTransport == nil { + d.HTTPTransport = transport.New(d.Flagset) + } + if d.Sym == nil { + d.Sym = &symbolizer.Symbolizer{Obj: d.Obj, UI: d.UI, Transport: d.HTTPTransport} + } + return d +} + +type stdUI struct { + r *bufio.Reader +} + +func (ui *stdUI) ReadLine(prompt string) (string, error) { + os.Stdout.WriteString(prompt) + return ui.r.ReadString('\n') +} + +func (ui *stdUI) Print(args ...interface{}) { + ui.fprint(os.Stderr, args) +} + +func (ui *stdUI) PrintErr(args ...interface{}) { + ui.fprint(os.Stderr, args) +} + +func (ui *stdUI) IsTerminal() bool { + return false +} + +func (ui *stdUI) WantBrowser() bool { + return true +} + +func (ui 
*stdUI) SetAutoComplete(func(string) string) { +} + +func (ui *stdUI) fprint(f *os.File, args []interface{}) { + text := fmt.Sprint(args...) + if !strings.HasSuffix(text, "\n") { + text += "\n" + } + f.WriteString(text) +} + +// oswriter implements the Writer interface using a regular file. +type oswriter struct{} + +func (oswriter) Open(name string) (io.WriteCloser, error) { + f, err := os.Create(name) + return f, err +} diff --git a/plugin/debug/pkg/internal/driver/settings.go b/plugin/debug/pkg/internal/driver/settings.go new file mode 100644 index 0000000..5011a06 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/settings.go @@ -0,0 +1,158 @@ +package driver + +import ( + "encoding/json" + "fmt" + "net/url" + "os" + "path/filepath" +) + +// settings holds pprof settings. +type settings struct { + // Configs holds a list of named UI configurations. + Configs []namedConfig `json:"configs"` +} + +// namedConfig associates a name with a config. +type namedConfig struct { + Name string `json:"name"` + config +} + +// settingsFileName returns the name of the file where settings should be saved. +func settingsFileName() (string, error) { + // Return "pprof/settings.json" under os.UserConfigDir(). + dir, err := os.UserConfigDir() + if err != nil { + return "", err + } + return filepath.Join(dir, "pprof", "settings.json"), nil +} + +// readSettings reads settings from fname. +func readSettings(fname string) (*settings, error) { + data, err := os.ReadFile(fname) + if err != nil { + if os.IsNotExist(err) { + return &settings{}, nil + } + return nil, fmt.Errorf("could not read settings: %w", err) + } + settings := &settings{} + if err := json.Unmarshal(data, settings); err != nil { + return nil, fmt.Errorf("could not parse settings: %w", err) + } + for i := range settings.Configs { + settings.Configs[i].resetTransient() + } + return settings, nil +} + +// writeSettings saves settings to fname. 
+// writeSettings saves settings to fname as indented JSON, creating the
+// parent directory if needed.
+func writeSettings(fname string, settings *settings) error {
+	data, err := json.MarshalIndent(settings, "", " ")
+	if err != nil {
+		return fmt.Errorf("could not encode settings: %w", err)
+	}
+
+	// create the settings directory if it does not exist
+	// XDG specifies permissions 0700 when creating settings dirs:
+	// https://specifications.freedesktop.org/basedir-spec/basedir-spec-latest.html
+	if err := os.MkdirAll(filepath.Dir(fname), 0700); err != nil {
+		return fmt.Errorf("failed to create settings directory: %w", err)
+	}
+
+	// The settings file itself is not security sensitive; 0644 is fine.
+	if err := os.WriteFile(fname, data, 0644); err != nil {
+		return fmt.Errorf("failed to write settings: %w", err)
+	}
+	return nil
+}
+
+// configMenuEntry holds information for a single config menu entry.
+type configMenuEntry struct {
+	Name       string
+	URL        string
+	Current    bool // Is this the currently selected config?
+	UserConfig bool // Is this a user-provided config?
+}
+
+// configMenu returns a list of items to add to a menu in the web UI.
+// Entry 0 is always the built-in "Default" config; user configs from the
+// settings file follow. Read errors are deliberately ignored so the menu
+// still renders with just the default entry.
+func configMenu(fname string, u url.URL) []configMenuEntry {
+	// Start with system configs.
+	configs := []namedConfig{{Name: "Default", config: defaultConfig()}}
+	if settings, err := readSettings(fname); err == nil {
+		// Add user configs.
+		configs = append(configs, settings.Configs...)
+	}
+
+	// Convert to menu entries.
+	result := make([]configMenuEntry, len(configs))
+	lastMatch := -1
+	for i, cfg := range configs {
+		// makeURL reports changed=false when cfg already matches u, i.e.
+		// this config is in effect for the current page.
+		dst, changed := cfg.config.makeURL(u)
+		if !changed {
+			lastMatch = i
+		}
+		// Use a relative URL to work in presence of stripping/redirects in webui.go.
+		rel := &url.URL{RawQuery: dst.RawQuery, ForceQuery: true}
+		result[i] = configMenuEntry{
+			Name:       cfg.Name,
+			URL:        rel.String(),
+			UserConfig: (i != 0),
+		}
+	}
+	// Mark the last matching config as current
+	if lastMatch >= 0 {
+		result[lastMatch].Current = true
+	}
+	return result
+}
+
+// editSettings edits settings by applying fn to them.
+// editSettings performs a read-modify-write cycle on the settings file:
+// it loads settings from fname, applies fn to them, and writes them back.
+func editSettings(fname string, fn func(s *settings) error) error {
+	settings, err := readSettings(fname)
+	if err != nil {
+		return err
+	}
+	if err := fn(settings); err != nil {
+		return err
+	}
+	return writeSettings(fname, settings)
+}
+
+// setConfig saves the config specified in request to fname.
+// The config name comes from the "config" query parameter; the remaining
+// parameters are overlaid on the current config before saving. An existing
+// config with the same name is overwritten.
+func setConfig(fname string, request url.URL) error {
+	q := request.Query()
+	name := q.Get("config")
+	if name == "" {
+		return fmt.Errorf("invalid config name")
+	}
+	cfg := currentConfig()
+	if err := cfg.applyURL(q); err != nil {
+		return err
+	}
+	return editSettings(fname, func(s *settings) error {
+		for i, c := range s.Configs {
+			if c.Name == name {
+				s.Configs[i].config = cfg
+				return nil
+			}
+		}
+		s.Configs = append(s.Configs, namedConfig{Name: name, config: cfg})
+		return nil
+	})
+}
+
+// removeConfig removes config from fname.
+// Returns an error if no config with that name exists.
+func removeConfig(fname, config string) error {
+	return editSettings(fname, func(s *settings) error {
+		for i, c := range s.Configs {
+			if c.Name == config {
+				s.Configs = append(s.Configs[:i], s.Configs[i+1:]...)
+				return nil
+			}
+		}
+		return fmt.Errorf("config %s not found", config)
+	})
+}
diff --git a/plugin/debug/pkg/internal/driver/settings_test.go b/plugin/debug/pkg/internal/driver/settings_test.go
new file mode 100644
index 0000000..07bf6ac
--- /dev/null
+++ b/plugin/debug/pkg/internal/driver/settings_test.go
@@ -0,0 +1,247 @@
+package driver
+
+import (
+	"net/url"
+	"os"
+	"path/filepath"
+	"reflect"
+	"testing"
+)
+
+// settingsDirAndFile returns a directory in which settings should be stored
+// and the name of the settings file. The caller must delete the directory when
+// done.
+func settingsDirAndFile(t *testing.T) (string, string) { + tmpDir, err := os.MkdirTemp("", "pprof_settings_test") + if err != nil { + t.Fatalf("error creating temporary directory: %v", err) + } + return tmpDir, filepath.Join(tmpDir, "settings.json") +} + +func TestSettings(t *testing.T) { + tmpDir, fname := settingsDirAndFile(t) + defer os.RemoveAll(tmpDir) + s, err := readSettings(fname) + if err != nil { + t.Fatalf("error reading empty settings: %v", err) + } + if len(s.Configs) != 0 { + t.Fatalf("expected empty settings; got %v", s) + } + s.Configs = append(s.Configs, namedConfig{ + Name: "Foo", + config: config{ + Focus: "focus", + // Ensure that transient fields are not saved/restored. + Output: "output", + SourcePath: "source", + TrimPath: "trim", + DivideBy: -2, + }, + }) + if err := writeSettings(fname, s); err != nil { + t.Fatal(err) + } + s2, err := readSettings(fname) + if err != nil { + t.Fatal(err) + } + + // Change the transient fields to their expected values. + s.Configs[0].resetTransient() + if !reflect.DeepEqual(s, s2) { + t.Fatalf("ReadSettings = %v; expected %v", s2, s) + } +} + +func TestParseConfig(t *testing.T) { + // Use all the fields to check they are saved/restored from URL. 
+ cfg := config{ + Output: "", + DropNegative: true, + CallTree: true, + RelativePercentages: true, + Unit: "auto", + CompactLabels: true, + SourcePath: "", + TrimPath: "", + NodeCount: 10, + NodeFraction: 0.1, + EdgeFraction: 0.2, + Trim: true, + Focus: "focus", + Ignore: "ignore", + PruneFrom: "prune_from", + Hide: "hide", + Show: "show", + ShowFrom: "show_from", + TagFocus: "tagfocus", + TagIgnore: "tagignore", + TagShow: "tagshow", + TagHide: "taghide", + DivideBy: 1, + Mean: true, + Normalize: true, + Sort: "cum", + Granularity: "functions", + NoInlines: true, + ShowColumns: true, + } + url, changed := cfg.makeURL(url.URL{}) + if !changed { + t.Error("applyConfig returned changed=false after applying non-empty config") + } + cfg2 := defaultConfig() + if err := cfg2.applyURL(url.Query()); err != nil { + t.Fatalf("fromURL failed: %v", err) + } + if !reflect.DeepEqual(cfg, cfg2) { + t.Fatalf("parsed config = %+v; expected match with %+v", cfg2, cfg) + } + if url2, changed := cfg.makeURL(url); changed { + t.Errorf("ApplyConfig returned changed=true after applying same config (%q instead of expected %q", url2.String(), url.String()) + } +} + +// TestDefaultConfig verifies that default config values are omitted from URL. +func TestDefaultConfig(t *testing.T) { + cfg := defaultConfig() + url, changed := cfg.makeURL(url.URL{}) + if changed { + t.Error("applyConfig returned changed=true after applying default config") + } + if url.String() != "" { + t.Errorf("applyConfig returned %q; expecting %q", url.String(), "") + } +} + +func TestConfigMenu(t *testing.T) { + // Save some test settings. 
+ tmpDir, fname := settingsDirAndFile(t) + defer os.RemoveAll(tmpDir) + a, b := defaultConfig(), defaultConfig() + a.Focus, b.Focus = "foo", "bar" + s := &settings{ + Configs: []namedConfig{ + {Name: "A", config: a}, + {Name: "B", config: b}, + }, + } + if err := writeSettings(fname, s); err != nil { + t.Fatal("error writing settings", err) + } + + pageURL, _ := url.Parse("/top?f=foo") + menu := configMenu(fname, *pageURL) + want := []configMenuEntry{ + {Name: "Default", URL: "?", Current: false, UserConfig: false}, + {Name: "A", URL: "?f=foo", Current: true, UserConfig: true}, + {Name: "B", URL: "?f=bar", Current: false, UserConfig: true}, + } + if !reflect.DeepEqual(menu, want) { + t.Errorf("ConfigMenu returned %v; want %v", menu, want) + } +} + +func TestEditConfig(t *testing.T) { + tmpDir, fname := settingsDirAndFile(t) + defer os.RemoveAll(tmpDir) + + type testConfig struct { + name string + focus string + hide string + } + type testCase struct { + remove bool + request string + expect []testConfig + } + for _, c := range []testCase{ + // Create setting c1 + {false, "/?config=c1&f=foo", []testConfig{ + {"c1", "foo", ""}, + }}, + // Create setting c2 + {false, "/?config=c2&h=bar", []testConfig{ + {"c1", "foo", ""}, + {"c2", "", "bar"}, + }}, + // Overwrite c1 + {false, "/?config=c1&f=baz", []testConfig{ + {"c1", "baz", ""}, + {"c2", "", "bar"}, + }}, + // Delete c2 + {true, "c2", []testConfig{ + {"c1", "baz", ""}, + }}, + } { + if c.remove { + if err := removeConfig(fname, c.request); err != nil { + t.Errorf("error removing config %s: %v", c.request, err) + continue + } + } else { + req, err := url.Parse(c.request) + if err != nil { + t.Errorf("error parsing request %q: %v", c.request, err) + continue + } + if err := setConfig(fname, *req); err != nil { + t.Errorf("error saving request %q: %v", c.request, err) + continue + } + } + + // Check resulting settings. 
+ s, err := readSettings(fname) + if err != nil { + t.Errorf("error reading settings after applying %q: %v", c.request, err) + continue + } + // Convert to a list that can be compared to c.expect + got := make([]testConfig, len(s.Configs)) + for i, c := range s.Configs { + got[i] = testConfig{c.Name, c.Focus, c.Hide} + } + if !reflect.DeepEqual(got, c.expect) { + t.Errorf("Settings after applying %q = %v; want %v", c.request, got, c.expect) + } + } +} + +func TestAssign(t *testing.T) { + baseConfig := currentConfig() + defer setCurrentConfig(baseConfig) + + // Test assigning to a simple field. + if err := configure("nodecount", "20"); err != nil { + t.Errorf("error setting nodecount: %v", err) + } + if n := currentConfig().NodeCount; n != 20 { + t.Errorf("incorrect nodecount; expecting 20, got %d", n) + } + + // Test assignment to a group field. + if err := configure("granularity", "files"); err != nil { + t.Errorf("error setting granularity: %v", err) + } + if g := currentConfig().Granularity; g != "files" { + t.Errorf("incorrect granularity; expecting %v, got %v", "files", g) + } + + // Test assignment to one choice of a group field. + if err := configure("lines", "t"); err != nil { + t.Errorf("error setting lines: %v", err) + } + if g := currentConfig().Granularity; g != "lines" { + t.Errorf("incorrect granularity; expecting %v, got %v", "lines", g) + } + + // Test assignment to invalid choice, + if err := configure("granularity", "cheese"); err == nil { + t.Errorf("allowed assignment of invalid granularity") + } +} diff --git a/plugin/debug/pkg/internal/driver/stacks.go b/plugin/debug/pkg/internal/driver/stacks.go new file mode 100644 index 0000000..5a7e590 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/stacks.go @@ -0,0 +1,60 @@ +// Copyright 2022 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "encoding/json" + "html/template" + "net/http" + + "m7s.live/v5/plugin/debug/pkg/internal/measurement" +) + +// stackView generates the flamegraph view. +func (ui *webInterface) stackView(w http.ResponseWriter, req *http.Request) { + // Get all data in a report. + rpt, errList := ui.makeReport(w, req, []string{"svg"}, func(cfg *config) { + cfg.CallTree = true + cfg.Trim = false + if cfg.Granularity == "" { + cfg.Granularity = "filefunctions" + } + }) + if rpt == nil { + return // error already reported + } + + // Make stack data and generate corresponding JSON. + stacks := rpt.Stacks() + b, err := json.Marshal(stacks) + if err != nil { + http.Error(w, "error serializing stacks for flame graph", + http.StatusInternalServerError) + ui.options.UI.PrintErr(err) + return + } + + nodes := make([]string, len(stacks.Sources)) + for i, src := range stacks.Sources { + nodes[i] = src.FullName + } + nodes[0] = "" // root is not a real node + + ui.render(w, req, "stacks", rpt, errList, stacks.Legend(), webArgs{ + Stacks: template.JS(b), + Nodes: nodes, + UnitDefs: measurement.UnitTypes, + }) +} diff --git a/plugin/debug/pkg/internal/driver/svg.go b/plugin/debug/pkg/internal/driver/svg.go new file mode 100644 index 0000000..cca554c --- /dev/null +++ b/plugin/debug/pkg/internal/driver/svg.go @@ -0,0 +1,80 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "regexp" + "strings" + + "m7s.live/v5/plugin/debug/pkg/third_party/svgpan" +) + +var ( + viewBox = regexp.MustCompile(``) +) + +// massageSVG enhances the SVG output from DOT to provide better +// panning inside a web browser. It uses the svgpan library, which is +// embedded into the svgpan.JSSource variable. +func massageSVG(svg string) string { + // Work around for dot bug which misses quoting some ampersands, + // resulting on unparsable SVG. + svg = strings.Replace(svg, "&;", "&;", -1) + + // Dot's SVG output is + // + // + // + // ... + // + // + // + // Change it to + // + // + + // ` + // + // + // ... + // + // + // + + if loc := viewBox.FindStringIndex(svg); loc != nil { + svg = svg[:loc[0]] + + `` + svgpan.JSSource + `` + + `` + + svg[loc[0]:] + } + + if loc := svgClose.FindStringIndex(svg); loc != nil { + svg = svg[:loc[0]] + + `` + + svg[loc[0]:] + } + + return svg +} diff --git a/plugin/debug/pkg/internal/driver/tagroot.go b/plugin/debug/pkg/internal/driver/tagroot.go new file mode 100644 index 0000000..ad14230 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/tagroot.go @@ -0,0 +1,133 @@ +package driver + +import ( + "strings" + + "m7s.live/v5/plugin/debug/pkg/internal/measurement" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +// addLabelNodes adds pseudo stack frames "label:value" to each Sample with +// labels matching the supplied keys. +// +// rootKeys adds frames at the root of the callgraph (first key becomes new root). 
+// leafKeys adds frames at the leaf of the callgraph (last key becomes new leaf). +// +// Returns whether there were matches found for the label keys. +func addLabelNodes(p *profile.Profile, rootKeys, leafKeys []string, outputUnit string) (rootm, leafm bool) { + // Find where to insert the new locations and functions at the end of + // their ID spaces. + var maxLocID uint64 + var maxFunctionID uint64 + for _, loc := range p.Location { + if loc.ID > maxLocID { + maxLocID = loc.ID + } + } + for _, f := range p.Function { + if f.ID > maxFunctionID { + maxFunctionID = f.ID + } + } + nextLocID := maxLocID + 1 + nextFuncID := maxFunctionID + 1 + + // Intern the new locations and functions we are generating. + type locKey struct { + functionName, fileName string + } + locs := map[locKey]*profile.Location{} + + internLoc := func(locKey locKey) *profile.Location { + loc, found := locs[locKey] + if found { + return loc + } + + function := &profile.Function{ + ID: nextFuncID, + Name: locKey.functionName, + Filename: locKey.fileName, + } + nextFuncID++ + p.Function = append(p.Function, function) + + loc = &profile.Location{ + ID: nextLocID, + Line: []profile.Line{ + { + Function: function, + }, + }, + } + nextLocID++ + p.Location = append(p.Location, loc) + locs[locKey] = loc + return loc + } + + makeLabelLocs := func(s *profile.Sample, keys []string) ([]*profile.Location, bool) { + var locs []*profile.Location + var match bool + for i := range keys { + // Loop backwards, ensuring the first tag is closest to the root, + // and the last tag is closest to the leaves. 
+ k := keys[len(keys)-1-i] + values := formatLabelValues(s, k, outputUnit) + if len(values) > 0 { + match = true + } + locKey := locKey{ + functionName: strings.Join(values, ","), + fileName: k, + } + loc := internLoc(locKey) + locs = append(locs, loc) + } + return locs, match + } + + for _, s := range p.Sample { + rootsToAdd, sampleMatchedRoot := makeLabelLocs(s, rootKeys) + if sampleMatchedRoot { + rootm = true + } + leavesToAdd, sampleMatchedLeaf := makeLabelLocs(s, leafKeys) + if sampleMatchedLeaf { + leafm = true + } + + if len(leavesToAdd)+len(rootsToAdd) == 0 { + continue + } + + var newLocs []*profile.Location + newLocs = append(newLocs, leavesToAdd...) + newLocs = append(newLocs, s.Location...) + newLocs = append(newLocs, rootsToAdd...) + s.Location = newLocs + } + return +} + +// formatLabelValues returns all the string and numeric labels in Sample, with +// the numeric labels formatted according to outputUnit. +func formatLabelValues(s *profile.Sample, k string, outputUnit string) []string { + var values []string + values = append(values, s.Label[k]...) 
// formatLabelValues returns all the string and numeric labels in Sample, with
// the numeric labels formatted according to outputUnit.
func formatLabelValues(s *profile.Sample, k string, outputUnit string) []string {
	var values []string
	values = append(values, s.Label[k]...)
	numLabels := s.NumLabel[k]
	numUnits := s.NumUnit[k]
	// A non-empty unit list that does not line up one-to-one with the numeric
	// labels is malformed; in that case the numeric labels are dropped.
	if len(numLabels) != len(numUnits) && len(numUnits) != 0 {
		return values
	}
	for i, numLabel := range numLabels {
		var value string
		if len(numUnits) != 0 {
			value = measurement.ScaledLabel(numLabel, numUnits[i], outputUnit)
		} else {
			// No units recorded for this key: format the raw value unscaled.
			value = measurement.ScaledLabel(numLabel, "", "")
		}
		values = append(values, value)
	}
	return values
}

// mainBinary is the mapping file name used by the test profile below.
const mainBinary = "/bin/main"

// cpuF is the function table shared by the test profile.
var cpuF = []*profile.Function{
	{ID: 1, Name: "main", SystemName: "main", Filename: "main.c"},
	{ID: 2, Name: "foo", SystemName: "foo", Filename: "foo.c"},
	{ID: 3, Name: "foo_caller", SystemName: "foo_caller", Filename: "foo.c"},
	{ID: 4, Name: "bar", SystemName: "bar", Filename: "bar.c"},
}

// cpuM is the mapping table shared by the test profile.
var cpuM = []*profile.Mapping{
	{
		ID:              1,
		Start:           0x10000,
		Limit:           0x40000,
		File:            mainBinary,
		HasFunctions:    true,
		HasFilenames:    true,
		HasLineNumbers:  true,
		HasInlineFrames: true,
	},
	{
		ID:              2,
		Start:           0x1000,
		Limit:           0x4000,
		File:            "/lib/lib.so",
		HasFunctions:    true,
		HasFilenames:    true,
		HasLineNumbers:  true,
		HasInlineFrames: true,
	},
}

// cpuL is the location table shared by the test profile; locations with two
// Line entries model inlined frames.
var cpuL = []*profile.Location{
	{
		ID:      1000,
		Mapping: cpuM[1],
		Address: 0x1000,
		Line: []profile.Line{
			{Function: cpuF[0], Line: 1},
		},
	},
	{
		ID:      2000,
		Mapping: cpuM[0],
		Address: 0x2000,
		Line: []profile.Line{
			{Function: cpuF[1], Line: 2},
			{Function: cpuF[2], Line: 1},
		},
	},
	{
		ID:      3000,
		Mapping: cpuM[0],
		Address: 0x3000,
		Line: []profile.Line{
			{Function: cpuF[1], Line: 2},
			{Function: cpuF[2], Line: 1},
		},
	},
	{
		ID:      3001,
		Mapping: cpuM[0],
		Address: 0x3001,
		Line: []profile.Line{
			{Function: cpuF[2], Line: 2},
		},
	},
	{
		ID:      3002,
		Mapping: cpuM[0],
		Address: 0x3002,
		Line: []profile.Line{
			{Function: cpuF[2], Line: 3},
		},
	},
	{
		ID:      3003,
		Mapping: cpuM[0],
		Address: 0x3003,
		Line: []profile.Line{
			{Function: cpuF[3], Line: 1},
		},
	},
}

// testProfile1 is a CPU profile with samples carrying string labels
// (key1/key2/key3) and numeric labels (allocations) with assorted units,
// exercising every branch of addLabelNodes/formatLabelValues.
var testProfile1 = &profile.Profile{
	TimeNanos:     10000,
	PeriodType:    &profile.ValueType{Type: "cpu", Unit: "milliseconds"},
	Period:        1,
	DurationNanos: 10e9,
	SampleType: []*profile.ValueType{
		{Type: "samples", Unit: "count"},
		{Type: "cpu", Unit: "milliseconds"},
	},
	Sample: []*profile.Sample{
		{
			Location: []*profile.Location{cpuL[0]},
			Value:    []int64{1000, 1000},
			Label: map[string][]string{
				"key1": {"tag1"},
				"key2": {"tag1"},
			},
		},
		{
			Location: []*profile.Location{cpuL[1], cpuL[0]},
			Value:    []int64{100, 100},
			Label: map[string][]string{
				"key1": {"tag2"},
				"key3": {"tag2"},
			},
		},
		{
			Location: []*profile.Location{cpuL[2], cpuL[0]},
			Value:    []int64{10, 10},
			Label: map[string][]string{
				"key1": {"tag3"},
				"key2": {"tag2"},
			},
			NumLabel: map[string][]int64{
				"allocations": {1024},
			},
			NumUnit: map[string][]string{
				"allocations": {""},
			},
		},
		{
			Location: []*profile.Location{cpuL[3], cpuL[0]},
			Value:    []int64{10000, 10000},
			Label: map[string][]string{
				"key1": {"tag4"},
				"key2": {"tag1"},
			},
			NumLabel: map[string][]int64{
				"allocations": {1024, 2048},
			},
			NumUnit: map[string][]string{
				"allocations": {"bytes", "b"},
			},
		},
		{
			Location: []*profile.Location{cpuL[4], cpuL[0]},
			Value:    []int64{1, 1},
			Label: map[string][]string{
				"key1": {"tag4"},
				"key2": {"tag1", "tag5"},
			},
			NumLabel: map[string][]int64{
				"allocations": {1024, 1},
			},
			NumUnit: map[string][]string{
				"allocations": {"byte", "kilobyte"},
			},
		},
		{
			Location: []*profile.Location{cpuL[5], cpuL[0]},
			Value:    []int64{200, 200},
			NumLabel: map[string][]int64{
				"allocations": {1024},
			},
		},
	},
	Location: cpuL,
	Function: cpuF,
	Mapping:  cpuM,
}
// TestAddLabelNodesMatchBooleans checks both the synthesized frames and the
// (rootm, leafm) match booleans returned by addLabelNodes for a range of
// tagroot/tagleaf key combinations against testProfile1.
func TestAddLabelNodesMatchBooleans(t *testing.T) {
	type addLabelNodesTestcase struct {
		name             string
		tagroot, tagleaf []string
		outputUnit       string
		rootm, leafm     bool
		// wantSampleFuncs contains expected stack functions and sample value after
		// adding nodes, in the same order as in the profile. The format is as
		// returned by the stackCollapse function, i.e.
		// "rootFn(file);...;leafFn(file) value".
		wantSampleFuncs []string
	}
	for _, tc := range []addLabelNodesTestcase{
		{
			name: "Without tagroot or tagleaf, add no extra nodes, and should not match",
			wantSampleFuncs: []string{
				"main(main.c) 1000",
				"main(main.c);foo(foo.c);foo_caller(foo.c) 100",
				"main(main.c);foo(foo.c);foo_caller(foo.c) 10",
				"main(main.c);foo_caller(foo.c) 10000",
				"main(main.c);foo_caller(foo.c) 1",
				"main(main.c);bar(bar.c) 200",
			},
		},
		{
			name:    "Keys that aren't found add empty nodes, and should not match",
			tagroot: []string{"key404"},
			tagleaf: []string{"key404"},
			wantSampleFuncs: []string{
				"(key404);main(main.c);(key404) 1000",
				"(key404);main(main.c);foo(foo.c);foo_caller(foo.c);(key404) 100",
				"(key404);main(main.c);foo(foo.c);foo_caller(foo.c);(key404) 10",
				"(key404);main(main.c);foo_caller(foo.c);(key404) 10000",
				"(key404);main(main.c);foo_caller(foo.c);(key404) 1",
				"(key404);main(main.c);bar(bar.c);(key404) 200",
			},
		},
		{
			name:    "tagroot adds nodes for key1 and reports a match",
			tagroot: []string{"key1"},
			rootm:   true,
			wantSampleFuncs: []string{
				"tag1(key1);main(main.c) 1000",
				"tag2(key1);main(main.c);foo(foo.c);foo_caller(foo.c) 100",
				"tag3(key1);main(main.c);foo(foo.c);foo_caller(foo.c) 10",
				"tag4(key1);main(main.c);foo_caller(foo.c) 10000",
				"tag4(key1);main(main.c);foo_caller(foo.c) 1",
				"(key1);main(main.c);bar(bar.c) 200",
			},
		},
		{
			name:    "tagroot adds nodes for key2 and reports a match",
			tagroot: []string{"key2"},
			rootm:   true,
			wantSampleFuncs: []string{
				"tag1(key2);main(main.c) 1000",
				"(key2);main(main.c);foo(foo.c);foo_caller(foo.c) 100",
				"tag2(key2);main(main.c);foo(foo.c);foo_caller(foo.c) 10",
				"tag1(key2);main(main.c);foo_caller(foo.c) 10000",
				"tag1,tag5(key2);main(main.c);foo_caller(foo.c) 1",
				"(key2);main(main.c);bar(bar.c) 200",
			},
		},
		{
			name:    "tagleaf adds nodes for key1 and reports a match",
			tagleaf: []string{"key1"},
			leafm:   true,
			wantSampleFuncs: []string{
				"main(main.c);tag1(key1) 1000",
				"main(main.c);foo(foo.c);foo_caller(foo.c);tag2(key1) 100",
				"main(main.c);foo(foo.c);foo_caller(foo.c);tag3(key1) 10",
				"main(main.c);foo_caller(foo.c);tag4(key1) 10000",
				"main(main.c);foo_caller(foo.c);tag4(key1) 1",
				"main(main.c);bar(bar.c);(key1) 200",
			},
		},
		{
			name:    "tagleaf adds nodes for key3 and reports a match",
			tagleaf: []string{"key3"},
			leafm:   true,
			wantSampleFuncs: []string{
				"main(main.c);(key3) 1000",
				"main(main.c);foo(foo.c);foo_caller(foo.c);tag2(key3) 100",
				"main(main.c);foo(foo.c);foo_caller(foo.c);(key3) 10",
				"main(main.c);foo_caller(foo.c);(key3) 10000",
				"main(main.c);foo_caller(foo.c);(key3) 1",
				"main(main.c);bar(bar.c);(key3) 200",
			},
		},
		{
			name:    "tagroot adds nodes for key1,key2 in order and reports a match",
			tagroot: []string{"key1", "key2"},
			rootm:   true,
			wantSampleFuncs: []string{
				"tag1(key1);tag1(key2);main(main.c) 1000",
				"tag2(key1);(key2);main(main.c);foo(foo.c);foo_caller(foo.c) 100",
				"tag3(key1);tag2(key2);main(main.c);foo(foo.c);foo_caller(foo.c) 10",
				"tag4(key1);tag1(key2);main(main.c);foo_caller(foo.c) 10000",
				"tag4(key1);tag1,tag5(key2);main(main.c);foo_caller(foo.c) 1",
				"(key1);(key2);main(main.c);bar(bar.c) 200",
			},
		},
		{
			name:    "tagleaf adds nodes for key1,key2 in order and reports a match",
			tagleaf: []string{"key1", "key2"},
			leafm:   true,
			wantSampleFuncs: []string{
				"main(main.c);tag1(key1);tag1(key2) 1000",
				"main(main.c);foo(foo.c);foo_caller(foo.c);tag2(key1);(key2) 100",
				"main(main.c);foo(foo.c);foo_caller(foo.c);tag3(key1);tag2(key2) 10",
				"main(main.c);foo_caller(foo.c);tag4(key1);tag1(key2) 10000",
				"main(main.c);foo_caller(foo.c);tag4(key1);tag1,tag5(key2) 1",
				"main(main.c);bar(bar.c);(key1);(key2) 200",
			},
		},
		{
			name:    "Numeric units are added with units with tagleaf",
			tagleaf: []string{"allocations"},
			leafm:   true,
			wantSampleFuncs: []string{
				"main(main.c);(allocations) 1000",
				"main(main.c);foo(foo.c);foo_caller(foo.c);(allocations) 100",
				"main(main.c);foo(foo.c);foo_caller(foo.c);1024(allocations) 10",
				"main(main.c);foo_caller(foo.c);1024B,2048B(allocations) 10000",
				"main(main.c);foo_caller(foo.c);1024B,1024B(allocations) 1",
				"main(main.c);bar(bar.c);1024(allocations) 200",
			},
		},
		{
			name:    "Numeric units are added with units with tagroot",
			tagroot: []string{"allocations"},
			rootm:   true,
			wantSampleFuncs: []string{
				"(allocations);main(main.c) 1000",
				"(allocations);main(main.c);foo(foo.c);foo_caller(foo.c) 100",
				"1024(allocations);main(main.c);foo(foo.c);foo_caller(foo.c) 10",
				"1024B,2048B(allocations);main(main.c);foo_caller(foo.c) 10000",
				"1024B,1024B(allocations);main(main.c);foo_caller(foo.c) 1",
				"1024(allocations);main(main.c);bar(bar.c) 200",
			},
		},
		{
			name:       "Numeric labels are formatted according to outputUnit",
			outputUnit: "kB",
			tagleaf:    []string{"allocations"},
			leafm:      true,
			wantSampleFuncs: []string{
				"main(main.c);(allocations) 1000",
				"main(main.c);foo(foo.c);foo_caller(foo.c);(allocations) 100",
				"main(main.c);foo(foo.c);foo_caller(foo.c);1024(allocations) 10",
				"main(main.c);foo_caller(foo.c);1kB,2kB(allocations) 10000",
				"main(main.c);foo_caller(foo.c);1kB,1kB(allocations) 1",
				"main(main.c);bar(bar.c);1024(allocations) 200",
			},
		},
		{
			name:    "Numeric units with no units are handled properly by tagleaf",
			tagleaf: []string{"allocations"},
			leafm:   true,
			wantSampleFuncs: []string{
				"main(main.c);(allocations) 1000",
				"main(main.c);foo(foo.c);foo_caller(foo.c);(allocations) 100",
				"main(main.c);foo(foo.c);foo_caller(foo.c);1024(allocations) 10",
				"main(main.c);foo_caller(foo.c);1024B,2048B(allocations) 10000",
				"main(main.c);foo_caller(foo.c);1024B,1024B(allocations) 1",
				"main(main.c);bar(bar.c);1024(allocations) 200",
			},
		},
	} {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			// Work on a copy: addLabelNodes mutates the profile in place.
			p := testProfile1.Copy()
			rootm, leafm := addLabelNodes(p, tc.tagroot, tc.tagleaf, tc.outputUnit)
			if rootm != tc.rootm {
				t.Errorf("Got rootm=%v, want=%v", rootm, tc.rootm)
			}
			if leafm != tc.leafm {
				t.Errorf("Got leafm=%v, want=%v", leafm, tc.leafm)
			}
			if got, want := strings.Join(stackCollapse(p), "\n")+"\n", strings.Join(tc.wantSampleFuncs, "\n")+"\n"; got != want {
				diff, err := proftest.Diff([]byte(want), []byte(got))
				if err != nil {
					t.Fatalf("Failed to get diff: %v", err)
				}
				t.Errorf("Profile samples got diff(want->got):\n%s", diff)
			}
		})
	}
}

// stackCollapse returns a slice of strings where each string represents one
// profile sample in Brendan Gregg's "Folded Stacks" format:
// "rootFn(rootFile);...;leafFn(leafFile) sampleValue". This
// allows the expected values for test cases to be specified in human-readable
// strings.
func stackCollapse(p *profile.Profile) []string {
	var ret []string
	for _, s := range p.Sample {
		var funcs []string
		// Sample locations are leaf-first; iterate in reverse so the output
		// reads root-to-leaf.
		for i := range s.Location {
			loc := s.Location[len(s.Location)-1-i]
			for _, line := range loc.Line {
				funcs = append(funcs, fmt.Sprintf("%s(%s)", line.Function.Name, line.Function.Filename))
			}
		}
		ret = append(ret, fmt.Sprintf("%s %d", strings.Join(funcs, ";"), s.Value[0]))
	}
	return ret
}
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "fmt" + "os" + "path/filepath" + "sync" +) + +// newTempFile returns a new output file in dir with the provided prefix and suffix. +func newTempFile(dir, prefix, suffix string) (*os.File, error) { + for index := 1; index < 10000; index++ { + switch f, err := os.OpenFile(filepath.Join(dir, fmt.Sprintf("%s%03d%s", prefix, index, suffix)), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0666); { + case err == nil: + return f, nil + case !os.IsExist(err): + return nil, err + } + } + // Give up + return nil, fmt.Errorf("could not create file of the form %s%03d%s", prefix, 1, suffix) +} + +var tempFiles []string +var tempFilesMu = sync.Mutex{} + +// deferDeleteTempFile marks a file to be deleted by next call to Cleanup() +func deferDeleteTempFile(path string) { + tempFilesMu.Lock() + tempFiles = append(tempFiles, path) + tempFilesMu.Unlock() +} + +// cleanupTempFiles removes any temporary files selected for deferred cleaning. 
+func cleanupTempFiles() error { + tempFilesMu.Lock() + defer tempFilesMu.Unlock() + var lastErr error + for _, f := range tempFiles { + if err := os.Remove(f); err != nil { + lastErr = err + } + } + tempFiles = nil + return lastErr +} diff --git a/plugin/debug/pkg/internal/driver/tempfile_test.go b/plugin/debug/pkg/internal/driver/tempfile_test.go new file mode 100644 index 0000000..7004353 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/tempfile_test.go @@ -0,0 +1,55 @@ +package driver + +import ( + "os" + "sync" + "testing" +) + +func TestNewTempFile(t *testing.T) { + const n = 100 + // Line up ready to execute goroutines with a read-write lock. + var mu sync.RWMutex + mu.Lock() + var wg sync.WaitGroup + errc := make(chan error, n) + for i := 0; i < n; i++ { + wg.Add(1) + go func() { + mu.RLock() + defer mu.RUnlock() + defer wg.Done() + f, err := newTempFile(os.TempDir(), "profile", ".tmp") + errc <- err + deferDeleteTempFile(f.Name()) + f.Close() + }() + } + // Start the file creation race. + mu.Unlock() + // Wait for the goroutines to finish. 
+ wg.Wait() + + for i := 0; i < n; i++ { + if err := <-errc; err != nil { + t.Fatalf("newTempFile(): got %v, want no error", err) + } + } + if len(tempFiles) != n { + t.Errorf("len(tempFiles): got %d, want %d", len(tempFiles), n) + } + names := map[string]bool{} + for _, name := range tempFiles { + if names[name] { + t.Errorf("got temp file %s created multiple times", name) + break + } + names[name] = true + } + if err := cleanupTempFiles(); err != nil { + t.Errorf("cleanupTempFiles(): got error %v, want no error", err) + } + if len(tempFiles) != 0 { + t.Errorf("len(tempFiles) after the cleanup: got %d, want 0", len(tempFiles)) + } +} diff --git a/plugin/debug/pkg/internal/driver/testdata/cppbench.contention b/plugin/debug/pkg/internal/driver/testdata/cppbench.contention new file mode 100644 index 0000000..66a64c9 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/cppbench.contention @@ -0,0 +1,24 @@ +--- contentionz 1 --- +cycles/second = 3201000000 +sampling period = 100 +ms since reset = 16502830 +discarded samples = 0 + 19490304 27 @ 0xbccc97 0xc61202 0x42ed5f 0x42edc1 0x42e15a 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 768 1 @ 0xbccc97 0xa42dc7 0xa456e4 0x7fcdc2ff214e + 5760 2 @ 0xbccc97 0xb82b73 0xb82bcb 0xb87eab 0xb8814c 0x4e969d 0x4faa17 0x4fc5f6 0x4fd028 0x4fd230 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 569088 1 @ 0xbccc97 0xb82b73 0xb82bcb 0xb87f08 0xb8814c 0x42ed5f 0x42edc1 0x42e15a 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 2432 1 @ 0xbccc97 0xb82b73 0xb82bcb 0xb87eab 0xb8814c 0x7aa74c 0x7ab844 0x7ab914 0x79e9e9 0x79e326 0x4d299e 0x4d4b7b 0x4b7be8 0x4b7ff1 0x4d2dae 0x79e80a + 2034816 3 @ 0xbccc97 0xb82f0f 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e +--- Memory map: --- + 00400000-00fcb000: cppbench_server_main + 7fcdc231e000-7fcdc2321000: /libnss_cache-2.15.so + 
7fcdc2522000-7fcdc252e000: /libnss_files-2.15.so + 7fcdc272f000-7fcdc28dd000: /libc-2.15.so + 7fcdc2ae7000-7fcdc2be2000: /libm-2.15.so + 7fcdc2de3000-7fcdc2dea000: /librt-2.15.so + 7fcdc2feb000-7fcdc3003000: /libpthread-2.15.so + 7fcdc3208000-7fcdc320a000: /libdl-2.15.so + 7fcdc340c000-7fcdc3415000: /libcrypt-2.15.so + 7fcdc3645000-7fcdc3669000: /ld-2.15.so + 7fff86bff000-7fff86c00000: [vdso] + ffffffffff600000-ffffffffff601000: [vsyscall] diff --git a/plugin/debug/pkg/internal/driver/testdata/cppbench.cpu b/plugin/debug/pkg/internal/driver/testdata/cppbench.cpu new file mode 100644 index 0000000..95c22e1 Binary files /dev/null and b/plugin/debug/pkg/internal/driver/testdata/cppbench.cpu differ diff --git a/plugin/debug/pkg/internal/driver/testdata/cppbench.cpu_no_samples_type b/plugin/debug/pkg/internal/driver/testdata/cppbench.cpu_no_samples_type new file mode 100644 index 0000000..266fd70 Binary files /dev/null and b/plugin/debug/pkg/internal/driver/testdata/cppbench.cpu_no_samples_type differ diff --git a/plugin/debug/pkg/internal/driver/testdata/cppbench.small.contention b/plugin/debug/pkg/internal/driver/testdata/cppbench.small.contention new file mode 100644 index 0000000..230cd90 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/cppbench.small.contention @@ -0,0 +1,19 @@ +--- contentionz 1 --- +cycles/second = 3201000000 +sampling period = 100 +ms since reset = 16502830 +discarded samples = 0 + 100 10 @ 0xbccc97 0xc61202 0x42ed5f 0x42edc1 0x42e15a 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e +--- Memory map: --- + 00400000-00fcb000: cppbench_server_main + 7fcdc231e000-7fcdc2321000: /libnss_cache-2.15.so + 7fcdc2522000-7fcdc252e000: /libnss_files-2.15.so + 7fcdc272f000-7fcdc28dd000: /libc-2.15.so + 7fcdc2ae7000-7fcdc2be2000: /libm-2.15.so + 7fcdc2de3000-7fcdc2dea000: /librt-2.15.so + 7fcdc2feb000-7fcdc3003000: /libpthread-2.15.so + 7fcdc3208000-7fcdc320a000: /libdl-2.15.so + 7fcdc340c000-7fcdc3415000: 
/libcrypt-2.15.so + 7fcdc3645000-7fcdc3669000: /ld-2.15.so + 7fff86bff000-7fff86c00000: [vdso] + ffffffffff600000-ffffffffff601000: [vsyscall] diff --git a/plugin/debug/pkg/internal/driver/testdata/file1000.src b/plugin/debug/pkg/internal/driver/testdata/file1000.src new file mode 100644 index 0000000..b53eeca --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/file1000.src @@ -0,0 +1,17 @@ +line1 +line2 +line3 +line4 +line5 +line6 +line7 +line8 +line9 +line0 +line1 +line2 +line3 +line4 +line5 + + diff --git a/plugin/debug/pkg/internal/driver/testdata/file2000.src b/plugin/debug/pkg/internal/driver/testdata/file2000.src new file mode 100644 index 0000000..b53eeca --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/file2000.src @@ -0,0 +1,17 @@ +line1 +line2 +line3 +line4 +line5 +line6 +line7 +line8 +line9 +line0 +line1 +line2 +line3 +line4 +line5 + + diff --git a/plugin/debug/pkg/internal/driver/testdata/file3000.src b/plugin/debug/pkg/internal/driver/testdata/file3000.src new file mode 100644 index 0000000..b53eeca --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/file3000.src @@ -0,0 +1,17 @@ +line1 +line2 +line3 +line4 +line5 +line6 +line7 +line8 +line9 +line0 +line1 +line2 +line3 +line4 +line5 + + diff --git a/plugin/debug/pkg/internal/driver/testdata/go.crc32.cpu b/plugin/debug/pkg/internal/driver/testdata/go.crc32.cpu new file mode 100644 index 0000000..ce08313 Binary files /dev/null and b/plugin/debug/pkg/internal/driver/testdata/go.crc32.cpu differ diff --git a/plugin/debug/pkg/internal/driver/testdata/go.nomappings.crash b/plugin/debug/pkg/internal/driver/testdata/go.nomappings.crash new file mode 100644 index 0000000..4915d5a Binary files /dev/null and b/plugin/debug/pkg/internal/driver/testdata/go.nomappings.crash differ diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.contention.cum.files.dot b/plugin/debug/pkg/internal/driver/testdata/pprof.contention.cum.files.dot new file mode 100644 index 0000000..eedfacf 
--- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.contention.cum.files.dot @@ -0,0 +1,10 @@ +digraph "unnamed" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "Build ID: buildid-contention" [shape=box fontsize=16 label="Build ID: buildid-contention\lComment #1\lComment #2\lType: delay\lShowing nodes accounting for 149.50ms, 100% of 149.50ms total\l\lSee https://git.io/JfYMW for how to read the graph\l"] } +N1 [label="file3000.src\n32.77ms (21.92%)\nof 149.50ms (100%)" id="node1" fontsize=20 shape=box tooltip="testdata/file3000.src (149.50ms)" color="#b20000" fillcolor="#edd5d5"] +N2 [label="file1000.src\n51.20ms (34.25%)" id="node2" fontsize=23 shape=box tooltip="testdata/file1000.src (51.20ms)" color="#b23100" fillcolor="#eddbd5"] +N3 [label="file2000.src\n65.54ms (43.84%)\nof 75.78ms (50.68%)" id="node3" fontsize=24 shape=box tooltip="testdata/file2000.src (75.78ms)" color="#b22000" fillcolor="#edd9d5"] +N1 -> N3 [label=" 75.78ms" weight=51 penwidth=3 color="#b22000" tooltip="testdata/file3000.src -> testdata/file2000.src (75.78ms)" labeltooltip="testdata/file3000.src -> testdata/file2000.src (75.78ms)"] +N1 -> N2 [label=" 40.96ms" weight=28 penwidth=2 color="#b23900" tooltip="testdata/file3000.src -> testdata/file1000.src (40.96ms)" labeltooltip="testdata/file3000.src -> testdata/file1000.src (40.96ms)"] +N3 -> N2 [label=" 10.24ms" weight=7 color="#b29775" tooltip="testdata/file2000.src -> testdata/file1000.src (10.24ms)" labeltooltip="testdata/file2000.src -> testdata/file1000.src (10.24ms)"] +} diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.contention.flat.addresses.dot.focus.ignore b/plugin/debug/pkg/internal/driver/testdata/pprof.contention.flat.addresses.dot.focus.ignore new file mode 100644 index 0000000..dcd0920 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.contention.flat.addresses.dot.focus.ignore @@ -0,0 +1,9 @@ +digraph "unnamed" { +node [style=filled fillcolor="#f8f8f8"] +subgraph 
cluster_L { "Build ID: buildid-contention" [shape=box fontsize=16 label="Build ID: buildid-contention\lComment #1\lComment #2\lType: delay\lActive filters:\l focus=[X1]000\l ignore=[X3]002\lShowing nodes accounting for 40.96ms, 27.40% of 149.50ms total\l\lSee https://git.io/JfYMW for how to read the graph\l"] } +N1 [label="0000000000001000\nline1000\nfile1000.src:1\n40.96ms (27.40%)" id="node1" fontsize=24 shape=box tooltip="0000000000001000 line1000 testdata/file1000.src:1 (40.96ms)" color="#b23900" fillcolor="#edddd5"] +N2 [label="0000000000003001\nline3000\nfile3000.src:5\n0 of 40.96ms (27.40%)" id="node2" fontsize=8 shape=box tooltip="0000000000003001 line3000 testdata/file3000.src:5 (40.96ms)" color="#b23900" fillcolor="#edddd5"] +N3 [label="0000000000003001\nline3001\nfile3000.src:3\n0 of 40.96ms (27.40%)" id="node3" fontsize=8 shape=box tooltip="0000000000003001 line3001 testdata/file3000.src:3 (40.96ms)" color="#b23900" fillcolor="#edddd5"] +N2 -> N3 [label=" 40.96ms\n (inline)" weight=28 penwidth=2 color="#b23900" tooltip="0000000000003001 line3000 testdata/file3000.src:5 -> 0000000000003001 line3001 testdata/file3000.src:3 (40.96ms)" labeltooltip="0000000000003001 line3000 testdata/file3000.src:5 -> 0000000000003001 line3001 testdata/file3000.src:3 (40.96ms)"] +N3 -> N1 [label=" 40.96ms" weight=28 penwidth=2 color="#b23900" tooltip="0000000000003001 line3001 testdata/file3000.src:3 -> 0000000000001000 line1000 testdata/file1000.src:1 (40.96ms)" labeltooltip="0000000000003001 line3001 testdata/file3000.src:3 -> 0000000000001000 line1000 testdata/file1000.src:1 (40.96ms)"] +} diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.addresses.traces b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.addresses.traces new file mode 100644 index 0000000..742b123 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.addresses.traces @@ -0,0 +1,32 @@ +File: testbinary +Type: cpu +Duration: 10s, Total samples = 1.12s (11.20%) 
+-----------+------------------------------------------------------- + key1: tag1 + key2: tag1 + 1s 0000000000001000 line1000 testdata/file1000.src:1 + 0000000000002000 line2001 testdata/file2000.src:9 (inline) + 0000000000002000 line2000 testdata/file2000.src:4 + 0000000000003000 line3002 testdata/file3000.src:2 (inline) + 0000000000003000 line3001 testdata/file3000.src:5 (inline) + 0000000000003000 line3000 testdata/file3000.src:6 +-----------+------------------------------------------------------- + key1: tag2 + key3: tag2 + 100ms 0000000000001000 line1000 testdata/file1000.src:1 + 0000000000003001 line3001 testdata/file3000.src:8 (inline) + 0000000000003001 line3000 testdata/file3000.src:9 +-----------+------------------------------------------------------- + key1: tag3 + key2: tag2 + 10ms 0000000000002000 line2001 testdata/file2000.src:9 (inline) + 0000000000002000 line2000 testdata/file2000.src:4 + 0000000000003002 line3002 testdata/file3000.src:5 (inline) + 0000000000003002 line3000 testdata/file3000.src:9 +-----------+------------------------------------------------------- + key1: tag4 + key2: tag1 + 10ms 0000000000003000 line3002 testdata/file3000.src:2 (inline) + 0000000000003000 line3001 testdata/file3000.src:5 (inline) + 0000000000003000 line3000 testdata/file3000.src:6 +-----------+------------------------------------------------------- diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.call_tree.callgrind b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.call_tree.callgrind new file mode 100644 index 0000000..e2286f6 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.call_tree.callgrind @@ -0,0 +1,99 @@ +positions: instr line +events: cpu(ms) + +ob=(1) /path/to/testbinary +fl=(1) testdata/file1000.src +fn=(1) line1000 +0x1000 1 1000 +* 1 100 + +ob=(1) +fl=(2) testdata/file2000.src +fn=(2) line2001 ++4096 9 10 + +ob=(1) +fl=(3) testdata/file3000.src +fn=(3) line3002 ++4096 2 10 +cfl=(2) +cfn=(4) line2000 [1/2] 
+calls=0 * 4 +* * 1000 + +ob=(1) +fl=(2) +fn=(5) line2000 +-4096 4 0 +cfl=(2) +cfn=(6) line2001 [2/2] +calls=0 -4096 9 +* * 1000 +* 4 0 +cfl=(2) +cfn=(7) line2001 [1/2] +calls=0 * 9 +* * 10 + +ob=(1) +fl=(2) +fn=(2) +* 9 0 +cfl=(1) +cfn=(8) line1000 [1/2] +calls=0 -4096 1 +* * 1000 + +ob=(1) +fl=(3) +fn=(9) line3000 ++4096 6 0 +cfl=(3) +cfn=(10) line3001 [1/2] +calls=0 +4096 5 +* * 1010 + +ob=(1) +fl=(3) +fn=(11) line3001 +* 5 0 +cfl=(3) +cfn=(12) line3002 [1/2] +calls=0 * 2 +* * 1010 + +ob=(1) +fl=(3) +fn=(9) ++1 9 0 +cfl=(3) +cfn=(13) line3001 [2/2] +calls=0 +1 8 +* * 100 + +ob=(1) +fl=(3) +fn=(11) +* 8 0 +cfl=(1) +cfn=(14) line1000 [2/2] +calls=0 -8193 1 +* * 100 + +ob=(1) +fl=(3) +fn=(9) ++1 9 0 +cfl=(3) +cfn=(15) line3002 [2/2] +calls=0 +1 5 +* * 10 + +ob=(1) +fl=(3) +fn=(3) +* 5 0 +cfl=(2) +cfn=(16) line2000 [2/2] +calls=0 -4098 4 +* * 10 diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.callgrind b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.callgrind new file mode 100644 index 0000000..0b04996 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.callgrind @@ -0,0 +1,88 @@ +positions: instr line +events: cpu(ms) + +ob=(1) /path/to/testbinary +fl=(1) testdata/file1000.src +fn=(1) line1000 +0x1000 1 1100 + +ob=(1) +fl=(2) testdata/file2000.src +fn=(2) line2001 ++4096 9 10 +cfl=(1) +cfn=(1) +calls=0 * 1 +* * 1000 + +ob=(1) +fl=(3) testdata/file3000.src +fn=(3) line3002 ++4096 2 10 +cfl=(2) +cfn=(4) line2000 +calls=0 * 4 +* * 1000 + +ob=(1) +fl=(2) +fn=(4) +-4096 4 0 +cfl=(2) +cfn=(2) +calls=0 -4096 9 +* * 1010 + +ob=(1) +fl=(3) +fn=(5) line3000 ++4096 6 0 +cfl=(3) +cfn=(6) line3001 +calls=0 +4096 5 +* * 1010 + +ob=(1) +fl=(3) +fn=(6) +* 5 0 +cfl=(3) +cfn=(3) +calls=0 * 2 +* * 1010 + +ob=(1) +fl=(3) +fn=(5) ++1 9 0 +cfl=(3) +cfn=(6) +calls=0 +1 8 +* * 100 + +ob=(1) +fl=(3) +fn=(6) +* 8 0 +cfl=(1) +cfn=(1) +calls=0 -8193 1 +* * 100 + +ob=(1) +fl=(3) +fn=(5) ++1 9 0 +cfl=(3) +cfn=(3) +calls=0 +1 5 +* * 10 + +ob=(1) +fl=(3) 
+fn=(3) +* 5 0 +cfl=(2) +cfn=(4) +calls=0 -4098 4 +* * 10 diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.comments b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.comments new file mode 100644 index 0000000..e6d9824 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.comments @@ -0,0 +1 @@ +some-comment diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.text.focus.hide b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.text.focus.hide new file mode 100644 index 0000000..f0d928d --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.text.focus.hide @@ -0,0 +1,8 @@ +Active filters: + focus=[12]00 + hide=line[X3]0 +Showing nodes accounting for 1.11s, 99.11% of 1.12s total + flat flat% sum% cum cum% + 1.10s 98.21% 98.21% 1.10s 98.21% line1000 testdata/file1000.src:1 + 0 0% 98.21% 1.01s 90.18% line2000 testdata/file2000.src:4 + 0.01s 0.89% 99.11% 1.01s 90.18% line2001 testdata/file2000.src:9 (inline) diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.text.hide b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.text.hide new file mode 100644 index 0000000..bf503a5 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.text.hide @@ -0,0 +1,7 @@ +Active filters: + hide=line[X3]0 +Showing nodes accounting for 1.11s, 99.11% of 1.12s total + flat flat% sum% cum cum% + 1.10s 98.21% 98.21% 1.10s 98.21% line1000 testdata/file1000.src:1 + 0 0% 98.21% 1.01s 90.18% line2000 testdata/file2000.src:4 + 0.01s 0.89% 99.11% 1.01s 90.18% line2001 testdata/file2000.src:9 (inline) diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.text.show b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.text.show new file mode 100644 index 0000000..7604cb8 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.text.show @@ -0,0 +1,7 @@ +Active filters: + show=[12]00 
+Showing nodes accounting for 1.11s, 99.11% of 1.12s total + flat flat% sum% cum cum% + 1.10s 98.21% 98.21% 1.10s 98.21% line1000 testdata/file1000.src:1 + 0 0% 98.21% 1.01s 90.18% line2000 testdata/file2000.src:4 + 0.01s 0.89% 99.11% 1.01s 90.18% line2001 testdata/file2000.src:9 (inline) diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.topproto.hide b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.topproto.hide new file mode 100644 index 0000000..94b9be8 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.topproto.hide @@ -0,0 +1,5 @@ +Active filters: + hide=mangled[X3]0 +Showing nodes accounting for 1s, 100% of 1s total + flat flat% sum% cum cum% + 1s 100% 100% 1s 100% mangled1000 testdata/file1000.src:1 diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.tree.show_from b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.tree.show_from new file mode 100644 index 0000000..112b49b --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.cum.lines.tree.show_from @@ -0,0 +1,16 @@ +Active filters: + show_from=line2 +Showing nodes accounting for 1.01s, 90.18% of 1.12s total +----------------------------------------------------------+------------- + flat flat% sum% cum cum% calls calls% + context +----------------------------------------------------------+------------- + 0 0% 0% 1.01s 90.18% | line2000 testdata/file2000.src:4 + 1.01s 100% | line2001 testdata/file2000.src:9 (inline) +----------------------------------------------------------+------------- + 1.01s 100% | line2000 testdata/file2000.src:4 (inline) + 0.01s 0.89% 0.89% 1.01s 90.18% | line2001 testdata/file2000.src:9 + 1s 99.01% | line1000 testdata/file1000.src:1 +----------------------------------------------------------+------------- + 1s 100% | line2001 testdata/file2000.src:9 + 1s 89.29% 90.18% 1s 89.29% | line1000 testdata/file1000.src:1 
+----------------------------------------------------------+------------- diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.addresses.disasm b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.addresses.disasm new file mode 100644 index 0000000..57987e8 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.addresses.disasm @@ -0,0 +1,14 @@ +Total: 1.12s +ROUTINE ======================== line1000 + 1.10s 1.10s (flat, cum) 98.21% of Total + 1.10s 1.10s 1000: instruction one ;line1000 file1000.src:1 + . . 1001: instruction two + . . 1002: instruction three ;line1000 file1000.src:2 + . . 1003: instruction four ;line1000 file1000.src:1 +ROUTINE ======================== line3000 + 10ms 1.12s (flat, cum) 100% of Total + 10ms 1.01s 3000: instruction one ;line3000 file3000.src:6 + . 100ms 3001: instruction two ;line3000 file3000.src:9 + . 10ms 3002: instruction three + . . 3003: instruction four ;line3000 file3000.src + . . 3004: instruction five diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.addresses.noinlines.text b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.addresses.noinlines.text new file mode 100644 index 0000000..d53c44d --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.addresses.noinlines.text @@ -0,0 +1,7 @@ +Showing nodes accounting for 1.12s, 100% of 1.12s total +Dropped 1 node (cum <= 0.06s) + flat flat% sum% cum cum% + 1.10s 98.21% 98.21% 1.10s 98.21% 0000000000001000 line1000 testdata/file1000.src:1 + 0.01s 0.89% 99.11% 1.01s 90.18% 0000000000002000 line2000 testdata/file2000.src:4 + 0.01s 0.89% 100% 1.01s 90.18% 0000000000003000 line3000 testdata/file3000.src:6 + 0 0% 100% 0.10s 8.93% 0000000000003001 line3000 testdata/file3000.src:9 diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.addresses.weblist b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.addresses.weblist new file mode 100644 index 0000000..b480dd9 --- 
/dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.addresses.weblist @@ -0,0 +1,101 @@ + + + + + testbinary cpu + + + + + + +
File: testbinary
+Type: cpu
+Duration: 10s, Total samples = 1.12s (11.20%)
Total: 1.12s

line1000

testdata/file1000.src

+
+  Total:       1.10s      1.10s (flat, cum) 98.21%
+      1        1.10s      1.10s           line1                1.10s      1.10s     1000:     instruction one                                                              file1000.src:1
+                   .          .     1001:     instruction two                                                              file1000.src:1
+                                     ⋮
+                   .          .     1003:     instruction four                                                             file1000.src:1
+
+      2            .          .           line2                    .          .     1002:     instruction three                                                            file1000.src:2
+
+      3            .          .           line3 
+      4            .          .           line4 
+      5            .          .           line5 
+      6            .          .           line6 
+      7            .          .           line7 
+
+

line3000

testdata/file3000.src

+
+  Total:        10ms      1.12s (flat, cum)   100%
+      1            .          .           line1 
+      2            .          .           line2 
+      3            .          .           line3 
+      4            .          .           line4 
+      5            .          .           line5 
+      6         10ms      1.01s           line6                                               line5                                                                        file3000.src:5
+                                                  line2                                                                    file3000.src:2
+                10ms      1.01s     3000:             instruction one                                                      file3000.src:2
+
+      7            .          .           line7 
+      8            .          .           line8 
+      9            .      110ms           line9                                               line8                                                                        file3000.src:8
+                   .      100ms     3001:         instruction two                                                          file3000.src:8
+                                              line5                                                                        file3000.src:5
+                   .       10ms     3002:         instruction three                                                        file3000.src:5
+                   .          .     3003:         instruction four                                                         
+                   .          .     3004:         instruction five                                                         
+
+     10            .          .           line0 
+     11            .          .           line1 
+     12            .          .           line2 
+     13            .          .           line3 
+     14            .          .           line4 
+
+ + + diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.filefunctions.noinlines.text b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.filefunctions.noinlines.text new file mode 100644 index 0000000..88fb760 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.filefunctions.noinlines.text @@ -0,0 +1,5 @@ +Showing nodes accounting for 1.12s, 100% of 1.12s total + flat flat% sum% cum cum% + 1.10s 98.21% 98.21% 1.10s 98.21% line1000 testdata/file1000.src + 0.01s 0.89% 99.11% 1.01s 90.18% line2000 testdata/file2000.src + 0.01s 0.89% 100% 1.12s 100% line3000 testdata/file3000.src diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.functions.call_tree.dot b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.functions.call_tree.dot new file mode 100644 index 0000000..ae57f66 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.functions.call_tree.dot @@ -0,0 +1,21 @@ +digraph "testbinary" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "File: testbinary" [shape=box fontsize=16 label="File: testbinary\lType: cpu\lDuration: 10s, Total samples = 1.12s (11.20%)\lShowing nodes accounting for 1.11s, 99.11% of 1.12s total\lDropped 3 nodes (cum <= 0.06s)\l\lSee https://git.io/JfYMW for how to read the graph\l" tooltip="testbinary"] } +N1 [label="line1000\n1s (89.29%)" id="node1" fontsize=24 shape=box tooltip="line1000 (1s)" color="#b20500" fillcolor="#edd6d5"] +N1_0 [label = "key1:tag1\nkey2:tag1" id="N1_0" fontsize=8 shape=box3d tooltip="1s"] +N1 -> N1_0 [label=" 1s" weight=100 tooltip="1s" labeltooltip="1s"] +N2 [label="line3000\n0 of 1.12s (100%)" id="node2" fontsize=8 shape=box tooltip="line3000 (1.12s)" color="#b20000" fillcolor="#edd5d5"] +N3 [label="line3001\n0 of 1.11s (99.11%)" id="node3" fontsize=8 shape=box tooltip="line3001 (1.11s)" color="#b20000" fillcolor="#edd5d5"] +N4 [label="line1000\n0.10s (8.93%)" id="node4" fontsize=14 shape=box 
tooltip="line1000 (0.10s)" color="#b28b62" fillcolor="#ede8e2"] +N4_0 [label = "key1:tag2\nkey3:tag2" id="N4_0" fontsize=8 shape=box3d tooltip="0.10s"] +N4 -> N4_0 [label=" 0.10s" weight=100 tooltip="0.10s" labeltooltip="0.10s"] +N5 [label="line3002\n0.01s (0.89%)\nof 1.01s (90.18%)" id="node5" fontsize=10 shape=box tooltip="line3002 (1.01s)" color="#b20500" fillcolor="#edd6d5"] +N6 [label="line2000\n0 of 1s (89.29%)" id="node6" fontsize=8 shape=box tooltip="line2000 (1s)" color="#b20500" fillcolor="#edd6d5"] +N7 [label="line2001\n0 of 1s (89.29%)" id="node7" fontsize=8 shape=box tooltip="line2001 (1s)" color="#b20500" fillcolor="#edd6d5"] +N2 -> N3 [label=" 1.11s\n (inline)" weight=100 penwidth=5 color="#b20000" tooltip="line3000 -> line3001 (1.11s)" labeltooltip="line3000 -> line3001 (1.11s)"] +N3 -> N5 [label=" 1.01s\n (inline)" weight=91 penwidth=5 color="#b20500" tooltip="line3001 -> line3002 (1.01s)" labeltooltip="line3001 -> line3002 (1.01s)"] +N6 -> N7 [label=" 1s\n (inline)" weight=90 penwidth=5 color="#b20500" tooltip="line2000 -> line2001 (1s)" labeltooltip="line2000 -> line2001 (1s)"] +N7 -> N1 [label=" 1s" weight=90 penwidth=5 color="#b20500" tooltip="line2001 -> line1000 (1s)" labeltooltip="line2001 -> line1000 (1s)"] +N5 -> N6 [label=" 1s" weight=90 penwidth=5 color="#b20500" tooltip="line3002 -> line2000 (1s)" labeltooltip="line3002 -> line2000 (1s)"] +N3 -> N4 [label=" 0.10s" weight=9 color="#b28b62" tooltip="line3001 -> line1000 (0.10s)" labeltooltip="line3001 -> line1000 (0.10s)"] +} diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.functions.dot b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.functions.dot new file mode 100644 index 0000000..4a812e4 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.functions.dot @@ -0,0 +1,20 @@ +digraph "testbinary" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "File: testbinary" [shape=box fontsize=16 label="File: testbinary\lType: 
cpu\lDuration: 10s, Total samples = 1.12s (11.20%)\lShowing nodes accounting for 1.12s, 100% of 1.12s total\l\lSee https://git.io/JfYMW for how to read the graph\l" tooltip="testbinary"] } +N1 [label="line1000\n1.10s (98.21%)" id="node1" fontsize=24 shape=box tooltip="line1000 (1.10s)" color="#b20000" fillcolor="#edd5d5"] +N1_0 [label = "key1:tag1\nkey2:tag1" id="N1_0" fontsize=8 shape=box3d tooltip="1s"] +N1 -> N1_0 [label=" 1s" weight=100 tooltip="1s" labeltooltip="1s"] +N1_1 [label = "key1:tag2\nkey3:tag2" id="N1_1" fontsize=8 shape=box3d tooltip="0.10s"] +N1 -> N1_1 [label=" 0.10s" weight=100 tooltip="0.10s" labeltooltip="0.10s"] +N2 [label="line3000\n0 of 1.12s (100%)" id="node2" fontsize=8 shape=box tooltip="line3000 (1.12s)" color="#b20000" fillcolor="#edd5d5"] +N3 [label="line3001\n0 of 1.11s (99.11%)" id="node3" fontsize=8 shape=box tooltip="line3001 (1.11s)" color="#b20000" fillcolor="#edd5d5"] +N4 [label="line3002\n0.01s (0.89%)\nof 1.02s (91.07%)" id="node4" fontsize=10 shape=box tooltip="line3002 (1.02s)" color="#b20400" fillcolor="#edd6d5"] +N5 [label="line2001\n0.01s (0.89%)\nof 1.01s (90.18%)" id="node5" fontsize=10 shape=box tooltip="line2001 (1.01s)" color="#b20500" fillcolor="#edd6d5"] +N6 [label="line2000\n0 of 1.01s (90.18%)" id="node6" fontsize=8 shape=box tooltip="line2000 (1.01s)" color="#b20500" fillcolor="#edd6d5"] +N2 -> N3 [label=" 1.11s\n (inline)" weight=100 penwidth=5 color="#b20000" tooltip="line3000 -> line3001 (1.11s)" labeltooltip="line3000 -> line3001 (1.11s)"] +N6 -> N5 [label=" 1.01s\n (inline)" weight=91 penwidth=5 color="#b20500" tooltip="line2000 -> line2001 (1.01s)" labeltooltip="line2000 -> line2001 (1.01s)"] +N3 -> N4 [label=" 1.01s\n (inline)" weight=91 penwidth=5 color="#b20500" tooltip="line3001 -> line3002 (1.01s)" labeltooltip="line3001 -> line3002 (1.01s)"] +N4 -> N6 [label=" 1.01s" weight=91 penwidth=5 color="#b20500" tooltip="line3002 -> line2000 (1.01s)" labeltooltip="line3002 -> line2000 (1.01s)"] +N5 -> N1 
[label=" 1s" weight=90 penwidth=5 color="#b20500" tooltip="line2001 -> line1000 (1s)" labeltooltip="line2001 -> line1000 (1s)"] +N3 -> N1 [label=" 0.10s" weight=9 color="#b28b62" tooltip="line3001 -> line1000 (0.10s)" labeltooltip="line3001 -> line1000 (0.10s)"] +} diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.functions.noinlines.text b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.functions.noinlines.text new file mode 100644 index 0000000..493b491 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.functions.noinlines.text @@ -0,0 +1,5 @@ +Showing nodes accounting for 1.12s, 100% of 1.12s total + flat flat% sum% cum cum% + 1.10s 98.21% 98.21% 1.10s 98.21% line1000 + 0.01s 0.89% 99.11% 1.01s 90.18% line2000 + 0.01s 0.89% 100% 1.12s 100% line3000 diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.functions.text b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.functions.text new file mode 100644 index 0000000..66e4189 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.flat.functions.text @@ -0,0 +1,8 @@ +Showing nodes accounting for 1.12s, 100% of 1.12s total + flat flat% sum% cum cum% + 1.10s 98.21% 98.21% 1.10s 98.21% line1000 + 0.01s 0.89% 99.11% 1.01s 90.18% line2001 (inline) + 0.01s 0.89% 100% 1.02s 91.07% line3002 (inline) + 0 0% 100% 1.01s 90.18% line2000 + 0 0% 100% 1.12s 100% line3000 + 0 0% 100% 1.11s 99.11% line3001 (inline) diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.lines.topproto b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.lines.topproto new file mode 100644 index 0000000..33bf681 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.lines.topproto @@ -0,0 +1,3 @@ +Showing nodes accounting for 1s, 100% of 1s total + flat flat% sum% cum cum% + 1s 100% 100% 1s 100% mangled1000 testdata/file1000.src:1 diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.peek 
b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.peek new file mode 100644 index 0000000..3b8a353 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.peek @@ -0,0 +1,13 @@ +Showing nodes accounting for 1.12s, 100% of 1.12s total +----------------------------------------------------------+------------- + flat flat% sum% cum cum% calls calls% + context +----------------------------------------------------------+------------- + 1.01s 100% | line2000 (inline) + 0.01s 0.89% 0.89% 1.01s 90.18% | line2001 + 1s 99.01% | line1000 +----------------------------------------------------------+------------- + 1.11s 100% | line3000 (inline) + 0 0% 0.89% 1.11s 99.11% | line3001 + 1.01s 90.99% | line3002 (inline) + 0.10s 9.01% | line1000 +----------------------------------------------------------+------------- diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.tags b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.tags new file mode 100644 index 0000000..5998b5b --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.tags @@ -0,0 +1,13 @@ + key1: Total 1.1s + 1.0s (89.29%): tag1 + 100.0ms ( 8.93%): tag2 + 10.0ms ( 0.89%): tag3 + 10.0ms ( 0.89%): tag4 + + key2: Total 1.0s + 1.0s (99.02%): tag1 + 10.0ms ( 0.98%): tag2 + + key3: Total 100.0ms + 100.0ms ( 100%): tag2 + diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.tags.focus.ignore b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.tags.focus.ignore new file mode 100644 index 0000000..9b99d43 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.tags.focus.ignore @@ -0,0 +1,6 @@ + key1: Total 100.0ms + 100.0ms ( 100%): tag2 + + key3: Total 100.0ms + 100.0ms ( 100%): tag2 + diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.traces b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.traces new file mode 100644 index 0000000..dd31e2e --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpu.traces @@ -0,0 +1,32 
@@ +File: testbinary +Type: cpu +Duration: 10s, Total samples = 1.12s (11.20%) +-----------+------------------------------------------------------- + key1: tag1 + key2: tag1 + 1s line1000 + line2001 (inline) + line2000 + line3002 (inline) + line3001 (inline) + line3000 +-----------+------------------------------------------------------- + key1: tag2 + key3: tag2 + 100ms line1000 + line3001 (inline) + line3000 +-----------+------------------------------------------------------- + key1: tag3 + key2: tag2 + 10ms line2001 (inline) + line2000 + line3002 (inline) + line3000 +-----------+------------------------------------------------------- + key1: tag4 + key2: tag1 + 10ms line3002 (inline) + line3001 (inline) + line3000 +-----------+------------------------------------------------------- diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.cpusmall.flat.addresses.tree b/plugin/debug/pkg/internal/driver/testdata/pprof.cpusmall.flat.addresses.tree new file mode 100644 index 0000000..606db2b --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.cpusmall.flat.addresses.tree @@ -0,0 +1,17 @@ +Showing nodes accounting for 4s, 100% of 4s total +Showing top 4 nodes out of 5 +----------------------------------------------------------+------------- + flat flat% sum% cum cum% calls calls% + context +----------------------------------------------------------+------------- + 1s 100% | 0000000000003000 [testbinary] + 1s 25.00% 25.00% 1s 25.00% | 0000000000001000 [testbinary] +----------------------------------------------------------+------------- + 1s 25.00% 50.00% 2s 50.00% | 0000000000003000 [testbinary] + 1s 50.00% | 0000000000001000 [testbinary] +----------------------------------------------------------+------------- + 1s 100% | 0000000000005000 [testbinary] + 1s 25.00% 75.00% 1s 25.00% | 0000000000004000 [testbinary] +----------------------------------------------------------+------------- + 1s 25.00% 100% 2s 50.00% | 0000000000005000 [testbinary] + 1s 
50.00% | 0000000000004000 [testbinary] +----------------------------------------------------------+------------- diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap.callgrind b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.callgrind new file mode 100644 index 0000000..bfd96cb --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.callgrind @@ -0,0 +1,88 @@ +positions: instr line +events: inuse_space(MB) + +ob= +fl=(1) testdata/file2000.src +fn=(1) line2001 +0x2000 2 62 +cfl=(2) testdata/file1000.src +cfn=(2) line1000 +calls=0 0x1000 1 +* * 0 + +ob= +fl=(3) testdata/file3000.src +fn=(3) line3002 ++4096 3 31 +cfl=(1) +cfn=(4) line2000 +calls=0 * 3 +* * 0 + +ob= +fl=(2) +fn=(2) +-8192 1 4 + +ob= +fl=(1) +fn=(4) ++4096 3 0 +cfl=(1) +cfn=(1) +calls=0 +4096 2 +* * 63 + +ob= +fl=(3) +fn=(5) line3000 ++4096 4 0 +cfl=(3) +cfn=(6) line3001 +calls=0 +4096 2 +* * 32 + +ob= +fl=(3) +fn=(6) +* 2 0 +cfl=(3) +cfn=(3) +calls=0 * 3 +* * 32 + +ob= +fl=(3) +fn=(5) ++1 4 0 +cfl=(3) +cfn=(6) +calls=0 +1 2 +* * 3 + +ob= +fl=(3) +fn=(6) +* 2 0 +cfl=(2) +cfn=(2) +calls=0 -8193 1 +* * 3 + +ob= +fl=(3) +fn=(5) ++1 4 0 +cfl=(3) +cfn=(3) +calls=0 +1 3 +* * 62 + +ob= +fl=(3) +fn=(3) +* 3 0 +cfl=(1) +cfn=(4) +calls=0 -4098 3 +* * 62 diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap.comments b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.comments new file mode 100644 index 0000000..6eca2fb --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.comments @@ -0,0 +1,2 @@ +comment +#hidden comment diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap.cum.lines.tree.focus b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.cum.lines.tree.focus new file mode 100644 index 0000000..9d4ba72 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.cum.lines.tree.focus @@ -0,0 +1,21 @@ +Active filters: + focus=[24]00 +Showing nodes accounting for 62.50MB, 63.37% of 98.63MB total +Dropped 2 nodes 
(cum <= 4.93MB) +----------------------------------------------------------+------------- + flat flat% sum% cum cum% calls calls% + context +----------------------------------------------------------+------------- + 63.48MB 100% | line3002 testdata/file3000.src:3 + 0 0% 0% 63.48MB 64.36% | line2000 testdata/file2000.src:3 + 63.48MB 100% | line2001 testdata/file2000.src:2 (inline) +----------------------------------------------------------+------------- + 63.48MB 100% | line2000 testdata/file2000.src:3 (inline) + 62.50MB 63.37% 63.37% 63.48MB 64.36% | line2001 testdata/file2000.src:2 +----------------------------------------------------------+------------- + 0 0% 63.37% 63.48MB 64.36% | line3000 testdata/file3000.src:4 + 63.48MB 100% | line3002 testdata/file3000.src:3 (inline) +----------------------------------------------------------+------------- + 63.48MB 100% | line3000 testdata/file3000.src:4 (inline) + 0 0% 63.37% 63.48MB 64.36% | line3002 testdata/file3000.src:3 + 63.48MB 100% | line2000 testdata/file2000.src:3 +----------------------------------------------------------+------------- diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap.cum.relative_percentages.tree.focus b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.cum.relative_percentages.tree.focus new file mode 100644 index 0000000..c2d1183 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.cum.relative_percentages.tree.focus @@ -0,0 +1,21 @@ +Active filters: + focus=[24]00 +Showing nodes accounting for 62.50MB, 98.46% of 63.48MB total +Dropped 2 nodes (cum <= 3.17MB) +----------------------------------------------------------+------------- + flat flat% sum% cum cum% calls calls% + context +----------------------------------------------------------+------------- + 63.48MB 100% | line3002 + 0 0% 0% 63.48MB 100% | line2000 + 63.48MB 100% | line2001 (inline) +----------------------------------------------------------+------------- + 63.48MB 100% | line2000 
(inline) + 62.50MB 98.46% 98.46% 63.48MB 100% | line2001 +----------------------------------------------------------+------------- + 0 0% 98.46% 63.48MB 100% | line3000 + 63.48MB 100% | line3002 (inline) +----------------------------------------------------------+------------- + 63.48MB 100% | line3000 (inline) + 0 0% 98.46% 63.48MB 100% | line3002 + 63.48MB 100% | line2000 +----------------------------------------------------------+------------- diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.files.seconds.text b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.files.seconds.text new file mode 100644 index 0000000..b9571ef --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.files.seconds.text @@ -0,0 +1,2 @@ +Showing nodes accounting for 0, 0% of 0 total + flat flat% sum% cum cum% diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.files.text b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.files.text new file mode 100644 index 0000000..fd536df --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.files.text @@ -0,0 +1,5 @@ +Showing nodes accounting for 93.75MB, 95.05% of 98.63MB total +Dropped 1 node (cum <= 4.93MB) + flat flat% sum% cum cum% + 62.50MB 63.37% 63.37% 63.48MB 64.36% testdata/file2000.src + 31.25MB 31.68% 95.05% 98.63MB 100% testdata/file3000.src diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.files.text.focus b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.files.text.focus new file mode 100644 index 0000000..20a503f --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.files.text.focus @@ -0,0 +1,8 @@ +Active filters: + focus=[12]00 + taghide=[X3]00 +Showing nodes accounting for 67.38MB, 68.32% of 98.63MB total + flat flat% sum% cum cum% + 62.50MB 63.37% 63.37% 63.48MB 64.36% testdata/file2000.src + 4.88MB 4.95% 68.32% 4.88MB 4.95% testdata/file1000.src + 0 0% 68.32% 67.38MB 
68.32% testdata/file3000.src diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.inuse_objects.text b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.inuse_objects.text new file mode 100644 index 0000000..929461a --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.inuse_objects.text @@ -0,0 +1,8 @@ +Showing nodes accounting for 150, 100% of 150 total + flat flat% sum% cum cum% + 80 53.33% 53.33% 130 86.67% line3002 (inline) + 40 26.67% 80.00% 50 33.33% line2001 (inline) + 30 20.00% 100% 30 20.00% line1000 + 0 0% 100% 50 33.33% line2000 + 0 0% 100% 150 100% line3000 + 0 0% 100% 110 73.33% line3001 (inline) diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.inuse_space.dot.focus b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.inuse_space.dot.focus new file mode 100644 index 0000000..c1d3a8e --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.inuse_space.dot.focus @@ -0,0 +1,13 @@ +digraph "unnamed" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "Build ID: buildid" [shape=box fontsize=16 label="Build ID: buildid\lcomment\lType: inuse_space\lActive filters:\l tagfocus=1mb:2gb\lShowing nodes accounting for 62.50MB, 63.37% of 98.63MB total\l\lSee https://git.io/JfYMW for how to read the graph\l"] } +N1 [label="line2001\n62.50MB (63.37%)" id="node1" fontsize=24 shape=box tooltip="line2001 (62.50MB)" color="#b21600" fillcolor="#edd8d5"] +NN1_0 [label = "1.56MB" id="NN1_0" fontsize=8 shape=box3d tooltip="62.50MB"] +N1 -> NN1_0 [label=" 62.50MB" weight=100 tooltip="62.50MB" labeltooltip="62.50MB"] +N2 [label="line3000\n0 of 62.50MB (63.37%)" id="node2" fontsize=8 shape=box tooltip="line3000 (62.50MB)" color="#b21600" fillcolor="#edd8d5"] +N3 [label="line2000\n0 of 62.50MB (63.37%)" id="node3" fontsize=8 shape=box tooltip="line2000 (62.50MB)" color="#b21600" fillcolor="#edd8d5"] +N4 [label="line3002\n0 of 62.50MB (63.37%)" id="node4" fontsize=8 
shape=box tooltip="line3002 (62.50MB)" color="#b21600" fillcolor="#edd8d5"] +N3 -> N1 [label=" 62.50MB\n (inline)" weight=64 penwidth=4 color="#b21600" tooltip="line2000 -> line2001 (62.50MB)" labeltooltip="line2000 -> line2001 (62.50MB)"] +N2 -> N4 [label=" 62.50MB\n (inline)" weight=64 penwidth=4 color="#b21600" tooltip="line3000 -> line3002 (62.50MB)" labeltooltip="line3000 -> line3002 (62.50MB)"] +N4 -> N3 [label=" 62.50MB" weight=64 penwidth=4 color="#b21600" tooltip="line3002 -> line2000 (62.50MB)" labeltooltip="line3002 -> line2000 (62.50MB)"] +} diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.inuse_space.dot.focus.ignore b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.inuse_space.dot.focus.ignore new file mode 100644 index 0000000..ead36d6 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.inuse_space.dot.focus.ignore @@ -0,0 +1,16 @@ +digraph "unnamed" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "Build ID: buildid" [shape=box fontsize=16 label="Build ID: buildid\lcomment\lType: inuse_space\lActive filters:\l tagfocus=30kb:\l tagignore=1mb:2mb\lShowing nodes accounting for 36.13MB, 36.63% of 98.63MB total\lDropped 2 nodes (cum <= 4.93MB)\l\lSee https://git.io/JfYMW for how to read the graph\l"] } +N1 [label="line3002\n31.25MB (31.68%)\nof 32.23MB (32.67%)" id="node1" fontsize=24 shape=box tooltip="line3002 (32.23MB)" color="#b23200" fillcolor="#eddcd5"] +NN1_0 [label = "400kB" id="NN1_0" fontsize=8 shape=box3d tooltip="31.25MB"] +N1 -> NN1_0 [label=" 31.25MB" weight=100 tooltip="31.25MB" labeltooltip="31.25MB"] +N2 [label="line3000\n0 of 36.13MB (36.63%)" id="node2" fontsize=8 shape=box tooltip="line3000 (36.13MB)" color="#b22e00" fillcolor="#eddbd5"] +N3 [label="line3001\n0 of 36.13MB (36.63%)" id="node3" fontsize=8 shape=box tooltip="line3001 (36.13MB)" color="#b22e00" fillcolor="#eddbd5"] +N4 [label="line1000\n4.88MB (4.95%)" id="node4" fontsize=15 shape=box 
tooltip="line1000 (4.88MB)" color="#b2a086" fillcolor="#edeae7"] +NN4_0 [label = "200kB" id="NN4_0" fontsize=8 shape=box3d tooltip="3.91MB"] +N4 -> NN4_0 [label=" 3.91MB" weight=100 tooltip="3.91MB" labeltooltip="3.91MB"] +N2 -> N3 [label=" 36.13MB\n (inline)" weight=37 penwidth=2 color="#b22e00" tooltip="line3000 -> line3001 (36.13MB)" labeltooltip="line3000 -> line3001 (36.13MB)"] +N3 -> N1 [label=" 32.23MB\n (inline)" weight=33 penwidth=2 color="#b23200" tooltip="line3001 -> line3002 (32.23MB)" labeltooltip="line3001 -> line3002 (32.23MB)"] +N3 -> N4 [label=" 3.91MB" weight=4 color="#b2a58f" tooltip="line3001 -> line1000 (3.91MB)" labeltooltip="line3001 -> line1000 (3.91MB)"] +N1 -> N4 [label=" 0.98MB" color="#b2b0a9" tooltip="line3002 ... line1000 (0.98MB)" labeltooltip="line3002 ... line1000 (0.98MB)" style="dotted" minlen=2] +} diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.lines.dot.focus b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.lines.dot.focus new file mode 100644 index 0000000..02ea91e --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.flat.lines.dot.focus @@ -0,0 +1,21 @@ +digraph "unnamed" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "Build ID: buildid" [shape=box fontsize=16 label="Build ID: buildid\lcomment\lType: inuse_space\lActive filters:\l focus=[12]00\lShowing nodes accounting for 67.38MB, 68.32% of 98.63MB total\l\lSee https://git.io/JfYMW for how to read the graph\l"] } +N1 [label="line3000\nfile3000.src:4\n0 of 67.38MB (68.32%)" id="node1" fontsize=8 shape=box tooltip="line3000 testdata/file3000.src:4 (67.38MB)" color="#b21300" fillcolor="#edd7d5"] +N2 [label="line2001\nfile2000.src:2\n62.50MB (63.37%)\nof 63.48MB (64.36%)" id="node2" fontsize=24 shape=box tooltip="line2001 testdata/file2000.src:2 (63.48MB)" color="#b21600" fillcolor="#edd8d5"] +NN2_0 [label = "1.56MB" id="NN2_0" fontsize=8 shape=box3d tooltip="62.50MB"] +N2 -> NN2_0 [label=" 62.50MB" 
weight=100 tooltip="62.50MB" labeltooltip="62.50MB"] +N3 [label="line1000\nfile1000.src:1\n4.88MB (4.95%)" id="node3" fontsize=13 shape=box tooltip="line1000 testdata/file1000.src:1 (4.88MB)" color="#b2a086" fillcolor="#edeae7"] +NN3_0 [label = "200kB" id="NN3_0" fontsize=8 shape=box3d tooltip="3.91MB"] +N3 -> NN3_0 [label=" 3.91MB" weight=100 tooltip="3.91MB" labeltooltip="3.91MB"] +N4 [label="line3002\nfile3000.src:3\n0 of 63.48MB (64.36%)" id="node4" fontsize=8 shape=box tooltip="line3002 testdata/file3000.src:3 (63.48MB)" color="#b21600" fillcolor="#edd8d5"] +N5 [label="line3001\nfile3000.src:2\n0 of 4.88MB (4.95%)" id="node5" fontsize=8 shape=box tooltip="line3001 testdata/file3000.src:2 (4.88MB)" color="#b2a086" fillcolor="#edeae7"] +N6 [label="line2000\nfile2000.src:3\n0 of 63.48MB (64.36%)" id="node6" fontsize=8 shape=box tooltip="line2000 testdata/file2000.src:3 (63.48MB)" color="#b21600" fillcolor="#edd8d5"] +N6 -> N2 [label=" 63.48MB\n (inline)" weight=65 penwidth=4 color="#b21600" tooltip="line2000 testdata/file2000.src:3 -> line2001 testdata/file2000.src:2 (63.48MB)" labeltooltip="line2000 testdata/file2000.src:3 -> line2001 testdata/file2000.src:2 (63.48MB)"] +N4 -> N6 [label=" 63.48MB" weight=65 penwidth=4 color="#b21600" tooltip="line3002 testdata/file3000.src:3 -> line2000 testdata/file2000.src:3 (63.48MB)" labeltooltip="line3002 testdata/file3000.src:3 -> line2000 testdata/file2000.src:3 (63.48MB)"] +N1 -> N4 [label=" 62.50MB\n (inline)" weight=64 penwidth=4 color="#b21600" tooltip="line3000 testdata/file3000.src:4 -> line3002 testdata/file3000.src:3 (62.50MB)" labeltooltip="line3000 testdata/file3000.src:4 -> line3002 testdata/file3000.src:3 (62.50MB)"] +N1 -> N5 [label=" 4.88MB\n (inline)" weight=5 color="#b2a086" tooltip="line3000 testdata/file3000.src:4 -> line3001 testdata/file3000.src:2 (4.88MB)" labeltooltip="line3000 testdata/file3000.src:4 -> line3001 testdata/file3000.src:2 (4.88MB)"] +N5 -> N3 [label=" 3.91MB" weight=4 color="#b2a58f" 
tooltip="line3001 testdata/file3000.src:2 -> line1000 testdata/file1000.src:1 (3.91MB)" labeltooltip="line3001 testdata/file3000.src:2 -> line1000 testdata/file1000.src:1 (3.91MB)"] +N2 -> N3 [label=" 0.98MB" color="#b2b0a9" tooltip="line2001 testdata/file2000.src:2 -> line1000 testdata/file1000.src:1 (0.98MB)" labeltooltip="line2001 testdata/file2000.src:2 -> line1000 testdata/file1000.src:1 (0.98MB)" minlen=2] +N5 -> N4 [label=" 0.98MB\n (inline)" color="#b2b0a9" tooltip="line3001 testdata/file3000.src:2 -> line3002 testdata/file3000.src:3 (0.98MB)" labeltooltip="line3001 testdata/file3000.src:2 -> line3002 testdata/file3000.src:3 (0.98MB)"] +} diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap.tags b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.tags new file mode 100644 index 0000000..630e452 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.tags @@ -0,0 +1,6 @@ + bytes: Total 98.6MB + 62.5MB (63.37%): 1.56MB + 31.2MB (31.68%): 400kB + 3.9MB ( 3.96%): 200kB + 1000.0kB ( 0.99%): 100kB + diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap.tags.unit b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.tags.unit new file mode 100644 index 0000000..5e565fc --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap.tags.unit @@ -0,0 +1,6 @@ + bytes: Total 103424000.0B + 65536000.0B (63.37%): 1638400B + 32768000.0B (31.68%): 409600B + 4096000.0B ( 3.96%): 204800B + 1024000.0B ( 0.99%): 102400B + diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap_alloc.flat.alloc_objects.text b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_alloc.flat.alloc_objects.text new file mode 100644 index 0000000..929461a --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_alloc.flat.alloc_objects.text @@ -0,0 +1,8 @@ +Showing nodes accounting for 150, 100% of 150 total + flat flat% sum% cum cum% + 80 53.33% 53.33% 130 86.67% line3002 (inline) + 40 26.67% 80.00% 50 33.33% 
line2001 (inline) + 30 20.00% 100% 30 20.00% line1000 + 0 0% 100% 50 33.33% line2000 + 0 0% 100% 150 100% line3000 + 0 0% 100% 110 73.33% line3001 (inline) diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap_alloc.flat.alloc_space.dot b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_alloc.flat.alloc_space.dot new file mode 100644 index 0000000..152f550 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_alloc.flat.alloc_space.dot @@ -0,0 +1,14 @@ +digraph "unnamed" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "Build ID: buildid" [shape=box fontsize=16 label="Build ID: buildid\lcomment\lType: alloc_space\lActive filters:\l tagshow=[2]00\lShowing nodes accounting for 93.75MB, 95.05% of 98.63MB total\lDropped 1 node (cum <= 4.93MB)\l\lSee https://git.io/JfYMW for how to read the graph\l"] } +N1 [label="line3002\n31.25MB (31.68%)\nof 94.73MB (96.04%)" id="node1" fontsize=20 shape=box tooltip="line3002 (94.73MB)" color="#b20200" fillcolor="#edd5d5"] +N2 [label="line3000\n0 of 98.63MB (100%)" id="node2" fontsize=8 shape=box tooltip="line3000 (98.63MB)" color="#b20000" fillcolor="#edd5d5"] +N3 [label="line2001\n62.50MB (63.37%)\nof 63.48MB (64.36%)" id="node3" fontsize=24 shape=box tooltip="line2001 (63.48MB)" color="#b21600" fillcolor="#edd8d5"] +N4 [label="line2000\n0 of 63.48MB (64.36%)" id="node4" fontsize=8 shape=box tooltip="line2000 (63.48MB)" color="#b21600" fillcolor="#edd8d5"] +N5 [label="line3001\n0 of 36.13MB (36.63%)" id="node5" fontsize=8 shape=box tooltip="line3001 (36.13MB)" color="#b22e00" fillcolor="#eddbd5"] +N4 -> N3 [label=" 63.48MB\n (inline)" weight=65 penwidth=4 color="#b21600" tooltip="line2000 -> line2001 (63.48MB)" labeltooltip="line2000 -> line2001 (63.48MB)"] +N1 -> N4 [label=" 63.48MB" weight=65 penwidth=4 color="#b21600" tooltip="line3002 -> line2000 (63.48MB)" labeltooltip="line3002 -> line2000 (63.48MB)"] +N2 -> N1 [label=" 62.50MB\n (inline)" weight=64 penwidth=4 color="#b21600" 
tooltip="line3000 -> line3002 (62.50MB)" labeltooltip="line3000 -> line3002 (62.50MB)"] +N2 -> N5 [label=" 36.13MB\n (inline)" weight=37 penwidth=2 color="#b22e00" tooltip="line3000 -> line3001 (36.13MB)" labeltooltip="line3000 -> line3001 (36.13MB)"] +N5 -> N1 [label=" 32.23MB\n (inline)" weight=33 penwidth=2 color="#b23200" tooltip="line3001 -> line3002 (32.23MB)" labeltooltip="line3001 -> line3002 (32.23MB)"] +} diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap_alloc.flat.alloc_space.dot.focus b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_alloc.flat.alloc_space.dot.focus new file mode 100644 index 0000000..e59deef --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_alloc.flat.alloc_space.dot.focus @@ -0,0 +1,18 @@ +digraph "unnamed" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "Build ID: buildid" [shape=box fontsize=16 label="Build ID: buildid\lcomment\lType: alloc_space\lActive filters:\l focus=[234]00\lShowing nodes accounting for 93.75MB, 95.05% of 98.63MB total\lDropped 1 node (cum <= 4.93MB)\l\lSee https://git.io/JfYMW for how to read the graph\l"] } +N1 [label="line3002\n31.25MB (31.68%)\nof 94.73MB (96.04%)" id="node1" fontsize=20 shape=box tooltip="line3002 (94.73MB)" color="#b20200" fillcolor="#edd5d5"] +NN1_0 [label = "400kB" id="NN1_0" fontsize=8 shape=box3d tooltip="31.25MB"] +N1 -> NN1_0 [label=" 31.25MB" weight=100 tooltip="31.25MB" labeltooltip="31.25MB"] +N2 [label="line3000\n0 of 98.63MB (100%)" id="node2" fontsize=8 shape=box tooltip="line3000 (98.63MB)" color="#b20000" fillcolor="#edd5d5"] +N3 [label="line2001\n62.50MB (63.37%)\nof 63.48MB (64.36%)" id="node3" fontsize=24 shape=box tooltip="line2001 (63.48MB)" color="#b21600" fillcolor="#edd8d5"] +NN3_0 [label = "1.56MB" id="NN3_0" fontsize=8 shape=box3d tooltip="62.50MB"] +N3 -> NN3_0 [label=" 62.50MB" weight=100 tooltip="62.50MB" labeltooltip="62.50MB"] +N4 [label="line2000\n0 of 63.48MB (64.36%)" id="node4" fontsize=8 
shape=box tooltip="line2000 (63.48MB)" color="#b21600" fillcolor="#edd8d5"] +N5 [label="line3001\n0 of 36.13MB (36.63%)" id="node5" fontsize=8 shape=box tooltip="line3001 (36.13MB)" color="#b22e00" fillcolor="#eddbd5"] +N4 -> N3 [label=" 63.48MB\n (inline)" weight=65 penwidth=4 color="#b21600" tooltip="line2000 -> line2001 (63.48MB)" labeltooltip="line2000 -> line2001 (63.48MB)"] +N1 -> N4 [label=" 63.48MB" weight=65 penwidth=4 color="#b21600" tooltip="line3002 -> line2000 (63.48MB)" labeltooltip="line3002 -> line2000 (63.48MB)" minlen=2] +N2 -> N1 [label=" 62.50MB\n (inline)" weight=64 penwidth=4 color="#b21600" tooltip="line3000 -> line3002 (62.50MB)" labeltooltip="line3000 -> line3002 (62.50MB)"] +N2 -> N5 [label=" 36.13MB\n (inline)" weight=37 penwidth=2 color="#b22e00" tooltip="line3000 -> line3001 (36.13MB)" labeltooltip="line3000 -> line3001 (36.13MB)"] +N5 -> N1 [label=" 32.23MB\n (inline)" weight=33 penwidth=2 color="#b23200" tooltip="line3001 -> line3002 (32.23MB)" labeltooltip="line3001 -> line3002 (32.23MB)"] +} diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap_alloc.flat.alloc_space.dot.hide b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_alloc.flat.alloc_space.dot.hide new file mode 100644 index 0000000..25250f0 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_alloc.flat.alloc_space.dot.hide @@ -0,0 +1,11 @@ +digraph "unnamed" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "Build ID: buildid" [shape=box fontsize=16 label="Build ID: buildid\lcomment\lType: alloc_space\lActive filters:\l hide=line.*1?23?\lShowing nodes accounting for 93.75MB, 95.05% of 98.63MB total\lDropped 1 node (cum <= 4.93MB)\l\lSee https://git.io/JfYMW for how to read the graph\l"] } +N1 [label="line3000\n62.50MB (63.37%)\nof 98.63MB (100%)" id="node1" fontsize=24 shape=box tooltip="line3000 (98.63MB)" color="#b20000" fillcolor="#edd5d5"] +NN1_0 [label = "1.56MB" id="NN1_0" fontsize=8 shape=box3d tooltip="62.50MB"] 
+N1 -> NN1_0 [label=" 62.50MB" weight=100 tooltip="62.50MB" labeltooltip="62.50MB"] +N2 [label="line3001\n31.25MB (31.68%)\nof 36.13MB (36.63%)" id="node2" fontsize=20 shape=box tooltip="line3001 (36.13MB)" color="#b22e00" fillcolor="#eddbd5"] +NN2_0 [label = "400kB" id="NN2_0" fontsize=8 shape=box3d tooltip="31.25MB"] +N2 -> NN2_0 [label=" 31.25MB" weight=100 tooltip="31.25MB" labeltooltip="31.25MB"] +N1 -> N2 [label=" 36.13MB\n (inline)" weight=37 penwidth=2 color="#b22e00" tooltip="line3000 -> line3001 (36.13MB)" labeltooltip="line3000 -> line3001 (36.13MB)" minlen=2] +} diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap_request.tags.focus b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_request.tags.focus new file mode 100644 index 0000000..b1a5f44 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_request.tags.focus @@ -0,0 +1,8 @@ + bytes: Total 93.8MB + 62.5MB (66.67%): 1.56MB + 31.2MB (33.33%): 400kB + + request: Total 93.8MB + 62.5MB (66.67%): 1.56MB + 31.2MB (33.33%): 400kB + diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap_sizetags.dot b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_sizetags.dot new file mode 100644 index 0000000..fb31559 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_sizetags.dot @@ -0,0 +1,30 @@ +digraph "unnamed" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "Build ID: buildid" [shape=box fontsize=16 label="Build ID: buildid\lcomment\lType: inuse_space\lShowing nodes accounting for 93.75MB, 95.05% of 98.63MB total\lDropped 1 node (cum <= 4.93MB)\l\lSee https://git.io/JfYMW for how to read the graph\l"] } +N1 [label="line3002\n31.25MB (31.68%)\nof 94.73MB (96.04%)" id="node1" fontsize=20 shape=box tooltip="line3002 (94.73MB)" color="#b20200" fillcolor="#edd5d5"] +NN1_0 [label = "16B..64B" id="NN1_0" fontsize=8 shape=box3d tooltip="93.75MB"] +N1 -> NN1_0 [label=" 93.75MB" weight=100 tooltip="93.75MB" labeltooltip="93.75MB"] 
+NN1_1 [label = "2B..8B" id="NN1_1" fontsize=8 shape=box3d tooltip="93.75MB"] +N1 -> NN1_1 [label=" 93.75MB" weight=100 tooltip="93.75MB" labeltooltip="93.75MB"] +NN1_2 [label = "256B..1.56MB" id="NN1_2" fontsize=8 shape=box3d tooltip="62.50MB"] +N1 -> NN1_2 [label=" 62.50MB" weight=100 tooltip="62.50MB" labeltooltip="62.50MB"] +NN1_3 [label = "128B" id="NN1_3" fontsize=8 shape=box3d tooltip="31.25MB"] +N1 -> NN1_3 [label=" 31.25MB" weight=100 tooltip="31.25MB" labeltooltip="31.25MB"] +N2 [label="line3000\n0 of 98.63MB (100%)" id="node2" fontsize=8 shape=box tooltip="line3000 (98.63MB)" color="#b20000" fillcolor="#edd5d5"] +N3 [label="line2001\n62.50MB (63.37%)\nof 63.48MB (64.36%)" id="node3" fontsize=24 shape=box tooltip="line2001 (63.48MB)" color="#b21600" fillcolor="#edd8d5"] +NN3_0 [label = "16B..64B" id="NN3_0" fontsize=8 shape=box3d tooltip="190.43MB"] +N3 -> NN3_0 [label=" 190.43MB" weight=100 tooltip="190.43MB" labeltooltip="190.43MB" style="dotted"] +NN3_1 [label = "2B..8B" id="NN3_1" fontsize=8 shape=box3d tooltip="190.43MB"] +N3 -> NN3_1 [label=" 190.43MB" weight=100 tooltip="190.43MB" labeltooltip="190.43MB" style="dotted"] +NN3_2 [label = "256B..1.56MB" id="NN3_2" fontsize=8 shape=box3d tooltip="125.98MB"] +N3 -> NN3_2 [label=" 125.98MB" weight=100 tooltip="125.98MB" labeltooltip="125.98MB" style="dotted"] +NN3_3 [label = "128B" id="NN3_3" fontsize=8 shape=box3d tooltip="63.48MB"] +N3 -> NN3_3 [label=" 63.48MB" weight=100 tooltip="63.48MB" labeltooltip="63.48MB" style="dotted"] +N4 [label="line2000\n0 of 63.48MB (64.36%)" id="node4" fontsize=8 shape=box tooltip="line2000 (63.48MB)" color="#b21600" fillcolor="#edd8d5"] +N5 [label="line3001\n0 of 36.13MB (36.63%)" id="node5" fontsize=8 shape=box tooltip="line3001 (36.13MB)" color="#b22e00" fillcolor="#eddbd5"] +N4 -> N3 [label=" 63.48MB\n (inline)" weight=65 penwidth=4 color="#b21600" tooltip="line2000 -> line2001 (63.48MB)" labeltooltip="line2000 -> line2001 (63.48MB)"] +N1 -> N4 [label=" 63.48MB" 
weight=65 penwidth=4 color="#b21600" tooltip="line3002 -> line2000 (63.48MB)" labeltooltip="line3002 -> line2000 (63.48MB)" minlen=2] +N2 -> N1 [label=" 62.50MB\n (inline)" weight=64 penwidth=4 color="#b21600" tooltip="line3000 -> line3002 (62.50MB)" labeltooltip="line3000 -> line3002 (62.50MB)"] +N2 -> N5 [label=" 36.13MB\n (inline)" weight=37 penwidth=2 color="#b22e00" tooltip="line3000 -> line3001 (36.13MB)" labeltooltip="line3000 -> line3001 (36.13MB)"] +N5 -> N1 [label=" 32.23MB\n (inline)" weight=33 penwidth=2 color="#b23200" tooltip="line3001 -> line3002 (32.23MB)" labeltooltip="line3001 -> line3002 (32.23MB)"] +} diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.heap_tags.traces b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_tags.traces new file mode 100644 index 0000000..694b4b2 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.heap_tags.traces @@ -0,0 +1,32 @@ +Build ID: buildid +comment +Type: inuse_space +-----------+------------------------------------------------------- + key1: tag + bytes: 100kB + request: 100kB + 1000kB line1000 + line2001 (inline) + line2000 + line3002 (inline) + line3001 (inline) + line3000 +-----------+------------------------------------------------------- + bytes: 200kB + 3.91MB line1000 + line3001 (inline) + line3000 +-----------+------------------------------------------------------- + key1: tag + bytes: 1.56MB + request: 1.56MB + 62.50MB line2001 (inline) + line2000 + line3002 (inline) + line3000 +-----------+------------------------------------------------------- + bytes: 400kB + 31.25MB line3002 (inline) + line3001 (inline) + line3000 +-----------+------------------------------------------------------- diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.long_name_funcs.dot b/plugin/debug/pkg/internal/driver/testdata/pprof.long_name_funcs.dot new file mode 100644 index 0000000..dfc2142 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.long_name_funcs.dot @@ 
-0,0 +1,9 @@ +digraph "testbinary" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "File: testbinary" [shape=box fontsize=16 label="File: testbinary\lType: cpu\lDuration: 10s, Total samples = 1.11s (11.10%)\lShowing nodes accounting for 1.11s, 100% of 1.11s total\l\lSee https://git.io/JfYMW for how to read the graph\l" tooltip="testbinary"] } +N1 [label="package1\nobject\nfunction1\n1.10s (99.10%)" id="node1" fontsize=24 shape=box tooltip="path/to/package1.object.function1 (1.10s)" color="#b20000" fillcolor="#edd5d5"] +N2 [label="FooBar\nrun\n0.01s (0.9%)\nof 1.01s (90.99%)" id="node2" fontsize=10 shape=box tooltip="java.bar.foo.FooBar.run(java.lang.Runnable) (1.01s)" color="#b20400" fillcolor="#edd6d5"] +N3 [label="Bar\nFoo\n0 of 1.10s (99.10%)" id="node3" fontsize=8 shape=box tooltip="(anonymous namespace)::Bar::Foo (1.10s)" color="#b20000" fillcolor="#edd5d5"] +N3 -> N1 [label=" 1.10s" weight=100 penwidth=5 color="#b20000" tooltip="(anonymous namespace)::Bar::Foo -> path/to/package1.object.function1 (1.10s)" labeltooltip="(anonymous namespace)::Bar::Foo -> path/to/package1.object.function1 (1.10s)"] +N2 -> N3 [label=" 1s" weight=91 penwidth=5 color="#b20500" tooltip="java.bar.foo.FooBar.run(java.lang.Runnable) -> (anonymous namespace)::Bar::Foo (1s)" labeltooltip="java.bar.foo.FooBar.run(java.lang.Runnable) -> (anonymous namespace)::Bar::Foo (1s)"] +} diff --git a/plugin/debug/pkg/internal/driver/testdata/pprof.long_name_funcs.text b/plugin/debug/pkg/internal/driver/testdata/pprof.long_name_funcs.text new file mode 100644 index 0000000..39cb24e --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.long_name_funcs.text @@ -0,0 +1,5 @@ +Showing nodes accounting for 1.11s, 100% of 1.11s total + flat flat% sum% cum cum% + 1.10s 99.10% 99.10% 1.10s 99.10% path/to/package1.object.function1 + 0.01s 0.9% 100% 1.01s 90.99% java.bar.foo.FooBar.run(java.lang.Runnable) + 0 0% 100% 1.10s 99.10% (anonymous namespace)::Bar::Foo diff --git 
a/plugin/debug/pkg/internal/driver/testdata/pprof.unknown.flat.functions.call_tree.text b/plugin/debug/pkg/internal/driver/testdata/pprof.unknown.flat.functions.call_tree.text new file mode 100644 index 0000000..78a2298 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/testdata/pprof.unknown.flat.functions.call_tree.text @@ -0,0 +1,8 @@ +Showing nodes accounting for 1.12s, 100% of 1.12s total +Showing top 5 nodes out of 6 + flat flat% sum% cum cum% + 1.10s 98.21% 98.21% 1.10s 98.21% line1000 + 0.01s 0.89% 99.11% 1.01s 90.18% line2001 (inline) + 0.01s 0.89% 100% 1.02s 91.07% line3002 (inline) + 0 0% 100% 1.01s 90.18% line2000 + 0 0% 100% 1.12s 100% line3000 diff --git a/plugin/debug/pkg/internal/driver/webhtml.go b/plugin/debug/pkg/internal/driver/webhtml.go new file mode 100644 index 0000000..62cf628 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/webhtml.go @@ -0,0 +1,85 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "embed" + "fmt" + "html/template" + "os" + "sync" + + "m7s.live/v5/plugin/debug/pkg/internal/report" +) + +var ( + htmlTemplates *template.Template // Lazily loaded templates + htmlTemplateInit sync.Once +) + +// getHTMLTemplates returns the set of HTML templates used by pprof, +// initializing them if necessary. 
+func getHTMLTemplates() *template.Template { + htmlTemplateInit.Do(func() { + htmlTemplates = template.New("templategroup") + addTemplates(htmlTemplates) + report.AddSourceTemplates(htmlTemplates) + }) + return htmlTemplates +} + +//go:embed html +var embeddedFiles embed.FS + +// addTemplates adds a set of template definitions to templates. +func addTemplates(templates *template.Template) { + // Load specified file. + loadFile := func(fname string) string { + data, err := embeddedFiles.ReadFile(fname) + if err != nil { + fmt.Fprintf(os.Stderr, "internal/driver: embedded file %q not found\n", + fname) + os.Exit(1) + } + return string(data) + } + loadCSS := func(fname string) string { + return `<style type="text/css">` + "\n" + loadFile(fname) + `</style>` + "\n" + } + loadJS := func(fname string) string { + return `<script>` + "\n" + loadFile(fname) + `</script>` + "\n" + } + + // Define a named template with specified contents. + def := func(name, contents string) { + sub := template.New(name) + template.Must(sub.Parse(contents)) + template.Must(templates.AddParseTree(name, sub.Tree)) + } + + // Embedded files. + def("css", loadCSS("html/common.css")) + def("header", loadFile("html/header.html")) + def("graph", loadFile("html/graph.html")) + def("graph_css", loadCSS("html/graph.css")) + def("script", loadJS("html/common.js")) + def("top", loadFile("html/top.html")) + def("sourcelisting", loadFile("html/source.html")) + def("plaintext", loadFile("html/plaintext.html")) + // TODO: Rename "stacks" to "flamegraph" to seal moving off d3 flamegraph. + def("stacks", loadFile("html/stacks.html")) + def("stacks_css", loadCSS("html/stacks.css")) + def("stacks_js", loadJS("html/stacks.js")) +} diff --git a/plugin/debug/pkg/internal/driver/webui.go b/plugin/debug/pkg/internal/driver/webui.go new file mode 100644 index 0000000..8f5697c --- /dev/null +++ b/plugin/debug/pkg/internal/driver/webui.go @@ -0,0 +1,483 @@ +// Copyright 2017 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package driver + +import ( + "bytes" + "fmt" + "html/template" + "io" + "net" + "net/http" + gourl "net/url" + "os" + "os/exec" + "strconv" + "strings" + "time" + + "m7s.live/v5/plugin/debug/pkg/internal/graph" + "m7s.live/v5/plugin/debug/pkg/internal/measurement" + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/internal/report" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +// webInterface holds the state needed for serving a browser based interface. +type webInterface struct { + prof *profile.Profile + copier profileCopier + options *plugin.Options + help map[string]string + settingsFile string +} + +func makeWebInterface(p *profile.Profile, copier profileCopier, opt *plugin.Options) (*webInterface, error) { + settingsFile, err := settingsFileName() + if err != nil { + return nil, err + } + return &webInterface{ + prof: p, + copier: copier, + options: opt, + help: make(map[string]string), + settingsFile: settingsFile, + }, nil +} + +// maxEntries is the maximum number of entries to print for text interfaces. +const maxEntries = 50 + +// errorCatcher is a UI that captures errors for reporting to the browser. +type errorCatcher struct { + plugin.UI + errors []string +} + +func (ec *errorCatcher) PrintErr(args ...interface{}) { + ec.errors = append(ec.errors, strings.TrimSuffix(fmt.Sprintln(args...), "\n")) + ec.UI.PrintErr(args...) 
+} + +// webArgs contains arguments passed to templates in webhtml.go. +type webArgs struct { + Title string + Errors []string + Total int64 + SampleTypes []string + Legend []string + DocURL string + Standalone bool // True for command-line generation of HTML + Help map[string]string + Nodes []string + HTMLBody template.HTML + TextBody string + Top []report.TextItem + Listing report.WebListData + FlameGraph template.JS + Stacks template.JS + Configs []configMenuEntry + UnitDefs []measurement.UnitType +} + +func serveWebInterface(hostport string, p *profile.Profile, o *plugin.Options, disableBrowser bool) error { + host, port, err := getHostAndPort(hostport) + if err != nil { + return err + } + interactiveMode = true + copier := makeProfileCopier(p) + ui, err := makeWebInterface(p, copier, o) + if err != nil { + return err + } + for n, c := range pprofCommands { + ui.help[n] = c.description + } + for n, help := range configHelp { + ui.help[n] = help + } + ui.help["details"] = "Show information about the profile and this view" + ui.help["graph"] = "Display profile as a directed graph" + ui.help["flamegraph"] = "Display profile as a flame graph" + ui.help["reset"] = "Show the entire profile" + ui.help["save_config"] = "Save current settings" + + server := o.HTTPServer + if server == nil { + server = defaultWebServer + } + args := &plugin.HTTPServerArgs{ + Hostport: net.JoinHostPort(host, strconv.Itoa(port)), + Host: host, + Port: port, + Handlers: map[string]http.Handler{ + "/": http.HandlerFunc(ui.dot), + "/top": http.HandlerFunc(ui.top), + "/disasm": http.HandlerFunc(ui.disasm), + "/source": http.HandlerFunc(ui.source), + "/peek": http.HandlerFunc(ui.peek), + "/flamegraph": http.HandlerFunc(ui.stackView), + "/flamegraph2": redirectWithQuery("flamegraph", http.StatusMovedPermanently), // Keep legacy URL working. + "/flamegraphold": redirectWithQuery("flamegraph", http.StatusMovedPermanently), // Keep legacy URL working. 
+ "/saveconfig": http.HandlerFunc(ui.saveConfig), + "/deleteconfig": http.HandlerFunc(ui.deleteConfig), + "/download": http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + w.Header().Set("Content-Type", "application/vnd.google.protobuf+gzip") + w.Header().Set("Content-Disposition", "attachment;filename=profile.pb.gz") + p.Write(w) + }), + }, + } + + url := "http://" + args.Hostport + + o.UI.Print("Serving web UI on ", url) + + if o.UI.WantBrowser() && !disableBrowser { + go openBrowser(url, o) + } + return server(args) +} + +func getHostAndPort(hostport string) (string, int, error) { + host, portStr, err := net.SplitHostPort(hostport) + if err != nil { + return "", 0, fmt.Errorf("could not split http address: %v", err) + } + if host == "" { + host = "localhost" + } + var port int + if portStr == "" { + ln, err := net.Listen("tcp", net.JoinHostPort(host, "0")) + if err != nil { + return "", 0, fmt.Errorf("could not generate random port: %v", err) + } + port = ln.Addr().(*net.TCPAddr).Port + err = ln.Close() + if err != nil { + return "", 0, fmt.Errorf("could not generate random port: %v", err) + } + } else { + port, err = strconv.Atoi(portStr) + if err != nil { + return "", 0, fmt.Errorf("invalid port number: %v", err) + } + } + return host, port, nil +} +func defaultWebServer(args *plugin.HTTPServerArgs) error { + ln, err := net.Listen("tcp", args.Hostport) + if err != nil { + return err + } + isLocal := isLocalhost(args.Host) + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + if isLocal { + // Only allow local clients + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil || !isLocalhost(host) { + http.Error(w, "permission denied", http.StatusForbidden) + return + } + } + h := args.Handlers[req.URL.Path] + if h == nil { + // Fall back to default behavior + h = http.DefaultServeMux + } + h.ServeHTTP(w, req) + }) + + // We serve the ui at /ui/ and redirect there from the root. 
This is done + // to surface any problems with serving the ui at a non-root early. See: + // + // https://m7s.live/v5/plugin/debug/pkg/pull/348 + mux := http.NewServeMux() + mux.Handle("/ui/", http.StripPrefix("/ui", handler)) + mux.Handle("/", redirectWithQuery("/ui", http.StatusTemporaryRedirect)) + s := &http.Server{Handler: mux} + return s.Serve(ln) +} + +// redirectWithQuery responds with a given redirect code, preserving query +// parameters in the redirect URL. It does not convert relative paths to +// absolute paths like http.Redirect does, so that HTTPServerArgs.Handlers can +// generate relative redirects that work with the external prefixing. +func redirectWithQuery(path string, code int) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + pathWithQuery := &gourl.URL{Path: path, RawQuery: r.URL.RawQuery} + w.Header().Set("Location", pathWithQuery.String()) + w.WriteHeader(code) + } +} + +func isLocalhost(host string) bool { + for _, v := range []string{"localhost", "127.0.0.1", "[::1]", "::1"} { + if host == v { + return true + } + } + return false +} + +func openBrowser(url string, o *plugin.Options) { + // Construct URL. + baseURL, _ := gourl.Parse(url) + current := currentConfig() + u, _ := current.makeURL(*baseURL) + + // Give server a little time to get ready. + time.Sleep(time.Millisecond * 500) + + for _, b := range browsers() { + args := strings.Split(b, " ") + if len(args) == 0 { + continue + } + viewer := exec.Command(args[0], append(args[1:], u.String())...) + viewer.Stderr = os.Stderr + if err := viewer.Start(); err == nil { + return + } + } + // No visualizer succeeded, so just print URL. + o.UI.PrintErr(u.String()) +} + +// makeReport generates a report for the specified command. +// If configEditor is not null, it is used to edit the config used for the report. 
+func (ui *webInterface) makeReport(w http.ResponseWriter, req *http.Request, + cmd []string, configEditor func(*config)) (*report.Report, []string) { + cfg := currentConfig() + if err := cfg.applyURL(req.URL.Query()); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return nil, nil + } + if configEditor != nil { + configEditor(&cfg) + } + catcher := &errorCatcher{UI: ui.options.UI} + options := *ui.options + options.UI = catcher + _, rpt, err := generateRawReport(ui.copier.newCopy(), cmd, cfg, &options) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return nil, nil + } + return rpt, catcher.errors +} + +// renderHTML generates html using the named template based on the contents of data. +func renderHTML(dst io.Writer, tmpl string, rpt *report.Report, errList, legend []string, data webArgs) error { + file := getFromLegend(legend, "File: ", "unknown") + profile := getFromLegend(legend, "Type: ", "unknown") + data.Title = file + " " + profile + data.Errors = errList + data.Total = rpt.Total() + data.DocURL = rpt.DocURL() + data.Legend = legend + return getHTMLTemplates().ExecuteTemplate(dst, tmpl, data) +} + +// render responds with html generated by passing data to the named template. +func (ui *webInterface) render(w http.ResponseWriter, req *http.Request, tmpl string, + rpt *report.Report, errList, legend []string, data webArgs) { + data.SampleTypes = sampleTypes(ui.prof) + data.Help = ui.help + data.Configs = configMenu(ui.settingsFile, *req.URL) + html := &bytes.Buffer{} + if err := renderHTML(html, tmpl, rpt, errList, legend, data); err != nil { + http.Error(w, "internal template error", http.StatusInternalServerError) + ui.options.UI.PrintErr(err) + return + } + w.Header().Set("Content-Type", "text/html") + w.Write(html.Bytes()) +} + +// dot generates a web page containing an svg diagram. 
+func (ui *webInterface) dot(w http.ResponseWriter, req *http.Request) { + rpt, errList := ui.makeReport(w, req, []string{"svg"}, nil) + if rpt == nil { + return // error already reported + } + + // Generate dot graph. + g, config := report.GetDOT(rpt) + legend := config.Labels + config.Labels = nil + dot := &bytes.Buffer{} + graph.ComposeDot(dot, g, &graph.DotAttributes{}, config) + + // Convert to svg. + svg, err := dotToSvg(dot.Bytes()) + if err != nil { + http.Error(w, "Could not execute dot; may need to install graphviz.", + http.StatusNotImplemented) + ui.options.UI.PrintErr("Failed to execute dot. Is Graphviz installed?\n", err) + return + } + + // Get all node names into an array. + nodes := []string{""} // dot starts with node numbered 1 + for _, n := range g.Nodes { + nodes = append(nodes, n.Info.Name) + } + + ui.render(w, req, "graph", rpt, errList, legend, webArgs{ + HTMLBody: template.HTML(string(svg)), + Nodes: nodes, + }) +} + +func dotToSvg(dot []byte) ([]byte, error) { + cmd := exec.Command("dot", "-Tsvg") + out := &bytes.Buffer{} + cmd.Stdin, cmd.Stdout, cmd.Stderr = bytes.NewBuffer(dot), out, os.Stderr + if err := cmd.Run(); err != nil { + return nil, err + } + + // Fix dot bug related to unquoted ampersands. + svg := bytes.Replace(out.Bytes(), []byte("&;"), []byte("&amp;;"), -1) + + // Cleanup for embedding by dropping stuff before the <svg> start. + if pos := bytes.Index(svg, []byte("<svg")); pos >= 0 { + svg = svg[pos:] + } + return svg, nil +} + +func (ui *webInterface) top(w http.ResponseWriter, req *http.Request) { + rpt, errList := ui.makeReport(w, req, []string{"top"}, func(cfg *config) { + cfg.NodeCount = 500 + }) + if rpt == nil { + return // error already reported + } + top, legend := report.TextItems(rpt) + var nodes []string + for _, item := range top { + nodes = append(nodes, item.Name) + } + + ui.render(w, req, "top", rpt, errList, legend, webArgs{ + Top: top, + Nodes: nodes, + }) +} + +// disasm generates a web page containing disassembly. 
+func (ui *webInterface) disasm(w http.ResponseWriter, req *http.Request) { + args := []string{"disasm", req.URL.Query().Get("f")} + rpt, errList := ui.makeReport(w, req, args, nil) + if rpt == nil { + return // error already reported + } + + out := &bytes.Buffer{} + if err := report.PrintAssembly(out, rpt, ui.options.Obj, maxEntries); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } + + legend := report.ProfileLabels(rpt) + ui.render(w, req, "plaintext", rpt, errList, legend, webArgs{ + TextBody: out.String(), + }) + +} + +// source generates a web page containing source code annotated with profile +// data. +func (ui *webInterface) source(w http.ResponseWriter, req *http.Request) { + args := []string{"weblist", req.URL.Query().Get("f")} + rpt, errList := ui.makeReport(w, req, args, nil) + if rpt == nil { + return // error already reported + } + + // Generate source listing. + listing, err := report.MakeWebList(rpt, ui.options.Obj, maxEntries) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } + + legend := report.ProfileLabels(rpt) + ui.render(w, req, "sourcelisting", rpt, errList, legend, webArgs{ + Listing: listing, + }) +} + +// peek generates a web page listing callers/callers. +func (ui *webInterface) peek(w http.ResponseWriter, req *http.Request) { + args := []string{"peek", req.URL.Query().Get("f")} + rpt, errList := ui.makeReport(w, req, args, func(cfg *config) { + cfg.Granularity = "lines" + }) + if rpt == nil { + return // error already reported + } + + out := &bytes.Buffer{} + if err := report.Generate(out, rpt, ui.options.Obj); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } + + legend := report.ProfileLabels(rpt) + ui.render(w, req, "plaintext", rpt, errList, legend, webArgs{ + TextBody: out.String(), + }) +} + +// saveConfig saves URL configuration. 
+func (ui *webInterface) saveConfig(w http.ResponseWriter, req *http.Request) { + if err := setConfig(ui.settingsFile, *req.URL); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } +} + +// deleteConfig deletes a configuration. +func (ui *webInterface) deleteConfig(w http.ResponseWriter, req *http.Request) { + name := req.URL.Query().Get("config") + if err := removeConfig(ui.settingsFile, name); err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + ui.options.UI.PrintErr(err) + return + } +} + +// getFromLegend returns the suffix of an entry in legend that starts +// with param. It returns def if no such entry is found. +func getFromLegend(legend []string, param, def string) string { + for _, s := range legend { + if strings.HasPrefix(s, param) { + return s[len(param):] + } + } + return def +} diff --git a/plugin/debug/pkg/internal/driver/webui_test.go b/plugin/debug/pkg/internal/driver/webui_test.go new file mode 100644 index 0000000..8c42303 --- /dev/null +++ b/plugin/debug/pkg/internal/driver/webui_test.go @@ -0,0 +1,332 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package driver + +import ( + "fmt" + "io" + "net" + "net/http" + "net/http/httptest" + "net/url" + "os/exec" + "regexp" + "runtime" + "sync" + "testing" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/internal/proftest" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +func makeTestServer(t testing.TB, prof *profile.Profile) *httptest.Server { + if runtime.GOOS == "nacl" || runtime.GOOS == "js" { + t.Skip("test assumes tcp available") + } + + // Custom http server creator + var server *httptest.Server + serverCreated := make(chan bool) + creator := func(a *plugin.HTTPServerArgs) error { + server = httptest.NewServer(http.HandlerFunc( + func(w http.ResponseWriter, r *http.Request) { + if h := a.Handlers[r.URL.Path]; h != nil { + h.ServeHTTP(w, r) + } + })) + serverCreated <- true + return nil + } + + // Start server and wait for it to be initialized + go serveWebInterface("unused:1234", prof, &plugin.Options{ + Obj: fakeObjTool{}, + UI: &proftest.TestUI{T: t}, + HTTPServer: creator, + }, false) + <-serverCreated + + // Close the server when the test is done. + t.Cleanup(server.Close) + + return server +} + +func TestWebInterface(t *testing.T) { + prof := makeFakeProfile() + server := makeTestServer(t, prof) + haveDot := false + if _, err := exec.LookPath("dot"); err == nil { + haveDot = true + } + + type testCase struct { + path string + want []string + needDot bool + } + testcases := []testCase{ + {"/", []string{"F1", "F2", "F3", "testbin", "cpu"}, true}, + {"/top", []string{`"Name":"F2","InlineLabel":"","Flat":200,"Cum":300,"FlatFormat":"200ms","CumFormat":"300ms"}`}, false}, + {"/source?f=" + url.QueryEscape("F[12]"), []string{ + "F1", + "F2", + `\. 
+300ms .*f1:asm`, // Cumulative count for F1 + "200ms +300ms .*f2:asm", // Flat + cumulative count for F2 + }, false}, + {"/peek?f=" + url.QueryEscape("F[12]"), + []string{"300ms.*F1", "200ms.*300ms.*F2"}, false}, + {"/disasm?f=" + url.QueryEscape("F[12]"), + []string{"f1:asm", "f2:asm"}, false}, + {"/flamegraph", []string{ + "File: testbin", + // Check that interesting frames are included. + `\bF1\b`, + `\bF2\b`, + // Check new view JS is included. + `function stackViewer`, + // Check new view CSS is included. + "#stack-chart {", + }, false}, + } + for _, c := range testcases { + if c.needDot && !haveDot { + t.Log("skipping", c.path, "since dot (graphviz) does not seem to be installed") + continue + } + res, err := http.Get(server.URL + c.path) + if err != nil { + t.Error("could not fetch", c.path, err) + continue + } + data, err := io.ReadAll(res.Body) + if err != nil { + t.Error("could not read response", c.path, err) + continue + } + result := string(data) + for _, w := range c.want { + if match, _ := regexp.MatchString(w, result); !match { + t.Errorf("response for %s does not match "+ + "expected pattern '%s'; "+ + "actual result:\n%s", c.path, w, result) + } + } + } + + // Also fetch all the test case URLs in parallel to test thread + // safety when run under the race detector. + var wg sync.WaitGroup + for _, c := range testcases { + if c.needDot && !haveDot { + continue + } + path := server.URL + c.path + for count := 0; count < 2; count++ { + wg.Add(1) + go func() { + defer wg.Done() + res, err := http.Get(path) + if err != nil { + t.Error("could not fetch", path, err) + return + } + if _, err = io.ReadAll(res.Body); err != nil { + t.Error("could not read response", path, err) + } + }() + } + } + wg.Wait() +} + +// Implement fake object file support. 
+ +const addrBase = 0x1000 +const fakeSource = "testdata/file1000.src" + +type fakeObj struct{} + +func (f fakeObj) Close() error { return nil } +func (f fakeObj) Name() string { return "testbin" } +func (f fakeObj) ObjAddr(addr uint64) (uint64, error) { return addr, nil } +func (f fakeObj) BuildID() string { return "" } +func (f fakeObj) SourceLine(addr uint64) ([]plugin.Frame, error) { + return nil, fmt.Errorf("SourceLine unimplemented") +} +func (f fakeObj) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) { + return []*plugin.Sym{ + { + Name: []string{"F1"}, File: fakeSource, + Start: addrBase, End: addrBase + 10, + }, + { + Name: []string{"F2"}, File: fakeSource, + Start: addrBase + 10, End: addrBase + 20, + }, + { + Name: []string{"F3"}, File: fakeSource, + Start: addrBase + 20, End: addrBase + 30, + }, + }, nil +} + +type fakeObjTool struct{} + +func (obj fakeObjTool) Open(file string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) { + return fakeObj{}, nil +} + +func (obj fakeObjTool) Disasm(file string, start, end uint64, intelSyntax bool) ([]plugin.Inst, error) { + return []plugin.Inst{ + {Addr: addrBase + 10, Text: "f1:asm", Function: "F1", Line: 3}, + {Addr: addrBase + 20, Text: "f2:asm", Function: "F2", Line: 11}, + {Addr: addrBase + 30, Text: "d3:asm", Function: "F3", Line: 22}, + }, nil +} + +func makeFakeProfile() *profile.Profile { + // Three functions: F1, F2, F3 with three lines, 11, 22, 33. 
+ funcs := []*profile.Function{ + {ID: 1, Name: "F1", Filename: fakeSource, StartLine: 3}, + {ID: 2, Name: "F2", Filename: fakeSource, StartLine: 5}, + {ID: 3, Name: "F3", Filename: fakeSource, StartLine: 7}, + } + lines := []profile.Line{ + {Function: funcs[0], Line: 11}, + {Function: funcs[1], Line: 22}, + {Function: funcs[2], Line: 33}, + } + mapping := []*profile.Mapping{ + { + ID: 1, + Start: addrBase, + Limit: addrBase + 100, + Offset: 0, + File: "testbin", + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + }, + } + + // Three interesting addresses: base+{10,20,30} + locs := []*profile.Location{ + {ID: 1, Address: addrBase + 10, Line: lines[0:1], Mapping: mapping[0]}, + {ID: 2, Address: addrBase + 20, Line: lines[1:2], Mapping: mapping[0]}, + {ID: 3, Address: addrBase + 30, Line: lines[2:3], Mapping: mapping[0]}, + } + + // Two stack traces. + return &profile.Profile{ + PeriodType: &profile.ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*profile.ValueType{ + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*profile.Sample{ + { + Location: []*profile.Location{locs[2], locs[1], locs[0]}, + Value: []int64{100}, + }, + { + Location: []*profile.Location{locs[1], locs[0]}, + Value: []int64{200}, + }, + }, + Location: locs, + Function: funcs, + Mapping: mapping, + } +} + +func TestGetHostAndPort(t *testing.T) { + if runtime.GOOS == "nacl" || runtime.GOOS == "js" { + t.Skip("test assumes tcp available") + } + + type testCase struct { + hostport string + wantHost string + wantPort int + wantRandomPort bool + } + + testCases := []testCase{ + {":", "localhost", 0, true}, + {":4681", "localhost", 4681, false}, + {"localhost:4681", "localhost", 4681, false}, + } + for _, tc := range testCases { + host, port, err := getHostAndPort(tc.hostport) + if err != nil { + t.Errorf("could not get host and port for %q: %v", tc.hostport, err) + } + if got, want := host, tc.wantHost; got != want { + 
t.Errorf("for %s, got host %s, want %s", tc.hostport, got, want) + continue + } + if !tc.wantRandomPort { + if got, want := port, tc.wantPort; got != want { + t.Errorf("for %s, got port %d, want %d", tc.hostport, got, want) + continue + } + } + } +} + +func TestIsLocalHost(t *testing.T) { + for _, s := range []string{"localhost:10000", "[::1]:10000", "127.0.0.1:10000"} { + host, _, err := net.SplitHostPort(s) + if err != nil { + t.Error("unexpected error when splitting", s) + continue + } + if !isLocalhost(host) { + t.Errorf("host %s from %s not considered local", host, s) + } + } +} + +func BenchmarkTop(b *testing.B) { benchmarkURL(b, "/top", false) } +func BenchmarkFlame(b *testing.B) { benchmarkURL(b, "/flamegraph", false) } +func BenchmarkDot(b *testing.B) { benchmarkURL(b, "/", true) } + +func benchmarkURL(b *testing.B, path string, needDot bool) { + if needDot { + if _, err := exec.LookPath("dot"); err != nil { + b.Skip("dot not available") + } + } + prof := largeProfile(b) + server := makeTestServer(b, prof) + url := server.URL + path + b.ResetTimer() + for i := 0; i < b.N; i++ { + res, err := http.Get(url) + if err != nil { + b.Fatal(err) + } + data, err := io.ReadAll(res.Body) + if err != nil { + b.Fatal(err) + } + if i == 0 && testing.Verbose() { + b.Logf("%-12s : %10d bytes", path, len(data)) + } + } +} diff --git a/plugin/debug/pkg/internal/elfexec/elfexec.go b/plugin/debug/pkg/internal/elfexec/elfexec.go new file mode 100644 index 0000000..3f5b09b --- /dev/null +++ b/plugin/debug/pkg/internal/elfexec/elfexec.go @@ -0,0 +1,378 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package elfexec provides utility routines to examine ELF binaries. +package elfexec + +import ( + "bufio" + "debug/elf" + "encoding/binary" + "fmt" + "io" +) + +const ( + maxNoteSize = 1 << 20 // in bytes + noteTypeGNUBuildID = 3 +) + +// elfNote is the payload of a Note Section in an ELF file. +type elfNote struct { + Name string // Contents of the "name" field, omitting the trailing zero byte. + Desc []byte // Contents of the "desc" field. + Type uint32 // Contents of the "type" field. +} + +// parseNotes returns the notes from a SHT_NOTE section or PT_NOTE segment. +func parseNotes(reader io.Reader, alignment int, order binary.ByteOrder) ([]elfNote, error) { + r := bufio.NewReader(reader) + + // padding returns the number of bytes required to pad the given size to an + // alignment boundary. + padding := func(size int) int { + return ((size + (alignment - 1)) &^ (alignment - 1)) - size + } + + var notes []elfNote + for { + noteHeader := make([]byte, 12) // 3 4-byte words + if _, err := io.ReadFull(r, noteHeader); err == io.EOF { + break + } else if err != nil { + return nil, err + } + namesz := order.Uint32(noteHeader[0:4]) + descsz := order.Uint32(noteHeader[4:8]) + typ := order.Uint32(noteHeader[8:12]) + + if uint64(namesz) > uint64(maxNoteSize) { + return nil, fmt.Errorf("note name too long (%d bytes)", namesz) + } + var name string + if namesz > 0 { + // Documentation differs as to whether namesz is meant to include the + // trailing zero, but everyone agrees that name is null-terminated. 
+ // So we'll just determine the actual length after the fact. + var err error + name, err = r.ReadString('\x00') + if err == io.EOF { + return nil, fmt.Errorf("missing note name (want %d bytes)", namesz) + } else if err != nil { + return nil, err + } + namesz = uint32(len(name)) + name = name[:len(name)-1] + } + + // Drop padding bytes until the desc field. + for n := padding(len(noteHeader) + int(namesz)); n > 0; n-- { + if _, err := r.ReadByte(); err == io.EOF { + return nil, fmt.Errorf( + "missing %d bytes of padding after note name", n) + } else if err != nil { + return nil, err + } + } + + if uint64(descsz) > uint64(maxNoteSize) { + return nil, fmt.Errorf("note desc too long (%d bytes)", descsz) + } + desc := make([]byte, int(descsz)) + if _, err := io.ReadFull(r, desc); err == io.EOF { + return nil, fmt.Errorf("missing desc (want %d bytes)", len(desc)) + } else if err != nil { + return nil, err + } + + notes = append(notes, elfNote{Name: name, Desc: desc, Type: typ}) + + // Drop padding bytes until the next note or the end of the section, + // whichever comes first. + for n := padding(len(desc)); n > 0; n-- { + if _, err := r.ReadByte(); err == io.EOF { + // We hit the end of the section before an alignment boundary. + // This can happen if this section is at the end of the file or the next + // section has a smaller alignment requirement. + break + } else if err != nil { + return nil, err + } + } + } + return notes, nil +} + +// GetBuildID returns the GNU build-ID for an ELF binary. +// +// If no build-ID was found but the binary was read without error, it returns +// (nil, nil). 
+func GetBuildID(f *elf.File) ([]byte, error) { + findBuildID := func(notes []elfNote) ([]byte, error) { + var buildID []byte + for _, note := range notes { + if note.Name == "GNU" && note.Type == noteTypeGNUBuildID { + if buildID == nil { + buildID = note.Desc + } else { + return nil, fmt.Errorf("multiple build ids found, don't know which to use") + } + } + } + return buildID, nil + } + + for _, p := range f.Progs { + if p.Type != elf.PT_NOTE { + continue + } + notes, err := parseNotes(p.Open(), int(p.Align), f.ByteOrder) + if err != nil { + return nil, err + } + if b, err := findBuildID(notes); b != nil || err != nil { + return b, err + } + } + for _, s := range f.Sections { + if s.Type != elf.SHT_NOTE { + continue + } + notes, err := parseNotes(s.Open(), int(s.Addralign), f.ByteOrder) + if err != nil { + return nil, err + } + if b, err := findBuildID(notes); b != nil || err != nil { + return b, err + } + } + return nil, nil +} + +// kernelBase calculates the base for kernel mappings, which usually require +// special handling. For kernel mappings, tools (like perf) use the address of +// the kernel relocation symbol (_text or _stext) as the mmap start. Additionally, +// for obfuscation, ChromeOS profiles have the kernel image remapped to the 0-th page. +func kernelBase(loadSegment *elf.ProgHeader, stextOffset *uint64, start, limit, offset uint64) (uint64, bool) { + const ( + // PAGE_OFFSET for PowerPC64, see arch/powerpc/Kconfig in the kernel sources. + pageOffsetPpc64 = 0xc000000000000000 + pageSize = 4096 + ) + + if loadSegment.Vaddr == start-offset { + return offset, true + } + if start == 0 && limit != 0 && stextOffset != nil { + // ChromeOS remaps its kernel to 0. Nothing else should come + // down this path. 
Empirical values: + // VADDR=0xffffffff80200000 + // stextOffset=0xffffffff80200198 + return start - *stextOffset, true + } + if start >= 0x8000000000000000 && limit > start && (offset == 0 || offset == pageOffsetPpc64 || offset == start) { + // Some kernels look like: + // VADDR=0xffffffff80200000 + // stextOffset=0xffffffff80200198 + // Start=0xffffffff83200000 + // Limit=0xffffffff84200000 + // Offset=0 (0xc000000000000000 for PowerPC64) (== Start for ASLR kernel) + // So the base should be: + if stextOffset != nil && (start%pageSize) == (*stextOffset%pageSize) { + // perf uses the address of _stext as start. Some tools may + // adjust for this before calling GetBase, in which case the page + // alignment should be different from that of stextOffset. + return start - *stextOffset, true + } + + return start - loadSegment.Vaddr, true + } + if start%pageSize != 0 && stextOffset != nil && *stextOffset%pageSize == start%pageSize { + // ChromeOS remaps its kernel to 0 + start%pageSize. Nothing + // else should come down this path. Empirical values: + // start=0x198 limit=0x2f9fffff offset=0 + // VADDR=0xffffffff81000000 + // stextOffset=0xffffffff81000198 + return start - *stextOffset, true + } + return 0, false +} + +// GetBase determines the base address to subtract from virtual +// address to get symbol table address. For an executable, the base +// is 0. Otherwise, it's a shared library, and the base is the +// address where the mapping starts. The kernel needs special handling. +func GetBase(fh *elf.FileHeader, loadSegment *elf.ProgHeader, stextOffset *uint64, start, limit, offset uint64) (uint64, error) { + + if start == 0 && offset == 0 && (limit == ^uint64(0) || limit == 0) { + // Some tools may introduce a fake mapping that spans the entire + // address space. Assume that the address has already been + // adjusted, so no additional base adjustment is necessary. 
+ return 0, nil + } + + switch fh.Type { + case elf.ET_EXEC: + if loadSegment == nil { + // Assume fixed-address executable and so no adjustment. + return 0, nil + } + if stextOffset == nil && start > 0 && start < 0x8000000000000000 { + // A regular user-mode executable. Compute the base offset using same + // arithmetics as in ET_DYN case below, see the explanation there. + // Ideally, the condition would just be "stextOffset == nil" as that + // represents the address of _stext symbol in the vmlinux image. Alas, + // the caller may skip reading it from the binary (it's expensive to scan + // all the symbols) and so it may be nil even for the kernel executable. + // So additionally check that the start is within the user-mode half of + // the 64-bit address space. + return start - offset + loadSegment.Off - loadSegment.Vaddr, nil + } + // Various kernel heuristics and cases are handled separately. + if base, match := kernelBase(loadSegment, stextOffset, start, limit, offset); match { + return base, nil + } + // ChromeOS can remap its kernel to 0, and the caller might have not found + // the _stext symbol. Split this case from kernelBase() above, since we don't + // want to apply it to an ET_DYN user-mode executable. + if start == 0 && limit != 0 && stextOffset == nil { + return start - loadSegment.Vaddr, nil + } + + return 0, fmt.Errorf("don't know how to handle EXEC segment: %v start=0x%x limit=0x%x offset=0x%x", *loadSegment, start, limit, offset) + case elf.ET_REL: + if offset != 0 { + return 0, fmt.Errorf("don't know how to handle mapping.Offset") + } + return start, nil + case elf.ET_DYN: + // The process mapping information, start = start of virtual address range, + // and offset = offset in the executable file of the start address, tells us + // that a runtime virtual address x maps to a file offset + // fx = x - start + offset. + if loadSegment == nil { + return start - offset, nil + } + // Kernels compiled as PIE can be ET_DYN as well. 
Use heuristic, similar to + // the ET_EXEC case above. + if base, match := kernelBase(loadSegment, stextOffset, start, limit, offset); match { + return base, nil + } + // The program header, if not nil, indicates the offset in the file where + // the executable segment is located (loadSegment.Off), and the base virtual + // address where the first byte of the segment is loaded + // (loadSegment.Vaddr). A file offset fx maps to a virtual (symbol) address + // sx = fx - loadSegment.Off + loadSegment.Vaddr. + // + // Thus, a runtime virtual address x maps to a symbol address + // sx = x - start + offset - loadSegment.Off + loadSegment.Vaddr. + return start - offset + loadSegment.Off - loadSegment.Vaddr, nil + } + return 0, fmt.Errorf("don't know how to handle FileHeader.Type %v", fh.Type) +} + +// FindTextProgHeader finds the program segment header containing the .text +// section or nil if the segment cannot be found. +func FindTextProgHeader(f *elf.File) *elf.ProgHeader { + for _, s := range f.Sections { + if s.Name == ".text" { + // Find the LOAD segment containing the .text section. + for _, p := range f.Progs { + if p.Type == elf.PT_LOAD && p.Flags&elf.PF_X != 0 && s.Addr >= p.Vaddr && s.Addr < p.Vaddr+p.Memsz { + return &p.ProgHeader + } + } + } + } + return nil +} + +// ProgramHeadersForMapping returns the program segment headers that overlap +// the runtime mapping with file offset mapOff and memory size mapSz. We skip +// over segments zero file size because their file offset values are unreliable. +// Even if overlapping, a segment is not selected if its aligned file offset is +// greater than the mapping file offset, or if the mapping includes the last +// page of the segment, but not the full segment and the mapping includes +// additional pages after the segment end. +// The function returns a slice of pointers to the headers in the input +// slice, which are valid only while phdrs is not modified or discarded. 
+func ProgramHeadersForMapping(phdrs []elf.ProgHeader, mapOff, mapSz uint64) []*elf.ProgHeader {
+	const (
+		// pageSize defines the virtual memory page size used by the loader. This
+		// value is dependent on the memory management unit of the CPU. The page
+		// size is 4KB virtually on all the architectures that we care about, so we
+		// define this metric as a constant. If we encounter architectures where
+		// page size is not 4KB, we must try to guess the page size on the system
+		// where the profile was collected, possibly using the architecture
+		// specified in the ELF file header.
+		pageSize       = 4096
+		pageOffsetMask = pageSize - 1
+	)
+	mapLimit := mapOff + mapSz
+	var headers []*elf.ProgHeader
+	for i := range phdrs {
+		p := &phdrs[i]
+		// Skip over segments with zero file size. Their file offsets can have
+		// arbitrary values, see b/195427553.
+		if p.Filesz == 0 {
+			continue
+		}
+		segLimit := p.Off + p.Memsz
+		// The segment must overlap the mapping.
+		if p.Type == elf.PT_LOAD && mapOff < segLimit && p.Off < mapLimit {
+			// If the mapping offset is strictly less than the page aligned segment
+			// offset, then this mapping comes from a different segment, fixes
+			// b/179920361.
+			alignedSegOffset := uint64(0)
+			if p.Off > (p.Vaddr & pageOffsetMask) {
+				alignedSegOffset = p.Off - (p.Vaddr & pageOffsetMask)
+			}
+			if mapOff < alignedSegOffset {
+				continue
+			}
+			// If the mapping starts in the middle of the segment, it covers less than
+			// one page of the segment, and it extends at least one page past the
+			// segment, then this mapping comes from a different segment.
+			if mapOff > p.Off && (segLimit < mapOff+pageSize) && (mapLimit >= segLimit+pageSize) {
+				continue
+			}
+			headers = append(headers, p)
+		}
+	}
+	return headers
+}
+
+// HeaderForFileOffset attempts to identify a unique program header that
+// includes the given file offset. It returns an error if it cannot identify a
+// unique header. 
+func HeaderForFileOffset(headers []*elf.ProgHeader, fileOffset uint64) (*elf.ProgHeader, error) { + var ph *elf.ProgHeader + for _, h := range headers { + if fileOffset >= h.Off && fileOffset < h.Off+h.Memsz { + if ph != nil { + // Assuming no other bugs, this can only happen if we have two or + // more small program segments that fit on the same page, and a + // segment other than the last one includes uninitialized data, or + // if the debug binary used for symbolization is stripped of some + // sections, so segment file sizes are smaller than memory sizes. + return nil, fmt.Errorf("found second program header (%#v) that matches file offset %x, first program header is %#v. Is this a stripped binary, or does the first program segment contain uninitialized data?", *h, fileOffset, *ph) + } + ph = h + } + } + if ph == nil { + return nil, fmt.Errorf("no program header matches file offset %x", fileOffset) + } + return ph, nil +} diff --git a/plugin/debug/pkg/internal/elfexec/elfexec_test.go b/plugin/debug/pkg/internal/elfexec/elfexec_test.go new file mode 100644 index 0000000..a03b0e4 --- /dev/null +++ b/plugin/debug/pkg/internal/elfexec/elfexec_test.go @@ -0,0 +1,485 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package elfexec + +import ( + "debug/elf" + "fmt" + "reflect" + "strings" + "testing" +) + +func TestGetBase(t *testing.T) { + + fhExec := &elf.FileHeader{ + Type: elf.ET_EXEC, + } + fhRel := &elf.FileHeader{ + Type: elf.ET_REL, + } + fhDyn := &elf.FileHeader{ + Type: elf.ET_DYN, + } + lsOffset := &elf.ProgHeader{ + Vaddr: 0x400000, + Off: 0x200000, + } + kernelHeader := &elf.ProgHeader{ + Vaddr: 0xffffffff81000000, + } + kernelAslrHeader := &elf.ProgHeader{ + Vaddr: 0xffffffff80200000, + Off: 0x1000, + } + // Kernel PIE header with vaddr aligned to a 4k boundary + kernelPieAlignedHeader := &elf.ProgHeader{ + Vaddr: 0xffff800010010000, + Off: 0x10000, + } + // Kernel PIE header with vaddr that doesn't fall on a 4k boundary + kernelPieUnalignedHeader := &elf.ProgHeader{ + Vaddr: 0xffffffc010080800, + Off: 0x10800, + } + ppc64KernelHeader := &elf.ProgHeader{ + Vaddr: 0xc000000000000000, + } + + testcases := []struct { + label string + fh *elf.FileHeader + loadSegment *elf.ProgHeader + stextOffset *uint64 + start, limit, offset uint64 + want uint64 + wanterr bool + }{ + {"exec", fhExec, nil, nil, 0x400000, 0, 0, 0, false}, + {"exec offset", fhExec, lsOffset, nil, 0x400000, 0x800000, 0, 0x200000, false}, + {"exec offset 2", fhExec, lsOffset, nil, 0x200000, 0x600000, 0, 0, false}, + {"exec nomap", fhExec, nil, nil, 0, 0, 0, 0, false}, + {"exec kernel", fhExec, kernelHeader, uint64p(0xffffffff81000198), 0xffffffff82000198, 0xffffffff83000198, 0, 0x1000000, false}, + {"exec kernel", fhExec, kernelHeader, uint64p(0xffffffff810002b8), 0xffffffff81000000, 0xffffffffa0000000, 0x0, 0x0, false}, + {"exec kernel ASLR", fhExec, kernelHeader, uint64p(0xffffffff810002b8), 0xffffffff81000000, 0xffffffffa0000000, 0xffffffff81000000, 0x0, false}, + // TODO(aalexand): Figure out where this test case exactly comes from and + // whether it's still relevant. 
+ {"exec kernel ASLR 2", fhExec, kernelAslrHeader, nil, 0xffffffff83e00000, 0xfffffffffc3fffff, 0x3c00000, 0x3c00000, false}, + {"exec PPC64 kernel", fhExec, ppc64KernelHeader, uint64p(0xc000000000000000), 0xc000000000000000, 0xd00000001a730000, 0x0, 0x0, false}, + {"exec chromeos kernel", fhExec, kernelHeader, uint64p(0xffffffff81000198), 0, 0x10197, 0, 0x7efffe68, false}, + {"exec chromeos kernel 2", fhExec, kernelHeader, uint64p(0xffffffff81000198), 0, 0x10198, 0, 0x7efffe68, false}, + {"exec chromeos kernel 3", fhExec, kernelHeader, uint64p(0xffffffff81000198), 0x198, 0x100000, 0, 0x7f000000, false}, + {"exec chromeos kernel 4", fhExec, kernelHeader, uint64p(0xffffffff81200198), 0x198, 0x100000, 0, 0x7ee00000, false}, + {"exec chromeos kernel unremapped", fhExec, kernelHeader, uint64p(0xffffffff810001c8), 0xffffffff834001c8, 0xffffffffc0000000, 0xffffffff834001c8, 0x2400000, false}, + {"dyn", fhDyn, nil, nil, 0x200000, 0x300000, 0, 0x200000, false}, + {"dyn map", fhDyn, lsOffset, nil, 0x0, 0x300000, 0, 0xFFFFFFFFFFE00000, false}, + {"dyn nomap", fhDyn, nil, nil, 0x0, 0x0, 0, 0, false}, + {"dyn map+offset", fhDyn, lsOffset, nil, 0x900000, 0xa00000, 0x200000, 0x500000, false}, + {"dyn kernel", fhDyn, kernelPieAlignedHeader, uint64p(0xffff800010000000), 0xffff800010000000, 0xffff800012815c00, 0xffff800010000000, 0, false}, + {"dyn chromeos aslr kernel", fhDyn, kernelPieUnalignedHeader, uint64p(0xffffffc010080800), 0x800, 0xb7f800, 0, 0x3feff80000, false}, + {"dyn chromeos aslr kernel unremapped", fhDyn, kernelPieUnalignedHeader, uint64p(0xffffffc010080800), 0xffffffdb5d680800, 0xffffffdb5e200000, 0xffffffdb5d680800, 0x1b4d600000, false}, + {"rel", fhRel, nil, nil, 0x2000000, 0x3000000, 0, 0x2000000, false}, + {"rel nomap", fhRel, nil, nil, 0x0, ^uint64(0), 0, 0, false}, + {"rel offset", fhRel, nil, nil, 0x100000, 0x200000, 0x1, 0, true}, + } + + for _, tc := range testcases { + base, err := GetBase(tc.fh, tc.loadSegment, tc.stextOffset, tc.start, tc.limit, 
tc.offset)
+		if err != nil {
+			if !tc.wanterr {
+				t.Errorf("%s: want no error, got %v", tc.label, err)
+			}
+			continue
+		}
+		if tc.wanterr {
+			t.Errorf("%s: want error, got nil", tc.label)
+			continue
+		}
+		if base != tc.want {
+			t.Errorf("%s: want 0x%x, got 0x%x", tc.label, tc.want, base)
+		}
+	}
+}
+
+func uint64p(n uint64) *uint64 {
+	return &n
+}
+
+func TestFindProgHeaderForMapping(t *testing.T) {
+	buildList := func(headers []*elf.ProgHeader) (result string) {
+		builder := strings.Builder{}
+		if err := builder.WriteByte('['); err != nil {
+			t.Error("Failed to append '[' to the builder")
+		}
+		defer func() {
+			if err := builder.WriteByte(']'); err != nil {
+				t.Error("Failed to append ']' to the builder")
+			}
+			result = builder.String()
+		}()
+		if len(headers) == 0 {
+			if _, err := builder.WriteString("nil"); err != nil {
+				t.Error("Failed to append 'nil' to the builder")
+			}
+			return
+		}
+		if _, err := builder.WriteString(fmt.Sprintf("%#v", *headers[0])); err != nil {
+			t.Error("Failed to append first header to the builder")
+		}
+		for i, h := range headers[1:] {
+			if _, err := builder.WriteString(fmt.Sprintf(", %#v", *h)); err != nil {
+				t.Errorf("Failed to append header %d to the builder", i+1)
+			}
+		}
+		return
+	}
+
+	// Various ELF program headers for unit tests. 
+ tinyHeaders := []elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x1f0, Memsz: 0x1f0, Align: 0x200000}, + } + tinyBadBSSHeaders := []elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x100, Memsz: 0x1f0, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xd80, Vaddr: 0x400d80, Paddr: 0x400d80, Filesz: 0x90, Memsz: 0x90, Align: 0x200000}, + } + smallHeaders := []elf.ProgHeader{ + {Type: elf.PT_PHDR, Flags: elf.PF_R | elf.PF_X, Off: 0x40, Vaddr: 0x400040, Paddr: 0x400040, Filesz: 0x1f8, Memsz: 0x1f8, Align: 8}, + {Type: elf.PT_INTERP, Flags: elf.PF_R, Off: 0x238, Vaddr: 0x400238, Paddr: 0x400238, Filesz: 0x1c, Memsz: 0x1c, Align: 1}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0x400000, Paddr: 0x400000, Filesz: 0x6fc, Memsz: 0x6fc, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe10, Vaddr: 0x600e10, Paddr: 0x600e10, Filesz: 0x230, Memsz: 0x238, Align: 0x200000}, + {Type: elf.PT_DYNAMIC, Flags: elf.PF_R | elf.PF_W, Off: 0xe28, Vaddr: 0x600e28, Paddr: 0x600e28, Filesz: 0x1d0, Memsz: 0x1d0, Align: 8}, + } + smallBadBSSHeaders := []elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0x200000, Paddr: 0x200000, Filesz: 0x6fc, Memsz: 0x6fc, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0x700, Vaddr: 0x400700, Paddr: 0x400700, Filesz: 0x500, Memsz: 0x710, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe10, Vaddr: 0x600e10, Paddr: 0x600e10, Filesz: 0x230, Memsz: 0x238, Align: 0x200000}, + } + mediumHeaders := []elf.ProgHeader{ + {Type: 
elf.PT_PHDR, Flags: elf.PF_R, Off: 0x40, Vaddr: 0x40, Paddr: 0x40, Filesz: 0x268, Memsz: 0x268, Align: 8}, + {Type: elf.PT_INTERP, Flags: elf.PF_R, Off: 0x2a8, Vaddr: 0x2a8, Paddr: 0x2a8, Filesz: 0x28, Memsz: 0x28, Align: 1}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0x51800, Memsz: 0x51800, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0x51800, Vaddr: 0x251800, Paddr: 0x251800, Filesz: 0x24a8, Memsz: 0x24e8, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0x53d00, Vaddr: 0x453d00, Paddr: 0x453d00, Filesz: 0x13a58, Memsz: 0x91a198, Align: 0x200000}, + {Type: elf.PT_TLS, Flags: elf.PF_R, Off: 0x51800, Vaddr: 0x51800, Paddr: 0x51800, Filesz: 0x0, Memsz: 0x38, Align: 0x8}, + {Type: elf.PT_DYNAMIC, Flags: elf.PF_R | elf.PF_W, Off: 0x51d00, Vaddr: 0x251d00, Paddr: 0x251d00, Filesz: 0x1ef0, Memsz: 0x1ef0, Align: 8}, + } + largeHeaders := []elf.ProgHeader{ + {Type: elf.PT_PHDR, Flags: elf.PF_R, Off: 0x40, Vaddr: 0x40, Paddr: 0x40, Filesz: 0x268, Memsz: 0x268, Align: 8}, + {Type: elf.PT_INTERP, Flags: elf.PF_R, Off: 0x2a8, Vaddr: 0x2a8, Paddr: 0x2a8, Filesz: 0x28, Memsz: 0x28, Align: 1}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0x2ec5d2c0, Memsz: 0x2ec5d2c0, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0x2ec5d2c0, Vaddr: 0x2ee5d2c0, Paddr: 0x2ee5d2c0, Filesz: 0x1361118, Memsz: 0x1361150, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0x2ffbe440, Vaddr: 0x303be440, Paddr: 0x303be440, Filesz: 0x4637c0, Memsz: 0xc91610, Align: 0x200000}, + {Type: elf.PT_TLS, Flags: elf.PF_R, Off: 0x2ec5d2c0, Vaddr: 0x2ee5d2c0, Paddr: 0x2ee5d2c0, Filesz: 0x120, Memsz: 0x103f8, Align: 0x40}, + {Type: elf.PT_DYNAMIC, Flags: elf.PF_R | elf.PF_W, Off: 0x2ffbc9e0, Vaddr: 0x301bc9e0, Paddr: 0x301bc9e0, Filesz: 0x1f0, Memsz: 0x1f0, Align: 8}, + } + ffmpegHeaders := []elf.ProgHeader{ + {Type: elf.PT_PHDR, 
Flags: elf.PF_R, Off: 0x40, Vaddr: 0x200040, Paddr: 0x200040, Filesz: 0x1f8, Memsz: 0x1f8, Align: 8}, + {Type: elf.PT_INTERP, Flags: elf.PF_R, Off: 0x238, Vaddr: 0x200238, Paddr: 0x200238, Filesz: 0x28, Memsz: 0x28, Align: 1}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0x200000, Paddr: 0x200000, Filesz: 0x48d8410, Memsz: 0x48d8410, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0x48d8440, Vaddr: 0x4cd8440, Paddr: 0x4cd8440, Filesz: 0x18cbe0, Memsz: 0xd2fb70, Align: 0x200000}, + {Type: elf.PT_TLS, Flags: elf.PF_R, Off: 0x48d8440, Vaddr: 0x4cd8440, Paddr: 0x4cd8440, Filesz: 0xa8, Memsz: 0x468, Align: 0x40}, + {Type: elf.PT_DYNAMIC, Flags: elf.PF_R | elf.PF_W, Off: 0x4a63ad0, Vaddr: 0x4e63ad0, Paddr: 0x4e63ad0, Filesz: 0x200, Memsz: 0x200, Align: 8}, + } + sentryHeaders := []elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_X + elf.PF_R, Off: 0x0, Vaddr: 0x7f0000000000, Paddr: 0x7f0000000000, Filesz: 0xbc64d5, Memsz: 0xbc64d5, Align: 0x1000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R, Off: 0xbc7000, Vaddr: 0x7f0000bc7000, Paddr: 0x7f0000bc7000, Filesz: 0xcd6b30, Memsz: 0xcd6b30, Align: 0x1000}, + {Type: elf.PT_LOAD, Flags: elf.PF_W + elf.PF_R, Off: 0x189e000, Vaddr: 0x7f000189e000, Paddr: 0x7f000189e000, Filesz: 0x58180, Memsz: 0x92d10, Align: 0x1000}, + } + + for _, tc := range []struct { + desc string + phdrs []elf.ProgHeader + pgoff uint64 + memsz uint64 + wantHeaders []*elf.ProgHeader + }{ + { + desc: "no prog headers", + phdrs: nil, + pgoff: 0, + memsz: 0x1000, + wantHeaders: nil, + }, + { + desc: "tiny file, 4KB at offset 0 matches both headers, b/178747588", + phdrs: tinyHeaders, + pgoff: 0, + memsz: 0x1000, + wantHeaders: []*elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x1f0, Memsz: 0x1f0, Align: 0x200000}, + }, + }, + 
{ + desc: "tiny file, file offset 4KB matches no headers", + phdrs: tinyHeaders, + pgoff: 0x1000, + memsz: 0x1000, + wantHeaders: nil, + }, + { + desc: "tiny file with unaligned memsz matches executable segment", + phdrs: tinyHeaders, + pgoff: 0, + memsz: 0xc80, + wantHeaders: []*elf.ProgHeader{{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}}, + }, + { + desc: "tiny file with unaligned offset matches data segment", + phdrs: tinyHeaders, + pgoff: 0xc80, + memsz: 0x1000, + wantHeaders: []*elf.ProgHeader{{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x1f0, Memsz: 0x1f0, Align: 0x200000}}, + }, + { + desc: "tiny bad BSS file, 4KB at offset 0 matches all three headers", + phdrs: tinyBadBSSHeaders, + pgoff: 0, + memsz: 0x1000, + wantHeaders: []*elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x100, Memsz: 0x1f0, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xd80, Vaddr: 0x400d80, Paddr: 0x400d80, Filesz: 0x90, Memsz: 0x90, Align: 0x200000}, + }, + }, + { + desc: "small file, offset 0, memsz 4KB matches both segments", + phdrs: smallHeaders, + pgoff: 0, + memsz: 0x1000, + wantHeaders: []*elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0x400000, Paddr: 0x400000, Filesz: 0x6fc, Memsz: 0x6fc, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe10, Vaddr: 0x600e10, Paddr: 0x600e10, Filesz: 0x230, Memsz: 0x238, Align: 0x200000}, + }, + }, + { + desc: "small file, offset 0, memsz 8KB matches both segments", + phdrs: smallHeaders, + pgoff: 0, + memsz: 0x2000, + wantHeaders: []*elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0x400000, Paddr: 
0x400000, Filesz: 0x6fc, Memsz: 0x6fc, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe10, Vaddr: 0x600e10, Paddr: 0x600e10, Filesz: 0x230, Memsz: 0x238, Align: 0x200000}, + }, + }, + { + desc: "small file, offset 4KB matches data segment", + phdrs: smallHeaders, + pgoff: 0x1000, + memsz: 0x1000, + wantHeaders: []*elf.ProgHeader{{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe10, Vaddr: 0x600e10, Paddr: 0x600e10, Filesz: 0x230, Memsz: 0x238, Align: 0x200000}}, + }, + { + desc: "small file, offset 8KB matches no segment", + phdrs: smallHeaders, + pgoff: 0x2000, + memsz: 0x1000, + wantHeaders: nil, + }, + { + desc: "small bad BSS file, offset 0, memsz 4KB matches all three segments", + phdrs: smallBadBSSHeaders, + pgoff: 0, + memsz: 0x1000, + wantHeaders: []*elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0x200000, Paddr: 0x200000, Filesz: 0x6fc, Memsz: 0x6fc, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0x700, Vaddr: 0x400700, Paddr: 0x400700, Filesz: 0x500, Memsz: 0x710, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe10, Vaddr: 0x600e10, Paddr: 0x600e10, Filesz: 0x230, Memsz: 0x238, Align: 0x200000}, + }, + }, + { + desc: "small bad BSS file, offset 0, memsz 8KB matches all three segments", + phdrs: smallBadBSSHeaders, + pgoff: 0, + memsz: 0x2000, + wantHeaders: []*elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0x200000, Paddr: 0x200000, Filesz: 0x6fc, Memsz: 0x6fc, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0x700, Vaddr: 0x400700, Paddr: 0x400700, Filesz: 0x500, Memsz: 0x710, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe10, Vaddr: 0x600e10, Paddr: 0x600e10, Filesz: 0x230, Memsz: 0x238, Align: 0x200000}, + }, + }, + { + desc: "small bad BSS file, offset 4KB matches second data segment", + phdrs: smallBadBSSHeaders, + pgoff: 0x1000, + memsz: 0x1000, 
+ wantHeaders: []*elf.ProgHeader{{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe10, Vaddr: 0x600e10, Paddr: 0x600e10, Filesz: 0x230, Memsz: 0x238, Align: 0x200000}}, + }, + { + desc: "medium file large mapping that includes all address space matches executable segment, b/179920361", + phdrs: mediumHeaders, + pgoff: 0, + memsz: 0xd6e000, + wantHeaders: []*elf.ProgHeader{{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0x51800, Memsz: 0x51800, Align: 0x200000}}, + }, + { + desc: "large file executable mapping matches executable segment", + phdrs: largeHeaders, + pgoff: 0, + memsz: 0x2ec5e000, + wantHeaders: []*elf.ProgHeader{{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0x2ec5d2c0, Memsz: 0x2ec5d2c0, Align: 0x200000}}, + }, + { + desc: "large file first data mapping matches first data segment", + phdrs: largeHeaders, + pgoff: 0x2ec5d000, + memsz: 0x1362000, + wantHeaders: []*elf.ProgHeader{{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0x2ec5d2c0, Vaddr: 0x2ee5d2c0, Paddr: 0x2ee5d2c0, Filesz: 0x1361118, Memsz: 0x1361150, Align: 0x200000}}, + }, + { + desc: "large file, split second data mapping matches second data segment", + phdrs: largeHeaders, + pgoff: 0x2ffbe000, + memsz: 0xb11000, + wantHeaders: []*elf.ProgHeader{{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0x2ffbe440, Vaddr: 0x303be440, Paddr: 0x303be440, Filesz: 0x4637c0, Memsz: 0xc91610, Align: 0x200000}}, + }, + { + desc: "sentry headers, mapping for last page of executable segment matches executable segment", + phdrs: sentryHeaders, + pgoff: 0xbc6000, + memsz: 0x1000, + wantHeaders: []*elf.ProgHeader{{Type: elf.PT_LOAD, Flags: elf.PF_X + elf.PF_R, Off: 0x0, Vaddr: 0x7f0000000000, Paddr: 0x7f0000000000, Filesz: 0xbc64d5, Memsz: 0xbc64d5, Align: 0x1000}}, + }, + { + desc: "ffmpeg headers, split mapping for executable segment matches executable segment, b/193176694", + phdrs: ffmpegHeaders, + pgoff: 0, + memsz: 
0x48d8000, + wantHeaders: []*elf.ProgHeader{{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0x200000, Paddr: 0x200000, Filesz: 0x48d8410, Memsz: 0x48d8410, Align: 0x200000}}, + }, + { + desc: "segments with no file bits (b/195427553), mapping for executable segment matches executable segment", + phdrs: []elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R, Off: 0x0, Vaddr: 0x0, Paddr: 0x0, Filesz: 0x115000, Memsz: 0x115000, Align: 0x1000}, + {Type: elf.PT_LOAD, Flags: elf.PF_X + elf.PF_R, Off: 0x115000, Vaddr: 0x115000, Paddr: 0x115000, Filesz: 0x361e15, Memsz: 0x361e15, Align: 0x1000}, + {Type: elf.PT_LOAD, Flags: elf.PF_W + elf.PF_R, Off: 0x0, Vaddr: 0x477000, Paddr: 0x477000, Filesz: 0x0, Memsz: 0x33c, Align: 0x1000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R, Off: 0x0, Vaddr: 0x478000, Paddr: 0x478000, Filesz: 0x0, Memsz: 0x47dc28, Align: 0x1000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R, Off: 0x477000, Vaddr: 0x8f6000, Paddr: 0x8f6000, Filesz: 0x140, Memsz: 0x140, Align: 0x1000}, + {Type: elf.PT_LOAD, Flags: elf.PF_W + elf.PF_R, Off: 0x478000, Vaddr: 0x8f7000, Paddr: 0x8f7000, Filesz: 0x38, Memsz: 0x38, Align: 0x1000}, + }, + pgoff: 0x115000, + memsz: 0x362000, + wantHeaders: []*elf.ProgHeader{{Type: elf.PT_LOAD, Flags: elf.PF_X + elf.PF_R, Off: 0x115000, Vaddr: 0x115000, Paddr: 0x115000, Filesz: 0x361e15, Memsz: 0x361e15, Align: 0x1000}}, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + gotHeaders := ProgramHeadersForMapping(tc.phdrs, tc.pgoff, tc.memsz) + if !reflect.DeepEqual(gotHeaders, tc.wantHeaders) { + t.Errorf("got program headers %q; want %q", buildList(gotHeaders), buildList(tc.wantHeaders)) + } + }) + } +} + +func TestHeaderForFileOffset(t *testing.T) { + for _, tc := range []struct { + desc string + headers []*elf.ProgHeader + fileOffset uint64 + wantError bool + want *elf.ProgHeader + }{ + { + desc: "no headers, want error", + headers: nil, + wantError: true, + }, + { + desc: "three headers, BSS in last segment, file offset selects 
first header", + headers: []*elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x1f0, Memsz: 0x1f0, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe70, Vaddr: 0x400e70, Paddr: 0x400e70, Filesz: 0x90, Memsz: 0x100, Align: 0x200000}, + }, + fileOffset: 0xc79, + want: &elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}, + }, + { + desc: "three headers, BSS in last segment, file offset selects second header", + headers: []*elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x1f0, Memsz: 0x1f0, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe70, Vaddr: 0x400e70, Paddr: 0x400e70, Filesz: 0x90, Memsz: 0x100, Align: 0x200000}, + }, + fileOffset: 0xc80, + want: &elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x1f0, Memsz: 0x1f0, Align: 0x200000}, + }, + { + desc: "three headers, BSS in last segment, file offset selects third header", + headers: []*elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x1f0, Memsz: 0x1f0, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe70, Vaddr: 0x400e70, Paddr: 0x400e70, Filesz: 0x90, Memsz: 0x100, Align: 0x200000}, + }, + fileOffset: 0xef0, + want: &elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe70, Vaddr: 
0x400e70, Paddr: 0x400e70, Filesz: 0x90, Memsz: 0x100, Align: 0x200000}, + }, + { + desc: "three headers, BSS in last segment, file offset in uninitialized section selects third header", + headers: []*elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x1f0, Memsz: 0x1f0, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe70, Vaddr: 0x400e70, Paddr: 0x400e70, Filesz: 0x90, Memsz: 0x100, Align: 0x200000}, + }, + fileOffset: 0xf40, + want: &elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe70, Vaddr: 0x400e70, Paddr: 0x400e70, Filesz: 0x90, Memsz: 0x100, Align: 0x200000}, + }, + { + desc: "three headers, BSS in last segment, file offset past any segment gives error", + headers: []*elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x1f0, Memsz: 0x1f0, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xe70, Vaddr: 0x400e70, Paddr: 0x400e70, Filesz: 0x90, Memsz: 0x100, Align: 0x200000}, + }, + fileOffset: 0xf70, + wantError: true, + }, + { + desc: "three headers, BSS in second segment, file offset in mapped section selects second header", + headers: []*elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x100, Memsz: 0x1f0, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xd80, Vaddr: 0x400d80, Paddr: 0x400d80, Filesz: 0x100, Memsz: 0x100, Align: 0x200000}, + }, + fileOffset: 0xd79, + want: 
&elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x100, Memsz: 0x1f0, Align: 0x200000}, + }, + { + desc: "three headers, BSS in second segment, file offset in unmapped section gives error", + headers: []*elf.ProgHeader{ + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x100, Memsz: 0x1f0, Align: 0x200000}, + {Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xd80, Vaddr: 0x400d80, Paddr: 0x400d80, Filesz: 0x100, Memsz: 0x100, Align: 0x200000}, + }, + fileOffset: 0xd80, + wantError: true, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + got, err := HeaderForFileOffset(tc.headers, tc.fileOffset) + if (err != nil) != tc.wantError { + t.Errorf("got error %v, want any error=%v", err, tc.wantError) + } + if err != nil { + return + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("got program header %#v, want %#v", got, tc.want) + } + }) + } +} diff --git a/plugin/debug/pkg/internal/graph/dotgraph.go b/plugin/debug/pkg/internal/graph/dotgraph.go new file mode 100644 index 0000000..bc843e3 --- /dev/null +++ b/plugin/debug/pkg/internal/graph/dotgraph.go @@ -0,0 +1,494 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package graph + +import ( + "fmt" + "io" + "math" + "path/filepath" + "strings" + + "m7s.live/v5/plugin/debug/pkg/internal/measurement" +) + +// DotAttributes contains details about the graph itself, giving +// insight into how its elements should be rendered. +type DotAttributes struct { + Nodes map[*Node]*DotNodeAttributes // A map allowing each Node to have its own visualization option +} + +// DotNodeAttributes contains Node specific visualization options. +type DotNodeAttributes struct { + Shape string // The optional shape of the node when rendered visually + Bold bool // If the node should be bold or not + Peripheries int // An optional number of borders to place around a node + URL string // An optional url link to add to a node + Formatter func(*NodeInfo) string // An optional formatter for the node's label +} + +// DotConfig contains attributes about how a graph should be +// constructed and how it should look. +type DotConfig struct { + Title string // The title of the DOT graph + LegendURL string // The URL to link to from the legend. + Labels []string // The labels for the DOT's legend + + FormatValue func(int64) string // A formatting function for values + Total int64 // The total weight of the graph, used to compute percentages +} + +const maxNodelets = 4 // Number of nodelets for labels (both numeric and non) + +// ComposeDot creates and writes a graph in the DOT format to the writer, using +// the configurations given. +func ComposeDot(w io.Writer, g *Graph, a *DotAttributes, c *DotConfig) { + builder := &builder{w, a, c} + + // Begin constructing DOT by adding a title and legend. + builder.start() + defer builder.finish() + builder.addLegend() + + if len(g.Nodes) == 0 { + return + } + + // Preprocess graph to get id map and find max flat.
+ nodeIDMap := make(map[*Node]int) + hasNodelets := make(map[*Node]bool) + + maxFlat := float64(abs64(g.Nodes[0].FlatValue())) + for i, n := range g.Nodes { + nodeIDMap[n] = i + 1 + if float64(abs64(n.FlatValue())) > maxFlat { + maxFlat = float64(abs64(n.FlatValue())) + } + } + + edges := EdgeMap{} + + // Add nodes and nodelets to DOT builder. + for _, n := range g.Nodes { + builder.addNode(n, nodeIDMap[n], maxFlat) + hasNodelets[n] = builder.addNodelets(n, nodeIDMap[n]) + + // Collect all edges. Use a fake node to support multiple incoming edges. + for _, e := range n.Out { + edges[&Node{}] = e + } + } + + // Add edges to DOT builder. Sort edges by frequency as a hint to the graph layout engine. + for _, e := range edges.Sort() { + builder.addEdge(e, nodeIDMap[e.Src], nodeIDMap[e.Dest], hasNodelets[e.Src]) + } +} + +// builder wraps an io.Writer and understands how to compose DOT formatted elements. +type builder struct { + io.Writer + attributes *DotAttributes + config *DotConfig +} + +// start generates a title and initial node in DOT format. +func (b *builder) start() { + graphname := "unnamed" + if b.config.Title != "" { + graphname = b.config.Title + } + fmt.Fprintln(b, `digraph "`+graphname+`" {`) + fmt.Fprintln(b, `node [style=filled fillcolor="#f8f8f8"]`) +} + +// finish closes the opening curly bracket in the constructed DOT buffer. +func (b *builder) finish() { + fmt.Fprintln(b, "}") +} + +// addLegend generates a legend in DOT format. 
+func (b *builder) addLegend() { + labels := b.config.Labels + if len(labels) == 0 { + return + } + title := labels[0] + fmt.Fprintf(b, `subgraph cluster_L { "%s" [shape=box fontsize=16`, escapeForDot(title)) + fmt.Fprintf(b, ` label="%s\l"`, strings.Join(escapeAllForDot(labels), `\l`)) + if b.config.LegendURL != "" { + fmt.Fprintf(b, ` URL="%s" target="_blank"`, b.config.LegendURL) + } + if b.config.Title != "" { + fmt.Fprintf(b, ` tooltip="%s"`, b.config.Title) + } + fmt.Fprintf(b, "] }\n") +} + +// addNode generates a graph node in DOT format. +func (b *builder) addNode(node *Node, nodeID int, maxFlat float64) { + flat, cum := node.FlatValue(), node.CumValue() + attrs := b.attributes.Nodes[node] + + // Populate label for node. + var label string + if attrs != nil && attrs.Formatter != nil { + label = attrs.Formatter(&node.Info) + } else { + label = multilinePrintableName(&node.Info) + } + + flatValue := b.config.FormatValue(flat) + if flat != 0 { + label = label + fmt.Sprintf(`%s (%s)`, + flatValue, + strings.TrimSpace(measurement.Percentage(flat, b.config.Total))) + } else { + label = label + "0" + } + cumValue := flatValue + if cum != flat { + if flat != 0 { + label = label + `\n` + } else { + label = label + " " + } + cumValue = b.config.FormatValue(cum) + label = label + fmt.Sprintf(`of %s (%s)`, + cumValue, + strings.TrimSpace(measurement.Percentage(cum, b.config.Total))) + } + + // Scale font sizes from 8 to 24 based on percentage of flat frequency. + // Use non linear growth to emphasize the size difference. + baseFontSize, maxFontGrowth := 8, 16.0 + fontSize := baseFontSize + if maxFlat != 0 && flat != 0 && float64(abs64(flat)) <= maxFlat { + fontSize += int(math.Ceil(maxFontGrowth * math.Sqrt(float64(abs64(flat))/maxFlat))) + } + + // Determine node shape. + shape := "box" + if attrs != nil && attrs.Shape != "" { + shape = attrs.Shape + } + + // Create DOT attribute for node. 
+ attr := fmt.Sprintf(`label="%s" id="node%d" fontsize=%d shape=%s tooltip="%s (%s)" color="%s" fillcolor="%s"`, + label, nodeID, fontSize, shape, escapeForDot(node.Info.PrintableName()), cumValue, + dotColor(float64(node.CumValue())/float64(abs64(b.config.Total)), false), + dotColor(float64(node.CumValue())/float64(abs64(b.config.Total)), true)) + + // Add on extra attributes if provided. + if attrs != nil { + // Make bold if specified. + if attrs.Bold { + attr += ` style="bold,filled"` + } + + // Add peripheries if specified. + if attrs.Peripheries != 0 { + attr += fmt.Sprintf(` peripheries=%d`, attrs.Peripheries) + } + + // Add URL if specified. target="_blank" forces the link to open in a new tab. + if attrs.URL != "" { + attr += fmt.Sprintf(` URL="%s" target="_blank"`, attrs.URL) + } + } + + fmt.Fprintf(b, "N%d [%s]\n", nodeID, attr) +} + +// addNodelets generates the DOT boxes for the node tags if they exist. +func (b *builder) addNodelets(node *Node, nodeID int) bool { + var nodelets string + + // Populate two Tag slices, one for LabelTags and one for NumericTags. + var ts []*Tag + lnts := make(map[string][]*Tag) + for _, t := range node.LabelTags { + ts = append(ts, t) + } + for l, tm := range node.NumericTags { + for _, t := range tm { + lnts[l] = append(lnts[l], t) + } + } + + // For leaf nodes, print cumulative tags (includes weight from + // children that have been deleted). + // For internal nodes, print only flat tags. + flatTags := len(node.Out) > 0 + + // Select the top maxNodelets alphanumeric labels by weight. 
+ SortTags(ts, flatTags) + if len(ts) > maxNodelets { + ts = ts[:maxNodelets] + } + for i, t := range ts { + w := t.CumValue() + if flatTags { + w = t.FlatValue() + } + if w == 0 { + continue + } + weight := b.config.FormatValue(w) + nodelets += fmt.Sprintf(`N%d_%d [label = "%s" id="N%d_%d" fontsize=8 shape=box3d tooltip="%s"]`+"\n", nodeID, i, t.Name, nodeID, i, weight) + nodelets += fmt.Sprintf(`N%d -> N%d_%d [label=" %s" weight=100 tooltip="%s" labeltooltip="%s"]`+"\n", nodeID, nodeID, i, weight, weight, weight) + if nts := lnts[t.Name]; nts != nil { + nodelets += b.numericNodelets(nts, maxNodelets, flatTags, fmt.Sprintf(`N%d_%d`, nodeID, i)) + } + } + + if nts := lnts[""]; nts != nil { + nodelets += b.numericNodelets(nts, maxNodelets, flatTags, fmt.Sprintf(`N%d`, nodeID)) + } + + fmt.Fprint(b, nodelets) + return nodelets != "" +} + +func (b *builder) numericNodelets(nts []*Tag, maxNumNodelets int, flatTags bool, source string) string { + nodelets := "" + + // Collapse numeric labels into maxNumNodelets buckets, of the form: + // 1MB..2MB, 3MB..5MB, ... + for j, t := range b.collapsedTags(nts, maxNumNodelets, flatTags) { + w, attr := t.CumValue(), ` style="dotted"` + if flatTags || t.FlatValue() == t.CumValue() { + w, attr = t.FlatValue(), "" + } + if w != 0 { + weight := b.config.FormatValue(w) + nodelets += fmt.Sprintf(`N%s_%d [label = "%s" id="N%s_%d" fontsize=8 shape=box3d tooltip="%s"]`+"\n", source, j, t.Name, source, j, weight) + nodelets += fmt.Sprintf(`%s -> N%s_%d [label=" %s" weight=100 tooltip="%s" labeltooltip="%s"%s]`+"\n", source, source, j, weight, weight, weight, attr) + } + } + return nodelets +} + +// addEdge generates a graph edge in DOT format. 
+func (b *builder) addEdge(edge *Edge, from, to int, hasNodelets bool) { + var inline string + if edge.Inline { + inline = `\n (inline)` + } + w := b.config.FormatValue(edge.WeightValue()) + attr := fmt.Sprintf(`label=" %s%s"`, w, inline) + if b.config.Total != 0 { + // Note: edge.weight > b.config.Total is possible for profile diffs. + if weight := 1 + int(min64(abs64(edge.WeightValue()*100/b.config.Total), 100)); weight > 1 { + attr = fmt.Sprintf(`%s weight=%d`, attr, weight) + } + if width := 1 + int(min64(abs64(edge.WeightValue()*5/b.config.Total), 5)); width > 1 { + attr = fmt.Sprintf(`%s penwidth=%d`, attr, width) + } + attr = fmt.Sprintf(`%s color="%s"`, attr, + dotColor(float64(edge.WeightValue())/float64(abs64(b.config.Total)), false)) + } + arrow := "->" + if edge.Residual { + arrow = "..." + } + tooltip := fmt.Sprintf(`"%s %s %s (%s)"`, + escapeForDot(edge.Src.Info.PrintableName()), arrow, + escapeForDot(edge.Dest.Info.PrintableName()), w) + attr = fmt.Sprintf(`%s tooltip=%s labeltooltip=%s`, attr, tooltip, tooltip) + + if edge.Residual { + attr = attr + ` style="dotted"` + } + + if hasNodelets { + // Separate children further if source has tags. + attr = attr + " minlen=2" + } + + fmt.Fprintf(b, "N%d -> N%d [%s]\n", from, to, attr) +} + +// dotColor returns a color for the given score (between -1.0 and +// 1.0), with -1.0 colored green, 0.0 colored grey, and 1.0 colored +// red. If isBackground is true, then a light (low-saturation) +// color is returned (suitable for use as a background color); +// otherwise, a darker color is returned (suitable for use as a +// foreground color). +func dotColor(score float64, isBackground bool) string { + // A float between 0.0 and 1.0, indicating the extent to which + // colors should be shifted away from grey (to make positive and + // negative values easier to distinguish, and to make more use of + // the color range.) + const shift = 0.7 + + // Saturation and value (in hsv colorspace) for background colors. 
+ const bgSaturation = 0.1 + const bgValue = 0.93 + + // Saturation and value (in hsv colorspace) for foreground colors. + const fgSaturation = 1.0 + const fgValue = 0.7 + + // Choose saturation and value based on isBackground. + var saturation float64 + var value float64 + if isBackground { + saturation = bgSaturation + value = bgValue + } else { + saturation = fgSaturation + value = fgValue + } + + // Limit the score values to the range [-1.0, 1.0]. + score = math.Max(-1.0, math.Min(1.0, score)) + + // Reduce saturation near score=0 (so it is colored grey, rather than yellow). + if math.Abs(score) < 0.2 { + saturation *= math.Abs(score) / 0.2 + } + + // Apply 'shift' to move scores away from 0.0 (grey). + if score > 0.0 { + score = math.Pow(score, (1.0 - shift)) + } + if score < 0.0 { + score = -math.Pow(-score, (1.0 - shift)) + } + + var r, g, b float64 // red, green, blue + if score < 0.0 { + g = value + r = value * (1 + saturation*score) + } else { + r = value + g = value * (1 - saturation*score) + } + b = value * (1 - saturation) + return fmt.Sprintf("#%02x%02x%02x", uint8(r*255.0), uint8(g*255.0), uint8(b*255.0)) +} + +func multilinePrintableName(info *NodeInfo) string { + infoCopy := *info + infoCopy.Name = escapeForDot(ShortenFunctionName(infoCopy.Name)) + infoCopy.Name = strings.Replace(infoCopy.Name, "::", `\n`, -1) + // Go type parameters are reported as "[...]" by Go pprof profiles. + // Keep this ellipsis rather than replacing with newlines below. + infoCopy.Name = strings.Replace(infoCopy.Name, "[...]", "[…]", -1) + infoCopy.Name = strings.Replace(infoCopy.Name, ".", `\n`, -1) + if infoCopy.File != "" { + infoCopy.File = filepath.Base(infoCopy.File) + } + return strings.Join(infoCopy.NameComponents(), `\n`) + `\n` +} + +// collapsedTags trims and sorts a slice of tags. 
+func (b *builder) collapsedTags(ts []*Tag, count int, flatTags bool) []*Tag { + ts = SortTags(ts, flatTags) + if len(ts) <= count { + return ts + } + + tagGroups := make([][]*Tag, count) + for i, t := range (ts)[:count] { + tagGroups[i] = []*Tag{t} + } + for _, t := range (ts)[count:] { + g, d := 0, tagDistance(t, tagGroups[0][0]) + for i := 1; i < count; i++ { + if nd := tagDistance(t, tagGroups[i][0]); nd < d { + g, d = i, nd + } + } + tagGroups[g] = append(tagGroups[g], t) + } + + var nts []*Tag + for _, g := range tagGroups { + l, w, c := b.tagGroupLabel(g) + nts = append(nts, &Tag{ + Name: l, + Flat: w, + Cum: c, + }) + } + return SortTags(nts, flatTags) +} + +func tagDistance(t, u *Tag) float64 { + v, _ := measurement.Scale(u.Value, u.Unit, t.Unit) + if v < float64(t.Value) { + return float64(t.Value) - v + } + return v - float64(t.Value) +} + +func (b *builder) tagGroupLabel(g []*Tag) (label string, flat, cum int64) { + if len(g) == 1 { + t := g[0] + return measurement.Label(t.Value, t.Unit), t.FlatValue(), t.CumValue() + } + min := g[0] + max := g[0] + df, f := min.FlatDiv, min.Flat + dc, c := min.CumDiv, min.Cum + for _, t := range g[1:] { + if v, _ := measurement.Scale(t.Value, t.Unit, min.Unit); int64(v) < min.Value { + min = t + } + if v, _ := measurement.Scale(t.Value, t.Unit, max.Unit); int64(v) > max.Value { + max = t + } + f += t.Flat + df += t.FlatDiv + c += t.Cum + dc += t.CumDiv + } + if df != 0 { + f = f / df + } + if dc != 0 { + c = c / dc + } + + // Tags are not scaled with the selected output unit because tags are often + // much smaller than other values which appear, so the range of tag sizes + // sometimes would appear to be "0..0" when scaled to the selected output unit. + return measurement.Label(min.Value, min.Unit) + ".." + measurement.Label(max.Value, max.Unit), f, c +} + +func min64(a, b int64) int64 { + if a < b { + return a + } + return b +} + +// escapeAllForDot applies escapeForDot to all strings in the given slice. 
+func escapeAllForDot(in []string) []string { + var out = make([]string, len(in)) + for i := range in { + out[i] = escapeForDot(in[i]) + } + return out +} + +// escapeForDot escapes double quotes and backslashes, and replaces Graphviz's +// "center" character (\n) with a left-justified character. +// See https://graphviz.org/docs/attr-types/escString/ for more info. +func escapeForDot(str string) string { + return strings.ReplaceAll(strings.ReplaceAll(strings.ReplaceAll(str, `\`, `\\`), `"`, `\"`), "\n", `\l`) +} diff --git a/plugin/debug/pkg/internal/graph/dotgraph_test.go b/plugin/debug/pkg/internal/graph/dotgraph_test.go new file mode 100644 index 0000000..77a1146 --- /dev/null +++ b/plugin/debug/pkg/internal/graph/dotgraph_test.go @@ -0,0 +1,400 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package graph + +import ( + "bytes" + "flag" + "fmt" + "os" + "path/filepath" + "reflect" + "strconv" + "strings" + "testing" + + "m7s.live/v5/plugin/debug/pkg/internal/proftest" +) + +var updateFlag = flag.Bool("update", false, "Update the golden files") + +func TestComposeWithStandardGraph(t *testing.T) { + g := baseGraph() + a, c := baseAttrsAndConfig() + + var buf bytes.Buffer + ComposeDot(&buf, g, a, c) + + compareGraphs(t, buf.Bytes(), "compose1.dot") +} + +func TestComposeWithNodeAttributesAndZeroFlat(t *testing.T) { + g := baseGraph() + a, c := baseAttrsAndConfig() + + // Set NodeAttributes for Node 1. + a.Nodes[g.Nodes[0]] = &DotNodeAttributes{ + Shape: "folder", + Bold: true, + Peripheries: 2, + URL: "www.google.com", + Formatter: func(ni *NodeInfo) string { + return strings.ToUpper(ni.Name) + }, + } + + // Set Flat value to zero on Node 2. + g.Nodes[1].Flat = 0 + + var buf bytes.Buffer + ComposeDot(&buf, g, a, c) + + compareGraphs(t, buf.Bytes(), "compose2.dot") +} + +func TestComposeWithTagsAndResidualEdge(t *testing.T) { + g := baseGraph() + a, c := baseAttrsAndConfig() + + // Add tags to Node 1. + g.Nodes[0].LabelTags["a"] = &Tag{ + Name: "tag1", + Cum: 10, + Flat: 10, + } + g.Nodes[0].NumericTags[""] = TagMap{ + "b": &Tag{ + Name: "tag2", + Cum: 20, + Flat: 20, + Unit: "ms", + }, + } + + // Set edge to be Residual. + g.Nodes[0].Out[g.Nodes[1]].Residual = true + + var buf bytes.Buffer + ComposeDot(&buf, g, a, c) + + compareGraphs(t, buf.Bytes(), "compose3.dot") +} + +func TestComposeWithNestedTags(t *testing.T) { + g := baseGraph() + a, c := baseAttrsAndConfig() + + // Add tags to Node 1. 
+ g.Nodes[0].LabelTags["tag1"] = &Tag{ + Name: "tag1", + Cum: 10, + Flat: 10, + } + g.Nodes[0].NumericTags["tag1"] = TagMap{ + "tag2": &Tag{ + Name: "tag2", + Cum: 20, + Flat: 20, + Unit: "ms", + }, + } + + var buf bytes.Buffer + ComposeDot(&buf, g, a, c) + + compareGraphs(t, buf.Bytes(), "compose5.dot") +} + +func TestComposeWithEmptyGraph(t *testing.T) { + g := &Graph{} + a, c := baseAttrsAndConfig() + + var buf bytes.Buffer + ComposeDot(&buf, g, a, c) + + compareGraphs(t, buf.Bytes(), "compose4.dot") +} + +func TestComposeWithStandardGraphAndURL(t *testing.T) { + g := baseGraph() + a, c := baseAttrsAndConfig() + c.LegendURL = "http://example.com" + + var buf bytes.Buffer + ComposeDot(&buf, g, a, c) + + compareGraphs(t, buf.Bytes(), "compose6.dot") +} + +func TestComposeWithNamesThatNeedEscaping(t *testing.T) { + g := baseGraph() + a, c := baseAttrsAndConfig() + g.Nodes[0].Info = NodeInfo{Name: `var"src"`} + g.Nodes[1].Info = NodeInfo{Name: `var"#dest#"`} + + var buf bytes.Buffer + ComposeDot(&buf, g, a, c) + + compareGraphs(t, buf.Bytes(), "compose7.dot") +} + +func TestComposeWithCommentsWithNewlines(t *testing.T) { + g := baseGraph() + a, c := baseAttrsAndConfig() + // comments that could be added with the -add_comment command line tool + // the first label is used as the dot "node name"; the others are escaped as labels + c.Labels = []string{"comment line 1\ncomment line 2 \"unterminated double quote", `second comment "double quote"`} + + var buf bytes.Buffer + ComposeDot(&buf, g, a, c) + + compareGraphs(t, buf.Bytes(), "compose9.dot") +} + +func baseGraph() *Graph { + src := &Node{ + Info: NodeInfo{Name: "src"}, + Flat: 10, + Cum: 25, + In: make(EdgeMap), + Out: make(EdgeMap), + LabelTags: make(TagMap), + NumericTags: make(map[string]TagMap), + } + dest := &Node{ + Info: NodeInfo{Name: "dest"}, + Flat: 15, + Cum: 25, + In: make(EdgeMap), + Out: make(EdgeMap), + LabelTags: make(TagMap), + NumericTags: make(map[string]TagMap), + } + edge := &Edge{ + Src: src, 
+ Dest: dest, + Weight: 10, + } + src.Out[dest] = edge + src.In[src] = edge + return &Graph{ + Nodes: Nodes{ + src, + dest, + }, + } +} + +func baseAttrsAndConfig() (*DotAttributes, *DotConfig) { + a := &DotAttributes{ + Nodes: make(map[*Node]*DotNodeAttributes), + } + c := &DotConfig{ + Title: "testtitle", + Labels: []string{"label1", "label2", `label3: "foo"`}, + Total: 100, + FormatValue: func(v int64) string { + return strconv.FormatInt(v, 10) + }, + } + return a, c +} + +func compareGraphs(t *testing.T, got []byte, wantFile string) { + wantFile = filepath.Join("testdata", wantFile) + want, err := os.ReadFile(wantFile) + if err != nil { + t.Fatalf("error reading test file %s: %v", wantFile, err) + } + + if string(got) != string(want) { + d, err := proftest.Diff(got, want) + if err != nil { + t.Fatalf("error finding diff: %v", err) + } + t.Errorf("Compose incorrectly wrote %s", string(d)) + if *updateFlag { + err := os.WriteFile(wantFile, got, 0644) + if err != nil { + t.Errorf("failed to update the golden file %q: %v", wantFile, err) + } + } + } +} + +func TestNodeletCountCapping(t *testing.T) { + labelTags := make(TagMap) + for i := 0; i < 10; i++ { + name := fmt.Sprintf("tag-%d", i) + labelTags[name] = &Tag{ + Name: name, + Flat: 10, + Cum: 10, + } + } + numTags := make(TagMap) + for i := 0; i < 10; i++ { + name := fmt.Sprintf("num-tag-%d", i) + numTags[name] = &Tag{ + Name: name, + Unit: "mb", + Value: 16, + Flat: 10, + Cum: 10, + } + } + node1 := &Node{ + Info: NodeInfo{Name: "node1-with-tags"}, + Flat: 10, + Cum: 10, + NumericTags: map[string]TagMap{"": numTags}, + LabelTags: labelTags, + } + node2 := &Node{ + Info: NodeInfo{Name: "node2"}, + Flat: 15, + Cum: 15, + } + node3 := &Node{ + Info: NodeInfo{Name: "node3"}, + Flat: 15, + Cum: 15, + } + g := &Graph{ + Nodes: Nodes{ + node1, + node2, + node3, + }, + } + for n := 1; n <= 3; n++ { + input := maxNodelets + n + if got, want := len(g.SelectTopNodes(input, true)), n; got != want { + 
t.Errorf("SelectTopNodes(%d): got %d nodes, want %d", input, got, want) + } + } +} + +func TestMultilinePrintableName(t *testing.T) { + ni := &NodeInfo{ + Name: "test1.test2::test3", + File: "src/file.cc", + Address: 123, + Lineno: 999, + } + + want := fmt.Sprintf(`%016x\ntest1\ntest2\ntest3\nfile.cc:999\n`, 123) + if got := multilinePrintableName(ni); got != want { + t.Errorf("multilinePrintableName(%#v) == %q, want %q", ni, got, want) + } +} + +func TestTagCollapse(t *testing.T) { + + makeTag := func(name, unit string, value, flat, cum int64) *Tag { + return &Tag{name, unit, value, flat, 0, cum, 0} + } + + tagSource := []*Tag{ + makeTag("12mb", "mb", 12, 100, 100), + makeTag("1kb", "kb", 1, 1, 1), + makeTag("1mb", "mb", 1, 1000, 1000), + makeTag("2048mb", "mb", 2048, 1000, 1000), + makeTag("1b", "b", 1, 100, 100), + makeTag("2b", "b", 2, 100, 100), + makeTag("7b", "b", 7, 100, 100), + } + + tagWant := [][]*Tag{ + { + makeTag("1B..2GB", "", 0, 2401, 2401), + }, + { + makeTag("2GB", "", 0, 1000, 1000), + makeTag("1B..12MB", "", 0, 1401, 1401), + }, + { + makeTag("2GB", "", 0, 1000, 1000), + makeTag("12MB", "", 0, 100, 100), + makeTag("1B..1MB", "", 0, 1301, 1301), + }, + { + makeTag("2GB", "", 0, 1000, 1000), + makeTag("1MB", "", 0, 1000, 1000), + makeTag("2B..1kB", "", 0, 201, 201), + makeTag("1B", "", 0, 100, 100), + makeTag("12MB", "", 0, 100, 100), + }, + } + + for _, tc := range tagWant { + var got, want []*Tag + b := builder{nil, &DotAttributes{}, &DotConfig{}} + got = b.collapsedTags(tagSource, len(tc), true) + want = SortTags(tc, true) + + if !reflect.DeepEqual(got, want) { + t.Errorf("collapse to %d, got:\n%v\nwant:\n%v", len(tc), tagString(got), tagString(want)) + } + } +} + +func TestEscapeForDot(t *testing.T) { + for _, tc := range []struct { + desc string + input []string + want []string + }{ + { + desc: "with multiple doubles quotes", + input: []string{`label: "foo" and "bar"`}, + want: []string{`label: \"foo\" and \"bar\"`}, + }, + { + desc: "with 
graphviz center line character", + input: []string{"label: foo \n bar"}, + want: []string{`label: foo \l bar`}, + }, + { + desc: "with two backslashes", + input: []string{`label: \\`}, + want: []string{`label: \\\\`}, + }, + { + desc: "with two double quotes together", + input: []string{`label: ""`}, + want: []string{`label: \"\"`}, + }, + { + desc: "with multiple labels", + input: []string{`label1: "foo"`, `label2: "bar"`}, + want: []string{`label1: \"foo\"`, `label2: \"bar\"`}, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + if got := escapeAllForDot(tc.input); !reflect.DeepEqual(got, tc.want) { + t.Errorf("escapeAllForDot(%s) = %s, want %s", tc.input, got, tc.want) + } + }) + } +} + +func tagString(t []*Tag) string { + var ret []string + for _, s := range t { + ret = append(ret, fmt.Sprintln(s)) + } + return strings.Join(ret, ":") +} diff --git a/plugin/debug/pkg/internal/graph/graph.go b/plugin/debug/pkg/internal/graph/graph.go new file mode 100644 index 0000000..263ea95 --- /dev/null +++ b/plugin/debug/pkg/internal/graph/graph.go @@ -0,0 +1,1177 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package graph collects a set of samples into a directed graph. +package graph + +import ( + "fmt" + "math" + "path/filepath" + "regexp" + "sort" + "strconv" + "strings" + + "m7s.live/v5/plugin/debug/pkg/profile" +) + +var ( + // Removes package name and method arguments for Java method names. 
+ // See tests for examples. + javaRegExp = regexp.MustCompile(`^(?:[a-z]\w*\.)*([A-Z][\w\$]*\.(?:|[a-z][\w\$]*(?:\$\d+)?))(?:(?:\()|$)`) + // Removes package name and method arguments for Go function names. + // See tests for examples. + goRegExp = regexp.MustCompile(`^(?:[\w\-\.]+\/)+([^.]+\..+)`) + // Removes potential module versions in a package path. + goVerRegExp = regexp.MustCompile(`^(.*?)/v(?:[2-9]|[1-9][0-9]+)([./].*)$`) + // Strips C++ namespace prefix from a C++ function / method name. + // NOTE: Make sure to keep the template parameters in the name. Normally, + // template parameters are stripped from the C++ names but when + // -symbolize=demangle=templates flag is used, they will not be. + // See tests for examples. + cppRegExp = regexp.MustCompile(`^(?:[_a-zA-Z]\w*::)+(_*[A-Z]\w*::~?[_a-zA-Z]\w*(?:<.*>)?)`) + cppAnonymousPrefixRegExp = regexp.MustCompile(`^\(anonymous namespace\)::`) +) + +// Graph summarizes a performance profile into a format that is +// suitable for visualization. +type Graph struct { + Nodes Nodes +} + +// Options encodes the options for constructing a graph +type Options struct { + SampleValue func(s []int64) int64 // Function to compute the value of a sample + SampleMeanDivisor func(s []int64) int64 // Function to compute the divisor for mean graphs, or nil + FormatTag func(int64, string) string // Function to format a sample tag value into a string + ObjNames bool // Always preserve obj filename + OrigFnNames bool // Preserve original (eg mangled) function names + + CallTree bool // Build a tree instead of a graph + DropNegative bool // Drop nodes with overall negative values + + KeptNodes NodeSet // If non-nil, only use nodes in this set +} + +// Nodes is an ordered collection of graph nodes. +type Nodes []*Node + +// Node is an entry on a profiling report. It represents a unique +// program location. +type Node struct { + // Info describes the source location associated to this node. 
+ Info NodeInfo + + // Function represents the function that this node belongs to. On + // graphs with sub-function resolution (eg line number or + // addresses), two nodes in a NodeMap that are part of the same + // function have the same value of Node.Function. If the Node + // represents the whole function, it points back to itself. + Function *Node + + // Values associated to this node. Flat is exclusive to this node, + // Cum includes all descendents. + Flat, FlatDiv, Cum, CumDiv int64 + + // In and out Contains the nodes immediately reaching or reached by + // this node. + In, Out EdgeMap + + // LabelTags provide additional information about subsets of a sample. + LabelTags TagMap + + // NumericTags provide additional values for subsets of a sample. + // Numeric tags are optionally associated to a label tag. The key + // for NumericTags is the name of the LabelTag they are associated + // to, or "" for numeric tags not associated to a label tag. + NumericTags map[string]TagMap +} + +// FlatValue returns the exclusive value for this node, computing the +// mean if a divisor is available. +func (n *Node) FlatValue() int64 { + if n.FlatDiv == 0 { + return n.Flat + } + return n.Flat / n.FlatDiv +} + +// CumValue returns the inclusive value for this node, computing the +// mean if a divisor is available. +func (n *Node) CumValue() int64 { + if n.CumDiv == 0 { + return n.Cum + } + return n.Cum / n.CumDiv +} + +// AddToEdge increases the weight of an edge between two nodes. If +// there isn't such an edge one is created. +func (n *Node) AddToEdge(to *Node, v int64, residual, inline bool) { + n.AddToEdgeDiv(to, 0, v, residual, inline) +} + +// AddToEdgeDiv increases the weight of an edge between two nodes. If +// there isn't such an edge one is created. 
+func (n *Node) AddToEdgeDiv(to *Node, dv, v int64, residual, inline bool) { + if n.Out[to] != to.In[n] { + panic(fmt.Errorf("asymmetric edges %v %v", *n, *to)) + } + + if e := n.Out[to]; e != nil { + e.WeightDiv += dv + e.Weight += v + if residual { + e.Residual = true + } + if !inline { + e.Inline = false + } + return + } + + info := &Edge{Src: n, Dest: to, WeightDiv: dv, Weight: v, Residual: residual, Inline: inline} + n.Out[to] = info + to.In[n] = info +} + +// NodeInfo contains the attributes for a node. +type NodeInfo struct { + Name string + OrigName string + Address uint64 + File string + StartLine, Lineno int + Columnno int + Objfile string +} + +// PrintableName calls the Node's Formatter function with a single space separator. +func (i *NodeInfo) PrintableName() string { + return strings.Join(i.NameComponents(), " ") +} + +// NameComponents returns the components of the printable name to be used for a node. +func (i *NodeInfo) NameComponents() []string { + var name []string + if i.Address != 0 { + name = append(name, fmt.Sprintf("%016x", i.Address)) + } + if fun := i.Name; fun != "" { + name = append(name, fun) + } + + switch { + case i.Lineno != 0: + s := fmt.Sprintf("%s:%d", i.File, i.Lineno) + if i.Columnno != 0 { + s += fmt.Sprintf(":%d", i.Columnno) + } + // User requested line numbers, provide what we have. + name = append(name, s) + case i.File != "": + // User requested file name, provide it. + name = append(name, i.File) + case i.Name != "": + // User requested function name. It was already included. + case i.Objfile != "": + // Only binary name is available + name = append(name, "["+filepath.Base(i.Objfile)+"]") + default: + // Do not leave it empty if there is no information at all. + name = append(name, "") + } + return name +} + +// NodeMap maps from a node info struct to a node. It is used to merge +// report entries with the same info. +type NodeMap map[NodeInfo]*Node + +// NodeSet is a collection of node info structs. 
+type NodeSet map[NodeInfo]bool + +// NodePtrSet is a collection of nodes. Trimming a graph or tree requires a set +// of objects which uniquely identify the nodes to keep. In a graph, NodeInfo +// works as a unique identifier; however, in a tree multiple nodes may share +// identical NodeInfos. A *Node does uniquely identify a node so we can use that +// instead. Though a *Node also uniquely identifies a node in a graph, +// currently, during trimming, graphs are rebuilt from scratch using only the +// NodeSet, so there would not be the required context of the initial graph to +// allow for the use of *Node. +type NodePtrSet map[*Node]bool + +// FindOrInsertNode takes the info for a node and either returns a matching node +// from the node map if one exists, or adds one to the map if one does not. +// If kept is non-nil, nodes are only added if they can be located on it. +func (nm NodeMap) FindOrInsertNode(info NodeInfo, kept NodeSet) *Node { + if kept != nil { + if _, ok := kept[info]; !ok { + return nil + } + } + + if n, ok := nm[info]; ok { + return n + } + + n := &Node{ + Info: info, + In: make(EdgeMap), + Out: make(EdgeMap), + LabelTags: make(TagMap), + NumericTags: make(map[string]TagMap), + } + nm[info] = n + if info.Address == 0 && info.Lineno == 0 { + // This node represents the whole function, so point Function + // back to itself. + n.Function = n + return n + } + // Find a node that represents the whole function. + info.Address = 0 + info.Lineno = 0 + info.Columnno = 0 + n.Function = nm.FindOrInsertNode(info, nil) + return n +} + +// EdgeMap is used to represent the incoming/outgoing edges from a node. +type EdgeMap map[*Node]*Edge + +// Edge contains any attributes to be represented about edges in a graph. +type Edge struct { + Src, Dest *Node + // The summary weight of the edge + Weight, WeightDiv int64 + + // residual edges connect nodes that were connected through a + // separate node, which has been removed from the report. 
+ Residual bool + // An inline edge represents a call that was inlined into the caller. + Inline bool +} + +// WeightValue returns the weight value for this edge, normalizing if a +// divisor is available. +func (e *Edge) WeightValue() int64 { + if e.WeightDiv == 0 { + return e.Weight + } + return e.Weight / e.WeightDiv +} + +// Tag represent sample annotations +type Tag struct { + Name string + Unit string // Describe the value, "" for non-numeric tags + Value int64 + Flat, FlatDiv int64 + Cum, CumDiv int64 +} + +// FlatValue returns the exclusive value for this tag, computing the +// mean if a divisor is available. +func (t *Tag) FlatValue() int64 { + if t.FlatDiv == 0 { + return t.Flat + } + return t.Flat / t.FlatDiv +} + +// CumValue returns the inclusive value for this tag, computing the +// mean if a divisor is available. +func (t *Tag) CumValue() int64 { + if t.CumDiv == 0 { + return t.Cum + } + return t.Cum / t.CumDiv +} + +// TagMap is a collection of tags, classified by their name. +type TagMap map[string]*Tag + +// SortTags sorts a slice of tags based on their weight. +func SortTags(t []*Tag, flat bool) []*Tag { + ts := tags{t, flat} + sort.Sort(ts) + return ts.t +} + +// New summarizes performance data from a profile into a graph. +func New(prof *profile.Profile, o *Options) *Graph { + if o.CallTree { + return newTree(prof, o) + } + g, _ := newGraph(prof, o) + return g +} + +// newGraph computes a graph from a profile. It returns the graph, and +// a map from the profile location indices to the corresponding graph +// nodes. 
func newGraph(prof *profile.Profile, o *Options) (*Graph, map[uint64]Nodes) {
	nodes, locationMap := CreateNodes(prof, o)
	// seenNode/seenEdge are scratch sets reused across samples (cleared at
	// the top of each iteration) so that a node or edge appearing multiple
	// times within one sample's stack is only counted once.
	seenNode := make(map[*Node]bool)
	seenEdge := make(map[nodePair]bool)
	for _, sample := range prof.Sample {
		var w, dw int64
		w = o.SampleValue(sample.Value)
		if o.SampleMeanDivisor != nil {
			dw = o.SampleMeanDivisor(sample.Value)
		}
		// Skip samples that contribute no weight at all.
		if dw == 0 && w == 0 {
			continue
		}
		// Clear the scratch sets in place, keeping their allocated buckets.
		for k := range seenNode {
			delete(seenNode, k)
		}
		for k := range seenEdge {
			delete(seenEdge, k)
		}
		var parent *Node
		// A residual edge goes over one or more nodes that were not kept.
		residual := false

		labels := joinLabels(sample)
		// Group the sample frames, based on a global map.
		// Walk the stack root-first (locations are stored leaf-first).
		for i := len(sample.Location) - 1; i >= 0; i-- {
			l := sample.Location[i]
			locNodes := locationMap[l.ID]
			for ni := len(locNodes) - 1; ni >= 0; ni-- {
				n := locNodes[ni]
				if n == nil {
					// Dropped node: the edge that eventually bridges this
					// gap must be marked residual.
					residual = true
					continue
				}
				// Add cum weight to all nodes in stack, avoiding double counting.
				if _, ok := seenNode[n]; !ok {
					seenNode[n] = true
					n.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, false)
				}
				// Update edge weights for all edges in stack, avoiding double counting.
				if _, ok := seenEdge[nodePair{n, parent}]; !ok && parent != nil && n != parent {
					seenEdge[nodePair{n, parent}] = true
					// ni != len(locNodes)-1 marks edges between the lines of
					// a single location (i.e. inlined calls).
					parent.AddToEdgeDiv(n, dw, w, residual, ni != len(locNodes)-1)
				}
				parent = n
				residual = false
			}
		}
		if parent != nil && !residual {
			// Add flat weight to leaf node.
			parent.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, true)
		}
	}

	return selectNodesForGraph(nodes, o.DropNegative), locationMap
}

// selectNodesForGraph filters out nil and zero-weight nodes (and, when
// dropNegative is set, negative ones) and wraps the rest in a Graph.
func selectNodesForGraph(nodes Nodes, dropNegative bool) *Graph {
	// Collect nodes into a graph.
	gNodes := make(Nodes, 0, len(nodes))
	for _, n := range nodes {
		if n == nil {
			continue
		}
		if n.Cum == 0 && n.Flat == 0 {
			continue
		}
		if dropNegative && isNegative(n) {
			continue
		}
		gNodes = append(gNodes, n)
	}
	return &Graph{gNodes}
}

// nodePair identifies a directed edge (src -> dest) for de-duplication.
type nodePair struct {
	src, dest *Node
}

// newTree summarizes a profile as a tree/forest: nodes are keyed per
// parent (parentNodeMap), so identical frames reached through different
// call paths stay distinct instead of being merged globally.
func newTree(prof *profile.Profile, o *Options) (g *Graph) {
	parentNodeMap := make(map[*Node]NodeMap, len(prof.Sample))
	for _, sample := range prof.Sample {
		var w, dw int64
		w = o.SampleValue(sample.Value)
		if o.SampleMeanDivisor != nil {
			dw = o.SampleMeanDivisor(sample.Value)
		}
		// Skip samples that contribute no weight at all.
		if dw == 0 && w == 0 {
			continue
		}
		var parent *Node
		labels := joinLabels(sample)
		// Group the sample frames, based on a per-node map.
		// Walk the stack root-first (locations are stored leaf-first).
		for i := len(sample.Location) - 1; i >= 0; i-- {
			l := sample.Location[i]
			lines := l.Line
			if len(lines) == 0 {
				lines = []profile.Line{{}} // Create empty line to include location info.
			}
			for lidx := len(lines) - 1; lidx >= 0; lidx-- {
				nodeMap := parentNodeMap[parent]
				if nodeMap == nil {
					nodeMap = make(NodeMap)
					parentNodeMap[parent] = nodeMap
				}
				n := nodeMap.findOrInsertLine(l, lines[lidx], o)
				if n == nil {
					continue
				}
				n.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, false)
				if parent != nil {
					// lidx != len(lines)-1 marks inlined-call edges.
					parent.AddToEdgeDiv(n, dw, w, false, lidx != len(lines)-1)
				}
				parent = n
			}
		}
		if parent != nil {
			// The last node reached is the leaf; give it the flat weight.
			parent.addSample(dw, w, labels, sample.NumLabel, sample.NumUnit, o.FormatTag, true)
		}
	}

	nodes := make(Nodes, 0, len(prof.Location))
	for _, nm := range parentNodeMap {
		nodes = append(nodes, nm.nodes()...)
	}
	return selectNodesForGraph(nodes, o.DropNegative)
}

// ShortenFunctionName returns a shortened version of a function's name.
+func ShortenFunctionName(f string) string { + f = cppAnonymousPrefixRegExp.ReplaceAllString(f, "") + f = goVerRegExp.ReplaceAllString(f, `${1}${2}`) + for _, re := range []*regexp.Regexp{goRegExp, javaRegExp, cppRegExp} { + if matches := re.FindStringSubmatch(f); len(matches) >= 2 { + return strings.Join(matches[1:], "") + } + } + return f +} + +// TrimTree trims a Graph in forest form, keeping only the nodes in kept. This +// will not work correctly if even a single node has multiple parents. +func (g *Graph) TrimTree(kept NodePtrSet) { + // Creates a new list of nodes + oldNodes := g.Nodes + g.Nodes = make(Nodes, 0, len(kept)) + + for _, cur := range oldNodes { + // A node may not have multiple parents + if len(cur.In) > 1 { + panic("TrimTree only works on trees") + } + + // If a node should be kept, add it to the new list of nodes + if _, ok := kept[cur]; ok { + g.Nodes = append(g.Nodes, cur) + continue + } + + // If a node has no parents, then delete all of the in edges of its + // children to make them each roots of their own trees. + if len(cur.In) == 0 { + for _, outEdge := range cur.Out { + delete(outEdge.Dest.In, cur) + } + continue + } + + // Get the parent. This works since at this point cur.In must contain only + // one element. + if len(cur.In) != 1 { + panic("Get parent assertion failed. cur.In expected to be of length 1.") + } + var parent *Node + for _, edge := range cur.In { + parent = edge.Src + } + + parentEdgeInline := parent.Out[cur].Inline + + // Remove the edge from the parent to this node + delete(parent.Out, cur) + + // Reconfigure every edge from the current node to now begin at the parent. 
+ for _, outEdge := range cur.Out { + child := outEdge.Dest + + delete(child.In, cur) + child.In[parent] = outEdge + parent.Out[child] = outEdge + + outEdge.Src = parent + outEdge.Residual = true + // If the edge from the parent to the current node and the edge from the + // current node to the child are both inline, then this resulting residual + // edge should also be inline + outEdge.Inline = parentEdgeInline && outEdge.Inline + } + } + g.RemoveRedundantEdges() +} + +func joinLabels(s *profile.Sample) string { + if len(s.Label) == 0 { + return "" + } + + var labels []string + for key, vals := range s.Label { + for _, v := range vals { + labels = append(labels, key+":"+v) + } + } + sort.Strings(labels) + return strings.Join(labels, `\n`) +} + +// isNegative returns true if the node is considered as "negative" for the +// purposes of drop_negative. +func isNegative(n *Node) bool { + switch { + case n.Flat < 0: + return true + case n.Flat == 0 && n.Cum < 0: + return true + default: + return false + } +} + +// CreateNodes creates graph nodes for all locations in a profile. It +// returns set of all nodes, plus a mapping of each location to the +// set of corresponding nodes (one per location.Line). +func CreateNodes(prof *profile.Profile, o *Options) (Nodes, map[uint64]Nodes) { + locations := make(map[uint64]Nodes, len(prof.Location)) + nm := make(NodeMap, len(prof.Location)) + for _, l := range prof.Location { + lines := l.Line + if len(lines) == 0 { + lines = []profile.Line{{}} // Create empty line to include location info. 
+ } + nodes := make(Nodes, len(lines)) + for ln := range lines { + nodes[ln] = nm.findOrInsertLine(l, lines[ln], o) + } + locations[l.ID] = nodes + } + return nm.nodes(), locations +} + +func (nm NodeMap) nodes() Nodes { + nodes := make(Nodes, 0, len(nm)) + for _, n := range nm { + nodes = append(nodes, n) + } + return nodes +} + +func (nm NodeMap) findOrInsertLine(l *profile.Location, li profile.Line, o *Options) *Node { + var objfile string + if m := l.Mapping; m != nil && m.File != "" { + objfile = m.File + } + + if ni := nodeInfo(l, li, objfile, o); ni != nil { + return nm.FindOrInsertNode(*ni, o.KeptNodes) + } + return nil +} + +func nodeInfo(l *profile.Location, line profile.Line, objfile string, o *Options) *NodeInfo { + if line.Function == nil { + return &NodeInfo{Address: l.Address, Objfile: objfile} + } + ni := &NodeInfo{ + Address: l.Address, + Lineno: int(line.Line), + Columnno: int(line.Column), + Name: line.Function.Name, + } + if fname := line.Function.Filename; fname != "" { + ni.File = filepath.Clean(fname) + } + if o.OrigFnNames { + ni.OrigName = line.Function.SystemName + } + if o.ObjNames || (ni.Name == "" && ni.OrigName == "") { + ni.Objfile = objfile + ni.StartLine = int(line.Function.StartLine) + } + return ni +} + +type tags struct { + t []*Tag + flat bool +} + +func (t tags) Len() int { return len(t.t) } +func (t tags) Swap(i, j int) { t.t[i], t.t[j] = t.t[j], t.t[i] } +func (t tags) Less(i, j int) bool { + if !t.flat { + if t.t[i].Cum != t.t[j].Cum { + return abs64(t.t[i].Cum) > abs64(t.t[j].Cum) + } + } + if t.t[i].Flat != t.t[j].Flat { + return abs64(t.t[i].Flat) > abs64(t.t[j].Flat) + } + return t.t[i].Name < t.t[j].Name +} + +// Sum adds the flat and cum values of a set of nodes. 
func (ns Nodes) Sum() (flat int64, cum int64) {
	for _, n := range ns {
		flat += n.Flat
		cum += n.Cum
	}
	return
}

// addSample absorbs one sample's weight into this node. When flat is
// true (the node is the leaf of the sample's stack) the weight goes to
// Flat/FlatDiv, otherwise to Cum/CumDiv. The sample's string label
// (labels) and numeric labels (numLabel/numUnit) are accumulated into
// the node's tag maps; format (defaulting to plain base-10 rendering)
// builds the display name of each numeric tag.
func (n *Node) addSample(dw, w int64, labels string, numLabel map[string][]int64, numUnit map[string][]string, format func(int64, string) string, flat bool) {
	// Update sample value
	if flat {
		n.FlatDiv += dw
		n.Flat += w
	} else {
		n.CumDiv += dw
		n.Cum += w
	}

	// Add string tags
	if labels != "" {
		t := n.LabelTags.findOrAddTag(labels, "", 0)
		if flat {
			t.FlatDiv += dw
			t.Flat += w
		} else {
			t.CumDiv += dw
			t.Cum += w
		}
	}

	// Numeric tags are grouped under the sample's string label
	// ("" when the sample has none); create the group lazily.
	numericTags := n.NumericTags[labels]
	if numericTags == nil {
		numericTags = TagMap{}
		n.NumericTags[labels] = numericTags
	}
	// Add numeric tags
	if format == nil {
		format = defaultLabelFormat
	}
	for k, nvals := range numLabel {
		units := numUnit[k]
		for i, v := range nvals {
			var t *Tag
			if len(units) > 0 {
				// A unit is recorded per value; use it as both the
				// formatting hint and the tag's unit.
				t = numericTags.findOrAddTag(format(v, units[i]), units[i], v)
			} else {
				// No units: fall back to the label key.
				t = numericTags.findOrAddTag(format(v, k), k, v)
			}
			if flat {
				t.FlatDiv += dw
				t.Flat += w
			} else {
				t.CumDiv += dw
				t.Cum += w
			}
		}
	}
}

// defaultLabelFormat renders a numeric tag value as plain base-10 text,
// ignoring the key.
func defaultLabelFormat(v int64, key string) string {
	return strconv.FormatInt(v, 10)
}

// findOrAddTag returns the tag in m named label, creating and
// registering a new Tag with the given unit and value if none exists.
func (m TagMap) findOrAddTag(label, unit string, value int64) *Tag {
	l := m[label]
	if l == nil {
		l = &Tag{
			Name:  label,
			Unit:  unit,
			Value: value,
		}
		m[label] = l
	}
	return l
}

// String returns a text representation of a graph, for debugging purposes.
+func (g *Graph) String() string { + var s []string + + nodeIndex := make(map[*Node]int, len(g.Nodes)) + + for i, n := range g.Nodes { + nodeIndex[n] = i + 1 + } + + for i, n := range g.Nodes { + name := n.Info.PrintableName() + var in, out []int + + for _, from := range n.In { + in = append(in, nodeIndex[from.Src]) + } + for _, to := range n.Out { + out = append(out, nodeIndex[to.Dest]) + } + s = append(s, fmt.Sprintf("%d: %s[flat=%d cum=%d] %x -> %v ", i+1, name, n.Flat, n.Cum, in, out)) + } + return strings.Join(s, "\n") +} + +// DiscardLowFrequencyNodes returns a set of the nodes at or over a +// specific cum value cutoff. +func (g *Graph) DiscardLowFrequencyNodes(nodeCutoff int64) NodeSet { + return makeNodeSet(g.Nodes, nodeCutoff) +} + +// DiscardLowFrequencyNodePtrs returns a NodePtrSet of nodes at or over a +// specific cum value cutoff. +func (g *Graph) DiscardLowFrequencyNodePtrs(nodeCutoff int64) NodePtrSet { + cutNodes := getNodesAboveCumCutoff(g.Nodes, nodeCutoff) + kept := make(NodePtrSet, len(cutNodes)) + for _, n := range cutNodes { + kept[n] = true + } + return kept +} + +func makeNodeSet(nodes Nodes, nodeCutoff int64) NodeSet { + cutNodes := getNodesAboveCumCutoff(nodes, nodeCutoff) + kept := make(NodeSet, len(cutNodes)) + for _, n := range cutNodes { + kept[n.Info] = true + } + return kept +} + +// getNodesAboveCumCutoff returns all the nodes which have a Cum value greater +// than or equal to cutoff. +func getNodesAboveCumCutoff(nodes Nodes, nodeCutoff int64) Nodes { + cutoffNodes := make(Nodes, 0, len(nodes)) + for _, n := range nodes { + if abs64(n.Cum) < nodeCutoff { + continue + } + cutoffNodes = append(cutoffNodes, n) + } + return cutoffNodes +} + +// TrimLowFrequencyTags removes tags that have less than +// the specified weight. 
+func (g *Graph) TrimLowFrequencyTags(tagCutoff int64) { + // Remove nodes with value <= total*nodeFraction + for _, n := range g.Nodes { + n.LabelTags = trimLowFreqTags(n.LabelTags, tagCutoff) + for s, nt := range n.NumericTags { + n.NumericTags[s] = trimLowFreqTags(nt, tagCutoff) + } + } +} + +func trimLowFreqTags(tags TagMap, minValue int64) TagMap { + kept := TagMap{} + for s, t := range tags { + if abs64(t.Flat) >= minValue || abs64(t.Cum) >= minValue { + kept[s] = t + } + } + return kept +} + +// TrimLowFrequencyEdges removes edges that have less than +// the specified weight. Returns the number of edges removed +func (g *Graph) TrimLowFrequencyEdges(edgeCutoff int64) int { + var droppedEdges int + for _, n := range g.Nodes { + for src, e := range n.In { + if abs64(e.Weight) < edgeCutoff { + delete(n.In, src) + delete(src.Out, n) + droppedEdges++ + } + } + } + return droppedEdges +} + +// SortNodes sorts the nodes in a graph based on a specific heuristic. +func (g *Graph) SortNodes(cum bool, visualMode bool) { + // Sort nodes based on requested mode + switch { + case visualMode: + // Specialized sort to produce a more visually-interesting graph + g.Nodes.Sort(EntropyOrder) + case cum: + g.Nodes.Sort(CumNameOrder) + default: + g.Nodes.Sort(FlatNameOrder) + } +} + +// SelectTopNodePtrs returns a set of the top maxNodes *Node in a graph. +func (g *Graph) SelectTopNodePtrs(maxNodes int, visualMode bool) NodePtrSet { + set := make(NodePtrSet) + for _, node := range g.selectTopNodes(maxNodes, visualMode) { + set[node] = true + } + return set +} + +// SelectTopNodes returns a set of the top maxNodes nodes in a graph. +func (g *Graph) SelectTopNodes(maxNodes int, visualMode bool) NodeSet { + return makeNodeSet(g.selectTopNodes(maxNodes, visualMode), 0) +} + +// selectTopNodes returns a slice of the top maxNodes nodes in a graph. 
func (g *Graph) selectTopNodes(maxNodes int, visualMode bool) Nodes {
	if maxNodes > 0 {
		if visualMode {
			var count int
			// If generating a visual graph, count tags as nodes. Update
			// maxNodes to account for them.
			for i, n := range g.Nodes {
				// Each node is budgeted as itself plus up to maxNodelets
				// tag nodelets.
				tags := countTags(n)
				if tags > maxNodelets {
					tags = maxNodelets
				}
				if count += tags + 1; count >= maxNodes {
					maxNodes = i + 1
					break
				}
			}
		}
	}
	// NOTE(review): truncating the slice keeps the *first* maxNodes
	// entries, which presumably assumes g.Nodes was already sorted by
	// importance (see SortNodes) — confirm with callers.
	if maxNodes > len(g.Nodes) {
		maxNodes = len(g.Nodes)
	}
	return g.Nodes[:maxNodes]
}

// countTags counts the tags with flat count. This underestimates the
// number of tags being displayed, but in practice is close enough.
func countTags(n *Node) int {
	count := 0
	for _, e := range n.LabelTags {
		if e.Flat != 0 {
			count++
		}
	}
	for _, t := range n.NumericTags {
		for _, e := range t {
			if e.Flat != 0 {
				count++
			}
		}
	}
	return count
}

// RemoveRedundantEdges removes residual edges if the destination can
// be reached through another path. This is done to simplify the graph
// while preserving connectivity.
func (g *Graph) RemoveRedundantEdges() {
	// Walk the nodes and outgoing edges in reverse order to prefer
	// removing edges with the lowest weight.
	for i := len(g.Nodes); i > 0; i-- {
		n := g.Nodes[i-1]
		// NOTE(review): the break below relies on In.Sort() (defined
		// elsewhere) ordering edges heaviest-first, so the reverse walk
		// visits lighter edges before heavier ones — verify against the
		// EdgeMap.Sort implementation.
		in := n.In.Sort()
		for j := len(in); j > 0; j-- {
			e := in[j-1]
			if !e.Residual {
				// Do not remove edges heavier than a non-residual edge, to
				// avoid potential confusion.
				break
			}
			if isRedundantEdge(e) {
				// Detach the edge from both endpoints.
				delete(e.Src.Out, e.Dest)
				delete(e.Dest.In, e.Src)
			}
		}
	}
}

// isRedundantEdge determines if there is a path that allows e.Src
// to reach e.Dest after removing e.
func isRedundantEdge(e *Edge) bool {
	// Breadth-first search backwards from e.Dest through in-edges,
	// ignoring e itself, looking for an alternate path back to e.Src.
	src, n := e.Src, e.Dest
	seen := map[*Node]bool{n: true}
	queue := Nodes{n}
	for len(queue) > 0 {
		n := queue[0]
		queue = queue[1:]
		for _, ie := range n.In {
			if e == ie || seen[ie.Src] {
				continue
			}
			if ie.Src == src {
				return true
			}
			seen[ie.Src] = true
			queue = append(queue, ie.Src)
		}
	}
	return false
}

// nodeSorter is a mechanism used to allow a report to be sorted
// in different ways.
type nodeSorter struct {
	rs   Nodes
	less func(l, r *Node) bool
}

// nodeSorter implements sort.Interface by delegating to the configured
// less function.
func (s nodeSorter) Len() int           { return len(s.rs) }
func (s nodeSorter) Swap(i, j int)      { s.rs[i], s.rs[j] = s.rs[j], s.rs[i] }
func (s nodeSorter) Less(i, j int) bool { return s.less(s.rs[i], s.rs[j]) }

// Sort reorders a slice of nodes based on the specified ordering
// criteria. The result is sorted in decreasing order for (absolute)
// numeric quantities, alphabetically for text, and increasing for
// addresses. compareNodes is the final tie-breaker in every mode, so
// the ordering is always deterministic.
func (ns Nodes) Sort(o NodeOrder) error {
	var s nodeSorter

	switch o {
	case FlatNameOrder:
		s = nodeSorter{ns,
			func(l, r *Node) bool {
				if iv, jv := abs64(l.Flat), abs64(r.Flat); iv != jv {
					return iv > jv
				}
				if iv, jv := l.Info.PrintableName(), r.Info.PrintableName(); iv != jv {
					return iv < jv
				}
				if iv, jv := abs64(l.Cum), abs64(r.Cum); iv != jv {
					return iv > jv
				}
				return compareNodes(l, r)
			},
		}
	case FlatCumNameOrder:
		s = nodeSorter{ns,
			func(l, r *Node) bool {
				if iv, jv := abs64(l.Flat), abs64(r.Flat); iv != jv {
					return iv > jv
				}
				if iv, jv := abs64(l.Cum), abs64(r.Cum); iv != jv {
					return iv > jv
				}
				if iv, jv := l.Info.PrintableName(), r.Info.PrintableName(); iv != jv {
					return iv < jv
				}
				return compareNodes(l, r)
			},
		}
	case NameOrder:
		s = nodeSorter{ns,
			func(l, r *Node) bool {
				if iv, jv := l.Info.Name, r.Info.Name; iv != jv {
					return iv < jv
				}
				return compareNodes(l, r)
			},
		}
	case FileOrder:
		s = nodeSorter{ns,
			func(l, r *Node) bool {
				if iv, jv := l.Info.File, r.Info.File; iv != jv {
					return iv < jv
				}
				if iv, jv := l.Info.StartLine, r.Info.StartLine; iv != jv {
					return iv < jv
				}
				return compareNodes(l, r)
			},
		}
	case AddressOrder:
		s = nodeSorter{ns,
			func(l, r *Node) bool {
				if iv, jv := l.Info.Address, r.Info.Address; iv != jv {
					return iv < jv
				}
				return compareNodes(l, r)
			},
		}
	case CumNameOrder, EntropyOrder:
		// Hold scoring for score-based ordering
		var score map[*Node]int64
		scoreOrder := func(l, r *Node) bool {
			if iv, jv := abs64(score[l]), abs64(score[r]); iv != jv {
				return iv > jv
			}
			if iv, jv := l.Info.PrintableName(), r.Info.PrintableName(); iv != jv {
				return iv < jv
			}
			if iv, jv := abs64(l.Flat), abs64(r.Flat); iv != jv {
				return iv > jv
			}
			return compareNodes(l, r)
		}

		// Both modes share scoreOrder; only the score source differs.
		switch o {
		case CumNameOrder:
			score = make(map[*Node]int64, len(ns))
			for _, n := range ns {
				score[n] = n.Cum
			}
			s = nodeSorter{ns, scoreOrder}
		case EntropyOrder:
			score = make(map[*Node]int64, len(ns))
			for _, n := range ns {
				score[n] = entropyScore(n)
			}
			s = nodeSorter{ns, scoreOrder}
		}
	default:
		return fmt.Errorf("report: unrecognized sort ordering: %d", o)
	}
	sort.Sort(s)
	return nil
}

// compareNodes compares two nodes to provide a deterministic ordering
// between them. Two nodes cannot have the same Node.Info value.
func compareNodes(l, r *Node) bool {
	return fmt.Sprint(l.Info) < fmt.Sprint(r.Info)
}

// entropyScore computes a score for a node representing how important
// it is to include this node on a graph visualization. It is used to
// sort the nodes and select which ones to display if we have more
// nodes than desired in the graph. This number is computed by looking
// at the flat and cum weights of the node and the incoming/outgoing
// edges. The fundamental idea is to penalize nodes that have a simple
// fallthrough from their incoming to the outgoing edge.
func entropyScore(n *Node) int64 {
	score := float64(0)

	if len(n.In) == 0 {
		score++ // Favor entry nodes
	} else {
		score += edgeEntropyScore(n, n.In, 0)
	}

	if len(n.Out) == 0 {
		score++ // Favor leaf nodes
	} else {
		score += edgeEntropyScore(n, n.Out, n.Flat)
	}

	// Weight the entropy by the node's cumulative value, and add the
	// flat value so heavy nodes still outrank light ones.
	return int64(score*float64(n.Cum)) + n.Flat
}

// edgeEntropyScore computes the entropy value for a set of edges
// coming in or out of a node. Entropy (as defined in information
// theory) refers to the amount of information encoded by the set of
// edges. A set of edges that have a more interesting distribution of
// samples gets a higher score.
func edgeEntropyScore(n *Node, edges EdgeMap, self int64) float64 {
	score := float64(0)
	// total is the mass to normalize against: the node's own
	// contribution (self) plus all positive edge weights.
	total := self
	for _, e := range edges {
		if e.Weight > 0 {
			total += abs64(e.Weight)
		}
	}
	if total != 0 {
		// Shannon entropy: -sum(frac * log2(frac)) over each edge's
		// (and the self term's) share of total.
		for _, e := range edges {
			frac := float64(abs64(e.Weight)) / float64(total)
			score += -frac * math.Log2(frac)
		}
		if self > 0 {
			frac := float64(abs64(self)) / float64(total)
			score += -frac * math.Log2(frac)
		}
	}
	return score
}

// NodeOrder sets the ordering for a Sort operation
type NodeOrder int

// Sorting options for node sort.
const (
	FlatNameOrder NodeOrder = iota
	FlatCumNameOrder
	CumNameOrder
	NameOrder
	FileOrder
	AddressOrder
	EntropyOrder
)

// Sort returns a slice of the edges in the map, in a consistent
// order. The sort order is first based on the edge weight
// (higher-to-lower) and then by the node names to avoid flakiness.
func (e EdgeMap) Sort() []*Edge {
	el := make(edgeList, 0, len(e))
	for _, w := range e {
		el = append(el, w)
	}

	sort.Sort(el)
	return el
}

// Sum returns the total weight for a set of edges.
+func (e EdgeMap) Sum() int64 { + var ret int64 + for _, edge := range e { + ret += edge.Weight + } + return ret +} + +type edgeList []*Edge + +func (el edgeList) Len() int { + return len(el) +} + +func (el edgeList) Less(i, j int) bool { + if el[i].Weight != el[j].Weight { + return abs64(el[i].Weight) > abs64(el[j].Weight) + } + + from1 := el[i].Src.Info.PrintableName() + from2 := el[j].Src.Info.PrintableName() + if from1 != from2 { + return from1 < from2 + } + + to1 := el[i].Dest.Info.PrintableName() + to2 := el[j].Dest.Info.PrintableName() + + return to1 < to2 +} + +func (el edgeList) Swap(i, j int) { + el[i], el[j] = el[j], el[i] +} + +func abs64(i int64) int64 { + if i < 0 { + return -i + } + return i +} diff --git a/plugin/debug/pkg/internal/graph/graph_test.go b/plugin/debug/pkg/internal/graph/graph_test.go new file mode 100644 index 0000000..051eaa2 --- /dev/null +++ b/plugin/debug/pkg/internal/graph/graph_test.go @@ -0,0 +1,547 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package graph + +import ( + "fmt" + "testing" + + "m7s.live/v5/plugin/debug/pkg/profile" +) + +func edgeDebugString(edge *Edge) string { + debug := "" + debug += fmt.Sprintf("\t\tSrc: %p\n", edge.Src) + debug += fmt.Sprintf("\t\tDest: %p\n", edge.Dest) + debug += fmt.Sprintf("\t\tWeight: %d\n", edge.Weight) + debug += fmt.Sprintf("\t\tResidual: %t\n", edge.Residual) + debug += fmt.Sprintf("\t\tInline: %t\n", edge.Inline) + return debug +} + +func edgeMapsDebugString(in, out EdgeMap) string { + debug := "" + debug += "In Edges:\n" + for parent, edge := range in { + debug += fmt.Sprintf("\tParent: %p\n", parent) + debug += edgeDebugString(edge) + } + debug += "Out Edges:\n" + for child, edge := range out { + debug += fmt.Sprintf("\tChild: %p\n", child) + debug += edgeDebugString(edge) + } + return debug +} + +func graphDebugString(graph *Graph) string { + debug := "" + for i, node := range graph.Nodes { + debug += fmt.Sprintf("Node %d: %p\n", i, node) + } + + for i, node := range graph.Nodes { + debug += "\n" + debug += fmt.Sprintf("=== Node %d: %p ===\n", i, node) + debug += edgeMapsDebugString(node.In, node.Out) + } + return debug +} + +func expectedNodesDebugString(expected []expectedNode) string { + debug := "" + for i, node := range expected { + debug += fmt.Sprintf("Node %d: %p\n", i, node.node) + } + + for i, node := range expected { + debug += "\n" + debug += fmt.Sprintf("=== Node %d: %p ===\n", i, node.node) + debug += edgeMapsDebugString(node.in, node.out) + } + return debug +} + +// edgeMapsEqual checks if all the edges in this equal all the edges in that. +func edgeMapsEqual(this, that EdgeMap) bool { + if len(this) != len(that) { + return false + } + for node, thisEdge := range this { + if *thisEdge != *that[node] { + return false + } + } + return true +} + +// nodesEqual checks if node is equal to expected. 
+func nodesEqual(node *Node, expected expectedNode) bool { + return node == expected.node && edgeMapsEqual(node.In, expected.in) && + edgeMapsEqual(node.Out, expected.out) +} + +// graphsEqual checks if graph is equivalent to the graph templated by expected. +func graphsEqual(graph *Graph, expected []expectedNode) bool { + if len(graph.Nodes) != len(expected) { + return false + } + expectedSet := make(map[*Node]expectedNode) + for i := range expected { + expectedSet[expected[i].node] = expected[i] + } + + for _, node := range graph.Nodes { + expectedNode, found := expectedSet[node] + if !found || !nodesEqual(node, expectedNode) { + return false + } + } + return true +} + +type expectedNode struct { + node *Node + in, out EdgeMap +} + +type trimTreeTestcase struct { + initial *Graph + expected []expectedNode + keep NodePtrSet +} + +// makeExpectedEdgeResidual makes the edge from parent to child residual. +func makeExpectedEdgeResidual(parent, child expectedNode) { + parent.out[child.node].Residual = true + child.in[parent.node].Residual = true +} + +func makeEdgeInline(edgeMap EdgeMap, node *Node) { + edgeMap[node].Inline = true +} + +func setEdgeWeight(edgeMap EdgeMap, node *Node, weight int64) { + edgeMap[node].Weight = weight +} + +// createEdges creates directed edges from the parent to each of the children. +func createEdges(parent *Node, children ...*Node) { + for _, child := range children { + edge := &Edge{ + Src: parent, + Dest: child, + } + parent.Out[child] = edge + child.In[parent] = edge + } +} + +// createEmptyNode creates a node without any edges. +func createEmptyNode() *Node { + return &Node{ + In: make(EdgeMap), + Out: make(EdgeMap), + } +} + +// createExpectedNodes creates a slice of expectedNodes from nodes. 
func createExpectedNodes(nodes ...*Node) ([]expectedNode, NodePtrSet) {
	expected := make([]expectedNode, len(nodes))
	keep := make(NodePtrSet, len(nodes))

	// Every listed node starts with empty edge maps and is marked to be
	// kept by the trim operation.
	for i, node := range nodes {
		expected[i] = expectedNode{
			node: node,
			in:   make(EdgeMap),
			out:  make(EdgeMap),
		}
		keep[node] = true
	}

	return expected, keep
}

// createExpectedEdges creates directed edges from the parent to each of the
// children.
func createExpectedEdges(parent expectedNode, children ...expectedNode) {
	for _, child := range children {
		edge := &Edge{
			Src:  parent.node,
			Dest: child.node,
		}
		parent.out[child.node] = edge
		child.in[parent.node] = edge
	}
}

// createTestCase1 creates a test case that initially looks like:
//
//	   0
//	   |(5)
//	   1
//	(3)/ \(4)
//	  2   3.
//
// After keeping 0, 2, and 3, it expects the graph:
//
//	   0
//	(3)/ \(4)
//	  2   3.
func createTestCase1() trimTreeTestcase {
	// Create initial graph
	graph := &Graph{make(Nodes, 4)}
	nodes := graph.Nodes
	for i := range nodes {
		nodes[i] = createEmptyNode()
	}
	createEdges(nodes[0], nodes[1])
	createEdges(nodes[1], nodes[2], nodes[3])
	makeEdgeInline(nodes[0].Out, nodes[1])
	makeEdgeInline(nodes[1].Out, nodes[2])
	setEdgeWeight(nodes[0].Out, nodes[1], 5)
	setEdgeWeight(nodes[1].Out, nodes[2], 3)
	setEdgeWeight(nodes[1].Out, nodes[3], 4)

	// Create expected graph: node 1 is removed, so the surviving edges
	// 0->2 and 0->3 are residual and keep the downstream weights.
	expected, keep := createExpectedNodes(nodes[0], nodes[2], nodes[3])
	createExpectedEdges(expected[0], expected[1], expected[2])
	makeEdgeInline(expected[0].out, expected[1].node)
	makeExpectedEdgeResidual(expected[0], expected[1])
	makeExpectedEdgeResidual(expected[0], expected[2])
	setEdgeWeight(expected[0].out, expected[1].node, 3)
	setEdgeWeight(expected[0].out, expected[2].node, 4)
	return trimTreeTestcase{
		initial:  graph,
		expected: expected,
		keep:     keep,
	}
}

// createTestCase2 creates a test case that initially looks like:
//
//	3
//	| (12)
//	1
//	| (8)
//	2
//	| (15)
//	0
//	| (10)
//	4.
//
// After keeping 3 and 4, it expects the graph:
//
//	3
//	| (10)
//	4.
func createTestCase2() trimTreeTestcase {
	// Create initial graph
	graph := &Graph{make(Nodes, 5)}
	nodes := graph.Nodes
	for i := range nodes {
		nodes[i] = createEmptyNode()
	}
	createEdges(nodes[3], nodes[1])
	createEdges(nodes[1], nodes[2])
	createEdges(nodes[2], nodes[0])
	createEdges(nodes[0], nodes[4])
	setEdgeWeight(nodes[3].Out, nodes[1], 12)
	setEdgeWeight(nodes[1].Out, nodes[2], 8)
	setEdgeWeight(nodes[2].Out, nodes[0], 15)
	setEdgeWeight(nodes[0].Out, nodes[4], 10)

	// Create expected graph: the whole intermediate chain collapses into
	// a single residual edge 3->4 carrying the final weight.
	expected, keep := createExpectedNodes(nodes[3], nodes[4])
	createExpectedEdges(expected[0], expected[1])
	makeExpectedEdgeResidual(expected[0], expected[1])
	setEdgeWeight(expected[0].out, expected[1].node, 10)
	return trimTreeTestcase{
		initial:  graph,
		expected: expected,
		keep:     keep,
	}
}

// createTestCase3 creates an initially empty graph and expects an empty graph
// after trimming.
func createTestCase3() trimTreeTestcase {
	graph := &Graph{make(Nodes, 0)}
	expected, keep := createExpectedNodes()
	return trimTreeTestcase{
		initial:  graph,
		expected: expected,
		keep:     keep,
	}
}

// createTestCase4 creates a test case that initially looks like:
//
//	0.
//
// After keeping 0, it expects the graph:
//
//	0.
+func createTestCase4() trimTreeTestcase { + graph := &Graph{make(Nodes, 1)} + nodes := graph.Nodes + for i := range nodes { + nodes[i] = createEmptyNode() + } + expected, keep := createExpectedNodes(nodes[0]) + return trimTreeTestcase{ + initial: graph, + expected: expected, + keep: keep, + } +} + +func createTrimTreeTestCases() []trimTreeTestcase { + caseGenerators := []func() trimTreeTestcase{ + createTestCase1, + createTestCase2, + createTestCase3, + createTestCase4, + } + cases := make([]trimTreeTestcase, len(caseGenerators)) + for i, gen := range caseGenerators { + cases[i] = gen() + } + return cases +} + +func TestTrimTree(t *testing.T) { + tests := createTrimTreeTestCases() + for _, test := range tests { + graph := test.initial + graph.TrimTree(test.keep) + if !graphsEqual(graph, test.expected) { + t.Fatalf("Graphs do not match.\nExpected: %s\nFound: %s\n", + expectedNodesDebugString(test.expected), + graphDebugString(graph)) + } + } +} + +func nodeTestProfile() *profile.Profile { + mappings := []*profile.Mapping{ + { + ID: 1, + File: "symbolized_binary", + }, + { + ID: 2, + File: "unsymbolized_library_1", + }, + { + ID: 3, + File: "unsymbolized_library_2", + }, + } + functions := []*profile.Function{ + {ID: 1, Name: "symname"}, + {ID: 2}, + } + locations := []*profile.Location{ + { + ID: 1, + Mapping: mappings[0], + Line: []profile.Line{ + {Function: functions[0]}, + }, + }, + { + ID: 2, + Mapping: mappings[1], + Line: []profile.Line{ + {Function: functions[1]}, + }, + }, + { + ID: 3, + Mapping: mappings[2], + }, + } + return &profile.Profile{ + PeriodType: &profile.ValueType{Type: "cpu", Unit: "milliseconds"}, + SampleType: []*profile.ValueType{ + {Type: "type", Unit: "unit"}, + }, + Sample: []*profile.Sample{ + { + Location: []*profile.Location{locations[0]}, + Value: []int64{1}, + }, + { + Location: []*profile.Location{locations[1]}, + Value: []int64{1}, + }, + { + Location: []*profile.Location{locations[2]}, + Value: []int64{1}, + }, + }, + Location: 
locations, + Function: functions, + Mapping: mappings, + } +} + +// TestCreateNodes checks that nodes are properly created for a simple profile. +func TestCreateNodes(t *testing.T) { + testProfile := nodeTestProfile() + wantNodeSet := NodeSet{ + {Name: "symname"}: true, + {Objfile: "unsymbolized_library_1"}: true, + {Objfile: "unsymbolized_library_2"}: true, + } + + nodes, _ := CreateNodes(testProfile, &Options{}) + if len(nodes) != len(wantNodeSet) { + t.Errorf("got %d nodes, want %d", len(nodes), len(wantNodeSet)) + } + for _, node := range nodes { + if !wantNodeSet[node.Info] { + t.Errorf("unexpected node %v", node.Info) + } + } +} + +func TestShortenFunctionName(t *testing.T) { + type testCase struct { + name string + want string + } + testcases := []testCase{ + { + "root", + "root", + }, + { + "syscall.Syscall", + "syscall.Syscall", + }, + { + "net/http.(*conn).serve", + "http.(*conn).serve", + }, + { + "github.com/blahBlah/foo.Foo", + "foo.Foo", + }, + { + "github.com/BlahBlah/foo.Foo", + "foo.Foo", + }, + { + "github.com/BlahBlah/foo.Foo[...]", + "foo.Foo[...]", + }, + { + "github.com/blah-blah/foo_bar.(*FooBar).Foo", + "foo_bar.(*FooBar).Foo", + }, + { + "encoding/json.(*structEncoder).(encoding/json.encode)-fm", + "json.(*structEncoder).(encoding/json.encode)-fm", + }, + { + "github.com/blah/blah/vendor/gopkg.in/redis.v3.(*baseClient).(github.com/blah/blah/vendor/gopkg.in/redis.v3.process)-fm", + "redis.v3.(*baseClient).(github.com/blah/blah/vendor/gopkg.in/redis.v3.process)-fm", + }, + { + "github.com/foo/bar/v4.(*Foo).Bar", + "bar.(*Foo).Bar", + }, + { + "github.com/foo/bar/v4/baz.Foo.Bar", + "baz.Foo.Bar", + }, + { + "github.com/foo/bar/v123.(*Foo).Bar", + "bar.(*Foo).Bar", + }, + { + "github.com/foobar/v0.(*Foo).Bar", + "v0.(*Foo).Bar", + }, + { + "github.com/foobar/v1.(*Foo).Bar", + "v1.(*Foo).Bar", + }, + { + "example.org/v2xyz.Foo", + "v2xyz.Foo", + }, + { + "github.com/foo/bar/v4/v4.(*Foo).Bar", + "v4.(*Foo).Bar", + }, + { + 
"github.com/foo/bar/v4/foo/bar/v4.(*Foo).Bar", + "v4.(*Foo).Bar", + }, + { + "java.util.concurrent.ThreadPoolExecutor$Worker.run", + "ThreadPoolExecutor$Worker.run", + }, + { + "java.bar.foo.FooBar.run(java.lang.Runnable)", + "FooBar.run", + }, + { + "(anonymous namespace)::Bar::Foo", + "Bar::Foo", + }, + { + "(anonymous namespace)::foo", + "foo", + }, + { + "cpp::namespace::Class::method()::$_100::operator()", + "Class::method", + }, + { + "foo_bar::Foo::bar", + "Foo::bar", + }, + { + "cpp::namespace::Class::method()", + "Class::method", + }, + { + "foo", + "foo", + }, + { + "foo/xyz", + "foo/xyz", + }, + { + "com.google.perftools.gwp.benchmark.FloatBench.lambda$run$0", + "FloatBench.lambda$run$0", + }, + { + "java.bar.foo.FooBar.run$0", + "FooBar.run$0", + }, + } + for _, tc := range testcases { + name := ShortenFunctionName(tc.name) + if got, want := name, tc.want; got != want { + t.Errorf("ShortenFunctionName(%q) = %q, want %q", tc.name, got, want) + } + } +} diff --git a/plugin/debug/pkg/internal/graph/testdata/compose1.dot b/plugin/debug/pkg/internal/graph/testdata/compose1.dot new file mode 100644 index 0000000..a0842ee --- /dev/null +++ b/plugin/debug/pkg/internal/graph/testdata/compose1.dot @@ -0,0 +1,7 @@ +digraph "testtitle" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\llabel3: \"foo\"\l" tooltip="testtitle"] } +N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" id="node1" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"] +N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" id="node2" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] +N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src -> dest (10)" labeltooltip="src -> dest (10)"] +} diff --git a/plugin/debug/pkg/internal/graph/testdata/compose2.dot b/plugin/debug/pkg/internal/graph/testdata/compose2.dot new file mode 100644 index 0000000..44c2aec --- /dev/null +++ 
b/plugin/debug/pkg/internal/graph/testdata/compose2.dot @@ -0,0 +1,7 @@ +digraph "testtitle" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\llabel3: \"foo\"\l" tooltip="testtitle"] } +N1 [label="SRC10 (10.00%)\nof 25 (25.00%)" id="node1" fontsize=24 shape=folder tooltip="src (25)" color="#b23c00" fillcolor="#edddd5" style="bold,filled" peripheries=2 URL="www.google.com" target="_blank"] +N2 [label="dest\n0 of 25 (25.00%)" id="node2" fontsize=8 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] +N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src -> dest (10)" labeltooltip="src -> dest (10)"] +} diff --git a/plugin/debug/pkg/internal/graph/testdata/compose3.dot b/plugin/debug/pkg/internal/graph/testdata/compose3.dot new file mode 100644 index 0000000..f22ad9f --- /dev/null +++ b/plugin/debug/pkg/internal/graph/testdata/compose3.dot @@ -0,0 +1,11 @@ +digraph "testtitle" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\llabel3: \"foo\"\l" tooltip="testtitle"] } +N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" id="node1" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"] +N1_0 [label = "tag1" id="N1_0" fontsize=8 shape=box3d tooltip="10"] +N1 -> N1_0 [label=" 10" weight=100 tooltip="10" labeltooltip="10"] +NN1_0 [label = "tag2" id="NN1_0" fontsize=8 shape=box3d tooltip="20"] +N1 -> NN1_0 [label=" 20" weight=100 tooltip="20" labeltooltip="20"] +N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" id="node2" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] +N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src ... dest (10)" labeltooltip="src ... 
dest (10)" style="dotted" minlen=2] +} diff --git a/plugin/debug/pkg/internal/graph/testdata/compose4.dot b/plugin/debug/pkg/internal/graph/testdata/compose4.dot new file mode 100644 index 0000000..ed770d1 --- /dev/null +++ b/plugin/debug/pkg/internal/graph/testdata/compose4.dot @@ -0,0 +1,4 @@ +digraph "testtitle" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\llabel3: \"foo\"\l" tooltip="testtitle"] } +} diff --git a/plugin/debug/pkg/internal/graph/testdata/compose5.dot b/plugin/debug/pkg/internal/graph/testdata/compose5.dot new file mode 100644 index 0000000..3f2285c --- /dev/null +++ b/plugin/debug/pkg/internal/graph/testdata/compose5.dot @@ -0,0 +1,11 @@ +digraph "testtitle" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\llabel3: \"foo\"\l" tooltip="testtitle"] } +N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" id="node1" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"] +N1_0 [label = "tag1" id="N1_0" fontsize=8 shape=box3d tooltip="10"] +N1 -> N1_0 [label=" 10" weight=100 tooltip="10" labeltooltip="10"] +NN1_0_0 [label = "tag2" id="NN1_0_0" fontsize=8 shape=box3d tooltip="20"] +N1_0 -> NN1_0_0 [label=" 20" weight=100 tooltip="20" labeltooltip="20"] +N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" id="node2" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] +N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src -> dest (10)" labeltooltip="src -> dest (10)" minlen=2] +} diff --git a/plugin/debug/pkg/internal/graph/testdata/compose6.dot b/plugin/debug/pkg/internal/graph/testdata/compose6.dot new file mode 100644 index 0000000..1dfc3fe --- /dev/null +++ b/plugin/debug/pkg/internal/graph/testdata/compose6.dot @@ -0,0 +1,7 @@ +digraph "testtitle" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "label1" [shape=box fontsize=16 
label="label1\llabel2\llabel3: \"foo\"\l" URL="http://example.com" target="_blank" tooltip="testtitle"] } +N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" id="node1" fontsize=22 shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"] +N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" id="node2" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] +N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src -> dest (10)" labeltooltip="src -> dest (10)"] +} diff --git a/plugin/debug/pkg/internal/graph/testdata/compose7.dot b/plugin/debug/pkg/internal/graph/testdata/compose7.dot new file mode 100644 index 0000000..8f749a7 --- /dev/null +++ b/plugin/debug/pkg/internal/graph/testdata/compose7.dot @@ -0,0 +1,7 @@ +digraph "testtitle" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "label1" [shape=box fontsize=16 label="label1\llabel2\llabel3: \"foo\"\l" tooltip="testtitle"] } +N1 [label="var\"src\"\n10 (10.00%)\nof 25 (25.00%)" id="node1" fontsize=22 shape=box tooltip="var\"src\" (25)" color="#b23c00" fillcolor="#edddd5"] +N2 [label="var\"#dest#\"\n15 (15.00%)\nof 25 (25.00%)" id="node2" fontsize=24 shape=box tooltip="var\"#dest#\" (25)" color="#b23c00" fillcolor="#edddd5"] +N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="var\"src\" -> var\"#dest#\" (10)" labeltooltip="var\"src\" -> var\"#dest#\" (10)"] +} diff --git a/plugin/debug/pkg/internal/graph/testdata/compose9.dot b/plugin/debug/pkg/internal/graph/testdata/compose9.dot new file mode 100644 index 0000000..2e163ce --- /dev/null +++ b/plugin/debug/pkg/internal/graph/testdata/compose9.dot @@ -0,0 +1,7 @@ +digraph "testtitle" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "comment line 1\lcomment line 2 \"unterminated double quote" [shape=box fontsize=16 label="comment line 1\lcomment line 2 \"unterminated double quote\lsecond comment \"double quote\"\l" tooltip="testtitle"] } +N1 [label="src\n10 (10.00%)\nof 25 (25.00%)" id="node1" fontsize=22 
shape=box tooltip="src (25)" color="#b23c00" fillcolor="#edddd5"] +N2 [label="dest\n15 (15.00%)\nof 25 (25.00%)" id="node2" fontsize=24 shape=box tooltip="dest (25)" color="#b23c00" fillcolor="#edddd5"] +N1 -> N2 [label=" 10" weight=11 color="#b28559" tooltip="src -> dest (10)" labeltooltip="src -> dest (10)"] +} diff --git a/plugin/debug/pkg/internal/measurement/measurement.go b/plugin/debug/pkg/internal/measurement/measurement.go new file mode 100644 index 0000000..e7a5732 --- /dev/null +++ b/plugin/debug/pkg/internal/measurement/measurement.go @@ -0,0 +1,294 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package measurement export utility functions to manipulate/format performance profile sample values. +package measurement + +import ( + "fmt" + "math" + "strings" + "time" + + "m7s.live/v5/plugin/debug/pkg/profile" +) + +// ScaleProfiles updates the units in a set of profiles to make them +// compatible. It scales the profiles to the smallest unit to preserve +// data. 
func ScaleProfiles(profiles []*profile.Profile) error {
	if len(profiles) == 0 {
		return nil
	}
	// Find the common period type across all profiles that declare one.
	periodTypes := make([]*profile.ValueType, 0, len(profiles))
	for _, p := range profiles {
		if p.PeriodType != nil {
			periodTypes = append(periodTypes, p.PeriodType)
		}
	}
	periodType, err := CommonValueType(periodTypes)
	if err != nil {
		return fmt.Errorf("period type: %v", err)
	}

	// Identify common sample types
	numSampleTypes := len(profiles[0].SampleType)
	for _, p := range profiles[1:] {
		if numSampleTypes != len(p.SampleType) {
			return fmt.Errorf("inconsistent samples type count: %d != %d", numSampleTypes, len(p.SampleType))
		}
	}
	// For each sample-type column, compute the finest common type across
	// all profiles.
	sampleType := make([]*profile.ValueType, numSampleTypes)
	for i := 0; i < numSampleTypes; i++ {
		sampleTypes := make([]*profile.ValueType, len(profiles))
		for j, p := range profiles {
			sampleTypes[j] = p.SampleType[i]
		}
		sampleType[i], err = CommonValueType(sampleTypes)
		if err != nil {
			return fmt.Errorf("sample types: %v", err)
		}
	}

	// Rescale each profile in place: its period and each sample-value
	// column are converted to the common units.
	for _, p := range profiles {
		if p.PeriodType != nil && periodType != nil {
			period, _ := Scale(p.Period, p.PeriodType.Unit, periodType.Unit)
			p.Period, p.PeriodType.Unit = int64(period), periodType.Unit
		}
		ratios := make([]float64, len(p.SampleType))
		for i, st := range p.SampleType {
			if sampleType[i] == nil {
				// No common unit for this column; leave values unchanged.
				ratios[i] = 1
				continue
			}
			ratios[i], _ = Scale(1, st.Unit, sampleType[i].Unit)
			p.SampleType[i].Unit = sampleType[i].Unit
		}
		if err := p.ScaleN(ratios); err != nil {
			return fmt.Errorf("scale: %v", err)
		}
	}
	return nil
}

// CommonValueType returns the finest type from a set of compatible
// types.
+func CommonValueType(ts []*profile.ValueType) (*profile.ValueType, error) { + if len(ts) <= 1 { + return nil, nil + } + minType := ts[0] + for _, t := range ts[1:] { + if !compatibleValueTypes(minType, t) { + return nil, fmt.Errorf("incompatible types: %v %v", *minType, *t) + } + if ratio, _ := Scale(1, t.Unit, minType.Unit); ratio < 1 { + minType = t + } + } + rcopy := *minType + return &rcopy, nil +} + +func compatibleValueTypes(v1, v2 *profile.ValueType) bool { + if v1 == nil || v2 == nil { + return true // No grounds to disqualify. + } + // Remove trailing 's' to permit minor mismatches. + if t1, t2 := strings.TrimSuffix(v1.Type, "s"), strings.TrimSuffix(v2.Type, "s"); t1 != t2 { + return false + } + + if v1.Unit == v2.Unit { + return true + } + for _, ut := range UnitTypes { + if ut.sniffUnit(v1.Unit) != nil && ut.sniffUnit(v2.Unit) != nil { + return true + } + } + return false +} + +// Scale a measurement from a unit to a different unit and returns +// the scaled value and the target unit. The returned target unit +// will be empty if uninteresting (could be skipped). +func Scale(value int64, fromUnit, toUnit string) (float64, string) { + // Avoid infinite recursion on overflow. + if value < 0 && -value > 0 { + v, u := Scale(-value, fromUnit, toUnit) + return -v, u + } + for _, ut := range UnitTypes { + if v, u, ok := ut.convertUnit(value, fromUnit, toUnit); ok { + return v, u + } + } + // Skip non-interesting units. + switch toUnit { + case "count", "sample", "unit", "minimum", "auto": + return float64(value), "" + default: + return float64(value), toUnit + } +} + +// Label returns the label used to describe a certain measurement. +func Label(value int64, unit string) string { + return ScaledLabel(value, unit, "auto") +} + +// ScaledLabel scales the passed-in measurement (if necessary) and +// returns the label used to describe a float measurement. 
+func ScaledLabel(value int64, fromUnit, toUnit string) string { + v, u := Scale(value, fromUnit, toUnit) + sv := strings.TrimSuffix(fmt.Sprintf("%.2f", v), ".00") + if sv == "0" || sv == "-0" { + return "0" + } + return sv + u +} + +// Percentage computes the percentage of total of a value, and encodes +// it as a string. At least two digits of precision are printed. +func Percentage(value, total int64) string { + var ratio float64 + if total != 0 { + ratio = math.Abs(float64(value)/float64(total)) * 100 + } + switch { + case math.Abs(ratio) >= 99.95 && math.Abs(ratio) <= 100.05: + return " 100%" + case math.Abs(ratio) >= 1.0: + return fmt.Sprintf("%5.2f%%", ratio) + default: + return fmt.Sprintf("%5.2g%%", ratio) + } +} + +// Unit includes a list of aliases representing a specific unit and a factor +// which one can multiple a value in the specified unit by to get the value +// in terms of the base unit. +type Unit struct { + CanonicalName string + aliases []string + Factor float64 +} + +// UnitType includes a list of units that are within the same category (i.e. +// memory or time units) and a default unit to use for this type of unit. +type UnitType struct { + DefaultUnit Unit + Units []Unit +} + +// findByAlias returns the unit associated with the specified alias. It returns +// nil if the unit with such alias is not found. +func (ut UnitType) findByAlias(alias string) *Unit { + for _, u := range ut.Units { + for _, a := range u.aliases { + if alias == a { + return &u + } + } + } + return nil +} + +// sniffUnit simpifies the input alias and returns the unit associated with the +// specified alias. It returns nil if the unit with such alias is not found. 
+func (ut UnitType) sniffUnit(unit string) *Unit { + unit = strings.ToLower(unit) + if len(unit) > 2 { + unit = strings.TrimSuffix(unit, "s") + } + return ut.findByAlias(unit) +} + +// autoScale takes in the value with units of the base unit and returns +// that value scaled to a reasonable unit if a reasonable unit is +// found. +func (ut UnitType) autoScale(value float64) (float64, string, bool) { + var f float64 + var unit string + for _, u := range ut.Units { + if u.Factor >= f && (value/u.Factor) >= 1.0 { + f = u.Factor + unit = u.CanonicalName + } + } + if f == 0 { + return 0, "", false + } + return value / f, unit, true +} + +// convertUnit converts a value from the fromUnit to the toUnit, autoscaling +// the value if the toUnit is "minimum" or "auto". If the fromUnit is not +// included in the unitType, then a false boolean will be returned. If the +// toUnit is not in the unitType, the value will be returned in terms of the +// default unitType. +func (ut UnitType) convertUnit(value int64, fromUnitStr, toUnitStr string) (float64, string, bool) { + fromUnit := ut.sniffUnit(fromUnitStr) + if fromUnit == nil { + return 0, "", false + } + v := float64(value) * fromUnit.Factor + if toUnitStr == "minimum" || toUnitStr == "auto" { + if v, u, ok := ut.autoScale(v); ok { + return v, u, true + } + return v / ut.DefaultUnit.Factor, ut.DefaultUnit.CanonicalName, true + } + toUnit := ut.sniffUnit(toUnitStr) + if toUnit == nil { + return v / ut.DefaultUnit.Factor, ut.DefaultUnit.CanonicalName, true + } + return v / toUnit.Factor, toUnit.CanonicalName, true +} + +// UnitTypes holds the definition of units known to pprof. 
var UnitTypes = []UnitType{{
	// Memory units, scaled by powers of 1024.
	Units: []Unit{
		{"B", []string{"b", "byte"}, 1},
		{"kB", []string{"kb", "kbyte", "kilobyte"}, float64(1 << 10)},
		{"MB", []string{"mb", "mbyte", "megabyte"}, float64(1 << 20)},
		{"GB", []string{"gb", "gbyte", "gigabyte"}, float64(1 << 30)},
		{"TB", []string{"tb", "tbyte", "terabyte"}, float64(1 << 40)},
		{"PB", []string{"pb", "pbyte", "petabyte"}, float64(1 << 50)},
	},
	DefaultUnit: Unit{"B", []string{"b", "byte"}, 1},
}, {
	// Time units, with factors expressed in nanoseconds via time.Duration.
	Units: []Unit{
		{"ns", []string{"ns", "nanosecond"}, float64(time.Nanosecond)},
		{"us", []string{"μs", "us", "microsecond"}, float64(time.Microsecond)},
		{"ms", []string{"ms", "millisecond"}, float64(time.Millisecond)},
		{"s", []string{"s", "sec", "second"}, float64(time.Second)},
		{"hrs", []string{"hour", "hr"}, float64(time.Hour)},
	},
	DefaultUnit: Unit{"s", []string{}, float64(time.Second)},
}, {
	// GCU (compute) units, scaled by powers of 1000 around a base of 1 GCU.
	Units: []Unit{
		{"n*GCU", []string{"nanogcu"}, 1e-9},
		{"u*GCU", []string{"microgcu"}, 1e-6},
		{"m*GCU", []string{"milligcu"}, 1e-3},
		{"GCU", []string{"gcu"}, 1},
		{"k*GCU", []string{"kilogcu"}, 1e3},
		{"M*GCU", []string{"megagcu"}, 1e6},
		{"G*GCU", []string{"gigagcu"}, 1e9},
		{"T*GCU", []string{"teragcu"}, 1e12},
		{"P*GCU", []string{"petagcu"}, 1e15},
	},
	DefaultUnit: Unit{"GCU", []string{}, 1.0},
}}
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package measurement + +import ( + "math" + "testing" +) + +func TestScale(t *testing.T) { + for _, tc := range []struct { + value int64 + fromUnit, toUnit string + wantValue float64 + wantUnit string + }{ + {1, "s", "ms", 1000, "ms"}, + {1, "kb", "b", 1024, "B"}, + {1, "kbyte", "b", 1024, "B"}, + {1, "kilobyte", "b", 1024, "B"}, + {1, "mb", "kb", 1024, "kB"}, + {1, "gb", "mb", 1024, "MB"}, + {1024, "gb", "tb", 1, "TB"}, + {1024, "tb", "pb", 1, "PB"}, + {2048, "mb", "auto", 2, "GB"}, + {3.1536e7, "s", "auto", 8760, "hrs"}, + {-1, "s", "ms", -1000, "ms"}, + {1, "foo", "count", 1, ""}, + {1, "foo", "bar", 1, "bar"}, + {2000, "count", "count", 2000, ""}, + {2000, "count", "auto", 2000, ""}, + {2000, "count", "minimum", 2000, ""}, + {8e10, "nanogcu", "petagcus", 8e-14, "P*GCU"}, + {1.5e10, "microGCU", "teraGCU", 1.5e-8, "T*GCU"}, + {3e6, "milliGCU", "gigagcu", 3e-6, "G*GCU"}, + {1000, "kilogcu", "megagcu", 1, "M*GCU"}, + {2000, "GCU", "kiloGCU", 2, "k*GCU"}, + {7, "megaGCU", "gcu", 7e6, "GCU"}, + {5, "gigagcus", "milligcu", 5e12, "m*GCU"}, + {7, "teragcus", "microGCU", 7e18, "u*GCU"}, + {1, "petaGCU", "nanogcus", 1e24, "n*GCU"}, + {100, "NanoGCU", "auto", 100, "n*GCU"}, + {5000, "nanogcu", "auto", 5, "u*GCU"}, + {3000, "MicroGCU", "auto", 3, "m*GCU"}, + {4000, "MilliGCU", "auto", 4, "GCU"}, + {4000, "GCU", "auto", 4, "k*GCU"}, + {5000, "KiloGCU", "auto", 5, "M*GCU"}, + {6000, "MegaGCU", "auto", 6, "G*GCU"}, + {7000, "GigaGCU", "auto", 7, "T*GCU"}, + {8000, "TeraGCU", "auto", 8, "P*GCU"}, + {9000, "PetaGCU", "auto", 9000, "P*GCU"}, + } { + if 
// floatEqual reports whether a and b are approximately equal, using a
// relative tolerance of 0.01%. Exactly equal values — including both
// being zero — compare equal directly; without that fast path the
// relative test computes 0/0 = NaN and wrongly reports two zeros as
// unequal.
func floatEqual(a, b float64) bool {
	if a == b {
		return true
	}
	diff := math.Abs(a - b)
	avg := (math.Abs(a) + math.Abs(b)) / 2
	return diff/avg < 0.0001
}
// A FlagSet creates and parses command-line flags.
// It is similar to the standard flag.FlagSet.
type FlagSet interface {
	// Bool, Int, Float64, and String define new flags,
	// like the functions of the same name in package flag.
	Bool(name string, def bool, usage string) *bool
	Int(name string, def int, usage string) *int
	Float64(name string, def float64, usage string) *float64
	String(name string, def string, usage string) *string

	// StringList is similar to String but allows multiple values for a
	// single flag.
	StringList(name string, def string, usage string) *[]*string

	// ExtraUsage returns any additional text that should be printed after the
	// standard usage message. The extra usage message returned includes all text
	// added with AddExtraUsage().
	// The typical use of ExtraUsage is to show any custom flags defined by the
	// specific pprof plugins being used.
	ExtraUsage() string

	// AddExtraUsage appends additional text to the end of the extra usage message.
	AddExtraUsage(eu string)

	// Parse initializes the flags with their values for this run
	// and returns the non-flag command line arguments.
	// If an unknown flag is encountered or there are no arguments,
	// Parse should call usage and return nil.
	Parse(usage func()) []string
}
// An ObjTool inspects shared libraries and executable files.
type ObjTool interface {
	// Open opens the named object file. If the object is a shared
	// library, start/limit/offset are the addresses where it is mapped
	// into memory in the address space being inspected. If the object
	// is a linux kernel, relocationSymbol is the name of the symbol
	// corresponding to the start address.
	Open(file string, start, limit, offset uint64, relocationSymbol string) (ObjFile, error)

	// Disasm disassembles the named object file, starting at
	// the start address and stopping at (before) the end address.
	// intelSyntax selects Intel assembly syntax for the returned
	// instructions (see Options.IntelSyntax).
	Disasm(file string, start, end uint64, intelSyntax bool) ([]Inst, error)
}
type ObjFile interface {
	// Name returns the underlying file name, if available.
	Name() string

	// ObjAddr returns the objdump (linker) address corresponding to a runtime
	// address, and an error.
	ObjAddr(addr uint64) (uint64, error)

	// BuildID returns the GNU build ID of the file, or an empty string.
	BuildID() string

	// SourceLine reports the source line information for a given
	// address in the file. Due to inlining, the source line information
	// is in general a list of positions representing a call stack,
	// with the leaf function first.
	SourceLine(addr uint64) ([]Frame, error)

	// Symbols returns a list of symbols in the object file.
	// If r is not nil, Symbols restricts the list to symbols
	// with names matching the regular expression.
	// If addr is not zero, Symbols restricts the list to symbols
	// containing that address.
	Symbols(r *regexp.Regexp, addr uint64) ([]*Sym, error)

	// Close closes the file, releasing associated resources.
	Close() error
}
// HTTPServerArgs contains arguments needed by an HTTP server that
// is exporting a pprof web interface.
type HTTPServerArgs struct {
	// Hostport contains the http server address (derived from flags).
	Hostport string

	Host string // Host portion of Hostport
	Port int    // Port portion of Hostport

	// Handlers maps from URL paths to the handler to invoke to
	// serve that path.
	// NOTE(review): behavior for paths absent from the map is decided by
	// whichever HTTPServer implementation consumes these args — confirm
	// with the concrete server before relying on any fallback.
	Handlers map[string]http.Handler
}
// Diff compares two byte arrays using the diff tool to highlight the
// differences. It is meant for testing purposes to display the
// differences between expected and actual output.
func Diff(b1, b2 []byte) (data []byte, err error) {
	// Write each input to its own temp file; the previous version ignored
	// Write errors, which could silently diff truncated files.
	f1, err := writeTempFile("proto_test", b1)
	if err != nil {
		return nil, err
	}
	defer os.Remove(f1)

	f2, err := writeTempFile("proto_test", b2)
	if err != nil {
		return nil, err
	}
	defer os.Remove(f2)

	data, err = exec.Command("diff", "-u", f1, f2).CombinedOutput()
	if len(data) > 0 {
		// diff exits with a non-zero status when the files don't match.
		// Ignore that failure as long as we get output.
		err = nil
	}
	if err != nil {
		data = []byte(fmt.Sprintf("diff failed: %v\nb1: %q\nb2: %q\n", err, b1, b2))
		err = nil
	}
	return
}

// writeTempFile writes data to a fresh temporary file and returns its name.
// The file is removed by writeTempFile only on a write failure; otherwise
// the caller owns the cleanup.
func writeTempFile(prefix string, data []byte) (string, error) {
	f, err := os.CreateTemp("", prefix)
	if err != nil {
		return "", err
	}
	defer f.Close()
	if _, err := f.Write(data); err != nil {
		os.Remove(f.Name())
		return "", err
	}
	return f.Name(), nil
}
+func (ui *TestUI) Print(args ...interface{}) { +} + +// PrintErr messages may trigger an error failure. A fixed number of +// error messages are permitted when appropriate. +func (ui *TestUI) PrintErr(args ...interface{}) { + if ui.AllowRx != "" { + if matched, err := regexp.MatchString(ui.AllowRx, fmt.Sprint(args...)); matched || err != nil { + if err != nil { + ui.T.Errorf("failed to match against regex %q: %v", ui.AllowRx, err) + } + ui.NumAllowRxMatches++ + return + } + } + if ui.Ignore > 0 { + ui.Ignore-- + return + } + // Stringify arguments with fmt.Sprint() to match what default UI + // implementation does. Without this Error() calls fmt.Sprintln() which + // _always_ adds spaces between arguments, unlike fmt.Sprint() which only + // adds them between arguments if neither is string. + ui.T.Error("unexpected error: " + fmt.Sprint(args...)) +} + +// IsTerminal indicates if the UI is an interactive terminal. +func (ui *TestUI) IsTerminal() bool { + return false +} + +// WantBrowser indicates whether a browser should be opened with the -http option. +func (ui *TestUI) WantBrowser() bool { + return false +} + +// SetAutoComplete is not supported by the test UI. +func (ui *TestUI) SetAutoComplete(_ func(string) string) { +} + +// LargeProfile returns a large profile that may be useful in benchmarks. +// +// If the flag --large_profile is set, the contents of the file +// named by the flag are returned. Otherwise an embedded profile (~1.2MB) +// for a synthetic program is returned. +func LargeProfile(tb testing.TB) []byte { + tb.Helper() + if f := *flagLargeProfile; f != "" { + // Use custom profile. 
// pkgRE captures the package portion of a symbol name: everything up to
// the first "." or "::" that appears after the last "/". Anchoring the
// search after the last "/" keeps names such as "some.url.com/foo.bar"
// from being split at the dots inside the host part.
var pkgRE = regexp.MustCompile(`^((.*/)?[\w\d_]+)(\.|::)([^/]*)$`)

// packageName returns the package part of the named symbol, or the empty
// string when no package can be identified.
func packageName(name string) string {
	if m := pkgRE.FindStringSubmatch(name); m != nil {
		return m[1]
	}
	return ""
}
// Output formats. Generate dispatches on these values; each constant's
// printer is named in the comment (WebList alone is handled by the
// driver package, per the note in Generate).
const (
	Callgrind = iota // printCallgrind
	Comments         // printComments
	Dis              // printAssembly
	Dot              // printDOT
	List             // printSource
	Proto            // printProto
	Raw              // profile.String dumped verbatim
	Tags             // printTags
	Text             // printText
	TopProto         // printTopProto
	Traces           // printTraces
	Tree             // printTree
	WebList          // handled in driver package
)
// Generate generates a report as directed by the Report.
// obj is consulted only for the Dis (disassembly) format; every other
// format is rendered from the profile alone.
func Generate(w io.Writer, rpt *Report, obj plugin.ObjTool) error {
	o := rpt.options

	switch o.OutputFormat {
	case Comments:
		return printComments(w, rpt)
	case Dot:
		return printDOT(w, rpt)
	case Tree:
		return printTree(w, rpt)
	case Text:
		return printText(w, rpt)
	case Traces:
		return printTraces(w, rpt)
	case Raw:
		fmt.Fprint(w, rpt.prof.String())
		return nil
	case Tags:
		return printTags(w, rpt)
	case Proto:
		return printProto(w, rpt)
	case TopProto:
		return printTopProto(w, rpt)
	case Dis:
		return printAssembly(w, rpt, obj)
	case List:
		return printSource(w, rpt)
	case Callgrind:
		return printCallgrind(w, rpt)
	}
	// Note: WebList handling is in driver package.
	return fmt.Errorf("unexpected output format %v", o.OutputFormat)
}
// selectOutputUnit rewrites o.OutputUnit when it is the sentinel
// "minimum": it picks the unit that best fits the smallest non-zero
// sample in g, consulting measurement.Scale. Any other OutputUnit (or an
// empty graph) is left untouched.
func (rpt *Report) selectOutputUnit(g *graph.Graph) {
	o := rpt.options

	// Select best unit for profile output.
	// Find the appropriate units for the smallest non-zero sample
	if o.OutputUnit != "minimum" || len(g.Nodes) == 0 {
		return
	}
	var minValue int64

	// Smallest non-zero |flat| value across nodes, falling back to |cum|
	// for nodes whose flat value is zero.
	for _, n := range g.Nodes {
		nodeMin := abs64(n.FlatValue())
		if nodeMin == 0 {
			nodeMin = abs64(n.CumValue())
		}
		if nodeMin > 0 && (minValue == 0 || nodeMin < minValue) {
			minValue = nodeMin
		}
	}
	maxValue := rpt.total
	if minValue == 0 {
		minValue = maxValue
	}

	// Apply the same divide_by ratio that will be applied to the output.
	if r := o.Ratio; r > 0 && r != 1 {
		minValue = int64(float64(minValue) * r)
		maxValue = int64(float64(maxValue) * r)
	}

	_, minUnit := measurement.Scale(minValue, o.SampleUnit, "minimum")
	_, maxUnit := measurement.Scale(maxValue, o.SampleUnit, "minimum")

	unit := minUnit
	if minUnit != maxUnit && minValue*100 < maxValue && o.OutputFormat != Callgrind {
		// Minimum and maximum values have different units. Scale
		// minimum by 100 to use larger units, allowing minimum value to
		// be scaled down to 0.01, except for callgrind reports since
		// they can only represent integer values.
		_, unit = measurement.Scale(100*minValue, o.SampleUnit, "minimum")
	}

	if unit != "" {
		o.OutputUnit = unit
	} else {
		o.OutputUnit = o.SampleUnit
	}
}
+ // TODO: modify to select first numeric tag if no bytes tag + for _, s := range prof.Sample { + numLabels := make(map[string][]int64, len(s.NumLabel)) + numUnits := make(map[string][]string, len(s.NumLabel)) + for k, vs := range s.NumLabel { + if k == "bytes" { + unit := o.NumLabelUnits[k] + numValues := make([]int64, len(vs)) + numUnit := make([]string, len(vs)) + for i, v := range vs { + numValues[i] = v + numUnit[i] = unit + } + numLabels[k] = append(numLabels[k], numValues...) + numUnits[k] = append(numUnits[k], numUnit...) + } + } + s.NumLabel = numLabels + s.NumUnit = numUnits + } + + // Remove label marking samples from the base profiles, so it does not appear + // as a nodelet in the graph view. + prof.RemoveLabel("pprof::base") + + formatTag := func(v int64, key string) string { + return measurement.ScaledLabel(v, key, o.OutputUnit) + } + + gopt := &graph.Options{ + SampleValue: o.SampleValue, + SampleMeanDivisor: o.SampleMeanDivisor, + FormatTag: formatTag, + CallTree: o.CallTree && (o.OutputFormat == Dot || o.OutputFormat == Callgrind), + DropNegative: o.DropNegative, + KeptNodes: nodes, + } + + // Only keep binary names for disassembly-based reports, otherwise + // remove it to allow merging of functions across binaries. + switch o.OutputFormat { + case Raw, List, WebList, Dis, Callgrind: + gopt.ObjNames = true + } + + return graph.New(rpt.prof, gopt) +} + +// printProto writes the incoming proto via the writer w. +// If the divide_by option has been specified, samples are scaled appropriately. +func printProto(w io.Writer, rpt *Report) error { + p, o := rpt.prof, rpt.options + + // Apply the sample ratio to all samples before saving the profile. + if r := o.Ratio; r > 0 && r != 1 { + for _, sample := range p.Sample { + for i, v := range sample.Value { + sample.Value[i] = int64(float64(v) * r) + } + } + } + return p.Write(w) +} + +// printTopProto writes a list of the hottest routines in a profile as a profile.proto. 
+func printTopProto(w io.Writer, rpt *Report) error { + p := rpt.prof + o := rpt.options + g, _, _, _ := rpt.newTrimmedGraph() + rpt.selectOutputUnit(g) + + out := profile.Profile{ + SampleType: []*profile.ValueType{ + {Type: "cum", Unit: o.OutputUnit}, + {Type: "flat", Unit: o.OutputUnit}, + }, + TimeNanos: p.TimeNanos, + DurationNanos: p.DurationNanos, + PeriodType: p.PeriodType, + Period: p.Period, + } + functionMap := make(functionMap) + for i, n := range g.Nodes { + f, added := functionMap.findOrAdd(n.Info) + if added { + out.Function = append(out.Function, f) + } + flat, cum := n.FlatValue(), n.CumValue() + l := &profile.Location{ + ID: uint64(i + 1), + Address: n.Info.Address, + Line: []profile.Line{ + { + Line: int64(n.Info.Lineno), + Column: int64(n.Info.Columnno), + Function: f, + }, + }, + } + + fv, _ := measurement.Scale(flat, o.SampleUnit, o.OutputUnit) + cv, _ := measurement.Scale(cum, o.SampleUnit, o.OutputUnit) + s := &profile.Sample{ + Location: []*profile.Location{l}, + Value: []int64{int64(cv), int64(fv)}, + } + out.Location = append(out.Location, l) + out.Sample = append(out.Sample, s) + } + + return out.Write(w) +} + +type functionMap map[string]*profile.Function + +// findOrAdd takes a node representing a function, adds the function +// represented by the node to the map if the function is not already present, +// and returns the function the node represents. This also returns a boolean, +// which is true if the function was added and false otherwise. +func (fm functionMap) findOrAdd(ni graph.NodeInfo) (*profile.Function, bool) { + fName := fmt.Sprintf("%q%q%q%d", ni.Name, ni.OrigName, ni.File, ni.StartLine) + + if f := fm[fName]; f != nil { + return f, false + } + + f := &profile.Function{ + ID: uint64(len(fm) + 1), + Name: ni.Name, + SystemName: ni.OrigName, + Filename: ni.File, + StartLine: int64(ni.StartLine), + } + fm[fName] = f + return f, true +} + +// printAssembly prints an annotated assembly listing. 
func printAssembly(w io.Writer, rpt *Report, obj plugin.ObjTool) error {
	return PrintAssembly(w, rpt, obj, -1)
}

// PrintAssembly prints annotated disassembly of rpt to w.
// maxFuncs < 0 means no limit; otherwise only the maxFuncs hottest
// symbols (by flat sum) are printed.
func PrintAssembly(w io.Writer, rpt *Report, obj plugin.ObjTool, maxFuncs int) error {
	o := rpt.options
	prof := rpt.prof

	g := rpt.newGraph(nil)

	// If the regexp source can be parsed as an address, also match
	// functions that land on that address.
	var address *uint64
	if hex, err := strconv.ParseUint(o.Symbol.String(), 0, 64); err == nil {
		address = &hex
	}

	fmt.Fprintln(w, "Total:", rpt.formatValue(rpt.total))
	symbols := symbolsFromBinaries(prof, g, o.Symbol, address, obj)
	symNodes := nodesPerSymbol(g.Nodes, symbols)

	// Sort for printing.
	var syms []*objSymbol
	for s := range symNodes {
		syms = append(syms, s)
	}
	byName := func(a, b *objSymbol) bool {
		if na, nb := a.sym.Name[0], b.sym.Name[0]; na != nb {
			return na < nb
		}
		return a.sym.Start < b.sym.Start
	}
	if maxFuncs < 0 {
		// Unlimited output: stable name order.
		sort.Sort(orderSyms{syms, byName})
	} else {
		// Limited output: hottest first, name order as tie-break.
		byFlatSum := func(a, b *objSymbol) bool {
			suma, _ := symNodes[a].Sum()
			sumb, _ := symNodes[b].Sum()
			if suma != sumb {
				return suma > sumb
			}
			return byName(a, b)
		}
		sort.Sort(orderSyms{syms, byFlatSum})
		if len(syms) > maxFuncs {
			syms = syms[:maxFuncs]
		}
	}

	if len(syms) == 0 {
		// The symbol regexp case
		if address == nil {
			return fmt.Errorf("no matches found for regexp %s", o.Symbol)
		}

		// The address case
		if len(symbols) == 0 {
			return fmt.Errorf("no matches found for address 0x%x", *address)
		}
		return fmt.Errorf("address 0x%x found in binary, but the corresponding symbols do not have samples in the profile", *address)
	}

	// Correlate the symbols from the binary with the profile samples.
	for _, s := range syms {
		sns := symNodes[s]

		// Gather samples for this symbol.
		flatSum, cumSum := sns.Sum()

		// Get the function assembly.
		insts, err := obj.Disasm(s.sym.File, s.sym.Start, s.sym.End, o.IntelSyntax)
		if err != nil {
			return err
		}

		ns := annotateAssembly(insts, sns, s.file)

		fmt.Fprintf(w, "ROUTINE ======================== %s\n", s.sym.Name[0])
		for _, name := range s.sym.Name[1:] {
			fmt.Fprintf(w, " AKA ======================== %s\n", name)
		}
		fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n",
			rpt.formatValue(flatSum), rpt.formatValue(cumSum),
			measurement.Percentage(cumSum, rpt.total))

		function, file, line := "", "", 0
		for _, n := range ns {
			locStr := ""
			// Skip loc information if it hasn't changed from previous instruction.
			if n.function != function || n.file != file || n.line != line {
				function, file, line = n.function, n.file, n.line
				if n.function != "" {
					locStr = n.function + " "
				}
				if n.file != "" {
					locStr += n.file
					if n.line != 0 {
						locStr += fmt.Sprintf(":%d", n.line)
					}
				}
			}
			switch {
			case locStr == "":
				// No location info, just print the instruction.
				fmt.Fprintf(w, "%10s %10s %10x: %s\n",
					valueOrDot(n.flatValue(), rpt),
					valueOrDot(n.cumValue(), rpt),
					n.address, n.instruction,
				)
			case len(n.instruction) < 40:
				// Short instruction, print loc on the same line.
				fmt.Fprintf(w, "%10s %10s %10x: %-40s;%s\n",
					valueOrDot(n.flatValue(), rpt),
					valueOrDot(n.cumValue(), rpt),
					n.address, n.instruction,
					locStr,
				)
			default:
				// Long instruction, print loc on a separate line.
				fmt.Fprintf(w, "%74s;%s\n", "", locStr)
				fmt.Fprintf(w, "%10s %10s %10x: %s\n",
					valueOrDot(n.flatValue(), rpt),
					valueOrDot(n.cumValue(), rpt),
					n.address, n.instruction,
				)
			}
		}
	}
	return nil
}

// symbolsFromBinaries examines the binaries listed on the profile that have
// associated samples, and returns the identified symbols matching rx.
func symbolsFromBinaries(prof *profile.Profile, g *graph.Graph, rx *regexp.Regexp, address *uint64, obj plugin.ObjTool) []*objSymbol {
	// fileHasSamplesAndMatched is for optimization to speed up pprof: when later
	// walking through the profile mappings, it will only examine the ones that have
	// samples and are matched to the regexp.
	fileHasSamplesAndMatched := make(map[string]bool)
	for _, n := range g.Nodes {
		if name := n.Info.PrintableName(); rx.MatchString(name) && n.Info.Objfile != "" {
			fileHasSamplesAndMatched[n.Info.Objfile] = true
		}
	}

	// Walk all mappings looking for matching functions with samples.
	var objSyms []*objSymbol
	for _, m := range prof.Mapping {
		// Skip the mapping if its file does not have samples or is not matched to
		// the regexp (unless the regexp is an address and the mapping's range covers
		// the address)
		if !fileHasSamplesAndMatched[m.File] {
			if address == nil || !(m.Start <= *address && *address <= m.Limit) {
				continue
			}
		}

		f, err := obj.Open(m.File, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol)
		if err != nil {
			// Best effort: report the open failure and keep scanning the
			// remaining mappings.
			fmt.Printf("%v\n", err)
			continue
		}

		// Find symbols in this binary matching the user regexp.
		var addr uint64
		if address != nil {
			addr = *address
		}
		msyms, err := f.Symbols(rx, addr)
		f.Close()
		if err != nil {
			continue
		}
		for _, ms := range msyms {
			objSyms = append(objSyms,
				&objSymbol{
					sym:  ms,
					file: f,
				},
			)
		}
	}

	return objSyms
}

// objSymbol represents a symbol identified from a binary. It includes
// the SymbolInfo from the disasm package and the object file it was
// found in, which is used to map sample addresses to object addresses.
type objSymbol struct {
	sym  *plugin.Sym
	file plugin.ObjFile
}

// orderSyms is a wrapper type to sort []*objSymbol by a supplied comparator.
type orderSyms struct {
	v    []*objSymbol
	less func(a, b *objSymbol) bool
}

func (o orderSyms) Len() int           { return len(o.v) }
func (o orderSyms) Less(i, j int) bool { return o.less(o.v[i], o.v[j]) }
func (o orderSyms) Swap(i, j int)      { o.v[i], o.v[j] = o.v[j], o.v[i] }

// nodesPerSymbol classifies nodes into a group of symbols.
func nodesPerSymbol(ns graph.Nodes, symbols []*objSymbol) map[*objSymbol]graph.Nodes {
	symNodes := make(map[*objSymbol]graph.Nodes)
	for _, s := range symbols {
		// Gather samples for this symbol.
		for _, n := range ns {
			if address, err := s.file.ObjAddr(n.Info.Address); err == nil && address >= s.sym.Start && address < s.sym.End {
				symNodes[s] = append(symNodes[s], n)
			}
		}
	}
	return symNodes
}

// assemblyInstruction is one disassembled instruction annotated with the
// sample values attributed to it.
type assemblyInstruction struct {
	address     uint64
	instruction string
	function    string
	file        string
	line        int
	flat, cum   int64 // accumulated sample values
	flatDiv, cumDiv int64 // divisors for mean computation; 0 means no division
	startsBlock bool
	inlineCalls []callID
}

// callID identifies an inlined call site by file and line.
type callID struct {
	file string
	line int
}

// flatValue returns the flat value, applying the mean divisor if present.
func (a *assemblyInstruction) flatValue() int64 {
	if a.flatDiv != 0 {
		return a.flat / a.flatDiv
	}
	return a.flat
}

// cumValue returns the cumulative value, applying the mean divisor if present.
func (a *assemblyInstruction) cumValue() int64 {
	if a.cumDiv != 0 {
		return a.cum / a.cumDiv
	}
	return a.cum
}

// annotateAssembly annotates a set of assembly instructions with a
// set of samples. It returns a set of nodes to display. file is used
// to translate sample addresses into object addresses so they can be
// matched against instruction addresses.
func annotateAssembly(insts []plugin.Inst, samples graph.Nodes, file plugin.ObjFile) []assemblyInstruction {
	// Add end marker to simplify printing loop.
	insts = append(insts, plugin.Inst{
		Addr: ^uint64(0),
	})

	// Ensure samples are sorted by address.
	samples.Sort(graph.AddressOrder)

	// s is a cursor into samples; both insts and samples are address-sorted,
	// so a single forward pass merges them.
	s := 0
	asm := make([]assemblyInstruction, 0, len(insts))
	for ix, in := range insts[:len(insts)-1] {
		n := assemblyInstruction{
			address:     in.Addr,
			instruction: in.Text,
			function:    in.Function,
			line:        in.Line,
		}
		if in.File != "" {
			n.file = filepath.Base(in.File)
		}

		// Sum all the samples until the next instruction (to account
		// for samples attributed to the middle of an instruction).
		for next := insts[ix+1].Addr; s < len(samples); s++ {
			if addr, err := file.ObjAddr(samples[s].Info.Address); err != nil || addr >= next {
				break
			}
			sample := samples[s]
			n.flatDiv += sample.FlatDiv
			n.flat += sample.Flat
			n.cumDiv += sample.CumDiv
			n.cum += sample.Cum
			if f := sample.Info.File; f != "" && n.file == "" {
				n.file = filepath.Base(f)
			}
			if ln := sample.Info.Lineno; ln != 0 && n.line == 0 {
				n.line = ln
			}
			if f := sample.Info.Name; f != "" && n.function == "" {
				n.function = f
			}
		}
		asm = append(asm, n)
	}

	return asm
}

// valueOrDot formats a value according to a report, intercepting zero
// values.
func valueOrDot(value int64, rpt *Report) string {
	if value == 0 {
		return "."
	}
	return rpt.formatValue(value)
}

// printTags collects all tags referenced in the profile and prints
// them in a sorted table.
func printTags(w io.Writer, rpt *Report) error {
	p := rpt.prof

	o := rpt.options
	formatTag := func(v int64, key string) string {
		return measurement.ScaledLabel(v, key, o.OutputUnit)
	}

	// Hashtable to accumulate tags as key,value,count.
	tagMap := make(map[string]map[string]int64)
	for _, s := range p.Sample {
		for key, vals := range s.Label {
			for _, val := range vals {
				valueMap, ok := tagMap[key]
				if !ok {
					valueMap = make(map[string]int64)
					tagMap[key] = valueMap
				}
				valueMap[val] += o.SampleValue(s.Value)
			}
		}
		for key, vals := range s.NumLabel {
			unit := o.NumLabelUnits[key]
			for _, nval := range vals {
				// Numeric labels are rendered with their unit before counting.
				val := formatTag(nval, unit)
				valueMap, ok := tagMap[key]
				if !ok {
					valueMap = make(map[string]int64)
					tagMap[key] = valueMap
				}
				valueMap[val] += o.SampleValue(s.Value)
			}
		}
	}

	tagKeys := make([]*graph.Tag, 0, len(tagMap))
	for key := range tagMap {
		tagKeys = append(tagKeys, &graph.Tag{Name: key})
	}
	tabw := tabwriter.NewWriter(w, 0, 0, 1, ' ', tabwriter.AlignRight)
	for _, tagKey := range graph.SortTags(tagKeys, true) {
		var total int64
		key := tagKey.Name
		tags := make([]*graph.Tag, 0, len(tagMap[key]))
		for t, c := range tagMap[key] {
			total += c
			tags = append(tags, &graph.Tag{Name: t, Flat: c})
		}

		f, u := measurement.Scale(total, o.SampleUnit, o.OutputUnit)
		fmt.Fprintf(tabw, "%s:\t Total %.1f%s\n", key, f, u)
		for _, t := range graph.SortTags(tags, true) {
			f, u := measurement.Scale(t.FlatValue(), o.SampleUnit, o.OutputUnit)
			if total > 0 {
				fmt.Fprintf(tabw, " \t%.1f%s (%s):\t %s\n", f, u, measurement.Percentage(t.FlatValue(), total), t.Name)
			} else {
				fmt.Fprintf(tabw, " \t%.1f%s:\t %s\n", f, u, t.Name)
			}
		}
		fmt.Fprintln(tabw)
	}
	return tabw.Flush()
}

// printComments prints all freeform comments in the profile.
func printComments(w io.Writer, rpt *Report) error {
	p := rpt.prof

	for _, c := range p.Comments {
		fmt.Fprintln(w, c)
	}
	return nil
}

// TextItem holds a single text report entry.
+type TextItem struct { + Name string + InlineLabel string // Not empty if inlined + Flat, Cum int64 // Raw values + FlatFormat, CumFormat string // Formatted values +} + +// TextItems returns a list of text items from the report and a list +// of labels that describe the report. +func TextItems(rpt *Report) ([]TextItem, []string) { + g, origCount, droppedNodes, _ := rpt.newTrimmedGraph() + rpt.selectOutputUnit(g) + labels := reportLabels(rpt, graphTotal(g), len(g.Nodes), origCount, droppedNodes, 0, false) + + var items []TextItem + var flatSum int64 + for _, n := range g.Nodes { + name, flat, cum := n.Info.PrintableName(), n.FlatValue(), n.CumValue() + + var inline, noinline bool + for _, e := range n.In { + if e.Inline { + inline = true + } else { + noinline = true + } + } + + var inl string + if inline { + if noinline { + inl = "(partial-inline)" + } else { + inl = "(inline)" + } + } + + flatSum += flat + items = append(items, TextItem{ + Name: name, + InlineLabel: inl, + Flat: flat, + Cum: cum, + FlatFormat: rpt.formatValue(flat), + CumFormat: rpt.formatValue(cum), + }) + } + return items, labels +} + +// printText prints a flat text report for a profile. +func printText(w io.Writer, rpt *Report) error { + items, labels := TextItems(rpt) + fmt.Fprintln(w, strings.Join(labels, "\n")) + fmt.Fprintf(w, "%10s %5s%% %5s%% %10s %5s%%\n", + "flat", "flat", "sum", "cum", "cum") + var flatSum int64 + for _, item := range items { + inl := item.InlineLabel + if inl != "" { + inl = " " + inl + } + flatSum += item.Flat + fmt.Fprintf(w, "%10s %s %s %10s %s %s%s\n", + item.FlatFormat, measurement.Percentage(item.Flat, rpt.total), + measurement.Percentage(flatSum, rpt.total), + item.CumFormat, measurement.Percentage(item.Cum, rpt.total), + item.Name, inl) + } + return nil +} + +// printTraces prints all traces from a profile. 
func printTraces(w io.Writer, rpt *Report) error {
	fmt.Fprintln(w, strings.Join(ProfileLabels(rpt), "\n"))

	prof := rpt.prof
	o := rpt.options

	const separator = "-----------+-------------------------------------------------------"

	_, locations := graph.CreateNodes(prof, &graph.Options{})
	for _, sample := range prof.Sample {
		type stk struct {
			*graph.NodeInfo
			inline bool
		}
		var stack []stk
		for _, loc := range sample.Location {
			nodes := locations[loc.ID]
			for i, n := range nodes {
				// The inline flag may be inaccurate if 'show' or 'hide' filter is
				// used. See https://github.com/google/pprof/issues/511.
				inline := i != len(nodes)-1
				stack = append(stack, stk{&n.Info, inline})
			}
		}

		if len(stack) == 0 {
			continue
		}

		fmt.Fprintln(w, separator)
		// Print any text labels for the sample.
		var labels []string
		for s, vs := range sample.Label {
			labels = append(labels, fmt.Sprintf("%10s: %s\n", s, strings.Join(vs, " ")))
		}
		sort.Strings(labels)
		fmt.Fprint(w, strings.Join(labels, ""))

		// Print any numeric labels for the sample
		var numLabels []string
		for key, vals := range sample.NumLabel {
			unit := o.NumLabelUnits[key]
			numValues := make([]string, len(vals))
			for i, vv := range vals {
				numValues[i] = measurement.Label(vv, unit)
			}
			numLabels = append(numLabels, fmt.Sprintf("%10s: %s\n", key, strings.Join(numValues, " ")))
		}
		sort.Strings(numLabels)
		fmt.Fprint(w, strings.Join(numLabels, ""))

		var d, v int64
		v = o.SampleValue(sample.Value)
		if o.SampleMeanDivisor != nil {
			d = o.SampleMeanDivisor(sample.Value)
		}
		// Print call stack. When a mean divisor is configured, show the mean
		// rather than the raw sum.
		if d != 0 {
			v = v / d
		}
		for i, s := range stack {
			var vs, inline string
			if i == 0 {
				// Only the leaf frame carries the sample value.
				vs = rpt.formatValue(v)
			}
			if s.inline {
				inline = " (inline)"
			}
			fmt.Fprintf(w, "%10s   %s%s\n", vs, s.PrintableName(), inline)
		}
	}
	fmt.Fprintln(w, separator)
	return nil
}

// printCallgrind prints a graph for a profile on callgrind format.
func printCallgrind(w io.Writer, rpt *Report) error {
	o := rpt.options
	// Callgrind reports should include every node; disable trimming.
	rpt.options.NodeFraction = 0
	rpt.options.EdgeFraction = 0
	rpt.options.NodeCount = 0

	g, _, _, _ := rpt.newTrimmedGraph()
	rpt.selectOutputUnit(g)

	nodeNames := getDisambiguatedNames(g)

	fmt.Fprintln(w, "positions: instr line")
	fmt.Fprintln(w, "events:", o.SampleType+"("+o.OutputUnit+")")

	// Name tables for the callgrind compression scheme (see callgrindName).
	objfiles := make(map[string]int)
	files := make(map[string]int)
	names := make(map[string]int)

	// prevInfo points to the previous NodeInfo.
	// It is used to group cost lines together as much as possible.
	var prevInfo *graph.NodeInfo
	for _, n := range g.Nodes {
		if prevInfo == nil || n.Info.Objfile != prevInfo.Objfile || n.Info.File != prevInfo.File || n.Info.Name != prevInfo.Name {
			fmt.Fprintln(w)
			fmt.Fprintln(w, "ob="+callgrindName(objfiles, n.Info.Objfile))
			fmt.Fprintln(w, "fl="+callgrindName(files, n.Info.File))
			fmt.Fprintln(w, "fn="+callgrindName(names, n.Info.Name))
		}

		addr := callgrindAddress(prevInfo, n.Info.Address)
		sv, _ := measurement.Scale(n.FlatValue(), o.SampleUnit, o.OutputUnit)
		fmt.Fprintf(w, "%s %d %d\n", addr, n.Info.Lineno, int64(sv))

		// Print outgoing edges.
		for _, out := range n.Out.Sort() {
			c, _ := measurement.Scale(out.Weight, o.SampleUnit, o.OutputUnit)
			callee := out.Dest
			fmt.Fprintln(w, "cfl="+callgrindName(files, callee.Info.File))
			fmt.Fprintln(w, "cfn="+callgrindName(names, nodeNames[callee]))
			// pprof doesn't have a flat weight for a call, leave as 0.
			fmt.Fprintf(w, "calls=0 %s %d\n", callgrindAddress(prevInfo, callee.Info.Address), callee.Info.Lineno)
			// TODO: This address may be in the middle of a call
			// instruction. It would be best to find the beginning
			// of the instruction, but the tools seem to handle
			// this OK.
			fmt.Fprintf(w, "* * %d\n", int64(c))
		}

		prevInfo = &n.Info
	}

	return nil
}

// getDisambiguatedNames returns a map from each node in the graph to
// the name to use in the callgrind output. Callgrind merges all
// functions with the same [file name, function name]. Add a [%d/n]
// suffix to disambiguate nodes with different values of
// node.Function, which we want to keep separate. In particular, this
// affects graphs created with --call_tree, where nodes from different
// contexts are associated to different Functions.
func getDisambiguatedNames(g *graph.Graph) map[*graph.Node]string {
	nodeName := make(map[*graph.Node]string, len(g.Nodes))

	type names struct {
		file, function string
	}

	// nameFunctionIndex maps the callgrind names (filename, function)
	// to the node.Function values found for that name, and each
	// node.Function value to a sequential index to be used on the
	// disambiguated name.
	nameFunctionIndex := make(map[names]map[*graph.Node]int)
	for _, n := range g.Nodes {
		nm := names{n.Info.File, n.Info.Name}
		p, ok := nameFunctionIndex[nm]
		if !ok {
			p = make(map[*graph.Node]int)
			nameFunctionIndex[nm] = p
		}
		if _, ok := p[n.Function]; !ok {
			p[n.Function] = len(p)
		}
	}

	for _, n := range g.Nodes {
		nm := names{n.Info.File, n.Info.Name}
		nodeName[n] = n.Info.Name
		if p := nameFunctionIndex[nm]; len(p) > 1 {
			// If there is more than one function, add suffix to disambiguate.
			nodeName[n] += fmt.Sprintf(" [%d/%d]", p[n.Function]+1, len(p))
		}
	}
	return nodeName
}

// callgrindName implements the callgrind naming compression scheme.
// For names not previously seen returns "(N) name", where N is a
// unique index. For names previously seen returns "(N)" where N is
// the index returned the first time.
func callgrindName(names map[string]int, name string) string {
	if name == "" {
		return ""
	}
	if id, ok := names[name]; ok {
		return fmt.Sprintf("(%d)", id)
	}
	// First occurrence: assign the next 1-based index and emit the full name.
	id := len(names) + 1
	names[name] = id
	return fmt.Sprintf("(%d) %s", id, name)
}

// callgrindAddress implements the callgrind subposition compression scheme if
// possible. If prevInfo != nil, it contains the previous address. The current
// address can be given relative to the previous address, with an explicit +/-
// to indicate it is relative, or * for the same address.
func callgrindAddress(prevInfo *graph.NodeInfo, curr uint64) string {
	abs := fmt.Sprintf("%#x", curr)
	if prevInfo == nil {
		return abs
	}

	prev := prevInfo.Address
	if prev == curr {
		return "*"
	}

	diff := int64(curr - prev)
	relative := fmt.Sprintf("%+d", diff)

	// Only bother to use the relative address if it is actually shorter.
	if len(relative) < len(abs) {
		return relative
	}

	return abs
}

// printTree prints a tree-based report in text form.
func printTree(w io.Writer, rpt *Report) error {
	const separator = "----------------------------------------------------------+-------------"
	const legend = " flat flat% sum% cum cum% calls calls% + context "

	g, origCount, droppedNodes, _ := rpt.newTrimmedGraph()
	rpt.selectOutputUnit(g)

	fmt.Fprintln(w, strings.Join(reportLabels(rpt, graphTotal(g), len(g.Nodes), origCount, droppedNodes, 0, false), "\n"))

	fmt.Fprintln(w, separator)
	fmt.Fprintln(w, legend)
	var flatSum int64

	rx := rpt.options.Symbol
	matched := 0
	for _, n := range g.Nodes {
		name, flat, cum := n.Info.PrintableName(), n.FlatValue(), n.CumValue()

		// Skip any entries that do not match the regexp (for the "peek" command).
		if rx != nil && !rx.MatchString(name) {
			continue
		}
		matched++

		fmt.Fprintln(w, separator)
		// Print incoming edges.
		inEdges := n.In.Sort()
		for _, in := range inEdges {
			var inline string
			if in.Inline {
				inline = " (inline)"
			}
			fmt.Fprintf(w, "%50s %s | %s%s\n", rpt.formatValue(in.Weight),
				measurement.Percentage(in.Weight, cum), in.Src.Info.PrintableName(), inline)
		}

		// Print current node.
		flatSum += flat
		fmt.Fprintf(w, "%10s %s %s %10s %s | %s\n",
			rpt.formatValue(flat),
			measurement.Percentage(flat, rpt.total),
			measurement.Percentage(flatSum, rpt.total),
			rpt.formatValue(cum),
			measurement.Percentage(cum, rpt.total),
			name)

		// Print outgoing edges.
		outEdges := n.Out.Sort()
		for _, out := range outEdges {
			var inline string
			if out.Inline {
				inline = " (inline)"
			}
			fmt.Fprintf(w, "%50s %s | %s%s\n", rpt.formatValue(out.Weight),
				measurement.Percentage(out.Weight, cum), out.Dest.Info.PrintableName(), inline)
		}
	}
	if len(g.Nodes) > 0 {
		fmt.Fprintln(w, separator)
	}
	if rx != nil && matched == 0 {
		return fmt.Errorf("no matches found for regexp: %s", rx)
	}
	return nil
}

// GetDOT returns a graph suitable for dot processing along with some
// configuration information.
func GetDOT(rpt *Report) (*graph.Graph, *graph.DotConfig) {
	g, origCount, droppedNodes, droppedEdges := rpt.newTrimmedGraph()
	rpt.selectOutputUnit(g)
	labels := reportLabels(rpt, graphTotal(g), len(g.Nodes), origCount, droppedNodes, droppedEdges, true)

	c := &graph.DotConfig{
		Title:       rpt.options.Title,
		Labels:      labels,
		FormatValue: rpt.formatValue,
		Total:       rpt.total,
	}
	return g, c
}

// printDOT prints an annotated callgraph in DOT format.
func printDOT(w io.Writer, rpt *Report) error {
	g, c := GetDOT(rpt)
	graph.ComposeDot(w, g, &graph.DotAttributes{}, c)
	return nil
}

// ProfileLabels returns printable labels for a profile.
func ProfileLabels(rpt *Report) []string {
	label := []string{}
	prof := rpt.prof
	o := rpt.options
	if len(prof.Mapping) > 0 {
		if prof.Mapping[0].File != "" {
			label = append(label, "File: "+filepath.Base(prof.Mapping[0].File))
		}
		if prof.Mapping[0].BuildID != "" {
			label = append(label, "Build ID: "+prof.Mapping[0].BuildID)
		}
	}
	// Only include comments that do not start with '#'.
	for _, c := range prof.Comments {
		if !strings.HasPrefix(c, "#") {
			label = append(label, c)
		}
	}
	if o.SampleType != "" {
		label = append(label, "Type: "+o.SampleType)
	}
	if url := prof.DocURL; url != "" {
		label = append(label, "Doc: "+url)
	}
	if prof.TimeNanos != 0 {
		const layout = "2006-01-02 15:04:05 MST"
		label = append(label, "Time: "+time.Unix(0, prof.TimeNanos).Format(layout))
	}
	if prof.DurationNanos != 0 {
		duration := measurement.Label(prof.DurationNanos, "nanoseconds")
		totalNanos, totalUnit := measurement.Scale(rpt.total, o.SampleUnit, "nanoseconds")
		var ratio string
		if totalUnit == "ns" && totalNanos != 0 {
			// Show what fraction of wall time was sampled, when samples are
			// time-based and comparable to the profile duration.
			ratio = "(" + measurement.Percentage(int64(totalNanos), prof.DurationNanos) + ")"
		}
		label = append(label, fmt.Sprintf("Duration: %s, Total samples = %s %s", duration, rpt.formatValue(rpt.total), ratio))
	}
	return label
}

// graphTotal returns the sum of the flat values of all graph nodes.
func graphTotal(g *graph.Graph) int64 {
	var total int64
	for _, n := range g.Nodes {
		total += n.FlatValue()
	}
	return total
}

// reportLabels returns printable labels for a report. Includes
// profileLabels.
func reportLabels(rpt *Report, shownTotal int64, nodeCount, origCount, droppedNodes, droppedEdges int, fullHeaders bool) []string {
	nodeFraction := rpt.options.NodeFraction
	edgeFraction := rpt.options.EdgeFraction

	var label []string
	if len(rpt.options.ProfileLabels) > 0 {
		label = append(label, rpt.options.ProfileLabels...)
	} else if fullHeaders || !rpt.options.CompactLabels {
		label = ProfileLabels(rpt)
	}

	if len(rpt.options.ActiveFilters) > 0 {
		activeFilters := legendActiveFilters(rpt.options.ActiveFilters)
		label = append(label, activeFilters...)
	}

	label = append(label, fmt.Sprintf("Showing nodes accounting for %s, %s of %s total", rpt.formatValue(shownTotal), strings.TrimSpace(measurement.Percentage(shownTotal, rpt.total)), rpt.formatValue(rpt.total)))

	if rpt.total != 0 {
		if droppedNodes > 0 {
			label = append(label, genLabel(droppedNodes, "node", "cum",
				rpt.formatValue(abs64(int64(float64(rpt.total)*nodeFraction)))))
		}
		if droppedEdges > 0 {
			label = append(label, genLabel(droppedEdges, "edge", "freq",
				rpt.formatValue(abs64(int64(float64(rpt.total)*edgeFraction)))))
		}
		if nodeCount > 0 && nodeCount < origCount {
			label = append(label, fmt.Sprintf("Showing top %d nodes out of %d",
				nodeCount, origCount))
		}
	}

	// Help new users understand the graph.
	// A new line is intentionally added here to better show this message.
	if fullHeaders {
		label = append(label, "\nSee https://git.io/JfYMW for how to read the graph")
	}

	return label
}

// legendActiveFilters renders the active filter list as legend lines,
// truncating overly long filter expressions.
func legendActiveFilters(activeFilters []string) []string {
	legendActiveFilters := make([]string, len(activeFilters)+1)
	legendActiveFilters[0] = "Active filters:"
	for i, s := range activeFilters {
		if len(s) > 80 {
			s = s[:80] + "…"
		}
		legendActiveFilters[i+1] = "   " + s
	}
	return legendActiveFilters
}

// genLabel formats a "Dropped N <kind>s (<metric> <= <threshold>)" line.
func genLabel(d int, n, l, f string) string {
	if d > 1 {
		n = n + "s"
	}
	return fmt.Sprintf("Dropped %d %s (%s <= %s)", d, n, l, f)
}

// New builds a new report indexing the sample values interpreting the
// samples with the provided function.
+func New(prof *profile.Profile, o *Options) *Report { + format := func(v int64) string { + if r := o.Ratio; r > 0 && r != 1 { + fv := float64(v) * r + v = int64(fv) + } + return measurement.ScaledLabel(v, o.SampleUnit, o.OutputUnit) + } + return &Report{prof, computeTotal(prof, o.SampleValue, o.SampleMeanDivisor), + o, format} +} + +// NewDefault builds a new report indexing the last sample value +// available. +func NewDefault(prof *profile.Profile, options Options) *Report { + index := len(prof.SampleType) - 1 + o := &options + if o.Title == "" && len(prof.Mapping) > 0 && prof.Mapping[0].File != "" { + o.Title = filepath.Base(prof.Mapping[0].File) + } + o.SampleType = prof.SampleType[index].Type + o.SampleUnit = strings.ToLower(prof.SampleType[index].Unit) + o.SampleValue = func(v []int64) int64 { + return v[index] + } + return New(prof, o) +} + +// computeTotal computes the sum of the absolute value of all sample values. +// If any samples have label indicating they belong to the diff base, then the +// total will only include samples with that label. +func computeTotal(prof *profile.Profile, value, meanDiv func(v []int64) int64) int64 { + var div, total, diffDiv, diffTotal int64 + for _, sample := range prof.Sample { + var d, v int64 + v = value(sample.Value) + if meanDiv != nil { + d = meanDiv(sample.Value) + } + if v < 0 { + v = -v + } + total += v + div += d + if sample.DiffBaseSample() { + diffTotal += v + diffDiv += d + } + } + if diffTotal > 0 { + total = diffTotal + div = diffDiv + } + if div != 0 { + return total / div + } + return total +} + +// Report contains the data and associated routines to extract a +// report from a profile. +type Report struct { + prof *profile.Profile + total int64 + options *Options + formatValue func(int64) string +} + +// Total returns the total number of samples in a report. +func (rpt *Report) Total() int64 { return rpt.total } + +// OutputFormat returns the output format for the report. 
+func (rpt *Report) OutputFormat() int { return rpt.options.OutputFormat } + +// DocURL returns the documentation URL for Report, or "" if not available. +func (rpt *Report) DocURL() string { + u := rpt.prof.DocURL + if u == "" || !absoluteURL(u) { + return "" + } + return u +} + +func absoluteURL(str string) bool { + // Avoid returning relative URLs to prevent unwanted local navigation + // within pprof server. + u, err := url.Parse(str) + return err == nil && (u.Scheme == "https" || u.Scheme == "http") +} + +func abs64(i int64) int64 { + if i < 0 { + return -i + } + return i +} diff --git a/plugin/debug/pkg/internal/report/report_test.go b/plugin/debug/pkg/internal/report/report_test.go new file mode 100644 index 0000000..69dab84 --- /dev/null +++ b/plugin/debug/pkg/internal/report/report_test.go @@ -0,0 +1,617 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package report + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "regexp" + "runtime" + "slices" + "strings" + "testing" + "time" + + "m7s.live/v5/plugin/debug/pkg/internal/binutils" + "m7s.live/v5/plugin/debug/pkg/internal/graph" + "m7s.live/v5/plugin/debug/pkg/internal/proftest" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +type testcase struct { + rpt *Report + want string +} + +func TestSource(t *testing.T) { + const path = "testdata/" + + sampleValue1 := func(v []int64) int64 { + return v[1] + } + + for _, tc := range []testcase{ + { + rpt: New( + testProfile.Copy(), + &Options{ + OutputFormat: List, + Symbol: regexp.MustCompile(`.`), + TrimPath: "/some/path", + + SampleValue: sampleValue1, + SampleUnit: testProfile.SampleType[1].Unit, + }, + ), + want: path + "source.rpt", + }, + { + rpt: New( + testProfile.Copy(), + &Options{ + OutputFormat: Dot, + CallTree: true, + Symbol: regexp.MustCompile(`.`), + TrimPath: "/some/path", + + SampleValue: sampleValue1, + SampleUnit: testProfile.SampleType[1].Unit, + }, + ), + want: path + "source.dot", + }, + } { + var b bytes.Buffer + if err := Generate(&b, tc.rpt, &binutils.Binutils{}); err != nil { + t.Fatalf("%s: %v", tc.want, err) + } + + gold, err := os.ReadFile(tc.want) + if err != nil { + t.Fatalf("%s: %v", tc.want, err) + } + if runtime.GOOS == "windows" { + if tc.rpt.options.OutputFormat == Dot { + // The .dot test has the paths inside strings, so \ must be escaped. + gold = bytes.Replace(gold, []byte("testdata/"), []byte(`testdata\\`), -1) + } else { + gold = bytes.Replace(gold, []byte("testdata/"), []byte(`testdata\`), -1) + } + } + if string(b.String()) != string(gold) { + d, err := proftest.Diff(gold, b.Bytes()) + if err != nil { + t.Fatalf("%s: %v", "source", err) + } + t.Error("source" + "\n" + string(d) + "\n" + "gold:\n" + tc.want) + } + } +} + +// TestFilter ensures that commands with a regexp filter argument return an +// error if there are no results. 
func TestFilter(t *testing.T) {
	// A symbol regexp that matches nothing in testProfile.
	const filter = "doesNotExist"

	tests := []struct {
		name   string
		format int
	}{
		{
			name:   "list",
			format: List,
		},
		{
			name:   "disasm",
			format: Dis,
		},
		{
			// N.B. Tree with a Symbol is "peek".
			name:   "peek",
			format: Tree,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			rpt := New(testProfile.Copy(), &Options{
				OutputFormat: tc.format,
				Symbol:       regexp.MustCompile(filter),
				SampleValue:  func(v []int64) int64 { return v[1] },
				SampleUnit:   testProfile.SampleType[1].Unit,
			})

			var buf bytes.Buffer
			err := Generate(&buf, rpt, &binutils.Binutils{})
			if err == nil {
				t.Fatalf("Generate got nil, want error; buf = %s", buf.String())
			}
			// The error message should mention the filter that failed to match.
			if !strings.Contains(err.Error(), filter) {
				t.Errorf("Error got %v, want it to contain %q", err, filter)
			}
		})
	}
}

// testM contains mappings for fake profiles used in tests.
var testM = []*profile.Mapping{
	{
		ID:              1,
		HasFunctions:    true,
		HasFilenames:    true,
		HasLineNumbers:  true,
		HasInlineFrames: true,
	},
}

// testF contains functions for fake profiles used in tests.
var testF = []*profile.Function{
	{
		ID:       1,
		Name:     "main",
		Filename: "testdata/source1",
	},
	{
		ID:       2,
		Name:     "foo",
		Filename: "testdata/source1",
	},
	{
		ID:       3,
		Name:     "bar",
		Filename: "testdata/source1",
	},
	{
		ID:       4,
		Name:     "tee",
		Filename: "/some/path/testdata/source2",
	},
}

// testL contains locations for fake profiles used in tests.
var testL = []*profile.Location{
	{
		ID:      1,
		Mapping: testM[0],
		Line: []profile.Line{
			{
				Function: testF[0], // main
				Line:     2,
				Column:   2,
			},
		},
	},
	{
		ID:      2,
		Mapping: testM[0],
		Line: []profile.Line{
			{
				Function: testF[1], // foo
				Line:     4,
				Column:   4,
			},
		},
	},
	{
		ID:      3,
		Mapping: testM[0],
		Line: []profile.Line{
			{
				Function: testF[2], // bar
				Line:     10,
			},
		},
	},
	{
		ID:      4,
		Mapping: testM[0],
		Line: []profile.Line{
			{
				Function: testF[3], // tee
				Line:     2,
			},
		},
	},
	{
		ID:      5,
		Mapping: testM[0],
		Line: []profile.Line{
			{
				Function: testF[3], // tee
				Line:     8,
			},
		},
	},
	{
		// Location with an inlined call: tee inlined into bar
		// (innermost frame first).
		ID:      6,
		Mapping: testM[0],
		Line: []profile.Line{
			{
				Function: testF[3], // tee
				Line:     7,
			},
			{
				Function: testF[2], // bar
				Line:     6,
			},
		},
	},
}

// testSample returns a profile sample with specified value and stack.
// Note: callees come first in sample stacks.
func testSample(value int64, locs ...*profile.Location) *profile.Sample {
	return &profile.Sample{
		Value:    []int64{value},
		Location: locs,
	}
}

// makeTestProfile returns a profile with specified samples that uses testL/testF/testM
// (defined in report_test.go).
func makeTestProfile(samples ...*profile.Sample) *profile.Profile {
	return &profile.Profile{
		SampleType: []*profile.ValueType{{Type: "samples", Unit: "count"}},
		Sample:     samples,
		Location:   testL,
		Function:   testF,
		Mapping:    testM,
	}
}

// testProfile contains a fake profile used in tests.
// Various report methods modify profiles so tests should operate on testProfile.Copy().
var testProfile = &profile.Profile{
	PeriodType:    &profile.ValueType{Type: "cpu", Unit: "millisecond"},
	Period:        10,
	DurationNanos: 10e9,
	SampleType: []*profile.ValueType{
		{Type: "samples", Unit: "count"},
		{Type: "cpu", Unit: "cycles"},
	},
	Sample: []*profile.Sample{
		{
			Location: []*profile.Location{testL[0]},
			Value:    []int64{1, 1},
		},
		{
			Location: []*profile.Location{testL[2], testL[1], testL[0]},
			Value:    []int64{1, 10},
		},
		{
			Location: []*profile.Location{testL[4], testL[2], testL[0]},
			Value:    []int64{1, 100},
		},
		{
			Location: []*profile.Location{testL[3], testL[0]},
			Value:    []int64{1, 1000},
		},
		{
			Location: []*profile.Location{testL[4], testL[3], testL[0]},
			Value:    []int64{1, 10000},
		},
	},
	Location: testL,
	Function: testF,
	Mapping:  testM,
}

// TestDisambiguation checks that nodes sharing a name are disambiguated
// with an [i/n] suffix keyed on their containing function.
func TestDisambiguation(t *testing.T) {
	parent1 := &graph.Node{Info: graph.NodeInfo{Name: "parent1"}}
	parent2 := &graph.Node{Info: graph.NodeInfo{Name: "parent2"}}
	child1 := &graph.Node{Info: graph.NodeInfo{Name: "child"}, Function: parent1}
	child2 := &graph.Node{Info: graph.NodeInfo{Name: "child"}, Function: parent2}
	child3 := &graph.Node{Info: graph.NodeInfo{Name: "child"}, Function: parent1}
	sibling := &graph.Node{Info: graph.NodeInfo{Name: "sibling"}, Function: parent1}

	n := []*graph.Node{parent1, parent2, child1, child2, child3, sibling}

	wanted := map[*graph.Node]string{
		parent1: "parent1",
		parent2: "parent2",
		child1:  "child [1/2]",
		child2:  "child [2/2]",
		child3:  "child [1/2]", // same function as child1, so same index
		sibling: "sibling",
	}

	g := &graph.Graph{Nodes: n}

	names := getDisambiguatedNames(g)

	for node, want := range wanted {
		if got := names[node]; got != want {
			t.Errorf("name %s, got %s, want %s", node.Info.Name, got, want)
		}
	}
}

// TestFunctionMap verifies findOrAdd deduplicates by (name, file) and
// assigns sequential IDs to new entries.
func TestFunctionMap(t *testing.T) {

	fm := make(functionMap)
	nodes := []graph.NodeInfo{
		{Name: "fun1"},
		{Name: "fun2", File: "filename"},
		{Name: "fun1"},
		{Name: "fun2", File: "filename2"},
	}

	want := []struct {
		wantFunction profile.Function
		wantAdded    bool
	}{
		{profile.Function{ID: 1, Name: "fun1"}, true},
		{profile.Function{ID: 2, Name: "fun2", Filename: "filename"}, true},
		{profile.Function{ID: 1, Name: "fun1"}, false}, // duplicate: existing entry returned
		{profile.Function{ID: 3, Name: "fun2", Filename: "filename2"}, true},
	}

	for i, tc := range nodes {
		gotFunc, gotAdded := fm.findOrAdd(tc)
		if got, want := gotFunc, want[i].wantFunction; *got != want {
			t.Errorf("%d: got %v, want %v", i, got, want)
		}
		if got, want := gotAdded, want[i].wantAdded; got != want {
			t.Errorf("%d: got %v, want %v", i, got, want)
		}
	}
}

// TestLegendActiveFilters checks that over-long filter descriptions are
// truncated with an ellipsis in the legend.
func TestLegendActiveFilters(t *testing.T) {
	activeFilterInput := []string{
		"focus=123|456|789|101112|131415|161718|192021|222324|252627|282930|313233|343536|363738|acbdefghijklmnop",
		"show=short filter",
	}
	expectedLegendActiveFilter := []string{
		"Active filters:",
		"   focus=123|456|789|101112|131415|161718|192021|222324|252627|282930|313233|343536…",
		"   show=short filter",
	}
	legendActiveFilter := legendActiveFilters(activeFilterInput)
	if len(legendActiveFilter) != len(expectedLegendActiveFilter) {
		t.Errorf("wanted length %v got length %v", len(expectedLegendActiveFilter), len(legendActiveFilter))
	}
	for i := range legendActiveFilter {
		if legendActiveFilter[i] != expectedLegendActiveFilter[i] {
			t.Errorf("%d: want \"%v\", got \"%v\"", i, expectedLegendActiveFilter[i], legendActiveFilter[i])
		}
	}
}

// TestComputeTotal exercises total computation with and without
// pprof::base diff-base samples and with negative sample values.
func TestComputeTotal(t *testing.T) {
	p1 := testProfile.Copy()
	p1.Sample = []*profile.Sample{
		{
			Location: []*profile.Location{testL[0]},
			Value:    []int64{1, 1},
		},
		{
			Location: []*profile.Location{testL[2], testL[1], testL[0]},
			Value:    []int64{1, 10},
		},
		{
			Location: []*profile.Location{testL[4], testL[2], testL[0]},
			Value:    []int64{1, 100},
		},
	}

	p2 := testProfile.Copy()
	p2.Sample = []*profile.Sample{
		{
			Location: []*profile.Location{testL[0]},
			Value:    []int64{1, 1},
		},
		{
			Location: []*profile.Location{testL[2], testL[1], testL[0]},
			Value:    []int64{1, -10},
		},
		{
			Location: []*profile.Location{testL[4], testL[2], testL[0]},
			Value:    []int64{1, 100},
		},
	}

	p3 := testProfile.Copy()
	p3.Sample = []*profile.Sample{
		{
			Location: []*profile.Location{testL[0]},
			Value:    []int64{10000, 1},
		},
		{
			Location: []*profile.Location{testL[2], testL[1], testL[0]},
			Value:    []int64{-10, 3},
			Label:    map[string][]string{"pprof::base": {"true"}},
		},
		{
			Location: []*profile.Location{testL[2], testL[1], testL[0]},
			Value:    []int64{1000, -10},
		},
		{
			Location: []*profile.Location{testL[2], testL[1], testL[0]},
			Value:    []int64{-9000, 3},
			Label:    map[string][]string{"pprof::base": {"true"}},
		},
		{
			Location: []*profile.Location{testL[2], testL[1], testL[0]},
			Value:    []int64{-1, 3},
			Label:    map[string][]string{"pprof::base": {"true"}},
		},
		{
			Location: []*profile.Location{testL[4], testL[2], testL[0]},
			Value:    []int64{100, 100},
		},
		{
			Location: []*profile.Location{testL[2], testL[1], testL[0]},
			Value:    []int64{100, 3},
			Label:    map[string][]string{"pprof::base": {"true"}},
		},
	}

	testcases := []struct {
		desc           string
		prof           *profile.Profile
		value, meanDiv func(v []int64) int64
		wantTotal      int64
	}{
		{
			desc: "no diff base, all positive values, index 1",
			prof: p1,
			value: func(v []int64) int64 {
				return v[0]
			},
			wantTotal: 3,
		},
		{
			desc: "no diff base, all positive values, index 2",
			prof: p1,
			value: func(v []int64) int64 {
				return v[1]
			},
			wantTotal: 111,
		},
		{
			desc: "no diff base, some negative values",
			prof: p2,
			value: func(v []int64) int64 {
				return v[1]
			},
			wantTotal: 111,
		},
		{
			desc: "diff base, some negative values",
			prof: p3,
			value: func(v []int64) int64 {
				return v[0]
			},
			wantTotal: 9111,
		},
	}

	for _, tc := range testcases {
		t.Run(tc.desc, func(t *testing.T) {
			if gotTotal := computeTotal(tc.prof, tc.value, tc.meanDiv); gotTotal != tc.wantTotal {
				t.Errorf("got total %d, want %v", gotTotal, tc.wantTotal)
			}
		})
	}
}

// TestPrintAssemblyErrorMessage checks the error messages produced when a
// symbol or address filter cannot be matched against the binary/profile.
func TestPrintAssemblyErrorMessage(t *testing.T) {
	profile := readProfile(filepath.Join("testdata", "sample.cpu"), t)

	for _, tc := range []struct {
		desc   string
		symbol string
		want   string
	}{
		{
			desc:   "no matched symbol in binary",
			symbol: "symbol-not-exist",
			want:   "no matches found for regexp symbol-not-exist",
		},
		{
			desc:   "no matched address in binary",
			symbol: "0xffffaaaa",
			want:   "no matches found for address 0xffffaaaa",
		},
		{
			desc:   "matched address in binary but not in the profile",
			symbol: "0x400000",
			want:   "address 0x400000 found in binary, but the corresponding symbols do not have samples in the profile",
		},
	} {
		rpt := New(
			profile.Copy(),
			&Options{
				OutputFormat: List,
				Symbol:       regexp.MustCompile(tc.symbol),
				SampleValue: func(v []int64) int64 {
					return v[1]
				},
				SampleUnit: profile.SampleType[1].Unit,
			},
		)

		if err := PrintAssembly(os.Stdout, rpt, &binutils.Binutils{}, -1); err == nil || err.Error() != tc.want {
			t.Errorf(`Got "%v", want %q`, err, tc.want)
		}
	}
}

// TestDocURL verifies that only absolute http/https doc URLs are surfaced.
func TestDocURL(t *testing.T) {
	type testCase struct {
		input string
		want  string
	}
	for name, c := range map[string]testCase{
		"empty":    {"", ""},
		"http":     {"http://example.com/pprof-help", "http://example.com/pprof-help"},
		"https":    {"https://example.com/pprof-help", "https://example.com/pprof-help"},
		"relative": {"/foo", ""},
		"nonhttp":  {"mailto:nobody@example.com", ""},
	} {
		t.Run(name, func(t *testing.T) {
			profile := testProfile.Copy()
			profile.DocURL = c.input
			rpt := New(profile, &Options{
				OutputFormat: Dot,
				Symbol:       regexp.MustCompile(`.`),
				TrimPath:     "/some/path",
				SampleValue:  func(v []int64) int64 { return v[1] },
				SampleUnit:   testProfile.SampleType[1].Unit,
			})
			if got := rpt.DocURL(); got != c.want {
				t.Errorf("bad doc URL %q, expecting %q", got, c.want)
			}
		})
	}
}

// TestDocURLInLabels verifies that the profile's DocURL shows up in the
// report labels.
func TestDocURLInLabels(t *testing.T) {
	const url = "http://example.com/pprof-help"
	profile := testProfile.Copy()
	profile.DocURL = url
	rpt := New(profile, &Options{
		OutputFormat: Text,
		Symbol:       regexp.MustCompile(`.`),
		TrimPath:     "/some/path",
		SampleValue:  func(v []int64) int64 { return v[1] },
		SampleUnit:   testProfile.SampleType[1].Unit,
	})

	labels := fmt.Sprintf("%v", ProfileLabels(rpt))
	if !strings.Contains(labels, url) {
		t.Errorf("expected URL %q not found in %s", url, labels)
	}
}

// TestProfileLabels checks timestamp formatting in profile labels.
func TestProfileLabels(t *testing.T) {
	// Force the local timezone to UTC for the duration of this function to get a
	// predictable result out of timezone printing.
	defer func(prev *time.Location) { time.Local = prev }(time.Local)
	time.Local = time.UTC

	profile := testProfile.Copy()
	profile.TimeNanos = time.Unix(131, 0).UnixNano()
	rpt := New(profile, &Options{
		SampleValue: func(v []int64) int64 { return v[1] },
	})

	const want = "Time: 1970-01-01 00:02:11 UTC"
	if labels := ProfileLabels(rpt); !slices.Contains(labels, want) {
		t.Errorf("wanted to find a label containing %q, but found none in %v", want, labels)
	}
}
diff --git a/plugin/debug/pkg/internal/report/shortnames.go b/plugin/debug/pkg/internal/report/shortnames.go
new file mode 100644
index 0000000..7ea67a0
--- /dev/null
+++ b/plugin/debug/pkg/internal/report/shortnames.go
@@ -0,0 +1,59 @@
// Copyright 2022 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package report

import (
	"path/filepath"
	"regexp"

	"m7s.live/v5/plugin/debug/pkg/internal/graph"
)

var (
	// sepRE splits symbol names at C++ "::" scopes and "." separators.
	sepRE = regexp.MustCompile(`::|\.`)
	// fileSepRE splits slash-separated file paths.
	fileSepRE = regexp.MustCompile(`/`)
)

// fileNameSuffixes returns a non-empty sequence of shortened file names
// (in decreasing preference) that can be used to represent name.
func fileNameSuffixes(name string) []string {
	if name == "" {
		// Avoid returning "." when symbol info is missing
		return []string{""}
	}
	return allSuffixes(filepath.ToSlash(filepath.Clean(name)), fileSepRE)
}

// shortNameList returns a non-empty sequence of shortened names
// (in decreasing preference) that can be used to represent name.
func shortNameList(name string) []string {
	name = graph.ShortenFunctionName(name)
	return allSuffixes(name, sepRE)
}

// allSuffixes returns a list of suffixes (in order of decreasing length)
// found by splitting at re.
func allSuffixes(name string, re *regexp.Regexp) []string {
	seps := re.FindAllStringIndex(name, -1)
	result := make([]string, 0, len(seps)+1)
	result = append(result, name)
	for _, sep := range seps {
		// Suffix starting just after sep
		if sep[1] < len(name) {
			result = append(result, name[sep[1]:])
		}
	}
	return result
}
diff --git a/plugin/debug/pkg/internal/report/shortnames_test.go b/plugin/debug/pkg/internal/report/shortnames_test.go
new file mode 100644
index 0000000..820cd4d
--- /dev/null
+++ b/plugin/debug/pkg/internal/report/shortnames_test.go
@@ -0,0 +1,60 @@
package report

import (
	"reflect"
	"testing"
)

// TestShortNames checks suffix generation for symbol names split on
// "::" and "." separators.
func TestShortNames(t *testing.T) {
	type testCase struct {
		name string
		in   string
		out  []string
	}
	test := func(name, in string, out ...string) testCase {
		return testCase{name, in, out}
	}

	for _, c := range []testCase{
		test("empty", "", ""),
		test("simple", "foo", "foo"),
		test("trailingsep", "foo.bar.", "foo.bar.", "bar."),
		test("cplusplus", "a::b::c", "a::b::c", "b::c", "c"),
		test("dotted", "a.b.c", "a.b.c", "b.c", "c"),
		test("mixed_separators", "a::b.c::d", "a::b.c::d", "b.c::d", "c::d", "d"),
		test("call_operator", "foo::operator()", "foo::operator()", "operator()"),
	} {
		t.Run(c.name, func(t *testing.T) {
			got := shortNameList(c.in)
			if !reflect.DeepEqual(c.out, got) {
				t.Errorf("shortNameList(%q) = %#v, expecting %#v", c.in, got, c.out)
			}
		})
	}
}

// TestFileNameSuffixes checks suffix generation for slash-separated paths.
func TestFileNameSuffixes(t *testing.T) {
	type testCase struct {
		name string
		in   string
		out  []string
	}
	test := func(name, in string, out ...string) testCase {
		return testCase{name, in, out}
	}

	for _, c := range []testCase{
		test("empty", "", ""),
		test("simple", "foo", "foo"),
		test("manypaths", "a/b/c", "a/b/c", "b/c", "c"),
		test("leading", "/a/b", "/a/b", "a/b", "b"),
		test("trailing", "a/b", "a/b", "b"),
	} {
		t.Run(c.name, func(t *testing.T) {
			got := fileNameSuffixes(c.in)
			if !reflect.DeepEqual(c.out, got) {
				t.Errorf("fileNameSuffixes(%q) = %#v, expecting %#v", c.in, got, c.out)
			}
		})
	}
}
diff --git a/plugin/debug/pkg/internal/report/source.go b/plugin/debug/pkg/internal/report/source.go
new file mode 100644
index 0000000..5c8a85b
--- /dev/null
+++ b/plugin/debug/pkg/internal/report/source.go
@@ -0,0 +1,1117 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package report

// This file contains routines related to the generation of annotated
// source listings.
import (
	"bufio"
	"fmt"
	"html/template"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"sort"
	"strconv"
	"strings"

	"m7s.live/v5/plugin/debug/pkg/internal/graph"
	"m7s.live/v5/plugin/debug/pkg/internal/measurement"
	"m7s.live/v5/plugin/debug/pkg/internal/plugin"
	"m7s.live/v5/plugin/debug/pkg/profile"
)

// printSource prints an annotated source listing, include all
// functions with samples that match the regexp rpt.options.symbol.
// The sources are sorted by function name and then by filename to
// eliminate potential nondeterminism.
func printSource(w io.Writer, rpt *Report) error {
	o := rpt.options
	g := rpt.newGraph(nil)

	// Identify all the functions that match the regexp provided.
	// Group nodes for each matching function.
	var functions graph.Nodes
	functionNodes := make(map[string]graph.Nodes)
	for _, n := range g.Nodes {
		if !o.Symbol.MatchString(n.Info.Name) {
			continue
		}
		if functionNodes[n.Info.Name] == nil {
			functions = append(functions, n)
		}
		functionNodes[n.Info.Name] = append(functionNodes[n.Info.Name], n)
	}
	functions.Sort(graph.NameOrder)

	if len(functionNodes) == 0 {
		return fmt.Errorf("no matches found for regexp: %s", o.Symbol)
	}

	sourcePath := o.SourcePath
	if sourcePath == "" {
		// Default to resolving source files relative to the working directory.
		wd, err := os.Getwd()
		if err != nil {
			return fmt.Errorf("could not stat current dir: %v", err)
		}
		sourcePath = wd
	}
	reader := newSourceReader(sourcePath, o.TrimPath)

	fmt.Fprintf(w, "Total: %s\n", rpt.formatValue(rpt.total))
	for _, fn := range functions {
		name := fn.Info.Name

		// Identify all the source files associated to this function.
		// Group nodes for each source file.
		var sourceFiles graph.Nodes
		fileNodes := make(map[string]graph.Nodes)
		for _, n := range functionNodes[name] {
			if n.Info.File == "" {
				continue
			}
			if fileNodes[n.Info.File] == nil {
				sourceFiles = append(sourceFiles, n)
			}
			fileNodes[n.Info.File] = append(fileNodes[n.Info.File], n)
		}

		if len(sourceFiles) == 0 {
			fmt.Fprintf(w, "No source information for %s\n", name)
			continue
		}

		sourceFiles.Sort(graph.FileOrder)

		// Print each file associated with this function.
		for _, fl := range sourceFiles {
			filename := fl.Info.File
			fns := fileNodes[filename]
			flatSum, cumSum := fns.Sum()

			// The ROUTINE header is printed even if reading the source
			// failed; the error is then reported inline below it.
			fnodes, _, err := getSourceFromFile(filename, reader, fns, 0, 0)
			fmt.Fprintf(w, "ROUTINE ======================== %s in %s\n", name, filename)
			fmt.Fprintf(w, "%10s %10s (flat, cum) %s of Total\n",
				rpt.formatValue(flatSum), rpt.formatValue(cumSum),
				measurement.Percentage(cumSum, rpt.total))

			if err != nil {
				fmt.Fprintf(w, " Error: %v\n", err)
				continue
			}

			for _, fn := range fnodes {
				fmt.Fprintf(w, "%10s %10s %6d:%s\n", valueOrDot(fn.Flat, rpt), valueOrDot(fn.Cum, rpt), fn.Info.Lineno, fn.Info.Name)
			}
		}
	}
	return nil
}

// sourcePrinter holds state needed for generating source+asm HTML listing.
type sourcePrinter struct {
	reader     *sourceReader
	synth      *synthCode
	objectTool plugin.ObjTool
	objects    map[string]plugin.ObjFile // Opened object files
	sym        *regexp.Regexp            // May be nil
	files      map[string]*sourceFile    // Set of files to print.
	insts      map[uint64]instructionInfo // Instructions of interest (keyed by address).

	// Set of function names that we are interested in (because they had
	// a sample and match sym).
	interest map[string]bool

	// Mapping from system function names to printable names.
	prettyNames map[string]string
}

// addrInfo holds information for an address we are interested in.
type addrInfo struct {
	loc *profile.Location // Always non-nil
	obj plugin.ObjFile    // May be nil
}

// instructionInfo holds collected information for an instruction.
type instructionInfo struct {
	objAddr   uint64 // Address in object file (with base subtracted out)
	length    int    // Instruction length in bytes
	disasm    string // Disassembly of instruction
	file      string // For top-level function in which instruction occurs
	line      int    // For top-level function in which instruction occurs
	flat, cum int64  // Samples to report (divisor already applied)
}

// sourceFile contains collected information for files we will print.
type sourceFile struct {
	fname    string
	cum      int64
	flat     int64
	lines    map[int][]sourceInst // Instructions to show per line
	funcName map[int]string       // Function name per line
}

// sourceInst holds information for an instruction to be displayed.
type sourceInst struct {
	addr  uint64
	stack []callID // Inlined call-stack
}

// sourceFunction contains information for a contiguous range of lines per function we
// will print.
type sourceFunction struct {
	name       string
	begin, end int // Line numbers (end is not included in the range)
	flat, cum  int64
}

// addressRange is a range of addresses plus the object file that contains it.
type addressRange struct {
	begin, end uint64
	obj        plugin.ObjFile
	mapping    *profile.Mapping
	score      int64 // Used to order ranges for processing
}

// WebListData holds the data needed to generate HTML source code listing.
type WebListData struct {
	Total string
	Files []WebListFile
}

// WebListFile holds the per-file information for HTML source code listing.
type WebListFile struct {
	Funcs []WebListFunc
}

// WebListFunc holds the per-function information for HTML source code listing.
type WebListFunc struct {
	Name       string
	File       string
	Flat       string
	Cumulative string
	Percent    string
	Lines      []WebListLine
}

// WebListLine holds the per-source-line information for HTML source code listing.
type WebListLine struct {
	SrcLine      string
	HTMLClass    string
	Line         int
	Flat         string
	Cumulative   string
	Instructions []WebListInstruction
}

// WebListInstruction holds the per-instruction information for HTML source code listing.
type WebListInstruction struct {
	NewBlock     bool // Insert marker that indicates separation from previous block
	Flat         string
	Cumulative   string
	Synthetic    bool
	Address      uint64
	Disasm       string
	FileLine     string
	InlinedCalls []WebListCall
}

// WebListCall holds the per-inlined-call information for HTML source code listing.
type WebListCall struct {
	SrcLine  string
	FileBase string
	Line     int
}

// MakeWebList returns an annotated source listing of rpt.
// rpt.prof should contain inlined call info.
func MakeWebList(rpt *Report, obj plugin.ObjTool, maxFiles int) (WebListData, error) {
	sourcePath := rpt.options.SourcePath
	if sourcePath == "" {
		wd, err := os.Getwd()
		if err != nil {
			return WebListData{}, fmt.Errorf("could not stat current dir: %v", err)
		}
		sourcePath = wd
	}
	sp := newSourcePrinter(rpt, obj, sourcePath)
	if len(sp.interest) == 0 {
		return WebListData{}, fmt.Errorf("no matches found for regexp: %s", rpt.options.Symbol)
	}
	defer sp.close()
	return sp.generate(maxFiles, rpt), nil
}

// newSourcePrinter scans the profile's samples, records which functions
// match rpt.options.Symbol, and collects per-address flat/cum counts.
func newSourcePrinter(rpt *Report, obj plugin.ObjTool, sourcePath string) *sourcePrinter {
	sp := &sourcePrinter{
		reader:      newSourceReader(sourcePath, rpt.options.TrimPath),
		synth:       newSynthCode(rpt.prof.Mapping),
		objectTool:  obj,
		objects:     map[string]plugin.ObjFile{},
		sym:         rpt.options.Symbol,
		files:       map[string]*sourceFile{},
		insts:       map[uint64]instructionInfo{},
		prettyNames: map[string]string{},
		interest:    map[string]bool{},
	}

	// If the regexp source can be parsed as an address, also match
	// functions that land on that address.
	var address *uint64
	if sp.sym != nil {
		if hex, err := strconv.ParseUint(sp.sym.String(), 0, 64); err == nil {
			address = &hex
		}
	}

	addrs := map[uint64]addrInfo{}
	flat := map[uint64]int64{}
	cum := map[uint64]int64{}

	// Record an interest in the function corresponding to lines[index].
	markInterest := func(addr uint64, loc *profile.Location, index int) {
		fn := loc.Line[index]
		if fn.Function == nil {
			return
		}
		sp.interest[fn.Function.Name] = true
		sp.interest[fn.Function.SystemName] = true
		if _, ok := addrs[addr]; !ok {
			addrs[addr] = addrInfo{loc, sp.objectFile(loc.Mapping)}
		}
	}

	// See if sp.sym matches line.
	matches := func(line profile.Line) bool {
		if line.Function == nil {
			return false
		}
		return sp.sym.MatchString(line.Function.Name) ||
			sp.sym.MatchString(line.Function.SystemName) ||
			sp.sym.MatchString(line.Function.Filename)
	}

	// Extract sample counts and compute set of interesting functions.
	for _, sample := range rpt.prof.Sample {
		value := rpt.options.SampleValue(sample.Value)
		if rpt.options.SampleMeanDivisor != nil {
			div := rpt.options.SampleMeanDivisor(sample.Value)
			if div != 0 {
				value /= div
			}
		}

		// Find call-sites matching sym.
		for i := len(sample.Location) - 1; i >= 0; i-- {
			loc := sample.Location[i]
			for _, line := range loc.Line {
				if line.Function == nil {
					continue
				}
				sp.prettyNames[line.Function.SystemName] = line.Function.Name
			}

			addr := loc.Address
			if addr == 0 {
				// Some profiles are missing valid addresses.
				addr = sp.synth.address(loc)
			}

			cum[addr] += value
			if i == 0 {
				flat[addr] += value
			}

			if sp.sym == nil || (address != nil && addr == *address) {
				// Interested in top-level entry of stack.
				if len(loc.Line) > 0 {
					markInterest(addr, loc, len(loc.Line)-1)
				}
				continue
			}

			// Search in inlined stack for a match.
			matchFile := (loc.Mapping != nil && sp.sym.MatchString(loc.Mapping.File))
			for j, line := range loc.Line {
				if (j == 0 && matchFile) || matches(line) {
					markInterest(addr, loc, j)
				}
			}
		}
	}

	sp.expandAddresses(rpt, addrs, flat)
	sp.initSamples(flat, cum)
	return sp
}

// close releases all object files opened while collecting addresses.
func (sp *sourcePrinter) close() {
	for _, objFile := range sp.objects {
		if objFile != nil {
			objFile.Close()
		}
	}
}

// expandAddresses disassembles the address ranges covering the sampled
// addresses and records per-instruction info and inlined call stacks.
func (sp *sourcePrinter) expandAddresses(rpt *Report, addrs map[uint64]addrInfo, flat map[uint64]int64) {
	// We found interesting addresses (ones with non-zero samples) above.
	// Get covering address ranges and disassemble the ranges.
	ranges, unprocessed := sp.splitIntoRanges(rpt.prof, addrs, flat)
	sp.handleUnprocessed(addrs, unprocessed)

	// Trim ranges if there are too many.
	const maxRanges = 25
	sort.Slice(ranges, func(i, j int) bool {
		return ranges[i].score > ranges[j].score
	})
	if len(ranges) > maxRanges {
		ranges = ranges[:maxRanges]
	}

	for _, r := range ranges {
		objBegin, err := r.obj.ObjAddr(r.begin)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to compute objdump address for range start %x: %v\n", r.begin, err)
			continue
		}
		objEnd, err := r.obj.ObjAddr(r.end)
		if err != nil {
			fmt.Fprintf(os.Stderr, "Failed to compute objdump address for range end %x: %v\n", r.end, err)
			continue
		}
		base := r.begin - objBegin
		insts, err := sp.objectTool.Disasm(r.mapping.File, objBegin, objEnd, rpt.options.IntelSyntax)
		if err != nil {
			// TODO(sanjay): Report that the covered addresses are missing.
			continue
		}

		var lastFrames []plugin.Frame
		var lastAddr, maxAddr uint64
		for i, inst := range insts {
			addr := inst.Addr + base

			// Guard against duplicate output from Disasm.
			if addr <= maxAddr {
				continue
			}
			maxAddr = addr

			length := 1
			if i+1 < len(insts) && insts[i+1].Addr > inst.Addr {
				// Extend to next instruction.
				length = int(insts[i+1].Addr - inst.Addr)
			}

			// Get inlined-call-stack for address.
			frames, err := r.obj.SourceLine(addr)
			if err != nil {
				// Construct a frame from disassembler output.
				frames = []plugin.Frame{{Func: inst.Function, File: inst.File, Line: inst.Line}}
			}

			x := instructionInfo{objAddr: inst.Addr, length: length, disasm: inst.Text}
			if len(frames) > 0 {
				// We could consider using the outer-most caller's source
				// location so we give the some hint as to where the
				// inlining happened that led to this instruction. So for
				// example, suppose we have the following (inlined) call
				// chains for this instruction:
				//   F1->G->H
				//   F2->G->H
				// We could tag the instructions from the first call with
				// F1 and instructions from the second call with F2. But
				// that leads to a somewhat confusing display. So for now,
				// we stick with just the inner-most location (i.e., H).
				// In the future we will consider changing the display to
				// make caller info more visible.
				index := 0 // Inner-most frame
				x.file = frames[index].File
				x.line = frames[index].Line
			}
			sp.insts[addr] = x

			// We sometimes get instructions with a zero reported line number.
			// Make such instructions have the same line info as the preceding
			// instruction, if an earlier instruction is found close enough.
			const neighborhood = 32
			if len(frames) > 0 && frames[0].Line != 0 {
				lastFrames = frames
				lastAddr = addr
			} else if (addr-lastAddr <= neighborhood) && lastFrames != nil {
				frames = lastFrames
			}

			sp.addStack(addr, frames)
		}
	}
}

// addStack records addr (and its inlined callees) under every frame in
// frames whose function is in sp.interest.
func (sp *sourcePrinter) addStack(addr uint64, frames []plugin.Frame) {
	// See if the stack contains a function we are interested in.
	for i, f := range frames {
		if !sp.interest[f.Func] {
			continue
		}

		// Record sub-stack under frame's file/line.
		fname := canonicalizeFileName(f.File)
		file := sp.files[fname]
		if file == nil {
			file = &sourceFile{
				fname:    fname,
				lines:    map[int][]sourceInst{},
				funcName: map[int]string{},
			}
			sp.files[fname] = file
		}
		callees := frames[:i]
		stack := make([]callID, 0, len(callees))
		for j := len(callees) - 1; j >= 0; j-- { // Reverse so caller is first
			stack = append(stack, callID{
				file: callees[j].File,
				line: callees[j].Line,
			})
		}
		file.lines[f.Line] = append(file.lines[f.Line], sourceInst{addr, stack})

		// Remember the first function name encountered per source line
		// and assume that that line belongs to that function.
		if _, ok := file.funcName[f.Line]; !ok {
			file.funcName[f.Line] = f.Func
		}
	}
}

// synthAsm is the special disassembler value used for instructions without an object file.
const synthAsm = ""

// handleUnprocessed handles addresses that were skipped by splitIntoRanges because they
// did not belong to a known object file.
func (sp *sourcePrinter) handleUnprocessed(addrs map[uint64]addrInfo, unprocessed []uint64) {
	// makeFrames synthesizes a []plugin.Frame list for the specified address.
	// The result will typically have length 1, but may be longer if address corresponds
	// to inlined calls.
	makeFrames := func(addr uint64) []plugin.Frame {
		loc := addrs[addr].loc
		stack := make([]plugin.Frame, 0, len(loc.Line))
		for _, line := range loc.Line {
			fn := line.Function
			if fn == nil {
				continue
			}
			stack = append(stack, plugin.Frame{
				Func: fn.Name,
				File: fn.Filename,
				Line: int(line.Line),
			})
		}
		return stack
	}

	for _, addr := range unprocessed {
		frames := makeFrames(addr)
		x := instructionInfo{
			objAddr: addr,
			length:  1,
			disasm:  synthAsm,
		}
		if len(frames) > 0 {
			x.file = frames[0].File
			x.line = frames[0].Line
		}
		sp.insts[addr] = x

		sp.addStack(addr, frames)
	}
}

// splitIntoRanges converts the set of addresses we are interested in into a set of address
// ranges to disassemble. It also returns the set of addresses found that did not have an
// associated object file and were therefore not added to an address range.
func (sp *sourcePrinter) splitIntoRanges(prof *profile.Profile, addrMap map[uint64]addrInfo, flat map[uint64]int64) ([]addressRange, []uint64) {
	// Partition addresses into two sets: ones with a known object file, and ones without.
	var addrs, unprocessed []uint64
	for addr, info := range addrMap {
		if info.obj != nil {
			addrs = append(addrs, addr)
		} else {
			unprocessed = append(unprocessed, addr)
		}
	}
	sort.Slice(addrs, func(i, j int) bool { return addrs[i] < addrs[j] })

	const expand = 500 // How much to expand range to pick up nearby addresses.
	var result []addressRange
	for i, n := 0, len(addrs); i < n; {
		begin, end := addrs[i], addrs[i]
		sum := flat[begin]
		i++

		info := addrMap[begin]
		m := info.loc.Mapping
		obj := info.obj // Non-nil because of the partitioning done above.

		// Find following addresses that are close enough to addrs[i].
		for i < n && addrs[i] <= end+2*expand && addrs[i] < m.Limit {
			// When we expand ranges by "expand" on either side, the ranges
			// for addrs[i] and addrs[i-1] will merge.
			end = addrs[i]
			sum += flat[end]
			i++
		}
		// Clamp the expanded range to the mapping's [Start, Limit).
		if m.Start-begin >= expand {
			begin -= expand
		} else {
			begin = m.Start
		}
		if m.Limit-end >= expand {
			end += expand
		} else {
			end = m.Limit
		}

		result = append(result, addressRange{begin, end, obj, m, sum})
	}
	return result, unprocessed
}

// initSamples folds the per-address flat/cum counts into the recorded
// instructions.
func (sp *sourcePrinter) initSamples(flat, cum map[uint64]int64) {
	for addr, inst := range sp.insts {
		// Move all samples that were assigned to the middle of an instruction to the
		// beginning of that instruction. This takes care of samples that were recorded
		// against pc+1.
		instEnd := addr + uint64(inst.length)
		for p := addr; p < instEnd; p++ {
			inst.flat += flat[p]
			inst.cum += cum[p]
		}
		sp.insts[addr] = inst
	}
}

// generate builds the WebListData for at most maxFiles files; a negative
// maxFiles means all files, ordered by name for compatibility.
func (sp *sourcePrinter) generate(maxFiles int, rpt *Report) WebListData {
	// Finalize per-file counts.
	for _, file := range sp.files {
		seen := map[uint64]bool{}
		for _, line := range file.lines {
			for _, x := range line {
				if seen[x.addr] {
					// Same address can be displayed multiple times in a file
					// (e.g., if we show multiple inlined functions).
					// Avoid double-counting samples in this case.
					continue
				}
				seen[x.addr] = true
				inst := sp.insts[x.addr]
				file.cum += inst.cum
				file.flat += inst.flat
			}
		}
	}

	// Get sorted list of files to print.
	var files []*sourceFile
	for _, f := range sp.files {
		files = append(files, f)
	}
	order := func(i, j int) bool { return files[i].flat > files[j].flat }
	if maxFiles < 0 {
		// Order by name for compatibility with old code.
		order = func(i, j int) bool { return files[i].fname < files[j].fname }
		maxFiles = len(files)
	}
	sort.Slice(files, order)
	result := WebListData{
		Total: rpt.formatValue(rpt.total),
	}
	for i, f := range files {
		if i < maxFiles {
			result.Files = append(result.Files, sp.generateFile(f, rpt))
		}
	}
	return result
}

// generateFile renders one source file: per-function header info plus the
// annotated source lines and their assembly.
func (sp *sourcePrinter) generateFile(f *sourceFile, rpt *Report) WebListFile {
	var result WebListFile
	for _, fn := range sp.functions(f) {
		if fn.cum == 0 {
			continue
		}

		listfn := WebListFunc{
			Name:       fn.name,
			File:       f.fname,
			Flat:       rpt.formatValue(fn.flat),
			Cumulative: rpt.formatValue(fn.cum),
			Percent:    measurement.Percentage(fn.cum, rpt.total),
		}
		var asm []assemblyInstruction
		for l := fn.begin; l < fn.end; l++ {
			lineContents, ok := sp.reader.line(f.fname, l)
			if !ok {
				if len(f.lines[l]) == 0 {
					// Outside of range of valid lines and nothing to print.
					continue
				}
				if l == 0 {
					// Line number 0 shows up if line number is not known.
					lineContents = ""
				} else {
					// Past end of file, but have data to print.
					lineContents = "???"
				}
			}

			// Make list of assembly instructions.
			asm = asm[:0]
			var flatSum, cumSum int64
			var lastAddr uint64
			for _, inst := range f.lines[l] {
				addr := inst.addr
				x := sp.insts[addr]
				flatSum += x.flat
				cumSum += x.cum
				startsBlock := (addr != lastAddr+uint64(sp.insts[lastAddr].length))
				lastAddr = addr

				// divisors already applied, so leave flatDiv,cumDiv as 0
				asm = append(asm, assemblyInstruction{
					address:     x.objAddr,
					instruction: x.disasm,
					function:    fn.name,
					file:        x.file,
					line:        x.line,
					flat:        x.flat,
					cum:         x.cum,
					startsBlock: startsBlock,
					inlineCalls: inst.stack,
				})
			}

			listfn.Lines = append(listfn.Lines, makeWebListLine(l, flatSum, cumSum, lineContents, asm, sp.reader, rpt))
		}

		result.Funcs = append(result.Funcs, listfn)
	}
	return result
}

// functions splits apart the lines to show in a file into a list of per-function ranges.
func (sp *sourcePrinter) functions(f *sourceFile) []sourceFunction {
	var funcs []sourceFunction

	// Get interesting lines in sorted order.
	lines := make([]int, 0, len(f.lines))
	for l := range f.lines {
		lines = append(lines, l)
	}
	sort.Ints(lines)

	// Merge adjacent lines that are in same function and not too far apart.
	const mergeLimit = 20
	for _, l := range lines {
		name := f.funcName[l]
		if pretty, ok := sp.prettyNames[name]; ok {
			// Use demangled name if available.
			name = pretty
		}

		fn := sourceFunction{name: name, begin: l, end: l + 1}
		for _, x := range f.lines[l] {
			inst := sp.insts[x.addr]
			fn.flat += inst.flat
			fn.cum += inst.cum
		}

		// See if we should merge into preceding function.
		if len(funcs) > 0 {
			last := funcs[len(funcs)-1]
			if l-last.end < mergeLimit && last.name == name {
				last.end = l + 1
				last.flat += fn.flat
				last.cum += fn.cum
				funcs[len(funcs)-1] = last
				continue
			}
		}

		// Add new function.
		funcs = append(funcs, fn)
	}

	// Expand function boundaries to show neighborhood.
+	const expand = 5
+	for i, f := range funcs {
+		if i == 0 {
+			// Extend backwards, stopping at line number 1, but do not disturb 0
+			// since that is a special line number that can show up when addr2line
+			// cannot determine the real line number.
+			if f.begin > expand {
+				f.begin -= expand
+			} else if f.begin > 1 {
+				f.begin = 1
+			}
+		} else {
+			// Find gap from predecessor and divide between predecessor and f.
+			halfGap := (f.begin - funcs[i-1].end) / 2
+			if halfGap > expand {
+				halfGap = expand
+			}
+			funcs[i-1].end += halfGap
+			f.begin -= halfGap
+		}
+		funcs[i] = f
+	}
+
+	// Also extend the ending point of the last function.
+	if len(funcs) > 0 {
+		funcs[len(funcs)-1].end += expand
+	}
+
+	return funcs
+}
+
+// objectFile returns the object for the specified mapping, opening it if necessary.
+// It returns nil on error.
+func (sp *sourcePrinter) objectFile(m *profile.Mapping) plugin.ObjFile {
+	if m == nil {
+		return nil
+	}
+	if object, ok := sp.objects[m.File]; ok {
+		return object // May be nil if we detected an error earlier.
+	}
+	object, err := sp.objectTool.Open(m.File, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol)
+	if err != nil {
+		object = nil
+	}
+	sp.objects[m.File] = object // Cache even on error.
+	return object
+}
+
+// makeWebListLine returns the contents of a single line in a web listing. This includes
+// the source line and the corresponding assembly.
+func makeWebListLine(lineNo int, flat, cum int64, lineContents string, + assembly []assemblyInstruction, reader *sourceReader, rpt *Report) WebListLine { + line := WebListLine{ + SrcLine: lineContents, + Line: lineNo, + Flat: valueOrDot(flat, rpt), + Cumulative: valueOrDot(cum, rpt), + } + + if len(assembly) == 0 { + line.HTMLClass = "nop" + return line + } + + nestedInfo := false + line.HTMLClass = "deadsrc" + for _, an := range assembly { + if len(an.inlineCalls) > 0 || an.instruction != synthAsm { + nestedInfo = true + line.HTMLClass = "livesrc" + } + } + + if nestedInfo { + srcIndent := indentation(lineContents) + line.Instructions = makeWebListInstructions(srcIndent, assembly, reader, rpt) + } + return line +} + +func makeWebListInstructions(srcIndent int, assembly []assemblyInstruction, reader *sourceReader, rpt *Report) []WebListInstruction { + var result []WebListInstruction + var curCalls []callID + for i, an := range assembly { + var fileline string + if an.file != "" { + fileline = fmt.Sprintf("%s:%d", template.HTMLEscapeString(filepath.Base(an.file)), an.line) + } + text := strings.Repeat(" ", srcIndent+4+4*len(an.inlineCalls)) + an.instruction + inst := WebListInstruction{ + NewBlock: (an.startsBlock && i != 0), + Flat: valueOrDot(an.flat, rpt), + Cumulative: valueOrDot(an.cum, rpt), + Synthetic: (an.instruction == synthAsm), + Address: an.address, + Disasm: rightPad(text, 80), + FileLine: fileline, + } + + // Add inlined call context. + for j, c := range an.inlineCalls { + if j < len(curCalls) && curCalls[j] == c { + // Skip if same as previous instruction. 
+ continue + } + curCalls = nil + fline, ok := reader.line(c.file, c.line) + if !ok { + fline = "" + } + srcCode := strings.Repeat(" ", srcIndent+4+4*j) + strings.TrimSpace(fline) + inst.InlinedCalls = append(inst.InlinedCalls, WebListCall{ + SrcLine: rightPad(srcCode, 80), + FileBase: filepath.Base(c.file), + Line: c.line, + }) + } + curCalls = an.inlineCalls + + result = append(result, inst) + } + return result +} + +// getSourceFromFile collects the sources of a function from a source +// file and annotates it with the samples in fns. Returns the sources +// as nodes, using the info.name field to hold the source code. +func getSourceFromFile(file string, reader *sourceReader, fns graph.Nodes, start, end int) (graph.Nodes, string, error) { + lineNodes := make(map[int]graph.Nodes) + + // Collect source coordinates from profile. + const margin = 5 // Lines before first/after last sample. + if start == 0 { + if fns[0].Info.StartLine != 0 { + start = fns[0].Info.StartLine + } else { + start = fns[0].Info.Lineno - margin + } + } else { + start -= margin + } + if end == 0 { + end = fns[0].Info.Lineno + } + end += margin + for _, n := range fns { + lineno := n.Info.Lineno + nodeStart := n.Info.StartLine + if nodeStart == 0 { + nodeStart = lineno - margin + } + nodeEnd := lineno + margin + if nodeStart < start { + start = nodeStart + } else if nodeEnd > end { + end = nodeEnd + } + lineNodes[lineno] = append(lineNodes[lineno], n) + } + if start < 1 { + start = 1 + } + + var src graph.Nodes + for lineno := start; lineno <= end; lineno++ { + line, ok := reader.line(file, lineno) + if !ok { + break + } + flat, cum := lineNodes[lineno].Sum() + src = append(src, &graph.Node{ + Info: graph.NodeInfo{ + Name: strings.TrimRight(line, "\n"), + Lineno: lineno, + }, + Flat: flat, + Cum: cum, + }) + } + if err := reader.fileError(file); err != nil { + return nil, file, err + } + return src, file, nil +} + +// sourceReader provides access to source code with caching of file contents. 
+type sourceReader struct { + // searchPath is a filepath.ListSeparator-separated list of directories where + // source files should be searched. + searchPath string + + // trimPath is a filepath.ListSeparator-separated list of paths to trim. + trimPath string + + // files maps from path name to a list of lines. + // files[*][0] is unused since line numbering starts at 1. + files map[string][]string + + // errors collects errors encountered per file. These errors are + // consulted before returning out of these module. + errors map[string]error +} + +func newSourceReader(searchPath, trimPath string) *sourceReader { + return &sourceReader{ + searchPath, + trimPath, + make(map[string][]string), + make(map[string]error), + } +} + +func (reader *sourceReader) fileError(path string) error { + return reader.errors[path] +} + +// line returns the line numbered "lineno" in path, or _,false if lineno is out of range. +func (reader *sourceReader) line(path string, lineno int) (string, bool) { + lines, ok := reader.files[path] + if !ok { + // Read and cache file contents. + lines = []string{""} // Skip 0th line + f, err := openSourceFile(path, reader.searchPath, reader.trimPath) + if err != nil { + reader.errors[path] = err + } else { + s := bufio.NewScanner(f) + for s.Scan() { + lines = append(lines, s.Text()) + } + f.Close() + if s.Err() != nil { + reader.errors[path] = err + } + } + reader.files[path] = lines + } + if lineno <= 0 || lineno >= len(lines) { + return "", false + } + return lines[lineno], true +} + +// openSourceFile opens a source file from a name encoded in a profile. File +// names in a profile after can be relative paths, so search them in each of +// the paths in searchPath and their parents. In case the profile contains +// absolute paths, additional paths may be configured to trim from the source +// paths in the profile. This effectively turns the path into a relative path +// searching it using searchPath as usual). 
+func openSourceFile(path, searchPath, trim string) (*os.File, error) { + path = trimPath(path, trim, searchPath) + // If file is still absolute, require file to exist. + if filepath.IsAbs(path) { + f, err := os.Open(path) + return f, err + } + // Scan each component of the path. + for _, dir := range filepath.SplitList(searchPath) { + // Search up for every parent of each possible path. + for { + filename := filepath.Join(dir, path) + if f, err := os.Open(filename); err == nil { + return f, nil + } + parent := filepath.Dir(dir) + if parent == dir { + break + } + dir = parent + } + } + + return nil, fmt.Errorf("could not find file %s on path %s", path, searchPath) +} + +// trimPath cleans up a path by removing prefixes that are commonly +// found on profiles plus configured prefixes. +// TODO(aalexand): Consider optimizing out the redundant work done in this +// function if it proves to matter. +func trimPath(path, trimPath, searchPath string) string { + // Keep path variable intact as it's used below to form the return value. + sPath, searchPath := filepath.ToSlash(path), filepath.ToSlash(searchPath) + if trimPath == "" { + // If the trim path is not configured, try to guess it heuristically: + // search for basename of each search path in the original path and, if + // found, strip everything up to and including the basename. So, for + // example, given original path "/some/remote/path/my-project/foo/bar.c" + // and search path "/my/local/path/my-project" the heuristic will return + // "/my/local/path/my-project/foo/bar.c". + for _, dir := range filepath.SplitList(searchPath) { + want := "/" + filepath.Base(dir) + "/" + if found := strings.Index(sPath, want); found != -1 { + return path[found+len(want):] + } + } + } + // Trim configured trim prefixes. 
+ trimPaths := append(filepath.SplitList(filepath.ToSlash(trimPath)), "/proc/self/cwd/./", "/proc/self/cwd/") + for _, trimPath := range trimPaths { + if !strings.HasSuffix(trimPath, "/") { + trimPath += "/" + } + if strings.HasPrefix(sPath, trimPath) { + return path[len(trimPath):] + } + } + return path +} + +func indentation(line string) int { + column := 0 + for _, c := range line { + if c == ' ' { + column++ + } else if c == '\t' { + column++ + for column%8 != 0 { + column++ + } + } else { + break + } + } + return column +} + +// rightPad pads the input with spaces on the right-hand-side to make it have +// at least width n. It treats tabs as enough spaces that lead to the next +// 8-aligned tab-stop. +func rightPad(s string, n int) string { + var str strings.Builder + + // Convert tabs to spaces as we go so padding works regardless of what prefix + // is placed before the result. + column := 0 + for _, c := range s { + column++ + if c == '\t' { + str.WriteRune(' ') + for column%8 != 0 { + column++ + str.WriteRune(' ') + } + } else { + str.WriteRune(c) + } + } + for column < n { + column++ + str.WriteRune(' ') + } + return str.String() +} + +func canonicalizeFileName(fname string) string { + fname = strings.TrimPrefix(fname, "/proc/self/cwd/") + fname = strings.TrimPrefix(fname, "./") + return filepath.Clean(fname) +} diff --git a/plugin/debug/pkg/internal/report/source_html.go b/plugin/debug/pkg/internal/report/source_html.go new file mode 100644 index 0000000..614a5ee --- /dev/null +++ b/plugin/debug/pkg/internal/report/source_html.go @@ -0,0 +1,71 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package report + +import ( + "html/template" +) + +// AddSourceTemplates adds templates used by PrintWebList to t. +func AddSourceTemplates(t *template.Template) { + template.Must(t.Parse(`{{define "weblistcss"}}` + weblistPageCSS + `{{end}}`)) + template.Must(t.Parse(`{{define "weblistjs"}}` + weblistPageScript + `{{end}}`)) +} + +const weblistPageCSS = `` + +const weblistPageScript = `` diff --git a/plugin/debug/pkg/internal/report/source_test.go b/plugin/debug/pkg/internal/report/source_test.go new file mode 100644 index 0000000..14014f1 --- /dev/null +++ b/plugin/debug/pkg/internal/report/source_test.go @@ -0,0 +1,288 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package report + +import ( + "fmt" + "os" + "path/filepath" + "regexp" + "runtime" + "strings" + "testing" + + "m7s.live/v5/plugin/debug/pkg/internal/binutils" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +func TestWebList(t *testing.T) { + if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" { + t.Skip("weblist only tested on x86-64 linux") + } + + cpu := readProfile(filepath.Join("testdata", "sample.cpu"), t) + rpt := New(cpu, &Options{ + OutputFormat: WebList, + Symbol: regexp.MustCompile("busyLoop"), + SampleValue: func(v []int64) int64 { return v[1] }, + SampleUnit: cpu.SampleType[1].Unit, + }) + result, err := MakeWebList(rpt, &binutils.Binutils{}, -1) + if err != nil { + t.Fatalf("could not generate weblist: %v", err) + } + output := fmt.Sprint(result) + + for _, expect := range []string{"func busyLoop", "call.*mapassign"} { + if match, _ := regexp.MatchString(expect, output); !match { + t.Errorf("weblist output does not contain '%s':\n%s", expect, output) + } + } +} + +func TestSourceSyntheticAddress(t *testing.T) { + testSourceMapping(t, true) +} + +func TestSourceMissingMapping(t *testing.T) { + testSourceMapping(t, false) +} + +// testSourceMapping checks that source info is found even when no applicable +// Mapping/objectFile exists. The locations used in the test are either zero +// (if zeroAddress is true), or non-zero (otherwise). +func testSourceMapping(t *testing.T, zeroAddress bool) { + nextAddr := uint64(0) + + makeLoc := func(name, fname string, line int64) *profile.Location { + if !zeroAddress { + nextAddr++ + } + return &profile.Location{ + Address: nextAddr, + Line: []profile.Line{ + { + Function: &profile.Function{Name: name, Filename: fname}, + Line: line, + }, + }, + } + } + + // Create profile that will need synthetic addresses since it has no mappings. 
+ foo100 := makeLoc("foo", "foo.go", 100) + bar50 := makeLoc("bar", "bar.go", 50) + prof := &profile.Profile{ + Sample: []*profile.Sample{ + { + Value: []int64{9}, + Location: []*profile.Location{foo100, bar50}, + }, + { + Value: []int64{17}, + Location: []*profile.Location{bar50}, + }, + }, + } + rpt := &Report{ + prof: prof, + options: &Options{ + Symbol: regexp.MustCompile("foo|bar"), + SampleValue: func(s []int64) int64 { return s[0] }, + }, + formatValue: func(v int64) string { return fmt.Sprint(v) }, + } + + result, err := MakeWebList(rpt, nil, -1) + if err != nil { + t.Fatalf("MakeWebList returned unexpected error: %v", err) + } + got := fmt.Sprint(result) + + expect := regexp.MustCompile( + `(?s)` + // Allow "." to match newline + `bar\.go.* 50\b.* 17 +26 .*` + + `foo\.go.* 100\b.* 9 +9 `) + if !expect.MatchString(got) { + t.Errorf("expected regular expression %v does not match output:\n%s\n", expect, got) + } +} + +func TestOpenSourceFile(t *testing.T) { + tempdir, err := os.MkdirTemp("", "") + if err != nil { + t.Fatalf("failed to create temp dir: %v", err) + } + const lsep = string(filepath.ListSeparator) + for _, tc := range []struct { + desc string + searchPath string + trimPath string + fs []string + path string + wantPath string // If empty, error is wanted. 
+ }{ + { + desc: "exact absolute path is found", + fs: []string{"foo/bar.cc"}, + path: "$dir/foo/bar.cc", + wantPath: "$dir/foo/bar.cc", + }, + { + desc: "exact relative path is found", + searchPath: "$dir", + fs: []string{"foo/bar.cc"}, + path: "foo/bar.cc", + wantPath: "$dir/foo/bar.cc", + }, + { + desc: "multiple search path", + searchPath: "some/path" + lsep + "$dir", + fs: []string{"foo/bar.cc"}, + path: "foo/bar.cc", + wantPath: "$dir/foo/bar.cc", + }, + { + desc: "relative path is found in parent dir", + searchPath: "$dir/foo/bar", + fs: []string{"bar.cc", "foo/bar/baz.cc"}, + path: "bar.cc", + wantPath: "$dir/bar.cc", + }, + { + desc: "trims configured prefix", + searchPath: "$dir", + trimPath: "some-path" + lsep + "/some/remote/path", + fs: []string{"my-project/foo/bar.cc"}, + path: "/some/remote/path/my-project/foo/bar.cc", + wantPath: "$dir/my-project/foo/bar.cc", + }, + { + desc: "trims heuristically", + searchPath: "$dir/my-project", + fs: []string{"my-project/foo/bar.cc"}, + path: "/some/remote/path/my-project/foo/bar.cc", + wantPath: "$dir/my-project/foo/bar.cc", + }, + { + desc: "error when not found", + path: "foo.cc", + }, + } { + t.Run(tc.desc, func(t *testing.T) { + defer func() { + if err := os.RemoveAll(tempdir); err != nil { + t.Fatalf("failed to remove dir %q: %v", tempdir, err) + } + }() + for _, f := range tc.fs { + path := filepath.Join(tempdir, filepath.FromSlash(f)) + dir := filepath.Dir(path) + if err := os.MkdirAll(dir, 0755); err != nil { + t.Fatalf("failed to create dir %q: %v", dir, err) + } + if err := os.WriteFile(path, nil, 0644); err != nil { + t.Fatalf("failed to create file %q: %v", path, err) + } + } + tc.searchPath = filepath.FromSlash(strings.Replace(tc.searchPath, "$dir", tempdir, -1)) + tc.path = filepath.FromSlash(strings.Replace(tc.path, "$dir", tempdir, 1)) + tc.wantPath = filepath.FromSlash(strings.Replace(tc.wantPath, "$dir", tempdir, 1)) + if file, err := openSourceFile(tc.path, tc.searchPath, tc.trimPath); err != 
nil && tc.wantPath != "" { + t.Errorf("openSourceFile(%q, %q, %q) = err %v, want path %q", tc.path, tc.searchPath, tc.trimPath, err, tc.wantPath) + } else if err == nil { + defer file.Close() + gotPath := file.Name() + if tc.wantPath == "" { + t.Errorf("openSourceFile(%q, %q, %q) = %q, want error", tc.path, tc.searchPath, tc.trimPath, gotPath) + } else if gotPath != tc.wantPath { + t.Errorf("openSourceFile(%q, %q, %q) = %q, want path %q", tc.path, tc.searchPath, tc.trimPath, gotPath, tc.wantPath) + } + } + }) + } +} + +func TestIndentation(t *testing.T) { + for _, c := range []struct { + str string + wantIndent int + }{ + {"", 0}, + {"foobar", 0}, + {" foo", 2}, + {"\tfoo", 8}, + {"\t foo", 9}, + {" \tfoo", 8}, + {" \tfoo", 8}, + {" \tfoo", 16}, + } { + if n := indentation(c.str); n != c.wantIndent { + t.Errorf("indentation(%v): got %d, want %d", c.str, n, c.wantIndent) + } + } +} + +func TestRightPad(t *testing.T) { + for _, c := range []struct { + pad int + in string + expect string + }{ + {0, "", ""}, + {4, "", " "}, + {4, "x", "x "}, + {4, "abcd", "abcd"}, // No padding because of overflow + {4, "abcde", "abcde"}, // No padding because of overflow + {10, "\tx", " x "}, + {10, "w\txy\tz", "w xy z"}, + {20, "w\txy\tz", "w xy z "}, + } { + out := rightPad(c.in, c.pad) + if out != c.expect { + t.Errorf("rightPad(%q, %d): got %q, want %q", c.in, c.pad, out, c.expect) + } + } +} + +func readProfile(fname string, t *testing.T) *profile.Profile { + file, err := os.Open(fname) + if err != nil { + t.Fatalf("%s: could not open profile: %v", fname, err) + } + defer file.Close() + p, err := profile.Parse(file) + if err != nil { + t.Fatalf("%s: could not parse profile: %v", fname, err) + } + + // Fix file names so they do not include absolute path names. 
+ fix := func(s string) string { + const testdir = "/internal/report/" + pos := strings.Index(s, testdir) + if pos == -1 { + return s + } + return s[pos+len(testdir):] + } + for _, m := range p.Mapping { + m.File = fix(m.File) + } + for _, f := range p.Function { + f.Filename = fix(f.Filename) + } + + return p +} diff --git a/plugin/debug/pkg/internal/report/stacks.go b/plugin/debug/pkg/internal/report/stacks.go new file mode 100644 index 0000000..0746523 --- /dev/null +++ b/plugin/debug/pkg/internal/report/stacks.go @@ -0,0 +1,220 @@ +// Copyright 2022 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package report + +import ( + "crypto/sha256" + "encoding/binary" + "fmt" + "path/filepath" + + "m7s.live/v5/plugin/debug/pkg/internal/measurement" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +// StackSet holds a set of stacks corresponding to a profile. +// +// Slices in StackSet and the types it contains are always non-nil, +// which makes Javascript code that uses the JSON encoding less error-prone. +type StackSet struct { + Total int64 // Total value of the profile. + Scale float64 // Multiplier to generate displayed value + Type string // Profile type. E.g., "cpu". + Unit string // One of "B", "s", "GCU", or "" (if unknown) + Stacks []Stack // List of stored stacks + Sources []StackSource // Mapping from source index to info + report *Report +} + +// Stack holds a single stack instance. 
+type Stack struct {
+	Value   int64 // Total value for all samples of this stack.
+	Sources []int // Indices in StackSet.Sources (callers before callees).
+}
+
+// StackSource holds function/location info for a stack entry.
+type StackSource struct {
+	FullName   string
+	FileName   string
+	UniqueName string // Disambiguates functions with same names
+	Inlined    bool   // If true this source was inlined into its caller
+
+	// Alternative names to display (with decreasing lengths) to make text fit.
+	// Guaranteed to be non-empty.
+	Display []string
+
+	// Places holds the list of stack slots where this source occurs.
+	// In particular, if [a,b] is an element in Places,
+	// StackSet.Stacks[a].Sources[b] points to this source.
+	//
+	// No stack will be referenced twice in the Places slice for a given
+	// StackSource. In case of recursion, Places will contain the outer-most
+	// entry in the recursive stack. E.g., if stack S has source X at positions
+	// 4,6,9,10, the Places entry for X will contain [S,4].
+	Places []StackSlot
+
+	// Combined count of stacks where this source is the leaf.
+	Self int64
+
+	// Color number to use for this source.
+	// Colors with numbers higher than supported may be treated as zero.
+	Color int
+}
+
+// StackSlot identifies a particular position within a stored stack.
+type StackSlot struct {
+	Stack int // Index in StackSet.Stacks
+	Pos   int // Index in Stack.Sources
+}
+
+// Stacks returns a StackSet for the profile in rpt.
+func (rpt *Report) Stacks() StackSet {
+	// Get scale for converting to default unit of the right type.
+ scale, unit := measurement.Scale(1, rpt.options.SampleUnit, "default") + if unit == "default" { + unit = "" + } + if rpt.options.Ratio > 0 { + scale *= rpt.options.Ratio + } + s := &StackSet{ + Total: rpt.total, + Scale: scale, + Type: rpt.options.SampleType, + Unit: unit, + Stacks: []Stack{}, // Ensure non-nil + Sources: []StackSource{}, // Ensure non-nil + report: rpt, + } + s.makeInitialStacks(rpt) + s.fillPlaces() + return *s +} + +func (s *StackSet) makeInitialStacks(rpt *Report) { + type key struct { + funcName string + fileName string + line int64 + column int64 + inlined bool + } + srcs := map[key]int{} // Sources identified so far. + seenFunctions := map[string]bool{} + unknownIndex := 1 + + getSrc := func(line profile.Line, inlined bool) int { + fn := line.Function + if fn == nil { + fn = &profile.Function{Name: fmt.Sprintf("?%d?", unknownIndex)} + unknownIndex++ + } + + k := key{fn.Name, fn.Filename, line.Line, line.Column, inlined} + if i, ok := srcs[k]; ok { + return i + } + + fileName := trimPath(fn.Filename, rpt.options.TrimPath, rpt.options.SourcePath) + x := StackSource{ + FileName: fileName, + Inlined: inlined, + Places: []StackSlot{}, // Ensure Places is non-nil + } + if fn.Name != "" { + x.FullName = addLineInfo(fn.Name, line) + x.Display = shortNameList(x.FullName) + x.Color = pickColor(packageName(fn.Name)) + } else { // Use file name, e.g., for file granularity display. + x.FullName = addLineInfo(fileName, line) + x.Display = fileNameSuffixes(x.FullName) + x.Color = pickColor(filepath.Dir(fileName)) + } + + if !seenFunctions[x.FullName] { + x.UniqueName = x.FullName + seenFunctions[x.FullName] = true + } else { + // Assign a different name so pivoting picks this function. + x.UniqueName = fmt.Sprint(x.FullName, "#", fn.ID) + } + + s.Sources = append(s.Sources, x) + srcs[k] = len(s.Sources) - 1 + return len(s.Sources) - 1 + } + + // Synthesized root location that will be placed at the beginning of each stack. 
+ s.Sources = []StackSource{{ + FullName: "root", + Display: []string{"root"}, + Places: []StackSlot{}, + }} + + for _, sample := range rpt.prof.Sample { + value := rpt.options.SampleValue(sample.Value) + stack := Stack{Value: value, Sources: []int{0}} // Start with the root + + // Note: we need to reverse the order in the produced stack. + for i := len(sample.Location) - 1; i >= 0; i-- { + loc := sample.Location[i] + for j := len(loc.Line) - 1; j >= 0; j-- { + line := loc.Line[j] + inlined := (j != len(loc.Line)-1) + stack.Sources = append(stack.Sources, getSrc(line, inlined)) + } + } + + leaf := stack.Sources[len(stack.Sources)-1] + s.Sources[leaf].Self += value + s.Stacks = append(s.Stacks, stack) + } +} + +func (s *StackSet) fillPlaces() { + for i, stack := range s.Stacks { + seenSrcs := map[int]bool{} + for j, src := range stack.Sources { + if seenSrcs[src] { + continue + } + seenSrcs[src] = true + s.Sources[src].Places = append(s.Sources[src].Places, StackSlot{i, j}) + } + } +} + +// pickColor picks a color for key. +func pickColor(key string) int { + const numColors = 1048576 + h := sha256.Sum256([]byte(key)) + index := binary.LittleEndian.Uint32(h[:]) + return int(index % numColors) +} + +// Legend returns the list of lines to display as the legend. 
+func (s *StackSet) Legend() []string { + return reportLabels(s.report, s.report.total, len(s.Sources), len(s.Sources), 0, 0, false) +} + +func addLineInfo(str string, line profile.Line) string { + if line.Column != 0 { + return fmt.Sprint(str, ":", line.Line, ":", line.Column) + } + if line.Line != 0 { + return fmt.Sprint(str, ":", line.Line) + } + return str +} diff --git a/plugin/debug/pkg/internal/report/stacks_test.go b/plugin/debug/pkg/internal/report/stacks_test.go new file mode 100644 index 0000000..c158e8c --- /dev/null +++ b/plugin/debug/pkg/internal/report/stacks_test.go @@ -0,0 +1,240 @@ +package report + +import ( + "fmt" + "reflect" + "slices" + "strings" + "testing" + + "m7s.live/v5/plugin/debug/pkg/profile" +) + +// makeTestStacks generates a StackSet from a supplied list of samples. +func makeTestStacks(samples ...*profile.Sample) StackSet { + prof := makeTestProfile(samples...) + rpt := NewDefault(prof, Options{OutputFormat: Tree, CallTree: true}) + return rpt.Stacks() +} + +func TestStacks(t *testing.T) { + // See report_test.go for the functions available to use in tests. + locs := clearLineAndColumn(testL) + main, foo, bar, tee := locs[0], locs[1], locs[2], locs[3] + + // Also make some file-only locations to test file granularity. + fileMain := makeFileLocation(main) + fileFoo := makeFileLocation(foo) + fileBar := makeFileLocation(bar) + + // stack holds an expected stack value found in StackSet. 
+ type stack struct { + value int64 + names []string + } + makeStack := func(value int64, names ...string) stack { + return stack{value, names} + } + + for _, c := range []struct { + name string + stacks StackSet + expect []stack + }{ + { + "simple", + makeTestStacks( + testSample(100, bar, foo, main), + testSample(200, tee, foo, main), + ), + []stack{ + makeStack(100, "0:root", "1:main", "2:foo", "3:bar"), + makeStack(200, "0:root", "1:main", "2:foo", "4:tee"), + }, + }, + { + "recursion", + makeTestStacks( + testSample(100, bar, foo, foo, foo, main), + testSample(200, bar, foo, foo, main), + ), + []stack{ + // Note: Recursive calls to foo have different source indices. + makeStack(100, "0:root", "1:main", "2:foo", "2:foo", "2:foo", "3:bar"), + makeStack(200, "0:root", "1:main", "2:foo", "2:foo", "3:bar"), + }, + }, + { + "files", + makeTestStacks( + testSample(100, fileFoo, fileMain), + testSample(200, fileBar, fileMain), + ), + []stack{ + makeStack(100, "0:root", "1:dir/main", "2:dir/foo"), + makeStack(200, "0:root", "1:dir/main", "3:dir/bar"), + }, + }, + } { + t.Run(c.name, func(t *testing.T) { + var got []stack + for _, s := range c.stacks.Stacks { + stk := stack{ + value: s.Value, + names: make([]string, len(s.Sources)), + } + for i, src := range s.Sources { + stk.names[i] = fmt.Sprint(src, ":", c.stacks.Sources[src].FullName) + } + got = append(got, stk) + } + if !reflect.DeepEqual(c.expect, got) { + t.Errorf("expecting source %+v, got %+v", c.expect, got) + } + }) + } +} + +func TestStackSources(t *testing.T) { + // See report_test.go for the functions available to use in tests. 
+ locs := clearLineAndColumn(testL) + main, foo, bar, tee, inl := locs[0], locs[1], locs[2], locs[3], locs[5] + + type srcInfo struct { + name string + self int64 + inlined bool + } + + source := func(stacks StackSet, name string) srcInfo { + src := findSource(stacks, name) + return srcInfo{src.FullName, src.Self, src.Inlined} + } + + for _, c := range []struct { + name string + stacks StackSet + srcs []srcInfo + }{ + { + "empty", + makeTestStacks(), + []srcInfo{}, + }, + { + "two-leaves", + makeTestStacks( + testSample(100, bar, foo, main), + testSample(200, tee, bar, foo, main), + testSample(1000, tee, main), + ), + []srcInfo{ + {"main", 0, false}, + {"bar", 100, false}, + {"foo", 0, false}, + {"tee", 1200, false}, + }, + }, + { + "inlined", + makeTestStacks( + testSample(100, inl), + testSample(200, inl), + ), + []srcInfo{ + // inl has bar->tee + {"tee", 300, true}, + }, + }, + { + "recursion", + makeTestStacks( + testSample(100, foo, foo, foo, main), + testSample(100, foo, foo, main), + ), + []srcInfo{ + {"main", 0, false}, + {"foo", 200, false}, + }, + }, + { + "flat", + makeTestStacks( + testSample(100, main), + testSample(100, foo), + testSample(100, bar), + testSample(100, tee), + ), + []srcInfo{ + {"main", 100, false}, + {"bar", 100, false}, + {"foo", 100, false}, + {"tee", 100, false}, + }, + }, + } { + t.Run(c.name, func(t *testing.T) { + for _, expect := range c.srcs { + got := source(c.stacks, expect.name) + if !reflect.DeepEqual(expect, got) { + t.Errorf("expecting source %+v, got %+v", expect, got) + } + } + }) + } +} + +func TestLegend(t *testing.T) { + // See report_test.go for the functions available to use in tests. 
+ main, foo, bar, tee := testL[0], testL[1], testL[2], testL[3] + stacks := makeTestStacks( + testSample(100, bar, foo, main), + testSample(200, tee, foo, main), + ) + got := strings.Join(stacks.Legend(), "\n") + expectStrings := []string{"Type: samples", "Showing nodes", "100% of 300 total"} + for _, expect := range expectStrings { + if !strings.Contains(got, expect) { + t.Errorf("missing expected string %q in legend %q", expect, got) + } + } +} + +func findSource(stacks StackSet, name string) StackSource { + for _, src := range stacks.Sources { + if src.FullName == name { + return src + } + } + return StackSource{} +} + +// clearLineAndColumn drops line and column numbers to simplify tests that +// do not care about line and column numbers. +func clearLineAndColumn(locs []*profile.Location) []*profile.Location { + result := make([]*profile.Location, len(locs)) + for i, loc := range locs { + newLoc := *loc + newLoc.Line = slices.Clone(loc.Line) + for j := range newLoc.Line { + newLoc.Line[j].Line = 0 + newLoc.Line[j].Column = 0 + } + result[i] = &newLoc + } + return result +} + +// makeFileLocation switches loc from function to file-granularity. +func makeFileLocation(loc *profile.Location) *profile.Location { + result := *loc + result.ID += 1000 + result.Line = slices.Clone(loc.Line) + for i := range result.Line { + fn := *result.Line[i].Function + fn.Filename = "dir/" + fn.Name + fn.Name = "" + result.Line[i].Function = &fn + } + return &result +} diff --git a/plugin/debug/pkg/internal/report/synth.go b/plugin/debug/pkg/internal/report/synth.go new file mode 100644 index 0000000..261e8a9 --- /dev/null +++ b/plugin/debug/pkg/internal/report/synth.go @@ -0,0 +1,39 @@ +package report + +import ( + "m7s.live/v5/plugin/debug/pkg/profile" +) + +// synthCode assigns addresses to locations without an address. 
+type synthCode struct { + next uint64 + addr map[*profile.Location]uint64 // Synthesized address assigned to a location +} + +func newSynthCode(mappings []*profile.Mapping) *synthCode { + // Find a larger address than any mapping. + s := &synthCode{next: 1} + for _, m := range mappings { + if s.next < m.Limit { + s.next = m.Limit + } + } + return s +} + +// address returns the synthetic address for loc, creating one if needed. +func (s *synthCode) address(loc *profile.Location) uint64 { + if loc.Address != 0 { + panic("can only synthesize addresses for locations without an address") + } + if addr, ok := s.addr[loc]; ok { + return addr + } + if s.addr == nil { + s.addr = map[*profile.Location]uint64{} + } + addr := s.next + s.next++ + s.addr[loc] = addr + return addr +} diff --git a/plugin/debug/pkg/internal/report/synth_test.go b/plugin/debug/pkg/internal/report/synth_test.go new file mode 100644 index 0000000..7eb584d --- /dev/null +++ b/plugin/debug/pkg/internal/report/synth_test.go @@ -0,0 +1,36 @@ +package report + +import ( + "testing" + + "m7s.live/v5/plugin/debug/pkg/profile" +) + +func TestSynthAddresses(t *testing.T) { + s := newSynthCode(nil) + l1 := &profile.Location{} + addr1 := s.address(l1) + if s.address(l1) != addr1 { + t.Errorf("different calls with same location returned different addresses") + } + + l2 := &profile.Location{} + addr2 := s.address(l2) + if addr2 == addr1 { + t.Errorf("same address assigned to different locations") + } + +} + +func TestSynthAvoidsMapping(t *testing.T) { + mappings := []*profile.Mapping{ + {Start: 100, Limit: 200}, + {Start: 300, Limit: 400}, + } + s := newSynthCode(mappings) + loc := &profile.Location{} + addr := s.address(loc) + if addr >= 100 && addr < 200 || addr >= 300 && addr < 400 { + t.Errorf("synthetic location %d overlaps mapping %v", addr, mappings) + } +} diff --git a/plugin/debug/pkg/internal/report/testdata/README.md b/plugin/debug/pkg/internal/report/testdata/README.md new file mode 100644 index 
0000000..2b60fcc --- /dev/null +++ b/plugin/debug/pkg/internal/report/testdata/README.md @@ -0,0 +1,10 @@ +sample/ contains a sample program that can be profiled. +sample.bin is its x86-64 binary. +sample.cpu is a profile generated by sample.bin. + +To update the binary and profile: + +```shell +go build -o sample.bin ./sample +./sample.bin -cpuprofile sample.cpu +``` diff --git a/plugin/debug/pkg/internal/report/testdata/sample.bin b/plugin/debug/pkg/internal/report/testdata/sample.bin new file mode 100755 index 0000000..25929e6 Binary files /dev/null and b/plugin/debug/pkg/internal/report/testdata/sample.bin differ diff --git a/plugin/debug/pkg/internal/report/testdata/sample.cpu b/plugin/debug/pkg/internal/report/testdata/sample.cpu new file mode 100644 index 0000000..50eea72 Binary files /dev/null and b/plugin/debug/pkg/internal/report/testdata/sample.cpu differ diff --git a/plugin/debug/pkg/internal/report/testdata/sample/sample.go b/plugin/debug/pkg/internal/report/testdata/sample/sample.go new file mode 100644 index 0000000..196a7ea --- /dev/null +++ b/plugin/debug/pkg/internal/report/testdata/sample/sample.go @@ -0,0 +1,55 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// sample program that is used to produce some of the files in +// pprof/internal/report/testdata. 
+package main + +import ( + "flag" + "fmt" + "log" + "math" + "os" + "runtime/pprof" +) + +var cpuProfile = flag.String("cpuprofile", "", "where to write cpu profile") + +func main() { + flag.Parse() + f, err := os.Create(*cpuProfile) + if err != nil { + log.Fatal("could not create CPU profile: ", err) + } + if err := pprof.StartCPUProfile(f); err != nil { + log.Fatal("could not start CPU profile: ", err) + } + defer pprof.StopCPUProfile() + busyLoop() +} + +func busyLoop() { + m := make(map[int]int) + for i := 0; i < 1000000; i++ { + m[i] = i + 10 + } + var sum float64 + for i := 0; i < 100; i++ { + for _, v := range m { + sum += math.Abs(float64(v)) + } + } + fmt.Println("Sum", sum) +} diff --git a/plugin/debug/pkg/internal/report/testdata/source.dot b/plugin/debug/pkg/internal/report/testdata/source.dot new file mode 100644 index 0000000..83ee42e --- /dev/null +++ b/plugin/debug/pkg/internal/report/testdata/source.dot @@ -0,0 +1,17 @@ +digraph "unnamed" { +node [style=filled fillcolor="#f8f8f8"] +subgraph cluster_L { "Duration: 10s, Total samples = 11111 " [shape=box fontsize=16 label="Duration: 10s, Total samples = 11111 \lShowing nodes accounting for 11111, 100% of 11111 total\l\lSee https://git.io/JfYMW for how to read the graph\l"] } +N1 [label="tee\nsource2:8\n10000 (90.00%)" id="node1" fontsize=24 shape=box tooltip="tee testdata/source2:8 (10000)" color="#b20500" fillcolor="#edd6d5"] +N2 [label="main\nsource1:2:2\n1 (0.009%)\nof 11111 (100%)" id="node2" fontsize=9 shape=box tooltip="main testdata/source1:2:2 (11111)" color="#b20000" fillcolor="#edd5d5"] +N3 [label="tee\nsource2:2\n1000 (9.00%)\nof 11000 (99.00%)" id="node3" fontsize=14 shape=box tooltip="tee testdata/source2:2 (11000)" color="#b20000" fillcolor="#edd5d5"] +N4 [label="tee\nsource2:8\n100 (0.9%)" id="node4" fontsize=10 shape=box tooltip="tee testdata/source2:8 (100)" color="#b2b0aa" fillcolor="#edecec"] +N5 [label="bar\nsource1:10\n10 (0.09%)" id="node5" fontsize=9 shape=box tooltip="bar 
testdata/source1:10 (10)" color="#b2b2b1" fillcolor="#ededed"] +N6 [label="bar\nsource1:10\n0 of 100 (0.9%)" id="node6" fontsize=8 shape=box tooltip="bar testdata/source1:10 (100)" color="#b2b0aa" fillcolor="#edecec"] +N7 [label="foo\nsource1:4:4\n0 of 10 (0.09%)" id="node7" fontsize=8 shape=box tooltip="foo testdata/source1:4:4 (10)" color="#b2b2b1" fillcolor="#ededed"] +N2 -> N3 [label=" 11000" weight=100 penwidth=5 color="#b20000" tooltip="main testdata/source1:2:2 -> tee testdata/source2:2 (11000)" labeltooltip="main testdata/source1:2:2 -> tee testdata/source2:2 (11000)"] +N3 -> N1 [label=" 10000" weight=91 penwidth=5 color="#b20500" tooltip="tee testdata/source2:2 -> tee testdata/source2:8 (10000)" labeltooltip="tee testdata/source2:2 -> tee testdata/source2:8 (10000)"] +N6 -> N4 [label=" 100" color="#b2b0aa" tooltip="bar testdata/source1:10 -> tee testdata/source2:8 (100)" labeltooltip="bar testdata/source1:10 -> tee testdata/source2:8 (100)"] +N2 -> N6 [label=" 100" color="#b2b0aa" tooltip="main testdata/source1:2:2 -> bar testdata/source1:10 (100)" labeltooltip="main testdata/source1:2:2 -> bar testdata/source1:10 (100)"] +N7 -> N5 [label=" 10" color="#b2b2b1" tooltip="foo testdata/source1:4:4 -> bar testdata/source1:10 (10)" labeltooltip="foo testdata/source1:4:4 -> bar testdata/source1:10 (10)"] +N2 -> N7 [label=" 10" color="#b2b2b1" tooltip="main testdata/source1:2:2 -> foo testdata/source1:4:4 (10)" labeltooltip="main testdata/source1:2:2 -> foo testdata/source1:4:4 (10)"] +} diff --git a/plugin/debug/pkg/internal/report/testdata/source.rpt b/plugin/debug/pkg/internal/report/testdata/source.rpt new file mode 100644 index 0000000..9ec7b3b --- /dev/null +++ b/plugin/debug/pkg/internal/report/testdata/source.rpt @@ -0,0 +1,49 @@ +Total: 11111 +ROUTINE ======================== bar in testdata/source1 + 10 110 (flat, cum) 0.99% of Total + . . 5:source1 line 5; + . . 6:source1 line 6; + . . 7:source1 line 7; + . . 8:source1 line 8; + . . 
9:source1 line 9; + 10 110 10:source1 line 10; + . . 11:source1 line 11; + . . 12:source1 line 12; + . . 13:source1 line 13; + . . 14:source1 line 14; + . . 15:source1 line 15; +ROUTINE ======================== foo in testdata/source1 + 0 10 (flat, cum) 0.09% of Total + . . 1:source1 line 1; + . . 2:source1 line 2; + . . 3:source1 line 3; + . 10 4:source1 line 4; + . . 5:source1 line 5; + . . 6:source1 line 6; + . . 7:source1 line 7; + . . 8:source1 line 8; + . . 9:source1 line 9; +ROUTINE ======================== main in testdata/source1 + 1 11111 (flat, cum) 100% of Total + . . 1:source1 line 1; + 1 11111 2:source1 line 2; + . . 3:source1 line 3; + . . 4:source1 line 4; + . . 5:source1 line 5; + . . 6:source1 line 6; + . . 7:source1 line 7; +ROUTINE ======================== tee in testdata/source2 + 11100 21100 (flat, cum) 189.90% of Total + . . 1:source2 line 1; + 1000 11000 2:source2 line 2; + . . 3:source2 line 3; + . . 4:source2 line 4; + . . 5:source2 line 5; + . . 6:source2 line 6; + . . 7:source2 line 7; + 10100 10100 8:source2 line 8; + . . 9:source2 line 9; + . . 10:source2 line 10; + . . 11:source2 line 11; + . . 12:source2 line 12; + . . 
13:source2 line 13; diff --git a/plugin/debug/pkg/internal/report/testdata/source1 b/plugin/debug/pkg/internal/report/testdata/source1 new file mode 100644 index 0000000..70e3fc3 --- /dev/null +++ b/plugin/debug/pkg/internal/report/testdata/source1 @@ -0,0 +1,19 @@ +source1 line 1; +source1 line 2; +source1 line 3; +source1 line 4; +source1 line 5; +source1 line 6; +source1 line 7; +source1 line 8; +source1 line 9; +source1 line 10; +source1 line 11; +source1 line 12; +source1 line 13; +source1 line 14; +source1 line 15; +source1 line 16; +source1 line 17; +source1 line 18; + diff --git a/plugin/debug/pkg/internal/report/testdata/source2 b/plugin/debug/pkg/internal/report/testdata/source2 new file mode 100644 index 0000000..54f99cc --- /dev/null +++ b/plugin/debug/pkg/internal/report/testdata/source2 @@ -0,0 +1,19 @@ +source2 line 1; +source2 line 2; +source2 line 3; +source2 line 4; +source2 line 5; +source2 line 6; +source2 line 7; +source2 line 8; +source2 line 9; +source2 line 10; +source2 line 11; +source2 line 12; +source2 line 13; +source2 line 14; +source2 line 15; +source2 line 16; +source2 line 17; +source2 line 18; + diff --git a/plugin/debug/pkg/internal/symbolizer/symbolizer.go b/plugin/debug/pkg/internal/symbolizer/symbolizer.go new file mode 100644 index 0000000..e70ecc9 --- /dev/null +++ b/plugin/debug/pkg/internal/symbolizer/symbolizer.go @@ -0,0 +1,350 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package symbolizer provides a routine to populate a profile with +// symbol, file and line number information. It relies on the +// addr2liner and demangle packages to do the actual work. +package symbolizer + +import ( + "fmt" + "io" + "net/http" + "net/url" + "path/filepath" + "strings" + + "github.com/ianlancetaylor/demangle" + "m7s.live/v5/plugin/debug/pkg/internal/binutils" + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/internal/symbolz" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +// Symbolizer implements the plugin.Symbolize interface. +type Symbolizer struct { + Obj plugin.ObjTool + UI plugin.UI + Transport http.RoundTripper +} + +// test taps for dependency injection +var symbolzSymbolize = symbolz.Symbolize +var localSymbolize = doLocalSymbolize +var demangleFunction = Demangle + +// Symbolize attempts to symbolize profile p. First uses binutils on +// local binaries; if the source is a URL it attempts to get any +// missed entries using symbolz. 
+func (s *Symbolizer) Symbolize(mode string, sources plugin.MappingSources, p *profile.Profile) error { + remote, local, fast, force, demanglerMode := true, true, false, false, "" + for _, o := range strings.Split(strings.ToLower(mode), ":") { + switch o { + case "": + continue + case "none", "no": + return nil + case "local": + remote, local = false, true + case "fastlocal": + remote, local, fast = false, true, true + case "remote": + remote, local = true, false + case "force": + force = true + default: + switch d := strings.TrimPrefix(o, "demangle="); d { + case "full", "none", "templates": + demanglerMode = d + force = true + continue + case "default": + continue + } + s.UI.PrintErr("ignoring unrecognized symbolization option: " + mode) + s.UI.PrintErr("expecting -symbolize=[local|fastlocal|remote|none][:force][:demangle=[none|full|templates|default]") + } + } + + var err error + if local { + // Symbolize locally using binutils. + if err = localSymbolize(p, fast, force, s.Obj, s.UI); err != nil { + s.UI.PrintErr("local symbolization: " + err.Error()) + } + } + if remote { + post := func(source, post string) ([]byte, error) { + return postURL(source, post, s.Transport) + } + if err = symbolzSymbolize(p, force, sources, post, s.UI); err != nil { + return err // Ran out of options. + } + } + + demangleFunction(p, force, demanglerMode) + return nil +} + +// postURL issues a POST to a URL over HTTP. 
+func postURL(source, post string, tr http.RoundTripper) ([]byte, error) { + client := &http.Client{ + Transport: tr, + } + resp, err := client.Post(source, "application/octet-stream", strings.NewReader(post)) + if err != nil { + return nil, fmt.Errorf("http post %s: %v", source, err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("http post %s: %v", source, statusCodeError(resp)) + } + return io.ReadAll(resp.Body) +} + +func statusCodeError(resp *http.Response) error { + if resp.Header.Get("X-Go-Pprof") != "" && strings.Contains(resp.Header.Get("Content-Type"), "text/plain") { + // error is from pprof endpoint + if body, err := io.ReadAll(resp.Body); err == nil { + return fmt.Errorf("server response: %s - %s", resp.Status, body) + } + } + return fmt.Errorf("server response: %s", resp.Status) +} + +// doLocalSymbolize adds symbol and line number information to all locations +// in a profile. mode enables some options to control +// symbolization. +func doLocalSymbolize(prof *profile.Profile, fast, force bool, obj plugin.ObjTool, ui plugin.UI) error { + if fast { + if bu, ok := obj.(*binutils.Binutils); ok { + bu.SetFastSymbolization(true) + } + } + + functions := map[profile.Function]*profile.Function{} + addFunction := func(f *profile.Function) *profile.Function { + if fp := functions[*f]; fp != nil { + return fp + } + functions[*f] = f + f.ID = uint64(len(prof.Function)) + 1 + prof.Function = append(prof.Function, f) + return f + } + + missingBinaries := false + mappingLocs := map[*profile.Mapping][]*profile.Location{} + for _, l := range prof.Location { + mappingLocs[l.Mapping] = append(mappingLocs[l.Mapping], l) + } + for midx, m := range prof.Mapping { + locs := mappingLocs[m] + if len(locs) == 0 { + // The mapping is dangling and has no locations pointing to it. + continue + } + // Do not attempt to re-symbolize a mapping that has already been symbolized. 
+ if !force && (m.HasFunctions || m.HasFilenames || m.HasLineNumbers) { + continue + } + if m.File == "" { + if midx == 0 { + ui.PrintErr("Main binary filename not available.") + continue + } + missingBinaries = true + continue + } + if m.Unsymbolizable() { + // Skip well-known system mappings + continue + } + if m.BuildID == "" { + if u, err := url.Parse(m.File); err == nil && u.IsAbs() && strings.Contains(strings.ToLower(u.Scheme), "http") { + // Skip mappings pointing to a source URL + continue + } + } + + name := filepath.Base(m.File) + if m.BuildID != "" { + name += fmt.Sprintf(" (build ID %s)", m.BuildID) + } + f, err := obj.Open(m.File, m.Start, m.Limit, m.Offset, m.KernelRelocationSymbol) + if err != nil { + ui.PrintErr("Local symbolization failed for ", name, ": ", err) + missingBinaries = true + continue + } + if fid := f.BuildID(); m.BuildID != "" && fid != "" && fid != m.BuildID { + ui.PrintErr("Local symbolization failed for ", name, ": build ID mismatch") + f.Close() + continue + } + symbolizeOneMapping(m, locs, f, addFunction) + f.Close() + } + + if missingBinaries { + ui.PrintErr("Some binary filenames not available. Symbolization may be incomplete.\n" + + "Try setting PPROF_BINARY_PATH to the search path for local binaries.") + } + return nil +} + +func symbolizeOneMapping(m *profile.Mapping, locs []*profile.Location, obj plugin.ObjFile, addFunction func(*profile.Function) *profile.Function) { + for _, l := range locs { + stack, err := obj.SourceLine(l.Address) + if err != nil || len(stack) == 0 { + // No answers from addr2line. 
+ continue + } + + l.Line = make([]profile.Line, len(stack)) + l.IsFolded = false + for i, frame := range stack { + if frame.Func != "" { + m.HasFunctions = true + } + if frame.File != "" { + m.HasFilenames = true + } + if frame.Line != 0 { + m.HasLineNumbers = true + } + f := addFunction(&profile.Function{ + Name: frame.Func, + SystemName: frame.Func, + Filename: frame.File, + StartLine: int64(frame.StartLine), + }) + l.Line[i] = profile.Line{ + Function: f, + Line: int64(frame.Line), + Column: int64(frame.Column), + } + } + + if len(stack) > 0 { + m.HasInlineFrames = true + } + } +} + +// Demangle updates the function names in a profile with demangled C++ +// names, simplified according to demanglerMode. If force is set, +// overwrite any names that appear already demangled. +func Demangle(prof *profile.Profile, force bool, demanglerMode string) { + if force { + // Remove the current demangled names to force demangling + for _, f := range prof.Function { + if f.Name != "" && f.SystemName != "" { + f.Name = f.SystemName + } + } + } + + options := demanglerModeToOptions(demanglerMode) + // Bail out fast to avoid any parsing, if we really don't want any demangling. 
+ if len(options) == 0 { + return + } + for _, fn := range prof.Function { + demangleSingleFunction(fn, options) + } +} + +func demanglerModeToOptions(demanglerMode string) []demangle.Option { + switch demanglerMode { + case "": // demangled, simplified: no parameters, no templates, no return type + return []demangle.Option{demangle.NoParams, demangle.NoEnclosingParams, demangle.NoTemplateParams} + case "templates": // demangled, simplified: no parameters, no return type + return []demangle.Option{demangle.NoParams, demangle.NoEnclosingParams} + case "full": + return []demangle.Option{demangle.NoClones} + case "none": // no demangling + return []demangle.Option{} + } + + panic(fmt.Sprintf("unknown demanglerMode %s", demanglerMode)) +} + +func demangleSingleFunction(fn *profile.Function, options []demangle.Option) { + if fn.Name != "" && fn.SystemName != fn.Name { + return // Already demangled. + } + // Copy the options because they may be updated by the call. + o := make([]demangle.Option, len(options)) + copy(o, options) + if demangled := demangle.Filter(fn.SystemName, o...); demangled != fn.SystemName { + fn.Name = demangled + return + } + // Could not demangle. Apply heuristics in case the name is + // already demangled. + name := fn.SystemName + if looksLikeDemangledCPlusPlus(name) { + for _, o := range options { + switch o { + case demangle.NoParams: + name = removeMatching(name, '(', ')') + case demangle.NoTemplateParams: + name = removeMatching(name, '<', '>') + } + } + } + fn.Name = name +} + +// looksLikeDemangledCPlusPlus is a heuristic to decide if a name is +// the result of demangling C++. If so, further heuristics will be +// applied to simplify the name. +func looksLikeDemangledCPlusPlus(demangled string) bool { + // Skip java names of the form "class.". + if strings.Contains(demangled, ".<") { + return false + } + // Skip Go names of the form "foo.(*Bar[...]).Method". 
+ if strings.Contains(demangled, "]).") { + return false + } + return strings.ContainsAny(demangled, "<>[]") || strings.Contains(demangled, "::") +} + +// removeMatching removes nested instances of start..end from name. +func removeMatching(name string, start, end byte) string { + s := string(start) + string(end) + var nesting, first, current int + for index := strings.IndexAny(name[current:], s); index != -1; index = strings.IndexAny(name[current:], s) { + switch current += index; name[current] { + case start: + nesting++ + if nesting == 1 { + first = current + } + case end: + nesting-- + switch { + case nesting < 0: + return name // Mismatch, abort + case nesting == 0: + name = name[:first] + name[current+1:] + current = first - 1 + } + } + current++ + } + return name +} diff --git a/plugin/debug/pkg/internal/symbolizer/symbolizer_test.go b/plugin/debug/pkg/internal/symbolizer/symbolizer_test.go new file mode 100644 index 0000000..1381713 --- /dev/null +++ b/plugin/debug/pkg/internal/symbolizer/symbolizer_test.go @@ -0,0 +1,489 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package symbolizer + +import ( + "fmt" + "regexp" + "sort" + "strings" + "testing" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/internal/proftest" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +const filePath = "mapping" +const buildID = "build-id" + +var testM = []*profile.Mapping{ + { + ID: 1, + Start: 0x1000, + Limit: 0x5000, + File: filePath, + BuildID: buildID, + }, +} + +var testL = []*profile.Location{ + { + ID: 1, + Mapping: testM[0], + Address: 1000, + }, + { + ID: 2, + Mapping: testM[0], + Address: 2000, + }, + { + ID: 3, + Mapping: testM[0], + Address: 3000, + }, + { + ID: 4, + Mapping: testM[0], + Address: 4000, + }, + { + ID: 5, + Mapping: testM[0], + Address: 5000, + }, +} + +var testProfile = profile.Profile{ + DurationNanos: 10e9, + SampleType: []*profile.ValueType{ + {Type: "cpu", Unit: "cycles"}, + }, + Sample: []*profile.Sample{ + { + Location: []*profile.Location{testL[0]}, + Value: []int64{1}, + }, + { + Location: []*profile.Location{testL[1], testL[0]}, + Value: []int64{10}, + }, + { + Location: []*profile.Location{testL[2], testL[0]}, + Value: []int64{100}, + }, + { + Location: []*profile.Location{testL[3], testL[0]}, + Value: []int64{1}, + }, + { + Location: []*profile.Location{testL[4], testL[3], testL[0]}, + Value: []int64{10000}, + }, + }, + Location: testL, + Mapping: testM, + PeriodType: &profile.ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 10, +} + +func TestSymbolization(t *testing.T) { + sSym := symbolzSymbolize + lSym := localSymbolize + defer func() { + symbolzSymbolize = sSym + localSymbolize = lSym + demangleFunction = Demangle + }() + symbolzSymbolize = symbolzMock + localSymbolize = localMock + demangleFunction = demangleMock + + type testcase struct { + mode string + wantComment string + } + + s := Symbolizer{ + Obj: mockObjTool{}, + UI: &proftest.TestUI{T: t}, + } + for i, tc := range []testcase{ + { + "local", + "local=[]", + }, + { + "fastlocal", + "local=[fast]", + }, + { 
+ "remote", + "symbolz=[]", + }, + { + "", + "local=[]:symbolz=[]", + }, + { + "demangle=none", + "demangle=[none]:force:local=[force]:symbolz=[force]", + }, + { + "remote:demangle=full", + "demangle=[full]:force:symbolz=[force]", + }, + { + "local:demangle=templates", + "demangle=[templates]:force:local=[force]", + }, + { + "force:remote", + "force:symbolz=[force]", + }, + } { + prof := testProfile.Copy() + if err := s.Symbolize(tc.mode, nil, prof); err != nil { + t.Errorf("symbolize #%d: %v", i, err) + continue + } + sort.Strings(prof.Comments) + if got, want := strings.Join(prof.Comments, ":"), tc.wantComment; got != want { + t.Errorf("%q: got %s, want %s", tc.mode, got, want) + continue + } + } +} + +func symbolzMock(p *profile.Profile, force bool, sources plugin.MappingSources, syms func(string, string) ([]byte, error), ui plugin.UI) error { + var args []string + if force { + args = append(args, "force") + } + p.Comments = append(p.Comments, "symbolz=["+strings.Join(args, ",")+"]") + return nil +} + +func localMock(p *profile.Profile, fast, force bool, obj plugin.ObjTool, ui plugin.UI) error { + var args []string + if fast { + args = append(args, "fast") + } + if force { + args = append(args, "force") + } + p.Comments = append(p.Comments, "local=["+strings.Join(args, ",")+"]") + return nil +} + +func demangleMock(p *profile.Profile, force bool, mode string) { + if force { + p.Comments = append(p.Comments, "force") + } + if mode != "" { + p.Comments = append(p.Comments, "demangle=["+mode+"]") + } +} + +func TestLocalSymbolization(t *testing.T) { + prof := testProfile.Copy() + + if prof.HasFunctions() { + t.Error("unexpected function names") + } + if prof.HasFileLines() { + t.Error("unexpected filenames or line numbers") + } + + b := mockObjTool{} + if err := localSymbolize(prof, false, false, b, &proftest.TestUI{T: t}); err != nil { + t.Fatalf("localSymbolize(): %v", err) + } + + for _, loc := range prof.Location { + if err := 
checkSymbolizedLocation(loc.Address, loc.Line); err != nil { + t.Errorf("location %d: %v", loc.Address, err) + } + } + if !prof.HasFunctions() { + t.Error("missing function names") + } + if !prof.HasFileLines() { + t.Error("missing filenames or line numbers") + } +} + +func TestLocalSymbolizationHandlesSpecialCases(t *testing.T) { + for _, tc := range []struct { + desc, file, buildID, allowOutputRx string + wantNumOutputRegexMatches int + }{{ + desc: "Unsymbolizable files are skipped", + file: "[some unsymbolizable file]", + buildID: "", + }, { + desc: "HTTP URL like paths are skipped", + file: "http://original-url-source-of-profile-fetch", + buildID: "", + }, { + desc: "Non-existent files are ignored", + file: "/does-not-exist", + buildID: buildID, + allowOutputRx: "(?s)unknown or non-existent file|Some binary filenames not available.*Try setting PPROF_BINARY_PATH", + wantNumOutputRegexMatches: 2, + }, { + desc: "Missing main binary is detected", + file: "", + buildID: buildID, + allowOutputRx: "Main binary filename not available", + wantNumOutputRegexMatches: 1, + }, { + desc: "Different build ID is detected", + file: filePath, + buildID: "unexpected-build-id", + allowOutputRx: "build ID mismatch", + wantNumOutputRegexMatches: 1, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + prof := testProfile.Copy() + prof.Mapping[0].File = tc.file + prof.Mapping[0].BuildID = tc.buildID + origProf := prof.Copy() + + if prof.HasFunctions() { + t.Error("unexpected function names") + } + if prof.HasFileLines() { + t.Error("unexpected filenames or line numbers") + } + + b := mockObjTool{} + ui := &proftest.TestUI{T: t, AllowRx: tc.allowOutputRx} + if err := localSymbolize(prof, false, false, b, ui); err != nil { + t.Fatalf("localSymbolize(): %v", err) + } + if ui.NumAllowRxMatches != tc.wantNumOutputRegexMatches { + t.Errorf("localSymbolize(): got %d matches for %q UI regexp, want %d", ui.NumAllowRxMatches, tc.allowOutputRx, tc.wantNumOutputRegexMatches) + } + + if diff, err 
:= proftest.Diff([]byte(origProf.String()), []byte(prof.String())); err != nil { + t.Fatalf("Failed to get diff: %v", err) + } else if string(diff) != "" { + t.Errorf("Profile changed unexpectedly, diff(want->got):\n%s", diff) + } + }) + } +} + +func checkSymbolizedLocation(a uint64, got []profile.Line) error { + want, ok := mockAddresses[a] + if !ok { + return fmt.Errorf("unexpected address") + } + if len(want) != len(got) { + return fmt.Errorf("want len %d, got %d", len(want), len(got)) + } + + for i, w := range want { + g := got[i] + if g.Function.Name != w.Func { + return fmt.Errorf("want function: %q, got %q", w.Func, g.Function.Name) + } + if g.Function.Filename != w.File { + return fmt.Errorf("want filename: %q, got %q", w.File, g.Function.Filename) + } + if g.Line != int64(w.Line) { + return fmt.Errorf("want lineno: %d, got %d", w.Line, g.Line) + } + if g.Column != int64(w.Column) { + return fmt.Errorf("want columnno: %d, got %d", w.Column, g.Column) + } + } + return nil +} + +var mockAddresses = map[uint64][]plugin.Frame{ + 1000: {frame("fun11", "file11.src", 10, 1)}, + 2000: {frame("fun21", "file21.src", 20, 2), frame("fun22", "file22.src", 20, 2)}, + 3000: {frame("fun31", "file31.src", 30, 3), frame("fun32", "file32.src", 30, 3), frame("fun33", "file33.src", 30, 3)}, + 4000: {frame("fun41", "file41.src", 40, 4), frame("fun42", "file42.src", 40, 4), frame("fun43", "file43.src", 40, 4), frame("fun44", "file44.src", 40, 4)}, + 5000: {frame("fun51", "file51.src", 50, 5), frame("fun52", "file52.src", 50, 5), frame("fun53", "file53.src", 50, 5), frame("fun54", "file54.src", 50, 5), frame("fun55", "file55.src", 50, 5)}, +} + +func frame(fname, file string, line int, column int) plugin.Frame { + return plugin.Frame{ + Func: fname, + File: file, + Line: line, + Column: column} +} + +func TestDemangleSingleFunction(t *testing.T) { + // All tests with default mode. 
+ demanglerMode := "" + options := demanglerModeToOptions(demanglerMode) + + cases := []struct { + symbol string + want string + }{ + { + // Trivial C symbol. + symbol: "printf", + want: "printf", + }, + { + // foo::bar(int) + symbol: "_ZN3foo3barEi", + want: "foo::bar", + }, + { + // Already demangled. + symbol: "foo::bar(int)", + want: "foo::bar", + }, + { + // int foo::baz(double) + symbol: "_ZN3foo3bazIdEEiT", + want: "foo::baz", + }, + { + // Already demangled. + // + // TODO: The demangled form of this is actually + // 'int foo::baz(double)', but our heuristic + // can't strip the return type. Should it be able to? + symbol: "foo::baz(double)", + want: "foo::baz", + }, + { + // operator delete[](void*) + symbol: "_ZdaPv", + want: "operator delete[]", + }, + { + // Already demangled. + symbol: "operator delete[](void*)", + want: "operator delete[]", + }, + { + // bar(int (*) [5]) + symbol: "_Z3barPA5_i", + want: "bar", + }, + { + // Already demangled. + symbol: "bar(int (*) [5])", + want: "bar", + }, + // Java symbols, do not demangle. + { + symbol: "java.lang.Float.parseFloat", + want: "java.lang.Float.parseFloat", + }, + { + symbol: "java.lang.Float.", + want: "java.lang.Float.", + }, + // Go symbols, do not demangle. + { + symbol: "example.com/foo.Bar", + want: "example.com/foo.Bar", + }, + { + symbol: "example.com/foo.(*Bar).Bat", + want: "example.com/foo.(*Bar).Bat", + }, + { + // Method on type with type parameters, as reported by + // Go pprof profiles (simplified symbol name). + symbol: "example.com/foo.(*Bar[...]).Bat", + want: "example.com/foo.(*Bar[...]).Bat", + }, + { + // Method on type with type parameters, as reported by + // perf profiles (actual symbol name). + symbol: "example.com/foo.(*Bar[go.shape.string_0,go.shape.int_1]).Bat", + want: "example.com/foo.(*Bar[go.shape.string_0,go.shape.int_1]).Bat", + }, + { + // Function with type parameters, as reported by Go + // pprof profiles (simplified symbol name). 
+ symbol: "example.com/foo.Bar[...]", + want: "example.com/foo.Bar[...]", + }, + { + // Function with type parameters, as reported by perf + // profiles (actual symbol name). + symbol: "example.com/foo.Bar[go.shape.string_0,go.shape.int_1]", + want: "example.com/foo.Bar[go.shape.string_0,go.shape.int_1]", + }, + } + for _, tc := range cases { + fn := &profile.Function{ + SystemName: tc.symbol, + } + demangleSingleFunction(fn, options) + if fn.Name != tc.want { + t.Errorf("demangleSingleFunction(%s) got %s want %s", tc.symbol, fn.Name, tc.want) + } + } +} + +type mockObjTool struct{} + +func (mockObjTool) Open(file string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) { + if file != filePath { + return nil, fmt.Errorf("unknown or non-existent file %q", file) + } + return mockObjFile{frames: mockAddresses}, nil +} + +func (mockObjTool) Disasm(file string, start, end uint64, intelSyntax bool) ([]plugin.Inst, error) { + if file != filePath { + return nil, fmt.Errorf("unknown or non-existent file %q", file) + } + return nil, fmt.Errorf("disassembly not supported") +} + +type mockObjFile struct { + frames map[uint64][]plugin.Frame +} + +func (mockObjFile) Name() string { + return filePath +} + +func (mockObjFile) ObjAddr(addr uint64) (uint64, error) { + return addr, nil +} + +func (mockObjFile) BuildID() string { + return buildID +} + +func (mf mockObjFile) SourceLine(addr uint64) ([]plugin.Frame, error) { + return mf.frames[addr], nil +} + +func (mockObjFile) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) { + return []*plugin.Sym{}, nil +} + +func (mockObjFile) Close() error { + return nil +} diff --git a/plugin/debug/pkg/internal/symbolz/symbolz.go b/plugin/debug/pkg/internal/symbolz/symbolz.go new file mode 100644 index 0000000..bb7f359 --- /dev/null +++ b/plugin/debug/pkg/internal/symbolz/symbolz.go @@ -0,0 +1,196 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package symbolz symbolizes a profile using the output from the symbolz +// service. +package symbolz + +import ( + "bytes" + "fmt" + "io" + "net/url" + "path" + "regexp" + "strconv" + "strings" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +var ( + symbolzRE = regexp.MustCompile(`(0x[[:xdigit:]]+)\s+(.*)`) +) + +// Symbolize symbolizes profile p by parsing data returned by a symbolz +// handler. syms receives the symbolz query (hex addresses separated by '+') +// and returns the symbolz output in a string. If force is false, it will only +// symbolize locations from mappings not already marked as HasFunctions. Does +// not skip unsymbolizable files since the symbolz handler can be flexible +// enough to handle some of those cases such as JIT locations in //anon. +func Symbolize(p *profile.Profile, force bool, sources plugin.MappingSources, syms func(string, string) ([]byte, error), ui plugin.UI) error { + for _, m := range p.Mapping { + if !force && m.HasFunctions { + // Only check for HasFunctions as symbolz only populates function names. + continue + } + mappingSources := sources[m.File] + if m.BuildID != "" { + mappingSources = append(mappingSources, sources[m.BuildID]...) 
+ } + for _, source := range mappingSources { + if symz := symbolz(source.Source); symz != "" { + if err := symbolizeMapping(symz, int64(source.Start)-int64(m.Start), syms, m, p); err != nil { + return err + } + m.HasFunctions = true + break + } + } + } + + return nil +} + +// hasGperftoolsSuffix checks whether path ends with one of the suffixes listed in +// pprof_remote_servers.html from the gperftools distribution +func hasGperftoolsSuffix(path string) bool { + suffixes := []string{ + "/pprof/heap", + "/pprof/growth", + "/pprof/profile", + "/pprof/pmuprofile", + "/pprof/contention", + } + for _, s := range suffixes { + if strings.HasSuffix(path, s) { + return true + } + } + return false +} + +// symbolz returns the corresponding symbolz source for a profile URL. +func symbolz(source string) string { + if url, err := url.Parse(source); err == nil && url.Host != "" { + // All paths in the net/http/pprof Go package contain /debug/pprof/ + if strings.Contains(url.Path, "/debug/pprof/") || hasGperftoolsSuffix(url.Path) { + url.Path = path.Clean(url.Path + "/../symbol") + } else { + url.Path = path.Clean(url.Path + "/../symbolz") + } + url.RawQuery = "" + return url.String() + } + + return "" +} + +// symbolizeMapping symbolizes locations belonging to a Mapping by querying +// a symbolz handler. An offset is applied to all addresses to take care of +// normalization occurred for merged Mappings. +func symbolizeMapping(source string, offset int64, syms func(string, string) ([]byte, error), m *profile.Mapping, p *profile.Profile) error { + // Construct query of addresses to symbolize. + var a []string + for _, l := range p.Location { + if l.Mapping == m && l.Address != 0 && len(l.Line) == 0 { + // Compensate for normalization. 
+ addr, overflow := adjust(l.Address, offset) + if overflow { + return fmt.Errorf("cannot adjust address %d by %d, it would overflow (mapping %v)", l.Address, offset, l.Mapping) + } + a = append(a, fmt.Sprintf("%#x", addr)) + } + } + + if len(a) == 0 { + // No addresses to symbolize. + return nil + } + + lines := make(map[uint64]profile.Line) + functions := make(map[string]*profile.Function) + + b, err := syms(source, strings.Join(a, "+")) + if err != nil { + return err + } + + buf := bytes.NewBuffer(b) + for { + l, err := buf.ReadString('\n') + + if err != nil { + if err == io.EOF { + break + } + return err + } + + if symbol := symbolzRE.FindStringSubmatch(l); len(symbol) == 3 { + origAddr, err := strconv.ParseUint(symbol[1], 0, 64) + if err != nil { + return fmt.Errorf("unexpected parse failure %s: %v", symbol[1], err) + } + // Reapply offset expected by the profile. + addr, overflow := adjust(origAddr, -offset) + if overflow { + return fmt.Errorf("cannot adjust symbolz address %d by %d, it would overflow", origAddr, -offset) + } + + name := symbol[2] + fn := functions[name] + if fn == nil { + fn = &profile.Function{ + ID: uint64(len(p.Function) + 1), + Name: name, + SystemName: name, + } + functions[name] = fn + p.Function = append(p.Function, fn) + } + + lines[addr] = profile.Line{Function: fn} + } + } + + for _, l := range p.Location { + if l.Mapping != m { + continue + } + if line, ok := lines[l.Address]; ok { + l.Line = []profile.Line{line} + } + } + + return nil +} + +// adjust shifts the specified address by the signed offset. It returns the +// adjusted address. It signals that the address cannot be adjusted without an +// overflow by returning true in the second return value. 
+func adjust(addr uint64, offset int64) (uint64, bool) { + adj := uint64(int64(addr) + offset) + if offset < 0 { + if adj >= addr { + return 0, true + } + } else { + if adj < addr { + return 0, true + } + } + return adj, false +} diff --git a/plugin/debug/pkg/internal/symbolz/symbolz_test.go b/plugin/debug/pkg/internal/symbolz/symbolz_test.go new file mode 100644 index 0000000..f1c552b --- /dev/null +++ b/plugin/debug/pkg/internal/symbolz/symbolz_test.go @@ -0,0 +1,170 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package symbolz + +import ( + "fmt" + "math" + "strings" + "testing" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" + "m7s.live/v5/plugin/debug/pkg/internal/proftest" + "m7s.live/v5/plugin/debug/pkg/profile" +) + +func TestSymbolzURL(t *testing.T) { + for try, want := range map[string]string{ + "http://host:8000/profilez": "http://host:8000/symbolz", + "http://host:8000/profilez?seconds=5": "http://host:8000/symbolz", + "http://host:8000/profilez?seconds=5&format=proto": "http://host:8000/symbolz", + "http://host:8000/heapz?format=legacy": "http://host:8000/symbolz", + "http://host:8000/some/deeper/path/profilez?seconds=5": "http://host:8000/some/deeper/path/symbolz", + "http://host:8000/debug/pprof/profile": "http://host:8000/debug/pprof/symbol", + "http://host:8000/debug/pprof/profile?seconds=10": "http://host:8000/debug/pprof/symbol", + "http://host:8000/debug/pprof/heap": "http://host:8000/debug/pprof/symbol", + "http://some.host:8080/some/deeper/path/debug/pprof/endpoint?param=value": "http://some.host:8080/some/deeper/path/debug/pprof/symbol", + "http://host:8000/pprof/profile": "http://host:8000/pprof/symbol", + "http://host:8000/pprof/profile?seconds=15": "http://host:8000/pprof/symbol", + "http://host:8000/pprof/heap": "http://host:8000/pprof/symbol", + "http://host:8000/debug/pprof/block": "http://host:8000/debug/pprof/symbol", + "http://host:8000/debug/pprof/trace?seconds=5": "http://host:8000/debug/pprof/symbol", + "http://host:8000/debug/pprof/mutex": "http://host:8000/debug/pprof/symbol", + "http://host/whatever/pprof/heap": "http://host/whatever/pprof/symbol", + "http://host/whatever/pprof/growth": "http://host/whatever/pprof/symbol", + "http://host/whatever/pprof/profile": "http://host/whatever/pprof/symbol", + "http://host/whatever/pprof/pmuprofile": "http://host/whatever/pprof/symbol", + "http://host/whatever/pprof/contention": "http://host/whatever/pprof/symbol", + } { + if got := symbolz(try); got != want { + t.Errorf(`symbolz(%s)=%s, 
want "%s"`, try, got, want) + } + } +} + +func TestSymbolize(t *testing.T) { + s := plugin.MappingSources{ + "buildid": []struct { + Source string + Start uint64 + }{ + {Source: "http://localhost:80/profilez"}, + }, + } + + for _, hasFunctions := range []bool{false, true} { + for _, force := range []bool{false, true} { + p := testProfile(hasFunctions) + + if err := Symbolize(p, force, s, fetchSymbols, &proftest.TestUI{T: t}); err != nil { + t.Errorf("symbolz: %v", err) + continue + } + var wantSym, wantNoSym []*profile.Location + if force || !hasFunctions { + wantNoSym = p.Location[:1] + wantSym = p.Location[1:] + } else { + wantNoSym = p.Location + } + + if err := checkSymbolized(wantSym, true); err != nil { + t.Errorf("symbolz hasFns=%v force=%v: %v", hasFunctions, force, err) + } + if err := checkSymbolized(wantNoSym, false); err != nil { + t.Errorf("symbolz hasFns=%v force=%v: %v", hasFunctions, force, err) + } + } + } +} + +func testProfile(hasFunctions bool) *profile.Profile { + m := []*profile.Mapping{ + { + ID: 1, + Start: 0x1000, + Limit: 0x5000, + BuildID: "buildid", + HasFunctions: hasFunctions, + }, + } + p := &profile.Profile{ + Location: []*profile.Location{ + {ID: 1, Mapping: m[0], Address: 0x1000}, + {ID: 2, Mapping: m[0], Address: 0x2000}, + {ID: 3, Mapping: m[0], Address: 0x3000}, + {ID: 4, Mapping: m[0], Address: 0x4000}, + }, + Mapping: m, + } + + return p +} + +func checkSymbolized(locs []*profile.Location, wantSymbolized bool) error { + for _, loc := range locs { + if !wantSymbolized && len(loc.Line) != 0 { + return fmt.Errorf("unexpected symbolization for %#x: %v", loc.Address, loc.Line) + } + if wantSymbolized { + if len(loc.Line) != 1 { + return fmt.Errorf("expected symbolization for %#x: %v", loc.Address, loc.Line) + } + address := loc.Address - loc.Mapping.Start + if got, want := loc.Line[0].Function.Name, fmt.Sprintf("%#x", address); got != want { + return fmt.Errorf("symbolz %#x, got %s, want %s", address, got, want) + } + } + } + 
return nil +} + +func fetchSymbols(source, post string) ([]byte, error) { + var symbolz string + + addresses := strings.Split(post, "+") + // Do not symbolize the first symbol. + for _, address := range addresses[1:] { + symbolz += fmt.Sprintf("%s\t%s\n", address, address) + } + return []byte(symbolz), nil +} + +func TestAdjust(t *testing.T) { + for _, tc := range []struct { + addr uint64 + offset int64 + wantAdj uint64 + wantOverflow bool + }{{math.MaxUint64, 0, math.MaxUint64, false}, + {math.MaxUint64, 1, 0, true}, + {math.MaxUint64 - 1, 1, math.MaxUint64, false}, + {math.MaxUint64 - 1, 2, 0, true}, + {math.MaxInt64 + 1, math.MaxInt64, math.MaxUint64, false}, + {0, 0, 0, false}, + {0, -1, 0, true}, + {1, -1, 0, false}, + {2, -1, 1, false}, + {2, -2, 0, false}, + {2, -3, 0, true}, + {-math.MinInt64, math.MinInt64, 0, false}, + {-math.MinInt64 + 1, math.MinInt64, 1, false}, + {-math.MinInt64 - 1, math.MinInt64, 0, true}, + } { + if adj, overflow := adjust(tc.addr, tc.offset); adj != tc.wantAdj || overflow != tc.wantOverflow { + t.Errorf("adjust(%d, %d) = (%d, %t), want (%d, %t)", tc.addr, tc.offset, adj, overflow, tc.wantAdj, tc.wantOverflow) + } + } +} diff --git a/plugin/debug/pkg/internal/transport/transport.go b/plugin/debug/pkg/internal/transport/transport.go new file mode 100644 index 0000000..a46f4c7 --- /dev/null +++ b/plugin/debug/pkg/internal/transport/transport.go @@ -0,0 +1,131 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package transport provides a mechanism to send requests with https cert, +// key, and CA. +package transport + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "net/http" + "os" + "sync" + + "m7s.live/v5/plugin/debug/pkg/internal/plugin" +) + +type transport struct { + cert *string + key *string + ca *string + caCertPool *x509.CertPool + certs []tls.Certificate + initOnce sync.Once + initErr error +} + +const extraUsage = ` -tls_cert TLS client certificate file for fetching profile and symbols + -tls_key TLS private key file for fetching profile and symbols + -tls_ca TLS CA certs file for fetching profile and symbols` + +// New returns a round tripper for making requests with the +// specified cert, key, and ca. The flags tls_cert, tls_key, and tls_ca are +// added to the flagset to allow a user to specify the cert, key, and ca. If +// the flagset is nil, no flags will be added, and users will not be able to +// use these flags. +func New(flagset plugin.FlagSet) http.RoundTripper { + if flagset == nil { + return &transport{} + } + flagset.AddExtraUsage(extraUsage) + return &transport{ + cert: flagset.String("tls_cert", "", "TLS client certificate file for fetching profile and symbols"), + key: flagset.String("tls_key", "", "TLS private key file for fetching profile and symbols"), + ca: flagset.String("tls_ca", "", "TLS CA certs file for fetching profile and symbols"), + } +} + +// initialize uses the cert, key, and ca to initialize the certs +// to use these when making requests. 
+func (tr *transport) initialize() error { + var cert, key, ca string + if tr.cert != nil { + cert = *tr.cert + } + if tr.key != nil { + key = *tr.key + } + if tr.ca != nil { + ca = *tr.ca + } + + if cert != "" && key != "" { + tlsCert, err := tls.LoadX509KeyPair(cert, key) + if err != nil { + return fmt.Errorf("could not load certificate/key pair specified by -tls_cert and -tls_key: %v", err) + } + tr.certs = []tls.Certificate{tlsCert} + } else if cert == "" && key != "" { + return fmt.Errorf("-tls_key is specified, so -tls_cert must also be specified") + } else if cert != "" && key == "" { + return fmt.Errorf("-tls_cert is specified, so -tls_key must also be specified") + } + + if ca != "" { + caCertPool := x509.NewCertPool() + caCert, err := os.ReadFile(ca) + if err != nil { + return fmt.Errorf("could not load CA specified by -tls_ca: %v", err) + } + caCertPool.AppendCertsFromPEM(caCert) + tr.caCertPool = caCertPool + } + + return nil +} + +// RoundTrip executes a single HTTP transaction, returning +// a Response for the provided Request. +func (tr *transport) RoundTrip(req *http.Request) (*http.Response, error) { + tr.initOnce.Do(func() { + tr.initErr = tr.initialize() + }) + if tr.initErr != nil { + return nil, tr.initErr + } + + tlsConfig := &tls.Config{ + RootCAs: tr.caCertPool, + Certificates: tr.certs, + } + + if req.URL.Scheme == "https+insecure" { + // Make shallow copy of request, and req.URL, so the request's URL can be + // modified. + r := *req + *r.URL = *req.URL + req = &r + tlsConfig.InsecureSkipVerify = true + req.URL.Scheme = "https" + } + + transport := http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSClientConfig: tlsConfig, + } + + return transport.RoundTrip(req) +} diff --git a/plugin/debug/pkg/profile/encode.go b/plugin/debug/pkg/profile/encode.go new file mode 100644 index 0000000..8ce9d3c --- /dev/null +++ b/plugin/debug/pkg/profile/encode.go @@ -0,0 +1,596 @@ +// Copyright 2014 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "errors" + "sort" + "strings" +) + +func (p *Profile) decoder() []decoder { + return profileDecoder +} + +// preEncode populates the unexported fields to be used by encode +// (with suffix X) from the corresponding exported fields. The +// exported fields are cleared up to facilitate testing. +func (p *Profile) preEncode() { + strings := make(map[string]int) + addString(strings, "") + + for _, st := range p.SampleType { + st.typeX = addString(strings, st.Type) + st.unitX = addString(strings, st.Unit) + } + + for _, s := range p.Sample { + s.labelX = nil + var keys []string + for k := range s.Label { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := s.Label[k] + for _, v := range vs { + s.labelX = append(s.labelX, + label{ + keyX: addString(strings, k), + strX: addString(strings, v), + }, + ) + } + } + var numKeys []string + for k := range s.NumLabel { + numKeys = append(numKeys, k) + } + sort.Strings(numKeys) + for _, k := range numKeys { + keyX := addString(strings, k) + vs := s.NumLabel[k] + units := s.NumUnit[k] + for i, v := range vs { + var unitX int64 + if len(units) != 0 { + unitX = addString(strings, units[i]) + } + s.labelX = append(s.labelX, + label{ + keyX: keyX, + numX: v, + unitX: unitX, + }, + ) + } + } + s.locationIDX = make([]uint64, len(s.Location)) + for i, loc := range s.Location { + s.locationIDX[i] = loc.ID + } + } + + for 
_, m := range p.Mapping { + m.fileX = addString(strings, m.File) + m.buildIDX = addString(strings, m.BuildID) + } + + for _, l := range p.Location { + for i, ln := range l.Line { + if ln.Function != nil { + l.Line[i].functionIDX = ln.Function.ID + } else { + l.Line[i].functionIDX = 0 + } + } + if l.Mapping != nil { + l.mappingIDX = l.Mapping.ID + } else { + l.mappingIDX = 0 + } + } + for _, f := range p.Function { + f.nameX = addString(strings, f.Name) + f.systemNameX = addString(strings, f.SystemName) + f.filenameX = addString(strings, f.Filename) + } + + p.dropFramesX = addString(strings, p.DropFrames) + p.keepFramesX = addString(strings, p.KeepFrames) + + if pt := p.PeriodType; pt != nil { + pt.typeX = addString(strings, pt.Type) + pt.unitX = addString(strings, pt.Unit) + } + + p.commentX = nil + for _, c := range p.Comments { + p.commentX = append(p.commentX, addString(strings, c)) + } + + p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + p.docURLX = addString(strings, p.DocURL) + + p.stringTable = make([]string, len(strings)) + for s, i := range strings { + p.stringTable[i] = s + } +} + +func (p *Profile) encode(b *buffer) { + for _, x := range p.SampleType { + encodeMessage(b, 1, x) + } + for _, x := range p.Sample { + encodeMessage(b, 2, x) + } + for _, x := range p.Mapping { + encodeMessage(b, 3, x) + } + for _, x := range p.Location { + encodeMessage(b, 4, x) + } + for _, x := range p.Function { + encodeMessage(b, 5, x) + } + encodeStrings(b, 6, p.stringTable) + encodeInt64Opt(b, 7, p.dropFramesX) + encodeInt64Opt(b, 8, p.keepFramesX) + encodeInt64Opt(b, 9, p.TimeNanos) + encodeInt64Opt(b, 10, p.DurationNanos) + if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) { + encodeMessage(b, 11, p.PeriodType) + } + encodeInt64Opt(b, 12, p.Period) + encodeInt64s(b, 13, p.commentX) + encodeInt64(b, 14, p.defaultSampleTypeX) + encodeInt64Opt(b, 15, p.docURLX) +} + +var profileDecoder = []decoder{ + nil, // 0 + // repeated ValueType 
sample_type = 1 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.SampleType = append(pp.SampleType, x) + return decodeMessage(b, x) + }, + // repeated Sample sample = 2 + func(b *buffer, m message) error { + x := new(Sample) + pp := m.(*Profile) + pp.Sample = append(pp.Sample, x) + return decodeMessage(b, x) + }, + // repeated Mapping mapping = 3 + func(b *buffer, m message) error { + x := new(Mapping) + pp := m.(*Profile) + pp.Mapping = append(pp.Mapping, x) + return decodeMessage(b, x) + }, + // repeated Location location = 4 + func(b *buffer, m message) error { + x := new(Location) + x.Line = b.tmpLines[:0] // Use shared space temporarily + pp := m.(*Profile) + pp.Location = append(pp.Location, x) + err := decodeMessage(b, x) + b.tmpLines = x.Line[:0] + // Copy to shrink size and detach from shared space. + x.Line = append([]Line(nil), x.Line...) + return err + }, + // repeated Function function = 5 + func(b *buffer, m message) error { + x := new(Function) + pp := m.(*Profile) + pp.Function = append(pp.Function, x) + return decodeMessage(b, x) + }, + // repeated string string_table = 6 + func(b *buffer, m message) error { + err := decodeStrings(b, &m.(*Profile).stringTable) + if err != nil { + return err + } + if m.(*Profile).stringTable[0] != "" { + return errors.New("string_table[0] must be ''") + } + return nil + }, + // int64 drop_frames = 7 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) }, + // int64 keep_frames = 8 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) }, + // int64 time_nanos = 9 + func(b *buffer, m message) error { + if m.(*Profile).TimeNanos != 0 { + return errConcatProfile + } + return decodeInt64(b, &m.(*Profile).TimeNanos) + }, + // int64 duration_nanos = 10 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) }, + // ValueType period_type = 11 + func(b *buffer, m message) error { + x := 
new(ValueType) + pp := m.(*Profile) + pp.PeriodType = x + return decodeMessage(b, x) + }, + // int64 period = 12 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) }, + // repeated int64 comment = 13 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, + // int64 defaultSampleType = 14 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, + // string doc_link = 15; + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).docURLX) }, +} + +// postDecode takes the unexported fields populated by decode (with +// suffix X) and populates the corresponding exported fields. +// The unexported fields are cleared up to facilitate testing. +func (p *Profile) postDecode() error { + var err error + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + mappingIds := make([]*Mapping, len(p.Mapping)+1) + for _, m := range p.Mapping { + m.File, err = getString(p.stringTable, &m.fileX, err) + m.BuildID, err = getString(p.stringTable, &m.buildIDX, err) + if m.ID < uint64(len(mappingIds)) { + mappingIds[m.ID] = m + } else { + mappings[m.ID] = m + } + + // If this a main linux kernel mapping with a relocation symbol suffix + // ("[kernel.kallsyms]_text"), extract said suffix. + // It is fairly hacky to handle at this level, but the alternatives appear even worse. 
+ const prefix = "[kernel.kallsyms]" + if strings.HasPrefix(m.File, prefix) { + m.KernelRelocationSymbol = m.File[len(prefix):] + } + } + + functions := make(map[uint64]*Function, len(p.Function)) + functionIds := make([]*Function, len(p.Function)+1) + for _, f := range p.Function { + f.Name, err = getString(p.stringTable, &f.nameX, err) + f.SystemName, err = getString(p.stringTable, &f.systemNameX, err) + f.Filename, err = getString(p.stringTable, &f.filenameX, err) + if f.ID < uint64(len(functionIds)) { + functionIds[f.ID] = f + } else { + functions[f.ID] = f + } + } + + locations := make(map[uint64]*Location, len(p.Location)) + locationIds := make([]*Location, len(p.Location)+1) + for _, l := range p.Location { + if id := l.mappingIDX; id < uint64(len(mappingIds)) { + l.Mapping = mappingIds[id] + } else { + l.Mapping = mappings[id] + } + l.mappingIDX = 0 + for i, ln := range l.Line { + if id := ln.functionIDX; id != 0 { + l.Line[i].functionIDX = 0 + if id < uint64(len(functionIds)) { + l.Line[i].Function = functionIds[id] + } else { + l.Line[i].Function = functions[id] + } + } + } + if l.ID < uint64(len(locationIds)) { + locationIds[l.ID] = l + } else { + locations[l.ID] = l + } + } + + for _, st := range p.SampleType { + st.Type, err = getString(p.stringTable, &st.typeX, err) + st.Unit, err = getString(p.stringTable, &st.unitX, err) + } + + // Pre-allocate space for all locations. 
+ numLocations := 0 + for _, s := range p.Sample { + numLocations += len(s.locationIDX) + } + locBuffer := make([]*Location, numLocations) + + for _, s := range p.Sample { + if len(s.labelX) > 0 { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) + } + } + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits + } + } + + s.Location = locBuffer[:len(s.locationIDX)] + locBuffer = locBuffer[len(s.locationIDX):] + for i, lid := range s.locationIDX { + if lid < uint64(len(locationIds)) { + s.Location[i] = locationIds[lid] + } else { + s.Location[i] = locations[lid] + } + } + s.locationIDX = nil + } + + p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err) + p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err) + + if pt := p.PeriodType; pt == nil { + p.PeriodType = &ValueType{} + } + + if pt := p.PeriodType; pt != nil { + pt.Type, err = getString(p.stringTable, &pt.typeX, err) + pt.Unit, err = getString(p.stringTable, &pt.unitX, err) + } + + for _, i := range p.commentX { + var c string + c, err = getString(p.stringTable, &i, err) + p.Comments = append(p.Comments, c) + } + + p.commentX = nil + 
p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.DocURL, err = getString(p.stringTable, &p.docURLX, err) + p.stringTable = nil + return err +} + +// padStringArray pads arr with enough empty strings to make arr +// length l when arr's length is less than l. +func padStringArray(arr []string, l int) []string { + if l <= len(arr) { + return arr + } + return append(arr, make([]string, l-len(arr))...) +} + +func (p *ValueType) decoder() []decoder { + return valueTypeDecoder +} + +func (p *ValueType) encode(b *buffer) { + encodeInt64Opt(b, 1, p.typeX) + encodeInt64Opt(b, 2, p.unitX) +} + +var valueTypeDecoder = []decoder{ + nil, // 0 + // optional int64 type = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) }, + // optional int64 unit = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) }, +} + +func (p *Sample) decoder() []decoder { + return sampleDecoder +} + +func (p *Sample) encode(b *buffer) { + encodeUint64s(b, 1, p.locationIDX) + encodeInt64s(b, 2, p.Value) + for _, x := range p.labelX { + encodeMessage(b, 3, x) + } +} + +var sampleDecoder = []decoder{ + nil, // 0 + // repeated uint64 location = 1 + func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) }, + // repeated int64 value = 2 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) }, + // repeated Label label = 3 + func(b *buffer, m message) error { + s := m.(*Sample) + n := len(s.labelX) + s.labelX = append(s.labelX, label{}) + return decodeMessage(b, &s.labelX[n]) + }, +} + +func (p label) decoder() []decoder { + return labelDecoder +} + +func (p label) encode(b *buffer) { + encodeInt64Opt(b, 1, p.keyX) + encodeInt64Opt(b, 2, p.strX) + encodeInt64Opt(b, 3, p.numX) + encodeInt64Opt(b, 4, p.unitX) +} + +var labelDecoder = []decoder{ + nil, // 0 + // optional int64 key = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) 
}, + // optional int64 str = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) }, + // optional int64 num = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) }, + // optional int64 unit = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) }, +} + +func (p *Mapping) decoder() []decoder { + return mappingDecoder +} + +func (p *Mapping) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.Start) + encodeUint64Opt(b, 3, p.Limit) + encodeUint64Opt(b, 4, p.Offset) + encodeInt64Opt(b, 5, p.fileX) + encodeInt64Opt(b, 6, p.buildIDX) + encodeBoolOpt(b, 7, p.HasFunctions) + encodeBoolOpt(b, 8, p.HasFilenames) + encodeBoolOpt(b, 9, p.HasLineNumbers) + encodeBoolOpt(b, 10, p.HasInlineFrames) +} + +var mappingDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9 + func(b *buffer, m message) error { return decodeBool(b,
&m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10 +} + +func (p *Location) decoder() []decoder { + return locationDecoder +} + +func (p *Location) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.mappingIDX) + encodeUint64Opt(b, 3, p.Address) + for i := range p.Line { + encodeMessage(b, 4, &p.Line[i]) + } + encodeBoolOpt(b, 5, p.IsFolded) +} + +var locationDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3; + func(b *buffer, m message) error { // repeated Line line = 4 + pp := m.(*Location) + n := len(pp.Line) + pp.Line = append(pp.Line, Line{}) + return decodeMessage(b, &pp.Line[n]) + }, + func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5; +} + +func (p *Line) decoder() []decoder { + return lineDecoder +} + +func (p *Line) encode(b *buffer) { + encodeUint64Opt(b, 1, p.functionIDX) + encodeInt64Opt(b, 2, p.Line) + encodeInt64Opt(b, 3, p.Column) +} + +var lineDecoder = []decoder{ + nil, // 0 + // optional uint64 function_id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, + // optional int64 line = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, + // optional int64 column = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) }, +} + +func (p *Function) decoder() []decoder { + return functionDecoder +} + +func (p *Function) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeInt64Opt(b, 2, p.nameX) + encodeInt64Opt(b, 3, p.systemNameX) + encodeInt64Opt(b, 4, p.filenameX) + encodeInt64Opt(b, 5, p.StartLine) +} + +var 
functionDecoder = []decoder{ + nil, // 0 + // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) }, + // optional int64 function_name = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) }, + // optional int64 function_system_name = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) }, + // optional int64 filename = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) }, + // optional int64 start_line = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) }, +} + +func addString(strings map[string]int, s string) int64 { + i, ok := strings[s] + if !ok { + i = len(strings) + strings[s] = i + } + return int64(i) +} + +func getString(strings []string, strng *int64, err error) (string, error) { + if err != nil { + return "", err + } + s := int(*strng) + if s < 0 || s >= len(strings) { + return "", errMalformed + } + *strng = 0 + return strings[s], nil +} diff --git a/plugin/debug/pkg/profile/filter.go b/plugin/debug/pkg/profile/filter.go new file mode 100644 index 0000000..c794b93 --- /dev/null +++ b/plugin/debug/pkg/profile/filter.go @@ -0,0 +1,274 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +// Implements methods to filter samples from profiles.
+ +import "regexp" + +// FilterSamplesByName filters the samples in a profile and only keeps +// samples where at least one frame matches focus but none match ignore. +// Returns true if the corresponding regexp matched at least one sample. +func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { + if focus == nil && ignore == nil && hide == nil && show == nil { + fm = true // Missing focus implies a match + return + } + focusOrIgnore := make(map[uint64]bool) + hidden := make(map[uint64]bool) + for _, l := range p.Location { + if ignore != nil && l.matchesName(ignore) { + im = true + focusOrIgnore[l.ID] = false + } else if focus == nil || l.matchesName(focus) { + fm = true + focusOrIgnore[l.ID] = true + } + + if hide != nil && l.matchesName(hide) { + hm = true + l.Line = l.unmatchedLines(hide) + if len(l.Line) == 0 { + hidden[l.ID] = true + } + } + if show != nil { + l.Line = l.matchedLines(show) + if len(l.Line) == 0 { + hidden[l.ID] = true + } else { + hnm = true + } + } + } + + s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + if focusedAndNotIgnored(sample.Location, focusOrIgnore) { + if len(hidden) > 0 { + var locs []*Location + for _, loc := range sample.Location { + if !hidden[loc.ID] { + locs = append(locs, loc) + } + } + if len(locs) == 0 { + // Remove sample with no locations (by not adding it to s). + continue + } + sample.Location = locs + } + s = append(s, sample) + } + } + p.Sample = s + + return +} + +// ShowFrom drops all stack frames above the highest matching frame and returns +// whether a match was found. If showFrom is nil it returns false and does not +// modify the profile. +// +// Example: consider a sample with frames [A, B, C, B], where A is the root. +// ShowFrom(nil) returns false and has frames [A, B, C, B]. +// ShowFrom(A) returns true and has frames [A, B, C, B]. +// ShowFrom(B) returns true and has frames [B, C, B].
+// ShowFrom(C) returns true and has frames [C, B]. +// ShowFrom(D) returns false and drops the sample because no frames remain. +func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) { + if showFrom == nil { + return false + } + // showFromLocs stores location IDs that matched ShowFrom. + showFromLocs := make(map[uint64]bool) + // Apply to locations. + for _, loc := range p.Location { + if filterShowFromLocation(loc, showFrom) { + showFromLocs[loc.ID] = true + matched = true + } + } + // For all samples, strip locations after the highest matching one. + s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + for i := len(sample.Location) - 1; i >= 0; i-- { + if showFromLocs[sample.Location[i].ID] { + sample.Location = sample.Location[:i+1] + s = append(s, sample) + break + } + } + } + p.Sample = s + return matched +} + +// filterShowFromLocation tests a showFrom regex against a location, removes +// lines after the last match and returns whether a match was found. If the +// mapping is matched, then all lines are kept. +func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool { + if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) { + return true + } + if i := loc.lastMatchedLineIndex(showFrom); i >= 0 { + loc.Line = loc.Line[:i+1] + return true + } + return false +} + +// lastMatchedLineIndex returns the index of the last line that matches a regex, +// or -1 if no match is found. +func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int { + for i := len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return i + } + } + } + return -1 +} + +// FilterTagsByName filters the tags in a profile and only keeps +// tags that match show and not hide. 
+func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { + matchRemove := func(name string) bool { + matchShow := show == nil || show.MatchString(name) + matchHide := hide != nil && hide.MatchString(name) + + if matchShow { + sm = true + } + if matchHide { + hm = true + } + return !matchShow || matchHide + } + for _, s := range p.Sample { + for lab := range s.Label { + if matchRemove(lab) { + delete(s.Label, lab) + } + } + for lab := range s.NumLabel { + if matchRemove(lab) { + delete(s.NumLabel, lab) + } + } + } + return +} + +// matchesName returns whether the location matches the regular +// expression. It checks any available function names, file names, and +// mapping object filename. +func (loc *Location) matchesName(re *regexp.Regexp) bool { + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return true + } + } + } + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return true + } + return false +} + +// unmatchedLines returns the lines in the location that do not match +// the regular expression. +func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return nil + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// matchedLines returns the lines in the location that match +// the regular expression. 
+func (loc *Location) matchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return loc.Line + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// focusedAndNotIgnored looks up a slice of ids against a map of +// focused/ignored locations. The map only contains locations that are +// explicitly focused or ignored. Returns whether there is at least +// one focused location but no ignored locations. +func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { + var f bool + for _, loc := range locs { + if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { + if focus { + // Found focused location. Must keep searching in case there + // is an ignored one as well. + f = true + } else { + // Found ignored location. Can return false right away. + return false + } + } + } + return f +} + +// TagMatch selects tags for filtering +type TagMatch func(s *Sample) bool + +// FilterSamplesByTag removes all samples from the profile, except +// those that match focus and do not match the ignore regular +// expression. +func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) { + samples := make([]*Sample, 0, len(p.Sample)) + for _, s := range p.Sample { + focused, ignored := true, false + if focus != nil { + focused = focus(s) + } + if ignore != nil { + ignored = ignore(s) + } + fm = fm || focused + im = im || ignored + if focused && !ignored { + samples = append(samples, s) + } + } + p.Sample = samples + return +} diff --git a/plugin/debug/pkg/profile/filter_test.go b/plugin/debug/pkg/profile/filter_test.go new file mode 100644 index 0000000..4aea379 --- /dev/null +++ b/plugin/debug/pkg/profile/filter_test.go @@ -0,0 +1,600 @@ +// Copyright 2018 Google Inc. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "regexp" + "strings" + "testing" + + "m7s.live/v5/plugin/debug/pkg/internal/proftest" +) + +var mappings = []*Mapping{ + {ID: 1, Start: 0x10000, Limit: 0x40000, File: "map0", HasFunctions: true, HasFilenames: true, HasLineNumbers: true, HasInlineFrames: true}, + {ID: 2, Start: 0x50000, Limit: 0x70000, File: "map1", HasFunctions: true, HasFilenames: true, HasLineNumbers: true, HasInlineFrames: true}, +} + +var functions = []*Function{ + {ID: 1, Name: "fun0", SystemName: "fun0", Filename: "file0"}, + {ID: 2, Name: "fun1", SystemName: "fun1", Filename: "file1"}, + {ID: 3, Name: "fun2", SystemName: "fun2", Filename: "file2"}, + {ID: 4, Name: "fun3", SystemName: "fun3", Filename: "file3"}, + {ID: 5, Name: "fun4", SystemName: "fun4", Filename: "file4"}, + {ID: 6, Name: "fun5", SystemName: "fun5", Filename: "file5"}, + {ID: 7, Name: "fun6", SystemName: "fun6", Filename: "file6"}, + {ID: 8, Name: "fun7", SystemName: "fun7", Filename: "file7"}, + {ID: 9, Name: "fun8", SystemName: "fun8", Filename: "file8"}, + {ID: 10, Name: "fun9", SystemName: "fun9", Filename: "file9"}, + {ID: 11, Name: "fun10", SystemName: "fun10", Filename: "file10"}, +} + +var noInlinesLocs = []*Location{ + {ID: 1, Mapping: mappings[0], Address: 0x1000, Line: []Line{{Function: functions[0], Line: 1}}}, + {ID: 2, Mapping: mappings[0], Address: 0x2000, Line: []Line{{Function: functions[1], Line: 1}}}, + {ID: 
3, Mapping: mappings[0], Address: 0x3000, Line: []Line{{Function: functions[2], Line: 1}}}, + {ID: 4, Mapping: mappings[0], Address: 0x4000, Line: []Line{{Function: functions[3], Line: 1}}}, + {ID: 5, Mapping: mappings[0], Address: 0x5000, Line: []Line{{Function: functions[4], Line: 1}}}, + {ID: 6, Mapping: mappings[0], Address: 0x6000, Line: []Line{{Function: functions[5], Line: 1}}}, + {ID: 7, Mapping: mappings[0], Address: 0x7000, Line: []Line{{Function: functions[6], Line: 1}}}, + {ID: 8, Mapping: mappings[0], Address: 0x8000, Line: []Line{{Function: functions[7], Line: 1}}}, + {ID: 9, Mapping: mappings[0], Address: 0x9000, Line: []Line{{Function: functions[8], Line: 1}}}, + {ID: 10, Mapping: mappings[0], Address: 0x10000, Line: []Line{{Function: functions[9], Line: 1}}}, + {ID: 11, Mapping: mappings[1], Address: 0x11000, Line: []Line{{Function: functions[10], Line: 1}}}, +} + +var noInlinesProfile = &Profile{ + TimeNanos: 10000, + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}}, + Mapping: mappings, + Function: functions, + Location: noInlinesLocs, + Sample: []*Sample{ + {Value: []int64{1}, Location: []*Location{noInlinesLocs[0], noInlinesLocs[1], noInlinesLocs[2], noInlinesLocs[3]}}, + {Value: []int64{2}, Location: []*Location{noInlinesLocs[4], noInlinesLocs[5], noInlinesLocs[1], noInlinesLocs[6]}}, + {Value: []int64{3}, Location: []*Location{noInlinesLocs[7], noInlinesLocs[8]}}, + {Value: []int64{4}, Location: []*Location{noInlinesLocs[9], noInlinesLocs[4], noInlinesLocs[10], noInlinesLocs[7]}}, + }, +} + +var allNoInlinesSampleFuncs = []string{ + "fun0 fun1 fun2 fun3: 1", + "fun4 fun5 fun1 fun6: 2", + "fun7 fun8: 3", + "fun9 fun4 fun10 fun7: 4", +} + +var inlinesLocs = []*Location{ + {ID: 1, Mapping: mappings[0], Address: 0x1000, Line: []Line{{Function: functions[0], Line: 1}, {Function: functions[1], Line: 1}}}, + {ID: 2, Mapping: mappings[0], 
Address: 0x2000, Line: []Line{{Function: functions[2], Line: 1}, {Function: functions[3], Line: 1}}}, + {ID: 3, Mapping: mappings[0], Address: 0x3000, Line: []Line{{Function: functions[4], Line: 1}, {Function: functions[5], Line: 1}, {Function: functions[6], Line: 1}}}, +} + +var inlinesProfile = &Profile{ + TimeNanos: 10000, + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}}, + Mapping: mappings, + Function: functions, + Location: inlinesLocs, + Sample: []*Sample{ + {Value: []int64{1}, Location: []*Location{inlinesLocs[0], inlinesLocs[1]}}, + {Value: []int64{2}, Location: []*Location{inlinesLocs[2]}}, + }, +} + +var emptyLinesLocs = []*Location{ + {ID: 1, Mapping: mappings[0], Address: 0x1000, Line: []Line{{Function: functions[0], Line: 1}, {Function: functions[1], Line: 1}}}, + {ID: 2, Mapping: mappings[0], Address: 0x2000, Line: []Line{}}, + {ID: 3, Mapping: mappings[1], Address: 0x2000, Line: []Line{}}, +} + +var emptyLinesProfile = &Profile{ + TimeNanos: 10000, + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}}, + Mapping: mappings, + Function: functions, + Location: emptyLinesLocs, + Sample: []*Sample{ + {Value: []int64{1}, Location: []*Location{emptyLinesLocs[0], emptyLinesLocs[1]}}, + {Value: []int64{2}, Location: []*Location{emptyLinesLocs[2]}}, + {Value: []int64{3}, Location: []*Location{}}, + }, +} + +func TestFilterSamplesByName(t *testing.T) { + for _, tc := range []struct { + // name is the name of the test case. + name string + // profile is the profile that gets filtered. + profile *Profile + // These are the inputs to FilterSamplesByName(). + focus, ignore, hide, show *regexp.Regexp + // want{F,I,S,H}m are expected return values from FilterSamplesByName. 
+ wantFm, wantIm, wantSm, wantHm bool + // wantSampleFuncs contains expected stack functions and sample value after + // filtering, in the same order as in the profile. The format is as + // returned by sampleFuncs function below, which is "callee caller: ". + wantSampleFuncs []string + }{ + // No Filters + { + name: "empty filters keep all frames", + profile: noInlinesProfile, + wantFm: true, + wantSampleFuncs: allNoInlinesSampleFuncs, + }, + // Focus + { + name: "focus with no matches", + profile: noInlinesProfile, + focus: regexp.MustCompile("unknown"), + }, + { + name: "focus matches function names", + profile: noInlinesProfile, + focus: regexp.MustCompile("fun1"), + wantFm: true, + wantSampleFuncs: []string{ + "fun0 fun1 fun2 fun3: 1", + "fun4 fun5 fun1 fun6: 2", + "fun9 fun4 fun10 fun7: 4", + }, + }, + { + name: "focus matches file names", + profile: noInlinesProfile, + focus: regexp.MustCompile("file1"), + wantFm: true, + wantSampleFuncs: []string{ + "fun0 fun1 fun2 fun3: 1", + "fun4 fun5 fun1 fun6: 2", + "fun9 fun4 fun10 fun7: 4", + }, + }, + { + name: "focus matches mapping names", + profile: noInlinesProfile, + focus: regexp.MustCompile("map1"), + wantFm: true, + wantSampleFuncs: []string{ + "fun9 fun4 fun10 fun7: 4", + }, + }, + { + name: "focus matches inline functions", + profile: inlinesProfile, + focus: regexp.MustCompile("fun5"), + wantFm: true, + wantSampleFuncs: []string{ + "fun4 fun5 fun6: 2", + }, + }, + // Ignore + { + name: "ignore with no matches matches all samples", + profile: noInlinesProfile, + ignore: regexp.MustCompile("unknown"), + wantFm: true, + wantSampleFuncs: allNoInlinesSampleFuncs, + }, + { + name: "ignore matches function names", + profile: noInlinesProfile, + ignore: regexp.MustCompile("fun1"), + wantFm: true, + wantIm: true, + wantSampleFuncs: []string{ + "fun7 fun8: 3", + }, + }, + { + name: "ignore matches file names", + profile: noInlinesProfile, + ignore: regexp.MustCompile("file1"), + wantFm: true, + wantIm: true, + 
wantSampleFuncs: []string{ + "fun7 fun8: 3", + }, + }, + { + name: "ignore matches mapping names", + profile: noInlinesProfile, + ignore: regexp.MustCompile("map1"), + wantFm: true, + wantIm: true, + wantSampleFuncs: []string{ + "fun0 fun1 fun2 fun3: 1", + "fun4 fun5 fun1 fun6: 2", + "fun7 fun8: 3", + }, + }, + { + name: "ignore matches inline functions", + profile: inlinesProfile, + ignore: regexp.MustCompile("fun5"), + wantFm: true, + wantIm: true, + wantSampleFuncs: []string{ + "fun0 fun1 fun2 fun3: 1", + }, + }, + // Show + { + name: "show with no matches", + profile: noInlinesProfile, + show: regexp.MustCompile("unknown"), + wantFm: true, + }, + { + name: "show matches function names", + profile: noInlinesProfile, + show: regexp.MustCompile("fun1|fun2"), + wantFm: true, + wantSm: true, + wantSampleFuncs: []string{ + "fun1 fun2: 1", + "fun1: 2", + "fun10: 4", + }, + }, + { + name: "show matches file names", + profile: noInlinesProfile, + show: regexp.MustCompile("file1|file3"), + wantFm: true, + wantSm: true, + wantSampleFuncs: []string{ + "fun1 fun3: 1", + "fun1: 2", + "fun10: 4", + }, + }, + { + name: "show matches mapping names", + profile: noInlinesProfile, + show: regexp.MustCompile("map1"), + wantFm: true, + wantSm: true, + wantSampleFuncs: []string{ + "fun10: 4", + }, + }, + { + name: "show matches inline functions", + profile: inlinesProfile, + show: regexp.MustCompile("fun[03]"), + wantFm: true, + wantSm: true, + wantSampleFuncs: []string{ + "fun0 fun3: 1", + }, + }, + { + name: "show keeps all lines when matching both mapping and function", + profile: inlinesProfile, + show: regexp.MustCompile("map0|fun5"), + wantFm: true, + wantSm: true, + wantSampleFuncs: []string{ + "fun0 fun1 fun2 fun3: 1", + "fun4 fun5 fun6: 2", + }, + }, + // Hide + { + name: "hide with no matches", + profile: noInlinesProfile, + hide: regexp.MustCompile("unknown"), + wantFm: true, + wantSampleFuncs: allNoInlinesSampleFuncs, + }, + { + name: "hide matches function names", + 
profile: noInlinesProfile, + hide: regexp.MustCompile("fun1|fun2"), + wantFm: true, + wantHm: true, + wantSampleFuncs: []string{ + "fun0 fun3: 1", + "fun4 fun5 fun6: 2", + "fun7 fun8: 3", + "fun9 fun4 fun7: 4", + }, + }, + { + name: "hide matches file names", + profile: noInlinesProfile, + hide: regexp.MustCompile("file1|file3"), + wantFm: true, + wantHm: true, + wantSampleFuncs: []string{ + "fun0 fun2: 1", + "fun4 fun5 fun6: 2", + "fun7 fun8: 3", + "fun9 fun4 fun7: 4", + }, + }, + { + name: "hide matches mapping names", + profile: noInlinesProfile, + hide: regexp.MustCompile("map1"), + wantFm: true, + wantHm: true, + wantSampleFuncs: []string{ + "fun0 fun1 fun2 fun3: 1", + "fun4 fun5 fun1 fun6: 2", + "fun7 fun8: 3", + "fun9 fun4 fun7: 4", + }, + }, + { + name: "hide matches inline functions", + profile: inlinesProfile, + hide: regexp.MustCompile("fun[125]"), + wantFm: true, + wantHm: true, + wantSampleFuncs: []string{ + "fun0 fun3: 1", + "fun4 fun6: 2", + }, + }, + { + name: "hide drops all lines when matching both mapping and function", + profile: inlinesProfile, + hide: regexp.MustCompile("map0|fun5"), + wantFm: true, + wantHm: true, + }, + // Compound filters + { + name: "hides a stack matched by both focus and ignore", + profile: noInlinesProfile, + focus: regexp.MustCompile("fun1|fun7"), + ignore: regexp.MustCompile("fun1"), + wantFm: true, + wantIm: true, + wantSampleFuncs: []string{ + "fun7 fun8: 3", + }, + }, + { + name: "hides a function if both show and hide match it", + profile: noInlinesProfile, + show: regexp.MustCompile("fun1"), + hide: regexp.MustCompile("fun10"), + wantFm: true, + wantSm: true, + wantHm: true, + wantSampleFuncs: []string{ + "fun1: 1", + "fun1: 2", + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + p := tc.profile.Copy() + fm, im, hm, sm := p.FilterSamplesByName(tc.focus, tc.ignore, tc.hide, tc.show) + + type match struct{ fm, im, hm, sm bool } + if got, want := (match{fm: fm, im: im, hm: hm, sm: sm}), (match{fm: tc.wantFm, im: 
tc.wantIm, hm: tc.wantHm, sm: tc.wantSm}); got != want { + t.Errorf("match got %+v want %+v", got, want) + } + + if got, want := strings.Join(sampleFuncs(p), "\n")+"\n", strings.Join(tc.wantSampleFuncs, "\n")+"\n"; got != want { + diff, err := proftest.Diff([]byte(want), []byte(got)) + if err != nil { + t.Fatalf("failed to get diff: %v", err) + } + t.Errorf("FilterSamplesByName: got diff(want->got):\n%s", diff) + } + }) + } +} + +func TestShowFrom(t *testing.T) { + for _, tc := range []struct { + name string + profile *Profile + showFrom *regexp.Regexp + // wantMatch is the expected return value. + wantMatch bool + // wantSampleFuncs contains expected stack functions and sample value after + // filtering, in the same order as in the profile. The format is as + // returned by sampleFuncs function below, which is "callee caller: ". + wantSampleFuncs []string + }{ + { + name: "nil showFrom keeps all frames", + profile: noInlinesProfile, + wantMatch: false, + wantSampleFuncs: allNoInlinesSampleFuncs, + }, + { + name: "showFrom with no matches drops all samples", + profile: noInlinesProfile, + showFrom: regexp.MustCompile("unknown"), + wantMatch: false, + }, + { + name: "showFrom matches function names", + profile: noInlinesProfile, + showFrom: regexp.MustCompile("fun1"), + wantMatch: true, + wantSampleFuncs: []string{ + "fun0 fun1: 1", + "fun4 fun5 fun1: 2", + "fun9 fun4 fun10: 4", + }, + }, + { + name: "showFrom matches file names", + profile: noInlinesProfile, + showFrom: regexp.MustCompile("file1"), + wantMatch: true, + wantSampleFuncs: []string{ + "fun0 fun1: 1", + "fun4 fun5 fun1: 2", + "fun9 fun4 fun10: 4", + }, + }, + { + name: "showFrom matches mapping names", + profile: noInlinesProfile, + showFrom: regexp.MustCompile("map1"), + wantMatch: true, + wantSampleFuncs: []string{ + "fun9 fun4 fun10: 4", + }, + }, + { + name: "showFrom drops frames above highest of multiple matches", + profile: noInlinesProfile, + showFrom: regexp.MustCompile("fun[12]"), + wantMatch: 
true, + wantSampleFuncs: []string{ + "fun0 fun1 fun2: 1", + "fun4 fun5 fun1: 2", + "fun9 fun4 fun10: 4", + }, + }, + { + name: "showFrom matches inline functions", + profile: inlinesProfile, + showFrom: regexp.MustCompile("fun0|fun5"), + wantMatch: true, + wantSampleFuncs: []string{ + "fun0: 1", + "fun4 fun5: 2", + }, + }, + { + name: "showFrom drops frames above highest of multiple inline matches", + profile: inlinesProfile, + showFrom: regexp.MustCompile("fun[1245]"), + wantMatch: true, + wantSampleFuncs: []string{ + "fun0 fun1 fun2: 1", + "fun4 fun5: 2", + }, + }, + { + name: "showFrom keeps all lines when matching mapping and function", + profile: inlinesProfile, + showFrom: regexp.MustCompile("map0|fun5"), + wantMatch: true, + wantSampleFuncs: []string{ + "fun0 fun1 fun2 fun3: 1", + "fun4 fun5 fun6: 2", + }, + }, + { + name: "showFrom matches location with empty lines", + profile: emptyLinesProfile, + showFrom: regexp.MustCompile("map1"), + wantMatch: true, + wantSampleFuncs: []string{ + ": 2", + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + p := tc.profile.Copy() + + if gotMatch := p.ShowFrom(tc.showFrom); gotMatch != tc.wantMatch { + t.Errorf("match got %+v, want %+v", gotMatch, tc.wantMatch) + } + + if got, want := strings.Join(sampleFuncs(p), "\n")+"\n", strings.Join(tc.wantSampleFuncs, "\n")+"\n"; got != want { + diff, err := proftest.Diff([]byte(want), []byte(got)) + if err != nil { + t.Fatalf("failed to get diff: %v", err) + } + t.Errorf("profile samples got diff(want->got):\n%s", diff) + } + }) + } +} + +// sampleFuncs returns a slice of strings where each string represents one +// profile sample in the format " : ". This allows +// the expected values for test cases to be specified in human-readable +// strings. 
+func sampleFuncs(p *Profile) []string { + var ret []string + for _, s := range p.Sample { + var funcs []string + for _, loc := range s.Location { + for _, line := range loc.Line { + funcs = append(funcs, line.Function.Name) + } + } + ret = append(ret, fmt.Sprintf("%s: %d", strings.Join(funcs, " "), s.Value[0])) + } + return ret +} + +func TestTagFilter(t *testing.T) { + // Perform several forms of tag filtering on the test profile. + + type filterTestcase struct { + include, exclude *regexp.Regexp + im, em bool + count int + } + + countTags := func(p *Profile) map[string]bool { + tags := make(map[string]bool) + + for _, s := range p.Sample { + for l := range s.Label { + tags[l] = true + } + for l := range s.NumLabel { + tags[l] = true + } + } + return tags + } + + for tx, tc := range []filterTestcase{ + {nil, nil, true, false, 3}, + {regexp.MustCompile("notfound"), nil, false, false, 0}, + {regexp.MustCompile("key1"), nil, true, false, 1}, + {nil, regexp.MustCompile("key[12]"), true, true, 1}, + } { + prof := testProfile1.Copy() + gim, gem := prof.FilterTagsByName(tc.include, tc.exclude) + if gim != tc.im { + t.Errorf("Filter #%d, got include match=%v, want %v", tx, gim, tc.im) + } + if gem != tc.em { + t.Errorf("Filter #%d, got exclude match=%v, want %v", tx, gem, tc.em) + } + if tags := countTags(prof); len(tags) != tc.count { + t.Errorf("Filter #%d, got %d tags[%v], want %d", tx, len(tags), tags, tc.count) + } + } +} diff --git a/plugin/debug/pkg/profile/index.go b/plugin/debug/pkg/profile/index.go new file mode 100644 index 0000000..bef1d60 --- /dev/null +++ b/plugin/debug/pkg/profile/index.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "strconv" + "strings" +) + +// SampleIndexByName returns the appropriate index for a value of sample index. +// If numeric, it returns the number, otherwise it looks up the text in the +// profile sample types. +func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) { + if sampleIndex == "" { + if dst := p.DefaultSampleType; dst != "" { + for i, t := range sampleTypes(p) { + if t == dst { + return i, nil + } + } + } + // By default select the last sample value + return len(p.SampleType) - 1, nil + } + if i, err := strconv.Atoi(sampleIndex); err == nil { + if i < 0 || i >= len(p.SampleType) { + return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1) + } + return i, nil + } + + // Remove the inuse_ prefix to support legacy pprof options + // "inuse_space" and "inuse_objects" for profiles containing types + // "space" and "objects". 
+ noInuse := strings.TrimPrefix(sampleIndex, "inuse_") + for i, t := range p.SampleType { + if t.Type == sampleIndex || t.Type == noInuse { + return i, nil + } + } + + return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p)) +} + +func sampleTypes(p *Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} diff --git a/plugin/debug/pkg/profile/index_test.go b/plugin/debug/pkg/profile/index_test.go new file mode 100644 index 0000000..f846b59 --- /dev/null +++ b/plugin/debug/pkg/profile/index_test.go @@ -0,0 +1,114 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package profile + +import ( + "testing" +) + +func TestSampleIndexByName(t *testing.T) { + for _, c := range []struct { + desc string + sampleTypes []string + defaultSampleType string + index string + want int + wantError bool + }{ + { + desc: "use last by default", + index: "", + want: 1, + sampleTypes: []string{"zero", "default"}, + }, + { + desc: "honour specified default", + index: "", + want: 1, + defaultSampleType: "default", + sampleTypes: []string{"zero", "default", "two"}, + }, + { + desc: "invalid default is ignored", + index: "", + want: 2, + defaultSampleType: "non-existent", + sampleTypes: []string{"zero", "one", "default"}, + }, + { + desc: "index by int", + index: "0", + want: 0, + sampleTypes: []string{"zero", "one", "two"}, + }, + { + desc: "index by int ignores default", + index: "0", + want: 0, + defaultSampleType: "default", + sampleTypes: []string{"zero", "default", "two"}, + }, + { + desc: "index by name", + index: "two", + want: 2, + sampleTypes: []string{"zero", "one", "two", "three"}, + }, + { + desc: "index by name ignores default", + index: "zero", + want: 0, + defaultSampleType: "default", + sampleTypes: []string{"zero", "default", "two"}, + }, + { + desc: "out of bound int causes error", + index: "100", + wantError: true, + sampleTypes: []string{"zero", "default"}, + }, + { + desc: "unknown name causes error", + index: "does not exist", + wantError: true, + sampleTypes: []string{"zero", "default"}, + }, + { + desc: "'inused_{x}' recognized for legacy '{x}'", + index: "inuse_zero", + want: 0, + sampleTypes: []string{"zero", "default"}, + }, + } { + p := &Profile{ + DefaultSampleType: c.defaultSampleType, + SampleType: []*ValueType{}, + } + for _, st := range c.sampleTypes { + p.SampleType = append(p.SampleType, &ValueType{Type: st, Unit: "milliseconds"}) + } + + got, err := p.SampleIndexByName(c.index) + + switch { + case c.wantError && err == nil: + t.Errorf("%s: error should have been returned not index=%d, err=%v", c.desc, got, 
err) + case !c.wantError && err != nil: + t.Errorf("%s: unexpected got index=%d, err=%v; wanted index=%d, err=nil", c.desc, got, err, c.want) + case !c.wantError && got != c.want: + t.Errorf("%s: got index=%d, want index=%d", c.desc, got, c.want) + } + } +} diff --git a/plugin/debug/pkg/profile/legacy_java_profile.go b/plugin/debug/pkg/profile/legacy_java_profile.go new file mode 100644 index 0000000..4580bab --- /dev/null +++ b/plugin/debug/pkg/profile/legacy_java_profile.go @@ -0,0 +1,315 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert java legacy profiles into +// the profile.proto format. + +package profile + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var ( + attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) + javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) + javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) + javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) + javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) +) + +// javaCPUProfile returns a new Profile from profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. 
+func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, + } + var err error + var locs map[uint64]*Location + if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { + return nil, err + } + + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaProfile returns a new profile from heapz or contentionz +// data. b is the profile bytes after the header. +func parseJavaProfile(b []byte) (*Profile, error) { + h := bytes.SplitAfterN(b, []byte("\n"), 2) + if len(h) < 2 { + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{}, + } + header := string(bytes.TrimSpace(h[0])) + + var err error + var pType string + switch header { + case "--- heapz 1 ---": + pType = "heap" + case "--- contentionz 1 ---": + pType = "contention" + default: + return nil, errUnrecognized + } + + if b, err = parseJavaHeader(pType, h[1], p); err != nil { + return nil, err + } + var locs map[uint64]*Location + if b, locs, err = parseJavaSamples(pType, b, p); err != nil { + return nil, err + } + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaHeader parses the attribute section on a java profile and +// populates a profile. Returns the remainder of the buffer after all +// attributes. 
+func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + h := attributeRx.FindStringSubmatch(line) + if h == nil { + // Not a valid attribute, exit. + return b, nil + } + + attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2]) + var err error + switch pType + "/" + attribute { + case "heap/format", "cpu/format", "contention/format": + if value != "java" { + return nil, errUnrecognized + } + case "heap/resolution": + p.SampleType = []*ValueType{ + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: value}, + } + case "contention/resolution": + p.SampleType = []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: value}, + } + case "contention/sampling period": + p.PeriodType = &ValueType{ + Type: "contentions", Unit: "count", + } + if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + case "contention/ms since reset": + millis, err := strconv.ParseInt(value, 0, 64) + if err != nil { + return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err) + } + p.DurationNanos = millis * 1000 * 1000 + default: + return nil, errUnrecognized + } + } + // Grab next line. + b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, nil +} + +// parseJavaSamples parses the samples from a java profile and +// populates the Samples in a profile. Returns the remainder of the +// buffer after the samples. 
+func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + locs := make(map[uint64]*Location) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + sample := javaSampleRx.FindStringSubmatch(line) + if sample == nil { + // Not a valid sample, exit. + return b, locs, nil + } + + // Java profiles have data/fields inverted compared to other + // profile types. + var err error + value1, value2, value3 := sample[2], sample[1], sample[3] + addrs, err := parseHexAddresses(value3) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + var sloc []*Location + for _, addr := range addrs { + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + s := &Sample{ + Value: make([]int64, 2), + Location: sloc, + } + + if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil { + return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err) + } + + switch pType { + case "heap": + const javaHeapzSamplingRate = 524288 // 512K + if s.Value[0] == 0 { + return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line) + } + s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}} + s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate) + case "contention": + if period := p.Period; period != 0 { + s.Value[0] = s.Value[0] * p.Period + s.Value[1] = s.Value[1] * p.Period + } + } + p.Sample = append(p.Sample, s) + } + // Grab next line. 
+ b = b[nextNewLine+1:] + nextNewLine = bytes.IndexByte(b, byte('\n')) + } + return b, locs, nil +} + +// parseJavaLocations parses the location information in a java +// profile and populates the Locations in a profile. It uses the +// location addresses from the profile as both the ID of each +// location. +func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error { + r := bytes.NewBuffer(b) + fns := make(map[string]*Function) + for { + line, err := r.ReadString('\n') + if err != nil { + if err != io.EOF { + return err + } + if line == "" { + break + } + } + + if line = strings.TrimSpace(line); line == "" { + continue + } + + jloc := javaLocationRx.FindStringSubmatch(line) + if len(jloc) != 3 { + continue + } + addr, err := strconv.ParseUint(jloc[1], 16, 64) + if err != nil { + return fmt.Errorf("parsing sample %s: %v", line, err) + } + loc := locs[addr] + if loc == nil { + // Unused/unseen + continue + } + var lineFunc, lineFile string + var lineNo int64 + + if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 { + // Found a line of the form: "function (file:line)" + lineFunc, lineFile = fileLine[1], fileLine[2] + if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 { + lineNo = n + } + } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 { + // If there's not a file:line, it's a shared library path. + // The path isn't interesting, so just give the .so. + lineFunc, lineFile = filePath[1], filepath.Base(filePath[2]) + } else if strings.Contains(jloc[2], "generated stub/JIT") { + lineFunc = "STUB" + } else { + // Treat whole line as the function name. This is used by the + // java agent for internal states such as "GC" or "VM". 
+ lineFunc = jloc[2] + } + fn := fns[lineFunc] + + if fn == nil { + fn = &Function{ + Name: lineFunc, + SystemName: lineFunc, + Filename: lineFile, + } + fns[lineFunc] = fn + p.Function = append(p.Function, fn) + } + loc.Line = []Line{ + { + Function: fn, + Line: lineNo, + }, + } + loc.Address = 0 + } + + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + + return nil +} diff --git a/plugin/debug/pkg/profile/legacy_profile.go b/plugin/debug/pkg/profile/legacy_profile.go new file mode 100644 index 0000000..8d07fd6 --- /dev/null +++ b/plugin/debug/pkg/profile/legacy_profile.go @@ -0,0 +1,1228 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert legacy profiles into the +// profile.proto format. 
+ +package profile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" +) + +var ( + countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) + countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) + + heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) + heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) + + contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) + + hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) + + growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) + + fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) + + threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) + threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) + + // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. + // Recommended format: + // Start End object file name offset(optional) linker build id + // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 + spaceDigits = `\s+[[:digit:]]+` + hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` + oSpace = `\s*` + // Capturing expressions. + cHex = `(?:0x)?([[:xdigit:]]+)` + cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` + cSpaceString = `(?:\s+(\S+))?` + cSpaceHex = `(?:\s+([[:xdigit:]]+))?` + cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` + cPerm = `(?:\s+([-rwxp]+))?` + + procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) + briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) + + // Regular expression to parse log data, of the form: + // ... file:line] msg... 
+ logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) +) + +func isSpaceOrComment(line string) bool { + trimmed := strings.TrimSpace(line) + return len(trimmed) == 0 || trimmed[0] == '#' +} + +// parseGoCount parses a Go count profile (e.g., threadcreate or +// goroutine) and returns a new Profile. +func parseGoCount(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip comments at the beginning of the file. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + if err := s.Err(); err != nil { + return nil, err + } + m := countStartRE.FindStringSubmatch(s.Text()) + if m == nil { + return nil, errUnrecognized + } + profileType := m[1] + p := &Profile{ + PeriodType: &ValueType{Type: profileType, Unit: "count"}, + Period: 1, + SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, + } + locations := make(map[uint64]*Location) + for s.Scan() { + line := s.Text() + if isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + m := countRE.FindStringSubmatch(line) + if m == nil { + return nil, errMalformed + } + n, err := strconv.ParseInt(m[1], 0, 64) + if err != nil { + return nil, errMalformed + } + fields := strings.Fields(m[2]) + locs := make([]*Location, 0, len(fields)) + for _, stk := range fields { + addr, err := strconv.ParseUint(stk, 0, 64) + if err != nil { + return nil, errMalformed + } + // Adjust all frames by -1 to land on top of the call instruction. 
+ addr-- + loc := locations[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locations[addr] = loc + p.Location = append(p.Location, loc) + } + locs = append(locs, loc) + } + p.Sample = append(p.Sample, &Sample{ + Location: locs, + Value: []int64{n}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +// remapLocationIDs ensures there is a location for each address +// referenced by a sample, and remaps the samples to point to the new +// location ids. +func (p *Profile) remapLocationIDs() { + seen := make(map[*Location]bool, len(p.Location)) + var locs []*Location + + for _, s := range p.Sample { + for _, l := range s.Location { + if seen[l] { + continue + } + l.ID = uint64(len(locs) + 1) + locs = append(locs, l) + seen[l] = true + } + } + p.Location = locs +} + +func (p *Profile) remapFunctionIDs() { + seen := make(map[*Function]bool, len(p.Function)) + var fns []*Function + + for _, l := range p.Location { + for _, ln := range l.Line { + fn := ln.Function + if fn == nil || seen[fn] { + continue + } + fn.ID = uint64(len(fns) + 1) + fns = append(fns, fn) + seen[fn] = true + } + } + p.Function = fns +} + +// remapMappingIDs matches location addresses with existing mappings +// and updates them appropriately. This is O(N*M), if this ever shows +// up as a bottleneck, evaluate sorting the mappings and doing a +// binary search, which would make it O(N*log(M)). +func (p *Profile) remapMappingIDs() { + // Some profile handlers will incorrectly set regions for the main + // executable if its section is remapped. Fix them through heuristics. + + if len(p.Mapping) > 0 { + // Remove the initial mapping if named '/anon_hugepage' and has a + // consecutive adjacent mapping. 
+ if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { + if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { + p.Mapping = p.Mapping[1:] + } + } + } + + // Subtract the offset from the start of the main mapping if it + // ends up at a recognizable start address. + if len(p.Mapping) > 0 { + const expectedStart = 0x400000 + if m := p.Mapping[0]; m.Start-m.Offset == expectedStart { + m.Start = expectedStart + m.Offset = 0 + } + } + + // Associate each location with an address to the corresponding + // mapping. Create fake mapping if a suitable one isn't found. + var fake *Mapping +nextLocation: + for _, l := range p.Location { + a := l.Address + if l.Mapping != nil || a == 0 { + continue + } + for _, m := range p.Mapping { + if m.Start <= a && a < m.Limit { + l.Mapping = m + continue nextLocation + } + } + // Work around legacy handlers failing to encode the first + // part of mappings split into adjacent ranges. + for _, m := range p.Mapping { + if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start { + m.Start -= m.Offset + m.Offset = 0 + l.Mapping = m + continue nextLocation + } + } + // If there is still no mapping, create a fake one. + // This is important for the Go legacy handler, which produced + // no mappings. + if fake == nil { + fake = &Mapping{ + ID: 1, + Limit: ^uint64(0), + } + p.Mapping = append(p.Mapping, fake) + } + l.Mapping = fake + } + + // Reset all mapping IDs. 
+ for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +var cpuInts = []func([]byte) (uint64, []byte){ + get32l, + get32b, + get64l, + get64b, +} + +func get32l(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] +} + +func get32b(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] +} + +func get64l(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] +} + +func get64b(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] +} + +// parseCPU parses a profilez legacy profile and returns a newly +// populated Profile. +// +// The general format for profilez samples is a sequence of words in +// binary format. The first words are a header with the following data: +// +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. 
+func parseCPU(b []byte) (*Profile, error) { + var parse func([]byte) (uint64, []byte) + var n1, n2, n3, n4, n5 uint64 + for _, parse = range cpuInts { + var tmp []byte + n1, tmp = parse(b) + n2, tmp = parse(tmp) + n3, tmp = parse(tmp) + n4, tmp = parse(tmp) + n5, tmp = parse(tmp) + + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { + b = tmp + return cpuProfile(b, int64(n4), parse) + } + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { + b = tmp + return javaCPUProfile(b, int64(n4), parse) + } + } + return nil, errUnrecognized +} + +// cpuProfile returns a new Profile from C++ profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + }, + } + var err error + if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { + return nil, err + } + + // If *most* samples have the same second-to-the-bottom frame, it + // strongly suggests that it is an uninteresting artifact of + // measurement -- a stack frame pushed by the signal handler. The + // bottom frame is always correct as it is picked up from the signal + // structure, not the stack. Check if this is the case and if so, + // remove. + + // Remove up to two frames. + maxiter := 2 + // Allow one different sample for this many samples with the same + // second-to-last frame. 
+ similarSamples := 32 + margin := len(p.Sample) / similarSamples + + for iter := 0; iter < maxiter; iter++ { + addr1 := make(map[uint64]int) + for _, s := range p.Sample { + if len(s.Location) > 1 { + a := s.Location[1].Address + addr1[a] = addr1[a] + 1 + } + } + + for id1, count := range addr1 { + if count >= len(p.Sample)-margin { + // Found uninteresting frame, strip it out from all samples + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[1].Address == id1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } + break + } + } + } + + if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +func cleanupDuplicateLocations(p *Profile) { + // The profile handler may duplicate the leaf frame, because it gets + // its address both from stack unwinding and from the signal + // context. Detect this and delete the duplicate, which has been + // adjusted by -1. The leaf address should not be adjusted as it is + // not a call. + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } +} + +// parseCPUSamples parses a collection of profilez samples from a +// profile. +// +// profilez samples are a repeated sequence of stack frames of the +// form: +// +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// +// The last stack trace is of the form: +// +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 +// +// Addresses from stack traces may point to the next instruction after +// each call. Optionally adjust by -1 to land somewhere on the actual +// call (except for the leaf, which is not a call). 
+func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { + locs := make(map[uint64]*Location) + for len(b) > 0 { + var count, nstk uint64 + count, b = parse(b) + nstk, b = parse(b) + if b == nil || nstk > uint64(len(b)/4) { + return nil, nil, errUnrecognized + } + var sloc []*Location + addrs := make([]uint64, nstk) + for i := 0; i < int(nstk); i++ { + addrs[i], b = parse(b) + } + + if count == 0 && nstk == 1 && addrs[0] == 0 { + // End of data marker + break + } + for i, addr := range addrs { + if adjust && i > 0 { + addr-- + } + loc := locs[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locs[addr] = loc + p.Location = append(p.Location, loc) + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, + &Sample{ + Value: []int64{int64(count), int64(count) * p.Period}, + Location: sloc, + }) + } + // Reached the end without finding the EOD marker. + return b, locs, nil +} + +// parseHeap parses a heapz legacy or a growthz profile and +// returns a newly populated Profile. +func parseHeap(b []byte) (p *Profile, err error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + p = &Profile{} + + sampling := "" + hasAlloc := false + + line := s.Text() + p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} + if header := heapHeaderRE.FindStringSubmatch(line); header != nil { + sampling, p.Period, hasAlloc, err = parseHeapHeader(line) + if err != nil { + return nil, err + } + } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else { + return nil, errUnrecognized + } + + if hasAlloc { + // Put alloc before inuse so that default pprof selection + // will prefer inuse_space. 
+ p.SampleType = []*ValueType{ + {Type: "alloc_objects", Unit: "count"}, + {Type: "alloc_space", Unit: "bytes"}, + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + } + } else { + p.SampleType = []*ValueType{ + {Type: "objects", Unit: "count"}, + {Type: "space", Unit: "bytes"}, + } + } + + locs := make(map[uint64]*Location) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + + if isSpaceOrComment(line) { + continue + } + + if isMemoryMapSentinel(line) { + break + } + + value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) + if err != nil { + return nil, err + } + + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + NumLabel: map[string][]int64{"bytes": {blocksize}}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { + header := heapHeaderRE.FindStringSubmatch(line) + if header == nil { + return "", 0, false, errUnrecognized + } + + if len(header[6]) > 0 { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { + return "", 0, false, errUnrecognized + } + } + + if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { + hasAlloc = true + } + + switch header[5] { + case "heapz_v2", "heap_v2": + return "v2", period, hasAlloc, nil + case "heapprofile": + return "", 1, hasAlloc, nil + case "heap": + return "v2", period / 2, hasAlloc, nil + default: + 
return "", 0, false, errUnrecognized + } +} + +// parseHeapSample parses a single row from a heap profile into a new Sample. +func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { + sampleData := heapSampleRE.FindStringSubmatch(line) + if len(sampleData) != 6 { + return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) + } + + // This is a local-scoped helper function to avoid needing to pass + // around rate, sampling and many return parameters. + addValues := func(countString, sizeString string, label string) error { + count, err := strconv.ParseInt(countString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + size, err := strconv.ParseInt(sizeString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + if count == 0 && size != 0 { + return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size) + } + if count != 0 { + blocksize = size / count + if sampling == "v2" { + count, size = scaleHeapSample(count, size, rate) + } + } + value = append(value, count, size) + return nil + } + + if includeAlloc { + if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil { + return nil, 0, nil, err + } + } + + if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil { + return nil, 0, nil, err + } + + addrs, err = parseHexAddresses(sampleData[5]) + if err != nil { + return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, blocksize, addrs, nil +} + +// parseHexAddresses extracts hex numbers from a string, attempts to convert +// each to an unsigned 64-bit number and returns the resulting numbers as a +// slice, or an error if the string contains hex numbers which are too large to +// handle (which means a malformed profile). 
+func parseHexAddresses(s string) ([]uint64, error) { + hexStrings := hexNumberRE.FindAllString(s, -1) + var addrs []uint64 + for _, s := range hexStrings { + if addr, err := strconv.ParseUint(s, 0, 64); err == nil { + addrs = append(addrs, addr) + } else { + return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s) + } + } + return addrs, nil +} + +// scaleHeapSample adjusts the data from a heapz Sample to +// account for its probability of appearing in the collected +// data. heapz profiles are a sampling of the memory allocations +// requests in a program. We estimate the unsampled value by dividing +// each collected sample by its probability of appearing in the +// profile. heapz v2 profiles rely on a poisson process to determine +// which samples to collect, based on the desired average collection +// rate R. The probability of a sample of size S to appear in that +// profile is 1-exp(-S/R). +func scaleHeapSample(count, size, rate int64) (int64, int64) { + if count == 0 || size == 0 { + return 0, 0 + } + + if rate <= 1 { + // if rate==1 all samples were collected so no adjustment is needed. + // if rate<1 treat as unknown and skip scaling. + return count, size + } + + avgSize := float64(size) / float64(count) + scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) + + return int64(float64(count) * scale), int64(float64(size) * scale) +} + +// parseContention parses a mutex or contention profile. There are 2 cases: +// "--- contentionz " for legacy C++ profiles (and backwards compatibility) +// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. 
func parseContention(b []byte) (*Profile, error) {
	s := bufio.NewScanner(bytes.NewBuffer(b))
	if !s.Scan() {
		// Empty input: report a scan error if there was one, otherwise the
		// profile is simply not in a format we recognize.
		if err := s.Err(); err != nil {
			return nil, err
		}
		return nil, errUnrecognized
	}

	// Accept any of the known headers; anything else is not a contention
	// profile.
	switch l := s.Text(); {
	case strings.HasPrefix(l, "--- contentionz "):
	case strings.HasPrefix(l, "--- mutex:"):
	case strings.HasPrefix(l, "--- contention:"):
	default:
		return nil, errUnrecognized
	}

	p := &Profile{
		PeriodType: &ValueType{Type: "contentions", Unit: "count"},
		Period:     1,
		SampleType: []*ValueType{
			{Type: "contentions", Unit: "count"},
			{Type: "delay", Unit: "nanoseconds"},
		},
	}

	var cpuHz int64
	// Parse text of the form "attribute = value" before the samples.
	const delimiter = "="
	for s.Scan() {
		line := s.Text()
		if line = strings.TrimSpace(line); isSpaceOrComment(line) {
			continue
		}
		if strings.HasPrefix(line, "---") {
			break
		}
		attr := strings.SplitN(line, delimiter, 2)
		if len(attr) != 2 {
			// First non "key = value" line starts the sample section.
			break
		}
		key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])
		var err error
		switch key {
		case "cycles/second":
			if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil {
				return nil, errUnrecognized
			}
		case "sampling period":
			if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil {
				return nil, errUnrecognized
			}
		case "ms since reset":
			ms, err := strconv.ParseInt(val, 0, 64)
			if err != nil {
				return nil, errUnrecognized
			}
			p.DurationNanos = ms * 1000 * 1000
		case "format":
			// CPP contentionz profiles don't have format.
			return nil, errUnrecognized
		case "resolution":
			// CPP contentionz profiles don't have resolution.
			return nil, errUnrecognized
		case "discarded samples":
		default:
			return nil, errUnrecognized
		}
	}
	if err := s.Err(); err != nil {
		return nil, err
	}

	// Sample section: one stack per line until a "---" section marker.
	// Note the loop consumes s.Text() first because the attribute loop above
	// already advanced the scanner onto the first sample line.
	locs := make(map[uint64]*Location)
	for {
		line := strings.TrimSpace(s.Text())
		if strings.HasPrefix(line, "---") {
			break
		}
		if !isSpaceOrComment(line) {
			value, addrs, err := parseContentionSample(line, p.Period, cpuHz)
			if err != nil {
				return nil, err
			}
			var sloc []*Location
			for _, addr := range addrs {
				// Addresses from stack traces point to the next instruction after
				// each call. Adjust by -1 to land somewhere on the actual call.
				addr--
				loc := locs[addr]
				if locs[addr] == nil {
					loc = &Location{
						Address: addr,
					}
					p.Location = append(p.Location, loc)
					locs[addr] = loc
				}
				sloc = append(sloc, loc)
			}
			p.Sample = append(p.Sample, &Sample{
				Value:    value,
				Location: sloc,
			})
		}
		if !s.Scan() {
			break
		}
	}
	if err := s.Err(); err != nil {
		return nil, err
	}

	if err := parseAdditionalSections(s, p); err != nil {
		return nil, err
	}

	return p, nil
}

// parseContentionSample parses a single row from a contention profile
// into a new Sample.
func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) {
	sampleData := contentionSampleRE.FindStringSubmatch(line)
	if sampleData == nil {
		return nil, nil, errUnrecognized
	}

	v1, err := strconv.ParseInt(sampleData[1], 10, 64)
	if err != nil {
		return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
	}
	v2, err := strconv.ParseInt(sampleData[2], 10, 64)
	if err != nil {
		return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
	}

	// Unsample values if period and cpuHz are available.
	// - Delays are scaled to cycles and then to nanoseconds.
	// - Contentions are scaled to cycles.
	if period > 0 {
		if cpuHz > 0 {
			cpuGHz := float64(cpuHz) / 1e9
			v1 = int64(float64(v1) * float64(period) / cpuGHz)
		}
		v2 = v2 * period
	}

	// Returned order is {contentions, delay} to match the profile's
	// SampleType declaration.
	value = []int64{v2, v1}
	addrs, err = parseHexAddresses(sampleData[3])
	if err != nil {
		return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
	}

	return value, addrs, nil
}

// parseThread parses a Threadz profile and returns a new Profile.
func parseThread(b []byte) (*Profile, error) {
	s := bufio.NewScanner(bytes.NewBuffer(b))
	// Skip past comments and empty lines seeking a real header.
	for s.Scan() && isSpaceOrComment(s.Text()) {
	}

	line := s.Text()
	if m := threadzStartRE.FindStringSubmatch(line); m != nil {
		// Advance over initial comments until first stack trace.
		for s.Scan() {
			if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") {
				break
			}
		}
	} else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
		return nil, errUnrecognized
	}

	p := &Profile{
		SampleType: []*ValueType{{Type: "thread", Unit: "count"}},
		PeriodType: &ValueType{Type: "thread", Unit: "count"},
		Period:     1,
	}

	locs := make(map[uint64]*Location)
	// Recognize each thread and populate profile samples.
	for !isMemoryMapSentinel(line) {
		if strings.HasPrefix(line, "---- no stack trace for") {
			break
		}
		if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 {
			return nil, errUnrecognized
		}

		var addrs []uint64
		var err error
		line, addrs, err = parseThreadSample(s)
		if err != nil {
			return nil, err
		}
		if len(addrs) == 0 {
			// We got a --same as previous threads--. Bump counters.
			if len(p.Sample) > 0 {
				s := p.Sample[len(p.Sample)-1]
				s.Value[0]++
			}
			continue
		}

		var sloc []*Location
		for i, addr := range addrs {
			// Addresses from stack traces point to the next instruction after
			// each call. Adjust by -1 to land somewhere on the actual call
			// (except for the leaf, which is not a call).
			if i > 0 {
				addr--
			}
			loc := locs[addr]
			if locs[addr] == nil {
				loc = &Location{
					Address: addr,
				}
				p.Location = append(p.Location, loc)
				locs[addr] = loc
			}
			sloc = append(sloc, loc)
		}

		p.Sample = append(p.Sample, &Sample{
			Value:    []int64{1},
			Location: sloc,
		})
	}

	if err := parseAdditionalSections(s, p); err != nil {
		return nil, err
	}

	cleanupDuplicateLocations(p)
	return p, nil
}

// parseThreadSample parses a symbolized or unsymbolized stack trace.
// Returns the first line after the traceback, the sample (or nil if
// it hits a 'same-as-previous' marker) and an error.
func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) {
	var line string
	sameAsPrevious := false
	for s.Scan() {
		line = strings.TrimSpace(s.Text())
		if line == "" {
			continue
		}

		if strings.HasPrefix(line, "---") {
			// Start of the next thread (or section); the traceback is done.
			break
		}
		if strings.Contains(line, "same as previous thread") {
			sameAsPrevious = true
			continue
		}

		curAddrs, err := parseHexAddresses(line)
		if err != nil {
			return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err)
		}
		addrs = append(addrs, curAddrs...)
	}
	if err := s.Err(); err != nil {
		return "", nil, err
	}
	if sameAsPrevious {
		// nil addrs signals the caller to bump the previous sample's count.
		return line, nil, nil
	}
	return line, addrs, nil
}

// parseAdditionalSections parses any additional sections in the
// profile, ignoring any unrecognized sections.
func parseAdditionalSections(s *bufio.Scanner, p *Profile) error {
	// Skip forward until the memory map sentinel (or EOF).
	for !isMemoryMapSentinel(s.Text()) && s.Scan() {
	}
	if err := s.Err(); err != nil {
		return err
	}
	return p.ParseMemoryMapFromScanner(s)
}

// ParseProcMaps parses a memory map in the format of /proc/self/maps.
// ParseMemoryMap should be called after setting on a profile to
// associate locations to the corresponding mapping based on their
// address.
func ParseProcMaps(rd io.Reader) ([]*Mapping, error) {
	s := bufio.NewScanner(rd)
	return parseProcMapsFromScanner(s)
}

// parseProcMapsFromScanner reads mapping entries line by line, supporting
// both the standard /proc/self/maps layout and legacy variants. Lines of the
// form "attr=value" define substitutions applied to subsequent lines.
func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) {
	var mapping []*Mapping

	var attrs []string
	const delimiter = "="
	r := strings.NewReplacer()
	for s.Scan() {
		line := r.Replace(removeLoggingInfo(s.Text()))
		m, err := parseMappingEntry(line)
		if err != nil {
			if err == errUnrecognized {
				// Recognize assignments of the form: attr=value, and replace
				// $attr with value on subsequent mappings.
				if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 {
					attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]))
					r = strings.NewReplacer(attrs...)
				}
				// Ignore any unrecognized entries
				continue
			}
			return nil, err
		}
		if m == nil {
			// Recognized but intentionally skipped (e.g. non-executable).
			continue
		}
		mapping = append(mapping, m)
	}
	if err := s.Err(); err != nil {
		return nil, err
	}
	return mapping, nil
}

// removeLoggingInfo detects and removes log prefix entries generated
// by the glog package. If no logging prefix is detected, the string
// is returned unmodified.
func removeLoggingInfo(line string) string {
	if match := logInfoRE.FindStringIndex(line); match != nil {
		return line[match[1]:]
	}
	return line
}

// ParseMemoryMap parses a memory map in the format of
// /proc/self/maps, and overrides the mappings in the current profile.
// It renumbers the samples and locations in the profile correspondingly.
func (p *Profile) ParseMemoryMap(rd io.Reader) error {
	return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd))
}

// ParseMemoryMapFromScanner parses a memory map in the format of
// /proc/self/maps or a variety of legacy format, and overrides the
// mappings in the current profile. It renumbers the samples and
// locations in the profile correspondingly.
func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error {
	mapping, err := parseProcMapsFromScanner(s)
	if err != nil {
		return err
	}
	p.Mapping = append(p.Mapping, mapping...)
	// Re-establish invariants: merge/clean mappings and renumber all IDs.
	p.massageMappings()
	p.remapLocationIDs()
	p.remapFunctionIDs()
	p.remapMappingIDs()
	return nil
}

// parseMappingEntry parses one mapping line in either the /proc/self/maps
// format or the brief legacy format. It returns (nil, nil) for recognized
// entries that should be skipped, and errUnrecognized for lines that are not
// mapping entries at all.
func parseMappingEntry(l string) (*Mapping, error) {
	var start, end, perm, file, offset, buildID string
	if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 {
		start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5]
	} else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 {
		start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6]
	} else {
		return nil, errUnrecognized
	}

	var err error
	mapping := &Mapping{
		File:    file,
		BuildID: buildID,
	}
	if perm != "" && !strings.Contains(perm, "x") {
		// Skip non-executable entries.
		return nil, nil
	}
	if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil {
		return nil, errUnrecognized
	}
	if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil {
		return nil, errUnrecognized
	}
	if offset != "" {
		if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil {
			return nil, errUnrecognized
		}
	}
	return mapping, nil
}

// memoryMapSentinels are the markers that introduce the memory-map section
// in legacy profiles.
var memoryMapSentinels = []string{
	"--- Memory map: ---",
	"MAPPED_LIBRARIES:",
}

// isMemoryMapSentinel returns true if the string contains one of the
// known sentinels for memory map information.
func isMemoryMapSentinel(line string) bool {
	for _, s := range memoryMapSentinels {
		if strings.Contains(line, s) {
			return true
		}
	}
	return false
}

// addLegacyFrameInfo sets the frame drop/keep regexps appropriate for the
// profile's type (heap, contention, or CPU) so that profiler-internal frames
// can be trimmed from stacks.
func (p *Profile) addLegacyFrameInfo() {
	switch {
	case isProfileType(p, heapzSampleTypes):
		p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr
	case isProfileType(p, contentionzSampleTypes):
		p.DropFrames, p.KeepFrames = lockRxStr, ""
	default:
		p.DropFrames, p.KeepFrames = cpuProfilerRxStr, ""
	}
}

// heapzSampleTypes lists the sample-type signatures that identify a profile
// as a heap profile.
var heapzSampleTypes = [][]string{
	{"allocations", "size"}, // early Go pprof profiles
	{"objects", "space"},
	{"inuse_objects", "inuse_space"},
	{"alloc_objects", "alloc_space"},
	{"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles
}

// contentionzSampleTypes lists the sample-type signatures that identify a
// profile as a contention/mutex profile.
var contentionzSampleTypes = [][]string{
	{"contentions", "delay"},
}

// isProfileType reports whether p's sample types exactly match (in order)
// any of the signatures in types.
func isProfileType(p *Profile, types [][]string) bool {
	st := p.SampleType
nextType:
	for _, t := range types {
		if len(st) != len(t) {
			continue
		}

		for i := range st {
			if st[i].Type != t[i] {
				continue nextType
			}
		}
		return true
	}
	return false
}

var allocRxStr = strings.Join([]string{
	// POSIX entry points.
	`calloc`,
	`cfree`,
	`malloc`,
	`free`,
	`memalign`,
	`do_memalign`,
	`(__)?posix_memalign`,
	`pvalloc`,
	`valloc`,
	`realloc`,

	// TC malloc.
	`tcmalloc::.*`,
	`tc_calloc`,
	`tc_cfree`,
	`tc_malloc`,
	`tc_free`,
	`tc_memalign`,
	`tc_posix_memalign`,
	`tc_pvalloc`,
	`tc_valloc`,
	`tc_realloc`,
	`tc_new`,
	`tc_delete`,
	`tc_newarray`,
	`tc_deletearray`,
	`tc_new_nothrow`,
	`tc_newarray_nothrow`,

	// Memory-allocation routines on OS X.
	`malloc_zone_malloc`,
	`malloc_zone_calloc`,
	`malloc_zone_valloc`,
	`malloc_zone_realloc`,
	`malloc_zone_memalign`,
	`malloc_zone_free`,

	// Go runtime
	`runtime\..*`,

	// Other misc. memory allocation routines
	`BaseArena::.*`,
	`(::)?do_malloc_no_errno`,
	`(::)?do_malloc_pages`,
	`(::)?do_malloc`,
	`DoSampledAllocation`,
	`MallocedMemBlock::MallocedMemBlock`,
	`_M_allocate`,
	`__builtin_(vec_)?delete`,
	`__builtin_(vec_)?new`,
	`__gnu_cxx::new_allocator::allocate`,
	`__libc_malloc`,
	`__malloc_alloc_template::allocate`,
	`allocate`,
	`cpp_alloc`,
	`operator new(\[\])?`,
	`simple_alloc::allocate`,
}, `|`)

var allocSkipRxStr = strings.Join([]string{
	// Preserve Go runtime frames that appear in the middle/bottom of
	// the stack.
	`runtime\.panic`,
	`runtime\.reflectcall`,
	`runtime\.call[0-9]*`,
}, `|`)

var cpuProfilerRxStr = strings.Join([]string{
	`ProfileData::Add`,
	`ProfileData::prof_handler`,
	`CpuProfiler::prof_handler`,
	`__pthread_sighandler`,
	`__restore`,
}, `|`)

var lockRxStr = strings.Join([]string{
	`RecordLockProfileData`,
	`(base::)?RecordLockProfileData.*`,
	`(base::)?SubmitMutexProfileData.*`,
	`(base::)?SubmitSpinLockProfileData.*`,
	`(base::Mutex::)?AwaitCommon.*`,
	`(base::Mutex::)?Unlock.*`,
	`(base::Mutex::)?UnlockSlow.*`,
	`(base::Mutex::)?ReaderUnlock.*`,
	`(base::MutexLock::)?~MutexLock.*`,
	`(Mutex::)?AwaitCommon.*`,
	`(Mutex::)?Unlock.*`,
	`(Mutex::)?UnlockSlow.*`,
	`(Mutex::)?ReaderUnlock.*`,
	`(MutexLock::)?~MutexLock.*`,
	`(SpinLock::)?Unlock.*`,
	`(SpinLock::)?SlowUnlock.*`,
	`(SpinLockHolder::)?~SpinLockHolder.*`,
}, `|`)
diff --git a/plugin/debug/pkg/profile/legacy_profile_test.go b/plugin/debug/pkg/profile/legacy_profile_test.go
new file mode 100644
index 0000000..6ba0e33
--- /dev/null
+++ b/plugin/debug/pkg/profile/legacy_profile_test.go
@@ -0,0 +1,321 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package profile

import (
	"bytes"
	"fmt"
	"reflect"
	"strconv"
	"strings"
	"testing"
)

// TestLegacyProfileType verifies isProfileType against the declared heap and
// contention sample-type tables, including negative cases with partial or
// reordered signatures.
func TestLegacyProfileType(t *testing.T) {
	type testcase struct {
		sampleTypes []string
		typeSet     [][]string
		want        bool
		setName     string
	}

	heap := heapzSampleTypes
	cont := contentionzSampleTypes
	testcases := []testcase{
		// True cases
		{[]string{"allocations", "size"}, heap, true, "heapzSampleTypes"},
		{[]string{"objects", "space"}, heap, true, "heapzSampleTypes"},
		{[]string{"inuse_objects", "inuse_space"}, heap, true, "heapzSampleTypes"},
		{[]string{"alloc_objects", "alloc_space"}, heap, true, "heapzSampleTypes"},
		{[]string{"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, heap, true, "heapzSampleTypes"},
		{[]string{"contentions", "delay"}, cont, true, "contentionzSampleTypes"},
		// False cases
		{[]string{"objects"}, heap, false, "heapzSampleTypes"},
		{[]string{"objects", "unknown"}, heap, false, "heapzSampleTypes"},
		{[]string{"inuse_objects", "inuse_space", "alloc_objects", "alloc_space"}, heap, false, "heapzSampleTypes"},
		{[]string{"contentions", "delay"}, heap, false, "heapzSampleTypes"},
		{[]string{"samples", "cpu"}, heap, false, "heapzSampleTypes"},
		{[]string{"samples", "cpu"}, cont, false, "contentionzSampleTypes"},
	}

	for _, tc := range testcases {
		p := profileOfType(tc.sampleTypes)
		if got := isProfileType(p, tc.typeSet); got != tc.want {
			t.Error("isProfileType({"+strings.Join(tc.sampleTypes, ",")+"},", tc.setName, "), got", got, "want", tc.want)
		}
	}
}

func TestCpuParse(t *testing.T) {
	// profileString is a legacy encoded profile, represented by words separated by ":"
	// Each sample has the form value : N : stack1..stackN
	// EOF is represented as "0:1:0"
	profileString := "1:3:100:999:100:"                                       // sample with bogus 999 and duplicate leaf
	profileString += "1:5:200:999:200:501:502:"                               // sample with bogus 999 and duplicate leaf
	profileString += "1:12:300:999:300:601:602:603:604:605:606:607:608:609:" // sample with bogus 999 and duplicate leaf
	profileString += "0:1:0000"                                               // EOF -- must use 4 bytes for the final zero

	p, err := cpuProfile([]byte(profileString), 1, parseString)
	if err != nil {
		t.Fatal(err)
	}

	// The parser is expected to drop the bogus 999 entries and the
	// duplicated leaves, remapping the remaining addresses.
	if err := checkTestSample(p, []uint64{100}); err != nil {
		t.Error(err)
	}
	if err := checkTestSample(p, []uint64{200, 500, 501}); err != nil {
		t.Error(err)
	}
	if err := checkTestSample(p, []uint64{300, 600, 601, 602, 603, 604, 605, 606, 607, 608}); err != nil {
		t.Error(err)
	}
}

// parseString is a word parser for the ":"-separated test encoding used by
// TestCpuParse; it returns the next numeric word and the remaining input.
func parseString(b []byte) (uint64, []byte) {
	slices := bytes.SplitN(b, []byte(":"), 2)
	var value, remainder []byte
	if len(slices) > 0 {
		value = slices[0]
	}
	if len(slices) > 1 {
		remainder = slices[1]
	}
	v, _ := strconv.ParseUint(string(value), 10, 64)
	return v, remainder
}

// checkTestSample reports whether p contains a sample whose location
// addresses are exactly want.
func checkTestSample(p *Profile, want []uint64) error {
	for _, s := range p.Sample {
		got := []uint64{}
		for _, l := range s.Location {
			got = append(got, l.Address)
		}
		if reflect.DeepEqual(got, want) {
			return nil
		}
	}
	return fmt.Errorf("Could not find sample : %v", want)
}

// profileOfType creates an empty profile with only sample types set,
// for testing purposes only.
func profileOfType(sampleTypes []string) *Profile {
	p := new(Profile)
	p.SampleType = make([]*ValueType, len(sampleTypes))
	for i, t := range sampleTypes {
		p.SampleType[i] = new(ValueType)
		p.SampleType[i].Type = t
	}
	return p
}

// TestParseMappingEntry exercises ParseProcMaps over the standard
// /proc/self/maps format, the brief legacy format, glog-prefixed lines, and
// entries that must be skipped (non-executable mappings).
func TestParseMappingEntry(t *testing.T) {
	for _, test := range []*struct {
		entry string
		want  *Mapping
	}{
		{
			entry: "00400000-02e00000 r-xp 00000000 00:00 0",
			want: &Mapping{
				Start: 0x400000,
				Limit: 0x2e00000,
			},
		},
		{
			entry: "02e00000-02e8a000 r-xp 02a00000 00:00 15953927    /foo/bin",
			want: &Mapping{
				Start:  0x2e00000,
				Limit:  0x2e8a000,
				Offset: 0x2a00000,
				File:   "/foo/bin",
			},
		},
		{
			entry: "02e00000-02e8a000 r-xp 000000 00:00 15953927    [vdso]",
			want: &Mapping{
				Start: 0x2e00000,
				Limit: 0x2e8a000,
				File:  "[vdso]",
			},
		},
		{
			entry: "  02e00000-02e8a000: /foo/bin (@2a00000)",
			want: &Mapping{
				Start:  0x2e00000,
				Limit:  0x2e8a000,
				Offset: 0x2a00000,
				File:   "/foo/bin",
			},
		},
		{
			entry: "  02e00000-02e8a000: /foo/bin (deleted)",
			want: &Mapping{
				Start: 0x2e00000,
				Limit: 0x2e8a000,
				File:  "/foo/bin",
			},
		},
		{
			entry: "  02e00000-02e8a000: /foo/bin",
			want: &Mapping{
				Start: 0x2e00000,
				Limit: 0x2e8a000,
				File:  "/foo/bin",
			},
		},
		{
			entry: "  02e00000-02e8a000: [vdso]",
			want: &Mapping{
				Start: 0x2e00000,
				Limit: 0x2e8a000,
				File:  "[vdso]",
			},
		},
		{entry: "0xff6810563000 0xff6810565000 r-xp abc_exe 87c4d547f895cfd6a370e08dc5c5ee7bd4199d5b",
			want: &Mapping{
				Start:   0xff6810563000,
				Limit:   0xff6810565000,
				File:    "abc_exe",
				BuildID: "87c4d547f895cfd6a370e08dc5c5ee7bd4199d5b",
			},
		},
		{entry: "7f5e5435e000-7f5e5455e000 --xp 00002000 00:00 1531      myprogram",
			want: &Mapping{
				Start:  0x7f5e5435e000,
				Limit:  0x7f5e5455e000,
				Offset: 0x2000,
				File:   "myprogram",
			},
		},
		{entry: "7f7472710000-7f7472722000 r-xp 00000000 fc:00 790190    /usr/lib/libfantastic-1.2.so",
			want: &Mapping{
				Start: 0x7f7472710000,
				Limit: 0x7f7472722000,
				File:  "/usr/lib/libfantastic-1.2.so",
			},
		},
		{entry: "7f47a542f000-7f47a5447000: /lib/libpthread-2.15.so",
			want: &Mapping{
				Start: 0x7f47a542f000,
				Limit: 0x7f47a5447000,
				File:  "/lib/libpthread-2.15.so",
			},
		},
		{entry: "0x40000-0x80000 /path/to/binary (@FF00) abc123456",
			want: &Mapping{
				Start:   0x40000,
				Limit:   0x80000,
				File:    "/path/to/binary",
				Offset:  0xFF00,
				BuildID: "abc123456",
			},
		},
		{entry: "W1220 15:07:15.201776    8272 logger.cc:12033] --- Memory map: ---\n" +
			"0x40000-0x80000 /path/to/binary (@FF00) abc123456",
			want: &Mapping{
				Start:   0x40000,
				Limit:   0x80000,
				File:    "/path/to/binary",
				Offset:  0xFF00,
				BuildID: "abc123456",
			},
		},
		{entry: "W1220 15:07:15.201776    8272 logger.cc:12033] --- Memory map: ---\n" +
			"W1220 15:07:15.202776    8272 logger.cc:12036]   0x40000-0x80000 /path/to/binary (@FF00) abc123456",
			want: &Mapping{
				Start:   0x40000,
				Limit:   0x80000,
				File:    "/path/to/binary",
				Offset:  0xFF00,
				BuildID: "abc123456",
			},
		},
		{entry: "7f5e5435e000-7f5e5455e000 ---p 00002000 00:00 1531      myprogram",
			want:  nil,
		},
	} {
		got, err := ParseProcMaps(strings.NewReader(test.entry))
		if err != nil {
			t.Errorf("%s: %v", test.entry, err)
			continue
		}
		if test.want == nil {
			// A nil want means the entry must be recognized but skipped.
			if got, want := len(got), 0; got != want {
				t.Errorf("%s: got %d mappings, want %d", test.entry, got, want)
			}
			continue
		}
		if got, want := len(got), 1; got != want {
			t.Errorf("%s: got %d mappings, want %d", test.entry, got, want)
			continue
		}
		if !reflect.DeepEqual(test.want, got[0]) {
			t.Errorf("%s want=%v got=%v", test.entry, test.want, got[0])
		}
	}
}

// TestParseThreadProfileWithInvalidAddress ensures a stack address too large
// for uint64 is rejected with a descriptive error.
func TestParseThreadProfileWithInvalidAddress(t *testing.T) {
	profile := `
--- threadz 1 ---

--- Thread 7eff063d9940 (name: main/25376) stack: ---
  PC: 0x40b688 0x4d5f51 0x40be31 0x473add693e639c6f0
--- Memory map: ---
  00400000-00fcb000: /home/rsilvera/cppbench/cppbench_server_main.unstripped
	`
	wantErr := "failed to parse as hex 64-bit number: 0x473add693e639c6f0"
	if _, gotErr := parseThread([]byte(profile)); !strings.Contains(gotErr.Error(), wantErr) {
		t.Errorf("parseThread(): got error %q, want error containing %q", gotErr, wantErr)
	}
}

// TestParseGoCount checks that the profile type is taken verbatim from the
// "<type> profile: total N" header, ignoring comment lines.
func TestParseGoCount(t *testing.T) {
	for _, test := range []struct {
		in  string
		typ string
	}{
		{
			in: `# ignored comment

threadcreate profile: total 123
`,
			typ: "threadcreate",
		},
		{
			in: `
# ignored comment
goroutine profile: total 123456
`,
			typ: "goroutine",
		},
		{
			in: `
sub/dir-ect_o.ry profile: total 999
`,
			typ: "sub/dir-ect_o.ry",
		},
	} {
		t.Run(test.typ, func(t *testing.T) {
			p, err := parseGoCount([]byte(test.in))
			if err != nil {
				t.Fatalf("parseGoCount(%q) = %v", test.in, err)
			}
			if typ := p.PeriodType.Type; typ != test.typ {
				t.Fatalf("parseGoCount(%q).PeriodType.Type = %q want %q", test.in, typ, test.typ)
			}
		})
	}
}
diff --git a/plugin/debug/pkg/profile/merge.go b/plugin/debug/pkg/profile/merge.go
new file mode 100644
index 0000000..ba4d746
--- /dev/null
+++ b/plugin/debug/pkg/profile/merge.go
@@ -0,0 +1,674 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package profile

import (
	"encoding/binary"
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// Compact performs garbage collection on a profile to remove any
// unreferenced fields. This is useful to reduce the size of a profile
// after samples or locations have been removed.
func (p *Profile) Compact() *Profile {
	// Merging a profile with itself drops anything unreferenced.
	p, _ = Merge([]*Profile{p})
	return p
}

// Merge merges all the profiles in profs into a single Profile.
// Returns a new profile independent of the input profiles. The merged
// profile is compacted to eliminate unused samples, locations,
// functions and mappings. Profiles must have identical profile sample
// and period types or the merge will fail. profile.Period of the
// resulting profile will be the maximum of all profiles, and
// profile.TimeNanos will be the earliest nonzero one. Merges are
// associative with the caveat of the first profile having some
// specialization in how headers are combined. There may be other
// subtleties now or in the future regarding associativity.
func Merge(srcs []*Profile) (*Profile, error) {
	if len(srcs) == 0 {
		return nil, fmt.Errorf("no profiles to merge")
	}
	p, err := combineHeaders(srcs)
	if err != nil {
		return nil, err
	}

	pm := &profileMerger{
		p:         p,
		samples:   make(map[sampleKey]*Sample, len(srcs[0].Sample)),
		locations: make(map[locationKey]*Location, len(srcs[0].Location)),
		functions: make(map[functionKey]*Function, len(srcs[0].Function)),
		mappings:  make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
	}

	for _, src := range srcs {
		// Clear the profile-specific hash tables
		pm.locationsByID = makeLocationIDMap(len(src.Location))
		pm.functionsByID = make(map[uint64]*Function, len(src.Function))
		pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))

		if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
			// The Mapping list has the property that the first mapping
			// represents the main binary. Take the first Mapping we see,
			// otherwise the operations below will add mappings in an
			// arbitrary order.
			pm.mapMapping(src.Mapping[0])
		}

		for _, s := range src.Sample {
			if !isZeroSample(s) {
				pm.mapSample(s)
			}
		}
	}

	for _, s := range p.Sample {
		if isZeroSample(s) {
			// If there are any zero samples, re-merge the profile to GC
			// them.
			return Merge([]*Profile{p})
		}
	}

	return p, nil
}

// Normalize normalizes the source profile by multiplying each value in profile by the
// ratio of the sum of the base profile's values of that sample type to the sum of the
// source profile's value of that sample type.
func (p *Profile) Normalize(pb *Profile) error {

	if err := p.compatible(pb); err != nil {
		return err
	}

	baseVals := make([]int64, len(p.SampleType))
	for _, s := range pb.Sample {
		for i, v := range s.Value {
			baseVals[i] += v
		}
	}

	srcVals := make([]int64, len(p.SampleType))
	for _, s := range p.Sample {
		for i, v := range s.Value {
			srcVals[i] += v
		}
	}

	normScale := make([]float64, len(baseVals))
	for i := range baseVals {
		if srcVals[i] == 0 {
			// Avoid division by zero; values of this type scale to zero.
			normScale[i] = 0.0
		} else {
			normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
		}
	}
	p.ScaleN(normScale)
	return nil
}

// isZeroSample reports whether every value of s is zero.
func isZeroSample(s *Sample) bool {
	for _, v := range s.Value {
		if v != 0 {
			return false
		}
	}
	return true
}

// profileMerger carries the state needed to fold several source profiles
// into one destination profile p.
type profileMerger struct {
	p *Profile

	// Memoization tables within a profile.
	locationsByID locationIDMap
	functionsByID map[uint64]*Function
	mappingsByID  map[uint64]mapInfo

	// Memoization tables for profile entities.
	samples   map[sampleKey]*Sample
	locations map[locationKey]*Location
	functions map[functionKey]*Function
	mappings  map[mappingKey]*Mapping
}

// mapInfo records a merged mapping plus the address delta to apply when
// translating source addresses into it.
type mapInfo struct {
	m      *Mapping
	offset int64
}

func (pm *profileMerger) mapSample(src *Sample) *Sample {
	// Check memoization table
	k := pm.sampleKey(src)
	if ss, ok := pm.samples[k]; ok {
		// Identical stack/labels already present: accumulate values.
		for i, v := range src.Value {
			ss.Value[i] += v
		}
		return ss
	}

	// Make new sample.
	s := &Sample{
		Location: make([]*Location, len(src.Location)),
		Value:    make([]int64, len(src.Value)),
		Label:    make(map[string][]string, len(src.Label)),
		NumLabel: make(map[string][]int64, len(src.NumLabel)),
		NumUnit:  make(map[string][]string, len(src.NumLabel)),
	}
	for i, l := range src.Location {
		s.Location[i] = pm.mapLocation(l)
	}
	// Deep-copy label maps so the merged profile is independent of src.
	for k, v := range src.Label {
		vv := make([]string, len(v))
		copy(vv, v)
		s.Label[k] = vv
	}
	for k, v := range src.NumLabel {
		u := src.NumUnit[k]
		vv := make([]int64, len(v))
		uu := make([]string, len(u))
		copy(vv, v)
		copy(uu, u)
		s.NumLabel[k] = vv
		s.NumUnit[k] = uu
	}
	copy(s.Value, src.Value)
	pm.samples[k] = s
	pm.p.Sample = append(pm.p.Sample, s)
	return s
}

// sampleKey builds a canonical byte-string key for a sample from its merged
// location IDs and its (sorted) label sets.
func (pm *profileMerger) sampleKey(sample *Sample) sampleKey {
	// Accumulate contents into a string.
	var buf strings.Builder
	buf.Grow(64) // Heuristic to avoid extra allocs

	// encode a number
	putNumber := func(v uint64) {
		var num [binary.MaxVarintLen64]byte
		n := binary.PutUvarint(num[:], v)
		buf.Write(num[:n])
	}

	// encode a string prefixed with its length.
	putDelimitedString := func(s string) {
		putNumber(uint64(len(s)))
		buf.WriteString(s)
	}

	for _, l := range sample.Location {
		// Get the location in the merged profile, which may have a different ID.
		if loc := pm.mapLocation(l); loc != nil {
			putNumber(loc.ID)
		}
	}
	putNumber(0) // Delimiter

	for _, l := range sortedKeys1(sample.Label) {
		putDelimitedString(l)
		values := sample.Label[l]
		putNumber(uint64(len(values)))
		for _, v := range values {
			putDelimitedString(v)
		}
	}

	for _, l := range sortedKeys2(sample.NumLabel) {
		putDelimitedString(l)
		values := sample.NumLabel[l]
		putNumber(uint64(len(values)))
		for _, v := range values {
			putNumber(uint64(v))
		}
		units := sample.NumUnit[l]
		putNumber(uint64(len(units)))
		for _, v := range units {
			putDelimitedString(v)
		}
	}

	return sampleKey(buf.String())
}

type sampleKey string

// sortedKeys1 returns the sorted keys found in a string->[]string map.
//
// Note: this is currently non-generic since github pprof runs golint,
// which does not support generics. When that issue is fixed, it can
// be merged with sortedKeys2 and made into a generic function.
func sortedKeys1(m map[string][]string) []string {
	if len(m) == 0 {
		return nil
	}
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	return keys
}

// sortedKeys2 returns the sorted keys found in a string->[]int64 map.
//
// Note: this is currently non-generic since github pprof runs golint,
// which does not support generics. When that issue is fixed, it can
// be merged with sortedKeys1 and made into a generic function.
+func sortedKeys2(m map[string][]int64) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +func (pm *profileMerger) mapLocation(src *Location) *Location { + if src == nil { + return nil + } + + if l := pm.locationsByID.get(src.ID); l != nil { + return l + } + + mi := pm.mapMapping(src.Mapping) + l := &Location{ + ID: uint64(len(pm.p.Location) + 1), + Mapping: mi.m, + Address: uint64(int64(src.Address) + mi.offset), + Line: make([]Line, len(src.Line)), + IsFolded: src.IsFolded, + } + for i, ln := range src.Line { + l.Line[i] = pm.mapLine(ln) + } + // Check memoization table. Must be done on the remapped location to + // account for the remapped mapping ID. + k := l.key() + if ll, ok := pm.locations[k]; ok { + pm.locationsByID.set(src.ID, ll) + return ll + } + pm.locationsByID.set(src.ID, l) + pm.locations[k] = l + pm.p.Location = append(pm.p.Location, l) + return l +} + +// key generates locationKey to be used as a key for maps. +func (l *Location) key() locationKey { + key := locationKey{ + addr: l.Address, + isFolded: l.IsFolded, + } + if l.Mapping != nil { + // Normalizes address to handle address space randomization. + key.addr -= l.Mapping.Start + key.mappingID = l.Mapping.ID + } + lines := make([]string, len(l.Line)*3) + for i, line := range l.Line { + if line.Function != nil { + lines[i*2] = strconv.FormatUint(line.Function.ID, 16) + } + lines[i*2+1] = strconv.FormatInt(line.Line, 16) + lines[i*2+2] = strconv.FormatInt(line.Column, 16) + } + key.lines = strings.Join(lines, "|") + return key +} + +type locationKey struct { + addr, mappingID uint64 + lines string + isFolded bool +} + +func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { + if src == nil { + return mapInfo{} + } + + if mi, ok := pm.mappingsByID[src.ID]; ok { + return mi + } + + // Check memoization tables. 
+ mk := src.key() + if m, ok := pm.mappings[mk]; ok { + mi := mapInfo{m, int64(m.Start) - int64(src.Start)} + pm.mappingsByID[src.ID] = mi + return mi + } + m := &Mapping{ + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + KernelRelocationSymbol: src.KernelRelocationSymbol, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, + } + pm.p.Mapping = append(pm.p.Mapping, m) + + // Update memoization tables. + pm.mappings[mk] = m + mi := mapInfo{m, 0} + pm.mappingsByID[src.ID] = mi + return mi +} + +// key generates encoded strings of Mapping to be used as a key for +// maps. +func (m *Mapping) key() mappingKey { + // Normalize addresses to handle address space randomization. + // Round up to next 4K boundary to avoid minor discrepancies. + const mapsizeRounding = 0x1000 + + size := m.Limit - m.Start + size = size + mapsizeRounding - 1 + size = size - (size % mapsizeRounding) + key := mappingKey{ + size: size, + offset: m.Offset, + } + + switch { + case m.BuildID != "": + key.buildIDOrFile = m.BuildID + case m.File != "": + key.buildIDOrFile = m.File + default: + // A mapping containing neither build ID nor file name is a fake mapping. A + // key with empty buildIDOrFile is used for fake mappings so that they are + // treated as the same mapping during merging. 
+ } + return key +} + +type mappingKey struct { + size, offset uint64 + buildIDOrFile string +} + +func (pm *profileMerger) mapLine(src Line) Line { + ln := Line{ + Function: pm.mapFunction(src.Function), + Line: src.Line, + Column: src.Column, + } + return ln +} + +func (pm *profileMerger) mapFunction(src *Function) *Function { + if src == nil { + return nil + } + if f, ok := pm.functionsByID[src.ID]; ok { + return f + } + k := src.key() + if f, ok := pm.functions[k]; ok { + pm.functionsByID[src.ID] = f + return f + } + f := &Function{ + ID: uint64(len(pm.p.Function) + 1), + Name: src.Name, + SystemName: src.SystemName, + Filename: src.Filename, + StartLine: src.StartLine, + } + pm.functions[k] = f + pm.functionsByID[src.ID] = f + pm.p.Function = append(pm.p.Function, f) + return f +} + +// key generates a struct to be used as a key for maps. +func (f *Function) key() functionKey { + return functionKey{ + f.StartLine, + f.Name, + f.SystemName, + f.Filename, + } +} + +type functionKey struct { + startLine int64 + name, systemName, fileName string +} + +// combineHeaders checks that all profiles can be merged and returns +// their combined profile. 
+func combineHeaders(srcs []*Profile) (*Profile, error) { + for _, s := range srcs[1:] { + if err := srcs[0].compatible(s); err != nil { + return nil, err + } + } + + var timeNanos, durationNanos, period int64 + var comments []string + seenComments := map[string]bool{} + var docURL string + var defaultSampleType string + for _, s := range srcs { + if timeNanos == 0 || s.TimeNanos < timeNanos { + timeNanos = s.TimeNanos + } + durationNanos += s.DurationNanos + if period == 0 || period < s.Period { + period = s.Period + } + for _, c := range s.Comments { + if seen := seenComments[c]; !seen { + comments = append(comments, c) + seenComments[c] = true + } + } + if defaultSampleType == "" { + defaultSampleType = s.DefaultSampleType + } + if docURL == "" { + docURL = s.DocURL + } + } + + p := &Profile{ + SampleType: make([]*ValueType, len(srcs[0].SampleType)), + + DropFrames: srcs[0].DropFrames, + KeepFrames: srcs[0].KeepFrames, + + TimeNanos: timeNanos, + DurationNanos: durationNanos, + PeriodType: srcs[0].PeriodType, + Period: period, + + Comments: comments, + DefaultSampleType: defaultSampleType, + DocURL: docURL, + } + copy(p.SampleType, srcs[0].SampleType) + return p, nil +} + +// compatible determines if two profiles can be compared/merged. +// returns nil if the profiles are compatible; otherwise an error with +// details on the incompatibility. +func (p *Profile) compatible(pb *Profile) error { + if !equalValueType(p.PeriodType, pb.PeriodType) { + return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType) + } + + if len(p.SampleType) != len(pb.SampleType) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + + for i := range p.SampleType { + if !equalValueType(p.SampleType[i], pb.SampleType[i]) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + } + return nil +} + +// equalValueType returns true if the two value types are semantically +// equal. 
It ignores the internal fields used during encode/decode. +func equalValueType(st1, st2 *ValueType) bool { + return st1.Type == st2.Type && st1.Unit == st2.Unit +} + +// locationIDMap is like a map[uint64]*Location, but provides efficiency for +// ids that are densely numbered, which is often the case. +type locationIDMap struct { + dense []*Location // indexed by id for id < len(dense) + sparse map[uint64]*Location // indexed by id for id >= len(dense) +} + +func makeLocationIDMap(n int) locationIDMap { + return locationIDMap{ + dense: make([]*Location, n), + sparse: map[uint64]*Location{}, + } +} + +func (lm locationIDMap) get(id uint64) *Location { + if id < uint64(len(lm.dense)) { + return lm.dense[int(id)] + } + return lm.sparse[id] +} + +func (lm locationIDMap) set(id uint64, loc *Location) { + if id < uint64(len(lm.dense)) { + lm.dense[id] = loc + return + } + lm.sparse[id] = loc +} + +// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It +// keeps sample types that appear in all profiles only and drops/reorders the +// sample types as necessary. +// +// In the case of sample types order is not the same for given profiles the +// order is derived from the first profile. +// +// Profiles are modified in-place. +// +// It returns an error if the sample type's intersection is empty. +func CompatibilizeSampleTypes(ps []*Profile) error { + sTypes := commonSampleTypes(ps) + if len(sTypes) == 0 { + return fmt.Errorf("profiles have empty common sample type list") + } + for _, p := range ps { + if err := compatibilizeSampleTypes(p, sTypes); err != nil { + return err + } + } + return nil +} + +// commonSampleTypes returns sample types that appear in all profiles in the +// order how they ordered in the first profile. 
+func commonSampleTypes(ps []*Profile) []string { + if len(ps) == 0 { + return nil + } + sTypes := map[string]int{} + for _, p := range ps { + for _, st := range p.SampleType { + sTypes[st.Type]++ + } + } + var res []string + for _, st := range ps[0].SampleType { + if sTypes[st.Type] == len(ps) { + res = append(res, st.Type) + } + } + return res +} + +// compatibilizeSampleTypes drops sample types that are not present in sTypes +// list and reorder them if needed. +// +// It sets DefaultSampleType to sType[0] if it is not in sType list. +// +// It assumes that all sample types from the sTypes list are present in the +// given profile otherwise it returns an error. +func compatibilizeSampleTypes(p *Profile, sTypes []string) error { + if len(sTypes) == 0 { + return fmt.Errorf("sample type list is empty") + } + defaultSampleType := sTypes[0] + reMap, needToModify := make([]int, len(sTypes)), false + for i, st := range sTypes { + if st == p.DefaultSampleType { + defaultSampleType = p.DefaultSampleType + } + idx := searchValueType(p.SampleType, st) + if idx < 0 { + return fmt.Errorf("%q sample type is not found in profile", st) + } + reMap[i] = idx + if idx != i { + needToModify = true + } + } + if !needToModify && len(sTypes) == len(p.SampleType) { + return nil + } + p.DefaultSampleType = defaultSampleType + oldSampleTypes := p.SampleType + p.SampleType = make([]*ValueType, len(sTypes)) + for i, idx := range reMap { + p.SampleType[i] = oldSampleTypes[idx] + } + values := make([]int64, len(sTypes)) + for _, s := range p.Sample { + for i, idx := range reMap { + values[i] = s.Value[idx] + } + s.Value = s.Value[:len(values)] + copy(s.Value, values) + } + return nil +} + +func searchValueType(vts []*ValueType, s string) int { + for i, vt := range vts { + if vt.Type == s { + return i + } + } + return -1 +} diff --git a/plugin/debug/pkg/profile/merge_test.go b/plugin/debug/pkg/profile/merge_test.go new file mode 100644 index 0000000..5dff13d --- /dev/null +++ 
b/plugin/debug/pkg/profile/merge_test.go @@ -0,0 +1,506 @@ +// Copyright 2018 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "bytes" + "fmt" + "reflect" + "testing" + + "m7s.live/v5/plugin/debug/pkg/internal/proftest" +) + +func TestMapMapping(t *testing.T) { + pm := &profileMerger{ + p: &Profile{}, + mappings: make(map[mappingKey]*Mapping), + mappingsByID: make(map[uint64]mapInfo), + } + for _, tc := range []struct { + desc string + m1 Mapping + m2 Mapping + wantMerged bool + }{ + { + desc: "same file name", + m1: Mapping{ + ID: 1, + File: "test-file-1", + }, + m2: Mapping{ + ID: 2, + File: "test-file-1", + }, + wantMerged: true, + }, + { + desc: "same build ID", + m1: Mapping{ + ID: 3, + BuildID: "test-build-id-1", + }, + m2: Mapping{ + ID: 4, + BuildID: "test-build-id-1", + }, + wantMerged: true, + }, + { + desc: "same fake mapping", + m1: Mapping{ + ID: 5, + }, + m2: Mapping{ + ID: 6, + }, + wantMerged: true, + }, + { + desc: "different start", + m1: Mapping{ + ID: 7, + Start: 0x1000, + Limit: 0x2000, + BuildID: "test-build-id-2", + }, + m2: Mapping{ + ID: 8, + Start: 0x3000, + Limit: 0x4000, + BuildID: "test-build-id-2", + }, + wantMerged: true, + }, + { + desc: "different file name", + m1: Mapping{ + ID: 9, + File: "test-file-2", + }, + m2: Mapping{ + ID: 10, + File: "test-file-3", + }, + }, + { + desc: "different build id", + m1: Mapping{ + ID: 11, + BuildID: "test-build-id-3", + }, + 
m2: Mapping{ + ID: 12, + BuildID: "test-build-id-4", + }, + }, + { + desc: "different size", + m1: Mapping{ + ID: 13, + Start: 0x1000, + Limit: 0x3000, + BuildID: "test-build-id-5", + }, + m2: Mapping{ + ID: 14, + Start: 0x1000, + Limit: 0x5000, + BuildID: "test-build-id-5", + }, + }, + { + desc: "different offset", + m1: Mapping{ + ID: 15, + Offset: 1, + BuildID: "test-build-id-6", + }, + m2: Mapping{ + ID: 16, + Offset: 2, + BuildID: "test-build-id-6", + }, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + info1 := pm.mapMapping(&tc.m1) + info2 := pm.mapMapping(&tc.m2) + gotM1, gotM2 := *info1.m, *info2.m + + wantM1 := tc.m1 + wantM1.ID = gotM1.ID + if gotM1 != wantM1 { + t.Errorf("first mapping got %v, want %v", gotM1, wantM1) + } + + if tc.wantMerged { + if gotM1 != gotM2 { + t.Errorf("first mapping got %v, second mapping got %v, want equal", gotM1, gotM2) + } + if info1.offset != 0 { + t.Errorf("first mapping info got offset %d, want 0", info1.offset) + } + if wantOffset := int64(tc.m1.Start) - int64(tc.m2.Start); wantOffset != info2.offset { + t.Errorf("second mapping info got offset %d, want %d", info2.offset, wantOffset) + } + } else { + if gotM1.ID == gotM2.ID { + t.Errorf("first mapping got %v, second mapping got %v, want different IDs", gotM1, gotM2) + } + wantM2 := tc.m2 + wantM2.ID = gotM2.ID + if gotM2 != wantM2 { + t.Errorf("second mapping got %v, want %v", gotM2, wantM2) + } + } + }) + } +} + +func TestLocationIDMap(t *testing.T) { + ids := []uint64{1, 2, 5, 9, 10, 11, 100, 1000, 1000000} + missing := []uint64{3, 4, 200} + + // Populate the map,. + idmap := makeLocationIDMap(10) + for _, id := range ids { + loc := &Location{ID: id} + idmap.set(id, loc) + } + + // Check ids that should be present in the map. 
+ for _, id := range ids { + loc := idmap.get(id) + if loc == nil { + t.Errorf("No location found for %d", id) + } else if loc.ID != id { + t.Errorf("Wrong location %d found for %d", loc.ID, id) + } + } + + // Check ids that should not be present in the map. + for _, id := range missing { + loc := idmap.get(id) + if loc != nil { + t.Errorf("Unexpected location %d found for %d", loc.ID, id) + } + } +} + +func BenchmarkMerge(b *testing.B) { + data := proftest.LargeProfile(b) + for n := 1; n <= 2; n++ { // Merge either 1 or 2 instances. + b.Run(fmt.Sprintf("%d", n), func(b *testing.B) { + list := make([]*Profile, n) + for i := 0; i < n; i++ { + prof, err := Parse(bytes.NewBuffer(data)) + if err != nil { + b.Fatal(err) + } + list[i] = prof + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := Merge(list) + if err != nil { + b.Fatal(err) + } + } + }) + } +} + +func TestCompatibilizeSampleTypes(t *testing.T) { + for _, tc := range []struct { + desc string + ps []*Profile + want []*Profile + wantError bool + }{ + { + desc: "drop first sample types", + ps: []*Profile{ + { + DefaultSampleType: "delete1", + SampleType: []*ValueType{ + {Type: "delete1", Unit: "Unit1"}, + {Type: "delete2", Unit: "Unit2"}, + {Type: "keep1", Unit: "Unit3"}, + {Type: "keep2", Unit: "Unit4"}, + {Type: "keep3", Unit: "Unit5"}, + }, + Sample: []*Sample{ + {Value: []int64{1, 2, 3, 4, 5}}, + {Value: []int64{10, 20, 30, 40, 50}}, + }, + }, + { + DefaultSampleType: "keep1", + SampleType: []*ValueType{ + {Type: "keep1", Unit: "Unit3"}, + {Type: "keep2", Unit: "Unit4"}, + {Type: "keep3", Unit: "Unit5"}, + }, + Sample: []*Sample{ + {Value: []int64{1, 2, 3}}, + {Value: []int64{10, 20, 30}}, + }, + }, + }, + want: []*Profile{ + { + DefaultSampleType: "keep1", + SampleType: []*ValueType{ + {Type: "keep1", Unit: "Unit3"}, + {Type: "keep2", Unit: "Unit4"}, + {Type: "keep3", Unit: "Unit5"}, + }, + Sample: []*Sample{ + {Value: []int64{3, 4, 5}}, + {Value: []int64{30, 40, 50}}, + }, + }, + { + 
DefaultSampleType: "keep1", + SampleType: []*ValueType{ + {Type: "keep1", Unit: "Unit3"}, + {Type: "keep2", Unit: "Unit4"}, + {Type: "keep3", Unit: "Unit5"}, + }, + Sample: []*Sample{ + {Value: []int64{1, 2, 3}}, + {Value: []int64{10, 20, 30}}, + }, + }, + }, + }, + { + desc: "drop last sample types", + ps: []*Profile{ + { + DefaultSampleType: "delete2", + SampleType: []*ValueType{ + {Type: "keep1", Unit: "Unit3"}, + {Type: "keep2", Unit: "Unit4"}, + {Type: "keep3", Unit: "Unit5"}, + {Type: "delete1", Unit: "Unit1"}, + {Type: "delete2", Unit: "Unit2"}, + }, + Sample: []*Sample{ + {Value: []int64{1, 2, 3, 4, 5}}, + {Value: []int64{10, 20, 30, 40, 50}}, + }, + }, + { + DefaultSampleType: "keep2", + SampleType: []*ValueType{ + {Type: "keep1", Unit: "Unit3"}, + {Type: "keep2", Unit: "Unit4"}, + {Type: "keep3", Unit: "Unit5"}, + }, + Sample: []*Sample{ + {Value: []int64{1, 2, 3}}, + {Value: []int64{10, 20, 30}}, + }, + }, + }, + want: []*Profile{ + { + DefaultSampleType: "keep1", + SampleType: []*ValueType{ + {Type: "keep1", Unit: "Unit3"}, + {Type: "keep2", Unit: "Unit4"}, + {Type: "keep3", Unit: "Unit5"}, + }, + Sample: []*Sample{ + {Value: []int64{1, 2, 3}}, + {Value: []int64{10, 20, 30}}, + }, + }, + { + DefaultSampleType: "keep2", + SampleType: []*ValueType{ + {Type: "keep1", Unit: "Unit3"}, + {Type: "keep2", Unit: "Unit4"}, + {Type: "keep3", Unit: "Unit5"}, + }, + Sample: []*Sample{ + {Value: []int64{1, 2, 3}}, + {Value: []int64{10, 20, 30}}, + }, + }, + }, + }, + { + desc: "drop sample types and reorder", + ps: []*Profile{ + { + DefaultSampleType: "keep3", + SampleType: []*ValueType{ + {Type: "delete1", Unit: "Unit1"}, + {Type: "keep1", Unit: "Unit3"}, + {Type: "delete2", Unit: "Unit2"}, + {Type: "keep2", Unit: "Unit4"}, + {Type: "keep3", Unit: "Unit5"}, + }, + Sample: []*Sample{ + {Value: []int64{1, 2, 3, 4, 5}}, + {Value: []int64{10, 20, 30, 40, 50}}, + }, + }, + { + DefaultSampleType: "keep2", + SampleType: []*ValueType{ + {Type: "keep3", Unit: "Unit5"}, + 
{Type: "keep2", Unit: "Unit4"}, + {Type: "keep1", Unit: "Unit3"}, + }, + Sample: []*Sample{ + {Value: []int64{1, 2, 3}}, + {Value: []int64{10, 20, 30}}, + }, + }, + }, + want: []*Profile{ + { + DefaultSampleType: "keep3", + SampleType: []*ValueType{ + {Type: "keep1", Unit: "Unit3"}, + {Type: "keep2", Unit: "Unit4"}, + {Type: "keep3", Unit: "Unit5"}, + }, + Sample: []*Sample{ + {Value: []int64{2, 4, 5}}, + {Value: []int64{20, 40, 50}}, + }, + }, + { + DefaultSampleType: "keep2", + SampleType: []*ValueType{ + {Type: "keep1", Unit: "Unit3"}, + {Type: "keep2", Unit: "Unit4"}, + {Type: "keep3", Unit: "Unit5"}, + }, + Sample: []*Sample{ + {Value: []int64{3, 2, 1}}, + {Value: []int64{30, 20, 10}}, + }, + }, + }, + }, + { + desc: "empty common types", + ps: []*Profile{ + { + SampleType: []*ValueType{ + {Type: "keep1", Unit: "Unit1"}, + {Type: "keep2", Unit: "Unit2"}, + {Type: "keep3", Unit: "Unit3"}, + }, + }, + { + SampleType: []*ValueType{ + {Type: "keep4", Unit: "Unit4"}, + {Type: "keep5", Unit: "Unit5"}, + }, + }, + }, + wantError: true, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + err := CompatibilizeSampleTypes(tc.ps) + if (err != nil) != tc.wantError { + t.Fatalf("CompatibilizeSampleTypes() returned error: %v, want any error=%t", err, tc.wantError) + } + if err != nil { + return + } + for i := 0; i < len(tc.want); i++ { + gotStr := tc.ps[i].String() + wantStr := tc.want[i].String() + if gotStr != wantStr { + d, err := proftest.Diff([]byte(wantStr), []byte(gotStr)) + if err != nil { + t.Fatalf("failed to get diff: %v", err) + } + t.Errorf("CompatibilizeSampleTypes(): profile[%d] got diff (-want +got)\n%s", i, string(d)) + } + } + }) + } +} + +func TestDocURLMerge(t *testing.T) { + const url1 = "http://example.com/url1" + const url2 = "http://example.com/url2" + type testCase struct { + name string + profiles []*Profile + want string + } + profile := func(url string) *Profile { + return &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "seconds"}, + DocURL: 
url, + } + } + for _, test := range []testCase{ + { + name: "nolinks", + profiles: []*Profile{ + profile(""), + profile(""), + }, + want: "", + }, + { + name: "single", + profiles: []*Profile{ + profile(url1), + }, + want: url1, + }, + { + name: "mix", + profiles: []*Profile{ + profile(""), + profile(url1), + }, + want: url1, + }, + { + name: "different", + profiles: []*Profile{ + profile(url1), + profile(url2), + }, + want: url1, + }, + } { + t.Run(test.name, func(t *testing.T) { + merged, err := combineHeaders(test.profiles) + if err != nil { + t.Fatal(err) + } + got := merged.DocURL + if !reflect.DeepEqual(test.want, got) { + t.Errorf("unexpected links; want: %#v, got: %#v", test.want, got) + } + }) + } +} diff --git a/plugin/debug/pkg/profile/profile.go b/plugin/debug/pkg/profile/profile.go new file mode 100644 index 0000000..f47a243 --- /dev/null +++ b/plugin/debug/pkg/profile/profile.go @@ -0,0 +1,869 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profile provides a representation of profile.proto and +// methods to encode/decode profiles in this format. +package profile + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "math" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Profile is an in-memory representation of profile.proto. 
type Profile struct {
	SampleType        []*ValueType // one entry per element of each Sample.Value (enforced by CheckValid)
	DefaultSampleType string
	Sample            []*Sample
	Mapping           []*Mapping
	Location          []*Location
	Function          []*Function
	Comments          []string
	DocURL            string

	DropFrames string
	KeepFrames string

	TimeNanos     int64
	DurationNanos int64
	PeriodType    *ValueType
	Period        int64

	// The following fields are modified during encoding and copying,
	// so are protected by a Mutex.
	encodeMu sync.Mutex

	// String-table bookkeeping for the fields above; presumably populated
	// while encoding (guarded by encodeMu) — confirm against preEncode.
	commentX           []int64
	docURLX            int64
	dropFramesX        int64
	keepFramesX        int64
	stringTable        []string
	defaultSampleTypeX int64
}

// ValueType corresponds to Profile.ValueType
type ValueType struct {
	Type string // cpu, wall, inuse_space, etc
	Unit string // seconds, nanoseconds, bytes, etc

	typeX int64
	unitX int64
}

// Sample corresponds to Profile.Sample
type Sample struct {
	Location []*Location
	Value    []int64
	// Label is a per-label-key map to values for string labels.
	//
	// In general, having multiple values for the given label key is strongly
	// discouraged - see docs for the sample label field in profile.proto. The
	// main reason this unlikely state is tracked here is to make the
	// decoding->encoding roundtrip not lossy. But we expect that the value
	// slices present in this map are always of length 1.
	Label map[string][]string
	// NumLabel is a per-label-key map to values for numeric labels. See a note
	// above on handling multiple values for a label.
	NumLabel map[string][]int64
	// NumUnit is a per-label-key map to the unit names of corresponding numeric
	// label values. The unit info may be missing even if the label is in
	// NumLabel, see the docs in profile.proto for details. When the value is
	// slice is present and not nil, its length must be equal to the length of
	// the corresponding value slice in NumLabel.
	NumUnit map[string][]string

	// Encoding-time state: raw location IDs and raw labels.
	locationIDX []uint64
	labelX      []label
}

// label corresponds to Profile.Label
type label struct {
	keyX int64
	// Exactly one of the two following values must be set
	strX int64
	numX int64 // Integer value for this label
	// can be set if numX has value
	unitX int64
}

// Mapping corresponds to Profile.Mapping
type Mapping struct {
	ID              uint64
	Start           uint64
	Limit           uint64
	Offset          uint64
	File            string
	BuildID         string
	HasFunctions    bool
	HasFilenames    bool
	HasLineNumbers  bool
	HasInlineFrames bool

	fileX    int64
	buildIDX int64

	// Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File.
	// For linux kernel mappings generated by some tools, correct symbolization depends
	// on knowing which of the two possible relocation symbols was used for `Start`.
	// This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext").
	//
	// Note, this public field is not persisted in the proto. For the purposes of
	// copying / merging / hashing profiles, it is considered subsumed by `File`.
	KernelRelocationSymbol string
}

// Location corresponds to Profile.Location
type Location struct {
	ID       uint64
	Mapping  *Mapping
	Address  uint64
	Line     []Line
	IsFolded bool

	mappingIDX uint64
}

// Line corresponds to Profile.Line
type Line struct {
	Function *Function
	Line     int64
	Column   int64

	functionIDX uint64
}

// Function corresponds to Profile.Function
type Function struct {
	ID         uint64
	Name       string
	SystemName string
	Filename   string
	StartLine  int64

	nameX       int64
	systemNameX int64
	filenameX   int64
}

// Parse parses a profile and checks for its validity. The input
// may be a gzip-compressed encoded protobuf or one of many legacy
// profile formats which may be unsupported in the future.
+func Parse(r io.Reader) (*Profile, error) { + data, err := io.ReadAll(r) + if err != nil { + return nil, err + } + return ParseData(data) +} + +// ParseData parses a profile from a buffer and checks for its +// validity. +func ParseData(data []byte) (*Profile, error) { + var p *Profile + var err error + if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err == nil { + data, err = io.ReadAll(gz) + } + if err != nil { + return nil, fmt.Errorf("decompressing profile: %v", err) + } + } + if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { + p, err = parseLegacy(data) + } + + if err != nil { + return nil, fmt.Errorf("parsing profile: %v", err) + } + + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("malformed profile: %v", err) + } + return p, nil +} + +var errUnrecognized = fmt.Errorf("unrecognized profile format") +var errMalformed = fmt.Errorf("malformed profile format") +var errNoData = fmt.Errorf("empty input file") +var errConcatProfile = fmt.Errorf("concatenated profiles detected") + +func parseLegacy(data []byte) (*Profile, error) { + parsers := []func([]byte) (*Profile, error){ + parseCPU, + parseHeap, + parseGoCount, // goroutine, threadcreate + parseThread, + parseContention, + parseJavaProfile, + } + + for _, parser := range parsers { + p, err := parser(data) + if err == nil { + p.addLegacyFrameInfo() + return p, nil + } + if err != errUnrecognized { + return nil, err + } + } + return nil, errUnrecognized +} + +// ParseUncompressed parses an uncompressed protobuf into a profile. 
+func ParseUncompressed(data []byte) (*Profile, error) { + if len(data) == 0 { + return nil, errNoData + } + p := &Profile{} + if err := unmarshal(data, p); err != nil { + return nil, err + } + + if err := p.postDecode(); err != nil { + return nil, err + } + + return p, nil +} + +var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`) + +// massageMappings applies heuristic-based changes to the profile +// mappings to account for quirks of some environments. +func (p *Profile) massageMappings() { + // Merge adjacent regions with matching names, checking that the offsets match + if len(p.Mapping) > 1 { + mappings := []*Mapping{p.Mapping[0]} + for _, m := range p.Mapping[1:] { + lm := mappings[len(mappings)-1] + if adjacent(lm, m) { + lm.Limit = m.Limit + if m.File != "" { + lm.File = m.File + } + if m.BuildID != "" { + lm.BuildID = m.BuildID + } + p.updateLocationMapping(m, lm) + continue + } + mappings = append(mappings, m) + } + p.Mapping = mappings + } + + // Use heuristics to identify main binary and move it to the top of the list of mappings + for i, m := range p.Mapping { + file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1)) + if len(file) == 0 { + continue + } + if len(libRx.FindStringSubmatch(file)) > 0 { + continue + } + if file[0] == '[' { + continue + } + // Swap what we guess is main to position 0. + p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0] + break + } + + // Keep the mapping IDs neatly sorted + for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +// adjacent returns whether two mapping entries represent the same +// mapping that has been split into two. Check that their addresses are adjacent, +// and if the offsets match, if they are available. 
+func adjacent(m1, m2 *Mapping) bool { + if m1.File != "" && m2.File != "" { + if m1.File != m2.File { + return false + } + } + if m1.BuildID != "" && m2.BuildID != "" { + if m1.BuildID != m2.BuildID { + return false + } + } + if m1.Limit != m2.Start { + return false + } + if m1.Offset != 0 && m2.Offset != 0 { + offset := m1.Offset + (m1.Limit - m1.Start) + if offset != m2.Offset { + return false + } + } + return true +} + +func (p *Profile) updateLocationMapping(from, to *Mapping) { + for _, l := range p.Location { + if l.Mapping == from { + l.Mapping = to + } + } +} + +func serialize(p *Profile) []byte { + p.encodeMu.Lock() + p.preEncode() + b := marshal(p) + p.encodeMu.Unlock() + return b +} + +// Write writes the profile as a gzip-compressed marshaled protobuf. +func (p *Profile) Write(w io.Writer) error { + zw := gzip.NewWriter(w) + defer zw.Close() + _, err := zw.Write(serialize(p)) + return err +} + +// WriteUncompressed writes the profile as a marshaled protobuf. +func (p *Profile) WriteUncompressed(w io.Writer) error { + _, err := w.Write(serialize(p)) + return err +} + +// CheckValid tests whether the profile is valid. Checks include, but are +// not limited to: +// - len(Profile.Sample[n].value) == len(Profile.value_unit) +// - Sample.id has a corresponding Profile.Location +func (p *Profile) CheckValid() error { + // Check that sample values are consistent + sampleLen := len(p.SampleType) + if sampleLen == 0 && len(p.Sample) != 0 { + return fmt.Errorf("missing sample type information") + } + for _, s := range p.Sample { + if s == nil { + return fmt.Errorf("profile has nil sample") + } + if len(s.Value) != sampleLen { + return fmt.Errorf("mismatch: sample has %d values vs. 
%d types", len(s.Value), len(p.SampleType)) + } + for _, l := range s.Location { + if l == nil { + return fmt.Errorf("sample has nil location") + } + } + } + + // Check that all mappings/locations/functions are in the tables + // Check that there are no duplicate ids + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + for _, m := range p.Mapping { + if m == nil { + return fmt.Errorf("profile has nil mapping") + } + if m.ID == 0 { + return fmt.Errorf("found mapping with reserved ID=0") + } + if mappings[m.ID] != nil { + return fmt.Errorf("multiple mappings with same id: %d", m.ID) + } + mappings[m.ID] = m + } + functions := make(map[uint64]*Function, len(p.Function)) + for _, f := range p.Function { + if f == nil { + return fmt.Errorf("profile has nil function") + } + if f.ID == 0 { + return fmt.Errorf("found function with reserved ID=0") + } + if functions[f.ID] != nil { + return fmt.Errorf("multiple functions with same id: %d", f.ID) + } + functions[f.ID] = f + } + locations := make(map[uint64]*Location, len(p.Location)) + for _, l := range p.Location { + if l == nil { + return fmt.Errorf("profile has nil location") + } + if l.ID == 0 { + return fmt.Errorf("found location with reserved id=0") + } + if locations[l.ID] != nil { + return fmt.Errorf("multiple locations with same id: %d", l.ID) + } + locations[l.ID] = l + if m := l.Mapping; m != nil { + if m.ID == 0 || mappings[m.ID] != m { + return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID) + } + } + for _, ln := range l.Line { + f := ln.Function + if f == nil { + return fmt.Errorf("location id: %d has a line with nil function", l.ID) + } + if f.ID == 0 || functions[f.ID] != f { + return fmt.Errorf("inconsistent function %p: %d", f, f.ID) + } + } + } + return nil +} + +// Aggregate merges the locations in the profile into equivalence +// classes preserving the request attributes. It also updates the +// samples to point to the merged locations. 
func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error {
	// Each boolean parameter selects an attribute to PRESERVE; passing
	// false erases that attribute so locations differing only in it
	// become equivalent.
	// Downgrade the mapping detail flags to match what is being kept.
	for _, m := range p.Mapping {
		m.HasInlineFrames = m.HasInlineFrames && inlineFrame
		m.HasFunctions = m.HasFunctions && function
		m.HasFilenames = m.HasFilenames && filename
		m.HasLineNumbers = m.HasLineNumbers && linenumber
	}

	// Aggregate functions
	if !function || !filename {
		for _, f := range p.Function {
			if !function {
				f.Name = ""
				f.SystemName = ""
			}
			if !filename {
				f.Filename = ""
			}
		}
	}

	// Aggregate locations
	if !inlineFrame || !address || !linenumber || !columnnumber {
		for _, l := range p.Location {
			// Without inline frames, keep only the outermost (last) frame.
			if !inlineFrame && len(l.Line) > 1 {
				l.Line = l.Line[len(l.Line)-1:]
			}
			// Dropping line numbers also drops columns: a column is
			// meaningless without its line.
			if !linenumber {
				for i := range l.Line {
					l.Line[i].Line = 0
					l.Line[i].Column = 0
				}
			}
			if !columnnumber {
				for i := range l.Line {
					l.Line[i].Column = 0
				}
			}
			if !address {
				l.Address = 0
			}
		}
	}

	// Re-validate: aggregation mutates in place and must leave the
	// profile internally consistent.
	return p.CheckValid()
}

// NumLabelUnits returns a map of numeric label keys to the units
// associated with those keys and a map of those keys to any units
// that were encountered but not used.
// Unit for a given key is the first encountered unit for that key. If multiple
// units are encountered for values paired with a particular key, then the first
// unit encountered is used and all other units are returned in sorted order
// in map of ignored units.
// If no units are encountered for a particular key, the unit is then inferred
// based on the key.
func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) {
	numLabelUnits := map[string]string{}
	ignoredUnits := map[string]map[string]bool{}
	encounteredKeys := map[string]bool{}

	// Determine units based on numeric tags for each sample.
	for _, s := range p.Sample {
		for k := range s.NumLabel {
			encounteredKeys[k] = true
			for _, unit := range s.NumUnit[k] {
				if unit == "" {
					continue
				}
				// First non-empty unit wins; conflicting units are
				// collected for reporting rather than overwriting.
				if wantUnit, ok := numLabelUnits[k]; !ok {
					numLabelUnits[k] = unit
				} else if wantUnit != unit {
					if v, ok := ignoredUnits[k]; ok {
						v[unit] = true
					} else {
						ignoredUnits[k] = map[string]bool{unit: true}
					}
				}
			}
		}
	}
	// Infer units for keys without any units associated with
	// numeric tag values.
	for key := range encounteredKeys {
		unit := numLabelUnits[key]
		if unit == "" {
			switch key {
			case "alignment", "request":
				numLabelUnits[key] = "bytes"
			default:
				numLabelUnits[key] = key
			}
		}
	}

	// Copy ignored units into more readable format
	unitsIgnored := make(map[string][]string, len(ignoredUnits))
	for key, values := range ignoredUnits {
		units := make([]string, len(values))
		i := 0
		for unit := range values {
			units[i] = unit
			i++
		}
		sort.Strings(units)
		unitsIgnored[key] = units
	}

	return numLabelUnits, unitsIgnored
}

// String dumps a text representation of a profile. Intended mainly
// for debugging purposes.
+func (p *Profile) String() string { + ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) + for _, c := range p.Comments { + ss = append(ss, "Comment: "+c) + } + if url := p.DocURL; url != "" { + ss = append(ss, fmt.Sprintf("Doc: %s", url)) + } + if pt := p.PeriodType; pt != nil { + ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) + } + ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) + if p.TimeNanos != 0 { + ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) + } + if p.DurationNanos != 0 { + ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) + } + + ss = append(ss, "Samples:") + var sh1 string + for _, s := range p.SampleType { + dflt := "" + if s.Type == p.DefaultSampleType { + dflt = "[dflt]" + } + sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) + } + ss = append(ss, strings.TrimSpace(sh1)) + for _, s := range p.Sample { + ss = append(ss, s.string()) + } + + ss = append(ss, "Locations") + for _, l := range p.Location { + ss = append(ss, l.string()) + } + + ss = append(ss, "Mappings") + for _, m := range p.Mapping { + ss = append(ss, m.string()) + } + + return strings.Join(ss, "\n") + "\n" +} + +// string dumps a text representation of a mapping. Intended mainly +// for debugging purposes. +func (m *Mapping) string() string { + bits := "" + if m.HasFunctions { + bits = bits + "[FN]" + } + if m.HasFilenames { + bits = bits + "[FL]" + } + if m.HasLineNumbers { + bits = bits + "[LN]" + } + if m.HasInlineFrames { + bits = bits + "[IN]" + } + return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", + m.ID, + m.Start, m.Limit, m.Offset, + m.File, + m.BuildID, + bits) +} + +// string dumps a text representation of a location. Intended mainly +// for debugging purposes. 
+func (l *Location) string() string { + ss := []string{} + locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) + if m := l.Mapping; m != nil { + locStr = locStr + fmt.Sprintf("M=%d ", m.ID) + } + if l.IsFolded { + locStr = locStr + "[F] " + } + if len(l.Line) == 0 { + ss = append(ss, locStr) + } + for li := range l.Line { + lnStr := "??" + if fn := l.Line[li].Function; fn != nil { + lnStr = fmt.Sprintf("%s %s:%d:%d s=%d", + fn.Name, + fn.Filename, + l.Line[li].Line, + l.Line[li].Column, + fn.StartLine) + if fn.Name != fn.SystemName { + lnStr = lnStr + "(" + fn.SystemName + ")" + } + } + ss = append(ss, locStr+lnStr) + // Do not print location details past the first line + locStr = " " + } + return strings.Join(ss, "\n") +} + +// string dumps a text representation of a sample. Intended mainly +// for debugging purposes. +func (s *Sample) string() string { + ss := []string{} + var sv string + for _, v := range s.Value { + sv = fmt.Sprintf("%s %10d", sv, v) + } + sv = sv + ": " + for _, l := range s.Location { + sv = sv + fmt.Sprintf("%d ", l.ID) + } + ss = append(ss, sv) + const labelHeader = " " + if len(s.Label) > 0 { + ss = append(ss, labelHeader+labelsToString(s.Label)) + } + if len(s.NumLabel) > 0 { + ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) + } + return strings.Join(ss, "\n") +} + +// labelsToString returns a string representation of a +// map representing labels. +func labelsToString(labels map[string][]string) string { + ls := []string{} + for k, v := range labels { + ls = append(ls, fmt.Sprintf("%s:%v", k, v)) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// numLabelsToString returns a string representation of a map +// representing numeric labels. 
+func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { + ls := []string{} + for k, v := range numLabels { + units := numUnits[k] + var labelString string + if len(units) == len(v) { + values := make([]string, len(v)) + for i, vv := range v { + values[i] = fmt.Sprintf("%d %s", vv, units[i]) + } + labelString = fmt.Sprintf("%s:%v", k, values) + } else { + labelString = fmt.Sprintf("%s:%v", k, v) + } + ls = append(ls, labelString) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// SetLabel sets the specified key to the specified value for all samples in the +// profile. +func (p *Profile) SetLabel(key string, value []string) { + for _, sample := range p.Sample { + if sample.Label == nil { + sample.Label = map[string][]string{key: value} + } else { + sample.Label[key] = value + } + } +} + +// RemoveLabel removes all labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveLabel(key string) { + for _, sample := range p.Sample { + delete(sample.Label, key) + } +} + +// HasLabel returns true if a sample has a label with indicated key and value. +func (s *Sample) HasLabel(key, value string) bool { + for _, v := range s.Label[key] { + if v == value { + return true + } + } + return false +} + +// SetNumLabel sets the specified key to the specified value for all samples in the +// profile. "unit" is a slice that describes the units that each corresponding member +// of "values" is measured in (e.g. bytes or seconds). If there is no relevant +// unit for a given value, that member of "unit" should be the empty string. +// "unit" must either have the same length as "value", or be nil. 
+func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { + for _, sample := range p.Sample { + if sample.NumLabel == nil { + sample.NumLabel = map[string][]int64{key: value} + } else { + sample.NumLabel[key] = value + } + if sample.NumUnit == nil { + sample.NumUnit = map[string][]string{key: unit} + } else { + sample.NumUnit[key] = unit + } + } +} + +// RemoveNumLabel removes all numerical labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveNumLabel(key string) { + for _, sample := range p.Sample { + delete(sample.NumLabel, key) + delete(sample.NumUnit, key) + } +} + +// DiffBaseSample returns true if a sample belongs to the diff base and false +// otherwise. +func (s *Sample) DiffBaseSample() bool { + return s.HasLabel("pprof::base", "true") +} + +// Scale multiplies all sample values in a profile by a constant and keeps +// only samples that have at least one non-zero value. +func (p *Profile) Scale(ratio float64) { + if ratio == 1 { + return + } + ratios := make([]float64, len(p.SampleType)) + for i := range p.SampleType { + ratios[i] = ratio + } + p.ScaleN(ratios) +} + +// ScaleN multiplies each sample values in a sample by a different amount +// and keeps only samples that have at least one non-zero value. 
+func (p *Profile) ScaleN(ratios []float64) error { + if len(p.SampleType) != len(ratios) { + return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType)) + } + allOnes := true + for _, r := range ratios { + if r != 1 { + allOnes = false + break + } + } + if allOnes { + return nil + } + fillIdx := 0 + for _, s := range p.Sample { + keepSample := false + for i, v := range s.Value { + if ratios[i] != 1 { + val := int64(math.Round(float64(v) * ratios[i])) + s.Value[i] = val + keepSample = keepSample || val != 0 + } + } + if keepSample { + p.Sample[fillIdx] = s + fillIdx++ + } + } + p.Sample = p.Sample[:fillIdx] + return nil +} + +// HasFunctions determines if all locations in this profile have +// symbolized function information. +func (p *Profile) HasFunctions() bool { + for _, l := range p.Location { + if l.Mapping != nil && !l.Mapping.HasFunctions { + return false + } + } + return true +} + +// HasFileLines determines if all locations in this profile have +// symbolized file and line number information. +func (p *Profile) HasFileLines() bool { + for _, l := range p.Location { + if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) { + return false + } + } + return true +} + +// Unsymbolizable returns true if a mapping points to a binary for which +// locations can't be symbolized in principle, at least now. Examples are +// "[vdso]", "[vsyscall]" and some others, see the code. +func (m *Mapping) Unsymbolizable() bool { + name := filepath.Base(m.File) + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") || m.File == "//anon" +} + +// Copy makes a fully independent copy of a profile. 
+func (p *Profile) Copy() *Profile { + pp := &Profile{} + if err := unmarshal(serialize(p), pp); err != nil { + panic(err) + } + if err := pp.postDecode(); err != nil { + panic(err) + } + + return pp +} diff --git a/plugin/debug/pkg/profile/profile_test.go b/plugin/debug/pkg/profile/profile_test.go new file mode 100644 index 0000000..fd4d202 --- /dev/null +++ b/plugin/debug/pkg/profile/profile_test.go @@ -0,0 +1,1942 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package profile + +import ( + "bytes" + "flag" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "strings" + "sync" + "testing" + + "m7s.live/v5/plugin/debug/pkg/internal/proftest" +) + +var update = flag.Bool("update", false, "Update the golden files") + +func TestParse(t *testing.T) { + const path = "testdata/" + + for _, source := range []string{ + "go.crc32.cpu", + "go.godoc.thread", + "gobench.cpu", + "gobench.heap", + "cppbench.cpu", + "cppbench.heap", + "cppbench.contention", + "cppbench.growth", + "cppbench.thread", + "cppbench.thread.all", + "cppbench.thread.none", + "java.cpu", + "java.heap", + "java.contention", + } { + inbytes, err := os.ReadFile(filepath.Join(path, source)) + if err != nil { + t.Fatal(err) + } + p, err := Parse(bytes.NewBuffer(inbytes)) + if err != nil { + t.Fatalf("%s: %s", source, err) + } + + js := p.String() + goldFilename := path + source + ".string" + if *update { + err := os.WriteFile(goldFilename, []byte(js), 0644) + if err != nil { + t.Errorf("failed to update the golden file file %q: %v", goldFilename, err) + } + } + gold, err := os.ReadFile(goldFilename) + if err != nil { + t.Fatalf("%s: %v", source, err) + } + + if js != string(gold) { + t.Errorf("diff %s %s", source, goldFilename) + d, err := proftest.Diff(gold, []byte(js)) + if err != nil { + t.Fatalf("%s: %v", source, err) + } + t.Error(source + "\n" + string(d) + "\n" + "new profile at:\n" + leaveTempfile([]byte(js))) + } + + // Reencode and decode. 
+ var bw bytes.Buffer + if err := p.Write(&bw); err != nil { + t.Fatalf("%s: %v", source, err) + } + if p, err = Parse(&bw); err != nil { + t.Fatalf("%s: %v", source, err) + } + js2 := p.String() + if js2 != string(gold) { + d, err := proftest.Diff(gold, []byte(js2)) + if err != nil { + t.Fatalf("%s: %v", source, err) + } + t.Error(source + "\n" + string(d) + "\n" + "gold:\n" + goldFilename + + "\nnew profile at:\n" + leaveTempfile([]byte(js))) + } + } +} + +func TestParseError(t *testing.T) { + testcases := []string{ + "", + "garbage text", + "\x1f\x8b", // truncated gzip header + "\x1f\x8b\x08\x08\xbe\xe9\x20\x58\x00\x03\x65\x6d\x70\x74\x79\x00\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", // empty gzipped file + } + + for i, input := range testcases { + _, err := Parse(strings.NewReader(input)) + if err == nil { + t.Errorf("got nil, want error for input #%d", i) + } + } +} + +func TestParseConcatentated(t *testing.T) { + prof := testProfile1.Copy() + // Write the profile twice to buffer to create concatented profile. 
+ var buf bytes.Buffer + prof.Write(&buf) + prof.Write(&buf) + _, err := Parse(&buf) + if err == nil { + t.Fatalf("got nil, want error") + } + if got, want := err.Error(), "parsing profile: concatenated profiles detected"; want != got { + t.Fatalf("got error %q, want error %q", got, want) + } +} + +func TestCheckValid(t *testing.T) { + const path = "testdata/java.cpu" + + inbytes, err := os.ReadFile(path) + if err != nil { + t.Fatalf("failed to read profile file %q: %v", path, err) + } + p, err := Parse(bytes.NewBuffer(inbytes)) + if err != nil { + t.Fatalf("failed to parse profile %q: %s", path, err) + } + + for _, tc := range []struct { + mutateFn func(*Profile) + wantErr string + }{ + { + mutateFn: func(p *Profile) { p.SampleType = nil }, + wantErr: "missing sample type information", + }, + { + mutateFn: func(p *Profile) { p.Sample[0] = nil }, + wantErr: "profile has nil sample", + }, + { + mutateFn: func(p *Profile) { p.Sample[0].Value = append(p.Sample[0].Value, 0) }, + wantErr: "sample has 3 values vs. 
2 types", + }, + { + mutateFn: func(p *Profile) { p.Sample[0].Location[0] = nil }, + wantErr: "sample has nil location", + }, + { + mutateFn: func(p *Profile) { p.Location[0] = nil }, + wantErr: "profile has nil location", + }, + { + mutateFn: func(p *Profile) { p.Mapping = append(p.Mapping, nil) }, + wantErr: "profile has nil mapping", + }, + { + mutateFn: func(p *Profile) { p.Function[0] = nil }, + wantErr: "profile has nil function", + }, + { + mutateFn: func(p *Profile) { p.Location[0].Line = append(p.Location[0].Line, Line{}) }, + wantErr: "has a line with nil function", + }, + } { + t.Run(tc.wantErr, func(t *testing.T) { + p := p.Copy() + tc.mutateFn(p) + if err := p.CheckValid(); err == nil { + t.Errorf("CheckValid(): got no error, want error %q", tc.wantErr) + } else if !strings.Contains(err.Error(), tc.wantErr) { + t.Errorf("CheckValid(): got error %v, want error %q", err, tc.wantErr) + } + }) + } +} + +// leaveTempfile leaves |b| in a temporary file on disk and returns the +// temp filename. This is useful to recover a profile when the test +// fails. 
+func leaveTempfile(b []byte) string { + f1, err := os.CreateTemp("", "profile_test") + if err != nil { + panic(err) + } + if _, err := f1.Write(b); err != nil { + panic(err) + } + return f1.Name() +} + +const mainBinary = "/bin/main" + +var cpuM = []*Mapping{ + { + ID: 1, + Start: 0x10000, + Limit: 0x40000, + File: mainBinary, + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + HasInlineFrames: true, + }, + { + ID: 2, + Start: 0x1000, + Limit: 0x4000, + File: "/lib/lib.so", + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + HasInlineFrames: true, + }, + { + ID: 3, + Start: 0x4000, + Limit: 0x5000, + File: "/lib/lib2_c.so.6", + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + HasInlineFrames: true, + }, + { + ID: 4, + Start: 0x5000, + Limit: 0x9000, + File: "/lib/lib.so_6 (deleted)", + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + HasInlineFrames: true, + }, + { + ID: 5, + Start: 0xffff000010080000, + Limit: 0xffffffffffffffff, + File: "[kernel.kallsyms]_text", + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + HasInlineFrames: true, + }, +} + +var cpuF = []*Function{ + {ID: 1, Name: "main", SystemName: "main", Filename: "main.c"}, + {ID: 2, Name: "foo", SystemName: "foo", Filename: "foo.c"}, + {ID: 3, Name: "foo_caller", SystemName: "foo_caller", Filename: "foo.c"}, +} + +var cpuL = []*Location{ + { + ID: 1000, + Mapping: cpuM[1], + Address: 0x1000, + Line: []Line{ + {Function: cpuF[0], Line: 1, Column: 1}, + }, + }, + { + ID: 2000, + Mapping: cpuM[0], + Address: 0x2000, + Line: []Line{ + {Function: cpuF[1], Line: 2, Column: 2}, + {Function: cpuF[2], Line: 1, Column: 1}, + }, + }, + { + ID: 3000, + Mapping: cpuM[0], + Address: 0x3000, + Line: []Line{ + {Function: cpuF[1], Line: 2, Column: 2}, + {Function: cpuF[2], Line: 1, Column: 1}, + }, + }, + { + ID: 3001, + Mapping: cpuM[0], + Address: 0x3001, + Line: []Line{ + {Function: cpuF[2], Line: 2, Column: 2}, + }, + }, + 
{ + ID: 3002, + Mapping: cpuM[0], + Address: 0x3002, + Line: []Line{ + {Function: cpuF[2], Line: 3, Column: 3}, + }, + }, + // Differs from 1000 due to address and column number. + { + ID: 1001, + Mapping: cpuM[1], + Address: 0x1001, + Line: []Line{ + {Function: cpuF[0], Line: 1, Column: 2}, + }, + }, +} + +var testProfile1 = &Profile{ + TimeNanos: 10000, + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000, 1000}, + Label: map[string][]string{ + "key1": {"tag1"}, + "key2": {"tag1"}, + }, + }, + { + Location: []*Location{cpuL[1], cpuL[0]}, + Value: []int64{100, 100}, + Label: map[string][]string{ + "key1": {"tag2"}, + "key3": {"tag2"}, + }, + }, + { + Location: []*Location{cpuL[2], cpuL[0]}, + Value: []int64{10, 10}, + Label: map[string][]string{ + "key1": {"tag3"}, + "key2": {"tag2"}, + }, + }, + { + Location: []*Location{cpuL[3], cpuL[0]}, + Value: []int64{10000, 10000}, + Label: map[string][]string{ + "key1": {"tag4"}, + "key2": {"tag1"}, + }, + }, + { + Location: []*Location{cpuL[4], cpuL[0]}, + Value: []int64{1, 1}, + Label: map[string][]string{ + "key1": {"tag4"}, + "key2": {"tag1"}, + }, + }, + }, + Location: cpuL, + Function: cpuF, + Mapping: cpuM, +} + +var testProfile1NoMapping = &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000, 1000}, + Label: map[string][]string{ + "key1": {"tag1"}, + "key2": {"tag1"}, + }, + }, + { + Location: []*Location{cpuL[1], cpuL[0]}, + Value: []int64{100, 100}, + Label: map[string][]string{ + "key1": {"tag2"}, + "key3": {"tag2"}, + }, + }, + { + Location: 
[]*Location{cpuL[2], cpuL[0]}, + Value: []int64{10, 10}, + Label: map[string][]string{ + "key1": {"tag3"}, + "key2": {"tag2"}, + }, + }, + { + Location: []*Location{cpuL[3], cpuL[0]}, + Value: []int64{10000, 10000}, + Label: map[string][]string{ + "key1": {"tag4"}, + "key2": {"tag1"}, + }, + }, + { + Location: []*Location{cpuL[4], cpuL[0]}, + Value: []int64{1, 1}, + Label: map[string][]string{ + "key1": {"tag4"}, + "key2": {"tag1"}, + }, + }, + }, + Location: cpuL, + Function: cpuF, +} + +var testProfile2 = &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{70, 1000}, + Label: map[string][]string{ + "key1": {"tag1"}, + "key2": {"tag1"}, + }, + }, + { + Location: []*Location{cpuL[1], cpuL[0]}, + Value: []int64{60, 100}, + Label: map[string][]string{ + "key1": {"tag2"}, + "key3": {"tag2"}, + }, + }, + { + Location: []*Location{cpuL[2], cpuL[0]}, + Value: []int64{50, 10}, + Label: map[string][]string{ + "key1": {"tag3"}, + "key2": {"tag2"}, + }, + }, + { + Location: []*Location{cpuL[3], cpuL[0]}, + Value: []int64{40, 10000}, + Label: map[string][]string{ + "key1": {"tag4"}, + "key2": {"tag1"}, + }, + }, + { + Location: []*Location{cpuL[4], cpuL[0]}, + Value: []int64{1, 1}, + Label: map[string][]string{ + "key1": {"tag4"}, + "key2": {"tag1"}, + }, + }, + }, + Location: cpuL, + Function: cpuF, + Mapping: cpuM, +} + +var testProfile3 = &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + }, + Sample: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + Label: map[string][]string{ + "key1": {"tag1"}, + "key2": {"tag1"}, + }, + }, + }, + Location: cpuL, + Function: cpuF, + Mapping: cpuM, +} + +var 
testProfile4 = &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + }, + Sample: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key1": {10}, + "key2": {30}, + }, + NumUnit: map[string][]string{ + "key1": {"bytes"}, + "key2": {"bytes"}, + }, + }, + }, + Location: cpuL, + Function: cpuF, + Mapping: cpuM, +} + +var testProfile5 = &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + }, + Sample: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key1": {10}, + "key2": {30}, + }, + NumUnit: map[string][]string{ + "key1": {"bytes"}, + "key2": {"bytes"}, + }, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key1": {10}, + "key2": {30}, + }, + NumUnit: map[string][]string{ + "key1": {"kilobytes"}, + "key2": {"kilobytes"}, + }, + }, + }, + Location: cpuL, + Function: cpuF, + Mapping: cpuM, +} + +var testProfile6 = &Profile{ + TimeNanos: 10000, + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000, 1000}, + Label: map[string][]string{ + "key1": {"tag1"}, + "key2": {"tag1"}, + }, + }, + { + Location: []*Location{cpuL[1], cpuL[0]}, + Value: []int64{100, 100}, + Label: map[string][]string{ + "key1": {"tag2"}, + "key3": {"tag2"}, + }, + }, + { + Location: []*Location{cpuL[2], cpuL[0]}, + Value: []int64{10, 10}, + Label: map[string][]string{ + "key1": {"tag3"}, + "key2": {"tag2"}, + }, + }, + { + Location: []*Location{cpuL[3], cpuL[0]}, + Value: 
[]int64{10000, 10000}, + Label: map[string][]string{ + "key1": {"tag4"}, + "key2": {"tag1"}, + }, + }, + { + Location: []*Location{cpuL[4], cpuL[0]}, + Value: []int64{1, 1}, + Label: map[string][]string{ + "key1": {"tag4"}, + "key2": {"tag1"}, + }, + }, + { + Location: []*Location{cpuL[5]}, + Value: []int64{1, 1}, + Label: map[string][]string{ + "key1": {"tag5"}, + "key2": {"tag1"}, + }, + }, + }, + Location: cpuL, + Function: cpuF, + Mapping: cpuM, +} + +var aggTests = map[string]aggTest{ + "precise": {true, true, true, true, true, 6}, + "columns": {false, true, true, true, true, 5}, + "fileline": {false, true, true, false, true, 4}, + "inline_function": {false, true, false, false, true, 3}, + "function": {false, true, false, false, false, 2}, +} + +type aggTest struct { + precise, function, fileline, column, inlineFrame bool + rows int +} + +// totalSamples is the sum of sample.Value[0] for testProfile6. +const totalSamples = int64(11112) + +func TestAggregation(t *testing.T) { + prof := testProfile6.Copy() + for _, resolution := range []string{"precise", "columns", "fileline", "inline_function", "function"} { + a := aggTests[resolution] + if !a.precise { + if err := prof.Aggregate(a.inlineFrame, a.function, a.fileline, a.fileline, a.column, false); err != nil { + t.Error("aggregating to " + resolution + ":" + err.Error()) + } + } + if err := checkAggregation(prof, &a); err != nil { + t.Error("failed aggregation to " + resolution + ": " + err.Error()) + } + } +} + +// checkAggregation verifies that the profile remained consistent +// with its aggregation. +func checkAggregation(prof *Profile, a *aggTest) error { + // Check that the total number of samples for the rows was preserved. 
+ total := int64(0) + + samples := make(map[string]bool) + for _, sample := range prof.Sample { + tb := locationHash(sample) + samples[tb] = true + total += sample.Value[0] + } + + if total != totalSamples { + return fmt.Errorf("sample total %d, want %d", total, totalSamples) + } + + // Check the number of unique sample locations + if a.rows != len(samples) { + return fmt.Errorf("number of samples %d, want %d", len(samples), a.rows) + } + + // Check that all mappings have the right detail flags. + for _, m := range prof.Mapping { + if m.HasFunctions != a.function { + return fmt.Errorf("unexpected mapping.HasFunctions %v, want %v", m.HasFunctions, a.function) + } + if m.HasFilenames != a.fileline { + return fmt.Errorf("unexpected mapping.HasFilenames %v, want %v", m.HasFilenames, a.fileline) + } + if m.HasLineNumbers != a.fileline { + return fmt.Errorf("unexpected mapping.HasLineNumbers %v, want %v", m.HasLineNumbers, a.fileline) + } + if m.HasInlineFrames != a.inlineFrame { + return fmt.Errorf("unexpected mapping.HasInlineFrames %v, want %v", m.HasInlineFrames, a.inlineFrame) + } + } + + // Check that aggregation has removed finer resolution data. + for _, l := range prof.Location { + if !a.inlineFrame && len(l.Line) > 1 { + return fmt.Errorf("found %d lines on location %d, want 1", len(l.Line), l.ID) + } + + for _, ln := range l.Line { + if !a.column && ln.Column != 0 { + return fmt.Errorf("found column %d on location %d, want:0", ln.Column, l.ID) + } + if !a.fileline && (ln.Function.Filename != "" || ln.Line != 0) { + return fmt.Errorf("found line %s:%d on location %d, want :0", + ln.Function.Filename, ln.Line, l.ID) + } + if !a.function && (ln.Function.Name != "") { + return fmt.Errorf(`found file %s location %d, want ""`, + ln.Function.Name, l.ID) + } + } + } + + return nil +} + +// TestScale tests that Scale() rounds values and drops samples +// as expected. 
+func TestScale(t *testing.T) { + for _, tc := range []struct { + desc string + ratio float64 + p *Profile + wantSamples [][]int64 + }{ + { + desc: "scale by 1", + ratio: 1.0, + p: testProfile1.Copy(), + wantSamples: [][]int64{ + {1000, 1000}, + {100, 100}, + {10, 10}, + {10000, 10000}, + {1, 1}, + }, + }, + { + desc: "sample values will be rounded up", + ratio: .66666, + p: testProfile1.Copy(), + wantSamples: [][]int64{ + {667, 667}, + {67, 67}, + {7, 7}, + {6667, 6667}, + {1, 1}, + }, + }, + { + desc: "sample values will be rounded down", + ratio: .33333, + p: testProfile1.Copy(), + wantSamples: [][]int64{ + {333, 333}, + {33, 33}, + {3, 3}, + {3333, 3333}, + }, + }, + { + desc: "all sample values will be dropped", + ratio: 0.00001, + p: testProfile1.Copy(), + wantSamples: [][]int64{}, + }, + } { + t.Run(tc.desc, func(t *testing.T) { + tc.p.Scale(tc.ratio) + if got, want := len(tc.p.Sample), len(tc.wantSamples); got != want { + t.Fatalf("got %d samples, want %d", got, want) + } + for i, s := range tc.p.Sample { + for j, got := range s.Value { + want := tc.wantSamples[i][j] + if want != got { + t.Errorf("For value %d of sample %d, got %d want %d", j, i, got, want) + } + } + } + }) + } +} + +// TestMergeMain tests merge leaves the main binary in place. +func TestMergeMain(t *testing.T) { + prof := testProfile1.Copy() + p1, err := Merge([]*Profile{prof}) + if err != nil { + t.Fatalf("merge error: %v", err) + } + if cpuM[0].File != p1.Mapping[0].File { + t.Errorf("want Mapping[0]=%s got %s", cpuM[0].File, p1.Mapping[0].File) + } +} + +func TestMerge(t *testing.T) { + // Aggregate a profile with itself and once again with a factor of + // -2. Should end up with an empty profile (all samples for a + // location should add up to 0). 
+ + prof := testProfile1.Copy() + prof.Comments = []string{"comment1"} + p1, err := Merge([]*Profile{prof, prof}) + if err != nil { + t.Errorf("merge error: %v", err) + } + prof.Scale(-2) + prof, err = Merge([]*Profile{p1, prof}) + if err != nil { + t.Errorf("merge error: %v", err) + } + if got, want := len(prof.Comments), 1; got != want { + t.Errorf("len(prof.Comments) = %d, want %d", got, want) + } + + // Use aggregation to merge locations at function granularity. + if err := prof.Aggregate(false, true, false, false, false, false); err != nil { + t.Errorf("aggregating after merge: %v", err) + } + + samples := make(map[string]int64) + for _, s := range prof.Sample { + tb := locationHash(s) + samples[tb] = samples[tb] + s.Value[0] + } + for s, v := range samples { + if v != 0 { + t.Errorf("nonzero value for sample %s: %d", s, v) + } + } +} + +func TestMergeAll(t *testing.T) { + // Aggregate 10 copies of the profile. + profs := make([]*Profile, 10) + for i := 0; i < 10; i++ { + profs[i] = testProfile1.Copy() + } + prof, err := Merge(profs) + if err != nil { + t.Errorf("merge error: %v", err) + } + samples := make(map[string]int64) + for _, s := range prof.Sample { + tb := locationHash(s) + samples[tb] = samples[tb] + s.Value[0] + } + for _, s := range testProfile1.Sample { + tb := locationHash(s) + if samples[tb] != s.Value[0]*10 { + t.Errorf("merge got wrong value at %s : %d instead of %d", tb, samples[tb], s.Value[0]*10) + } + } +} + +func TestIsFoldedMerge(t *testing.T) { + testProfile1Folded := testProfile1.Copy() + testProfile1Folded.Location[0].IsFolded = true + testProfile1Folded.Location[1].IsFolded = true + + for _, tc := range []struct { + name string + profs []*Profile + wantLocationLen int + }{ + { + name: "folded and non-folded locations not merged", + profs: []*Profile{testProfile1.Copy(), testProfile1Folded.Copy()}, + wantLocationLen: 7, + }, + { + name: "identical folded locations are merged", + profs: []*Profile{testProfile1Folded.Copy(), 
testProfile1Folded.Copy()}, + wantLocationLen: 5, + }, + } { + t.Run(tc.name, func(t *testing.T) { + prof, err := Merge(tc.profs) + if err != nil { + t.Fatalf("merge error: %v", err) + } + if got, want := len(prof.Location), tc.wantLocationLen; got != want { + t.Fatalf("got %d locations, want %d locations", got, want) + } + }) + } +} + +func TestNumLabelMerge(t *testing.T) { + for _, tc := range []struct { + name string + profs []*Profile + wantNumLabels []map[string][]int64 + wantNumUnits []map[string][]string + }{ + { + name: "different label units not merged", + profs: []*Profile{testProfile4.Copy(), testProfile5.Copy()}, + wantNumLabels: []map[string][]int64{ + { + "key1": {10}, + "key2": {30}, + }, + { + "key1": {10}, + "key2": {30}, + }, + }, + wantNumUnits: []map[string][]string{ + { + "key1": {"bytes"}, + "key2": {"bytes"}, + }, + { + "key1": {"kilobytes"}, + "key2": {"kilobytes"}, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + prof, err := Merge(tc.profs) + if err != nil { + t.Errorf("merge error: %v", err) + } + + if want, got := len(tc.wantNumLabels), len(prof.Sample); want != got { + t.Fatalf("got %d samples, want %d samples", got, want) + } + for i, wantLabels := range tc.wantNumLabels { + numLabels := prof.Sample[i].NumLabel + if !reflect.DeepEqual(wantLabels, numLabels) { + t.Errorf("got numeric labels %v, want %v", numLabels, wantLabels) + } + + wantUnits := tc.wantNumUnits[i] + numUnits := prof.Sample[i].NumUnit + if !reflect.DeepEqual(wantUnits, numUnits) { + t.Errorf("got numeric labels %v, want %v", numUnits, wantUnits) + } + } + }) + } +} + +func TestEmptyMappingMerge(t *testing.T) { + // Aggregate a profile with itself and once again with a factor of + // -2. Should end up with an empty profile (all samples for a + // location should add up to 0). 
+ + prof1 := testProfile1.Copy() + prof2 := testProfile1NoMapping.Copy() + p1, err := Merge([]*Profile{prof2, prof1}) + if err != nil { + t.Errorf("merge error: %v", err) + } + prof2.Scale(-2) + prof, err := Merge([]*Profile{p1, prof2}) + if err != nil { + t.Errorf("merge error: %v", err) + } + + // Use aggregation to merge locations at function granularity. + if err := prof.Aggregate(false, true, false, false, false, false); err != nil { + t.Errorf("aggregating after merge: %v", err) + } + + samples := make(map[string]int64) + for _, s := range prof.Sample { + tb := locationHash(s) + samples[tb] = samples[tb] + s.Value[0] + } + for s, v := range samples { + if v != 0 { + t.Errorf("nonzero value for sample %s: %d", s, v) + } + } +} + +func TestNormalizeBySameProfile(t *testing.T) { + pb := testProfile1.Copy() + p := testProfile1.Copy() + + if err := p.Normalize(pb); err != nil { + t.Fatal(err) + } + + for i, s := range p.Sample { + for j, v := range s.Value { + expectedSampleValue := testProfile1.Sample[i].Value[j] + if v != expectedSampleValue { + t.Errorf("For sample %d, value %d want %d got %d", i, j, expectedSampleValue, v) + } + } + } +} + +func TestNormalizeByDifferentProfile(t *testing.T) { + p := testProfile1.Copy() + pb := testProfile2.Copy() + + if err := p.Normalize(pb); err != nil { + t.Fatal(err) + } + + expectedSampleValues := [][]int64{ + {20, 1000}, + {2, 100}, + {199, 10000}, + {0, 1}, + } + + for i, s := range p.Sample { + for j, v := range s.Value { + if v != expectedSampleValues[i][j] { + t.Errorf("For sample %d, value %d want %d got %d", i, j, expectedSampleValues[i][j], v) + } + } + } +} + +func TestNormalizeByMultipleOfSameProfile(t *testing.T) { + pb := testProfile1.Copy() + for i, s := range pb.Sample { + for j, v := range s.Value { + pb.Sample[i].Value[j] = 10 * v + } + } + + p := testProfile1.Copy() + + err := p.Normalize(pb) + if err != nil { + t.Fatal(err) + } + + for i, s := range p.Sample { + for j, v := range s.Value { + 
expectedSampleValue := 10 * testProfile1.Sample[i].Value[j] + if v != expectedSampleValue { + t.Errorf("For sample %d, value %d, want %d got %d", i, j, expectedSampleValue, v) + } + } + } +} + +func TestNormalizeIncompatibleProfiles(t *testing.T) { + p := testProfile1.Copy() + pb := testProfile3.Copy() + + if err := p.Normalize(pb); err == nil { + t.Errorf("Expected an error") + } +} + +// locationHash constructs a string to use as a hashkey for a sample, based on its locations +func locationHash(s *Sample) string { + var tb string + for _, l := range s.Location { + for _, ln := range l.Line { + tb = tb + fmt.Sprintf("%s:%d:%d@%d ", ln.Function.Name, ln.Line, ln.Column, l.Address) + } + } + return tb +} + +func TestHasLabel(t *testing.T) { + var testcases = []struct { + desc string + labels map[string][]string + key string + value string + wantHasLabel bool + }{ + { + desc: "empty label does not have label", + labels: map[string][]string{}, + key: "key", + value: "value", + wantHasLabel: false, + }, + { + desc: "label with one key and value has label", + labels: map[string][]string{"key": {"value"}}, + key: "key", + value: "value", + wantHasLabel: true, + }, + { + desc: "label with one key and value does not have label", + labels: map[string][]string{"key": {"value"}}, + key: "key1", + value: "value1", + wantHasLabel: false, + }, + { + desc: "label with many keys and values has label", + labels: map[string][]string{ + "key1": {"value2", "value1"}, + "key2": {"value1", "value2", "value2"}, + "key3": {"value1", "value2", "value2"}, + }, + key: "key1", + value: "value1", + wantHasLabel: true, + }, + { + desc: "label with many keys and values does not have label", + labels: map[string][]string{ + "key1": {"value2", "value1"}, + "key2": {"value1", "value2", "value2"}, + "key3": {"value1", "value2", "value2"}, + }, + key: "key5", + value: "value5", + wantHasLabel: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.desc, func(t *testing.T) { + sample := 
&Sample{ + Label: tc.labels, + } + if gotHasLabel := sample.HasLabel(tc.key, tc.value); gotHasLabel != tc.wantHasLabel { + t.Errorf("sample.HasLabel(%q, %q) got %v, want %v", tc.key, tc.value, gotHasLabel, tc.wantHasLabel) + } + }) + } +} + +func TestDiffBaseSample(t *testing.T) { + var testcases = []struct { + desc string + labels map[string][]string + wantDiffBaseSample bool + }{ + { + desc: "empty label does not have label", + labels: map[string][]string{}, + wantDiffBaseSample: false, + }, + { + desc: "label with one key and value, including diff base label", + labels: map[string][]string{"pprof::base": {"true"}}, + wantDiffBaseSample: true, + }, + { + desc: "label with one key and value, not including diff base label", + labels: map[string][]string{"key": {"value"}}, + wantDiffBaseSample: false, + }, + { + desc: "label with many keys and values, including diff base label", + labels: map[string][]string{ + "pprof::base": {"value2", "true"}, + "key2": {"true", "value2", "value2"}, + "key3": {"true", "value2", "value2"}, + }, + wantDiffBaseSample: true, + }, + { + desc: "label with many keys and values, not including diff base label", + labels: map[string][]string{ + "key1": {"value2", "value1"}, + "key2": {"value1", "value2", "value2"}, + "key3": {"value1", "value2", "value2"}, + }, + wantDiffBaseSample: false, + }, + } + + for _, tc := range testcases { + t.Run(tc.desc, func(t *testing.T) { + sample := &Sample{ + Label: tc.labels, + } + if gotHasLabel := sample.DiffBaseSample(); gotHasLabel != tc.wantDiffBaseSample { + t.Errorf("sample.DiffBaseSample() got %v, want %v", gotHasLabel, tc.wantDiffBaseSample) + } + }) + } +} + +func TestRemove(t *testing.T) { + var testcases = []struct { + desc string + samples []*Sample + removeKey string + wantLabels []map[string][]string + }{ + { + desc: "some samples have label already", + samples: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + }, + { + Location: []*Location{cpuL[0]}, + Value: 
[]int64{1000}, + Label: map[string][]string{ + "key1": {"value1", "value2", "value3"}, + "key2": {"value1"}, + }, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + Label: map[string][]string{ + "key1": {"value2"}, + }, + }, + }, + removeKey: "key1", + wantLabels: []map[string][]string{ + {}, + {"key2": {"value1"}}, + {}, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.desc, func(t *testing.T) { + profile := testProfile1.Copy() + profile.Sample = tc.samples + profile.RemoveLabel(tc.removeKey) + if got, want := len(profile.Sample), len(tc.wantLabels); got != want { + t.Fatalf("got %v samples, want %v samples", got, want) + } + for i, sample := range profile.Sample { + wantLabels := tc.wantLabels[i] + if got, want := len(sample.Label), len(wantLabels); got != want { + t.Errorf("got %v label keys for sample %v, want %v", got, i, want) + continue + } + for wantKey, wantValues := range wantLabels { + if gotValues, ok := sample.Label[wantKey]; ok { + if !reflect.DeepEqual(gotValues, wantValues) { + t.Errorf("for key %s, got values %v, want values %v", wantKey, gotValues, wantValues) + } + } else { + t.Errorf("for key %s got no values, want %v", wantKey, wantValues) + } + } + } + }) + } +} + +func TestSetLabel(t *testing.T) { + var testcases = []struct { + desc string + samples []*Sample + setKey string + setVal []string + wantLabels []map[string][]string + }{ + { + desc: "some samples have label already", + samples: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + Label: map[string][]string{ + "key1": {"value1", "value2", "value3"}, + "key2": {"value1"}, + }, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + Label: map[string][]string{ + "key1": {"value2"}, + }, + }, + }, + setKey: "key1", + setVal: []string{"value1"}, + wantLabels: []map[string][]string{ + {"key1": {"value1"}}, + {"key1": {"value1"}, "key2": {"value1"}}, + 
{"key1": {"value1"}}, + }, + }, + { + desc: "no samples have labels", + samples: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + }, + }, + setKey: "key1", + setVal: []string{"value1"}, + wantLabels: []map[string][]string{ + {"key1": {"value1"}}, + }, + }, + { + desc: "all samples have some labels, but not key being added", + samples: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + Label: map[string][]string{ + "key2": {"value2"}, + }, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + Label: map[string][]string{ + "key3": {"value3"}, + }, + }, + }, + setKey: "key1", + setVal: []string{"value1"}, + wantLabels: []map[string][]string{ + {"key1": {"value1"}, "key2": {"value2"}}, + {"key1": {"value1"}, "key3": {"value3"}}, + }, + }, + { + desc: "all samples have key being added", + samples: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + Label: map[string][]string{ + "key1": {"value1"}, + }, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + Label: map[string][]string{ + "key1": {"value1"}, + }, + }, + }, + setKey: "key1", + setVal: []string{"value1"}, + wantLabels: []map[string][]string{ + {"key1": {"value1"}}, + {"key1": {"value1"}}, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.desc, func(t *testing.T) { + profile := testProfile1.Copy() + profile.Sample = tc.samples + profile.SetLabel(tc.setKey, tc.setVal) + if got, want := len(profile.Sample), len(tc.wantLabels); got != want { + t.Fatalf("got %v samples, want %v samples", got, want) + } + for i, sample := range profile.Sample { + wantLabels := tc.wantLabels[i] + if got, want := len(sample.Label), len(wantLabels); got != want { + t.Errorf("got %v label keys for sample %v, want %v", got, i, want) + continue + } + for wantKey, wantValues := range wantLabels { + if gotValues, ok := sample.Label[wantKey]; ok { + if !reflect.DeepEqual(gotValues, wantValues) { + t.Errorf("for key %s, got values 
%v, want values %v", wantKey, gotValues, wantValues) + } + } else { + t.Errorf("for key %s got no values, want %v", wantKey, wantValues) + } + } + } + }) + } +} + +func TestSetNumLabel(t *testing.T) { + var testcases = []struct { + desc string + samples []*Sample + setKey string + setVal []int64 + setUnit []string + wantValues []map[string][]int64 + wantUnits []map[string][]string + }{ + { + desc: "some samples have label already", + samples: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key1": {1, 2, 3}, + "key2": {1}, + }, + NumUnit: map[string][]string{ + "key1": {"bytes", "bytes", "bytes"}, + "key2": {"gallons"}, + }, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key1": {2}, + }, + NumUnit: map[string][]string{ + "key1": {"volts"}, + }, + }, + }, + setKey: "key1", + setVal: []int64{1}, + setUnit: []string{"bytes"}, + wantValues: []map[string][]int64{ + {"key1": {1}}, + {"key1": {1}, "key2": {1}}, + {"key1": {1}}, + }, + wantUnits: []map[string][]string{ + {"key1": {"bytes"}}, + {"key1": {"bytes"}, "key2": {"gallons"}}, + {"key1": {"bytes"}}, + }, + }, + { + desc: "no samples have labels", + samples: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + }, + }, + setKey: "key1", + setVal: []int64{1}, + setUnit: []string{"bytes"}, + wantValues: []map[string][]int64{ + {"key1": {1}}, + }, + wantUnits: []map[string][]string{ + {"key1": {"bytes"}}, + }, + }, + { + desc: "all samples have some labels, but not key being added", + samples: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key2": {2}, + }, + NumUnit: map[string][]string{ + "key2": {"joules"}, + }, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key3": {3}, + }, + NumUnit: 
map[string][]string{ + "key3": {"meters"}, + }, + }, + }, + setKey: "key1", + setVal: []int64{1}, + setUnit: []string{"seconds"}, + wantValues: []map[string][]int64{ + {"key1": {1}, "key2": {2}}, + {"key1": {1}, "key3": {3}}, + }, + wantUnits: []map[string][]string{ + {"key1": {"seconds"}, "key2": {"joules"}}, + {"key1": {"seconds"}, "key3": {"meters"}}, + }, + }, + { + desc: "all samples have key being added", + samples: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key1": {1}, + }, + NumUnit: map[string][]string{ + "key1": {"exabytes"}, + }, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key1": {1}, + }, + NumUnit: map[string][]string{ + "key1": {"petabytes"}, + }, + }, + }, + setKey: "key1", + setVal: []int64{1, 2}, + setUnit: []string{"daltons", ""}, + wantValues: []map[string][]int64{ + {"key1": {1, 2}}, + {"key1": {1, 2}}, + }, + wantUnits: []map[string][]string{ + {"key1": {"daltons", ""}}, + {"key1": {"daltons", ""}}, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.desc, func(t *testing.T) { + profile := testProfile1.Copy() + profile.Sample = tc.samples + profile.SetNumLabel(tc.setKey, tc.setVal, tc.setUnit) + if got, want := len(profile.Sample), len(tc.wantValues); got != want { + t.Fatalf("got %v samples, want %v samples", got, want) + } + if got, want := len(profile.Sample), len(tc.wantUnits); got != want { + t.Fatalf("got %v samples, want %v samples", got, want) + } + for i, sample := range profile.Sample { + wantValues := tc.wantValues[i] + if got, want := len(sample.NumLabel), len(wantValues); got != want { + t.Errorf("got %v label values for sample %v, want %v", got, i, want) + continue + } + for key, values := range wantValues { + if gotValues, ok := sample.NumLabel[key]; ok { + if !reflect.DeepEqual(gotValues, values) { + t.Errorf("for key %s, got values %v, want values %v", key, gotValues, values) + } + } else { + 
t.Errorf("for key %s got no values, want %v", key, values) + } + } + + wantUnits := tc.wantUnits[i] + if got, want := len(sample.NumUnit), len(wantUnits); got != want { + t.Errorf("got %v label units for sample %v, want %v", got, i, want) + continue + } + for key, units := range wantUnits { + if gotUnits, ok := sample.NumUnit[key]; ok { + if !reflect.DeepEqual(gotUnits, units) { + t.Errorf("for key %s, got units %v, want units %v", key, gotUnits, units) + } + } else { + t.Errorf("for key %s got no units, want %v", key, units) + } + } + } + }) + } +} + +func TestRemoveNumLabel(t *testing.T) { + var testcases = []struct { + desc string + samples []*Sample + removeKey string + wantValues []map[string][]int64 + wantUnits []map[string][]string + }{ + { + desc: "some samples have label already", + samples: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key1": {1, 2, 3}, + "key2": {1}, + }, + NumUnit: map[string][]string{ + "key1": {"foo", "bar", "baz"}, + "key2": {"seconds"}, + }, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key1": {2}, + }, + NumUnit: map[string][]string{ + "key1": {"seconds"}, + }, + }, + }, + removeKey: "key1", + wantValues: []map[string][]int64{ + {}, + {"key2": {1}}, + {}, + }, + wantUnits: []map[string][]string{ + {}, + {"key2": {"seconds"}}, + {}, + }, + }, + { + desc: "no samples have label", + samples: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + }, + }, + removeKey: "key1", + wantValues: []map[string][]int64{ + {}, + }, + wantUnits: []map[string][]string{ + {}, + }, + }, + { + desc: "all samples have some labels, but not key being removed", + samples: []*Sample{ + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key2": {2}, + }, + NumUnit: map[string][]string{ + "key2": {"terabytes"}, + 
}, + }, + { + Location: []*Location{cpuL[0]}, + Value: []int64{1000}, + NumLabel: map[string][]int64{ + "key3": {3}, + }, + NumUnit: map[string][]string{ + "key3": {""}, + }, + }, + }, + removeKey: "key1", + wantValues: []map[string][]int64{ + {"key2": {2}}, + {"key3": {3}}, + }, + wantUnits: []map[string][]string{ + {"key2": {"terabytes"}}, + {"key3": {""}}, + }, + }, + } + + for _, tc := range testcases { + t.Run(tc.desc, func(t *testing.T) { + profile := testProfile1.Copy() + profile.Sample = tc.samples + profile.RemoveNumLabel(tc.removeKey) + if got, want := len(profile.Sample), len(tc.wantValues); got != want { + t.Fatalf("got %v samples, want %v values", got, want) + } + if got, want := len(profile.Sample), len(tc.wantUnits); got != want { + t.Fatalf("got %v samples, want %v units", got, want) + } + for i, sample := range profile.Sample { + wantValues := tc.wantValues[i] + if got, want := len(sample.NumLabel), len(wantValues); got != want { + t.Errorf("got %v label values for sample %v, want %v", got, i, want) + continue + } + for key, values := range wantValues { + if gotValues, ok := sample.NumLabel[key]; ok { + if !reflect.DeepEqual(gotValues, values) { + t.Errorf("for key %s, got values %v, want values %v", key, gotValues, values) + } + } else { + t.Errorf("for key %s got no values, want %v", key, values) + } + } + wantUnits := tc.wantUnits[i] + if got, want := len(sample.NumLabel), len(wantUnits); got != want { + t.Errorf("got %v label values for sample %v, want %v", got, i, want) + continue + } + for key, units := range wantUnits { + if gotUnits, ok := sample.NumUnit[key]; ok { + if !reflect.DeepEqual(gotUnits, units) { + t.Errorf("for key %s, got units %v, want units %v", key, gotUnits, units) + } + } else { + t.Errorf("for key %s got no units, want %v", key, units) + } + } + } + }) + } +} + +func TestNumLabelUnits(t *testing.T) { + var tagFilterTests = []struct { + desc string + tagVals []map[string][]int64 + tagUnits []map[string][]string + wantUnits 
map[string]string + wantIgnoredUnits map[string][]string + }{ + { + "One sample, multiple keys, different specified units", + []map[string][]int64{{"key1": {131072}, "key2": {128}}}, + []map[string][]string{{"key1": {"bytes"}, "key2": {"kilobytes"}}}, + map[string]string{"key1": "bytes", "key2": "kilobytes"}, + map[string][]string{}, + }, + { + "One sample, one key with one value, unit specified", + []map[string][]int64{{"key1": {8}}}, + []map[string][]string{{"key1": {"bytes"}}}, + map[string]string{"key1": "bytes"}, + map[string][]string{}, + }, + { + "One sample, one key with one value, empty unit specified", + []map[string][]int64{{"key1": {8}}}, + []map[string][]string{{"key1": {""}}}, + map[string]string{"key1": "key1"}, + map[string][]string{}, + }, + { + "Key bytes, unit not specified", + []map[string][]int64{{"bytes": {8}}}, + []map[string][]string{nil}, + map[string]string{"bytes": "bytes"}, + map[string][]string{}, + }, + { + "One sample, one key with one value, unit not specified", + []map[string][]int64{{"kilobytes": {8}}}, + []map[string][]string{nil}, + map[string]string{"kilobytes": "kilobytes"}, + map[string][]string{}, + }, + { + "Key request, unit not specified", + []map[string][]int64{{"request": {8}}}, + []map[string][]string{nil}, + map[string]string{"request": "bytes"}, + map[string][]string{}, + }, + { + "Key alignment, unit not specified", + []map[string][]int64{{"alignment": {8}}}, + []map[string][]string{nil}, + map[string]string{"alignment": "bytes"}, + map[string][]string{}, + }, + { + "One sample, one key with multiple values and two different units", + []map[string][]int64{{"key1": {8, 8}}}, + []map[string][]string{{"key1": {"bytes", "kilobytes"}}}, + map[string]string{"key1": "bytes"}, + map[string][]string{"key1": {"kilobytes"}}, + }, + { + "One sample, one key with multiple values and three different units", + []map[string][]int64{{"key1": {8, 8}}}, + []map[string][]string{{"key1": {"bytes", "megabytes", "kilobytes"}}}, + 
map[string]string{"key1": "bytes"}, + map[string][]string{"key1": {"kilobytes", "megabytes"}}, + }, + { + "Two samples, one key, different units specified", + []map[string][]int64{{"key1": {8}}, {"key1": {8}}}, + []map[string][]string{{"key1": {"bytes"}}, {"key1": {"kilobytes"}}}, + map[string]string{"key1": "bytes"}, + map[string][]string{"key1": {"kilobytes"}}, + }, + { + "Keys alignment, request, and bytes have units specified", + []map[string][]int64{{ + "alignment": {8}, + "request": {8}, + "bytes": {8}, + }}, + []map[string][]string{{ + "alignment": {"seconds"}, + "request": {"minutes"}, + "bytes": {"hours"}, + }}, + map[string]string{ + "alignment": "seconds", + "request": "minutes", + "bytes": "hours", + }, + map[string][]string{}, + }, + } + for _, test := range tagFilterTests { + p := &Profile{Sample: make([]*Sample, len(test.tagVals))} + for i, numLabel := range test.tagVals { + s := Sample{ + NumLabel: numLabel, + NumUnit: test.tagUnits[i], + } + p.Sample[i] = &s + } + units, ignoredUnits := p.NumLabelUnits() + if !reflect.DeepEqual(test.wantUnits, units) { + t.Errorf("%s: got %v units, want %v", test.desc, units, test.wantUnits) + } + if !reflect.DeepEqual(test.wantIgnoredUnits, ignoredUnits) { + t.Errorf("%s: got %v ignored units, want %v", test.desc, ignoredUnits, test.wantIgnoredUnits) + } + } +} + +func TestSetMain(t *testing.T) { + testProfile1.massageMappings() + if testProfile1.Mapping[0].File != mainBinary { + t.Errorf("got %s for main", testProfile1.Mapping[0].File) + } +} + +func TestParseKernelRelocation(t *testing.T) { + src := testProfile1.Copy() + if src.Mapping[len(src.Mapping)-1].KernelRelocationSymbol != "_text" { + t.Errorf("got %s for Mapping.KernelRelocationSymbol", src.Mapping[0].KernelRelocationSymbol) + } +} + +func TestEncodeDecodeDocURL(t *testing.T) { + input := testProfile1.Copy() + input.DocURL = "http://example.comp/url" + + // Encode/decode. 
+ var buf bytes.Buffer + if err := input.Write(&buf); err != nil { + t.Fatal("encode: ", err) + } + output, err := Parse(&buf) + if err != nil { + t.Fatal("decode: ", err) + } + if want, got := input.String(), output.String(); want != got { + d, err := proftest.Diff([]byte(want), []byte(got)) + if err != nil { + t.Fatal(err) + } + t.Errorf("wrong result of encode/decode (-want,+got):\n%s\n", string(d)) + } +} + +// parallel runs n copies of fn in parallel. +func parallel(n int, fn func()) { + var wg sync.WaitGroup + wg.Add(n) + for i := 0; i < n; i++ { + go func() { + fn() + wg.Done() + }() + } + wg.Wait() +} + +func TestThreadSafety(t *testing.T) { + src := testProfile1.Copy() + parallel(4, func() { src.Copy() }) + parallel(4, func() { + var b bytes.Buffer + src.WriteUncompressed(&b) + }) + parallel(4, func() { + var b bytes.Buffer + src.Write(&b) + }) +} + +func BenchmarkParse(b *testing.B) { + data := proftest.LargeProfile(b) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := Parse(bytes.NewBuffer(data)) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkWrite(b *testing.B) { + p, err := Parse(bytes.NewBuffer(proftest.LargeProfile(b))) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + if err := p.WriteUncompressed(io.Discard); err != nil { + b.Fatal(err) + } + } +} diff --git a/plugin/debug/pkg/profile/proto.go b/plugin/debug/pkg/profile/proto.go new file mode 100644 index 0000000..a15696b --- /dev/null +++ b/plugin/debug/pkg/profile/proto.go @@ -0,0 +1,367 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is a simple protocol buffer encoder and decoder. +// The format is described at +// https://developers.google.com/protocol-buffers/docs/encoding +// +// A protocol message must implement the message interface: +// decoder() []decoder +// encode(*buffer) +// +// The decode method returns a slice indexed by field number that gives the +// function to decode that field. +// The encode method encodes its receiver into the given buffer. +// +// The two methods are simple enough to be implemented by hand rather than +// by using a protocol compiler. +// +// See profile.go for examples of messages implementing this interface. +// +// There is no support for groups, message sets, or "has" bits. + +package profile + +import ( + "errors" + "fmt" +) + +type buffer struct { + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte + tmpLines []Line // temporary storage used while decoding "repeated Line". 
+} + +type decoder func(*buffer, message) error + +type message interface { + decoder() []decoder + encode(*buffer) +} + +func marshal(m message) []byte { + var b buffer + m.encode(&b) + return b.data +} + +func encodeVarint(b *buffer, x uint64) { + for x >= 128 { + b.data = append(b.data, byte(x)|0x80) + x >>= 7 + } + b.data = append(b.data, byte(x)) +} + +func encodeLength(b *buffer, tag int, len int) { + encodeVarint(b, uint64(tag)<<3|2) + encodeVarint(b, uint64(len)) +} + +func encodeUint64(b *buffer, tag int, x uint64) { + // append varint to b.data + encodeVarint(b, uint64(tag)<<3) + encodeVarint(b, x) +} + +func encodeUint64s(b *buffer, tag int, x []uint64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, u) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeUint64(b, tag, u) + } +} + +func encodeUint64Opt(b *buffer, tag int, x uint64) { + if x == 0 { + return + } + encodeUint64(b, tag, x) +} + +func encodeInt64(b *buffer, tag int, x int64) { + u := uint64(x) + encodeUint64(b, tag, u) +} + +func encodeInt64s(b *buffer, tag int, x []int64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, uint64(u)) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeInt64(b, tag, u) + } +} + +func encodeInt64Opt(b *buffer, tag int, x int64) { + if x == 0 { + return + } + encodeInt64(b, tag, x) +} + +func encodeString(b *buffer, tag int, x string) { + encodeLength(b, tag, len(x)) + b.data = append(b.data, x...) 
+} + +func encodeStrings(b *buffer, tag int, x []string) { + for _, s := range x { + encodeString(b, tag, s) + } +} + +func encodeBool(b *buffer, tag int, x bool) { + if x { + encodeUint64(b, tag, 1) + } else { + encodeUint64(b, tag, 0) + } +} + +func encodeBoolOpt(b *buffer, tag int, x bool) { + if x { + encodeBool(b, tag, x) + } +} + +func encodeMessage(b *buffer, tag int, m message) { + n1 := len(b.data) + m.encode(b) + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) +} + +func unmarshal(data []byte, m message) (err error) { + b := buffer{data: data, typ: 2} + return decodeMessage(&b, m) +} + +func le64(p []byte) uint64 { + return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 +} + +func le32(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func decodeVarint(data []byte) (uint64, []byte, error) { + var u uint64 + for i := 0; ; i++ { + if i >= 10 || i >= len(data) { + return 0, nil, errors.New("bad varint") + } + u |= uint64(data[i]&0x7F) << uint(7*i) + if data[i]&0x80 == 0 { + return u, data[i+1:], nil + } + } +} + +func decodeField(b *buffer, data []byte) ([]byte, error) { + x, data, err := decodeVarint(data) + if err != nil { + return nil, err + } + b.field = int(x >> 3) + b.typ = int(x & 7) + b.data = nil + b.u64 = 0 + switch b.typ { + case 0: + b.u64, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + case 1: + if len(data) < 8 { + return nil, errors.New("not enough data") + } + b.u64 = le64(data[:8]) + data = data[8:] + case 2: + var n uint64 + n, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + if n > uint64(len(data)) { + return nil, errors.New("too much data") + } + b.data = data[:n] + data = data[n:] + case 5: + if len(data) < 4 { 
+ return nil, errors.New("not enough data") + } + b.u64 = uint64(le32(data[:4])) + data = data[4:] + default: + return nil, fmt.Errorf("unknown wire type: %d", b.typ) + } + + return data, nil +} + +func checkType(b *buffer, typ int) error { + if b.typ != typ { + return errors.New("type mismatch") + } + return nil +} + +func decodeMessage(b *buffer, m message) error { + if err := checkType(b, 2); err != nil { + return err + } + dec := m.decoder() + data := b.data + for len(data) > 0 { + // pull varint field# + type + var err error + data, err = decodeField(b, data) + if err != nil { + return err + } + if b.field >= len(dec) || dec[b.field] == nil { + continue + } + if err := dec[b.field](b, m); err != nil { + return err + } + } + return nil +} + +func decodeInt64(b *buffer, x *int64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = int64(b.u64) + return nil +} + +func decodeInt64s(b *buffer, x *[]int64) error { + if b.typ == 2 { + // Packed encoding + data := b.data + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + *x = append(*x, int64(u)) + } + return nil + } + var i int64 + if err := decodeInt64(b, &i); err != nil { + return err + } + *x = append(*x, i) + return nil +} + +func decodeUint64(b *buffer, x *uint64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = b.u64 + return nil +} + +func decodeUint64s(b *buffer, x *[]uint64) error { + if b.typ == 2 { + data := b.data + // Packed encoding + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + *x = append(*x, u) + } + return nil + } + var u uint64 + if err := decodeUint64(b, &u); err != nil { + return err + } + *x = append(*x, u) + return nil +} + +func decodeString(b *buffer, x *string) error { + if err := checkType(b, 2); err != nil { + return err + } + *x = string(b.data) + return nil +} + +func decodeStrings(b 
*buffer, x *[]string) error { + var s string + if err := decodeString(b, &s); err != nil { + return err + } + *x = append(*x, s) + return nil +} + +func decodeBool(b *buffer, x *bool) error { + if err := checkType(b, 0); err != nil { + return err + } + if int64(b.u64) == 0 { + *x = false + } else { + *x = true + } + return nil +} diff --git a/plugin/debug/pkg/profile/proto_test.go b/plugin/debug/pkg/profile/proto_test.go new file mode 100644 index 0000000..8ab9eef --- /dev/null +++ b/plugin/debug/pkg/profile/proto_test.go @@ -0,0 +1,181 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package profile + +import ( + "bytes" + "testing" + + "m7s.live/v5/plugin/debug/pkg/internal/proftest" +) + +var testM = []*Mapping{ + { + ID: 1, + Start: 1, + Limit: 10, + Offset: 0, + File: "file1", + BuildID: "buildid1", + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + HasInlineFrames: true, + }, + { + ID: 2, + Start: 10, + Limit: 30, + Offset: 9, + File: "file1", + BuildID: "buildid2", + HasFunctions: true, + HasFilenames: true, + HasLineNumbers: true, + HasInlineFrames: true, + }, +} + +var testF = []*Function{ + {ID: 1, Name: "func1", SystemName: "func1", Filename: "file1"}, + {ID: 2, Name: "func2", SystemName: "func2", Filename: "file1"}, + {ID: 3, Name: "func3", SystemName: "func3", Filename: "file2"}, + {ID: 4, Name: "func4", SystemName: "func4", Filename: "file3"}, + {ID: 5, Name: "func5", SystemName: "func5", Filename: "file4"}, +} + +var testL = []*Location{ + { + ID: 1, + Address: 1, + Mapping: testM[0], + Line: []Line{ + { + Function: testF[0], + Line: 2, + }, + { + Function: testF[1], + Line: 2222222, + }, + }, + }, + { + ID: 2, + Mapping: testM[1], + Address: 11, + Line: []Line{ + { + Function: testF[2], + Line: 2, + }, + }, + }, + { + ID: 3, + Mapping: testM[1], + Address: 12, + }, + { + ID: 4, + Mapping: testM[1], + Address: 12, + Line: []Line{ + { + Function: testF[4], + Line: 6, + }, + { + Function: testF[4], + Line: 6, + }, + }, + IsFolded: true, + }, +} + +var all = &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 10, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "cpu", Unit: "cycles"}, + {Type: "object", Unit: "count"}, + }, + Sample: []*Sample{ + { + Location: []*Location{testL[0], testL[1], testL[2], testL[1], testL[1]}, + Label: map[string][]string{ + "key1": {"value1"}, + "key2": {"value2"}, + }, + Value: []int64{10, 20}, + }, + { + Location: []*Location{testL[1], testL[2], testL[0], testL[1]}, + Value: []int64{30, 40}, + Label: map[string][]string{ + "key1": 
{"value1"}, + "key2": {"value2"}, + }, + NumLabel: map[string][]int64{ + "key1": {1, 2}, + "key2": {3, 4}, + "bytes": {3, 4}, + "requests": {1, 1, 3, 4, 5}, + "alignment": {3, 4}, + }, + NumUnit: map[string][]string{ + "requests": {"", "", "seconds", "", "s"}, + "alignment": {"kilobytes", "kilobytes"}, + }, + }, + { + Location: []*Location{testL[1], testL[2], testL[0], testL[1]}, + Value: []int64{30, 40}, + NumLabel: map[string][]int64{ + "size": {0}, + }, + NumUnit: map[string][]string{ + "size": {"bytes"}, + }, + }, + }, + Function: testF, + Mapping: testM, + Location: testL, + Comments: []string{"Comment 1", "Comment 2"}, +} + +func TestMarshalUnmarshal(t *testing.T) { + // Write the profile, parse it, and ensure they're equal. + var buf bytes.Buffer + all.Write(&buf) + all2, err := Parse(&buf) + if err != nil { + t.Fatal(err) + } + + js1 := proftest.EncodeJSON(&all) + js2 := proftest.EncodeJSON(&all2) + if string(js1) != string(js2) { + t.Errorf("profiles differ") + d, err := proftest.Diff(js1, js2) + if err != nil { + t.Fatal(err) + } + t.Error("\n" + string(d)) + } +} diff --git a/plugin/debug/pkg/profile/prune.go b/plugin/debug/pkg/profile/prune.go new file mode 100644 index 0000000..b2f9fd5 --- /dev/null +++ b/plugin/debug/pkg/profile/prune.go @@ -0,0 +1,194 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implements methods to remove frames from profiles. 
+ +package profile + +import ( + "fmt" + "regexp" + "strings" +) + +var ( + reservedNames = []string{"(anonymous namespace)", "operator()"} + bracketRx = func() *regexp.Regexp { + var quotedNames []string + for _, name := range append(reservedNames, "(") { + quotedNames = append(quotedNames, regexp.QuoteMeta(name)) + } + return regexp.MustCompile(strings.Join(quotedNames, "|")) + }() +) + +// simplifyFunc does some primitive simplification of function names. +func simplifyFunc(f string) string { + // Account for leading '.' on the PPC ELF v1 ABI. + funcName := strings.TrimPrefix(f, ".") + // Account for unsimplified names -- try to remove the argument list by trimming + // starting from the first '(', but skipping reserved names that have '('. + for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { + foundReserved := false + for _, res := range reservedNames { + if funcName[ind[0]:ind[1]] == res { + foundReserved = true + break + } + } + if !foundReserved { + funcName = funcName[:ind[0]] + break + } + } + return funcName +} + +// Prune removes all nodes beneath a node matching dropRx, and not +// matching keepRx. If the root node of a Sample matches, the sample +// will have an empty stack. +func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { + prune := make(map[uint64]bool) + pruneBeneath := make(map[uint64]bool) + + // simplifyFunc can be expensive, so cache results. + // Note that the same function name can be encountered many times due + // different lines and addresses in the same function. 
+ pruneCache := map[string]bool{} // Map from function to whether or not to prune + pruneFromHere := func(s string) bool { + if r, ok := pruneCache[s]; ok { + return r + } + funcName := simplifyFunc(s) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + pruneCache[s] = true + return true + } + } + pruneCache[s] = false + return false + } + + for _, loc := range p.Location { + var i int + for i = len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + if pruneFromHere(fn.Name) { + break + } + } + } + + if i >= 0 { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + + // Remove the matching location. + if i == len(loc.Line)-1 { + // Matched the top entry: prune the whole location. + prune[loc.ID] = true + } else { + loc.Line = loc.Line[i+1:] + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the root to the leaves to find the prune location. + // Do not prune frames before the first user frame, to avoid + // pruning everything. + foundUser := false + for i := len(sample.Location) - 1; i >= 0; i-- { + id := sample.Location[i].ID + if !prune[id] && !pruneBeneath[id] { + foundUser = true + continue + } + if !foundUser { + continue + } + if prune[id] { + sample.Location = sample.Location[i+1:] + break + } + if pruneBeneath[id] { + sample.Location = sample.Location[i:] + break + } + } + } +} + +// RemoveUninteresting prunes and elides profiles using built-in +// tables of uninteresting function names. 
+func (p *Profile) RemoveUninteresting() error { + var keep, drop *regexp.Regexp + var err error + + if p.DropFrames != "" { + if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err) + } + if p.KeepFrames != "" { + if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err) + } + } + p.Prune(drop, keep) + } + return nil +} + +// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself. +// +// Please see the example below to understand this method as well as +// the difference from Prune method. +// +// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline. +// +// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A. +// Prune(A, nil) returns [B,C,B,D] by removing A itself. +// +// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom. +// Prune(B, nil) returns [D] because a matching node is found by scanning from the root. +func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + for i := 0; i < len(loc.Line); i++ { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + loc.Line = loc.Line[i:] + break + } + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the bottom leaf to the root to find the prune location. 
+ for i, loc := range sample.Location { + if pruneBeneath[loc.ID] { + sample.Location = sample.Location[i:] + break + } + } + } +} diff --git a/plugin/debug/pkg/profile/prune_test.go b/plugin/debug/pkg/profile/prune_test.go new file mode 100644 index 0000000..aaf76b9 --- /dev/null +++ b/plugin/debug/pkg/profile/prune_test.go @@ -0,0 +1,230 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "strings" + "testing" +) + +func TestPrune(t *testing.T) { + for _, test := range []struct { + in *Profile + want string + }{ + {in1, out1}, + {in2, out2}, + } { + in := test.in.Copy() + in.RemoveUninteresting() + if err := in.CheckValid(); err != nil { + t.Error(err) + } + w := strings.Split(test.want, "\n") + for i, g := range strings.Split(in.String(), "\n") { + if i >= len(w) { + t.Fatalf("got trailing %s", g) + } + if strings.TrimSpace(g) != strings.TrimSpace(w[i]) { + t.Fatalf(`%d: got: "%s" want:"%s"`, i, g, w[i]) + } + } + } +} + +var funs = []*Function{ + {ID: 1, Name: "main", SystemName: "main", Filename: "main.c"}, + {ID: 2, Name: "fun1", SystemName: "fun1", Filename: "fun.c"}, + {ID: 3, Name: "fun2", SystemName: "fun2", Filename: "fun.c"}, + {ID: 4, Name: "fun3", SystemName: "fun3", Filename: "fun.c"}, + {ID: 5, Name: "fun4", SystemName: "fun4", Filename: "fun.c"}, + {ID: 6, Name: "fun5", SystemName: "fun5", Filename: "fun.c"}, + {ID: 7, Name: "unsimplified_fun(int)", 
SystemName: "unsimplified_fun(int)", Filename: "fun.c"}, + {ID: 8, Name: "Foo::(anonymous namespace)::Test::Bar", SystemName: "Foo::(anonymous namespace)::Test::Bar", Filename: "fun.c"}, + {ID: 9, Name: "Hello::(anonymous namespace)::World(const Foo::(anonymous namespace)::Test::Bar)", SystemName: "Hello::(anonymous namespace)::World(const Foo::(anonymous namespace)::Test::Bar)", Filename: "fun.c"}, + {ID: 10, Name: "Foo::operator()(::Bar)", SystemName: "Foo::operator()(::Bar)", Filename: "fun.c"}, +} + +var locs1 = []*Location{ + { + ID: 1, + Line: []Line{ + {Function: funs[0], Line: 1, Column: 7}, + }, + }, + { + ID: 2, + Line: []Line{ + {Function: funs[1], Line: 2}, + {Function: funs[2], Line: 1}, + }, + }, + { + ID: 3, + Line: []Line{ + {Function: funs[3], Line: 2}, + {Function: funs[1], Line: 1, Column: 7}, + }, + }, + { + ID: 4, + Line: []Line{ + {Function: funs[3], Line: 2}, + {Function: funs[1], Line: 2}, + {Function: funs[5], Line: 2}, + }, + }, +} + +var in1 = &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*Sample{ + { + Location: []*Location{locs1[0]}, + Value: []int64{1, 1}, + }, + { + Location: []*Location{locs1[1], locs1[0]}, + Value: []int64{1, 1}, + }, + { + Location: []*Location{locs1[2], locs1[0]}, + Value: []int64{1, 1}, + }, + { + Location: []*Location{locs1[3], locs1[0]}, + Value: []int64{1, 1}, + }, + { + Location: []*Location{locs1[3], locs1[2], locs1[1], locs1[0]}, + Value: []int64{1, 1}, + }, + }, + Location: locs1, + Function: funs, + DropFrames: "fu.*[12]|banana", + KeepFrames: ".*[n2][n2]", +} + +const out1 = `PeriodType: cpu milliseconds +Period: 1 +Duration: 10s +Samples: +samples/count cpu/milliseconds + 1 1: 1 + 1 1: 2 1 + 1 1: 1 + 1 1: 4 1 + 1 1: 2 1 +Locations + 1: 0x0 main main.c:1:7 s=0 + 2: 0x0 fun2 fun.c:1:0 s=0 + 3: 0x0 fun3 fun.c:2:0 s=0 + fun1 
fun.c:1:7 s=0 + 4: 0x0 fun5 fun.c:2:0 s=0 +Mappings +` + +var locs2 = []*Location{ + { + ID: 1, + Line: []Line{ + {Function: funs[0], Line: 1}, + }, + }, + { + ID: 2, + Line: []Line{ + {Function: funs[6], Line: 1}, + }, + }, + { + ID: 3, + Line: []Line{ + {Function: funs[7], Line: 1}, + }, + }, + { + ID: 4, + Line: []Line{ + {Function: funs[8], Line: 1}, + }, + }, + { + ID: 5, + Line: []Line{ + {Function: funs[9], Line: 1}, + }, + }, +} + +var in2 = &Profile{ + PeriodType: &ValueType{Type: "cpu", Unit: "milliseconds"}, + Period: 1, + DurationNanos: 10e9, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "milliseconds"}, + }, + Sample: []*Sample{ + // Unsimplified name with parameters shouldn't match. + { + Location: []*Location{locs2[1], locs2[0]}, + Value: []int64{1, 1}, + }, + // .*Foo::.*::Bar.* should (and will be dropped) regardless of the anonymous namespace. + { + Location: []*Location{locs2[2], locs2[0]}, + Value: []int64{1, 1}, + }, + // .*Foo::.*::Bar.* shouldn't match inside the parameter list. + { + Location: []*Location{locs2[3], locs2[0]}, + Value: []int64{1, 1}, + }, + // .*operator\(\) should match, regardless of parameters. 
+ { + Location: []*Location{locs2[4], locs2[0]}, + Value: []int64{1, 1}, + }, + }, + Location: locs2, + Function: funs, + DropFrames: `unsimplified_fun\(int\)|.*Foo::.*::Bar.*|.*operator\(\)`, +} + +const out2 = `PeriodType: cpu milliseconds +Period: 1 +Duration: 10s +Samples: +samples/count cpu/milliseconds + 1 1: 2 1 + 1 1: 1 + 1 1: 4 1 + 1 1: 1 +Locations + 1: 0x0 main main.c:1:0 s=0 + 2: 0x0 unsimplified_fun(int) fun.c:1:0 s=0 + 3: 0x0 Foo::(anonymous namespace)::Test::Bar fun.c:1:0 s=0 + 4: 0x0 Hello::(anonymous namespace)::World(const Foo::(anonymous namespace)::Test::Bar) fun.c:1:0 s=0 + 5: 0x0 Foo::operator()(::Bar) fun.c:1:0 s=0 +Mappings +` diff --git a/plugin/debug/pkg/profile/testdata/cppbench.contention b/plugin/debug/pkg/profile/testdata/cppbench.contention new file mode 100644 index 0000000..66a64c9 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/cppbench.contention @@ -0,0 +1,24 @@ +--- contentionz 1 --- +cycles/second = 3201000000 +sampling period = 100 +ms since reset = 16502830 +discarded samples = 0 + 19490304 27 @ 0xbccc97 0xc61202 0x42ed5f 0x42edc1 0x42e15a 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 768 1 @ 0xbccc97 0xa42dc7 0xa456e4 0x7fcdc2ff214e + 5760 2 @ 0xbccc97 0xb82b73 0xb82bcb 0xb87eab 0xb8814c 0x4e969d 0x4faa17 0x4fc5f6 0x4fd028 0x4fd230 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 569088 1 @ 0xbccc97 0xb82b73 0xb82bcb 0xb87f08 0xb8814c 0x42ed5f 0x42edc1 0x42e15a 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 2432 1 @ 0xbccc97 0xb82b73 0xb82bcb 0xb87eab 0xb8814c 0x7aa74c 0x7ab844 0x7ab914 0x79e9e9 0x79e326 0x4d299e 0x4d4b7b 0x4b7be8 0x4b7ff1 0x4d2dae 0x79e80a + 2034816 3 @ 0xbccc97 0xb82f0f 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e +--- Memory map: --- + 00400000-00fcb000: cppbench_server_main + 7fcdc231e000-7fcdc2321000: /libnss_cache-2.15.so + 7fcdc2522000-7fcdc252e000: 
/libnss_files-2.15.so + 7fcdc272f000-7fcdc28dd000: /libc-2.15.so + 7fcdc2ae7000-7fcdc2be2000: /libm-2.15.so + 7fcdc2de3000-7fcdc2dea000: /librt-2.15.so + 7fcdc2feb000-7fcdc3003000: /libpthread-2.15.so + 7fcdc3208000-7fcdc320a000: /libdl-2.15.so + 7fcdc340c000-7fcdc3415000: /libcrypt-2.15.so + 7fcdc3645000-7fcdc3669000: /ld-2.15.so + 7fff86bff000-7fff86c00000: [vdso] + ffffffffff600000-ffffffffff601000: [vsyscall] diff --git a/plugin/debug/pkg/profile/testdata/cppbench.contention.string b/plugin/debug/pkg/profile/testdata/cppbench.contention.string new file mode 100644 index 0000000..441f1ce --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/cppbench.contention.string @@ -0,0 +1,65 @@ +PeriodType: contentions count +Period: 100 +Duration: 4h35 +Samples: +contentions/count delay/nanoseconds + 2700 608881724: 1 2 3 4 5 6 7 8 9 10 11 12 13 + 100 23992: 1 14 12 13 + 200 179943: 1 15 16 17 18 19 20 21 22 23 9 10 11 12 13 + 100 17778444: 1 15 16 24 18 3 4 5 6 7 8 9 10 11 12 13 + 100 75976: 1 15 16 17 18 25 26 27 28 29 30 31 32 33 34 9 + 300 63568134: 1 35 36 37 38 39 40 6 7 8 9 10 11 12 13 +Locations + 1: 0xbccc96 M=1 + 2: 0xc61201 M=1 + 3: 0x42ed5e M=1 + 4: 0x42edc0 M=1 + 5: 0x42e159 M=1 + 6: 0x5261ae M=1 + 7: 0x526ede M=1 + 8: 0x5280aa M=1 + 9: 0x79e809 M=1 + 10: 0x7a251a M=1 + 11: 0x7a296c M=1 + 12: 0xa456e3 M=1 + 13: 0x7fcdc2ff214d M=7 + 14: 0xa42dc6 M=1 + 15: 0xb82b72 M=1 + 16: 0xb82bca M=1 + 17: 0xb87eaa M=1 + 18: 0xb8814b M=1 + 19: 0x4e969c M=1 + 20: 0x4faa16 M=1 + 21: 0x4fc5f5 M=1 + 22: 0x4fd027 M=1 + 23: 0x4fd22f M=1 + 24: 0xb87f07 M=1 + 25: 0x7aa74b M=1 + 26: 0x7ab843 M=1 + 27: 0x7ab913 M=1 + 28: 0x79e9e8 M=1 + 29: 0x79e325 M=1 + 30: 0x4d299d M=1 + 31: 0x4d4b7a M=1 + 32: 0x4b7be7 M=1 + 33: 0x4b7ff0 M=1 + 34: 0x4d2dad M=1 + 35: 0xb82f0e M=1 + 36: 0xb83002 M=1 + 37: 0xb87d4f M=1 + 38: 0xc635ef M=1 + 39: 0x42ecc2 M=1 + 40: 0x42e14b M=1 +Mappings +1: 0x400000/0xfcb000/0x0 cppbench_server_main +2: 0x7fcdc231e000/0x7fcdc2321000/0x0 /libnss_cache-2.15.so +3: 
0x7fcdc2522000/0x7fcdc252e000/0x0 /libnss_files-2.15.so +4: 0x7fcdc272f000/0x7fcdc28dd000/0x0 /libc-2.15.so +5: 0x7fcdc2ae7000/0x7fcdc2be2000/0x0 /libm-2.15.so +6: 0x7fcdc2de3000/0x7fcdc2dea000/0x0 /librt-2.15.so +7: 0x7fcdc2feb000/0x7fcdc3003000/0x0 /libpthread-2.15.so +8: 0x7fcdc3208000/0x7fcdc320a000/0x0 /libdl-2.15.so +9: 0x7fcdc340c000/0x7fcdc3415000/0x0 /libcrypt-2.15.so +10: 0x7fcdc3645000/0x7fcdc3669000/0x0 /ld-2.15.so +11: 0x7fff86bff000/0x7fff86c00000/0x0 [vdso] +12: 0xffffffffff600000/0xffffffffff601000/0x0 [vsyscall] diff --git a/plugin/debug/pkg/profile/testdata/cppbench.cpu b/plugin/debug/pkg/profile/testdata/cppbench.cpu new file mode 100644 index 0000000..607015e Binary files /dev/null and b/plugin/debug/pkg/profile/testdata/cppbench.cpu differ diff --git a/plugin/debug/pkg/profile/testdata/cppbench.cpu.string b/plugin/debug/pkg/profile/testdata/cppbench.cpu.string new file mode 100644 index 0000000..251f913 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/cppbench.cpu.string @@ -0,0 +1,179 @@ +PeriodType: cpu nanoseconds +Period: 10000000 +Samples: +samples/count cpu/nanoseconds + 1 10000000: 1 2 3 4 5 6 7 8 9 10 + 1 10000000: 11 2 3 4 5 6 7 8 9 10 + 1 10000000: 1 2 3 4 5 6 7 8 9 10 + 1 10000000: 12 13 14 15 16 17 18 3 4 5 6 7 8 9 10 + 542 5420000000: 19 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 20 17 18 3 4 5 6 7 8 9 10 + 10 100000000: 21 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 22 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 23 24 25 2 3 4 5 6 7 8 9 10 + 3 30000000: 26 16 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 27 16 17 18 3 4 5 6 7 8 9 10 + 2 20000000: 28 16 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 29 16 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 30 31 32 33 34 35 36 37 38 9 10 + 3 30000000: 39 40 41 24 25 2 3 4 5 6 7 8 9 10 + 2 20000000: 42 40 41 24 25 2 3 4 5 6 7 8 9 10 + 1 10000000: 43 40 41 24 25 2 3 4 5 6 7 8 9 10 + 2 20000000: 44 45 41 24 25 2 3 4 5 6 7 8 9 10 + 67 670000000: 46 2 3 4 5 6 7 8 9 10 + 20 200000000: 47 2 3 4 5 6 7 8 9 10 + 12 120000000: 48 2 3 4 5 6 
7 8 9 10 + 5 50000000: 11 2 3 4 5 6 7 8 9 10 + 1 10000000: 49 10 + 1 10000000: 50 51 52 13 14 15 16 17 18 3 4 5 6 7 8 9 10 + 2 20000000: 53 51 52 13 14 15 16 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 54 14 15 16 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 55 56 57 58 4 5 6 7 8 9 10 + 1 10000000: 59 41 24 25 2 3 4 5 6 7 8 9 10 + 1 10000000: 60 41 24 25 2 3 4 5 6 7 8 9 10 + 1 10000000: 61 62 63 64 40 41 24 25 2 3 4 5 6 7 8 9 10 + 1 10000000: 65 66 67 68 69 70 71 72 73 74 75 37 38 9 10 + 1 10000000: 76 13 77 15 16 17 18 3 4 5 6 7 8 9 10 + 2 20000000: 78 15 16 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 79 15 16 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 80 13 77 15 16 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 81 15 16 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 82 13 14 15 16 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 83 13 77 15 16 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 83 13 14 15 16 17 18 3 4 5 6 7 8 9 10 + 1 10000000: 30 84 85 86 9 10 + 1 10000000: 87 88 40 41 24 25 2 3 4 5 6 7 8 9 10 + 1 10000000: 89 90 91 92 8 9 10 + 1 10000000: 30 93 8 9 10 + 1 10000000: 30 84 94 9 10 + 1 10000000: 95 3 4 5 6 7 8 9 10 + 1 10000000: 96 97 3 4 5 6 7 8 9 10 + 1 10000000: 98 25 2 3 4 5 6 7 8 9 10 + 1 10000000: 99 25 2 3 4 5 6 7 8 9 10 + 1 10000000: 100 101 102 41 24 25 2 3 4 5 6 7 8 9 10 + 2 20000000: 103 104 91 92 8 9 10 + 1 10000000: 105 104 91 92 8 9 10 + 1 10000000: 106 107 108 109 97 3 4 5 6 7 8 9 10 +Locations + 1: 0x42ef04 M=1 + 2: 0x42e14b M=1 + 3: 0x5261ae M=1 + 4: 0x526ede M=1 + 5: 0x5280aa M=1 + 6: 0x79e809 M=1 + 7: 0x7a251a M=1 + 8: 0x7a296c M=1 + 9: 0xa456e3 M=1 + 10: 0x7f5e541460fd M=7 + 11: 0x42ef17 M=1 + 12: 0xb867c0 M=1 + 13: 0xb82bca M=1 + 14: 0xb87eaa M=1 + 15: 0xb8814b M=1 + 16: 0x42ed5e M=1 + 17: 0x42edc0 M=1 + 18: 0x42e159 M=1 + 19: 0x42ed43 M=1 + 20: 0xc60ea0 M=1 + 21: 0x42ed40 M=1 + 22: 0xbf42fe M=1 + 23: 0xb87d6f M=1 + 24: 0xc635ef M=1 + 25: 0x42ecc2 M=1 + 26: 0xc60f0f M=1 + 27: 0xc610d7 M=1 + 28: 0xc61108 M=1 + 29: 0xb8816e M=1 + 30: 0xbc8f1c M=1 + 31: 0xbcae54 M=1 + 32: 0xbcb5f4 M=1 + 33: 0x40b687 
M=1 + 34: 0x535244 M=1 + 35: 0x536bf4 M=1 + 36: 0x42eb0f M=1 + 37: 0x42de64 M=1 + 38: 0xa41281 M=1 + 39: 0xb82dea M=1 + 40: 0xb83002 M=1 + 41: 0xb87d4f M=1 + 42: 0xb82df1 M=1 + 43: 0xb82dd3 M=1 + 44: 0xb82c23 M=1 + 45: 0xb82fd1 M=1 + 46: 0x42ef13 M=1 + 47: 0x42ef0b M=1 + 48: 0x42ef0f M=1 + 49: 0x7f5e53999f13 M=4 + 50: 0xb8591b M=1 + 51: 0xb85e48 M=1 + 52: 0xb82ae3 M=1 + 53: 0xb85893 M=1 + 54: 0xb88cdc M=1 + 55: 0x698000 M=1 + 56: 0x653f4b M=1 + 57: 0x54dc65 M=1 + 58: 0x525120 M=1 + 59: 0xb88d84 M=1 + 60: 0xb88d98 M=1 + 61: 0xb86591 M=1 + 62: 0xb859de M=1 + 63: 0xb862de M=1 + 64: 0xb82d5e M=1 + 65: 0x967171 M=1 + 66: 0x964990 M=1 + 67: 0x448584 M=1 + 68: 0x5476d7 M=1 + 69: 0x4f1be0 M=1 + 70: 0x4f34db M=1 + 71: 0x4f8a9a M=1 + 72: 0x5388df M=1 + 73: 0x573c5a M=1 + 74: 0x4a4168 M=1 + 75: 0x42eb03 M=1 + 76: 0xb82a31 M=1 + 77: 0xb87f07 M=1 + 78: 0xb87e76 M=1 + 79: 0xb87e7e M=1 + 80: 0xb82a36 M=1 + 81: 0xb87ede M=1 + 82: 0xb82a55 M=1 + 83: 0xb82b08 M=1 + 84: 0xbcbcff M=1 + 85: 0xbcbea4 M=1 + 86: 0xa40112 M=1 + 87: 0xb85e87 M=1 + 88: 0xb82d77 M=1 + 89: 0x79eb32 M=1 + 90: 0x7a18e8 M=1 + 91: 0x7a1c44 M=1 + 92: 0x7a2726 M=1 + 93: 0x7a2690 M=1 + 94: 0x89f186 M=1 + 95: 0xc60eb7 M=1 + 96: 0x521c7f M=1 + 97: 0x5194c8 M=1 + 98: 0xc634f0 M=1 + 99: 0xc63245 M=1 + 100: 0xb867d8 M=1 + 101: 0xb82cf2 M=1 + 102: 0xb82f82 M=1 + 103: 0x7f5e538b9a93 M=4 + 104: 0x7a1955 M=1 + 105: 0x7f5e538b9a97 M=4 + 106: 0x7e0f10 M=1 + 107: 0x7e0b5d M=1 + 108: 0x6ab44f M=1 + 109: 0x521d51 M=1 +Mappings +1: 0x400000/0xfcb000/0x0 cppbench_server_main +2: 0x7f5e53061000/0x7f5e53062000/0x0 /lib/libnss_borg-2.15.so +3: 0x7f5e53264000/0x7f5e53270000/0x0 /lib/libnss_files-2.15.so +4: 0x7f5e53883000/0x7f5e53a31000/0x0 /lib/libc-2.15.so +5: 0x7f5e53c3b000/0x7f5e53d36000/0x0 /lib/libm-2.15.so +6: 0x7f5e53f37000/0x7f5e53f3e000/0x0 /lib/librt-2.15.so +7: 0x7f5e5413f000/0x7f5e54157000/0x0 /lib/libpthread-2.15.so +8: 0x7f5e5435c000/0x7f5e5435e000/0x0 /lib/libdl-2.15.so +9: 0x7f5e54560000/0x7f5e54569000/0x0 
/lib/libcrypt-2.15.so +10: 0x7f5e54799000/0x7f5e547bd000/0x0 /lib/ld-2.15.so +11: 0x7ffffb56b000/0x7ffffb56d000/0x0 [vdso] +12: 0xffffffffff600000/0xffffffffff601000/0x0 [vsyscall] diff --git a/plugin/debug/pkg/profile/testdata/cppbench.growth b/plugin/debug/pkg/profile/testdata/cppbench.growth new file mode 100644 index 0000000..d06f78b --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/cppbench.growth @@ -0,0 +1,99 @@ +heap profile: 85: 178257920 [ 85: 178257920] @ growthz + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 
0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 
2097152 [ 1: 2097152] @ 0xc635c8 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 
0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 
0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 
0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 
0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0xafc0eb 0xb087b1 0xb0aa7d 0xb0b374 0xb12f10 0xb13a92 0xb0c443 0xb145f3 0xb147ca 0xa5dddd 0xbbffe6 0xa5e837 0xa65f94 0x5aac9e 0x535526 0x535144 0x5aa468 0x7e3ce7 0x7d13a2 0x7e0d28 0x6ab450 0x538d27 0x5390e8 0x5391e3 0x4e9603 0x4faa17 0x4fc5f6 + 1: 2097152 [ 1: 2097152] @ 0xc635c8 0x816900 0x8149fd 0x813aa0 0xbbff77 0x81421c 0x4ed414 0x4fd707 0x4de2a2 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7fcdc2ff214e + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0xbb5783 0x40acd8 0x61192e 0x4b9522 0x4b9f62 0x4ba025 0x40bd86 0x7fcdc276711d + 1: 2097152 [ 1: 2097152] @ 0xb83003 0xb87d50 0xc635f0 0x42d576 0xc25cc6 0x40651b +--- Memory map: --- + 00400000-00fcb000: cppbench_server_main + 7fcdc231e000-7fcdc2321000: /libnss_cache-2.15.so + 7fcdc2522000-7fcdc252e000: /libnss_files-2.15.so + 7fcdc272f000-7fcdc28dd000: /libc-2.15.so + 7fcdc2ae7000-7fcdc2be2000: /libm-2.15.so + 7fcdc2de3000-7fcdc2dea000: /librt-2.15.so + 7fcdc2feb000-7fcdc3003000: /libpthread-2.15.so + 7fcdc3208000-7fcdc320a000: /libdl-2.15.so + 7fcdc340c000-7fcdc3415000: /libcrypt-2.15.so + 7fcdc3645000-7fcdc3669000: /ld-2.15.so + 7fff86bff000-7fff86c00000: [vdso] 
+ ffffffffff600000-ffffffffff601000: [vsyscall] diff --git a/plugin/debug/pkg/profile/testdata/cppbench.growth.string b/plugin/debug/pkg/profile/testdata/cppbench.growth.string new file mode 100644 index 0000000..842ff9f --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/cppbench.growth.string @@ -0,0 +1,248 @@ +PeriodType: space bytes +Period: 1 +Samples: +objects/count space/bytes + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 14 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 
8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 
1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 4 5 6 7 8 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 + bytes:[2097152] + 1 2097152: 14 42 43 44 45 46 47 48 49 9 10 11 12 13 + bytes:[2097152] + 1 2097152: 1 2 3 50 51 52 53 54 55 56 57 + bytes:[2097152] + 1 2097152: 1 2 3 58 59 60 + bytes:[2097152] +Locations + 1: 0xb83002 M=1 + 2: 0xb87d4f M=1 + 3: 0xc635ef M=1 + 4: 0x42ecc2 M=1 + 5: 0x42e14b M=1 + 6: 0x5261ae M=1 + 7: 0x526ede M=1 + 8: 0x5280aa M=1 + 9: 0x79e809 M=1 + 10: 0x7a251a M=1 + 11: 0x7a296c M=1 + 12: 0xa456e3 M=1 + 13: 0x7fcdc2ff214d M=7 + 14: 0xc635c7 M=1 + 15: 0xafc0ea M=1 + 16: 0xb087b0 M=1 + 17: 0xb0aa7c M=1 + 18: 0xb0b373 M=1 + 19: 0xb12f0f M=1 + 
20: 0xb13a91 M=1 + 21: 0xb0c442 M=1 + 22: 0xb145f2 M=1 + 23: 0xb147c9 M=1 + 24: 0xa5dddc M=1 + 25: 0xbbffe5 M=1 + 26: 0xa5e836 M=1 + 27: 0xa65f93 M=1 + 28: 0x5aac9d M=1 + 29: 0x535525 M=1 + 30: 0x535143 M=1 + 31: 0x5aa467 M=1 + 32: 0x7e3ce6 M=1 + 33: 0x7d13a1 M=1 + 34: 0x7e0d27 M=1 + 35: 0x6ab44f M=1 + 36: 0x538d26 M=1 + 37: 0x5390e7 M=1 + 38: 0x5391e2 M=1 + 39: 0x4e9602 M=1 + 40: 0x4faa16 M=1 + 41: 0x4fc5f5 M=1 + 42: 0x8168ff M=1 + 43: 0x8149fc M=1 + 44: 0x813a9f M=1 + 45: 0xbbff76 M=1 + 46: 0x81421b M=1 + 47: 0x4ed413 M=1 + 48: 0x4fd706 M=1 + 49: 0x4de2a1 M=1 + 50: 0xbb5782 M=1 + 51: 0x40acd7 M=1 + 52: 0x61192d M=1 + 53: 0x4b9521 M=1 + 54: 0x4b9f61 M=1 + 55: 0x4ba024 M=1 + 56: 0x40bd85 M=1 + 57: 0x7fcdc276711c M=4 + 58: 0x42d575 M=1 + 59: 0xc25cc5 M=1 + 60: 0x40651a M=1 +Mappings +1: 0x400000/0xfcb000/0x0 cppbench_server_main +2: 0x7fcdc231e000/0x7fcdc2321000/0x0 /libnss_cache-2.15.so +3: 0x7fcdc2522000/0x7fcdc252e000/0x0 /libnss_files-2.15.so +4: 0x7fcdc272f000/0x7fcdc28dd000/0x0 /libc-2.15.so +5: 0x7fcdc2ae7000/0x7fcdc2be2000/0x0 /libm-2.15.so +6: 0x7fcdc2de3000/0x7fcdc2dea000/0x0 /librt-2.15.so +7: 0x7fcdc2feb000/0x7fcdc3003000/0x0 /libpthread-2.15.so +8: 0x7fcdc3208000/0x7fcdc320a000/0x0 /libdl-2.15.so +9: 0x7fcdc340c000/0x7fcdc3415000/0x0 /libcrypt-2.15.so +10: 0x7fcdc3645000/0x7fcdc3669000/0x0 /ld-2.15.so +11: 0x7fff86bff000/0x7fff86c00000/0x0 [vdso] +12: 0xffffffffff600000/0xffffffffff601000/0x0 [vsyscall] diff --git a/plugin/debug/pkg/profile/testdata/cppbench.heap b/plugin/debug/pkg/profile/testdata/cppbench.heap new file mode 100644 index 0000000..5622250 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/cppbench.heap @@ -0,0 +1,47 @@ +heap profile: 144: 8498176 [ 144: 8498176] @ heapz_v2/524288 + 1: 9216 [ 1: 9216] @ 0xc635c8 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 144 [ 1: 144] @ 0xc635c8 0xa7479b 0xb65e6b 0xb65f80 0xa6d069 0xa6dc80 0xbbffe6 0xa5dd84 0xa7b7c6 0xaa88da 0xaa9db2 0xb59bae 
0xb0c39c 0xb145f3 0xb147ca 0xa5dddd 0xbbffe6 0xa5e837 0xa65f94 0x5aac9e 0x535526 0x535144 0x5aa468 0x7e3ce7 0x7d13a2 0x7e0d28 0x6ab450 0x538d27 0x5390e8 0x5391e3 0x4e9603 + 7: 114688 [ 7: 114688] @ 0xc635c8 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 1792 [ 1: 1792] @ 0xc635c8 0x51a272 0x524997 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 13: 319488 [ 13: 319488] @ 0xc635c8 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 1792 [ 1: 1792] @ 0xc635c8 0xac95a0 0xacdc7c 0xace07b 0xace1ac 0xabd100 0xabe2a9 0x72f52e 0x655376 0x6558d3 0x41c711 0xc25cc6 0x40651b + 1: 2162688 [ 1: 2162688] @ 0xc63568 0xbc462e 0xbc4bb5 0xbc4eda 0x4a57b8 0x4b152c 0x4ae04c 0x4ad225 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 48 [ 1: 48] @ 0xc635c8 0x7be14a 0x7be675 0x6b312d 0xbaa17f 0xbaa142 0xbaabc6 0xbb092c 0x40bce4 0x7f47a4bab11d + 1: 262144 [ 1: 262144] @ 0xc635c8 0x816900 0x8149fd 0x8139f4 0xbbff77 0x81421c 0x4ed414 0x4fd707 0x4de2a2 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 320 [ 1: 320] @ 0xc635c8 0x721a59 0x43005e 0x7382a4 0x430590 0x435425 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 1792 [ 1: 1792] @ 0xc635c8 0x5413b0 0x541ab2 0xbaa17f 0xbaabc6 0x53507c 0xbaa17f 0xbaa9f9 0xbb0d21 0x40bce4 0x7f47a4bab11d + 1: 10240 [ 1: 10240] @ 0xc635c8 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 16: 327680 [ 16: 327680] @ 0xc635c8 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 160 [ 1: 160] @ 0xc635c8 0x578705 0x586247 0x592615 0x592745 0x592cb9 0xa456e4 0x7f47a54360fe + 1: 8192 [ 1: 8192] @ 0xc635c8 0xaaf469 0x52cad7 0x52e89b 0x527f32 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 2: 24576 [ 2: 24576] @ 0xc635c8 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 
0x7f47a54360fe + 1: 2097152 [ 1: 2097152] @ 0xc63568 0xbc463b 0xbc4bb5 0xbc4eda 0x4a57b8 0x4b152c 0x4ae04c 0x4ad225 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 448 [ 1: 448] @ 0xc635c8 0xafca3b 0xb09ba0 0xb09ec0 0xb12fec 0xb13a92 0xb13c93 0xb13d9d 0xa02777 0xbbff77 0xa026ec 0x5701e2 0x53541a 0x535144 0x5aa468 0x7e3ce7 0x7d13a2 0x7e0d28 0x6ab450 0x538d27 0x5390e8 0x5391e3 0x4e9603 0x4faa17 0x4fc5f6 0x4fd028 0x4fd230 0x79e80a 0x7a251b 0x7a296d 0xa456e4 + 47: 1925120 [ 47: 1925120] @ 0xc635c8 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 6656 [ 1: 6656] @ 0xc635c8 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 11: 292864 [ 11: 292864] @ 0xc635c8 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 4096 [ 1: 4096] @ 0xc635c8 0x75373b 0x7eb2d3 0x7ecc87 0x7ece56 0x7ed1ce 0x7ed360 0x7edb1a 0x7edbb5 0x7d50b0 0x4b9ba6 0x4b9f62 0x4ba025 0x40bd86 0x7f47a4bab11d + 1: 112 [ 1: 112] @ 0xc635c8 0x430498 0x435425 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 20480 [ 1: 20480] @ 0xc635c8 0x5a8b92 0x526bff 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 48 [ 1: 48] @ 0xc635c8 0x720c2e 0x5d35f0 0xbaa17f 0xbaabc6 0x42f03d 0xbaa17f 0xbaa9f9 0xbb0d21 0x40bce4 0x7f47a4bab11d + 1: 8192 [ 1: 8192] @ 0xc635c8 0xaaf3e6 0xab0ba0 0xab11be 0xab1639 0x52ebdc 0x527f32 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 2: 131072 [ 2: 131072] @ 0xc635c8 0xaaf469 0xaad4ce 0xb66bcd 0xb670f2 0xb659b5 0x63689b 0x548172 0x520cdc 0x521b82 0x5194c9 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 8192 [ 1: 8192] @ 0xc635c8 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 1: 512 [ 1: 512] @ 0xc635c8 0xaff12a 0xb0b331 0xb12f10 0xb13a92 0xb0c443 0xb145f3 0xb147ca 0xa5dddd 0xbbffe6 0xa5e837 0xa65f94 0x5aac9e 
0x535526 0x535144 0x5aa468 0x7e3ce7 0x7d13a2 0x7e0d28 0x6ab450 0x538d27 0x5390e8 0x5391e3 0x4e9603 0x4faa17 0x4fc5f6 0x4fd028 0x4fd230 0x79e80a 0x7a251b 0x7a296d + 1: 4608 [ 1: 4608] @ 0xc635c8 0x464379 0xa6318d 0x7feee9 0x5ab69c 0x7b0b26 0x79e81a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe + 23: 753664 [ 23: 753664] @ 0xc635c8 0x42ecc3 0x42e14c 0x5261af 0x526edf 0x5280ab 0x79e80a 0x7a251b 0x7a296d 0xa456e4 0x7f47a54360fe +--- Memory map: --- + source=/home + 00400000-00fcb000: $source/cppbench_server_main + 7f47a4351000-7f47a4352000: /lib/libnss_borg-2.15.so + 7f47a4554000-7f47a4560000: /lib/libnss_files-2.15.so + 7f47a4b73000-7f47a4d21000: /lib/libc-2.15.so + 7f47a4f2b000-7f47a5026000: /lib/libm-2.15.so + 7f47a5227000-7f47a522e000: /lib/librt-2.15.so + 7f47a542f000-7f47a5447000: /lib/libpthread-2.15.so + 7f47a564c000-7f47a564e000: /lib/libdl-2.15.so + 7f47a5850000-7f47a5859000: /lib/libcrypt-2.15.so + 7f47a5a89000-7f47a5aad000: /lib/ld-2.15.so + 7fff63dfe000-7fff63e00000: [vdso] + ffffffffff600000-ffffffffff601000: [vsyscall] + diff --git a/plugin/debug/pkg/profile/testdata/cppbench.heap.string b/plugin/debug/pkg/profile/testdata/cppbench.heap.string new file mode 100644 index 0000000..d099e59 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/cppbench.heap.string @@ -0,0 +1,237 @@ +PeriodType: space bytes +Period: 524288 +Samples: +objects/count space/bytes + 57 528909: 1 2 3 4 5 6 7 8 9 10 11 + bytes:[9216] + 3641 524360: 1 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 17 27 28 29 30 31 32 33 34 35 36 37 38 39 40 + bytes:[144] + 227 3727658: 1 2 3 4 5 6 7 8 9 10 11 + bytes:[16384] + 293 525184: 1 41 42 5 6 7 8 9 10 11 + bytes:[1792] + 283 6976735: 1 2 3 4 5 6 7 8 9 10 11 + bytes:[24576] + 293 525184: 1 43 44 45 46 47 48 49 50 51 52 53 54 + bytes:[1792] + 1 2198218: 55 56 57 58 59 60 61 62 7 8 9 10 11 + bytes:[2162688] + 10923 524312: 1 63 64 65 66 67 68 69 70 71 + bytes:[48] + 2 666237: 1 72 73 74 75 76 77 78 79 7 8 9 10 11 + bytes:[262144] + 1638 524448: 1 80 
81 82 83 84 4 5 6 7 8 9 10 11 + bytes:[320] + 293 525184: 1 85 86 66 68 87 66 88 89 70 71 + bytes:[1792] + 51 529424: 1 2 3 4 5 6 7 8 9 10 11 + bytes:[10240] + 417 8553514: 1 2 3 4 5 6 7 8 9 10 11 + bytes:[20480] + 3277 524368: 1 90 91 92 93 94 10 11 + bytes:[160] + 64 528394: 1 95 96 97 98 7 8 9 10 11 + bytes:[8192] + 86 1060911: 1 2 3 4 5 6 7 8 9 10 11 + bytes:[12288] + 1 2136279: 55 99 57 58 59 60 61 62 7 8 9 10 11 + bytes:[2097152] + 1170 524512: 1 100 101 102 103 104 105 106 107 75 108 109 110 31 32 33 34 35 36 37 38 39 40 111 112 113 114 7 8 9 10 + bytes:[448] + 625 25616628: 1 2 3 4 5 6 7 8 9 10 11 + bytes:[40960] + 79 527623: 1 2 3 4 5 6 7 8 9 10 11 + bytes:[6656] + 222 5914839: 1 2 3 4 5 6 7 8 9 10 11 + bytes:[26624] + 128 526338: 1 115 116 117 118 119 120 121 122 123 124 125 126 127 71 + bytes:[4096] + 4681 524344: 1 128 84 4 5 6 7 8 9 10 11 + bytes:[112] + 26 534594: 1 129 130 6 7 8 9 10 11 + bytes:[20480] + 10923 524312: 1 131 132 66 68 133 66 88 89 70 71 + bytes:[48] + 64 528394: 1 134 135 136 137 138 98 7 8 9 10 11 + bytes:[8192] + 17 1115476: 1 95 139 140 141 142 143 144 145 146 147 4 5 6 7 8 9 10 11 + bytes:[65536] + 64 528394: 1 2 3 4 5 6 7 8 9 10 11 + bytes:[8192] + 1024 524544: 1 148 149 150 104 151 24 25 26 17 27 28 29 30 31 32 33 34 35 36 37 38 39 40 111 112 113 114 7 8 9 + bytes:[512] + 114 526595: 1 152 153 154 155 156 157 8 9 10 11 + bytes:[4608] + 379 12439381: 1 2 3 4 5 6 7 8 9 10 11 + bytes:[32768] +Locations + 1: 0xc635c7 M=1 + 2: 0x42ecc2 M=1 + 3: 0x42e14b M=1 + 4: 0x5261ae M=1 + 5: 0x526ede M=1 + 6: 0x5280aa M=1 + 7: 0x79e809 M=1 + 8: 0x7a251a M=1 + 9: 0x7a296c M=1 + 10: 0xa456e3 M=1 + 11: 0x7f47a54360fd M=7 + 12: 0xa7479a M=1 + 13: 0xb65e6a M=1 + 14: 0xb65f7f M=1 + 15: 0xa6d068 M=1 + 16: 0xa6dc7f M=1 + 17: 0xbbffe5 M=1 + 18: 0xa5dd83 M=1 + 19: 0xa7b7c5 M=1 + 20: 0xaa88d9 M=1 + 21: 0xaa9db1 M=1 + 22: 0xb59bad M=1 + 23: 0xb0c39b M=1 + 24: 0xb145f2 M=1 + 25: 0xb147c9 M=1 + 26: 0xa5dddc M=1 + 27: 0xa5e836 M=1 + 28: 0xa65f93 M=1 + 29: 
0x5aac9d M=1 + 30: 0x535525 M=1 + 31: 0x535143 M=1 + 32: 0x5aa467 M=1 + 33: 0x7e3ce6 M=1 + 34: 0x7d13a1 M=1 + 35: 0x7e0d27 M=1 + 36: 0x6ab44f M=1 + 37: 0x538d26 M=1 + 38: 0x5390e7 M=1 + 39: 0x5391e2 M=1 + 40: 0x4e9602 M=1 + 41: 0x51a271 M=1 + 42: 0x524996 M=1 + 43: 0xac959f M=1 + 44: 0xacdc7b M=1 + 45: 0xace07a M=1 + 46: 0xace1ab M=1 + 47: 0xabd0ff M=1 + 48: 0xabe2a8 M=1 + 49: 0x72f52d M=1 + 50: 0x655375 M=1 + 51: 0x6558d2 M=1 + 52: 0x41c710 M=1 + 53: 0xc25cc5 M=1 + 54: 0x40651a M=1 + 55: 0xc63567 M=1 + 56: 0xbc462d M=1 + 57: 0xbc4bb4 M=1 + 58: 0xbc4ed9 M=1 + 59: 0x4a57b7 M=1 + 60: 0x4b152b M=1 + 61: 0x4ae04b M=1 + 62: 0x4ad224 M=1 + 63: 0x7be149 M=1 + 64: 0x7be674 M=1 + 65: 0x6b312c M=1 + 66: 0xbaa17e M=1 + 67: 0xbaa141 M=1 + 68: 0xbaabc5 M=1 + 69: 0xbb092b M=1 + 70: 0x40bce3 M=1 + 71: 0x7f47a4bab11c M=4 + 72: 0x8168ff M=1 + 73: 0x8149fc M=1 + 74: 0x8139f3 M=1 + 75: 0xbbff76 M=1 + 76: 0x81421b M=1 + 77: 0x4ed413 M=1 + 78: 0x4fd706 M=1 + 79: 0x4de2a1 M=1 + 80: 0x721a58 M=1 + 81: 0x43005d M=1 + 82: 0x7382a3 M=1 + 83: 0x43058f M=1 + 84: 0x435424 M=1 + 85: 0x5413af M=1 + 86: 0x541ab1 M=1 + 87: 0x53507b M=1 + 88: 0xbaa9f8 M=1 + 89: 0xbb0d20 M=1 + 90: 0x578704 M=1 + 91: 0x586246 M=1 + 92: 0x592614 M=1 + 93: 0x592744 M=1 + 94: 0x592cb8 M=1 + 95: 0xaaf468 M=1 + 96: 0x52cad6 M=1 + 97: 0x52e89a M=1 + 98: 0x527f31 M=1 + 99: 0xbc463a M=1 + 100: 0xafca3a M=1 + 101: 0xb09b9f M=1 + 102: 0xb09ebf M=1 + 103: 0xb12feb M=1 + 104: 0xb13a91 M=1 + 105: 0xb13c92 M=1 + 106: 0xb13d9c M=1 + 107: 0xa02776 M=1 + 108: 0xa026eb M=1 + 109: 0x5701e1 M=1 + 110: 0x535419 M=1 + 111: 0x4faa16 M=1 + 112: 0x4fc5f5 M=1 + 113: 0x4fd027 M=1 + 114: 0x4fd22f M=1 + 115: 0x75373a M=1 + 116: 0x7eb2d2 M=1 + 117: 0x7ecc86 M=1 + 118: 0x7ece55 M=1 + 119: 0x7ed1cd M=1 + 120: 0x7ed35f M=1 + 121: 0x7edb19 M=1 + 122: 0x7edbb4 M=1 + 123: 0x7d50af M=1 + 124: 0x4b9ba5 M=1 + 125: 0x4b9f61 M=1 + 126: 0x4ba024 M=1 + 127: 0x40bd85 M=1 + 128: 0x430497 M=1 + 129: 0x5a8b91 M=1 + 130: 0x526bfe M=1 + 131: 0x720c2d M=1 + 132: 
0x5d35ef M=1 + 133: 0x42f03c M=1 + 134: 0xaaf3e5 M=1 + 135: 0xab0b9f M=1 + 136: 0xab11bd M=1 + 137: 0xab1638 M=1 + 138: 0x52ebdb M=1 + 139: 0xaad4cd M=1 + 140: 0xb66bcc M=1 + 141: 0xb670f1 M=1 + 142: 0xb659b4 M=1 + 143: 0x63689a M=1 + 144: 0x548171 M=1 + 145: 0x520cdb M=1 + 146: 0x521b81 M=1 + 147: 0x5194c8 M=1 + 148: 0xaff129 M=1 + 149: 0xb0b330 M=1 + 150: 0xb12f0f M=1 + 151: 0xb0c442 M=1 + 152: 0x464378 M=1 + 153: 0xa6318c M=1 + 154: 0x7feee8 M=1 + 155: 0x5ab69b M=1 + 156: 0x7b0b25 M=1 + 157: 0x79e819 M=1 +Mappings +1: 0x400000/0xfcb000/0x0 /home/cppbench_server_main +2: 0x7f47a4351000/0x7f47a4352000/0x0 /lib/libnss_borg-2.15.so +3: 0x7f47a4554000/0x7f47a4560000/0x0 /lib/libnss_files-2.15.so +4: 0x7f47a4b73000/0x7f47a4d21000/0x0 /lib/libc-2.15.so +5: 0x7f47a4f2b000/0x7f47a5026000/0x0 /lib/libm-2.15.so +6: 0x7f47a5227000/0x7f47a522e000/0x0 /lib/librt-2.15.so +7: 0x7f47a542f000/0x7f47a5447000/0x0 /lib/libpthread-2.15.so +8: 0x7f47a564c000/0x7f47a564e000/0x0 /lib/libdl-2.15.so +9: 0x7f47a5850000/0x7f47a5859000/0x0 /lib/libcrypt-2.15.so +10: 0x7f47a5a89000/0x7f47a5aad000/0x0 /lib/ld-2.15.so +11: 0x7fff63dfe000/0x7fff63e00000/0x0 [vdso] +12: 0xffffffffff600000/0xffffffffff601000/0x0 [vsyscall] diff --git a/plugin/debug/pkg/profile/testdata/cppbench.thread b/plugin/debug/pkg/profile/testdata/cppbench.thread new file mode 100644 index 0000000..0192dd6 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/cppbench.thread @@ -0,0 +1,29 @@ +--- threadz 1 --- + +--- Thread 7f794ab90940 (name: main/14748) stack: --- + PC: 0x00bc8f1c: helper(arg *) + 0x0040be31: main + 0x7f7949a9811d: __libc_start_main +--- Thread 7f794964e700 (name: thread1/14751) stack: --- + PC: 0x7f794a32bf7d: nanosleep + 0x7f794a32414e: start_thread + creator: 0xa45b96 0xa460b4 0xbaa17f 0xbaa9f9 0xbb0d21 0x40bce4 0x7f7949a9811d +--- Thread 7f794934c700 (name: thread2/14752) stack: --- + PC: 0x00bc8f1c: Wait(int) + 0x7f794a32414e: start_thread + creator: 0xa45b96 0xa48928 0xbaa17f 0xbaa9f9 0xbb0d21 
0x40bce4 0x7f7949a9811d +--- Thread 7f7948978700 (name: thread3/14759) stack: --- + [same as previous thread] +--- Memory map: --- + 00400000-00fcb000: /home/rsilvera/cppbench/cppbench_server_main + 7f794964f000-7f7949652000: /lib/libnss_cache-2.15.so + 7f7949853000-7f794985f000: /lib/libnss_files-2.15.so + 7f7949a60000-7f7949c0e000: /lib/libc-2.15.so + 7f7949e19000-7f7949f14000: /lib/libm-2.15.so + 7f794a115000-7f794a11c000: /lib/librt-2.15.so + 7f794a31d000-7f794a335000: /lib/libpthread-2.15.so + 7f794a53a000-7f794a53d000: /lib/libdl-2.15.so + 7f794a73e000-7f794a747000: /lib/libcrypt-2.15.so + 7f794a977000-7f794a99b000: /lib/ld-2.15.so + 7fffb8dff000-7fffb8e00000: [vdso] + ffffffffff600000-ffffffffff601000: [vsyscall] diff --git a/plugin/debug/pkg/profile/testdata/cppbench.thread.all b/plugin/debug/pkg/profile/testdata/cppbench.thread.all new file mode 100644 index 0000000..a3f8893 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/cppbench.thread.all @@ -0,0 +1,33 @@ +--- threadz 1 --- + +--- Thread 7eff063d9940 (name: main/25376) stack: --- + PC: 0x00bc8f1c: helper(arg*) + 0x0040be31: main + 0x7eff052e111d: __libc_start_main +--- Thread 7eff04e97700 (name: thread1/25379) stack: --- + PC: 0x7eff05b74f7d: nanosleep + 0x7eff05b6d14e: start_thread + creator: + 0x0040bce4: main + 0x7eff052e111d: __libc_start_main +--- Thread 7eff04770700 (name: thread2/25382) stack: --- + PC: 0x00bc8f1c: Wait(int) + 0x7eff05b6d14e: start_thread + creator: + 0x0040bd6e: main + 0x7eff052e111d: __libc_start_main +--- Thread 7eff0464d700 (name: thread3/25383) stack: --- + [same as previous thread] +--- Memory map: --- + 00400000-00fcb000: /home/rsilvera/cppbench/cppbench_server_main + 7eff04e98000-7eff04e9b000: /lib/libnss_cache-2.15.so + 7eff0509c000-7eff050a8000: /lib/libnss_files-2.15.so + 7eff052a9000-7eff05457000: /lib/libc-2.15.so + 7eff05662000-7eff0575d000: /lib/libm-2.15.so + 7eff0595e000-7eff05965000: /lib/librt-2.15.so + 7eff05b66000-7eff05b7e000: /lib/libpthread-2.15.so + 
7eff05d83000-7eff05d86000: /lib/libdl-2.15.so + 7eff05f87000-7eff05f90000: /lib/libcrypt-2.15.so + 7eff061c0000-7eff061e4000: /lib/ld-2.15.so + 7fff2edff000-7fff2ee00000: [vdso] + ffffffffff600000-ffffffffff601000: [vsyscall] diff --git a/plugin/debug/pkg/profile/testdata/cppbench.thread.all.string b/plugin/debug/pkg/profile/testdata/cppbench.thread.all.string new file mode 100644 index 0000000..c7c0f02 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/cppbench.thread.all.string @@ -0,0 +1,28 @@ +PeriodType: thread count +Period: 1 +Samples: +thread/count + 1: 1 2 3 + 1: 4 5 6 3 + 2: 1 5 7 3 +Locations + 1: 0xbc8f1c M=1 + 2: 0x40be30 M=1 + 3: 0x7eff052e111c M=4 + 4: 0x7eff05b74f7d M=7 + 5: 0x7eff05b6d14d M=7 + 6: 0x40bce3 M=1 + 7: 0x40bd6d M=1 +Mappings +1: 0x400000/0xfcb000/0x0 /home/rsilvera/cppbench/cppbench_server_main +2: 0x7eff04e98000/0x7eff04e9b000/0x0 /lib/libnss_cache-2.15.so +3: 0x7eff0509c000/0x7eff050a8000/0x0 /lib/libnss_files-2.15.so +4: 0x7eff052a9000/0x7eff05457000/0x0 /lib/libc-2.15.so +5: 0x7eff05662000/0x7eff0575d000/0x0 /lib/libm-2.15.so +6: 0x7eff0595e000/0x7eff05965000/0x0 /lib/librt-2.15.so +7: 0x7eff05b66000/0x7eff05b7e000/0x0 /lib/libpthread-2.15.so +8: 0x7eff05d83000/0x7eff05d86000/0x0 /lib/libdl-2.15.so +9: 0x7eff05f87000/0x7eff05f90000/0x0 /lib/libcrypt-2.15.so +10: 0x7eff061c0000/0x7eff061e4000/0x0 /lib/ld-2.15.so +11: 0x7fff2edff000/0x7fff2ee00000/0x0 [vdso] +12: 0xffffffffff600000/0xffffffffff601000/0x0 [vsyscall] diff --git a/plugin/debug/pkg/profile/testdata/cppbench.thread.none b/plugin/debug/pkg/profile/testdata/cppbench.thread.none new file mode 100644 index 0000000..6ab2421 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/cppbench.thread.none @@ -0,0 +1,27 @@ +--- threadz 1 --- + +--- Thread 7eff063d9940 (name: main/25376) stack: --- + PC: 0xbc8f1c 0xbcae55 0xbcb5f5 0x40b688 0x4d5f51 0x40be31 0x7eff052e111d +--- Thread 7eff04b95700 (name: thread1/25380) stack: --- + PC: 0xbc8f1c 0xbcbd00 0xa47f60 0xa456e4 
0x7eff05b6d14e + creator: 0xa45b96 0xa48928 0xbaa17f 0xbaa9f9 0xbb0d21 0x40bce4 0x7eff052e111d +--- Thread 7eff04893700 (name: thread2/25381) stack: --- + PC: 0x7eff052dfa93 0x7a1956 0x7a1c45 0x7a2727 0x7a296d 0xa456e4 + 0x7eff05b6d14e + creator: 0xa45b96 0x7a37d2 0x7a3e8d 0xbbff77 0x79ec1c 0x40bd6e 0x7eff052e111d +--- Thread 7eff04770700 (name: thread3/25382) stack: --- + PC: 0xbc8f1c 0x7a2691 0x7a296d 0xa456e4 0x7eff05b6d14e + creator: 0xa45b96 0x7a37d2 0x7a3e8d 0xbbff77 0x79ec1c 0x40bd6e 0x7eff052e111d +--- Memory map: --- + 00400000-00fcb000: /home/rsilvera/cppbench/cppbench_server_main.unstripped + 7eff04e98000-7eff04e9b000: /lib/libnss_cache-2.15.so + 7eff0509c000-7eff050a8000: /lib/libnss_files-2.15.so + 7eff052a9000-7eff05457000: /lib/libc-2.15.so + 7eff05662000-7eff0575d000: /lib/libm-2.15.so + 7eff0595e000-7eff05965000: /lib/librt-2.15.so + 7eff05b66000-7eff05b7e000: /lib/libpthread-2.15.so + 7eff05d83000-7eff05d86000: /lib/libdl-2.15.so + 7eff05f87000-7eff05f90000: /lib/libcrypt-2.15.so + 7eff061c0000-7eff061e4000: /lib/ld-2.15.so + 7fff2edff000-7fff2ee00000: [vdso] + ffffffffff600000-ffffffffff601000: [vsyscall] diff --git a/plugin/debug/pkg/profile/testdata/cppbench.thread.none.string b/plugin/debug/pkg/profile/testdata/cppbench.thread.none.string new file mode 100644 index 0000000..af0ad3c --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/cppbench.thread.none.string @@ -0,0 +1,50 @@ +PeriodType: thread count +Period: 1 +Samples: +thread/count + 1: 1 2 3 4 5 6 7 + 1: 1 8 9 10 11 12 13 14 15 16 17 7 + 1: 18 19 20 21 22 10 11 12 23 24 25 26 27 7 + 1: 1 28 22 10 11 12 23 24 25 26 27 7 +Locations + 1: 0xbc8f1c M=1 + 2: 0xbcae54 M=1 + 3: 0xbcb5f4 M=1 + 4: 0x40b687 M=1 + 5: 0x4d5f50 M=1 + 6: 0x40be30 M=1 + 7: 0x7eff052e111c M=4 + 8: 0xbcbcff M=1 + 9: 0xa47f5f M=1 + 10: 0xa456e3 M=1 + 11: 0x7eff05b6d14d M=7 + 12: 0xa45b95 M=1 + 13: 0xa48927 M=1 + 14: 0xbaa17e M=1 + 15: 0xbaa9f8 M=1 + 16: 0xbb0d20 M=1 + 17: 0x40bce3 M=1 + 18: 0x7eff052dfa93 M=4 + 19: 
0x7a1955 M=1 + 20: 0x7a1c44 M=1 + 21: 0x7a2726 M=1 + 22: 0x7a296c M=1 + 23: 0x7a37d1 M=1 + 24: 0x7a3e8c M=1 + 25: 0xbbff76 M=1 + 26: 0x79ec1b M=1 + 27: 0x40bd6d M=1 + 28: 0x7a2690 M=1 +Mappings +1: 0x400000/0xfcb000/0x0 /home/rsilvera/cppbench/cppbench_server_main.unstripped +2: 0x7eff04e98000/0x7eff04e9b000/0x0 /lib/libnss_cache-2.15.so +3: 0x7eff0509c000/0x7eff050a8000/0x0 /lib/libnss_files-2.15.so +4: 0x7eff052a9000/0x7eff05457000/0x0 /lib/libc-2.15.so +5: 0x7eff05662000/0x7eff0575d000/0x0 /lib/libm-2.15.so +6: 0x7eff0595e000/0x7eff05965000/0x0 /lib/librt-2.15.so +7: 0x7eff05b66000/0x7eff05b7e000/0x0 /lib/libpthread-2.15.so +8: 0x7eff05d83000/0x7eff05d86000/0x0 /lib/libdl-2.15.so +9: 0x7eff05f87000/0x7eff05f90000/0x0 /lib/libcrypt-2.15.so +10: 0x7eff061c0000/0x7eff061e4000/0x0 /lib/ld-2.15.so +11: 0x7fff2edff000/0x7fff2ee00000/0x0 [vdso] +12: 0xffffffffff600000/0xffffffffff601000/0x0 [vsyscall] diff --git a/plugin/debug/pkg/profile/testdata/cppbench.thread.string b/plugin/debug/pkg/profile/testdata/cppbench.thread.string new file mode 100644 index 0000000..bf3f0f3 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/cppbench.thread.string @@ -0,0 +1,33 @@ +PeriodType: thread count +Period: 1 +Samples: +thread/count + 1: 1 2 3 + 1: 4 5 6 7 8 9 10 11 3 + 2: 1 5 6 12 8 9 10 11 3 +Locations + 1: 0xbc8f1c M=1 + 2: 0x40be30 M=1 + 3: 0x7f7949a9811c M=4 + 4: 0x7f794a32bf7d M=7 + 5: 0x7f794a32414d M=7 + 6: 0xa45b95 M=1 + 7: 0xa460b3 M=1 + 8: 0xbaa17e M=1 + 9: 0xbaa9f8 M=1 + 10: 0xbb0d20 M=1 + 11: 0x40bce3 M=1 + 12: 0xa48927 M=1 +Mappings +1: 0x400000/0xfcb000/0x0 /home/rsilvera/cppbench/cppbench_server_main +2: 0x7f794964f000/0x7f7949652000/0x0 /lib/libnss_cache-2.15.so +3: 0x7f7949853000/0x7f794985f000/0x0 /lib/libnss_files-2.15.so +4: 0x7f7949a60000/0x7f7949c0e000/0x0 /lib/libc-2.15.so +5: 0x7f7949e19000/0x7f7949f14000/0x0 /lib/libm-2.15.so +6: 0x7f794a115000/0x7f794a11c000/0x0 /lib/librt-2.15.so +7: 0x7f794a31d000/0x7f794a335000/0x0 /lib/libpthread-2.15.so +8: 
0x7f794a53a000/0x7f794a53d000/0x0 /lib/libdl-2.15.so +9: 0x7f794a73e000/0x7f794a747000/0x0 /lib/libcrypt-2.15.so +10: 0x7f794a977000/0x7f794a99b000/0x0 /lib/ld-2.15.so +11: 0x7fffb8dff000/0x7fffb8e00000/0x0 [vdso] +12: 0xffffffffff600000/0xffffffffff601000/0x0 [vsyscall] diff --git a/plugin/debug/pkg/profile/testdata/go.crc32.cpu b/plugin/debug/pkg/profile/testdata/go.crc32.cpu new file mode 100644 index 0000000..ce08313 Binary files /dev/null and b/plugin/debug/pkg/profile/testdata/go.crc32.cpu differ diff --git a/plugin/debug/pkg/profile/testdata/go.crc32.cpu.string b/plugin/debug/pkg/profile/testdata/go.crc32.cpu.string new file mode 100644 index 0000000..c2838b8 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/go.crc32.cpu.string @@ -0,0 +1,87 @@ +PeriodType: cpu nanoseconds +Period: 10000000 +Samples: +samples/count cpu/nanoseconds + 1 10000000: 1 2 3 4 5 + 2 20000000: 6 2 3 4 5 + 1 10000000: 1 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 2 20000000: 8 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 1 10000000: 6 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 1 10000000: 6 2 3 4 5 + 1 10000000: 1 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 1 10000000: 6 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 4 40000000: 7 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 2 20000000: 6 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 1 10000000: 1 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 1 10000000: 1 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 2 20000000: 6 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 1 10000000: 6 2 3 4 5 + 1 10000000: 1 2 3 4 5 + 2 20000000: 8 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 1 10000000: 1 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 1 10000000: 1 2 3 4 5 + 1 10000000: 6 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 1 10000000: 1 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 1 10000000: 6 2 3 4 5 + 2 20000000: 1 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 3 30000000: 7 2 3 4 5 + 1 10000000: 1 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 2 20000000: 1 2 
3 4 5 + 2 20000000: 7 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 1 10000000: 6 2 3 4 5 + 2 20000000: 7 2 3 4 5 + 1 10000000: 6 2 3 4 5 + 1 10000000: 1 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 2 20000000: 6 2 3 4 5 + 1 10000000: 1 2 3 4 5 + 1 10000000: 6 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 1 10000000: 6 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 1 10000000: 6 2 3 4 5 + 1 10000000: 8 2 3 4 5 + 1 10000000: 1 2 3 4 5 + 85 850000000: 9 2 3 4 5 + 21 210000000: 10 2 3 4 5 + 1 10000000: 7 2 3 4 5 + 24 240000000: 11 2 3 4 5 +Locations + 1: 0x430b93 M=1 + 2: 0x4317eb M=1 + 3: 0x42a065 M=1 + 4: 0x42a31b M=1 + 5: 0x415d0f M=1 + 6: 0x430baa M=1 + 7: 0x430bb5 M=1 + 8: 0x430ba6 M=1 + 9: 0x430bac M=1 + 10: 0x430b9f M=1 + 11: 0x430bb3 M=1 +Mappings +1: 0x0/0xffffffffffffffff/0x0 diff --git a/plugin/debug/pkg/profile/testdata/go.godoc.thread b/plugin/debug/pkg/profile/testdata/go.godoc.thread new file mode 100644 index 0000000..1c8582b --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/go.godoc.thread @@ -0,0 +1,8 @@ +threadcreate profile: total 7 +1 @ 0x44cb3 0x45045 0x45323 0x45534 0x47e9c 0x47c98 0x44ba2 0x2720fe 0x271fb5 +1 @ 0x44cb3 0x45045 0x45323 0x45534 0x46716 0x51584 0x461e0 +1 @ 0x44cb3 0x45045 0x45323 0x45547 0x46716 0x40963 0x461e0 +1 @ 0x44cb3 0x45045 0x45323 0x45547 0x4562e 0x460ed 0x51a59 +1 @ 0x44cb3 0x45045 0x441ae 0x461e0 +1 @ 0x44cb3 0x44e04 0x44b80 0x5192d +1 @ 0x440e2 0x5191a diff --git a/plugin/debug/pkg/profile/testdata/go.godoc.thread.string b/plugin/debug/pkg/profile/testdata/go.godoc.thread.string new file mode 100644 index 0000000..095f7ce --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/go.godoc.thread.string @@ -0,0 +1,37 @@ +PeriodType: threadcreate count +Period: 1 +Samples: +threadcreate/count + 1: 1 2 3 4 5 6 7 8 9 + 1: 1 2 3 4 10 11 12 + 1: 1 2 3 13 10 14 12 + 1: 1 2 3 13 15 16 17 + 1: 1 2 18 12 + 1: 1 19 20 21 + 1: 22 23 +Locations + 1: 0x44cb2 M=1 + 2: 0x45044 M=1 + 3: 0x45322 M=1 + 4: 0x45533 M=1 + 5: 0x47e9b M=1 + 6: 0x47c97 M=1 + 7: 
0x44ba1 M=1 + 8: 0x2720fd M=1 + 9: 0x271fb4 M=1 + 10: 0x46715 M=1 + 11: 0x51583 M=1 + 12: 0x461df M=1 + 13: 0x45546 M=1 + 14: 0x40962 M=1 + 15: 0x4562d M=1 + 16: 0x460ec M=1 + 17: 0x51a58 M=1 + 18: 0x441ad M=1 + 19: 0x44e03 M=1 + 20: 0x44b7f M=1 + 21: 0x5192c M=1 + 22: 0x440e1 M=1 + 23: 0x51919 M=1 +Mappings +1: 0x0/0xffffffffffffffff/0x0 diff --git a/plugin/debug/pkg/profile/testdata/gobench.cpu b/plugin/debug/pkg/profile/testdata/gobench.cpu new file mode 100644 index 0000000..e921d21 Binary files /dev/null and b/plugin/debug/pkg/profile/testdata/gobench.cpu differ diff --git a/plugin/debug/pkg/profile/testdata/gobench.cpu.string b/plugin/debug/pkg/profile/testdata/gobench.cpu.string new file mode 100644 index 0000000..7df1533 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/gobench.cpu.string @@ -0,0 +1,415 @@ +PeriodType: cpu nanoseconds +Period: 10000000 +Samples: +samples/count cpu/nanoseconds + 1 10000000: 1 2 + 1 10000000: 3 2 + 1 10000000: 4 2 + 1 10000000: 5 2 + 1 10000000: 6 2 + 1 10000000: 7 2 + 1 10000000: 8 2 + 1 10000000: 9 2 + 1 10000000: 10 2 + 1 10000000: 11 2 + 1 10000000: 12 2 + 1 10000000: 13 2 + 1 10000000: 14 2 + 1 10000000: 15 2 + 1 10000000: 16 2 + 1 10000000: 17 2 + 1 10000000: 18 2 + 1 10000000: 16 2 + 1 10000000: 19 2 + 1 10000000: 20 2 + 1 10000000: 21 2 + 1 10000000: 22 2 + 1 10000000: 23 2 + 1 10000000: 24 2 + 1 10000000: 25 2 + 1 10000000: 15 2 + 1 10000000: 26 2 + 1 10000000: 9 2 + 1 10000000: 27 2 + 1 10000000: 28 2 + 1 10000000: 29 2 + 1 10000000: 30 2 + 1 10000000: 31 2 + 1 10000000: 32 2 + 1 10000000: 24 2 + 1 10000000: 30 2 + 1 10000000: 33 2 + 1 10000000: 34 2 + 1 10000000: 35 2 + 1 10000000: 36 2 + 1 10000000: 27 2 + 1 10000000: 37 2 + 1 10000000: 38 2 + 1 10000000: 19 2 + 1 10000000: 39 2 + 1 10000000: 40 2 + 1 10000000: 41 2 + 1 10000000: 16 2 + 1 10000000: 42 2 + 1 10000000: 43 2 + 1 10000000: 44 2 + 1 10000000: 45 2 + 1 10000000: 46 2 + 1 10000000: 47 2 + 1 10000000: 48 2 + 1 10000000: 40 2 + 1 10000000: 10 2 + 1 
10000000: 49 2 + 1 10000000: 50 2 + 1 10000000: 51 2 + 1 10000000: 52 2 + 1 10000000: 53 2 + 1 10000000: 30 2 + 1 10000000: 54 2 + 1 10000000: 55 2 + 1 10000000: 36 2 + 1 10000000: 56 2 + 1 10000000: 57 2 + 1 10000000: 58 2 + 1 10000000: 59 2 + 1 10000000: 60 2 + 1 10000000: 61 2 + 1 10000000: 57 2 + 1 10000000: 62 2 + 1 10000000: 63 2 + 1 10000000: 30 2 + 1 10000000: 64 2 + 1 10000000: 16 2 + 1 10000000: 65 2 + 1 10000000: 26 2 + 1 10000000: 40 2 + 1 10000000: 66 2 + 1 10000000: 58 2 + 1 10000000: 67 2 + 1 10000000: 68 2 + 1 10000000: 69 2 + 1 10000000: 70 2 + 1 10000000: 71 2 + 1 10000000: 72 2 + 1 10000000: 51 2 + 1 10000000: 73 2 + 1 10000000: 74 2 + 1 10000000: 75 2 + 1 10000000: 76 2 + 1 10000000: 77 2 + 1 10000000: 78 2 + 1 10000000: 79 2 + 1 10000000: 80 2 + 1 10000000: 81 2 + 1 10000000: 82 2 + 1 10000000: 83 2 + 1 10000000: 84 2 + 1 10000000: 85 2 + 1 10000000: 86 2 + 1 10000000: 10 2 + 1 10000000: 87 2 + 1 10000000: 88 2 + 1 10000000: 89 2 + 1 10000000: 90 2 + 1 10000000: 63 2 + 1 10000000: 91 2 + 1 10000000: 5 2 + 1 10000000: 92 2 + 1 10000000: 93 2 + 1 10000000: 94 2 + 1 10000000: 19 2 + 1 10000000: 95 2 + 1 10000000: 30 2 + 1 10000000: 96 2 + 1 10000000: 10 2 + 1 10000000: 97 2 + 1 10000000: 98 2 + 1 10000000: 99 2 + 1 10000000: 62 2 + 1 10000000: 92 2 + 1 10000000: 100 2 + 1 10000000: 101 2 + 1 10000000: 39 2 + 1 10000000: 102 2 + 1 10000000: 86 2 + 1 10000000: 33 2 + 1 10000000: 103 2 + 1 10000000: 104 2 + 1 10000000: 13 2 + 2 20000000: 105 2 + 1 10000000: 106 2 + 1 10000000: 52 2 + 1 10000000: 24 2 + 1 10000000: 107 2 + 1 10000000: 108 2 + 1 10000000: 52 2 + 1 10000000: 109 2 + 1 10000000: 5 2 + 1 10000000: 82 2 + 1 10000000: 8 2 + 1 10000000: 110 2 + 1 10000000: 111 2 + 1 10000000: 112 2 + 1 10000000: 113 2 + 1 10000000: 114 2 + 1 10000000: 115 2 + 1 10000000: 116 2 + 1 10000000: 19 2 + 1 10000000: 64 2 + 1 10000000: 106 2 + 1 10000000: 117 2 + 1 10000000: 30 2 + 1 10000000: 118 2 + 1 10000000: 86 2 + 1 10000000: 119 2 + 1 10000000: 120 2 + 1 
10000000: 121 2 + 1 10000000: 81 2 + 2 20000000: 10 2 + 1 10000000: 19 2 + 1 10000000: 122 2 + 1 10000000: 123 2 + 1 10000000: 105 2 + 1 10000000: 124 2 + 1 10000000: 125 2 + 1 10000000: 46 2 + 1 10000000: 8 2 + 10 100000000: 21 2 + 7 70000000: 126 2 + 3 30000000: 9 2 + 1 10000000: 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 + 1 10000000: 144 2 + 5 50000000: 145 2 + 25 250000000: 146 2 + 1 10000000: 147 2 + 1 10000000: 148 149 150 134 135 136 137 138 139 140 141 142 143 + 1 10000000: 151 152 153 154 155 135 136 137 138 139 140 141 142 143 + 1 10000000: 156 157 153 154 155 135 136 137 138 139 140 141 142 143 + 1 10000000: 158 159 132 133 134 135 136 137 138 139 140 141 142 143 + 4 40000000: 27 2 + 4 40000000: 160 2 + 1 10000000: 116 2 + 5 50000000: 161 2 + 20 200000000: 162 163 164 135 136 137 138 139 140 141 142 143 + 1 10000000: 165 166 167 164 135 136 137 138 139 140 141 142 143 + 1 10000000: 168 169 167 164 135 136 137 138 139 140 141 142 143 + 2 20000000: 170 171 172 142 143 + 2 20000000: 173 171 172 142 143 + 1 10000000: 105 174 175 154 155 176 177 140 141 142 143 + 1 10000000: 178 179 176 177 140 141 142 143 + 1 10000000: 180 181 182 181 183 184 185 186 187 188 189 190 191 192 193 194 143 + 7 70000000: 195 2 + 2 20000000: 196 2 + 8 80000000: 16 2 + 1 10000000: 197 2 + 1 10000000: 146 198 199 135 136 137 138 139 140 141 142 143 + 1 10000000: 200 199 135 136 137 138 139 140 141 142 143 + 3 30000000: 162 179 135 136 137 138 139 140 141 142 143 + 1 10000000: 201 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 + 1 10000000: 202 167 152 153 154 155 135 136 137 138 139 140 141 142 143 + 6 60000000: 162 163 152 153 154 155 135 136 137 138 139 140 141 142 143 +Locations + 1: 0x410bc0 M=1 + 2: 0x41a770 M=1 + 3: 0x410b4b M=1 + 4: 0x40f534 M=1 + 5: 0x40f018 M=1 + 6: 0x421f4f M=1 + 7: 0x40e46f M=1 + 8: 0x40f0e3 M=1 + 9: 0x4286c7 M=1 + 10: 0x40f15b M=1 + 11: 0x40efb1 M=1 + 12: 0x41250d M=1 + 13: 0x427854 M=1 + 14: 0x40e688 M=1 + 15: 
0x410b61 M=1 + 16: 0x40fa72 M=1 + 17: 0x40e92a M=1 + 18: 0x421ff1 M=1 + 19: 0x42830d M=1 + 20: 0x41cf23 M=1 + 21: 0x40e7cb M=1 + 22: 0x40ea46 M=1 + 23: 0x40f792 M=1 + 24: 0x40f023 M=1 + 25: 0x40ee50 M=1 + 26: 0x40c6ab M=1 + 27: 0x40fa51 M=1 + 28: 0x40f14b M=1 + 29: 0x421fca M=1 + 30: 0x4285d3 M=1 + 31: 0x410ba9 M=1 + 32: 0x40e75f M=1 + 33: 0x4277a1 M=1 + 34: 0x40e89f M=1 + 35: 0x40ea54 M=1 + 36: 0x40f0ab M=1 + 37: 0x40ef9b M=1 + 38: 0x410d6a M=1 + 39: 0x40e455 M=1 + 40: 0x427856 M=1 + 41: 0x40e80b M=1 + 42: 0x40f5ef M=1 + 43: 0x40fb2a M=1 + 44: 0x422786 M=1 + 45: 0x40f031 M=1 + 46: 0x40f49d M=1 + 47: 0x40f331 M=1 + 48: 0x40e927 M=1 + 49: 0x40f558 M=1 + 50: 0x410b56 M=1 + 51: 0x40eac1 M=1 + 52: 0x40e813 M=1 + 53: 0x40e7df M=1 + 54: 0x40f53d M=1 + 55: 0x40f180 M=1 + 56: 0x410b94 M=1 + 57: 0x40fbf6 M=1 + 58: 0x40f026 M=1 + 59: 0x40f0dc M=1 + 60: 0x40e9d3 M=1 + 61: 0x40fa7b M=1 + 62: 0x40e877 M=1 + 63: 0x4048a8 M=1 + 64: 0x40f02e M=1 + 65: 0x4048b8 M=1 + 66: 0x4277d0 M=1 + 67: 0x40f5cb M=1 + 68: 0x40fbae M=1 + 69: 0x40e8c2 M=1 + 70: 0x40f64b M=1 + 71: 0x40e82e M=1 + 72: 0x421f22 M=1 + 73: 0x40fa67 M=1 + 74: 0x40fbb1 M=1 + 75: 0x40f568 M=1 + 76: 0x40e461 M=1 + 77: 0x40ef85 M=1 + 78: 0x40f58b M=1 + 79: 0x40f08d M=1 + 80: 0x40e75c M=1 + 81: 0x410c22 M=1 + 82: 0x40fa59 M=1 + 83: 0x40f091 M=1 + 84: 0x40eb69 M=1 + 85: 0x41075a M=1 + 86: 0x40e7e9 M=1 + 87: 0x40fa97 M=1 + 88: 0x4131eb M=1 + 89: 0x40f769 M=1 + 90: 0x40f54e M=1 + 91: 0x4277d5 M=1 + 92: 0x40f0ca M=1 + 93: 0x40f051 M=1 + 94: 0x40e94f M=1 + 95: 0x40fc11 M=1 + 96: 0x41815b M=1 + 97: 0x40f4b3 M=1 + 98: 0x421fe8 M=1 + 99: 0x40e79e M=1 + 100: 0x413f29 M=1 + 101: 0x427822 M=1 + 102: 0x40ef3d M=1 + 103: 0x40e440 M=1 + 104: 0x40e767 M=1 + 105: 0x42783b M=1 + 106: 0x40fa85 M=1 + 107: 0x40fb36 M=1 + 108: 0x410bae M=1 + 109: 0x40f0d7 M=1 + 110: 0x410ba4 M=1 + 111: 0x40e87b M=1 + 112: 0x40e7c0 M=1 + 113: 0x40eae0 M=1 + 114: 0x410a99 M=1 + 115: 0x40e7bd M=1 + 116: 0x40f09d M=1 + 117: 0x410b70 M=1 + 118: 0x40f32d M=1 + 119: 
0x4283ec M=1 + 120: 0x40f010 M=1 + 121: 0x40e97a M=1 + 122: 0x40f19a M=1 + 123: 0x40e779 M=1 + 124: 0x40f61d M=1 + 125: 0x40f4e1 M=1 + 126: 0x40f58f M=1 + 127: 0x41ef43 M=1 + 128: 0x41ef96 M=1 + 129: 0x41f089 M=1 + 130: 0x41f360 M=1 + 131: 0x41fc8e M=1 + 132: 0x4204c7 M=1 + 133: 0x422b03 M=1 + 134: 0x420cee M=1 + 135: 0x422150 M=1 + 136: 0x4221d9 M=1 + 137: 0x41dc0c M=1 + 138: 0x41db47 M=1 + 139: 0x672125 M=1 + 140: 0x4ac6fd M=1 + 141: 0x4abf98 M=1 + 142: 0x491fbd M=1 + 143: 0x41931f M=1 + 144: 0x40e844 M=1 + 145: 0x421ff8 M=1 + 146: 0x4277e4 M=1 + 147: 0x40e990 M=1 + 148: 0x41c53f M=1 + 149: 0x422746 M=1 + 150: 0x422b42 M=1 + 151: 0x412b5f M=1 + 152: 0x40d47b M=1 + 153: 0x40cf5e M=1 + 154: 0x40cceb M=1 + 155: 0x420b5e M=1 + 156: 0x413ab9 M=1 + 157: 0x40d56e M=1 + 158: 0x41f5a6 M=1 + 159: 0x420149 M=1 + 160: 0x40f531 M=1 + 161: 0x410b8d M=1 + 162: 0x427ac9 M=1 + 163: 0x412b91 M=1 + 164: 0x420ee3 M=1 + 165: 0x4134a8 M=1 + 166: 0x412dc7 M=1 + 167: 0x412afa M=1 + 168: 0x413a9d M=1 + 169: 0x412bf6 M=1 + 170: 0x671ed3 M=1 + 171: 0x4ac6ad M=1 + 172: 0x4abdd8 M=1 + 173: 0x671ebe M=1 + 174: 0x40c8ae M=1 + 175: 0x40d00a M=1 + 176: 0x422081 M=1 + 177: 0x672148 M=1 + 178: 0x427ad1 M=1 + 179: 0x420e54 M=1 + 180: 0x5718ff M=1 + 181: 0x575ab6 M=1 + 182: 0x572114 M=1 + 183: 0x571257 M=1 + 184: 0x462494 M=1 + 185: 0x475ea6 M=1 + 186: 0x473682 M=1 + 187: 0x471fd7 M=1 + 188: 0x471ac0 M=1 + 189: 0x46f1b2 M=1 + 190: 0x46ef32 M=1 + 191: 0x4ab9e0 M=1 + 192: 0x4acce1 M=1 + 193: 0x4ac7b6 M=1 + 194: 0x4ace6a M=1 + 195: 0x410b8a M=1 + 196: 0x40f56e M=1 + 197: 0x428176 M=1 + 198: 0x4120f3 M=1 + 199: 0x420be8 M=1 + 200: 0x412100 M=1 + 201: 0x41ef39 M=1 + 202: 0x412e38 M=1 +Mappings +1: 0x0/0xffffffffffffffff/0x0 diff --git a/plugin/debug/pkg/profile/testdata/gobench.heap b/plugin/debug/pkg/profile/testdata/gobench.heap new file mode 100644 index 0000000..ed44903 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/gobench.heap @@ -0,0 +1,16 @@ +heap profile: 13: 1595680 [47130736: 
2584596557304] @ heap/1048576 +1: 524288 [3: 1572864] @ 0x420cef 0x422151 0x4221da 0x41dc0d 0x41db48 0x74920f 0x6295ac 0x629855 0x462769 0x419320 +1: 524288 [1: 524288] @ 0x420cef 0x422151 0x4221da 0x41dc0d 0x41db48 0x74920f 0x63963f 0x419320 +1: 262144 [1: 262144] @ 0x420cef 0x422151 0x4221da 0x41dc0d 0x41db48 0x451a39 0x451ba5 0x450683 0x450077 0x4525a4 0x58e034 0x419320 +1: 262144 [1: 262144] @ 0x420cef 0x422151 0x4221da 0x41dc0d 0x41db48 0x451a39 0x451ba5 0x450683 0x450077 0x4524d4 0x401090 0x4011a1 0x416dff 0x419320 +1: 10240 [642: 6574080] @ 0x420cef 0x422151 0x4221da 0x41dc0d 0x41db48 0x477637 0x47718b 0x477056 0x4799b2 0x46bfd7 0x419320 +1: 4096 [1: 4096] @ 0x420cef 0x422151 0x4221da 0x41dc0d 0x41db48 0x526126 0x5261ea 0x4683d4 0x467e09 0x419320 +1: 4096 [1: 4096] @ 0x420cef 0x422151 0x4221da 0x41dc0d 0x41db48 0x53fbf3 0x53f85f 0x545f52 0x545a70 0x419320 +1: 2048 [1: 2048] @ 0x420cef 0x420fa9 0x414b22 0x414d20 0x4901be 0x419320 +1: 1280 [1: 1280] @ 0x420cef 0x422082 0x48dbe3 0x48d15c 0x48cdd0 0x4a9dc0 0x545bfe 0x543ac7 0x419320 +1: 384 [1: 384] @ 0x420cef 0x422151 0x4221da 0x41dc0d 0x41dd68 0x41dcbd 0x429150 0x429add 0x42e013 0x4307e2 0x4366ff 0x42c1c2 0x653e4d 0x64bdc5 0x64c359 0x65a73d 0x64cdb1 0x64be73 0x64c359 0x64c59a 0x64c205 0x64c359 0x64b778 0x5cd55c 0x45dbc3 0x543e70 0x559166 0x55ba54 0x559691 0x559985 0x5a19ff 0x543e70 +1: 288 [1: 288] @ 0x420cef 0x420fa9 0x419e19 0x41a1a8 0x419f63 0x48f09f 0x48d991 0x48cdd0 0x4a9dc0 0x545bfe 0x543ac7 0x419320 +1: 288 [2: 296] @ +1: 96 [1: 96] @ 0x420cef 0x424f35 0x4255d1 0x6fc293 0x6f9c88 0x6f9944 0x6f96be 0x6f966b 0x59f39a 0x468318 0x467e09 0x419320 +0: 0 [1: 1024] @ 0x420cef 0x422151 0x4221da 0x41dc0d 0x41dd68 0x41dcbd 0x6d71a3 0x6da87d 0x7b2c3b 0x419320 +0: 0 [1: 16] @ 0x420cef 0x422048 0x40b517 0x40b746 0x6d9ca2 0x4761c5 0x475ea7 0x46fc4f 0x46f180 0x46ef33 0x4ab821 0x4acc32 0x4ac7b7 0x4ace36 0x419320 diff --git a/plugin/debug/pkg/profile/testdata/gobench.heap.string 
b/plugin/debug/pkg/profile/testdata/gobench.heap.string new file mode 100644 index 0000000..01306ce --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/gobench.heap.string @@ -0,0 +1,137 @@ +PeriodType: space bytes +Period: 524288 +Samples: +alloc_objects/count alloc_space/bytes inuse_objects/count inuse_space/bytes + 4 2488234 1 829411: 1 2 3 4 5 6 7 8 9 10 + bytes:[524288] + 1 829411 1 829411: 1 2 3 4 5 6 11 10 + bytes:[524288] + 2 666237 2 666237: 1 2 3 4 5 12 13 14 15 16 17 10 + bytes:[262144] + 2 666237 2 666237: 1 2 3 4 5 12 13 14 15 18 19 20 21 10 + bytes:[262144] + 33192 339890635 51 529424: 1 2 3 4 5 22 23 24 25 26 10 + bytes:[10240] + 128 526338 128 526338: 1 2 3 4 5 27 28 29 30 10 + bytes:[4096] + 128 526338 128 526338: 1 2 3 4 5 31 32 33 34 10 + bytes:[4096] + 256 525312 256 525312: 1 35 36 37 38 10 + bytes:[2048] + 410 524928 410 524928: 1 39 40 41 42 43 44 45 10 + bytes:[1280] + 1365 524480 1365 524480: 1 2 3 4 46 47 48 49 50 51 52 53 54 55 56 57 58 59 56 60 61 56 62 63 64 65 66 67 68 69 70 65 + bytes:[384] + 1820 524432 1820 524432: 1 35 71 72 73 74 75 42 43 44 45 10 + bytes:[288] + 7085 1048724 1820 524432: + bytes:[288] + 5461 524336 5461 524336: 1 76 77 78 79 80 81 82 83 84 30 10 + bytes:[96] + 512 524800 0 0: 1 2 3 4 46 47 85 86 87 10 + bytes:[1024] + 32768 524296 0 0: 1 88 89 90 91 92 93 94 95 96 97 98 99 100 10 + bytes:[16] +Locations + 1: 0x420cee M=1 + 2: 0x422150 M=1 + 3: 0x4221d9 M=1 + 4: 0x41dc0c M=1 + 5: 0x41db47 M=1 + 6: 0x74920e M=1 + 7: 0x6295ab M=1 + 8: 0x629854 M=1 + 9: 0x462768 M=1 + 10: 0x41931f M=1 + 11: 0x63963e M=1 + 12: 0x451a38 M=1 + 13: 0x451ba4 M=1 + 14: 0x450682 M=1 + 15: 0x450076 M=1 + 16: 0x4525a3 M=1 + 17: 0x58e033 M=1 + 18: 0x4524d3 M=1 + 19: 0x40108f M=1 + 20: 0x4011a0 M=1 + 21: 0x416dfe M=1 + 22: 0x477636 M=1 + 23: 0x47718a M=1 + 24: 0x477055 M=1 + 25: 0x4799b1 M=1 + 26: 0x46bfd6 M=1 + 27: 0x526125 M=1 + 28: 0x5261e9 M=1 + 29: 0x4683d3 M=1 + 30: 0x467e08 M=1 + 31: 0x53fbf2 M=1 + 32: 0x53f85e M=1 + 33: 0x545f51 M=1 + 
34: 0x545a6f M=1 + 35: 0x420fa8 M=1 + 36: 0x414b21 M=1 + 37: 0x414d1f M=1 + 38: 0x4901bd M=1 + 39: 0x422081 M=1 + 40: 0x48dbe2 M=1 + 41: 0x48d15b M=1 + 42: 0x48cdcf M=1 + 43: 0x4a9dbf M=1 + 44: 0x545bfd M=1 + 45: 0x543ac6 M=1 + 46: 0x41dd67 M=1 + 47: 0x41dcbc M=1 + 48: 0x42914f M=1 + 49: 0x429adc M=1 + 50: 0x42e012 M=1 + 51: 0x4307e1 M=1 + 52: 0x4366fe M=1 + 53: 0x42c1c1 M=1 + 54: 0x653e4c M=1 + 55: 0x64bdc4 M=1 + 56: 0x64c358 M=1 + 57: 0x65a73c M=1 + 58: 0x64cdb0 M=1 + 59: 0x64be72 M=1 + 60: 0x64c599 M=1 + 61: 0x64c204 M=1 + 62: 0x64b777 M=1 + 63: 0x5cd55b M=1 + 64: 0x45dbc2 M=1 + 65: 0x543e6f M=1 + 66: 0x559165 M=1 + 67: 0x55ba53 M=1 + 68: 0x559690 M=1 + 69: 0x559984 M=1 + 70: 0x5a19fe M=1 + 71: 0x419e18 M=1 + 72: 0x41a1a7 M=1 + 73: 0x419f62 M=1 + 74: 0x48f09e M=1 + 75: 0x48d990 M=1 + 76: 0x424f34 M=1 + 77: 0x4255d0 M=1 + 78: 0x6fc292 M=1 + 79: 0x6f9c87 M=1 + 80: 0x6f9943 M=1 + 81: 0x6f96bd M=1 + 82: 0x6f966a M=1 + 83: 0x59f399 M=1 + 84: 0x468317 M=1 + 85: 0x6d71a2 M=1 + 86: 0x6da87c M=1 + 87: 0x7b2c3a M=1 + 88: 0x422047 M=1 + 89: 0x40b516 M=1 + 90: 0x40b745 M=1 + 91: 0x6d9ca1 M=1 + 92: 0x4761c4 M=1 + 93: 0x475ea6 M=1 + 94: 0x46fc4e M=1 + 95: 0x46f17f M=1 + 96: 0x46ef32 M=1 + 97: 0x4ab820 M=1 + 98: 0x4acc31 M=1 + 99: 0x4ac7b6 M=1 + 100: 0x4ace35 M=1 +Mappings +1: 0x0/0xffffffffffffffff/0x0 diff --git a/plugin/debug/pkg/profile/testdata/java.contention b/plugin/debug/pkg/profile/testdata/java.contention new file mode 100644 index 0000000..fb484b7 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/java.contention @@ -0,0 +1,43 @@ +--- contentionz 1 --- +format = java +resolution = microseconds +sampling period = 100 +ms since reset = 6019923 + 1 1 @ 0x00000003 0x00000004 + 14 1 @ 0x0000000d 0x0000000e 0x0000000f 0x00000010 0x00000011 0x00000012 0x00000013 0x00000014 0x00000017 0x00000018 0x00000019 0x0000001a 0x0000001b 0x0000001c 0x00000014 0x00000029 0x0000002a 0x0000002b 0x0000002c 0x0000002d 0x0000002e 0x0000002f 0x00000030 0x00000031 0x00000032 0x00000033 
0x00000034 0x00000035 + 2 2 @ 0x00000003 0x00000004 + 2 3 @ 0x00000036 0x00000037 0x00000038 + + + 0x0000003 com.example.function03 (source.java:03) + 0x0000004 com.example.function04 (source.java:04) + 0x000000d com.example.function0d (source.java:0) + 0x000000e com.example.function0e (source.java:0) + 0x000000f com.example.function0f (source.java:0) + 0x0000010 com.example.function10 (source.java:10) + 0x0000011 com.example.function11 (source.java:11) + 0x0000012 com.example.function12 (source.java:12) + 0x0000013 com.example.function13 (source.java:13) + 0x0000014 com.example.function14 (source.java:14) + 0x0000017 com.example.function17 (source.java:17) + 0x0000018 com.example.function18 (source.java:18) + 0x0000019 com.example.function19 (source.java:19) + 0x000001a com.example.function1a (source.java:1) + 0x000001b com.example.function1b (source.java:1) + 0x000001c com.example.function1c (source.java:1) + 0x0000029 com.example.function29 (source.java:29) + 0x000002a com.example.function2a (source.java:2) + 0x000002b com.example.function2b (source.java:2) + 0x000002c com.example.function2c (source.java:2) + 0x000002d com.example.function2d (source.java:2) + 0x000002e com.example.function2e (source.java:2) + 0x000002f com.example.function2f (source.java:2) + 0x0000030 com.example.function30 (source.java:30) + 0x0000031 com.example.function31 (source.java:31) + 0x0000032 com.example.function32 (source.java:32) + 0x0000033 com.example.function33 (source.java:33) + 0x0000034 com.example.function34 (source.java:34) + 0x0000035 com.example.function35 (source.java:35) + 0x0000036 com.example.function36 (source.java:36) + 0x0000037 com.example.function37 (source.java:37) + 0x0000038 com.example.function38 (source.java:38) diff --git a/plugin/debug/pkg/profile/testdata/java.contention.string b/plugin/debug/pkg/profile/testdata/java.contention.string new file mode 100644 index 0000000..34b811d --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/java.contention.string 
@@ -0,0 +1,43 @@ +PeriodType: contentions count +Period: 100 +Duration: 1h40 +Samples: +contentions/count delay/microseconds + 100 100: 1 2 + 100 1400: 3 4 5 6 7 8 9 10 11 12 13 14 15 16 10 17 18 19 20 21 22 23 24 25 26 27 28 29 + 200 200: 1 2 + 300 200: 30 31 32 +Locations + 1: 0x0 com.example.function03 source.java:3:0 s=0 + 2: 0x0 com.example.function04 source.java:4:0 s=0 + 3: 0x0 com.example.function0d source.java:0:0 s=0 + 4: 0x0 com.example.function0e source.java:0:0 s=0 + 5: 0x0 com.example.function0f source.java:0:0 s=0 + 6: 0x0 com.example.function10 source.java:10:0 s=0 + 7: 0x0 com.example.function11 source.java:11:0 s=0 + 8: 0x0 com.example.function12 source.java:12:0 s=0 + 9: 0x0 com.example.function13 source.java:13:0 s=0 + 10: 0x0 com.example.function14 source.java:14:0 s=0 + 11: 0x0 com.example.function17 source.java:17:0 s=0 + 12: 0x0 com.example.function18 source.java:18:0 s=0 + 13: 0x0 com.example.function19 source.java:19:0 s=0 + 14: 0x0 com.example.function1a source.java:1:0 s=0 + 15: 0x0 com.example.function1b source.java:1:0 s=0 + 16: 0x0 com.example.function1c source.java:1:0 s=0 + 17: 0x0 com.example.function29 source.java:29:0 s=0 + 18: 0x0 com.example.function2a source.java:2:0 s=0 + 19: 0x0 com.example.function2b source.java:2:0 s=0 + 20: 0x0 com.example.function2c source.java:2:0 s=0 + 21: 0x0 com.example.function2d source.java:2:0 s=0 + 22: 0x0 com.example.function2e source.java:2:0 s=0 + 23: 0x0 com.example.function2f source.java:2:0 s=0 + 24: 0x0 com.example.function30 source.java:30:0 s=0 + 25: 0x0 com.example.function31 source.java:31:0 s=0 + 26: 0x0 com.example.function32 source.java:32:0 s=0 + 27: 0x0 com.example.function33 source.java:33:0 s=0 + 28: 0x0 com.example.function34 source.java:34:0 s=0 + 29: 0x0 com.example.function35 source.java:35:0 s=0 + 30: 0x0 com.example.function36 source.java:36:0 s=0 + 31: 0x0 com.example.function37 source.java:37:0 s=0 + 32: 0x0 com.example.function38 source.java:38:0 s=0 +Mappings diff 
--git a/plugin/debug/pkg/profile/testdata/java.cpu b/plugin/debug/pkg/profile/testdata/java.cpu new file mode 100644 index 0000000..593588b Binary files /dev/null and b/plugin/debug/pkg/profile/testdata/java.cpu differ diff --git a/plugin/debug/pkg/profile/testdata/java.cpu.string b/plugin/debug/pkg/profile/testdata/java.cpu.string new file mode 100644 index 0000000..e15decb --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/java.cpu.string @@ -0,0 +1,78 @@ +PeriodType: cpu nanoseconds +Period: 10000000 +Samples: +samples/count cpu/nanoseconds + 0 0: 1 + 0 0: 2 + 2 20000000: 3 + 1 10000000: 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 + 1 10000000: 19 20 21 22 23 16 17 18 + 1 10000000: 24 25 26 27 28 29 30 31 32 + 1 10000000: 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 29 30 31 32 + 1 10000000: 54 55 56 57 58 59 60 61 62 11 63 64 16 17 18 +Locations + 1: 0x0 GC :0:0 s=0 + 2: 0x0 Compile :0:0 s=0 + 3: 0x0 VM :0:0 s=0 + 4: 0x0 com.example.function06 source.java:6:0 s=0 + 5: 0x0 com.example.function07 source.java:7:0 s=0 + 6: 0x0 com.example.function08 source.java:8:0 s=0 + 7: 0x0 com.example.function09 source.java:9:0 s=0 + 8: 0x0 com.example.function0a source.java:0:0 s=0 + 9: 0x0 com.example.function0b source.java:0:0 s=0 + 10: 0x0 com.example.function0c source.java:0:0 s=0 + 11: 0x0 com.example.function0d source.java:0:0 s=0 + 12: 0x0 com.example.function0e source.java:0:0 s=0 + 13: 0x0 com.example.function0f source.java:0:0 s=0 + 14: 0x0 com.example.function10 source.java:10:0 s=0 + 15: 0x0 com.example.function11 source.java:11:0 s=0 + 16: 0x0 com.example.function12 source.java:12:0 s=0 + 17: 0x0 com.example.function13 source.java:13:0 s=0 + 18: 0x0 com.example.function14 source.java:14:0 s=0 + 19: 0x0 com.example.function1d source.java:1:0 s=0 + 20: 0x0 com.example.function1e source.java:1:0 s=0 + 21: 0x0 com.example.function1f source.java:1:0 s=0 + 22: 0x0 com.example.function20 source.java:20:0 s=0 + 23: 0x0 com.example.function21 
source.java:21:0 s=0 + 24: 0x0 com.example.function22 source.java:22:0 s=0 + 25: 0x0 com.example.function23 source.java:23:0 s=0 + 26: 0x0 com.example.function24 source.java:24:0 s=0 + 27: 0x0 com.example.function25 source.java:25:0 s=0 + 28: 0x0 com.example.function26 source.java:26:0 s=0 + 29: 0x0 com.example.function27 source.java:27:0 s=0 + 30: 0x0 com.example.function28 source.java:28:0 s=0 + 31: 0x0 com.example.function29 source.java:29:0 s=0 + 32: 0x0 com.example.function2a source.java:2:0 s=0 + 33: 0x0 com.example.function2b source.java:2:0 s=0 + 34: 0x0 com.example.function2c source.java:2:0 s=0 + 35: 0x0 com.example.function2d source.java:2:0 s=0 + 36: 0x0 com.example.function2e source.java:2:0 s=0 + 37: 0x0 com.example.function2f source.java:2:0 s=0 + 38: 0x0 com.example.function30 source.java:30:0 s=0 + 39: 0x0 com.example.function31 source.java:31:0 s=0 + 40: 0x0 com.example.function32 source.java:32:0 s=0 + 41: 0x0 com.example.function33 source.java:33:0 s=0 + 42: 0x0 com.example.function34 source.java:34:0 s=0 + 43: 0x0 com.example.function35 source.java:35:0 s=0 + 44: 0x0 com.example.function36 source.java:36:0 s=0 + 45: 0x0 com.example.function37 source.java:37:0 s=0 + 46: 0x0 com.example.function38 source.java:38:0 s=0 + 47: 0x0 com.example.function39 source.java:39:0 s=0 + 48: 0x0 com.example.function3a source.java:3:0 s=0 + 49: 0x0 com.example.function3b source.java:3:0 s=0 + 50: 0x0 com.example.function3c source.java:3:0 s=0 + 51: 0x0 com.example.function3d source.java:3:0 s=0 + 52: 0x0 com.example.function3e source.java:3:0 s=0 + 53: 0x0 com.example.function3f source.java:3:0 s=0 + 54: 0x0 com.example.function40 source.java:40:0 s=0 + 55: 0x0 com.example.function41 source.java:41:0 s=0 + 56: 0x0 com.example.function42 source.java:42:0 s=0 + 57: 0x0 com.example.function43 source.java:43:0 s=0 + 58: 0x0 com.example.function44 source.java:44:0 s=0 + 59: 0x0 com.example.function45 source.java:45:0 s=0 + 60: 0x0 com.example.function46 
source.java:46:0 s=0 + 61: 0x0 com.example.function47 source.java:47:0 s=0 + 62: 0x0 com.example.function48 source.java:48:0 s=0 + 63: 0x0 com.example.function49 source.java:49:0 s=0 + 64: 0x0 com.example.function4a source.java:4:0 s=0 +Mappings diff --git a/plugin/debug/pkg/profile/testdata/java.heap b/plugin/debug/pkg/profile/testdata/java.heap new file mode 100644 index 0000000..95e4f6e --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/java.heap @@ -0,0 +1,133 @@ +--- heapz 1 --- +format = java +resolution = bytes + 7048 1 @ 0x00000003 0x00000004 0x00000005 0x00000006 0x00000007 0x00000008 0x00000009 0x0000000a 0x0000000b 0x0000000c 0x0000000d 0x0000000e 0x0000000f 0x00000010 0x00000011 0x00000018 0x00000019 0x0000001a 0x0000001b 0x0000001c 0x0000001d 0x0000001e 0x0000001f 0x00000020 0x00000021 0x00000022 0x00000023 0x00000024 0x00000025 0x00000026 0x00000027 0x00000023 0x00000028 0x00000029 0x0000001d 0x0000001e 0x0000001f 0x00000020 0x00000021 0x00000027 0x00000023 0x00000028 0x00000029 0x0000001d 0x0000001e 0x0000001f 0x00000020 0x00000021 0x0000002a 0x00000027 0x00000023 0x00000028 0x00000029 0x0000001d 0x0000001e 0x0000001f 0x00000020 + 4752 9 @ 0x0000002b 0x0000002c 0x0000002d 0x0000002e + 880 1 @ 0x00000035 0x00000036 0x00000037 0x00000038 0x00000039 0x0000003a 0x0000003b 0x00000011 0x0000003d 0x0000003e 0x0000003f 0x00000040 0x00000041 0x00000042 0x00000011 0x00000049 0x0000004a 0x0000004b 0x0000004c 0x0000004d 0x0000004e 0x0000004b 0x0000004f 0x0000004b 0x00000050 0x00000051 0x00000052 0x00000053 0x00000054 0x00000055 0x00000056 0x00000057 + 560 1 @ 0x00000035 0x00000036 0x00000037 0x00000038 0x00000039 0x0000003a 0x0000003b 0x00000011 0x0000003d 0x0000003e 0x0000003f 0x00000040 0x00000041 0x00000042 0x00000011 0x0000005e 0x0000005f 0x00000060 0x00000061 0x00000062 0x00000063 0x00000064 0x00000065 0x00000066 0x00000067 0x00000068 0x00000069 0x0000006a 0x0000006b 0x0000006c 0x0000006d 0x0000006e 0x0000006f 0x00000070 0x00000071 0x00000072 0x00000073 
0x00000074 0x00000075 0x00000067 0x00000068 + 528 1 @ 0x00000076 0x00000077 0x00000078 0x00000079 0x0000007a 0x0000007b 0x00000011 0x00000081 0x00000011 0x00000082 0x0000004e 0x0000004b 0x0000004f 0x0000004b 0x00000050 0x00000051 0x00000052 0x00000053 0x00000054 0x00000055 0x00000056 0x00000057 + 440 1 @ 0x00000083 0x00000084 0x00000085 0x00000086 0x00000087 0x00000088 0x00000089 0x0000008a 0x0000008b 0x0000008c 0x0000008d 0x0000008e 0x0000008f 0x00000090 0x00000091 0x00000092 0x00000093 0x00000094 0x00000095 0x00000096 + 240 5 @ 0x00000097 + + + 0x00000003 com.example.function003 (Source003.java:103) + 0x00000004 com.example.function004 (Source004.java:104) + 0x00000005 com.example.function005 (Source005.java:105) + 0x00000006 com.example.function006 (Source006.java:106) + 0x00000007 com.example.function007 (Source007.java:107) + 0x00000008 com.example.function008 (Source008.java:108) + 0x00000009 com.example.function009 (Source009.java:109) + 0x0000000a com.example.function00a (Source00a.java:10) + 0x0000000b com.example.function00b (Source00b.java:10) + 0x0000000c com.example.function00c (Source00c.java:10) + 0x0000000d com.example.function00d (Source00d.java:10) + 0x0000000e com.example.function00e (Source00e.java:10) + 0x0000000f com.example.function00f (Source00f.java:10) + 0x00000010 com.example.function010 (Source010.java:110) + 0x00000011 com.example.function011 (Source011.java:111) + 0x00000018 com.example.function018 (Source018.java:118) + 0x00000019 com.example.function019 (Source019.java:119) + 0x0000001a com.example.function01a (Source01a.java:11) + 0x0000001b com.example.function01b (Source01b.java:11) + 0x0000001c com.example.function01c (Source01c.java:11) + 0x0000001d com.example.function01d (Source01d.java:11) + 0x0000001e com.example.function01e (Source01e.java:11) + 0x0000001f com.example.function01f (Source01f.java:11) + 0x00000020 com.example.function020 (Source020.java:120) + 0x00000021 com.example.function021 (Source021.java:121) + 
0x00000022 com.example.function022 (Source022.java:122) + 0x00000023 com.example.function023 (Source023.java:123) + 0x00000024 com.example.function024 (Source024.java:124) + 0x00000025 com.example.function025 (Source025.java:125) + 0x00000026 com.example.function026 (Source026.java:126) + 0x00000027 com.example.function027 (Source027.java:127) + 0x00000028 com.example.function028 (Source028.java:128) + 0x00000029 com.example.function029 (Source029.java:129) + 0x0000002a com.example.function02a (Source02a.java:12) + 0x0000002b com.example.function02b (Source02b.java:12) + 0x0000002c com.example.function02c (Source02c.java:12) + 0x0000002d com.example.function02d (Source02d.java:12) + 0x0000002e com.example.function02e (Source02e.java:12) + 0x00000035 com.example.function035 (Source035.java:135) + 0x00000036 com.example.function036 (Source036.java:136) + 0x00000037 com.example.function037 (Source037.java:137) + 0x00000038 com.example.function038 (Source038.java:138) + 0x00000039 com.example.function039 (Source039.java:139) + 0x0000003a com.example.function03a (Source03a.java:13) + 0x0000003b com.example.function03b (Source03b.java:13) + 0x0000003d com.example.function03d (Source03d.java:13) + 0x0000003e com.example.function03e (Source03e.java:13) + 0x0000003f com.example.function03f (Source03f.java:13) + 0x00000040 com.example.function040 (Source040.java:140) + 0x00000041 com.example.function041 (Source041.java:141) + 0x00000042 com.example.function042 (Source042.java:142) + 0x00000049 com.example.function049 (Source049.java:149) + 0x0000004a com.example.function04a (Source04a.java:14) + 0x0000004b com.example.function04b (Source04b.java:14) + 0x0000004c com.example.function04c (Source04c.java:14) + 0x0000004d com.example.function04d (Source04d.java:14) + 0x0000004e com.example.function04e (Source04e.java:14) + 0x0000004f com.example.function04f (Source04f.java:14) + 0x00000050 com.example.function050 (Source050.java:150) + 0x00000051 com.example.function051 
(Source051.java:151) + 0x00000052 com.example.function052 (Source052.java:152) + 0x00000053 com.example.function053 (Source053.java:153) + 0x00000054 com.example.function054 (Source054.java:154) + 0x00000055 com.example.function055 (Source055.java:155) + 0x00000056 com.example.function056 (Source056.java:156) + 0x00000057 com.example.function057 (Source057.java:157) + 0x0000005a com.example.function05a (Source05a.java:15) + 0x0000005e com.example.function05e (Source05e.java:15) + 0x0000005f com.example.function05f (Source05f.java:15) + 0x00000060 com.example.function060 (Source060.java:160) + 0x00000061 com.example.function061 (Source061.java:161) + 0x00000062 com.example.function062 (Source062.java:162) + 0x00000063 com.example.function063 (Source063.java:163) + 0x00000064 com.example.function064 (Source064.java:164) + 0x00000065 com.example.function065 (Source065.java:165) + 0x00000066 com.example.function066 (Source066.java:166) + 0x00000067 com.example.function067 (Source067.java:167) + 0x00000068 com.example.function068 (Source068.java:168) + 0x00000069 com.example.function069 (Source069.java:169) + 0x0000006a com.example.function06a (Source06a.java:16) + 0x0000006b com.example.function06b (Source06b.java:16) + 0x0000006c com.example.function06c (Source06c.java:16) + 0x0000006d com.example.function06d (Source06d.java:16) + 0x0000006e com.example.function06e (Source06e.java:16) + 0x0000006f com.example.function06f (Source06f.java:16) + 0x00000070 com.example.function070 (Source070.java:170) + 0x00000071 com.example.function071 (Source071.java:171) + 0x00000072 com.example.function072 (Source072.java:172) + 0x00000073 com.example.function073 (Source073.java:173) + 0x00000074 com.example.function074 (Source074.java:174) + 0x00000075 com.example.function075 (Source075.java:175) + 0x00000076 com.example.function076 (Source076.java:176) + 0x00000077 com.example.function077 (Source077.java:177) + 0x00000078 com.example.function078 (Source078.java:178) + 0x00000079 
com.example.function079 (Source079.java:179) + 0x0000007a com.example.function07a (Source07a.java:17) + 0x0000007b com.example.function07b (Source07b.java:17) + 0x0000007d com.example.function07d (Source07d.java:17) + 0x00000081 com.example.function081 (Source081.java:181) + 0x00000082 com.example.function082 (Source082.java:182) + 0x00000083 com.example.function083 (Source083.java:183) + 0x00000084 com.example.function084 (Source084.java:184) + 0x00000085 com.example.function085 (Source085.java:185) + 0x00000086 com.example.function086 (Source086.java:186) + 0x00000087 com.example.function087 (Source087.java:187) + 0x00000088 com.example.function088 (Source088.java:188) + 0x00000089 com.example.function089 (Source089.java:189) + 0x0000008a com.example.function08a (Source08a.java:18) + 0x0000008b com.example.function08b (Source08b.java:18) + 0x0000008c com.example.function08c (Source08c.java:18) + 0x0000008d com.example.function08d (Source08d.java:18) + 0x0000008e com.example.function08e (Source08e.java:18) + 0x0000008f com.example.function08f (Source08f.java:18) + 0x00000090 com.example.function090 (Source090.java:190) + 0x00000091 com.example.function091 (Source091.java:191) + 0x00000092 com.example.function092 (Source092.java:192) + 0x00000093 com.example.function093 (Source093.java:193) + 0x00000094 com.example.function094 (Source094.java:194) + 0x00000095 com.example.function095 (Source095.java:195) + 0x00000096 com.example.function096 (Source096.java:196) + 0x00000097 com.example.function097 (Source097.java:197) diff --git a/plugin/debug/pkg/profile/testdata/java.heap.string b/plugin/debug/pkg/profile/testdata/java.heap.string new file mode 100644 index 0000000..5e1b549 --- /dev/null +++ b/plugin/debug/pkg/profile/testdata/java.heap.string @@ -0,0 +1,139 @@ +PeriodType: +Period: 0 +Samples: +inuse_objects/count inuse_space/bytes + 74 527819: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 27 32 33 21 22 23 24 25 31 27 32 33 
21 22 23 24 25 34 31 27 32 33 21 22 23 24 + bytes:[7048] + 8941 4720968: 35 36 37 38 + bytes:[528] + 596 524728: 39 40 41 42 43 44 45 15 46 47 48 49 50 51 15 52 53 54 55 56 57 54 58 54 59 60 61 62 63 64 65 66 + bytes:[880] + 936 524568: 39 40 41 42 43 44 45 15 46 47 48 49 50 51 15 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 76 77 + bytes:[560] + 993 524552: 91 92 93 94 95 96 15 97 15 98 57 54 58 54 59 60 61 62 63 64 65 66 + bytes:[528] + 1192 524508: 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 + bytes:[440] + 54615 2621560: 119 + bytes:[48] +Locations + 1: 0x0 com.example.function003 Source003.java:103:0 s=0 + 2: 0x0 com.example.function004 Source004.java:104:0 s=0 + 3: 0x0 com.example.function005 Source005.java:105:0 s=0 + 4: 0x0 com.example.function006 Source006.java:106:0 s=0 + 5: 0x0 com.example.function007 Source007.java:107:0 s=0 + 6: 0x0 com.example.function008 Source008.java:108:0 s=0 + 7: 0x0 com.example.function009 Source009.java:109:0 s=0 + 8: 0x0 com.example.function00a Source00a.java:10:0 s=0 + 9: 0x0 com.example.function00b Source00b.java:10:0 s=0 + 10: 0x0 com.example.function00c Source00c.java:10:0 s=0 + 11: 0x0 com.example.function00d Source00d.java:10:0 s=0 + 12: 0x0 com.example.function00e Source00e.java:10:0 s=0 + 13: 0x0 com.example.function00f Source00f.java:10:0 s=0 + 14: 0x0 com.example.function010 Source010.java:110:0 s=0 + 15: 0x0 com.example.function011 Source011.java:111:0 s=0 + 16: 0x0 com.example.function018 Source018.java:118:0 s=0 + 17: 0x0 com.example.function019 Source019.java:119:0 s=0 + 18: 0x0 com.example.function01a Source01a.java:11:0 s=0 + 19: 0x0 com.example.function01b Source01b.java:11:0 s=0 + 20: 0x0 com.example.function01c Source01c.java:11:0 s=0 + 21: 0x0 com.example.function01d Source01d.java:11:0 s=0 + 22: 0x0 com.example.function01e Source01e.java:11:0 s=0 + 23: 0x0 com.example.function01f Source01f.java:11:0 s=0 + 24: 0x0 com.example.function020 
Source020.java:120:0 s=0 + 25: 0x0 com.example.function021 Source021.java:121:0 s=0 + 26: 0x0 com.example.function022 Source022.java:122:0 s=0 + 27: 0x0 com.example.function023 Source023.java:123:0 s=0 + 28: 0x0 com.example.function024 Source024.java:124:0 s=0 + 29: 0x0 com.example.function025 Source025.java:125:0 s=0 + 30: 0x0 com.example.function026 Source026.java:126:0 s=0 + 31: 0x0 com.example.function027 Source027.java:127:0 s=0 + 32: 0x0 com.example.function028 Source028.java:128:0 s=0 + 33: 0x0 com.example.function029 Source029.java:129:0 s=0 + 34: 0x0 com.example.function02a Source02a.java:12:0 s=0 + 35: 0x0 com.example.function02b Source02b.java:12:0 s=0 + 36: 0x0 com.example.function02c Source02c.java:12:0 s=0 + 37: 0x0 com.example.function02d Source02d.java:12:0 s=0 + 38: 0x0 com.example.function02e Source02e.java:12:0 s=0 + 39: 0x0 com.example.function035 Source035.java:135:0 s=0 + 40: 0x0 com.example.function036 Source036.java:136:0 s=0 + 41: 0x0 com.example.function037 Source037.java:137:0 s=0 + 42: 0x0 com.example.function038 Source038.java:138:0 s=0 + 43: 0x0 com.example.function039 Source039.java:139:0 s=0 + 44: 0x0 com.example.function03a Source03a.java:13:0 s=0 + 45: 0x0 com.example.function03b Source03b.java:13:0 s=0 + 46: 0x0 com.example.function03d Source03d.java:13:0 s=0 + 47: 0x0 com.example.function03e Source03e.java:13:0 s=0 + 48: 0x0 com.example.function03f Source03f.java:13:0 s=0 + 49: 0x0 com.example.function040 Source040.java:140:0 s=0 + 50: 0x0 com.example.function041 Source041.java:141:0 s=0 + 51: 0x0 com.example.function042 Source042.java:142:0 s=0 + 52: 0x0 com.example.function049 Source049.java:149:0 s=0 + 53: 0x0 com.example.function04a Source04a.java:14:0 s=0 + 54: 0x0 com.example.function04b Source04b.java:14:0 s=0 + 55: 0x0 com.example.function04c Source04c.java:14:0 s=0 + 56: 0x0 com.example.function04d Source04d.java:14:0 s=0 + 57: 0x0 com.example.function04e Source04e.java:14:0 s=0 + 58: 0x0 com.example.function04f 
Source04f.java:14:0 s=0 + 59: 0x0 com.example.function050 Source050.java:150:0 s=0 + 60: 0x0 com.example.function051 Source051.java:151:0 s=0 + 61: 0x0 com.example.function052 Source052.java:152:0 s=0 + 62: 0x0 com.example.function053 Source053.java:153:0 s=0 + 63: 0x0 com.example.function054 Source054.java:154:0 s=0 + 64: 0x0 com.example.function055 Source055.java:155:0 s=0 + 65: 0x0 com.example.function056 Source056.java:156:0 s=0 + 66: 0x0 com.example.function057 Source057.java:157:0 s=0 + 67: 0x0 com.example.function05e Source05e.java:15:0 s=0 + 68: 0x0 com.example.function05f Source05f.java:15:0 s=0 + 69: 0x0 com.example.function060 Source060.java:160:0 s=0 + 70: 0x0 com.example.function061 Source061.java:161:0 s=0 + 71: 0x0 com.example.function062 Source062.java:162:0 s=0 + 72: 0x0 com.example.function063 Source063.java:163:0 s=0 + 73: 0x0 com.example.function064 Source064.java:164:0 s=0 + 74: 0x0 com.example.function065 Source065.java:165:0 s=0 + 75: 0x0 com.example.function066 Source066.java:166:0 s=0 + 76: 0x0 com.example.function067 Source067.java:167:0 s=0 + 77: 0x0 com.example.function068 Source068.java:168:0 s=0 + 78: 0x0 com.example.function069 Source069.java:169:0 s=0 + 79: 0x0 com.example.function06a Source06a.java:16:0 s=0 + 80: 0x0 com.example.function06b Source06b.java:16:0 s=0 + 81: 0x0 com.example.function06c Source06c.java:16:0 s=0 + 82: 0x0 com.example.function06d Source06d.java:16:0 s=0 + 83: 0x0 com.example.function06e Source06e.java:16:0 s=0 + 84: 0x0 com.example.function06f Source06f.java:16:0 s=0 + 85: 0x0 com.example.function070 Source070.java:170:0 s=0 + 86: 0x0 com.example.function071 Source071.java:171:0 s=0 + 87: 0x0 com.example.function072 Source072.java:172:0 s=0 + 88: 0x0 com.example.function073 Source073.java:173:0 s=0 + 89: 0x0 com.example.function074 Source074.java:174:0 s=0 + 90: 0x0 com.example.function075 Source075.java:175:0 s=0 + 91: 0x0 com.example.function076 Source076.java:176:0 s=0 + 92: 0x0 com.example.function077 
Source077.java:177:0 s=0 + 93: 0x0 com.example.function078 Source078.java:178:0 s=0 + 94: 0x0 com.example.function079 Source079.java:179:0 s=0 + 95: 0x0 com.example.function07a Source07a.java:17:0 s=0 + 96: 0x0 com.example.function07b Source07b.java:17:0 s=0 + 97: 0x0 com.example.function081 Source081.java:181:0 s=0 + 98: 0x0 com.example.function082 Source082.java:182:0 s=0 + 99: 0x0 com.example.function083 Source083.java:183:0 s=0 + 100: 0x0 com.example.function084 Source084.java:184:0 s=0 + 101: 0x0 com.example.function085 Source085.java:185:0 s=0 + 102: 0x0 com.example.function086 Source086.java:186:0 s=0 + 103: 0x0 com.example.function087 Source087.java:187:0 s=0 + 104: 0x0 com.example.function088 Source088.java:188:0 s=0 + 105: 0x0 com.example.function089 Source089.java:189:0 s=0 + 106: 0x0 com.example.function08a Source08a.java:18:0 s=0 + 107: 0x0 com.example.function08b Source08b.java:18:0 s=0 + 108: 0x0 com.example.function08c Source08c.java:18:0 s=0 + 109: 0x0 com.example.function08d Source08d.java:18:0 s=0 + 110: 0x0 com.example.function08e Source08e.java:18:0 s=0 + 111: 0x0 com.example.function08f Source08f.java:18:0 s=0 + 112: 0x0 com.example.function090 Source090.java:190:0 s=0 + 113: 0x0 com.example.function091 Source091.java:191:0 s=0 + 114: 0x0 com.example.function092 Source092.java:192:0 s=0 + 115: 0x0 com.example.function093 Source093.java:193:0 s=0 + 116: 0x0 com.example.function094 Source094.java:194:0 s=0 + 117: 0x0 com.example.function095 Source095.java:195:0 s=0 + 118: 0x0 com.example.function096 Source096.java:196:0 s=0 + 119: 0x0 com.example.function097 Source097.java:197:0 s=0 +Mappings diff --git a/plugin/debug/pkg/third_party/svgpan/LICENSE b/plugin/debug/pkg/third_party/svgpan/LICENSE new file mode 100644 index 0000000..35bc174 --- /dev/null +++ b/plugin/debug/pkg/third_party/svgpan/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009-2017 Andrea Leofreddi . All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without modification, are +permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS +OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation are those of the +authors and should not be interpreted as representing official policies, either expressed +or implied, of Andrea Leofreddi. diff --git a/plugin/debug/pkg/third_party/svgpan/svgpan.go b/plugin/debug/pkg/third_party/svgpan/svgpan.go new file mode 100644 index 0000000..afb124d --- /dev/null +++ b/plugin/debug/pkg/third_party/svgpan/svgpan.go @@ -0,0 +1,11 @@ +// SVG pan and zoom library. +// See copyright notice in string constant below. 
+ +package svgpan + +import _ "embed" + +// https://github.com/aleofreddi/svgpan + +//go:embed svgpan.js +var JSSource string diff --git a/plugin/debug/pkg/third_party/svgpan/svgpan.js b/plugin/debug/pkg/third_party/svgpan/svgpan.js new file mode 100644 index 0000000..2c4951e --- /dev/null +++ b/plugin/debug/pkg/third_party/svgpan/svgpan.js @@ -0,0 +1,261 @@ +/** + * SVGPan library 1.2.2 + * ====================== + * + * Given an unique existing element with id "viewport" (or when missing, the + * first g-element), including the the library into any SVG adds the following + * capabilities: + * + * - Mouse panning + * - Mouse zooming (using the wheel) + * - Object dragging + * + * You can configure the behaviour of the pan/zoom/drag with the variables + * listed in the CONFIGURATION section of this file. + * + * This code is licensed under the following BSD license: + * + * Copyright 2009-2019 Andrea Leofreddi . All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS + * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY + * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL COPYRIGHT HOLDERS OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * The views and conclusions contained in the software and documentation are those of the + * authors and should not be interpreted as representing official policies, either expressed + * or implied, of Andrea Leofreddi. + */ + +"use strict"; + +/// CONFIGURATION +/// ====> + +var enablePan = 1; // 1 or 0: enable or disable panning (default enabled) +var enableZoom = 1; // 1 or 0: enable or disable zooming (default enabled) +var enableDrag = 0; // 1 or 0: enable or disable dragging (default disabled) +var zoomScale = 0.2; // Zoom sensitivity + +/// <==== +/// END OF CONFIGURATION + +var root = document.documentElement; +var state = 'none', svgRoot = null, stateTarget, stateOrigin, stateTf; + +setupHandlers(root); + +/** + * Register handlers + */ +function setupHandlers(root){ + setAttributes(root, { + "onmouseup" : "handleMouseUp(evt)", + "onmousedown" : "handleMouseDown(evt)", + "onmousemove" : "handleMouseMove(evt)", + //"onmouseout" : "handleMouseUp(evt)", // Decomment this to stop the pan functionality when dragging out of the SVG element + }); + + if(navigator.userAgent.toLowerCase().indexOf('webkit') >= 0) + window.addEventListener('mousewheel', handleMouseWheel, false); // Chrome/Safari + else + window.addEventListener('DOMMouseScroll', handleMouseWheel, false); // Others +} + +/** + * Retrieves the root element for SVG manipulation. The element is then cached into the svgRoot global variable. 
+ */ +function getRoot(root) { + if(svgRoot == null) { + var r = root.getElementById("viewport") ? root.getElementById("viewport") : root.documentElement, t = r; + + while(t != root) { + if(t.getAttribute("viewBox")) { + setCTM(r, t.getCTM()); + + t.removeAttribute("viewBox"); + } + + t = t.parentNode; + } + + svgRoot = r; + } + + return svgRoot; +} + +/** + * Instance an SVGPoint object with given event coordinates. + */ +function getEventPoint(evt) { + var p = root.createSVGPoint(); + + p.x = evt.clientX; + p.y = evt.clientY; + + return p; +} + +/** + * Sets the current transform matrix of an element. + */ +function setCTM(element, matrix) { + var s = "matrix(" + matrix.a + "," + matrix.b + "," + matrix.c + "," + matrix.d + "," + matrix.e + "," + matrix.f + ")"; + + element.setAttribute("transform", s); +} + +/** + * Dumps a matrix to a string (useful for debug). + */ +function dumpMatrix(matrix) { + var s = "[ " + matrix.a + ", " + matrix.c + ", " + matrix.e + "\n " + matrix.b + ", " + matrix.d + ", " + matrix.f + "\n 0, 0, 1 ]"; + + return s; +} + +/** + * Sets attributes of an element. + */ +function setAttributes(element, attributes){ + for (var i in attributes) + element.setAttributeNS(null, i, attributes[i]); +} + +/** + * Handle mouse wheel event. 
+ */ +function handleMouseWheel(evt) { + if(!enableZoom) + return; + + if(evt.preventDefault) + evt.preventDefault(); + + evt.returnValue = false; + + var svgDoc = evt.target.ownerDocument; + + var delta; + + if(evt.wheelDelta) + delta = evt.wheelDelta / 360; // Chrome/Safari + else + delta = evt.detail / -9; // Mozilla + + var z = Math.pow(1 + zoomScale, delta); + + var g = getRoot(svgDoc); + + var p = getEventPoint(evt); + + p = p.matrixTransform(g.getCTM().inverse()); + + // Compute new scale matrix in current mouse position + var k = root.createSVGMatrix().translate(p.x, p.y).scale(z).translate(-p.x, -p.y); + + setCTM(g, g.getCTM().multiply(k)); + + if(typeof(stateTf) == "undefined") + stateTf = g.getCTM().inverse(); + + stateTf = stateTf.multiply(k.inverse()); +} + +/** + * Handle mouse move event. + */ +function handleMouseMove(evt) { + if(evt.preventDefault) + evt.preventDefault(); + + evt.returnValue = false; + + var svgDoc = evt.target.ownerDocument; + + var g = getRoot(svgDoc); + + if(state == 'pan' && enablePan) { + // Pan mode + var p = getEventPoint(evt).matrixTransform(stateTf); + + setCTM(g, stateTf.inverse().translate(p.x - stateOrigin.x, p.y - stateOrigin.y)); + } else if(state == 'drag' && enableDrag) { + // Drag mode + var p = getEventPoint(evt).matrixTransform(g.getCTM().inverse()); + + setCTM(stateTarget, root.createSVGMatrix().translate(p.x - stateOrigin.x, p.y - stateOrigin.y).multiply(g.getCTM().inverse()).multiply(stateTarget.getCTM())); + + stateOrigin = p; + } +} + +/** + * Handle click event. 
+ */ +function handleMouseDown(evt) { + if(evt.preventDefault) + evt.preventDefault(); + + evt.returnValue = false; + + var svgDoc = evt.target.ownerDocument; + + var g = getRoot(svgDoc); + + if( + evt.target.tagName == "svg" + || !enableDrag // Pan anyway when drag is disabled and the user clicked on an element + ) { + // Pan mode + state = 'pan'; + + stateTf = g.getCTM().inverse(); + + stateOrigin = getEventPoint(evt).matrixTransform(stateTf); + } else { + // Drag mode + state = 'drag'; + + stateTarget = evt.target; + + stateTf = g.getCTM().inverse(); + + stateOrigin = getEventPoint(evt).matrixTransform(stateTf); + } +} + +/** + * Handle mouse button release event. + */ +function handleMouseUp(evt) { + if(evt.preventDefault) + evt.preventDefault(); + + evt.returnValue = false; + + var svgDoc = evt.target.ownerDocument; + + if(state == 'pan' || state == 'drag') { + // Quit pan mode + state = ''; + } +} diff --git a/plugin/gb28181/api.go b/plugin/gb28181/api.go index 191a7bc..f7562d1 100644 --- a/plugin/gb28181/api.go +++ b/plugin/gb28181/api.go @@ -3,14 +3,11 @@ package plugin_gb28181 import ( "context" "net/http" - "os" "strings" - "time" "google.golang.org/protobuf/types/known/emptypb" "google.golang.org/protobuf/types/known/timestamppb" - "m7s.live/v5" - "m7s.live/v5/pkg/util" + "m7s.live/v5/pkg/config" "m7s.live/v5/plugin/gb28181/pb" gb28181 "m7s.live/v5/plugin/gb28181/pkg" ) @@ -58,57 +55,21 @@ func (gb *GB28181Plugin) List(context.Context, *emptypb.Empty) (ret *pb.Response return } -func (gb *GB28181Plugin) replayPS(pub *m7s.Publisher, f *os.File) { - defer f.Close() - var t uint16 - puber := gb28181.NewPSPublisher(pub) - go puber.Demux() - defer close(puber.Receiver.FeedChan) - for l := make([]byte, 6); pub.State != m7s.PublisherStateDisposed; time.Sleep(time.Millisecond * time.Duration(t)) { - _, err := f.Read(l) - if err != nil { - return - } - payloadLen := util.ReadBE[int](l[:4]) - payload := make([]byte, payloadLen) - t = util.ReadBE[uint16](l[4:]) - 
_, err = f.Read(payload) - if err != nil { - return - } - if err = puber.Receiver.ReadRTP(payload); err != nil { - gb.Error("replayPS", "err", err) - } - if pub.IsStopped() { - return - } - } -} - func (gb *GB28181Plugin) api_ps_replay(w http.ResponseWriter, r *http.Request) { dump := r.URL.Query().Get("dump") streamPath := r.PathValue("streamPath") if dump == "" { dump = "dump/ps" } - f, err := os.OpenFile(dump, os.O_RDONLY, 0644) - if err != nil { - util.ReturnError(http.StatusInternalServerError, err.Error(), w, r) - } else { - if streamPath == "" { - if strings.HasPrefix(dump, "/") { - streamPath = "replay" + dump - } else { - streamPath = "replay/" + dump - } - } - var pub *m7s.Publisher - if pub, err = gb.Publish(gb.Context, streamPath); err == nil { - pub.Type = m7s.PublishTypeReplay - go gb.replayPS(pub, f) - util.ReturnOK(w, r) + if streamPath == "" { + if strings.HasPrefix(dump, "/") { + streamPath = "replay" + dump } else { - util.ReturnError(http.StatusInternalServerError, err.Error(), w, r) + streamPath = "replay/" + dump } } + var puller gb28181.DumpPuller + puller.GetPullJob().Init(&puller, &gb.Plugin, streamPath, config.Pull{ + URL: dump, + }, nil) } diff --git a/plugin/gb28181/pkg/transceiver.go b/plugin/gb28181/pkg/transceiver.go index 598c3b9..02706dd 100644 --- a/plugin/gb28181/pkg/transceiver.go +++ b/plugin/gb28181/pkg/transceiver.go @@ -1,6 +1,7 @@ package gb28181 import ( + "errors" "net" "os" @@ -28,6 +29,8 @@ type PSPublisher struct { Receiver Receiver } +var ErrRTPReceiveLost = errors.New("rtp receive lost") + type Receiver struct { task.Task rtp.Packet @@ -136,9 +139,13 @@ func (dec *PSPublisher) decProgramStreamMap() (err error) { } func (p *Receiver) ReadRTP(rtp util.Buffer) (err error) { + lastSeq := p.SequenceNumber if err = p.Unmarshal(rtp); err != nil { return } + if p.SequenceNumber != lastSeq+1 { + return ErrRTPReceiveLost + } if p.Enabled(p, task.TraceLevel) { p.Trace("rtp", "len", rtp.Len(), "seq", p.SequenceNumber, 
"payloadType", p.PayloadType, "ssrc", p.SSRC) } diff --git a/server.go b/server.go index c83554a..4c4624f 100644 --- a/server.go +++ b/server.go @@ -59,7 +59,7 @@ type ( PulseInterval time.Duration `default:"5s" desc:"心跳事件间隔"` //心跳事件间隔 DisableAll bool `default:"false" desc:"禁用所有插件"` //禁用所有插件 StreamAlias map[config.Regexp]string `desc:"流别名"` - Device []*PullProxy + PullProxy []*PullProxy } WaitStream struct { StreamPath string @@ -328,7 +328,7 @@ func (s *Server) Start() (err error) { if s.DB != nil { s.DB.AutoMigrate(&PullProxy{}) } - for _, d := range s.Device { + for _, d := range s.PullProxy { if d.ID != 0 { d.server = s if d.Type == "" {