mirror of
https://github.com/langhuihui/monibuca.git
synced 2025-12-24 13:48:04 +08:00
Compare commits
76 Commits
dwdcth-pat
...
revert-169
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
421f77c1de | ||
|
|
d2715d61d5 | ||
|
|
3649b667d9 | ||
|
|
d74b9efdbf | ||
|
|
7f2712ae71 | ||
|
|
3189973690 | ||
|
|
f440d4cbbf | ||
|
|
df348e6946 | ||
|
|
5a2af0e7fd | ||
|
|
d26690c7fa | ||
|
|
d8047931c9 | ||
|
|
c6bb61eba8 | ||
|
|
a6114700d7 | ||
|
|
55d54734e7 | ||
|
|
cbe9f2d645 | ||
|
|
0caba3d496 | ||
|
|
2fe53ce68f | ||
|
|
e2d81f5fa6 | ||
|
|
f884cb8376 | ||
|
|
7ca1c1da0a | ||
|
|
564c37d123 | ||
|
|
e0c3051fa1 | ||
|
|
2247c1c3af | ||
|
|
d12d9ff421 | ||
|
|
5a4b88a5a8 | ||
|
|
ddcdf831ae | ||
|
|
4677bb796e | ||
|
|
1fe5951c9c | ||
|
|
5f1ee80fbc | ||
|
|
5972451ff0 | ||
|
|
88cd32ac9c | ||
|
|
15a8c4b612 | ||
|
|
f3bea7ebb7 | ||
|
|
6c1113f226 | ||
|
|
6eac815e48 | ||
|
|
1fbaa70117 | ||
|
|
18e47d5ee3 | ||
|
|
09f32bbb03 | ||
|
|
79150b05de | ||
|
|
4ae2a8c7e2 | ||
|
|
6d0c48c45d | ||
|
|
12e0af7222 | ||
|
|
fc55f620ed | ||
|
|
335af79dde | ||
|
|
0fd16f070c | ||
|
|
fdf81335bf | ||
|
|
300304954c | ||
|
|
ebade42c73 | ||
|
|
16d8f00e85 | ||
|
|
0913df7b8c | ||
|
|
3ea37046ff | ||
|
|
b89b90eb40 | ||
|
|
146cbd98b4 | ||
|
|
2dfc10e994 | ||
|
|
3ce37cde94 | ||
|
|
0ba2e1b270 | ||
|
|
6037cbe18d | ||
|
|
0042568dff | ||
|
|
b3a3e37429 | ||
|
|
c1616740ec | ||
|
|
4a66d542ce | ||
|
|
af8ab607bf | ||
|
|
d30b123de9 | ||
|
|
d78225c357 | ||
|
|
3041d11648 | ||
|
|
e47e039d29 | ||
|
|
a1e672790f | ||
|
|
7e3db70daa | ||
|
|
d9f29c16f9 | ||
|
|
c6c1596d98 | ||
|
|
04fbefd537 | ||
|
|
645596d319 | ||
|
|
1ed078d240 | ||
|
|
8c0de3b388 | ||
|
|
305ef1834a | ||
|
|
2eb847c0c4 |
13
.github/FUNDING.yml
vendored
Normal file
13
.github/FUNDING.yml
vendored
Normal file
@@ -0,0 +1,13 @@
|
||||
# These are supported funding model platforms
|
||||
|
||||
github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
|
||||
patreon: # Replace with a single Patreon username
|
||||
open_collective: monibuca
|
||||
ko_fi: # Replace with a single Ko-fi username
|
||||
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
|
||||
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
|
||||
liberapay: # Replace with a single Liberapay username
|
||||
issuehunt: # Replace with a single IssueHunt username
|
||||
otechie: # Replace with a single Otechie username
|
||||
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
|
||||
custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
|
||||
96
.github/workflows/go.yml
vendored
Normal file
96
.github/workflows/go.yml
vendored
Normal file
@@ -0,0 +1,96 @@
|
||||
name: Go
|
||||
|
||||
on:
|
||||
create:
|
||||
tags:
|
||||
- 'v5*'
|
||||
env:
|
||||
dest: bin
|
||||
jobs:
|
||||
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 1
|
||||
|
||||
- name: Set up Env
|
||||
run: echo "version=${GITHUB_REF:11}" >> $GITHUB_ENV
|
||||
- name: Set beta
|
||||
if: contains(env.version, 'beta')
|
||||
run: echo "dest=beta" >> $GITHUB_ENV
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.23.4
|
||||
|
||||
- name: Cache Go modules
|
||||
uses: actions/cache@v1
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: runner.os−go−{ { hashFiles('**/go.sum') } }
|
||||
restore-keys: ${{ runner.os }}-go-
|
||||
|
||||
- name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v2
|
||||
if: success() && startsWith(github.ref, 'refs/tags/')
|
||||
with:
|
||||
version: v1.8.3
|
||||
args: release --rm-dist
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
# - name: Checkout m7s-import
|
||||
# uses: actions/checkout@v3
|
||||
# with:
|
||||
# repository: langhuihui/m7s-import
|
||||
# path: m7s-import
|
||||
# persist-credentials: false # otherwise, the token used is the GITHUB_TOKEN, instead of your personal access token.
|
||||
# fetch-depth: 0
|
||||
|
||||
# - name: Add bin to m7s-import
|
||||
# if: success() && startsWith(github.ref, 'refs/tags/')
|
||||
# run: |
|
||||
# cd m7s-import
|
||||
# mkdir -p apps/m7s-website/src/public/bin
|
||||
# cp ../dist/m7s_${{ env.version }}_windows_amd64.tar.gz apps/m7s-website/src/public/bin/m7s_windows_amd64.tar.gz
|
||||
# cp ../dist/m7s_${{ env.version }}_darwin_amd64.tar.gz apps/m7s-website/src/public/bin/m7s_darwin_amd64.tar.gz
|
||||
# cp ../dist/m7s_${{ env.version }}_darwin_arm64.tar.gz apps/m7s-website/src/public/bin/m7s_darwin_arm64.tar.gz
|
||||
# cp ../dist/m7s_${{ env.version }}_linux_amd64.tar.gz apps/m7s-website/src/public/bin/m7s_linux_amd64.tar.gz
|
||||
# cp ../dist/m7s_${{ env.version }}_linux_arm64.tar.gz apps/m7s-website/src/public/bin/m7s_linux_arm64.tar.gz
|
||||
# ls apps/m7s-website/src/public/bin
|
||||
- name: copy
|
||||
if: success() && startsWith(github.ref, 'refs/tags/')
|
||||
run: |
|
||||
mkdir -p bin
|
||||
cp dist/m7s_${{ env.version }}_windows_amd64.tar.gz bin/m7s_v5_windows_amd64.tar.gz
|
||||
cp dist/m7s_${{ env.version }}_darwin_amd64.tar.gz bin/m7s_v5_darwin_amd64.tar.gz
|
||||
cp dist/m7s_${{ env.version }}_darwin_arm64.tar.gz bin/m7s_v5_darwin_arm64.tar.gz
|
||||
cp dist/m7s_${{ env.version }}_linux_amd64.tar.gz bin/m7s_v5_linux_amd64.tar.gz
|
||||
cp dist/m7s_${{ env.version }}_linux_arm64.tar.gz bin/m7s_v5_linux_arm64.tar.gz
|
||||
ls bin
|
||||
- uses: jakejarvis/s3-sync-action@master
|
||||
# with:
|
||||
# args: --acl public-read --follow-symlinks --delete
|
||||
env:
|
||||
AWS_S3_ENDPOINT: https://${{ secrets.R2_DOMAIN }}
|
||||
AWS_ACCESS_KEY_ID: ${{ secrets.R2_KEY }}
|
||||
AWS_SECRET_ACCESS_KEY: ${{ secrets.R2_SECRET }}
|
||||
AWS_S3_BUCKET: monibuca
|
||||
SOURCE_DIR: 'bin'
|
||||
DEST_DIR: ${{ env.dest }}
|
||||
- name: docker build
|
||||
if: success() && startsWith(github.ref, 'refs/tags/')
|
||||
run: |
|
||||
tar -zxvf bin/m7s_linux_amd64.tar.gz
|
||||
mv m7s monibuca_linux
|
||||
docker login -u langhuihui -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
docker build -t langhuihui/monibuca:v5 .
|
||||
docker push langhuihui/monibuca:v5
|
||||
- name: docker push
|
||||
if: success() && !contains(env.version, 'beta')
|
||||
run: |
|
||||
docker tag langhuihui/monibuca:v5 langhuihui/monibuca:${{ env.version }}
|
||||
docker push langhuihui/monibuca:${{ env.version }}
|
||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -12,4 +12,9 @@ bin
|
||||
!plugin/record
|
||||
*.flv
|
||||
pullcf.yaml
|
||||
admin.zip
|
||||
*.zip
|
||||
__debug*
|
||||
.cursorrules
|
||||
example/default/*
|
||||
!example/default/main.go
|
||||
!example/default/config.yaml
|
||||
|
||||
129
README_CN.md
129
README_CN.md
@@ -1,45 +1,114 @@
|
||||
# 介绍
|
||||
monibuca 是一款纯 go 开发的扩展性极强的高性能流媒体服务器开发框架
|
||||
# Monibuca v5
|
||||
|
||||
[](https://pkg.go.dev/m7s.live/v5)
|
||||
|
||||
Monibuca(简称 m7s)是一款纯 Go 开发的开源流媒体服务器开发框架,支持多种流媒体协议。
|
||||
|
||||
## 特性
|
||||
|
||||
- 🚀 高性能:采用纯 Go 开发,充分利用 Go 的并发特性
|
||||
- 🔌 插件化架构:核心功能都以插件形式提供,可按需加载
|
||||
- 🛠 可扩展性强:支持自定义插件开发
|
||||
- 📽 多协议支持:
|
||||
- RTMP
|
||||
- HTTP-FLV
|
||||
- HLS
|
||||
- WebRTC
|
||||
- GB28181
|
||||
- SRT
|
||||
- 🎯 低延迟:针对实时性场景优化
|
||||
- 📊 实时监控:支持 Prometheus 监控集成
|
||||
- 🔄 集群支持:支持分布式部署
|
||||
|
||||
## 快速开始
|
||||
|
||||
### 安装
|
||||
|
||||
1. 确保已安装 Go 1.23 或更高版本
|
||||
2. 创建新项目并初始化:
|
||||
|
||||
```bash
|
||||
mkdir my-m7s-server && cd my-m7s-server
|
||||
go mod init my-m7s-server
|
||||
```
|
||||
|
||||
3. 创建主程序:
|
||||
|
||||
# 使用
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"m7s.live/v5"
|
||||
_ "m7s.live/v5/plugin/cascade"
|
||||
_ "m7s.live/v5/plugin/debug"
|
||||
_ "m7s.live/v5/plugin/flv"
|
||||
_ "m7s.live/v5/plugin/gb28181"
|
||||
_ "m7s.live/v5/plugin/hls"
|
||||
_ "m7s.live/v5/plugin/logrotate"
|
||||
_ "m7s.live/v5/plugin/monitor"
|
||||
_ "m7s.live/v5/plugin/mp4"
|
||||
_ "m7s.live/v5/plugin/preview"
|
||||
_ "m7s.live/v5/plugin/rtmp"
|
||||
_ "m7s.live/v5/plugin/rtsp"
|
||||
_ "m7s.live/v5/plugin/sei"
|
||||
_ "m7s.live/v5/plugin/snap"
|
||||
_ "m7s.live/v5/plugin/srt"
|
||||
_ "m7s.live/v5/plugin/stress"
|
||||
_ "m7s.live/v5/plugin/transcode"
|
||||
_ "m7s.live/v5/plugin/webrtc"
|
||||
)
|
||||
|
||||
func main() {
|
||||
m7s.Run(context.Background(), "config.yaml")
|
||||
}
|
||||
```
|
||||
|
||||
### 配置说明
|
||||
|
||||
创建 `config.yaml` 配置文件:
|
||||
|
||||
```yaml
|
||||
# 全局配置
|
||||
global:
|
||||
http: :8080
|
||||
|
||||
# 插件配置
|
||||
rtmp:
|
||||
tcp: :1935
|
||||
```
|
||||
|
||||
## 构建选项
|
||||
|
||||
| 构建标签 | 描述 |
|
||||
| ---------- | ---------------------- |
|
||||
| disable_rm | 禁用内存池 |
|
||||
| sqlite | 启用 SQLite 存储 |
|
||||
| sqliteCGO | 启用 SQLite CGO 版本 |
|
||||
| mysql | 启用 MySQL 存储 |
|
||||
| postgres | 启用 PostgreSQL 存储 |
|
||||
| duckdb | 启用 DuckDB 存储 |
|
||||
| taskpanic | 抛出 panic(用于测试) |
|
||||
|
||||
## 项目结构
|
||||
|
||||
```
|
||||
## 构建标签
|
||||
monibuca/
|
||||
├── plugin/ # 官方插件目录
|
||||
├── pkg/ # 核心包
|
||||
├── example/ # 示例代码
|
||||
├── doc/ # 文档
|
||||
└── scripts/ # 实用脚本
|
||||
```
|
||||
|
||||
| 标签 | 描述 |
|
||||
|-----------|-----------------|
|
||||
| disable_rm | 禁用内存池 |
|
||||
| sqlite | 启用 sqlite |
|
||||
| sqliteCGO | 启用 sqlite cgo版本 |
|
||||
| mysql | 启用 mysql |
|
||||
| postgres | 启用 postgres |
|
||||
| duckdb | 启用 duckdb |
|
||||
| taskpanic | 抛出 panic,用于测试 |
|
||||
## 插件开发
|
||||
|
||||
查看 [plugin/README_CN.md](./plugin/README_CN.md) 了解如何开发自定义插件。
|
||||
|
||||
## 更多示例
|
||||
## Prometheus 监控
|
||||
|
||||
查看 example 目录
|
||||
|
||||
# 创建插件
|
||||
|
||||
到 plugin 目录下查看 README_CN.md
|
||||
|
||||
# Prometheus
|
||||
配置 Prometheus:
|
||||
|
||||
```yaml
|
||||
scrape_configs:
|
||||
@@ -48,3 +117,21 @@ scrape_configs:
|
||||
static_configs:
|
||||
- targets: ["localhost:8080"]
|
||||
```
|
||||
|
||||
## 示例
|
||||
|
||||
更多使用示例请查看 [example](./example) 目录。
|
||||
|
||||
## 贡献指南
|
||||
|
||||
欢迎提交 Pull Request 或 Issue。
|
||||
|
||||
## 许可证
|
||||
|
||||
本项目采用 AGPL 许可证,详见 [LICENSE](./LICENSE) 文件。
|
||||
|
||||
## 相关资源
|
||||
|
||||
- [官方文档](https://docs.m7s.live/)
|
||||
- [API 参考](https://pkg.go.dev/m7s.live/v5)
|
||||
- [示例代码](./example)
|
||||
|
||||
621
api.go
621
api.go
@@ -4,17 +4,19 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"m7s.live/v5/pkg/db"
|
||||
"m7s.live/v5/pkg/task"
|
||||
|
||||
myip "github.com/husanpao/ip"
|
||||
"github.com/mcuadros/go-defaults"
|
||||
"github.com/shirou/gopsutil/v4/cpu"
|
||||
"github.com/shirou/gopsutil/v4/disk"
|
||||
@@ -33,11 +35,17 @@ import (
|
||||
var localIP string
|
||||
var empty = &emptypb.Empty{}
|
||||
|
||||
func init() {
|
||||
// Add auto-migration for User model
|
||||
db.AutoMigrations = append(db.AutoMigrations, &db.User{})
|
||||
}
|
||||
|
||||
func (s *Server) SysInfo(context.Context, *emptypb.Empty) (res *pb.SysInfoResponse, err error) {
|
||||
if localIP == "" {
|
||||
if conn, err := net.Dial("udp", "114.114.114.114:80"); err == nil {
|
||||
localIP, _, _ = strings.Cut(conn.LocalAddr().String(), ":")
|
||||
}
|
||||
localIP = myip.LocalIP()
|
||||
// if conn, err := net.Dial("udp", "114.114.114.114:80"); err == nil {
|
||||
// localIP, _, _ = strings.Cut(conn.LocalAddr().String(), ":")
|
||||
// }
|
||||
}
|
||||
res = &pb.SysInfoResponse{
|
||||
Code: 0,
|
||||
@@ -45,6 +53,7 @@ func (s *Server) SysInfo(context.Context, *emptypb.Empty) (res *pb.SysInfoRespon
|
||||
Data: &pb.SysInfoData{
|
||||
Version: Version,
|
||||
LocalIP: localIP,
|
||||
PublicIP: util.GetPublicIP(""),
|
||||
StartTime: timestamppb.New(s.StartTime),
|
||||
GoVersion: runtime.Version(),
|
||||
Os: runtime.GOOS,
|
||||
@@ -54,14 +63,28 @@ func (s *Server) SysInfo(context.Context, *emptypb.Empty) (res *pb.SysInfoRespon
|
||||
}
|
||||
for p := range s.Plugins.Range {
|
||||
res.Data.Plugins = append(res.Data.Plugins, &pb.PluginInfo{
|
||||
Name: p.Meta.Name,
|
||||
Version: p.Meta.Version,
|
||||
Disabled: p.Disabled,
|
||||
Name: p.Meta.Name,
|
||||
PushAddr: p.PushAddr,
|
||||
PlayAddr: p.PlayAddr,
|
||||
Description: p.GetDescriptions(),
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) DisabledPlugins(ctx context.Context, _ *emptypb.Empty) (res *pb.DisabledPluginsResponse, err error) {
|
||||
res = &pb.DisabledPluginsResponse{
|
||||
Data: make([]*pb.PluginInfo, len(s.disabledPlugins)),
|
||||
}
|
||||
for i, p := range s.disabledPlugins {
|
||||
res.Data[i] = &pb.PluginInfo{
|
||||
Name: p.Meta.Name,
|
||||
Description: p.GetDescriptions(),
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// /api/stream/annexb/{streamPath}
|
||||
func (s *Server) api_Stream_AnnexB_(rw http.ResponseWriter, r *http.Request) {
|
||||
publisher, ok := s.Streams.Get(r.PathValue("streamPath"))
|
||||
@@ -104,29 +127,43 @@ func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err
|
||||
tmp, _ := json.Marshal(pub.GetDescriptions())
|
||||
res = &pb.StreamInfoResponse{
|
||||
Data: &pb.StreamInfo{
|
||||
Meta: string(tmp),
|
||||
Path: pub.StreamPath,
|
||||
State: int32(pub.State),
|
||||
StartTime: timestamppb.New(pub.StartTime),
|
||||
Subscribers: int32(pub.Subscribers.Length),
|
||||
PluginName: pub.Plugin.Meta.Name,
|
||||
Type: pub.Type,
|
||||
Speed: float32(pub.Speed),
|
||||
StopOnIdle: pub.DelayCloseTimeout > 0,
|
||||
IsPaused: pub.Paused != nil,
|
||||
Gop: int32(pub.GOP),
|
||||
BufferTime: durationpb.New(pub.BufferTime),
|
||||
Meta: string(tmp),
|
||||
Path: pub.StreamPath,
|
||||
State: int32(pub.State),
|
||||
StartTime: timestamppb.New(pub.StartTime),
|
||||
// Subscribers: int32(pub.Subscribers.Length),
|
||||
PluginName: pub.Plugin.Meta.Name,
|
||||
Type: pub.Type,
|
||||
Speed: float32(pub.Speed),
|
||||
StopOnIdle: pub.DelayCloseTimeout > 0,
|
||||
IsPaused: pub.Paused != nil,
|
||||
Gop: int32(pub.GOP),
|
||||
BufferTime: durationpb.New(pub.BufferTime),
|
||||
},
|
||||
}
|
||||
|
||||
var audioBpsOut, videoBpsOut uint32
|
||||
var serverSubCount int32
|
||||
for sub := range pub.Subscribers.Range {
|
||||
if sub.AudioReader != nil {
|
||||
audioBpsOut += sub.AudioReader.BPS
|
||||
}
|
||||
if sub.VideoReader != nil {
|
||||
videoBpsOut += sub.VideoReader.BPS
|
||||
}
|
||||
if sub.Type == SubscribeTypeServer {
|
||||
serverSubCount++
|
||||
}
|
||||
}
|
||||
res.Data.Subscribers = serverSubCount
|
||||
if t := pub.AudioTrack.AVTrack; t != nil {
|
||||
if t.ICodecCtx != nil {
|
||||
res.Data.AudioTrack = &pb.AudioTrackInfo{
|
||||
Codec: t.FourCC().String(),
|
||||
Meta: t.GetInfo(),
|
||||
Bps: uint32(t.BPS),
|
||||
Fps: uint32(t.FPS),
|
||||
Delta: pub.AudioTrack.Delta.String(),
|
||||
Codec: t.FourCC().String(),
|
||||
Meta: t.GetInfo(),
|
||||
Bps: uint32(t.BPS),
|
||||
BpsOut: audioBpsOut,
|
||||
Fps: uint32(t.FPS),
|
||||
Delta: pub.AudioTrack.Delta.String(),
|
||||
}
|
||||
res.Data.AudioTrack.SampleRate = uint32(t.ICodecCtx.(pkg.IAudioCodecCtx).GetSampleRate())
|
||||
res.Data.AudioTrack.Channels = uint32(t.ICodecCtx.(pkg.IAudioCodecCtx).GetChannels())
|
||||
@@ -135,12 +172,13 @@ func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err
|
||||
if t := pub.VideoTrack.AVTrack; t != nil {
|
||||
if t.ICodecCtx != nil {
|
||||
res.Data.VideoTrack = &pb.VideoTrackInfo{
|
||||
Codec: t.FourCC().String(),
|
||||
Meta: t.GetInfo(),
|
||||
Bps: uint32(t.BPS),
|
||||
Fps: uint32(t.FPS),
|
||||
Delta: pub.VideoTrack.Delta.String(),
|
||||
Gop: uint32(pub.GOP),
|
||||
Codec: t.FourCC().String(),
|
||||
Meta: t.GetInfo(),
|
||||
Bps: uint32(t.BPS),
|
||||
BpsOut: videoBpsOut,
|
||||
Fps: uint32(t.FPS),
|
||||
Delta: pub.VideoTrack.Delta.String(),
|
||||
Gop: uint32(pub.GOP),
|
||||
}
|
||||
res.Data.VideoTrack.Width = uint32(t.ICodecCtx.(pkg.IVideoCodecCtx).Width())
|
||||
res.Data.VideoTrack.Height = uint32(t.ICodecCtx.(pkg.IVideoCodecCtx).Height())
|
||||
@@ -150,9 +188,28 @@ func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err
|
||||
}
|
||||
|
||||
func (s *Server) StreamInfo(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.StreamInfoResponse, err error) {
|
||||
var recordings []*pb.RecordingDetail
|
||||
s.Records.Call(func() error {
|
||||
for record := range s.Records.Range {
|
||||
if record.StreamPath == req.StreamPath {
|
||||
recordings = append(recordings, &pb.RecordingDetail{
|
||||
FilePath: record.FilePath,
|
||||
Mode: record.Mode,
|
||||
Fragment: durationpb.New(record.Fragment),
|
||||
Append: record.Append,
|
||||
PluginName: record.Plugin.Meta.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
s.Streams.Call(func() error {
|
||||
if pub, ok := s.Streams.Get(req.StreamPath); ok {
|
||||
res, err = s.getStreamInfo(pub)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res.Data.Recording = recordings
|
||||
} else {
|
||||
err = pkg.ErrNotFound
|
||||
}
|
||||
@@ -164,13 +221,30 @@ func (s *Server) StreamInfo(ctx context.Context, req *pb.StreamSnapRequest) (res
|
||||
func (s *Server) TaskTree(context.Context, *emptypb.Empty) (res *pb.TaskTreeResponse, err error) {
|
||||
var fillData func(m task.ITask) *pb.TaskTreeData
|
||||
fillData = func(m task.ITask) (res *pb.TaskTreeData) {
|
||||
res = &pb.TaskTreeData{Id: m.GetTaskID(), Pointer: uint64(uintptr(unsafe.Pointer(m.GetTask()))), State: uint32(m.GetState()), Type: uint32(m.GetTaskType()), Owner: m.GetOwnerType(), StartTime: timestamppb.New(m.GetTask().StartTime), Description: m.GetDescriptions()}
|
||||
if m == nil {
|
||||
return
|
||||
}
|
||||
t := m.GetTask()
|
||||
res = &pb.TaskTreeData{
|
||||
Id: m.GetTaskID(),
|
||||
Pointer: uint64(t.GetTaskPointer()),
|
||||
State: uint32(m.GetState()),
|
||||
Type: uint32(m.GetTaskType()),
|
||||
Owner: m.GetOwnerType(),
|
||||
StartTime: timestamppb.New(t.StartTime),
|
||||
Description: m.GetDescriptions(),
|
||||
StartReason: t.StartReason,
|
||||
}
|
||||
if job, ok := m.(task.IJob); ok {
|
||||
if blockedTask := job.Blocked(); blockedTask != nil {
|
||||
res.Blocked = fillData(blockedTask)
|
||||
}
|
||||
for t := range job.RangeSubTask {
|
||||
res.Children = append(res.Children, fillData(t))
|
||||
child := fillData(t)
|
||||
if child == nil {
|
||||
continue
|
||||
}
|
||||
res.Children = append(res.Children, child)
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -180,7 +254,7 @@ func (s *Server) TaskTree(context.Context, *emptypb.Empty) (res *pb.TaskTreeResp
|
||||
}
|
||||
|
||||
func (s *Server) StopTask(ctx context.Context, req *pb.RequestWithId64) (resp *pb.SuccessResponse, err error) {
|
||||
t := (*task.Task)(unsafe.Pointer(uintptr(req.Id)))
|
||||
t := task.FromPointer(uintptr(req.Id))
|
||||
if t == nil {
|
||||
return nil, pkg.ErrNotFound
|
||||
}
|
||||
@@ -189,7 +263,7 @@ func (s *Server) StopTask(ctx context.Context, req *pb.RequestWithId64) (resp *p
|
||||
}
|
||||
|
||||
func (s *Server) RestartTask(ctx context.Context, req *pb.RequestWithId64) (resp *pb.SuccessResponse, err error) {
|
||||
t := (*task.Task)(unsafe.Pointer(uintptr(req.Id)))
|
||||
t := task.FromPointer(uintptr(req.Id))
|
||||
if t == nil {
|
||||
return nil, pkg.ErrNotFound
|
||||
}
|
||||
@@ -198,10 +272,6 @@ func (s *Server) RestartTask(ctx context.Context, req *pb.RequestWithId64) (resp
|
||||
}
|
||||
|
||||
func (s *Server) GetRecording(ctx context.Context, req *emptypb.Empty) (resp *pb.RecordingListResponse, err error) {
|
||||
if s.DB == nil {
|
||||
err = pkg.ErrNoDB
|
||||
return
|
||||
}
|
||||
s.Records.Call(func() error {
|
||||
resp = &pb.RecordingListResponse{}
|
||||
for record := range s.Records.Range {
|
||||
@@ -209,6 +279,7 @@ func (s *Server) GetRecording(ctx context.Context, req *emptypb.Empty) (resp *pb
|
||||
StreamPath: record.StreamPath,
|
||||
StartTime: timestamppb.New(record.StartTime),
|
||||
Type: reflect.TypeOf(record.recorder).String(),
|
||||
Pointer: uint64(record.GetTaskPointer()),
|
||||
})
|
||||
}
|
||||
return nil
|
||||
@@ -238,6 +309,7 @@ func (s *Server) GetSubscribers(context.Context, *pb.SubscribersRequest) (res *p
|
||||
Timestamp: ar.AbsTime,
|
||||
Delay: ar.Delay,
|
||||
State: int32(ar.State),
|
||||
Bps: ar.BPS,
|
||||
}
|
||||
}
|
||||
if vr := subscriber.VideoReader; vr != nil {
|
||||
@@ -246,6 +318,7 @@ func (s *Server) GetSubscribers(context.Context, *pb.SubscribersRequest) (res *p
|
||||
Timestamp: vr.AbsTime,
|
||||
Delay: vr.Delay,
|
||||
State: int32(vr.State),
|
||||
Bps: vr.BPS,
|
||||
}
|
||||
}
|
||||
subscribers = append(subscribers, snap)
|
||||
@@ -310,7 +383,14 @@ func (s *Server) api_VideoTrack_SSE(rw http.ResponseWriter, r *http.Request) {
|
||||
if r.URL.RawQuery != "" {
|
||||
streamPath += "?" + r.URL.RawQuery
|
||||
}
|
||||
suber, err := s.Subscribe(r.Context(), streamPath)
|
||||
suber, err := s.SubscribeWithConfig(r.Context(), streamPath, config.Subscribe{
|
||||
SubVideo: true,
|
||||
SubType: SubscribeTypeAPI,
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(rw, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
sse := util.NewSSE(rw, r.Context())
|
||||
PlayBlock(suber, (func(frame *pkg.AVFrame) (err error))(nil), func(frame *pkg.AVFrame) (err error) {
|
||||
var snap pb.TrackSnapShot
|
||||
@@ -334,6 +414,42 @@ func (s *Server) api_VideoTrack_SSE(rw http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) api_AudioTrack_SSE(rw http.ResponseWriter, r *http.Request) {
|
||||
streamPath := r.PathValue("streamPath")
|
||||
if r.URL.RawQuery != "" {
|
||||
streamPath += "?" + r.URL.RawQuery
|
||||
}
|
||||
suber, err := s.SubscribeWithConfig(r.Context(), streamPath, config.Subscribe{
|
||||
SubAudio: true,
|
||||
SubType: SubscribeTypeAPI,
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(rw, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
sse := util.NewSSE(rw, r.Context())
|
||||
PlayBlock(suber, func(frame *pkg.AVFrame) (err error) {
|
||||
var snap pb.TrackSnapShot
|
||||
snap.Sequence = frame.Sequence
|
||||
snap.Timestamp = uint32(frame.Timestamp / time.Millisecond)
|
||||
snap.WriteTime = timestamppb.New(frame.WriteTime)
|
||||
snap.Wrap = make([]*pb.Wrap, len(frame.Wraps))
|
||||
snap.KeyFrame = frame.IDR
|
||||
for i, wrap := range frame.Wraps {
|
||||
snap.Wrap[i] = &pb.Wrap{
|
||||
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
|
||||
Size: uint32(wrap.GetSize()),
|
||||
Data: wrap.String(),
|
||||
}
|
||||
}
|
||||
return sse.WriteJSON(&snap)
|
||||
}, (func(frame *pkg.AVFrame) (err error))(nil))
|
||||
if err != nil {
|
||||
http.Error(rw, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) VideoTrackSnap(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.TrackSnapShotResponse, err error) {
|
||||
s.Streams.Call(func() error {
|
||||
if pub, ok := s.Streams.Get(req.StreamPath); ok && pub.HasVideoTrack() {
|
||||
@@ -351,7 +467,6 @@ func (s *Server) VideoTrackSnap(ctx context.Context, req *pb.StreamSnapRequest)
|
||||
}
|
||||
}
|
||||
pub.VideoTrack.Ring.Do(func(v *pkg.AVFrame) {
|
||||
//if v.TryRLock() {
|
||||
if len(v.Wraps) > 0 {
|
||||
var snap pb.TrackSnapShot
|
||||
snap.Sequence = v.Sequence
|
||||
@@ -369,8 +484,6 @@ func (s *Server) VideoTrackSnap(ctx context.Context, req *pb.StreamSnapRequest)
|
||||
}
|
||||
data.Ring = append(data.Ring, &snap)
|
||||
}
|
||||
//v.RUnlock()
|
||||
//}
|
||||
})
|
||||
res = &pb.TrackSnapShotResponse{
|
||||
Code: 0,
|
||||
@@ -385,19 +498,16 @@ func (s *Server) VideoTrackSnap(ctx context.Context, req *pb.StreamSnapRequest)
|
||||
return
|
||||
}
|
||||
|
||||
// Restart stops the server with a restart error and returns
|
||||
// a success response. This method is used to restart the server
|
||||
// gracefully.
|
||||
func (s *Server) Restart(ctx context.Context, req *pb.RequestWithId) (res *pb.SuccessResponse, err error) {
|
||||
if s, ok := Servers.Get(req.Id); ok {
|
||||
s.Stop(pkg.ErrRestart)
|
||||
}
|
||||
s.Stop(pkg.ErrRestart)
|
||||
return &pb.SuccessResponse{}, err
|
||||
}
|
||||
|
||||
func (s *Server) Shutdown(ctx context.Context, req *pb.RequestWithId) (res *pb.SuccessResponse, err error) {
|
||||
if s, ok := Servers.Get(req.Id); ok {
|
||||
s.Stop(task.ErrStopByUser)
|
||||
} else {
|
||||
return nil, pkg.ErrNotFound
|
||||
}
|
||||
s.Stop(task.ErrStopByUser)
|
||||
return &pb.SuccessResponse{}, err
|
||||
}
|
||||
|
||||
@@ -481,6 +591,20 @@ func (s *Server) StopPublish(ctx context.Context, req *pb.StreamSnapRequest) (re
|
||||
|
||||
// /api/stream/list
|
||||
func (s *Server) StreamList(_ context.Context, req *pb.StreamListRequest) (res *pb.StreamListResponse, err error) {
|
||||
recordingMap := make(map[string][]*pb.RecordingDetail)
|
||||
s.Records.Call(func() error {
|
||||
for record := range s.Records.Range {
|
||||
recordingMap[record.StreamPath] = append(recordingMap[record.StreamPath], &pb.RecordingDetail{
|
||||
FilePath: record.FilePath,
|
||||
Mode: record.Mode,
|
||||
Fragment: durationpb.New(record.Fragment),
|
||||
Append: record.Append,
|
||||
PluginName: record.Plugin.Meta.Name,
|
||||
Pointer: uint64(record.GetTaskPointer()),
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
s.Streams.Call(func() error {
|
||||
var streams []*pb.StreamInfo
|
||||
for publisher := range s.Streams.Range {
|
||||
@@ -488,6 +612,7 @@ func (s *Server) StreamList(_ context.Context, req *pb.StreamListRequest) (res *
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
info.Data.Recording = recordingMap[info.Data.Path]
|
||||
streams = append(streams, info.Data)
|
||||
}
|
||||
res = &pb.StreamListResponse{Data: streams, Total: int32(s.Streams.Length), PageNum: req.PageNum, PageSize: req.PageSize}
|
||||
@@ -529,6 +654,16 @@ func (s *Server) Api_Stream_Position_SSE(rw http.ResponseWriter, r *http.Request
|
||||
}, rw, r)
|
||||
}
|
||||
|
||||
// func (s *Server) Api_Vod_Position(rw http.ResponseWriter, r *http.Request) {
|
||||
// streamPath := r.URL.Query().Get("streamPath")
|
||||
// s.Streams.Call(func() error {
|
||||
// if pub, ok := s.Streams.Get(streamPath); ok {
|
||||
// t = pub.GetPosition()
|
||||
// }
|
||||
// return nil
|
||||
// })
|
||||
// }
|
||||
|
||||
func (s *Server) Summary(context.Context, *emptypb.Empty) (res *pb.SummaryResponse, err error) {
|
||||
dur := time.Since(s.lastSummaryTime)
|
||||
if dur < time.Second {
|
||||
@@ -601,8 +736,27 @@ func (s *Server) api_Config_JSON_(rw http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) GetConfigFile(_ context.Context, req *emptypb.Empty) (res *pb.GetConfigFileResponse, err error) {
|
||||
res = &pb.GetConfigFileResponse{}
|
||||
res.Data = string(s.configFileContent)
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) UpdateConfigFile(_ context.Context, req *pb.UpdateConfigFileRequest) (res *pb.SuccessResponse, err error) {
|
||||
if s.configFileContent != nil {
|
||||
s.configFileContent = []byte(req.Content)
|
||||
os.WriteFile(filepath.Join(ExecDir, s.conf.(string)), s.configFileContent, 0644)
|
||||
res = &pb.SuccessResponse{}
|
||||
} else {
|
||||
err = pkg.ErrNotFound
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) GetConfig(_ context.Context, req *pb.GetConfigRequest) (res *pb.GetConfigResponse, err error) {
|
||||
res = &pb.GetConfigResponse{}
|
||||
res = &pb.GetConfigResponse{
|
||||
Data: &pb.ConfigData{},
|
||||
}
|
||||
var conf *config.Config
|
||||
if req.Name == "global" {
|
||||
conf = &s.Config
|
||||
@@ -619,19 +773,19 @@ func (s *Server) GetConfig(_ context.Context, req *pb.GetConfigRequest) (res *pb
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
res.File = string(mm)
|
||||
res.Data.File = string(mm)
|
||||
|
||||
mm, err = yaml.Marshal(conf.Modify)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
res.Modified = string(mm)
|
||||
res.Data.Modified = string(mm)
|
||||
|
||||
mm, err = yaml.Marshal(conf.GetMap())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
res.Merged = string(mm)
|
||||
res.Data.Merged = string(mm)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -658,33 +812,36 @@ func (s *Server) ModifyConfig(_ context.Context, req *pb.ModifyConfigRequest) (r
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) GetDeviceList(ctx context.Context, req *emptypb.Empty) (res *pb.DeviceListResponse, err error) {
|
||||
res = &pb.DeviceListResponse{}
|
||||
for device := range s.Devices.Range {
|
||||
res.Data = append(res.Data, &pb.DeviceInfo{
|
||||
Name: device.Name,
|
||||
CreateTime: timestamppb.New(device.CreatedAt),
|
||||
UpdateTime: timestamppb.New(device.UpdatedAt),
|
||||
Type: device.Type,
|
||||
PullURL: device.URL,
|
||||
ParentID: uint32(device.ParentID),
|
||||
Status: uint32(device.Status),
|
||||
ID: uint32(device.ID),
|
||||
PullOnStart: device.PullOnStart,
|
||||
StopOnIdle: device.StopOnIdle,
|
||||
Audio: device.Audio,
|
||||
RecordPath: device.Record.FilePath,
|
||||
RecordFragment: durationpb.New(device.Record.Fragment),
|
||||
Description: device.Description,
|
||||
Rtt: uint32(device.RTT.Milliseconds()),
|
||||
StreamPath: device.GetStreamPath(),
|
||||
})
|
||||
}
|
||||
func (s *Server) GetPullProxyList(ctx context.Context, req *emptypb.Empty) (res *pb.PullProxyListResponse, err error) {
|
||||
res = &pb.PullProxyListResponse{}
|
||||
s.PullProxies.Call(func() error {
|
||||
for device := range s.PullProxies.Range {
|
||||
res.Data = append(res.Data, &pb.PullProxyInfo{
|
||||
Name: device.Name,
|
||||
CreateTime: timestamppb.New(device.CreatedAt),
|
||||
UpdateTime: timestamppb.New(device.UpdatedAt),
|
||||
Type: device.Type,
|
||||
PullURL: device.URL,
|
||||
ParentID: uint32(device.ParentID),
|
||||
Status: uint32(device.Status),
|
||||
ID: uint32(device.ID),
|
||||
PullOnStart: device.PullOnStart,
|
||||
StopOnIdle: device.StopOnIdle,
|
||||
Audio: device.Audio,
|
||||
RecordPath: device.Record.FilePath,
|
||||
RecordFragment: durationpb.New(device.Record.Fragment),
|
||||
Description: device.Description,
|
||||
Rtt: uint32(device.RTT.Milliseconds()),
|
||||
StreamPath: device.GetStreamPath(),
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) AddDevice(ctx context.Context, req *pb.DeviceInfo) (res *pb.SuccessResponse, err error) {
|
||||
device := &Device{
|
||||
func (s *Server) AddPullProxy(ctx context.Context, req *pb.PullProxyInfo) (res *pb.SuccessResponse, err error) {
|
||||
device := &PullProxy{
|
||||
server: s,
|
||||
Name: req.Name,
|
||||
Type: req.Type,
|
||||
@@ -693,6 +850,28 @@ func (s *Server) AddDevice(ctx context.Context, req *pb.DeviceInfo) (res *pb.Suc
|
||||
Description: req.Description,
|
||||
StreamPath: req.StreamPath,
|
||||
}
|
||||
if device.Type == "" {
|
||||
var u *url.URL
|
||||
u, err = url.Parse(req.PullURL)
|
||||
if err != nil {
|
||||
s.Error("parse pull url failed", "error", err)
|
||||
return
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "srt", "rtsp", "rtmp":
|
||||
device.Type = u.Scheme
|
||||
default:
|
||||
ext := filepath.Ext(u.Path)
|
||||
switch ext {
|
||||
case ".m3u8":
|
||||
device.Type = "hls"
|
||||
case ".flv":
|
||||
device.Type = "flv"
|
||||
case ".mp4":
|
||||
device.Type = "mp4"
|
||||
}
|
||||
}
|
||||
}
|
||||
defaults.SetDefaults(&device.Pull)
|
||||
defaults.SetDefaults(&device.Record)
|
||||
device.URL = req.PullURL
|
||||
@@ -705,22 +884,52 @@ func (s *Server) AddDevice(ctx context.Context, req *pb.DeviceInfo) (res *pb.Suc
|
||||
return
|
||||
}
|
||||
s.DB.Create(device)
|
||||
s.Devices.Add(device)
|
||||
if req.StreamPath == "" {
|
||||
device.StreamPath = device.GetStreamPath()
|
||||
}
|
||||
s.PullProxies.Add(device)
|
||||
res = &pb.SuccessResponse{}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) UpdateDevice(ctx context.Context, req *pb.DeviceInfo) (res *pb.SuccessResponse, err error) {
|
||||
func (s *Server) UpdatePullProxy(ctx context.Context, req *pb.PullProxyInfo) (res *pb.SuccessResponse, err error) {
|
||||
if s.DB == nil {
|
||||
err = pkg.ErrNoDB
|
||||
return
|
||||
}
|
||||
target := &Device{}
|
||||
s.DB.First(target, req.ID)
|
||||
target := &PullProxy{
|
||||
server: s,
|
||||
}
|
||||
err = s.DB.First(target, req.ID).Error
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
target.Name = req.Name
|
||||
target.URL = req.PullURL
|
||||
target.ParentID = uint(req.ParentID)
|
||||
target.Type = req.Type
|
||||
if target.Type == "" {
|
||||
var u *url.URL
|
||||
u, err = url.Parse(req.PullURL)
|
||||
if err != nil {
|
||||
s.Error("parse pull url failed", "error", err)
|
||||
return
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "srt", "rtsp", "rtmp":
|
||||
target.Type = u.Scheme
|
||||
default:
|
||||
ext := filepath.Ext(u.Path)
|
||||
switch ext {
|
||||
case ".m3u8":
|
||||
target.Type = "hls"
|
||||
case ".flv":
|
||||
target.Type = "flv"
|
||||
case ".mp4":
|
||||
target.Type = "mp4"
|
||||
}
|
||||
}
|
||||
}
|
||||
target.PullOnStart = req.PullOnStart
|
||||
target.StopOnIdle = req.StopOnIdle
|
||||
target.Audio = req.Audio
|
||||
@@ -730,37 +939,59 @@ func (s *Server) UpdateDevice(ctx context.Context, req *pb.DeviceInfo) (res *pb.
|
||||
target.RTT = time.Duration(int(req.Rtt)) * time.Millisecond
|
||||
target.StreamPath = req.StreamPath
|
||||
s.DB.Save(target)
|
||||
var needStopOld *PullProxy
|
||||
s.PullProxies.Call(func() error {
|
||||
if device, ok := s.PullProxies.Get(uint(req.ID)); ok {
|
||||
if target.URL != device.URL || device.Audio != target.Audio || device.StreamPath != target.StreamPath || device.Record.FilePath != target.Record.FilePath || device.Record.Fragment != target.Record.Fragment {
|
||||
device.Stop(task.ErrStopByUser)
|
||||
needStopOld = device
|
||||
return nil
|
||||
}
|
||||
if device.PullOnStart != target.PullOnStart && target.PullOnStart && device.Handler != nil && device.Status == PullProxyStatusOnline {
|
||||
device.Handler.Pull()
|
||||
}
|
||||
device.Name = target.Name
|
||||
device.PullOnStart = target.PullOnStart
|
||||
device.StopOnIdle = target.StopOnIdle
|
||||
device.Description = target.Description
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if needStopOld != nil {
|
||||
needStopOld.WaitStopped()
|
||||
s.PullProxies.Add(target)
|
||||
}
|
||||
res = &pb.SuccessResponse{}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) RemoveDevice(ctx context.Context, req *pb.RequestWithId) (res *pb.SuccessResponse, err error) {
|
||||
func (s *Server) RemovePullProxy(ctx context.Context, req *pb.RequestWithId) (res *pb.SuccessResponse, err error) {
|
||||
if s.DB == nil {
|
||||
err = pkg.ErrNoDB
|
||||
return
|
||||
}
|
||||
res = &pb.SuccessResponse{}
|
||||
if req.Id > 0 {
|
||||
tx := s.DB.Delete(&Device{
|
||||
tx := s.DB.Delete(&PullProxy{
|
||||
ID: uint(req.Id),
|
||||
})
|
||||
err = tx.Error
|
||||
s.Devices.Call(func() error {
|
||||
if device, ok := s.Devices.Get(uint(req.Id)); ok {
|
||||
s.PullProxies.Call(func() error {
|
||||
if device, ok := s.PullProxies.Get(uint(req.Id)); ok {
|
||||
device.Stop(task.ErrStopByUser)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
} else if req.StreamPath != "" {
|
||||
var deviceList []Device
|
||||
var deviceList []PullProxy
|
||||
s.DB.Find(&deviceList, "stream_path=?", req.StreamPath)
|
||||
if len(deviceList) > 0 {
|
||||
for _, device := range deviceList {
|
||||
tx := s.DB.Delete(&Device{}, device.ID)
|
||||
tx := s.DB.Delete(&PullProxy{}, device.ID)
|
||||
err = tx.Error
|
||||
s.Devices.Call(func() error {
|
||||
if device, ok := s.Devices.Get(uint(device.ID)); ok {
|
||||
s.PullProxies.Call(func() error {
|
||||
if device, ok := s.PullProxies.Get(uint(device.ID)); ok {
|
||||
device.Stop(task.ErrStopByUser)
|
||||
}
|
||||
return nil
|
||||
@@ -871,3 +1102,209 @@ func (s *Server) SetStreamAlias(ctx context.Context, req *pb.SetStreamAliasReque
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) GetPushProxyList(ctx context.Context, req *emptypb.Empty) (res *pb.PushProxyListResponse, err error) {
|
||||
res = &pb.PushProxyListResponse{}
|
||||
s.PushProxies.Call(func() error {
|
||||
for device := range s.PushProxies.Range {
|
||||
res.Data = append(res.Data, &pb.PushProxyInfo{
|
||||
Name: device.Name,
|
||||
CreateTime: timestamppb.New(device.CreatedAt),
|
||||
UpdateTime: timestamppb.New(device.UpdatedAt),
|
||||
Type: device.Type,
|
||||
PushURL: device.URL,
|
||||
ParentID: uint32(device.ParentID),
|
||||
Status: uint32(device.Status),
|
||||
ID: uint32(device.ID),
|
||||
PushOnStart: device.PushOnStart,
|
||||
Audio: device.Audio,
|
||||
Description: device.Description,
|
||||
Rtt: uint32(device.RTT.Milliseconds()),
|
||||
StreamPath: device.GetStreamPath(),
|
||||
})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) AddPushProxy(ctx context.Context, req *pb.PushProxyInfo) (res *pb.SuccessResponse, err error) {
|
||||
device := &PushProxy{
|
||||
server: s,
|
||||
Name: req.Name,
|
||||
Type: req.Type,
|
||||
ParentID: uint(req.ParentID),
|
||||
PushOnStart: req.PushOnStart,
|
||||
Description: req.Description,
|
||||
StreamPath: req.StreamPath,
|
||||
}
|
||||
|
||||
if device.Type == "" {
|
||||
var u *url.URL
|
||||
u, err = url.Parse(req.PushURL)
|
||||
if err != nil {
|
||||
s.Error("parse pull url failed", "error", err)
|
||||
return
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "srt", "rtsp", "rtmp":
|
||||
device.Type = u.Scheme
|
||||
default:
|
||||
ext := filepath.Ext(u.Path)
|
||||
switch ext {
|
||||
case ".m3u8":
|
||||
device.Type = "hls"
|
||||
case ".flv":
|
||||
device.Type = "flv"
|
||||
case ".mp4":
|
||||
device.Type = "mp4"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
defaults.SetDefaults(&device.Push)
|
||||
device.URL = req.PushURL
|
||||
device.Audio = req.Audio
|
||||
if s.DB == nil {
|
||||
err = pkg.ErrNoDB
|
||||
return
|
||||
}
|
||||
s.DB.Create(device)
|
||||
s.PushProxies.Add(device)
|
||||
res = &pb.SuccessResponse{}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) UpdatePushProxy(ctx context.Context, req *pb.PushProxyInfo) (res *pb.SuccessResponse, err error) {
|
||||
if s.DB == nil {
|
||||
err = pkg.ErrNoDB
|
||||
return
|
||||
}
|
||||
target := &PushProxy{
|
||||
server: s,
|
||||
}
|
||||
err = s.DB.First(target, req.ID).Error
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
target.Name = req.Name
|
||||
target.URL = req.PushURL
|
||||
target.ParentID = uint(req.ParentID)
|
||||
target.Type = req.Type
|
||||
if target.Type == "" {
|
||||
var u *url.URL
|
||||
u, err = url.Parse(req.PushURL)
|
||||
if err != nil {
|
||||
s.Error("parse pull url failed", "error", err)
|
||||
return
|
||||
}
|
||||
switch u.Scheme {
|
||||
case "srt", "rtsp", "rtmp":
|
||||
target.Type = u.Scheme
|
||||
default:
|
||||
ext := filepath.Ext(u.Path)
|
||||
switch ext {
|
||||
case ".m3u8":
|
||||
target.Type = "hls"
|
||||
case ".flv":
|
||||
target.Type = "flv"
|
||||
case ".mp4":
|
||||
target.Type = "mp4"
|
||||
}
|
||||
}
|
||||
}
|
||||
target.PushOnStart = req.PushOnStart
|
||||
target.Audio = req.Audio
|
||||
target.Description = req.Description
|
||||
target.RTT = time.Duration(int(req.Rtt)) * time.Millisecond
|
||||
target.StreamPath = req.StreamPath
|
||||
s.DB.Save(target)
|
||||
var needStopOld *PushProxy
|
||||
s.PushProxies.Call(func() error {
|
||||
if device, ok := s.PushProxies.Get(uint(req.ID)); ok {
|
||||
if target.URL != device.URL || device.Audio != target.Audio || device.StreamPath != target.StreamPath {
|
||||
device.Stop(task.ErrStopByUser)
|
||||
needStopOld = device
|
||||
return nil
|
||||
}
|
||||
if device.PushOnStart != target.PushOnStart && target.PushOnStart && device.Handler != nil && device.Status == PushProxyStatusOnline {
|
||||
device.Handler.Push()
|
||||
}
|
||||
device.Name = target.Name
|
||||
device.PushOnStart = target.PushOnStart
|
||||
device.Description = target.Description
|
||||
}
|
||||
return nil
|
||||
})
|
||||
if needStopOld != nil {
|
||||
needStopOld.WaitStopped()
|
||||
s.PushProxies.Add(target)
|
||||
}
|
||||
res = &pb.SuccessResponse{}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) RemovePushProxy(ctx context.Context, req *pb.RequestWithId) (res *pb.SuccessResponse, err error) {
|
||||
if s.DB == nil {
|
||||
err = pkg.ErrNoDB
|
||||
return
|
||||
}
|
||||
res = &pb.SuccessResponse{}
|
||||
if req.Id > 0 {
|
||||
tx := s.DB.Delete(&PushProxy{
|
||||
ID: uint(req.Id),
|
||||
})
|
||||
err = tx.Error
|
||||
s.PushProxies.Call(func() error {
|
||||
if device, ok := s.PushProxies.Get(uint(req.Id)); ok {
|
||||
device.Stop(task.ErrStopByUser)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
} else if req.StreamPath != "" {
|
||||
var deviceList []PushProxy
|
||||
s.DB.Find(&deviceList, "stream_path=?", req.StreamPath)
|
||||
if len(deviceList) > 0 {
|
||||
for _, device := range deviceList {
|
||||
tx := s.DB.Delete(&PushProxy{}, device.ID)
|
||||
err = tx.Error
|
||||
s.PushProxies.Call(func() error {
|
||||
if device, ok := s.PushProxies.Get(uint(device.ID)); ok {
|
||||
device.Stop(task.ErrStopByUser)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
}
|
||||
return
|
||||
} else {
|
||||
res.Message = "parameter wrong"
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) GetTransformList(ctx context.Context, req *emptypb.Empty) (res *pb.TransformListResponse, err error) {
|
||||
res = &pb.TransformListResponse{}
|
||||
s.Transforms.Call(func() error {
|
||||
for transform := range s.Transforms.Range {
|
||||
info := &pb.Transform{
|
||||
StreamPath: transform.StreamPath,
|
||||
Target: transform.Target,
|
||||
}
|
||||
if transform.TransformJob != nil {
|
||||
info.PluginName = transform.TransformJob.Plugin.Meta.Name
|
||||
var result []byte
|
||||
result, err = yaml.Marshal(transform.TransformJob.Config)
|
||||
if err != nil {
|
||||
s.Error("marshal transform config failed", "error", err)
|
||||
return err
|
||||
}
|
||||
info.Config = string(result)
|
||||
}
|
||||
res.Data = append(res.Data, info)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
175
device.go
175
device.go
@@ -1,175 +0,0 @@
|
||||
package m7s
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gorm.io/gorm"
|
||||
"m7s.live/v5/pkg/config"
|
||||
"m7s.live/v5/pkg/task"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
const (
|
||||
DeviceStatusOffline byte = iota
|
||||
DeviceStatusOnline
|
||||
DeviceStatusPulling
|
||||
DeviceStatusDisabled
|
||||
)
|
||||
|
||||
type (
|
||||
IDevice interface {
|
||||
Pull()
|
||||
}
|
||||
Device struct {
|
||||
server *Server `gorm:"-:all"`
|
||||
task.Work `gorm:"-:all" yaml:"-"`
|
||||
ID uint `gorm:"primarykey"`
|
||||
CreatedAt, UpdatedAt time.Time `yaml:"-"`
|
||||
DeletedAt gorm.DeletedAt `yaml:"-"`
|
||||
Name string
|
||||
StreamPath string
|
||||
PullOnStart, Audio, StopOnIdle bool
|
||||
config.Pull `gorm:"embedded;embeddedPrefix:pull_"`
|
||||
config.Record `gorm:"embedded;embeddedPrefix:record_"`
|
||||
ParentID uint
|
||||
Type string
|
||||
Status byte
|
||||
Description string
|
||||
RTT time.Duration
|
||||
Handler IDevice `gorm:"-:all" yaml:"-"`
|
||||
}
|
||||
DeviceManager struct {
|
||||
task.Manager[uint, *Device]
|
||||
}
|
||||
DeviceTask struct {
|
||||
task.TickTask
|
||||
Device *Device
|
||||
Plugin *Plugin
|
||||
}
|
||||
HTTPDevice struct {
|
||||
DeviceTask
|
||||
tcpAddr *net.TCPAddr
|
||||
url *url.URL
|
||||
}
|
||||
)
|
||||
|
||||
func (d *Device) GetKey() uint {
|
||||
return d.ID
|
||||
}
|
||||
|
||||
func (d *Device) GetStreamPath() string {
|
||||
if d.StreamPath == "" {
|
||||
return fmt.Sprintf("device/%s/%d", d.Type, d.ID)
|
||||
}
|
||||
return d.StreamPath
|
||||
}
|
||||
|
||||
func (d *Device) Start() (err error) {
|
||||
for plugin := range d.server.Plugins.Range {
|
||||
if devicePlugin, ok := plugin.handler.(IDevicePlugin); ok && strings.EqualFold(d.Type, plugin.Meta.Name) {
|
||||
deviceTask := devicePlugin.OnDeviceAdd(d)
|
||||
if deviceTask == nil {
|
||||
continue
|
||||
}
|
||||
if deviceTask, ok := deviceTask.(IDevice); ok {
|
||||
d.Handler = deviceTask
|
||||
}
|
||||
if t, ok := deviceTask.(task.ITask); ok {
|
||||
if ticker, ok := t.(task.IChannelTask); ok {
|
||||
t.OnStart(func() {
|
||||
ticker.Tick(nil)
|
||||
})
|
||||
}
|
||||
d.AddTask(t)
|
||||
} else {
|
||||
d.ChangeStatus(DeviceStatusOnline)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (d *Device) ChangeStatus(status byte) {
|
||||
if d.Status == status {
|
||||
return
|
||||
}
|
||||
from := d.Status
|
||||
d.Info("device status changed", "from", from, "to", status)
|
||||
d.Status = status
|
||||
d.Update()
|
||||
switch status {
|
||||
case DeviceStatusOnline:
|
||||
if d.PullOnStart && from == DeviceStatusOffline {
|
||||
d.Handler.Pull()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Device) Update() {
|
||||
if d.server.DB != nil {
|
||||
d.server.DB.Omit("deleted_at").Save(d)
|
||||
}
|
||||
}
|
||||
|
||||
func (d *DeviceTask) Dispose() {
|
||||
d.Device.ChangeStatus(DeviceStatusOffline)
|
||||
d.TickTask.Dispose()
|
||||
d.Plugin.Server.Streams.Call(func() error {
|
||||
if stream, ok := d.Plugin.Server.Streams.Get(d.Device.GetStreamPath()); ok {
|
||||
stream.Stop(task.ErrStopByUser)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (d *DeviceTask) Pull() {
|
||||
var pubConf = d.Plugin.config.Publish
|
||||
pubConf.PubAudio = d.Device.Audio
|
||||
pubConf.DelayCloseTimeout = util.Conditional(d.Device.StopOnIdle, time.Second*5, 0)
|
||||
d.Plugin.handler.Pull(d.Device.GetStreamPath(), d.Device.Pull, &pubConf)
|
||||
}
|
||||
|
||||
func (d *HTTPDevice) Start() (err error) {
|
||||
d.url, err = url.Parse(d.Device.URL)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if ips, err := net.LookupIP(d.url.Hostname()); err != nil {
|
||||
return err
|
||||
} else if len(ips) == 0 {
|
||||
return fmt.Errorf("no IP found for host: %s", d.url.Hostname())
|
||||
} else {
|
||||
d.tcpAddr, err = net.ResolveTCPAddr("tcp", net.JoinHostPort(ips[0].String(), d.url.Port()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if d.tcpAddr.Port == 0 {
|
||||
if d.url.Scheme == "https" || d.url.Scheme == "wss" {
|
||||
d.tcpAddr.Port = 443
|
||||
} else {
|
||||
d.tcpAddr.Port = 80
|
||||
}
|
||||
}
|
||||
}
|
||||
return d.DeviceTask.Start()
|
||||
}
|
||||
|
||||
func (d *HTTPDevice) GetTickInterval() time.Duration {
|
||||
return time.Second * 10
|
||||
}
|
||||
|
||||
func (d *HTTPDevice) Tick(any) {
|
||||
startTime := time.Now()
|
||||
conn, err := net.DialTCP("tcp", nil, d.tcpAddr)
|
||||
if err != nil {
|
||||
d.Device.ChangeStatus(DeviceStatusOffline)
|
||||
return
|
||||
}
|
||||
conn.Close()
|
||||
d.Device.RTT = time.Since(startTime)
|
||||
d.Device.ChangeStatus(DeviceStatusOnline)
|
||||
}
|
||||
132
doc/arch.md
Normal file
132
doc/arch.md
Normal file
@@ -0,0 +1,132 @@
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph Core["Core System"]
|
||||
Server["Server"]
|
||||
ConfigManager["Config Manager"]
|
||||
LogManager["Log Manager"]
|
||||
TaskManager["Task Manager"]
|
||||
PluginRegistry["Plugin Registry"]
|
||||
MetricsCollector["Metrics Collector"]
|
||||
EventBus["Event Bus"]
|
||||
end
|
||||
|
||||
subgraph Media["Media Processing"]
|
||||
CodecRegistry["Codec Registry"]
|
||||
MediaEngine["Media Engine"]
|
||||
AVTracks["AV Tracks"]
|
||||
MediaFormats["Media Formats"]
|
||||
MediaTransform["Media Transform"]
|
||||
end
|
||||
|
||||
subgraph Streams["Stream Management"]
|
||||
StreamManager["Stream Manager"]
|
||||
Publisher["Publisher"]
|
||||
Subscriber["Subscriber"]
|
||||
StreamBuffer["Stream Buffer"]
|
||||
StreamState["Stream State"]
|
||||
StreamEvents["Stream Events"]
|
||||
AliasManager["Alias Manager"]
|
||||
end
|
||||
|
||||
subgraph Plugins["Plugin System"]
|
||||
PluginLoader["Plugin Loader"]
|
||||
PluginConfig["Plugin Config"]
|
||||
PluginLifecycle["Plugin Lifecycle"]
|
||||
PluginAPI["Plugin API"]
|
||||
|
||||
subgraph PluginTypes["Plugin Types"]
|
||||
RTSP["RTSP"]
|
||||
HLS["HLS"]
|
||||
WebRTC["WebRTC"]
|
||||
GB28181["GB28181"]
|
||||
RTMP["RTMP"]
|
||||
Room["Room"]
|
||||
Debug["Debug"]
|
||||
end
|
||||
end
|
||||
|
||||
subgraph Storage["Storage System"]
|
||||
RecordManager["Record Manager"]
|
||||
FileManager["File Manager"]
|
||||
StorageQuota["Storage Quota"]
|
||||
StorageEvents["Storage Events"]
|
||||
end
|
||||
|
||||
subgraph API["API Layer"]
|
||||
GRPCServer["gRPC Server"]
|
||||
HTTPServer["HTTP Server"]
|
||||
WebhookManager["Webhook Manager"]
|
||||
AuthManager["Auth Manager"]
|
||||
SSEHandler["SSE Handler"]
|
||||
MetricsAPI["Metrics API"]
|
||||
end
|
||||
|
||||
subgraph Forwarding["Stream Forwarding"]
|
||||
ForwardingManager["Forwarding Manager"]
|
||||
PullManager["Pull Manager"]
|
||||
PushManager["Push Manager"]
|
||||
TranscodeManager["Transcode Manager"]
|
||||
end
|
||||
|
||||
%% Core System Relationships
|
||||
Core --> Plugins
|
||||
Core --> API
|
||||
Core --> Streams
|
||||
Core --> Storage
|
||||
Core --> Media
|
||||
Core --> Forwarding
|
||||
|
||||
%% Plugin System Relationships
|
||||
PluginLoader --> PluginTypes
|
||||
PluginTypes --> StreamManager
|
||||
PluginTypes --> ForwardingManager
|
||||
PluginTypes --> API
|
||||
|
||||
%% Stream Management Relationships
|
||||
StreamManager --> Publisher
|
||||
StreamManager --> Subscriber
|
||||
Publisher --> AVTracks
|
||||
Subscriber --> AVTracks
|
||||
Publisher --> StreamEvents
|
||||
Subscriber --> StreamEvents
|
||||
|
||||
%% Media Processing Relationships
|
||||
MediaEngine --> CodecRegistry
|
||||
MediaEngine --> MediaTransform
|
||||
MediaTransform --> AVTracks
|
||||
MediaFormats --> MediaTransform
|
||||
|
||||
%% API Layer Relationships
|
||||
GRPCServer --> AuthManager
|
||||
HTTPServer --> AuthManager
|
||||
WebhookManager --> EventBus
|
||||
MetricsAPI --> MetricsCollector
|
||||
|
||||
%% Forwarding Relationships
|
||||
ForwardingManager --> PullManager
|
||||
ForwardingManager --> PushManager
|
||||
ForwardingManager --> TranscodeManager
|
||||
PullManager --> Publisher
|
||||
PushManager --> Subscriber
|
||||
|
||||
%% Storage Relationships
|
||||
RecordManager --> Publisher
|
||||
FileManager --> StorageEvents
|
||||
StorageQuota --> StorageEvents
|
||||
|
||||
classDef core fill:#f9f,stroke:#333,stroke-width:2px
|
||||
classDef plugin fill:#bbf,stroke:#333,stroke-width:2px
|
||||
classDef stream fill:#bfb,stroke:#333,stroke-width:2px
|
||||
classDef api fill:#fbb,stroke:#333,stroke-width:2px
|
||||
classDef media fill:#fbf,stroke:#333,stroke-width:2px
|
||||
classDef storage fill:#bff,stroke:#333,stroke-width:2px
|
||||
classDef forward fill:#ffb,stroke:#333,stroke-width:2px
|
||||
|
||||
class Server,ConfigManager,LogManager,TaskManager,PluginRegistry,MetricsCollector,EventBus core
|
||||
class PluginLoader,PluginConfig,PluginLifecycle,PluginAPI,RTSP,HLS,WebRTC,GB28181,RTMP,Room,Debug plugin
|
||||
class StreamManager,Publisher,Subscriber,StreamBuffer,StreamState,StreamEvents,AliasManager stream
|
||||
class GRPCServer,HTTPServer,WebhookManager,AuthManager,SSEHandler,MetricsAPI api
|
||||
class CodecRegistry,MediaEngine,AVTracks,MediaFormats,MediaTransform media
|
||||
class RecordManager,FileManager,StorageQuota,StorageEvents storage
|
||||
class ForwardingManager,PullManager,PushManager,TranscodeManager forward
|
||||
```
|
||||
154
doc/cluster.md
Normal file
154
doc/cluster.md
Normal file
@@ -0,0 +1,154 @@
|
||||
# Monibuca 集群架构设计
|
||||
|
||||
本文档描述了 Monibuca 的集群架构设计,包括推流负载均衡和拉流负载均衡的实现方案。
|
||||
|
||||
## 整体架构
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph 负载均衡层
|
||||
LB[负载均衡器/API网关]
|
||||
end
|
||||
|
||||
subgraph 集群节点
|
||||
N1[节点1]
|
||||
N2[节点2]
|
||||
N3[节点3]
|
||||
end
|
||||
|
||||
subgraph 服务发现
|
||||
Redis[(Redis/etcd)]
|
||||
end
|
||||
|
||||
Client1[推流客户端] --> LB
|
||||
Client2[拉流客户端] --> LB
|
||||
|
||||
LB --> N1
|
||||
LB --> N2
|
||||
LB --> N3
|
||||
|
||||
N1 <--> Redis
|
||||
N2 <--> Redis
|
||||
N3 <--> Redis
|
||||
|
||||
%% 节点间互通连接
|
||||
N1 <-.流媒体同步.-> N2
|
||||
N2 <-.流媒体同步.-> N3
|
||||
N1 <-.流媒体同步.-> N3
|
||||
```
|
||||
|
||||
## 节点间流媒体同步
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant C as 拉流客户端
|
||||
participant N2 as 节点2
|
||||
participant R as Redis/etcd
|
||||
participant N1 as 节点1(源流所在)
|
||||
|
||||
C->>N2: 请求拉流(Stream1)
|
||||
N2->>R: 查询Stream1位置
|
||||
R-->>N2: 返回Stream1在节点1
|
||||
N2->>N1: 请求Stream1
|
||||
N1-->>N2: 建立节点间流传输
|
||||
Note over N1,N2: 使用高效的节点间传输协议
|
||||
N2->>R: 注册Stream1副本信息
|
||||
N2-->>C: 向客户端推送流
|
||||
```
|
||||
|
||||
## 推流负载均衡
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant P as 推流客户端
|
||||
participant LB as 负载均衡器
|
||||
participant R as Redis/etcd
|
||||
participant N1 as 节点1
|
||||
participant N2 as 节点2
|
||||
|
||||
P->>LB: 发起推流请求
|
||||
LB->>R: 获取可用节点列表
|
||||
R-->>LB: 返回节点信息
|
||||
LB->>LB: 根据负载算法选择节点
|
||||
LB-->>P: 返回推流节点地址
|
||||
P->>N1: 建立推流连接
|
||||
N1->>R: 注册流信息
|
||||
```
|
||||
|
||||
## 拉流负载均衡
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant C as 拉流客户端
|
||||
participant LB as 负载均衡器
|
||||
participant R as Redis/etcd
|
||||
participant N1 as 源节点
|
||||
participant N2 as 边缘节点
|
||||
|
||||
C->>LB: 发起拉流请求
|
||||
LB->>R: 查询流信息
|
||||
R-->>LB: 返回流所在节点
|
||||
alt 就近节点已有流
|
||||
LB-->>C: 返回就近节点地址
|
||||
C->>N2: 建立拉流连接
|
||||
else 需要回源
|
||||
LB-->>C: 返回边缘节点地址
|
||||
C->>N2: 建立拉流连接
|
||||
N2->>N1: 回源拉流
|
||||
N2->>R: 注册流信息
|
||||
end
|
||||
```
|
||||
|
||||
## 关键特性
|
||||
|
||||
1. **高可用性**
|
||||
- 节点故障自动切换
|
||||
- 无单点故障设计
|
||||
- 服务自动发现
|
||||
- 多节点流媒体冗余备份
|
||||
|
||||
2. **负载均衡策略**
|
||||
- 基于节点负载的动态调度
|
||||
- 就近接入原则
|
||||
- 带宽占用均衡
|
||||
- 考虑节点间流量成本
|
||||
|
||||
3. **扩展性**
|
||||
- 支持水平扩展
|
||||
- 动态添加删除节点
|
||||
- 平滑扩容/缩容
|
||||
- 节点间按需同步流
|
||||
|
||||
4. **监控和管理**
|
||||
- 集群状态实时监控
|
||||
- 流量统计和分析
|
||||
- 节点健康检查
|
||||
- 跨节点流媒体质量监控
|
||||
|
||||
## 实现考虑
|
||||
|
||||
1. **服务发现**
|
||||
- 使用 Redis 或 etcd 存储集群节点信息
|
||||
- 定期更新节点状态和负载信息
|
||||
- 支持节点心跳检测
|
||||
- 维护流媒体在各节点的分布信息
|
||||
|
||||
2. **负载均衡算法**
|
||||
- 考虑 CPU 使用率
|
||||
- 考虑内存使用情况
|
||||
- 考虑带宽使用情况
|
||||
- 考虑地理位置因素
|
||||
- 考虑节点间网络质量
|
||||
|
||||
3. **容错机制**
|
||||
- 节点故障自动摘除
|
||||
- 流媒体自动切换
|
||||
- 会话保持机制
|
||||
- 节点间流媒体备份策略
|
||||
|
||||
4. **节点间通信**
|
||||
- 高效的流媒体转发协议
|
||||
- 节点间带宽优化
|
||||
- 流媒体缓存策略
|
||||
- 按需拉流和预加载策略
|
||||
- QoS保证机制
|
||||
158
doc_CN/stream_alias_tech.md
Normal file
158
doc_CN/stream_alias_tech.md
Normal file
@@ -0,0 +1,158 @@
|
||||
# Monibuca 流别名功能技术实现文档
|
||||
|
||||
## 1. 功能概述
|
||||
|
||||
流别名(Stream Alias)是 Monibuca 中的一个重要功能,它允许为已存在的流创建一个或多个别名,使得同一个流可以通过不同的路径被访问。这个功能在以下场景特别有用:
|
||||
|
||||
- 为长路径的流创建简短别名
|
||||
- 动态修改流的访问路径
|
||||
- 实现流的重定向功能
|
||||
|
||||
## 2. 核心数据结构
|
||||
|
||||
### 2.1 AliasStream 结构
|
||||
|
||||
```go
|
||||
type AliasStream struct {
|
||||
*Publisher // 继承自 Publisher
|
||||
AutoRemove bool // 是否自动移除
|
||||
StreamPath string // 原始流路径
|
||||
Alias string // 别名路径
|
||||
}
|
||||
```
|
||||
|
||||
### 2.2 StreamAlias 消息结构
|
||||
|
||||
```protobuf
|
||||
message StreamAlias {
|
||||
string streamPath = 1; // 原始流路径
|
||||
string alias = 2; // 别名
|
||||
bool autoRemove = 3; // 是否自动移除
|
||||
uint32 status = 4; // 状态
|
||||
}
|
||||
```
|
||||
|
||||
## 3. 核心功能实现
|
||||
|
||||
### 3.1 别名创建和修改
|
||||
|
||||
当调用 `SetStreamAlias` API 创建或修改别名时,系统会:
|
||||
|
||||
1. 验证并解析目标流路径
|
||||
2. 检查目标流是否存在
|
||||
3. 处理以下场景:
|
||||
- 修改现有别名:更新自动移除标志和流路径
|
||||
- 创建新别名:初始化新的 AliasStream 结构
|
||||
4. 处理订阅者转移或唤醒等待的订阅者
|
||||
|
||||
### 3.2 Publisher 启动时的别名处理
|
||||
|
||||
当一个 Publisher 启动时,系统会:
|
||||
|
||||
1. 检查是否存在指向该 Publisher 的别名
|
||||
2. 对于每个匹配的别名:
|
||||
- 如果别名的 Publisher 为空,设置为新的 Publisher
|
||||
- 如果别名已有 Publisher,转移订阅者到新的 Publisher
|
||||
3. 唤醒所有等待该流的订阅者
|
||||
|
||||
### 3.3 Publisher 销毁时的别名处理
|
||||
|
||||
Publisher 销毁时的处理流程:
|
||||
|
||||
1. 检查是否因被踢出而停止
|
||||
2. 从 Streams 中移除 Publisher
|
||||
3. 遍历所有别名,对于指向该 Publisher 的别名:
|
||||
- 如果设置了自动移除,则删除该别名
|
||||
- 否则保留别名结构
|
||||
4. 处理相关订阅者
|
||||
|
||||
### 3.4 订阅者处理机制
|
||||
|
||||
当新的订阅请求到来时:
|
||||
|
||||
1. 检查是否存在匹配的别名
|
||||
2. 如果存在别名:
|
||||
- 别名对应的 Publisher 存在:添加订阅者
|
||||
- Publisher 不存在:触发 OnSubscribe 事件
|
||||
3. 如果不存在别名:
|
||||
- 检查是否有匹配的正则表达式别名
|
||||
- 检查原始流是否存在
|
||||
- 根据情况添加订阅者或加入等待列表
|
||||
|
||||
## 4. API 接口
|
||||
|
||||
### 4.1 设置别名
|
||||
|
||||
```http
|
||||
POST /api/stream/alias
|
||||
```
|
||||
|
||||
请求体:
|
||||
```json
|
||||
{
|
||||
"streamPath": "原始流路径",
|
||||
"alias": "别名路径",
|
||||
"autoRemove": false
|
||||
}
|
||||
```
|
||||
|
||||
### 4.2 获取别名列表
|
||||
|
||||
```http
|
||||
GET /api/stream/alias
|
||||
```
|
||||
|
||||
响应体:
|
||||
```json
|
||||
{
|
||||
"code": 0,
|
||||
"message": "",
|
||||
"data": [
|
||||
{
|
||||
"streamPath": "原始流路径",
|
||||
"alias": "别名路径",
|
||||
"autoRemove": false,
|
||||
"status": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
## 5. 状态说明
|
||||
|
||||
别名状态(status)说明:
|
||||
- 0:初始状态
|
||||
- 1:别名已关联 Publisher
|
||||
- 2:存在同名的原始流
|
||||
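客户端解析别名列表时,可以把这些状态值定义为常量,避免散落的魔法数字(以下常量名为文档示例,并非源码中的定义):

```go
package main

import "fmt"

// 与上文状态取值一一对应。
const (
	AliasStatusInit     = 0 // 初始状态
	AliasStatusLinked   = 1 // 别名已关联 Publisher
	AliasStatusShadowed = 2 // 存在同名的原始流
)

func describe(status uint32) string {
	switch status {
	case AliasStatusLinked:
		return "已关联 Publisher"
	case AliasStatusShadowed:
		return "存在同名的原始流"
	default:
		return "初始状态"
	}
}

func main() {
	fmt.Println(describe(1)) // 输出:已关联 Publisher
}
```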
|
||||
## 6. 最佳实践
|
||||
|
||||
1. 使用自动移除(autoRemove)
|
||||
- 当需要临时重定向流时,建议启用自动移除
|
||||
- 这样在原始流结束时,别名会自动清理
|
||||
|
||||
2. 别名命名建议
|
||||
- 使用简短且有意义的别名
|
||||
- 避免使用特殊字符
|
||||
- 建议使用规范的路径格式
|
||||
|
||||
3. 性能考虑
|
||||
- 别名机制采用高效的内存映射
|
||||
- 订阅者转移时保持连接状态
|
||||
- 支持动态修改,无需重启服务
|
||||
|
||||
## 7. 注意事项
|
||||
|
||||
1. 别名冲突处理
|
||||
- 当创建的别名与现有流路径冲突时,系统会将其按"存在同名的原始流"处理(对应第 5 节中的状态 2)
|
||||
- 建议在创建别名前检查是否存在冲突
|
||||
|
||||
2. 订阅者行为
|
||||
- 别名修改时,现有订阅者会被转移到新的流
|
||||
- 确保客户端能够处理流重定向
|
||||
|
||||
3. 资源管理
|
||||
- 及时清理不需要的别名
|
||||
- 合理使用自动移除功能
|
||||
- 监控别名状态,避免资源泄露
|
||||
|
||||
206
doc_CN/stream_alias_usage.md
Normal file
@@ -0,0 +1,206 @@
|
||||
# Monibuca 流别名功能使用指南
|
||||
|
||||
## 1. 功能简介
|
||||
|
||||
流别名是 Monibuca 提供的一个强大功能,它允许您为同一个流创建多个不同的访问路径。这个功能不仅可以简化流的访问方式,更重要的是能够实现无缝的流内容切换,特别适合直播过程中插入广告等场景。
|
||||
|
||||
## 2. 基本使用方法
|
||||
|
||||
### 2.1 创建别名
|
||||
|
||||
通过 HTTP API 创建别名:
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:8080/api/stream/alias \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"streamPath": "live/original",
|
||||
"alias": "live/simple",
|
||||
"autoRemove": false
|
||||
}'
|
||||
```
|
||||
|
||||
### 2.2 查看当前别名列表
|
||||
|
||||
```bash
|
||||
curl http://localhost:8080/api/stream/alias
|
||||
```
|
||||
|
||||
### 2.3 删除别名
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:8080/api/stream/alias \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"alias": "live/simple"
|
||||
}'
|
||||
```
|
||||
|
||||
## 3. 实战案例:直播广告插入
|
||||
|
||||
### 3.1 场景描述
|
||||
|
||||
在直播过程中,经常需要在适当的时机插入广告。使用流别名功能,我们可以实现:
|
||||
- 在直播内容和广告之间无缝切换
|
||||
- 保持观众的持续观看体验
|
||||
- 灵活控制广告的插入时机
|
||||
- 支持多个广告源的轮换播放
|
||||
|
||||
### 3.2 实现步骤
|
||||
|
||||
1. **准备工作**
|
||||
```bash
|
||||
# 假设主直播流的路径为:live/main
|
||||
# 广告流的路径为:ads/ad1
|
||||
```
|
||||
|
||||
2. **创建主直播的别名**
|
||||
```bash
|
||||
curl -X POST http://localhost:8080/api/stream/alias \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"streamPath": "live/main",
|
||||
"alias": "live/show",
|
||||
"autoRemove": false
|
||||
}'
|
||||
```
|
||||
|
||||
3. **需要插入广告时**
|
||||
```bash
|
||||
# 将别名指向广告流
|
||||
curl -X POST http://localhost:8080/api/stream/alias \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"streamPath": "ads/ad1",
|
||||
"alias": "live/show",
|
||||
"autoRemove": false
|
||||
}'
|
||||
```
|
||||
|
||||
4. **广告播放结束后**
|
||||
```bash
|
||||
# 将别名重新指向主直播流
|
||||
curl -X POST http://localhost:8080/api/stream/alias \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"streamPath": "live/main",
|
||||
"alias": "live/show",
|
||||
"autoRemove": false
|
||||
}'
|
||||
```
|
||||
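上面四步也可以封装成一个小工具,在需要时切到广告、播完后自动切回主流。下面是基于该接口的 Go 示意(地址、路径与时长均沿用本文示例;用固定时长代表广告长度只是简化处理):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
	"time"
)

const api = "http://localhost:8080/api/stream/alias"

// point 把别名 alias 指向 streamPath,即文档中的 POST /api/stream/alias。
func point(alias, streamPath string) error {
	body := fmt.Sprintf(`{"streamPath":%q,"alias":%q,"autoRemove":false}`, streamPath, alias)
	resp, err := http.Post(api, "application/json", bytes.NewBufferString(body))
	if err != nil {
		return err
	}
	resp.Body.Close()
	return nil
}

// insertAd 把观众观看的 live/show 切到广告流,等待指定时长后切回主直播流。
func insertAd(adPath string, d time.Duration) error {
	if err := point("live/show", adPath); err != nil {
		return err
	}
	time.Sleep(d)
	return point("live/show", "live/main")
}

func main() {
	if err := insertAd("ads/ad1", 30*time.Second); err != nil {
		fmt.Println("切换失败:", err)
	}
}
```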
|
||||
### 3.3 效果说明
|
||||
|
||||
1. **对观众端的影响**
|
||||
- 观众始终访问 `live/show` 这个固定地址
|
||||
- 切换过程对观众无感知
|
||||
- 不会出现黑屏或卡顿
|
||||
- 无需刷新播放器
|
||||
|
||||
2. **对直播系统的影响**
|
||||
- 主播端推流不受影响
|
||||
- 支持多路广告源预加载
|
||||
- 可以实现精确的时间控制
|
||||
- 系统资源占用小
|
||||
|
||||
## 4. 进阶使用技巧
|
||||
|
||||
### 4.1 广告轮播方案
|
||||
|
||||
```bash
|
||||
# 创建多个广告流的别名
|
||||
curl -X POST http://localhost:8080/api/stream/alias \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"streamPath": "ads/ad1",
|
||||
"alias": "ads/current",
|
||||
"autoRemove": true
|
||||
}'
|
||||
|
||||
# 通过脚本定时切换不同的广告
|
||||
for ad in ad1 ad2 ad3; do
|
||||
curl -X POST http://localhost:8080/api/stream/alias \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{
|
||||
\"streamPath\": \"ads/$ad\",
|
||||
\"alias\": \"ads/current\",
|
||||
\"autoRemove\": true
|
||||
}"
|
||||
sleep 30 # 每个广告播放30秒
|
||||
done
|
||||
```
|
||||
|
||||
### 4.2 使用自动移除功能
|
||||
|
||||
当广告流结束时自动切回主流:
|
||||
|
||||
```bash
|
||||
curl -X POST http://localhost:8080/api/stream/alias \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"streamPath": "ads/ad1",
|
||||
"alias": "live/show",
|
||||
"autoRemove": true
|
||||
}'
|
||||
```
|
||||
|
||||
### 4.3 条件触发广告
|
||||
|
||||
结合 Monibuca 的其他功能,可以实现:
|
||||
- 观众数量达到阈值时插入广告
|
||||
- 直播时长达到特定值时插入广告
|
||||
- 根据直播内容标签触发相关广告
|
||||
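这类条件触发通常由一个外部巡检程序完成:定期读取观众数等指标,满足阈值时调用别名接口执行切换。下面是一个极简的轮询框架(Go 草图;`fetchViewerCount` 的数据来源取决于所使用的统计接口,这里仅以占位函数表示,属于文档之外的假设):

```go
package main

import (
	"fmt"
	"time"
)

// fetchViewerCount 返回指定流当前的观众数;实际实现需对接统计/监控接口(此处为占位假设)。
func fetchViewerCount(stream string) int {
	return 1200 // 示例数据
}

// triggerAd 执行插入广告的动作,可复用上文 insertAd 的做法。
func triggerAd() {
	fmt.Println("观众数达到阈值,切换到广告流")
}

func main() {
	const threshold = 1000
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for range ticker.C {
		if fetchViewerCount("live/main") >= threshold {
			triggerAd()
			return // 示例中只触发一次
		}
	}
}
```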
|
||||
## 5. 最佳实践建议
|
||||
|
||||
1. **广告内容预加载**
|
||||
- 提前准备好广告流
|
||||
- 确保广告源的稳定性
|
||||
- 使用缓存机制提高切换速度
|
||||
|
||||
2. **合理的切换策略**
|
||||
- 避免频繁切换影响用户体验
|
||||
- 选择适当的切换时机
|
||||
- 保持广告时长的合理控制
|
||||
|
||||
3. **监控和统计**
|
||||
- 记录广告播放情况
|
||||
- 监控切换过程是否平滑
|
||||
- 统计观众观看数据
|
||||
|
||||
4. **容错处理**
|
||||
- 广告流异常时快速切回主流
|
||||
- 设置合理的超时时间
|
||||
- 做好日志记录
|
||||
|
||||
## 6. 常见问题解答
|
||||
|
||||
1. **Q: 切换时观众会感知到卡顿吗?**
|
||||
A: 不会。流别名的切换是服务器端的操作,对客户端播放器完全透明。
|
||||
|
||||
2. **Q: 如何确保广告按预期时间播放?**
|
||||
A: 可以通过脚本控制切换时间,并配合自动移除功能来确保准确性。
|
||||
|
||||
3. **Q: 支持多少个并发的别名?**
|
||||
A: 理论上没有限制,但建议根据服务器资源合理使用。
|
||||
|
||||
4. **Q: 如何处理广告流异常的情况?**
|
||||
A: 建议使用自动移除功能,并配合监控系统及时发现和处理异常。
|
||||
|
||||
## 7. 注意事项
|
||||
|
||||
1. **资源管理**
|
||||
- 及时清理不再使用的别名
|
||||
- 避免创建过多无用的别名
|
||||
- 定期检查别名状态
|
||||
|
||||
2. **性能考虑**
|
||||
- 控制并发别名数量
|
||||
- 合理设置缓存策略
|
||||
- 监控服务器资源使用情况
|
||||
|
||||
3. **用户体验**
|
||||
- 控制广告频率和时长
|
||||
- 确保切换的流畅性
|
||||
- 考虑不同网络环境的用户
|
||||
|
||||
@@ -1,13 +1,5 @@
|
||||
global:
|
||||
loglevel: debug
|
||||
disableall: true
|
||||
#console:
|
||||
# secret: 00aea3af031f134d6307618b05ec4899
|
||||
cascadeserver:
|
||||
enable: true
|
||||
quic:
|
||||
listenaddr: :44944
|
||||
#flv:
|
||||
# pull:
|
||||
# pullonstart:
|
||||
# live/test: /Users/dexter/Movies/jb-demo.flv
|
||||
listenaddr: :44944
|
||||
7
example/8080/hook.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
hook:
|
||||
server_keep_alive:
|
||||
url: "http://your-webhook-endpoint"
|
||||
method: "POST"
|
||||
interval: 60 # 每60秒发送一次保活信息
|
||||
retryTimes: 3
|
||||
retryInterval: 1s
|
||||
@@ -1,15 +1,12 @@
|
||||
global:
|
||||
# loglevel: debug
|
||||
http:
|
||||
listenaddr: :8081
|
||||
listenaddrtls: :8555
|
||||
tcp:
|
||||
listenaddr: :50052
|
||||
loglevel: debug
|
||||
disableall: true
|
||||
http: :8081
|
||||
tcp: :50052
|
||||
cascadeclient:
|
||||
enable: true
|
||||
server: localhost:44944
|
||||
pull:
|
||||
enableregexp: true
|
||||
pullonsub:
|
||||
secret: dexter
|
||||
onsub:
|
||||
pull:
|
||||
.*: m7s://$0
|
||||
#console:
|
||||
# secret: de2c0bb9fd47684adc07a426e139239b
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
global:
|
||||
loglevel: debug
|
||||
enablelogin: false
|
||||
# db:
|
||||
# dbtype: mysql
|
||||
# dsn: root:Monibuca#!4@tcp(sh-cynosdbmysql-grp-kxt43lv6.sql.tencentcdb.com:28520)/lkm7s_v5?parseTime=true
|
||||
@@ -7,10 +8,13 @@ srt:
|
||||
listenaddr: :6000
|
||||
passphrase: foobarfoobar
|
||||
gb28181:
|
||||
enable: false
|
||||
autoinvite: true
|
||||
sip:
|
||||
listenaddr:
|
||||
- udp::5060
|
||||
# pull:
|
||||
# live/test: dump/34020000001320000001
|
||||
onsub:
|
||||
pull:
|
||||
.* : $0
|
||||
@@ -21,6 +25,9 @@ mp4:
|
||||
onsub:
|
||||
pull:
|
||||
^vod/(.+)$: $1
|
||||
cascadeserver:
|
||||
quic:
|
||||
listenaddr: :44944
|
||||
# llhls:
|
||||
# onpub:
|
||||
# transform:
|
||||
@@ -32,7 +39,41 @@ mp4:
|
||||
# onpub:
|
||||
# transform:
|
||||
# .* : 5s x 3
|
||||
rtsp:
|
||||
#rtsp:
|
||||
# pull:
|
||||
# live/test: rtsp://admin:1qaz2wsx3EDC@giroro.tpddns.cn:1554/Streaming/Channels/101
|
||||
# live/test: rtsp://admin:1qaz2wsx3EDC@localhost:8554/live/test
|
||||
|
||||
snap:
|
||||
enable: false
|
||||
snapsavemanual: false # 手动截图是否保存文件
|
||||
snapwatermark:
|
||||
text: "Monibuca$T{2006-01-02 15:04:05}"
|
||||
fontpath: "/System/Library/Fonts/STHeiti Light.ttc" # mac字体路径
|
||||
# fontpath: "/usr/share/fonts/opentype/noto/NotoSansCJK-Regular.ttc" # linux字体路径 思源黑体
|
||||
# fontpath: "C:/Windows/Fonts/msyh.ttf" # windows字体路径 微软雅黑
|
||||
fontsize: 36
|
||||
fontcolor: "rgba(255,165,0,1)"
|
||||
offsetx: 10
|
||||
offsety: 10
|
||||
snapmode: 1
|
||||
snaptimeinterval: 1s
|
||||
snapsavepath: "./snaps"
|
||||
snapiframeinterval: 3
|
||||
snapquerytimedelta: 3 # 查询截图时允许的最大时间差(秒)
|
||||
filter: "^live/.*"
|
||||
onpub:
|
||||
transform:
|
||||
.* : $0
|
||||
|
||||
crypto:
|
||||
enable: false
|
||||
isstatic: false
|
||||
algo: aes_ctr # 加密算法 支持 aes_ctr xor_c
|
||||
encryptlen: 1024
|
||||
secret:
|
||||
key: your key
|
||||
iv: your iv
|
||||
onpub:
|
||||
transform:
|
||||
.* : $0
|
||||
@@ -5,6 +5,9 @@ import (
|
||||
"flag"
|
||||
|
||||
"m7s.live/v5"
|
||||
_ "m7s.live/v5/plugin/cascade"
|
||||
|
||||
_ "m7s.live/v5/plugin/crypto"
|
||||
_ "m7s.live/v5/plugin/debug"
|
||||
_ "m7s.live/v5/plugin/flv"
|
||||
_ "m7s.live/v5/plugin/gb28181"
|
||||
@@ -16,6 +19,7 @@ import (
|
||||
_ "m7s.live/v5/plugin/rtmp"
|
||||
_ "m7s.live/v5/plugin/rtsp"
|
||||
_ "m7s.live/v5/plugin/sei"
|
||||
_ "m7s.live/v5/plugin/snap"
|
||||
_ "m7s.live/v5/plugin/srt"
|
||||
_ "m7s.live/v5/plugin/stress"
|
||||
_ "m7s.live/v5/plugin/transcode"
|
||||
|
||||
40
go.mod
@@ -11,9 +11,11 @@ require (
|
||||
github.com/cilium/ebpf v0.15.0
|
||||
github.com/cloudwego/goref v0.0.0-20240724113447-685d2a9523c8
|
||||
github.com/deepch/vdk v0.0.27
|
||||
github.com/disintegration/imaging v1.6.2
|
||||
github.com/emiago/sipgo v0.22.0
|
||||
github.com/go-delve/delve v1.23.0
|
||||
github.com/gobwas/ws v1.3.2
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0
|
||||
github.com/google/gopacket v1.1.19
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1
|
||||
github.com/husanpao/ip v0.0.0-20220711082147-73160bb611a8
|
||||
@@ -22,19 +24,20 @@ require (
|
||||
github.com/mcuadros/go-defaults v1.2.0
|
||||
github.com/ncruces/go-sqlite3 v0.18.1
|
||||
github.com/ncruces/go-sqlite3/gormlite v0.18.0
|
||||
github.com/pion/interceptor v0.1.29
|
||||
github.com/pion/interceptor v0.1.37
|
||||
github.com/pion/logging v0.2.2
|
||||
github.com/pion/rtcp v1.2.14
|
||||
github.com/pion/rtp v1.8.6
|
||||
github.com/pion/rtcp v1.2.15
|
||||
github.com/pion/rtp v1.8.10
|
||||
github.com/pion/sdp/v3 v3.0.9
|
||||
github.com/pion/webrtc/v3 v3.2.12
|
||||
github.com/pion/webrtc/v4 v4.0.7
|
||||
github.com/quic-go/quic-go v0.43.1
|
||||
github.com/rs/zerolog v1.33.0
|
||||
github.com/samber/slog-common v0.17.1
|
||||
github.com/shirou/gopsutil/v4 v4.24.8
|
||||
github.com/vishvananda/netlink v1.1.0
|
||||
github.com/yapingcat/gomedia v0.0.0-20240601043430-920523f8e5c7
|
||||
golang.org/x/text v0.17.0
|
||||
golang.org/x/image v0.22.0
|
||||
golang.org/x/text v0.20.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
|
||||
google.golang.org/grpc v1.65.0
|
||||
google.golang.org/protobuf v1.34.2
|
||||
@@ -61,6 +64,7 @@ require (
|
||||
github.com/go-sql-driver/mysql v1.7.0 // indirect
|
||||
github.com/gobwas/httphead v0.1.0 // indirect
|
||||
github.com/gobwas/pool v0.2.1 // indirect
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
|
||||
github.com/golang/snappy v0.0.4 // indirect
|
||||
github.com/google/uuid v1.6.0 // indirect
|
||||
github.com/hashicorp/golang-lru v1.0.2 // indirect
|
||||
@@ -81,16 +85,24 @@ require (
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/ncruces/julianday v1.0.0 // indirect
|
||||
github.com/pion/datachannel v1.5.6 // indirect
|
||||
github.com/pion/datachannel v1.5.10 // indirect
|
||||
github.com/pion/dtls/v2 v2.2.11 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.4 // indirect
|
||||
github.com/pion/ice/v2 v2.3.9 // indirect
|
||||
github.com/pion/ice/v4 v4.0.3 // indirect
|
||||
github.com/pion/mdns v0.0.12 // indirect
|
||||
github.com/pion/mdns/v2 v2.0.7 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/sctp v1.8.16 // indirect
|
||||
github.com/pion/sctp v1.8.35 // indirect
|
||||
github.com/pion/srtp/v2 v2.0.15 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.4 // indirect
|
||||
github.com/pion/stun v0.6.1 // indirect
|
||||
github.com/pion/stun/v3 v3.0.0 // indirect
|
||||
github.com/pion/transport/v2 v2.2.5 // indirect
|
||||
github.com/pion/transport/v3 v3.0.7 // indirect
|
||||
github.com/pion/turn/v2 v2.1.2 // indirect
|
||||
github.com/pion/turn/v4 v4.0.0 // indirect
|
||||
github.com/pion/webrtc/v4 v4.0.7 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
@@ -101,7 +113,7 @@ require (
|
||||
github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b // indirect
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/stretchr/testify v1.9.0 // indirect
|
||||
github.com/stretchr/testify v1.10.0 // indirect
|
||||
github.com/tetratelabs/wazero v1.8.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
@@ -112,9 +124,10 @@ require (
|
||||
github.com/valyala/histogram v1.2.0 // indirect
|
||||
github.com/valyala/quicktemplate v1.8.0 // indirect
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df // indirect
|
||||
github.com/wlynxg/anet v0.0.5 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
golang.org/x/arch v0.6.0 // indirect
|
||||
golang.org/x/sync v0.8.0 // indirect
|
||||
golang.org/x/sync v0.9.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d // indirect
|
||||
)
|
||||
|
||||
@@ -126,16 +139,17 @@ require (
|
||||
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
|
||||
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect
|
||||
github.com/gorilla/websocket v1.5.1
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd
|
||||
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
|
||||
github.com/phsym/console-slog v0.3.1
|
||||
github.com/prometheus/client_golang v1.20.4
|
||||
github.com/quangngotan95/go-m3u8 v0.1.0
|
||||
go.uber.org/mock v0.4.0 // indirect
|
||||
golang.org/x/crypto v0.26.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7 // indirect
|
||||
golang.org/x/crypto v0.29.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7
|
||||
golang.org/x/mod v0.19.0 // indirect
|
||||
golang.org/x/net v0.27.0
|
||||
golang.org/x/sys v0.25.0
|
||||
golang.org/x/net v0.31.0
|
||||
golang.org/x/sys v0.27.0
|
||||
golang.org/x/tools v0.23.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
61
go.sum
@@ -59,6 +59,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deepch/vdk v0.0.27 h1:j/SHaTiZhA47wRpaue8NRp7P9xwOOO/lunxrDJBwcao=
|
||||
github.com/deepch/vdk v0.0.27/go.mod h1:JlgGyR2ld6+xOIHa7XAxJh+stSDBAkdNvIPkUIdIywk=
|
||||
github.com/disintegration/imaging v1.6.2 h1:w1LecBlG2Lnp8B3jk5zSuNqd7b4DXhcjwek1ei82L+c=
|
||||
github.com/disintegration/imaging v1.6.2/go.mod h1:44/5580QXChDfwIclfc/PCwrr44amcmDAg8hxG0Ewe4=
|
||||
github.com/emiago/sipgo v0.22.0 h1:GaQ51m26M9QnVBVY2aDJ/mXqq/BDfZ1A+nW7XgU/4Ts=
|
||||
github.com/emiago/sipgo v0.22.0/go.mod h1:a77FgPEEjJvfYWYfP3p53u+dNhWEMb/VGVS6guvBzx0=
|
||||
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
|
||||
@@ -86,6 +88,10 @@ github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6Wezm
|
||||
github.com/gobwas/ws v1.3.2 h1:zlnbNHxumkRvfPWgfXu8RBwyNR1x8wh9cf5PTOCqs9Q=
|
||||
github.com/gobwas/ws v1.3.2/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY=
|
||||
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk=
|
||||
github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk=
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g=
|
||||
github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
@@ -124,6 +130,8 @@ github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/husanpao/ip v0.0.0-20220711082147-73160bb611a8 h1:4Jk58quTZmzJcTrLlbB5L1Q6qXu49EIjCReWxcBFWKo=
|
||||
github.com/husanpao/ip v0.0.0-20220711082147-73160bb611a8/go.mod h1:medl9/CfYoQlqAXtAARmMW5dAX2UOdwwkhaszYPk0AM=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd h1:EVX1s+XNss9jkRW9K6XGJn2jL2lB1h5H804oKPsxOec=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
|
||||
github.com/icholy/digest v0.1.22 h1:dRIwCjtAcXch57ei+F0HSb5hmprL873+q7PoVojdMzM=
|
||||
github.com/icholy/digest v0.1.22/go.mod h1:uLAeDdWKIWNFMH0wqbwchbTQOmJWhzSnL7zmqSPqEEc=
|
||||
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
|
||||
@@ -207,39 +215,61 @@ github.com/phsym/console-slog v0.3.1/go.mod h1:oJskjp/X6e6c0mGpfP8ELkfKUsrkDifYR
|
||||
github.com/pion/datachannel v1.5.5/go.mod h1:iMz+lECmfdCMqFRhXhcA/219B0SQlbpoR2V118yimL0=
|
||||
github.com/pion/datachannel v1.5.6 h1:1IxKJntfSlYkpUj8LlYRSWpYiTTC02nUrOE8T3DqGeg=
|
||||
github.com/pion/datachannel v1.5.6/go.mod h1:1eKT6Q85pRnr2mHiWHxJwO50SfZRtWHTsNIVb/NfGW4=
|
||||
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
|
||||
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
|
||||
github.com/pion/dtls/v2 v2.2.7/go.mod h1:8WiMkebSHFD0T+dIU+UeBaoV7kDhOW5oDCzZ7WZ/F9s=
|
||||
github.com/pion/dtls/v2 v2.2.11 h1:9U/dpCYl1ySttROPWJgqWKEylUdT0fXp/xst6JwY5Ks=
|
||||
github.com/pion/dtls/v2 v2.2.11/go.mod h1:d9SYc9fch0CqK90mRk1dC7AkzzpwJj6u2GU3u+9pqFE=
|
||||
github.com/pion/dtls/v3 v3.0.4 h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U=
|
||||
github.com/pion/dtls/v3 v3.0.4/go.mod h1:R373CsjxWqNPf6MEkfdy3aSe9niZvL/JaKlGeFphtMg=
|
||||
github.com/pion/ice v0.7.18 h1:KbAWlzWRUdX9SmehBh3gYpIFsirjhSQsCw6K2MjYMK0=
|
||||
github.com/pion/ice/v2 v2.3.9 h1:7yZpHf3PhPxJGT4JkMj1Y8Rl5cQ6fB709iz99aeMd/U=
|
||||
github.com/pion/ice/v2 v2.3.9/go.mod h1:lT3kv5uUIlHfXHU/ZRD7uKD/ufM202+eTa3C/umgGf4=
|
||||
github.com/pion/ice/v4 v4.0.3 h1:9s5rI1WKzF5DRqhJ+Id8bls/8PzM7mau0mj1WZb4IXE=
|
||||
github.com/pion/ice/v4 v4.0.3/go.mod h1:VfHy0beAZ5loDT7BmJ2LtMtC4dbawIkkkejHPRZNB3Y=
|
||||
github.com/pion/interceptor v0.1.17/go.mod h1:SY8kpmfVBvrbUzvj2bsXz7OJt5JvmVNZ+4Kjq7FcwrI=
|
||||
github.com/pion/interceptor v0.1.29 h1:39fsnlP1U8gw2JzOFWdfCU82vHvhW9o0rZnZF56wF+M=
|
||||
github.com/pion/interceptor v0.1.29/go.mod h1:ri+LGNjRUc5xUNtDEPzfdkmSqISixVTBF/z/Zms/6T4=
|
||||
github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
|
||||
github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
|
||||
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
|
||||
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
|
||||
github.com/pion/mdns v0.0.7/go.mod h1:4iP2UbeFhLI/vWju/bw6ZfwjJzk0z8DNValjGxR/dD8=
|
||||
github.com/pion/mdns v0.0.12 h1:CiMYlY+O0azojWDmxdNr7ADGrnZ+V6Ilfner+6mSVK8=
|
||||
github.com/pion/mdns v0.0.12/go.mod h1:VExJjv8to/6Wqm1FXK+Ii/Z9tsVk/F5sD/N70cnYFbk=
|
||||
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
|
||||
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
|
||||
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||
github.com/pion/rtcp v1.2.10/go.mod h1:ztfEwXZNLGyF1oQDttz/ZKIBaeeg/oWbRYqzBM9TL1I=
|
||||
github.com/pion/rtcp v1.2.14 h1:KCkGV3vJ+4DAJmvP0vaQShsb0xkRfWkO540Gy102KyE=
|
||||
github.com/pion/rtcp v1.2.14/go.mod h1:sn6qjxvnwyAkkPzPULIbVqSKI5Dv54Rv7VG0kNxh9L4=
|
||||
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
|
||||
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
|
||||
github.com/pion/rtp v1.7.13/go.mod h1:bDb5n+BFZxXx0Ea7E5qe+klMuqiBrP+w8XSjiWtCUko=
|
||||
github.com/pion/rtp v1.8.6 h1:MTmn/b0aWWsAzux2AmP8WGllusBVw4NPYPVFFd7jUPw=
|
||||
github.com/pion/rtp v1.8.6/go.mod h1:pBGHaFt/yW7bf1jjWAoUjpSNoDnw98KTMg+jWWvziqU=
|
||||
github.com/pion/rtp v1.8.10 h1:puphjdbjPB+L+NFaVuZ5h6bt1g5q4kFIoI+r5q/g0CU=
|
||||
github.com/pion/rtp v1.8.10/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4=
|
||||
github.com/pion/sctp v1.8.5/go.mod h1:SUFFfDpViyKejTAdwD1d/HQsCu+V/40cCs2nZIvC3s0=
|
||||
github.com/pion/sctp v1.8.7/go.mod h1:g1Ul+ARqZq5JEmoFy87Q/4CePtKnTJ1QCL9dBBdN6AU=
|
||||
github.com/pion/sctp v1.8.13/go.mod h1:YKSgO/bO/6aOMP9LCie1DuD7m+GamiK2yIiPM6vH+GA=
|
||||
github.com/pion/sctp v1.8.16 h1:PKrMs+o9EMLRvFfXq59WFsC+V8mN1wnKzqrv+3D/gYY=
|
||||
github.com/pion/sctp v1.8.16/go.mod h1:P6PbDVA++OJMrVNg2AL3XtYHV4uD6dvfyOovCgMs0PE=
|
||||
github.com/pion/sctp v1.8.35 h1:qwtKvNK1Wc5tHMIYgTDJhfZk7vATGVHhXbUDfHbYwzA=
|
||||
github.com/pion/sctp v1.8.35/go.mod h1:EcXP8zCYVTRy3W9xtOF7wJm1L1aXfKRQzaM33SjQlzg=
|
||||
github.com/pion/sdp/v3 v3.0.6/go.mod h1:iiFWFpQO8Fy3S5ldclBkpXqmWy02ns78NOKoLLL0YQw=
|
||||
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
|
||||
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
|
||||
github.com/pion/srtp v1.5.2 h1:25DmvH+fqKZDqvX64vTwnycVwL9ooJxHF/gkX16bDBY=
|
||||
github.com/pion/srtp/v2 v2.0.15 h1:+tqRtXGsGwHC0G0IUIAzRmdkHvriF79IHVfZGfHrQoA=
|
||||
github.com/pion/srtp/v2 v2.0.15/go.mod h1:b/pQOlDrbB0HEH5EUAQXzSYxikFbNcNuKmF8tM0hCtw=
|
||||
github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M=
|
||||
github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ=
|
||||
github.com/pion/stun v0.6.1 h1:8lp6YejULeHBF8NmV8e2787BogQhduZugh5PdhDyyN4=
|
||||
github.com/pion/stun v0.6.1/go.mod h1:/hO7APkX4hZKu/D0f2lHzNyvdkTGtIy3NDmLR7kSz/8=
|
||||
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
|
||||
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
|
||||
github.com/pion/transport v0.14.1 h1:XSM6olwW+o8J4SCmOBb/BpwZypkHeyM0PGFCxNQBr40=
|
||||
github.com/pion/transport v0.14.1/go.mod h1:4tGmbk00NeYA3rUa9+n+dzCCoKkcy3YlYb99Jn2fNnI=
|
||||
github.com/pion/transport/v2 v2.0.0/go.mod h1:HS2MEBJTwD+1ZI2eSXSvHJx/HnzQqRy2/LXxt6eVMHc=
|
||||
@@ -252,10 +282,16 @@ github.com/pion/transport/v2 v2.2.5/go.mod h1:q2U/tf9FEfnSBGSW6w5Qp5PFWRLRj3NjLh
|
||||
github.com/pion/transport/v3 v3.0.1/go.mod h1:UY7kiITrlMv7/IKgd5eTUcaahZx5oUN3l9SzK5f5xE0=
|
||||
github.com/pion/transport/v3 v3.0.2 h1:r+40RJR25S9w3jbA6/5uEPTzcdn7ncyU44RWCbHkLg4=
|
||||
github.com/pion/transport/v3 v3.0.2/go.mod h1:nIToODoOlb5If2jF9y2Igfx3PFYWfuXi37m0IlWa/D0=
|
||||
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
|
||||
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
|
||||
github.com/pion/turn/v2 v2.1.2 h1:wj0cAoGKltaZ790XEGW9HwoUewqjliwmhtxCuB2ApyM=
|
||||
github.com/pion/turn/v2 v2.1.2/go.mod h1:1kjnPkBcex3dhCU2Am+AAmxDcGhLX3WnMfmkNpvSTQU=
|
||||
github.com/pion/webrtc/v3 v3.2.12 h1:pVqz5NdtTqyhKIhMcXR8bPp709kCf9blyAhDjoVRLvA=
|
||||
github.com/pion/webrtc/v3 v3.2.12/go.mod h1:/Oz6K95CGWaN+3No+Z0NYvgOPOr3aY8UyTlMm/dec3A=
|
||||
github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
|
||||
github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
|
||||
github.com/pion/webrtc/v4 v3.2.12 h1:pVqz5NdtTqyhKIhMcXR8bPp709kCf9blyAhDjoVRLvA=
|
||||
github.com/pion/webrtc/v4 v3.2.12/go.mod h1:/Oz6K95CGWaN+3No+Z0NYvgOPOr3aY8UyTlMm/dec3A=
|
||||
github.com/pion/webrtc/v4 v4.0.7 h1:aeq78uVnFZd2umXW0O9A2VFQYuS7+BZxWetQvSp2jPo=
|
||||
github.com/pion/webrtc/v4 v4.0.7/go.mod h1:oFVBBVSHU3vAEwSgnk3BuKCwAUwpDwQhko1EDwyZWbU=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE=
|
||||
@@ -318,6 +354,8 @@ github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXl
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/sunfish-shogi/bufseekio v0.0.0-20210207115823-a4185644b365/go.mod h1:dEzdXgvImkQ3WLI+0KQpmEx8T/C/ma9KeS3AfmU899I=
|
||||
github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g=
|
||||
github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs=
|
||||
@@ -341,6 +379,8 @@ github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJ
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/wlynxg/anet v0.0.5 h1:J3VJGi1gvo0JwZ/P1/Yc/8p63SoW98B5dHkYDmpgvvU=
|
||||
github.com/wlynxg/anet v0.0.5/go.mod h1:eay5PRQr7fIVAMbTbchTnO9gG65Hg/uYGdc7mguHxoA=
|
||||
github.com/yapingcat/gomedia v0.0.0-20240601043430-920523f8e5c7 h1:e9n2WNcfvs20aLgpDhKoaJgrU/EeAvuNnWLBm31Q5Fw=
|
||||
github.com/yapingcat/gomedia v0.0.0-20240601043430-920523f8e5c7/go.mod h1:WSZ59bidJOO40JSJmLqlkBJrjZCtjbKKkygEMfzY/kc=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
@@ -363,8 +403,13 @@ golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDf
|
||||
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
|
||||
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
|
||||
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
|
||||
golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
|
||||
golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
|
||||
golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7 h1:wDLEX9a7YQoKdKNQt88rtydkqDxeGaBUTnIYc3iG/mA=
|
||||
golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/image v0.22.0 h1:UtK5yLUzilVrkjMAZAZ34DXGpASN8i8pj8g+O+yd10g=
|
||||
golang.org/x/image v0.22.0/go.mod h1:9hPFhljd4zZ1GNSIZJ49sqbp45GKK9t6w+iXvGqZUz4=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
@@ -395,13 +440,15 @@ golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
|
||||
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
|
||||
golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
|
||||
golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
|
||||
golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
|
||||
golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
|
||||
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -443,6 +490,8 @@ golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34=
|
||||
golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
|
||||
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -468,8 +517,8 @@ golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
|
||||
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
|
||||
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
|
||||
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
|
||||
24
goreleaser.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
project_name: m7s
|
||||
archives:
|
||||
-
|
||||
files:
|
||||
- favicon.ico
|
||||
builds:
|
||||
- id: "all"
|
||||
main: ./example/default/main.go
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
tags:
|
||||
- sqlite
|
||||
ldflags:
|
||||
- -s -w -X main.version={{.Tag}}
|
||||
goos:
|
||||
- linux
|
||||
- windows
|
||||
- darwin
|
||||
goarch:
|
||||
- arm64
|
||||
- amd64
|
||||
hooks:
|
||||
pre:
|
||||
- go mod tidy
|
||||
686
pb/auth.pb.go
Normal file
@@ -0,0 +1,686 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.19.1
|
||||
// source: auth.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type LoginRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
|
||||
Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
|
||||
}
|
||||
|
||||
func (x *LoginRequest) Reset() {
|
||||
*x = LoginRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_auth_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *LoginRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*LoginRequest) ProtoMessage() {}
|
||||
|
||||
func (x *LoginRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_auth_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use LoginRequest.ProtoReflect.Descriptor instead.
|
||||
func (*LoginRequest) Descriptor() ([]byte, []int) {
|
||||
return file_auth_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *LoginRequest) GetUsername() string {
|
||||
if x != nil {
|
||||
return x.Username
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *LoginRequest) GetPassword() string {
|
||||
if x != nil {
|
||||
return x.Password
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type LoginSuccess struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"`
|
||||
UserInfo *UserInfo `protobuf:"bytes,2,opt,name=userInfo,proto3" json:"userInfo,omitempty"`
|
||||
}
|
||||
|
||||
func (x *LoginSuccess) Reset() {
|
||||
*x = LoginSuccess{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_auth_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *LoginSuccess) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*LoginSuccess) ProtoMessage() {}
|
||||
|
||||
func (x *LoginSuccess) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_auth_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use LoginSuccess.ProtoReflect.Descriptor instead.
|
||||
func (*LoginSuccess) Descriptor() ([]byte, []int) {
|
||||
return file_auth_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *LoginSuccess) GetToken() string {
|
||||
if x != nil {
|
||||
return x.Token
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *LoginSuccess) GetUserInfo() *UserInfo {
|
||||
if x != nil {
|
||||
return x.UserInfo
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type LoginResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
Data *LoginSuccess `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (x *LoginResponse) Reset() {
|
||||
*x = LoginResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_auth_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *LoginResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*LoginResponse) ProtoMessage() {}
|
||||
|
||||
func (x *LoginResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_auth_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use LoginResponse.ProtoReflect.Descriptor instead.
|
||||
func (*LoginResponse) Descriptor() ([]byte, []int) {
|
||||
return file_auth_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *LoginResponse) GetCode() int32 {
|
||||
if x != nil {
|
||||
return x.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *LoginResponse) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *LoginResponse) GetData() *LoginSuccess {
|
||||
if x != nil {
|
||||
return x.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type LogoutRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"`
|
||||
}
|
||||
|
||||
func (x *LogoutRequest) Reset() {
|
||||
*x = LogoutRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_auth_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *LogoutRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*LogoutRequest) ProtoMessage() {}
|
||||
|
||||
func (x *LogoutRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_auth_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use LogoutRequest.ProtoReflect.Descriptor instead.
|
||||
func (*LogoutRequest) Descriptor() ([]byte, []int) {
|
||||
return file_auth_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *LogoutRequest) GetToken() string {
|
||||
if x != nil {
|
||||
return x.Token
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type LogoutResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
}
|
||||
|
||||
func (x *LogoutResponse) Reset() {
|
||||
*x = LogoutResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_auth_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *LogoutResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*LogoutResponse) ProtoMessage() {}
|
||||
|
||||
func (x *LogoutResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_auth_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use LogoutResponse.ProtoReflect.Descriptor instead.
|
||||
func (*LogoutResponse) Descriptor() ([]byte, []int) {
|
||||
return file_auth_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *LogoutResponse) GetCode() int32 {
|
||||
if x != nil {
|
||||
return x.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *LogoutResponse) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type UserInfoRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"`
|
||||
}
|
||||
|
||||
func (x *UserInfoRequest) Reset() {
|
||||
*x = UserInfoRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_auth_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *UserInfoRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*UserInfoRequest) ProtoMessage() {}
|
||||
|
||||
func (x *UserInfoRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_auth_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use UserInfoRequest.ProtoReflect.Descriptor instead.
|
||||
func (*UserInfoRequest) Descriptor() ([]byte, []int) {
|
||||
return file_auth_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *UserInfoRequest) GetToken() string {
|
||||
if x != nil {
|
||||
return x.Token
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type UserInfo struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
|
||||
ExpiresAt int64 `protobuf:"varint,2,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` // Token expiration timestamp
|
||||
}
|
||||
|
||||
func (x *UserInfo) Reset() {
|
||||
*x = UserInfo{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_auth_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *UserInfo) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*UserInfo) ProtoMessage() {}
|
||||
|
||||
func (x *UserInfo) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_auth_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use UserInfo.ProtoReflect.Descriptor instead.
|
||||
func (*UserInfo) Descriptor() ([]byte, []int) {
|
||||
return file_auth_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *UserInfo) GetUsername() string {
|
||||
if x != nil {
|
||||
return x.Username
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *UserInfo) GetExpiresAt() int64 {
|
||||
if x != nil {
|
||||
return x.ExpiresAt
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type UserInfoResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
Data *UserInfo `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (x *UserInfoResponse) Reset() {
|
||||
*x = UserInfoResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_auth_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *UserInfoResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*UserInfoResponse) ProtoMessage() {}
|
||||
|
||||
func (x *UserInfoResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_auth_proto_msgTypes[7]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use UserInfoResponse.ProtoReflect.Descriptor instead.
|
||||
func (*UserInfoResponse) Descriptor() ([]byte, []int) {
|
||||
return file_auth_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *UserInfoResponse) GetCode() int32 {
|
||||
if x != nil {
|
||||
return x.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *UserInfoResponse) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *UserInfoResponse) GetData() *UserInfo {
|
||||
if x != nil {
|
||||
return x.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_auth_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_auth_proto_rawDesc = []byte{
|
||||
0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62,
|
||||
0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
|
||||
0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x46,
|
||||
0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a,
|
||||
0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61,
|
||||
0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61,
|
||||
0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x4e, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x53,
|
||||
0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x28, 0x0a, 0x08,
|
||||
0x75, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c,
|
||||
0x2e, 0x70, 0x62, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x75, 0x73,
|
||||
0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x63, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d,
|
||||
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65,
|
||||
0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20,
|
||||
0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x53, 0x75,
|
||||
0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0x0a, 0x0d, 0x4c,
|
||||
0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05,
|
||||
0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b,
|
||||
0x65, 0x6e, 0x22, 0x3e, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70,
|
||||
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01,
|
||||
0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73,
|
||||
0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61,
|
||||
0x67, 0x65, 0x22, 0x27, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65,
|
||||
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x45, 0x0a, 0x08, 0x55,
|
||||
0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61,
|
||||
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73,
|
||||
0x41, 0x74, 0x22, 0x62, 0x0a, 0x10, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
|
||||
0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
|
||||
0x73, 0x61, 0x67, 0x65, 0x12, 0x20, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f,
|
||||
0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x32, 0xf4, 0x01, 0x0a, 0x04, 0x41, 0x75, 0x74, 0x68, 0x12,
|
||||
0x48, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f,
|
||||
0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x70, 0x62, 0x2e,
|
||||
0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x14, 0x22, 0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x75, 0x74, 0x68,
|
||||
0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x6e, 0x3a, 0x01, 0x2a, 0x12, 0x4c, 0x0a, 0x06, 0x4c, 0x6f, 0x67,
|
||||
0x6f, 0x75, 0x74, 0x12, 0x11, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52,
|
||||
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x67, 0x6f,
|
||||
0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93,
|
||||
0x02, 0x15, 0x22, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x6c, 0x6f,
|
||||
0x67, 0x6f, 0x75, 0x74, 0x3a, 0x01, 0x2a, 0x12, 0x54, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x55, 0x73,
|
||||
0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x73, 0x65, 0x72,
|
||||
0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x70, 0x62,
|
||||
0x2e, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
|
||||
0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f,
|
||||
0x61, 0x75, 0x74, 0x68, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x69, 0x6e, 0x66, 0x6f, 0x42, 0x10, 0x5a,
|
||||
0x0e, 0x6d, 0x37, 0x73, 0x2e, 0x6c, 0x69, 0x76, 0x65, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x62, 0x62,
|
||||
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_auth_proto_rawDescOnce sync.Once
|
||||
file_auth_proto_rawDescData = file_auth_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_auth_proto_rawDescGZIP() []byte {
|
||||
file_auth_proto_rawDescOnce.Do(func() {
|
||||
file_auth_proto_rawDescData = protoimpl.X.CompressGZIP(file_auth_proto_rawDescData)
|
||||
})
|
||||
return file_auth_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_auth_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
|
||||
var file_auth_proto_goTypes = []interface{}{
|
||||
(*LoginRequest)(nil), // 0: pb.LoginRequest
|
||||
(*LoginSuccess)(nil), // 1: pb.LoginSuccess
|
||||
(*LoginResponse)(nil), // 2: pb.LoginResponse
|
||||
(*LogoutRequest)(nil), // 3: pb.LogoutRequest
|
||||
(*LogoutResponse)(nil), // 4: pb.LogoutResponse
|
||||
(*UserInfoRequest)(nil), // 5: pb.UserInfoRequest
|
||||
(*UserInfo)(nil), // 6: pb.UserInfo
|
||||
(*UserInfoResponse)(nil), // 7: pb.UserInfoResponse
|
||||
}
|
||||
var file_auth_proto_depIdxs = []int32{
|
||||
6, // 0: pb.LoginSuccess.userInfo:type_name -> pb.UserInfo
|
||||
	1, // 1: pb.LoginResponse.data:type_name -> pb.LoginSuccess
	6, // 2: pb.UserInfoResponse.data:type_name -> pb.UserInfo
	0, // 3: pb.Auth.Login:input_type -> pb.LoginRequest
	3, // 4: pb.Auth.Logout:input_type -> pb.LogoutRequest
	5, // 5: pb.Auth.GetUserInfo:input_type -> pb.UserInfoRequest
	2, // 6: pb.Auth.Login:output_type -> pb.LoginResponse
	4, // 7: pb.Auth.Logout:output_type -> pb.LogoutResponse
	7, // 8: pb.Auth.GetUserInfo:output_type -> pb.UserInfoResponse
	6, // [6:9] is the sub-list for method output_type
	3, // [3:6] is the sub-list for method input_type
	3, // [3:3] is the sub-list for extension type_name
	3, // [3:3] is the sub-list for extension extendee
	0, // [0:3] is the sub-list for field type_name
}

func init() { file_auth_proto_init() }
func file_auth_proto_init() {
	if File_auth_proto != nil {
		return
	}
	if !protoimpl.UnsafeEnabled {
		file_auth_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*LoginRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_auth_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*LoginSuccess); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_auth_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*LoginResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_auth_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*LogoutRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_auth_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*LogoutResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_auth_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*UserInfoRequest); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_auth_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*UserInfo); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
		file_auth_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
			switch v := v.(*UserInfoResponse); i {
			case 0:
				return &v.state
			case 1:
				return &v.sizeCache
			case 2:
				return &v.unknownFields
			default:
				return nil
			}
		}
	}
	type x struct{}
	out := protoimpl.TypeBuilder{
		File: protoimpl.DescBuilder{
			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
			RawDescriptor: file_auth_proto_rawDesc,
			NumEnums:      0,
			NumMessages:   8,
			NumExtensions: 0,
			NumServices:   1,
		},
		GoTypes:           file_auth_proto_goTypes,
		DependencyIndexes: file_auth_proto_depIdxs,
		MessageInfos:      file_auth_proto_msgTypes,
	}.Build()
	File_auth_proto = out.File
	file_auth_proto_rawDesc = nil
	file_auth_proto_goTypes = nil
	file_auth_proto_depIdxs = nil
}
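The generated message types above are ordinary protobuf-go messages. A minimal usage sketch, not part of the generated code; the import path comes from the `go_package` option declared in `pb/auth.proto` below, and the credential values are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/proto"

	"m7s.live/v5/pb" // go_package declared in pb/auth.proto
)

func main() {
	// Round-trip a LoginRequest through the protobuf wire format.
	req := &pb.LoginRequest{Username: "admin", Password: "secret"} // placeholder values
	raw, err := proto.Marshal(req)
	if err != nil {
		log.Fatal(err)
	}
	var decoded pb.LoginRequest
	if err := proto.Unmarshal(raw, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(decoded.GetUsername())
}
```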
327
pb/auth.pb.gw.go
Normal file
327
pb/auth.pb.gw.go
Normal file
@@ -0,0 +1,327 @@
|
||||
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
|
||||
// source: auth.proto
|
||||
|
||||
/*
|
||||
Package pb is a reverse proxy.
|
||||
|
||||
It translates gRPC into RESTful JSON APIs.
|
||||
*/
|
||||
package pb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// Suppress "imported and not used" errors
|
||||
var _ codes.Code
|
||||
var _ io.Reader
|
||||
var _ status.Status
|
||||
var _ = runtime.String
|
||||
var _ = utilities.NewDoubleArray
|
||||
var _ = metadata.Join
|
||||
|
||||
func request_Auth_Login_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq LoginRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.Login(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Auth_Login_0(ctx context.Context, marshaler runtime.Marshaler, server AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq LoginRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.Login(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_Auth_Logout_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq LogoutRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.Logout(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Auth_Logout_0(ctx context.Context, marshaler runtime.Marshaler, server AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq LogoutRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.Logout(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
var (
|
||||
filter_Auth_GetUserInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
|
||||
)
|
||||
|
||||
func request_Auth_GetUserInfo_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq UserInfoRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Auth_GetUserInfo_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.GetUserInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Auth_GetUserInfo_0(ctx context.Context, marshaler runtime.Marshaler, server AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq UserInfoRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Auth_GetUserInfo_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.GetUserInfo(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
// RegisterAuthHandlerServer registers the http handlers for service Auth to "mux".
|
||||
// UnaryRPC :call AuthServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAuthHandlerFromEndpoint instead.
|
||||
func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, server AuthServer) error {
|
||||
|
||||
mux.Handle("POST", pattern_Auth_Login_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.Auth/Login", runtime.WithHTTPPathPattern("/api/auth/login"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Auth_Login_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Auth_Login_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_Auth_Logout_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.Auth/Logout", runtime.WithHTTPPathPattern("/api/auth/logout"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Auth_Logout_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Auth_Logout_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Auth_GetUserInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.Auth/GetUserInfo", runtime.WithHTTPPathPattern("/api/auth/userinfo"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Auth_GetUserInfo_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Auth_GetUserInfo_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterAuthHandlerFromEndpoint is same as RegisterAuthHandler but
|
||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
||||
func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
||||
conn, err := grpc.DialContext(ctx, endpoint, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
}()
|
||||
}()
|
||||
|
||||
return RegisterAuthHandler(ctx, mux, conn)
|
||||
}
|
||||
|
||||
// RegisterAuthHandler registers the http handlers for service Auth to "mux".
|
||||
// The handlers forward requests to the grpc endpoint over "conn".
|
||||
func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
|
||||
return RegisterAuthHandlerClient(ctx, mux, NewAuthClient(conn))
|
||||
}
|
||||
|
||||
// RegisterAuthHandlerClient registers the http handlers for service Auth
|
||||
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient".
|
||||
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient"
|
||||
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
|
||||
// "AuthClient" to call the correct interceptors.
|
||||
func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AuthClient) error {
|
||||
|
||||
mux.Handle("POST", pattern_Auth_Login_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.Auth/Login", runtime.WithHTTPPathPattern("/api/auth/login"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Auth_Login_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Auth_Login_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_Auth_Logout_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.Auth/Logout", runtime.WithHTTPPathPattern("/api/auth/logout"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Auth_Logout_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Auth_Logout_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Auth_GetUserInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.Auth/GetUserInfo", runtime.WithHTTPPathPattern("/api/auth/userinfo"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Auth_GetUserInfo_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Auth_GetUserInfo_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_Auth_Login_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "auth", "login"}, ""))
|
||||
|
||||
pattern_Auth_Logout_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "auth", "logout"}, ""))
|
||||
|
||||
pattern_Auth_GetUserInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "auth", "userinfo"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
forward_Auth_Login_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Auth_Logout_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Auth_GetUserInfo_0 = runtime.ForwardResponseMessage
|
||||
)
|
||||
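RegisterAuthHandlerServer, RegisterAuthHandler and RegisterAuthHandlerFromEndpoint above are the three ways to attach these REST handlers to a runtime.ServeMux. A minimal wiring sketch, not part of the generated code; the listen addresses are placeholders, and `authServer` here is only a stub that embeds UnimplementedAuthServer:

```go
package main

import (
	"context"
	"log"
	"net"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"m7s.live/v5/pb"
)

// authServer is a stub implementation; every method answers codes.Unimplemented
// until it is overridden.
type authServer struct {
	pb.UnimplementedAuthServer
}

func main() {
	// Serve the gRPC implementation on a local port (placeholder address).
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatal(err)
	}
	grpcServer := grpc.NewServer()
	pb.RegisterAuthServer(grpcServer, &authServer{})
	go func() { log.Fatal(grpcServer.Serve(lis)) }()

	// Translate gRPC into the RESTful JSON API via the generated gateway stubs.
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	if err := pb.RegisterAuthHandlerFromEndpoint(context.Background(), mux, "localhost:50051", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```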
65
pb/auth.proto
Normal file
65
pb/auth.proto
Normal file
@@ -0,0 +1,65 @@
syntax = "proto3";
package pb;
option go_package = "m7s.live/v5/pb";

import "google/api/annotations.proto";

message LoginRequest {
  string username = 1;
  string password = 2;
}

message LoginSuccess {
  string token = 1;
  UserInfo userInfo = 2;
}

message LoginResponse {
  int32 code = 1;
  string message = 2;
  LoginSuccess data = 3;
}

message LogoutRequest {
  string token = 1;
}

message LogoutResponse {
  int32 code = 1;
  string message = 2;
}

message UserInfoRequest {
  string token = 1;
}

message UserInfo {
  string username = 1;
  int64 expires_at = 2; // Token expiration timestamp
}

message UserInfoResponse {
  int32 code = 1;
  string message = 2;
  UserInfo data = 3;
}

service Auth {
  rpc Login(LoginRequest) returns (LoginResponse) {
    option (google.api.http) = {
      post: "/api/auth/login"
      body: "*"
    };
  }
  rpc Logout(LogoutRequest) returns (LogoutResponse) {
    option (google.api.http) = {
      post: "/api/auth/logout"
      body: "*"
    };
  }
  rpc GetUserInfo(UserInfoRequest) returns (UserInfoResponse) {
    option (google.api.http) = {
      get: "/api/auth/userinfo"
    };
  }
}
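The google.api.http options above map each rpc to a REST route (`POST /api/auth/login`, `POST /api/auth/logout`, `GET /api/auth/userinfo`). A minimal sketch of calling the login route through the gateway, not part of the diff; the host, port and credentials are placeholders:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// JSON field names follow the proto fields of LoginRequest.
	body := bytes.NewBufferString(`{"username":"admin","password":"secret"}`)
	resp, err := http.Post("http://localhost:8080/api/auth/login", "application/json", body)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// The body is a JSON-encoded LoginResponse (code, message, data).
	data, _ := io.ReadAll(resp.Body)
	fmt.Println(string(data))
}
```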
177
pb/auth_grpc.pb.go
Normal file
177
pb/auth_grpc.pb.go
Normal file
@@ -0,0 +1,177 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc             v3.19.1
// source: auth.proto

package pb

import (
	context "context"
	grpc "google.golang.org/grpc"
	codes "google.golang.org/grpc/codes"
	status "google.golang.org/grpc/status"
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// AuthClient is the client API for Auth service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type AuthClient interface {
	Login(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginResponse, error)
	Logout(ctx context.Context, in *LogoutRequest, opts ...grpc.CallOption) (*LogoutResponse, error)
	GetUserInfo(ctx context.Context, in *UserInfoRequest, opts ...grpc.CallOption) (*UserInfoResponse, error)
}

type authClient struct {
	cc grpc.ClientConnInterface
}

func NewAuthClient(cc grpc.ClientConnInterface) AuthClient {
	return &authClient{cc}
}

func (c *authClient) Login(ctx context.Context, in *LoginRequest, opts ...grpc.CallOption) (*LoginResponse, error) {
	out := new(LoginResponse)
	err := c.cc.Invoke(ctx, "/pb.Auth/Login", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *authClient) Logout(ctx context.Context, in *LogoutRequest, opts ...grpc.CallOption) (*LogoutResponse, error) {
	out := new(LogoutResponse)
	err := c.cc.Invoke(ctx, "/pb.Auth/Logout", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *authClient) GetUserInfo(ctx context.Context, in *UserInfoRequest, opts ...grpc.CallOption) (*UserInfoResponse, error) {
	out := new(UserInfoResponse)
	err := c.cc.Invoke(ctx, "/pb.Auth/GetUserInfo", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// AuthServer is the server API for Auth service.
// All implementations must embed UnimplementedAuthServer
// for forward compatibility
type AuthServer interface {
	Login(context.Context, *LoginRequest) (*LoginResponse, error)
	Logout(context.Context, *LogoutRequest) (*LogoutResponse, error)
	GetUserInfo(context.Context, *UserInfoRequest) (*UserInfoResponse, error)
	mustEmbedUnimplementedAuthServer()
}

// UnimplementedAuthServer must be embedded to have forward compatible implementations.
type UnimplementedAuthServer struct {
}

func (UnimplementedAuthServer) Login(context.Context, *LoginRequest) (*LoginResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Login not implemented")
}
func (UnimplementedAuthServer) Logout(context.Context, *LogoutRequest) (*LogoutResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Logout not implemented")
}
func (UnimplementedAuthServer) GetUserInfo(context.Context, *UserInfoRequest) (*UserInfoResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetUserInfo not implemented")
}
func (UnimplementedAuthServer) mustEmbedUnimplementedAuthServer() {}

// UnsafeAuthServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to AuthServer will
// result in compilation errors.
type UnsafeAuthServer interface {
	mustEmbedUnimplementedAuthServer()
}

func RegisterAuthServer(s grpc.ServiceRegistrar, srv AuthServer) {
	s.RegisterService(&Auth_ServiceDesc, srv)
}

func _Auth_Login_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(LoginRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(AuthServer).Login(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/pb.Auth/Login",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(AuthServer).Login(ctx, req.(*LoginRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Auth_Logout_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(LogoutRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(AuthServer).Logout(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/pb.Auth/Logout",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(AuthServer).Logout(ctx, req.(*LogoutRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Auth_GetUserInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UserInfoRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(AuthServer).GetUserInfo(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/pb.Auth/GetUserInfo",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(AuthServer).GetUserInfo(ctx, req.(*UserInfoRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// Auth_ServiceDesc is the grpc.ServiceDesc for Auth service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Auth_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "pb.Auth",
	HandlerType: (*AuthServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "Login",
			Handler:    _Auth_Login_Handler,
		},
		{
			MethodName: "Logout",
			Handler:    _Auth_Logout_Handler,
		},
		{
			MethodName: "GetUserInfo",
			Handler:    _Auth_GetUserInfo_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "auth.proto",
}
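On the client side, NewAuthClient wraps any grpc.ClientConnInterface. A minimal call sketch, not part of the generated code; the target address and login values are placeholders, and the getters used are the ones generated in pb/auth.pb.go:

```go
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"m7s.live/v5/pb"
)

func main() {
	// Dial the gRPC endpoint (placeholder address, no TLS for brevity).
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	client := pb.NewAuthClient(conn)
	resp, err := client.Login(ctx, &pb.LoginRequest{Username: "admin", Password: "secret"})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("token:", resp.GetData().GetToken())
}
```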
3343
pb/global.pb.go
3343
pb/global.pb.go
File diff suppressed because it is too large
1156
pb/global.pb.gw.go
1156
pb/global.pb.gw.go
File diff suppressed because it is too large
210
pb/global.proto
210
pb/global.proto
@@ -13,6 +13,11 @@ service api {
|
||||
get: "/api/sysinfo"
|
||||
};
|
||||
}
|
||||
rpc DisabledPlugins (google.protobuf.Empty) returns (DisabledPluginsResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/plugins/disabled"
|
||||
};
|
||||
}
|
||||
rpc Summary (google.protobuf.Empty) returns (SummaryResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/summary"
|
||||
@@ -20,12 +25,12 @@ service api {
|
||||
}
|
||||
rpc Shutdown (RequestWithId) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/shutdown/{id}"
|
||||
post: "/api/shutdown"
|
||||
};
|
||||
}
|
||||
rpc Restart (RequestWithId) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/restart/{id}"
|
||||
post: "/api/restart"
|
||||
};
|
||||
}
|
||||
rpc TaskTree (google.protobuf.Empty) returns (TaskTreeResponse) {
|
||||
@@ -126,6 +131,17 @@ service api {
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
rpc GetConfigFile (google.protobuf.Empty) returns (GetConfigFileResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/config/file"
|
||||
};
|
||||
}
|
||||
rpc UpdateConfigFile (UpdateConfigFileRequest) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/config/file/update"
|
||||
body: "content"
|
||||
};
|
||||
}
|
||||
rpc GetConfig (GetConfigRequest) returns (GetConfigResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/config/get/{name}"
|
||||
@@ -142,26 +158,64 @@ service api {
|
||||
body: "yaml"
|
||||
};
|
||||
}
|
||||
rpc GetDeviceList (google.protobuf.Empty) returns (DeviceListResponse) {
|
||||
rpc GetPullProxyList (google.protobuf.Empty) returns (PullProxyListResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/device/list"
|
||||
get: "/api/proxy/pull/list"
|
||||
additional_bindings {
|
||||
get: "/api/device/list"
|
||||
}
|
||||
};
|
||||
}
|
||||
rpc AddDevice (DeviceInfo) returns (SuccessResponse) {
|
||||
rpc AddPullProxy (PullProxyInfo) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/device/add"
|
||||
post: "/api/proxy/pull/add"
|
||||
body: "*"
|
||||
additional_bindings {
|
||||
post: "/api/device/add"
|
||||
body: "*"
|
||||
}
|
||||
};
|
||||
}
|
||||
rpc RemovePullProxy (RequestWithId) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/proxy/pull/remove/{id}"
|
||||
body: "*"
|
||||
additional_bindings {
|
||||
post: "/api/device/add/{id}"
|
||||
body: "*"
|
||||
}
|
||||
};
|
||||
}
|
||||
rpc UpdatePullProxy (PullProxyInfo) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/proxy/pull/update"
|
||||
body: "*"
|
||||
additional_bindings {
|
||||
post: "/api/device/update"
|
||||
body: "*"
|
||||
}
|
||||
};
|
||||
}
|
||||
rpc GetPushProxyList (google.protobuf.Empty) returns (PushProxyListResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/proxy/push/list"
|
||||
};
|
||||
}
|
||||
rpc AddPushProxy (PushProxyInfo) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/proxy/push/add"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
rpc RemoveDevice (RequestWithId) returns (SuccessResponse) {
|
||||
rpc RemovePushProxy (RequestWithId) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/device/remove/{id}"
|
||||
post: "/api/proxy/push/remove/{id}"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
rpc UpdateDevice (DeviceInfo) returns (SuccessResponse) {
|
||||
rpc UpdatePushProxy (PushProxyInfo) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/device/update"
|
||||
post: "/api/proxy/push/update"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
@@ -170,6 +224,17 @@ service api {
|
||||
get: "/api/record/list"
|
||||
};
|
||||
}
|
||||
rpc GetTransformList (google.protobuf.Empty) returns (TransformListResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/transform/list"
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
message DisabledPluginsResponse {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
repeated PluginInfo data = 3;
|
||||
}
|
||||
|
||||
message GetConfigRequest {
|
||||
@@ -188,12 +253,28 @@ message FormilyResponse {
|
||||
map<string, Formily> properties = 2;
|
||||
}
|
||||
|
||||
message GetConfigResponse {
|
||||
message ConfigData {
|
||||
string file = 1;
|
||||
string modified = 2;
|
||||
string merged = 3;
|
||||
}
|
||||
|
||||
message GetConfigFileResponse {
|
||||
uint32 code = 1;
|
||||
string message = 2;
|
||||
string data = 3;
|
||||
}
|
||||
|
||||
message GetConfigResponse {
|
||||
uint32 code = 1;
|
||||
string message = 2;
|
||||
ConfigData data = 3;
|
||||
}
|
||||
|
||||
message UpdateConfigFileRequest {
|
||||
string content = 1;
|
||||
}
|
||||
|
||||
message ModifyConfigRequest {
|
||||
string name = 1;
|
||||
string yaml = 2;
|
||||
@@ -230,19 +311,21 @@ message SummaryResponse {
|
||||
|
||||
message PluginInfo {
|
||||
string name = 1;
|
||||
string version = 2;
|
||||
bool disabled = 3;
|
||||
repeated string pushAddr = 2;
|
||||
repeated string playAddr = 3;
|
||||
map<string, string> description = 4;
|
||||
}
|
||||
|
||||
message SysInfoData {
|
||||
google.protobuf.Timestamp startTime = 1;
|
||||
string localIP = 2;
|
||||
string version = 3;
|
||||
string goVersion = 4;
|
||||
string os = 5;
|
||||
string arch = 6;
|
||||
int32 cpus = 7;
|
||||
repeated PluginInfo plugins = 8;
|
||||
string publicIP = 3;
|
||||
string version = 4;
|
||||
string goVersion = 5;
|
||||
string os = 6;
|
||||
string arch = 7;
|
||||
int32 cpus = 8;
|
||||
repeated PluginInfo plugins = 9;
|
||||
}
|
||||
|
||||
message SysInfoResponse {
|
||||
@@ -261,6 +344,7 @@ message TaskTreeData {
|
||||
uint32 state = 7;
|
||||
TaskTreeData blocked = 8;
|
||||
uint64 pointer = 9;
|
||||
string startReason = 10;
|
||||
}
|
||||
|
||||
message TaskTreeResponse {
|
||||
@@ -312,6 +396,16 @@ message StreamInfo {
|
||||
float speed = 12;
|
||||
google.protobuf.Duration bufferTime = 13;
|
||||
bool stopOnIdle = 14;
|
||||
repeated RecordingDetail recording = 15;
|
||||
}
|
||||
|
||||
message RecordingDetail {
|
||||
string filePath = 1;
|
||||
string mode = 2;
|
||||
google.protobuf.Duration fragment = 3;
|
||||
bool append = 4;
|
||||
string pluginName = 5;
|
||||
uint64 pointer = 6;
|
||||
}
|
||||
|
||||
message Wrap {
|
||||
@@ -343,9 +437,10 @@ message AudioTrackInfo {
|
||||
string delta = 2;
|
||||
string meta = 3;
|
||||
uint32 bps = 4;
|
||||
uint32 fps = 5;
|
||||
uint32 sampleRate = 6;
|
||||
uint32 channels =7;
|
||||
uint32 bps_out = 5;
|
||||
uint32 fps = 6;
|
||||
uint32 sampleRate = 7;
|
||||
uint32 channels =8;
|
||||
}
|
||||
|
||||
message TrackSnapShotData {
|
||||
@@ -366,10 +461,11 @@ message VideoTrackInfo {
|
||||
string delta = 2;
|
||||
string meta = 3;
|
||||
uint32 bps = 4;
|
||||
uint32 fps = 5;
|
||||
uint32 width = 6;
|
||||
uint32 height =7;
|
||||
uint32 gop = 8;
|
||||
uint32 bps_out = 5;
|
||||
uint32 fps = 6;
|
||||
uint32 width = 7;
|
||||
uint32 height =8;
|
||||
uint32 gop = 9;
|
||||
}
|
||||
|
||||
message SuccessResponse {
|
||||
@@ -402,6 +498,7 @@ message RingReaderSnapShot {
|
||||
uint32 timestamp = 2;
|
||||
uint32 delay = 3;
|
||||
int32 state = 4;
|
||||
uint32 bps = 5;
|
||||
}
|
||||
|
||||
message SubscriberSnapShot {
|
||||
@@ -427,13 +524,13 @@ message SubscribersResponse {
|
||||
repeated SubscriberSnapShot data = 6;
|
||||
}
|
||||
|
||||
message DeviceListResponse {
|
||||
message PullProxyListResponse {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
repeated DeviceInfo data = 3;
|
||||
repeated PullProxyInfo data = 3;
|
||||
}
|
||||
|
||||
message DeviceInfo {
|
||||
message PullProxyInfo {
|
||||
uint32 ID = 1;
|
||||
google.protobuf.Timestamp createTime = 2;
|
||||
google.protobuf.Timestamp updateTime = 3; // 更新时间
|
||||
@@ -453,6 +550,29 @@ message DeviceInfo {
|
||||
string streamPath = 16; // 流路径
|
||||
}
|
||||
|
||||
message PushProxyInfo {
|
||||
uint32 ID = 1;
|
||||
google.protobuf.Timestamp createTime = 2;
|
||||
google.protobuf.Timestamp updateTime = 3;
|
||||
uint32 parentID = 4; // 父设备ID
|
||||
string name = 5; // 设备名称
|
||||
string type = 6; // 设备类型
|
||||
uint32 status = 7; // 设备状态
|
||||
string pushURL = 8; // 推流地址
|
||||
bool pushOnStart = 9; // 启动时推流
|
||||
bool audio = 10; // 是否推音频
|
||||
string description = 11; // 设备描述
|
||||
uint32 rtt = 12; // 平均RTT
|
||||
string streamPath = 13; // 流路径
|
||||
}
|
||||
|
||||
message PushProxyListResponse {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
repeated PushProxyInfo data = 3;
|
||||
}
|
||||
|
||||
|
||||
message SetStreamAliasRequest {
|
||||
string streamPath = 1;
|
||||
string alias = 2;
|
||||
@@ -486,10 +606,42 @@ message Recording {
|
||||
string streamPath = 1;
|
||||
google.protobuf.Timestamp startTime = 2;
|
||||
string type = 3;
|
||||
uint64 pointer = 4;
|
||||
}
|
||||
|
||||
message RecordingListResponse {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
repeated Recording data = 3;
|
||||
}
|
||||
|
||||
message PushInfo {
|
||||
string streamPath = 1;
|
||||
string targetURL = 2;
|
||||
google.protobuf.Timestamp startTime = 3;
|
||||
string status = 4;
|
||||
}
|
||||
|
||||
message PushListResponse {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
repeated PushInfo data = 3;
|
||||
}
|
||||
|
||||
message AddPushRequest {
|
||||
string streamPath = 1;
|
||||
string targetURL = 2;
|
||||
}
|
||||
|
||||
message Transform {
|
||||
string streamPath = 1;
|
||||
string target = 2;
|
||||
string pluginName = 3;
|
||||
string config = 4;
|
||||
}
|
||||
|
||||
message TransformListResponse {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
repeated Transform data = 3;
|
||||
}
|
||||
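The service changes above rename the device endpoints to pull/push proxy endpoints while keeping the old `/api/device/*` paths as additional_bindings. A minimal sketch of querying the new list route, not part of the diff; the host and port are placeholders:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// GET /api/proxy/pull/list is the new binding for GetPullProxyList;
	// /api/device/list remains available through additional_bindings.
	resp, err := http.Get("http://localhost:8080/api/proxy/pull/list")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	data, _ := io.ReadAll(resp.Body)
	fmt.Println(string(data)) // JSON-encoded PullProxyListResponse
}
```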
@@ -24,6 +24,7 @@ const _ = grpc.SupportPackageIsVersion7
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type ApiClient interface {
|
||||
SysInfo(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*SysInfoResponse, error)
|
||||
DisabledPlugins(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*DisabledPluginsResponse, error)
|
||||
Summary(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*SummaryResponse, error)
|
||||
Shutdown(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
Restart(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
@@ -45,14 +46,21 @@ type ApiClient interface {
|
||||
SetStreamAlias(ctx context.Context, in *SetStreamAliasRequest, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
StopPublish(ctx context.Context, in *StreamSnapRequest, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
StopSubscribe(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
GetConfigFile(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetConfigFileResponse, error)
|
||||
UpdateConfigFile(ctx context.Context, in *UpdateConfigFileRequest, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error)
|
||||
GetFormily(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error)
|
||||
ModifyConfig(ctx context.Context, in *ModifyConfigRequest, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
GetDeviceList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*DeviceListResponse, error)
|
||||
AddDevice(ctx context.Context, in *DeviceInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
RemoveDevice(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
UpdateDevice(ctx context.Context, in *DeviceInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
GetPullProxyList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PullProxyListResponse, error)
|
||||
AddPullProxy(ctx context.Context, in *PullProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
RemovePullProxy(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
UpdatePullProxy(ctx context.Context, in *PullProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
GetPushProxyList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PushProxyListResponse, error)
|
||||
AddPushProxy(ctx context.Context, in *PushProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
RemovePushProxy(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
UpdatePushProxy(ctx context.Context, in *PushProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
GetRecording(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*RecordingListResponse, error)
|
||||
GetTransformList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*TransformListResponse, error)
|
||||
}
|
||||
|
||||
type apiClient struct {
|
||||
@@ -72,6 +80,15 @@ func (c *apiClient) SysInfo(ctx context.Context, in *emptypb.Empty, opts ...grpc
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) DisabledPlugins(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*DisabledPluginsResponse, error) {
|
||||
out := new(DisabledPluginsResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/DisabledPlugins", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) Summary(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*SummaryResponse, error) {
|
||||
out := new(SummaryResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/Summary", in, out, opts...)
|
||||
@@ -261,6 +278,24 @@ func (c *apiClient) StopSubscribe(ctx context.Context, in *RequestWithId, opts .
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetConfigFile(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*GetConfigFileResponse, error) {
|
||||
out := new(GetConfigFileResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/GetConfigFile", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) UpdateConfigFile(ctx context.Context, in *UpdateConfigFileRequest, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
out := new(SuccessResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/UpdateConfigFile", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) {
|
||||
out := new(GetConfigResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/GetConfig", in, out, opts...)
|
||||
@@ -288,36 +323,72 @@ func (c *apiClient) ModifyConfig(ctx context.Context, in *ModifyConfigRequest, o
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetDeviceList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*DeviceListResponse, error) {
|
||||
out := new(DeviceListResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/GetDeviceList", in, out, opts...)
|
||||
func (c *apiClient) GetPullProxyList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PullProxyListResponse, error) {
|
||||
out := new(PullProxyListResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/GetPullProxyList", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) AddDevice(ctx context.Context, in *DeviceInfo, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
func (c *apiClient) AddPullProxy(ctx context.Context, in *PullProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
out := new(SuccessResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/AddDevice", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, "/global.api/AddPullProxy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) RemoveDevice(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
func (c *apiClient) RemovePullProxy(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
out := new(SuccessResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/RemoveDevice", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, "/global.api/RemovePullProxy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) UpdateDevice(ctx context.Context, in *DeviceInfo, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
func (c *apiClient) UpdatePullProxy(ctx context.Context, in *PullProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
out := new(SuccessResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/UpdateDevice", in, out, opts...)
|
||||
err := c.cc.Invoke(ctx, "/global.api/UpdatePullProxy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetPushProxyList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PushProxyListResponse, error) {
|
||||
out := new(PushProxyListResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/GetPushProxyList", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) AddPushProxy(ctx context.Context, in *PushProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
out := new(SuccessResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/AddPushProxy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) RemovePushProxy(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
out := new(SuccessResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/RemovePushProxy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) UpdatePushProxy(ctx context.Context, in *PushProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
out := new(SuccessResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/UpdatePushProxy", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -333,11 +404,21 @@ func (c *apiClient) GetRecording(ctx context.Context, in *emptypb.Empty, opts ..
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetTransformList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*TransformListResponse, error) {
|
||||
out := new(TransformListResponse)
|
||||
err := c.cc.Invoke(ctx, "/global.api/GetTransformList", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// ApiServer is the server API for Api service.
|
||||
// All implementations must embed UnimplementedApiServer
|
||||
// for forward compatibility
|
||||
type ApiServer interface {
|
||||
SysInfo(context.Context, *emptypb.Empty) (*SysInfoResponse, error)
|
||||
DisabledPlugins(context.Context, *emptypb.Empty) (*DisabledPluginsResponse, error)
|
||||
Summary(context.Context, *emptypb.Empty) (*SummaryResponse, error)
|
||||
Shutdown(context.Context, *RequestWithId) (*SuccessResponse, error)
|
||||
Restart(context.Context, *RequestWithId) (*SuccessResponse, error)
|
||||
@@ -359,14 +440,21 @@ type ApiServer interface {
|
||||
SetStreamAlias(context.Context, *SetStreamAliasRequest) (*SuccessResponse, error)
|
||||
StopPublish(context.Context, *StreamSnapRequest) (*SuccessResponse, error)
|
||||
StopSubscribe(context.Context, *RequestWithId) (*SuccessResponse, error)
|
||||
GetConfigFile(context.Context, *emptypb.Empty) (*GetConfigFileResponse, error)
|
||||
UpdateConfigFile(context.Context, *UpdateConfigFileRequest) (*SuccessResponse, error)
|
||||
GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error)
|
||||
GetFormily(context.Context, *GetConfigRequest) (*GetConfigResponse, error)
|
||||
ModifyConfig(context.Context, *ModifyConfigRequest) (*SuccessResponse, error)
|
||||
GetDeviceList(context.Context, *emptypb.Empty) (*DeviceListResponse, error)
|
||||
AddDevice(context.Context, *DeviceInfo) (*SuccessResponse, error)
|
||||
RemoveDevice(context.Context, *RequestWithId) (*SuccessResponse, error)
|
||||
UpdateDevice(context.Context, *DeviceInfo) (*SuccessResponse, error)
|
||||
GetPullProxyList(context.Context, *emptypb.Empty) (*PullProxyListResponse, error)
|
||||
AddPullProxy(context.Context, *PullProxyInfo) (*SuccessResponse, error)
|
||||
RemovePullProxy(context.Context, *RequestWithId) (*SuccessResponse, error)
|
||||
UpdatePullProxy(context.Context, *PullProxyInfo) (*SuccessResponse, error)
|
||||
GetPushProxyList(context.Context, *emptypb.Empty) (*PushProxyListResponse, error)
|
||||
AddPushProxy(context.Context, *PushProxyInfo) (*SuccessResponse, error)
|
||||
RemovePushProxy(context.Context, *RequestWithId) (*SuccessResponse, error)
|
||||
UpdatePushProxy(context.Context, *PushProxyInfo) (*SuccessResponse, error)
|
||||
GetRecording(context.Context, *emptypb.Empty) (*RecordingListResponse, error)
|
||||
GetTransformList(context.Context, *emptypb.Empty) (*TransformListResponse, error)
|
||||
mustEmbedUnimplementedApiServer()
|
||||
}
|
||||
|
||||
@@ -377,6 +465,9 @@ type UnimplementedApiServer struct {
|
||||
func (UnimplementedApiServer) SysInfo(context.Context, *emptypb.Empty) (*SysInfoResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method SysInfo not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) DisabledPlugins(context.Context, *emptypb.Empty) (*DisabledPluginsResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DisabledPlugins not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) Summary(context.Context, *emptypb.Empty) (*SummaryResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method Summary not implemented")
|
||||
}
|
||||
@@ -440,6 +531,12 @@ func (UnimplementedApiServer) StopPublish(context.Context, *StreamSnapRequest) (
|
||||
func (UnimplementedApiServer) StopSubscribe(context.Context, *RequestWithId) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method StopSubscribe not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetConfigFile(context.Context, *emptypb.Empty) (*GetConfigFileResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetConfigFile not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) UpdateConfigFile(context.Context, *UpdateConfigFileRequest) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method UpdateConfigFile not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetConfig not implemented")
|
||||
}
|
||||
@@ -449,21 +546,36 @@ func (UnimplementedApiServer) GetFormily(context.Context, *GetConfigRequest) (*G
|
||||
func (UnimplementedApiServer) ModifyConfig(context.Context, *ModifyConfigRequest) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method ModifyConfig not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetDeviceList(context.Context, *emptypb.Empty) (*DeviceListResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetDeviceList not implemented")
|
||||
func (UnimplementedApiServer) GetPullProxyList(context.Context, *emptypb.Empty) (*PullProxyListResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetPullProxyList not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) AddDevice(context.Context, *DeviceInfo) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method AddDevice not implemented")
|
||||
func (UnimplementedApiServer) AddPullProxy(context.Context, *PullProxyInfo) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method AddPullProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) RemoveDevice(context.Context, *RequestWithId) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RemoveDevice not implemented")
|
||||
func (UnimplementedApiServer) RemovePullProxy(context.Context, *RequestWithId) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RemovePullProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) UpdateDevice(context.Context, *DeviceInfo) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method UpdateDevice not implemented")
|
||||
func (UnimplementedApiServer) UpdatePullProxy(context.Context, *PullProxyInfo) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method UpdatePullProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetPushProxyList(context.Context, *emptypb.Empty) (*PushProxyListResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetPushProxyList not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) AddPushProxy(context.Context, *PushProxyInfo) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method AddPushProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) RemovePushProxy(context.Context, *RequestWithId) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RemovePushProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) UpdatePushProxy(context.Context, *PushProxyInfo) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method UpdatePushProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetRecording(context.Context, *emptypb.Empty) (*RecordingListResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetRecording not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetTransformList(context.Context, *emptypb.Empty) (*TransformListResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetTransformList not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) mustEmbedUnimplementedApiServer() {}
|
||||
|
||||
// UnsafeApiServer may be embedded to opt out of forward compatibility for this service.
|
||||
@@ -495,6 +607,24 @@ func _Api_SysInfo_Handler(srv interface{}, ctx context.Context, dec func(interfa
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_DisabledPlugins_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(emptypb.Empty)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).DisabledPlugins(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/global.api/DisabledPlugins",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).DisabledPlugins(ctx, req.(*emptypb.Empty))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_Summary_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(emptypb.Empty)
|
||||
if err := dec(in); err != nil {
|
||||
@@ -873,6 +1003,42 @@ func _Api_StopSubscribe_Handler(srv interface{}, ctx context.Context, dec func(i
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetConfigFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(emptypb.Empty)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).GetConfigFile(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/global.api/GetConfigFile",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).GetConfigFile(ctx, req.(*emptypb.Empty))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_UpdateConfigFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(UpdateConfigFileRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).UpdateConfigFile(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/global.api/UpdateConfigFile",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).UpdateConfigFile(ctx, req.(*UpdateConfigFileRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GetConfigRequest)
|
||||
if err := dec(in); err != nil {
|
||||
@@ -927,74 +1093,146 @@ func _Api_ModifyConfig_Handler(srv interface{}, ctx context.Context, dec func(in
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetDeviceList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
func _Api_GetPullProxyList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(emptypb.Empty)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).GetDeviceList(ctx, in)
|
||||
return srv.(ApiServer).GetPullProxyList(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/global.api/GetDeviceList",
|
||||
FullMethod: "/global.api/GetPullProxyList",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).GetDeviceList(ctx, req.(*emptypb.Empty))
|
||||
return srv.(ApiServer).GetPullProxyList(ctx, req.(*emptypb.Empty))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_AddDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeviceInfo)
|
||||
func _Api_AddPullProxy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PullProxyInfo)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).AddDevice(ctx, in)
|
||||
return srv.(ApiServer).AddPullProxy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/global.api/AddDevice",
|
||||
FullMethod: "/global.api/AddPullProxy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).AddDevice(ctx, req.(*DeviceInfo))
|
||||
return srv.(ApiServer).AddPullProxy(ctx, req.(*PullProxyInfo))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_RemoveDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
func _Api_RemovePullProxy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RequestWithId)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).RemoveDevice(ctx, in)
|
||||
return srv.(ApiServer).RemovePullProxy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/global.api/RemoveDevice",
|
||||
FullMethod: "/global.api/RemovePullProxy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).RemoveDevice(ctx, req.(*RequestWithId))
|
||||
return srv.(ApiServer).RemovePullProxy(ctx, req.(*RequestWithId))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_UpdateDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(DeviceInfo)
|
||||
func _Api_UpdatePullProxy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PullProxyInfo)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).UpdateDevice(ctx, in)
|
||||
return srv.(ApiServer).UpdatePullProxy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/global.api/UpdateDevice",
|
||||
FullMethod: "/global.api/UpdatePullProxy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).UpdateDevice(ctx, req.(*DeviceInfo))
|
||||
return srv.(ApiServer).UpdatePullProxy(ctx, req.(*PullProxyInfo))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetPushProxyList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(emptypb.Empty)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).GetPushProxyList(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/global.api/GetPushProxyList",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).GetPushProxyList(ctx, req.(*emptypb.Empty))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_AddPushProxy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PushProxyInfo)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).AddPushProxy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/global.api/AddPushProxy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).AddPushProxy(ctx, req.(*PushProxyInfo))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_RemovePushProxy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(RequestWithId)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).RemovePushProxy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/global.api/RemovePushProxy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).RemovePushProxy(ctx, req.(*RequestWithId))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_UpdatePushProxy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PushProxyInfo)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).UpdatePushProxy(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/global.api/UpdatePushProxy",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).UpdatePushProxy(ctx, req.(*PushProxyInfo))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
@@ -1017,6 +1255,24 @@ func _Api_GetRecording_Handler(srv interface{}, ctx context.Context, dec func(in
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetTransformList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(emptypb.Empty)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).GetTransformList(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/global.api/GetTransformList",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).GetTransformList(ctx, req.(*emptypb.Empty))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// Api_ServiceDesc is the grpc.ServiceDesc for Api service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
@@ -1028,6 +1284,10 @@ var Api_ServiceDesc = grpc.ServiceDesc{
|
||||
MethodName: "SysInfo",
|
||||
Handler: _Api_SysInfo_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DisabledPlugins",
|
||||
Handler: _Api_DisabledPlugins_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Summary",
|
||||
Handler: _Api_Summary_Handler,
|
||||
@@ -1112,6 +1372,14 @@ var Api_ServiceDesc = grpc.ServiceDesc{
|
||||
MethodName: "StopSubscribe",
|
||||
Handler: _Api_StopSubscribe_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetConfigFile",
|
||||
Handler: _Api_GetConfigFile_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "UpdateConfigFile",
|
||||
Handler: _Api_UpdateConfigFile_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetConfig",
|
||||
Handler: _Api_GetConfig_Handler,
|
||||
@@ -1125,25 +1393,45 @@ var Api_ServiceDesc = grpc.ServiceDesc{
|
||||
Handler: _Api_ModifyConfig_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetDeviceList",
|
||||
Handler: _Api_GetDeviceList_Handler,
|
||||
MethodName: "GetPullProxyList",
|
||||
Handler: _Api_GetPullProxyList_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "AddDevice",
|
||||
Handler: _Api_AddDevice_Handler,
|
||||
MethodName: "AddPullProxy",
|
||||
Handler: _Api_AddPullProxy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "RemoveDevice",
|
||||
Handler: _Api_RemoveDevice_Handler,
|
||||
MethodName: "RemovePullProxy",
|
||||
Handler: _Api_RemovePullProxy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "UpdateDevice",
|
||||
Handler: _Api_UpdateDevice_Handler,
|
||||
MethodName: "UpdatePullProxy",
|
||||
Handler: _Api_UpdatePullProxy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetPushProxyList",
|
||||
Handler: _Api_GetPushProxyList_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "AddPushProxy",
|
||||
Handler: _Api_AddPushProxy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "RemovePushProxy",
|
||||
Handler: _Api_RemovePushProxy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "UpdatePushProxy",
|
||||
Handler: _Api_UpdatePushProxy_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetRecording",
|
||||
Handler: _Api_GetRecording_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetTransformList",
|
||||
Handler: _Api_GetTransformList_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "global.proto",
|
||||
|
||||
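For context (not part of the diff), Api_ServiceDesc can be registered on a server by hand with grpc.Server.RegisterService; apiServer below is an assumed type implementing the generated ApiServer interface.
func main() { // assumes: import ("log"; "net"; "google.golang.org/grpc")
	s := grpc.NewServer()
	s.RegisterService(&Api_ServiceDesc, &apiServer{}) // apiServer implements ApiServer (assumption)
	lis, err := net.Listen("tcp", ":50051")           // address is illustrative
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(s.Serve(lis))
}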
@@ -85,9 +85,14 @@ func (a *AnnexB) Parse(t *AVTrack) (err error) {
|
||||
switch codec.ParseH264NALUType(nalu.Buffers[0][0]) {
|
||||
case codec.NALU_SPS:
|
||||
ctx.RecordInfo.SPS = [][]byte{nalu.ToBytes()}
|
||||
if len(ctx.RecordInfo.PPS) > 0 {
|
||||
ctx.CodecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS(), ctx.PPS())
|
||||
}
|
||||
case codec.NALU_PPS:
|
||||
ctx.RecordInfo.PPS = [][]byte{nalu.ToBytes()}
|
||||
ctx.CodecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS(), ctx.PPS())
|
||||
if len(ctx.RecordInfo.SPS) > 0 {
|
||||
ctx.CodecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS(), ctx.PPS())
|
||||
}
|
||||
case codec.NALU_IDR_Picture:
|
||||
t.Value.IDR = true
|
||||
}
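The hunk above changes the SPS branch to defer building CodecData until a PPS has also been seen, mirroring the PPS branch. A minimal sketch of that guard, extracted into a standalone helper (function name and the h264parser.CodecData type name are assumptions, not part of the diff):
func tryBuildH264CodecData(sps, pps [][]byte) (cd h264parser.CodecData, ok bool, err error) {
	if len(sps) == 0 || len(pps) == 0 {
		return cd, false, nil // wait until both parameter sets have been seen
	}
	cd, err = h264parser.NewCodecDataFromSPSAndPPS(sps[0], pps[0])
	return cd, err == nil, err
}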
|
||||
|
||||
pkg/auth/auth.go (new file, 86 lines)
@@ -0,0 +1,86 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/golang-jwt/jwt/v5"
|
||||
)
|
||||
|
||||
var (
|
||||
jwtSecret = []byte("m7s_secret_key") // In production, this should be properly configured
|
||||
tokenTTL = 24 * time.Hour
|
||||
// Add refresh threshold - refresh token if it expires in less than 30 minutes
|
||||
refreshThreshold = 30 * time.Minute
|
||||
)
|
||||
|
||||
// JWTClaims represents the JWT claims
|
||||
type JWTClaims struct {
|
||||
Username string `json:"username"`
|
||||
}
|
||||
|
||||
// TokenValidator is an interface for token validation
|
||||
type TokenValidator interface {
|
||||
ValidateToken(tokenString string) (*JWTClaims, error)
|
||||
}
|
||||
|
||||
// GenerateToken generates a new JWT token for a user
|
||||
func GenerateToken(username string) (string, error) {
|
||||
claims := jwt.RegisteredClaims{
|
||||
Subject: username,
|
||||
ExpiresAt: jwt.NewNumericDate(time.Now().Add(tokenTTL)),
|
||||
IssuedAt: jwt.NewNumericDate(time.Now()),
|
||||
NotBefore: jwt.NewNumericDate(time.Now()),
|
||||
}
|
||||
|
||||
token := jwt.NewWithClaims(jwt.SigningMethodHS256, claims)
|
||||
return token.SignedString(jwtSecret)
|
||||
}
|
||||
|
||||
// ValidateJWT validates a JWT token and returns the claims
|
||||
func ValidateJWT(tokenString string) (*JWTClaims, error) {
|
||||
token, err := jwt.ParseWithClaims(tokenString, &jwt.RegisteredClaims{}, func(token *jwt.Token) (interface{}, error) {
|
||||
if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
|
||||
return nil, fmt.Errorf("unexpected signing method: %v", token.Header["alg"])
|
||||
}
|
||||
return jwtSecret, nil
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if claims, ok := token.Claims.(*jwt.RegisteredClaims); ok && token.Valid {
|
||||
return &JWTClaims{Username: claims.Subject}, nil
|
||||
}
|
||||
|
||||
return nil, errors.New("invalid token")
|
||||
}
|
||||
|
||||
// ShouldRefreshToken checks if a token should be refreshed based on its expiration time
|
||||
func ShouldRefreshToken(tokenString string) (bool, error) {
|
||||
token, err := jwt.ParseWithClaims(tokenString, &jwt.RegisteredClaims{}, func(token *jwt.Token) (interface{}, error) {
|
||||
return jwtSecret, nil
|
||||
})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if claims, ok := token.Claims.(*jwt.RegisteredClaims); ok && token.Valid {
|
||||
if claims.ExpiresAt != nil {
|
||||
timeUntilExpiry := time.Until(claims.ExpiresAt.Time)
|
||||
return timeUntilExpiry < refreshThreshold, nil
|
||||
}
|
||||
}
|
||||
return false, errors.New("invalid token")
|
||||
}
|
||||
|
||||
// RefreshToken validates the old token and generates a new one if it's still valid
|
||||
func RefreshToken(oldToken string) (string, error) {
|
||||
claims, err := ValidateJWT(oldToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return GenerateToken(claims.Username)
|
||||
}
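A minimal round trip through this package (not part of the diff), assuming it is imported as auth from m7s.live/v5/pkg/auth:
func demoAuth() { // assumes: import ("log"; "m7s.live/v5/pkg/auth")
	token, err := auth.GenerateToken("admin")
	if err != nil {
		log.Fatal(err)
	}
	claims, err := auth.ValidateJWT(token)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("authenticated as", claims.Username)
	if refresh, _ := auth.ShouldRefreshToken(token); refresh {
		token, _ = auth.RefreshToken(token) // only happens within 30 minutes of expiry
		log.Println("refreshed token", token)
	}
}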
|
||||
pkg/auth/middleware.go (new file, 49 lines)
@@ -0,0 +1,49 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Middleware creates a new middleware for HTTP authentication
|
||||
func Middleware(validator TokenValidator) func(http.Handler) http.Handler {
|
||||
return func(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Skip auth for login endpoint
|
||||
if r.URL.Path == "/api/auth/login" {
|
||||
next.ServeHTTP(w, r)
|
||||
return
|
||||
}
|
||||
|
||||
// Get token from Authorization header
|
||||
authHeader := r.Header.Get("Authorization")
|
||||
if authHeader == "" {
|
||||
http.Error(w, "missing authorization header", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
tokenString := strings.TrimPrefix(authHeader, "Bearer ")
|
||||
claims, err := validator.ValidateToken(tokenString)
|
||||
if err != nil {
|
||||
http.Error(w, "invalid token", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
// Check if token needs refresh
|
||||
shouldRefresh, err := ShouldRefreshToken(tokenString)
|
||||
if err == nil && shouldRefresh {
|
||||
newToken, err := RefreshToken(tokenString)
|
||||
if err == nil {
|
||||
// Add new token to response headers
|
||||
w.Header().Set("New-Token", newToken)
|
||||
w.Header().Set("Access-Control-Expose-Headers", "New-Token")
|
||||
}
|
||||
}
|
||||
|
||||
// Add claims to context
|
||||
ctx := context.WithValue(r.Context(), "claims", claims)
|
||||
next.ServeHTTP(w, r.WithContext(ctx))
|
||||
})
|
||||
}
|
||||
}
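Wiring sketch (not part of the diff): ValidateJWT already has the TokenValidator shape, so a small adapter is enough to mount the middleware on a mux; validatorFunc, the route, and the port are assumptions.
type validatorFunc func(string) (*auth.JWTClaims, error)

func (f validatorFunc) ValidateToken(token string) (*auth.JWTClaims, error) { return f(token) }

func main() { // assumes: import ("fmt"; "log"; "net/http"; "m7s.live/v5/pkg/auth")
	mux := http.NewServeMux()
	mux.HandleFunc("/api/whoami", func(w http.ResponseWriter, r *http.Request) {
		claims := r.Context().Value("claims").(*auth.JWTClaims) // stored by the middleware above
		fmt.Fprintln(w, claims.Username)
	})
	log.Fatal(http.ListenAndServe(":8080", auth.Middleware(validatorFunc(auth.ValidateJWT))(mux)))
}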
|
||||
@@ -3,10 +3,11 @@ package pkg
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/config"
|
||||
"m7s.live/v5/pkg/task"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -36,6 +37,7 @@ type AVRingReader struct {
|
||||
startTime time.Time
|
||||
AbsTime uint32
|
||||
Delay uint32
|
||||
BPS uint32 // Bytes per second
|
||||
}
|
||||
|
||||
func (r *AVRingReader) DecConfChanged() bool {
|
||||
@@ -171,7 +173,7 @@ func (r *AVRingReader) ReadFrame(conf *config.Subscribe) (err error) {
|
||||
}
|
||||
r.Delay = r.Track.LastValue.Sequence - r.Value.Sequence
|
||||
if r.Track.ICodecCtx != nil {
|
||||
r.Log(context.TODO(), task.TraceLevel, r.Track.FourCC().String(), "ts", r.Value.Timestamp, "delay", r.Delay)
|
||||
r.Log(context.TODO(), task.TraceLevel, r.Track.FourCC().String(), "ts", r.Value.Timestamp, "delay", r.Delay, "bps", r.BPS)
|
||||
} else {
|
||||
r.Warn("no codec")
|
||||
}
|
||||
|
||||
@@ -5,9 +5,10 @@ import (
|
||||
"crypto/subtle"
|
||||
"crypto/tls"
|
||||
"log/slog"
|
||||
"m7s.live/v5/pkg/task"
|
||||
"net/http"
|
||||
|
||||
"m7s.live/v5/pkg/task"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
|
||||
"time"
|
||||
@@ -45,7 +46,8 @@ func (config *HTTP) GetHandler() http.Handler {
|
||||
return config.mux
|
||||
}
|
||||
|
||||
func (config *HTTP) GetHttpMux() *http.ServeMux {
|
||||
func (config *HTTP) CreateHttpMux() *http.ServeMux {
|
||||
config.mux = http.NewServeMux()
|
||||
return config.mux
|
||||
}
|
||||
|
||||
@@ -107,7 +109,7 @@ func CORS(next http.Handler) http.Handler {
|
||||
header := w.Header()
|
||||
header.Set("Access-Control-Allow-Credentials", "true")
|
||||
header.Set("Cross-Origin-Resource-Policy", "cross-origin")
|
||||
header.Set("Access-Control-Allow-Headers", "Content-Type,Access-Token")
|
||||
header.Set("Access-Control-Allow-Headers", "Content-Type,Access-Token,Authorization")
|
||||
header.Set("Access-Control-Allow-Private-Network", "true")
|
||||
origin := r.Header["Origin"]
|
||||
if len(origin) == 0 {
|
||||
|
||||
@@ -73,6 +73,7 @@ func (task *ListenTCPWork) Start() (err error) {
|
||||
task.Info("listen tcp")
|
||||
} else {
|
||||
task.Error("failed to listen tcp", "error", err)
|
||||
return err
|
||||
}
|
||||
if task.handler == nil {
|
||||
return nil
|
||||
|
||||
@@ -15,10 +15,17 @@ const (
|
||||
RelayModeRemux = "remux"
|
||||
RelayModeRelay = "relay"
|
||||
RelayModeMix = "mix"
|
||||
|
||||
HookOnPublish HookType = "publish"
|
||||
HookOnSubscribe HookType = "subscribe"
|
||||
HookOnPublishEnd HookType = "publish_end"
|
||||
HookOnSubscribeEnd HookType = "subscribe_end"
|
||||
HookOnServerKeepAlive HookType = "server_keep_alive"
|
||||
)
|
||||
|
||||
type (
|
||||
Publish struct {
|
||||
HookType string
|
||||
Publish struct {
|
||||
MaxCount int `default:"0" desc:"最大发布者数量"` // 最大发布者数量
|
||||
PubAudio bool `default:"true" desc:"是否发布音频"`
|
||||
PubVideo bool `default:"true" desc:"是否发布视频"`
|
||||
@@ -29,10 +36,12 @@ type (
|
||||
IdleTimeout time.Duration `desc:"空闲(无订阅)超时"` // 空闲(无订阅)超时
|
||||
PauseTimeout time.Duration `default:"30s" desc:"暂停超时时间"` // 暂停超时
|
||||
BufferTime time.Duration `desc:"缓冲时长,0代表取最近关键帧"` // 缓冲长度(单位:秒),0代表取最近关键帧
|
||||
Speed float64 `default:"0" desc:"倍速"` // 倍速,0 为不限速
|
||||
Speed float64 `default:"0" desc:"发送速率"` // 发送速率,0 为不限速
|
||||
Scale float64 `default:"1" desc:"缩放倍数"` // 缩放倍数
|
||||
Key string `desc:"发布鉴权key"` // 发布鉴权key
|
||||
RingSize util.Range[int] `default:"20-1024" desc:"RingSize范围"` // 缓冲区大小范围
|
||||
RelayMode string `default:"remux" desc:"转发模式" enum:"remux:转格式,relay:纯转发,mix:混合转发"` // 转发模式
|
||||
PubType string `default:"server" desc:"发布类型"` // 发布类型
|
||||
Dump bool
|
||||
}
|
||||
Subscribe struct {
|
||||
@@ -46,7 +55,7 @@ type (
|
||||
WaitTimeout time.Duration `default:"10s" desc:"等待流超时时间"` // 等待流超时
|
||||
WriteBufferSize int `desc:"写缓冲大小"` // 写缓冲大小
|
||||
Key string `desc:"订阅鉴权key"` // 订阅鉴权key
|
||||
Internal bool `default:"false" desc:"是否内部订阅"` // 是否内部订阅
|
||||
SubType string `desc:"订阅类型"` // 订阅类型
|
||||
}
|
||||
HTTPValus map[string][]string
|
||||
Pull struct {
|
||||
@@ -87,6 +96,15 @@ type (
|
||||
Pull map[Regexp]Pull
|
||||
Transform map[Regexp]Transform
|
||||
}
|
||||
Webhook struct {
|
||||
URL string `yaml:"url" json:"url"` // webhook endpoint URL
|
||||
Method string `yaml:"method" json:"method" default:"POST"` // HTTP method
|
||||
Headers map[string]string `yaml:"headers" json:"headers"` // custom request headers
|
||||
TimeoutSeconds int `yaml:"timeout" json:"timeout" default:"5"` // request timeout in seconds
|
||||
RetryTimes int `yaml:"retry" json:"retry" default:"3"` // number of retries
|
||||
RetryInterval time.Duration `yaml:"retryInterval" json:"retryInterval" default:"1s"` // interval between retries
|
||||
Interval int `yaml:"interval" json:"interval" default:"60"` // keep-alive interval in seconds
|
||||
}
|
||||
Common struct {
|
||||
PublicIP string
|
||||
PublicIPv6 string
|
||||
@@ -98,6 +116,7 @@ type (
|
||||
Quic
|
||||
TCP
|
||||
UDP
|
||||
Hook map[HookType]Webhook
|
||||
Pull map[string]Pull
|
||||
Transform map[string]Transform
|
||||
OnSub OnSubscribe
|
||||
|
||||
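For illustration (not part of the diff), the Hook field added to config.Common decodes into a map keyed by the HookType constants above; built by hand it would look roughly like this, with all URLs and values illustrative:
// assumes: import ("time"; "m7s.live/v5/pkg/config")
var hooks = map[config.HookType]config.Webhook{
	config.HookOnPublish: {
		URL:            "http://127.0.0.1:8089/hooks/publish",
		Method:         "POST",
		Headers:        map[string]string{"X-Token": "example"},
		TimeoutSeconds: 5,
		RetryTimes:     3,
		RetryInterval:  time.Second,
	},
	config.HookOnServerKeepAlive: {
		URL:      "http://127.0.0.1:8089/hooks/keepalive",
		Interval: 60, // seconds between keep-alive posts
	},
}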
pkg/db/db.go (new file, 4 lines)
@@ -0,0 +1,4 @@
|
||||
package db
|
||||
|
||||
// AutoMigrations is a slice of models that need to be auto-migrated
|
||||
var AutoMigrations []interface{}
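Usage sketch (not part of the diff): models register themselves into AutoMigrations from their own init functions, and whoever opens the database runs the migration; migrateAll and dbConn are assumed names.
func init() {
	AutoMigrations = append(AutoMigrations, &User{}) // User is defined in pkg/db/user.go below
}

func migrateAll(dbConn *gorm.DB) error { // assumes: import "gorm.io/gorm"
	return dbConn.AutoMigrate(AutoMigrations...)
}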
|
||||
pkg/db/user.go (new file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
package db
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/bcrypt"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
|
||||
// User represents a user in the system
|
||||
type User struct {
|
||||
ID uint `gorm:"primarykey"`
|
||||
CreatedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
DeletedAt gorm.DeletedAt `gorm:"index"`
|
||||
Username string `gorm:"uniqueIndex;size:64"`
|
||||
Password string `gorm:"size:60"` // bcrypt hash
|
||||
Role string `gorm:"size:20;default:'user'"` // admin or user
|
||||
LastLogin time.Time
|
||||
}
|
||||
|
||||
// BeforeCreate hook to hash password before saving
|
||||
func (u *User) BeforeCreate(tx *gorm.DB) error {
|
||||
hashedPassword, err := bcrypt.GenerateFromPassword([]byte(u.Password), bcrypt.DefaultCost)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
u.Password = string(hashedPassword)
|
||||
return nil
|
||||
}
|
||||
|
||||
// BeforeUpdate hook to hash password before updating
|
||||
func (u *User) BeforeUpdate(tx *gorm.DB) error {
|
||||
if u.Password != "" {
|
||||
hashedPassword, err := bcrypt.GenerateFromPassword([]byte(u.Password), bcrypt.DefaultCost)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
u.Password = string(hashedPassword)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckPassword verifies if the provided password matches the hash
|
||||
func (u *User) CheckPassword(password string) bool {
|
||||
err := bcrypt.CompareHashAndPassword([]byte(u.Password), []byte(password))
|
||||
return err == nil
|
||||
}
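A short flow through these hooks (not part of the diff); dbConn is an assumed *gorm.DB opened elsewhere:
func demoUser(dbConn *gorm.DB) { // assumes: import ("log"; "gorm.io/gorm")
	u := &User{Username: "admin", Password: "plaintext", Role: "admin"}
	if err := dbConn.Create(u).Error; err != nil { // BeforeCreate replaces Password with its bcrypt hash
		log.Println(err)
		return
	}
	log.Println(u.CheckPassword("plaintext")) // true: compares the plaintext against the stored hash
	log.Println(u.CheckPassword("wrong"))     // false
}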
|
||||
@@ -4,7 +4,9 @@ import "errors"
|
||||
|
||||
var (
|
||||
ErrNotFound = errors.New("not found")
|
||||
ErrDisabled = errors.New("disabled")
|
||||
ErrStreamExist = errors.New("stream exist")
|
||||
ErrRecordExists = errors.New("record exists")
|
||||
ErrKick = errors.New("kick")
|
||||
ErrDiscard = errors.New("discard")
|
||||
ErrPublishTimeout = errors.New("publish timeout")
|
||||
@@ -22,4 +24,6 @@ var (
|
||||
ErrSeek = errors.New("seek")
|
||||
ErrRecordSamePath = errors.New("record same path")
|
||||
ErrTransformSame = errors.New("transform same")
|
||||
ErrNotListen = errors.New("not listen")
|
||||
ErrInvalidCredentials = errors.New("invalid credentials")
|
||||
)
|
||||
|
||||
pkg/log.go (49 lines changed)
@@ -21,26 +21,52 @@ func ParseLevel(level string) slog.Level {
|
||||
return lv.Level()
|
||||
}
|
||||
|
||||
type HandlerInfo struct {
|
||||
slog.Handler
|
||||
origin slog.Handler
|
||||
}
|
||||
|
||||
type MultiLogHandler struct {
|
||||
handlers []slog.Handler
|
||||
attrChildren sync.Map
|
||||
parentLevel *slog.Level
|
||||
level *slog.Level
|
||||
handlers []HandlerInfo
|
||||
attrChildren, groupChildren sync.Map
|
||||
parentLevel *slog.Level
|
||||
level *slog.Level
|
||||
}
|
||||
|
||||
func (m *MultiLogHandler) Add(h slog.Handler) {
|
||||
m.handlers = append(m.handlers, h)
|
||||
m.add(h, h)
|
||||
}
|
||||
|
||||
func (m *MultiLogHandler) add(origin slog.Handler, warp slog.Handler) {
|
||||
m.handlers = append(m.handlers, HandlerInfo{origin: origin, Handler: warp})
|
||||
m.attrChildren.Range(func(key, value any) bool {
|
||||
child := key.(*MultiLogHandler)
|
||||
child.Add(h.WithAttrs(value.([]slog.Attr)))
|
||||
child.add(origin, origin.WithAttrs(value.([]slog.Attr)))
|
||||
return true
|
||||
})
|
||||
m.groupChildren.Range(func(key, value any) bool {
|
||||
child := key.(*MultiLogHandler)
|
||||
child.add(origin, origin.WithGroup(value.(string)))
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (m *MultiLogHandler) Remove(h slog.Handler) {
|
||||
if i := slices.Index(m.handlers, h); i != -1 {
|
||||
if i := slices.IndexFunc(m.handlers, func(info HandlerInfo) bool {
|
||||
return info.origin == h
|
||||
}); i != -1 {
|
||||
m.handlers = slices.Delete(m.handlers, i, i+1)
|
||||
}
|
||||
m.attrChildren.Range(func(key, value any) bool {
|
||||
child := key.(*MultiLogHandler)
|
||||
child.Remove(h)
|
||||
return true
|
||||
})
|
||||
m.groupChildren.Range(func(key, value any) bool {
|
||||
child := key.(*MultiLogHandler)
|
||||
child.Remove(h)
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (m *MultiLogHandler) SetLevel(level slog.Level) {
|
||||
@@ -72,7 +98,7 @@ func (m *MultiLogHandler) Handle(ctx context.Context, rec slog.Record) error {
|
||||
// WithAttrs implements slog.Handler.
|
||||
func (m *MultiLogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
|
||||
result := &MultiLogHandler{
|
||||
handlers: make([]slog.Handler, len(m.handlers)),
|
||||
handlers: make([]HandlerInfo, len(m.handlers)),
|
||||
parentLevel: m.parentLevel,
|
||||
}
|
||||
m.attrChildren.Store(result, attrs)
|
||||
@@ -80,7 +106,7 @@ func (m *MultiLogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
|
||||
result.parentLevel = m.level
|
||||
}
|
||||
for i, h := range m.handlers {
|
||||
result.handlers[i] = h.WithAttrs(attrs)
|
||||
result.handlers[i] = HandlerInfo{origin: h.origin, Handler: h.WithAttrs(attrs)}
|
||||
}
|
||||
return result
|
||||
}
|
||||
@@ -88,14 +114,15 @@ func (m *MultiLogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
|
||||
// WithGroup implements slog.Handler.
|
||||
func (m *MultiLogHandler) WithGroup(name string) slog.Handler {
|
||||
result := &MultiLogHandler{
|
||||
handlers: make([]slog.Handler, len(m.handlers)),
|
||||
handlers: make([]HandlerInfo, len(m.handlers)),
|
||||
parentLevel: m.parentLevel,
|
||||
}
|
||||
m.groupChildren.Store(result, name)
|
||||
if m.level != nil {
|
||||
result.parentLevel = m.level
|
||||
}
|
||||
for i, h := range m.handlers {
|
||||
result.handlers[i] = h.WithGroup(name)
|
||||
result.handlers[i] = HandlerInfo{origin: h.origin, Handler: h.WithGroup(name)}
|
||||
}
|
||||
return result
|
||||
}
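The point of tracking origin (not part of the diff): a handler added once can be removed later by the same value, even though WithAttrs/WithGroup children only ever saw wrapped copies. A minimal sketch, with variable names as assumptions:
func demoMultiLog(multi *MultiLogHandler) { // assumes: import ("log/slog"; "os"); MultiLogHandler satisfies slog.Handler
	console := slog.NewTextHandler(os.Stderr, nil)
	multi.Add(console)
	pluginLogger := slog.New(multi).With("plugin", "demo") // children keep HandlerInfo{origin: console, Handler: wrapped}
	pluginLogger.Info("hello")
	multi.Remove(console) // matches HandlerInfo.origin, so the wrapped copies in the children are detached too
}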
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
@@ -16,6 +17,13 @@ import (
|
||||
)
|
||||
|
||||
var idG atomic.Uint32
|
||||
var sourceFilePathPrefix string
|
||||
|
||||
func init() {
|
||||
if _, file, _, ok := runtime.Caller(0); ok {
|
||||
sourceFilePathPrefix = strings.TrimSuffix(file, "pkg/task/job.go")
|
||||
}
|
||||
}
|
||||
|
||||
func GetNextTaskID() uint32 {
|
||||
return idG.Add(1)
|
||||
@@ -112,9 +120,11 @@ func (mt *Job) AddTask(t ITask, opt ...any) (task *Task) {
|
||||
}
|
||||
}
|
||||
_, file, line, ok := runtime.Caller(1)
|
||||
|
||||
if ok {
|
||||
task.StartReason = fmt.Sprintf("%s:%d", file, line)
|
||||
task.StartReason = fmt.Sprintf("%s:%d", strings.TrimPrefix(file, sourceFilePathPrefix), line)
|
||||
}
|
||||
|
||||
mt.lazyRun.Do(func() {
|
||||
if mt.eventLoopLock.TryLock() {
|
||||
defer mt.eventLoopLock.Unlock()
|
||||
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
@@ -129,6 +130,10 @@ type (
|
||||
}
|
||||
)
|
||||
|
||||
func FromPointer(pointer uintptr) *Task {
|
||||
return (*Task)(unsafe.Pointer(pointer))
|
||||
}
|
||||
|
||||
func (*Task) keepalive() bool {
|
||||
return false
|
||||
}
|
||||
@@ -167,6 +172,10 @@ func (task *Task) GetTask() *Task {
|
||||
return task
|
||||
}
|
||||
|
||||
func (task *Task) GetTaskPointer() uintptr {
|
||||
return uintptr(unsafe.Pointer(task))
|
||||
}
|
||||
|
||||
func (task *Task) getParent() *Job {
|
||||
return task.parent
|
||||
}
|
||||
@@ -369,13 +378,16 @@ func (task *Task) SetDescriptions(value Description) {
|
||||
}
|
||||
|
||||
func (task *Task) dispose() {
|
||||
taskType, ownerType := task.handler.GetTaskType(), task.GetOwnerType()
|
||||
if task.state < TASK_STATE_STARTED {
|
||||
if task.Logger != nil {
|
||||
task.Debug("task dispose canceled", "taskId", task.ID, "taskType", taskType, "ownerType", ownerType, "state", task.state)
|
||||
}
|
||||
return
|
||||
}
|
||||
reason := task.StopReason()
|
||||
task.state = TASK_STATE_DISPOSING
|
||||
if task.Logger != nil {
|
||||
taskType, ownerType := task.handler.GetTaskType(), task.GetOwnerType()
|
||||
if taskType != TASK_TYPE_CALL {
|
||||
yargs := []any{"reason", reason, "taskId", task.ID, "taskType", taskType, "ownerType", ownerType}
|
||||
task.Debug("task dispose", yargs...)
|
||||
|
||||
pkg/util/linux.go (new file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
func CreateShutdownScript() error {
|
||||
return ioutil.WriteFile("shutdown.sh", []byte(fmt.Sprintf("kill -9 %d", os.Getpid())), 0777)
|
||||
}
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
myip "github.com/husanpao/ip"
|
||||
@@ -278,22 +279,36 @@ var ipReg = regexp.MustCompile(`^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(2
|
||||
var privateIPReg = regexp.MustCompile(`^((10|192\.168|172\.(1[6-9]|2[0-9]|3[0-1]))\.){3}(10|192\.168|172\.(1[6-9]|2[0-9]|3[0-1]))$`)
|
||||
|
||||
var routes map[string]string
|
||||
var PublicIP string
|
||||
|
||||
func IsPrivateIP(ip string) bool {
|
||||
return privateIPReg.MatchString(ip)
|
||||
}
|
||||
|
||||
// TODO: map race
|
||||
func GetPublicIP(ip string) string {
|
||||
if routes == nil {
|
||||
routes = make(map[string]string)
|
||||
for k, v := range myip.LocalAndInternalIPs() {
|
||||
routes[k] = v
|
||||
if lastdot := strings.LastIndex(k, "."); lastdot >= 0 {
|
||||
routes[k[0:lastdot]] = k
|
||||
}
|
||||
func initRoutes() {
|
||||
PublicIP = myip.ExternalIP()
|
||||
for k, v := range myip.LocalAndInternalIPs() {
|
||||
routes[k] = v
|
||||
if lastdot := strings.LastIndex(k, "."); lastdot >= 0 {
|
||||
routes[k[0:lastdot]] = k
|
||||
}
|
||||
}
|
||||
initRoutesWait.Done()
|
||||
}
|
||||
|
||||
var initRoutesWait sync.WaitGroup
|
||||
|
||||
func init() {
|
||||
routes = make(map[string]string)
|
||||
initRoutesWait.Add(1)
|
||||
go initRoutes()
|
||||
}
|
||||
|
||||
func GetPublicIP(ip string) string {
|
||||
initRoutesWait.Wait()
|
||||
if ip == "" {
|
||||
return PublicIP
|
||||
}
|
||||
if publicIP, ok := routes[ip]; ok {
|
||||
return publicIP
|
||||
}
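Behavioural note with a tiny sketch (not part of the diff): because initRoutes now runs in a goroutine behind a WaitGroup, early callers simply block until the table is ready instead of racing on the map; the addresses are illustrative.
func demoPublicIP() { // assumes: import ("fmt"; "m7s.live/v5/pkg/util")
	fmt.Println(util.GetPublicIP(""))            // blocks until initRoutes finishes, then returns the external IP
	fmt.Println(util.GetPublicIP("192.168.1.7")) // mapped public IP for a local address, if one is known
}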
|
||||
|
||||
pkg/util/windows.go (new file, 14 lines)
@@ -0,0 +1,14 @@
|
||||
//go:build windows
|
||||
// +build windows
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
)
|
||||
|
||||
func CreateShutdownScript() error {
|
||||
return ioutil.WriteFile("shutdown.bat", []byte(fmt.Sprintf("taskkill /pid %d -t -f", os.Getpid())), 0777)
|
||||
}
|
||||
plugin.go (326 lines changed)
@@ -1,7 +1,11 @@
|
||||
package m7s
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -10,7 +14,9 @@ import (
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5/pkg/task"
|
||||
|
||||
@@ -58,6 +64,7 @@ type (
|
||||
OnInit() error
|
||||
OnStop()
|
||||
Pull(string, config.Pull, *config.Publish)
|
||||
Push(string, config.Push, *config.Subscribe)
|
||||
Transform(*Publisher, config.Transform)
|
||||
OnPublish(*Publisher)
|
||||
}
|
||||
@@ -81,9 +88,11 @@ type (
|
||||
IQUICPlugin interface {
|
||||
OnQUICConnect(quic.Connection) task.ITask
|
||||
}
|
||||
|
||||
IDevicePlugin interface {
|
||||
OnDeviceAdd(device *Device) any
|
||||
IPullProxyPlugin interface {
|
||||
OnPullProxyAdd(pullProxy *PullProxy) any
|
||||
}
|
||||
IPushProxyPlugin interface {
|
||||
OnPushProxyAdd(pushProxy *PushProxy) any
|
||||
}
|
||||
)
|
||||
|
||||
@@ -101,7 +110,7 @@ func (plugin *PluginMeta) Init(s *Server, userConfig map[string]any) (p *Plugin)
|
||||
p.Logger = s.Logger.With("plugin", plugin.Name)
|
||||
upperName := strings.ToUpper(plugin.Name)
|
||||
if os.Getenv(upperName+"_ENABLE") == "false" {
|
||||
p.Disabled = true
|
||||
p.disable("env")
|
||||
p.Warn("disabled by env")
|
||||
return
|
||||
}
|
||||
@@ -137,6 +146,7 @@ func (plugin *PluginMeta) Init(s *Server, userConfig map[string]any) (p *Plugin)
|
||||
p.Disabled = false
|
||||
}
|
||||
if p.Disabled {
|
||||
p.disable("config")
|
||||
p.Warn("plugin disabled")
|
||||
return
|
||||
} else {
|
||||
@@ -151,7 +161,7 @@ func (plugin *PluginMeta) Init(s *Server, userConfig map[string]any) (p *Plugin)
|
||||
s.DB, err = gorm.Open(factory(p.config.DSN), &gorm.Config{})
|
||||
if err != nil {
|
||||
s.Error("failed to connect database", "error", err, "dsn", s.config.DSN, "type", s.config.DBType)
|
||||
p.Disabled = true
|
||||
p.disable(fmt.Sprintf("database %v", err))
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -209,13 +219,14 @@ var _ IPlugin = (*Plugin)(nil)
|
||||
|
||||
type Plugin struct {
|
||||
task.Work
|
||||
Disabled bool
|
||||
Meta *PluginMeta
|
||||
config config.Common
|
||||
config.Config
|
||||
handler IPlugin
|
||||
Server *Server
|
||||
DB *gorm.DB
|
||||
Disabled bool
|
||||
Meta *PluginMeta
|
||||
PushAddr, PlayAddr []string
|
||||
config config.Common
|
||||
handler IPlugin
|
||||
Server *Server
|
||||
DB *gorm.DB
|
||||
}
|
||||
|
||||
func (Plugin) nothing() {
|
||||
@@ -256,6 +267,12 @@ func (p *Plugin) settingPath() string {
|
||||
return filepath.Join(p.Server.SettingDir, strings.ToLower(p.Meta.Name)+".yaml")
|
||||
}
|
||||
|
||||
func (p *Plugin) disable(reason string) {
|
||||
p.Disabled = true
|
||||
p.SetDescription("disableReason", reason)
|
||||
p.Server.disabledPlugins = append(p.Server.disabledPlugins, p)
|
||||
}
|
||||
|
||||
func (p *Plugin) assign() {
|
||||
f, err := os.Open(p.settingPath())
|
||||
defer f.Close()
|
||||
@@ -280,6 +297,7 @@ func (p *Plugin) Start() (err error) {
|
||||
s.grpcServer.RegisterService(p.Meta.ServiceDesc, p.handler)
|
||||
if p.Meta.RegisterGRPCHandler != nil {
|
||||
if err = p.Meta.RegisterGRPCHandler(p.Context, s.config.HTTP.GetGRPCMux(), s.grpcClientConn); err != nil {
|
||||
p.disable(fmt.Sprintf("grpc %v", err))
|
||||
return
|
||||
} else {
|
||||
p.Info("grpc handler registered")
|
||||
@@ -288,11 +306,18 @@ func (p *Plugin) Start() (err error) {
|
||||
}
|
||||
s.Plugins.Add(p)
|
||||
if err = p.listen(); err != nil {
|
||||
p.disable(fmt.Sprintf("listen %v", err))
|
||||
return
|
||||
}
|
||||
if err = p.handler.OnInit(); err != nil {
|
||||
p.disable(fmt.Sprintf("init %v", err))
|
||||
return
|
||||
}
|
||||
if p.config.Hook != nil {
|
||||
if hook, ok := p.config.Hook[config.HookOnServerKeepAlive]; ok && hook.Interval > 0 {
|
||||
p.AddTask(&ServerKeepAliveTask{plugin: p})
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -305,40 +330,56 @@ func (p *Plugin) listen() (err error) {
|
||||
httpConf := &p.config.HTTP
|
||||
|
||||
if httpConf.ListenAddrTLS != "" && (httpConf.ListenAddrTLS != p.Server.config.HTTP.ListenAddrTLS) {
|
||||
p.SetDescription("httpTLS", strings.TrimPrefix(httpConf.ListenAddrTLS, ":"))
|
||||
p.AddDependTask(httpConf.CreateHTTPSWork(p.Logger))
|
||||
}
|
||||
|
||||
if httpConf.ListenAddr != "" && (httpConf.ListenAddr != p.Server.config.HTTP.ListenAddr) {
|
||||
p.SetDescription("http", strings.TrimPrefix(httpConf.ListenAddr, ":"))
|
||||
p.AddDependTask(httpConf.CreateHTTPWork(p.Logger))
|
||||
}
|
||||
|
||||
if tcphandler, ok := p.handler.(ITCPPlugin); ok {
|
||||
tcpConf := &p.config.TCP
|
||||
if tcpConf.ListenAddr != "" && tcpConf.AutoListen {
|
||||
if err = p.AddTask(tcpConf.CreateTCPWork(p.Logger, tcphandler.OnTCPConnect)).WaitStarted(); err != nil {
|
||||
return
|
||||
if tcpConf.ListenAddr != "" {
|
||||
if tcpConf.AutoListen {
|
||||
if err = p.AddTask(tcpConf.CreateTCPWork(p.Logger, tcphandler.OnTCPConnect)).WaitStarted(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
p.SetDescription("tcp", strings.TrimPrefix(tcpConf.ListenAddr, ":"))
|
||||
}
|
||||
if tcpConf.ListenAddrTLS != "" && tcpConf.AutoListen {
|
||||
if err = p.AddTask(tcpConf.CreateTCPTLSWork(p.Logger, tcphandler.OnTCPConnect)).WaitStarted(); err != nil {
|
||||
return
|
||||
if tcpConf.ListenAddrTLS != "" {
|
||||
if tcpConf.AutoListen {
|
||||
if err = p.AddTask(tcpConf.CreateTCPTLSWork(p.Logger, tcphandler.OnTCPConnect)).WaitStarted(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
p.SetDescription("tcpTLS", strings.TrimPrefix(tcpConf.ListenAddrTLS, ":"))
|
||||
}
|
||||
}
|
||||
|
||||
if udpHandler, ok := p.handler.(IUDPPlugin); ok {
|
||||
udpConf := &p.config.UDP
|
||||
if udpConf.ListenAddr != "" && udpConf.AutoListen {
|
||||
if err = p.AddTask(udpConf.CreateUDPWork(p.Logger, udpHandler.OnUDPConnect)).WaitStarted(); err != nil {
|
||||
return
|
||||
if udpConf.ListenAddr != "" {
|
||||
if udpConf.AutoListen {
|
||||
if err = p.AddTask(udpConf.CreateUDPWork(p.Logger, udpHandler.OnUDPConnect)).WaitStarted(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
p.SetDescription("udp", strings.TrimPrefix(udpConf.ListenAddr, ":"))
|
||||
}
|
||||
}
|
||||
|
||||
if quicHandler, ok := p.handler.(IQUICPlugin); ok {
|
||||
quicConf := &p.config.Quic
|
||||
if quicConf.ListenAddr != "" && quicConf.AutoListen {
|
||||
err = p.AddTask(quicConf.CreateQUICWork(p.Logger, quicHandler.OnQUICConnect)).WaitStarted()
|
||||
if quicConf.ListenAddr != "" {
|
||||
if quicConf.AutoListen {
|
||||
if err = p.AddTask(quicConf.CreateQUICWork(p.Logger, quicHandler.OnQUICConnect)).WaitStarted(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
p.SetDescription("quic", strings.TrimPrefix(quicConf.ListenAddr, ":"))
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -352,13 +393,78 @@ func (p *Plugin) OnStop() {
|
||||
|
||||
}
|
||||
|
||||
type WebHookTask struct {
|
||||
task.Task
|
||||
plugin *Plugin
|
||||
hookType config.HookType
|
||||
conf *config.Webhook
|
||||
data any
|
||||
jsonData []byte
|
||||
}
|
||||
|
||||
func (t *WebHookTask) Start() error {
|
||||
if t.conf == nil || t.conf.URL == "" {
|
||||
return task.ErrTaskComplete
|
||||
}
|
||||
|
||||
var err error
|
||||
t.jsonData, err = json.Marshal(t.data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal webhook data: %w", err)
|
||||
}
|
||||
|
||||
t.SetRetry(t.conf.RetryTimes, t.conf.RetryInterval)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *WebHookTask) Run() error {
|
||||
req, err := http.NewRequest(t.conf.Method, t.conf.URL, bytes.NewBuffer(t.jsonData))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
for k, v := range t.conf.Headers {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
|
||||
client := &http.Client{
|
||||
Timeout: time.Duration(t.conf.TimeoutSeconds) * time.Second,
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.plugin.Error("webhook request failed", "error", err)
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
|
||||
return task.ErrTaskComplete
|
||||
}
|
||||
|
||||
err = fmt.Errorf("webhook request failed with status: %d", resp.StatusCode)
|
||||
t.plugin.Error("webhook response error", "status", resp.StatusCode)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *Plugin) SendWebhook(hookType config.HookType, conf config.Webhook, data any) *task.Task {
|
||||
webhookTask := &WebHookTask{
|
||||
plugin: p,
|
||||
hookType: hookType,
|
||||
conf: &conf,
|
||||
data: data,
|
||||
}
|
||||
return p.AddTask(webhookTask)
|
||||
}
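Receiver sketch (not part of the diff): any 2xx reply stops the retry loop in Run above. The JSON field names follow the webhookData maps later in this file; the route and port are illustrative.
func main() { // assumes: import ("encoding/json"; "log"; "net/http")
	http.HandleFunc("/hooks/publish", func(w http.ResponseWriter, r *http.Request) {
		var ev struct {
			Event      string `json:"event"`
			StreamPath string `json:"streamPath"`
			Timestamp  int64  `json:"timestamp"`
		}
		if err := json.NewDecoder(r.Body).Decode(&ev); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		log.Println("webhook:", ev.Event, ev.StreamPath)
		w.WriteHeader(http.StatusOK) // any 2xx stops the retry loop in WebHookTask.Run
	})
	log.Fatal(http.ListenAndServe(":8089", nil))
}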
|
||||
|
||||
// TODO: use alias stream
|
||||
func (p *Plugin) OnPublish(pub *Publisher) {
|
||||
onPublish := p.config.OnPub
|
||||
if p.Meta.Pusher != nil {
|
||||
for r, pushConf := range onPublish.Push {
|
||||
if pushConf.URL = r.Replace(pub.StreamPath, pushConf.URL); pushConf.URL != "" {
|
||||
p.Push(pub, pushConf)
|
||||
p.Push(pub.StreamPath, pushConf, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -392,6 +498,21 @@ func (p *Plugin) OnPublish(pub *Publisher) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Plugin) auth(streamPath string, key string, secret string, expire string) (err error) {
|
||||
if unixTime, err := strconv.ParseInt(expire, 16, 64); err != nil || time.Now().Unix() > unixTime {
|
||||
return fmt.Errorf("auth failed expired")
|
||||
}
|
||||
if len(secret) != 32 {
|
||||
return fmt.Errorf("auth failed secret length must be 32")
|
||||
}
|
||||
trueSecret := md5.Sum([]byte(key + streamPath + expire))
|
||||
if secret == hex.EncodeToString(trueSecret[:]) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("auth failed invalid secret")
|
||||
}
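Client-side sketch (not part of the diff): producing a secret/expire pair that this check accepts; the function name, the key and streamPath parameters, and the one-hour lifetime are assumptions.
func signStream(key, streamPath string) (secret, expire string) {
	// assumes: import ("crypto/md5"; "encoding/hex"; "strconv"; "time")
	expire = strconv.FormatInt(time.Now().Add(time.Hour).Unix(), 16) // hex unix time, matching ParseInt(expire, 16, 64)
	sum := md5.Sum([]byte(key + streamPath + expire))
	secret = hex.EncodeToString(sum[:]) // send as ?secret=...&expire=... on the publish/subscribe URL
	return
}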
|
||||
|
||||
func (p *Plugin) OnSubscribe(streamPath string, args url.Values) {
|
||||
// var avoidTrans bool
|
||||
//AVOID:
|
||||
@@ -431,7 +552,7 @@ func (p *Plugin) OnSubscribe(streamPath string, args url.Values) {
|
||||
}
|
||||
func (p *Plugin) PublishWithConfig(ctx context.Context, streamPath string, conf config.Publish) (publisher *Publisher, err error) {
|
||||
publisher = createPublisher(p, streamPath, conf)
|
||||
if p.config.EnableAuth {
|
||||
if p.config.EnableAuth && publisher.Type == PublishTypeServer {
|
||||
onAuthPub := p.Meta.OnAuthPub
|
||||
if onAuthPub == nil {
|
||||
onAuthPub = p.Server.Meta.OnAuthPub
|
||||
@@ -441,9 +562,20 @@ func (p *Plugin) PublishWithConfig(ctx context.Context, streamPath string, conf
|
||||
p.Warn("auth failed", "error", err)
|
||||
return
|
||||
}
|
||||
} else if conf.Key != "" {
|
||||
if err = p.auth(publisher.StreamPath, conf.Key, publisher.Args.Get("secret"), publisher.Args.Get("expire")); err != nil {
|
||||
p.Warn("auth failed", "error", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
err = p.Server.Streams.AddTask(publisher, ctx).WaitStarted()
|
||||
if err == nil {
|
||||
publisher.OnDispose(func() {
|
||||
p.sendPublishEndWebhook(publisher)
|
||||
})
|
||||
p.sendPublishWebhook(publisher)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -453,7 +585,7 @@ func (p *Plugin) Publish(ctx context.Context, streamPath string) (publisher *Pub
|
||||
|
||||
func (p *Plugin) SubscribeWithConfig(ctx context.Context, streamPath string, conf config.Subscribe) (subscriber *Subscriber, err error) {
|
||||
subscriber = createSubscriber(p, streamPath, conf)
|
||||
if p.config.EnableAuth {
|
||||
if p.config.EnableAuth && subscriber.Type == SubscribeTypeServer {
|
||||
onAuthSub := p.Meta.OnAuthSub
|
||||
if onAuthSub == nil {
|
||||
onAuthSub = p.Server.Meta.OnAuthSub
|
||||
@@ -463,6 +595,11 @@ func (p *Plugin) SubscribeWithConfig(ctx context.Context, streamPath string, con
|
||||
p.Warn("auth failed", "error", err)
|
||||
return
|
||||
}
|
||||
} else if conf.Key != "" {
|
||||
if err = p.auth(subscriber.StreamPath, conf.Key, subscriber.Args.Get("secret"), subscriber.Args.Get("expire")); err != nil {
|
||||
p.Warn("auth failed", "error", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
err = p.Server.Streams.AddTask(subscriber, ctx).WaitStarted()
|
||||
@@ -474,6 +611,12 @@ func (p *Plugin) SubscribeWithConfig(ctx context.Context, streamPath string, con
|
||||
err = subscriber.StopReason()
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
subscriber.OnDispose(func() {
|
||||
p.sendSubscribeEndWebhook(subscriber)
|
||||
})
|
||||
p.sendSubscribeWebhook(subscriber)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -489,16 +632,16 @@ func (p *Plugin) Pull(streamPath string, conf config.Pull, pubConf *config.Publi
|
||||
puller.GetPullJob().Init(puller, p, streamPath, conf, pubConf)
|
||||
}
|
||||
|
||||
func (p *Plugin) Push(pub *Publisher, conf config.Push) {
|
||||
func (p *Plugin) Push(streamPath string, conf config.Push, subConf *config.Subscribe) {
|
||||
pusher := p.Meta.Pusher()
|
||||
job := pusher.GetPushJob().Init(pusher, p, pub.StreamPath, conf)
|
||||
job.Depend(pub)
|
||||
pusher.GetPushJob().Init(pusher, p, streamPath, conf, subConf)
|
||||
}
|
||||
|
||||
func (p *Plugin) Record(pub *Publisher, conf config.Record, subConf *config.Subscribe) {
|
||||
func (p *Plugin) Record(pub *Publisher, conf config.Record, subConf *config.Subscribe) *RecordJob {
|
||||
recorder := p.Meta.Recorder()
|
||||
job := recorder.GetRecordJob().Init(recorder, p, pub.StreamPath, conf, subConf)
|
||||
job.Depend(pub)
|
||||
return job
|
||||
}
|
||||
|
||||
func (p *Plugin) Transform(pub *Publisher, conf config.Transform) {
|
||||
@@ -525,6 +668,32 @@ func (p *Plugin) registerHandler(handlers map[string]http.HandlerFunc) {
|
||||
for patten, handler := range handlers {
|
||||
p.handle(patten, handler)
|
||||
}
|
||||
if p.config.EnableAuth && p.Server.ServerConfig.EnableLogin {
|
||||
p.handle("/api/secret/{type}/{streamPath...}", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
authHeader := r.Header.Get("Authorization")
|
||||
if authHeader == "" {
|
||||
http.Error(rw, "missing authorization header", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
tokenString := strings.TrimPrefix(authHeader, "Bearer ")
|
||||
_, err := p.Server.ValidateToken(tokenString)
|
||||
if err != nil {
|
||||
http.Error(rw, "invalid token", http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
streamPath := r.PathValue("streamPath")
|
||||
t := r.PathValue("type")
|
||||
expire := r.URL.Query().Get("expire")
|
||||
if t == "publish" {
|
||||
secret := md5.Sum([]byte(p.config.Publish.Key + streamPath + expire))
|
||||
rw.Write([]byte(hex.EncodeToString(secret[:])))
|
||||
} else if t == "subscribe" {
|
||||
secret := md5.Sum([]byte(p.config.Subscribe.Key + streamPath + expire))
|
||||
rw.Write([]byte(hex.EncodeToString(secret[:])))
|
||||
}
|
||||
}))
|
||||
}
|
||||
if rootHandler, ok := p.handler.(http.Handler); ok {
|
||||
p.handle("/", rootHandler)
|
||||
}
|
||||
@@ -585,3 +754,102 @@ func (s *SaveConfig) Run() (err error) {
|
||||
func (s *SaveConfig) Dispose() {
|
||||
s.file.Close()
|
||||
}
|
||||
|
||||
func (p *Plugin) sendPublishWebhook(pub *Publisher) {
|
||||
if p.config.Hook == nil {
|
||||
return
|
||||
}
|
||||
webhookData := map[string]interface{}{
|
||||
"event": "publish",
|
||||
"streamPath": pub.StreamPath,
|
||||
"args": pub.Args,
|
||||
"publishId": pub.ID,
|
||||
"remoteAddr": pub.RemoteAddr,
|
||||
"type": pub.Type,
|
||||
"pluginName": p.Meta.Name,
|
||||
"timestamp": time.Now().Unix(),
|
||||
}
|
||||
p.SendWebhook(config.HookOnPublish, p.config.Hook[config.HookOnPublish], webhookData)
|
||||
if p.Server.config.Hook == nil {
|
||||
return
|
||||
}
|
||||
p.Server.SendWebhook(config.HookOnPublish, p.Server.config.Hook[config.HookOnPublish], webhookData)
|
||||
}
|
||||
|
||||
func (p *Plugin) sendPublishEndWebhook(pub *Publisher) {
|
||||
if p.config.Hook == nil {
|
||||
return
|
||||
}
|
||||
webhookData := map[string]interface{}{
|
||||
"event": "publish_end",
|
||||
"streamPath": pub.StreamPath,
|
||||
"publishId": pub.ID,
|
||||
"reason": pub.StopReason().Error(),
|
||||
"timestamp": time.Now().Unix(),
|
||||
}
|
||||
p.SendWebhook(config.HookOnPublishEnd, p.config.Hook[config.HookOnPublishEnd], webhookData)
|
||||
}
|
||||
|
||||
func (p *Plugin) sendSubscribeWebhook(sub *Subscriber) {
|
||||
if p.config.Hook == nil {
|
||||
return
|
||||
}
|
||||
webhookData := map[string]interface{}{
|
||||
"event": "subscribe",
|
||||
"streamPath": sub.StreamPath,
|
||||
"publishId": sub.Publisher.ID,
|
||||
"subscriberId": sub.ID,
|
||||
"remoteAddr": sub.RemoteAddr,
|
||||
"type": sub.Type,
|
||||
"args": sub.Args,
|
||||
"timestamp": time.Now().Unix(),
|
||||
}
|
||||
p.SendWebhook(config.HookOnSubscribe, p.config.Hook[config.HookOnSubscribe], webhookData)
|
||||
}
|
||||
|
||||
func (p *Plugin) sendSubscribeEndWebhook(sub *Subscriber) {
|
||||
if p.config.Hook == nil {
|
||||
return
|
||||
}
|
||||
webhookData := map[string]interface{}{
|
||||
"event": "subscribe_end",
|
||||
"streamPath": sub.StreamPath,
|
||||
"subscriberId": sub.ID,
|
||||
"reason": sub.StopReason().Error(),
|
||||
"timestamp": time.Now().Unix(),
|
||||
}
|
||||
if sub.Publisher != nil {
|
||||
webhookData["publishId"] = sub.Publisher.ID
|
||||
}
|
||||
p.SendWebhook(config.HookOnSubscribeEnd, p.config.Hook[config.HookOnSubscribeEnd], webhookData)
|
||||
}
|
||||
|
||||
func (p *Plugin) sendServerKeepAliveWebhook() {
|
||||
if p.config.Hook == nil {
|
||||
return
|
||||
}
|
||||
s := p.Server
|
||||
webhookData := map[string]interface{}{
|
||||
"event": "server_keep_alive",
|
||||
"timestamp": time.Now().Unix(),
|
||||
"streams": s.Streams.Length,
|
||||
"subscribers": s.Subscribers.Length,
|
||||
"publisherCount": s.Streams.Length,
|
||||
"subscriberCount": s.Subscribers.Length,
|
||||
"uptime": time.Since(s.StartTime).Seconds(),
|
||||
}
|
||||
p.SendWebhook(config.HookOnServerKeepAlive, p.config.Hook[config.HookOnServerKeepAlive], webhookData)
|
||||
}
|
||||
|
||||
type ServerKeepAliveTask struct {
|
||||
task.TickTask
|
||||
plugin *Plugin
|
||||
}
|
||||
|
||||
func (t *ServerKeepAliveTask) GetTickInterval() time.Duration {
|
||||
return time.Duration(t.plugin.config.Hook[config.HookOnServerKeepAlive].Interval) * time.Second
|
||||
}
|
||||
|
||||
func (t *ServerKeepAliveTask) Tick(now any) {
|
||||
t.plugin.sendServerKeepAliveWebhook()
|
||||
}
|
||||
|
||||
@@ -25,7 +25,7 @@ type CascadeClientPlugin struct {
|
||||
var _ = m7s.InstallPlugin[CascadeClientPlugin](cascade.NewCascadePuller)
|
||||
|
||||
type CascadeClient struct {
|
||||
task.Task
|
||||
task.Work
|
||||
cfg *CascadeClientPlugin
|
||||
quic.Connection
|
||||
}
|
||||
@@ -58,7 +58,7 @@ func (task *CascadeClient) Start() (err error) {
|
||||
zapErr = res[0]
|
||||
}
|
||||
task.Error("connect to cascade server", "server", task.cfg.Server, "err", zapErr)
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -68,7 +68,7 @@ func (task *CascadeClient) Run() (err error) {
|
||||
for err == nil {
|
||||
var s quic.Stream
|
||||
if s, err = task.AcceptStream(task.Task.Context); err == nil {
|
||||
task.cfg.AddTask(&cascade.ReceiveRequestTask{
|
||||
task.AddTask(&cascade.ReceiveRequestTask{
|
||||
Stream: s,
|
||||
Handler: task.cfg.GetGlobalCommonConf().GetHandler(),
|
||||
Connection: task.Connection,
|
||||
|
||||
plugin/cascade/pb/cascade.pb.go (new file, 843 lines)
@@ -0,0 +1,843 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.19.1
|
||||
// source: cascade.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
_ "google.golang.org/genproto/googleapis/api/annotations"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// GetClientListRequest is the request for fetching the client list
|
||||
type GetClientListRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
}
|
||||
|
||||
func (x *GetClientListRequest) Reset() {
|
||||
*x = GetClientListRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cascade_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetClientListRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetClientListRequest) ProtoMessage() {}
|
||||
|
||||
func (x *GetClientListRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cascade_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetClientListRequest.ProtoReflect.Descriptor instead.
|
||||
func (*GetClientListRequest) Descriptor() ([]byte, []int) {
|
||||
return file_cascade_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
// GetClientListResponse is the response carrying the client list
|
||||
type GetClientListResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
Data []*CascadeClient `protobuf:"bytes,3,rep,name=data,proto3" json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetClientListResponse) Reset() {
|
||||
*x = GetClientListResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cascade_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetClientListResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetClientListResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetClientListResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cascade_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetClientListResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetClientListResponse) Descriptor() ([]byte, []int) {
|
||||
return file_cascade_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *GetClientListResponse) GetCode() int32 {
|
||||
if x != nil {
|
||||
return x.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *GetClientListResponse) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *GetClientListResponse) GetData() []*CascadeClient {
|
||||
if x != nil {
|
||||
return x.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateClientRequest is the request for creating a client
|
||||
type CreateClientRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Secret string `protobuf:"bytes,2,opt,name=secret,proto3" json:"secret,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreateClientRequest) Reset() {
|
||||
*x = CreateClientRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cascade_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CreateClientRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CreateClientRequest) ProtoMessage() {}
|
||||
|
||||
func (x *CreateClientRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cascade_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CreateClientRequest.ProtoReflect.Descriptor instead.
|
||||
func (*CreateClientRequest) Descriptor() ([]byte, []int) {
|
||||
return file_cascade_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *CreateClientRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *CreateClientRequest) GetSecret() string {
|
||||
if x != nil {
|
||||
return x.Secret
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// CreateClientResponse is the response for creating a client
|
||||
type CreateClientResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
Data *CascadeClient `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CreateClientResponse) Reset() {
|
||||
*x = CreateClientResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cascade_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CreateClientResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CreateClientResponse) ProtoMessage() {}
|
||||
|
||||
func (x *CreateClientResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cascade_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CreateClientResponse.ProtoReflect.Descriptor instead.
|
||||
func (*CreateClientResponse) Descriptor() ([]byte, []int) {
|
||||
return file_cascade_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *CreateClientResponse) GetCode() int32 {
|
||||
if x != nil {
|
||||
return x.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *CreateClientResponse) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *CreateClientResponse) GetData() *CascadeClient {
|
||||
if x != nil {
|
||||
return x.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateClientRequest is the request for updating a cascade client.
|
||||
type UpdateClientRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Secret string `protobuf:"bytes,3,opt,name=secret,proto3" json:"secret,omitempty"`
|
||||
}
|
||||
|
||||
func (x *UpdateClientRequest) Reset() {
|
||||
*x = UpdateClientRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cascade_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *UpdateClientRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*UpdateClientRequest) ProtoMessage() {}
|
||||
|
||||
func (x *UpdateClientRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cascade_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use UpdateClientRequest.ProtoReflect.Descriptor instead.
|
||||
func (*UpdateClientRequest) Descriptor() ([]byte, []int) {
|
||||
return file_cascade_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *UpdateClientRequest) GetId() uint32 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *UpdateClientRequest) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *UpdateClientRequest) GetSecret() string {
|
||||
if x != nil {
|
||||
return x.Secret
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// UpdateClientResponse is the response for updating a cascade client.
|
||||
type UpdateClientResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
Data *CascadeClient `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (x *UpdateClientResponse) Reset() {
|
||||
*x = UpdateClientResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cascade_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *UpdateClientResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*UpdateClientResponse) ProtoMessage() {}
|
||||
|
||||
func (x *UpdateClientResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cascade_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use UpdateClientResponse.ProtoReflect.Descriptor instead.
|
||||
func (*UpdateClientResponse) Descriptor() ([]byte, []int) {
|
||||
return file_cascade_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *UpdateClientResponse) GetCode() int32 {
|
||||
if x != nil {
|
||||
return x.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *UpdateClientResponse) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *UpdateClientResponse) GetData() *CascadeClient {
|
||||
if x != nil {
|
||||
return x.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteClientRequest is the request for deleting a cascade client.
|
||||
type DeleteClientRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteClientRequest) Reset() {
|
||||
*x = DeleteClientRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cascade_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeleteClientRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteClientRequest) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteClientRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cascade_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteClientRequest.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteClientRequest) Descriptor() ([]byte, []int) {
|
||||
return file_cascade_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *DeleteClientRequest) GetId() uint32 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// DeleteClientResponse is the response for deleting a cascade client.
|
||||
type DeleteClientResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
|
||||
Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DeleteClientResponse) Reset() {
|
||||
*x = DeleteClientResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cascade_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DeleteClientResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DeleteClientResponse) ProtoMessage() {}
|
||||
|
||||
func (x *DeleteClientResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cascade_proto_msgTypes[7]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DeleteClientResponse.ProtoReflect.Descriptor instead.
|
||||
func (*DeleteClientResponse) Descriptor() ([]byte, []int) {
|
||||
return file_cascade_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *DeleteClientResponse) GetCode() int32 {
|
||||
if x != nil {
|
||||
return x.Code
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *DeleteClientResponse) GetMessage() string {
|
||||
if x != nil {
|
||||
return x.Message
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// CascadeClient describes a cascade client.
|
||||
type CascadeClient struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
|
||||
Ip string `protobuf:"bytes,3,opt,name=ip,proto3" json:"ip,omitempty"`
|
||||
Online bool `protobuf:"varint,4,opt,name=online,proto3" json:"online,omitempty"`
|
||||
CreatedTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=createdTime,proto3" json:"createdTime,omitempty"`
|
||||
UpdatedTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=updatedTime,proto3" json:"updatedTime,omitempty"`
|
||||
}
|
||||
|
||||
func (x *CascadeClient) Reset() {
|
||||
*x = CascadeClient{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cascade_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *CascadeClient) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*CascadeClient) ProtoMessage() {}
|
||||
|
||||
func (x *CascadeClient) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cascade_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use CascadeClient.ProtoReflect.Descriptor instead.
|
||||
func (*CascadeClient) Descriptor() ([]byte, []int) {
|
||||
return file_cascade_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *CascadeClient) GetId() uint32 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *CascadeClient) GetName() string {
|
||||
if x != nil {
|
||||
return x.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *CascadeClient) GetIp() string {
|
||||
if x != nil {
|
||||
return x.Ip
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *CascadeClient) GetOnline() bool {
|
||||
if x != nil {
|
||||
return x.Online
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *CascadeClient) GetCreatedTime() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.CreatedTime
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *CascadeClient) GetUpdatedTime() *timestamppb.Timestamp {
|
||||
if x != nil {
|
||||
return x.UpdatedTime
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_cascade_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_cascade_proto_rawDesc = []byte{
|
||||
0x0a, 0x0d, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12,
|
||||
0x07, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d,
|
||||
0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x16, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x6c,
|
||||
0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22,
|
||||
0x71, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74,
|
||||
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65,
|
||||
0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07,
|
||||
0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d,
|
||||
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x43,
|
||||
0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x04, 0x64, 0x61,
|
||||
0x74, 0x61, 0x22, 0x41, 0x0a, 0x13, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65,
|
||||
0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d,
|
||||
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a,
|
||||
0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73,
|
||||
0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x70, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43,
|
||||
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a,
|
||||
0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64,
|
||||
0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x2a, 0x0a, 0x04, 0x64,
|
||||
0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x61, 0x73, 0x63,
|
||||
0x61, 0x64, 0x65, 0x2e, 0x43, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e,
|
||||
0x74, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x51, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74,
|
||||
0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e,
|
||||
0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12,
|
||||
0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
|
||||
0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01,
|
||||
0x28, 0x09, 0x52, 0x06, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x22, 0x70, 0x0a, 0x14, 0x55, 0x70,
|
||||
0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
|
||||
0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
|
||||
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65,
|
||||
0x12, 0x2a, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16,
|
||||
0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x43, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65,
|
||||
0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0x0a, 0x13,
|
||||
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52,
|
||||
0x02, 0x69, 0x64, 0x22, 0x44, 0x0a, 0x14, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69,
|
||||
0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63,
|
||||
0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12,
|
||||
0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
|
||||
0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xd7, 0x01, 0x0a, 0x0d, 0x43, 0x61,
|
||||
0x73, 0x63, 0x61, 0x64, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69,
|
||||
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e,
|
||||
0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
|
||||
0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x12,
|
||||
0x16, 0x0a, 0x06, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52,
|
||||
0x06, 0x6f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x3c, 0x0a, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74,
|
||||
0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54,
|
||||
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65,
|
||||
0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3c, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64,
|
||||
0x54, 0x69, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
|
||||
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x54,
|
||||
0x69, 0x6d, 0x65, 0x32, 0xd4, 0x03, 0x0a, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x12, 0x6f,
|
||||
0x0a, 0x0d, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x12,
|
||||
0x1d, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69,
|
||||
0x65, 0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e,
|
||||
0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x69, 0x65,
|
||||
0x6e, 0x74, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1f,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x19, 0x12, 0x17, 0x2f, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65,
|
||||
0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x69, 0x73, 0x74, 0x12,
|
||||
0x71, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12,
|
||||
0x1c, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
|
||||
0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e,
|
||||
0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c,
|
||||
0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x1e, 0x22, 0x19, 0x2f, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x73, 0x65,
|
||||
0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x3a,
|
||||
0x01, 0x2a, 0x12, 0x71, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65,
|
||||
0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x55, 0x70, 0x64,
|
||||
0x61, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
|
||||
0x1a, 0x1d, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74,
|
||||
0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x22, 0x19, 0x2f, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64,
|
||||
0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x75, 0x70, 0x64, 0x61,
|
||||
0x74, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0x73, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43,
|
||||
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e,
|
||||
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2e, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x22, 0x1e, 0x2f, 0x63, 0x61, 0x73,
|
||||
0x63, 0x61, 0x64, 0x65, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x64,
|
||||
0x65, 0x6c, 0x65, 0x74, 0x65, 0x2f, 0x7b, 0x69, 0x64, 0x7d, 0x42, 0x1f, 0x5a, 0x1d, 0x6d, 0x37,
|
||||
0x73, 0x2e, 0x6c, 0x69, 0x76, 0x65, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e,
|
||||
0x2f, 0x63, 0x61, 0x73, 0x63, 0x61, 0x64, 0x65, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_cascade_proto_rawDescOnce sync.Once
|
||||
file_cascade_proto_rawDescData = file_cascade_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_cascade_proto_rawDescGZIP() []byte {
|
||||
file_cascade_proto_rawDescOnce.Do(func() {
|
||||
file_cascade_proto_rawDescData = protoimpl.X.CompressGZIP(file_cascade_proto_rawDescData)
|
||||
})
|
||||
return file_cascade_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_cascade_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
|
||||
var file_cascade_proto_goTypes = []interface{}{
|
||||
(*GetClientListRequest)(nil), // 0: cascade.GetClientListRequest
|
||||
(*GetClientListResponse)(nil), // 1: cascade.GetClientListResponse
|
||||
(*CreateClientRequest)(nil), // 2: cascade.CreateClientRequest
|
||||
(*CreateClientResponse)(nil), // 3: cascade.CreateClientResponse
|
||||
(*UpdateClientRequest)(nil), // 4: cascade.UpdateClientRequest
|
||||
(*UpdateClientResponse)(nil), // 5: cascade.UpdateClientResponse
|
||||
(*DeleteClientRequest)(nil), // 6: cascade.DeleteClientRequest
|
||||
(*DeleteClientResponse)(nil), // 7: cascade.DeleteClientResponse
|
||||
(*CascadeClient)(nil), // 8: cascade.CascadeClient
|
||||
(*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp
|
||||
}
|
||||
var file_cascade_proto_depIdxs = []int32{
|
||||
8, // 0: cascade.GetClientListResponse.data:type_name -> cascade.CascadeClient
|
||||
8, // 1: cascade.CreateClientResponse.data:type_name -> cascade.CascadeClient
|
||||
8, // 2: cascade.UpdateClientResponse.data:type_name -> cascade.CascadeClient
|
||||
9, // 3: cascade.CascadeClient.createdTime:type_name -> google.protobuf.Timestamp
|
||||
9, // 4: cascade.CascadeClient.updatedTime:type_name -> google.protobuf.Timestamp
|
||||
0, // 5: cascade.server.GetClientList:input_type -> cascade.GetClientListRequest
|
||||
2, // 6: cascade.server.CreateClient:input_type -> cascade.CreateClientRequest
|
||||
4, // 7: cascade.server.UpdateClient:input_type -> cascade.UpdateClientRequest
|
||||
6, // 8: cascade.server.DeleteClient:input_type -> cascade.DeleteClientRequest
|
||||
1, // 9: cascade.server.GetClientList:output_type -> cascade.GetClientListResponse
|
||||
3, // 10: cascade.server.CreateClient:output_type -> cascade.CreateClientResponse
|
||||
5, // 11: cascade.server.UpdateClient:output_type -> cascade.UpdateClientResponse
|
||||
7, // 12: cascade.server.DeleteClient:output_type -> cascade.DeleteClientResponse
|
||||
9, // [9:13] is the sub-list for method output_type
|
||||
5, // [5:9] is the sub-list for method input_type
|
||||
5, // [5:5] is the sub-list for extension type_name
|
||||
5, // [5:5] is the sub-list for extension extendee
|
||||
0, // [0:5] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_cascade_proto_init() }
|
||||
func file_cascade_proto_init() {
|
||||
if File_cascade_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_cascade_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetClientListRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cascade_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetClientListResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cascade_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*CreateClientRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cascade_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*CreateClientResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cascade_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*UpdateClientRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cascade_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*UpdateClientResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cascade_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteClientRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cascade_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DeleteClientResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cascade_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*CascadeClient); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_cascade_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 9,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
GoTypes: file_cascade_proto_goTypes,
|
||||
DependencyIndexes: file_cascade_proto_depIdxs,
|
||||
MessageInfos: file_cascade_proto_msgTypes,
|
||||
}.Build()
|
||||
File_cascade_proto = out.File
|
||||
file_cascade_proto_rawDesc = nil
|
||||
file_cascade_proto_goTypes = nil
|
||||
file_cascade_proto_depIdxs = nil
|
||||
}
412	plugin/cascade/pb/cascade.pb.gw.go	Normal file
@@ -0,0 +1,412 @@
|
||||
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
|
||||
// source: cascade.proto
|
||||
|
||||
/*
|
||||
Package pb is a reverse proxy.
|
||||
|
||||
It translates gRPC into RESTful JSON APIs.
|
||||
*/
|
||||
package pb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
// Suppress "imported and not used" errors
|
||||
var _ codes.Code
|
||||
var _ io.Reader
|
||||
var _ status.Status
|
||||
var _ = runtime.String
|
||||
var _ = utilities.NewDoubleArray
|
||||
var _ = metadata.Join
|
||||
|
||||
func request_Server_GetClientList_0(ctx context.Context, marshaler runtime.Marshaler, client ServerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq GetClientListRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
msg, err := client.GetClientList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Server_GetClientList_0(ctx context.Context, marshaler runtime.Marshaler, server ServerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq GetClientListRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
msg, err := server.GetClientList(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_Server_CreateClient_0(ctx context.Context, marshaler runtime.Marshaler, client ServerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq CreateClientRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.CreateClient(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Server_CreateClient_0(ctx context.Context, marshaler runtime.Marshaler, server ServerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq CreateClientRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.CreateClient(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_Server_UpdateClient_0(ctx context.Context, marshaler runtime.Marshaler, client ServerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq UpdateClientRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.UpdateClient(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Server_UpdateClient_0(ctx context.Context, marshaler runtime.Marshaler, server ServerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq UpdateClientRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.UpdateClient(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_Server_DeleteClient_0(ctx context.Context, marshaler runtime.Marshaler, client ServerClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq DeleteClientRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
|
||||
}
|
||||
|
||||
protoReq.Id, err = runtime.Uint32(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
|
||||
}
|
||||
|
||||
msg, err := client.DeleteClient(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Server_DeleteClient_0(ctx context.Context, marshaler runtime.Marshaler, server ServerServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq DeleteClientRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["id"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
|
||||
}
|
||||
|
||||
protoReq.Id, err = runtime.Uint32(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
|
||||
}
|
||||
|
||||
msg, err := server.DeleteClient(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
// RegisterServerHandlerServer registers the http handlers for service Server to "mux".
|
||||
// UnaryRPC :call ServerServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterServerHandlerFromEndpoint instead.
|
||||
func RegisterServerHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ServerServer) error {
|
||||
|
||||
mux.Handle("GET", pattern_Server_GetClientList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/cascade.Server/GetClientList", runtime.WithHTTPPathPattern("/cascadeserver/api/list"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Server_GetClientList_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Server_GetClientList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_Server_CreateClient_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/cascade.Server/CreateClient", runtime.WithHTTPPathPattern("/cascadeserver/api/create"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Server_CreateClient_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Server_CreateClient_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_Server_UpdateClient_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/cascade.Server/UpdateClient", runtime.WithHTTPPathPattern("/cascadeserver/api/update"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Server_UpdateClient_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Server_UpdateClient_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_Server_DeleteClient_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/cascade.Server/DeleteClient", runtime.WithHTTPPathPattern("/cascadeserver/api/delete/{id}"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Server_DeleteClient_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Server_DeleteClient_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterServerHandlerFromEndpoint is same as RegisterServerHandler but
|
||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
||||
func RegisterServerHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
||||
conn, err := grpc.DialContext(ctx, endpoint, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
}()
|
||||
}()
|
||||
|
||||
return RegisterServerHandler(ctx, mux, conn)
|
||||
}
|
||||
|
||||
// RegisterServerHandler registers the http handlers for service Server to "mux".
|
||||
// The handlers forward requests to the grpc endpoint over "conn".
|
||||
func RegisterServerHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
|
||||
return RegisterServerHandlerClient(ctx, mux, NewServerClient(conn))
|
||||
}
|
||||
|
||||
// RegisterServerHandlerClient registers the http handlers for service Server
|
||||
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ServerClient".
|
||||
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ServerClient"
|
||||
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
|
||||
// "ServerClient" to call the correct interceptors.
|
||||
func RegisterServerHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ServerClient) error {
|
||||
|
||||
mux.Handle("GET", pattern_Server_GetClientList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/cascade.Server/GetClientList", runtime.WithHTTPPathPattern("/cascadeserver/api/list"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Server_GetClientList_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Server_GetClientList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_Server_CreateClient_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/cascade.Server/CreateClient", runtime.WithHTTPPathPattern("/cascadeserver/api/create"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Server_CreateClient_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Server_CreateClient_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_Server_UpdateClient_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/cascade.Server/UpdateClient", runtime.WithHTTPPathPattern("/cascadeserver/api/update"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Server_UpdateClient_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Server_UpdateClient_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_Server_DeleteClient_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/cascade.Server/DeleteClient", runtime.WithHTTPPathPattern("/cascadeserver/api/delete/{id}"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Server_DeleteClient_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Server_DeleteClient_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_Server_GetClientList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"cascadeserver", "api", "list"}, ""))
|
||||
|
||||
pattern_Server_CreateClient_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"cascadeserver", "api", "create"}, ""))
|
||||
|
||||
pattern_Server_UpdateClient_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"cascadeserver", "api", "update"}, ""))
|
||||
|
||||
pattern_Server_DeleteClient_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"cascadeserver", "api", "delete", "id"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
forward_Server_GetClientList_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Server_CreateClient_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Server_UpdateClient_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Server_DeleteClient_0 = runtime.ForwardResponseMessage
|
||||
)
99	plugin/cascade/pb/cascade.proto	Normal file
@@ -0,0 +1,99 @@
syntax = "proto3";
import "google/api/annotations.proto";
import "google/protobuf/timestamp.proto";
package cascade;

option go_package = "m7s.live/v5/plugin/cascade/pb";

// The server service defines the RPC interface of the cascade service.
service server {
  // GetClientList returns the list of all cascade clients.
  rpc GetClientList(GetClientListRequest) returns (GetClientListResponse) {
    option (google.api.http) = {
      get: "/cascadeserver/api/list"
    };
  }

  // CreateClient creates a new cascade client.
  rpc CreateClient(CreateClientRequest) returns (CreateClientResponse) {
    option (google.api.http) = {
      post: "/cascadeserver/api/create"
      body: "*"
    };
  }

  // UpdateClient updates a cascade client.
  rpc UpdateClient(UpdateClientRequest) returns (UpdateClientResponse) {
    option (google.api.http) = {
      post: "/cascadeserver/api/update"
      body: "*"
    };
  }

  // DeleteClient deletes a cascade client.
  rpc DeleteClient(DeleteClientRequest) returns (DeleteClientResponse) {
    option (google.api.http) = {
      post: "/cascadeserver/api/delete/{id}"
    };
  }
}

// GetClientListRequest is the request for listing cascade clients.
message GetClientListRequest {
  // Filter parameters can be added here later.
}

// GetClientListResponse is the response for listing cascade clients.
message GetClientListResponse {
  int32 code = 1;
  string message = 2;
  repeated CascadeClient data = 3;
}

// CreateClientRequest is the request for creating a client.
message CreateClientRequest {
  string name = 1;
  string secret = 2;
}

// CreateClientResponse is the response for creating a client.
message CreateClientResponse {
  int32 code = 1;
  string message = 2;
  CascadeClient data = 3;
}

// UpdateClientRequest is the request for updating a client.
message UpdateClientRequest {
  uint32 id = 1;
  string name = 2;
  string secret = 3;
}

// UpdateClientResponse is the response for updating a client.
message UpdateClientResponse {
  int32 code = 1;
  string message = 2;
  CascadeClient data = 3;
}

// DeleteClientRequest is the request for deleting a client.
message DeleteClientRequest {
  uint32 id = 1;
}

// DeleteClientResponse is the response for deleting a client.
message DeleteClientResponse {
  int32 code = 1;
  string message = 2;
}

// CascadeClient describes a cascade client.
message CascadeClient {
  uint32 id = 1;
  string name = 2;
  string ip = 3;
  bool online = 4;
  google.protobuf.Timestamp createdTime = 5;
  google.protobuf.Timestamp updatedTime = 6;
}
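The service above maps each RPC onto a REST route under /cascadeserver/api/ via google.api.http annotations. As a rough sketch of how the generated gateway could be served on its own, outside monibuca's plugin wiring (the gRPC endpoint address and HTTP port below are placeholders, not values from this commit):

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"m7s.live/v5/plugin/cascade/pb"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// REST requests hitting mux are translated into gRPC calls on the endpoint.
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithTransportCredentials(insecure.NewCredentials())}
	if err := pb.RegisterServerHandlerFromEndpoint(ctx, mux, "127.0.0.1:50051", opts); err != nil { // placeholder endpoint
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux)) // placeholder port
}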
221	plugin/cascade/pb/cascade_grpc.pb.go	Normal file
@@ -0,0 +1,221 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
// - protoc v3.19.1
// source: cascade.proto

package pb

import (
	context "context"
	grpc "google.golang.org/grpc"
	codes "google.golang.org/grpc/codes"
	status "google.golang.org/grpc/status"
)

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.32.0 or later.
const _ = grpc.SupportPackageIsVersion7

// ServerClient is the client API for Server service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ServerClient interface {
	// GetClientList returns the list of all cascade clients.
	GetClientList(ctx context.Context, in *GetClientListRequest, opts ...grpc.CallOption) (*GetClientListResponse, error)
	// CreateClient creates a new cascade client.
	CreateClient(ctx context.Context, in *CreateClientRequest, opts ...grpc.CallOption) (*CreateClientResponse, error)
	// UpdateClient updates a cascade client.
	UpdateClient(ctx context.Context, in *UpdateClientRequest, opts ...grpc.CallOption) (*UpdateClientResponse, error)
	// DeleteClient deletes a cascade client.
	DeleteClient(ctx context.Context, in *DeleteClientRequest, opts ...grpc.CallOption) (*DeleteClientResponse, error)
}

type serverClient struct {
	cc grpc.ClientConnInterface
}

func NewServerClient(cc grpc.ClientConnInterface) ServerClient {
	return &serverClient{cc}
}

func (c *serverClient) GetClientList(ctx context.Context, in *GetClientListRequest, opts ...grpc.CallOption) (*GetClientListResponse, error) {
	out := new(GetClientListResponse)
	err := c.cc.Invoke(ctx, "/cascade.server/GetClientList", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *serverClient) CreateClient(ctx context.Context, in *CreateClientRequest, opts ...grpc.CallOption) (*CreateClientResponse, error) {
	out := new(CreateClientResponse)
	err := c.cc.Invoke(ctx, "/cascade.server/CreateClient", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *serverClient) UpdateClient(ctx context.Context, in *UpdateClientRequest, opts ...grpc.CallOption) (*UpdateClientResponse, error) {
	out := new(UpdateClientResponse)
	err := c.cc.Invoke(ctx, "/cascade.server/UpdateClient", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *serverClient) DeleteClient(ctx context.Context, in *DeleteClientRequest, opts ...grpc.CallOption) (*DeleteClientResponse, error) {
	out := new(DeleteClientResponse)
	err := c.cc.Invoke(ctx, "/cascade.server/DeleteClient", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// ServerServer is the server API for Server service.
// All implementations must embed UnimplementedServerServer
// for forward compatibility
type ServerServer interface {
	// GetClientList returns the list of all cascade clients.
	GetClientList(context.Context, *GetClientListRequest) (*GetClientListResponse, error)
	// CreateClient creates a new cascade client.
	CreateClient(context.Context, *CreateClientRequest) (*CreateClientResponse, error)
	// UpdateClient updates a cascade client.
	UpdateClient(context.Context, *UpdateClientRequest) (*UpdateClientResponse, error)
	// DeleteClient deletes a cascade client.
	DeleteClient(context.Context, *DeleteClientRequest) (*DeleteClientResponse, error)
	mustEmbedUnimplementedServerServer()
}

// UnimplementedServerServer must be embedded to have forward compatible implementations.
type UnimplementedServerServer struct {
}

func (UnimplementedServerServer) GetClientList(context.Context, *GetClientListRequest) (*GetClientListResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetClientList not implemented")
}
func (UnimplementedServerServer) CreateClient(context.Context, *CreateClientRequest) (*CreateClientResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method CreateClient not implemented")
}
func (UnimplementedServerServer) UpdateClient(context.Context, *UpdateClientRequest) (*UpdateClientResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method UpdateClient not implemented")
}
func (UnimplementedServerServer) DeleteClient(context.Context, *DeleteClientRequest) (*DeleteClientResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DeleteClient not implemented")
}
func (UnimplementedServerServer) mustEmbedUnimplementedServerServer() {}

// UnsafeServerServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ServerServer will
// result in compilation errors.
type UnsafeServerServer interface {
	mustEmbedUnimplementedServerServer()
}

func RegisterServerServer(s grpc.ServiceRegistrar, srv ServerServer) {
	s.RegisterService(&Server_ServiceDesc, srv)
}

func _Server_GetClientList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(GetClientListRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ServerServer).GetClientList(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/cascade.server/GetClientList",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ServerServer).GetClientList(ctx, req.(*GetClientListRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Server_CreateClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(CreateClientRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ServerServer).CreateClient(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/cascade.server/CreateClient",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ServerServer).CreateClient(ctx, req.(*CreateClientRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Server_UpdateClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(UpdateClientRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ServerServer).UpdateClient(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/cascade.server/UpdateClient",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ServerServer).UpdateClient(ctx, req.(*UpdateClientRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Server_DeleteClient_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DeleteClientRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ServerServer).DeleteClient(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/cascade.server/DeleteClient",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ServerServer).DeleteClient(ctx, req.(*DeleteClientRequest))
	}
	return interceptor(ctx, in, info, handler)
}

// Server_ServiceDesc is the grpc.ServiceDesc for Server service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Server_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "cascade.server",
	HandlerType: (*ServerServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "GetClientList",
			Handler:    _Server_GetClientList_Handler,
		},
		{
			MethodName: "CreateClient",
			Handler:    _Server_CreateClient_Handler,
		},
		{
			MethodName: "UpdateClient",
			Handler:    _Server_UpdateClient_Handler,
		},
		{
			MethodName: "DeleteClient",
			Handler:    _Server_DeleteClient_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "cascade.proto",
}
|
||||
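For context, the generated descriptor above is used by passing a `ServerServer` implementation to `RegisterServerServer`. The cascade plugin later in this diff wires it through `m7s.InstallPlugin`; the sketch below instead shows the plain gRPC registration path, with a placeholder listen address and only one method overridden.

```go
package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	"m7s.live/v5/plugin/cascade/pb"
)

// server embeds the generated UnimplementedServerServer so that every method
// it does not override returns codes.Unimplemented, as shown above.
type server struct {
	pb.UnimplementedServerServer
}

func (s *server) GetClientList(ctx context.Context, req *pb.GetClientListRequest) (*pb.GetClientListResponse, error) {
	return &pb.GetClientListResponse{}, nil // empty list
}

func main() {
	lis, err := net.Listen("tcp", ":50051") // placeholder address
	if err != nil {
		log.Fatal(err)
	}
	s := grpc.NewServer()
	pb.RegisterServerServer(s, &server{}) // wires the handlers listed in Server_ServiceDesc
	log.Fatal(s.Serve(lis))
}
```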
@@ -1,12 +1,6 @@
|
||||
package cascade
|
||||
|
||||
import (
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
var ENDFLAG = []byte{0}
|
||||
|
||||
type Superior struct {
|
||||
}
|
||||
|
||||
var SubordinateMap util.Collection[uint, *Instance]
|
||||
|
||||
@@ -8,12 +8,9 @@ import (
|
||||
"github.com/gobwas/ws"
|
||||
"github.com/gobwas/ws/wsutil"
|
||||
"github.com/quic-go/quic-go"
|
||||
"m7s.live/v5/pkg/task"
|
||||
)
|
||||
|
||||
type Http2Quic struct {
|
||||
task.Task
|
||||
quic.Connection
|
||||
quic.Stream
|
||||
}
|
||||
|
||||
@@ -56,7 +53,7 @@ func (q *Http2Quic) ServeWebSocket(w http.ResponseWriter, r *http.Request) {
|
||||
var err error
|
||||
for err == nil {
|
||||
msg, err = wsutil.ReadServerText(rw)
|
||||
q.Debug("read server", "msg", string(msg))
|
||||
// q.Debug("read server", "msg", string(msg))
|
||||
if err == nil {
|
||||
err = wsutil.WriteServerText(rw, msg)
|
||||
}
|
||||
@@ -65,13 +62,13 @@ func (q *Http2Quic) ServeWebSocket(w http.ResponseWriter, r *http.Request) {
|
||||
var msg []byte
|
||||
for err == nil {
|
||||
msg, err = wsutil.ReadClientText(rw)
|
||||
q.Debug("read client", "msg", string(msg))
|
||||
// q.Debug("read client", "msg", string(msg))
|
||||
if err == nil {
|
||||
err = wsutil.WriteClientText(rw, msg)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
q.Error("websocket", "err", err)
|
||||
// q.Error("websocket", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -4,14 +4,15 @@ import (
|
||||
"bufio"
|
||||
"context"
|
||||
"io"
|
||||
"m7s.live/v5"
|
||||
"m7s.live/v5/pkg/task"
|
||||
flv "m7s.live/v5/plugin/flv/pkg"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
flv "m7s.live/v5/plugin/flv/pkg"
|
||||
|
||||
"github.com/quic-go/quic-go"
|
||||
"m7s.live/v5"
|
||||
"m7s.live/v5/pkg/task"
|
||||
)
|
||||
|
||||
type RelayAPIConfig struct {
|
||||
@@ -46,14 +47,14 @@ type ReceiveRequestTask struct {
|
||||
quic.Stream
|
||||
http.Handler
|
||||
*RelayAPIConfig
|
||||
req *http.Request
|
||||
}
|
||||
|
||||
func (task *ReceiveRequestTask) Start() (err error) {
|
||||
func (task *ReceiveRequestTask) Go() (err error) {
|
||||
reader := bufio.NewReader(task.Stream)
|
||||
var req *http.Request
|
||||
line0, _, err := reader.ReadLine()
|
||||
reqLine := strings.Split(string(line0), " ")
|
||||
var req *http.Request
|
||||
task.SetDescription("request", line0)
|
||||
if err == nil {
|
||||
ctx, cancel := context.WithCancel(task.Stream.Context())
|
||||
defer cancel()
|
||||
@@ -80,11 +81,9 @@ func (task *ReceiveRequestTask) Start() (err error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (task *ReceiveRequestTask) Run() (err error) {
|
||||
req := task.req
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
//h, pattern := task.handler.Handler(req)
|
||||
//if !task.Check(pattern) {
|
||||
// http.Error(task, "403 Forbidden", http.StatusForbidden)
|
||||
@@ -108,9 +107,8 @@ func (task *ReceiveRequestTask) Run() (err error) {
|
||||
}
|
||||
task.ServeHTTP(task, req)
|
||||
}
|
||||
io.ReadAll(task)
|
||||
|
||||
return err
|
||||
_, err = io.ReadAll(task)
|
||||
return
|
||||
}
|
||||
|
||||
func (task *ReceiveRequestTask) Dispose() {
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package cascade
|
||||
|
||||
import (
|
||||
"database/sql"
|
||||
|
||||
"github.com/quic-go/quic-go"
|
||||
"gorm.io/gorm"
|
||||
)
|
||||
@@ -8,7 +10,7 @@ import (
|
||||
type Instance struct {
|
||||
gorm.Model
|
||||
Name string
|
||||
Secret string `gorm:"unique;index:idx_secret"`
|
||||
Secret sql.NullString `gorm:"unique;index:idx_secret"`
|
||||
IP string
|
||||
Online bool
|
||||
quic.Connection `gorm:"-"`
|
||||
|
||||
@@ -2,29 +2,62 @@ package plugin_cascade
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"database/sql"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
"m7s.live/v5"
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/task"
|
||||
"m7s.live/v5/pkg/util"
|
||||
|
||||
"context"
|
||||
|
||||
"github.com/quic-go/quic-go"
|
||||
"m7s.live/v5/plugin/cascade/pb"
|
||||
cascade "m7s.live/v5/plugin/cascade/pkg"
|
||||
)
|
||||
|
||||
type CascadeServerPlugin struct {
|
||||
m7s.Plugin
|
||||
pb.UnimplementedServerServer
|
||||
AutoRegister bool `default:"true" desc:"下级自动注册"`
|
||||
RelayAPI cascade.RelayAPIConfig `desc:"访问控制"`
|
||||
clients util.Collection[uint, *cascade.Instance]
|
||||
}
|
||||
|
||||
var _ = m7s.InstallPlugin[CascadeServerPlugin]()
|
||||
func (c *CascadeServerPlugin) OnInit() (err error) {
|
||||
if c.GetCommonConf().Quic.ListenAddr == "" {
|
||||
return pkg.ErrNotListen
|
||||
}
|
||||
c.clients.L = &sync.RWMutex{}
|
||||
if c.DB == nil {
|
||||
return pkg.ErrNoDB
|
||||
}
|
||||
c.DB.AutoMigrate(&cascade.Instance{})
|
||||
var instance []*cascade.Instance
|
||||
c.DB.Find(&instance)
|
||||
for _, instance := range instance {
|
||||
c.clients.Add(instance)
|
||||
if instance.Online {
|
||||
instance.Online = false
|
||||
c.DB.Save(instance)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var _ = m7s.InstallPlugin[CascadeServerPlugin](m7s.DefaultYaml(`quic:
|
||||
listenaddr: :44944`), &pb.Server_ServiceDesc, pb.RegisterServerHandler)
|
||||
|
||||
type CascadeServer struct {
|
||||
task.Work
|
||||
quic.Connection
|
||||
conf *CascadeServerPlugin
|
||||
conf *CascadeServerPlugin
|
||||
client *cascade.Instance
|
||||
}
|
||||
|
||||
func (c *CascadeServerPlugin) OnQUICConnect(conn quic.Connection) task.ITask {
|
||||
@@ -36,10 +69,10 @@ func (c *CascadeServerPlugin) OnQUICConnect(conn quic.Connection) task.ITask {
|
||||
return ret
|
||||
}
|
||||
|
||||
func (task *CascadeServer) Go() {
|
||||
func (task *CascadeServer) Go() (err error) {
|
||||
remoteAddr := task.Connection.RemoteAddr().String()
|
||||
stream, err := task.AcceptStream(task)
|
||||
if err != nil {
|
||||
var stream quic.Stream
|
||||
if stream, err = task.AcceptStream(task); err != nil {
|
||||
task.Error("AcceptStream", "err", err)
|
||||
return
|
||||
}
|
||||
@@ -50,20 +83,24 @@ func (task *CascadeServer) Go() {
|
||||
return
|
||||
}
|
||||
secret = secret[:len(secret)-1] // 去掉msg末尾的0
|
||||
var instance cascade.Instance
|
||||
child := &instance
|
||||
err = task.conf.DB.AutoMigrate(child)
|
||||
tx := task.conf.DB.First(child, "secret = ?", secret)
|
||||
if tx.Error == nil {
|
||||
cascade.SubordinateMap.Set(child)
|
||||
child := &cascade.Instance{}
|
||||
if secret != "" {
|
||||
tx := task.conf.DB.First(child, "secret = ?", secret)
|
||||
err = tx.Error
|
||||
} else {
|
||||
tx := task.conf.DB.First(child, "ip = ?", remoteAddr)
|
||||
err = tx.Error
|
||||
}
|
||||
if err == nil {
|
||||
task.conf.clients.Set(child)
|
||||
} else if task.conf.AutoRegister {
|
||||
child.Secret = secret
|
||||
child.Secret = sql.NullString{String: secret, Valid: secret != ""}
|
||||
child.IP = remoteAddr
|
||||
tx = task.conf.DB.First(child, "ip = ?", remoteAddr)
|
||||
if tx.Error != nil {
|
||||
task.conf.DB.Create(child)
|
||||
err = task.conf.DB.First(child, "ip = ?", remoteAddr).Error
|
||||
if err != nil {
|
||||
err = task.conf.DB.Create(child).Error
|
||||
}
|
||||
cascade.SubordinateMap.Set(child)
|
||||
task.conf.clients.Set(child)
|
||||
} else {
|
||||
task.Error("invalid secret:", "secret", secret)
|
||||
_, err = stream.Write([]byte{1, 0})
|
||||
@@ -74,8 +111,9 @@ func (task *CascadeServer) Go() {
|
||||
if child.Name == "" {
|
||||
child.Name = remoteAddr
|
||||
}
|
||||
task.conf.DB.Updates(child)
|
||||
err = task.conf.DB.Updates(child).Error
|
||||
child.Connection = task.Connection
|
||||
task.client = child
|
||||
_, err = stream.Write([]byte{0, 0})
|
||||
err = stream.Close()
|
||||
task.Info("client register:", "remoteAddr", remoteAddr)
|
||||
@@ -88,15 +126,30 @@ func (task *CascadeServer) Go() {
|
||||
task.AddTask(&receiveRequestTask)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (task *CascadeServer) Dispose() {
|
||||
if task.Connection != nil {
|
||||
task.Connection.CloseWithError(quic.ApplicationErrorCode(0), task.StopReason().Error())
|
||||
}
|
||||
if task.client != nil {
|
||||
task.client.Online = false
|
||||
task.conf.DB.Save(task.client)
|
||||
}
|
||||
}
|
||||
|
||||
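The registration flow above implies a simple handshake: the subordinate opens a QUIC stream, writes its secret terminated by a zero byte (ENDFLAG), and reads back two status bytes, `{0, 0}` for accepted and `{1, 0}` for rejected. A minimal sketch of the client side of that exchange, written against a plain `io.ReadWriter` so it does not depend on any particular quic-go dialing API; the in-memory pipe and secret below are placeholders.

```go
package main

import (
	"fmt"
	"io"
	"net"
)

// register performs the handshake sketched above on an already open stream:
// write secret + 0x00, then read a two-byte status where status[0] == 0 means accepted.
func register(stream io.ReadWriter, secret string) error {
	if _, err := stream.Write(append([]byte(secret), 0)); err != nil {
		return err
	}
	status := make([]byte, 2)
	if _, err := io.ReadFull(stream, status); err != nil {
		return err
	}
	if status[0] != 0 {
		return fmt.Errorf("registration rejected")
	}
	return nil
}

func main() {
	// An in-memory pipe stands in for the QUIC stream; the goroutine plays the server.
	client, server := net.Pipe()
	go func() {
		buf := make([]byte, 64)
		server.Read(buf)           // receives "my-secret\x00"
		server.Write([]byte{0, 0}) // accept
		server.Close()
	}()
	fmt.Println(register(client, "my-secret")) // <nil>
}
```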
// API_relay_ 用于转发请求, api/relay/:instanceId/*
|
||||
func (c *CascadeServerPlugin) API_relay_(w http.ResponseWriter, r *http.Request) {
|
||||
paths := strings.Split(r.URL.Path, "/")
|
||||
instanceId, err := strconv.ParseUint(paths[3], 10, 32)
|
||||
instance, ok := cascade.SubordinateMap.Get(uint(instanceId))
|
||||
instance, ok := c.clients.Get(uint(instanceId))
|
||||
if err != nil || !ok {
|
||||
//util.ReturnError(util.APIErrorNotFound, "instance not found", w, r)
|
||||
http.Error(w, "instance not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
if !instance.Online {
|
||||
http.Error(w, "instance offline", http.StatusServiceUnavailable)
|
||||
return
|
||||
}
|
||||
relayURL := "/" + strings.Join(paths[4:], "/")
|
||||
@@ -106,16 +159,94 @@ func (c *CascadeServerPlugin) API_relay_(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
c.Debug("relayQuic", "relayURL", relayURL)
|
||||
var relayer cascade.Http2Quic
|
||||
relayer.Connection = instance.Connection
|
||||
relayer.Stream, err = instance.OpenStream()
|
||||
relayer.Stream, err = instance.OpenStreamSync(r.Context())
|
||||
if err != nil {
|
||||
//util.ReturnError(util.APIErrorInternal, err.Error(), w, r)
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
c.AddTask(&relayer)
|
||||
relayer.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
// API_list 用于获取所有下级, api/list
|
||||
func (c *CascadeServerPlugin) API_list(w http.ResponseWriter, r *http.Request) {
|
||||
//util.ReturnFetchList(SubordinateMap.ToList, w, r)
|
||||
func (c *CascadeServerPlugin) GetClientList(ctx context.Context, req *pb.GetClientListRequest) (clientList *pb.GetClientListResponse, err error) {
|
||||
clientList = &pb.GetClientListResponse{}
|
||||
|
||||
for instance := range c.clients.Range {
|
||||
client := &pb.CascadeClient{
|
||||
Id: uint32(instance.ID),
|
||||
Name: instance.Name,
|
||||
Ip: instance.IP,
|
||||
Online: instance.Online,
|
||||
CreatedTime: timestamppb.New(instance.CreatedAt),
|
||||
UpdatedTime: timestamppb.New(instance.UpdatedAt),
|
||||
}
|
||||
clientList.Data = append(clientList.Data, client)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *CascadeServerPlugin) CreateClient(ctx context.Context, req *pb.CreateClientRequest) (result *pb.CreateClientResponse, err error) {
|
||||
result = &pb.CreateClientResponse{}
|
||||
|
||||
instance := &cascade.Instance{
|
||||
Name: req.Name,
|
||||
Secret: sql.NullString{String: req.Secret, Valid: req.Secret != ""},
|
||||
}
|
||||
|
||||
if err = c.DB.Create(instance).Error; err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
c.clients.Set(instance)
|
||||
|
||||
result.Data = &pb.CascadeClient{
|
||||
Id: uint32(instance.ID),
|
||||
Name: instance.Name,
|
||||
CreatedTime: timestamppb.New(instance.CreatedAt),
|
||||
UpdatedTime: timestamppb.New(instance.UpdatedAt),
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *CascadeServerPlugin) UpdateClient(ctx context.Context, req *pb.UpdateClientRequest) (result *pb.UpdateClientResponse, err error) {
|
||||
result = &pb.UpdateClientResponse{}
|
||||
|
||||
instance := &cascade.Instance{}
|
||||
if err = c.DB.First(instance, req.Id).Error; err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
instance.Name = req.Name
|
||||
instance.Secret = sql.NullString{String: req.Secret, Valid: req.Secret != ""}
|
||||
|
||||
if err = c.DB.Save(instance).Error; err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
c.clients.Set(instance)
|
||||
|
||||
result.Data = &pb.CascadeClient{
|
||||
Id: uint32(instance.ID),
|
||||
Name: instance.Name,
|
||||
UpdatedTime: timestamppb.New(instance.UpdatedAt),
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (c *CascadeServerPlugin) DeleteClient(ctx context.Context, req *pb.DeleteClientRequest) (result *pb.DeleteClientResponse, err error) {
|
||||
result = &pb.DeleteClientResponse{}
|
||||
|
||||
instance := &cascade.Instance{}
|
||||
if err = c.DB.First(instance, req.Id).Error; err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if err = c.DB.Delete(instance).Error; err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
c.clients.RemoveByKey(instance.ID)
|
||||
return
|
||||
}
|
||||
|
||||
71 plugin/crypto/README.md (new file)
@@ -0,0 +1,71 @@
|
||||
# Monibuca Crypto Plugin

This plugin encrypts video streams. It supports several encryption algorithms and can use either a static key or a dynamically derived key.

## Configuration

Add the following to config.yaml:

```yaml
crypto:
  isStatic: false # whether to use a static key
  algo: "aes_ctr" # encryption algorithm: aes_ctr, xor_s, or xor_c
  encryptLen: 1024 # number of bytes to encrypt
  secret:
    key: "your key" # encryption key
    iv: "your iv" # encryption IV (only required for aes_ctr and xor_c)
  onpub:
    transform:
      .* : $0 # regular expression selecting which streams to encrypt; this matches all streams
```

### Algorithms

1. `aes_ctr`: AES-CTR mode
   - key length: 32 bytes
   - iv length: 16 bytes

2. `xor_s`: simple XOR
   - key length: 32 bytes
   - no iv required

3. `xor_c`: complex XOR
   - key length: 32 bytes
   - iv length: 16 bytes

## Key Retrieval

### API

API for fetching the encryption key:

```
GET /crypto?stream={streamPath}
```

Parameters:
- stream: the stream path

Example response:
```text
{key}.{iv}
```

The key and iv in the response are RawStd base64 encoded.

### Key Derivation Rules

1. Static key mode (isStatic: true)
   - the key and iv from the configuration file are used directly

2. Dynamic key mode (isStatic: false)
   - key = md5(configured key + stream path)
   - iv = first 16 bytes of md5(stream path)

## Notes

1. Only the key payload of each video frame is encrypted; the NALU header bytes are preserved
2. When using dynamic keys, make sure a valid secret.key is configured
3. The AES-CTR and XOR-C algorithms require both key and iv to be configured
4. Dynamic key mode is recommended in production for better security
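The dynamic key rule above can be reproduced on the consuming side without calling the `/crypto` endpoint. Below is a minimal, self-contained Go sketch of that derivation; the secret and stream path values are placeholders, not taken from this repository.

```go
package main

// Sketch of the dynamic key derivation documented above:
// key = md5(secret + streamPath), iv = first 16 bytes of md5(streamPath).
import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
)

func md5Hex(s string) string {
	sum := md5.Sum([]byte(s))
	return hex.EncodeToString(sum[:]) // 32 hex characters
}

func main() {
	secret := "your key"      // placeholder for the configured secret.key
	streamPath := "live/test" // placeholder stream path

	key := md5Hex(secret + streamPath) // 32-byte key
	iv := md5Hex(streamPath)[:16]      // 16-byte iv

	fmt.Println("key:", key)
	fmt.Println("iv:", iv)
}
```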
43 plugin/crypto/api.go (new file)
@@ -0,0 +1,43 @@
|
||||
package plugin_crypto
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
cryptopkg "m7s.live/v5/plugin/crypto/pkg"
|
||||
)
|
||||
|
||||
func (p *CryptoPlugin) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
// 设置 CORS 头
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "GET, POST")
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Content-Type")
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
|
||||
// 获取 stream 参数
|
||||
stream := r.URL.Query().Get("stream")
|
||||
if stream == "" {
|
||||
http.Error(w, "stream parameter is required", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
//判断 stream 是否存在
|
||||
if !p.Server.Streams.Has(stream) {
|
||||
http.Error(w, "stream not found", http.StatusNotFound)
|
||||
return
|
||||
}
|
||||
keyConf, err := cryptopkg.ValidateAndCreateKey(p.IsStatic, p.Algo, p.Secret.Key, p.Secret.Iv, stream)
|
||||
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
// cryptor, err := method.GetCryptor(p.Algo, keyConf)
|
||||
// if err != nil {
|
||||
// http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
// return
|
||||
// }
|
||||
// w.Write([]byte(cryptor.GetKey()))
|
||||
|
||||
w.Write([]byte(fmt.Sprintf("%s.%s", base64.RawStdEncoding.EncodeToString([]byte(keyConf.Key)), base64.RawStdEncoding.EncodeToString([]byte(keyConf.Iv)))))
|
||||
}
|
||||
44 plugin/crypto/index.go (new file)
@@ -0,0 +1,44 @@
|
||||
package plugin_crypto
|
||||
|
||||
import (
|
||||
m7s "m7s.live/v5"
|
||||
crypto "m7s.live/v5/plugin/crypto/pkg"
|
||||
)
|
||||
|
||||
var _ = m7s.InstallPlugin[CryptoPlugin](crypto.NewTransform)
|
||||
|
||||
type CryptoPlugin struct {
|
||||
m7s.Plugin
|
||||
IsStatic bool `desc:"是否静态密钥" default:"false"`
|
||||
Algo string `desc:"加密算法" default:"aes_ctr"` //加密算法
|
||||
EncryptLen int `desc:"加密字节长度" default:"1024"` //加密字节长度
|
||||
Secret struct {
|
||||
Key string `desc:"加密密钥" default:"your key"` //加密密钥
|
||||
Iv string `desc:"加密向量" default:"your iv"` //加密向量
|
||||
} `desc:"密钥配置"`
|
||||
}
|
||||
|
||||
// OnInit 初始化插件时的回调函数
|
||||
func (p *CryptoPlugin) OnInit() (err error) {
|
||||
// 初始化全局配置
|
||||
crypto.GlobalConfig = crypto.Config{
|
||||
IsStatic: p.IsStatic,
|
||||
Algo: p.Algo,
|
||||
EncryptLen: p.EncryptLen,
|
||||
Secret: struct {
|
||||
Key string `desc:"加密密钥" default:"your key"`
|
||||
Iv string `desc:"加密向量" default:"your iv"`
|
||||
}{
|
||||
Key: p.Secret.Key,
|
||||
Iv: p.Secret.Iv,
|
||||
},
|
||||
}
|
||||
|
||||
p.Info("crypto config initialized",
|
||||
"algo", p.Algo,
|
||||
"isStatic", p.IsStatic,
|
||||
"encryptLen", p.EncryptLen,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
31 plugin/crypto/pkg/getkey_test.go (new executable file)
@@ -0,0 +1,31 @@
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestGetKey(t *testing.T) {
|
||||
stream := "/hdl/live/test0.flv"
|
||||
host := "http://localhost:8080/crypto/?stream="
|
||||
|
||||
r, err := http.DefaultClient.Get(host + stream)
|
||||
if err != nil {
|
||||
t.Error("get", err)
|
||||
return
|
||||
}
|
||||
b, err := io.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
t.Error("read", err)
|
||||
return
|
||||
}
|
||||
b64 := strings.Split(string(b), ".")
|
||||
|
||||
key, err := base64.RawStdEncoding.DecodeString(b64[0])
|
||||
t.Log("key", key, err)
|
||||
iv, err := base64.RawStdEncoding.DecodeString(b64[1])
|
||||
t.Log("iv", iv, err)
|
||||
}
|
||||
99 plugin/crypto/pkg/method/aes_cbc.go (new executable file)
@@ -0,0 +1,99 @@
|
||||
package method
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
)
|
||||
|
||||
//加密过程:
|
||||
// 1、处理数据,对数据进行填充,采用PKCS7(当密钥长度不够时,缺几位补几个几)的方式。
|
||||
// 2、对数据进行加密,采用AES加密方法中CBC加密模式
|
||||
// 3、对得到的加密数据,进行base64加密,得到字符串
|
||||
// 解密过程相反
|
||||
|
||||
// AesEncrypt 加密 cbc 模式
|
||||
type AesCryptor struct {
|
||||
key []byte
|
||||
}
|
||||
|
||||
func newAesCbc(cfg Key) (ICryptor, error) {
|
||||
var cryptor *AesCryptor
|
||||
if cfg.Key == "" {
|
||||
return nil, errors.New("aes cryptor config no key")
|
||||
} else {
|
||||
cryptor = &AesCryptor{key: []byte(cfg.Key)}
|
||||
}
|
||||
return cryptor, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterCryptor("aes_cbc", newAesCbc)
|
||||
}
|
||||
|
||||
func (c *AesCryptor) Encrypt(origin []byte) ([]byte, error) {
|
||||
//创建加密实例
|
||||
block, err := aes.NewCipher(c.key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//判断加密块的大小
|
||||
blockSize := block.BlockSize()
|
||||
//填充
|
||||
encryptBytes := pkcs7Padding(origin, blockSize)
|
||||
//初始化加密数据接收切片
|
||||
crypted := make([]byte, len(encryptBytes))
|
||||
//使用cbc加密模式
|
||||
blockMode := cipher.NewCBCEncrypter(block, c.key[:blockSize])
|
||||
//执行加密
|
||||
blockMode.CryptBlocks(crypted, encryptBytes)
|
||||
return crypted, nil
|
||||
}
|
||||
|
||||
func (c *AesCryptor) Decrypt(encrypted []byte) ([]byte, error) {
|
||||
//创建实例
|
||||
block, err := aes.NewCipher(c.key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
//获取块的大小
|
||||
blockSize := block.BlockSize()
|
||||
//使用cbc
|
||||
blockMode := cipher.NewCBCDecrypter(block, c.key[:blockSize])
|
||||
//初始化解密数据接收切片
|
||||
crypted := make([]byte, len(encrypted))
|
||||
//执行解密
|
||||
blockMode.CryptBlocks(crypted, encrypted)
|
||||
//去除填充
|
||||
crypted, err = pkcs7UnPadding(crypted)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return crypted, nil
|
||||
}
|
||||
|
||||
func (c *AesCryptor) GetKey() string {
|
||||
return base64.RawStdEncoding.EncodeToString(c.key)
|
||||
}
|
||||
|
||||
// pkcs7Padding 填充
|
||||
func pkcs7Padding(data []byte, blockSize int) []byte {
|
||||
//判断缺少几位长度。最少1,最多 blockSize
|
||||
padding := blockSize - len(data)%blockSize
|
||||
//补足位数。把切片[]byte{byte(padding)}复制padding个
|
||||
padText := bytes.Repeat([]byte{byte(padding)}, padding)
|
||||
return append(data, padText...)
|
||||
}
|
||||
|
||||
// pkcs7UnPadding 填充的反向操作
|
||||
func pkcs7UnPadding(data []byte) ([]byte, error) {
|
||||
length := len(data)
|
||||
if length == 0 {
|
||||
return nil, errors.New("加密字符串错误!")
|
||||
}
|
||||
//获取填充的个数
|
||||
unPadding := int(data[length-1])
|
||||
return data[:(length - unPadding)], nil
|
||||
}
|
||||
61 plugin/crypto/pkg/method/aes_ctr.go (new executable file)
@@ -0,0 +1,61 @@
|
||||
package method
|
||||
|
||||
import (
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type AesCtrCryptor struct {
|
||||
key []byte
|
||||
iv []byte
|
||||
}
|
||||
|
||||
func newAesCtr(cfg Key) (ICryptor, error) {
|
||||
var cryptor *AesCtrCryptor
|
||||
if cfg.Key == "" || cfg.Iv == "" {
|
||||
return nil, errors.New("aes ctr cryptor config no key")
|
||||
}
|
||||
cryptor = &AesCtrCryptor{key: []byte(cfg.Key), iv: []byte(cfg.Iv)}
|
||||
|
||||
return cryptor, nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterCryptor("aes_ctr", newAesCtr)
|
||||
}
|
||||
|
||||
func (c *AesCtrCryptor) Encrypt(origin []byte) ([]byte, error) {
|
||||
|
||||
block, err := aes.NewCipher(c.key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
aesCtr := cipher.NewCTR(block, c.iv)
|
||||
|
||||
// Encrypt the plaintext
|
||||
ciphertext := make([]byte, len(origin))
|
||||
aesCtr.XORKeyStream(ciphertext, origin)
|
||||
return ciphertext, nil
|
||||
}
|
||||
|
||||
func (c *AesCtrCryptor) Decrypt(encrypted []byte) ([]byte, error) {
|
||||
block, err := aes.NewCipher(c.key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
aesCtr := cipher.NewCTR(block, c.iv)
|
||||
|
||||
// Decrypt the ciphertext
|
||||
plaintext := make([]byte, len(encrypted))
|
||||
aesCtr.XORKeyStream(plaintext, encrypted)
|
||||
return plaintext, nil
|
||||
}
|
||||
|
||||
func (c *AesCtrCryptor) GetKey() string {
|
||||
return fmt.Sprintf("%s.%s", base64.RawStdEncoding.EncodeToString(c.key), base64.RawStdEncoding.EncodeToString(c.iv))
|
||||
}
|
||||
28 plugin/crypto/pkg/method/aes_ctr.java (new executable file)
@@ -0,0 +1,28 @@
|
||||
import javax.crypto.Cipher;
|
||||
import javax.crypto.spec.IvParameterSpec;
|
||||
import javax.crypto.spec.SecretKeySpec;
|
||||
|
||||
public class Aes256Ctr {
|
||||
|
||||
public static byte[] decrypt(byte[] ciphertext, byte[] key, byte[] iv) throws Exception {
|
||||
Cipher cipher = Cipher.getInstance("AES/CTR/NoPadding");
|
||||
SecretKeySpec secretKey = new SecretKeySpec(key, "AES");
|
||||
IvParameterSpec ivSpec = new IvParameterSpec(iv);
|
||||
cipher.init(Cipher.DECRYPT_MODE, secretKey, ivSpec);
|
||||
return cipher.doFinal(ciphertext);
|
||||
}
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
|
||||
int[] intArray = {253, 72, 209, 96, 36};
|
||||
byte[] ciphertext = new byte[intArray.length];
|
||||
for (int i = 0; i < intArray.length; i++) {
|
||||
ciphertext[i] = (byte) intArray[i];
|
||||
}
|
||||
// byte[] ciphertext = new byte[]{(byte) 253, 72, (byte) 209, 96, 36}; // ciphertext to be decrypted
|
||||
byte[] key = "01234567012345670123456701234567".getBytes();// 256-bit key
|
||||
byte[] iv = "0123456701234567".getBytes();// initialization vector
|
||||
byte[] plaintext = decrypt(ciphertext, key, iv);
|
||||
System.out.println(new String(plaintext, "UTF-8"));
|
||||
}
|
||||
}
|
||||
12 plugin/crypto/pkg/method/aes_ctr.js (new executable file)
@@ -0,0 +1,12 @@
|
||||
const crypto = require('crypto');
|
||||
|
||||
const key = Buffer.from('01234567012345670123456701234567', 'utf8');
|
||||
console.log(key)
|
||||
const nonce = Buffer.from('0123456701234567', 'utf8');
|
||||
console.log(nonce)
|
||||
const ciphertext = Buffer.from([253,72,209,96,36]);
|
||||
|
||||
const decipher = crypto.createDecipheriv('aes-256-ctr', key, nonce);
|
||||
const plaintext = decipher.update(ciphertext);
|
||||
const finalPlaintext = decipher.final();
|
||||
console.log(Buffer.concat([plaintext, finalPlaintext]).toString());
|
||||
14 plugin/crypto/pkg/method/aes_ctr_browser.js (new executable file)
@@ -0,0 +1,14 @@
|
||||
|
||||
var aesjs = require('aes-js');
|
||||
let ciphertext = Uint8Array.from([253, 72, 209, 96, 36]);
|
||||
|
||||
let key = aesjs.utils.utf8.toBytes('01234567012345670123456701234567');
|
||||
console.log(key)
|
||||
|
||||
let iv = aesjs.utils.utf8.toBytes('0123456701234567');
|
||||
console.log(iv)
|
||||
|
||||
var aesCtr = new aesjs.ModeOfOperation.ctr(key, new aesjs.Counter(iv));
|
||||
var decryptedBytes = aesCtr.decrypt(ciphertext);
|
||||
console.log(decryptedBytes)
|
||||
console.log(aesjs.utils.utf8.fromBytes(decryptedBytes))
|
||||
13 plugin/crypto/pkg/method/aes_ctr_node.js (new executable file)
@@ -0,0 +1,13 @@
|
||||
const crypto = require('crypto');
|
||||
|
||||
let key = Buffer.from('01234567012345670123456701234567', 'utf8');
|
||||
console.log(key)
|
||||
let iv = Buffer.from('0123456701234567', 'utf8');
|
||||
console.log(iv)
|
||||
let ciphertext = Buffer.from([253,72,209,96,36]);
|
||||
|
||||
const decipher = crypto.createDecipheriv('aes-256-ctr', key, iv);
|
||||
const plaintext = decipher.update(ciphertext);
|
||||
const finalPlaintext = decipher.final();
|
||||
console.log(Buffer.concat([plaintext, finalPlaintext]).toString());
|
||||
|
||||
92 plugin/crypto/pkg/method/cryptor_test.go (new executable file)
@@ -0,0 +1,92 @@
|
||||
package method
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestStream(t *testing.T) {
|
||||
encKey, _ := CreateKey(32)
|
||||
macKey, _ := CreateKey(32)
|
||||
|
||||
plaintext := "0123456789012345"
|
||||
pt := []byte(plaintext)
|
||||
var cfg Key
|
||||
cfg.EncKey = string(encKey)
|
||||
cfg.MacKey = string(macKey)
|
||||
c, _ := GetCryptor("stream", cfg)
|
||||
t.Log("key", c.GetKey())
|
||||
encryptData, err := c.Encrypt(pt)
|
||||
t.Log("stream encrypt base64", base64.RawStdEncoding.EncodeToString(encryptData), err)
|
||||
decryptData, err := c.Decrypt(encryptData)
|
||||
t.Log("stream decrypt", string(decryptData), err)
|
||||
if string(decryptData) != plaintext {
|
||||
t.Error("decrypt error")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestAesCbc(t *testing.T) {
|
||||
|
||||
encKey, _ := CreateKey(16)
|
||||
|
||||
plaintext := "0123456789012345"
|
||||
pt := []byte(plaintext)
|
||||
|
||||
var cfg Key
|
||||
cfg.Key = string(encKey)
|
||||
c, _ := GetCryptor("aes_cbc", cfg)
|
||||
t.Log(c.GetKey())
|
||||
encryptData, err := c.Encrypt(pt)
|
||||
t.Log("aes_cbc encrypt base64", base64.RawStdEncoding.EncodeToString(encryptData), err)
|
||||
decryptData, err := c.Decrypt(encryptData)
|
||||
t.Log("aes_cbc decrypt", string(decryptData), err)
|
||||
|
||||
if string(decryptData) != plaintext {
|
||||
t.Error("decrypt error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAesCtr(t *testing.T) {
|
||||
|
||||
encKey, _ := CreateKey(32)
|
||||
iv, _ := CreateKey(16)
|
||||
plaintext := "0123456789012345"
|
||||
pt := []byte(plaintext)
|
||||
var cfg Key
|
||||
cfg.Key = string(encKey)
|
||||
cfg.Iv = string(iv)
|
||||
|
||||
c, _ := GetCryptor("aes_ctr", cfg)
|
||||
t.Log(c.GetKey())
|
||||
encryptData, err := c.Encrypt(pt)
|
||||
t.Log("aes_ctr encrypt ", string(encryptData), err)
|
||||
decryptData, err := c.Decrypt(encryptData)
|
||||
t.Log("aes_ctr decrypt", string(decryptData), err)
|
||||
|
||||
if string(decryptData) != plaintext {
|
||||
t.Error("decrypt error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestXor(t *testing.T) {
|
||||
|
||||
encKey, _ := CreateKey(32)
|
||||
iv, _ := CreateKey(16)
|
||||
plaintext := "0123456789012345"
|
||||
pt := []byte(plaintext)
|
||||
var cfg Key
|
||||
cfg.Key = string(encKey)
|
||||
cfg.Iv = string(iv)
|
||||
|
||||
c, _ := GetCryptor("xor", cfg)
|
||||
t.Log(c.GetKey())
|
||||
encryptData, err := c.Encrypt(pt)
|
||||
t.Log("xor encrypt ", string(encryptData), "len", len(string(encryptData)), err)
|
||||
decryptData, err := c.Decrypt(encryptData)
|
||||
t.Log("xor decrypt", string(decryptData), err)
|
||||
|
||||
if string(decryptData) != plaintext {
|
||||
t.Error("decrypt error")
|
||||
}
|
||||
}
|
||||
51 plugin/crypto/pkg/method/icrypto.go (new executable file)
@@ -0,0 +1,51 @@
|
||||
package method
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/rand"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type ICryptor interface {
|
||||
Encrypt(origin []byte) ([]byte, error)
|
||||
Decrypt(encrypted []byte) ([]byte, error)
|
||||
GetKey() string // 获取密钥 格式:base64(key).base64(iv)
|
||||
}
|
||||
|
||||
const (
|
||||
CryptoEncrypt = iota + 1
|
||||
CryptoDecrypt
|
||||
)
|
||||
|
||||
type CryptoBuilder func(cfg Key) (ICryptor, error)
|
||||
|
||||
var (
|
||||
builders = make(map[string]CryptoBuilder)
|
||||
)
|
||||
|
||||
func RegisterCryptor(name string, builder CryptoBuilder) {
|
||||
builders[name] = builder
|
||||
}
|
||||
|
||||
func GetCryptor(cryptor string, cfg Key) (ICryptor, error) {
|
||||
builder, exists := builders[cryptor]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("Unknown ICryptor %q", cryptor)
|
||||
}
|
||||
return builder(cfg)
|
||||
}
|
||||
|
||||
func CreateKey(keySize int) ([]byte, error) {
|
||||
key := make([]byte, keySize)
|
||||
_, err := rand.Read(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return key, nil
|
||||
}
|
||||
|
||||
func Md5Sum(s string) string {
|
||||
ret := md5.Sum([]byte(s))
|
||||
return hex.EncodeToString(ret[:])
|
||||
}
|
||||
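The `builders` map above is a small constructor registry: each algorithm file registers itself in `init`, and `GetCryptor` looks the constructor up by name. A minimal usage sketch, assuming the import path used elsewhere in this diff and placeholder key material of the lengths aes_ctr requires.

```go
package main

import (
	"fmt"

	"m7s.live/v5/plugin/crypto/pkg/method"
)

func main() {
	// Placeholder key material: 32-byte key and 16-byte iv, as aes_ctr expects.
	cfg := method.Key{
		Key: "01234567012345670123456701234567",
		Iv:  "0123456701234567",
	}

	// Look up the constructor registered under "aes_ctr" in the builders map.
	cryptor, err := method.GetCryptor("aes_ctr", cfg)
	if err != nil {
		panic(err)
	}

	ciphertext, _ := cryptor.Encrypt([]byte("hello"))
	plaintext, _ := cryptor.Decrypt(ciphertext)
	fmt.Println(string(plaintext)) // hello
}
```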
6 plugin/crypto/pkg/method/package.json (new executable file)
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"dependencies": {
|
||||
"aes-js": "^3.1.2",
|
||||
"crypto-js": "^4.1.1"
|
||||
}
|
||||
}
|
||||
184 plugin/crypto/pkg/method/stream.go (new executable file)
@@ -0,0 +1,184 @@
|
||||
package method
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
type Key struct {
|
||||
Key string
|
||||
Iv string
|
||||
EncKey string
|
||||
MacKey string
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterCryptor("stream", newStream)
|
||||
}
|
||||
|
||||
type StreamCryptor struct {
|
||||
enckey []byte
|
||||
macKey []byte
|
||||
encrypter *StreamEncrypter `yaml:"-"`
|
||||
decrypter *StreamDecrypter `json:"-"`
|
||||
}
|
||||
|
||||
func NewStreamEncrypter(encKey, macKey []byte) (*StreamEncrypter, error) {
|
||||
block, err := aes.NewCipher(encKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
iv := make([]byte, block.BlockSize())
|
||||
_, err = rand.Read(iv)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stream := cipher.NewCTR(block, iv)
|
||||
mac := hmac.New(sha256.New, macKey)
|
||||
|
||||
return &StreamEncrypter{
|
||||
Block: block,
|
||||
Stream: stream,
|
||||
Mac: mac,
|
||||
IV: iv,
|
||||
}, nil
|
||||
}
|
||||
func NewStreamDecrypter(encKey, macKey []byte, meta StreamMeta) (*StreamDecrypter, error) {
|
||||
block, err := aes.NewCipher(encKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stream := cipher.NewCTR(block, meta.IV)
|
||||
mac := hmac.New(sha256.New, macKey)
|
||||
|
||||
return &StreamDecrypter{
|
||||
Block: block,
|
||||
Stream: stream,
|
||||
Mac: mac,
|
||||
Meta: meta,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type StreamMeta struct {
|
||||
// IV is the initial value for the crypto function
|
||||
IV []byte
|
||||
// Hash is the sha256 hmac of the stream
|
||||
Hash []byte
|
||||
}
|
||||
|
||||
type StreamEncrypter struct {
|
||||
Source io.Reader
|
||||
Block cipher.Block
|
||||
Stream cipher.Stream
|
||||
Mac hash.Hash
|
||||
IV []byte
|
||||
}
|
||||
|
||||
// StreamDecrypter is a decrypter for a stream of data with authentication
|
||||
type StreamDecrypter struct {
|
||||
Source io.Reader
|
||||
Block cipher.Block
|
||||
Stream cipher.Stream
|
||||
Mac hash.Hash
|
||||
Meta StreamMeta
|
||||
}
|
||||
|
||||
// Read encrypts the bytes of the inner reader and places them into p
|
||||
func (s *StreamEncrypter) Read(p []byte) (int, error) {
|
||||
n, readErr := s.Source.Read(p)
|
||||
if n > 0 {
|
||||
s.Stream.XORKeyStream(p[:n], p[:n])
|
||||
err := writeHash(s.Mac, p[:n])
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
return n, readErr
|
||||
}
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
// Meta returns the encrypted stream metadata for use in decrypting. This should only be called after the stream is finished
|
||||
func (s *StreamEncrypter) Meta() StreamMeta {
|
||||
return StreamMeta{IV: s.IV, Hash: s.Mac.Sum(nil)}
|
||||
}
|
||||
|
||||
// Read reads bytes from the underlying reader and then decrypts them
|
||||
func (s *StreamDecrypter) Read(p []byte) (int, error) {
|
||||
n, readErr := s.Source.Read(p)
|
||||
if n > 0 {
|
||||
err := writeHash(s.Mac, p[:n])
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
s.Stream.XORKeyStream(p[:n], p[:n])
|
||||
return n, readErr
|
||||
}
|
||||
return 0, io.EOF
|
||||
}
|
||||
|
||||
func newStream(cfg Key) (ICryptor, error) {
|
||||
var cryptor *StreamCryptor
|
||||
if (cfg.EncKey == "") || (cfg.MacKey == "") {
|
||||
return nil, errors.New("stream cryptor config not enckey or mackey")
|
||||
} else {
|
||||
encKey := []byte(cfg.EncKey)
|
||||
macKey := []byte(cfg.MacKey)
|
||||
|
||||
encrypter, err := NewStreamEncrypter(encKey, macKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
decrypter, err := NewStreamDecrypter(encKey, macKey, encrypter.Meta())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cryptor = &StreamCryptor{
|
||||
enckey: encKey,
|
||||
macKey: macKey,
|
||||
encrypter: encrypter,
|
||||
decrypter: decrypter,
|
||||
}
|
||||
|
||||
}
|
||||
return cryptor, nil
|
||||
}
|
||||
|
||||
func (c *StreamCryptor) Encrypt(origin []byte) ([]byte, error) {
|
||||
c.encrypter.Source = bytes.NewReader(origin)
|
||||
return io.ReadAll(c.encrypter)
|
||||
}
|
||||
|
||||
func (c *StreamCryptor) Decrypt(encrypted []byte) ([]byte, error) {
|
||||
c.decrypter.Source = bytes.NewReader(encrypted)
|
||||
return io.ReadAll(c.decrypter)
|
||||
}
|
||||
|
||||
func (c *StreamCryptor) GetKey() string {
|
||||
b64 := base64.RawStdEncoding
|
||||
return fmt.Sprintf("%s.%s.%s.%s",
|
||||
b64.EncodeToString(c.enckey),
|
||||
b64.EncodeToString(c.macKey),
|
||||
b64.EncodeToString(c.encrypter.IV),
|
||||
b64.EncodeToString(c.encrypter.Mac.Sum(nil)),
|
||||
)
|
||||
}
|
||||
|
||||
func writeHash(mac hash.Hash, p []byte) error {
|
||||
m, err := mac.Write(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if m != len(p) {
|
||||
return errors.New("could not write all bytes to hmac")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
100 plugin/crypto/pkg/method/xor.go (new executable file)
@@ -0,0 +1,100 @@
|
||||
package method
|
||||
|
||||
import (
|
||||
"crypto/subtle"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// SimpleXorCryptor 加密一次
|
||||
type SimpleXorCryptor struct {
|
||||
key []byte
|
||||
}
|
||||
|
||||
func newSimpleXor(cfg Key) (ICryptor, error) {
|
||||
var cryptor *SimpleXorCryptor
|
||||
if cfg.Key == "" {
|
||||
return nil, errors.New("xor cryptor config no key")
|
||||
} else {
|
||||
cryptor = &SimpleXorCryptor{key: []byte(cfg.Key)}
|
||||
}
|
||||
return cryptor, nil
|
||||
}
|
||||
|
||||
// simpleXorEncryptDecrypt 对给定的字节数组进行 XOR 加密和解密
|
||||
// key 是用于加密和解密的密钥
|
||||
func simpleXorEncryptDecrypt(data []byte, key []byte) []byte {
|
||||
dataLen := len(data)
|
||||
result := make([]byte, dataLen)
|
||||
keyLen := len(key)
|
||||
for i := 0; i < dataLen; i += keyLen {
|
||||
end := i + keyLen
|
||||
if end > dataLen {
|
||||
end = dataLen
|
||||
}
|
||||
subtle.XORBytes(result[i:end], data[i:end], key[:end-i])
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (c *SimpleXorCryptor) Encrypt(origin []byte) ([]byte, error) {
|
||||
return simpleXorEncryptDecrypt(origin, c.key), nil
|
||||
}
|
||||
|
||||
func (c *SimpleXorCryptor) Decrypt(encrypted []byte) ([]byte, error) {
|
||||
return simpleXorEncryptDecrypt(encrypted, c.key), nil
|
||||
}
|
||||
|
||||
func (c *SimpleXorCryptor) GetKey() string {
|
||||
return base64.RawStdEncoding.EncodeToString(c.key)
|
||||
}
|
||||
|
||||
// 复杂的XOR加密器 加密两次
|
||||
type ComplexXorCryptor struct {
|
||||
key []byte
|
||||
iv []byte
|
||||
}
|
||||
|
||||
func newComplexXor(cfg Key) (ICryptor, error) {
|
||||
var cryptor *ComplexXorCryptor
|
||||
if cfg.Key == "" {
|
||||
return nil, errors.New("xor cryptor config no key")
|
||||
} else {
|
||||
cryptor = &ComplexXorCryptor{key: []byte(cfg.Key), iv: []byte(cfg.Iv)}
|
||||
}
|
||||
return cryptor, nil
|
||||
}
|
||||
|
||||
// complexXorEncryptDecrypt 对给定的字节数组进行 XOR 加密和解密
|
||||
func complexXorEncryptDecrypt(arrayBuffer, key, iv []byte) []byte {
|
||||
// Assuming the key and iv have been provided and are not nil
|
||||
if key == nil || iv == nil {
|
||||
panic("key and iv must not be nil")
|
||||
}
|
||||
|
||||
result := make([]byte, len(arrayBuffer))
|
||||
keyLen := len(key)
|
||||
ivLen := len(iv)
|
||||
|
||||
for i := 0; i < len(result); i++ {
|
||||
result[i] = arrayBuffer[i] ^ (key[i%keyLen] ^ iv[i%ivLen])
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (c *ComplexXorCryptor) Encrypt(origin []byte) ([]byte, error) {
|
||||
return complexXorEncryptDecrypt(origin, c.key, c.iv), nil
|
||||
}
|
||||
|
||||
func (c *ComplexXorCryptor) Decrypt(encrypted []byte) ([]byte, error) {
|
||||
return complexXorEncryptDecrypt(encrypted, c.key, c.iv), nil
|
||||
}
|
||||
|
||||
func (c *ComplexXorCryptor) GetKey() string {
|
||||
return base64.RawStdEncoding.EncodeToString(c.key) + "." + base64.RawStdEncoding.EncodeToString(c.iv)
|
||||
}
|
||||
|
||||
func init() {
|
||||
RegisterCryptor("xor_s", newSimpleXor)
|
||||
RegisterCryptor("xor_c", newComplexXor)
|
||||
}
|
||||
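Both XOR variants rely on XOR being its own inverse, which is why `Encrypt` and `Decrypt` call the same helper. A small standalone sketch of that property, using the same keystream rule as `complexXorEncryptDecrypt` above, with placeholder key and iv.

```go
package main

import (
	"bytes"
	"fmt"
)

// xorWith applies data[i] ^ (key[i%len(key)] ^ iv[i%len(iv)]), the same
// keystream rule used by complexXorEncryptDecrypt above.
func xorWith(data, key, iv []byte) []byte {
	out := make([]byte, len(data))
	for i := range data {
		out[i] = data[i] ^ (key[i%len(key)] ^ iv[i%len(iv)])
	}
	return out
}

func main() {
	key := []byte("01234567012345670123456701234567") // placeholder 32-byte key
	iv := []byte("0123456701234567")                  // placeholder 16-byte iv
	plain := []byte("hello world")

	enc := xorWith(plain, key, iv)
	dec := xorWith(enc, key, iv) // applying the same XOR twice restores the input

	fmt.Println(bytes.Equal(dec, plain)) // true
}
```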
178 plugin/crypto/pkg/transform.go (new file)
@@ -0,0 +1,178 @@
|
||||
package crypto
|
||||
|
||||
import (
|
||||
"github.com/deepch/vdk/codec/h265parser"
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/task"
|
||||
|
||||
"fmt"
|
||||
|
||||
m7s "m7s.live/v5"
|
||||
"m7s.live/v5/plugin/crypto/pkg/method"
|
||||
)
|
||||
|
||||
// GlobalConfig 全局加密配置
|
||||
var GlobalConfig Config
|
||||
|
||||
type Config struct {
|
||||
IsStatic bool `desc:"是否静态密钥" default:"false"`
|
||||
Algo string `desc:"加密算法" default:"aes_ctr"` //加密算法
|
||||
EncryptLen int `desc:"加密字节长度" default:"1024"` //加密字节长度
|
||||
Secret struct {
|
||||
Key string `desc:"加密密钥" default:"your key"` //加密密钥
|
||||
Iv string `desc:"加密向量" default:"your iv"` //加密向量
|
||||
} `desc:"密钥配置"`
|
||||
}
|
||||
|
||||
type Transform struct {
|
||||
m7s.DefaultTransformer
|
||||
cryptor method.ICryptor
|
||||
}
|
||||
|
||||
func NewTransform() m7s.ITransformer {
|
||||
ret := &Transform{}
|
||||
ret.SetDescription(task.OwnerTypeKey, "Crypto")
|
||||
return ret
|
||||
}
|
||||
|
||||
// ValidateAndCreateKey 验证并创建加密密钥
|
||||
func ValidateAndCreateKey(isStatic bool, algo string, secretKey, secretIv, streamPath string) (keyConf method.Key, err error) {
|
||||
if isStatic {
|
||||
switch algo {
|
||||
case "aes_ctr":
|
||||
keyConf.Key = secretKey
|
||||
keyConf.Iv = secretIv
|
||||
if len(keyConf.Iv) != 16 || len(keyConf.Key) != 32 {
|
||||
return keyConf, fmt.Errorf("key or iv length is wrong")
|
||||
}
|
||||
case "xor_s":
|
||||
keyConf.Key = secretKey
|
||||
if len(keyConf.Key) != 32 {
|
||||
return keyConf, fmt.Errorf("key length is wrong")
|
||||
}
|
||||
case "xor_c":
|
||||
keyConf.Key = secretKey
|
||||
keyConf.Iv = secretIv
|
||||
if len(keyConf.Iv) != 16 || len(keyConf.Key) != 32 {
|
||||
return keyConf, fmt.Errorf("key or iv length is wrong")
|
||||
}
|
||||
default:
|
||||
return keyConf, fmt.Errorf("algo type is wrong")
|
||||
}
|
||||
} else {
|
||||
/*
|
||||
动态加密
|
||||
key = md5(密钥+流名称)
|
||||
iv = md5(流名称)前一半
|
||||
*/
|
||||
if secretKey != "" {
|
||||
keyConf.Key = method.Md5Sum(secretKey + streamPath)
|
||||
keyConf.Iv = method.Md5Sum(streamPath)[:16]
|
||||
} else {
|
||||
return keyConf, fmt.Errorf("secret key is empty")
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *Transform) Start() error {
|
||||
// 在 Start 时获取并保存配置
|
||||
t.Info("transform job started")
|
||||
|
||||
keyConf, err := ValidateAndCreateKey(GlobalConfig.IsStatic, GlobalConfig.Algo, GlobalConfig.Secret.Key, GlobalConfig.Secret.Iv, t.TransformJob.StreamPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
t.cryptor, err = method.GetCryptor(GlobalConfig.Algo, keyConf)
|
||||
if err != nil {
|
||||
t.Error("failed to create cryptor", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// 使用 TransformJob 的 Subscribe 方法订阅流
|
||||
if err := t.TransformJob.Subscribe(); err != nil {
|
||||
t.Error("failed to subscribe stream", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
t.Info("crypto transform started",
|
||||
"stream", t.TransformJob.StreamPath,
|
||||
"algo", GlobalConfig.Algo,
|
||||
"isStatic", GlobalConfig.IsStatic,
|
||||
)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Transform) Go() error {
|
||||
// 创建发布者
|
||||
if err := t.TransformJob.Publish(t.TransformJob.StreamPath + "/crypto"); err != nil {
|
||||
t.Error("failed to create publisher", "error", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// 处理音视频流
|
||||
return m7s.PlayBlock(t.TransformJob.Subscriber,
|
||||
func(audio *pkg.RawAudio) (err error) {
|
||||
copyAudio := &pkg.RawAudio{
|
||||
FourCC: audio.FourCC,
|
||||
Timestamp: audio.Timestamp,
|
||||
}
|
||||
audio.Memory.Range(func(b []byte) {
|
||||
copy(copyAudio.NextN(len(b)), b)
|
||||
})
|
||||
return t.TransformJob.Publisher.WriteAudio(copyAudio)
|
||||
},
|
||||
func(video *pkg.H26xFrame) error {
|
||||
// 处理视频帧
|
||||
if video.GetSize() == 0 {
|
||||
return nil
|
||||
}
|
||||
copyVideo := &pkg.H26xFrame{
|
||||
FourCC: video.FourCC,
|
||||
CTS: video.CTS,
|
||||
Timestamp: video.Timestamp,
|
||||
}
|
||||
|
||||
for _, nalu := range video.Nalus {
|
||||
mem := copyVideo.NextN(nalu.Size)
|
||||
copy(mem, nalu.ToBytes())
|
||||
needEncrypt := false
|
||||
if video.FourCC == codec.FourCC_H264 {
|
||||
switch codec.ParseH264NALUType(mem[0]) {
|
||||
case codec.NALU_Non_IDR_Picture, codec.NALU_IDR_Picture:
|
||||
needEncrypt = true
|
||||
}
|
||||
} else if video.FourCC == codec.FourCC_H265 {
|
||||
switch codec.ParseH265NALUType(mem[0]) {
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_CRA:
|
||||
needEncrypt = true
|
||||
}
|
||||
}
|
||||
if needEncrypt {
|
||||
encBytes, err := t.cryptor.Encrypt(mem[2:])
|
||||
if err == nil {
|
||||
copyVideo.Nalus.Append(append([]byte{mem[0], mem[1]}, encBytes...))
|
||||
} else {
|
||||
copyVideo.Nalus.Append(mem)
|
||||
}
|
||||
} else {
|
||||
copyVideo.Nalus.Append(mem)
|
||||
}
|
||||
}
|
||||
return t.TransformJob.Publisher.WriteVideo(copyVideo)
|
||||
})
|
||||
}
|
||||
|
||||
func (t *Transform) Dispose() {
|
||||
t.Info("crypto transform disposed",
|
||||
"stream", t.TransformJob.StreamPath,
|
||||
)
|
||||
}
|
||||
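On the playback side, the transform above implies that each encrypted NALU keeps its first two bytes in the clear and carries an encrypted payload from offset 2 onward. A minimal decryption sketch for the aes_ctr case, assuming the key and iv have already been fetched from `/crypto` and base64-decoded; the key, iv, and NALU bytes below are placeholders.

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"fmt"
)

// decryptNALU reverses the per-NALU encryption above: the first two bytes are
// left untouched and the remainder is AES-CTR decrypted with a fresh keystream.
func decryptNALU(nalu, key, iv []byte) ([]byte, error) {
	if len(nalu) <= 2 {
		return nalu, nil // nothing was encrypted
	}
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	out := make([]byte, len(nalu))
	copy(out[:2], nalu[:2]) // NALU header bytes stay in the clear
	cipher.NewCTR(block, iv).XORKeyStream(out[2:], nalu[2:])
	return out, nil
}

func main() {
	key := []byte("01234567012345670123456701234567") // placeholder 32-byte key
	iv := []byte("0123456701234567")                  // placeholder 16-byte iv
	nalu := []byte{0x65, 0x88, 0x01, 0x02, 0x03}      // placeholder encrypted NALU bytes

	fmt.Println(decryptNALU(nalu, key, iv))
}
```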
@@ -1,28 +1,44 @@
|
||||
package plugin_debug
|
||||
|
||||
import (
|
||||
myproc "github.com/cloudwego/goref/pkg/proc"
|
||||
"github.com/go-delve/delve/pkg/config"
|
||||
"github.com/go-delve/delve/service/debugger"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"m7s.live/v5"
|
||||
"net/http"
|
||||
"net/http/pprof"
|
||||
"os"
|
||||
"runtime"
|
||||
runtimePPROF "runtime/pprof"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
myproc "github.com/cloudwego/goref/pkg/proc"
|
||||
"github.com/go-delve/delve/pkg/config"
|
||||
"github.com/go-delve/delve/service/debugger"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
"m7s.live/v5"
|
||||
"m7s.live/v5/plugin/debug/pb"
|
||||
debug "m7s.live/v5/plugin/debug/pkg"
|
||||
"m7s.live/v5/plugin/debug/pkg/profile"
|
||||
)
|
||||
|
||||
var _ = m7s.InstallPlugin[DebugPlugin]()
|
||||
var _ = m7s.InstallPlugin[DebugPlugin](&pb.Api_ServiceDesc, pb.RegisterApiHandler)
|
||||
var conf, _ = config.LoadConfig()
|
||||
|
||||
type DebugPlugin struct {
|
||||
pb.UnimplementedApiServer
|
||||
m7s.Plugin
|
||||
ProfileDuration time.Duration `default:"10s" desc:"profile持续时间"`
|
||||
Profile string `desc:"采集profile存储文件"`
|
||||
ChartPeriod time.Duration `default:"1s" desc:"图表更新周期"`
|
||||
Grfout string `default:"grf.out" desc:"grf输出文件"`
|
||||
|
||||
// 添加缓存字段
|
||||
cpuProfileData *profile.Profile // 缓存 CPU Profile 数据
|
||||
cpuProfileOnce sync.Once // 确保只采集一次
|
||||
cpuProfileLock sync.Mutex // 保护缓存数据
|
||||
}
|
||||
|
||||
type WriteToFile struct {
|
||||
@@ -31,19 +47,15 @@ type WriteToFile struct {
|
||||
}
|
||||
|
||||
func (w *WriteToFile) Header() http.Header {
|
||||
// return w.w.Header()
|
||||
return w.header
|
||||
}
|
||||
|
||||
// func (w *WriteToFile) Write(p []byte) (int, error) {
|
||||
// // w.w.Write(p)
|
||||
// return w.Writer.Write(p)
|
||||
// }
|
||||
func (w *WriteToFile) WriteHeader(statusCode int) {
|
||||
// w.w.WriteHeader(statusCode)
|
||||
}
|
||||
func (w *WriteToFile) WriteHeader(statusCode int) {}
|
||||
|
||||
func (p *DebugPlugin) OnInit() error {
|
||||
// 启用阻塞分析
|
||||
runtime.SetBlockProfileRate(1) // 设置采样率为1纳秒
|
||||
|
||||
if p.Profile != "" {
|
||||
go func() {
|
||||
file, err := os.Create(p.Profile)
|
||||
@@ -114,3 +126,320 @@ func (p *DebugPlugin) Grf(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
w.Write([]byte("ok"))
|
||||
}
|
||||
|
||||
func (p *DebugPlugin) GetHeap(ctx context.Context, empty *emptypb.Empty) (*pb.HeapResponse, error) {
|
||||
// 创建临时文件用于存储堆信息
|
||||
f, err := os.CreateTemp("", "heap")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
defer f.Close()
|
||||
|
||||
// 获取堆信息
|
||||
runtime.GC()
|
||||
if err := runtimePPROF.WriteHeapProfile(f); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 读取堆信息
|
||||
f.Seek(0, 0)
|
||||
prof, err := profile.Parse(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 准备响应数据
|
||||
resp := &pb.HeapResponse{
|
||||
Data: &pb.HeapData{
|
||||
Stats: &pb.HeapStats{},
|
||||
Objects: make([]*pb.HeapObject, 0),
|
||||
Edges: make([]*pb.HeapEdge, 0),
|
||||
},
|
||||
}
|
||||
|
||||
// 创建类型映射用于聚合统计
|
||||
typeMap := make(map[string]*pb.HeapObject)
|
||||
var totalSize int64
|
||||
|
||||
// 处理每个样本
|
||||
for _, sample := range prof.Sample {
|
||||
size := sample.Value[1] // 内存大小
|
||||
if size == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// 获取分配类型信息
|
||||
var typeName string
|
||||
if len(sample.Location) > 0 && len(sample.Location[0].Line) > 0 {
|
||||
if fn := sample.Location[0].Line[0].Function; fn != nil {
|
||||
typeName = fn.Name
|
||||
}
|
||||
}
|
||||
|
||||
// 创建或更新堆对象
|
||||
obj, exists := typeMap[typeName]
|
||||
if !exists {
|
||||
obj = &pb.HeapObject{
|
||||
Type: typeName,
|
||||
Address: fmt.Sprintf("%p", sample),
|
||||
Refs: make([]string, 0),
|
||||
}
|
||||
typeMap[typeName] = obj
|
||||
resp.Data.Objects = append(resp.Data.Objects, obj)
|
||||
}
|
||||
|
||||
obj.Count++
|
||||
obj.Size += size
|
||||
totalSize += size
|
||||
|
||||
// 构建引用关系
|
||||
for i := 1; i < len(sample.Location); i++ {
|
||||
loc := sample.Location[i]
|
||||
if len(loc.Line) == 0 || loc.Line[0].Function == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
callerName := loc.Line[0].Function.Name
|
||||
// 跳过系统函数
|
||||
if callerName == "" || strings.HasPrefix(callerName, "runtime.") {
|
||||
continue
|
||||
}
|
||||
|
||||
// 添加边
|
||||
edge := &pb.HeapEdge{
|
||||
From: callerName,
|
||||
To: typeName,
|
||||
FieldName: callerName,
|
||||
}
|
||||
resp.Data.Edges = append(resp.Data.Edges, edge)
|
||||
|
||||
// 将调用者添加到引用列表
|
||||
if !contains(obj.Refs, callerName) {
|
||||
obj.Refs = append(obj.Refs, callerName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 计算百分比
|
||||
for _, obj := range resp.Data.Objects {
|
||||
if totalSize > 0 {
|
||||
obj.SizePerc = float64(obj.Size) / float64(totalSize) * 100
|
||||
}
|
||||
}
|
||||
|
||||
// 按大小排序
|
||||
sort.Slice(resp.Data.Objects, func(i, j int) bool {
|
||||
return resp.Data.Objects[i].Size > resp.Data.Objects[j].Size
|
||||
})
|
||||
|
||||
// 获取运行时内存统计
|
||||
var ms runtime.MemStats
|
||||
runtime.ReadMemStats(&ms)
|
||||
|
||||
// 填充内存统计信息
|
||||
resp.Data.Stats.Alloc = ms.Alloc
|
||||
resp.Data.Stats.TotalAlloc = ms.TotalAlloc
|
||||
resp.Data.Stats.Sys = ms.Sys
|
||||
resp.Data.Stats.NumGC = ms.NumGC
|
||||
resp.Data.Stats.HeapAlloc = ms.HeapAlloc
|
||||
resp.Data.Stats.HeapSys = ms.HeapSys
|
||||
resp.Data.Stats.HeapIdle = ms.HeapIdle
|
||||
resp.Data.Stats.HeapInuse = ms.HeapInuse
|
||||
resp.Data.Stats.HeapReleased = ms.HeapReleased
|
||||
resp.Data.Stats.HeapObjects = ms.HeapObjects
|
||||
resp.Data.Stats.GcCPUFraction = ms.GCCPUFraction
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// 采集 CPU Profile 并缓存
|
||||
func (p *DebugPlugin) collectCPUProfile() error {
|
||||
p.cpuProfileLock.Lock()
|
||||
defer p.cpuProfileLock.Unlock()
|
||||
|
||||
// 如果已经采集过,直接返回
|
||||
if p.cpuProfileData != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// 创建临时文件用于存储 CPU Profile 数据
|
||||
f, err := os.CreateTemp("", "cpu_profile")
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not create CPU profile: %v", err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
defer f.Close()
|
||||
|
||||
// 开始 CPU profiling
|
||||
if err := runtimePPROF.StartCPUProfile(f); err != nil {
|
||||
return fmt.Errorf("could not start CPU profile: %v", err)
|
||||
}
|
||||
|
||||
// 采样指定时间
|
||||
time.Sleep(p.ProfileDuration)
|
||||
runtimePPROF.StopCPUProfile()
|
||||
|
||||
// 读取并解析 CPU Profile 数据
|
||||
f.Seek(0, 0)
|
||||
profileData, err := profile.Parse(f)
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not parse CPU profile: %v", err)
|
||||
}
|
||||
|
||||
// 缓存 CPU Profile 数据
|
||||
p.cpuProfileData = profileData
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetCpu 接口
|
||||
func (p *DebugPlugin) GetCpu(ctx context.Context, req *pb.CpuRequest) (*pb.CpuResponse, error) {
|
||||
// 如果需要刷新或者缓存中没有数据
|
||||
if req.Refresh || p.cpuProfileData == nil {
|
||||
p.cpuProfileLock.Lock()
|
||||
p.cpuProfileData = nil // 清除现有缓存
|
||||
p.cpuProfileOnce = sync.Once{} // 重置 Once
|
||||
p.cpuProfileLock.Unlock()
|
||||
}
|
||||
|
||||
// 如果请求指定了duration,临时更新ProfileDuration
|
||||
originalDuration := p.ProfileDuration
|
||||
if req.Duration > 0 {
|
||||
p.ProfileDuration = time.Duration(req.Duration) * time.Second
|
||||
}
|
||||
|
||||
// 确保采集 CPU Profile
|
||||
p.cpuProfileOnce.Do(func() {
|
||||
if err := p.collectCPUProfile(); err != nil {
|
||||
fmt.Printf("Failed to collect CPU profile: %v\n", err)
|
||||
}
|
||||
})
|
||||
|
||||
// 恢复原始的ProfileDuration
|
||||
if req.Duration > 0 {
|
||||
p.ProfileDuration = originalDuration
|
||||
}
|
||||
|
||||
// 如果缓存中没有数据,返回错误
|
||||
if p.cpuProfileData == nil {
|
||||
return nil, fmt.Errorf("CPU profile data is not available")
|
||||
}
|
||||
|
||||
// 使用缓存的 CPU Profile 数据构建响应
|
||||
resp := &pb.CpuResponse{
|
||||
Data: &pb.CpuData{
|
||||
TotalCpuTimeNs: uint64(p.cpuProfileData.DurationNanos),
|
||||
SamplingIntervalNs: uint64(p.cpuProfileData.Period),
|
||||
Functions: make([]*pb.FunctionProfile, 0),
|
||||
Goroutines: make([]*pb.GoroutineProfile, 0),
|
||||
SystemCalls: make([]*pb.SystemCall, 0),
|
||||
RuntimeStats: &pb.RuntimeStats{},
|
||||
},
|
||||
}
|
||||
|
||||
// 填充函数调用信息
|
||||
for _, sample := range p.cpuProfileData.Sample {
|
||||
functionProfile := &pb.FunctionProfile{
|
||||
FunctionName: sample.Location[0].Line[0].Function.Name,
|
||||
CpuTimeNs: uint64(sample.Value[0]),
|
||||
InvocationCount: uint64(sample.Value[1]),
|
||||
CallStack: make([]string, 0),
|
||||
}
|
||||
|
||||
// 填充调用栈信息
|
||||
for _, loc := range sample.Location {
|
||||
for _, line := range loc.Line {
|
||||
functionProfile.CallStack = append(functionProfile.CallStack, line.Function.Name)
|
||||
}
|
||||
}
|
||||
|
||||
resp.Data.Functions = append(resp.Data.Functions, functionProfile)
|
||||
}
|
||||
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// GetCpuGraph 接口
|
||||
func (p *DebugPlugin) GetCpuGraph(ctx context.Context, req *pb.CpuRequest) (*pb.CpuGraphResponse, error) {
|
||||
// 如果需要刷新或者缓存中没有数据
|
||||
if req.Refresh || p.cpuProfileData == nil {
|
||||
p.cpuProfileLock.Lock()
|
||||
p.cpuProfileData = nil // 清除现有缓存
|
||||
p.cpuProfileOnce = sync.Once{} // 重置 Once
|
||||
p.cpuProfileLock.Unlock()
|
||||
}
|
||||
|
||||
// 如果请求指定了duration,临时更新ProfileDuration
|
||||
originalDuration := p.ProfileDuration
|
||||
if req.Duration > 0 {
|
||||
p.ProfileDuration = time.Duration(req.Duration) * time.Second
|
||||
}
|
||||
|
||||
// 确保采集 CPU Profile
|
||||
p.cpuProfileOnce.Do(func() {
|
||||
if err := p.collectCPUProfile(); err != nil {
|
||||
fmt.Printf("Failed to collect CPU profile: %v\n", err)
|
||||
}
|
||||
})
|
||||
|
||||
// 恢复原始的ProfileDuration
|
||||
if req.Duration > 0 {
|
||||
p.ProfileDuration = originalDuration
|
||||
}
|
||||
|
||||
// 如果缓存中没有数据,返回错误
|
||||
if p.cpuProfileData == nil {
|
||||
return nil, fmt.Errorf("CPU profile data is not available")
|
||||
}
|
||||
|
||||
// 使用缓存的 CPU Profile 数据生成 dot 图
|
||||
dot, err := debug.GetDotGraph(p.cpuProfileData)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not generate dot graph: %v", err)
|
||||
}
|
||||
|
||||
return &pb.CpuGraphResponse{
|
||||
Data: dot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// 辅助函数:检查字符串切片是否包含特定字符串
|
||||
func contains(slice []string, str string) bool {
|
||||
for _, s := range slice {
|
||||
if s == str {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *DebugPlugin) GetHeapGraph(ctx context.Context, empty *emptypb.Empty) (*pb.HeapGraphResponse, error) {
|
||||
// 创建临时文件用于存储堆信息
|
||||
f, err := os.CreateTemp("", "heap")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
defer f.Close()
|
||||
|
||||
// 获取堆信息
|
||||
runtime.GC()
|
||||
if err := runtimePPROF.WriteHeapProfile(f); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// 读取堆信息
|
||||
f.Seek(0, 0)
|
||||
profile, err := profile.Parse(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Generate dot graph.
|
||||
dot, err := debug.GetDotGraph(profile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pb.HeapGraphResponse{
|
||||
Data: dot,
|
||||
}, nil
|
||||
}
|
||||
|
||||
1250
plugin/debug/pb/debug.pb.go
Normal file
1250
plugin/debug/pb/debug.pb.go
Normal file
File diff suppressed because it is too large
Load Diff
348
plugin/debug/pb/debug.pb.gw.go
Normal file
348
plugin/debug/pb/debug.pb.gw.go
Normal file
@@ -0,0 +1,348 @@
|
||||
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
|
||||
// source: debug.proto
|
||||
|
||||
/*
|
||||
Package pb is a reverse proxy.
|
||||
|
||||
It translates gRPC into RESTful JSON APIs.
|
||||
*/
|
||||
package pb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/proto"
|
||||
"google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
// Suppress "imported and not used" errors
|
||||
var (
|
||||
_ codes.Code
|
||||
_ io.Reader
|
||||
_ status.Status
|
||||
_ = errors.New
|
||||
_ = runtime.String
|
||||
_ = utilities.NewDoubleArray
|
||||
_ = metadata.Join
|
||||
)
|
||||
|
||||
func request_Api_GetHeap_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq emptypb.Empty
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
msg, err := client.GetHeap(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_Api_GetHeap_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq emptypb.Empty
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
msg, err := server.GetHeap(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func request_Api_GetHeapGraph_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq emptypb.Empty
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
msg, err := client.GetHeapGraph(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_Api_GetHeapGraph_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq emptypb.Empty
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
msg, err := server.GetHeapGraph(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
var filter_Api_GetCpuGraph_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
|
||||
|
||||
func request_Api_GetCpuGraph_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq CpuRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetCpuGraph_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := client.GetCpuGraph(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_Api_GetCpuGraph_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq CpuRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetCpuGraph_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := server.GetCpuGraph(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
var filter_Api_GetCpu_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
|
||||
|
||||
func request_Api_GetCpu_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq CpuRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetCpu_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := client.GetCpu(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
func local_request_Api_GetCpu_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var (
|
||||
protoReq CpuRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetCpu_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
msg, err := server.GetCpu(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
}
|
||||
|
||||
// RegisterApiHandlerServer registers the http handlers for service Api to "mux".
|
||||
// UnaryRPC :call ApiServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterApiHandlerFromEndpoint instead.
|
||||
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
|
||||
func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ApiServer) error {
|
||||
mux.Handle(http.MethodGet, pattern_Api_GetHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetHeap", runtime.WithHTTPPathPattern("/debug/api/heap"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Api_GetHeap_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_Api_GetHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_Api_GetHeapGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetHeapGraph", runtime.WithHTTPPathPattern("/debug/api/heap/graph"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Api_GetHeapGraph_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_Api_GetHeapGraph_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_Api_GetCpuGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetCpuGraph", runtime.WithHTTPPathPattern("/debug/api/cpu/graph"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Api_GetCpuGraph_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_Api_GetCpuGraph_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_Api_GetCpu_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetCpu", runtime.WithHTTPPathPattern("/debug/api/cpu"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Api_GetCpu_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_Api_GetCpu_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterApiHandlerFromEndpoint is same as RegisterApiHandler but
|
||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
||||
func RegisterApiHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
||||
conn, err := grpc.NewClient(endpoint, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
}()
|
||||
}()
|
||||
return RegisterApiHandler(ctx, mux, conn)
|
||||
}
|
||||
|
||||
// RegisterApiHandler registers the http handlers for service Api to "mux".
|
||||
// The handlers forward requests to the grpc endpoint over "conn".
|
||||
func RegisterApiHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
|
||||
return RegisterApiHandlerClient(ctx, mux, NewApiClient(conn))
|
||||
}
|
||||
|
||||
// RegisterApiHandlerClient registers the http handlers for service Api
|
||||
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ApiClient".
|
||||
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ApiClient"
|
||||
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
|
||||
// "ApiClient" to call the correct interceptors. This client ignores the HTTP middlewares.
|
||||
func RegisterApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ApiClient) error {
|
||||
mux.Handle(http.MethodGet, pattern_Api_GetHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetHeap", runtime.WithHTTPPathPattern("/debug/api/heap"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Api_GetHeap_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_Api_GetHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_Api_GetHeapGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetHeapGraph", runtime.WithHTTPPathPattern("/debug/api/heap/graph"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Api_GetHeapGraph_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_Api_GetHeapGraph_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_Api_GetCpuGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetCpuGraph", runtime.WithHTTPPathPattern("/debug/api/cpu/graph"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Api_GetCpuGraph_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_Api_GetCpuGraph_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
mux.Handle(http.MethodGet, pattern_Api_GetCpu_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetCpu", runtime.WithHTTPPathPattern("/debug/api/cpu"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Api_GetCpu_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
forward_Api_GetCpu_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_Api_GetHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"debug", "api", "heap"}, ""))
|
||||
pattern_Api_GetHeapGraph_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"debug", "api", "heap", "graph"}, ""))
|
||||
pattern_Api_GetCpuGraph_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"debug", "api", "cpu", "graph"}, ""))
|
||||
pattern_Api_GetCpu_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"debug", "api", "cpu"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
forward_Api_GetHeap_0 = runtime.ForwardResponseMessage
|
||||
forward_Api_GetHeapGraph_0 = runtime.ForwardResponseMessage
|
||||
forward_Api_GetCpuGraph_0 = runtime.ForwardResponseMessage
|
||||
forward_Api_GetCpu_0 = runtime.ForwardResponseMessage
|
||||
)
|
||||
135
plugin/debug/pb/debug.proto
Normal file
135
plugin/debug/pb/debug.proto
Normal file
@@ -0,0 +1,135 @@
|
||||
syntax = "proto3";
|
||||
import "google/api/annotations.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
package debug;
|
||||
option go_package="m7s.live/v5/plugin/debug/pb";
|
||||
|
||||
service api {
|
||||
rpc GetHeap (google.protobuf.Empty) returns (HeapResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/debug/api/heap"
|
||||
};
|
||||
}
|
||||
rpc GetHeapGraph (google.protobuf.Empty) returns (HeapGraphResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/debug/api/heap/graph"
|
||||
};
|
||||
}
|
||||
rpc GetCpuGraph (CpuRequest) returns (CpuGraphResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/debug/api/cpu/graph"
|
||||
};
|
||||
}
|
||||
rpc GetCpu (CpuRequest) returns (CpuResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/debug/api/cpu"
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// CPU分析请求参数
|
||||
message CpuRequest {
|
||||
bool refresh = 1; // 是否刷新数据
|
||||
uint32 duration = 2; // 分析时间(秒)
|
||||
}
|
||||
|
||||
message HeapObject {
|
||||
string type = 1;
|
||||
int64 count = 2;
|
||||
int64 size = 3;
|
||||
double sizePerc = 4;
|
||||
string address = 5;
|
||||
repeated string refs = 6;
|
||||
}
|
||||
|
||||
message HeapStats {
|
||||
uint64 alloc = 1;
|
||||
uint64 totalAlloc = 2;
|
||||
uint64 sys = 3;
|
||||
uint32 numGC = 4;
|
||||
uint64 heapAlloc = 5;
|
||||
uint64 heapSys = 6;
|
||||
uint64 heapIdle = 7;
|
||||
uint64 heapInuse = 8;
|
||||
uint64 heapReleased = 9;
|
||||
uint64 heapObjects = 10;
|
||||
double gcCPUFraction = 11;
|
||||
}
|
||||
|
||||
message HeapData {
|
||||
HeapStats stats = 1;
|
||||
repeated HeapObject objects = 2;
|
||||
repeated HeapEdge edges = 3;
|
||||
}
|
||||
|
||||
message HeapEdge {
|
||||
string from = 1;
|
||||
string to = 2;
|
||||
string fieldName = 3;
|
||||
}
|
||||
|
||||
message HeapResponse {
|
||||
uint32 code = 1;
|
||||
string message = 2;
|
||||
HeapData data = 3;
|
||||
}
|
||||
|
||||
message HeapGraphResponse {
|
||||
uint32 code = 1;
|
||||
string message = 2;
|
||||
string data = 3;
|
||||
}
|
||||
|
||||
message CpuGraphResponse {
|
||||
uint32 code = 1;
|
||||
string message = 2;
|
||||
string data = 3;
|
||||
}
|
||||
|
||||
// CPU 采样响应数据
|
||||
message CpuResponse {
|
||||
uint32 code = 1;
|
||||
string message = 2;
|
||||
CpuData data = 3;
|
||||
}
|
||||
|
||||
message CpuData {
|
||||
uint64 total_cpu_time_ns = 1; // 总 CPU 时间(纳秒)
|
||||
uint64 sampling_interval_ns = 2; // 采样间隔(纳秒)
|
||||
repeated FunctionProfile functions = 3; // 函数调用栈信息
|
||||
repeated GoroutineProfile goroutines = 4; // 协程信息
|
||||
repeated SystemCall system_calls = 5; // 系统调用信息
|
||||
RuntimeStats runtime_stats = 6; // 运行时统计信息
|
||||
}
|
||||
|
||||
// 函数调用栈信息
|
||||
message FunctionProfile {
|
||||
string function_name = 1; // 函数名称
|
||||
uint64 cpu_time_ns = 2; // 函数消耗的 CPU 时间(纳秒)
|
||||
uint64 invocation_count = 3; // 函数调用次数
|
||||
repeated string call_stack = 4; // 调用栈(从调用者到被调用者)
|
||||
bool is_inlined = 5; // 是否是内联函数
|
||||
}
|
||||
// 协程信息
|
||||
message GoroutineProfile {
|
||||
uint64 id = 1; // 协程 ID
|
||||
string state = 2; // 协程状态(如 running, blocked 等)
|
||||
uint64 cpu_time_ns = 3; // 协程消耗的 CPU 时间(纳秒)
|
||||
repeated string call_stack = 4; // 协程的调用栈
|
||||
}
|
||||
|
||||
// 系统调用信息
|
||||
message SystemCall {
|
||||
string name = 1; // 系统调用名称
|
||||
uint64 cpu_time_ns = 2; // 系统调用消耗的 CPU 时间(纳秒)
|
||||
uint64 count = 3; // 系统调用次数
|
||||
}
|
||||
|
||||
// 运行时统计信息
|
||||
message RuntimeStats {
|
||||
double gc_cpu_fraction = 1; // 垃圾回收占用的 CPU 时间比例
|
||||
uint64 gc_count = 2; // 垃圾回收次数
|
||||
uint64 gc_pause_time_ns = 3; // 垃圾回收暂停时间(纳秒)
|
||||
uint64 blocking_time_ns = 4; // 阻塞时间(纳秒)
|
||||
}
|
||||
236
plugin/debug/pb/debug_grpc.pb.go
Normal file
236
plugin/debug/pb/debug_grpc.pb.go
Normal file
@@ -0,0 +1,236 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.5.1
|
||||
// - protoc v5.29.1
|
||||
// source: debug.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
emptypb "google.golang.org/protobuf/types/known/emptypb"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.64.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion9
|
||||
|
||||
const (
|
||||
Api_GetHeap_FullMethodName = "/debug.api/GetHeap"
|
||||
Api_GetHeapGraph_FullMethodName = "/debug.api/GetHeapGraph"
|
||||
Api_GetCpuGraph_FullMethodName = "/debug.api/GetCpuGraph"
|
||||
Api_GetCpu_FullMethodName = "/debug.api/GetCpu"
|
||||
)
|
||||
|
||||
// ApiClient is the client API for Api service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type ApiClient interface {
|
||||
GetHeap(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HeapResponse, error)
|
||||
GetHeapGraph(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HeapGraphResponse, error)
|
||||
GetCpuGraph(ctx context.Context, in *CpuRequest, opts ...grpc.CallOption) (*CpuGraphResponse, error)
|
||||
GetCpu(ctx context.Context, in *CpuRequest, opts ...grpc.CallOption) (*CpuResponse, error)
|
||||
}
|
||||
|
||||
type apiClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewApiClient(cc grpc.ClientConnInterface) ApiClient {
|
||||
return &apiClient{cc}
|
||||
}
|
||||
|
||||
func (c *apiClient) GetHeap(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HeapResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(HeapResponse)
|
||||
err := c.cc.Invoke(ctx, Api_GetHeap_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetHeapGraph(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*HeapGraphResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(HeapGraphResponse)
|
||||
err := c.cc.Invoke(ctx, Api_GetHeapGraph_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetCpuGraph(ctx context.Context, in *CpuRequest, opts ...grpc.CallOption) (*CpuGraphResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(CpuGraphResponse)
|
||||
err := c.cc.Invoke(ctx, Api_GetCpuGraph_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetCpu(ctx context.Context, in *CpuRequest, opts ...grpc.CallOption) (*CpuResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(CpuResponse)
|
||||
err := c.cc.Invoke(ctx, Api_GetCpu_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// ApiServer is the server API for Api service.
|
||||
// All implementations must embed UnimplementedApiServer
|
||||
// for forward compatibility.
|
||||
type ApiServer interface {
|
||||
GetHeap(context.Context, *emptypb.Empty) (*HeapResponse, error)
|
||||
GetHeapGraph(context.Context, *emptypb.Empty) (*HeapGraphResponse, error)
|
||||
GetCpuGraph(context.Context, *CpuRequest) (*CpuGraphResponse, error)
|
||||
GetCpu(context.Context, *CpuRequest) (*CpuResponse, error)
|
||||
mustEmbedUnimplementedApiServer()
|
||||
}
|
||||
|
||||
// UnimplementedApiServer must be embedded to have
|
||||
// forward compatible implementations.
|
||||
//
|
||||
// NOTE: this should be embedded by value instead of pointer to avoid a nil
|
||||
// pointer dereference when methods are called.
|
||||
type UnimplementedApiServer struct{}
|
||||
|
||||
func (UnimplementedApiServer) GetHeap(context.Context, *emptypb.Empty) (*HeapResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetHeap not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetHeapGraph(context.Context, *emptypb.Empty) (*HeapGraphResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetHeapGraph not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetCpuGraph(context.Context, *CpuRequest) (*CpuGraphResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetCpuGraph not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetCpu(context.Context, *CpuRequest) (*CpuResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetCpu not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) mustEmbedUnimplementedApiServer() {}
|
||||
func (UnimplementedApiServer) testEmbeddedByValue() {}
|
||||
|
||||
// UnsafeApiServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to ApiServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeApiServer interface {
|
||||
mustEmbedUnimplementedApiServer()
|
||||
}
|
||||
|
||||
func RegisterApiServer(s grpc.ServiceRegistrar, srv ApiServer) {
|
||||
// If the following call pancis, it indicates UnimplementedApiServer was
|
||||
// embedded by pointer and is nil. This will cause panics if an
|
||||
// unimplemented method is ever invoked, so we test this at initialization
|
||||
// time to prevent it from happening at runtime later due to I/O.
|
||||
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
|
||||
t.testEmbeddedByValue()
|
||||
}
|
||||
s.RegisterService(&Api_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _Api_GetHeap_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(emptypb.Empty)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).GetHeap(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: Api_GetHeap_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).GetHeap(ctx, req.(*emptypb.Empty))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetHeapGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(emptypb.Empty)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).GetHeapGraph(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: Api_GetHeapGraph_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).GetHeapGraph(ctx, req.(*emptypb.Empty))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetCpuGraph_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CpuRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).GetCpuGraph(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: Api_GetCpuGraph_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).GetCpuGraph(ctx, req.(*CpuRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetCpu_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(CpuRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).GetCpu(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: Api_GetCpu_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).GetCpu(ctx, req.(*CpuRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// Api_ServiceDesc is the grpc.ServiceDesc for Api service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var Api_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "debug.api",
|
||||
HandlerType: (*ApiServer)(nil),
|
||||
Methods: []grpc.MethodDesc{
|
||||
{
|
||||
MethodName: "GetHeap",
|
||||
Handler: _Api_GetHeap_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetHeapGraph",
|
||||
Handler: _Api_GetHeapGraph_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetCpuGraph",
|
||||
Handler: _Api_GetCpuGraph_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetCpu",
|
||||
Handler: _Api_GetCpu_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "debug.proto",
|
||||
}
|
||||
17
plugin/debug/pkg/index.go
Normal file
17
plugin/debug/pkg/index.go
Normal file
@@ -0,0 +1,17 @@
|
||||
package debug
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/graph"
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/report"
|
||||
"m7s.live/v5/plugin/debug/pkg/profile"
|
||||
)
|
||||
|
||||
func GetDotGraph(profile *profile.Profile) (string, error) {
|
||||
rpt := report.NewDefault(profile, report.Options{})
|
||||
g, config := report.GetDOT(rpt)
|
||||
dot := &bytes.Buffer{}
|
||||
graph.ComposeDot(dot, g, &graph.DotAttributes{}, config)
|
||||
return dot.String(), nil
|
||||
}
|
||||
238
plugin/debug/pkg/internal/binutils/addr2liner.go
Normal file
238
plugin/debug/pkg/internal/binutils/addr2liner.go
Normal file
@@ -0,0 +1,238 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/plugin"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultAddr2line = "addr2line"
|
||||
|
||||
// addr2line may produce multiple lines of output. We
|
||||
// use this sentinel to identify the end of the output.
|
||||
sentinel = ^uint64(0)
|
||||
)
|
||||
|
||||
// addr2Liner is a connection to an addr2line command for obtaining
|
||||
// address and line number information from a binary.
|
||||
type addr2Liner struct {
|
||||
mu sync.Mutex
|
||||
rw lineReaderWriter
|
||||
base uint64
|
||||
|
||||
// nm holds an addr2Liner using nm tool. Certain versions of addr2line
|
||||
// produce incomplete names due to
|
||||
// https://sourceware.org/bugzilla/show_bug.cgi?id=17541. As a workaround,
|
||||
// the names from nm are used when they look more complete. See addrInfo()
|
||||
// code below for the exact heuristic.
|
||||
nm *addr2LinerNM
|
||||
}
|
||||
|
||||
// lineReaderWriter is an interface to abstract the I/O to an addr2line
|
||||
// process. It writes a line of input to the job, and reads its output
|
||||
// one line at a time.
|
||||
type lineReaderWriter interface {
|
||||
write(string) error
|
||||
readLine() (string, error)
|
||||
close()
|
||||
}
|
||||
|
||||
type addr2LinerJob struct {
|
||||
cmd *exec.Cmd
|
||||
in io.WriteCloser
|
||||
out *bufio.Reader
|
||||
}
|
||||
|
||||
func (a *addr2LinerJob) write(s string) error {
|
||||
_, err := fmt.Fprint(a.in, s+"\n")
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *addr2LinerJob) readLine() (string, error) {
|
||||
s, err := a.out.ReadString('\n')
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(s), nil
|
||||
}
|
||||
|
||||
// close releases any resources used by the addr2liner object.
|
||||
func (a *addr2LinerJob) close() {
|
||||
a.in.Close()
|
||||
a.cmd.Wait()
|
||||
}
|
||||
|
||||
// newAddr2Liner starts the given addr2liner command reporting
|
||||
// information about the given executable file. If file is a shared
|
||||
// library, base should be the address at which it was mapped in the
|
||||
// program under consideration.
|
||||
func newAddr2Liner(cmd, file string, base uint64) (*addr2Liner, error) {
|
||||
if cmd == "" {
|
||||
cmd = defaultAddr2line
|
||||
}
|
||||
|
||||
j := &addr2LinerJob{
|
||||
cmd: exec.Command(cmd, "-aif", "-e", file),
|
||||
}
|
||||
|
||||
var err error
|
||||
if j.in, err = j.cmd.StdinPipe(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
outPipe, err := j.cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
j.out = bufio.NewReader(outPipe)
|
||||
if err := j.cmd.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
a := &addr2Liner{
|
||||
rw: j,
|
||||
base: base,
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// readFrame parses the addr2line output for a single address. It
|
||||
// returns a populated plugin.Frame and whether it has reached the end of the
|
||||
// data.
|
||||
func (d *addr2Liner) readFrame() (plugin.Frame, bool) {
|
||||
funcname, err := d.rw.readLine()
|
||||
if err != nil {
|
||||
return plugin.Frame{}, true
|
||||
}
|
||||
if strings.HasPrefix(funcname, "0x") {
|
||||
// If addr2line returns a hex address we can assume it is the
|
||||
// sentinel. Read and ignore next two lines of output from
|
||||
// addr2line
|
||||
d.rw.readLine()
|
||||
d.rw.readLine()
|
||||
return plugin.Frame{}, true
|
||||
}
|
||||
|
||||
fileline, err := d.rw.readLine()
|
||||
if err != nil {
|
||||
return plugin.Frame{}, true
|
||||
}
|
||||
|
||||
linenumber := 0
|
||||
|
||||
if funcname == "??" {
|
||||
funcname = ""
|
||||
}
|
||||
|
||||
if fileline == "??:0" {
|
||||
fileline = ""
|
||||
} else {
|
||||
if i := strings.LastIndex(fileline, ":"); i >= 0 {
|
||||
// Remove discriminator, if present
|
||||
if disc := strings.Index(fileline, " (discriminator"); disc > 0 {
|
||||
fileline = fileline[:disc]
|
||||
}
|
||||
// If we cannot parse a number after the last ":", keep it as
|
||||
// part of the filename.
|
||||
if line, err := strconv.Atoi(fileline[i+1:]); err == nil {
|
||||
linenumber = line
|
||||
fileline = fileline[:i]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return plugin.Frame{
|
||||
Func: funcname,
|
||||
File: fileline,
|
||||
Line: linenumber}, false
|
||||
}
|
||||
|
||||
func (d *addr2Liner) rawAddrInfo(addr uint64) ([]plugin.Frame, error) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
if err := d.rw.write(fmt.Sprintf("%x", addr-d.base)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := d.rw.write(fmt.Sprintf("%x", sentinel)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := d.rw.readLine()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !strings.HasPrefix(resp, "0x") {
|
||||
return nil, fmt.Errorf("unexpected addr2line output: %s", resp)
|
||||
}
|
||||
|
||||
var stack []plugin.Frame
|
||||
for {
|
||||
frame, end := d.readFrame()
|
||||
if end {
|
||||
break
|
||||
}
|
||||
|
||||
if frame != (plugin.Frame{}) {
|
||||
stack = append(stack, frame)
|
||||
}
|
||||
}
|
||||
return stack, err
|
||||
}
|
||||
|
||||
// addrInfo returns the stack frame information for a specific program
|
||||
// address. It returns nil if the address could not be identified.
|
||||
func (d *addr2Liner) addrInfo(addr uint64) ([]plugin.Frame, error) {
|
||||
stack, err := d.rawAddrInfo(addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Certain versions of addr2line produce incomplete names due to
|
||||
// https://sourceware.org/bugzilla/show_bug.cgi?id=17541. Attempt to replace
|
||||
// the name with a better one from nm.
|
||||
if len(stack) > 0 && d.nm != nil {
|
||||
nm, err := d.nm.addrInfo(addr)
|
||||
if err == nil && len(nm) > 0 {
|
||||
// Last entry in frame list should match since it is non-inlined. As a
|
||||
// simple heuristic, we only switch to the nm-based name if it is longer
|
||||
// by 2 or more characters. We consider nm names that are longer by 1
|
||||
// character insignificant to avoid replacing foo with _foo on MacOS (for
|
||||
// unknown reasons read2line produces the former and nm produces the
|
||||
// latter on MacOS even though both tools are asked to produce mangled
|
||||
// names).
|
||||
nmName := nm[len(nm)-1].Func
|
||||
a2lName := stack[len(stack)-1].Func
|
||||
if len(nmName) > len(a2lName)+1 {
|
||||
stack[len(stack)-1].Func = nmName
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return stack, nil
|
||||
}
|
||||
184
plugin/debug/pkg/internal/binutils/addr2liner_llvm.go
Normal file
184
plugin/debug/pkg/internal/binutils/addr2liner_llvm.go
Normal file
@@ -0,0 +1,184 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/plugin"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultLLVMSymbolizer = "llvm-symbolizer"
|
||||
)
|
||||
|
||||
// llvmSymbolizer is a connection to an llvm-symbolizer command for
|
||||
// obtaining address and line number information from a binary.
|
||||
type llvmSymbolizer struct {
|
||||
sync.Mutex
|
||||
filename string
|
||||
rw lineReaderWriter
|
||||
base uint64
|
||||
isData bool
|
||||
}
|
||||
|
||||
type llvmSymbolizerJob struct {
|
||||
cmd *exec.Cmd
|
||||
in io.WriteCloser
|
||||
out *bufio.Reader
|
||||
// llvm-symbolizer requires the symbol type, CODE or DATA, for symbolization.
|
||||
symType string
|
||||
}
|
||||
|
||||
func (a *llvmSymbolizerJob) write(s string) error {
|
||||
_, err := fmt.Fprintln(a.in, a.symType, s)
|
||||
return err
|
||||
}
|
||||
|
||||
func (a *llvmSymbolizerJob) readLine() (string, error) {
|
||||
s, err := a.out.ReadString('\n')
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return strings.TrimSpace(s), nil
|
||||
}
|
||||
|
||||
// close releases any resources used by the llvmSymbolizer object.
|
||||
func (a *llvmSymbolizerJob) close() {
|
||||
a.in.Close()
|
||||
a.cmd.Wait()
|
||||
}
|
||||
|
||||
// newLLVMSymbolizer starts the given llvmSymbolizer command reporting
|
||||
// information about the given executable file. If file is a shared
|
||||
// library, base should be the address at which it was mapped in the
|
||||
// program under consideration.
|
||||
func newLLVMSymbolizer(cmd, file string, base uint64, isData bool) (*llvmSymbolizer, error) {
|
||||
if cmd == "" {
|
||||
cmd = defaultLLVMSymbolizer
|
||||
}
|
||||
|
||||
j := &llvmSymbolizerJob{
|
||||
cmd: exec.Command(cmd, "--inlining", "-demangle=false", "--output-style=JSON"),
|
||||
symType: "CODE",
|
||||
}
|
||||
if isData {
|
||||
j.symType = "DATA"
|
||||
}
|
||||
|
||||
var err error
|
||||
if j.in, err = j.cmd.StdinPipe(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
outPipe, err := j.cmd.StdoutPipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
j.out = bufio.NewReader(outPipe)
|
||||
if err := j.cmd.Start(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
a := &llvmSymbolizer{
|
||||
filename: file,
|
||||
rw: j,
|
||||
base: base,
|
||||
isData: isData,
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// readDataFrames parses the llvm-symbolizer DATA output for a single address. It
|
||||
// returns a populated plugin.Frame array with a single entry.
|
||||
func (d *llvmSymbolizer) readDataFrames() ([]plugin.Frame, error) {
|
||||
line, err := d.rw.readLine()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var frame struct {
|
||||
Address string `json:"Address"`
|
||||
ModuleName string `json:"ModuleName"`
|
||||
Data struct {
|
||||
Start string `json:"Start"`
|
||||
Size string `json:"Size"`
|
||||
Name string `json:"Name"`
|
||||
} `json:"Data"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(line), &frame); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Match non-JSON output behaviour of stuffing the start/size into the filename of a single frame,
|
||||
// with the size being a decimal value.
|
||||
size, err := strconv.ParseInt(frame.Data.Size, 0, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var stack []plugin.Frame
|
||||
stack = append(stack, plugin.Frame{Func: frame.Data.Name, File: fmt.Sprintf("%s %d", frame.Data.Start, size)})
|
||||
return stack, nil
|
||||
}
|
||||
|
||||
// readCodeFrames parses the llvm-symbolizer CODE output for a single address. It
|
||||
// returns a populated plugin.Frame array.
|
||||
func (d *llvmSymbolizer) readCodeFrames() ([]plugin.Frame, error) {
|
||||
line, err := d.rw.readLine()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var frame struct {
|
||||
Address string `json:"Address"`
|
||||
ModuleName string `json:"ModuleName"`
|
||||
Symbol []struct {
|
||||
Line int `json:"Line"`
|
||||
Column int `json:"Column"`
|
||||
FunctionName string `json:"FunctionName"`
|
||||
FileName string `json:"FileName"`
|
||||
StartLine int `json:"StartLine"`
|
||||
} `json:"Symbol"`
|
||||
}
|
||||
if err := json.Unmarshal([]byte(line), &frame); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var stack []plugin.Frame
|
||||
for _, s := range frame.Symbol {
|
||||
stack = append(stack, plugin.Frame{Func: s.FunctionName, File: s.FileName, Line: s.Line, Column: s.Column, StartLine: s.StartLine})
|
||||
}
|
||||
return stack, nil
|
||||
}
|
||||
|
||||
// addrInfo returns the stack frame information for a specific program
|
||||
// address. It returns nil if the address could not be identified.
|
||||
func (d *llvmSymbolizer) addrInfo(addr uint64) ([]plugin.Frame, error) {
|
||||
d.Lock()
|
||||
defer d.Unlock()
|
||||
|
||||
if err := d.rw.write(fmt.Sprintf("%s 0x%x", d.filename, addr-d.base)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if d.isData {
|
||||
return d.readDataFrames()
|
||||
}
|
||||
return d.readCodeFrames()
|
||||
}
|
||||
144
plugin/debug/pkg/internal/binutils/addr2liner_nm.go
Normal file
144
plugin/debug/pkg/internal/binutils/addr2liner_nm.go
Normal file
@@ -0,0 +1,144 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"io"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/plugin"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultNM = "nm"
|
||||
)
|
||||
|
||||
// addr2LinerNM is a connection to an nm command for obtaining symbol
|
||||
// information from a binary.
|
||||
type addr2LinerNM struct {
|
||||
m []symbolInfo // Sorted list of symbol addresses from binary.
|
||||
}
|
||||
|
||||
type symbolInfo struct {
|
||||
address uint64
|
||||
size uint64
|
||||
name string
|
||||
symType string
|
||||
}
|
||||
|
||||
// isData returns if the symbol has a known data object symbol type.
|
||||
func (s *symbolInfo) isData() bool {
|
||||
// The following symbol types are taken from https://linux.die.net/man/1/nm:
|
||||
// Lowercase letter means local symbol, uppercase denotes a global symbol.
|
||||
// - b or B: the symbol is in the uninitialized data section, e.g. .bss;
|
||||
// - d or D: the symbol is in the initialized data section;
|
||||
// - r or R: the symbol is in a read only data section;
|
||||
// - v or V: the symbol is a weak object;
|
||||
// - W: the symbol is a weak symbol that has not been specifically tagged as a
|
||||
// weak object symbol. Experiments with some binaries, showed these to be
|
||||
// mostly data objects.
|
||||
return strings.ContainsAny(s.symType, "bBdDrRvVW")
|
||||
}
|
||||
|
||||
// newAddr2LinerNM starts the given nm command reporting information about the
|
||||
// given executable file. If file is a shared library, base should be the
|
||||
// address at which it was mapped in the program under consideration.
|
||||
func newAddr2LinerNM(cmd, file string, base uint64) (*addr2LinerNM, error) {
|
||||
if cmd == "" {
|
||||
cmd = defaultNM
|
||||
}
|
||||
var b bytes.Buffer
|
||||
c := exec.Command(cmd, "--numeric-sort", "--print-size", "--format=posix", file)
|
||||
c.Stdout = &b
|
||||
if err := c.Run(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return parseAddr2LinerNM(base, &b)
|
||||
}
|
||||
|
||||
func parseAddr2LinerNM(base uint64, nm io.Reader) (*addr2LinerNM, error) {
|
||||
a := &addr2LinerNM{
|
||||
m: []symbolInfo{},
|
||||
}
|
||||
|
||||
// Parse nm output and populate symbol map.
|
||||
// Skip lines we fail to parse.
|
||||
buf := bufio.NewReader(nm)
|
||||
for {
|
||||
line, err := buf.ReadString('\n')
|
||||
if line == "" && err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
line = strings.TrimSpace(line)
|
||||
fields := strings.Split(line, " ")
|
||||
if len(fields) != 4 {
|
||||
continue
|
||||
}
|
||||
address, err := strconv.ParseUint(fields[2], 16, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
size, err := strconv.ParseUint(fields[3], 16, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
a.m = append(a.m, symbolInfo{
|
||||
address: address + base,
|
||||
size: size,
|
||||
name: fields[0],
|
||||
symType: fields[1],
|
||||
})
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
|
||||
|
||||
// addrInfo returns the stack frame information for a specific program
|
||||
// address. It returns nil if the address could not be identified.
|
||||
func (a *addr2LinerNM) addrInfo(addr uint64) ([]plugin.Frame, error) {
|
||||
if len(a.m) == 0 || addr < a.m[0].address || addr >= (a.m[len(a.m)-1].address+a.m[len(a.m)-1].size) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// Binary search. Search until low, high are separated by 1.
|
||||
low, high := 0, len(a.m)
|
||||
for low+1 < high {
|
||||
mid := (low + high) / 2
|
||||
v := a.m[mid].address
|
||||
if addr == v {
|
||||
low = mid
|
||||
break
|
||||
} else if addr > v {
|
||||
low = mid
|
||||
} else {
|
||||
high = mid
|
||||
}
|
||||
}
|
||||
|
||||
// Address is between a.m[low] and a.m[high]. Pick low, as it represents
|
||||
// [low, high). For data symbols, we use a strict check that the address is in
|
||||
// the [start, start + size) range of a.m[low].
|
||||
if a.m[low].isData() && addr >= (a.m[low].address+a.m[low].size) {
|
||||
return nil, nil
|
||||
}
|
||||
return []plugin.Frame{{Func: a.m[low].name}}, nil
|
||||
}
|
||||
736
plugin/debug/pkg/internal/binutils/binutils.go
Normal file
736
plugin/debug/pkg/internal/binutils/binutils.go
Normal file
@@ -0,0 +1,736 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package binutils provides access to the GNU binutils.
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"debug/elf"
|
||||
"debug/macho"
|
||||
"debug/pe"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/elfexec"
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/plugin"
|
||||
)
|
||||
|
||||
// A Binutils implements plugin.ObjTool by invoking the GNU binutils.
|
||||
type Binutils struct {
|
||||
mu sync.Mutex
|
||||
rep *binrep
|
||||
}
|
||||
|
||||
var (
|
||||
objdumpLLVMVerRE = regexp.MustCompile(`LLVM version (?:(\d*)\.(\d*)\.(\d*)|.*(trunk).*)`)
|
||||
|
||||
// Defined for testing
|
||||
elfOpen = elf.Open
|
||||
)
|
||||
|
||||
// binrep is an immutable representation for Binutils. It is atomically
|
||||
// replaced on every mutation to provide thread-safe access.
|
||||
type binrep struct {
|
||||
// Commands to invoke.
|
||||
llvmSymbolizer string
|
||||
llvmSymbolizerFound bool
|
||||
addr2line string
|
||||
addr2lineFound bool
|
||||
nm string
|
||||
nmFound bool
|
||||
objdump string
|
||||
objdumpFound bool
|
||||
isLLVMObjdump bool
|
||||
|
||||
// if fast, perform symbolization using nm (symbol names only),
|
||||
// instead of file-line detail from the slower addr2line.
|
||||
fast bool
|
||||
}
|
||||
|
||||
// get returns the current representation for bu, initializing it if necessary.
|
||||
func (bu *Binutils) get() *binrep {
|
||||
bu.mu.Lock()
|
||||
r := bu.rep
|
||||
if r == nil {
|
||||
r = &binrep{}
|
||||
initTools(r, "")
|
||||
bu.rep = r
|
||||
}
|
||||
bu.mu.Unlock()
|
||||
return r
|
||||
}
|
||||
|
||||
// update modifies the rep for bu via the supplied function.
|
||||
func (bu *Binutils) update(fn func(r *binrep)) {
|
||||
r := &binrep{}
|
||||
bu.mu.Lock()
|
||||
defer bu.mu.Unlock()
|
||||
if bu.rep == nil {
|
||||
initTools(r, "")
|
||||
} else {
|
||||
*r = *bu.rep
|
||||
}
|
||||
fn(r)
|
||||
bu.rep = r
|
||||
}
|
||||
|
||||
// String returns string representation of the binutils state for debug logging.
|
||||
func (bu *Binutils) String() string {
|
||||
r := bu.get()
|
||||
var llvmSymbolizer, addr2line, nm, objdump string
|
||||
if r.llvmSymbolizerFound {
|
||||
llvmSymbolizer = r.llvmSymbolizer
|
||||
}
|
||||
if r.addr2lineFound {
|
||||
addr2line = r.addr2line
|
||||
}
|
||||
if r.nmFound {
|
||||
nm = r.nm
|
||||
}
|
||||
if r.objdumpFound {
|
||||
objdump = r.objdump
|
||||
}
|
||||
return fmt.Sprintf("llvm-symbolizer=%q addr2line=%q nm=%q objdump=%q fast=%t",
|
||||
llvmSymbolizer, addr2line, nm, objdump, r.fast)
|
||||
}
|
||||
|
||||
// SetFastSymbolization sets a toggle that makes binutils use fast
|
||||
// symbolization (using nm), which is much faster than addr2line but
|
||||
// provides only symbol name information (no file/line).
|
||||
func (bu *Binutils) SetFastSymbolization(fast bool) {
|
||||
bu.update(func(r *binrep) { r.fast = fast })
|
||||
}
|
||||
|
||||
// SetTools processes the contents of the tools option. It
|
||||
// expects a set of entries separated by commas; each entry is a pair
|
||||
// of the form t:path, where cmd will be used to look only for the
|
||||
// tool named t. If t is not specified, the path is searched for all
|
||||
// tools.
|
||||
func (bu *Binutils) SetTools(config string) {
|
||||
bu.update(func(r *binrep) { initTools(r, config) })
|
||||
}
|
||||
|
||||
func initTools(b *binrep, config string) {
|
||||
// paths collect paths per tool; Key "" contains the default.
|
||||
paths := make(map[string][]string)
|
||||
for _, t := range strings.Split(config, ",") {
|
||||
name, path := "", t
|
||||
if ct := strings.SplitN(t, ":", 2); len(ct) == 2 {
|
||||
name, path = ct[0], ct[1]
|
||||
}
|
||||
paths[name] = append(paths[name], path)
|
||||
}
|
||||
|
||||
defaultPath := paths[""]
|
||||
b.llvmSymbolizer, b.llvmSymbolizerFound = chooseExe([]string{"llvm-symbolizer"}, []string{}, append(paths["llvm-symbolizer"], defaultPath...))
|
||||
b.addr2line, b.addr2lineFound = chooseExe([]string{"addr2line"}, []string{"gaddr2line"}, append(paths["addr2line"], defaultPath...))
|
||||
// The "-n" option is supported by LLVM since 2011. The output of llvm-nm
|
||||
// and GNU nm with "-n" option is interchangeable for our purposes, so we do
|
||||
// not need to differentiate them.
|
||||
b.nm, b.nmFound = chooseExe([]string{"llvm-nm", "nm"}, []string{"gnm"}, append(paths["nm"], defaultPath...))
|
||||
b.objdump, b.objdumpFound, b.isLLVMObjdump = findObjdump(append(paths["objdump"], defaultPath...))
|
||||
}
|
||||
|
||||
// findObjdump finds and returns path to preferred objdump binary.
|
||||
// Order of preference is: llvm-objdump, objdump.
|
||||
// On MacOS only, also looks for gobjdump with least preference.
|
||||
// Accepts a list of paths and returns:
|
||||
// a string with path to the preferred objdump binary if found,
|
||||
// or an empty string if not found;
|
||||
// a boolean indicating whether any acceptable objdump was found;
|
||||
// a boolean indicating if it is an LLVM objdump.
|
||||
func findObjdump(paths []string) (string, bool, bool) {
|
||||
objdumpNames := []string{"llvm-objdump", "objdump"}
|
||||
if runtime.GOOS == "darwin" {
|
||||
objdumpNames = append(objdumpNames, "gobjdump")
|
||||
}
|
||||
|
||||
for _, objdumpName := range objdumpNames {
|
||||
if objdump, objdumpFound := findExe(objdumpName, paths); objdumpFound {
|
||||
cmdOut, err := exec.Command(objdump, "--version").Output()
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if isLLVMObjdump(string(cmdOut)) {
|
||||
return objdump, true, true
|
||||
}
|
||||
if isBuObjdump(string(cmdOut)) {
|
||||
return objdump, true, false
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", false, false
|
||||
}
|
||||
|
||||
// chooseExe finds and returns path to preferred binary. names is a list of
|
||||
// names to search on both Linux and OSX. osxNames is a list of names specific
|
||||
// to OSX. names always has a higher priority than osxNames. The order of
|
||||
// the name within each list decides its priority (e.g. the first name has a
|
||||
// higher priority than the second name in the list).
|
||||
//
|
||||
// It returns a string with path to the binary and a boolean indicating if any
|
||||
// acceptable binary was found.
|
||||
func chooseExe(names, osxNames []string, paths []string) (string, bool) {
|
||||
if runtime.GOOS == "darwin" {
|
||||
names = append(names, osxNames...)
|
||||
}
|
||||
for _, name := range names {
|
||||
if binary, found := findExe(name, paths); found {
|
||||
return binary, true
|
||||
}
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// isLLVMObjdump accepts the output of an objdump binary's --version
// command, and returns a boolean indicating if that binary is an LLVM
// objdump binary of an acceptable version.
|
||||
func isLLVMObjdump(output string) bool {
|
||||
fields := objdumpLLVMVerRE.FindStringSubmatch(output)
|
||||
if len(fields) != 5 {
|
||||
return false
|
||||
}
|
||||
if fields[4] == "trunk" {
|
||||
return true
|
||||
}
|
||||
verMajor, err := strconv.Atoi(fields[1])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
verPatch, err := strconv.Atoi(fields[3])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if runtime.GOOS == "linux" && verMajor >= 8 {
|
||||
// Ensure LLVM objdump is at least version 8.0 on Linux.
|
||||
// Some flags, like --demangle, and double dashes for options are
|
||||
// not supported by previous versions.
|
||||
return true
|
||||
}
|
||||
if runtime.GOOS == "darwin" {
|
||||
// Ensure LLVM objdump is at least version 10.0.1 on MacOS.
|
||||
return verMajor > 10 || (verMajor == 10 && verPatch >= 1)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isBuObjdump accepts the output of an objdump binary's --version
// command, and returns a boolean indicating if that binary is a GNU
// binutils objdump binary. No version check is performed.
|
||||
func isBuObjdump(output string) bool {
|
||||
return strings.Contains(output, "GNU objdump")
|
||||
}
|
||||
|
||||
// findExe looks for an executable command on a set of paths.
|
||||
// If it cannot find it, returns cmd.
|
||||
func findExe(cmd string, paths []string) (string, bool) {
|
||||
for _, p := range paths {
|
||||
cp := filepath.Join(p, cmd)
|
||||
if c, err := exec.LookPath(cp); err == nil {
|
||||
return c, true
|
||||
}
|
||||
}
|
||||
return cmd, false
|
||||
}
|
||||
|
||||
// Disasm returns the assembly instructions for the specified address range
|
||||
// of a binary.
|
||||
func (bu *Binutils) Disasm(file string, start, end uint64, intelSyntax bool) ([]plugin.Inst, error) {
|
||||
b := bu.get()
|
||||
if !b.objdumpFound {
|
||||
return nil, errors.New("cannot disasm: no objdump tool available")
|
||||
}
|
||||
args := []string{"--disassemble", "--demangle", "--no-show-raw-insn",
|
||||
"--line-numbers", fmt.Sprintf("--start-address=%#x", start),
|
||||
fmt.Sprintf("--stop-address=%#x", end)}
|
||||
|
||||
if intelSyntax {
|
||||
if b.isLLVMObjdump {
|
||||
args = append(args, "--x86-asm-syntax=intel")
|
||||
} else {
|
||||
args = append(args, "-M", "intel")
|
||||
}
|
||||
}
|
||||
|
||||
args = append(args, file)
|
||||
cmd := exec.Command(b.objdump, args...)
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%v: %v", cmd.Args, err)
|
||||
}
|
||||
|
||||
return disassemble(out)
|
||||
}
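// exampleDisasm is a hypothetical caller sketch; the binary path and address range
// are assumptions. Disasm shells out to objdump and parses the listing into
// plugin.Inst records.
func exampleDisasm() {
	bu := &Binutils{}
	insts, err := bu.Disasm("/usr/local/bin/demo", 0x401000, 0x402000, false)
	if err != nil {
		return // e.g. no usable objdump was found on this system
	}
	for _, in := range insts {
		fmt.Printf("%#x\t%-30s %s\n", in.Addr, in.Text, in.Function)
	}
}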
|
||||
|
||||
// Open satisfies the plugin.ObjTool interface.
|
||||
func (bu *Binutils) Open(name string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) {
|
||||
b := bu.get()
|
||||
|
||||
// Make sure file is a supported executable.
|
||||
// This uses magic numbers, mainly to provide better error messages but
|
||||
// it should also help speed.
|
||||
|
||||
if _, err := os.Stat(name); err != nil {
|
||||
// For testing, do not require file name to exist.
|
||||
if strings.Contains(b.addr2line, "testdata/") {
|
||||
return &fileAddr2Line{file: file{b: b, name: name}}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read the first 4 bytes of the file.
|
||||
|
||||
f, err := os.Open(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error opening %s: %v", name, err)
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var header [4]byte
|
||||
if _, err = io.ReadFull(f, header[:]); err != nil {
|
||||
return nil, fmt.Errorf("error reading magic number from %s: %v", name, err)
|
||||
}
|
||||
|
||||
elfMagic := string(header[:])
|
||||
|
||||
// Match against supported file types.
|
||||
if elfMagic == elf.ELFMAG {
|
||||
f, err := b.openELF(name, start, limit, offset, relocationSymbol)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading ELF file %s: %v", name, err)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Mach-O magic numbers can be big or little endian.
|
||||
machoMagicLittle := binary.LittleEndian.Uint32(header[:])
|
||||
machoMagicBig := binary.BigEndian.Uint32(header[:])
|
||||
|
||||
if machoMagicLittle == macho.Magic32 || machoMagicLittle == macho.Magic64 ||
|
||||
machoMagicBig == macho.Magic32 || machoMagicBig == macho.Magic64 {
|
||||
f, err := b.openMachO(name, start, limit, offset)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading Mach-O file %s: %v", name, err)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
if machoMagicLittle == macho.MagicFat || machoMagicBig == macho.MagicFat {
|
||||
f, err := b.openFatMachO(name, start, limit, offset)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading fat Mach-O file %s: %v", name, err)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
peMagic := string(header[:2])
|
||||
if peMagic == "MZ" {
|
||||
f, err := b.openPE(name, start, limit, offset)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error reading PE file %s: %v", name, err)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unrecognized binary format: %s", name)
|
||||
}
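// exampleOpen is a hypothetical end-to-end sketch; the path and mapping values are
// invented. Open picks the ELF/Mach-O/PE code path from the magic number, and the
// returned ObjFile resolves sample addresses to source locations.
func exampleOpen() {
	bu := &Binutils{}
	f, err := bu.Open("/usr/local/bin/demo", 0x400000, 0x500000, 0, "")
	if err != nil {
		return
	}
	defer f.Close()
	if frames, err := f.SourceLine(0x401234); err == nil {
		for _, fr := range frames {
			fmt.Printf("%s %s:%d\n", fr.Func, fr.File, fr.Line)
		}
	}
}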
|
||||
|
||||
func (b *binrep) openMachOCommon(name string, of *macho.File, start, limit, offset uint64) (plugin.ObjFile, error) {
|
||||
|
||||
// Subtract the load address of the __TEXT section. Usually 0 for shared
|
||||
// libraries or 0x100000000 for executables. You can check this value by
|
||||
// running `objdump -private-headers <file>`.
|
||||
|
||||
textSegment := of.Segment("__TEXT")
|
||||
if textSegment == nil {
|
||||
return nil, fmt.Errorf("could not identify base for %s: no __TEXT segment", name)
|
||||
}
|
||||
if textSegment.Addr > start {
|
||||
return nil, fmt.Errorf("could not identify base for %s: __TEXT segment address (0x%x) > mapping start address (0x%x)",
|
||||
name, textSegment.Addr, start)
|
||||
}
|
||||
|
||||
base := start - textSegment.Addr
|
||||
|
||||
if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) {
|
||||
return &fileNM{file: file{b: b, name: name, base: base}}, nil
|
||||
}
|
||||
return &fileAddr2Line{file: file{b: b, name: name, base: base}}, nil
|
||||
}
|
||||
|
||||
func (b *binrep) openFatMachO(name string, start, limit, offset uint64) (plugin.ObjFile, error) {
|
||||
of, err := macho.OpenFat(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing %s: %v", name, err)
|
||||
}
|
||||
defer of.Close()
|
||||
|
||||
if len(of.Arches) == 0 {
|
||||
return nil, fmt.Errorf("empty fat Mach-O file: %s", name)
|
||||
}
|
||||
|
||||
var arch macho.Cpu
|
||||
// Use the host architecture.
|
||||
// TODO: This is not ideal because the host architecture may not be the one
|
||||
// that was profiled. E.g. an amd64 host can profile a 386 program.
|
||||
switch runtime.GOARCH {
|
||||
case "386":
|
||||
arch = macho.Cpu386
|
||||
case "amd64", "amd64p32":
|
||||
arch = macho.CpuAmd64
|
||||
case "arm", "armbe", "arm64", "arm64be":
|
||||
arch = macho.CpuArm
|
||||
case "ppc":
|
||||
arch = macho.CpuPpc
|
||||
case "ppc64", "ppc64le":
|
||||
arch = macho.CpuPpc64
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported host architecture for %s: %s", name, runtime.GOARCH)
|
||||
}
|
||||
for i := range of.Arches {
|
||||
if of.Arches[i].Cpu == arch {
|
||||
return b.openMachOCommon(name, of.Arches[i].File, start, limit, offset)
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("architecture not found in %s: %s", name, runtime.GOARCH)
|
||||
}
|
||||
|
||||
func (b *binrep) openMachO(name string, start, limit, offset uint64) (plugin.ObjFile, error) {
|
||||
of, err := macho.Open(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing %s: %v", name, err)
|
||||
}
|
||||
defer of.Close()
|
||||
|
||||
return b.openMachOCommon(name, of, start, limit, offset)
|
||||
}
|
||||
|
||||
func (b *binrep) openELF(name string, start, limit, offset uint64, relocationSymbol string) (plugin.ObjFile, error) {
|
||||
ef, err := elfOpen(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing %s: %v", name, err)
|
||||
}
|
||||
defer ef.Close()
|
||||
|
||||
buildID := ""
|
||||
if id, err := elfexec.GetBuildID(ef); err == nil {
|
||||
buildID = fmt.Sprintf("%x", id)
|
||||
}
|
||||
|
||||
var (
|
||||
kernelOffset *uint64
|
||||
pageAligned = func(addr uint64) bool { return addr%4096 == 0 }
|
||||
)
|
||||
if strings.Contains(name, "vmlinux") || !pageAligned(start) || !pageAligned(limit) || !pageAligned(offset) {
|
||||
// Reading all Symbols is expensive, and we only rarely need it so
|
||||
// we don't want to do it every time. But if _stext happens to be
|
||||
// page-aligned but isn't the same as Vaddr, we would symbolize
|
||||
// wrong. So if the addresses aren't page aligned, or if
// the name is "vmlinux", we read _stext. We can be wrong if: (1)
|
||||
// someone passes a kernel path that doesn't contain "vmlinux" AND
|
||||
// (2) _stext is page-aligned AND (3) _stext is not at Vaddr
|
||||
symbols, err := ef.Symbols()
|
||||
if err != nil && err != elf.ErrNoSymbols {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// The kernel relocation symbol (the mapping start address) can be either
|
||||
// _text or _stext. When profiles are generated by `perf`, which one was used is
|
||||
// distinguished by the mapping name for the kernel image:
|
||||
// '[kernel.kallsyms]_text' or '[kernel.kallsyms]_stext', respectively. If we haven't
|
||||
// been able to parse it from the mapping, we default to _stext.
|
||||
if relocationSymbol == "" {
|
||||
relocationSymbol = "_stext"
|
||||
}
|
||||
for _, s := range symbols {
|
||||
if s.Name == relocationSymbol {
|
||||
kernelOffset = &s.Value
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check that we can compute a base for the binary. This may not be the
|
||||
// correct base value, so we don't save it. We delay computing the actual base
|
||||
// value until we have a sample address for this mapping, so that we can
|
||||
// correctly identify the associated program segment that is needed to compute
|
||||
// the base.
|
||||
if _, err := elfexec.GetBase(&ef.FileHeader, elfexec.FindTextProgHeader(ef), kernelOffset, start, limit, offset); err != nil {
|
||||
return nil, fmt.Errorf("could not identify base for %s: %v", name, err)
|
||||
}
|
||||
|
||||
if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) {
|
||||
return &fileNM{file: file{
|
||||
b: b,
|
||||
name: name,
|
||||
buildID: buildID,
|
||||
m: &elfMapping{start: start, limit: limit, offset: offset, kernelOffset: kernelOffset},
|
||||
}}, nil
|
||||
}
|
||||
return &fileAddr2Line{file: file{
|
||||
b: b,
|
||||
name: name,
|
||||
buildID: buildID,
|
||||
m: &elfMapping{start: start, limit: limit, offset: offset, kernelOffset: kernelOffset},
|
||||
}}, nil
|
||||
}
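// Hypothetical kernel example: opening "vmlinux" mapped at 0xffff000020080000 with
// relocationSymbol "_text" records that symbol's value as kernelOffset, so later
// ObjAddr calls can undo the KASLR slide (see TestELFKernelOffset in the tests).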
|
||||
|
||||
func (b *binrep) openPE(name string, start, limit, offset uint64) (plugin.ObjFile, error) {
|
||||
pf, err := pe.Open(name)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing %s: %v", name, err)
|
||||
}
|
||||
defer pf.Close()
|
||||
|
||||
var imageBase uint64
|
||||
switch h := pf.OptionalHeader.(type) {
|
||||
case *pe.OptionalHeader32:
|
||||
imageBase = uint64(h.ImageBase)
|
||||
case *pe.OptionalHeader64:
|
||||
imageBase = uint64(h.ImageBase)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown OptionalHeader %T", pf.OptionalHeader)
|
||||
}
|
||||
|
||||
var base uint64
|
||||
if start > 0 {
|
||||
base = start - imageBase
|
||||
}
|
||||
if b.fast || (!b.addr2lineFound && !b.llvmSymbolizerFound) {
|
||||
return &fileNM{file: file{b: b, name: name, base: base}}, nil
|
||||
}
|
||||
return &fileAddr2Line{file: file{b: b, name: name, base: base}}, nil
|
||||
}
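// For PE binaries the runtime start address already includes the preferred ImageBase,
// so a start of 0x150000000 against an ImageBase of 0x140000000 yields base 0x10000000,
// which is exactly the simulated-ASLR case exercised by TestPEFile.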
|
||||
|
||||
// elfMapping stores the parameters of a runtime mapping that are needed to
|
||||
// identify the ELF segment associated with a mapping.
|
||||
type elfMapping struct {
|
||||
// Runtime mapping parameters.
|
||||
start, limit, offset uint64
|
||||
// Offset of kernel relocation symbol. Only defined for kernel images, nil otherwise.
|
||||
kernelOffset *uint64
|
||||
}
|
||||
|
||||
// findProgramHeader returns the program segment that matches the current
|
||||
// mapping and the given address, or an error if it cannot find a unique program
|
||||
// header.
|
||||
func (m *elfMapping) findProgramHeader(ef *elf.File, addr uint64) (*elf.ProgHeader, error) {
|
||||
// For user space executables, we try to find the actual program segment that
|
||||
// is associated with the given mapping. Skip this search if limit <= start.
|
||||
// We cannot use just a check on the start address of the mapping to tell if
|
||||
// it's a kernel / .ko module mapping, because with quipper address remapping
|
||||
// enabled, the address would be in the lower half of the address space.
|
||||
|
||||
if m.kernelOffset != nil || m.start >= m.limit || m.limit >= (uint64(1)<<63) {
|
||||
// For the kernel, find the program segment that includes the .text section.
|
||||
return elfexec.FindTextProgHeader(ef), nil
|
||||
}
|
||||
|
||||
// Fetch all the loadable segments.
|
||||
var phdrs []elf.ProgHeader
|
||||
for i := range ef.Progs {
|
||||
if ef.Progs[i].Type == elf.PT_LOAD {
|
||||
phdrs = append(phdrs, ef.Progs[i].ProgHeader)
|
||||
}
|
||||
}
|
||||
// Some ELF files don't contain any loadable program segments, e.g. .ko
|
||||
// kernel modules. It's not an error to have no header in such cases.
|
||||
if len(phdrs) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
// Get all program headers associated with the mapping.
|
||||
headers := elfexec.ProgramHeadersForMapping(phdrs, m.offset, m.limit-m.start)
|
||||
if len(headers) == 0 {
|
||||
return nil, errors.New("no program header matches mapping info")
|
||||
}
|
||||
if len(headers) == 1 {
|
||||
return headers[0], nil
|
||||
}
|
||||
|
||||
// Use the file offset corresponding to the address to symbolize, to narrow
|
||||
// down the header.
|
||||
return elfexec.HeaderForFileOffset(headers, addr-m.start+m.offset)
|
||||
}
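// For example, a data mapping can overlap both an initialized PT_LOAD segment and
// the zero-filled tail of another; the file offset of the sampled address is what
// narrows the match down to a single header (see the bad-BSS cases in the tests).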
|
||||
|
||||
// file implements the binutils.ObjFile interface.
|
||||
type file struct {
|
||||
b *binrep
|
||||
name string
|
||||
buildID string
|
||||
|
||||
baseOnce sync.Once // Ensures the base, baseErr and isData are computed once.
|
||||
base uint64
|
||||
baseErr error // Any eventual error while computing the base.
|
||||
isData bool
|
||||
// Mapping information. Relevant only for ELF files, nil otherwise.
|
||||
m *elfMapping
|
||||
}
|
||||
|
||||
// computeBase computes the relocation base for the given binary file only if
|
||||
// the elfMapping field is set. It populates the base and isData fields and
|
||||
// returns an error.
|
||||
func (f *file) computeBase(addr uint64) error {
|
||||
if f == nil || f.m == nil {
|
||||
return nil
|
||||
}
|
||||
if addr < f.m.start || addr >= f.m.limit {
|
||||
return fmt.Errorf("specified address %x is outside the mapping range [%x, %x] for file %q", addr, f.m.start, f.m.limit, f.name)
|
||||
}
|
||||
ef, err := elfOpen(f.name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error parsing %s: %v", f.name, err)
|
||||
}
|
||||
defer ef.Close()
|
||||
|
||||
ph, err := f.m.findProgramHeader(ef, addr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to find program header for file %q, ELF mapping %#v, address %x: %v", f.name, *f.m, addr, err)
|
||||
}
|
||||
|
||||
base, err := elfexec.GetBase(&ef.FileHeader, ph, f.m.kernelOffset, f.m.start, f.m.limit, f.m.offset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.base = base
|
||||
f.isData = ph != nil && ph.Flags&elf.PF_X == 0
|
||||
return nil
|
||||
}
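// Once computeBase succeeds, callers recover static object addresses as addr - f.base;
// e.g. with base 0x5000000, a sampled address 0x5000c00 maps back to file address 0xc00.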
|
||||
|
||||
func (f *file) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f *file) ObjAddr(addr uint64) (uint64, error) {
|
||||
f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) })
|
||||
if f.baseErr != nil {
|
||||
return 0, f.baseErr
|
||||
}
|
||||
return addr - f.base, nil
|
||||
}
|
||||
|
||||
func (f *file) BuildID() string {
|
||||
return f.buildID
|
||||
}
|
||||
|
||||
func (f *file) SourceLine(addr uint64) ([]plugin.Frame, error) {
|
||||
f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) })
|
||||
if f.baseErr != nil {
|
||||
return nil, f.baseErr
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (f *file) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *file) Symbols(r *regexp.Regexp, addr uint64) ([]*plugin.Sym, error) {
|
||||
// Get from nm a list of symbols sorted by address.
|
||||
cmd := exec.Command(f.b.nm, "-n", f.name)
|
||||
out, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%v: %v", cmd.Args, err)
|
||||
}
|
||||
|
||||
return findSymbols(out, f.name, r, addr)
|
||||
}
|
||||
|
||||
// fileNM implements the binutils.ObjFile interface, using 'nm' to map
|
||||
// addresses to symbols (without file/line number information). It is
|
||||
// faster than fileAddr2Line.
|
||||
type fileNM struct {
|
||||
file
|
||||
addr2linernm *addr2LinerNM
|
||||
}
|
||||
|
||||
func (f *fileNM) SourceLine(addr uint64) ([]plugin.Frame, error) {
|
||||
f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) })
|
||||
if f.baseErr != nil {
|
||||
return nil, f.baseErr
|
||||
}
|
||||
if f.addr2linernm == nil {
|
||||
addr2liner, err := newAddr2LinerNM(f.b.nm, f.name, f.base)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.addr2linernm = addr2liner
|
||||
}
|
||||
return f.addr2linernm.addrInfo(addr)
|
||||
}
|
||||
|
||||
// fileAddr2Line implements the binutils.ObjFile interface, using
|
||||
// llvm-symbolizer, if that's available, or addr2line to map addresses to
|
||||
// symbols (with file/line number information). It can be slow for large
|
||||
// binaries with debug information.
|
||||
type fileAddr2Line struct {
|
||||
once sync.Once
|
||||
file
|
||||
addr2liner *addr2Liner
|
||||
llvmSymbolizer *llvmSymbolizer
|
||||
isData bool
|
||||
}
|
||||
|
||||
func (f *fileAddr2Line) SourceLine(addr uint64) ([]plugin.Frame, error) {
|
||||
f.baseOnce.Do(func() { f.baseErr = f.computeBase(addr) })
|
||||
if f.baseErr != nil {
|
||||
return nil, f.baseErr
|
||||
}
|
||||
f.once.Do(f.init)
|
||||
if f.llvmSymbolizer != nil {
|
||||
return f.llvmSymbolizer.addrInfo(addr)
|
||||
}
|
||||
if f.addr2liner != nil {
|
||||
return f.addr2liner.addrInfo(addr)
|
||||
}
|
||||
return nil, fmt.Errorf("could not find local addr2liner")
|
||||
}
|
||||
|
||||
func (f *fileAddr2Line) init() {
|
||||
if llvmSymbolizer, err := newLLVMSymbolizer(f.b.llvmSymbolizer, f.name, f.base, f.isData); err == nil {
|
||||
f.llvmSymbolizer = llvmSymbolizer
|
||||
return
|
||||
}
|
||||
|
||||
if addr2liner, err := newAddr2Liner(f.b.addr2line, f.name, f.base); err == nil {
|
||||
f.addr2liner = addr2liner
|
||||
|
||||
// When addr2line encounters some gcc compiled binaries, it
|
||||
// drops interesting parts of names in anonymous namespaces.
|
||||
// Fallback to NM for better function names.
|
||||
if nm, err := newAddr2LinerNM(f.b.nm, f.name, f.base); err == nil {
|
||||
f.addr2liner.nm = nm
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *fileAddr2Line) Close() error {
|
||||
if f.llvmSymbolizer != nil {
|
||||
f.llvmSymbolizer.rw.close()
|
||||
f.llvmSymbolizer = nil
|
||||
}
|
||||
if f.addr2liner != nil {
|
||||
f.addr2liner.rw.close()
|
||||
f.addr2liner = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
978 plugin/debug/pkg/internal/binutils/binutils_test.go (new file)
@@ -0,0 +1,978 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"debug/elf"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/plugin"
|
||||
)
|
||||
|
||||
var testAddrMap = map[int]string{
|
||||
1000: "_Z3fooid.clone2",
|
||||
2000: "_ZNSaIiEC1Ev.clone18",
|
||||
3000: "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm",
|
||||
}
|
||||
|
||||
func functionName(level int) (name string) {
|
||||
if name = testAddrMap[level]; name != "" {
|
||||
return name
|
||||
}
|
||||
return fmt.Sprintf("fun%d", level)
|
||||
}
|
||||
|
||||
func TestAddr2Liner(t *testing.T) {
|
||||
const offset = 0x500
|
||||
|
||||
a := addr2Liner{rw: &mockAddr2liner{}, base: offset}
|
||||
for i := 1; i < 8; i++ {
|
||||
addr := i*0x1000 + offset
|
||||
s, err := a.addrInfo(uint64(addr))
|
||||
if err != nil {
|
||||
t.Fatalf("addrInfo(%#x): %v", addr, err)
|
||||
}
|
||||
if len(s) != i {
|
||||
t.Fatalf("addrInfo(%#x): got len==%d, want %d", addr, len(s), i)
|
||||
}
|
||||
for l, f := range s {
|
||||
level := (len(s) - l) * 1000
|
||||
want := plugin.Frame{Func: functionName(level), File: fmt.Sprintf("file%d", level), Line: level}
|
||||
|
||||
if f != want {
|
||||
t.Errorf("AddrInfo(%#x)[%d]: = %+v, want %+v", addr, l, f, want)
|
||||
}
|
||||
}
|
||||
}
|
||||
s, err := a.addrInfo(0xFFFF)
|
||||
if err != nil {
|
||||
t.Fatalf("addrInfo(0xFFFF): %v", err)
|
||||
}
|
||||
if len(s) != 0 {
|
||||
t.Fatalf("AddrInfo(0xFFFF): got len==%d, want 0", len(s))
|
||||
}
|
||||
a.rw.close()
|
||||
}
|
||||
|
||||
type mockAddr2liner struct {
|
||||
output []string
|
||||
}
|
||||
|
||||
func (a *mockAddr2liner) write(s string) error {
|
||||
var lines []string
|
||||
switch s {
|
||||
case "1000":
|
||||
lines = []string{"_Z3fooid.clone2", "file1000:1000"}
|
||||
case "2000":
|
||||
lines = []string{"_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "3000":
|
||||
lines = []string{"_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "4000":
|
||||
lines = []string{"fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "5000":
|
||||
lines = []string{"fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "6000":
|
||||
lines = []string{"fun6000", "file6000:6000", "fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "7000":
|
||||
lines = []string{"fun7000", "file7000:7000", "fun6000", "file6000:6000", "fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "8000":
|
||||
lines = []string{"fun8000", "file8000:8000", "fun7000", "file7000:7000", "fun6000", "file6000:6000", "fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
case "9000":
|
||||
lines = []string{"fun9000", "file9000:9000", "fun8000", "file8000:8000", "fun7000", "file7000:7000", "fun6000", "file6000:6000", "fun5000", "file5000:5000", "fun4000", "file4000:4000", "_ZNSt6vectorIS_IS_IiSaIiEESaIS1_EESaIS3_EEixEm", "file3000:3000", "_ZNSaIiEC1Ev.clone18", "file2000:2000", "_Z3fooid.clone2", "file1000:1000"}
|
||||
default:
|
||||
lines = []string{"??", "??:0"}
|
||||
}
|
||||
a.output = append(a.output, "0x"+s)
|
||||
a.output = append(a.output, lines...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *mockAddr2liner) readLine() (string, error) {
|
||||
if len(a.output) == 0 {
|
||||
return "", fmt.Errorf("end of file")
|
||||
}
|
||||
next := a.output[0]
|
||||
a.output = a.output[1:]
|
||||
return next, nil
|
||||
}
|
||||
|
||||
func (a *mockAddr2liner) close() {
|
||||
}
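// Taken together, write, readLine and close above mimic addr2line's stdin/stdout
// protocol: each written hex address queues the echoed "0x<addr>" marker followed by
// alternating function and file:line lines, which addr2Liner then drains via readLine.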
|
||||
|
||||
func TestAddr2LinerLookup(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
nmOutput string
|
||||
wantSymbolized map[uint64]string
|
||||
wantUnsymbolized []uint64
|
||||
}{
|
||||
{
|
||||
desc: "odd symbol count",
|
||||
nmOutput: `
|
||||
0x1000 T 1000 100
|
||||
0x2000 T 2000 120
|
||||
0x3000 T 3000 130
|
||||
`,
|
||||
wantSymbolized: map[uint64]string{
|
||||
0x1000: "0x1000",
|
||||
0x1001: "0x1000",
|
||||
0x1FFF: "0x1000",
|
||||
0x2000: "0x2000",
|
||||
0x2001: "0x2000",
|
||||
0x3000: "0x3000",
|
||||
0x312f: "0x3000",
|
||||
},
|
||||
wantUnsymbolized: []uint64{0x0fff, 0x3130},
|
||||
},
|
||||
{
|
||||
desc: "even symbol count",
|
||||
nmOutput: `
|
||||
0x1000 T 1000 100
|
||||
0x2000 T 2000 120
|
||||
0x3000 T 3000 130
|
||||
0x4000 T 4000 140
|
||||
`,
|
||||
wantSymbolized: map[uint64]string{
|
||||
0x1000: "0x1000",
|
||||
0x1001: "0x1000",
|
||||
0x1FFF: "0x1000",
|
||||
0x2000: "0x2000",
|
||||
0x2fff: "0x2000",
|
||||
0x3000: "0x3000",
|
||||
0x3fff: "0x3000",
|
||||
0x4000: "0x4000",
|
||||
0x413f: "0x4000",
|
||||
},
|
||||
wantUnsymbolized: []uint64{0x0fff, 0x4140},
|
||||
},
|
||||
{
|
||||
desc: "different symbol types",
|
||||
nmOutput: `
|
||||
absolute_0x100 a 100
|
||||
absolute_0x200 A 200
|
||||
text_0x1000 t 1000 100
|
||||
bss_0x2000 b 2000 120
|
||||
data_0x3000 d 3000 130
|
||||
rodata_0x4000 r 4000 140
|
||||
weak_0x5000 v 5000 150
|
||||
text_0x6000 T 6000 160
|
||||
bss_0x7000 B 7000 170
|
||||
data_0x8000 D 8000 180
|
||||
rodata_0x9000 R 9000 190
|
||||
weak_0xa000 V a000 1a0
|
||||
weak_0xb000 W b000 1b0
|
||||
`,
|
||||
wantSymbolized: map[uint64]string{
|
||||
0x1000: "text_0x1000",
|
||||
0x1FFF: "text_0x1000",
|
||||
0x2000: "bss_0x2000",
|
||||
0x211f: "bss_0x2000",
|
||||
0x3000: "data_0x3000",
|
||||
0x312f: "data_0x3000",
|
||||
0x4000: "rodata_0x4000",
|
||||
0x413f: "rodata_0x4000",
|
||||
0x5000: "weak_0x5000",
|
||||
0x514f: "weak_0x5000",
|
||||
0x6000: "text_0x6000",
|
||||
0x6fff: "text_0x6000",
|
||||
0x7000: "bss_0x7000",
|
||||
0x716f: "bss_0x7000",
|
||||
0x8000: "data_0x8000",
|
||||
0x817f: "data_0x8000",
|
||||
0x9000: "rodata_0x9000",
|
||||
0x918f: "rodata_0x9000",
|
||||
0xa000: "weak_0xa000",
|
||||
0xa19f: "weak_0xa000",
|
||||
0xb000: "weak_0xb000",
|
||||
0xb1af: "weak_0xb000",
|
||||
},
|
||||
wantUnsymbolized: []uint64{0x100, 0x200, 0x0fff, 0x2120, 0x3130, 0x4140, 0x5150, 0x7170, 0x8180, 0x9190, 0xa1a0, 0xb1b0},
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
a, err := parseAddr2LinerNM(0, bytes.NewBufferString(tc.nmOutput))
|
||||
if err != nil {
|
||||
t.Fatalf("nm parse error: %v", err)
|
||||
}
|
||||
for address, want := range tc.wantSymbolized {
|
||||
if got, _ := a.addrInfo(address); !checkAddress(got, address, want) {
|
||||
t.Errorf("%x: got %v, want %s", address, got, want)
|
||||
}
|
||||
}
|
||||
for _, unknown := range tc.wantUnsymbolized {
|
||||
if got, _ := a.addrInfo(unknown); got != nil {
|
||||
t.Errorf("%x: got %v, want nil", unknown, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func checkAddress(got []plugin.Frame, address uint64, want string) bool {
|
||||
if len(got) != 1 {
|
||||
return false
|
||||
}
|
||||
return got[0].Func == want
|
||||
}
|
||||
|
||||
func TestSetTools(t *testing.T) {
|
||||
// Test that multiple calls work.
|
||||
bu := &Binutils{}
|
||||
bu.SetTools("")
|
||||
bu.SetTools("")
|
||||
}
|
||||
|
||||
func TestSetFastSymbolization(t *testing.T) {
|
||||
// Test that multiple calls work.
|
||||
bu := &Binutils{}
|
||||
bu.SetFastSymbolization(true)
|
||||
bu.SetFastSymbolization(false)
|
||||
}
|
||||
|
||||
func skipUnlessLinuxAmd64(t *testing.T) {
|
||||
if runtime.GOOS != "linux" || runtime.GOARCH != "amd64" {
|
||||
t.Skip("This test only works on x86-64 Linux")
|
||||
}
|
||||
}
|
||||
|
||||
func skipUnlessDarwinAmd64(t *testing.T) {
|
||||
if runtime.GOOS != "darwin" || runtime.GOARCH != "amd64" {
|
||||
t.Skip("This test only works on x86-64 macOS")
|
||||
}
|
||||
}
|
||||
|
||||
func skipUnlessWindowsAmd64(t *testing.T) {
|
||||
if runtime.GOOS != "windows" || runtime.GOARCH != "amd64" {
|
||||
t.Skip("This test only works on x86-64 Windows")
|
||||
}
|
||||
}
|
||||
|
||||
func testDisasm(t *testing.T, intelSyntax bool) {
|
||||
_, llvmObjdump, buObjdump := findObjdump([]string{""})
|
||||
if !(llvmObjdump || buObjdump) {
|
||||
t.Skip("cannot disasm: no objdump tool available")
|
||||
}
|
||||
|
||||
bu := &Binutils{}
|
||||
var testexe string
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
testexe = "exe_linux_64"
|
||||
case "darwin":
|
||||
testexe = "exe_mac_64"
|
||||
case "windows":
|
||||
testexe = "exe_windows_64.exe"
|
||||
default:
|
||||
t.Skipf("unsupported OS %q", runtime.GOOS)
|
||||
}
|
||||
|
||||
insts, err := bu.Disasm(filepath.Join("testdata", testexe), 0, math.MaxUint64, intelSyntax)
|
||||
if err != nil {
|
||||
t.Fatalf("Disasm: unexpected error %v", err)
|
||||
}
|
||||
mainCount := 0
|
||||
for _, x := range insts {
|
||||
// macOS symbols have a leading underscore.
|
||||
if x.Function == "main" || x.Function == "_main" {
|
||||
mainCount++
|
||||
}
|
||||
}
|
||||
if mainCount == 0 {
|
||||
t.Error("Disasm: found no main instructions")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDisasm(t *testing.T) {
|
||||
if (runtime.GOOS != "linux" && runtime.GOOS != "darwin" && runtime.GOOS != "windows") || runtime.GOARCH != "amd64" {
|
||||
t.Skip("This test only works on x86-64 Linux, macOS or Windows")
|
||||
}
|
||||
testDisasm(t, false)
|
||||
}
|
||||
|
||||
func TestDisasmIntelSyntax(t *testing.T) {
|
||||
if (runtime.GOOS != "linux" && runtime.GOOS != "darwin" && runtime.GOOS != "windows") || runtime.GOARCH != "amd64" {
|
||||
t.Skip("This test only works on x86_64 Linux, macOS or Windows as it tests Intel asm syntax")
|
||||
}
|
||||
testDisasm(t, true)
|
||||
}
|
||||
|
||||
func findSymbol(syms []*plugin.Sym, name string) *plugin.Sym {
|
||||
for _, s := range syms {
|
||||
for _, n := range s.Name {
|
||||
if n == name {
|
||||
return s
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestObjFile(t *testing.T) {
|
||||
// If this test fails, check the address for main function in testdata/exe_linux_64
|
||||
// using the command 'nm -n' on that binary. Update the hardcoded addresses below to match
|
||||
// the addresses from the output.
|
||||
skipUnlessLinuxAmd64(t)
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
start, limit, offset uint64
|
||||
addr uint64
|
||||
}{
|
||||
{"fixed load address", 0x400000, 0x4006fc, 0, 0x40052d},
|
||||
// True user-mode ASLR binaries are ET_DYN rather than ET_EXEC so this case
|
||||
// is a bit artificial except that it approximates the
|
||||
// vmlinux-with-kernel-ASLR case where the binary *is* ET_EXEC.
|
||||
{"simulated ASLR address", 0x500000, 0x5006fc, 0, 0x50052d},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
bu := &Binutils{}
|
||||
f, err := bu.Open(filepath.Join("testdata", "exe_linux_64"), tc.start, tc.limit, tc.offset, "")
|
||||
if err != nil {
|
||||
t.Fatalf("Open: unexpected error %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
syms, err := f.Symbols(regexp.MustCompile("main"), 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Symbols: unexpected error %v", err)
|
||||
}
|
||||
|
||||
m := findSymbol(syms, "main")
|
||||
if m == nil {
|
||||
t.Fatalf("Symbols: did not find main")
|
||||
}
|
||||
addr, err := f.ObjAddr(tc.addr)
|
||||
if err != nil {
|
||||
t.Fatalf("ObjAddr(%x) failed: %v", tc.addr, err)
|
||||
}
|
||||
if addr != m.Start {
|
||||
t.Errorf("ObjAddr(%x) got %x, want %x", tc.addr, addr, m.Start)
|
||||
}
|
||||
gotFrames, err := f.SourceLine(tc.addr)
|
||||
if err != nil {
|
||||
t.Fatalf("SourceLine: unexpected error %v", err)
|
||||
}
|
||||
wantFrames := []plugin.Frame{
|
||||
{Func: "main", File: "/tmp/hello.c", Line: 3, StartLine: 3},
|
||||
}
|
||||
if !reflect.DeepEqual(gotFrames, wantFrames) {
|
||||
t.Fatalf("SourceLine for main: got %v; want %v\n", gotFrames, wantFrames)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMachoFiles(t *testing.T) {
|
||||
// If this test fails, check the address for main function in testdata/exe_mac_64
|
||||
// and testdata/lib_mac_64 using addr2line or gaddr2line. Update the
|
||||
// hardcoded addresses below to match the addresses from the output.
|
||||
skipUnlessDarwinAmd64(t)
|
||||
|
||||
// Load `file`, pretending it was mapped at `start`. Then get the symbol
|
||||
// table. Check that it contains the symbol `sym` and that the address
|
||||
// `addr` gives the `expected` stack trace.
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
file string
|
||||
start, limit, offset uint64
|
||||
addr uint64
|
||||
sym string
|
||||
expected []plugin.Frame
|
||||
}{
|
||||
{"normal mapping", "exe_mac_64", 0x100000000, math.MaxUint64, 0,
|
||||
0x100000f50, "_main",
|
||||
[]plugin.Frame{
|
||||
{Func: "main", File: "/tmp/hello.c", Line: 3, StartLine: 3},
|
||||
}},
|
||||
{"other mapping", "exe_mac_64", 0x200000000, math.MaxUint64, 0,
|
||||
0x200000f50, "_main",
|
||||
[]plugin.Frame{
|
||||
{Func: "main", File: "/tmp/hello.c", Line: 3, StartLine: 3},
|
||||
}},
|
||||
{"lib normal mapping", "lib_mac_64", 0, math.MaxUint64, 0,
|
||||
0xfa0, "_bar",
|
||||
[]plugin.Frame{
|
||||
{Func: "bar", File: "/tmp/lib.c", Line: 5, StartLine: 5},
|
||||
}},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
bu := &Binutils{}
|
||||
f, err := bu.Open(filepath.Join("testdata", tc.file), tc.start, tc.limit, tc.offset, "")
|
||||
if err != nil {
|
||||
t.Fatalf("Open: unexpected error %v", err)
|
||||
}
|
||||
t.Logf("binutils: %v", bu)
|
||||
if runtime.GOOS == "darwin" && !bu.rep.addr2lineFound && !bu.rep.llvmSymbolizerFound {
|
||||
// On macOS, user needs to install gaddr2line or llvm-symbolizer with
|
||||
// Homebrew, skip the test when the environment doesn't have it
|
||||
// installed.
|
||||
t.Skip("couldn't find addr2line or gaddr2line")
|
||||
}
|
||||
defer f.Close()
|
||||
syms, err := f.Symbols(nil, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Symbols: unexpected error %v", err)
|
||||
}
|
||||
|
||||
m := findSymbol(syms, tc.sym)
|
||||
if m == nil {
|
||||
t.Fatalf("Symbols: could not find symbol %v", tc.sym)
|
||||
}
|
||||
gotFrames, err := f.SourceLine(tc.addr)
|
||||
if err != nil {
|
||||
t.Fatalf("SourceLine: unexpected error %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(gotFrames, tc.expected) {
|
||||
t.Fatalf("SourceLine for main: got %v; want %v\n", gotFrames, tc.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestLLVMSymbolizer(t *testing.T) {
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skip("testtdata/llvm-symbolizer has only been tested on linux")
|
||||
}
|
||||
|
||||
cmd := filepath.Join("testdata", "fake-llvm-symbolizer")
|
||||
for _, c := range []struct {
|
||||
addr uint64
|
||||
isData bool
|
||||
frames []plugin.Frame
|
||||
}{
|
||||
{0x10, false, []plugin.Frame{
|
||||
{Func: "Inlined_0x10", File: "foo.h", Line: 0, Column: 0, StartLine: 0},
|
||||
{Func: "Func_0x10", File: "foo.c", Line: 2, Column: 1, StartLine: 2},
|
||||
}},
|
||||
{0x20, true, []plugin.Frame{
|
||||
{Func: "foo_0x20", File: "0x20 8"},
|
||||
}},
|
||||
} {
|
||||
desc := fmt.Sprintf("Code %x", c.addr)
|
||||
if c.isData {
|
||||
desc = fmt.Sprintf("Data %x", c.addr)
|
||||
}
|
||||
t.Run(desc, func(t *testing.T) {
|
||||
symbolizer, err := newLLVMSymbolizer(cmd, "foo", 0, c.isData)
|
||||
if err != nil {
|
||||
t.Fatalf("newLLVMSymbolizer: unexpected error %v", err)
|
||||
}
|
||||
defer symbolizer.rw.close()
|
||||
|
||||
frames, err := symbolizer.addrInfo(c.addr)
|
||||
if err != nil {
|
||||
t.Fatalf("LLVM: unexpected error %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(frames, c.frames) {
|
||||
t.Errorf("LLVM: expect %v; got %v\n", c.frames, frames)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPEFile(t *testing.T) {
|
||||
// If this test fails, check the address for main function in testdata/exe_windows_64.exe
|
||||
// using the command 'nm -n' on that binary. Update the hardcoded addresses below to match
|
||||
// the addresses from the output.
|
||||
skipUnlessWindowsAmd64(t)
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
start, limit, offset uint64
|
||||
addr uint64
|
||||
}{
|
||||
{"fake mapping", 0, math.MaxUint64, 0, 0x140001594},
|
||||
{"fixed load address", 0x140000000, 0x140002000, 0, 0x140001594},
|
||||
{"simulated ASLR address", 0x150000000, 0x150002000, 0, 0x150001594},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
bu := &Binutils{}
|
||||
f, err := bu.Open(filepath.Join("testdata", "exe_windows_64.exe"), tc.start, tc.limit, tc.offset, "")
|
||||
if err != nil {
|
||||
t.Fatalf("Open: unexpected error %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
syms, err := f.Symbols(regexp.MustCompile("main"), 0)
|
||||
if err != nil {
|
||||
t.Fatalf("Symbols: unexpected error %v", err)
|
||||
}
|
||||
|
||||
m := findSymbol(syms, "main")
|
||||
if m == nil {
|
||||
t.Fatalf("Symbols: did not find main")
|
||||
}
|
||||
addr, err := f.ObjAddr(tc.addr)
|
||||
if err != nil {
|
||||
t.Fatalf("ObjAddr(%x) failed: %v", tc.addr, err)
|
||||
}
|
||||
if addr != m.Start {
|
||||
t.Errorf("ObjAddr(%x) got %x, want %x", tc.addr, addr, m.Start)
|
||||
}
|
||||
gotFrames, err := f.SourceLine(tc.addr)
|
||||
if err != nil {
|
||||
t.Fatalf("SourceLine: unexpected error %v", err)
|
||||
}
|
||||
wantFrames := []plugin.Frame{
|
||||
{Func: "main", File: "hello.c", Line: 3, Column: 12, StartLine: 3},
|
||||
}
|
||||
if !reflect.DeepEqual(gotFrames, wantFrames) {
|
||||
t.Fatalf("SourceLine for main: got %v; want %v\n", gotFrames, wantFrames)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestOpenMalformedELF(t *testing.T) {
|
||||
// Test that opening a malformed ELF file will report an error containing
|
||||
// the word "ELF".
|
||||
bu := &Binutils{}
|
||||
_, err := bu.Open(filepath.Join("testdata", "malformed_elf"), 0, 0, 0, "")
|
||||
if err == nil {
|
||||
t.Fatalf("Open: unexpected success")
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), "ELF") {
|
||||
t.Errorf("Open: got %v, want error containing 'ELF'", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOpenMalformedMachO(t *testing.T) {
|
||||
// Test that opening a malformed Mach-O file will report an error containing
|
||||
// the word "Mach-O".
|
||||
bu := &Binutils{}
|
||||
_, err := bu.Open(filepath.Join("testdata", "malformed_macho"), 0, 0, 0, "")
|
||||
if err == nil {
|
||||
t.Fatalf("Open: unexpected success")
|
||||
}
|
||||
|
||||
if !strings.Contains(err.Error(), "Mach-O") {
|
||||
t.Errorf("Open: got %v, want error containing 'Mach-O'", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestObjdumpVersionChecks(t *testing.T) {
|
||||
// Test that the objdump version strings are parsed properly.
|
||||
type testcase struct {
|
||||
desc string
|
||||
os string
|
||||
ver string
|
||||
want bool
|
||||
}
|
||||
|
||||
for _, tc := range []testcase{
|
||||
{
|
||||
desc: "Valid Apple LLVM version string with usable version",
|
||||
os: "darwin",
|
||||
ver: "Apple LLVM version 11.0.3 (clang-1103.0.32.62)\nOptimized build.",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
desc: "Valid Apple LLVM version string with unusable version",
|
||||
os: "darwin",
|
||||
ver: "Apple LLVM version 10.0.0 (clang-1000.11.45.5)\nOptimized build.",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
desc: "Invalid Apple LLVM version string with usable version",
|
||||
os: "darwin",
|
||||
ver: "Apple LLVM versions 11.0.3 (clang-1103.0.32.62)\nOptimized build.",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
desc: "Valid LLVM version string with usable version",
|
||||
os: "linux",
|
||||
ver: "LLVM (http://llvm.org/):\nLLVM version 9.0.1\n\nOptimized build.",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
desc: "Valid LLVM version string with unusable version",
|
||||
os: "linux",
|
||||
ver: "LLVM (http://llvm.org/):\nLLVM version 6.0.1\n\nOptimized build.",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
desc: "Invalid LLVM version string with usable version",
|
||||
os: "linux",
|
||||
ver: "LLVM (http://llvm.org/):\nLLVM versions 9.0.1\n\nOptimized build.",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
desc: "Valid LLVM objdump version string with trunk",
|
||||
os: runtime.GOOS,
|
||||
ver: "LLVM (http://llvm.org/):\nLLVM version custom-trunk 124ffeb592a00bfe\nOptimized build.",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
desc: "Invalid LLVM objdump version string with trunk",
|
||||
os: runtime.GOOS,
|
||||
ver: "LLVM (http://llvm.org/):\nLLVM version custom-trank 124ffeb592a00bfe\nOptimized build.",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
desc: "Invalid LLVM objdump version string with trunk",
|
||||
os: runtime.GOOS,
|
||||
ver: "LLVM (http://llvm.org/):\nllvm version custom-trunk 124ffeb592a00bfe\nOptimized build.",
|
||||
want: false,
|
||||
},
|
||||
} {
|
||||
if runtime.GOOS == tc.os {
|
||||
if got := isLLVMObjdump(tc.ver); got != tc.want {
|
||||
t.Errorf("%v: got %v, want %v", tc.desc, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, tc := range []testcase{
|
||||
{
|
||||
desc: "Valid GNU objdump version string",
|
||||
ver: "GNU objdump (GNU Binutils) 2.34\nCopyright (C) 2020 Free Software Foundation, Inc.",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
desc: "Invalid GNU objdump version string",
|
||||
ver: "GNU nm (GNU Binutils) 2.34\nCopyright (C) 2020 Free Software Foundation, Inc.",
|
||||
want: false,
|
||||
},
|
||||
} {
|
||||
if got := isBuObjdump(tc.ver); got != tc.want {
|
||||
t.Errorf("%v: got %v, want %v", tc.desc, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestComputeBase(t *testing.T) {
|
||||
realELFOpen := elfOpen
|
||||
defer func() {
|
||||
elfOpen = realELFOpen
|
||||
}()
|
||||
|
||||
tinyExecFile := &elf.File{
|
||||
FileHeader: elf.FileHeader{Type: elf.ET_EXEC},
|
||||
Progs: []*elf.Prog{
|
||||
{ProgHeader: elf.ProgHeader{Type: elf.PT_PHDR, Flags: elf.PF_R | elf.PF_X, Off: 0x40, Vaddr: 0x400040, Paddr: 0x400040, Filesz: 0x1f8, Memsz: 0x1f8, Align: 8}},
|
||||
{ProgHeader: elf.ProgHeader{Type: elf.PT_INTERP, Flags: elf.PF_R, Off: 0x238, Vaddr: 0x400238, Paddr: 0x400238, Filesz: 0x1c, Memsz: 0x1c, Align: 1}},
|
||||
{ProgHeader: elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}},
|
||||
{ProgHeader: elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x1f0, Memsz: 0x1f0, Align: 0x200000}},
|
||||
},
|
||||
}
|
||||
tinyBadBSSExecFile := &elf.File{
|
||||
FileHeader: elf.FileHeader{Type: elf.ET_EXEC},
|
||||
Progs: []*elf.Prog{
|
||||
{ProgHeader: elf.ProgHeader{Type: elf.PT_PHDR, Flags: elf.PF_R | elf.PF_X, Off: 0x40, Vaddr: 0x400040, Paddr: 0x400040, Filesz: 0x1f8, Memsz: 0x1f8, Align: 8}},
|
||||
{ProgHeader: elf.ProgHeader{Type: elf.PT_INTERP, Flags: elf.PF_R, Off: 0x238, Vaddr: 0x400238, Paddr: 0x400238, Filesz: 0x1c, Memsz: 0x1c, Align: 1}},
|
||||
{ProgHeader: elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_X, Off: 0, Vaddr: 0, Paddr: 0, Filesz: 0xc80, Memsz: 0xc80, Align: 0x200000}},
|
||||
{ProgHeader: elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xc80, Vaddr: 0x200c80, Paddr: 0x200c80, Filesz: 0x100, Memsz: 0x1f0, Align: 0x200000}},
|
||||
{ProgHeader: elf.ProgHeader{Type: elf.PT_LOAD, Flags: elf.PF_R | elf.PF_W, Off: 0xd80, Vaddr: 0x400d80, Paddr: 0x400d80, Filesz: 0x90, Memsz: 0x90, Align: 0x200000}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
file *elf.File
|
||||
openErr error
|
||||
mapping *elfMapping
|
||||
addr uint64
|
||||
wantError bool
|
||||
wantBase uint64
|
||||
wantIsData bool
|
||||
}{
|
||||
{
|
||||
desc: "no elf mapping, no error",
|
||||
mapping: nil,
|
||||
addr: 0x1000,
|
||||
wantBase: 0,
|
||||
wantIsData: false,
|
||||
},
|
||||
{
|
||||
desc: "address outside mapping bounds means error",
|
||||
file: &elf.File{},
|
||||
mapping: &elfMapping{start: 0x2000, limit: 0x5000, offset: 0x1000},
|
||||
addr: 0x1000,
|
||||
wantError: true,
|
||||
},
|
||||
{
|
||||
desc: "elf.Open failing means error",
|
||||
file: &elf.File{FileHeader: elf.FileHeader{Type: elf.ET_EXEC}},
|
||||
openErr: errors.New("elf.Open failed"),
|
||||
mapping: &elfMapping{start: 0x2000, limit: 0x5000, offset: 0x1000},
|
||||
addr: 0x4000,
|
||||
wantError: true,
|
||||
},
|
||||
{
|
||||
desc: "no loadable segments, no error",
|
||||
file: &elf.File{FileHeader: elf.FileHeader{Type: elf.ET_EXEC}},
|
||||
mapping: &elfMapping{start: 0x2000, limit: 0x5000, offset: 0x1000},
|
||||
addr: 0x4000,
|
||||
wantBase: 0,
|
||||
wantIsData: false,
|
||||
},
|
||||
{
|
||||
desc: "unsupported executable type, GetBase returns error",
|
||||
file: &elf.File{FileHeader: elf.FileHeader{Type: elf.ET_NONE}},
|
||||
mapping: &elfMapping{start: 0x2000, limit: 0x5000, offset: 0x1000},
|
||||
addr: 0x4000,
|
||||
wantError: true,
|
||||
},
|
||||
{
|
||||
desc: "tiny file select executable segment by offset",
|
||||
file: tinyExecFile,
|
||||
mapping: &elfMapping{start: 0x5000000, limit: 0x5001000, offset: 0x0},
|
||||
addr: 0x5000c00,
|
||||
wantBase: 0x5000000,
|
||||
wantIsData: false,
|
||||
},
|
||||
{
|
||||
desc: "tiny file select data segment by offset",
|
||||
file: tinyExecFile,
|
||||
mapping: &elfMapping{start: 0x5200000, limit: 0x5201000, offset: 0x0},
|
||||
addr: 0x5200c80,
|
||||
wantBase: 0x5000000,
|
||||
wantIsData: true,
|
||||
},
|
||||
{
|
||||
desc: "tiny file offset outside any segment means error",
|
||||
file: tinyExecFile,
|
||||
mapping: &elfMapping{start: 0x5200000, limit: 0x5201000, offset: 0x0},
|
||||
addr: 0x5200e70,
|
||||
wantError: true,
|
||||
},
|
||||
{
|
||||
desc: "tiny file with bad BSS segment selects data segment by offset in initialized section",
|
||||
file: tinyBadBSSExecFile,
|
||||
mapping: &elfMapping{start: 0x5200000, limit: 0x5201000, offset: 0x0},
|
||||
addr: 0x5200d79,
|
||||
wantBase: 0x5000000,
|
||||
wantIsData: true,
|
||||
},
|
||||
{
|
||||
desc: "tiny file with bad BSS segment with offset in uninitialized section means error",
|
||||
file: tinyBadBSSExecFile,
|
||||
mapping: &elfMapping{start: 0x5200000, limit: 0x5201000, offset: 0x0},
|
||||
addr: 0x5200d80,
|
||||
wantError: true,
|
||||
},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
elfOpen = func(_ string) (*elf.File, error) {
|
||||
return tc.file, tc.openErr
|
||||
}
|
||||
f := file{m: tc.mapping}
|
||||
err := f.computeBase(tc.addr)
|
||||
if (err != nil) != tc.wantError {
|
||||
t.Errorf("got error %v, want any error=%v", err, tc.wantError)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if f.base != tc.wantBase {
|
||||
t.Errorf("got base %x, want %x", f.base, tc.wantBase)
|
||||
}
|
||||
if f.isData != tc.wantIsData {
|
||||
t.Errorf("got isData %v, want %v", f.isData, tc.wantIsData)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestELFObjAddr(t *testing.T) {
|
||||
// The exe_linux_64 has two loadable program headers:
|
||||
// LOAD 0x0000000000000000 0x0000000000400000 0x0000000000400000
|
||||
// 0x00000000000006fc 0x00000000000006fc R E 0x200000
|
||||
// LOAD 0x0000000000000e10 0x0000000000600e10 0x0000000000600e10
|
||||
// 0x0000000000000230 0x0000000000000238 RW 0x200000
|
||||
name := filepath.Join("testdata", "exe_linux_64")
|
||||
|
||||
for _, tc := range []struct {
|
||||
desc string
|
||||
start, limit, offset uint64
|
||||
wantOpenError bool
|
||||
addr uint64
|
||||
wantObjAddr uint64
|
||||
wantAddrError bool
|
||||
}{
|
||||
{"exec mapping, good address", 0x5400000, 0x5401000, 0, false, 0x5400400, 0x400400, false},
|
||||
{"exec mapping, address outside segment", 0x5400000, 0x5401000, 0, false, 0x5400800, 0, true},
|
||||
{"short data mapping, good address", 0x5600e00, 0x5602000, 0xe00, false, 0x5600e10, 0x600e10, false},
|
||||
{"short data mapping, address outside segment", 0x5600e00, 0x5602000, 0xe00, false, 0x5600e00, 0x600e00, false},
|
||||
{"page aligned data mapping, good address", 0x5600000, 0x5602000, 0, false, 0x5601000, 0x601000, false},
|
||||
{"page aligned data mapping, address outside segment", 0x5600000, 0x5602000, 0, false, 0x5601048, 0, true},
|
||||
{"bad file offset, no matching segment", 0x5600000, 0x5602000, 0x2000, false, 0x5600e10, 0, true},
|
||||
{"large mapping size, match by sample offset", 0x5600000, 0x5603000, 0, false, 0x5600e10, 0x600e10, false},
|
||||
} {
|
||||
t.Run(tc.desc, func(t *testing.T) {
|
||||
b := binrep{}
|
||||
o, err := b.openELF(name, tc.start, tc.limit, tc.offset, "")
|
||||
if (err != nil) != tc.wantOpenError {
|
||||
t.Errorf("openELF got error %v, want any error=%v", err, tc.wantOpenError)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
got, err := o.ObjAddr(tc.addr)
|
||||
if (err != nil) != tc.wantAddrError {
|
||||
t.Errorf("ObjAddr got error %v, want any error=%v", err, tc.wantAddrError)
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if got != tc.wantObjAddr {
|
||||
t.Errorf("got ObjAddr %x; want %x\n", got, tc.wantObjAddr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type buf struct {
|
||||
data []byte
|
||||
}
|
||||
|
||||
// write appends a null-terminated string and returns its starting index.
|
||||
func (b *buf) write(s string) uint32 {
|
||||
res := uint32(len(b.data))
|
||||
b.data = append(b.data, s...)
|
||||
b.data = append(b.data, '\x00')
|
||||
return res
|
||||
}
|
||||
|
||||
// fakeELFFile generates a minimal valid ELF file, with fake .head.text and
|
||||
// .text sections, and their corresponding _text and _stext start symbols,
|
||||
// mimicking a kernel vmlinux image.
|
||||
func fakeELFFile(t *testing.T) *elf.File {
|
||||
var (
|
||||
sizeHeader64 = binary.Size(elf.Header64{})
|
||||
sizeProg64 = binary.Size(elf.Prog64{})
|
||||
sizeSection64 = binary.Size(elf.Section64{})
|
||||
)
|
||||
|
||||
const (
|
||||
textAddr = 0xffff000010080000
|
||||
stextAddr = 0xffff000010081000
|
||||
)
|
||||
|
||||
// Generate magic to identify as an ELF file.
|
||||
var ident [16]uint8
|
||||
ident[0] = '\x7f'
|
||||
ident[1] = 'E'
|
||||
ident[2] = 'L'
|
||||
ident[3] = 'F'
|
||||
ident[elf.EI_CLASS] = uint8(elf.ELFCLASS64)
|
||||
ident[elf.EI_DATA] = uint8(elf.ELFDATA2LSB)
|
||||
ident[elf.EI_VERSION] = uint8(elf.EV_CURRENT)
|
||||
ident[elf.EI_OSABI] = uint8(elf.ELFOSABI_NONE)
|
||||
|
||||
// A single program header, containing code and starting at the _text address.
|
||||
progs := []elf.Prog64{{
|
||||
Type: uint32(elf.PT_LOAD), Flags: uint32(elf.PF_R | elf.PF_X), Off: 0x10000, Vaddr: textAddr, Paddr: textAddr, Filesz: 0x1234567, Memsz: 0x1234567, Align: 0x10000}}
|
||||
|
||||
symNames := buf{}
|
||||
syms := []elf.Sym64{
|
||||
{}, // first symbol empty by convention
|
||||
{Name: symNames.write("_text"), Info: 0, Other: 0, Shndx: 0, Value: textAddr, Size: 0},
|
||||
{Name: symNames.write("_stext"), Info: 0, Other: 0, Shndx: 0, Value: stextAddr, Size: 0},
|
||||
}
|
||||
|
||||
const numSections = 5
|
||||
// We'll write `textSize` zero bytes as contents of the .head.text and .text sections.
|
||||
const textSize = 16
|
||||
// Offset of section contents in the byte stream -- after header, program headers, and section headers.
|
||||
sectionsStart := uint64(sizeHeader64 + len(progs)*sizeProg64 + numSections*sizeSection64)
|
||||
|
||||
secNames := buf{}
|
||||
sections := [numSections]elf.Section64{
|
||||
{Name: secNames.write(".head.text"), Type: uint32(elf.SHT_PROGBITS), Flags: uint64(elf.SHF_ALLOC | elf.SHF_EXECINSTR), Addr: textAddr, Off: sectionsStart, Size: textSize, Link: 0, Info: 0, Addralign: 2048, Entsize: 0},
|
||||
{Name: secNames.write(".text"), Type: uint32(elf.SHT_PROGBITS), Flags: uint64(elf.SHF_ALLOC | elf.SHF_EXECINSTR), Addr: stextAddr, Off: sectionsStart + textSize, Size: textSize, Link: 0, Info: 0, Addralign: 2048, Entsize: 0},
|
||||
{Name: secNames.write(".symtab"), Type: uint32(elf.SHT_SYMTAB), Flags: 0, Addr: 0, Off: sectionsStart + 2*textSize, Size: uint64(len(syms) * elf.Sym64Size), Link: 3 /*index of .strtab*/, Info: 0, Addralign: 8, Entsize: elf.Sym64Size},
|
||||
{Name: secNames.write(".strtab"), Type: uint32(elf.SHT_STRTAB), Flags: 0, Addr: 0, Off: sectionsStart + 2*textSize + uint64(len(syms)*elf.Sym64Size), Size: uint64(len(symNames.data)), Link: 0, Info: 0, Addralign: 1, Entsize: 0},
|
||||
{Name: secNames.write(".shstrtab"), Type: uint32(elf.SHT_STRTAB), Flags: 0, Addr: 0, Off: sectionsStart + 2*textSize + uint64(len(syms)*elf.Sym64Size+len(symNames.data)), Size: uint64(len(secNames.data)), Link: 0, Info: 0, Addralign: 1, Entsize: 0},
|
||||
}
|
||||
|
||||
hdr := elf.Header64{
|
||||
Ident: ident,
|
||||
Type: uint16(elf.ET_DYN),
|
||||
Machine: uint16(elf.EM_AARCH64),
|
||||
Version: uint32(elf.EV_CURRENT),
|
||||
Entry: textAddr,
|
||||
Phoff: uint64(sizeHeader64),
|
||||
Shoff: uint64(sizeHeader64 + len(progs)*sizeProg64),
|
||||
Flags: 0,
|
||||
Ehsize: uint16(sizeHeader64),
|
||||
Phentsize: uint16(sizeProg64),
|
||||
Phnum: uint16(len(progs)),
|
||||
Shentsize: uint16(sizeSection64),
|
||||
Shnum: uint16(len(sections)),
|
||||
Shstrndx: 4, // index of .shstrtab
|
||||
}
|
||||
|
||||
// Serialize all headers and sections into a single binary stream.
|
||||
var data bytes.Buffer
|
||||
for i, b := range []interface{}{hdr, progs, sections, [textSize]byte{}, [textSize]byte{}, syms, symNames.data, secNames.data} {
|
||||
err := binary.Write(&data, binary.LittleEndian, b)
|
||||
if err != nil {
|
||||
t.Fatalf("Write(%v) got err %v, want nil", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
// ... and parse it as and ELF file.
|
||||
ef, err := elf.NewFile(bytes.NewReader(data.Bytes()))
|
||||
if err != nil {
|
||||
t.Fatalf("elf.NewFile got err %v, want nil", err)
|
||||
}
|
||||
return ef
|
||||
}
|
||||
|
||||
func TestELFKernelOffset(t *testing.T) {
|
||||
realELFOpen := elfOpen
|
||||
defer func() {
|
||||
elfOpen = realELFOpen
|
||||
}()
|
||||
|
||||
wantAddr := uint64(0xffff000010082000)
|
||||
elfOpen = func(_ string) (*elf.File, error) {
|
||||
return fakeELFFile(t), nil
|
||||
}
|
||||
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
relocationSymbol string
|
||||
start uint64
|
||||
}{
|
||||
{"text", "_text", 0xffff000020080000},
|
||||
{"stext", "_stext", 0xffff000020081000},
|
||||
} {
|
||||
|
||||
b := binrep{}
|
||||
o, err := b.openELF("vmlinux", tc.start, 0xffffffffffffffff, tc.start, tc.relocationSymbol)
|
||||
if err != nil {
|
||||
t.Errorf("%v: openELF got error %v, want nil", tc.name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
addr, err := o.ObjAddr(0xffff000020082000)
|
||||
if err != nil {
|
||||
t.Errorf("%v: ObjAddr got err %v, want nil", tc.name, err)
|
||||
continue
|
||||
}
|
||||
if addr != wantAddr {
|
||||
t.Errorf("%v: ObjAddr got %x, want %x", tc.name, addr, wantAddr)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
180
plugin/debug/pkg/internal/binutils/disasm.go
Normal file
180
plugin/debug/pkg/internal/binutils/disasm.go
Normal file
@@ -0,0 +1,180 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/ianlancetaylor/demangle"
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/plugin"
|
||||
)
|
||||
|
||||
var (
|
||||
nmOutputRE = regexp.MustCompile(`^\s*([[:xdigit:]]+)\s+(.)\s+(.*)`)
|
||||
objdumpAsmOutputRE = regexp.MustCompile(`^\s*([[:xdigit:]]+):\s+(.*)`)
|
||||
objdumpOutputFileLine = regexp.MustCompile(`^;?\s?(.*):([0-9]+)`)
|
||||
objdumpOutputFunction = regexp.MustCompile(`^;?\s?(\S.*)\(\):`)
|
||||
objdumpOutputFunctionLLVM = regexp.MustCompile(`^([[:xdigit:]]+)?\s?(.*):`)
|
||||
)
|
||||
|
||||
func findSymbols(syms []byte, file string, r *regexp.Regexp, address uint64) ([]*plugin.Sym, error) {
|
||||
// Collect all symbols from the nm output, grouping names mapped to
|
||||
// the same address into a single symbol.
|
||||
|
||||
// The symbols to return.
|
||||
var symbols []*plugin.Sym
|
||||
|
||||
// The current group of symbol names, and the address they are all at.
|
||||
names, start := []string{}, uint64(0)
|
||||
|
||||
buf := bytes.NewBuffer(syms)
|
||||
|
||||
for {
|
||||
symAddr, name, err := nextSymbol(buf)
|
||||
if err == io.EOF {
|
||||
// Done. If there was an unfinished group, append it.
|
||||
if len(names) != 0 {
|
||||
if match := matchSymbol(names, start, symAddr-1, r, address); match != nil {
|
||||
symbols = append(symbols, &plugin.Sym{Name: match, File: file, Start: start, End: symAddr - 1})
|
||||
}
|
||||
}
|
||||
|
||||
// And return the symbols.
|
||||
return symbols, nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// There was some kind of serious error reading nm's output.
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If this symbol is at the same address as the current group, add it to the group.
|
||||
if symAddr == start {
|
||||
names = append(names, name)
|
||||
continue
|
||||
}
|
||||
|
||||
// Otherwise append the current group to the list of symbols.
|
||||
if match := matchSymbol(names, start, symAddr-1, r, address); match != nil {
|
||||
symbols = append(symbols, &plugin.Sym{Name: match, File: file, Start: start, End: symAddr - 1})
|
||||
}
|
||||
|
||||
// And start a new group.
|
||||
names, start = []string{name}, symAddr
|
||||
}
|
||||
}
|
||||
|
||||
// matchSymbol checks if a symbol is to be selected by checking its
|
||||
// name to the regexp and optionally its address. It returns the name(s)
|
||||
// to be used for the matched symbol, or nil if no match
|
||||
func matchSymbol(names []string, start, end uint64, r *regexp.Regexp, address uint64) []string {
|
||||
if address != 0 && address >= start && address <= end {
|
||||
return names
|
||||
}
|
||||
for _, name := range names {
|
||||
if r == nil || r.MatchString(name) {
|
||||
return []string{name}
|
||||
}
|
||||
|
||||
// Match all possible demangled versions of the name.
|
||||
for _, o := range [][]demangle.Option{
|
||||
{demangle.NoClones},
|
||||
{demangle.NoParams, demangle.NoEnclosingParams},
|
||||
{demangle.NoParams, demangle.NoEnclosingParams, demangle.NoTemplateParams},
|
||||
} {
|
||||
if demangled, err := demangle.ToString(name, o...); err == nil && r.MatchString(demangled) {
|
||||
return []string{demangled}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// disassemble parses the output of the objdump command and returns
|
||||
// the assembly instructions in a slice.
|
||||
func disassemble(asm []byte) ([]plugin.Inst, error) {
|
||||
buf := bytes.NewBuffer(asm)
|
||||
function, file, line := "", "", 0
|
||||
var assembly []plugin.Inst
|
||||
for {
|
||||
input, err := buf.ReadString('\n')
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return nil, err
|
||||
}
|
||||
if input == "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
input = strings.TrimSpace(input)
|
||||
|
||||
if fields := objdumpAsmOutputRE.FindStringSubmatch(input); len(fields) == 3 {
|
||||
if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil {
|
||||
assembly = append(assembly,
|
||||
plugin.Inst{
|
||||
Addr: address,
|
||||
Text: fields[2],
|
||||
Function: function,
|
||||
File: file,
|
||||
Line: line,
|
||||
})
|
||||
continue
|
||||
}
|
||||
}
|
||||
if fields := objdumpOutputFileLine.FindStringSubmatch(input); len(fields) == 3 {
|
||||
if l, err := strconv.ParseUint(fields[2], 10, 32); err == nil {
|
||||
file, line = fields[1], int(l)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if fields := objdumpOutputFunction.FindStringSubmatch(input); len(fields) == 2 {
|
||||
function = fields[1]
|
||||
continue
|
||||
} else {
|
||||
if fields := objdumpOutputFunctionLLVM.FindStringSubmatch(input); len(fields) == 3 {
|
||||
function = fields[2]
|
||||
continue
|
||||
}
|
||||
}
|
||||
// Reset on unrecognized lines.
|
||||
function, file, line = "", "", 0
|
||||
}
|
||||
|
||||
return assembly, nil
|
||||
}
|
||||
|
||||
// nextSymbol parses the nm output to find the next symbol listed.
|
||||
// Skips over any output it cannot recognize.
|
||||
func nextSymbol(buf *bytes.Buffer) (uint64, string, error) {
|
||||
for {
|
||||
line, err := buf.ReadString('\n')
|
||||
if err != nil {
|
||||
if err != io.EOF || line == "" {
|
||||
return 0, "", err
|
||||
}
|
||||
}
|
||||
line = strings.TrimSpace(line)
|
||||
|
||||
if fields := nmOutputRE.FindStringSubmatch(line); len(fields) == 4 {
|
||||
if address, err := strconv.ParseUint(fields[1], 16, 64); err == nil {
|
||||
return address, fields[3], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
160
plugin/debug/pkg/internal/binutils/disasm_test.go
Normal file
160
plugin/debug/pkg/internal/binutils/disasm_test.go
Normal file
@@ -0,0 +1,160 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package binutils
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/plugin"
|
||||
)
|
||||
|
||||
// TestFindSymbols tests the FindSymbols routine using a hardcoded nm output.
|
||||
func TestFindSymbols(t *testing.T) {
|
||||
type testcase struct {
|
||||
query, syms string
|
||||
want []plugin.Sym
|
||||
}
|
||||
|
||||
testsyms := `0000000000001000 t lineA001
|
||||
0000000000001000 t lineA002
|
||||
0000000000001000 t line1000
|
||||
0000000000002000 t line200A
|
||||
0000000000002000 t line2000
|
||||
0000000000002000 t line200B
|
||||
0000000000003000 t line3000
|
||||
0000000000003000 t _ZNK4DumbclEPKc
|
||||
0000000000003000 t lineB00C
|
||||
0000000000003000 t line300D
|
||||
0000000000004000 t _the_end
|
||||
`
|
||||
testcases := []testcase{
|
||||
{
|
||||
"line.*[AC]",
|
||||
testsyms,
|
||||
[]plugin.Sym{
|
||||
{Name: []string{"lineA001"}, File: "object.o", Start: 0x1000, End: 0x1FFF},
|
||||
{Name: []string{"line200A"}, File: "object.o", Start: 0x2000, End: 0x2FFF},
|
||||
{Name: []string{"lineB00C"}, File: "object.o", Start: 0x3000, End: 0x3FFF},
|
||||
},
|
||||
},
|
||||
{
|
||||
"Dumb::operator",
|
||||
testsyms,
|
||||
[]plugin.Sym{
|
||||
{Name: []string{"Dumb::operator()(char const*) const"}, File: "object.o", Start: 0x3000, End: 0x3FFF},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
syms, err := findSymbols([]byte(tc.syms), "object.o", regexp.MustCompile(tc.query), 0)
|
||||
if err != nil {
|
||||
t.Fatalf("%q: findSymbols: %v", tc.query, err)
|
||||
}
|
||||
if err := checkSymbol(syms, tc.want); err != nil {
|
||||
t.Errorf("%q: %v", tc.query, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func checkSymbol(got []*plugin.Sym, want []plugin.Sym) error {
|
||||
if len(got) != len(want) {
|
||||
return fmt.Errorf("unexpected number of symbols %d (want %d)", len(got), len(want))
|
||||
}
|
||||
|
||||
for i, g := range got {
|
||||
w := want[i]
|
||||
if len(g.Name) != len(w.Name) {
|
||||
return fmt.Errorf("names, got %d, want %d", len(g.Name), len(w.Name))
|
||||
}
|
||||
for n := range g.Name {
|
||||
if g.Name[n] != w.Name[n] {
|
||||
return fmt.Errorf("name %d, got %q, want %q", n, g.Name[n], w.Name[n])
|
||||
}
|
||||
}
|
||||
if g.File != w.File {
|
||||
return fmt.Errorf("filename, got %q, want %q", g.File, w.File)
|
||||
}
|
||||
if g.Start != w.Start {
|
||||
return fmt.Errorf("start address, got %#x, want %#x", g.Start, w.Start)
|
||||
}
|
||||
if g.End != w.End {
|
||||
return fmt.Errorf("end address, got %#x, want %#x", g.End, w.End)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TestFunctionAssembly tests the FunctionAssembly routine by using a
|
||||
// fake objdump script.
|
||||
func TestFunctionAssembly(t *testing.T) {
|
||||
type testcase struct {
|
||||
s plugin.Sym
|
||||
asm string
|
||||
want []plugin.Inst
|
||||
}
|
||||
testcases := []testcase{
|
||||
{
|
||||
plugin.Sym{Name: []string{"symbol1"}, Start: 0x1000, End: 0x1FFF},
|
||||
" 1000: instruction one\n 1001: instruction two\n 1002: instruction three\n 1003: instruction four",
|
||||
[]plugin.Inst{
|
||||
{Addr: 0x1000, Text: "instruction one"},
|
||||
{Addr: 0x1001, Text: "instruction two"},
|
||||
{Addr: 0x1002, Text: "instruction three"},
|
||||
{Addr: 0x1003, Text: "instruction four"},
|
||||
},
|
||||
},
|
||||
{
|
||||
plugin.Sym{Name: []string{"symbol2"}, Start: 0x2000, End: 0x2FFF},
|
||||
" 2000: instruction one\n 2001: instruction two",
|
||||
[]plugin.Inst{
|
||||
{Addr: 0x2000, Text: "instruction one"},
|
||||
{Addr: 0x2001, Text: "instruction two"},
|
||||
},
|
||||
},
|
||||
{
|
||||
plugin.Sym{Name: []string{"_main"}, Start: 0x30000, End: 0x3FFF},
|
||||
"_main:\n; /tmp/hello.c:3\n30001: push %rbp",
|
||||
[]plugin.Inst{
|
||||
{Addr: 0x30001, Text: "push %rbp", Function: "_main", File: "/tmp/hello.c", Line: 3},
|
||||
},
|
||||
},
|
||||
{
|
||||
plugin.Sym{Name: []string{"main"}, Start: 0x4000, End: 0x4FFF},
|
||||
"000000000040052d <main>:\nmain():\n/tmp/hello.c:3\n40001: push %rbp",
|
||||
[]plugin.Inst{
|
||||
{Addr: 0x40001, Text: "push %rbp", Function: "main", File: "/tmp/hello.c", Line: 3},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testcases {
|
||||
insts, err := disassemble([]byte(tc.asm))
|
||||
if err != nil {
|
||||
t.Fatalf("FunctionAssembly: %v", err)
|
||||
}
|
||||
|
||||
if len(insts) != len(tc.want) {
|
||||
t.Errorf("Unexpected number of assembly instructions %d (want %d)\n", len(insts), len(tc.want))
|
||||
}
|
||||
for i := range insts {
|
||||
if insts[i] != tc.want[i] {
|
||||
t.Errorf("Expected symbol %v, got %v\n", tc.want[i], insts[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
94
plugin/debug/pkg/internal/binutils/testdata/build_binaries.go
vendored
Normal file
94
plugin/debug/pkg/internal/binutils/testdata/build_binaries.go
vendored
Normal file
@@ -0,0 +1,94 @@
|
||||
// Copyright 2019 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This is a script that generates the test executables for MacOS and Linux
|
||||
// in this directory. It should be needed very rarely to run this script.
|
||||
// It is mostly provided as a future reference on how the original binary
|
||||
// set was created.
|
||||
|
||||
// When a new executable is generated, hardcoded addresses in the
|
||||
// functions TestObjFile, TestMachoFiles, TestPEFile in binutils_test.go must be updated.
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
func main() {
|
||||
wd, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
if err := removeGlob("exe_linux_64*"); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
out, err := exec.Command("cc", "-g", "-ffile-prefix-map="+wd+"="+"/tmp", "-o", "exe_linux_64", "hello.c").CombinedOutput()
|
||||
log.Println(string(out))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
case "darwin":
|
||||
if err := removeGlob("exe_mac_64*", "lib_mac_64"); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
out, err := exec.Command("clang", "-g", "-ffile-prefix-map="+wd+"="+"/tmp", "-o", "exe_mac_64", "hello.c").CombinedOutput()
|
||||
log.Println(string(out))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
out, err = exec.Command("clang", "-g", "-ffile-prefix-map="+wd+"="+"/tmp", "-o", "lib_mac_64", "-dynamiclib", "lib.c").CombinedOutput()
|
||||
log.Println(string(out))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
case "windows":
|
||||
// Many gcc environments may create binaries that trigger false-positives
|
||||
// in antiviruses. MSYS2 with gcc 10.2.0 is a working environment for
|
||||
// compiling. To setup the environment follow the guide at
|
||||
// https://www.msys2.org/ and install gcc with `pacman -S gcc`.
|
||||
out, err := exec.Command("gcc", "-g", "-ffile-prefix-map="+wd+"=", "-o", "exe_windows_64.exe", "hello.c").CombinedOutput()
|
||||
log.Println(string(out))
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Println("Please verify that exe_windows_64.exe does not trigger any antivirus on `virustotal.com`.")
|
||||
default:
|
||||
log.Fatalf("Unsupported OS %q", runtime.GOOS)
|
||||
}
|
||||
}
|
||||
|
||||
func removeGlob(globs ...string) error {
|
||||
for _, glob := range globs {
|
||||
matches, err := filepath.Glob(glob)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, p := range matches {
|
||||
os.Remove(p)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
BIN
plugin/debug/pkg/internal/binutils/testdata/exe_linux_64
vendored
Executable file
BIN
plugin/debug/pkg/internal/binutils/testdata/exe_linux_64
vendored
Executable file
Binary file not shown.
BIN
plugin/debug/pkg/internal/binutils/testdata/exe_mac_64
vendored
Executable file
BIN
plugin/debug/pkg/internal/binutils/testdata/exe_mac_64
vendored
Executable file
Binary file not shown.
20
plugin/debug/pkg/internal/binutils/testdata/exe_mac_64.dSYM/Contents/Info.plist
vendored
Normal file
20
plugin/debug/pkg/internal/binutils/testdata/exe_mac_64.dSYM/Contents/Info.plist
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>CFBundleDevelopmentRegion</key>
|
||||
<string>English</string>
|
||||
<key>CFBundleIdentifier</key>
|
||||
<string>com.apple.xcode.dsym.exe_mac_64</string>
|
||||
<key>CFBundleInfoDictionaryVersion</key>
|
||||
<string>6.0</string>
|
||||
<key>CFBundlePackageType</key>
|
||||
<string>dSYM</string>
|
||||
<key>CFBundleSignature</key>
|
||||
<string>????</string>
|
||||
<key>CFBundleShortVersionString</key>
|
||||
<string>1.0</string>
|
||||
<key>CFBundleVersion</key>
|
||||
<string>1</string>
|
||||
</dict>
|
||||
</plist>
|
||||
BIN
plugin/debug/pkg/internal/binutils/testdata/exe_mac_64.dSYM/Contents/Resources/DWARF/exe_mac_64
vendored
Normal file
BIN
plugin/debug/pkg/internal/binutils/testdata/exe_mac_64.dSYM/Contents/Resources/DWARF/exe_mac_64
vendored
Normal file
Binary file not shown.
BIN
plugin/debug/pkg/internal/binutils/testdata/exe_windows_64.exe
vendored
Normal file
BIN
plugin/debug/pkg/internal/binutils/testdata/exe_windows_64.exe
vendored
Normal file
Binary file not shown.
39
plugin/debug/pkg/internal/binutils/testdata/fake-llvm-symbolizer
vendored
Executable file
39
plugin/debug/pkg/internal/binutils/testdata/fake-llvm-symbolizer
vendored
Executable file
@@ -0,0 +1,39 @@
|
||||
#!/bin/sh
|
||||
#
|
||||
# Copyright 2014 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
# Fake llvm-symbolizer to use in tests
|
||||
|
||||
set -f
|
||||
IFS=" "
|
||||
|
||||
while read line; do
|
||||
# line has form:
|
||||
# filename 0xaddr
|
||||
# Emit dummy output that matches llvm-symbolizer JSON output format.
|
||||
set -- ${line}
|
||||
kind=$1
|
||||
fname=$2
|
||||
addr=$3
|
||||
case ${kind} in
|
||||
CODE)
|
||||
echo "{\"Address\":\"${addr}\",\"ModuleName\":\"${fname}\",\"Symbol\":[{\"Column\":0,\"FileName\":\"${fname}.h\",\"FunctionName\":\"Inlined_${addr}\",\"Line\":0,\"StartLine\":0},{\"Column\":1,\"FileName\":\"${fname}.c\",\"FunctionName\":\"Func_${addr}\",\"Line\":2,\"StartLine\":2}]}"
|
||||
;;
|
||||
DATA)
|
||||
echo "{\"Address\":\"${addr}\",\"ModuleName\":\"${fname}\",\"Data\":{\"Name\":\"${fname}_${addr}\",\"Size\":\"0x8\",\"Start\":\"${addr}\"}}"
|
||||
;;
|
||||
*) exit 1;;
|
||||
esac
|
||||
done
|
||||
6
plugin/debug/pkg/internal/binutils/testdata/hello.c
vendored
Normal file
6
plugin/debug/pkg/internal/binutils/testdata/hello.c
vendored
Normal file
@@ -0,0 +1,6 @@
|
||||
#include <stdio.h>
|
||||
|
||||
int main() {
|
||||
printf("Hello, world!\n");
|
||||
return 0;
|
||||
}
|
||||
7
plugin/debug/pkg/internal/binutils/testdata/lib.c
vendored
Normal file
7
plugin/debug/pkg/internal/binutils/testdata/lib.c
vendored
Normal file
@@ -0,0 +1,7 @@
|
||||
int foo() {
|
||||
return 1;
|
||||
}
|
||||
|
||||
int bar() {
|
||||
return 2;
|
||||
}
|
||||
BIN
plugin/debug/pkg/internal/binutils/testdata/lib_mac_64
vendored
Executable file
BIN
plugin/debug/pkg/internal/binutils/testdata/lib_mac_64
vendored
Executable file
Binary file not shown.
20
plugin/debug/pkg/internal/binutils/testdata/lib_mac_64.dSYM/Contents/Info.plist
vendored
Normal file
20
plugin/debug/pkg/internal/binutils/testdata/lib_mac_64.dSYM/Contents/Info.plist
vendored
Normal file
@@ -0,0 +1,20 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>CFBundleDevelopmentRegion</key>
|
||||
<string>English</string>
|
||||
<key>CFBundleIdentifier</key>
|
||||
<string>com.apple.xcode.dsym.lib_mac_64</string>
|
||||
<key>CFBundleInfoDictionaryVersion</key>
|
||||
<string>6.0</string>
|
||||
<key>CFBundlePackageType</key>
|
||||
<string>dSYM</string>
|
||||
<key>CFBundleSignature</key>
|
||||
<string>????</string>
|
||||
<key>CFBundleShortVersionString</key>
|
||||
<string>1.0</string>
|
||||
<key>CFBundleVersion</key>
|
||||
<string>1</string>
|
||||
</dict>
|
||||
</plist>
|
||||
BIN
plugin/debug/pkg/internal/binutils/testdata/lib_mac_64.dSYM/Contents/Resources/DWARF/lib_mac_64
vendored
Normal file
BIN
plugin/debug/pkg/internal/binutils/testdata/lib_mac_64.dSYM/Contents/Resources/DWARF/lib_mac_64
vendored
Normal file
Binary file not shown.
1
plugin/debug/pkg/internal/binutils/testdata/malformed_elf
vendored
Normal file
1
plugin/debug/pkg/internal/binutils/testdata/malformed_elf
vendored
Normal file
@@ -0,0 +1 @@
|
||||
ELF<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
|
||||
1
plugin/debug/pkg/internal/binutils/testdata/malformed_macho
vendored
Normal file
1
plugin/debug/pkg/internal/binutils/testdata/malformed_macho
vendored
Normal file
@@ -0,0 +1 @@
|
||||
<EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD><EFBFBD>
|
||||
360
plugin/debug/pkg/internal/driver/cli.go
Normal file
360
plugin/debug/pkg/internal/driver/cli.go
Normal file
@@ -0,0 +1,360 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/binutils"
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/plugin"
|
||||
)
|
||||
|
||||
type source struct {
|
||||
Sources []string
|
||||
ExecName string
|
||||
BuildID string
|
||||
Base []string
|
||||
DiffBase bool
|
||||
Normalize bool
|
||||
|
||||
Seconds int
|
||||
Timeout int
|
||||
Symbolize string
|
||||
HTTPHostport string
|
||||
HTTPDisableBrowser bool
|
||||
Comment string
|
||||
}
|
||||
|
||||
// parseFlags parses the command lines through the specified flags package
|
||||
// and returns the source of the profile and optionally the command
|
||||
// for the kind of report to generate (nil for interactive use).
|
||||
func parseFlags(o *plugin.Options) (*source, []string, error) {
|
||||
flag := o.Flagset
|
||||
// Comparisons.
|
||||
flagDiffBase := flag.StringList("diff_base", "", "Source of base profile for comparison")
|
||||
flagBase := flag.StringList("base", "", "Source of base profile for profile subtraction")
|
||||
// Source options.
|
||||
flagSymbolize := flag.String("symbolize", "", "Options for profile symbolization")
|
||||
flagBuildID := flag.String("buildid", "", "Override build id for first mapping")
|
||||
flagTimeout := flag.Int("timeout", -1, "Timeout in seconds for fetching a profile")
|
||||
flagAddComment := flag.String("add_comment", "", "Annotation string to record in the profile")
|
||||
// CPU profile options
|
||||
flagSeconds := flag.Int("seconds", -1, "Length of time for dynamic profiles")
|
||||
// Heap profile options
|
||||
flagInUseSpace := flag.Bool("inuse_space", false, "Display in-use memory size")
|
||||
flagInUseObjects := flag.Bool("inuse_objects", false, "Display in-use object counts")
|
||||
flagAllocSpace := flag.Bool("alloc_space", false, "Display allocated memory size")
|
||||
flagAllocObjects := flag.Bool("alloc_objects", false, "Display allocated object counts")
|
||||
// Contention profile options
|
||||
flagTotalDelay := flag.Bool("total_delay", false, "Display total delay at each region")
|
||||
flagContentions := flag.Bool("contentions", false, "Display number of delays at each region")
|
||||
flagMeanDelay := flag.Bool("mean_delay", false, "Display mean delay at each region")
|
||||
flagTools := flag.String("tools", os.Getenv("PPROF_TOOLS"), "Path for object tool pathnames")
|
||||
|
||||
flagHTTP := flag.String("http", "", "Present interactive web UI at the specified http host:port")
|
||||
flagNoBrowser := flag.Bool("no_browser", false, "Skip opening a browser for the interactive web UI")
|
||||
|
||||
// Flags that set configuration properties.
|
||||
cfg := currentConfig()
|
||||
configFlagSetter := installConfigFlags(flag, &cfg)
|
||||
|
||||
flagCommands := make(map[string]*bool)
|
||||
flagParamCommands := make(map[string]*string)
|
||||
for name, cmd := range pprofCommands {
|
||||
if cmd.hasParam {
|
||||
flagParamCommands[name] = flag.String(name, "", "Generate a report in "+name+" format, matching regexp")
|
||||
} else {
|
||||
flagCommands[name] = flag.Bool(name, false, "Generate a report in "+name+" format")
|
||||
}
|
||||
}
|
||||
|
||||
args := flag.Parse(func() {
|
||||
o.UI.Print(usageMsgHdr +
|
||||
usage(true) +
|
||||
usageMsgSrc +
|
||||
flag.ExtraUsage() +
|
||||
usageMsgVars)
|
||||
})
|
||||
if len(args) == 0 {
|
||||
return nil, nil, errors.New("no profile source specified")
|
||||
}
|
||||
|
||||
var execName string
|
||||
// Recognize first argument as an executable or buildid override.
|
||||
if len(args) > 1 {
|
||||
arg0 := args[0]
|
||||
if file, err := o.Obj.Open(arg0, 0, ^uint64(0), 0, ""); err == nil {
|
||||
file.Close()
|
||||
execName = arg0
|
||||
args = args[1:]
|
||||
}
|
||||
}
|
||||
|
||||
// Apply any specified flags to cfg.
|
||||
if err := configFlagSetter(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
cmd, err := outputFormat(flagCommands, flagParamCommands)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if cmd != nil && *flagHTTP != "" {
|
||||
return nil, nil, errors.New("-http is not compatible with an output format on the command line")
|
||||
}
|
||||
|
||||
if *flagNoBrowser && *flagHTTP == "" {
|
||||
return nil, nil, errors.New("-no_browser only makes sense with -http")
|
||||
}
|
||||
|
||||
si := cfg.SampleIndex
|
||||
si = sampleIndex(flagTotalDelay, si, "delay", "-total_delay", o.UI)
|
||||
si = sampleIndex(flagMeanDelay, si, "delay", "-mean_delay", o.UI)
|
||||
si = sampleIndex(flagContentions, si, "contentions", "-contentions", o.UI)
|
||||
si = sampleIndex(flagInUseSpace, si, "inuse_space", "-inuse_space", o.UI)
|
||||
si = sampleIndex(flagInUseObjects, si, "inuse_objects", "-inuse_objects", o.UI)
|
||||
si = sampleIndex(flagAllocSpace, si, "alloc_space", "-alloc_space", o.UI)
|
||||
si = sampleIndex(flagAllocObjects, si, "alloc_objects", "-alloc_objects", o.UI)
|
||||
cfg.SampleIndex = si
|
||||
|
||||
if *flagMeanDelay {
|
||||
cfg.Mean = true
|
||||
}
|
||||
|
||||
source := &source{
|
||||
Sources: args,
|
||||
ExecName: execName,
|
||||
BuildID: *flagBuildID,
|
||||
Seconds: *flagSeconds,
|
||||
Timeout: *flagTimeout,
|
||||
Symbolize: *flagSymbolize,
|
||||
HTTPHostport: *flagHTTP,
|
||||
HTTPDisableBrowser: *flagNoBrowser,
|
||||
Comment: *flagAddComment,
|
||||
}
|
||||
|
||||
if err := source.addBaseProfiles(*flagBase, *flagDiffBase); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
normalize := cfg.Normalize
|
||||
if normalize && len(source.Base) == 0 {
|
||||
return nil, nil, errors.New("must have base profile to normalize by")
|
||||
}
|
||||
source.Normalize = normalize
|
||||
|
||||
if bu, ok := o.Obj.(*binutils.Binutils); ok {
|
||||
bu.SetTools(*flagTools)
|
||||
}
|
||||
|
||||
setCurrentConfig(cfg)
|
||||
return source, cmd, nil
|
||||
}
|
||||
|
||||
// addBaseProfiles adds the list of base profiles or diff base profiles to
|
||||
// the source. This function will return an error if both base and diff base
|
||||
// profiles are specified.
|
||||
func (source *source) addBaseProfiles(flagBase, flagDiffBase []*string) error {
|
||||
base, diffBase := dropEmpty(flagBase), dropEmpty(flagDiffBase)
|
||||
if len(base) > 0 && len(diffBase) > 0 {
|
||||
return errors.New("-base and -diff_base flags cannot both be specified")
|
||||
}
|
||||
|
||||
source.Base = base
|
||||
if len(diffBase) > 0 {
|
||||
source.Base, source.DiffBase = diffBase, true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// dropEmpty list takes a slice of string pointers, and outputs a slice of
|
||||
// non-empty strings associated with the flag.
|
||||
func dropEmpty(list []*string) []string {
|
||||
var l []string
|
||||
for _, s := range list {
|
||||
if *s != "" {
|
||||
l = append(l, *s)
|
||||
}
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// installConfigFlags creates command line flags for configuration
|
||||
// fields and returns a function which can be called after flags have
|
||||
// been parsed to copy any flags specified on the command line to
|
||||
// *cfg.
|
||||
func installConfigFlags(flag plugin.FlagSet, cfg *config) func() error {
|
||||
// List of functions for setting the different parts of a config.
|
||||
var setters []func()
|
||||
var err error // Holds any errors encountered while running setters.
|
||||
|
||||
for _, field := range configFields {
|
||||
n := field.name
|
||||
help := configHelp[n]
|
||||
var setter func()
|
||||
switch ptr := cfg.fieldPtr(field).(type) {
|
||||
case *bool:
|
||||
f := flag.Bool(n, *ptr, help)
|
||||
setter = func() { *ptr = *f }
|
||||
case *int:
|
||||
f := flag.Int(n, *ptr, help)
|
||||
setter = func() { *ptr = *f }
|
||||
case *float64:
|
||||
f := flag.Float64(n, *ptr, help)
|
||||
setter = func() { *ptr = *f }
|
||||
case *string:
|
||||
if len(field.choices) == 0 {
|
||||
f := flag.String(n, *ptr, help)
|
||||
setter = func() { *ptr = *f }
|
||||
} else {
|
||||
// Make a separate flag per possible choice.
|
||||
// Set all flags to initially false so we can
|
||||
// identify conflicts.
|
||||
bools := make(map[string]*bool)
|
||||
for _, choice := range field.choices {
|
||||
bools[choice] = flag.Bool(choice, false, configHelp[choice])
|
||||
}
|
||||
setter = func() {
|
||||
var set []string
|
||||
for k, v := range bools {
|
||||
if *v {
|
||||
set = append(set, k)
|
||||
}
|
||||
}
|
||||
switch len(set) {
|
||||
case 0:
|
||||
// Leave as default value.
|
||||
case 1:
|
||||
*ptr = set[0]
|
||||
default:
|
||||
err = fmt.Errorf("conflicting options set: %v", set)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
setters = append(setters, setter)
|
||||
}
|
||||
|
||||
return func() error {
|
||||
// Apply the setter for every flag.
|
||||
for _, setter := range setters {
|
||||
setter()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func sampleIndex(flag *bool, si string, sampleType, option string, ui plugin.UI) string {
|
||||
if *flag {
|
||||
if si == "" {
|
||||
return sampleType
|
||||
}
|
||||
ui.PrintErr("Multiple value selections, ignoring ", option)
|
||||
}
|
||||
return si
|
||||
}
|
||||
|
||||
func outputFormat(bcmd map[string]*bool, acmd map[string]*string) (cmd []string, err error) {
|
||||
for n, b := range bcmd {
|
||||
if *b {
|
||||
if cmd != nil {
|
||||
return nil, errors.New("must set at most one output format")
|
||||
}
|
||||
cmd = []string{n}
|
||||
}
|
||||
}
|
||||
for n, s := range acmd {
|
||||
if *s != "" {
|
||||
if cmd != nil {
|
||||
return nil, errors.New("must set at most one output format")
|
||||
}
|
||||
cmd = []string{n, *s}
|
||||
}
|
||||
}
|
||||
return cmd, nil
|
||||
}
|
||||
|
||||
var usageMsgHdr = `usage:
|
||||
|
||||
Produce output in the specified format.
|
||||
|
||||
pprof <format> [options] [binary] <source> ...
|
||||
|
||||
Omit the format to get an interactive shell whose commands can be used
|
||||
to generate various views of a profile
|
||||
|
||||
pprof [options] [binary] <source> ...
|
||||
|
||||
Omit the format and provide the "-http" flag to get an interactive web
|
||||
interface at the specified host:port that can be used to navigate through
|
||||
various views of a profile.
|
||||
|
||||
pprof -http [host]:[port] [options] [binary] <source> ...
|
||||
|
||||
Details:
|
||||
`
|
||||
|
||||
var usageMsgSrc = "\n\n" +
|
||||
" Source options:\n" +
|
||||
" -seconds Duration for time-based profile collection\n" +
|
||||
" -timeout Timeout in seconds for profile collection\n" +
|
||||
" -buildid Override build id for main binary\n" +
|
||||
" -add_comment Free-form annotation to add to the profile\n" +
|
||||
" Displayed on some reports or with pprof -comments\n" +
|
||||
" -diff_base source Source of base profile for comparison\n" +
|
||||
" -base source Source of base profile for profile subtraction\n" +
|
||||
" profile.pb.gz Profile in compressed protobuf format\n" +
|
||||
" legacy_profile Profile in legacy pprof format\n" +
|
||||
" http://host/profile URL for profile handler to retrieve\n" +
|
||||
" -symbolize= Controls source of symbol information\n" +
|
||||
" none Do not attempt symbolization\n" +
|
||||
" local Examine only local binaries\n" +
|
||||
" fastlocal Only get function names from local binaries\n" +
|
||||
" remote Do not examine local binaries\n" +
|
||||
" force Force re-symbolization\n" +
|
||||
" Binary Local path or build id of binary for symbolization\n"
|
||||
|
||||
var usageMsgVars = "\n\n" +
|
||||
" Misc options:\n" +
|
||||
" -http Provide web interface at host:port.\n" +
|
||||
" Host is optional and 'localhost' by default.\n" +
|
||||
" Port is optional and a randomly available port by default.\n" +
|
||||
" -no_browser Skip opening a browser for the interactive web UI.\n" +
|
||||
" -tools Search path for object tools\n" +
|
||||
"\n" +
|
||||
" Legacy convenience options:\n" +
|
||||
" -inuse_space Same as -sample_index=inuse_space\n" +
|
||||
" -inuse_objects Same as -sample_index=inuse_objects\n" +
|
||||
" -alloc_space Same as -sample_index=alloc_space\n" +
|
||||
" -alloc_objects Same as -sample_index=alloc_objects\n" +
|
||||
" -total_delay Same as -sample_index=delay\n" +
|
||||
" -contentions Same as -sample_index=contentions\n" +
|
||||
" -mean_delay Same as -mean -sample_index=delay\n" +
|
||||
"\n" +
|
||||
" Environment Variables:\n" +
|
||||
" PPROF_TMPDIR Location for saved profiles (default $HOME/pprof)\n" +
|
||||
" PPROF_TOOLS Search path for object-level tools\n" +
|
||||
" PPROF_BINARY_PATH Search path for local binary files\n" +
|
||||
" default: $HOME/pprof/binaries\n" +
|
||||
" searches $buildid/$name, $buildid/*, $path/$buildid,\n" +
|
||||
" ${buildid:0:2}/${buildid:2}.debug, $name, $path,\n" +
|
||||
" ${name}.debug, $dir/.debug/${name}.debug,\n" +
|
||||
" usr/lib/debug/$dir/${name}.debug\n" +
|
||||
" * On Windows, %USERPROFILE% is used instead of $HOME"
|
||||
461
plugin/debug/pkg/internal/driver/commands.go
Normal file
461
plugin/debug/pkg/internal/driver/commands.go
Normal file
@@ -0,0 +1,461 @@
|
||||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package driver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/plugin"
|
||||
"m7s.live/v5/plugin/debug/pkg/internal/report"
|
||||
)
|
||||
|
||||
// commands describes the commands accepted by pprof.
|
||||
type commands map[string]*command
|
||||
|
||||
// command describes the actions for a pprof command. Includes a
|
||||
// function for command-line completion, the report format to use
|
||||
// during report generation, any postprocessing functions, and whether
|
||||
// the command expects a regexp parameter (typically a function name).
|
||||
type command struct {
|
||||
format int // report format to generate
|
||||
postProcess PostProcessor // postprocessing to run on report
|
||||
visualizer PostProcessor // display output using some callback
|
||||
hasParam bool // collect a parameter from the CLI
|
||||
description string // single-line description text saying what the command does
|
||||
usage string // multi-line help text saying how the command is used
|
||||
}
|
||||
|
||||
// help returns a help string for a command.
|
||||
func (c *command) help(name string) string {
|
||||
message := c.description + "\n"
|
||||
if c.usage != "" {
|
||||
message += " Usage:\n"
|
||||
lines := strings.Split(c.usage, "\n")
|
||||
for _, line := range lines {
|
||||
message += fmt.Sprintf(" %s\n", line)
|
||||
}
|
||||
}
|
||||
return message + "\n"
|
||||
}
|
||||
|
||||
// AddCommand adds an additional command to the set of commands
|
||||
// accepted by pprof. This enables extensions to add new commands for
|
||||
// specialized visualization formats. If the command specified already
|
||||
// exists, it is overwritten.
|
||||
func AddCommand(cmd string, format int, post PostProcessor, desc, usage string) {
|
||||
pprofCommands[cmd] = &command{format, post, nil, false, desc, usage}
|
||||
}
|
||||
|
||||
// SetVariableDefault sets the default value for a pprof
|
||||
// variable. This enables extensions to set their own defaults.
|
||||
func SetVariableDefault(variable, value string) {
|
||||
configure(variable, value)
|
||||
}
|
||||
|
||||
// PostProcessor is a function that applies post-processing to the report output
|
||||
type PostProcessor func(input io.Reader, output io.Writer, ui plugin.UI) error
|
||||
|
||||
// interactiveMode is true if pprof is running on interactive mode, reading
|
||||
// commands from its shell.
|
||||
var interactiveMode = false
|
||||
|
||||
// pprofCommands are the report generation commands recognized by pprof.
|
||||
var pprofCommands = commands{
|
||||
// Commands that require no post-processing.
|
||||
"comments": {report.Comments, nil, nil, false, "Output all profile comments", ""},
|
||||
"disasm": {report.Dis, nil, nil, true, "Output assembly listings annotated with samples", listHelp("disasm", true)},
|
||||
"dot": {report.Dot, nil, nil, false, "Outputs a graph in DOT format", reportHelp("dot", false, true)},
|
||||
"list": {report.List, nil, nil, true, "Output annotated source for functions matching regexp", listHelp("list", false)},
|
||||
"peek": {report.Tree, nil, nil, true, "Output callers/callees of functions matching regexp", "peek func_regex\nDisplay callers and callees of functions matching func_regex."},
|
||||
"raw": {report.Raw, nil, nil, false, "Outputs a text representation of the raw profile", ""},
|
||||
"tags": {report.Tags, nil, nil, false, "Outputs all tags in the profile", "tags [tag_regex]* [-ignore_regex]* [>file]\nList tags with key:value matching tag_regex and exclude ignore_regex."},
|
||||
"text": {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("text", true, true)},
|
||||
"top": {report.Text, nil, nil, false, "Outputs top entries in text form", reportHelp("top", true, true)},
|
||||
"traces": {report.Traces, nil, nil, false, "Outputs all profile samples in text form", ""},
|
||||
"tree": {report.Tree, nil, nil, false, "Outputs a text rendering of call graph", reportHelp("tree", true, true)},
|
||||
|
||||
// Save binary formats to a file
|
||||
"callgrind": {report.Callgrind, nil, awayFromTTY("callgraph.out"), false, "Outputs a graph in callgrind format", reportHelp("callgrind", false, true)},
|
||||
"proto": {report.Proto, nil, awayFromTTY("pb.gz"), false, "Outputs the profile in compressed protobuf format", ""},
|
||||
"topproto": {report.TopProto, nil, awayFromTTY("pb.gz"), false, "Outputs top entries in compressed protobuf format", ""},
|
||||
|
||||
// Generate report in DOT format and postprocess with dot
|
||||
"gif": {report.Dot, invokeDot("gif"), awayFromTTY("gif"), false, "Outputs a graph image in GIF format", reportHelp("gif", false, true)},
|
||||
"pdf": {report.Dot, invokeDot("pdf"), awayFromTTY("pdf"), false, "Outputs a graph in PDF format", reportHelp("pdf", false, true)},
|
||||
"png": {report.Dot, invokeDot("png"), awayFromTTY("png"), false, "Outputs a graph image in PNG format", reportHelp("png", false, true)},
|
||||
"ps": {report.Dot, invokeDot("ps"), awayFromTTY("ps"), false, "Outputs a graph in PS format", reportHelp("ps", false, true)},
|
||||
|
||||
// Save SVG output into a file
|
||||
"svg": {report.Dot, massageDotSVG(), awayFromTTY("svg"), false, "Outputs a graph in SVG format", reportHelp("svg", false, true)},
|
||||
|
||||
// Visualize postprocessed dot output
|
||||
"eog": {report.Dot, invokeDot("svg"), invokeVisualizer("svg", []string{"eog"}), false, "Visualize graph through eog", reportHelp("eog", false, false)},
|
||||
"evince": {report.Dot, invokeDot("pdf"), invokeVisualizer("pdf", []string{"evince"}), false, "Visualize graph through evince", reportHelp("evince", false, false)},
|
||||
"gv": {report.Dot, invokeDot("ps"), invokeVisualizer("ps", []string{"gv --noantialias"}), false, "Visualize graph through gv", reportHelp("gv", false, false)},
|
||||
"web": {report.Dot, massageDotSVG(), invokeVisualizer("svg", browsers()), false, "Visualize graph through web browser", reportHelp("web", false, false)},
|
||||
|
||||
// Visualize callgrind output
|
||||
"kcachegrind": {report.Callgrind, nil, invokeVisualizer("grind", kcachegrind), false, "Visualize report in KCachegrind", reportHelp("kcachegrind", false, false)},
|
||||
|
||||
// Visualize HTML directly generated by report.
|
||||
"weblist": {report.WebList, nil, invokeVisualizer("html", browsers()), true, "Display annotated source in a web browser", listHelp("weblist", false)},
|
||||
}
|
||||
|
||||
// configHelp contains help text per configuration parameter.
|
||||
var configHelp = map[string]string{
|
||||
// Filename for file-based output formats, stdout by default.
|
||||
"output": helpText("Output filename for file-based outputs"),
|
||||
|
||||
// Comparisons.
|
||||
"drop_negative": helpText(
|
||||
"Ignore negative differences",
|
||||
"Do not show any locations with values <0."),
|
||||
|
||||
// Graph handling options.
|
||||
"call_tree": helpText(
|
||||
"Create a context-sensitive call tree",
|
||||
"Treat locations reached through different paths as separate."),
|
||||
|
||||
// Display options.
|
||||
"relative_percentages": helpText(
|
||||
"Show percentages relative to focused subgraph",
|
||||
"If unset, percentages are relative to full graph before focusing",
|
||||
"to facilitate comparison with original graph."),
|
||||
"unit": helpText(
|
||||
"Measurement units to display",
|
||||
"Scale the sample values to this unit.",
|
||||
"For time-based profiles, use seconds, milliseconds, nanoseconds, etc.",
|
||||
"For memory profiles, use megabytes, kilobytes, bytes, etc.",
|
||||
"Using auto will scale each value independently to the most natural unit."),
|
||||
"compact_labels": "Show minimal headers",
|
||||
"source_path": "Search path for source files",
|
||||
"trim_path": "Path to trim from source paths before search",
|
||||
"intel_syntax": helpText(
|
||||
"Show assembly in Intel syntax",
|
||||
"Only applicable to commands `disasm` and `weblist`"),
|
||||
|
||||
// Filtering options
|
||||
"nodecount": helpText(
|
||||
"Max number of nodes to show",
|
||||
"Uses heuristics to limit the number of locations to be displayed.",
|
||||
"On graphs, dotted edges represent paths through nodes that have been removed."),
|
||||
"nodefraction": "Hide nodes below <f>*total",
|
||||
"edgefraction": "Hide edges below <f>*total",
|
||||
"trim": helpText(
|
||||
"Honor nodefraction/edgefraction/nodecount defaults",
|
||||
"Set to false to get the full profile, without any trimming."),
|
||||
"focus": helpText(
|
||||
"Restricts to samples going through a node matching regexp",
|
||||
"Discard samples that do not include a node matching this regexp.",
|
||||
"Matching includes the function name, filename or object name."),
|
||||
"ignore": helpText(
|
||||
"Skips paths going through any nodes matching regexp",
|
||||
"If set, discard samples that include a node matching this regexp.",
|
||||
"Matching includes the function name, filename or object name."),
|
||||
"prune_from": helpText(
|
||||
"Drops any functions below the matched frame.",
|
||||
"If set, any frames matching the specified regexp and any frames",
|
||||
"below it will be dropped from each sample."),
|
||||
"hide": helpText(
|
||||
"Skips nodes matching regexp",
|
||||
"Discard nodes that match this location.",
|
||||
"Other nodes from samples that include this location will be shown.",
|
||||
"Matching includes the function name, filename or object name."),
|
||||
"show": helpText(
|
||||
"Only show nodes matching regexp",
|
||||
"If set, only show nodes that match this location.",
|
||||
"Matching includes the function name, filename or object name."),
|
||||
"show_from": helpText(
|
||||
"Drops functions above the highest matched frame.",
|
||||
"If set, all frames above the highest match are dropped from every sample.",
|
||||
"Matching includes the function name, filename or object name."),
|
||||
"tagroot": helpText(
|
||||
"Adds pseudo stack frames for labels key/value pairs at the callstack root.",
|
||||
"A comma-separated list of label keys.",
|
||||
"The first key creates frames at the new root."),
|
||||
"tagleaf": helpText(
|
||||
"Adds pseudo stack frames for labels key/value pairs at the callstack leaf.",
|
||||
"A comma-separated list of label keys.",
|
||||
"The last key creates frames at the new leaf."),
|
||||
"tagfocus": helpText(
|
||||
"Restricts to samples with tags in range or matched by regexp",
|
||||
"Use name=value syntax to limit the matching to a specific tag.",
|
||||
"Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:",
|
||||
"String tag filter examples: foo, foo.*bar, mytag=foo.*bar"),
|
||||
"tagignore": helpText(
|
||||
"Discard samples with tags in range or matched by regexp",
|
||||
"Use name=value syntax to limit the matching to a specific tag.",
|
||||
"Numeric tag filter examples: 1kb, 1kb:10kb, memory=32mb:",
|
||||
"String tag filter examples: foo, foo.*bar, mytag=foo.*bar"),
|
||||
"tagshow": helpText(
|
||||
"Only consider tags matching this regexp",
|
||||
"Discard tags that do not match this regexp"),
|
||||
"taghide": helpText(
|
||||
"Skip tags matching this regexp",
|
||||
"Discard tags that match this regexp"),
|
||||
// Heap profile options
|
||||
"divide_by": helpText(
|
||||
"Ratio to divide all samples before visualization",
|
||||
"Divide all samples values by a constant, eg the number of processors or jobs."),
|
||||
"mean": helpText(
|
||||
"Average sample value over first value (count)",
|
||||
"For memory profiles, report average memory per allocation.",
|
||||
"For time-based profiles, report average time per event."),
|
||||
"sample_index": helpText(
|
||||
"Sample value to report (0-based index or name)",
|
||||
"Profiles contain multiple values per sample.",
|
||||
"Use sample_index=i to select the ith value (starting at 0)."),
|
||||
"normalize": helpText(
|
||||
"Scales profile based on the base profile."),
|
||||
|
||||
// Data sorting criteria
|
||||
"flat": helpText("Sort entries based on own weight"),
|
||||
"cum": helpText("Sort entries based on cumulative weight"),
|
||||
|
||||
// Output granularity
|
||||
"functions": helpText(
|
||||
"Aggregate at the function level.",
|
||||
"Ignores the filename where the function was defined."),
|
||||
"filefunctions": helpText(
|
||||
"Aggregate at the function level.",
|
||||
"Takes into account the filename where the function was defined."),
|
||||
"files": "Aggregate at the file level.",
|
||||
"lines": "Aggregate at the source code line level.",
|
||||
"addresses": helpText(
|
||||
"Aggregate at the address level.",
|
||||
"Includes functions' addresses in the output."),
|
||||
"noinlines": helpText(
|
||||
"Ignore inlines.",
|
||||
"Attributes inlined functions to their first out-of-line caller."),
|
||||
"showcolumns": helpText(
|
||||
"Show column numbers at the source code line level."),
|
||||
}
|
||||
|
||||
func helpText(s ...string) string {
|
||||
return strings.Join(s, "\n") + "\n"
|
||||
}
|
||||
|
||||
// usage returns a string describing the pprof commands and configuration
|
||||
// options. if commandLine is set, the output reflect cli usage.
|
||||
func usage(commandLine bool) string {
|
||||
var prefix string
|
||||
if commandLine {
|
||||
prefix = "-"
|
||||
}
|
||||
fmtHelp := func(c, d string) string {
|
||||
return fmt.Sprintf(" %-16s %s", c, strings.SplitN(d, "\n", 2)[0])
|
||||
}
|
||||
|
||||
var commands []string
|
||||
for name, cmd := range pprofCommands {
|
||||
commands = append(commands, fmtHelp(prefix+name, cmd.description))
|
||||
}
|
||||
sort.Strings(commands)
|
||||
|
||||
var help string
|
||||
if commandLine {
|
||||
help = " Output formats (select at most one):\n"
|
||||
} else {
|
||||
help = " Commands:\n"
|
||||
commands = append(commands, fmtHelp("o/options", "List options and their current values"))
|
||||
commands = append(commands, fmtHelp("q/quit/exit/^D", "Exit pprof"))
|
||||
}
|
||||
|
||||
help = help + strings.Join(commands, "\n") + "\n\n" +
|
||||
" Options:\n"
|
||||
|
||||
// Print help for configuration options after sorting them.
|
||||
// Collect choices for multi-choice options print them together.
|
||||
var variables []string
|
||||
var radioStrings []string
|
||||
for _, f := range configFields {
|
||||
if len(f.choices) == 0 {
|
||||
variables = append(variables, fmtHelp(prefix+f.name, configHelp[f.name]))
|
||||
continue
|
||||
}
|
||||
// Format help for for this group.
|
||||
s := []string{fmtHelp(f.name, "")}
|
||||
for _, choice := range f.choices {
|
||||
s = append(s, " "+fmtHelp(prefix+choice, configHelp[choice]))
|
||||
}
|
||||
radioStrings = append(radioStrings, strings.Join(s, "\n"))
|
||||
}
|
||||
sort.Strings(variables)
|
||||
	sort.Strings(radioStrings)
	return help + strings.Join(variables, "\n") + "\n\n" +
		"  Option groups (only set one per group):\n" +
		strings.Join(radioStrings, "\n")
}

func reportHelp(c string, cum, redirect bool) string {
	h := []string{
		c + " [n] [focus_regex]* [-ignore_regex]*",
		"Include up to n samples",
		"Include samples matching focus_regex, and exclude ignore_regex.",
	}
	if cum {
		h[0] += " [-cum]"
		h = append(h, "-cum sorts the output by cumulative weight")
	}
	if redirect {
		h[0] += " >f"
		h = append(h, "Optionally save the report on the file f")
	}
	return strings.Join(h, "\n")
}

func listHelp(c string, redirect bool) string {
	h := []string{
		c + "<func_regex|address> [-focus_regex]* [-ignore_regex]*",
		"Include functions matching func_regex, or including the address specified.",
		"Include samples matching focus_regex, and exclude ignore_regex.",
	}
	if redirect {
		h[0] += " >f"
		h = append(h, "Optionally save the report on the file f")
	}
	return strings.Join(h, "\n")
}

// browsers returns a list of commands to attempt for web visualization.
func browsers() []string {
	var cmds []string
	if userBrowser := os.Getenv("BROWSER"); userBrowser != "" {
		cmds = append(cmds, userBrowser)
	}
	switch runtime.GOOS {
	case "darwin":
		cmds = append(cmds, "/usr/bin/open")
	case "windows":
		cmds = append(cmds, "cmd /c start")
	default:
		// Commands opening browsers are prioritized over xdg-open, so browser()
		// command can be used on linux to open the .svg file generated by the -web
		// command (the .svg file includes embedded javascript so is best viewed in
		// a browser).
		cmds = append(cmds, []string{"chrome", "google-chrome", "chromium", "firefox", "sensible-browser"}...)
		if os.Getenv("DISPLAY") != "" {
			// xdg-open is only for use in a desktop environment.
			cmds = append(cmds, "xdg-open")
		}
	}
	return cmds
}

var kcachegrind = []string{"kcachegrind"}

// awayFromTTY saves the output in a file if it would otherwise go to
// the terminal screen. This is used to avoid dumping binary data on
// the screen.
func awayFromTTY(format string) PostProcessor {
	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
		if output == os.Stdout && (ui.IsTerminal() || interactiveMode) {
			tempFile, err := newTempFile("", "profile", "."+format)
			if err != nil {
				return err
			}
			ui.PrintErr("Generating report in ", tempFile.Name())
			output = tempFile
		}
		_, err := io.Copy(output, input)
		return err
	}
}

func invokeDot(format string) PostProcessor {
	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
		cmd := exec.Command("dot", "-T"+format)
		cmd.Stdin, cmd.Stdout, cmd.Stderr = input, output, os.Stderr
		if err := cmd.Run(); err != nil {
			return fmt.Errorf("failed to execute dot. Is Graphviz installed? Error: %v", err)
		}
		return nil
	}
}

// massageDotSVG invokes the dot tool to generate an SVG image and alters
// the image to have panning capabilities when viewed in a browser.
func massageDotSVG() PostProcessor {
	generateSVG := invokeDot("svg")
	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
		baseSVG := new(bytes.Buffer)
		if err := generateSVG(input, baseSVG, ui); err != nil {
			return err
		}
		_, err := output.Write([]byte(massageSVG(baseSVG.String())))
		return err
	}
}

func invokeVisualizer(suffix string, visualizers []string) PostProcessor {
	return func(input io.Reader, output io.Writer, ui plugin.UI) error {
		tempFile, err := newTempFile(os.TempDir(), "pprof", "."+suffix)
		if err != nil {
			return err
		}
		deferDeleteTempFile(tempFile.Name())
		if _, err := io.Copy(tempFile, input); err != nil {
			return err
		}
		tempFile.Close()
		// Try visualizers until one is successful
		for _, v := range visualizers {
			// Separate command and arguments for exec.Command.
			args := strings.Split(v, " ")
			if len(args) == 0 {
				continue
			}
			viewer := exec.Command(args[0], append(args[1:], tempFile.Name())...)
			viewer.Stderr = os.Stderr
			if err = viewer.Start(); err == nil {
				// Wait for a second so that the visualizer has a chance to
				// open the input file. This needs to be done even if we're
				// waiting for the visualizer as it can be just a wrapper that
				// spawns a browser tab and returns right away.
				defer func(t <-chan time.Time) {
					<-t
				}(time.After(time.Second))
				// On interactive mode, let the visualizer run in the background
				// so other commands can be issued.
				if !interactiveMode {
					return viewer.Wait()
				}
				return nil
			}
		}
		return err
	}
}

// stringToBool is a custom parser for bools. We avoid using strconv.ParseBool
// to remain compatible with old pprof behavior (e.g., treating "" as true).
func stringToBool(s string) (bool, error) {
	switch strings.ToLower(s) {
	case "true", "t", "yes", "y", "1", "":
		return true, nil
	case "false", "f", "no", "n", "0":
		return false, nil
	default:
		return false, fmt.Errorf(`illegal value "%s" for bool variable`, s)
	}
}
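Not part of the diff above: a minimal, hypothetical sketch (written as a throwaway test placed in package driver purely for illustration) of the lenient parsing that stringToBool provides. Unlike strconv.ParseBool, an empty string is accepted and treated as true, and input is lowercased before matching.

// stringtobool_sketch_test.go (hypothetical, in package driver)
package driver

import "testing"

func TestStringToBoolSketch(t *testing.T) {
	cases := map[string]bool{
		"":    true, // empty string counts as true for legacy pprof compatibility
		"Yes": true, // input is lowercased before matching
		"t":   true,
		"0":   false,
		"no":  false,
	}
	for in, want := range cases {
		got, err := stringToBool(in)
		if err != nil || got != want {
			t.Errorf("stringToBool(%q) = %v, %v; want %v", in, got, err, want)
		}
	}
}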
373 plugin/debug/pkg/internal/driver/config.go Normal file
@@ -0,0 +1,373 @@
package driver

import (
	"fmt"
	"net/url"
	"reflect"
	"strconv"
	"strings"
	"sync"
)

// config holds settings for a single named config.
// The JSON tag name for a field is used both for JSON encoding and as
// a named variable.
type config struct {
	// Filename for file-based output formats, stdout by default.
	Output string `json:"-"`

	// Display options.
	CallTree            bool    `json:"call_tree,omitempty"`
	RelativePercentages bool    `json:"relative_percentages,omitempty"`
	Unit                string  `json:"unit,omitempty"`
	CompactLabels       bool    `json:"compact_labels,omitempty"`
	SourcePath          string  `json:"-"`
	TrimPath            string  `json:"-"`
	IntelSyntax         bool    `json:"intel_syntax,omitempty"`
	Mean                bool    `json:"mean,omitempty"`
	SampleIndex         string  `json:"-"`
	DivideBy            float64 `json:"-"`
	Normalize           bool    `json:"normalize,omitempty"`
	Sort                string  `json:"sort,omitempty"`

	// Label pseudo stack frame generation options
	TagRoot string `json:"tagroot,omitempty"`
	TagLeaf string `json:"tagleaf,omitempty"`

	// Filtering options
	DropNegative bool    `json:"drop_negative,omitempty"`
	NodeCount    int     `json:"nodecount,omitempty"`
	NodeFraction float64 `json:"nodefraction,omitempty"`
	EdgeFraction float64 `json:"edgefraction,omitempty"`
	Trim         bool    `json:"trim,omitempty"`
	Focus        string  `json:"focus,omitempty"`
	Ignore       string  `json:"ignore,omitempty"`
	PruneFrom    string  `json:"prune_from,omitempty"`
	Hide         string  `json:"hide,omitempty"`
	Show         string  `json:"show,omitempty"`
	ShowFrom     string  `json:"show_from,omitempty"`
	TagFocus     string  `json:"tagfocus,omitempty"`
	TagIgnore    string  `json:"tagignore,omitempty"`
	TagShow      string  `json:"tagshow,omitempty"`
	TagHide      string  `json:"taghide,omitempty"`
	NoInlines    bool    `json:"noinlines,omitempty"`
	ShowColumns  bool    `json:"showcolumns,omitempty"`

	// Output granularity
	Granularity string `json:"granularity,omitempty"`
}

// defaultConfig returns the default configuration values; it is unaffected by
// flags and interactive assignments.
func defaultConfig() config {
	return config{
		Unit:         "minimum",
		NodeCount:    -1,
		NodeFraction: 0.005,
		EdgeFraction: 0.001,
		Trim:         true,
		DivideBy:     1.0,
		Sort:         "flat",
		Granularity:  "", // Default depends on the display format
	}
}

// currentConfig holds the current configuration values; it is affected by
// flags and interactive assignments.
var currentCfg = defaultConfig()
var currentMu sync.Mutex

func currentConfig() config {
	currentMu.Lock()
	defer currentMu.Unlock()
	return currentCfg
}

func setCurrentConfig(cfg config) {
	currentMu.Lock()
	defer currentMu.Unlock()
	currentCfg = cfg
}

// configField contains metadata for a single configuration field.
type configField struct {
	name         string              // JSON field name/key in variables
	urlparam     string              // URL parameter name
	saved        bool                // Is field saved in settings?
	field        reflect.StructField // Field in config
	choices      []string            // Name of variables in group
	defaultValue string              // Default value for this field.
}

var (
	configFields []configField // Precomputed metadata per config field

	// configFieldMap holds an entry for every config field as well as an
	// entry for every valid choice for a multi-choice field.
	configFieldMap map[string]configField
)

func init() {
	// Config names for fields that are not saved in settings and therefore
	// do not have a JSON name.
	notSaved := map[string]string{
		// Not saved in settings, but present in URLs.
		"SampleIndex": "sample_index",

		// Following fields are also not placed in URLs.
		"Output":     "output",
		"SourcePath": "source_path",
		"TrimPath":   "trim_path",
		"DivideBy":   "divide_by",
	}

	// choices holds the list of allowed values for config fields that can
	// take on one of a bounded set of values.
	choices := map[string][]string{
		"sort":        {"cum", "flat"},
		"granularity": {"functions", "filefunctions", "files", "lines", "addresses"},
	}

	// urlparam holds the mapping from a config field name to the URL
	// parameter used to hold that config field. If no entry is present for
	// a name, the corresponding field is not saved in URLs.
	urlparam := map[string]string{
		"drop_negative":        "dropneg",
		"call_tree":            "calltree",
		"relative_percentages": "rel",
		"unit":                 "unit",
		"compact_labels":       "compact",
		"intel_syntax":         "intel",
		"nodecount":            "n",
		"nodefraction":         "nf",
		"edgefraction":         "ef",
		"trim":                 "trim",
		"focus":                "f",
		"ignore":               "i",
		"prune_from":           "prunefrom",
		"hide":                 "h",
		"show":                 "s",
		"show_from":            "sf",
		"tagfocus":             "tf",
		"tagignore":            "ti",
		"tagshow":              "ts",
		"taghide":              "th",
		"mean":                 "mean",
		"sample_index":         "si",
		"normalize":            "norm",
		"sort":                 "sort",
		"granularity":          "g",
		"noinlines":            "noinlines",
		"showcolumns":          "showcolumns",
	}

	def := defaultConfig()
	configFieldMap = map[string]configField{}
	t := reflect.TypeOf(config{})
	for i, n := 0, t.NumField(); i < n; i++ {
		field := t.Field(i)
		js := strings.Split(field.Tag.Get("json"), ",")
		if len(js) == 0 {
			continue
		}
		// Get the configuration name for this field.
		name := js[0]
		if name == "-" {
			name = notSaved[field.Name]
			if name == "" {
				// Not a configurable field.
				continue
			}
		}
		f := configField{
			name:     name,
			urlparam: urlparam[name],
			saved:    (name == js[0]),
			field:    field,
			choices:  choices[name],
		}
		f.defaultValue = def.get(f)
		configFields = append(configFields, f)
		configFieldMap[f.name] = f
		for _, choice := range f.choices {
			configFieldMap[choice] = f
		}
	}
}

// fieldPtr returns a pointer to the field identified by f in *cfg.
func (cfg *config) fieldPtr(f configField) interface{} {
	// reflect.ValueOf: converts to reflect.Value
	// Elem: dereferences cfg to make *cfg
	// FieldByIndex: fetches the field
	// Addr: takes address of field
	// Interface: converts back from reflect.Value to a regular value
	return reflect.ValueOf(cfg).Elem().FieldByIndex(f.field.Index).Addr().Interface()
}

// get returns the value of field f in cfg.
func (cfg *config) get(f configField) string {
	switch ptr := cfg.fieldPtr(f).(type) {
	case *string:
		return *ptr
	case *int:
		return fmt.Sprint(*ptr)
	case *float64:
		return fmt.Sprint(*ptr)
	case *bool:
		return fmt.Sprint(*ptr)
	}
	panic(fmt.Sprintf("unsupported config field type %v", f.field.Type))
}

// set sets the value of field f in cfg to value.
func (cfg *config) set(f configField, value string) error {
	switch ptr := cfg.fieldPtr(f).(type) {
	case *string:
		if len(f.choices) > 0 {
			// Verify that value is one of the allowed choices.
			for _, choice := range f.choices {
				if choice == value {
					*ptr = value
					return nil
				}
			}
			return fmt.Errorf("invalid %q value %q", f.name, value)
		}
		*ptr = value
	case *int:
		v, err := strconv.Atoi(value)
		if err != nil {
			return err
		}
		*ptr = v
	case *float64:
		v, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return err
		}
		*ptr = v
	case *bool:
		v, err := stringToBool(value)
		if err != nil {
			return err
		}
		*ptr = v
	default:
		panic(fmt.Sprintf("unsupported config field type %v", f.field.Type))
	}
	return nil
}

// isConfigurable returns true if name is either the name of a config field, or
// a valid value for a multi-choice config field.
func isConfigurable(name string) bool {
	_, ok := configFieldMap[name]
	return ok
}

// isBoolConfig returns true if name is either the name of a boolean config field,
// or a valid value for a multi-choice config field.
func isBoolConfig(name string) bool {
	f, ok := configFieldMap[name]
	if !ok {
		return false
	}
	if name != f.name {
		return true // name must be one possible value for the field
	}
	var cfg config
	_, ok = cfg.fieldPtr(f).(*bool)
	return ok
}

// completeConfig returns the list of configurable names starting with prefix.
func completeConfig(prefix string) []string {
	var result []string
	for v := range configFieldMap {
		if strings.HasPrefix(v, prefix) {
			result = append(result, v)
		}
	}
	return result
}

// configure stores the name=value mapping into the current config, correctly
// handling the case when name identifies a particular choice in a field.
func configure(name, value string) error {
	currentMu.Lock()
	defer currentMu.Unlock()
	f, ok := configFieldMap[name]
	if !ok {
		return fmt.Errorf("unknown config field %q", name)
	}
	if f.name == name {
		return currentCfg.set(f, value)
	}
	// name must be one of the choices. If value is true, set field-value
	// to name.
	if v, err := strconv.ParseBool(value); v && err == nil {
		return currentCfg.set(f, name)
	}
	return fmt.Errorf("unknown config field %q", name)
}

// resetTransient sets all transient fields in *cfg to their currently
// configured values.
func (cfg *config) resetTransient() {
	current := currentConfig()
	cfg.Output = current.Output
	cfg.SourcePath = current.SourcePath
	cfg.TrimPath = current.TrimPath
	cfg.DivideBy = current.DivideBy
	cfg.SampleIndex = current.SampleIndex
}

// applyURL updates *cfg based on params.
func (cfg *config) applyURL(params url.Values) error {
	for _, f := range configFields {
		var value string
		if f.urlparam != "" {
			value = params.Get(f.urlparam)
		}
		if value == "" {
			continue
		}
		if err := cfg.set(f, value); err != nil {
			return fmt.Errorf("error setting config field %s: %v", f.name, err)
		}
	}
	return nil
}

// makeURL returns a URL based on initialURL that contains the config contents
// as parameters. The second result is true iff a parameter value was changed.
func (cfg *config) makeURL(initialURL url.URL) (url.URL, bool) {
	q := initialURL.Query()
	changed := false
	for _, f := range configFields {
		if f.urlparam == "" || !f.saved {
			continue
		}
		v := cfg.get(f)
		if v == f.defaultValue {
			v = "" // URL form of the default value is the empty string.
		} else if f.field.Type.Kind() == reflect.Bool {
			// Shorten bool values to "f" or "t"
			v = v[:1]
		}
		if q.Get(f.urlparam) == v {
			continue
		}
		changed = true
		if v == "" {
			q.Del(f.urlparam)
		} else {
			q.Set(f.urlparam, v)
		}
	}
	if changed {
		initialURL.RawQuery = q.Encode()
	}
	return initialURL, changed
}
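Not part of the diff above: a minimal, hypothetical in-package sketch of the URL round trip implemented by the config code just shown. Non-default values are written out as short URL parameters by makeURL and read back by applyURL; default values are omitted from the query string entirely.

// config_url_sketch_test.go (hypothetical, in package driver)
package driver

import (
	"net/url"
	"testing"
)

func TestConfigURLRoundTripSketch(t *testing.T) {
	cfg := defaultConfig()
	cfg.Sort = "cum"   // serialized under the "sort" parameter
	cfg.NodeCount = 42 // serialized under "n"

	u, changed := cfg.makeURL(url.URL{Path: "/ui/top"})
	if !changed {
		t.Fatal("expected the query string to change for non-default values")
	}

	got := defaultConfig()
	if err := got.applyURL(u.Query()); err != nil {
		t.Fatal(err)
	}
	if got.Sort != "cum" || got.NodeCount != 42 {
		t.Errorf("round trip lost values: %+v", got)
	}
}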
408 plugin/debug/pkg/internal/driver/driver.go Normal file
@@ -0,0 +1,408 @@
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package driver implements the core pprof functionality. It can be
// parameterized with a flag implementation, fetch and symbolize
// mechanisms.
package driver

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"

	"m7s.live/v5/plugin/debug/pkg/internal/plugin"
	"m7s.live/v5/plugin/debug/pkg/internal/report"
	"m7s.live/v5/plugin/debug/pkg/profile"
)

// PProf acquires a profile, and symbolizes it using a profile
// manager. Then it generates a report formatted according to the
// options selected through the flags package.
func PProf(eo *plugin.Options) error {
	// Remove any temporary files created during pprof processing.
	defer cleanupTempFiles()

	o := setDefaults(eo)

	src, cmd, err := parseFlags(o)
	if err != nil {
		return err
	}

	p, err := fetchProfiles(src, o)
	if err != nil {
		return err
	}

	if cmd != nil {
		return generateReport(p, cmd, currentConfig(), o)
	}

	if src.HTTPHostport != "" {
		return serveWebInterface(src.HTTPHostport, p, o, src.HTTPDisableBrowser)
	}
	return interactive(p, o)
}

// generateRawReport is allowed to modify p.
func generateRawReport(p *profile.Profile, cmd []string, cfg config, o *plugin.Options) (*command, *report.Report, error) {
	// Identify units of numeric tags in profile.
	numLabelUnits := identifyNumLabelUnits(p, o.UI)

	// Get report output format
	c := pprofCommands[cmd[0]]
	if c == nil {
		panic("unexpected nil command")
	}

	cfg = applyCommandOverrides(cmd[0], c.format, cfg)

	// Create label pseudo nodes before filtering, in case the filters use
	// the generated nodes.
	generateTagRootsLeaves(p, cfg, o.UI)

	// Delay focus after configuring report to get percentages on all samples.
	relative := cfg.RelativePercentages
	if relative {
		if err := applyFocus(p, numLabelUnits, cfg, o.UI); err != nil {
			return nil, nil, err
		}
	}
	ropt, err := reportOptions(p, numLabelUnits, cfg)
	if err != nil {
		return nil, nil, err
	}
	ropt.OutputFormat = c.format
	if len(cmd) == 2 {
		s, err := regexp.Compile(cmd[1])
		if err != nil {
			return nil, nil, fmt.Errorf("parsing argument regexp %s: %v", cmd[1], err)
		}
		ropt.Symbol = s
	}

	rpt := report.New(p, ropt)
	if !relative {
		if err := applyFocus(p, numLabelUnits, cfg, o.UI); err != nil {
			return nil, nil, err
		}
	}
	if err := aggregate(p, cfg); err != nil {
		return nil, nil, err
	}

	return c, rpt, nil
}

// generateReport is allowed to modify p.
func generateReport(p *profile.Profile, cmd []string, cfg config, o *plugin.Options) error {
	c, rpt, err := generateRawReport(p, cmd, cfg, o)
	if err != nil {
		return err
	}

	// Generate the report.
	dst := new(bytes.Buffer)
	switch rpt.OutputFormat() {
	case report.WebList:
		// We need template expansion, so generate here instead of in report.
		err = printWebList(dst, rpt, o.Obj)
	default:
		err = report.Generate(dst, rpt, o.Obj)
	}
	if err != nil {
		return err
	}
	src := dst

	// If necessary, perform any data post-processing.
	if c.postProcess != nil {
		dst = new(bytes.Buffer)
		if err := c.postProcess(src, dst, o.UI); err != nil {
			return err
		}
		src = dst
	}

	// If no output is specified, use default visualizer.
	output := cfg.Output
	if output == "" {
		if c.visualizer != nil {
			return c.visualizer(src, os.Stdout, o.UI)
		}
		_, err := src.WriteTo(os.Stdout)
		return err
	}

	// Output to specified file.
	o.UI.PrintErr("Generating report in ", output)
	out, err := o.Writer.Open(output)
	if err != nil {
		return err
	}
	if _, err := src.WriteTo(out); err != nil {
		out.Close()
		return err
	}
	return out.Close()
}

func printWebList(dst io.Writer, rpt *report.Report, obj plugin.ObjTool) error {
	listing, err := report.MakeWebList(rpt, obj, -1)
	if err != nil {
		return err
	}
	legend := report.ProfileLabels(rpt)
	return renderHTML(dst, "sourcelisting", rpt, nil, legend, webArgs{
		Standalone: true,
		Listing:    listing,
	})
}

func applyCommandOverrides(cmd string, outputFormat int, cfg config) config {
	// Some report types override the trim flag to false below. This is to make
	// sure the default heuristics of excluding insignificant nodes and edges
	// from the call graph do not apply. One example where it is important is
	// annotated source or disassembly listing. Those reports run on a specific
	// function (or functions), but the trimming is applied before the function
	// data is selected. So, with trimming enabled, the report could end up
	// showing no data if the specified function is "uninteresting" as far as the
	// trimming is concerned.
	trim := cfg.Trim

	switch cmd {
	case "disasm":
		trim = false
		cfg.Granularity = "addresses"
		// Force the 'noinlines' mode so that source locations for a given address
		// collapse and there is only one for the given address. Without this
		// cumulative metrics would be double-counted when annotating the assembly.
		// This is because the merge is done by address and in case of an inlined
		// stack each of the inlined entries is a separate callgraph node.
		cfg.NoInlines = true
	case "weblist":
		trim = false
		cfg.Granularity = "addresses"
		cfg.NoInlines = false // Need inline info to support call expansion
	case "peek":
		trim = false
	case "list":
		trim = false
		cfg.Granularity = "lines"
		// Do not force 'noinlines' to be false so that specifying
		// "-list foo -noinlines" is supported and works as expected.
	case "text", "top", "topproto":
		if cfg.NodeCount == -1 {
			cfg.NodeCount = 0
		}
	default:
		if cfg.NodeCount == -1 {
			cfg.NodeCount = 80
		}
	}

	switch outputFormat {
	case report.Proto, report.Raw, report.Callgrind:
		trim = false
		cfg.Granularity = "addresses"
	}

	if !trim {
		cfg.NodeCount = 0
		cfg.NodeFraction = 0
		cfg.EdgeFraction = 0
	}
	return cfg
}

// generateTagRootsLeaves generates extra nodes from the tagroot and tagleaf options.
func generateTagRootsLeaves(prof *profile.Profile, cfg config, ui plugin.UI) {
	tagRootLabelKeys := dropEmptyStrings(strings.Split(cfg.TagRoot, ","))
	tagLeafLabelKeys := dropEmptyStrings(strings.Split(cfg.TagLeaf, ","))
	rootm, leafm := addLabelNodes(prof, tagRootLabelKeys, tagLeafLabelKeys, cfg.Unit)
	warnNoMatches(cfg.TagRoot == "" || rootm, "TagRoot", ui)
	warnNoMatches(cfg.TagLeaf == "" || leafm, "TagLeaf", ui)
}

// dropEmptyStrings filters a slice to only non-empty strings
func dropEmptyStrings(in []string) (out []string) {
	for _, s := range in {
		if s != "" {
			out = append(out, s)
		}
	}
	return
}

func aggregate(prof *profile.Profile, cfg config) error {
	var function, filename, linenumber, address bool
	inlines := !cfg.NoInlines
	switch cfg.Granularity {
	case "":
		function = true // Default granularity is "functions"
	case "addresses":
		if inlines {
			return nil
		}
		function = true
		filename = true
		linenumber = true
		address = true
	case "lines":
		function = true
		filename = true
		linenumber = true
	case "files":
		filename = true
	case "functions":
		function = true
	case "filefunctions":
		function = true
		filename = true
	default:
		return fmt.Errorf("unexpected granularity")
	}
	return prof.Aggregate(inlines, function, filename, linenumber, cfg.ShowColumns, address)
}

func reportOptions(p *profile.Profile, numLabelUnits map[string]string, cfg config) (*report.Options, error) {
	si, mean := cfg.SampleIndex, cfg.Mean
	value, meanDiv, sample, err := sampleFormat(p, si, mean)
	if err != nil {
		return nil, err
	}

	stype := sample.Type
	if mean {
		stype = "mean_" + stype
	}

	if cfg.DivideBy == 0 {
		return nil, fmt.Errorf("zero divisor specified")
	}

	var filters []string
	addFilter := func(k string, v string) {
		if v != "" {
			filters = append(filters, k+"="+v)
		}
	}
	addFilter("focus", cfg.Focus)
	addFilter("ignore", cfg.Ignore)
	addFilter("hide", cfg.Hide)
	addFilter("show", cfg.Show)
	addFilter("show_from", cfg.ShowFrom)
	addFilter("tagfocus", cfg.TagFocus)
	addFilter("tagignore", cfg.TagIgnore)
	addFilter("tagshow", cfg.TagShow)
	addFilter("taghide", cfg.TagHide)

	ropt := &report.Options{
		CumSort:      cfg.Sort == "cum",
		CallTree:     cfg.CallTree,
		DropNegative: cfg.DropNegative,

		CompactLabels: cfg.CompactLabels,
		Ratio:         1 / cfg.DivideBy,

		NodeCount:    cfg.NodeCount,
		NodeFraction: cfg.NodeFraction,
		EdgeFraction: cfg.EdgeFraction,

		ActiveFilters: filters,
		NumLabelUnits: numLabelUnits,

		SampleValue:       value,
		SampleMeanDivisor: meanDiv,
		SampleType:        stype,
		SampleUnit:        sample.Unit,

		OutputUnit: cfg.Unit,

		SourcePath: cfg.SourcePath,
		TrimPath:   cfg.TrimPath,

		IntelSyntax: cfg.IntelSyntax,
	}

	if len(p.Mapping) > 0 && p.Mapping[0].File != "" {
		ropt.Title = filepath.Base(p.Mapping[0].File)
	}

	return ropt, nil
}

// identifyNumLabelUnits returns a map of numeric label keys to the units
// associated with those keys.
func identifyNumLabelUnits(p *profile.Profile, ui plugin.UI) map[string]string {
	numLabelUnits, ignoredUnits := p.NumLabelUnits()

	// Print errors for tags with multiple units associated with
	// a single key.
	for k, units := range ignoredUnits {
		ui.PrintErr(fmt.Sprintf("For tag %s used unit %s, also encountered unit(s) %s", k, numLabelUnits[k], strings.Join(units, ", ")))
	}
	return numLabelUnits
}

type sampleValueFunc func([]int64) int64

// sampleFormat returns a function to extract values out of a profile.Sample,
// and the type/units of those values.
func sampleFormat(p *profile.Profile, sampleIndex string, mean bool) (value, meanDiv sampleValueFunc, v *profile.ValueType, err error) {
	if len(p.SampleType) == 0 {
		return nil, nil, nil, fmt.Errorf("profile has no samples")
	}
	index, err := p.SampleIndexByName(sampleIndex)
	if err != nil {
		return nil, nil, nil, err
	}
	value = valueExtractor(index)
	if mean {
		meanDiv = valueExtractor(0)
	}
	v = p.SampleType[index]
	return
}

func valueExtractor(ix int) sampleValueFunc {
	return func(v []int64) int64 {
		return v[ix]
	}
}

// profileCopier can be used to obtain a fresh copy of a profile.
// It is useful since reporting code may mutate the profile handed to it.
type profileCopier []byte

func makeProfileCopier(src *profile.Profile) profileCopier {
	// Pre-serialize the profile. We will deserialize every time a fresh copy is needed.
	var buf bytes.Buffer
	src.WriteUncompressed(&buf)
	return profileCopier(buf.Bytes())
}

// newCopy returns a new copy of the profile.
func (c profileCopier) newCopy() *profile.Profile {
	p, err := profile.ParseUncompressed([]byte(c))
	if err != nil {
		panic(err)
	}
	return p
}
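Not part of the diff above: a minimal, hypothetical in-package sketch of applyCommandOverrides from driver.go. For the "disasm" command, trimming is turned off and address granularity without inlines is forced, so the node/edge pruning thresholds end up cleared. The report.Proto argument here is just a placeholder output format for illustration.

// driver_overrides_sketch_test.go (hypothetical, in package driver)
package driver

import (
	"testing"

	"m7s.live/v5/plugin/debug/pkg/internal/report"
)

func TestApplyCommandOverridesSketch(t *testing.T) {
	cfg := defaultConfig() // Trim: true, NodeCount: -1
	out := applyCommandOverrides("disasm", report.Proto, cfg)

	if out.Granularity != "addresses" || !out.NoInlines {
		t.Errorf("unexpected granularity/noinlines: %+v", out)
	}
	// trim was forced off, so the pruning thresholds are cleared.
	if out.NodeCount != 0 || out.NodeFraction != 0 || out.EdgeFraction != 0 {
		t.Errorf("expected pruning disabled, got %+v", out)
	}
}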