Compare commits

...

57 Commits

Author SHA1 Message Date
langhuihui
77613e52a8 feat: mp4 to ts convert 2025-06-16 09:03:26 +08:00
erroot
ec56bba75a Erroot v5 (#286)
* 插件数据库不同时,新建DB 对象赋值给插件

* MP4 plugin adds extraction, clips, images, compressed video, GOP clicp

* remove mp4/util panic code
2025-06-16 08:29:14 +08:00
pggiroro
b2b511d755 fix: user.LastLogin set gorm type:timestamp, gb28181 api GetGroupChannels modify 2025-06-15 22:19:14 +08:00
pggiroro
42acf47250 feature: gb28181 support single mediaport 2025-06-15 16:58:52 +08:00
langhuihui
6206ee847d fix: record table fit pg database 2025-06-15 15:58:12 +08:00
langhuihui
6cfdc03e4a fix: user mode fit pg database 2025-06-15 15:21:21 +08:00
pggiroro
b425b8da1f fix: ignore RecordEvent in gorm 2025-06-13 12:52:57 +08:00
langhuihui
e105243cd5 refactor: record 2025-06-13 12:52:57 +08:00
langhuihui
20ec6c55cd fix: plugin init error 2025-06-12 15:08:47 +08:00
langhuihui
e478a1972e fix: webrtc batch bug 2025-06-12 14:21:59 +08:00
langhuihui
94be02cd79 feat: consider pull proxy disable status 2025-06-12 13:50:47 +08:00
langhuihui
bacda6f5a0 feat: webrtc fit client codecs 2025-06-12 12:49:36 +08:00
langhuihui
61fae4cc97 fix: webrtc h265 subscribe 2025-06-11 23:43:22 +08:00
pggiroro
e0752242b2 feat: crontab support record with plan like nvr 2025-06-11 22:18:45 +08:00
pggiroro
23f2ed39a1 fix: gb28181 check from.Address.User when onRegister,delete device from db when device is not register 2025-06-11 22:18:45 +08:00
erroot
0b731e468b 插件数据库不同时,新建DB 对象赋值给插件 2025-06-11 21:43:34 +08:00
langhuihui
4fe1472117 refactor: init plugin failed do not register http handle 2025-06-11 13:57:45 +08:00
langhuihui
a8b3a644c3 feat: record recover 2025-06-10 20:16:39 +08:00
pggiroro
4f0a097dac feat: crontab support plat with streampath in database 2025-06-08 21:01:36 +08:00
pggiroro
4df3de00af fix: gb28181 subscriber and invite sdp 2025-06-08 10:40:17 +08:00
langhuihui
9c16905f28 feat: add env check to debug plugin 2025-06-07 21:07:28 +08:00
pggiroro
0470f78ed7 fix: register to up platform change cseq when need password, get deviceinfo do not update device name when name is not nil in db,return error when DB is nil in Oninit 2025-06-06 22:45:50 +08:00
pggiroro
7282f1f44d fix: add platform from config.yaml,add example into default/config.yaml 2025-06-06 09:03:58 +08:00
pggiroro
67186cd669 fix: subscribe stream before start mp4 record 2025-06-06 09:03:58 +08:00
pggiroro
09e9761083 feat: Added the association feature between plan and streampath, which has not been tested yet. 2025-06-06 09:03:58 +08:00
langhuihui
4acdc19beb feat: add duration to record 2025-06-05 23:51:33 +08:00
langhuihui
80e19726d4 fix: use safeGet insteadof Call and get
feat: multi buddy support
2025-06-05 20:33:59 +08:00
langhuihui
8ff14931fe feat: disable replay protection on tcp webrtc 2025-06-04 23:02:24 +08:00
pggiroro
9c7dc7e628 fix: modify gb.Logger.With 2025-06-04 20:39:49 +08:00
pggiroro
75791fe93f feat: gb28181 support add platform and platform channel from config.yaml 2025-06-04 20:36:48 +08:00
langhuihui
cf218215ff fix: tcp read block 2025-06-04 14:13:28 +08:00
langhuihui
dbf820b845 feat: download flv format from mp4 record file 2025-06-03 17:20:58 +08:00
langhuihui
86b9969954 feat: config support more format 2025-06-03 09:06:43 +08:00
langhuihui
b3143e8c14 fix: mp4 download 2025-06-02 22:31:25 +08:00
langhuihui
7f859e6139 fix: mp4 recovery 2025-06-02 21:12:02 +08:00
pggiroro
6eb2941087 fix: use task.Manager to resolve register handler 2025-06-02 20:09:22 +08:00
pggiroro
e8b4cea007 fix: plan.length is 168 2025-06-02 20:09:22 +08:00
pggiroro
3949773e63 fix: update config.yaml add comment about autoinvite,mediaip,sipip 2025-06-02 20:09:22 +08:00
langhuihui
d67279a404 feat: add raw check no frame 2025-05-30 14:01:18 +08:00
langhuihui
043c62f38f feat: add loop read mp4 2025-05-29 20:25:26 +08:00
pggiroro
acf9f0c677 fix: gb28181 make invite sdp mediaip or sipip correct;linux remove viaheader in sip request 2025-05-28 09:22:34 +08:00
langhuihui
49d1e7c784 feat: add s3 plugin 2025-05-28 08:40:53 +08:00
langhuihui
40bc7d4675 feat: add writerBuffer config to tcp 2025-05-27 16:56:01 +08:00
langhuihui
5aa8503aeb feat: add pull testMode 2025-05-27 10:43:34 +08:00
langhuihui
09175f0255 fix: use total insteadof totalCount 2025-05-26 16:04:20 +08:00
pggiroro
dd1a398ca2 feat: gb28181 support play sub stream 2025-05-25 21:33:14 +08:00
pggiroro
50cdfad931 fix: d.conn.NetConnection.Conn maybe nil 2025-05-25 21:33:14 +08:00
langhuihui
6df793a8fb feat: add more format for sei api 2025-05-23 17:18:43 +08:00
langhuihui
74c948d0c3 fix: rtsp memory leak 2025-05-23 10:02:36 +08:00
pggiroro
80ad1044e3 fix: gb28181 register too fast will start too many task 2025-05-22 22:56:41 +08:00
langhuihui
47884b6880 fix: rtmp timestamp start with 1 2025-05-22 22:52:21 +08:00
langhuihui
a38ddd68aa feat: add tcp dump to docker 2025-05-22 20:34:50 +08:00
banshan
a2bc3d94c1 fix: rtsp no audio or video flag 2025-05-22 20:10:17 +08:00
langhuihui
8d6bcc7b1b feat: add more hooks 2025-05-22 10:03:13 +08:00
pggiroro
f475419b7b fix: gb28181 get wrong contact 2025-05-22 09:06:06 +08:00
pggiroro
b8772f62c1 fix: gb28181 save localport into db 2025-05-22 08:58:40 +08:00
pggiroro
962f2450e5 feat: plugin crontab 2025-05-22 08:58:40 +08:00
137 changed files with 15758 additions and 4278 deletions

View File

@@ -98,4 +98,13 @@ jobs:
if: success() && !contains(env.version, 'beta')
run: |
docker tag langhuihui/monibuca:v5 langhuihui/monibuca:${{ env.version }}
docker push langhuihui/monibuca:${{ env.version }}
docker push langhuihui/monibuca:${{ env.version }}
- name: docker build lite version
if: success() && startsWith(github.ref, 'refs/tags/')
run: |
docker buildx build --platform linux/amd64,linux/arm64 -f DockerfileLite -t monibuca/v5:latest --push .
- name: docker lite push version tag
if: success() && !contains(env.version, 'beta')
run: |
docker tag monibuca/v5 monibuca/v5:${{ env.version }}
docker push monibuca/v5:${{ env.version }}

1
.gitignore vendored
View File

@@ -19,3 +19,4 @@ __debug*
example/default/*
!example/default/main.go
!example/default/config.yaml
shutdown.sh

View File

@@ -11,6 +11,9 @@ COPY monibuca_arm64 ./monibuca_arm64
COPY admin.zip ./admin.zip
# Install tcpdump
RUN apt-get update && apt-get install -y tcpdump && rm -rf /var/lib/apt/lists/*
# Copy the configuration file from the build context
COPY example/default/config.yaml /etc/monibuca/config.yaml

31
DockerfileLite Normal file
View File

@@ -0,0 +1,31 @@
# Running Stage
FROM alpine:latest
WORKDIR /monibuca
# Copy the pre-compiled binary from the build context
# The GitHub Actions workflow prepares 'monibuca_linux' in the context root
COPY monibuca_amd64 ./monibuca_amd64
COPY monibuca_arm64 ./monibuca_arm64
COPY admin.zip ./admin.zip
# Copy the configuration file from the build context
COPY example/default/config.yaml /etc/monibuca/config.yaml
# Export necessary ports
EXPOSE 6000 8080 8443 1935 554 5060 9000-20000
EXPOSE 5060/udp 44944/udp
RUN if [ "$(uname -m)" = "aarch64" ]; then \
mv ./monibuca_arm64 ./monibuca_linux; \
rm ./monibuca_amd64; \
else \
mv ./monibuca_amd64 ./monibuca_linux; \
rm ./monibuca_arm64; \
fi
ENTRYPOINT [ "./monibuca_linux"]
CMD ["-c", "/etc/monibuca/config.yaml"]

111
RELEASE_NOTES_5.0.x_CN.md Normal file
View File

@@ -0,0 +1,111 @@
# Monibuca v5.0.x Release Notes
## v5.0.2 (2025-06-05)
### 🎉 新功能 (New Features)
#### 核心功能
- **降低延迟** - 禁用了TCP WebRTC的重放保护功能,降低了延迟
- **配置系统增强** - 支持更多配置格式(支持配置项中插入`-`、`_`和大写字母),提升配置灵活性
- **原始数据检查** - 新增原始数据无帧检查功能,提升数据处理稳定性
- **MP4循环读取** - 支持MP4文件循环读取功能(通过配置 pull 配置下的 `loop` 配置)
- **S3插件** - 新增S3存储插件支持云存储集成
- **TCP读写缓冲配置** - 新增TCP连接读写缓冲区配置选项针对高并发下的吞吐能力增强
- **拉流测试模式** - 新增拉流测试模式选项(可以选择拉流时不发布),便于调试和测试
- **SEI API格式扩展** - 扩展SEI API支持更多数据格式
- **Hook扩展** - 新增更多Hook回调点增强扩展性
- **定时任务插件** - 新增crontab定时任务插件
- **服务器抓包** - 新增服务器抓包功能(调用`tcpdump`支持TCP和UDP协议,API 说明见 [tcpdump](https://api.monibuca.com/api-301117332)
#### GB28181协议增强
- **平台配置支持** - GB28181现在支持从config.yaml中添加平台和平台通道配置
- **子码流播放** - 支持GB28181子码流播放功能
- **SDP优化** - 优化invite SDP中的mediaip和sipip处理
- **本地端口保存** - 修复GB28181本地端口保存到数据库的问题
#### MP4功能增强
- **FLV格式下载** - 支持从MP4录制文件下载FLV格式
- **下载功能修复** - 修复MP4下载功能的相关问题
- **恢复功能修复** - 修复MP4恢复功能
### 🐛 问题修复 (Bug Fixes)
#### 网络通信
- **TCP读取阻塞** - 修复TCP读取阻塞问题,增加了读取超时设置
- **RTSP内存泄漏** - 修复RTSP协议的内存泄漏问题
- **RTSP音视频标识** - 修复RTSP无音频或视频标识的问题
#### GB28181协议
- **任务管理** - 使用task.Manager解决注册处理器的问题
- **计划长度** - 修复plan.length为168的问题
- **注册频率** - 修复GB28181注册过快导致启动过多任务的问题
- **联系信息** - 修复GB28181获取错误联系信息的问题
#### RTMP协议
- **时间戳处理** - 修复RTMP时间戳开头跳跃问题
### 🛠️ 优化改进 (Improvements)
#### Docker支持
- **tcpdump工具** - Docker镜像中新增tcpdump网络诊断工具
#### Linux平台优化
- **SIP请求优化** - Linux平台移除SIP请求中的viaheader
### 👥 贡献者 (Contributors)
- langhuihui
- pggiroro
- banshan
---
## v5.0.1 (2025-05-21)
### 🎉 新功能 (New Features)
#### WebRTC增强
- **H265支持** - 新增WebRTC对H265编码的支持,提升视频质量和压缩效率
#### GB28181协议增强
- **订阅功能扩展** - GB28181模块现在支持订阅报警、移动位置、目录信息
- **通知请求** - 支持接收通知请求,增强与设备的交互能力
#### Docker优化
- **FFmpeg集成** - Docker镜像中新增FFmpeg工具支持更多音视频处理场景
- **多架构支持** - 新增Docker多架构构建支持
### 🐛 问题修复 (Bug Fixes)
#### Docker相关
- **构建问题** - 修复Docker构建过程中的多个问题
- **构建优化** - 优化Docker构建流程提升构建效率
#### RTMP协议
- **时间戳处理** - 修复RTMP第一个chunk类型3需要添加时间戳的问题
#### GB28181协议
- **路径匹配** - 修复GB28181模块中播放流路径的正则表达式匹配问题
#### MP4处理
- **stsz box** - 修复stsz box采样大小的问题
- **G711音频** - 修复拉取MP4文件时读取G711音频的问题
- **H265解析** - 修复H265 MP4文件解析问题
### 🛠️ 优化改进 (Improvements)
#### 代码质量
- **错误处理** - 新增maxcount错误处理机制
- **文档更新** - 更新README文档和go.mod配置
#### 构建系统
- **ARM架构** - 减少JavaScript代码,优化ARM架构Docker构建
- **构建标签** - 移除Docker中不必要的构建标签
### 📦 其他更新 (Other Updates)
- **MCP相关** - 更新Model Context Protocol相关功能
- **依赖更新** - 更新项目依赖和模块配置
### 👥 贡献者 (Contributors)
- langhuihui
---

343
api.go
View File

@@ -7,7 +7,6 @@ import (
"net/http"
"net/url"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
@@ -79,7 +78,7 @@ func (s *Server) DisabledPlugins(ctx context.Context, _ *emptypb.Empty) (res *pb
// /api/stream/annexb/{streamPath}
func (s *Server) api_Stream_AnnexB_(rw http.ResponseWriter, r *http.Request) {
publisher, ok := s.Streams.Get(r.PathValue("streamPath"))
publisher, ok := s.Streams.SafeGet(r.PathValue("streamPath"))
if !ok || publisher.VideoTrack.AVTrack == nil {
http.Error(rw, pkg.ErrNotFound.Error(), http.StatusNotFound)
return
@@ -181,32 +180,27 @@ func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err
func (s *Server) StreamInfo(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.StreamInfoResponse, err error) {
var recordings []*pb.RecordingDetail
s.Records.Call(func() error {
for record := range s.Records.Range {
if record.StreamPath == req.StreamPath {
recordings = append(recordings, &pb.RecordingDetail{
FilePath: record.RecConf.FilePath,
Mode: record.Mode,
Fragment: durationpb.New(record.RecConf.Fragment),
Append: record.RecConf.Append,
PluginName: record.Plugin.Meta.Name,
})
}
s.Records.SafeRange(func(record *RecordJob) bool {
if record.StreamPath == req.StreamPath {
recordings = append(recordings, &pb.RecordingDetail{
FilePath: record.RecConf.FilePath,
Mode: record.RecConf.Mode,
Fragment: durationpb.New(record.RecConf.Fragment),
Append: record.RecConf.Append,
PluginName: record.Plugin.Meta.Name,
})
}
return nil
return true
})
s.Streams.Call(func() error {
if pub, ok := s.Streams.Get(req.StreamPath); ok {
res, err = s.getStreamInfo(pub)
if err != nil {
return err
}
res.Data.Recording = recordings
} else {
err = pkg.ErrNotFound
if pub, ok := s.Streams.SafeGet(req.StreamPath); ok {
res, err = s.getStreamInfo(pub)
if err != nil {
return
}
return nil
})
res.Data.Recording = recordings
} else {
err = pkg.ErrNotFound
}
return
}
@@ -264,17 +258,15 @@ func (s *Server) RestartTask(ctx context.Context, req *pb.RequestWithId64) (resp
}
func (s *Server) GetRecording(ctx context.Context, req *emptypb.Empty) (resp *pb.RecordingListResponse, err error) {
s.Records.Call(func() error {
resp = &pb.RecordingListResponse{}
for record := range s.Records.Range {
resp.Data = append(resp.Data, &pb.Recording{
StreamPath: record.StreamPath,
StartTime: timestamppb.New(record.StartTime),
Type: reflect.TypeOf(record.recorder).String(),
Pointer: uint64(record.GetTaskPointer()),
})
}
return nil
resp = &pb.RecordingListResponse{}
s.Records.SafeRange(func(record *RecordJob) bool {
resp.Data = append(resp.Data, &pb.Recording{
StreamPath: record.StreamPath,
StartTime: timestamppb.New(record.StartTime),
Type: reflect.TypeOf(record.recorder).String(),
Pointer: uint64(record.GetTaskPointer()),
})
return true
})
return
}
@@ -324,50 +316,47 @@ func (s *Server) GetSubscribers(context.Context, *pb.SubscribersRequest) (res *p
return
}
func (s *Server) AudioTrackSnap(_ context.Context, req *pb.StreamSnapRequest) (res *pb.TrackSnapShotResponse, err error) {
s.Streams.Call(func() error {
if pub, ok := s.Streams.Get(req.StreamPath); ok && pub.HasAudioTrack() {
data := &pb.TrackSnapShotData{}
if pub.AudioTrack.Allocator != nil {
for _, memlist := range pub.AudioTrack.Allocator.GetChildren() {
var list []*pb.MemoryBlock
for _, block := range memlist.GetBlocks() {
list = append(list, &pb.MemoryBlock{
S: uint32(block.Start),
E: uint32(block.End),
})
}
data.Memory = append(data.Memory, &pb.MemoryBlockGroup{List: list, Size: uint32(memlist.Size)})
if pub, ok := s.Streams.SafeGet(req.StreamPath); ok && pub.HasAudioTrack() {
data := &pb.TrackSnapShotData{}
if pub.AudioTrack.Allocator != nil {
for _, memlist := range pub.AudioTrack.Allocator.GetChildren() {
var list []*pb.MemoryBlock
for _, block := range memlist.GetBlocks() {
list = append(list, &pb.MemoryBlock{
S: uint32(block.Start),
E: uint32(block.End),
})
}
data.Memory = append(data.Memory, &pb.MemoryBlockGroup{List: list, Size: uint32(memlist.Size)})
}
pub.AudioTrack.Ring.Do(func(v *pkg.AVFrame) {
if len(v.Wraps) > 0 {
var snap pb.TrackSnapShot
snap.Sequence = v.Sequence
snap.Timestamp = uint32(v.Timestamp / time.Millisecond)
snap.WriteTime = timestamppb.New(v.WriteTime)
snap.Wrap = make([]*pb.Wrap, len(v.Wraps))
snap.KeyFrame = v.IDR
data.RingDataSize += uint32(v.Wraps[0].GetSize())
for i, wrap := range v.Wraps {
snap.Wrap[i] = &pb.Wrap{
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
Size: uint32(wrap.GetSize()),
Data: wrap.String(),
}
}
data.Ring = append(data.Ring, &snap)
}
})
res = &pb.TrackSnapShotResponse{
Code: 0,
Message: "success",
Data: data,
}
} else {
err = pkg.ErrNotFound
}
return nil
})
pub.AudioTrack.Ring.Do(func(v *pkg.AVFrame) {
if len(v.Wraps) > 0 {
var snap pb.TrackSnapShot
snap.Sequence = v.Sequence
snap.Timestamp = uint32(v.Timestamp / time.Millisecond)
snap.WriteTime = timestamppb.New(v.WriteTime)
snap.Wrap = make([]*pb.Wrap, len(v.Wraps))
snap.KeyFrame = v.IDR
data.RingDataSize += uint32(v.Wraps[0].GetSize())
for i, wrap := range v.Wraps {
snap.Wrap[i] = &pb.Wrap{
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
Size: uint32(wrap.GetSize()),
Data: wrap.String(),
}
}
data.Ring = append(data.Ring, &snap)
}
})
res = &pb.TrackSnapShotResponse{
Code: 0,
Message: "success",
Data: data,
}
} else {
err = pkg.ErrNotFound
}
return
}
func (s *Server) api_VideoTrack_SSE(rw http.ResponseWriter, r *http.Request) {
@@ -437,50 +426,47 @@ func (s *Server) api_AudioTrack_SSE(rw http.ResponseWriter, r *http.Request) {
}
func (s *Server) VideoTrackSnap(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.TrackSnapShotResponse, err error) {
s.Streams.Call(func() error {
if pub, ok := s.Streams.Get(req.StreamPath); ok && pub.HasVideoTrack() {
data := &pb.TrackSnapShotData{}
if pub.VideoTrack.Allocator != nil {
for _, memlist := range pub.VideoTrack.Allocator.GetChildren() {
var list []*pb.MemoryBlock
for _, block := range memlist.GetBlocks() {
list = append(list, &pb.MemoryBlock{
S: uint32(block.Start),
E: uint32(block.End),
})
}
data.Memory = append(data.Memory, &pb.MemoryBlockGroup{List: list, Size: uint32(memlist.Size)})
if pub, ok := s.Streams.SafeGet(req.StreamPath); ok && pub.HasVideoTrack() {
data := &pb.TrackSnapShotData{}
if pub.VideoTrack.Allocator != nil {
for _, memlist := range pub.VideoTrack.Allocator.GetChildren() {
var list []*pb.MemoryBlock
for _, block := range memlist.GetBlocks() {
list = append(list, &pb.MemoryBlock{
S: uint32(block.Start),
E: uint32(block.End),
})
}
data.Memory = append(data.Memory, &pb.MemoryBlockGroup{List: list, Size: uint32(memlist.Size)})
}
pub.VideoTrack.Ring.Do(func(v *pkg.AVFrame) {
if len(v.Wraps) > 0 {
var snap pb.TrackSnapShot
snap.Sequence = v.Sequence
snap.Timestamp = uint32(v.Timestamp / time.Millisecond)
snap.WriteTime = timestamppb.New(v.WriteTime)
snap.Wrap = make([]*pb.Wrap, len(v.Wraps))
snap.KeyFrame = v.IDR
data.RingDataSize += uint32(v.Wraps[0].GetSize())
for i, wrap := range v.Wraps {
snap.Wrap[i] = &pb.Wrap{
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
Size: uint32(wrap.GetSize()),
Data: wrap.String(),
}
}
data.Ring = append(data.Ring, &snap)
}
})
res = &pb.TrackSnapShotResponse{
Code: 0,
Message: "success",
Data: data,
}
} else {
err = pkg.ErrNotFound
}
return nil
})
pub.VideoTrack.Ring.Do(func(v *pkg.AVFrame) {
if len(v.Wraps) > 0 {
var snap pb.TrackSnapShot
snap.Sequence = v.Sequence
snap.Timestamp = uint32(v.Timestamp / time.Millisecond)
snap.WriteTime = timestamppb.New(v.WriteTime)
snap.Wrap = make([]*pb.Wrap, len(v.Wraps))
snap.KeyFrame = v.IDR
data.RingDataSize += uint32(v.Wraps[0].GetSize())
for i, wrap := range v.Wraps {
snap.Wrap[i] = &pb.Wrap{
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
Size: uint32(wrap.GetSize()),
Data: wrap.String(),
}
}
data.Ring = append(data.Ring, &snap)
}
})
res = &pb.TrackSnapShotResponse{
Code: 0,
Message: "success",
Data: data,
}
} else {
err = pkg.ErrNotFound
}
return
}
@@ -526,86 +512,65 @@ func (s *Server) StopSubscribe(ctx context.Context, req *pb.RequestWithId) (res
}
func (s *Server) PauseStream(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.SuccessResponse, err error) {
s.Streams.Call(func() error {
if s, ok := s.Streams.Get(req.StreamPath); ok {
s.Pause()
}
return nil
})
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Pause()
}
return &pb.SuccessResponse{}, err
}
func (s *Server) ResumeStream(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.SuccessResponse, err error) {
s.Streams.Call(func() error {
if s, ok := s.Streams.Get(req.StreamPath); ok {
s.Resume()
}
return nil
})
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Resume()
}
return &pb.SuccessResponse{}, err
}
func (s *Server) SetStreamSpeed(ctx context.Context, req *pb.SetStreamSpeedRequest) (res *pb.SuccessResponse, err error) {
s.Streams.Call(func() error {
if s, ok := s.Streams.Get(req.StreamPath); ok {
s.Speed = float64(req.Speed)
s.Scale = float64(req.Speed)
s.Info("set stream speed", "speed", req.Speed)
}
return nil
})
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Speed = float64(req.Speed)
s.Scale = float64(req.Speed)
s.Info("set stream speed", "speed", req.Speed)
}
return &pb.SuccessResponse{}, err
}
func (s *Server) SeekStream(ctx context.Context, req *pb.SeekStreamRequest) (res *pb.SuccessResponse, err error) {
s.Streams.Call(func() error {
if s, ok := s.Streams.Get(req.StreamPath); ok {
s.Seek(time.Unix(int64(req.TimeStamp), 0))
}
return nil
})
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Seek(time.Unix(int64(req.TimeStamp), 0))
}
return &pb.SuccessResponse{}, err
}
func (s *Server) StopPublish(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.SuccessResponse, err error) {
s.Streams.Call(func() error {
if s, ok := s.Streams.Get(req.StreamPath); ok {
s.Stop(task.ErrStopByUser)
}
return nil
})
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Stop(task.ErrStopByUser)
}
return &pb.SuccessResponse{}, err
}
// /api/stream/list
func (s *Server) StreamList(_ context.Context, req *pb.StreamListRequest) (res *pb.StreamListResponse, err error) {
recordingMap := make(map[string][]*pb.RecordingDetail)
s.Records.Call(func() error {
for record := range s.Records.Range {
recordingMap[record.StreamPath] = append(recordingMap[record.StreamPath], &pb.RecordingDetail{
FilePath: record.RecConf.FilePath,
Mode: record.Mode,
Fragment: durationpb.New(record.RecConf.Fragment),
Append: record.RecConf.Append,
PluginName: record.Plugin.Meta.Name,
Pointer: uint64(record.GetTaskPointer()),
})
for record := range s.Records.SafeRange {
recordingMap[record.StreamPath] = append(recordingMap[record.StreamPath], &pb.RecordingDetail{
FilePath: record.RecConf.FilePath,
Mode: record.RecConf.Mode,
Fragment: durationpb.New(record.RecConf.Fragment),
Append: record.RecConf.Append,
PluginName: record.Plugin.Meta.Name,
Pointer: uint64(record.GetTaskPointer()),
})
}
var streams []*pb.StreamInfo
for publisher := range s.Streams.SafeRange {
info, err := s.getStreamInfo(publisher)
if err != nil {
continue
}
return nil
})
s.Streams.Call(func() error {
var streams []*pb.StreamInfo
for publisher := range s.Streams.Range {
info, err := s.getStreamInfo(publisher)
if err != nil {
continue
}
info.Data.Recording = recordingMap[info.Data.Path]
streams = append(streams, info.Data)
}
res = &pb.StreamListResponse{Data: streams, Total: int32(s.Streams.Length), PageNum: req.PageNum, PageSize: req.PageSize}
return nil
})
info.Data.Recording = recordingMap[info.Data.Path]
streams = append(streams, info.Data)
}
res = &pb.StreamListResponse{Data: streams, Total: int32(s.Streams.Length), PageNum: req.PageNum, PageSize: req.PageSize}
return
}
@@ -632,24 +597,18 @@ func (s *Server) Api_Summary_SSE(rw http.ResponseWriter, r *http.Request) {
func (s *Server) Api_Stream_Position_SSE(rw http.ResponseWriter, r *http.Request) {
streamPath := r.URL.Query().Get("streamPath")
util.ReturnFetchValue(func() (t time.Time) {
s.Streams.Call(func() error {
if pub, ok := s.Streams.Get(streamPath); ok {
t = pub.GetPosition()
}
return nil
})
if pub, ok := s.Streams.SafeGet(streamPath); ok {
t = pub.GetPosition()
}
return
}, rw, r)
}
// func (s *Server) Api_Vod_Position(rw http.ResponseWriter, r *http.Request) {
// streamPath := r.URL.Query().Get("streamPath")
// s.Streams.Call(func() error {
// if pub, ok := s.Streams.Get(streamPath); ok {
// t = pub.GetPosition()
// }
// return nil
// })
// if pub, ok := s.Streams.SafeGet(streamPath); ok {
// t = pub.GetPosition()
// }
// }
func (s *Server) Summary(context.Context, *emptypb.Empty) (res *pb.SummaryResponse, err error) {
@@ -733,7 +692,7 @@ func (s *Server) GetConfigFile(_ context.Context, req *emptypb.Empty) (res *pb.G
func (s *Server) UpdateConfigFile(_ context.Context, req *pb.UpdateConfigFileRequest) (res *pb.SuccessResponse, err error) {
if s.configFileContent != nil {
s.configFileContent = []byte(req.Content)
os.WriteFile(filepath.Join(ExecDir, s.conf.(string)), s.configFileContent, 0644)
os.WriteFile(s.configFilePath, s.configFileContent, 0644)
res = &pb.SuccessResponse{}
} else {
err = pkg.ErrNotFound
@@ -791,7 +750,7 @@ func (s *Server) GetRecordList(ctx context.Context, req *pb.ReqRecordList) (resp
offset := (req.PageNum - 1) * req.PageSize // 计算偏移量
var totalCount int64 //总条数
var result []*RecordStream
var result []*EventRecordStream
query := s.DB.Model(&RecordStream{})
if strings.Contains(req.StreamPath, "*") {
query = query.Where("stream_path like ?", strings.ReplaceAll(req.StreamPath, "*", "%"))
@@ -823,9 +782,9 @@ func (s *Server) GetRecordList(ctx context.Context, req *pb.ReqRecordList) (resp
return
}
resp = &pb.ResponseList{
TotalCount: uint32(totalCount),
PageNum: req.PageNum,
PageSize: req.PageSize,
Total: uint32(totalCount),
PageNum: req.PageNum,
PageSize: req.PageSize,
}
for _, recordFile := range result {
resp.Data = append(resp.Data, &pb.RecordFile{

View File

@@ -7,4 +7,9 @@ rtsp:
mp4:
enable: true
pull:
live/test: /Users/dexter/Movies/test.mp4
live/test: /Users/dexter/Movies/test.mp4
rtmp:
enable: true
debug:
enable: true

13
example/8081/default.yaml Normal file
View File

@@ -0,0 +1,13 @@
global:
# loglevel: debug
http:
listenaddr: :8081
listenaddrtls: :8555
tcp:
listenaddr: :50052
rtsp:
enable: false
rtmp:
tcp: :1936
webrtc:
enable: false

View File

@@ -8,20 +8,40 @@ srt:
listenaddr: :6000
passphrase: foobarfoobar
gb28181:
enable: false
autoinvite: false
mediaip: 192.168.1.21 #流媒体收流IP
sipip: 192.168.1.21 #SIP通讯IP
enable: false # 是否启用GB28181协议
autoinvite: false #建议使用false开启后会自动邀请设备推流
mediaip: 192.168.1.21 #流媒体收流IP,外网情况下使用公网IP,内网情况下使用网卡IP,不要用127.0.0.1
sipip: 192.168.1.21 #SIP通讯IP,不管公网还是内网都使用本机网卡IP,不要用127.0.0.1
sip:
listenaddr:
- udp::5060
# pull:
# live/test: dump/34020000001320000001
onsub:
pull:
^\d{20}/\d{20}$: $0
^gb_\d+/(.+)$: $1
# .* : $0
platforms:
- enable: false #是否启用平台
name: "测试平台" #平台名称
servergbid: "34020000002000000002" #上级平台GBID
servergbdomain: "3402000000" #上级平台GB域
serverip: 192.168.1.106 #上级平台IP
serverport: 5061 #上级平台端口
devicegbid: "34020000002000000001" #本平台设备GBID
deviceip: 192.168.1.106 #本平台设备IP
deviceport: 5060 #本平台设备端口
username: "34020000002000000001" #SIP账号
password: "123456" #SIP密码
expires: 3600 #注册有效期,单位秒
keeptimeout: 60 #注册保持超时时间,单位秒
civilCode: "340200" #行政区划代码
manufacturer: "Monibuca" #设备制造商
model: "GB28181" #设备型号
address: "江苏南京" #设备地址
register_way: 1
platformchannels:
- platformservergbid: "34020000002000000002" #上级平台GBID
channeldbid: "34020000001110000003_34020000001320000005" #通道DBID,格式为设备ID_通道ID
mp4:
# enable: false
# publish:

5
go.mod
View File

@@ -6,6 +6,7 @@ require (
github.com/IOTechSystems/onvif v1.2.0
github.com/VictoriaMetrics/VictoriaMetrics v1.102.0
github.com/asavie/xdp v0.3.3
github.com/aws/aws-sdk-go v1.55.7
github.com/beevik/etree v1.4.1
github.com/bluenviron/gohlslib v1.4.0
github.com/c0deltin/duckdb-driver v0.1.0
@@ -52,7 +53,7 @@ require (
google.golang.org/protobuf v1.34.2
gorm.io/driver/mysql v1.5.7
gorm.io/driver/postgres v1.5.9
gorm.io/gorm v1.25.11
gorm.io/gorm v1.30.0
)
require (
@@ -84,6 +85,7 @@ require (
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
@@ -142,7 +144,6 @@ require (
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect
github.com/gorilla/websocket v1.5.1
github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd
github.com/mark3labs/mcp-go v0.27.0
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
github.com/phsym/console-slog v0.3.1
github.com/prometheus/client_golang v1.20.4

10
go.sum
View File

@@ -25,6 +25,8 @@ github.com/asticode/go-astikit v0.30.0 h1:DkBkRQRIxYcknlaU7W7ksNfn4gMFsB0tqMJflx
github.com/asticode/go-astikit v0.30.0/go.mod h1:h4ly7idim1tNhaVkdVBeXQZEE3L0xblP7fCWbgwipF0=
github.com/asticode/go-astits v1.13.0 h1:XOgkaadfZODnyZRR5Y0/DWkA9vrkLLPLeeOvDwfKZ1c=
github.com/asticode/go-astits v1.13.0/go.mod h1:QSHmknZ51pf6KJdHKZHJTLlMegIrhega3LPWz3ND/iI=
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/beevik/etree v1.4.1 h1:PmQJDDYahBGNKDcpdX8uPy1xRCwoCGVUiW669MEirVI=
github.com/beevik/etree v1.4.1/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs=
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c h1:8XZeJrs4+ZYhJeJ2aZxADI2tGADS15AzIF8MQ8XAhT4=
@@ -139,6 +141,10 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
@@ -417,8 +423,8 @@ gorm.io/driver/mysql v1.5.7/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkD
gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8=
gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/gorm v1.25.11 h1:/Wfyg1B/je1hnDx3sMkX+gAlxrlZpn6X0BXRlwXlvHg=
gorm.io/gorm v1.25.11/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs=
gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=

View File

@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.5
// protoc v5.28.3
// protoc-gen-go v1.36.6
// protoc v5.29.3
// source: auth.proto
package pb
@@ -440,64 +440,39 @@ func (x *UserInfoResponse) GetData() *UserInfo {
var File_auth_proto protoreflect.FileDescriptor
var file_auth_proto_rawDesc = string([]byte{
0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62,
0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e,
0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x46,
0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a,
0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61,
0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61,
0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x4e, 0x0a, 0x0c, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x53,
0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x28, 0x0a, 0x08,
0x75, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c,
0x2e, 0x70, 0x62, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x08, 0x75, 0x73,
0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0x63, 0x0a, 0x0d, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18,
0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d,
0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x24, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x53, 0x75,
0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x25, 0x0a, 0x0d, 0x4c,
0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05,
0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b,
0x65, 0x6e, 0x22, 0x3e, 0x0a, 0x0e, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x22, 0x27, 0x0a, 0x0f, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65,
0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01,
0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x45, 0x0a, 0x08, 0x55,
0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e,
0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e,
0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61,
0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73,
0x41, 0x74, 0x22, 0x62, 0x0a, 0x10, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01,
0x20, 0x01, 0x28, 0x05, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65,
0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73,
0x73, 0x61, 0x67, 0x65, 0x12, 0x20, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f,
0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x32, 0xf4, 0x01, 0x0a, 0x04, 0x41, 0x75, 0x74, 0x68, 0x12,
0x48, 0x0a, 0x05, 0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x10, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f,
0x67, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x11, 0x2e, 0x70, 0x62, 0x2e,
0x4c, 0x6f, 0x67, 0x69, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1a, 0x82,
0xd3, 0xe4, 0x93, 0x02, 0x14, 0x3a, 0x01, 0x2a, 0x22, 0x0f, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61,
0x75, 0x74, 0x68, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x6e, 0x12, 0x4c, 0x0a, 0x06, 0x4c, 0x6f, 0x67,
0x6f, 0x75, 0x74, 0x12, 0x11, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x12, 0x2e, 0x70, 0x62, 0x2e, 0x4c, 0x6f, 0x67, 0x6f,
0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93,
0x02, 0x15, 0x3a, 0x01, 0x2a, 0x22, 0x10, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x75, 0x74, 0x68,
0x2f, 0x6c, 0x6f, 0x67, 0x6f, 0x75, 0x74, 0x12, 0x54, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x55, 0x73,
0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x13, 0x2e, 0x70, 0x62, 0x2e, 0x55, 0x73, 0x65, 0x72,
0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x70, 0x62,
0x2e, 0x55, 0x73, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
0x65, 0x22, 0x1a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x14, 0x12, 0x12, 0x2f, 0x61, 0x70, 0x69, 0x2f,
0x61, 0x75, 0x74, 0x68, 0x2f, 0x75, 0x73, 0x65, 0x72, 0x69, 0x6e, 0x66, 0x6f, 0x42, 0x10, 0x5a,
0x0e, 0x6d, 0x37, 0x73, 0x2e, 0x6c, 0x69, 0x76, 0x65, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x62, 0x62,
0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
})
const file_auth_proto_rawDesc = "" +
"\n" +
"\n" +
"auth.proto\x12\x02pb\x1a\x1cgoogle/api/annotations.proto\"F\n" +
"\fLoginRequest\x12\x1a\n" +
"\busername\x18\x01 \x01(\tR\busername\x12\x1a\n" +
"\bpassword\x18\x02 \x01(\tR\bpassword\"N\n" +
"\fLoginSuccess\x12\x14\n" +
"\x05token\x18\x01 \x01(\tR\x05token\x12(\n" +
"\buserInfo\x18\x02 \x01(\v2\f.pb.UserInfoR\buserInfo\"c\n" +
"\rLoginResponse\x12\x12\n" +
"\x04code\x18\x01 \x01(\x05R\x04code\x12\x18\n" +
"\amessage\x18\x02 \x01(\tR\amessage\x12$\n" +
"\x04data\x18\x03 \x01(\v2\x10.pb.LoginSuccessR\x04data\"%\n" +
"\rLogoutRequest\x12\x14\n" +
"\x05token\x18\x01 \x01(\tR\x05token\">\n" +
"\x0eLogoutResponse\x12\x12\n" +
"\x04code\x18\x01 \x01(\x05R\x04code\x12\x18\n" +
"\amessage\x18\x02 \x01(\tR\amessage\"'\n" +
"\x0fUserInfoRequest\x12\x14\n" +
"\x05token\x18\x01 \x01(\tR\x05token\"E\n" +
"\bUserInfo\x12\x1a\n" +
"\busername\x18\x01 \x01(\tR\busername\x12\x1d\n" +
"\n" +
"expires_at\x18\x02 \x01(\x03R\texpiresAt\"b\n" +
"\x10UserInfoResponse\x12\x12\n" +
"\x04code\x18\x01 \x01(\x05R\x04code\x12\x18\n" +
"\amessage\x18\x02 \x01(\tR\amessage\x12 \n" +
"\x04data\x18\x03 \x01(\v2\f.pb.UserInfoR\x04data2\xf4\x01\n" +
"\x04Auth\x12H\n" +
"\x05Login\x12\x10.pb.LoginRequest\x1a\x11.pb.LoginResponse\"\x1a\x82\xd3\xe4\x93\x02\x14:\x01*\"\x0f/api/auth/login\x12L\n" +
"\x06Logout\x12\x11.pb.LogoutRequest\x1a\x12.pb.LogoutResponse\"\x1b\x82\xd3\xe4\x93\x02\x15:\x01*\"\x10/api/auth/logout\x12T\n" +
"\vGetUserInfo\x12\x13.pb.UserInfoRequest\x1a\x14.pb.UserInfoResponse\"\x1a\x82\xd3\xe4\x93\x02\x14\x12\x12/api/auth/userinfoB\x10Z\x0em7s.live/v5/pbb\x06proto3"
var (
file_auth_proto_rawDescOnce sync.Once

View File

@@ -123,7 +123,6 @@ func local_request_Auth_GetUserInfo_0(ctx context.Context, marshaler runtime.Mar
// UnaryRPC :call AuthServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAuthHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, server AuthServer) error {
mux.Handle("POST", pattern_Auth_Login_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
@@ -207,21 +206,21 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve
// RegisterAuthHandlerFromEndpoint is same as RegisterAuthHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.NewClient(endpoint, opts...)
conn, err := grpc.DialContext(ctx, endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
@@ -239,7 +238,7 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "AuthClient" to call the correct interceptors. This client ignores the HTTP middlewares.
// "AuthClient" to call the correct interceptors.
func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AuthClient) error {
mux.Handle("POST", pattern_Auth_Login_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {

View File

@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.28.3
// - protoc v5.29.3
// source: auth.proto
package pb

File diff suppressed because it is too large Load Diff

View File

@@ -1844,7 +1844,6 @@ func local_request_Api_DeleteRecord_0(ctx context.Context, marshaler runtime.Mar
// UnaryRPC :call ApiServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterApiHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ApiServer) error {
mux.Handle("GET", pattern_Api_SysInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
@@ -2953,21 +2952,21 @@ func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server
// RegisterApiHandlerFromEndpoint is same as RegisterApiHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterApiHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.NewClient(endpoint, opts...)
conn, err := grpc.DialContext(ctx, endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
@@ -2985,7 +2984,7 @@ func RegisterApiHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.C
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ApiClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ApiClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ApiClient" to call the correct interceptors. This client ignores the HTTP middlewares.
// "ApiClient" to call the correct interceptors.
func RegisterApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ApiClient) error {
mux.Handle("GET", pattern_Api_SysInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {

View File

@@ -664,7 +664,7 @@ message ReqRecordList {
string end = 4;
uint32 pageNum = 5;
uint32 pageSize = 6;
string mode = 7;
string eventId = 7;
string type = 8;
string eventLevel = 9;
}
@@ -683,7 +683,7 @@ message RecordFile {
message ResponseList {
int32 code = 1;
string message = 2;
uint32 totalCount = 3;
uint32 total = 3;
uint32 pageNum = 4;
uint32 pageSize = 5;
repeated RecordFile data = 6;

View File

@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.28.3
// - protoc v5.29.3
// source: global.proto
package pb

View File

@@ -41,5 +41,11 @@ func (h265 *H265Ctx) GetRecord() []byte {
}
func (h265 *H265Ctx) String() string {
return fmt.Sprintf("hvc1.%02X%02X%02X", h265.RecordInfo.AVCProfileIndication, h265.RecordInfo.ProfileCompatibility, h265.RecordInfo.AVCLevelIndication)
// 根据 HEVC 标准格式hvc1.profile.compatibility.level.constraints
profile := h265.RecordInfo.AVCProfileIndication
compatibility := h265.RecordInfo.ProfileCompatibility
level := h265.RecordInfo.AVCLevelIndication
// 简单实现,使用可用字段模拟 HEVC 格式
return fmt.Sprintf("hvc1.%d.%X.L%d.00", profile, compatibility, level)
}

View File

@@ -208,6 +208,9 @@ func (config *Config) ParseUserFile(conf map[string]any) {
}
config.File = conf
for k, v := range conf {
k = strings.ReplaceAll(k, "-", "")
k = strings.ReplaceAll(k, "_", "")
k = strings.ToLower(k)
if config.Has(k) {
if prop := config.Get(k); prop.props != nil {
if v != nil {

View File

@@ -40,6 +40,8 @@ type TCP struct {
KeyFile string `desc:"私钥文件"`
ListenNum int `desc:"同时并行监听数量0为CPU核心数量"` //同时并行监听数量0为CPU核心数量
NoDelay bool `desc:"是否禁用Nagle算法"` //是否禁用Nagle算法
WriteBuffer int `desc:"写缓冲区大小"` //写缓冲区大小
ReadBuffer int `desc:"读缓冲区大小"` //读缓冲区大小
KeepAlive bool `desc:"是否启用KeepAlive"` //是否启用KeepAlive
AutoListen bool `default:"true" desc:"是否自动监听"`
}
@@ -141,6 +143,18 @@ func (task *ListenTCPWork) listen(handler TCPHandler) {
if !task.NoDelay {
tcpConn.SetNoDelay(false)
}
if task.WriteBuffer > 0 {
if err := tcpConn.SetWriteBuffer(task.WriteBuffer); err != nil {
task.Error("failed to set write buffer", "error", err)
continue
}
}
if task.ReadBuffer > 0 {
if err := tcpConn.SetReadBuffer(task.ReadBuffer); err != nil {
task.Error("failed to set read buffer", "error", err)
continue
}
}
tempDelay = 0
subTask := handler(tcpConn)
task.AddTask(subTask)

View File

@@ -16,16 +16,32 @@ const (
RelayModeRelay = "relay"
RelayModeMix = "mix"
HookOnPublish HookType = "publish"
HookOnSubscribe HookType = "subscribe"
HookOnPublishEnd HookType = "publish_end"
HookOnSubscribeEnd HookType = "subscribe_end"
RecordModeAuto RecordMode = "auto"
RecordModeEvent RecordMode = "event"
HookOnServerKeepAlive HookType = "server_keep_alive"
HookOnPublishStart HookType = "publish_start"
HookOnPublishEnd HookType = "publish_end"
HookOnSubscribeStart HookType = "subscribe_start"
HookOnSubscribeEnd HookType = "subscribe_end"
HookOnPullStart HookType = "pull_start"
HookOnPullEnd HookType = "pull_end"
HookOnPushStart HookType = "push_start"
HookOnPushEnd HookType = "push_end"
HookOnRecordStart HookType = "record_start"
HookOnRecordEnd HookType = "record_end"
HookOnTransformStart HookType = "transform_start"
HookOnTransformEnd HookType = "transform_end"
EventLevelLow EventLevel = "low"
EventLevelHigh EventLevel = "high"
)
type (
HookType string
Publish struct {
EventLevel = string
RecordMode = string
HookType string
Publish struct {
MaxCount int `default:"0" desc:"最大发布者数量"` // 最大发布者数量
PubAudio bool `default:"true" desc:"是否发布音频"`
PubVideo bool `default:"true" desc:"是否发布视频"`
@@ -61,11 +77,13 @@ type (
HTTPValues map[string][]string
Pull struct {
URL string `desc:"拉流地址"`
Loop int `desc:"拉流循环次数,-1:无限循环"` // 拉流循环次数,-1 表示无限循环
MaxRetry int `default:"-1" desc:"断开后自动重试次数,0:不重试,-1:无限重试"` // 断开后自动重拉,0 表示不自动重拉,-1 表示无限重拉高于0 的数代表最大重拉次数
RetryInterval time.Duration `default:"5s" desc:"重试间隔"` // 重试间隔
Proxy string `desc:"代理地址"` // 代理地址
Header HTTPValues
Args HTTPValues `gorm:"-:all"` // 拉流参数
Args HTTPValues `gorm:"-:all"` // 拉流参数
TestMode int `desc:"测试模式,0:关闭,1:只拉流不发布"` // 测试模式
}
Push struct {
URL string `desc:"推送地址"` // 推送地址
@@ -74,11 +92,21 @@ type (
Proxy string `desc:"代理地址"` // 代理地址
Header HTTPValues
}
RecordEvent struct {
EventId string
BeforeDuration uint32 `json:"beforeDuration" desc:"事件前缓存时长" gorm:"comment:事件前缓存时长;default:30000"`
AfterDuration uint32 `json:"afterDuration" desc:"事件后缓存时长" gorm:"comment:事件后缓存时长;default:30000"`
EventDesc string `json:"eventDesc" desc:"事件描述" gorm:"type:varchar(255);comment:事件描述"`
EventLevel EventLevel `json:"eventLevel" desc:"事件级别" gorm:"type:varchar(255);comment:事件级别,high表示重要事件无法删除且表示无需自动删除,low表示非重要事件,达到自动删除时间后,自动删除;default:'low'"`
EventName string `json:"eventName" desc:"事件名称" gorm:"type:varchar(255);comment:事件名称"`
}
Record struct {
Type string `desc:"录制类型"` // 录制类型 mp4、flv、hls、hlsv7
FilePath string `desc:"录制文件路径"` // 录制文件路径
Fragment time.Duration `desc:"分片时长"` // 分片时长
Append bool `desc:"是否追加录制"` // 是否追加录制
Mode RecordMode `json:"mode" desc:"事件类型,auto=连续录像模式event=事件录像模式" gorm:"type:varchar(255);comment:事件类型,auto=连续录像模式event=事件录像模式;default:'auto'"`
Type string `desc:"录制类型"` // 录制类型 mp4、flv、hls、hlsv7
FilePath string `desc:"录制文件路径"` // 录制文件路径
Fragment time.Duration `desc:"分片时长"` // 分片时长
Append bool `desc:"是否追加录制"` // 是否追加录制
Event *RecordEvent `json:"event" desc:"事件录像配置" gorm:"-"` // 事件录像配置
}
TransfromOutput struct {
Target string `desc:"转码目标"` // 转码目标
@@ -99,13 +127,13 @@ type (
Transform map[Regexp]Transform
}
Webhook struct {
URL string `yaml:"url" json:"url"` // Webhook 地址
Method string `yaml:"method" json:"method" default:"POST"` // HTTP 方法
Headers map[string]string `yaml:"headers" json:"headers"` // 自定义请求头
TimeoutSeconds int `yaml:"timeout" json:"timeout" default:"5"` // 超时时间(秒)
RetryTimes int `yaml:"retry" json:"retry" default:"3"` // 重试次数
RetryInterval time.Duration `yaml:"retryInterval" json:"retryInterval" default:"1s"` // 重试间隔
Interval int `yaml:"interval" json:"interval" default:"60"` // 保活间隔(秒)
URL string // Webhook 地址
Method string `default:"POST"` // HTTP 方法
Headers map[string]string // 自定义请求头
TimeoutSeconds int `default:"5"` // 超时时间(秒)
RetryTimes int `default:"3"` // 重试次数
RetryInterval time.Duration `default:"1s"` // 重试间隔
Interval int `default:"60"` // 保活间隔(秒)
}
Common struct {
PublicIP string

View File

@@ -9,14 +9,11 @@ import (
// User represents a user in the system
type User struct {
ID uint `gorm:"primarykey"`
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt gorm.DeletedAt `gorm:"index"`
Username string `gorm:"uniqueIndex;size:64"`
Password string `gorm:"size:60"` // bcrypt hash
Role string `gorm:"size:20;default:'user'"` // admin or user
LastLogin time.Time `gorm:"type:datetime;default:CURRENT_TIMESTAMP"`
gorm.Model
Username string `gorm:"uniqueIndex;size:64"`
Password string `gorm:"size:60"` // bcrypt hash
Role string `gorm:"size:20;default:'user'"` // admin or user
LastLogin time.Time `gorm:"type:timestamp;default:CURRENT_TIMESTAMP"`
}
// BeforeCreate hook to hash password before saving

View File

@@ -2,13 +2,14 @@ package pkg
import (
"fmt"
"io"
"time"
"github.com/deepch/vdk/codec/aacparser"
"github.com/deepch/vdk/codec/h264parser"
"github.com/deepch/vdk/codec/h265parser"
"io"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/util"
"time"
)
var _ IAVFrame = (*RawAudio)(nil)
@@ -104,6 +105,8 @@ type H26xFrame struct {
}
func (h *H26xFrame) Parse(track *AVTrack) (err error) {
var hasVideoFrame bool
switch h.FourCC {
case codec.FourCC_H264:
var ctx *codec.H264Ctx
@@ -127,6 +130,9 @@ func (h *H26xFrame) Parse(track *AVTrack) (err error) {
}
case codec.NALU_IDR_Picture:
track.Value.IDR = true
hasVideoFrame = true
case codec.NALU_Non_IDR_Picture:
hasVideoFrame = true
}
}
case codec.FourCC_H265:
@@ -155,9 +161,18 @@ func (h *H26xFrame) Parse(track *AVTrack) (err error) {
h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
h265parser.NAL_UNIT_CODED_SLICE_CRA:
track.Value.IDR = true
hasVideoFrame = true
case 0, 1, 2, 3, 4, 5, 6, 7, 8, 9:
hasVideoFrame = true
}
}
}
// Return ErrSkip if no video frames are present (only metadata NALUs)
if !hasVideoFrame {
return ErrSkip
}
return
}

157
pkg/raw_test.go Normal file
View File

@@ -0,0 +1,157 @@
package pkg
import (
"testing"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/util"
)
func TestH26xFrame_Parse_VideoFrameDetection(t *testing.T) {
// Test H264 IDR Picture (should not skip)
t.Run("H264_IDR_Picture", func(t *testing.T) {
frame := &H26xFrame{
FourCC: codec.FourCC_H264,
Nalus: []util.Memory{
util.NewMemory([]byte{0x65}), // IDR Picture NALU type
},
}
track := &AVTrack{}
err := frame.Parse(track)
if err == ErrSkip {
t.Error("Expected H264 IDR frame to not be skipped, but got ErrSkip")
}
if !track.Value.IDR {
t.Error("Expected IDR flag to be set for H264 IDR frame")
}
})
// Test H264 Non-IDR Picture (should not skip)
t.Run("H264_Non_IDR_Picture", func(t *testing.T) {
frame := &H26xFrame{
FourCC: codec.FourCC_H264,
Nalus: []util.Memory{
util.NewMemory([]byte{0x21}), // Non-IDR Picture NALU type
},
}
track := &AVTrack{}
err := frame.Parse(track)
if err == ErrSkip {
t.Error("Expected H264 Non-IDR frame to not be skipped, but got ErrSkip")
}
})
// Test H264 metadata only (should skip)
t.Run("H264_SPS_Only", func(t *testing.T) {
frame := &H26xFrame{
FourCC: codec.FourCC_H264,
Nalus: []util.Memory{
util.NewMemory([]byte{0x67}), // SPS NALU type
},
}
track := &AVTrack{}
err := frame.Parse(track)
if err != ErrSkip {
t.Errorf("Expected H264 SPS-only frame to be skipped, but got: %v", err)
}
})
// Test H264 PPS only (should skip)
t.Run("H264_PPS_Only", func(t *testing.T) {
frame := &H26xFrame{
FourCC: codec.FourCC_H264,
Nalus: []util.Memory{
util.NewMemory([]byte{0x68}), // PPS NALU type
},
}
track := &AVTrack{}
err := frame.Parse(track)
if err != ErrSkip {
t.Errorf("Expected H264 PPS-only frame to be skipped, but got: %v", err)
}
})
// Test H265 IDR slice (should not skip)
t.Run("H265_IDR_Slice", func(t *testing.T) {
frame := &H26xFrame{
FourCC: codec.FourCC_H265,
Nalus: []util.Memory{
util.NewMemory([]byte{0x4E, 0x01}), // IDR_W_RADL slice type (19 << 1 = 38 = 0x26, so first byte should be 0x4C, but let's use a simpler approach)
// Using NAL_UNIT_CODED_SLICE_IDR_W_RADL which should be type 19
},
}
track := &AVTrack{}
// Let's use the correct byte pattern for H265 IDR slice
// NAL_UNIT_CODED_SLICE_IDR_W_RADL = 19
// H265 header: (type << 1) | layer_id_bit
idrSliceByte := byte(19 << 1) // 19 * 2 = 38 = 0x26
frame.Nalus[0] = util.NewMemory([]byte{idrSliceByte})
err := frame.Parse(track)
if err == ErrSkip {
t.Error("Expected H265 IDR slice to not be skipped, but got ErrSkip")
}
if !track.Value.IDR {
t.Error("Expected IDR flag to be set for H265 IDR slice")
}
})
// Test H265 metadata only (should skip)
t.Run("H265_VPS_Only", func(t *testing.T) {
frame := &H26xFrame{
FourCC: codec.FourCC_H265,
Nalus: []util.Memory{
util.NewMemory([]byte{0x40, 0x01}), // VPS NALU type (32 << 1 = 64 = 0x40)
},
}
track := &AVTrack{}
err := frame.Parse(track)
if err != ErrSkip {
t.Errorf("Expected H265 VPS-only frame to be skipped, but got: %v", err)
}
})
// Test mixed H264 frame with SPS and IDR (should not skip)
t.Run("H264_Mixed_SPS_And_IDR", func(t *testing.T) {
frame := &H26xFrame{
FourCC: codec.FourCC_H264,
Nalus: []util.Memory{
util.NewMemory([]byte{0x67}), // SPS NALU type
util.NewMemory([]byte{0x65}), // IDR Picture NALU type
},
}
track := &AVTrack{}
err := frame.Parse(track)
if err == ErrSkip {
t.Error("Expected H264 mixed SPS+IDR frame to not be skipped, but got ErrSkip")
}
if !track.Value.IDR {
t.Error("Expected IDR flag to be set for H264 mixed frame with IDR")
}
})
// Test mixed H265 frame with VPS and IDR (should not skip)
t.Run("H265_Mixed_VPS_And_IDR", func(t *testing.T) {
frame := &H26xFrame{
FourCC: codec.FourCC_H265,
Nalus: []util.Memory{
util.NewMemory([]byte{0x40, 0x01}), // VPS NALU type (32 << 1)
util.NewMemory([]byte{0x4C, 0x01}), // IDR_W_RADL slice type (19 << 1)
},
}
track := &AVTrack{}
// Fix the IDR slice byte for H265
idrSliceByte := byte(19 << 1) // NAL_UNIT_CODED_SLICE_IDR_W_RADL = 19
frame.Nalus[1] = util.NewMemory([]byte{idrSliceByte, 0x01})
err := frame.Parse(track)
if err == ErrSkip {
t.Error("Expected H265 mixed VPS+IDR frame to not be skipped, but got ErrSkip")
}
if !track.Value.IDR {
t.Error("Expected IDR flag to be set for H265 mixed frame with IDR")
}
})
}

View File

@@ -32,14 +32,15 @@ func GetNextTaskID() uint32 {
// Job include tasks
type Job struct {
Task
cases []reflect.SelectCase
addSub chan ITask
children []ITask
lazyRun sync.Once
eventLoopLock sync.Mutex
childrenDisposed chan struct{}
childDisposeListeners []func(ITask)
blocked ITask
cases []reflect.SelectCase
addSub chan ITask
children []ITask
lazyRun sync.Once
eventLoopLock sync.Mutex
childrenDisposed chan struct{}
descendantsDisposeListeners []func(ITask)
descendantsStartListeners []func(ITask)
blocked ITask
}
func (*Job) GetTaskType() TaskType {
@@ -55,6 +56,7 @@ func (mt *Job) Blocked() ITask {
}
func (mt *Job) waitChildrenDispose() {
blocked := mt.blocked
defer func() {
// 忽略由于在任务关闭过程中可能存在竞态条件,当父任务关闭时子任务可能已经被释放。
if err := recover(); err != nil {
@@ -63,17 +65,17 @@ func (mt *Job) waitChildrenDispose() {
mt.addSub <- nil
<-mt.childrenDisposed
}()
if blocked := mt.blocked; blocked != nil {
if blocked != nil {
blocked.Stop(mt.StopReason())
}
}
func (mt *Job) OnChildDispose(listener func(ITask)) {
mt.childDisposeListeners = append(mt.childDisposeListeners, listener)
func (mt *Job) OnDescendantsDispose(listener func(ITask)) {
mt.descendantsDisposeListeners = append(mt.descendantsDisposeListeners, listener)
}
func (mt *Job) onDescendantsDispose(descendants ITask) {
for _, listener := range mt.childDisposeListeners {
for _, listener := range mt.descendantsDisposeListeners {
listener(descendants)
}
if mt.parent != nil {
@@ -82,11 +84,28 @@ func (mt *Job) onDescendantsDispose(descendants ITask) {
}
func (mt *Job) onChildDispose(child ITask) {
if child.getParent() == mt {
if child.GetTaskType() != TASK_TYPE_CALL || child.GetOwnerType() != "CallBack" {
mt.onDescendantsDispose(child)
}
child.dispose()
if child.GetTaskType() != TASK_TYPE_CALL || child.GetOwnerType() != "CallBack" {
mt.onDescendantsDispose(child)
}
child.dispose()
}
func (mt *Job) OnDescendantsStart(listener func(ITask)) {
mt.descendantsStartListeners = append(mt.descendantsStartListeners, listener)
}
func (mt *Job) onDescendantsStart(descendants ITask) {
for _, listener := range mt.descendantsStartListeners {
listener(descendants)
}
if mt.parent != nil {
mt.parent.onDescendantsStart(descendants)
}
}
func (mt *Job) onChildStart(child ITask) {
if child.GetTaskType() != TASK_TYPE_CALL || child.GetOwnerType() != "CallBack" {
mt.onDescendantsStart(child)
}
}
@@ -163,9 +182,7 @@ func (mt *Job) AddTask(t ITask, opt ...any) (task *Task) {
return
}
if len(mt.addSub) > 10 {
if mt.Logger != nil {
mt.Warn("task wait list too many", "count", len(mt.addSub), "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType(), "parent", mt.GetOwnerType())
}
mt.Warn("task wait list too many", "count", len(mt.addSub), "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType(), "parent", mt.GetOwnerType())
}
mt.addSub <- t
return
@@ -188,9 +205,7 @@ func (mt *Job) run() {
defer func() {
err := recover()
if err != nil {
if mt.Logger != nil {
mt.Logger.Error("job panic", "err", err, "stack", string(debug.Stack()))
}
mt.Error("job panic", "err", err, "stack", string(debug.Stack()))
if !ThrowPanic {
mt.Stop(errors.Join(err.(error), ErrPanic))
} else {
@@ -209,11 +224,13 @@ func (mt *Job) run() {
mt.blocked = nil
if chosen, rev, ok := reflect.Select(mt.cases); chosen == 0 {
if rev.IsNil() {
mt.Debug("job addSub channel closed, exiting", "taskId", mt.GetTaskID())
return
}
if mt.blocked = rev.Interface().(ITask); mt.blocked.getParent() != mt || mt.blocked.start() {
if mt.blocked = rev.Interface().(ITask); mt.blocked.start() {
mt.children = append(mt.children, mt.blocked)
mt.cases = append(mt.cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(mt.blocked.GetSignal())})
mt.onChildStart(mt.blocked)
}
} else {
taskIndex := chosen - 1
@@ -236,6 +253,7 @@ func (mt *Job) run() {
if mt.onChildDispose(mt.blocked); mt.blocked.checkRetry(mt.blocked.StopReason()) {
if mt.blocked.reset(); mt.blocked.start() {
mt.cases[chosen].Chan = reflect.ValueOf(mt.blocked.GetSignal())
mt.onChildStart(mt.blocked)
continue
}
}

View File

@@ -24,15 +24,11 @@ func (m *Manager[K, T]) Add(ctx T, opt ...any) *Task {
ctx.Stop(ErrExist)
return
}
if m.Logger != nil {
m.Logger.Debug("add", "key", ctx.GetKey(), "count", m.Length)
}
m.Debug("add", "key", ctx.GetKey(), "count", m.Length)
})
ctx.OnDispose(func() {
m.Remove(ctx)
if m.Logger != nil {
m.Logger.Debug("remove", "key", ctx.GetKey(), "count", m.Length)
}
m.Debug("remove", "key", ctx.GetKey(), "count", m.Length)
})
return m.AddTask(ctx, opt...)
}

View File

@@ -7,6 +7,7 @@ import (
"log/slog"
"maps"
"reflect"
"runtime"
"runtime/debug"
"strings"
"sync"
@@ -53,7 +54,6 @@ type (
ITask interface {
context.Context
keepalive() bool
getParent() *Job
GetParent() ITask
GetTask() *Task
GetTaskID() uint32
@@ -85,7 +85,8 @@ type (
getJob() *Job
AddTask(ITask, ...any) *Task
RangeSubTask(func(yield ITask) bool)
OnChildDispose(func(ITask))
OnDescendantsDispose(func(ITask))
OnDescendantsStart(func(ITask))
Blocked() ITask
Call(func() error, ...any)
Post(func() error, ...any) *Task
@@ -117,7 +118,7 @@ type (
ID uint32
StartTime time.Time
StartReason string
*slog.Logger
Logger *slog.Logger
context.Context
context.CancelCauseFunc
handler ITask
@@ -178,10 +179,6 @@ func (task *Task) GetTaskPointer() uintptr {
return uintptr(unsafe.Pointer(task))
}
func (task *Task) getParent() *Job {
return task.parent
}
func (task *Task) GetKey() uint32 {
return task.ID
}
@@ -202,7 +199,11 @@ func (task *Task) WaitStopped() (err error) {
}
func (task *Task) Trace(msg string, fields ...any) {
task.Log(task.Context, TraceLevel, msg, fields...)
if task.Logger == nil {
slog.Default().Log(task.Context, TraceLevel, msg, fields...)
return
}
task.Logger.Log(task.Context, TraceLevel, msg, fields...)
}
func (task *Task) IsStopped() bool {
@@ -229,8 +230,9 @@ func (task *Task) Stop(err error) {
panic("task stop with nil error")
}
if task.CancelCauseFunc != nil {
if tt := task.handler.GetTaskType(); task.Logger != nil && tt != TASK_TYPE_CALL {
task.Debug("task stop", "reason", err, "elapsed", time.Since(task.StartTime), "taskId", task.ID, "taskType", tt, "ownerType", task.GetOwnerType())
if tt := task.handler.GetTaskType(); tt != TASK_TYPE_CALL {
_, file, line, _ := runtime.Caller(1)
task.Debug("task stop", "caller", fmt.Sprintf("%s:%d", strings.TrimPrefix(file, sourceFilePathPrefix), line), "reason", err, "elapsed", time.Since(task.StartTime), "taskId", task.ID, "taskType", tt, "ownerType", task.GetOwnerType())
}
task.CancelCauseFunc(err)
}
@@ -268,12 +270,10 @@ func (task *Task) checkRetry(err error) bool {
if task.retry.MaxRetry < 0 || task.retry.RetryCount < task.retry.MaxRetry {
task.retry.RetryCount++
task.SetDescription("retryCount", task.retry.RetryCount)
if task.Logger != nil {
if task.retry.MaxRetry < 0 {
task.Warn(fmt.Sprintf("retry %d/∞", task.retry.RetryCount), "taskId", task.ID)
} else {
task.Warn(fmt.Sprintf("retry %d/%d", task.retry.RetryCount, task.retry.MaxRetry), "taskId", task.ID)
}
if task.retry.MaxRetry < 0 {
task.Warn(fmt.Sprintf("retry %d/∞", task.retry.RetryCount), "taskId", task.ID)
} else {
task.Warn(fmt.Sprintf("retry %d/%d", task.retry.RetryCount, task.retry.MaxRetry), "taskId", task.ID)
}
if delta := time.Since(task.StartTime); delta < task.retry.RetryInterval {
time.Sleep(task.retry.RetryInterval - delta)
@@ -281,9 +281,7 @@ func (task *Task) checkRetry(err error) bool {
return true
} else {
if task.retry.MaxRetry > 0 {
if task.Logger != nil {
task.Warn(fmt.Sprintf("max retry %d failed", task.retry.MaxRetry))
}
task.Warn(fmt.Sprintf("max retry %d failed", task.retry.MaxRetry))
return false
}
}
@@ -296,15 +294,13 @@ func (task *Task) start() bool {
defer func() {
if r := recover(); r != nil {
err = errors.New(fmt.Sprint(r))
if task.Logger != nil {
task.Error("panic", "error", err, "stack", string(debug.Stack()))
}
task.Error("panic", "error", err, "stack", string(debug.Stack()))
}
}()
}
for {
task.StartTime = time.Now()
if tt := task.handler.GetTaskType(); task.Logger != nil && tt != TASK_TYPE_CALL {
if tt := task.handler.GetTaskType(); tt != TASK_TYPE_CALL {
task.Debug("task start", "taskId", task.ID, "taskType", tt, "ownerType", task.GetOwnerType(), "reason", task.StartReason)
}
task.state = TASK_STATE_STARTING
@@ -326,9 +322,7 @@ func (task *Task) start() bool {
task.ResetRetryCount()
if runHandler, ok := task.handler.(TaskBlock); ok {
task.state = TASK_STATE_RUNNING
if task.Logger != nil {
task.Debug("task run", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType())
}
task.Debug("task run", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType())
err = runHandler.Run()
if err == nil {
err = ErrTaskComplete
@@ -339,9 +333,7 @@ func (task *Task) start() bool {
if err == nil {
if goHandler, ok := task.handler.(TaskGo); ok {
task.state = TASK_STATE_GOING
if task.Logger != nil {
task.Debug("task go", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType())
}
task.Debug("task go", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType())
go task.run(goHandler.Go)
}
return true
@@ -388,19 +380,17 @@ func (task *Task) SetDescriptions(value Description) {
func (task *Task) dispose() {
taskType, ownerType := task.handler.GetTaskType(), task.GetOwnerType()
if task.state < TASK_STATE_STARTED {
if task.Logger != nil && taskType != TASK_TYPE_CALL {
if taskType != TASK_TYPE_CALL {
task.Debug("task dispose canceled", "taskId", task.ID, "taskType", taskType, "ownerType", ownerType, "state", task.state)
}
return
}
reason := task.StopReason()
task.state = TASK_STATE_DISPOSING
if task.Logger != nil {
if taskType != TASK_TYPE_CALL {
yargs := []any{"reason", reason, "taskId", task.ID, "taskType", taskType, "ownerType", ownerType}
task.Debug("task dispose", yargs...)
defer task.Debug("task disposed", yargs...)
}
if taskType != TASK_TYPE_CALL {
yargs := []any{"reason", reason, "taskId", task.ID, "taskType", taskType, "ownerType", ownerType}
task.Debug("task dispose", yargs...)
defer task.Debug("task disposed", yargs...)
}
befores := len(task.beforeDisposeListeners)
for i, listener := range task.beforeDisposeListeners {
@@ -435,15 +425,17 @@ func (task *Task) ResetRetryCount() {
task.retry.RetryCount = 0
}
func (task *Task) GetRetryCount() int {
return task.retry.RetryCount
}
func (task *Task) run(handler func() error) {
var err error
defer func() {
if !ThrowPanic {
if r := recover(); r != nil {
err = errors.New(fmt.Sprint(r))
if task.Logger != nil {
task.Error("panic", "error", err, "stack", string(debug.Stack()))
}
task.Error("panic", "error", err, "stack", string(debug.Stack()))
}
}
if err == nil {
@@ -454,3 +446,39 @@ func (task *Task) run(handler func() error) {
}()
err = handler()
}
// Debug logs msg and args at debug level. When the task has no logger of its
// own, the message goes to the process-wide default slog logger instead.
func (task *Task) Debug(msg string, args ...any) {
	if logger := task.Logger; logger != nil {
		logger.Debug(msg, args...)
	} else {
		slog.Default().Debug(msg, args...)
	}
}
// Info logs msg and args at info level. When the task has no logger of its
// own, the message goes to the process-wide default slog logger instead.
func (task *Task) Info(msg string, args ...any) {
	if logger := task.Logger; logger != nil {
		logger.Info(msg, args...)
	} else {
		slog.Default().Info(msg, args...)
	}
}
// Warn logs msg and args at warn level. When the task has no logger of its
// own, the message goes to the process-wide default slog logger instead.
func (task *Task) Warn(msg string, args ...any) {
	if logger := task.Logger; logger != nil {
		logger.Warn(msg, args...)
	} else {
		slog.Default().Warn(msg, args...)
	}
}
// Error logs msg and args at error level. When the task has no logger of its
// own, the message goes to the process-wide default slog logger instead.
func (task *Task) Error(msg string, args ...any) {
	if logger := task.Logger; logger != nil {
		logger.Error(msg, args...)
	} else {
		slog.Default().Error(msg, args...)
	}
}
// TraceEnabled reports whether trace-level logging is enabled for this task.
// Unlike the original, it tolerates a nil task.Logger by consulting the
// default slog logger — matching the nil-safe fallback used by Debug/Info/
// Warn/Error and avoiding a nil-pointer panic.
func (task *Task) TraceEnabled() bool {
	if task.Logger == nil {
		return slog.Default().Enabled(task.Context, TraceLevel)
	}
	return task.Logger.Enabled(task.Context, TraceLevel)
}

View File

@@ -142,6 +142,26 @@ func Test_Hooks(t *testing.T) {
root.AddTask(&task).WaitStopped()
}
// startFailTask is a test helper task whose Start always fails.
type startFailTask struct {
	Task
}
// Start always returns an error, driving the task down its failure path.
func (task *startFailTask) Start() error {
	return errors.New("start failed")
}
// Dispose logs that the task is being disposed. It uses the nil-safe Info
// helper rather than dereferencing task.Logger directly, so a task that
// never had a logger assigned (e.g. because Start failed early) does not
// panic during cleanup.
func (task *startFailTask) Dispose() {
	task.Info("Dispose")
}
// Test_StartFail verifies that a task whose Start returns an error surfaces
// that error through WaitStarted.
func Test_StartFail(t *testing.T) {
	var task startFailTask
	root.AddTask(&task)
	if err := task.WaitStarted(); err == nil {
		t.Errorf("expected start to fail")
	}
}
//
//type DemoTask struct {
// Task

View File

@@ -2,33 +2,55 @@ package util
import (
"errors"
"sync"
"unsafe"
)
type Buddy struct {
size int
longests []int
size int
longests [BuddySize>>(MinPowerOf2-1) - 1]int
memoryPool [BuddySize]byte
poolStart int64
lock sync.Mutex // 保护 longests 数组的并发访问
}
var (
InValidParameterErr = errors.New("buddy: invalid parameter")
NotFoundErr = errors.New("buddy: can't find block")
buddyPool = sync.Pool{
New: func() interface{} {
return NewBuddy()
},
}
)
// GetBuddy 从池中获取一个 Buddy 实例
func GetBuddy() *Buddy {
buddy := buddyPool.Get().(*Buddy)
return buddy
}
// PutBuddy 将 Buddy 实例放回池中
func PutBuddy(b *Buddy) {
buddyPool.Put(b)
}
// NewBuddy creates a buddy instance.
// If the parameter isn't valid, return the nil and error as well
func NewBuddy(size int) *Buddy {
if !isPowerOf2(size) {
size = fixSize(size)
func NewBuddy() *Buddy {
size := BuddySize >> MinPowerOf2
ret := &Buddy{
size: size,
}
nodeCount := 2*size - 1
longests := make([]int, nodeCount)
for nodeSize, i := 2*size, 0; i < nodeCount; i++ {
for nodeSize, i := 2*size, 0; i < len(ret.longests); i++ {
if isPowerOf2(i + 1) {
nodeSize /= 2
}
longests[i] = nodeSize
ret.longests[i] = nodeSize
}
return &Buddy{size, longests}
ret.poolStart = int64(uintptr(unsafe.Pointer(&ret.memoryPool[0])))
return ret
}
// Alloc find a unused block according to the size
@@ -42,6 +64,8 @@ func (b *Buddy) Alloc(size int) (offset int, err error) {
if !isPowerOf2(size) {
size = fixSize(size)
}
b.lock.Lock()
defer b.lock.Unlock()
if size > b.longests[0] {
err = NotFoundErr
return
@@ -70,6 +94,8 @@ func (b *Buddy) Free(offset int) error {
if offset < 0 || offset >= b.size {
return InValidParameterErr
}
b.lock.Lock()
defer b.lock.Unlock()
nodeSize := 1
index := offset + b.size - 1
for ; b.longests[index] != 0; index = parent(index) {

View File

@@ -3,11 +3,9 @@
package util
import (
"container/list"
"fmt"
"io"
"slices"
"sync"
"unsafe"
)
@@ -58,53 +56,59 @@ func (r *RecyclableMemory) Recycle() {
}
}
var (
memoryPool [BuddySize]byte
buddy = NewBuddy(BuddySize >> MinPowerOf2)
lock sync.Mutex
poolStart = int64(uintptr(unsafe.Pointer(&memoryPool[0])))
blockPool = list.New()
//EnableCheckSize bool = false
)
type MemoryAllocator struct {
allocator *Allocator
start int64
memory []byte
Size int
buddy *Buddy
}
// createMemoryAllocator builds a MemoryAllocator whose backing storage is the
// [offset, offset+size) window of the given buddy's memory pool; start is the
// absolute address of that window derived from the buddy's poolStart.
func createMemoryAllocator(size int, buddy *Buddy, offset int) *MemoryAllocator {
	ret := &MemoryAllocator{
		allocator: NewAllocator(size),
		buddy:     buddy,
		Size:      size,
		memory:    buddy.memoryPool[offset : offset+size],
		start:     buddy.poolStart + int64(offset),
	}
	ret.allocator.Init(size)
	return ret
}
func GetMemoryAllocator(size int) (ret *MemoryAllocator) {
lock.Lock()
offset, err := buddy.Alloc(size >> MinPowerOf2)
if blockPool.Len() > 0 {
ret = blockPool.Remove(blockPool.Front()).(*MemoryAllocator)
} else {
ret = &MemoryAllocator{
allocator: NewAllocator(size),
if size < BuddySize {
requiredSize := size >> MinPowerOf2
// 循环尝试从池中获取可用的 buddy
for {
buddy := GetBuddy()
offset, err := buddy.Alloc(requiredSize)
PutBuddy(buddy)
if err == nil {
// 分配成功,使用这个 buddy
return createMemoryAllocator(size, buddy, offset<<MinPowerOf2)
}
}
}
lock.Unlock()
ret.Size = size
ret.allocator.Init(size)
if err != nil {
ret.memory = make([]byte, size)
ret.start = int64(uintptr(unsafe.Pointer(&ret.memory[0])))
return
// 池中的 buddy 都无法分配或大小不够,使用系统内存
memory := make([]byte, size)
start := int64(uintptr(unsafe.Pointer(&memory[0])))
return &MemoryAllocator{
allocator: NewAllocator(size),
Size: size,
memory: memory,
start: start,
}
offset = offset << MinPowerOf2
ret.memory = memoryPool[offset : offset+size]
ret.start = poolStart + int64(offset)
return
}
func (ma *MemoryAllocator) Recycle() {
ma.allocator.Recycle()
lock.Lock()
blockPool.PushBack(ma)
_ = buddy.Free(int((poolStart - ma.start) >> MinPowerOf2))
if ma.buddy != nil {
_ = ma.buddy.Free(int((ma.buddy.poolStart - ma.start) >> MinPowerOf2))
ma.buddy = nil
}
ma.memory = nil
lock.Unlock()
}
func (ma *MemoryAllocator) Find(size int) (memory []byte) {

229
plugin.go
View File

@@ -133,24 +133,9 @@ func (plugin *PluginMeta) Init(s *Server, userConfig map[string]any) (p *Plugin)
finalConfig, _ := yaml.Marshal(p.Config.GetMap())
p.Logger.Handler().(*MultiLogHandler).SetLevel(ParseLevel(p.config.LogLevel))
p.Debug("config", "detail", string(finalConfig))
if s.DisableAll {
p.Disabled = true
}
if userConfig["enable"] == false {
p.Disabled = true
} else if userConfig["enable"] == true {
p.Disabled = false
}
if p.Disabled {
if userConfig["enable"] == false || (s.DisableAll && userConfig["enable"] != true) {
p.disable("config")
p.Warn("plugin disabled")
return
} else {
var handlers map[string]http.HandlerFunc
if v, ok := instance.(IRegisterHandler); ok {
handlers = v.RegisterHandler()
}
p.registerHandler(handlers)
}
p.Info("init", "version", plugin.Version)
var err error
@@ -158,7 +143,7 @@ func (plugin *PluginMeta) Init(s *Server, userConfig map[string]any) (p *Plugin)
p.DB = s.DB
} else if p.config.DSN != "" {
if factory, ok := db.Factory[p.config.DBType]; ok {
s.DB, err = gorm.Open(factory(p.config.DSN), &gorm.Config{})
p.DB, err = gorm.Open(factory(p.config.DSN), &gorm.Config{})
if err != nil {
s.Error("failed to connect database", "error", err, "dsn", s.config.DSN, "type", s.config.DBType)
p.disable(fmt.Sprintf("database %v", err))
@@ -171,8 +156,21 @@ func (plugin *PluginMeta) Init(s *Server, userConfig map[string]any) (p *Plugin)
p.disable(fmt.Sprintf("auto migrate record stream failed %v", err))
return
}
if err = p.DB.AutoMigrate(&EventRecordStream{}); err != nil {
p.disable(fmt.Sprintf("auto migrate event record stream failed %v", err))
return
}
}
s.AddTask(instance)
if err := s.AddTask(instance).WaitStarted(); err != nil {
p.disable(instance.StopReason().Error())
return
}
var handlers map[string]http.HandlerFunc
if v, ok := instance.(IRegisterHandler); ok {
handlers = v.RegisterHandler()
}
p.registerHandler(handlers)
s.Plugins.Add(p)
return
}
@@ -277,11 +275,19 @@ func (p *Plugin) GetPublicIP(netcardIP string) string {
func (p *Plugin) disable(reason string) {
p.Disabled = true
p.SetDescription("disableReason", reason)
p.Warn("plugin disabled")
p.Server.disabledPlugins = append(p.Server.disabledPlugins, p)
}
func (p *Plugin) Start() (err error) {
s := p.Server
if err = p.listen(); err != nil {
return
}
if err = p.handler.OnInit(); err != nil {
return
}
if p.Meta.ServiceDesc != nil && s.grpcServer != nil {
s.grpcServer.RegisterService(p.Meta.ServiceDesc, p.handler)
if p.Meta.RegisterGRPCHandler != nil {
@@ -293,15 +299,6 @@ func (p *Plugin) Start() (err error) {
}
}
}
s.Plugins.Add(p)
if err = p.listen(); err != nil {
p.disable(fmt.Sprintf("listen %v", err))
return
}
if err = p.handler.OnInit(); err != nil {
p.disable(fmt.Sprintf("init %v", err))
return
}
if p.config.Hook != nil {
if hook, ok := p.config.Hook[config.HookOnServerKeepAlive]; ok && hook.Interval > 0 {
p.AddTask(&ServerKeepAliveTask{plugin: p})
@@ -386,13 +383,13 @@ type WebHookTask struct {
task.Task
plugin *Plugin
hookType config.HookType
conf *config.Webhook
conf config.Webhook
data any
jsonData []byte
}
func (t *WebHookTask) Start() error {
if t.conf == nil || t.conf.URL == "" {
if t.conf.URL == "" {
return task.ErrTaskComplete
}
@@ -437,11 +434,11 @@ func (t *WebHookTask) Go() error {
return err
}
func (p *Plugin) SendWebhook(hookType config.HookType, conf config.Webhook, data any) *task.Task {
func (p *Plugin) SendWebhook(hookType config.HookType, data any) *task.Task {
webhookTask := &WebHookTask{
plugin: p,
hookType: hookType,
conf: &conf,
conf: p.config.Hook[hookType],
data: data,
}
return p.AddTask(webhookTask)
@@ -560,10 +557,31 @@ func (p *Plugin) PublishWithConfig(ctx context.Context, streamPath string, conf
}
err = p.Server.Streams.AddTask(publisher, ctx).WaitStarted()
if err == nil {
publisher.OnDispose(func() {
p.sendPublishEndWebhook(publisher)
})
p.sendPublishWebhook(publisher)
if sender := p.getHookSender(config.HookOnPublishEnd); sender != nil {
publisher.OnDispose(func() {
webhookData := map[string]interface{}{
"event": config.HookOnPublishEnd,
"streamPath": publisher.StreamPath,
"publishId": publisher.ID,
"reason": publisher.StopReason().Error(),
"timestamp": time.Now().Unix(),
}
sender(config.HookOnPublishEnd, webhookData)
})
}
if sender := p.getHookSender(config.HookOnPublishStart); sender != nil {
webhookData := map[string]interface{}{
"event": config.HookOnPublishStart,
"streamPath": publisher.StreamPath,
"args": publisher.Args,
"publishId": publisher.ID,
"remoteAddr": publisher.RemoteAddr,
"type": publisher.Type,
"pluginName": p.Meta.Name,
"timestamp": time.Now().Unix(),
}
sender(config.HookOnPublishStart, webhookData)
}
}
return
}
@@ -601,10 +619,34 @@ func (p *Plugin) SubscribeWithConfig(ctx context.Context, streamPath string, con
}
}
if err == nil {
subscriber.OnDispose(func() {
p.sendSubscribeEndWebhook(subscriber)
})
p.sendSubscribeWebhook(subscriber)
if sender := p.getHookSender(config.HookOnSubscribeEnd); sender != nil {
subscriber.OnDispose(func() {
webhookData := map[string]interface{}{
"event": config.HookOnSubscribeEnd,
"streamPath": subscriber.StreamPath,
"subscriberId": subscriber.ID,
"reason": subscriber.StopReason().Error(),
"timestamp": time.Now().Unix(),
}
if subscriber.Publisher != nil {
webhookData["publishId"] = subscriber.Publisher.ID
}
sender(config.HookOnSubscribeEnd, webhookData)
})
}
if sender := p.getHookSender(config.HookOnSubscribeStart); sender != nil {
webhookData := map[string]interface{}{
"event": config.HookOnSubscribeStart,
"streamPath": subscriber.StreamPath,
"publishId": subscriber.Publisher.ID,
"subscriberId": subscriber.ID,
"remoteAddr": subscriber.RemoteAddr,
"type": subscriber.Type,
"args": subscriber.Args,
"timestamp": time.Now().Unix(),
}
sender(config.HookOnSubscribeStart, webhookData)
}
}
return
}
@@ -715,90 +757,17 @@ func (p *Plugin) handle(pattern string, handler http.Handler) {
p.Server.apiList = append(p.Server.apiList, pattern)
}
func (p *Plugin) sendPublishWebhook(pub *Publisher) {
if p.config.Hook == nil {
return
func (p *Plugin) getHookSender(hookType config.HookType) (sender func(hookType config.HookType, data any) *task.Task) {
if p.config.Hook != nil {
if _, ok := p.config.Hook[hookType]; ok {
sender = p.SendWebhook
} else if p.Server.config.Hook != nil {
if _, ok := p.Server.config.Hook[hookType]; ok {
sender = p.Server.SendWebhook
}
}
}
webhookData := map[string]interface{}{
"event": "publish",
"streamPath": pub.StreamPath,
"args": pub.Args,
"publishId": pub.ID,
"remoteAddr": pub.RemoteAddr,
"type": pub.Type,
"pluginName": p.Meta.Name,
"timestamp": time.Now().Unix(),
}
p.SendWebhook(config.HookOnPublish, p.config.Hook[config.HookOnPublish], webhookData)
if p.Server.config.Hook == nil {
return
}
p.Server.SendWebhook(config.HookOnPublish, p.Server.config.Hook[config.HookOnPublish], webhookData)
}
func (p *Plugin) sendPublishEndWebhook(pub *Publisher) {
if p.config.Hook == nil {
return
}
webhookData := map[string]interface{}{
"event": "publish_end",
"streamPath": pub.StreamPath,
"publishId": pub.ID,
"reason": pub.StopReason().Error(),
"timestamp": time.Now().Unix(),
}
p.SendWebhook(config.HookOnPublishEnd, p.config.Hook[config.HookOnPublishEnd], webhookData)
}
func (p *Plugin) sendSubscribeWebhook(sub *Subscriber) {
if p.config.Hook == nil {
return
}
webhookData := map[string]interface{}{
"event": "subscribe",
"streamPath": sub.StreamPath,
"publishId": sub.Publisher.ID,
"subscriberId": sub.ID,
"remoteAddr": sub.RemoteAddr,
"type": sub.Type,
"args": sub.Args,
"timestamp": time.Now().Unix(),
}
p.SendWebhook(config.HookOnSubscribe, p.config.Hook[config.HookOnSubscribe], webhookData)
}
func (p *Plugin) sendSubscribeEndWebhook(sub *Subscriber) {
if p.config.Hook == nil {
return
}
webhookData := map[string]interface{}{
"event": "subscribe_end",
"streamPath": sub.StreamPath,
"subscriberId": sub.ID,
"reason": sub.StopReason().Error(),
"timestamp": time.Now().Unix(),
}
if sub.Publisher != nil {
webhookData["publishId"] = sub.Publisher.ID
}
p.SendWebhook(config.HookOnSubscribeEnd, p.config.Hook[config.HookOnSubscribeEnd], webhookData)
}
func (p *Plugin) sendServerKeepAliveWebhook() {
if p.config.Hook == nil {
return
}
s := p.Server
webhookData := map[string]interface{}{
"event": "server_keep_alive",
"timestamp": time.Now().Unix(),
"streams": s.Streams.Length,
"subscribers": s.Subscribers.Length,
"publisherCount": s.Streams.Length,
"subscriberCount": s.Subscribers.Length,
"uptime": time.Since(s.StartTime).Seconds(),
}
p.SendWebhook(config.HookOnServerKeepAlive, p.config.Hook[config.HookOnServerKeepAlive], webhookData)
return
}
type ServerKeepAliveTask struct {
@@ -811,5 +780,19 @@ func (t *ServerKeepAliveTask) GetTickInterval() time.Duration {
}
func (t *ServerKeepAliveTask) Tick(now any) {
t.plugin.sendServerKeepAliveWebhook()
sender := t.plugin.getHookSender(config.HookOnServerKeepAlive)
if sender == nil {
return
}
s := t.plugin.Server
webhookData := map[string]interface{}{
"event": config.HookOnServerKeepAlive,
"timestamp": time.Now().Unix(),
"streams": s.Streams.Length,
"subscribers": s.Subscribers.Length,
"publisherCount": s.Streams.Length,
"subscriberCount": s.Subscribers.Length,
"uptime": time.Since(s.StartTime).Seconds(),
}
sender(config.HookOnServerKeepAlive, webhookData)
}

990
plugin/crontab/api.go Normal file
View File

@@ -0,0 +1,990 @@
package plugin_crontab
import (
"context"
"fmt"
"sort"
"strings"
"time"
"google.golang.org/protobuf/types/known/timestamppb"
cronpb "m7s.live/v5/plugin/crontab/pb"
"m7s.live/v5/plugin/crontab/pkg"
)
// List returns one page of record plans from the in-memory plan set.
func (ct *CrontabPlugin) List(ctx context.Context, req *cronpb.ReqPlanList) (*cronpb.PlanResponseList, error) {
	// Normalize paging parameters.
	if req.PageNum < 1 {
		req.PageNum = 1
	}
	if req.PageSize < 1 {
		req.PageSize = 10
	}
	plans := ct.recordPlans.Items
	total := len(plans)
	// Clamp the page window [lo, hi) to the available range.
	lo := int(req.PageNum-1) * int(req.PageSize)
	hi := lo + int(req.PageSize)
	if lo > total {
		lo = total
	}
	if hi > total {
		hi = total
	}
	page := plans[lo:hi]
	data := make([]*cronpb.Plan, len(page))
	for i, plan := range page {
		data[i] = &cronpb.Plan{
			Id:         uint32(plan.ID),
			Name:       plan.Name,
			Enable:     plan.Enable,
			CreateTime: timestamppb.New(plan.CreatedAt),
			UpdateTime: timestamppb.New(plan.UpdatedAt),
			Plan:       plan.Plan,
		}
	}
	return &cronpb.PlanResponseList{
		Code:       0,
		Message:    "success",
		TotalCount: uint32(total),
		PageNum:    req.PageNum,
		PageSize:   req.PageSize,
		Data:       data,
	}, nil
}
// Add validates and persists a new record plan, then mirrors it into memory.
func (ct *CrontabPlugin) Add(ctx context.Context, req *cronpb.Plan) (*cronpb.Response, error) {
	// Validate required fields.
	if strings.TrimSpace(req.Name) == "" {
		return &cronpb.Response{Code: 400, Message: "name is required"}, nil
	}
	if strings.TrimSpace(req.Plan) == "" {
		return &cronpb.Response{Code: 400, Message: "plan is required"}, nil
	}
	// Reject duplicate plan names.
	var nameCount int64
	if err := ct.DB.Model(&pkg.RecordPlan{}).Where("name = ?", req.Name).Count(&nameCount).Error; err != nil {
		return &cronpb.Response{Code: 500, Message: err.Error()}, nil
	}
	if nameCount > 0 {
		return &cronpb.Response{Code: 400, Message: "name already exists"}, nil
	}
	// Persist the new plan.
	record := &pkg.RecordPlan{
		Name:   req.Name,
		Plan:   req.Plan,
		Enable: req.Enable,
	}
	if err := ct.DB.Create(record).Error; err != nil {
		return &cronpb.Response{Code: 500, Message: err.Error()}, nil
	}
	// Mirror into the in-memory collection.
	ct.recordPlans.Add(record)
	return &cronpb.Response{Code: 0, Message: "success"}, nil
}
// Update modifies an existing record plan (name/plan/enable), mirrors the
// change into memory, and — when the enable flag flips — starts or stops the
// crontab tasks for every stream bound to the plan.
//
// Fixes over the original:
//   - taking &stream of the range variable aliased every Crontab to the same
//     element on Go < 1.22; index into the slice instead.
//   - ct.Error is a slog-style API (msg, key/value pairs); the printf verb
//     "%v" was never formatted. Pass the error as a key/value pair.
func (ct *CrontabPlugin) Update(ctx context.Context, req *cronpb.Plan) (*cronpb.Response, error) {
	if req.Id == 0 {
		return &cronpb.Response{Code: 400, Message: "id is required"}, nil
	}
	// Validate required fields.
	if strings.TrimSpace(req.Name) == "" {
		return &cronpb.Response{Code: 400, Message: "name is required"}, nil
	}
	if strings.TrimSpace(req.Plan) == "" {
		return &cronpb.Response{Code: 400, Message: "plan is required"}, nil
	}
	// The plan must exist.
	var existingPlan pkg.RecordPlan
	if err := ct.DB.First(&existingPlan, req.Id).Error; err != nil {
		return &cronpb.Response{Code: 404, Message: "record not found"}, nil
	}
	// The new name must not collide with another plan.
	var count int64
	if err := ct.DB.Model(&pkg.RecordPlan{}).Where("name = ? AND id != ?", req.Name, req.Id).Count(&count).Error; err != nil {
		return &cronpb.Response{Code: 500, Message: err.Error()}, nil
	}
	if count > 0 {
		return &cronpb.Response{Code: 400, Message: "name already exists"}, nil
	}
	// Remember whether the enable flag flipped before applying the update.
	enableChanged := existingPlan.Enable != req.Enable
	updates := map[string]interface{}{
		"name":   req.Name,
		"plan":   req.Plan,
		"enable": req.Enable,
	}
	if err := ct.DB.Model(&existingPlan).Updates(updates).Error; err != nil {
		return &cronpb.Response{Code: 500, Message: err.Error()}, nil
	}
	// Mirror the update into the in-memory record.
	existingPlan.Name = req.Name
	existingPlan.Plan = req.Plan
	existingPlan.Enable = req.Enable
	ct.recordPlans.Set(&existingPlan)
	if enableChanged {
		if req.Enable {
			// false -> true: spin up a crontab task per bound stream.
			var streams []pkg.RecordPlanStream
			model := &pkg.RecordPlanStream{PlanID: existingPlan.ID}
			if err := ct.DB.Model(model).Where(model).Find(&streams).Error; err != nil {
				ct.Error("query record plan streams error", "error", err)
			} else {
				for i := range streams {
					// Index into the slice so each Crontab holds a pointer to
					// a distinct element (not the shared range variable).
					crontab := &Crontab{
						ctp:              ct,
						RecordPlan:       &existingPlan,
						RecordPlanStream: &streams[i],
					}
					crontab.OnStart(func() {
						ct.crontabs.Set(crontab)
					})
					ct.AddTask(crontab)
				}
			}
		} else {
			// true -> false: stop every crontab task belonging to this plan.
			ct.crontabs.Range(func(crontab *Crontab) bool {
				if crontab.RecordPlan.ID == existingPlan.ID {
					crontab.Stop(nil)
				}
				return true
			})
		}
	}
	return &cronpb.Response{Code: 0, Message: "success"}, nil
}
// Remove soft-deletes a record plan, stopping its crontab tasks first and
// evicting it from the in-memory set afterwards.
func (ct *CrontabPlugin) Remove(ctx context.Context, req *cronpb.DeleteRequest) (*cronpb.Response, error) {
	if req.Id == 0 {
		return &cronpb.Response{Code: 400, Message: "id is required"}, nil
	}
	// The plan must exist.
	var target pkg.RecordPlan
	if err := ct.DB.First(&target, req.Id).Error; err != nil {
		return &cronpb.Response{Code: 404, Message: "record not found"}, nil
	}
	// Stop every crontab task attached to this plan before deleting it.
	ct.crontabs.Range(func(job *Crontab) bool {
		if job.RecordPlan.ID == target.ID {
			job.Stop(nil)
		}
		return true
	})
	// Soft delete in the database.
	if err := ct.DB.Delete(&target).Error; err != nil {
		return &cronpb.Response{Code: 500, Message: err.Error()}, nil
	}
	// Drop the in-memory copy.
	ct.recordPlans.RemoveByKey(target.ID)
	return &cronpb.Response{Code: 0, Message: "success"}, nil
}
// ListRecordPlanStreams returns one page of plan-to-stream bindings from the
// database, filtered by plan ID and stream-path substring.
//
// Fix over the original: the same GORM chain was reused for Count and then
// Find. GORM chains are not safely reusable after a finisher method such as
// Count — conditions and scopes accumulate on the shared statement — so the
// filtered chain is built independently for each finisher.
func (ct *CrontabPlugin) ListRecordPlanStreams(ctx context.Context, req *cronpb.ReqPlanStreamList) (*cronpb.RecordPlanStreamResponseList, error) {
	if req.PageNum < 1 {
		req.PageNum = 1
	}
	if req.PageSize < 1 {
		req.PageSize = 10
	}
	var total int64
	var streams []pkg.RecordPlanStream
	model := &pkg.RecordPlanStream{}
	// Count with a fresh filtered chain.
	result := ct.DB.Model(model).
		Scopes(
			pkg.ScopeRecordPlanID(uint(req.PlanId)),
			pkg.ScopeStreamPathLike(req.StreamPath),
			pkg.ScopeOrderByCreatedAtDesc(),
		).
		Count(&total)
	if result.Error != nil {
		return &cronpb.RecordPlanStreamResponseList{
			Code:    500,
			Message: result.Error.Error(),
		}, nil
	}
	// Fetch the requested page with another fresh chain.
	offset := (req.PageNum - 1) * req.PageSize
	result = ct.DB.Model(model).
		Scopes(
			pkg.ScopeRecordPlanID(uint(req.PlanId)),
			pkg.ScopeStreamPathLike(req.StreamPath),
			pkg.ScopeOrderByCreatedAtDesc(),
		).
		Offset(int(offset)).Limit(int(req.PageSize)).Find(&streams)
	if result.Error != nil {
		return &cronpb.RecordPlanStreamResponseList{
			Code:    500,
			Message: result.Error.Error(),
		}, nil
	}
	data := make([]*cronpb.PlanStream, 0, len(streams))
	for _, stream := range streams {
		data = append(data, &cronpb.PlanStream{
			PlanId:     uint32(stream.PlanID),
			StreamPath: stream.StreamPath,
			Fragment:   stream.Fragment,
			FilePath:   stream.FilePath,
			CreatedAt:  timestamppb.New(stream.CreatedAt),
			UpdatedAt:  timestamppb.New(stream.UpdatedAt),
			Enable:     stream.Enable,
		})
	}
	return &cronpb.RecordPlanStreamResponseList{
		Code:       0,
		Message:    "success",
		TotalCount: uint32(total),
		PageNum:    req.PageNum,
		PageSize:   req.PageSize,
		Data:       data,
	}, nil
}
// AddRecordPlanStream binds a stream path to an existing record plan and, if
// the plan is enabled, immediately spins up a crontab task for it.
func (ct *CrontabPlugin) AddRecordPlanStream(ctx context.Context, req *cronpb.PlanStream) (*cronpb.Response, error) {
	// Validate required fields.
	if req.PlanId == 0 {
		return &cronpb.Response{Code: 400, Message: "record_plan_id is required"}, nil
	}
	if strings.TrimSpace(req.StreamPath) == "" {
		return &cronpb.Response{Code: 400, Message: "stream_path is required"}, nil
	}
	// The owning plan must already be loaded in memory.
	parentPlan, found := ct.recordPlans.Get(uint(req.PlanId))
	if !found {
		return &cronpb.Response{Code: 404, Message: "record plan not found"}, nil
	}
	// Reject a duplicate (plan, streamPath) binding.
	probe := pkg.RecordPlanStream{
		PlanID:     uint(req.PlanId),
		StreamPath: req.StreamPath,
	}
	var dupes int64
	if err := ct.DB.Model(&probe).Where(&probe).Count(&dupes).Error; err != nil {
		return &cronpb.Response{Code: 500, Message: err.Error()}, nil
	}
	if dupes > 0 {
		return &cronpb.Response{Code: 400, Message: "record already exists"}, nil
	}
	// Persist the binding.
	record := &pkg.RecordPlanStream{
		PlanID:     uint(req.PlanId),
		StreamPath: req.StreamPath,
		Fragment:   req.Fragment,
		FilePath:   req.FilePath,
		Enable:     req.Enable,
		RecordType: req.RecordType,
	}
	if err := ct.DB.Create(record).Error; err != nil {
		return &cronpb.Response{Code: 500, Message: err.Error()}, nil
	}
	// An enabled plan gets its crontab task started right away.
	if parentPlan.Enable {
		job := &Crontab{
			ctp:              ct,
			RecordPlan:       parentPlan,
			RecordPlanStream: record,
		}
		job.OnStart(func() {
			ct.crontabs.Set(job)
		})
		ct.AddTask(job)
	}
	return &cronpb.Response{Code: 0, Message: "success"}, nil
}
// UpdateRecordPlanStream updates one plan-stream binding, then restarts the
// crontab tasks for every plan bound to that stream path: all tasks for the
// path are stopped, and new ones are created for each enabled plan/stream.
//
// Fixes over the original:
//   - taking &stream of the range variable aliased every new Crontab to the
//     same element on Go < 1.22; index into the slice instead.
//   - ct.Error is a slog-style API (msg, key/value pairs); the printf verbs
//     "%v"/"%d" were never formatted. Pass values as key/value pairs.
func (ct *CrontabPlugin) UpdateRecordPlanStream(ctx context.Context, req *cronpb.PlanStream) (*cronpb.Response, error) {
	if req.PlanId == 0 {
		return &cronpb.Response{Code: 400, Message: "record_plan_id is required"}, nil
	}
	if strings.TrimSpace(req.StreamPath) == "" {
		return &cronpb.Response{Code: 400, Message: "stream_path is required"}, nil
	}
	// The binding must exist.
	var existingStream pkg.RecordPlanStream
	searchModel := pkg.RecordPlanStream{
		PlanID:     uint(req.PlanId),
		StreamPath: req.StreamPath,
	}
	if err := ct.DB.Where(&searchModel).First(&existingStream).Error; err != nil {
		return &cronpb.Response{Code: 404, Message: "record not found"}, nil
	}
	// Apply the update.
	existingStream.Fragment = req.Fragment
	existingStream.FilePath = req.FilePath
	existingStream.Enable = req.Enable
	existingStream.RecordType = req.RecordType
	if err := ct.DB.Save(&existingStream).Error; err != nil {
		return &cronpb.Response{Code: 500, Message: err.Error()}, nil
	}
	// Stop every running task for this stream path before rebuilding.
	ct.crontabs.Range(func(crontab *Crontab) bool {
		if crontab.RecordPlanStream.StreamPath == req.StreamPath {
			crontab.Stop(nil)
		}
		return true
	})
	// Reload all bindings for the path and restart tasks for enabled ones.
	var streams []pkg.RecordPlanStream
	if err := ct.DB.Where("stream_path = ?", req.StreamPath).Find(&streams).Error; err != nil {
		ct.Error("query record plan streams error", "error", err)
		return &cronpb.Response{Code: 500, Message: err.Error()}, nil
	}
	for i := range streams {
		// Index into the slice so each Crontab holds a pointer to a distinct
		// element (not the shared range variable).
		stream := &streams[i]
		plan, ok := ct.recordPlans.Get(stream.PlanID)
		if !ok {
			ct.Error("record plan not found in memory", "planID", stream.PlanID)
			continue
		}
		// Only an enabled plan with an enabled binding gets a task.
		if plan.Enable && stream.Enable {
			crontab := &Crontab{
				ctp:              ct,
				RecordPlan:       plan,
				RecordPlanStream: stream,
			}
			crontab.OnStart(func() {
				ct.crontabs.Set(crontab)
			})
			ct.AddTask(crontab)
		}
	}
	return &cronpb.Response{Code: 0, Message: "success"}, nil
}
// RemoveRecordPlanStream deletes a plan-stream binding after stopping any
// crontab tasks that were running for that exact plan/stream pair.
func (ct *CrontabPlugin) RemoveRecordPlanStream(ctx context.Context, req *cronpb.DeletePlanStreamRequest) (*cronpb.Response, error) {
	// Validate required fields.
	if req.PlanId == 0 {
		return &cronpb.Response{Code: 400, Message: "record_plan_id is required"}, nil
	}
	if strings.TrimSpace(req.StreamPath) == "" {
		return &cronpb.Response{Code: 400, Message: "stream_path is required"}, nil
	}
	// The binding must exist.
	probe := pkg.RecordPlanStream{
		PlanID:     uint(req.PlanId),
		StreamPath: req.StreamPath,
	}
	var target pkg.RecordPlanStream
	if err := ct.DB.Where(&probe).First(&target).Error; err != nil {
		return &cronpb.Response{Code: 404, Message: "record not found"}, nil
	}
	// Stop tasks matching both the stream path and the plan ID.
	ct.crontabs.Range(func(job *Crontab) bool {
		if job.RecordPlanStream.StreamPath == req.StreamPath && job.RecordPlan.ID == uint(req.PlanId) {
			job.Stop(nil)
		}
		return true
	})
	// Delete the binding.
	if err := ct.DB.Delete(&target).Error; err != nil {
		return &cronpb.Response{Code: 500, Message: err.Error()}, nil
	}
	return &cronpb.Response{Code: 0, Message: "success"}, nil
}
// getWeekdayName maps a weekday index (0=Sunday … 6=Saturday) to its Chinese
// name. Like the original slice lookup, an out-of-range index panics.
func getWeekdayName(weekday int) string {
	names := [7]string{"周日", "周一", "周二", "周三", "周四", "周五", "周六"}
	return names[weekday]
}
// getWeekdayIndex maps a Chinese weekday name back to its index (0=Sunday …
// 6=Saturday). Unrecognized names yield 0, matching the original map miss.
func getWeekdayIndex(weekdayName string) int {
	switch weekdayName {
	case "周一":
		return 1
	case "周二":
		return 2
	case "周三":
		return 3
	case "周四":
		return 4
	case "周五":
		return 5
	case "周六":
		return 6
	default: // "周日" and any unknown name
		return 0
	}
}
// 获取下一个指定周几的日期
func getNextDateForWeekday(now time.Time, targetWeekday int, location *time.Location) time.Time {
nowWeekday := int(now.Weekday())
daysToAdd := 0
if targetWeekday >= nowWeekday {
daysToAdd = targetWeekday - nowWeekday
} else {
daysToAdd = 7 - (nowWeekday - targetWeekday)
}
// 如果是同一天但当前时间已经过了最后的时间段,则推到下一周
if daysToAdd == 0 {
// 这里简化处理直接加7天到下周同一天
daysToAdd = 7
}
return now.AddDate(0, 0, daysToAdd)
}
// calculateTimeSlots expands a 168-character weekly plan string into the list
// of upcoming recording slots. Index weekday*24+hour holds '1' when that hour
// is scheduled (weekday 0 = Sunday); each contiguous run of '1' hours becomes
// one slot anchored to the next occurrence of its weekday via
// getNextDateForWeekday. Returns an error for any plan not exactly 168 long.
func calculateTimeSlots(plan string, now time.Time, location *time.Location) ([]*cronpb.TimeSlotInfo, error) {
	if len(plan) != 168 {
		return nil, fmt.Errorf("invalid plan format: length should be 168")
	}
	var slots []*cronpb.TimeSlotInfo
	// Walk each weekday (0=Sunday, 1=Monday, … 6=Saturday).
	for weekday := 0; weekday < 7; weekday++ {
		dayOffset := weekday * 24
		var startHour int = -1
		// Scan every hour; hour 24 acts as an end-of-day sentinel so a run of
		// '1's ending at midnight is still closed.
		for hour := 0; hour <= 24; hour++ {
			// A slot ends at end-of-day or at the first '0' hour.
			isEndOfDay := hour == 24
			isHourOff := !isEndOfDay && plan[dayOffset+hour] == '0'
			if isEndOfDay || isHourOff {
				// Close the currently open slot, if any.
				if startHour != -1 {
					// Anchor the slot to the next occurrence of this weekday.
					targetDate := getNextDateForWeekday(now, weekday, location)
					// Build slot boundaries in the given location…
					startTime := time.Date(targetDate.Year(), targetDate.Month(), targetDate.Day(), startHour, 0, 0, 0, location)
					endTime := time.Date(targetDate.Year(), targetDate.Month(), targetDate.Day(), hour, 0, 0, 0, location)
					// …and emit them as UTC timestamps.
					startTs := timestamppb.New(startTime.UTC())
					endTs := timestamppb.New(endTime.UTC())
					slots = append(slots, &cronpb.TimeSlotInfo{
						Start:     startTs,
						End:       endTs,
						Weekday:   getWeekdayName(weekday),
						TimeRange: fmt.Sprintf("%02d:00-%02d:00", startHour, hour),
					})
					startHour = -1
				}
			} else if plan[dayOffset+hour] == '1' && startHour == -1 {
				// A new slot opens at this hour.
				startHour = hour
			}
		}
	}
	// Sort by weekday, then by start hour within the same day.
	sort.Slice(slots, func(i, j int) bool {
		weekdayI := getWeekdayIndex(slots[i].Weekday)
		weekdayJ := getWeekdayIndex(slots[j].Weekday)
		if weekdayI != weekdayJ {
			return weekdayI < weekdayJ
		}
		// NOTE(review): AsTime() is UTC, so this compares UTC hours; for a
		// non-UTC location the UTC hour can wrap across midnight and misorder
		// same-day slots — confirm whether callers rely on local-hour order.
		return slots[i].Start.AsTime().Hour() < slots[j].Start.AsTime().Hour()
	})
	return slots, nil
}
// getNextTimeSlotFromNow resolves the next recording window implied by a
// weekly plan string, relative to `now` interpreted in `location`.
//
// `plan` is 168 characters ('0'/'1'), one per hour, 7 days x 24 hours with
// day 0 = Sunday (matching time.Weekday). If `now` already lies inside an
// enabled window that window is returned — unless we are within roughly 30
// seconds of its end, in which case the search skips ahead to the following
// window. Returns (nil, nil) when the plan enables no hours at all.
func getNextTimeSlotFromNow(plan string, now time.Time, location *time.Location) (*cronpb.TimeSlotInfo, error) {
	if len(plan) != 168 {
		return nil, fmt.Errorf("invalid plan format: length should be 168")
	}
	// Convert the reference instant into the caller's time zone.
	localNow := now.In(location)
	currentWeekday := int(localNow.Weekday())
	currentHour := localNow.Hour()
	// Within 30 seconds of an hour boundary (xx:59:30 .. xx:00:30)?
	isNearHourBoundary := localNow.Minute() == 59 && localNow.Second() >= 30 || localNow.Minute() == 0 && localNow.Second() <= 30
	// First check whether the current hour sits inside an enabled window.
	dayOffset := currentWeekday * 24
	if currentHour < 24 && plan[dayOffset+currentHour] == '1' {
		// Expand to the full contiguous window containing the current hour.
		startHour := currentHour
		// Scan backwards for the window's first hour.
		for h := currentHour - 1; h >= 0; h-- {
			if plan[dayOffset+h] == '1' {
				startHour = h
			} else {
				break
			}
		}
		// Scan forwards for the window's end (exclusive, capped at 24).
		endHour := currentHour + 1
		for h := endHour; h < 24; h++ {
			if plan[dayOffset+h] == '1' {
				endHour = h + 1
			} else {
				break
			}
		}
		// In the window's final hour and within 30s of its end?
		isNearEndOfTimeSlot := currentHour == endHour-1 && localNow.Minute() >= 59 && localNow.Second() >= 30
		// If the window is about to close, skip it and look for the next one.
		if isNearEndOfTimeSlot && isNearHourBoundary {
			// Fall through to the next-window search below.
		} else {
			// Materialize the window as concrete local times.
			startTime := time.Date(localNow.Year(), localNow.Month(), localNow.Day(), startHour, 0, 0, 0, location)
			endTime := time.Date(localNow.Year(), localNow.Month(), localNow.Day(), endHour, 0, 0, 0, location)
			// If now is within 30s of (or past) the end, keep searching.
			if localNow.After(endTime.Add(-30*time.Second)) || localNow.Equal(endTime) {
				// Fall through to the next-window search below.
			} else {
				// Still comfortably inside the window: report it.
				return &cronpb.TimeSlotInfo{
					Start:     timestamppb.New(startTime.UTC()),
					End:       timestamppb.New(endTime.UTC()),
					Weekday:   getWeekdayName(currentWeekday),
					TimeRange: fmt.Sprintf("%02d:00-%02d:00", startHour, endHour),
				}, nil
			}
		}
	}
	// Search for the next window:
	// first the remaining hours of the current day.
	for h := currentHour + 1; h < 24; h++ {
		if plan[dayOffset+h] == '1' {
			// Window starts here.
			startHour := h
			// Find the (exclusive) end hour.
			endHour := h + 1
			for j := h + 1; j < 24; j++ {
				if plan[dayOffset+j] == '1' {
					endHour = j + 1
				} else {
					break
				}
			}
			// Materialize and return today's upcoming window.
			startTime := time.Date(localNow.Year(), localNow.Month(), localNow.Day(), startHour, 0, 0, 0, location)
			endTime := time.Date(localNow.Year(), localNow.Month(), localNow.Day(), endHour, 0, 0, 0, location)
			return &cronpb.TimeSlotInfo{
				Start:     timestamppb.New(startTime.UTC()),
				End:       timestamppb.New(endTime.UTC()),
				Weekday:   getWeekdayName(currentWeekday),
				TimeRange: fmt.Sprintf("%02d:00-%02d:00", startHour, endHour),
			}, nil
		}
	}
	// Nothing left today: scan the following seven days.
	for d := 1; d <= 7; d++ {
		nextDay := (currentWeekday + d) % 7
		dayOffset := nextDay * 24
		for h := 0; h < 24; h++ {
			if plan[dayOffset+h] == '1' {
				// Window starts here.
				startHour := h
				// Find the (exclusive) end hour.
				endHour := h + 1
				for j := h + 1; j < 24; j++ {
					if plan[dayOffset+j] == '1' {
						endHour = j + 1
					} else {
						break
					}
				}
				// Compute the calendar date of that future day.
				nextDate := localNow.AddDate(0, 0, d)
				// Materialize and return the future window.
				startTime := time.Date(nextDate.Year(), nextDate.Month(), nextDate.Day(), startHour, 0, 0, 0, location)
				endTime := time.Date(nextDate.Year(), nextDate.Month(), nextDate.Day(), endHour, 0, 0, 0, location)
				return &cronpb.TimeSlotInfo{
					Start:     timestamppb.New(startTime.UTC()),
					End:       timestamppb.New(endTime.UTC()),
					Weekday:   getWeekdayName(nextDay),
					TimeRange: fmt.Sprintf("%02d:00-%02d:00", startHour, endHour),
				}, nil
			}
		}
	}
	// The plan enables no hours at all.
	return nil, nil
}
// ParsePlanTime validates a 168-character weekly plan string and, on success,
// returns every recording time slot it describes plus the next upcoming slot
// relative to the current wall clock. Validation and lookup failures are
// reported through the response Code/Message (never through the error return).
func (ct *CrontabPlugin) ParsePlanTime(ctx context.Context, req *cronpb.ParsePlanRequest) (*cronpb.ParsePlanResponse, error) {
	// A plan must carry exactly 7 days x 24 hours = 168 hour flags.
	if len(req.Plan) != 168 {
		return &cronpb.ParsePlanResponse{
			Code:    400,
			Message: "invalid plan format: length should be 168",
		}, nil
	}
	// Every flag must be '0' (off) or '1' (record).
	for pos, flag := range req.Plan {
		if flag == '0' || flag == '1' {
			continue
		}
		return &cronpb.ParsePlanResponse{
			Code:    400,
			Message: fmt.Sprintf("invalid character at position %d: %c (should be 0 or 1)", pos, flag),
		}, nil
	}
	// Expand the plan into its full list of concrete time slots.
	slots, err := calculateTimeSlots(req.Plan, time.Now(), time.Local)
	if err != nil {
		return &cronpb.ParsePlanResponse{
			Code:    500,
			Message: err.Error(),
		}, nil
	}
	// Locate the next slot relative to now.
	nextSlot, err := getNextTimeSlotFromNow(req.Plan, time.Now(), time.Local)
	if err != nil {
		return &cronpb.ParsePlanResponse{
			Code:    500,
			Message: err.Error(),
		}, nil
	}
	return &cronpb.ParsePlanResponse{
		Code:     0,
		Message:  "success",
		Slots:    slots,
		NextSlot: nextSlot,
	}, nil
}
// buildCrontabTaskInfo assembles the base status payload for one Crontab task:
// stream metadata plus, when a record plan is attached, the plan identity and
// its full list of scheduled time slots (computed relative to `now`).
//
// Fix: the original read crontab.RecordPlan.ID/.Name unconditionally and only
// nil-checked RecordPlan afterwards — a nil RecordPlan panicked before the
// guard. All plan-derived fields are now filled inside the guard.
// NOTE(review): StreamPath/FilePath/Fragment come from the embedded
// RecordPlanStream pointer, which is assumed non-nil here — confirm callers.
func buildCrontabTaskInfo(crontab *Crontab, now time.Time) *cronpb.CrontabTaskInfo {
	// Stream-level fields are always safe to copy.
	taskInfo := &cronpb.CrontabTaskInfo{
		StreamPath: crontab.StreamPath,
		FilePath:   crontab.FilePath,
		Fragment:   crontab.Fragment,
	}
	// Plan-derived fields: only touch RecordPlan when it is present.
	if crontab.RecordPlan != nil {
		taskInfo.PlanId = uint32(crontab.RecordPlan.ID)
		taskInfo.PlanName = crontab.RecordPlan.Name
		if crontab.RecordPlan.Plan != "" {
			// Best effort: a malformed plan simply yields no slot list.
			planSlots, err := calculateTimeSlots(crontab.RecordPlan.Plan, now, time.Local)
			if err == nil && len(planSlots) > 0 {
				taskInfo.PlanSlots = planSlots
			}
		}
	}
	return taskInfo
}
// GetCrontabStatus reports the live status of crontab recording tasks: which
// are recording right now and which are waiting for their next window.
// When req.StreamPath is non-empty the result is filtered to that stream; if
// no live task matches, the record-plan tables in the database are consulted
// so the caller still receives the stream's scheduled windows.
func (ct *CrontabPlugin) GetCrontabStatus(ctx context.Context, req *cronpb.CrontabStatusRequest) (*cronpb.CrontabStatusResponse, error) {
	response := &cronpb.CrontabStatusResponse{
		Code:         0,
		Message:      "success",
		RunningTasks: []*cronpb.CrontabTaskInfo{},
		NextTasks:    []*cronpb.CrontabTaskInfo{},
		TotalRunning: 0,
		TotalPlanned: 0,
	}
	// Accumulators for in-progress and upcoming tasks.
	runningTasks := make([]*cronpb.CrontabTaskInfo, 0)
	nextTasks := make([]*cronpb.CrontabTaskInfo, 0)
	// Whether the requested stream matched a live task; drives the DB fallback.
	streamPathFound := false
	// Walk every live crontab task.
	ct.crontabs.Range(func(crontab *Crontab) bool {
		// Apply the optional stream_path filter.
		if req.StreamPath != "" && crontab.StreamPath != req.StreamPath {
			return true // keep iterating
		}
		// Remember that the requested stream has a live task.
		if req.StreamPath != "" {
			streamPathFound = true
		}
		now := time.Now()
		// Base task info (plan metadata + full slot list).
		taskInfo := buildCrontabTaskInfo(crontab, now)
		// Is this task recording right now?
		if crontab.recording && crontab.currentSlot != nil {
			// Currently recording.
			taskInfo.IsRecording = true
			// Window boundaries.
			taskInfo.StartTime = timestamppb.New(crontab.currentSlot.Start)
			taskInfo.EndTime = timestamppb.New(crontab.currentSlot.End)
			// Elapsed and remaining durations.
			elapsedDuration := now.Sub(crontab.currentSlot.Start)
			remainingDuration := crontab.currentSlot.End.Sub(now)
			taskInfo.ElapsedSeconds = uint32(elapsedDuration.Seconds())
			taskInfo.RemainingSeconds = uint32(remainingDuration.Seconds())
			// Human-readable hour range and weekday label.
			startHour := crontab.currentSlot.Start.Hour()
			endHour := crontab.currentSlot.End.Hour()
			taskInfo.TimeRange = fmt.Sprintf("%02d:00-%02d:00", startHour, endHour)
			taskInfo.Weekday = getWeekdayName(int(crontab.currentSlot.Start.Weekday()))
			// Collect as a running task.
			runningTasks = append(runningTasks, taskInfo)
		} else {
			// Otherwise report the task's next scheduled window, if any.
			nextSlot := crontab.getNextTimeSlot()
			if nextSlot != nil {
				// Upcoming (not yet recording) task.
				taskInfo.IsRecording = false
				// Window boundaries.
				taskInfo.StartTime = timestamppb.New(nextSlot.Start)
				taskInfo.EndTime = timestamppb.New(nextSlot.End)
				// Time left until the window opens.
				waitingDuration := nextSlot.Start.Sub(now)
				taskInfo.RemainingSeconds = uint32(waitingDuration.Seconds())
				// Human-readable hour range and weekday label.
				startHour := nextSlot.Start.Hour()
				endHour := nextSlot.End.Hour()
				taskInfo.TimeRange = fmt.Sprintf("%02d:00-%02d:00", startHour, endHour)
				taskInfo.Weekday = getWeekdayName(int(nextSlot.Start.Weekday()))
				// Collect as a planned task.
				nextTasks = append(nextTasks, taskInfo)
			}
		}
		return true // keep iterating
	})
	// Fallback: the requested stream has no live task — derive its schedule
	// straight from the record-plan tables.
	if req.StreamPath != "" && !streamPathFound {
		// All plan-stream bindings for this stream.
		var streams []pkg.RecordPlanStream
		if err := ct.DB.Where("stream_path = ?", req.StreamPath).Find(&streams).Error; err == nil && len(streams) > 0 {
			for _, stream := range streams {
				// Load the owning plan; only enabled plan+stream pairs count.
				var plan pkg.RecordPlan
				if err := ct.DB.First(&plan, stream.PlanID).Error; err == nil && plan.Enable && stream.Enable {
					now := time.Now()
					// Build task info from the DB records.
					taskInfo := &cronpb.CrontabTaskInfo{
						PlanId:      uint32(plan.ID),
						PlanName:    plan.Name,
						StreamPath:  stream.StreamPath,
						FilePath:    stream.FilePath,
						Fragment:    stream.Fragment,
						IsRecording: false,
					}
					// Full slot list for the plan (best effort).
					planSlots, err := calculateTimeSlots(plan.Plan, now, time.Local)
					if err == nil && planSlots != nil && len(planSlots) > 0 {
						taskInfo.PlanSlots = planSlots
					}
					// Next scheduled window (best effort).
					nextSlot, err := getNextTimeSlotFromNow(plan.Plan, now, time.Local)
					if err == nil && nextSlot != nil {
						// Window boundaries and labels.
						taskInfo.StartTime = nextSlot.Start
						taskInfo.EndTime = nextSlot.End
						taskInfo.TimeRange = nextSlot.TimeRange
						taskInfo.Weekday = nextSlot.Weekday
						// Time left until the window opens.
						waitingDuration := nextSlot.Start.AsTime().Sub(now)
						taskInfo.RemainingSeconds = uint32(waitingDuration.Seconds())
						// Collect as a planned task.
						nextTasks = append(nextTasks, taskInfo)
					}
				}
			}
		}
	}
	// Order upcoming tasks by their start time.
	sort.Slice(nextTasks, func(i, j int) bool {
		return nextTasks[i].StartTime.AsTime().Before(nextTasks[j].StartTime.AsTime())
	})
	// Finalize the response.
	response.RunningTasks = runningTasks
	response.NextTasks = nextTasks
	response.TotalRunning = uint32(len(runningTasks))
	response.TotalPlanned = uint32(len(nextTasks))
	return response, nil
}

244
plugin/crontab/api_test.go Normal file
View File

@@ -0,0 +1,244 @@
package plugin_crontab
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestCalculateTimeSlots verifies that a weekly plan with slots only on
// Friday (hours 0, 2-3, 5, 10 and 15; plan indices 120-143) expands into the
// expected five time ranges, in the order the parser emits them.
func TestCalculateTimeSlots(t *testing.T) {
	// 7 days x 24 hours; only the Friday segment contains '1' flags.
	planStr := "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000101101000010000100000000000000000000000000000000"
	now := time.Date(2023, 5, 1, 12, 0, 0, 0, time.Local) // Monday noon
	slots, err := calculateTimeSlots(planStr, now, time.Local)
	assert.NoError(t, err)
	assert.Equal(t, 5, len(slots), "应该有5个时间段")
	// Expected (weekday, range) pairs in parser emission order.
	expected := []struct {
		weekday   string
		timeRange string
	}{
		{"周五", "10:00-11:00"},
		{"周五", "15:00-16:00"},
		{"周五", "00:00-01:00"},
		{"周五", "02:00-04:00"},
		{"周五", "05:00-06:00"},
	}
	for i, want := range expected {
		assert.Equal(t, want.weekday, slots[i].Weekday)
		assert.Equal(t, want.timeRange, slots[i].TimeRange)
	}
	// Dump every slot to ease debugging.
	for i, slot := range slots {
		t.Logf("时间段 %d: %s %s", i, slot.Weekday, slot.TimeRange)
	}
}
// TestGetNextTimeSlotFromNow checks next-slot resolution against a plan that
// only enables Friday hours 0, 2-3, 5, 10 and 15.
func TestGetNextTimeSlotFromNow(t *testing.T) {
	planStr := "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000101101000010000100000000000000000000000000000000"
	// Each case fixes "now" and states the slot the resolver must pick.
	cases := []struct {
		name      string
		now       time.Time
		weekday   string
		timeRange string
	}{
		// Monday noon: the next slot is Friday midnight.
		{"monday noon", time.Date(2023, 5, 1, 12, 0, 0, 0, time.Local), "周五", "00:00-01:00"},
		// Friday 01:30: the next slot starts at 02:00.
		{"friday 01:30", time.Date(2023, 5, 5, 1, 30, 0, 0, time.Local), "周五", "02:00-04:00"},
		// Friday 03:00: currently inside the 02:00-04:00 slot, which is returned.
		{"friday 03:00", time.Date(2023, 5, 5, 3, 0, 0, 0, time.Local), "周五", "02:00-04:00"},
	}
	for _, tc := range cases {
		slot, err := getNextTimeSlotFromNow(planStr, tc.now, time.Local)
		assert.NoError(t, err)
		assert.NotNil(t, slot)
		assert.Equal(t, tc.weekday, slot.Weekday)
		assert.Equal(t, tc.timeRange, slot.TimeRange)
	}
}
// TestParsePlanFromString checks a plan whose Monday segment (indices 24-47)
// enables 12:00, 14:00-16:00, 17:00 and 22:00, and verifies those ranges are
// recovered.
func TestParsePlanFromString(t *testing.T) {
	planStr := "000000000000000000000000000000000000101101000010000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
	now := time.Now()
	slots, err := calculateTimeSlots(planStr, now, time.Local)
	assert.NoError(t, err)
	// Collect Monday's ranges (logging each one as it is found).
	var mondaySlots []string
	for _, slot := range slots {
		if slot.Weekday != "周一" {
			continue
		}
		t.Logf("找到周一时间段: %s", slot.TimeRange)
		mondaySlots = append(mondaySlots, slot.TimeRange)
	}
	assert.True(t, len(mondaySlots) > 0, "应该找到周一的时间段")
	// Every expected range must appear among Monday's slots.
	expectedSlots := []string{
		"12:00-13:00",
		"14:00-16:00",
		"17:00-18:00",
		"22:00-23:00",
	}
	for _, expected := range expectedSlots {
		found := false
		for _, actual := range mondaySlots {
			if actual == expected {
				found = true
				break
			}
		}
		assert.True(t, found, "应该找到周一时间段:"+expected)
	}
	// Resolve and log the next slot from now (value depends on wall clock).
	nextSlot, err := getNextTimeSlotFromNow(planStr, now, time.Local)
	assert.NoError(t, err)
	if nextSlot == nil {
		t.Log("没有找到下一个时间段")
	} else {
		t.Logf("下一个时间段: %s %s", nextSlot.Weekday, nextSlot.TimeRange)
	}
}
// TestCountStringLength sanity-checks that both fixture plan strings used by
// the tests above are exactly 168 characters (7 days x 24 hours).
func TestCountStringLength(t *testing.T) {
	fixtures := []struct {
		plan string
		note string
	}{
		{"000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000101101000010000100000000000000000000000000000000", "第一个测试字符串长度应为168"},
		{"000000000000000000000000000000000000101101000010000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", "第二个测试字符串长度应为168"},
	}
	for _, fx := range fixtures {
		assert.Equal(t, 168, len(fx.plan), fx.note)
	}
}
// TestUserProvidedPlanString exercises a user-reported plan string whose
// Monday segment enables 12:00, 14:00-16:00, 17:00 and 22:00, logging the
// parsed slots and asserting the Monday slot count.
func TestUserProvidedPlanString(t *testing.T) {
	// The user-supplied plan string.
	planStr := "000000000000000000000000000000000000101101000010000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
	// Sanity check: exactly 168 hour flags.
	assert.Equal(t, 168, len(planStr), "字符串长度应为168")
	// Expand into time slots.
	now := time.Now()
	slots, err := calculateTimeSlots(planStr, now, time.Local)
	assert.NoError(t, err)
	// Dump every slot for debugging.
	t.Log("所有时间段:")
	for i, slot := range slots {
		t.Logf("%d: %s %s", i, slot.Weekday, slot.TimeRange)
	}
	// Resolve and log the next slot from now.
	nextSlot, err := getNextTimeSlotFromNow(planStr, now, time.Local)
	assert.NoError(t, err)
	if nextSlot != nil {
		t.Logf("下一个执行时间段: %s %s", nextSlot.Weekday, nextSlot.TimeRange)
		t.Logf("开始时间: %s", nextSlot.Start.AsTime().In(time.Local).Format("2006-01-02 15:04:05"))
		t.Logf("结束时间: %s", nextSlot.End.AsTime().In(time.Local).Format("2006-01-02 15:04:05"))
	} else {
		t.Log("没有找到下一个时间段")
	}
	// Collect Monday's ranges.
	var mondaySlots []string
	for _, slot := range slots {
		if slot.Weekday == "周一" {
			mondaySlots = append(mondaySlots, slot.TimeRange)
		}
	}
	// Monday is expected to contain exactly these ranges.
	expectedMondaySlots := []string{
		"12:00-13:00",
		"14:00-16:00",
		"17:00-18:00",
		"22:00-23:00",
	}
	assert.Equal(t, len(expectedMondaySlots), len(mondaySlots), "周一时间段数量不匹配")
	// Log the pairing of expected vs actual for inspection.
	for i, expected := range expectedMondaySlots {
		if i < len(mondaySlots) {
			t.Logf("期望周一时间段 %s, 实际是 %s", expected, mondaySlots[i])
		}
	}
}
// TestUserProvidedPlanString2 exercises a second user-reported plan string,
// grouping the parsed slots by weekday and logging them; it asserts only the
// string length and error-free parsing.
func TestUserProvidedPlanString2(t *testing.T) {
	// The second user-supplied plan string.
	planStr := "000000000000000000000000000000000000000000000000000000000000001011010100001000000000000000000000000100000000000000000000000010000000000000000000000001000000000000000000"
	// Sanity check: exactly 168 hour flags.
	assert.Equal(t, 168, len(planStr), "字符串长度应为168")
	// Expand into time slots.
	now := time.Now()
	slots, err := calculateTimeSlots(planStr, now, time.Local)
	assert.NoError(t, err)
	// Group the slot ranges by weekday for readable output.
	weekdaySlots := make(map[string][]string)
	for _, slot := range slots {
		weekdaySlots[slot.Weekday] = append(weekdaySlots[slot.Weekday], slot.TimeRange)
	}
	t.Log("所有时间段(按周几分组):")
	weekdays := []string{"周日", "周一", "周二", "周三", "周四", "周五", "周六"}
	for _, weekday := range weekdays {
		if timeRanges, ok := weekdaySlots[weekday]; ok {
			t.Logf("%s: %v", weekday, timeRanges)
		}
	}
	// Also dump every slot individually.
	t.Log("\n所有时间段详细信息:")
	for i, slot := range slots {
		t.Logf("%d: %s %s", i, slot.Weekday, slot.TimeRange)
	}
	// Resolve and log the next slot from now.
	nextSlot, err := getNextTimeSlotFromNow(planStr, now, time.Local)
	assert.NoError(t, err)
	if nextSlot != nil {
		t.Logf("\n下一个执行时间段: %s %s", nextSlot.Weekday, nextSlot.TimeRange)
		t.Logf("开始时间: %s", nextSlot.Start.AsTime().In(time.Local).Format("2006-01-02 15:04:05"))
		t.Logf("结束时间: %s", nextSlot.End.AsTime().In(time.Local).Format("2006-01-02 15:04:05"))
	} else {
		t.Log("没有找到下一个时间段")
	}
}

422
plugin/crontab/crontab.go Normal file
View File

@@ -0,0 +1,422 @@
package plugin_crontab
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"strconv"
"time"
"m7s.live/v5/pkg/task"
"m7s.live/v5/plugin/crontab/pkg"
)
// TimeSlot is one concrete recording window resolved from a weekly plan.
type TimeSlot struct {
	Start time.Time // window start (inclusive)
	End   time.Time // window end (exclusive)
}
// Crontab schedules plan-driven recording for one (record plan, stream) pair.
// It runs as an m7s task: Start initializes state, Run blocks in the
// scheduling loop, Dispose tears it down.
type Crontab struct {
	task.Job
	ctp *CrontabPlugin // owning plugin (provides HTTP config for the mp4 API calls)
	*pkg.RecordPlan        // the weekly plan this task follows
	*pkg.RecordPlanStream  // the stream path and recording parameters
	stop chan struct{} // signals the Run loop to exit
	running bool // guards against double Start / double Dispose
	location *time.Location // time zone used for all schedule arithmetic
	timer *time.Timer // retry timer used while no plan window exists
	currentSlot *TimeSlot // the window currently being executed
	recording bool // whether a recording is in progress
}
// GetKey returns this task's collection key in the form "<planID>_<streamPath>".
func (cron *Crontab) GetKey() string {
	planID := strconv.Itoa(int(cron.PlanID))
	return planID + "_" + cron.StreamPath
}
// Start prepares the scheduler's state. Invoking it again while the task is
// already running is a harmless no-op.
func (cron *Crontab) Start() (err error) {
	cron.Info("crontab plugin start")
	if cron.running {
		// Already active: nothing to (re)initialize.
		return nil
	}
	// Lazily create the stop channel and default to the local time zone.
	if cron.stop == nil {
		cron.stop = make(chan struct{})
	}
	if cron.location == nil {
		cron.location = time.Local
	}
	cron.running = true
	return nil
}
// Run is the blocking scheduler loop. It alternates between waiting for the
// next window boundary (start or end) and invoking start/stopRecording, until
// a signal arrives on cron.stop.
func (cron *Crontab) Run() (err error) {
	cron.Info("crontab plugin is running")
	// Defensive re-initialization in case Start was skipped.
	if cron.stop == nil {
		cron.stop = make(chan struct{})
	}
	if cron.location == nil {
		cron.location = time.Local
	}
	cron.Info("调度器启动")
	for {
		// Current local time.
		now := time.Now().In(cron.location)
		// First handle anything already overdue: stop an expired recording.
		if cron.recording && cron.currentSlot != nil &&
			(now.Equal(cron.currentSlot.End) || now.After(cron.currentSlot.End)) {
			cron.stopRecording()
			continue
		}
		// Determine the next event to wait for.
		var nextEvent time.Time
		var isStartEvent bool
		if cron.recording {
			// Recording: the next event is the current window's end.
			nextEvent = cron.currentSlot.End
			isStartEvent = false
		} else {
			// Idle: find the next window's start.
			nextSlot := cron.getNextTimeSlot()
			if nextSlot == nil {
				// No schedulable window; retry after a default interval.
				cron.timer = time.NewTimer(1 * time.Hour)
				cron.Info("无有效计划等待1小时后重试")
				// Wait for the retry timer or a stop signal.
				select {
				case <-cron.timer.C:
					continue // re-evaluate the plan
				case <-cron.stop:
					// Shut the scheduler down.
					if cron.timer != nil {
						cron.timer.Stop()
					}
					cron.Info("调度器停止")
					return
				}
			}
			cron.currentSlot = nextSlot
			nextEvent = nextSlot.Start
			isStartEvent = true
			// Window already open: begin recording immediately.
			if now.Equal(nextEvent) || now.After(nextEvent) {
				cron.startRecording()
				continue
			}
		}
		// How long until the next event?
		waitDuration := nextEvent.Sub(now)
		// A non-positive wait means the event is overdue: act now.
		if waitDuration <= 0 {
			if isStartEvent {
				cron.startRecording()
			} else {
				cron.stopRecording()
			}
			continue
		}
		// Arm a one-shot timer for the next event.
		timer := time.NewTimer(waitDuration)
		if isStartEvent {
			cron.Info("下次开始时间: ", nextEvent, "等待时间:", waitDuration)
		} else {
			cron.Info("下次结束时间: ", nextEvent, " 等待时间:", waitDuration)
		}
		// Wait for the timer or a stop signal.
		select {
		case now = <-timer.C:
			// Refresh `now` to the timer's fire time in the local zone.
			now = now.In(cron.location)
			// Fire the scheduled action.
			if isStartEvent {
				cron.startRecording()
			} else {
				cron.stopRecording()
			}
		case <-cron.stop:
			// Shut the scheduler down.
			timer.Stop()
			cron.Info("调度器停止")
			return
		}
	}
}
// Dispose stops the scheduler and terminates any in-flight recording.
//
// Fix: the original performed a blocking send on cron.stop, which deadlocks
// whenever Run is not currently parked in a select — e.g. Run never started,
// or has already returned — and blocks forever on a nil channel if Start was
// skipped. Closing the channel instead wakes Run's `<-cron.stop` case exactly
// like a send did, never blocks, and is guarded by `running` so it cannot be
// closed twice.
func (cron *Crontab) Dispose() (err error) {
	if cron.running {
		cron.running = false
		// Broadcast shutdown: a receive on a closed channel completes
		// immediately, so Run's select exits no matter where it is parked.
		if cron.stop != nil {
			close(cron.stop)
		}
		if cron.timer != nil {
			cron.timer.Stop()
		}
		// Make sure an active recording is terminated before we go away.
		if cron.recording {
			cron.stopRecording()
		}
	}
	return
}
// getNextTimeSlot resolves the next recording window from this task's plan.
// It mirrors the package-level getNextTimeSlotFromNow, but returns a local
// TimeSlot and logs its decisions. Returns nil when the plan is missing,
// disabled, malformed, or enables no hours at all.
//
// NOTE(review): the Debug/Error calls below pass printf-style format strings;
// confirm the m7s logger actually formats them (a slog-style logger would
// treat the extra arguments as key/value pairs instead).
func (cron *Crontab) getNextTimeSlot() *TimeSlot {
	if cron.RecordPlan == nil || !cron.RecordPlan.Enable || cron.RecordPlan.Plan == "" {
		return nil // no usable plan
	}
	plan := cron.RecordPlan.Plan
	if len(plan) != 168 {
		cron.Error("无效的计划格式: %s, 长度应为168", plan)
		return nil
	}
	// All schedule arithmetic happens in the task's configured time zone.
	now := time.Now().In(cron.location)
	cron.Debug("当前本地时间: %v, 星期%d, 小时%d", now.Format("2006-01-02 15:04:05"), now.Weekday(), now.Hour())
	// Current weekday (0 = Sunday) and hour.
	currentWeekday := int(now.Weekday())
	currentHour := now.Hour()
	// Within 30 seconds of an hour boundary (xx:59:30 .. xx:00:30)?
	isNearHourBoundary := now.Minute() == 59 && now.Second() >= 30 || now.Minute() == 0 && now.Second() <= 30
	// First check whether the current hour sits inside an enabled window.
	dayOffset := currentWeekday * 24
	if currentHour < 24 && plan[dayOffset+currentHour] == '1' {
		// Expand to the full contiguous window containing the current hour.
		startHour := currentHour
		// Scan backwards for the window's first hour.
		for h := currentHour - 1; h >= 0; h-- {
			if plan[dayOffset+h] == '1' {
				startHour = h
			} else {
				break
			}
		}
		// Scan forwards for the window's end (exclusive, capped at 24).
		endHour := currentHour + 1
		for h := endHour; h < 24; h++ {
			if plan[dayOffset+h] == '1' {
				endHour = h + 1
			} else {
				break
			}
		}
		// In the window's final hour and within 30s of its end?
		isNearEndOfTimeSlot := currentHour == endHour-1 && now.Minute() == 59 && now.Second() >= 30
		// If the window is about to close, skip it and look for the next one.
		if isNearEndOfTimeSlot && isNearHourBoundary {
			cron.Debug("接近当前时间段结束,准备查找下一个时间段")
		} else {
			// Materialize the window as concrete local times.
			startTime := time.Date(now.Year(), now.Month(), now.Day(), startHour, 0, 0, 0, cron.location)
			endTime := time.Date(now.Year(), now.Month(), now.Day(), endHour, 0, 0, 0, cron.location)
			// If now is within 30s of (or past) the end, keep searching.
			if now.After(endTime.Add(-30*time.Second)) || now.Equal(endTime) {
				cron.Debug("当前时间已接近或超过结束时间,尝试查找下一个时间段")
			} else {
				cron.Debug("当前已在有效时间段内: 开始=%v, 结束=%v",
					startTime.Format("2006-01-02 15:04:05"), endTime.Format("2006-01-02 15:04:05"))
				return &TimeSlot{
					Start: startTime,
					End:   endTime,
				}
			}
		}
	}
	// Search for the next window:
	// first the remaining hours of the current day.
	for h := currentHour + 1; h < 24; h++ {
		if plan[dayOffset+h] == '1' {
			// Window starts here.
			startHour := h
			// Find the (exclusive) end hour.
			endHour := h + 1
			for j := h + 1; j < 24; j++ {
				if plan[dayOffset+j] == '1' {
					endHour = j + 1
				} else {
					break
				}
			}
			// Materialize and return today's upcoming window.
			startTime := time.Date(now.Year(), now.Month(), now.Day(), startHour, 0, 0, 0, cron.location)
			endTime := time.Date(now.Year(), now.Month(), now.Day(), endHour, 0, 0, 0, cron.location)
			cron.Debug("找到今天的有效时间段: 开始=%v, 结束=%v",
				startTime.Format("2006-01-02 15:04:05"), endTime.Format("2006-01-02 15:04:05"))
			return &TimeSlot{
				Start: startTime,
				End:   endTime,
			}
		}
	}
	// Nothing left today: scan the following seven days.
	for d := 1; d <= 7; d++ {
		nextDay := (currentWeekday + d) % 7
		dayOffset := nextDay * 24
		for h := 0; h < 24; h++ {
			if plan[dayOffset+h] == '1' {
				// Window starts here.
				startHour := h
				// Find the (exclusive) end hour.
				endHour := h + 1
				for j := h + 1; j < 24; j++ {
					if plan[dayOffset+j] == '1' {
						endHour = j + 1
					} else {
						break
					}
				}
				// Compute the calendar date of that future day.
				nextDate := now.AddDate(0, 0, d)
				// Materialize and return the future window.
				startTime := time.Date(nextDate.Year(), nextDate.Month(), nextDate.Day(), startHour, 0, 0, 0, cron.location)
				endTime := time.Date(nextDate.Year(), nextDate.Month(), nextDate.Day(), endHour, 0, 0, 0, cron.location)
				cron.Debug("找到未来有效时间段: 开始=%v, 结束=%v",
					startTime.Format("2006-01-02 15:04:05"), endTime.Format("2006-01-02 15:04:05"))
				return &TimeSlot{
					Start: startTime,
					End:   endTime,
				}
			}
		}
	}
	cron.Debug("未找到有效的时间段")
	return nil
}
// startRecording asks the mp4 plugin (over the local HTTP API) to begin
// recording this task's stream, then marks the task as recording on success.
// It is a no-op when a recording is already active.
// NOTE(review): assumes cron.currentSlot and cron.RecordPlan are non-nil when
// called — true for the call sites in Run; confirm before adding new callers.
func (cron *Crontab) startRecording() {
	if cron.recording {
		return // already recording
	}
	now := time.Now().In(cron.location)
	cron.Info("开始录制任务: %s, 时间: %v, 计划结束时间: %v",
		cron.RecordPlan.Name, now, cron.currentSlot.End)
	// Build the JSON request body: fragment duration and output path.
	reqBody := map[string]string{
		"fragment": cron.Fragment,
		"filePath": cron.FilePath,
	}
	jsonBody, err := json.Marshal(reqBody)
	if err != nil {
		cron.Error("构造请求体失败: %v", err)
		return
	}
	// Resolve the local HTTP listen address.
	addr := cron.ctp.Plugin.GetCommonConf().HTTP.ListenAddr
	if addr == "" {
		addr = ":8080" // default port
	}
	if addr[0] == ':' {
		addr = "localhost" + addr
	}
	// POST to the mp4 plugin's start endpoint for this stream.
	resp, err := http.Post(fmt.Sprintf("http://%s/mp4/api/start/%s", addr, cron.StreamPath), "application/json", bytes.NewBuffer(jsonBody))
	if err != nil {
		cron.Error("开始录制失败: %v", err)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		cron.Error("开始录制失败HTTP状态码: %d", resp.StatusCode)
		return
	}
	cron.recording = true
}
// stopRecording asks the mp4 plugin (over the local HTTP API) to stop
// recording this task's stream. State is cleared optimistically BEFORE the
// HTTP call so the Run loop cannot re-trigger the stop condition while the
// request is in flight, and restored on failure so a later iteration retries.
func (cron *Crontab) stopRecording() {
	if !cron.recording {
		return // nothing to stop
	}
	// Record the stop moment for logging.
	now := time.Now().In(cron.location)
	cron.Info("停止录制任务: %s, 时间: %v", cron.RecordPlan.Name, now)
	// Reset state first (keeping copies) to avoid duplicate stop attempts
	// from the scheduler loop.
	wasRecording := cron.recording
	cron.recording = false
	savedSlot := cron.currentSlot
	cron.currentSlot = nil
	// Resolve the local HTTP listen address.
	addr := cron.ctp.Plugin.GetCommonConf().HTTP.ListenAddr
	if addr == "" {
		addr = ":8080" // default port
	}
	if addr[0] == ':' {
		addr = "localhost" + addr
	}
	// POST to the mp4 plugin's stop endpoint for this stream.
	resp, err := http.Post(fmt.Sprintf("http://%s/mp4/api/stop/%s", addr, cron.StreamPath), "application/json", nil)
	if err != nil {
		cron.Error("停止录制失败: %v", err)
		// Request failed: restore state so the next iteration retries.
		if wasRecording {
			cron.recording = true
			cron.currentSlot = savedSlot
		}
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		cron.Error("停止录制失败HTTP状态码: %d", resp.StatusCode)
		// Non-OK status: restore state so the next iteration retries.
		if wasRecording {
			cron.recording = true
			cron.currentSlot = savedSlot
		}
	}
}

71
plugin/crontab/index.go Normal file
View File

@@ -0,0 +1,71 @@
package plugin_crontab
import (
"fmt"
"m7s.live/v5/pkg/util"
"m7s.live/v5"
"m7s.live/v5/plugin/crontab/pb"
"m7s.live/v5/plugin/crontab/pkg"
)
// CrontabPlugin implements plan-based scheduled recording on top of the m7s
// plugin framework, exposing its management API over gRPC (and the gateway).
type CrontabPlugin struct {
	m7s.Plugin
	pb.UnimplementedApiServer
	crontabs    util.Collection[string, *Crontab]      // live scheduler tasks, keyed by "<planID>_<streamPath>"
	recordPlans util.Collection[uint, *pkg.RecordPlan] // record plans cached from the database
}
// Register the plugin with the m7s core, wiring up its gRPC service
// descriptor and the matching HTTP gateway handler.
var _ = m7s.InstallPlugin[CrontabPlugin](m7s.PluginMeta{
	ServiceDesc:         &pb.Api_ServiceDesc,
	RegisterGRPCHandler: pb.RegisterApiHandler,
})
// OnInit migrates the crontab tables and restores enabled record plans from
// the database, spawning one Crontab task per (plan, stream) pair.
//
// Fixes over the original:
//  1. The loops stored &plan / &stream — the address of the range variable.
//     Before Go 1.22 that variable is reused each iteration, so every cached
//     entry aliased the LAST row. We now address the slice elements, which is
//     correct on every Go version.
//  2. The per-plan stream query assigned the named return `err`; a logged-and-
//     continued failure therefore leaked out of the final `return`. It now
//     uses a local variable.
func (ct *CrontabPlugin) OnInit() (err error) {
	if ct.DB == nil {
		// No database configured: log and run without persisted plans.
		ct.Error("DB is nil")
		return
	}
	if err = ct.DB.AutoMigrate(&pkg.RecordPlan{}, &pkg.RecordPlanStream{}); err != nil {
		return fmt.Errorf("auto migrate tables error: %v", err)
	}
	ct.Info("init database success")
	// Load every record plan.
	var plans []pkg.RecordPlan
	if err = ct.DB.Find(&plans).Error; err != nil {
		return fmt.Errorf("query record plans error: %v", err)
	}
	for i := range plans {
		// Address the slice element, not a loop variable (see doc comment).
		plan := &plans[i]
		ct.recordPlans.Add(plan)
		// Only enabled plans spawn scheduler tasks.
		if !plan.Enable {
			continue
		}
		// Load the streams bound to this plan.
		var streams []pkg.RecordPlanStream
		model := &pkg.RecordPlanStream{PlanID: plan.ID}
		if dbErr := ct.DB.Model(model).Where(model).Find(&streams).Error; dbErr != nil {
			// Best effort: skip this plan but keep initializing the rest.
			ct.Error("query record plan streams error: %v", dbErr)
			continue
		}
		// One scheduler task per stream.
		for j := range streams {
			crontab := &Crontab{
				ctp:              ct,
				RecordPlan:       plan,
				RecordPlanStream: &streams[j],
			}
			// Register the task in the live collection once it starts.
			crontab.OnStart(func() {
				ct.crontabs.Set(crontab)
			})
			ct.AddTask(crontab)
		}
	}
	return
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,831 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: crontab.proto
/*
Package pb is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package pb
import (
"context"
"errors"
"io"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
var (
_ codes.Code
_ io.Reader
_ status.Status
_ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
var filter_Api_List_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_Api_List_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ReqPlanList
metadata runtime.ServerMetadata
)
io.Copy(io.Discard, req.Body)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_List_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.List(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_Api_List_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ReqPlanList
metadata runtime.ServerMetadata
)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_List_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.List(ctx, &protoReq)
return msg, metadata, err
}
func request_Api_Add_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq Plan
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.Add(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_Api_Add_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq Plan
metadata runtime.ServerMetadata
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.Add(ctx, &protoReq)
return msg, metadata, err
}
func request_Api_Update_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq Plan
metadata runtime.ServerMetadata
err error
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
val, ok := pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.Uint32(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.Update(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_Api_Update_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq Plan
metadata runtime.ServerMetadata
err error
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
val, ok := pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.Uint32(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := server.Update(ctx, &protoReq)
return msg, metadata, err
}
func request_Api_Remove_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DeleteRequest
metadata runtime.ServerMetadata
err error
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
val, ok := pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.Uint32(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := client.Remove(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_Api_Remove_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq DeleteRequest
metadata runtime.ServerMetadata
err error
)
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
val, ok := pathParams["id"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "id")
}
protoReq.Id, err = runtime.Uint32(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "id", err)
}
msg, err := server.Remove(ctx, &protoReq)
return msg, metadata, err
}
var filter_Api_ListRecordPlanStreams_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
func request_Api_ListRecordPlanStreams_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq ReqPlanStreamList
metadata runtime.ServerMetadata
)
io.Copy(io.Discard, req.Body)
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_ListRecordPlanStreams_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.ListRecordPlanStreams(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// local_request_Api_ListRecordPlanStreams_0 is the in-process variant of
// request_Api_ListRecordPlanStreams_0: it calls ApiServer directly instead
// of going through a gRPC client connection.
func local_request_Api_ListRecordPlanStreams_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var md runtime.ServerMetadata
	var in ReqPlanStreamList
	if parseErr := req.ParseForm(); parseErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", parseErr)
	}
	if popErr := runtime.PopulateQueryParameters(&in, req.Form, filter_Api_ListRecordPlanStreams_0); popErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", popErr)
	}
	resp, callErr := server.ListRecordPlanStreams(ctx, &in)
	return resp, md, callErr
}
// request_Api_AddRecordPlanStream_0 decodes the JSON request body into a
// PlanStream message and forwards it to ApiClient.AddRecordPlanStream.
func request_Api_AddRecordPlanStream_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var md runtime.ServerMetadata
	var in PlanStream
	// io.EOF means "empty body", which is tolerated; other decode failures are not.
	if decodeErr := marshaler.NewDecoder(req.Body).Decode(&in); decodeErr != nil && !errors.Is(decodeErr, io.EOF) {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", decodeErr)
	}
	resp, rpcErr := client.AddRecordPlanStream(ctx, &in, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD))
	return resp, md, rpcErr
}
// local_request_Api_AddRecordPlanStream_0 decodes the JSON request body into
// a PlanStream message and invokes ApiServer.AddRecordPlanStream in-process.
func local_request_Api_AddRecordPlanStream_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var md runtime.ServerMetadata
	var in PlanStream
	if decodeErr := marshaler.NewDecoder(req.Body).Decode(&in); decodeErr != nil && !errors.Is(decodeErr, io.EOF) {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", decodeErr)
	}
	resp, callErr := server.AddRecordPlanStream(ctx, &in)
	return resp, md, callErr
}
// request_Api_UpdateRecordPlanStream_0 decodes the JSON request body into a
// PlanStream message and forwards it to ApiClient.UpdateRecordPlanStream.
func request_Api_UpdateRecordPlanStream_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var md runtime.ServerMetadata
	var in PlanStream
	if decodeErr := marshaler.NewDecoder(req.Body).Decode(&in); decodeErr != nil && !errors.Is(decodeErr, io.EOF) {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", decodeErr)
	}
	resp, rpcErr := client.UpdateRecordPlanStream(ctx, &in, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD))
	return resp, md, rpcErr
}
// local_request_Api_UpdateRecordPlanStream_0 decodes the JSON request body
// into a PlanStream message and invokes ApiServer.UpdateRecordPlanStream
// in-process.
func local_request_Api_UpdateRecordPlanStream_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var md runtime.ServerMetadata
	var in PlanStream
	if decodeErr := marshaler.NewDecoder(req.Body).Decode(&in); decodeErr != nil && !errors.Is(decodeErr, io.EOF) {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", decodeErr)
	}
	resp, callErr := server.UpdateRecordPlanStream(ctx, &in)
	return resp, md, callErr
}
// request_Api_RemoveRecordPlanStream_0 builds a DeletePlanStreamRequest from
// the JSON body plus the {planId} and {streamPath=**} path parameters and
// forwards it to ApiClient.RemoveRecordPlanStream.
func request_Api_RemoveRecordPlanStream_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var md runtime.ServerMetadata
	var in DeletePlanStreamRequest
	if decodeErr := marshaler.NewDecoder(req.Body).Decode(&in); decodeErr != nil && !errors.Is(decodeErr, io.EOF) {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", decodeErr)
	}
	rawPlan, havePlan := pathParams["planId"]
	if !havePlan {
		return nil, md, status.Errorf(codes.InvalidArgument, "missing parameter %s", "planId")
	}
	planID, planErr := runtime.Uint32(rawPlan)
	if planErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "planId", planErr)
	}
	in.PlanId = planID
	rawPath, havePath := pathParams["streamPath"]
	if !havePath {
		return nil, md, status.Errorf(codes.InvalidArgument, "missing parameter %s", "streamPath")
	}
	streamPath, pathErr := runtime.String(rawPath)
	if pathErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "streamPath", pathErr)
	}
	in.StreamPath = streamPath
	resp, rpcErr := client.RemoveRecordPlanStream(ctx, &in, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD))
	return resp, md, rpcErr
}
// local_request_Api_RemoveRecordPlanStream_0 builds a DeletePlanStreamRequest
// from the JSON body plus the {planId} and {streamPath=**} path parameters
// and invokes ApiServer.RemoveRecordPlanStream in-process.
func local_request_Api_RemoveRecordPlanStream_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var md runtime.ServerMetadata
	var in DeletePlanStreamRequest
	if decodeErr := marshaler.NewDecoder(req.Body).Decode(&in); decodeErr != nil && !errors.Is(decodeErr, io.EOF) {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", decodeErr)
	}
	rawPlan, havePlan := pathParams["planId"]
	if !havePlan {
		return nil, md, status.Errorf(codes.InvalidArgument, "missing parameter %s", "planId")
	}
	planID, planErr := runtime.Uint32(rawPlan)
	if planErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "planId", planErr)
	}
	in.PlanId = planID
	rawPath, havePath := pathParams["streamPath"]
	if !havePath {
		return nil, md, status.Errorf(codes.InvalidArgument, "missing parameter %s", "streamPath")
	}
	streamPath, pathErr := runtime.String(rawPath)
	if pathErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "streamPath", pathErr)
	}
	in.StreamPath = streamPath
	resp, callErr := server.RemoveRecordPlanStream(ctx, &in)
	return resp, md, callErr
}
// request_Api_ParsePlanTime_0 extracts the {plan} path parameter into a
// ParsePlanRequest and forwards it to ApiClient.ParsePlanTime.
func request_Api_ParsePlanTime_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var md runtime.ServerMetadata
	var in ParsePlanRequest
	// Drain the (unused) request body so the underlying connection can be reused.
	io.Copy(io.Discard, req.Body)
	raw, found := pathParams["plan"]
	if !found {
		return nil, md, status.Errorf(codes.InvalidArgument, "missing parameter %s", "plan")
	}
	plan, convErr := runtime.String(raw)
	if convErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "plan", convErr)
	}
	in.Plan = plan
	resp, rpcErr := client.ParsePlanTime(ctx, &in, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD))
	return resp, md, rpcErr
}
// local_request_Api_ParsePlanTime_0 extracts the {plan} path parameter into a
// ParsePlanRequest and invokes ApiServer.ParsePlanTime in-process.
func local_request_Api_ParsePlanTime_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var md runtime.ServerMetadata
	var in ParsePlanRequest
	raw, found := pathParams["plan"]
	if !found {
		return nil, md, status.Errorf(codes.InvalidArgument, "missing parameter %s", "plan")
	}
	plan, convErr := runtime.String(raw)
	if convErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "plan", convErr)
	}
	in.Plan = plan
	resp, callErr := server.ParsePlanTime(ctx, &in)
	return resp, md, callErr
}
var filter_Api_GetCrontabStatus_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
// request_Api_GetCrontabStatus_0 maps URL query parameters onto a
// CrontabStatusRequest and forwards it to ApiClient.GetCrontabStatus.
func request_Api_GetCrontabStatus_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var md runtime.ServerMetadata
	var in CrontabStatusRequest
	// Drain the (unused) request body so the underlying connection can be reused.
	io.Copy(io.Discard, req.Body)
	if parseErr := req.ParseForm(); parseErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", parseErr)
	}
	if popErr := runtime.PopulateQueryParameters(&in, req.Form, filter_Api_GetCrontabStatus_0); popErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", popErr)
	}
	resp, rpcErr := client.GetCrontabStatus(ctx, &in, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD))
	return resp, md, rpcErr
}
// local_request_Api_GetCrontabStatus_0 maps URL query parameters onto a
// CrontabStatusRequest and invokes ApiServer.GetCrontabStatus in-process.
func local_request_Api_GetCrontabStatus_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var md runtime.ServerMetadata
	var in CrontabStatusRequest
	if parseErr := req.ParseForm(); parseErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", parseErr)
	}
	if popErr := runtime.PopulateQueryParameters(&in, req.Form, filter_Api_GetCrontabStatus_0); popErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", popErr)
	}
	resp, callErr := server.GetCrontabStatus(ctx, &in)
	return resp, md, callErr
}
// RegisterApiHandlerServer registers the http handlers for service Api to "mux".
// UnaryRPC :call ApiServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterApiHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
//
// Each handler below follows the same shape: derive a cancellable context
// from the request, annotate it with gRPC metadata for the target method,
// invoke the local_request_* adapter against "server", merge header/trailer
// metadata captured via the ServerTransportStream, and forward the response
// (or translate the error) back to the HTTP client.
func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ApiServer) error {
	// GET /plan/api/list -> Api.List
	mux.Handle(http.MethodGet, pattern_Api_List_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		var stream runtime.ServerTransportStream
		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/crontab.Api/List", runtime.WithHTTPPathPattern("/plan/api/list"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_Api_List_0(annotatedContext, inboundMarshaler, server, req, pathParams)
		// Merge metadata produced by the handler with that captured on the
		// transport stream before forwarding, even on error.
		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_List_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// POST /plan/api/add -> Api.Add
	mux.Handle(http.MethodPost, pattern_Api_Add_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		var stream runtime.ServerTransportStream
		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/crontab.Api/Add", runtime.WithHTTPPathPattern("/plan/api/add"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_Api_Add_0(annotatedContext, inboundMarshaler, server, req, pathParams)
		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_Add_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// POST /plan/api/update/{id} -> Api.Update
	mux.Handle(http.MethodPost, pattern_Api_Update_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		var stream runtime.ServerTransportStream
		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/crontab.Api/Update", runtime.WithHTTPPathPattern("/plan/api/update/{id}"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_Api_Update_0(annotatedContext, inboundMarshaler, server, req, pathParams)
		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_Update_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// POST /plan/api/remove/{id} -> Api.Remove
	mux.Handle(http.MethodPost, pattern_Api_Remove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		var stream runtime.ServerTransportStream
		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/crontab.Api/Remove", runtime.WithHTTPPathPattern("/plan/api/remove/{id}"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_Api_Remove_0(annotatedContext, inboundMarshaler, server, req, pathParams)
		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_Remove_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// GET /planstream/api/list -> Api.ListRecordPlanStreams
	mux.Handle(http.MethodGet, pattern_Api_ListRecordPlanStreams_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		var stream runtime.ServerTransportStream
		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/crontab.Api/ListRecordPlanStreams", runtime.WithHTTPPathPattern("/planstream/api/list"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_Api_ListRecordPlanStreams_0(annotatedContext, inboundMarshaler, server, req, pathParams)
		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_ListRecordPlanStreams_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// POST /planstream/api/add -> Api.AddRecordPlanStream
	mux.Handle(http.MethodPost, pattern_Api_AddRecordPlanStream_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		var stream runtime.ServerTransportStream
		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/crontab.Api/AddRecordPlanStream", runtime.WithHTTPPathPattern("/planstream/api/add"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_Api_AddRecordPlanStream_0(annotatedContext, inboundMarshaler, server, req, pathParams)
		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_AddRecordPlanStream_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// POST /planstream/api/update -> Api.UpdateRecordPlanStream
	mux.Handle(http.MethodPost, pattern_Api_UpdateRecordPlanStream_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		var stream runtime.ServerTransportStream
		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/crontab.Api/UpdateRecordPlanStream", runtime.WithHTTPPathPattern("/planstream/api/update"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_Api_UpdateRecordPlanStream_0(annotatedContext, inboundMarshaler, server, req, pathParams)
		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_UpdateRecordPlanStream_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// POST /planstream/api/remove/{planId}/{streamPath=**} -> Api.RemoveRecordPlanStream
	mux.Handle(http.MethodPost, pattern_Api_RemoveRecordPlanStream_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		var stream runtime.ServerTransportStream
		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/crontab.Api/RemoveRecordPlanStream", runtime.WithHTTPPathPattern("/planstream/api/remove/{planId}/{streamPath=**}"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_Api_RemoveRecordPlanStream_0(annotatedContext, inboundMarshaler, server, req, pathParams)
		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_RemoveRecordPlanStream_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// GET /plan/api/parse/{plan} -> Api.ParsePlanTime
	mux.Handle(http.MethodGet, pattern_Api_ParsePlanTime_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		var stream runtime.ServerTransportStream
		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/crontab.Api/ParsePlanTime", runtime.WithHTTPPathPattern("/plan/api/parse/{plan}"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_Api_ParsePlanTime_0(annotatedContext, inboundMarshaler, server, req, pathParams)
		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_ParsePlanTime_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// GET /crontab/api/status -> Api.GetCrontabStatus
	mux.Handle(http.MethodGet, pattern_Api_GetCrontabStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		var stream runtime.ServerTransportStream
		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/crontab.Api/GetCrontabStatus", runtime.WithHTTPPathPattern("/crontab/api/status"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_Api_GetCrontabStatus_0(annotatedContext, inboundMarshaler, server, req, pathParams)
		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_GetCrontabStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	return nil
}
// RegisterApiHandlerFromEndpoint is same as RegisterApiHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterApiHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
	conn, err := grpc.NewClient(endpoint, opts...)
	if err != nil {
		return err
	}
	// The deferred closure reads the named result "err" as set by the final
	// return: on registration failure it closes the connection immediately;
	// on success it leaves a goroutine that closes it once ctx is done.
	defer func() {
		if err != nil {
			if cerr := conn.Close(); cerr != nil {
				grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
			}
			return
		}
		go func() {
			<-ctx.Done()
			if cerr := conn.Close(); cerr != nil {
				grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
			}
		}()
	}()
	return RegisterApiHandler(ctx, mux, conn)
}
// RegisterApiHandler registers the http handlers for service Api to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterApiHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
	client := NewApiClient(conn)
	return RegisterApiHandlerClient(ctx, mux, client)
}
// RegisterApiHandlerClient registers the http handlers for service Api
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ApiClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ApiClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ApiClient" to call the correct interceptors. This client ignores the HTTP middlewares.
//
// Each handler follows the same shape: derive a cancellable context from the
// request, annotate it with gRPC metadata for the target method, invoke the
// request_* adapter against "client", and forward the response (or translate
// the error) back to the HTTP client.
func RegisterApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ApiClient) error {
	// GET /plan/api/list -> Api.List
	mux.Handle(http.MethodGet, pattern_Api_List_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/crontab.Api/List", runtime.WithHTTPPathPattern("/plan/api/list"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Api_List_0(annotatedContext, inboundMarshaler, client, req, pathParams)
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_List_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// POST /plan/api/add -> Api.Add
	mux.Handle(http.MethodPost, pattern_Api_Add_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/crontab.Api/Add", runtime.WithHTTPPathPattern("/plan/api/add"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Api_Add_0(annotatedContext, inboundMarshaler, client, req, pathParams)
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_Add_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// POST /plan/api/update/{id} -> Api.Update
	mux.Handle(http.MethodPost, pattern_Api_Update_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/crontab.Api/Update", runtime.WithHTTPPathPattern("/plan/api/update/{id}"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Api_Update_0(annotatedContext, inboundMarshaler, client, req, pathParams)
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_Update_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// POST /plan/api/remove/{id} -> Api.Remove
	mux.Handle(http.MethodPost, pattern_Api_Remove_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/crontab.Api/Remove", runtime.WithHTTPPathPattern("/plan/api/remove/{id}"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Api_Remove_0(annotatedContext, inboundMarshaler, client, req, pathParams)
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_Remove_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// GET /planstream/api/list -> Api.ListRecordPlanStreams
	mux.Handle(http.MethodGet, pattern_Api_ListRecordPlanStreams_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/crontab.Api/ListRecordPlanStreams", runtime.WithHTTPPathPattern("/planstream/api/list"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Api_ListRecordPlanStreams_0(annotatedContext, inboundMarshaler, client, req, pathParams)
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_ListRecordPlanStreams_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// POST /planstream/api/add -> Api.AddRecordPlanStream
	mux.Handle(http.MethodPost, pattern_Api_AddRecordPlanStream_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/crontab.Api/AddRecordPlanStream", runtime.WithHTTPPathPattern("/planstream/api/add"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Api_AddRecordPlanStream_0(annotatedContext, inboundMarshaler, client, req, pathParams)
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_AddRecordPlanStream_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// POST /planstream/api/update -> Api.UpdateRecordPlanStream
	mux.Handle(http.MethodPost, pattern_Api_UpdateRecordPlanStream_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/crontab.Api/UpdateRecordPlanStream", runtime.WithHTTPPathPattern("/planstream/api/update"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Api_UpdateRecordPlanStream_0(annotatedContext, inboundMarshaler, client, req, pathParams)
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_UpdateRecordPlanStream_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// POST /planstream/api/remove/{planId}/{streamPath=**} -> Api.RemoveRecordPlanStream
	mux.Handle(http.MethodPost, pattern_Api_RemoveRecordPlanStream_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/crontab.Api/RemoveRecordPlanStream", runtime.WithHTTPPathPattern("/planstream/api/remove/{planId}/{streamPath=**}"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Api_RemoveRecordPlanStream_0(annotatedContext, inboundMarshaler, client, req, pathParams)
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_RemoveRecordPlanStream_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// GET /plan/api/parse/{plan} -> Api.ParsePlanTime
	mux.Handle(http.MethodGet, pattern_Api_ParsePlanTime_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/crontab.Api/ParsePlanTime", runtime.WithHTTPPathPattern("/plan/api/parse/{plan}"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Api_ParsePlanTime_0(annotatedContext, inboundMarshaler, client, req, pathParams)
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_ParsePlanTime_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	// GET /crontab/api/status -> Api.GetCrontabStatus
	mux.Handle(http.MethodGet, pattern_Api_GetCrontabStatus_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/crontab.Api/GetCrontabStatus", runtime.WithHTTPPathPattern("/crontab/api/status"))
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Api_GetCrontabStatus_0(annotatedContext, inboundMarshaler, client, req, pathParams)
		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
		if err != nil {
			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
			return
		}
		forward_Api_GetCrontabStatus_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
	})
	return nil
}
// Compiled HTTP path patterns for each Api method, derived from the
// google.api.http annotations in the crontab proto definition.
var (
	pattern_Api_List_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"plan", "api", "list"}, ""))
	pattern_Api_Add_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"plan", "api", "add"}, ""))
	pattern_Api_Update_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"plan", "api", "update", "id"}, ""))
	pattern_Api_Remove_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"plan", "api", "remove", "id"}, ""))
	pattern_Api_ListRecordPlanStreams_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"planstream", "api", "list"}, ""))
	pattern_Api_AddRecordPlanStream_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"planstream", "api", "add"}, ""))
	pattern_Api_UpdateRecordPlanStream_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"planstream", "api", "update"}, ""))
	// Matches /planstream/api/remove/{planId}/{streamPath=**} — the trailing
	// segment is a deep wildcard capturing the rest of the path.
	pattern_Api_RemoveRecordPlanStream_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 3, 0, 4, 1, 5, 4}, []string{"planstream", "api", "remove", "planId", "streamPath"}, ""))
	pattern_Api_ParsePlanTime_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 0}, []string{"plan", "api", "parse"}, ""))
	pattern_Api_GetCrontabStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"crontab", "api", "status"}, ""))
)
// Response forwarders for each RPC; all use the default message forwarder
// provided by the grpc-gateway runtime.
var (
	forward_Api_List_0 = runtime.ForwardResponseMessage

	forward_Api_Add_0 = runtime.ForwardResponseMessage

	forward_Api_Update_0 = runtime.ForwardResponseMessage

	forward_Api_Remove_0 = runtime.ForwardResponseMessage

	forward_Api_ListRecordPlanStreams_0 = runtime.ForwardResponseMessage

	forward_Api_AddRecordPlanStream_0 = runtime.ForwardResponseMessage

	forward_Api_UpdateRecordPlanStream_0 = runtime.ForwardResponseMessage

	forward_Api_RemoveRecordPlanStream_0 = runtime.ForwardResponseMessage

	forward_Api_ParsePlanTime_0 = runtime.ForwardResponseMessage

	forward_Api_GetCrontabStatus_0 = runtime.ForwardResponseMessage
)

View File

@@ -0,0 +1,190 @@
syntax = "proto3";
import "google/api/annotations.proto";
import "google/protobuf/timestamp.proto";
package crontab;
option go_package="m7s.live/v5/plugin/crontab/pb";
// api exposes CRUD operations for record plans and plan/stream bindings,
// plus helpers to parse plan strings and inspect crontab task status.
service api {
  // List returns a paginated list of record plans.
  rpc List (ReqPlanList) returns (PlanResponseList) {
    option (google.api.http) = {
      get: "/plan/api/list"
    };
  }
  // Add creates a new record plan.
  rpc Add (Plan) returns (Response) {
    option (google.api.http) = {
      post: "/plan/api/add"
      body: "*"
    };
  }
  // Update modifies an existing record plan identified by id.
  rpc Update (Plan) returns (Response) {
    option (google.api.http) = {
      post: "/plan/api/update/{id}"
      body: "*"
    };
  }
  // Remove deletes the record plan identified by id.
  rpc Remove (DeleteRequest) returns (Response) {
    option (google.api.http) = {
      post: "/plan/api/remove/{id}"
      body: "*"
    };
  }
  // RecordPlanStream-related endpoints.
  // ListRecordPlanStreams returns a paginated list of plan/stream bindings.
  rpc ListRecordPlanStreams (ReqPlanStreamList) returns (RecordPlanStreamResponseList) {
    option (google.api.http) = {
      get: "/planstream/api/list"
    };
  }
  // AddRecordPlanStream binds a stream to a record plan.
  rpc AddRecordPlanStream (PlanStream) returns (Response) {
    option (google.api.http) = {
      post: "/planstream/api/add"
      body: "*"
    };
  }
  // UpdateRecordPlanStream modifies an existing plan/stream binding.
  rpc UpdateRecordPlanStream (PlanStream) returns (Response) {
    option (google.api.http) = {
      post: "/planstream/api/update"
      body: "*"
    };
  }
  // RemoveRecordPlanStream removes a binding; streamPath may contain slashes
  // (matched by the {streamPath=**} wildcard).
  rpc RemoveRecordPlanStream (DeletePlanStreamRequest) returns (Response) {
    option (google.api.http) = {
      post: "/planstream/api/remove/{planId}/{streamPath=**}"
      body: "*"
    };
  }
  // ParsePlanTime parses a plan string and returns its time slot information.
  rpc ParsePlanTime (ParsePlanRequest) returns (ParsePlanResponse) {
    option (google.api.http) = {
      get: "/plan/api/parse/{plan}"
    };
  }
  // GetCrontabStatus returns the current status of crontab recording tasks.
  rpc GetCrontabStatus (CrontabStatusRequest) returns (CrontabStatusResponse) {
    option (google.api.http) = {
      get: "/crontab/api/status"
    };
  }
}
// PlanResponseList is the paginated response for List.
message PlanResponseList {
  int32 code = 1;
  string message = 2;
  uint32 totalCount = 3;
  uint32 pageNum = 4;
  uint32 pageSize = 5;
  repeated Plan data = 6;
}

// Plan is a record plan: a named, enable-able recording schedule.
message Plan {
  uint32 id = 1;
  string name = 2;
  bool enable = 3;
  google.protobuf.Timestamp createTime = 4;
  google.protobuf.Timestamp updateTime = 5;
  string plan = 6; // plan string; see ParsePlanRequest.plan for the format
}

// ReqPlanList is the paging request for List.
message ReqPlanList {
  uint32 pageNum = 1;
  uint32 pageSize = 2;
}

// DeleteRequest identifies the record plan to remove.
message DeleteRequest {
  uint32 id = 1;
}

// Response is the generic status reply.
message Response {
  int32 code = 1;
  string message = 2;
}
// RecordPlanStream-related message definitions.

// PlanStream binds a stream to a record plan together with its recording
// options.
message PlanStream {
  uint32 planId = 1;
  string stream_path = 2;
  string fragment = 3;
  string filePath = 4;
  string record_type = 5; // recording container type, e.g. "mp4", "flv"
  google.protobuf.Timestamp created_at = 6;
  google.protobuf.Timestamp updated_at = 7;
  bool enable = 8; // whether this recording stream is enabled
}

// ReqPlanStreamList is the paging/filter request for ListRecordPlanStreams.
message ReqPlanStreamList {
  uint32 pageNum = 1;
  uint32 pageSize = 2;
  uint32 planId = 3;      // optional: filter by record plan ID
  string stream_path = 4; // optional: filter by stream path
}

// RecordPlanStreamResponseList is the paginated response for
// ListRecordPlanStreams.
message RecordPlanStreamResponseList {
  int32 code = 1;
  string message = 2;
  uint32 totalCount = 3;
  uint32 pageNum = 4;
  uint32 pageSize = 5;
  repeated PlanStream data = 6;
}

// DeletePlanStreamRequest identifies the plan/stream binding to remove.
message DeletePlanStreamRequest {
  uint32 planId = 1;
  string streamPath = 2;
}
// ParsePlanRequest carries the plan string to parse.
message ParsePlanRequest {
  string plan = 1; // 168-character 0/1 string: one flag per hour of the week
}

// TimeSlotInfo describes a single contiguous recording time slot.
message TimeSlotInfo {
  google.protobuf.Timestamp start = 1; // slot start time
  google.protobuf.Timestamp end = 2;   // slot end time
  string weekday = 3;                  // weekday name (e.g. Monday)
  string time_range = 4;               // time range, e.g. 09:00-10:00
}

// ParsePlanResponse returns the time slots encoded by a plan string.
message ParsePlanResponse {
  int32 code = 1;                  // response code
  string message = 2;              // response message
  repeated TimeSlotInfo slots = 3; // all time slots in the plan
  TimeSlotInfo next_slot = 4;      // the next slot starting from the current time
}
// Messages for the crontab status endpoint.

// CrontabStatusRequest filters the status query; leave empty to fetch all tasks.
message CrontabStatusRequest {
  string stream_path = 1; // optional: filter by stream path
}

// CrontabTaskInfo describes one scheduled or running recording task.
message CrontabTaskInfo {
  uint32 plan_id = 1;                          // record plan ID
  string plan_name = 2;                        // record plan name
  string stream_path = 3;                      // stream path
  bool is_recording = 4;                       // whether the task is currently recording
  google.protobuf.Timestamp start_time = 5;    // start time of the current/next task
  google.protobuf.Timestamp end_time = 6;      // end time of the current/next task
  string time_range = 7;                       // time range, e.g. 09:00-10:00
  string weekday = 8;                          // weekday name (e.g. Monday)
  string file_path = 9;                        // file save path
  string fragment = 10;                        // fragment (segmentation) setting
  uint32 elapsed_seconds = 11;                 // elapsed seconds (only meaningful for running tasks)
  uint32 remaining_seconds = 12;               // remaining seconds
  repeated TimeSlotInfo plan_slots = 13;       // the plan's full list of time slots
}

// CrontabStatusResponse reports running and upcoming crontab tasks.
message CrontabStatusResponse {
  int32 code = 1;                              // response code
  string message = 2;                          // response message
  repeated CrontabTaskInfo running_tasks = 3;  // tasks currently executing
  repeated CrontabTaskInfo next_tasks = 4;     // next scheduled tasks
  uint32 total_running = 5;                    // number of running tasks
  uint32 total_planned = 6;                    // number of planned tasks
}

View File

@@ -0,0 +1,469 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.29.3
// source: crontab.proto
package pb
import (
context "context"
grpc "google.golang.org/grpc"
codes "google.golang.org/grpc/codes"
status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
// Fully-qualified gRPC method names for the crontab.api service, used by
// both the generated client and the server service descriptor.
const (
	Api_List_FullMethodName                   = "/crontab.api/List"
	Api_Add_FullMethodName                    = "/crontab.api/Add"
	Api_Update_FullMethodName                 = "/crontab.api/Update"
	Api_Remove_FullMethodName                 = "/crontab.api/Remove"
	Api_ListRecordPlanStreams_FullMethodName  = "/crontab.api/ListRecordPlanStreams"
	Api_AddRecordPlanStream_FullMethodName    = "/crontab.api/AddRecordPlanStream"
	Api_UpdateRecordPlanStream_FullMethodName = "/crontab.api/UpdateRecordPlanStream"
	Api_RemoveRecordPlanStream_FullMethodName = "/crontab.api/RemoveRecordPlanStream"
	Api_ParsePlanTime_FullMethodName          = "/crontab.api/ParsePlanTime"
	Api_GetCrontabStatus_FullMethodName       = "/crontab.api/GetCrontabStatus"
)
// ApiClient is the client API for Api service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
type ApiClient interface {
	List(ctx context.Context, in *ReqPlanList, opts ...grpc.CallOption) (*PlanResponseList, error)
	Add(ctx context.Context, in *Plan, opts ...grpc.CallOption) (*Response, error)
	Update(ctx context.Context, in *Plan, opts ...grpc.CallOption) (*Response, error)
	Remove(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*Response, error)
	// RecordPlanStream-related endpoints.
	ListRecordPlanStreams(ctx context.Context, in *ReqPlanStreamList, opts ...grpc.CallOption) (*RecordPlanStreamResponseList, error)
	AddRecordPlanStream(ctx context.Context, in *PlanStream, opts ...grpc.CallOption) (*Response, error)
	UpdateRecordPlanStream(ctx context.Context, in *PlanStream, opts ...grpc.CallOption) (*Response, error)
	RemoveRecordPlanStream(ctx context.Context, in *DeletePlanStreamRequest, opts ...grpc.CallOption) (*Response, error)
	// ParsePlanTime parses a plan string and returns its time slot information.
	ParsePlanTime(ctx context.Context, in *ParsePlanRequest, opts ...grpc.CallOption) (*ParsePlanResponse, error)
	// GetCrontabStatus returns the current status of crontab tasks.
	GetCrontabStatus(ctx context.Context, in *CrontabStatusRequest, opts ...grpc.CallOption) (*CrontabStatusResponse, error)
}
// apiClient is the concrete ApiClient backed by a gRPC client connection.
type apiClient struct {
	cc grpc.ClientConnInterface
}

// NewApiClient wraps an existing client connection in an ApiClient.
func NewApiClient(cc grpc.ClientConnInterface) ApiClient {
	return &apiClient{cc}
}
// Each client method below invokes the corresponding unary RPC on the shared
// ClientConn, marking the call as a static method for interceptors/stats.
func (c *apiClient) List(ctx context.Context, in *ReqPlanList, opts ...grpc.CallOption) (*PlanResponseList, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(PlanResponseList)
	err := c.cc.Invoke(ctx, Api_List_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *apiClient) Add(ctx context.Context, in *Plan, opts ...grpc.CallOption) (*Response, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(Response)
	err := c.cc.Invoke(ctx, Api_Add_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *apiClient) Update(ctx context.Context, in *Plan, opts ...grpc.CallOption) (*Response, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(Response)
	err := c.cc.Invoke(ctx, Api_Update_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *apiClient) Remove(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*Response, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(Response)
	err := c.cc.Invoke(ctx, Api_Remove_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *apiClient) ListRecordPlanStreams(ctx context.Context, in *ReqPlanStreamList, opts ...grpc.CallOption) (*RecordPlanStreamResponseList, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(RecordPlanStreamResponseList)
	err := c.cc.Invoke(ctx, Api_ListRecordPlanStreams_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *apiClient) AddRecordPlanStream(ctx context.Context, in *PlanStream, opts ...grpc.CallOption) (*Response, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(Response)
	err := c.cc.Invoke(ctx, Api_AddRecordPlanStream_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *apiClient) UpdateRecordPlanStream(ctx context.Context, in *PlanStream, opts ...grpc.CallOption) (*Response, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(Response)
	err := c.cc.Invoke(ctx, Api_UpdateRecordPlanStream_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *apiClient) RemoveRecordPlanStream(ctx context.Context, in *DeletePlanStreamRequest, opts ...grpc.CallOption) (*Response, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(Response)
	err := c.cc.Invoke(ctx, Api_RemoveRecordPlanStream_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *apiClient) ParsePlanTime(ctx context.Context, in *ParsePlanRequest, opts ...grpc.CallOption) (*ParsePlanResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(ParsePlanResponse)
	err := c.cc.Invoke(ctx, Api_ParsePlanTime_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *apiClient) GetCrontabStatus(ctx context.Context, in *CrontabStatusRequest, opts ...grpc.CallOption) (*CrontabStatusResponse, error) {
	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	out := new(CrontabStatusResponse)
	err := c.cc.Invoke(ctx, Api_GetCrontabStatus_FullMethodName, in, out, cOpts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// ApiServer is the server API for Api service.
// All implementations must embed UnimplementedApiServer
// for forward compatibility.
type ApiServer interface {
	List(context.Context, *ReqPlanList) (*PlanResponseList, error)
	Add(context.Context, *Plan) (*Response, error)
	Update(context.Context, *Plan) (*Response, error)
	Remove(context.Context, *DeleteRequest) (*Response, error)
	// RecordPlanStream-related endpoints.
	ListRecordPlanStreams(context.Context, *ReqPlanStreamList) (*RecordPlanStreamResponseList, error)
	AddRecordPlanStream(context.Context, *PlanStream) (*Response, error)
	UpdateRecordPlanStream(context.Context, *PlanStream) (*Response, error)
	RemoveRecordPlanStream(context.Context, *DeletePlanStreamRequest) (*Response, error)
	// ParsePlanTime parses a plan string and returns its time slot information.
	ParsePlanTime(context.Context, *ParsePlanRequest) (*ParsePlanResponse, error)
	// GetCrontabStatus returns the current status of crontab tasks.
	GetCrontabStatus(context.Context, *CrontabStatusRequest) (*CrontabStatusResponse, error)
	mustEmbedUnimplementedApiServer()
}
// UnimplementedApiServer must be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedApiServer struct{}

func (UnimplementedApiServer) List(context.Context, *ReqPlanList) (*PlanResponseList, error) {
	return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
}
func (UnimplementedApiServer) Add(context.Context, *Plan) (*Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Add not implemented")
}
func (UnimplementedApiServer) Update(context.Context, *Plan) (*Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Update not implemented")
}
func (UnimplementedApiServer) Remove(context.Context, *DeleteRequest) (*Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Remove not implemented")
}
func (UnimplementedApiServer) ListRecordPlanStreams(context.Context, *ReqPlanStreamList) (*RecordPlanStreamResponseList, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ListRecordPlanStreams not implemented")
}
func (UnimplementedApiServer) AddRecordPlanStream(context.Context, *PlanStream) (*Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method AddRecordPlanStream not implemented")
}
func (UnimplementedApiServer) UpdateRecordPlanStream(context.Context, *PlanStream) (*Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method UpdateRecordPlanStream not implemented")
}
func (UnimplementedApiServer) RemoveRecordPlanStream(context.Context, *DeletePlanStreamRequest) (*Response, error) {
	return nil, status.Errorf(codes.Unimplemented, "method RemoveRecordPlanStream not implemented")
}
func (UnimplementedApiServer) ParsePlanTime(context.Context, *ParsePlanRequest) (*ParsePlanResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ParsePlanTime not implemented")
}
func (UnimplementedApiServer) GetCrontabStatus(context.Context, *CrontabStatusRequest) (*CrontabStatusResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method GetCrontabStatus not implemented")
}
func (UnimplementedApiServer) mustEmbedUnimplementedApiServer() {}
func (UnimplementedApiServer) testEmbeddedByValue()             {}
// UnsafeApiServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to ApiServer will
// result in compilation errors.
type UnsafeApiServer interface {
	mustEmbedUnimplementedApiServer()
}

// RegisterApiServer registers srv with the gRPC service registrar under the
// crontab.api service descriptor.
func RegisterApiServer(s grpc.ServiceRegistrar, srv ApiServer) {
	// If the following call panics, it indicates UnimplementedApiServer was
	// embedded by pointer and is nil. This will cause panics if an
	// unimplemented method is ever invoked, so we test this at initialization
	// time to prevent it from happening at runtime later due to I/O.
	if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
		t.testEmbeddedByValue()
	}
	s.RegisterService(&Api_ServiceDesc, srv)
}
// Unary server handlers: each decodes the request message, then either calls
// the server directly or routes the call through the configured interceptor.
func _Api_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ReqPlanList)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ApiServer).List(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Api_List_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ApiServer).List(ctx, req.(*ReqPlanList))
	}
	return interceptor(ctx, in, info, handler)
}

func _Api_Add_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(Plan)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ApiServer).Add(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Api_Add_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ApiServer).Add(ctx, req.(*Plan))
	}
	return interceptor(ctx, in, info, handler)
}

func _Api_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(Plan)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ApiServer).Update(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Api_Update_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ApiServer).Update(ctx, req.(*Plan))
	}
	return interceptor(ctx, in, info, handler)
}

func _Api_Remove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DeleteRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ApiServer).Remove(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Api_Remove_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ApiServer).Remove(ctx, req.(*DeleteRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Api_ListRecordPlanStreams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ReqPlanStreamList)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ApiServer).ListRecordPlanStreams(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Api_ListRecordPlanStreams_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ApiServer).ListRecordPlanStreams(ctx, req.(*ReqPlanStreamList))
	}
	return interceptor(ctx, in, info, handler)
}
// Unary server handlers (continued); same decode-then-dispatch pattern as above.
func _Api_AddRecordPlanStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(PlanStream)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ApiServer).AddRecordPlanStream(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Api_AddRecordPlanStream_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ApiServer).AddRecordPlanStream(ctx, req.(*PlanStream))
	}
	return interceptor(ctx, in, info, handler)
}

func _Api_UpdateRecordPlanStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(PlanStream)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ApiServer).UpdateRecordPlanStream(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Api_UpdateRecordPlanStream_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ApiServer).UpdateRecordPlanStream(ctx, req.(*PlanStream))
	}
	return interceptor(ctx, in, info, handler)
}

func _Api_RemoveRecordPlanStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(DeletePlanStreamRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ApiServer).RemoveRecordPlanStream(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Api_RemoveRecordPlanStream_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ApiServer).RemoveRecordPlanStream(ctx, req.(*DeletePlanStreamRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Api_ParsePlanTime_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ParsePlanRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ApiServer).ParsePlanTime(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Api_ParsePlanTime_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ApiServer).ParsePlanTime(ctx, req.(*ParsePlanRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _Api_GetCrontabStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(CrontabStatusRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(ApiServer).GetCrontabStatus(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: Api_GetCrontabStatus_FullMethodName,
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(ApiServer).GetCrontabStatus(ctx, req.(*CrontabStatusRequest))
	}
	return interceptor(ctx, in, info, handler)
}
// Api_ServiceDesc is the grpc.ServiceDesc for Api service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
var Api_ServiceDesc = grpc.ServiceDesc{
	ServiceName: "crontab.api",
	HandlerType: (*ApiServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "List",
			Handler:    _Api_List_Handler,
		},
		{
			MethodName: "Add",
			Handler:    _Api_Add_Handler,
		},
		{
			MethodName: "Update",
			Handler:    _Api_Update_Handler,
		},
		{
			MethodName: "Remove",
			Handler:    _Api_Remove_Handler,
		},
		{
			MethodName: "ListRecordPlanStreams",
			Handler:    _Api_ListRecordPlanStreams_Handler,
		},
		{
			MethodName: "AddRecordPlanStream",
			Handler:    _Api_AddRecordPlanStream_Handler,
		},
		{
			MethodName: "UpdateRecordPlanStream",
			Handler:    _Api_UpdateRecordPlanStream_Handler,
		},
		{
			MethodName: "RemoveRecordPlanStream",
			Handler:    _Api_RemoveRecordPlanStream_Handler,
		},
		{
			MethodName: "ParsePlanTime",
			Handler:    _Api_ParsePlanTime_Handler,
		},
		{
			MethodName: "GetCrontabStatus",
			Handler:    _Api_GetCrontabStatus_Handler,
		},
	},
	Streams:  []grpc.StreamDesc{},
	Metadata: "crontab.proto",
}

View File

@@ -0,0 +1,17 @@
package pkg
import (
"gorm.io/gorm"
)
// RecordPlan is the persistent model of a recording plan (a weekly schedule).
// It embeds gorm.Model for the ID/CreatedAt/UpdatedAt/DeletedAt columns.
type RecordPlan struct {
	gorm.Model
	Name   string `json:"name" gorm:"default:''"`      // display name of the plan
	Plan   string `json:"plan" gorm:"type:text"`       // plan string (schedule encoding; format defined by the parser)
	Enable bool   `json:"enable" gorm:"default:false"` // whether the plan is active
}
// GetKey returns the plan's primary key, exposing it to generic keyed
// collections that look records up by ID.
func (r *RecordPlan) GetKey() uint {
	return r.Model.ID
}

View File

@@ -0,0 +1,51 @@
package pkg
import (
"gorm.io/gorm"
"time"
)
// RecordPlanStream is the persistent binding between a record plan and a
// stream, with per-stream recording options. (PlanID, StreamPath) form a
// composite primary key.
type RecordPlanStream struct {
	PlanID     uint           `json:"plan_id" gorm:"primaryKey;type:bigint;not null"` // record plan ID
	StreamPath string         `json:"stream_path" gorm:"primaryKey;type:varchar(255)"`
	Fragment   string         `json:"fragment" gorm:"type:text"` // fragment (segmentation) setting
	FilePath   string         `json:"file_path" gorm:"type:varchar(255)"`
	CreatedAt  time.Time
	UpdatedAt  time.Time
	DeletedAt  gorm.DeletedAt `gorm:"index"` // soft-delete marker
	Enable     bool           `json:"enable" gorm:"default:false"` // whether recording is enabled for this stream
	RecordType string         `json:"record_type" gorm:"type:varchar(255)"` // recording type; presumably a container name such as "mp4" — confirm against writers
}
// TableName overrides GORM's default naming convention so the model is
// stored in the "record_plans_streams" table.
func (RecordPlanStream) TableName() string {
	const tableName = "record_plans_streams"
	return tableName
}
// ScopeStreamPathLike returns a GORM scope that fuzzy-matches StreamPath
// with a LIKE clause. An empty streamPath leaves the query unfiltered.
func ScopeStreamPathLike(streamPath string) func(db *gorm.DB) *gorm.DB {
	return func(db *gorm.DB) *gorm.DB {
		if streamPath == "" {
			return db
		}
		pattern := "%" + streamPath + "%"
		return db.Where("record_plans_streams.stream_path LIKE ?", pattern)
	}
}
// ScopeOrderByCreatedAtDesc returns a GORM scope that sorts results by
// creation time, newest first.
func ScopeOrderByCreatedAtDesc() func(db *gorm.DB) *gorm.DB {
	const orderClause = "record_plans_streams.created_at DESC"
	return func(db *gorm.DB) *gorm.DB {
		return db.Order(orderClause)
	}
}
// ScopeRecordPlanID returns a GORM scope that filters by record plan ID.
// A zero recordPlanID leaves the query unfiltered.
func ScopeRecordPlanID(recordPlanID uint) func(db *gorm.DB) *gorm.DB {
	return func(db *gorm.DB) *gorm.DB {
		if recordPlanID == 0 {
			return db
		}
		return db.Where(&RecordPlanStream{PlanID: recordPlanID})
	}
}

View File

@@ -15,6 +15,7 @@ import (
"github.com/gorilla/websocket"
"github.com/shirou/gopsutil/v4/cpu"
"github.com/shirou/gopsutil/v4/process"
"m7s.live/v5/pkg/task"
)
//go:embed static/*
@@ -40,8 +41,17 @@ type consumer struct {
}
type server struct {
task.TickTask
consumers []consumer
consumersMutex sync.RWMutex
data DataStorage
lastPause uint32
dataMutex sync.RWMutex
lastConsumerID uint
upgrader websocket.Upgrader
prevSysTime float64
prevUserTime float64
myProcess *process.Process
}
type SimplePair struct {
@@ -75,99 +85,91 @@ const (
maxCount int = 86400
)
var (
data DataStorage
lastPause uint32
mutex sync.RWMutex
lastConsumerID uint
s server
upgrader = websocket.Upgrader{
func (s *server) Start() error {
var err error
s.myProcess, err = process.NewProcess(int32(os.Getpid()))
if err != nil {
log.Printf("Failed to get process: %v", err)
}
// 初始化 WebSocket upgrader
s.upgrader = websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
prevSysTime float64
prevUserTime float64
myProcess *process.Process
)
func init() {
myProcess, _ = process.NewProcess(int32(os.Getpid()))
// preallocate arrays in data, helps save on reallocations caused by append()
// when maxCount is large
data.BytesAllocated = make([]SimplePair, 0, maxCount)
data.GcPauses = make([]SimplePair, 0, maxCount)
data.CPUUsage = make([]CPUPair, 0, maxCount)
data.Pprof = make([]PprofPair, 0, maxCount)
go s.gatherData()
s.data.BytesAllocated = make([]SimplePair, 0, maxCount)
s.data.GcPauses = make([]SimplePair, 0, maxCount)
s.data.CPUUsage = make([]CPUPair, 0, maxCount)
s.data.Pprof = make([]PprofPair, 0, maxCount)
return s.TickTask.Start()
}
func (s *server) gatherData() {
timer := time.Tick(time.Second)
func (s *server) GetTickInterval() time.Duration {
return time.Second
}
for now := range timer {
nowUnix := now.Unix()
func (s *server) Tick(any) {
now := time.Now()
nowUnix := now.Unix()
var ms runtime.MemStats
runtime.ReadMemStats(&ms)
var ms runtime.MemStats
runtime.ReadMemStats(&ms)
u := update{
Ts: nowUnix * 1000,
Block: pprof.Lookup("block").Count(),
Goroutine: pprof.Lookup("goroutine").Count(),
Heap: pprof.Lookup("heap").Count(),
Mutex: pprof.Lookup("mutex").Count(),
Threadcreate: pprof.Lookup("threadcreate").Count(),
}
data.Pprof = append(data.Pprof, PprofPair{
uint64(nowUnix) * 1000,
u.Block,
u.Goroutine,
u.Heap,
u.Mutex,
u.Threadcreate,
})
cpuTimes, err := myProcess.Times()
if err != nil {
cpuTimes = &cpu.TimesStat{}
}
if prevUserTime != 0 {
u.CPUUser = cpuTimes.User - prevUserTime
u.CPUSys = cpuTimes.System - prevSysTime
data.CPUUsage = append(data.CPUUsage, CPUPair{uint64(nowUnix) * 1000, u.CPUUser, u.CPUSys})
}
prevUserTime = cpuTimes.User
prevSysTime = cpuTimes.System
mutex.Lock()
bytesAllocated := ms.Alloc
u.BytesAllocated = bytesAllocated
data.BytesAllocated = append(data.BytesAllocated, SimplePair{uint64(nowUnix) * 1000, bytesAllocated})
if lastPause == 0 || lastPause != ms.NumGC {
gcPause := ms.PauseNs[(ms.NumGC+255)%256]
u.GcPause = gcPause
data.GcPauses = append(data.GcPauses, SimplePair{uint64(nowUnix) * 1000, gcPause})
lastPause = ms.NumGC
}
if len(data.BytesAllocated) > maxCount {
data.BytesAllocated = data.BytesAllocated[len(data.BytesAllocated)-maxCount:]
}
if len(data.GcPauses) > maxCount {
data.GcPauses = data.GcPauses[len(data.GcPauses)-maxCount:]
}
mutex.Unlock()
s.sendToConsumers(u)
u := update{
Ts: nowUnix * 1000,
Block: pprof.Lookup("block").Count(),
Goroutine: pprof.Lookup("goroutine").Count(),
Heap: pprof.Lookup("heap").Count(),
Mutex: pprof.Lookup("mutex").Count(),
Threadcreate: pprof.Lookup("threadcreate").Count(),
}
s.data.Pprof = append(s.data.Pprof, PprofPair{
uint64(nowUnix) * 1000,
u.Block,
u.Goroutine,
u.Heap,
u.Mutex,
u.Threadcreate,
})
cpuTimes, err := s.myProcess.Times()
if err != nil {
cpuTimes = &cpu.TimesStat{}
}
if s.prevUserTime != 0 {
u.CPUUser = cpuTimes.User - s.prevUserTime
u.CPUSys = cpuTimes.System - s.prevSysTime
s.data.CPUUsage = append(s.data.CPUUsage, CPUPair{uint64(nowUnix) * 1000, u.CPUUser, u.CPUSys})
}
s.prevUserTime = cpuTimes.User
s.prevSysTime = cpuTimes.System
s.dataMutex.Lock()
bytesAllocated := ms.Alloc
u.BytesAllocated = bytesAllocated
s.data.BytesAllocated = append(s.data.BytesAllocated, SimplePair{uint64(nowUnix) * 1000, bytesAllocated})
if s.lastPause == 0 || s.lastPause != ms.NumGC {
gcPause := ms.PauseNs[(ms.NumGC+255)%256]
u.GcPause = gcPause
s.data.GcPauses = append(s.data.GcPauses, SimplePair{uint64(nowUnix) * 1000, gcPause})
s.lastPause = ms.NumGC
}
if len(s.data.BytesAllocated) > maxCount {
s.data.BytesAllocated = s.data.BytesAllocated[len(s.data.BytesAllocated)-maxCount:]
}
if len(s.data.GcPauses) > maxCount {
s.data.GcPauses = s.data.GcPauses[len(s.data.GcPauses)-maxCount:]
}
s.dataMutex.Unlock()
s.sendToConsumers(u)
}
func (s *server) sendToConsumers(u update) {
@@ -203,10 +205,10 @@ func (s *server) addConsumer() consumer {
s.consumersMutex.Lock()
defer s.consumersMutex.Unlock()
lastConsumerID++
s.lastConsumerID++
c := consumer{
id: lastConsumerID,
id: s.lastConsumerID,
c: make(chan update),
}
@@ -221,7 +223,7 @@ func (s *server) dataFeedHandler(w http.ResponseWriter, r *http.Request) {
lastPong time.Time
)
conn, err := upgrader.Upgrade(w, r, nil)
conn, err := s.upgrader.Upgrade(w, r, nil)
if err != nil {
log.Println(err)
return
@@ -268,9 +270,9 @@ func (s *server) dataFeedHandler(w http.ResponseWriter, r *http.Request) {
}
}
func dataHandler(w http.ResponseWriter, r *http.Request) {
mutex.RLock()
defer mutex.RUnlock()
func (s *server) dataHandler(w http.ResponseWriter, r *http.Request) {
s.dataMutex.RLock()
defer s.dataMutex.RUnlock()
if e := r.ParseForm(); e != nil {
log.Print("error parsing form")
@@ -284,7 +286,7 @@ func dataHandler(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
encoder := json.NewEncoder(w)
encoder.Encode(data)
encoder.Encode(s.data)
fmt.Fprint(w, ")")
}

219
plugin/debug/envcheck.go Normal file
View File

@@ -0,0 +1,219 @@
package plugin_debug
import (
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"time"
"google.golang.org/protobuf/types/known/timestamppb"
"gopkg.in/yaml.v3"
"m7s.live/v5/pb"
"m7s.live/v5/pkg/util"
)
// EnvCheckResult is a single progress message streamed to the browser during
// an environment check.
type EnvCheckResult struct {
	Message string `json:"message"`
	Type    string `json:"type"` // info, success, error, complete
}

// SysInfoResponseJSON mirrors the target server's /api/sysinfo JSON response
// for decoding.
type SysInfoResponseJSON struct {
	Code    int32  `json:"code"`
	Message string `json:"message"`
	Data    struct {
		StartTime string `json:"startTime"`
		LocalIP   string `json:"localIP"`
		PublicIP  string `json:"publicIP"`
		Version   string `json:"version"`
		GoVersion string `json:"goVersion"`
		OS        string `json:"os"`
		Arch      string `json:"arch"`
		CPUs      int32  `json:"cpus"`
		Plugins   []struct {
			Name        string            `json:"name"`
			PushAddr    []string          `json:"pushAddr"`
			PlayAddr    []string          `json:"playAddr"`
			Description map[string]string `json:"description"`
		} `json:"plugins"`
	} `json:"data"`
}

// PluginConfigResponse mirrors the target server's /api/config/get/{plugin}
// JSON response.
type PluginConfigResponse struct {
	Code    int32  `json:"code"`
	Message string `json:"message"`
	Data    struct {
		File     string `json:"file"`
		Modified string `json:"modified"`
		Merged   string `json:"merged"`
	} `json:"data"`
}

// TCPConfig holds a plugin's TCP listen addresses as declared in its YAML
// configuration.
type TCPConfig struct {
	ListenAddr    string `yaml:"listenaddr"`
	ListenAddrTLS string `yaml:"listenaddrtls"`
}

// PluginConfig is the subset of a plugin's YAML configuration inspected by
// the environment check.
type PluginConfig struct {
	TCP TCPConfig `yaml:"tcp"`
}
// EnvCheck handles the environment-check page and API. Without a "target"
// query parameter it serves the static HTML page; with one it opens an SSE
// stream and runs connectivity checks against the target server: fetching
// /api/sysinfo, then each plugin's merged configuration, and finally
// dialing every configured TCP listen address.
func (p *DebugPlugin) EnvCheck(w http.ResponseWriter, r *http.Request) {
	// Get target URL from query parameter
	targetURL := r.URL.Query().Get("target")
	if targetURL == "" {
		// No target yet: serve the page that lets the user start a check.
		r.URL.Path = "/static/envcheck.html"
		staticFSHandler.ServeHTTP(w, r)
		return
	}
	// Create SSE connection; all progress is streamed as JSON events.
	util.NewSSE(w, r.Context(), func(sse *util.SSE) {
		// sendMessage pushes one result event; msgType is one of
		// "info", "error", "success", "complete".
		sendMessage := func(message string, msgType string) {
			result := EnvCheckResult{
				Message: message,
				Type:    msgType,
			}
			sse.WriteJSON(result)
		}
		// Reject syntactically invalid target URLs up front.
		if _, err := url.Parse(targetURL); err != nil {
			sendMessage(fmt.Sprintf("Invalid URL: %v", err), "error")
			return
		}
		// checkTCPAddr dials addr with a bounded timeout and reports the
		// result. cfgKey is the YAML key used in the invalid-format message
		// ("listenaddr"/"listenaddrtls"); label is the prefix used in the
		// other messages ("TCP listenaddr"/"TCP TLS listenaddr").
		checkTCPAddr := func(addr, cfgKey, label, pluginName string) {
			host, port, err := net.SplitHostPort(addr)
			if err != nil {
				sendMessage(fmt.Sprintf("Invalid %s format for plugin %s: %v", cfgKey, pluginName, err), "error")
				return
			}
			sendMessage(fmt.Sprintf("Checking %s %s for plugin %s...", label, addr, pluginName), "info")
			// Try to establish TCP connection
			conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%s", host, port), 5*time.Second)
			if err != nil {
				sendMessage(fmt.Sprintf("%s %s for plugin %s is not accessible: %v", label, addr, pluginName, err), "error")
				return
			}
			conn.Close()
			sendMessage(fmt.Sprintf("%s %s for plugin %s is accessible", label, addr, pluginName), "success")
		}
		// checkPluginConfig fetches one plugin's configuration and probes its
		// TCP listen addresses. Extracted into its own function so the
		// deferred Body.Close fires once per plugin; the previous version
		// deferred inside the loop, keeping every response body open (and
		// its connection unavailable for reuse) until the handler returned.
		checkPluginConfig := func(plugin *pb.PluginInfo) {
			configResp, err := http.Get(fmt.Sprintf("%s/api/config/get/%s", targetURL, plugin.Name))
			if err != nil {
				sendMessage(fmt.Sprintf("Failed to get configuration for plugin %s: %v", plugin.Name, err), "error")
				return
			}
			defer configResp.Body.Close()
			if configResp.StatusCode != http.StatusOK {
				sendMessage(fmt.Sprintf("Failed to get configuration for plugin %s: status code %d", plugin.Name, configResp.StatusCode), "error")
				return
			}
			var configRespJSON PluginConfigResponse
			if err := json.NewDecoder(configResp.Body).Decode(&configRespJSON); err != nil {
				sendMessage(fmt.Sprintf("Failed to parse configuration for plugin %s: %v", plugin.Name, err), "error")
				return
			}
			// The "merged" field carries the effective YAML configuration.
			var config PluginConfig
			if err := yaml.Unmarshal([]byte(configRespJSON.Data.Merged), &config); err != nil {
				sendMessage(fmt.Sprintf("Failed to parse YAML configuration for plugin %s: %v", plugin.Name, err), "error")
				return
			}
			if config.TCP.ListenAddr != "" {
				checkTCPAddr(config.TCP.ListenAddr, "listenaddr", "TCP listenaddr", plugin.Name)
			}
			if config.TCP.ListenAddrTLS != "" {
				checkTCPAddr(config.TCP.ListenAddrTLS, "listenaddrtls", "TCP TLS listenaddr", plugin.Name)
			}
		}
		// Check if we can connect to the target server
		sendMessage(fmt.Sprintf("Checking connection to %s...", targetURL), "info")
		// Get system info from target server
		resp, err := http.Get(fmt.Sprintf("%s/api/sysinfo", targetURL))
		if err != nil {
			sendMessage(fmt.Sprintf("Failed to connect to target server: %v", err), "error")
			return
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			sendMessage(fmt.Sprintf("Target server returned status code: %d", resp.StatusCode), "error")
			return
		}
		// Read and parse system info
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			sendMessage(fmt.Sprintf("Failed to read response: %v", err), "error")
			return
		}
		var sysInfoJSON SysInfoResponseJSON
		if err := json.Unmarshal(body, &sysInfoJSON); err != nil {
			sendMessage(fmt.Sprintf("Failed to parse system info: %v", err), "error")
			return
		}
		// Convert JSON response to protobuf response
		sysInfo := &pb.SysInfoResponse{
			Code:    sysInfoJSON.Code,
			Message: sysInfoJSON.Message,
			Data: &pb.SysInfoData{
				LocalIP:   sysInfoJSON.Data.LocalIP,
				PublicIP:  sysInfoJSON.Data.PublicIP,
				Version:   sysInfoJSON.Data.Version,
				GoVersion: sysInfoJSON.Data.GoVersion,
				Os:        sysInfoJSON.Data.OS,
				Arch:      sysInfoJSON.Data.Arch,
				Cpus:      sysInfoJSON.Data.CPUs,
			},
		}
		// Parse start time; silently skipped when absent or not RFC3339,
		// matching the original behavior.
		if startTime, err := time.Parse(time.RFC3339, sysInfoJSON.Data.StartTime); err == nil {
			sysInfo.Data.StartTime = timestamppb.New(startTime)
		}
		// Convert plugins
		for _, pluginJSON := range sysInfoJSON.Data.Plugins {
			sysInfo.Data.Plugins = append(sysInfo.Data.Plugins, &pb.PluginInfo{
				Name:        pluginJSON.Name,
				PushAddr:    pluginJSON.PushAddr,
				PlayAddr:    pluginJSON.PlayAddr,
				Description: pluginJSON.Description,
			})
		}
		// Check each plugin's configuration
		for _, plugin := range sysInfo.Data.Plugins {
			checkPluginConfig(plugin)
		}
		sendMessage("Environment check completed", "complete")
	})
}

View File

@@ -7,9 +7,11 @@ import (
"net/http"
"net/http/pprof"
"os"
"os/exec" // 新增导入
"runtime"
runtimePPROF "runtime/pprof"
"sort"
"strconv"
"strings"
"sync"
"time"
@@ -32,13 +34,13 @@ type DebugPlugin struct {
m7s.Plugin
ProfileDuration time.Duration `default:"10s" desc:"profile持续时间"`
Profile string `desc:"采集profile存储文件"`
ChartPeriod time.Duration `default:"1s" desc:"图表更新周期"`
Grfout string `default:"grf.out" desc:"grf输出文件"`
EnableChart bool `default:"true" desc:"是否启用图表功能"`
// 添加缓存字段
cpuProfileData *profile.Profile // 缓存 CPU Profile 数据
cpuProfileOnce sync.Once // 确保只采集一次
cpuProfileLock sync.Mutex // 保护缓存数据
chartServer server
}
type WriteToFile struct {
@@ -70,6 +72,10 @@ func (p *DebugPlugin) OnInit() error {
p.Info("cpu profile done")
}()
}
if p.EnableChart {
p.AddTask(&p.chartServer)
}
return nil
}
@@ -98,11 +104,11 @@ func (p *DebugPlugin) Charts_(w http.ResponseWriter, r *http.Request) {
}
func (p *DebugPlugin) Charts_data(w http.ResponseWriter, r *http.Request) {
dataHandler(w, r)
p.chartServer.dataHandler(w, r)
}
func (p *DebugPlugin) Charts_datafeed(w http.ResponseWriter, r *http.Request) {
s.dataFeedHandler(w, r)
p.chartServer.dataFeedHandler(w, r)
}
func (p *DebugPlugin) Grf(w http.ResponseWriter, r *http.Request) {
@@ -193,7 +199,7 @@ func (p *DebugPlugin) GetHeap(ctx context.Context, empty *emptypb.Empty) (*pb.He
obj.Size += size
totalSize += size
// 构建引<EFBFBD><EFBFBD><EFBFBD>关系
// 构建引关系
for i := 1; i < len(sample.Location); i++ {
loc := sample.Location[i]
if len(loc.Line) == 0 || loc.Line[0].Function == nil {
@@ -443,3 +449,42 @@ func (p *DebugPlugin) GetHeapGraph(ctx context.Context, empty *emptypb.Empty) (*
Data: dot,
}, nil
}
// API_TcpDump runs tcpdump for a client-specified number of seconds and
// streams its stdout to the HTTP response as a downloadable text file.
//
// Query parameters:
//
//	duration   (required) capture length in seconds
//	interface  network interface to capture on (passed as -i)
//	filter     pcap filter expression
//	extra_args extra whitespace-separated tcpdump arguments
func (p *DebugPlugin) API_TcpDump(rw http.ResponseWriter, r *http.Request) {
	query := r.URL.Query()
	// Validate duration BEFORE setting headers or starting tcpdump: the
	// original parsed it after cmd.Start, so on a bad value http.Error was
	// called after the response body had begun and tcpdump was left running.
	if query.Get("duration") == "" {
		http.Error(rw, "duration is required", http.StatusBadRequest)
		return
	}
	duration, err := strconv.Atoi(query.Get("duration"))
	if err != nil {
		http.Error(rw, "invalid duration", http.StatusBadRequest)
		return
	}
	args := []string{"-W", "1"}
	if query.Get("interface") != "" {
		args = append(args, "-i", query.Get("interface"))
	}
	if query.Get("filter") != "" {
		args = append(args, query.Get("filter"))
	}
	if query.Get("extra_args") != "" {
		args = append(args, strings.Fields(query.Get("extra_args"))...)
	}
	rw.Header().Set("Content-Type", "text/plain")
	rw.Header().Set("Cache-Control", "no-cache")
	rw.Header().Set("Content-Disposition", "attachment; filename=tcpdump.txt")
	cmd := exec.CommandContext(p, "tcpdump", args...)
	p.Info("starting tcpdump", "args", strings.Join(cmd.Args, " "))
	cmd.Stdout = rw
	cmd.Stderr = os.Stderr // 将错误输出重定向到标准错误
	if err := cmd.Start(); err != nil {
		http.Error(rw, fmt.Sprintf("failed to start tcpdump: %v", err), http.StatusInternalServerError)
		return
	}
	// Let tcpdump stream into the response for the requested duration.
	<-time.After(time.Duration(duration) * time.Second)
	if err := cmd.Process.Kill(); err != nil {
		p.Error("failed to kill tcpdump process", "error", err)
	}
	// Reap the child so it does not linger as a zombie; the error is
	// expected to be non-nil after Kill and is deliberately ignored.
	_ = cmd.Wait()
}

View File

@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.36.0
// protoc v5.29.1
// protoc-gen-go v1.36.6
// protoc v5.29.3
// source: debug.proto
package pb
@@ -14,6 +14,7 @@ import (
_ "google.golang.org/protobuf/types/known/timestamppb"
reflect "reflect"
sync "sync"
unsafe "unsafe"
)
const (
@@ -1007,176 +1008,107 @@ func (x *RuntimeStats) GetBlockingTimeNs() uint64 {
var File_debug_proto protoreflect.FileDescriptor
var file_debug_proto_rawDesc = []byte{
0x0a, 0x0b, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x05, 0x64,
0x65, 0x62, 0x75, 0x67, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69,
0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a,
0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x22, 0x42, 0x0a, 0x0a, 0x43, 0x70, 0x75, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18,
0x0a, 0x07, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52,
0x07, 0x72, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x75, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x08, 0x64, 0x75, 0x72, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x0a, 0x48, 0x65, 0x61, 0x70, 0x4f, 0x62, 0x6a,
0x65, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x12, 0x0a,
0x04, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x73, 0x69, 0x7a,
0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x72, 0x63, 0x18, 0x04, 0x20,
0x01, 0x28, 0x01, 0x52, 0x08, 0x73, 0x69, 0x7a, 0x65, 0x50, 0x65, 0x72, 0x63, 0x12, 0x18, 0x0a,
0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07,
0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x65, 0x66, 0x73, 0x18,
0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x72, 0x65, 0x66, 0x73, 0x22, 0xc7, 0x02, 0x0a, 0x09,
0x48, 0x65, 0x61, 0x70, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x6c, 0x6c,
0x6f, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x12,
0x1e, 0x0a, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x18, 0x02, 0x20,
0x01, 0x28, 0x04, 0x52, 0x0a, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x12,
0x10, 0x0a, 0x03, 0x73, 0x79, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x03, 0x73, 0x79,
0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x75, 0x6d, 0x47, 0x43, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d,
0x52, 0x05, 0x6e, 0x75, 0x6d, 0x47, 0x43, 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x70, 0x41,
0x6c, 0x6c, 0x6f, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x68, 0x65, 0x61, 0x70,
0x41, 0x6c, 0x6c, 0x6f, 0x63, 0x12, 0x18, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x70, 0x53, 0x79, 0x73,
0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x68, 0x65, 0x61, 0x70, 0x53, 0x79, 0x73, 0x12,
0x1a, 0x0a, 0x08, 0x68, 0x65, 0x61, 0x70, 0x49, 0x64, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28,
0x04, 0x52, 0x08, 0x68, 0x65, 0x61, 0x70, 0x49, 0x64, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x68,
0x65, 0x61, 0x70, 0x49, 0x6e, 0x75, 0x73, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
0x68, 0x65, 0x61, 0x70, 0x49, 0x6e, 0x75, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x68, 0x65, 0x61,
0x70, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52,
0x0c, 0x68, 0x65, 0x61, 0x70, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x12, 0x20, 0x0a,
0x0b, 0x68, 0x65, 0x61, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x0a, 0x20, 0x01,
0x28, 0x04, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12,
0x24, 0x0a, 0x0d, 0x67, 0x63, 0x43, 0x50, 0x55, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e,
0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x67, 0x63, 0x43, 0x50, 0x55, 0x46, 0x72, 0x61,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x86, 0x01, 0x0a, 0x08, 0x48, 0x65, 0x61, 0x70, 0x44, 0x61,
0x74, 0x61, 0x12, 0x26, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x10, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x70, 0x53, 0x74,
0x61, 0x74, 0x73, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x73, 0x12, 0x2b, 0x0a, 0x07, 0x6f, 0x62,
0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x64, 0x65,
0x62, 0x75, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x70, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07,
0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x25, 0x0a, 0x05, 0x65, 0x64, 0x67, 0x65, 0x73,
0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, 0x48,
0x65, 0x61, 0x70, 0x45, 0x64, 0x67, 0x65, 0x52, 0x05, 0x65, 0x64, 0x67, 0x65, 0x73, 0x22, 0x4c,
0x0a, 0x08, 0x48, 0x65, 0x61, 0x70, 0x45, 0x64, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x72,
0x6f, 0x6d, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x72, 0x6f, 0x6d, 0x12, 0x0e,
0x0a, 0x02, 0x74, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x74, 0x6f, 0x12, 0x1c,
0x0a, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
0x09, 0x52, 0x09, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x61, 0x0a, 0x0c,
0x48, 0x65, 0x61, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04,
0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65,
0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x04, 0x64, 0x61,
0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67,
0x2e, 0x48, 0x65, 0x61, 0x70, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22,
0x55, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x70, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73,
0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61,
0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x54, 0x0a, 0x10, 0x43, 0x70, 0x75, 0x47, 0x72, 0x61,
0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f,
0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18,
0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61,
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0x5f, 0x0a, 0x0b,
0x43, 0x70, 0x75, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x63,
0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12,
0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09,
0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x22, 0x0a, 0x04, 0x64, 0x61, 0x74,
0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0e, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e,
0x43, 0x70, 0x75, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x22, 0xc5, 0x02,
0x0a, 0x07, 0x43, 0x70, 0x75, 0x44, 0x61, 0x74, 0x61, 0x12, 0x29, 0x0a, 0x11, 0x74, 0x6f, 0x74,
0x61, 0x6c, 0x5f, 0x63, 0x70, 0x75, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x01,
0x20, 0x01, 0x28, 0x04, 0x52, 0x0e, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x43, 0x70, 0x75, 0x54, 0x69,
0x6d, 0x65, 0x4e, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67,
0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x01,
0x28, 0x04, 0x52, 0x12, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x49, 0x6e, 0x74, 0x65,
0x72, 0x76, 0x61, 0x6c, 0x4e, 0x73, 0x12, 0x34, 0x0a, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x64, 0x65, 0x62, 0x75,
0x67, 0x2e, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c,
0x65, 0x52, 0x09, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x37, 0x0a, 0x0a,
0x67, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x17, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, 0x47, 0x6f, 0x72, 0x6f, 0x75, 0x74, 0x69,
0x6e, 0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x0a, 0x67, 0x6f, 0x72, 0x6f, 0x75,
0x74, 0x69, 0x6e, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x0c, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x5f,
0x63, 0x61, 0x6c, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x64, 0x65,
0x62, 0x75, 0x67, 0x2e, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x52, 0x0b,
0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x73, 0x12, 0x38, 0x0a, 0x0d, 0x72,
0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x73, 0x18, 0x06, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x13, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, 0x52, 0x75, 0x6e, 0x74, 0x69,
0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x52, 0x0c, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65,
0x53, 0x74, 0x61, 0x74, 0x73, 0x22, 0xbf, 0x01, 0x0a, 0x0f, 0x46, 0x75, 0x6e, 0x63, 0x74, 0x69,
0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x66, 0x75, 0x6e,
0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x0c, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e,
0x0a, 0x0b, 0x63, 0x70, 0x75, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x02, 0x20,
0x01, 0x28, 0x04, 0x52, 0x09, 0x63, 0x70, 0x75, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x29,
0x0a, 0x10, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x75,
0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x69, 0x6e, 0x76, 0x6f, 0x63, 0x61,
0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x61, 0x6c,
0x6c, 0x5f, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x63,
0x61, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x63, 0x6b, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x69,
0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73,
0x49, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x64, 0x22, 0x77, 0x0a, 0x10, 0x47, 0x6f, 0x72, 0x6f, 0x75,
0x74, 0x69, 0x6e, 0x65, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73,
0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74,
0x65, 0x12, 0x1e, 0x0a, 0x0b, 0x63, 0x70, 0x75, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73,
0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x63, 0x70, 0x75, 0x54, 0x69, 0x6d, 0x65, 0x4e,
0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x61, 0x6c, 0x6c, 0x5f, 0x73, 0x74, 0x61, 0x63, 0x6b, 0x18,
0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x63, 0x61, 0x6c, 0x6c, 0x53, 0x74, 0x61, 0x63, 0x6b,
0x22, 0x56, 0x0a, 0x0a, 0x53, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x43, 0x61, 0x6c, 0x6c, 0x12, 0x12,
0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61,
0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0b, 0x63, 0x70, 0x75, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e,
0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x63, 0x70, 0x75, 0x54, 0x69, 0x6d, 0x65,
0x4e, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28,
0x04, 0x52, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xa4, 0x01, 0x0a, 0x0c, 0x52, 0x75, 0x6e,
0x74, 0x69, 0x6d, 0x65, 0x53, 0x74, 0x61, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x67, 0x63, 0x5f,
0x63, 0x70, 0x75, 0x5f, 0x66, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01,
0x28, 0x01, 0x52, 0x0d, 0x67, 0x63, 0x43, 0x70, 0x75, 0x46, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6f,
0x6e, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x63, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20,
0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x63, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x27, 0x0a, 0x10,
0x67, 0x63, 0x5f, 0x70, 0x61, 0x75, 0x73, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73,
0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x67, 0x63, 0x50, 0x61, 0x75, 0x73, 0x65, 0x54,
0x69, 0x6d, 0x65, 0x4e, 0x73, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x69, 0x6e,
0x67, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52,
0x0e, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x69, 0x6e, 0x67, 0x54, 0x69, 0x6d, 0x65, 0x4e, 0x73, 0x32,
0xd9, 0x02, 0x0a, 0x03, 0x61, 0x70, 0x69, 0x12, 0x4f, 0x0a, 0x07, 0x47, 0x65, 0x74, 0x48, 0x65,
0x61, 0x70, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x13, 0x2e, 0x64, 0x65, 0x62,
0x75, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
0x17, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x11, 0x12, 0x0f, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f,
0x61, 0x70, 0x69, 0x2f, 0x68, 0x65, 0x61, 0x70, 0x12, 0x5f, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x48,
0x65, 0x61, 0x70, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
0x1a, 0x18, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, 0x48, 0x65, 0x61, 0x70, 0x47, 0x72, 0x61,
0x70, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1d, 0x82, 0xd3, 0xe4, 0x93,
0x02, 0x17, 0x12, 0x15, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68,
0x65, 0x61, 0x70, 0x2f, 0x67, 0x72, 0x61, 0x70, 0x68, 0x12, 0x57, 0x0a, 0x0b, 0x47, 0x65, 0x74,
0x43, 0x70, 0x75, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x11, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67,
0x2e, 0x43, 0x70, 0x75, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x64, 0x65,
0x62, 0x75, 0x67, 0x2e, 0x43, 0x70, 0x75, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x16, 0x12, 0x14, 0x2f, 0x64,
0x65, 0x62, 0x75, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x70, 0x75, 0x2f, 0x67, 0x72, 0x61,
0x70, 0x68, 0x12, 0x47, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x43, 0x70, 0x75, 0x12, 0x11, 0x2e, 0x64,
0x65, 0x62, 0x75, 0x67, 0x2e, 0x43, 0x70, 0x75, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a,
0x12, 0x2e, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2e, 0x43, 0x70, 0x75, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x22, 0x16, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x10, 0x12, 0x0e, 0x2f, 0x64, 0x65,
0x62, 0x75, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x70, 0x75, 0x42, 0x1d, 0x5a, 0x1b, 0x6d,
0x37, 0x73, 0x2e, 0x6c, 0x69, 0x76, 0x65, 0x2f, 0x76, 0x35, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69,
0x6e, 0x2f, 0x64, 0x65, 0x62, 0x75, 0x67, 0x2f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x33,
}
const file_debug_proto_rawDesc = "" +
"\n" +
"\vdebug.proto\x12\x05debug\x1a\x1cgoogle/api/annotations.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"B\n" +
"\n" +
"CpuRequest\x12\x18\n" +
"\arefresh\x18\x01 \x01(\bR\arefresh\x12\x1a\n" +
"\bduration\x18\x02 \x01(\rR\bduration\"\x94\x01\n" +
"\n" +
"HeapObject\x12\x12\n" +
"\x04type\x18\x01 \x01(\tR\x04type\x12\x14\n" +
"\x05count\x18\x02 \x01(\x03R\x05count\x12\x12\n" +
"\x04size\x18\x03 \x01(\x03R\x04size\x12\x1a\n" +
"\bsizePerc\x18\x04 \x01(\x01R\bsizePerc\x12\x18\n" +
"\aaddress\x18\x05 \x01(\tR\aaddress\x12\x12\n" +
"\x04refs\x18\x06 \x03(\tR\x04refs\"\xc7\x02\n" +
"\tHeapStats\x12\x14\n" +
"\x05alloc\x18\x01 \x01(\x04R\x05alloc\x12\x1e\n" +
"\n" +
"totalAlloc\x18\x02 \x01(\x04R\n" +
"totalAlloc\x12\x10\n" +
"\x03sys\x18\x03 \x01(\x04R\x03sys\x12\x14\n" +
"\x05numGC\x18\x04 \x01(\rR\x05numGC\x12\x1c\n" +
"\theapAlloc\x18\x05 \x01(\x04R\theapAlloc\x12\x18\n" +
"\aheapSys\x18\x06 \x01(\x04R\aheapSys\x12\x1a\n" +
"\bheapIdle\x18\a \x01(\x04R\bheapIdle\x12\x1c\n" +
"\theapInuse\x18\b \x01(\x04R\theapInuse\x12\"\n" +
"\fheapReleased\x18\t \x01(\x04R\fheapReleased\x12 \n" +
"\vheapObjects\x18\n" +
" \x01(\x04R\vheapObjects\x12$\n" +
"\rgcCPUFraction\x18\v \x01(\x01R\rgcCPUFraction\"\x86\x01\n" +
"\bHeapData\x12&\n" +
"\x05stats\x18\x01 \x01(\v2\x10.debug.HeapStatsR\x05stats\x12+\n" +
"\aobjects\x18\x02 \x03(\v2\x11.debug.HeapObjectR\aobjects\x12%\n" +
"\x05edges\x18\x03 \x03(\v2\x0f.debug.HeapEdgeR\x05edges\"L\n" +
"\bHeapEdge\x12\x12\n" +
"\x04from\x18\x01 \x01(\tR\x04from\x12\x0e\n" +
"\x02to\x18\x02 \x01(\tR\x02to\x12\x1c\n" +
"\tfieldName\x18\x03 \x01(\tR\tfieldName\"a\n" +
"\fHeapResponse\x12\x12\n" +
"\x04code\x18\x01 \x01(\rR\x04code\x12\x18\n" +
"\amessage\x18\x02 \x01(\tR\amessage\x12#\n" +
"\x04data\x18\x03 \x01(\v2\x0f.debug.HeapDataR\x04data\"U\n" +
"\x11HeapGraphResponse\x12\x12\n" +
"\x04code\x18\x01 \x01(\rR\x04code\x12\x18\n" +
"\amessage\x18\x02 \x01(\tR\amessage\x12\x12\n" +
"\x04data\x18\x03 \x01(\tR\x04data\"T\n" +
"\x10CpuGraphResponse\x12\x12\n" +
"\x04code\x18\x01 \x01(\rR\x04code\x12\x18\n" +
"\amessage\x18\x02 \x01(\tR\amessage\x12\x12\n" +
"\x04data\x18\x03 \x01(\tR\x04data\"_\n" +
"\vCpuResponse\x12\x12\n" +
"\x04code\x18\x01 \x01(\rR\x04code\x12\x18\n" +
"\amessage\x18\x02 \x01(\tR\amessage\x12\"\n" +
"\x04data\x18\x03 \x01(\v2\x0e.debug.CpuDataR\x04data\"\xc5\x02\n" +
"\aCpuData\x12)\n" +
"\x11total_cpu_time_ns\x18\x01 \x01(\x04R\x0etotalCpuTimeNs\x120\n" +
"\x14sampling_interval_ns\x18\x02 \x01(\x04R\x12samplingIntervalNs\x124\n" +
"\tfunctions\x18\x03 \x03(\v2\x16.debug.FunctionProfileR\tfunctions\x127\n" +
"\n" +
"goroutines\x18\x04 \x03(\v2\x17.debug.GoroutineProfileR\n" +
"goroutines\x124\n" +
"\fsystem_calls\x18\x05 \x03(\v2\x11.debug.SystemCallR\vsystemCalls\x128\n" +
"\rruntime_stats\x18\x06 \x01(\v2\x13.debug.RuntimeStatsR\fruntimeStats\"\xbf\x01\n" +
"\x0fFunctionProfile\x12#\n" +
"\rfunction_name\x18\x01 \x01(\tR\ffunctionName\x12\x1e\n" +
"\vcpu_time_ns\x18\x02 \x01(\x04R\tcpuTimeNs\x12)\n" +
"\x10invocation_count\x18\x03 \x01(\x04R\x0finvocationCount\x12\x1d\n" +
"\n" +
"call_stack\x18\x04 \x03(\tR\tcallStack\x12\x1d\n" +
"\n" +
"is_inlined\x18\x05 \x01(\bR\tisInlined\"w\n" +
"\x10GoroutineProfile\x12\x0e\n" +
"\x02id\x18\x01 \x01(\x04R\x02id\x12\x14\n" +
"\x05state\x18\x02 \x01(\tR\x05state\x12\x1e\n" +
"\vcpu_time_ns\x18\x03 \x01(\x04R\tcpuTimeNs\x12\x1d\n" +
"\n" +
"call_stack\x18\x04 \x03(\tR\tcallStack\"V\n" +
"\n" +
"SystemCall\x12\x12\n" +
"\x04name\x18\x01 \x01(\tR\x04name\x12\x1e\n" +
"\vcpu_time_ns\x18\x02 \x01(\x04R\tcpuTimeNs\x12\x14\n" +
"\x05count\x18\x03 \x01(\x04R\x05count\"\xa4\x01\n" +
"\fRuntimeStats\x12&\n" +
"\x0fgc_cpu_fraction\x18\x01 \x01(\x01R\rgcCpuFraction\x12\x19\n" +
"\bgc_count\x18\x02 \x01(\x04R\agcCount\x12'\n" +
"\x10gc_pause_time_ns\x18\x03 \x01(\x04R\rgcPauseTimeNs\x12(\n" +
"\x10blocking_time_ns\x18\x04 \x01(\x04R\x0eblockingTimeNs2\xd9\x02\n" +
"\x03api\x12O\n" +
"\aGetHeap\x12\x16.google.protobuf.Empty\x1a\x13.debug.HeapResponse\"\x17\x82\xd3\xe4\x93\x02\x11\x12\x0f/debug/api/heap\x12_\n" +
"\fGetHeapGraph\x12\x16.google.protobuf.Empty\x1a\x18.debug.HeapGraphResponse\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/debug/api/heap/graph\x12W\n" +
"\vGetCpuGraph\x12\x11.debug.CpuRequest\x1a\x17.debug.CpuGraphResponse\"\x1c\x82\xd3\xe4\x93\x02\x16\x12\x14/debug/api/cpu/graph\x12G\n" +
"\x06GetCpu\x12\x11.debug.CpuRequest\x1a\x12.debug.CpuResponse\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/debug/api/cpuB\x1dZ\x1bm7s.live/v5/plugin/debug/pbb\x06proto3"
var (
file_debug_proto_rawDescOnce sync.Once
file_debug_proto_rawDescData = file_debug_proto_rawDesc
file_debug_proto_rawDescData []byte
)
func file_debug_proto_rawDescGZIP() []byte {
file_debug_proto_rawDescOnce.Do(func() {
file_debug_proto_rawDescData = protoimpl.X.CompressGZIP(file_debug_proto_rawDescData)
file_debug_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_debug_proto_rawDesc), len(file_debug_proto_rawDesc)))
})
return file_debug_proto_rawDescData
}
@@ -1233,7 +1165,7 @@ func file_debug_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_debug_proto_rawDesc,
RawDescriptor: unsafe.Slice(unsafe.StringData(file_debug_proto_rawDesc), len(file_debug_proto_rawDesc)),
NumEnums: 0,
NumMessages: 14,
NumExtensions: 0,
@@ -1244,7 +1176,6 @@ func file_debug_proto_init() {
MessageInfos: file_debug_proto_msgTypes,
}.Build()
File_debug_proto = out.File
file_debug_proto_rawDesc = nil
file_debug_proto_goTypes = nil
file_debug_proto_depIdxs = nil
}

View File

@@ -10,7 +10,6 @@ package pb
import (
"context"
"errors"
"io"
"net/http"
@@ -26,129 +25,136 @@ import (
)
// Suppress "imported and not used" errors
var (
_ codes.Code
_ io.Reader
_ status.Status
_ = errors.New
_ = runtime.String
_ = utilities.NewDoubleArray
_ = metadata.Join
)
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = metadata.Join
func request_Api_GetHeap_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq emptypb.Empty
metadata runtime.ServerMetadata
)
var protoReq emptypb.Empty
var metadata runtime.ServerMetadata
msg, err := client.GetHeap(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_Api_GetHeap_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq emptypb.Empty
metadata runtime.ServerMetadata
)
var protoReq emptypb.Empty
var metadata runtime.ServerMetadata
msg, err := server.GetHeap(ctx, &protoReq)
return msg, metadata, err
}
func request_Api_GetHeapGraph_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq emptypb.Empty
metadata runtime.ServerMetadata
)
var protoReq emptypb.Empty
var metadata runtime.ServerMetadata
msg, err := client.GetHeapGraph(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_Api_GetHeapGraph_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq emptypb.Empty
metadata runtime.ServerMetadata
)
var protoReq emptypb.Empty
var metadata runtime.ServerMetadata
msg, err := server.GetHeapGraph(ctx, &protoReq)
return msg, metadata, err
}
var filter_Api_GetCpuGraph_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
var (
filter_Api_GetCpuGraph_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
func request_Api_GetCpuGraph_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq CpuRequest
metadata runtime.ServerMetadata
)
var protoReq CpuRequest
var metadata runtime.ServerMetadata
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetCpuGraph_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.GetCpuGraph(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_Api_GetCpuGraph_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var (
protoReq CpuRequest
metadata runtime.ServerMetadata
)
var protoReq CpuRequest
var metadata runtime.ServerMetadata
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetCpuGraph_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.GetCpuGraph(ctx, &protoReq)
return msg, metadata, err
}
var filter_Api_GetCpu_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
var (
filter_Api_GetCpu_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
// request_Api_GetCpu_0 translates an HTTP request into an ApiClient.GetCpu
// RPC: it decodes the query string into a CpuRequest and forwards
// header/trailer metadata from the remote call.
func request_Api_GetCpu_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var (
		protoReq CpuRequest
		metadata runtime.ServerMetadata
	)
	if err := req.ParseForm(); err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetCpu_0); err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	msg, err := client.GetCpu(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err
}
// local_request_Api_GetCpu_0 is the in-process variant of
// request_Api_GetCpu_0: it decodes the query string into a CpuRequest and
// calls the ApiServer implementation directly.
func local_request_Api_GetCpu_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var (
		protoReq CpuRequest
		metadata runtime.ServerMetadata
	)
	if err := req.ParseForm(); err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetCpu_0); err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	msg, err := server.GetCpu(ctx, &protoReq)
	return msg, metadata, err
}
// RegisterApiHandlerServer registers the http handlers for service Api to "mux".
// UnaryRPC :call ApiServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterApiHandlerFromEndpoint instead.
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server ApiServer) error {
mux.Handle(http.MethodGet, pattern_Api_GetHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
mux.Handle("GET", pattern_Api_GetHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetHeap", runtime.WithHTTPPathPattern("/debug/api/heap"))
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetHeap", runtime.WithHTTPPathPattern("/debug/api/heap"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -160,15 +166,20 @@ func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_Api_GetHeapGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
mux.Handle("GET", pattern_Api_GetHeapGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetHeapGraph", runtime.WithHTTPPathPattern("/debug/api/heap/graph"))
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetHeapGraph", runtime.WithHTTPPathPattern("/debug/api/heap/graph"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -180,15 +191,20 @@ func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetHeapGraph_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_Api_GetCpuGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
mux.Handle("GET", pattern_Api_GetCpuGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetCpuGraph", runtime.WithHTTPPathPattern("/debug/api/cpu/graph"))
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetCpuGraph", runtime.WithHTTPPathPattern("/debug/api/cpu/graph"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -200,15 +216,20 @@ func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetCpuGraph_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_Api_GetCpu_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
mux.Handle("GET", pattern_Api_GetCpu_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetCpu", runtime.WithHTTPPathPattern("/debug/api/cpu"))
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/debug.Api/GetCpu", runtime.WithHTTPPathPattern("/debug/api/cpu"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -220,7 +241,9 @@ func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetCpu_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
@@ -229,24 +252,25 @@ func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server
// RegisterApiHandlerFromEndpoint is same as RegisterApiHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterApiHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
	conn, err := grpc.NewClient(endpoint, opts...)
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			// Registration failed: release the connection immediately.
			if cerr := conn.Close(); cerr != nil {
				grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
			}
			return
		}
		// Registration succeeded: close the connection once ctx is done.
		go func() {
			<-ctx.Done()
			if cerr := conn.Close(); cerr != nil {
				grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
			}
		}()
	}()
	return RegisterApiHandler(ctx, mux, conn)
}
@@ -260,13 +284,16 @@ func RegisterApiHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.C
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "ApiClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "ApiClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "ApiClient" to call the correct interceptors. This client ignores the HTTP middlewares.
// "ApiClient" to call the correct interceptors.
func RegisterApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client ApiClient) error {
mux.Handle(http.MethodGet, pattern_Api_GetHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
mux.Handle("GET", pattern_Api_GetHeap_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetHeap", runtime.WithHTTPPathPattern("/debug/api/heap"))
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetHeap", runtime.WithHTTPPathPattern("/debug/api/heap"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -277,13 +304,18 @@ func RegisterApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetHeap_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_Api_GetHeapGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
mux.Handle("GET", pattern_Api_GetHeapGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetHeapGraph", runtime.WithHTTPPathPattern("/debug/api/heap/graph"))
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetHeapGraph", runtime.WithHTTPPathPattern("/debug/api/heap/graph"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -294,13 +326,18 @@ func RegisterApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetHeapGraph_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_Api_GetCpuGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
mux.Handle("GET", pattern_Api_GetCpuGraph_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetCpuGraph", runtime.WithHTTPPathPattern("/debug/api/cpu/graph"))
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetCpuGraph", runtime.WithHTTPPathPattern("/debug/api/cpu/graph"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -311,13 +348,18 @@ func RegisterApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetCpuGraph_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle(http.MethodGet, pattern_Api_GetCpu_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
mux.Handle("GET", pattern_Api_GetCpu_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetCpu", runtime.WithHTTPPathPattern("/debug/api/cpu"))
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/debug.Api/GetCpu", runtime.WithHTTPPathPattern("/debug/api/cpu"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
@@ -328,21 +370,30 @@ func RegisterApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetCpu_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_Api_GetHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"debug", "api", "heap"}, ""))
pattern_Api_GetHeap_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"debug", "api", "heap"}, ""))
pattern_Api_GetHeapGraph_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"debug", "api", "heap", "graph"}, ""))
pattern_Api_GetCpuGraph_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"debug", "api", "cpu", "graph"}, ""))
pattern_Api_GetCpu_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"debug", "api", "cpu"}, ""))
pattern_Api_GetCpuGraph_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"debug", "api", "cpu", "graph"}, ""))
pattern_Api_GetCpu_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"debug", "api", "cpu"}, ""))
)
var (
forward_Api_GetHeap_0 = runtime.ForwardResponseMessage
forward_Api_GetHeap_0 = runtime.ForwardResponseMessage
forward_Api_GetHeapGraph_0 = runtime.ForwardResponseMessage
forward_Api_GetCpuGraph_0 = runtime.ForwardResponseMessage
forward_Api_GetCpu_0 = runtime.ForwardResponseMessage
forward_Api_GetCpuGraph_0 = runtime.ForwardResponseMessage
forward_Api_GetCpu_0 = runtime.ForwardResponseMessage
)

View File

@@ -132,4 +132,4 @@ message RuntimeStats {
uint64 gc_count = 2; // 垃圾回收次数
uint64 gc_pause_time_ns = 3; // 垃圾回收暂停时间(纳秒)
uint64 blocking_time_ns = 4; // 阻塞时间(纳秒)
}
}

View File

@@ -1,7 +1,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.29.1
// - protoc v5.29.3
// source: debug.proto
package pb

View File

@@ -0,0 +1,122 @@
<!DOCTYPE html>
<!-- Standalone environment-check page: the user enters a target URL and the
     page streams progress messages from /debug/envcheck via Server-Sent Events. -->
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>Environment Check</title>
    <style>
        body {
            font-family: Arial, sans-serif;
            margin: 20px;
            background-color: #f5f5f5;
        }
        .container {
            max-width: 800px;
            margin: 0 auto;
            background-color: white;
            padding: 20px;
            border-radius: 8px;
            box-shadow: 0 2px 4px rgba(0, 0, 0, 0.1);
        }
        .input-group {
            margin-bottom: 20px;
        }
        input[type="text"] {
            padding: 8px;
            width: 300px;
            margin-right: 10px;
        }
        button {
            padding: 8px 16px;
            background-color: #4CAF50;
            color: white;
            border: none;
            border-radius: 4px;
            cursor: pointer;
        }
        button:hover {
            background-color: #45a049;
        }
        /* Scrollable log console for streamed check output. */
        #log {
            background-color: #f8f9fa;
            border: 1px solid #ddd;
            padding: 10px;
            height: 400px;
            overflow-y: auto;
            font-family: monospace;
            white-space: pre-wrap;
        }
        /* Log entry colors keyed by the server-sent "type" field. */
        .success {
            color: #28a745;
        }
        .error {
            color: #dc3545;
        }
        .info {
            color: #17a2b8;
        }
    </style>
</head>
<body>
    <div class="container">
        <h1>Environment Check</h1>
        <div class="input-group">
            <input type="text" id="targetUrl" placeholder="Enter target URL (e.g., http://192.168.1.100:8080)">
            <button onclick="startCheck()">Start Check</button>
        </div>
        <div id="log"></div>
    </div>
    <script>
        // Append one line to the log console; `type` selects the CSS class.
        function appendLog(message, type = 'info') {
            const log = document.getElementById('log');
            const entry = document.createElement('div');
            entry.className = type;
            entry.textContent = message;
            log.appendChild(entry);
            // Keep the newest entry visible.
            log.scrollTop = log.scrollHeight;
        }
        // Validate the input, reset the log, and open the SSE stream.
        function startCheck() {
            const targetUrl = document.getElementById('targetUrl').value;
            if (!targetUrl) {
                appendLog('Please enter a target URL', 'error');
                return;
            }
            // Clear previous log
            document.getElementById('log').innerHTML = '';
            appendLog('Starting environment check...');
            // Create SSE connection
            const eventSource = new EventSource(`/debug/envcheck?target=${encodeURIComponent(targetUrl)}`);
            eventSource.onmessage = function (event) {
                // Each event payload is JSON: { message, type }.
                const data = JSON.parse(event.data);
                appendLog(data.message, data.type);
                // Server signals the end of the check with type === 'complete'.
                if (data.type === 'complete') {
                    eventSource.close();
                }
            };
            eventSource.onerror = function (error) {
                appendLog('Connection error occurred', 'error');
                eventSource.close();
            };
        }
    </script>
</body>
</html>

View File

@@ -1,24 +1,12 @@
package plugin_flv
import (
"bufio"
"context"
"encoding/binary"
"io"
"io/fs"
"net/http"
"os"
"path/filepath"
"strconv"
"strings"
"time"
"google.golang.org/protobuf/types/known/emptypb"
"m7s.live/v5/pb"
"m7s.live/v5/pkg/util"
flvpb "m7s.live/v5/plugin/flv/pb"
flv "m7s.live/v5/plugin/flv/pkg"
rtmp "m7s.live/v5/plugin/rtmp/pkg"
)
func (p *FLVPlugin) List(ctx context.Context, req *flvpb.ReqRecordList) (resp *pb.ResponseList, err error) {
@@ -52,248 +40,49 @@ func (p *FLVPlugin) Delete(ctx context.Context, req *flvpb.ReqRecordDelete) (res
}
func (plugin *FLVPlugin) Download_(w http.ResponseWriter, r *http.Request) {
streamPath := strings.TrimSuffix(strings.TrimPrefix(r.URL.Path, "/download/"), ".flv")
singleFile := filepath.Join(plugin.Path, streamPath+".flv")
startTime, endTime, err := util.TimeRangeQueryParse(r.URL.Query())
// 解析请求参数
params, err := plugin.parseRequestParams(r)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
timeRange := endTime.Sub(startTime)
plugin.Info("download", "stream", streamPath, "start", startTime, "end", endTime)
dir := filepath.Join(plugin.Path, streamPath)
if util.Exist(singleFile) {
} else if util.Exist(dir) {
var fileList []fs.FileInfo
var found bool
var startOffsetTime time.Duration
err = filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error {
if info.IsDir() || !strings.HasSuffix(info.Name(), ".flv") {
return nil
}
modTime := info.ModTime()
//tmp, _ := strconv.Atoi(strings.TrimSuffix(info.Name(), ".flv"))
//fileStartTime := time.Unix(tmp, 10)
if !found {
if modTime.After(startTime) {
found = true
//fmt.Println(path, modTime, startTime, found)
} else {
fileList = []fs.FileInfo{info}
startOffsetTime = startTime.Sub(modTime)
//fmt.Println(path, modTime, startTime, found)
return nil
}
}
if modTime.After(endTime) {
return fs.ErrInvalid
}
fileList = append(fileList, info)
return nil
})
if !found {
http.NotFound(w, r)
return
}
plugin.Info("download", "stream", params.streamPath, "start", params.startTime, "end", params.endTime)
w.Header().Set("Content-Type", "video/x-flv")
w.Header().Set("Content-Disposition", "attachment")
var writer io.Writer = w
flvHead := make([]byte, 9+4)
tagHead := make(util.Buffer, 11)
var contentLength uint64
// 从数据库查询录像记录
recordStreams, err := plugin.queryRecordStreams(params)
if err != nil {
plugin.Error("Failed to query record streams", "err", err)
http.Error(w, "Database query failed", http.StatusInternalServerError)
return
}
var amf *rtmp.AMF
var metaData rtmp.EcmaArray
initMetaData := func(reader io.Reader, dataLen uint32) {
data := make([]byte, dataLen+4)
_, err = io.ReadFull(reader, data)
amf = &rtmp.AMF{
Buffer: util.Buffer(data[1+2+len("onMetaData") : len(data)-4]),
}
var obj any
obj, err = amf.Unmarshal()
metaData = obj.(rtmp.EcmaArray)
}
var filepositions []uint64
var times []float64
for pass := 0; pass < 2; pass++ {
offsetTime := startOffsetTime
var offsetTimestamp, lastTimestamp uint32
var init, seqAudioWritten, seqVideoWritten bool
if pass == 1 {
metaData["keyframes"] = map[string]any{
"filepositions": filepositions,
"times": times,
}
amf.Marshals("onMetaData", metaData)
offsetDelta := amf.Len() + 15
offset := offsetDelta + len(flvHead)
contentLength += uint64(offset)
metaData["duration"] = timeRange.Seconds()
metaData["filesize"] = contentLength
for i := range filepositions {
filepositions[i] += uint64(offset)
}
metaData["keyframes"] = map[string]any{
"filepositions": filepositions,
"times": times,
}
amf.Reset()
amf.Marshals("onMetaData", metaData)
plugin.Info("start download", "metaData", metaData)
w.Header().Set("Content-Length", strconv.FormatInt(int64(contentLength), 10))
w.WriteHeader(http.StatusOK)
}
if offsetTime == 0 {
init = true
} else {
offsetTimestamp = -uint32(offsetTime.Milliseconds())
}
for i, info := range fileList {
if r.Context().Err() != nil {
return
}
filePath := filepath.Join(dir, info.Name())
plugin.Debug("read", "file", filePath)
file, err := os.Open(filePath)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
reader := bufio.NewReader(file)
if i == 0 {
_, err = io.ReadFull(reader, flvHead)
if pass == 1 {
// 第一次写入头
_, err = writer.Write(flvHead)
tagHead[0] = flv.FLV_TAG_TYPE_SCRIPT
l := amf.Len()
tagHead[1] = byte(l >> 16)
tagHead[2] = byte(l >> 8)
tagHead[3] = byte(l)
flv.PutFlvTimestamp(tagHead, 0)
writer.Write(tagHead)
writer.Write(amf.Buffer)
l += 11
binary.BigEndian.PutUint32(tagHead[:4], uint32(l))
writer.Write(tagHead[:4])
}
} else {
// 后面的头跳过
_, err = reader.Discard(13)
if !init {
offsetTime = 0
offsetTimestamp = 0
}
}
for err == nil {
_, err = io.ReadFull(reader, tagHead)
if err != nil {
break
}
tmp := tagHead
t := tmp.ReadByte()
dataLen := tmp.ReadUint24()
lastTimestamp = tmp.ReadUint24() | uint32(tmp.ReadByte())<<24
//fmt.Println(lastTimestamp, tagHead)
if init {
if t == flv.FLV_TAG_TYPE_SCRIPT {
if pass == 0 {
initMetaData(reader, dataLen)
} else {
_, err = reader.Discard(int(dataLen) + 4)
}
} else {
lastTimestamp += offsetTimestamp
if lastTimestamp >= uint32(timeRange.Milliseconds()) {
break
}
if pass == 0 {
data := make([]byte, dataLen+4)
_, err = io.ReadFull(reader, data)
frameType := (data[0] >> 4) & 0b0111
idr := frameType == 1 || frameType == 4
if idr {
filepositions = append(filepositions, contentLength)
times = append(times, float64(lastTimestamp)/1000)
}
contentLength += uint64(11 + dataLen + 4)
} else {
//fmt.Println("write", lastTimestamp)
flv.PutFlvTimestamp(tagHead, lastTimestamp)
_, err = writer.Write(tagHead)
_, err = io.CopyN(writer, reader, int64(dataLen+4))
}
}
continue
}
switch t {
case flv.FLV_TAG_TYPE_SCRIPT:
if pass == 0 {
initMetaData(reader, dataLen)
} else {
_, err = reader.Discard(int(dataLen) + 4)
}
case flv.FLV_TAG_TYPE_AUDIO:
if !seqAudioWritten {
if pass == 0 {
contentLength += uint64(11 + dataLen + 4)
_, err = reader.Discard(int(dataLen) + 4)
} else {
flv.PutFlvTimestamp(tagHead, 0)
_, err = writer.Write(tagHead)
_, err = io.CopyN(writer, reader, int64(dataLen+4))
}
seqAudioWritten = true
} else {
_, err = reader.Discard(int(dataLen) + 4)
}
case flv.FLV_TAG_TYPE_VIDEO:
if !seqVideoWritten {
if pass == 0 {
contentLength += uint64(11 + dataLen + 4)
_, err = reader.Discard(int(dataLen) + 4)
} else {
flv.PutFlvTimestamp(tagHead, 0)
_, err = writer.Write(tagHead)
_, err = io.CopyN(writer, reader, int64(dataLen+4))
}
seqVideoWritten = true
} else {
if lastTimestamp >= uint32(offsetTime.Milliseconds()) {
data := make([]byte, dataLen+4)
_, err = io.ReadFull(reader, data)
frameType := (data[0] >> 4) & 0b0111
idr := frameType == 1 || frameType == 4
if idr {
init = true
plugin.Debug("init", "lastTimestamp", lastTimestamp)
if pass == 0 {
filepositions = append(filepositions, contentLength)
times = append(times, float64(lastTimestamp)/1000)
contentLength += uint64(11 + dataLen + 4)
} else {
flv.PutFlvTimestamp(tagHead, 0)
_, err = writer.Write(tagHead)
_, err = writer.Write(data)
}
}
} else {
_, err = reader.Discard(int(dataLen) + 4)
}
}
}
}
offsetTimestamp = lastTimestamp
err = file.Close()
}
}
plugin.Info("end download")
} else {
// 构建文件信息列表
fileInfoList, found := plugin.buildFileInfoList(recordStreams, params.startTime, params.endTime)
if !found || len(fileInfoList) == 0 {
plugin.Warn("No records found", "stream", params.streamPath, "start", params.startTime, "end", params.endTime)
http.NotFound(w, r)
return
}
// 根据记录类型选择处理方式
if plugin.hasOnlyMp4Records(fileInfoList) {
// 过滤MP4文件并转换为FLV
mp4FileList := plugin.filterMp4Files(fileInfoList)
if len(mp4FileList) == 0 {
plugin.Warn("No valid MP4 files after filtering", "stream", params.streamPath)
http.NotFound(w, r)
return
}
plugin.processMp4ToFlv(w, r, mp4FileList, params)
} else {
// 过滤FLV文件并处理
flvFileList := plugin.filterFlvFiles(fileInfoList)
if len(flvFileList) == 0 {
plugin.Warn("No valid FLV files after filtering", "stream", params.streamPath)
http.NotFound(w, r)
return
}
plugin.processFlvFiles(w, r, flvFileList, params)
}
}

640
plugin/flv/download.go Normal file
View File

@@ -0,0 +1,640 @@
package plugin_flv
import (
"bufio"
"encoding/binary"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
"time"
m7s "m7s.live/v5"
codec "m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/util"
flv "m7s.live/v5/plugin/flv/pkg"
mp4 "m7s.live/v5/plugin/mp4/pkg"
"m7s.live/v5/plugin/mp4/pkg/box"
rtmp "m7s.live/v5/plugin/rtmp/pkg"
)
// requestParams holds the parameters parsed from a download request.
type requestParams struct {
	streamPath string        // stream path extracted from the URL
	startTime  time.Time     // requested range start
	endTime    time.Time     // requested range end
	timeRange  time.Duration // requested duration (endTime - startTime)
}
// fileInfo describes one record file considered for the download.
type fileInfo struct {
	filePath        string        // path of the record file
	startTime       time.Time     // record start time
	endTime         time.Time     // record end time
	startOffsetTime time.Duration // offset into the file where the requested range begins
	recordType      string        // "flv" or "mp4"/"fmp4"
}
// parseRequestParams extracts the stream path and the requested time range
// from a download request URL of the form /download/{streamPath}.flv with
// start/end supplied as query parameters.
func (plugin *FLVPlugin) parseRequestParams(r *http.Request) (*requestParams, error) {
	// Strip the route prefix and the ".flv" extension to obtain the stream path.
	path := strings.TrimPrefix(r.URL.Path, "/download/")
	path = strings.TrimSuffix(path, ".flv")

	// Parse the start/end query parameters into a time range.
	start, end, err := util.TimeRangeQueryParse(r.URL.Query())
	if err != nil {
		return nil, err
	}

	params := &requestParams{
		streamPath: path,
		startTime:  start,
		endTime:    end,
		timeRange:  end.Sub(start),
	}
	return params, nil
}
// queryRecordStreams looks up recorded segments for the requested stream and
// time range. FLV records are preferred; when none exist it falls back to
// MP4/fMP4 records (which can then be converted to FLV on the fly).
func (plugin *FLVPlugin) queryRecordStreams(params *requestParams) ([]m7s.RecordStream, error) {
	// A database is required to locate record files.
	if plugin.DB == nil {
		return nil, fmt.Errorf("database not available")
	}
	var recordStreams []m7s.RecordStream
	// First, query FLV records for this stream.
	query := plugin.DB.Model(&m7s.RecordStream{}).Where("stream_path = ? AND type = ?", params.streamPath, "flv")
	// Restrict to records overlapping the requested window (either the record
	// spans the window start, or the record starts inside the window).
	if !params.startTime.IsZero() && !params.endTime.IsZero() {
		query = query.Where("(start_time <= ? AND end_time >= ?) OR (start_time >= ? AND start_time <= ?)",
			params.endTime, params.startTime, params.startTime, params.endTime)
	}
	err := query.Order("start_time ASC").Find(&recordStreams).Error
	if err != nil {
		return nil, err
	}
	// No FLV records found: try MP4/fMP4 records with the same time filter.
	if len(recordStreams) == 0 {
		query = plugin.DB.Model(&m7s.RecordStream{}).Where("stream_path = ? AND type IN (?)", params.streamPath, []string{"mp4", "fmp4"})
		if !params.startTime.IsZero() && !params.endTime.IsZero() {
			query = query.Where("(start_time <= ? AND end_time >= ?) OR (start_time >= ? AND start_time <= ?)",
				params.endTime, params.startTime, params.startTime, params.endTime)
		}
		err = query.Order("start_time ASC").Find(&recordStreams).Error
		if err != nil {
			return nil, err
		}
	}
	return recordStreams, nil
}
// buildFileInfoList converts database records into fileInfo entries that
// overlap [startTime, endTime], skipping records whose backing file no longer
// exists. The boolean result reports whether at least one usable record was
// found.
func (plugin *FLVPlugin) buildFileInfoList(recordStreams []m7s.RecordStream, startTime, endTime time.Time) ([]*fileInfo, bool) {
	var infos []*fileInfo
	for _, rec := range recordStreams {
		// Skip records whose file has been removed from disk.
		if !util.Exist(rec.FilePath) {
			plugin.Warn("Record file not found", "filePath", rec.FilePath)
			continue
		}
		// Discard records entirely outside the requested window.
		if rec.EndTime.Before(startTime) || rec.StartTime.After(endTime) {
			continue
		}
		// Offset into the file where the requested range begins (zero when the
		// record starts at or after the requested start).
		var offset time.Duration
		if startTime.After(rec.StartTime) {
			offset = startTime.Sub(rec.StartTime)
		}
		infos = append(infos, &fileInfo{
			filePath:        rec.FilePath,
			startTime:       rec.StartTime,
			endTime:         rec.EndTime,
			startOffsetTime: offset,
			recordType:      rec.Type,
		})
	}
	return infos, len(infos) > 0
}
// hasOnlyMp4Records reports whether the list is non-empty and contains no
// FLV records, i.e. the whole download must be converted from MP4/fMP4.
func (plugin *FLVPlugin) hasOnlyMp4Records(fileInfoList []*fileInfo) bool {
	hasEntries := len(fileInfoList) > 0
	for _, entry := range fileInfoList {
		// A single FLV record means the FLV path will be used instead.
		if entry.recordType == "flv" {
			return false
		}
	}
	return hasEntries
}
// filterFlvFiles returns only the entries whose record type is FLV and logs
// how many entries survived the filter.
func (plugin *FLVPlugin) filterFlvFiles(fileInfoList []*fileInfo) []*fileInfo {
	var flvOnly []*fileInfo
	for _, entry := range fileInfoList {
		if entry.recordType != "flv" {
			continue
		}
		flvOnly = append(flvOnly, entry)
	}
	plugin.Debug("FLV files filtered", "original", len(fileInfoList), "filtered", len(flvOnly))
	return flvOnly
}
// filterMp4Files returns only the entries whose record type is MP4 or fMP4
// and logs how many entries survived the filter.
func (plugin *FLVPlugin) filterMp4Files(fileInfoList []*fileInfo) []*fileInfo {
	var mp4Only []*fileInfo
	for _, entry := range fileInfoList {
		switch entry.recordType {
		case "mp4", "fmp4":
			mp4Only = append(mp4Only, entry)
		}
	}
	plugin.Debug("MP4 files filtered", "original", len(fileInfoList), "filtered", len(mp4Only))
	return mp4Only
}
// processMp4ToFlv streams the given MP4/fMP4 records to the client as a
// single FLV download, remuxing on the fly through DemuxerRange callbacks.
func (plugin *FLVPlugin) processMp4ToFlv(w http.ResponseWriter, r *http.Request, fileInfoList []*fileInfo, params *requestParams) {
	plugin.Info("Converting MP4 records to FLV", "count", len(fileInfoList))
	// Set HTTP response headers for an FLV attachment.
	w.Header().Set("Content-Type", "video/x-flv")
	w.Header().Set("Content-Disposition", "attachment")
	// Build the list of MP4 record streams to demux.
	var mp4Streams []m7s.RecordStream
	for _, info := range fileInfoList {
		mp4Streams = append(mp4Streams, m7s.RecordStream{
			FilePath:  info.filePath,
			StartTime: info.startTime,
			EndTime:   info.endTime,
			Type:      info.recordType,
		})
	}
	// Create a DemuxerRange limited to the requested time window.
	demuxer := &mp4.DemuxerRange{
		StartTime: params.startTime,
		EndTime:   params.endTime,
		Streams:   mp4Streams,
	}
	// FLV encoder state that receives the demuxed samples.
	flvWriter := &flvMp4Writer{
		FlvWriter:  flv.NewFlvWriter(w),
		plugin:     plugin,
		hasWritten: false,
	}
	// Wire the demuxer callbacks to the FLV writer.
	demuxer.OnVideoExtraData = flvWriter.onVideoExtraData
	demuxer.OnAudioExtraData = flvWriter.onAudioExtraData
	demuxer.OnVideoSample = flvWriter.onVideoSample
	demuxer.OnAudioSample = flvWriter.onAudioSample
	// Run the demux/convert loop; the request context is passed so the
	// conversion can be cancelled with the request.
	err := demuxer.Demux(r.Context())
	if err != nil {
		plugin.Error("MP4 to FLV conversion failed", "err", err)
		// Only send an HTTP error if no FLV bytes have been written yet;
		// otherwise the response headers are already out.
		if !flvWriter.hasWritten {
			http.Error(w, "Conversion failed", http.StatusInternalServerError)
		}
		return
	}
	plugin.Info("MP4 to FLV conversion completed")
}
// ExtraDataInfo pairs a codec identifier with its raw extra data (decoder
// configuration) so a sequence header can be buffered and written later.
type ExtraDataInfo struct {
	CodecType box.MP4_CODEC_TYPE
	Data      []byte
}
// flvMp4Writer handles writing during MP4-to-FLV conversion, adapting the
// demuxer callbacks to FLV tag output.
type flvMp4Writer struct {
	*flv.FlvWriter
	plugin                 *FLVPlugin
	audioExtra, videoExtra *ExtraDataInfo // sequence headers buffered until the FLV header is written
	hasWritten             bool           // whether the FLV file header has been written
	ts                     int64          // current timestamp
	tsOffset               int64          // timestamp offset, used for continuous playback across files
}
// writeFlvHeader lazily writes the FLV file header, then flushes any buffered
// audio/video sequence headers. It is a no-op after the first successful
// call. Unlike the previous version, errors from the buffered sequence-header
// writes are propagated instead of being discarded.
func (w *flvMp4Writer) writeFlvHeader() error {
	if w.hasWritten {
		return nil
	}
	// Header flags advertise which tracks are present.
	if err := w.FlvWriter.WriteHeader(w.audioExtra != nil, w.videoExtra != nil); err != nil {
		return err
	}
	w.hasWritten = true
	// Emit the buffered sequence headers now that the file header is out.
	if w.videoExtra != nil {
		if err := w.onVideoExtraData(w.videoExtra.CodecType, w.videoExtra.Data); err != nil {
			return err
		}
	}
	if w.audioExtra != nil {
		if err := w.onAudioExtraData(w.audioExtra.CodecType, w.audioExtra.Data); err != nil {
			return err
		}
	}
	return nil
}
// onVideoExtraData writes the video sequence header (decoder configuration)
// as an FLV video tag. Before the FLV file header has been written it only
// buffers the data so writeFlvHeader can emit it in order.
func (w *flvMp4Writer) onVideoExtraData(codecType box.MP4_CODEC_TYPE, data []byte) error {
	if !w.hasWritten {
		// Buffer until the FLV header has been written.
		w.videoExtra = &ExtraDataInfo{
			CodecType: codecType,
			Data:      data,
		}
		return nil
	}
	switch codecType {
	case box.MP4_CODEC_H264:
		// AVC sequence header: first byte packs frame type 1 (key) with codec
		// id 7; the following 4 zero bytes are AVCPacketType 0 plus composition time.
		return w.WriteTag(flv.FLV_TAG_TYPE_VIDEO, uint32(w.ts), uint32(len(data)+5), []byte{(1 << 4) | 7, 0, 0, 0, 0}, data)
	case box.MP4_CODEC_H265:
		// Enhanced-RTMP style header: key-frame bits combined with the
		// SequenceStart packet type, followed by the H.265 FourCC.
		return w.WriteTag(flv.FLV_TAG_TYPE_VIDEO, uint32(w.ts), uint32(len(data)+5), []byte{0b1001_0000 | rtmp.PacketTypeSequenceStart, codec.FourCC_H265[0], codec.FourCC_H265[1], codec.FourCC_H265[2], codec.FourCC_H265[3]}, data)
	default:
		return fmt.Errorf("unsupported video codec: %v", codecType)
	}
}
// onAudioExtraData handles an audio sequence header.
// Before the FLV file header exists the data is only cached; afterwards it
// is emitted immediately as an audio tag carrying the sequence header.
func (w *flvMp4Writer) onAudioExtraData(codecType box.MP4_CODEC_TYPE, data []byte) error {
	if !w.hasWritten {
		// FLV header not written yet: remember the extradata for later replay.
		w.audioExtra = &ExtraDataInfo{CodecType: codecType, Data: data}
		return nil
	}
	var flvCodec byte
	switch codecType {
	case box.MP4_CODEC_AAC:
		flvCodec = 10 // AAC
	case box.MP4_CODEC_G711A:
		flvCodec = 7 // G.711 A-law
	case box.MP4_CODEC_G711U:
		flvCodec = 8 // G.711 μ-law
	default:
		return fmt.Errorf("unsupported audio codec: %v", codecType)
	}
	// Audio tag header byte: codec ID | sample-rate index 3 | 16-bit | stereo.
	first := (flvCodec << 4) | (3 << 2) | (1 << 1) | 1
	if flvCodec == 10 {
		// AAC carries a second header byte: AACPacketType 0 = sequence header.
		return w.WriteTag(flv.FLV_TAG_TYPE_AUDIO, uint32(w.ts), uint32(len(data)+2), []byte{first, 0}, data)
	}
	return w.WriteTag(flv.FLV_TAG_TYPE_AUDIO, uint32(w.ts), uint32(len(data)+1), []byte{first}, data)
}
// onVideoSample writes one demuxed video sample as an FLV video tag.
// The FLV file header (plus cached sequence headers) is emitted lazily
// before the first sample.
func (w *flvMp4Writer) onVideoSample(codecType box.MP4_CODEC_TYPE, sample box.Sample) error {
	if !w.hasWritten {
		if err := w.writeFlvHeader(); err != nil {
			return err
		}
	}
	// Apply the cross-file offset to build a continuous timeline.
	w.ts = int64(sample.Timestamp) + w.tsOffset
	ts := uint32(w.ts)
	switch codecType {
	case box.MP4_CODEC_H264:
		var frameType byte = 2 // inter frame
		if sample.KeyFrame {
			frameType = 1 // key frame
		}
		// Legacy AVC tag: AVCPacketType 1 (NALU) + 24-bit composition time.
		head := []byte{(frameType << 4) | 7, 1, byte(sample.CTS >> 16), byte(sample.CTS >> 8), byte(sample.CTS)}
		return w.WriteTag(flv.FLV_TAG_TYPE_VIDEO, ts, uint32(len(sample.Data)+5), head, sample.Data)
	case box.MP4_CODEC_H265:
		// Enhanced RTMP format for H.265.
		b0 := byte(0b1010_0000) // inter frame marker
		if sample.KeyFrame {
			b0 = 0b1001_0000 // key frame marker
		}
		if sample.CTS == 0 {
			// Zero CTS: PacketTypeCodedFramesX with a 5-byte header (no CTS field).
			head := []byte{b0 | rtmp.PacketTypeCodedFramesX, codec.FourCC_H265[0], codec.FourCC_H265[1], codec.FourCC_H265[2], codec.FourCC_H265[3]}
			return w.WriteTag(flv.FLV_TAG_TYPE_VIDEO, ts, uint32(len(sample.Data)+5), head, sample.Data)
		}
		// Non-zero CTS: PacketTypeCodedFrames with an 8-byte header including CTS.
		head := []byte{b0 | rtmp.PacketTypeCodedFrames, codec.FourCC_H265[0], codec.FourCC_H265[1], codec.FourCC_H265[2], codec.FourCC_H265[3], byte(sample.CTS >> 16), byte(sample.CTS >> 8), byte(sample.CTS)}
		return w.WriteTag(flv.FLV_TAG_TYPE_VIDEO, ts, uint32(len(sample.Data)+8), head, sample.Data)
	default:
		return fmt.Errorf("unsupported video codec: %v", codecType)
	}
}
// onAudioSample writes one demuxed audio sample as an FLV audio tag.
// The FLV file header (plus cached sequence headers) is emitted lazily
// before the first sample.
//
// Fix: the parameter is renamed from `codec` to `codecType` — the original
// name shadowed the imported codec package (used by the sibling
// onVideoSample) and was inconsistent with the other callbacks. Parameter
// names are not part of the call interface, so callers are unaffected.
func (w *flvMp4Writer) onAudioSample(codecType box.MP4_CODEC_TYPE, sample box.Sample) error {
	if !w.hasWritten {
		if err := w.writeFlvHeader(); err != nil {
			return err
		}
	}
	// Apply the cross-file offset to build a continuous timeline.
	w.ts = int64(sample.Timestamp) + w.tsOffset
	timestamp := uint32(w.ts)
	var flvCodec byte
	switch codecType {
	case box.MP4_CODEC_AAC:
		flvCodec = 10 // AAC
	case box.MP4_CODEC_G711A:
		flvCodec = 7 // G.711 A-law
	case box.MP4_CODEC_G711U:
		flvCodec = 8 // G.711 μ-law
	default:
		return fmt.Errorf("unsupported audio codec: %v", codecType)
	}
	// Audio tag header byte: codec ID | sample-rate index 3 | 16-bit | stereo.
	if flvCodec == 10 {
		// AAC carries a second header byte: AACPacketType 1 = raw frame.
		return w.WriteTag(flv.FLV_TAG_TYPE_AUDIO, timestamp, uint32(len(sample.Data)+2), []byte{(flvCodec << 4) | (3 << 2) | (1 << 1) | 1, 1}, sample.Data)
	}
	// Non-AAC codecs (e.g. G.711) use only the single header byte.
	return w.WriteTag(flv.FLV_TAG_TYPE_AUDIO, timestamp, uint32(len(sample.Data)+1), []byte{(flvCodec << 4) | (3 << 2) | (1 << 1) | 1}, sample.Data)
}
// processFlvFiles streams several recorded FLV files to the client as one
// continuous FLV download covering the requested time range.
//
// It makes two passes over the same files: pass 0 measures everything and
// builds a keyframe index (filepositions/times) without writing, pass 1
// rewrites onMetaData with the final sizes and streams the bytes. This lets
// Content-Length and a seekable keyframe table be sent up front.
func (plugin *FLVPlugin) processFlvFiles(w http.ResponseWriter, r *http.Request, fileInfoList []*fileInfo, params *requestParams) {
	plugin.Info("Processing FLV files", "count", len(fileInfoList))
	// Set HTTP response headers for an FLV attachment download.
	w.Header().Set("Content-Type", "video/x-flv")
	w.Header().Set("Content-Disposition", "attachment")
	var writer io.Writer = w
	flvHead := make([]byte, 9+4)     // 9-byte FLV file header + 4-byte PreviousTagSize0
	tagHead := make(util.Buffer, 11) // reusable 11-byte tag header scratch buffer
	var contentLength uint64         // total response body size, accumulated in pass 0
	var startOffsetTime time.Duration
	// Seek offset into the first file (the requested range may start mid-file).
	if len(fileInfoList) > 0 {
		startOffsetTime = fileInfoList[0].startOffsetTime
	}
	var amf *rtmp.AMF
	var metaData rtmp.EcmaArray
	// initMetaData reads a script tag body (dataLen bytes plus the trailing
	// 4-byte PreviousTagSize) and decodes the onMetaData ECMA array.
	// NOTE(review): if no script tag is ever encountered, amf/metaData remain
	// nil and pass 1 would fail on them — presumably every recording starts
	// with an onMetaData tag; confirm.
	initMetaData := func(reader io.Reader, dataLen uint32) {
		data := make([]byte, dataLen+4)
		_, err := io.ReadFull(reader, data)
		if err != nil {
			return
		}
		// Skip the AMF string marker (1 byte), length prefix (2 bytes) and
		// "onMetaData" itself; drop the trailing PreviousTagSize (4 bytes).
		amf = &rtmp.AMF{
			Buffer: util.Buffer(data[1+2+len("onMetaData") : len(data)-4]),
		}
		var obj any
		obj, err = amf.Unmarshal()
		if err == nil {
			metaData = obj.(rtmp.EcmaArray)
		}
	}
	var filepositions []uint64 // byte offsets of keyframes, for the seek index
	var times []float64        // keyframe timestamps in seconds
	// Two passes: pass 0 computes sizes, pass 1 writes the data.
	for pass := 0; pass < 2; pass++ {
		offsetTime := startOffsetTime
		var offsetTimestamp, lastTimestamp uint32
		var init, seqAudioWritten, seqVideoWritten bool
		if pass == 1 {
			// Second pass: finalize metadata, then emit the response headers.
			metaData["keyframes"] = map[string]any{
				"filepositions": filepositions,
				"times":         times,
			}
			// First marshal is only to measure the script tag payload size.
			amf.Marshals("onMetaData", metaData)
			offsetDelta := amf.Len() + 15 // + 11-byte tag header + 4-byte PreviousTagSize
			offset := offsetDelta + len(flvHead)
			contentLength += uint64(offset)
			metaData["duration"] = params.timeRange.Seconds()
			metaData["filesize"] = contentLength
			// Shift keyframe offsets past the FLV header and the script tag.
			for i := range filepositions {
				filepositions[i] += uint64(offset)
			}
			metaData["keyframes"] = map[string]any{
				"filepositions": filepositions,
				"times":         times,
			}
			// Re-marshal with the final duration/filesize/offsets.
			amf.Reset()
			amf.Marshals("onMetaData", metaData)
			plugin.Info("start download", "metaData", metaData)
			w.Header().Set("Content-Length", strconv.FormatInt(int64(contentLength), 10))
			w.WriteHeader(http.StatusOK)
		}
		if offsetTime == 0 {
			init = true
		} else {
			// Deliberate unsigned wraparound: adding offsetTimestamp later
			// effectively subtracts the seek offset from each tag timestamp.
			offsetTimestamp = -uint32(offsetTime.Milliseconds())
		}
		for i, info := range fileInfoList {
			// Abort immediately if the client disconnected.
			if r.Context().Err() != nil {
				return
			}
			plugin.Debug("Processing file", "path", info.filePath)
			file, err := os.Open(info.filePath)
			if err != nil {
				plugin.Error("Failed to open file", "path", info.filePath, "err", err)
				if pass == 1 {
					http.Error(w, err.Error(), http.StatusInternalServerError)
				}
				return
			}
			reader := bufio.NewReader(file)
			if i == 0 {
				// Read (and keep) the first file's FLV header for reuse.
				_, err = io.ReadFull(reader, flvHead)
				if err != nil {
					file.Close()
					if pass == 1 {
						http.Error(w, err.Error(), http.StatusInternalServerError)
					}
					return
				}
				if pass == 1 {
					// Write the single FLV file header.
					_, err = writer.Write(flvHead)
					if err != nil {
						file.Close()
						return
					}
					// Emit the rebuilt onMetaData script tag.
					tagHead[0] = flv.FLV_TAG_TYPE_SCRIPT
					l := amf.Len()
					tagHead[1] = byte(l >> 16)
					tagHead[2] = byte(l >> 8)
					tagHead[3] = byte(l)
					flv.PutFlvTimestamp(tagHead, 0)
					writer.Write(tagHead)
					writer.Write(amf.Buffer)
					l += 11
					// PreviousTagSize for the script tag (payload + header).
					binary.BigEndian.PutUint32(tagHead[:4], uint32(l))
					writer.Write(tagHead[:4])
				}
			} else {
				// Later files: skip their FLV header + PreviousTagSize0 (13 bytes).
				_, err = reader.Discard(13)
				if err != nil {
					file.Close()
					continue
				}
				if !init {
					// Seek point was not reached in the earlier file(s);
					// start this file from its beginning.
					offsetTime = 0
					offsetTimestamp = 0
				}
			}
			// Iterate over the FLV tags of this file.
			for err == nil {
				_, err = io.ReadFull(reader, tagHead)
				if err != nil {
					break
				}
				tmp := tagHead
				t := tmp.ReadByte()
				dataLen := tmp.ReadUint24()
				// 24-bit timestamp plus 8-bit extension as the high byte.
				lastTimestamp = tmp.ReadUint24() | uint32(tmp.ReadByte())<<24
				if init {
					// Copy mode: the seek point has been reached.
					if t == flv.FLV_TAG_TYPE_SCRIPT {
						if pass == 0 {
							initMetaData(reader, dataLen)
						} else {
							_, err = reader.Discard(int(dataLen) + 4)
						}
					} else {
						lastTimestamp += offsetTimestamp
						// Stop once the requested range is fully covered.
						if lastTimestamp >= uint32(params.timeRange.Milliseconds()) {
							break
						}
						if pass == 0 {
							data := make([]byte, dataLen+4)
							_, err = io.ReadFull(reader, data)
							if err == nil {
								// Record keyframe positions for the seek index.
								frameType := (data[0] >> 4) & 0b0111
								idr := frameType == 1 || frameType == 4
								if idr {
									filepositions = append(filepositions, contentLength)
									times = append(times, float64(lastTimestamp)/1000)
								}
								contentLength += uint64(11 + dataLen + 4)
							}
						} else {
							// Rewrite the adjusted timestamp, then stream the body.
							flv.PutFlvTimestamp(tagHead, lastTimestamp)
							_, err = writer.Write(tagHead)
							if err == nil {
								_, err = io.CopyN(writer, reader, int64(dataLen+4))
							}
						}
					}
					continue
				}
				// Pre-seek mode: still looking for the first keyframe at or
				// after the seek offset; only leading tags are forwarded.
				switch t {
				case flv.FLV_TAG_TYPE_SCRIPT:
					if pass == 0 {
						initMetaData(reader, dataLen)
					} else {
						_, err = reader.Discard(int(dataLen) + 4)
					}
				case flv.FLV_TAG_TYPE_AUDIO:
					// Only the first audio tag (presumably the sequence
					// header) is forwarded with timestamp 0.
					if !seqAudioWritten {
						if pass == 0 {
							contentLength += uint64(11 + dataLen + 4)
							_, err = reader.Discard(int(dataLen) + 4)
						} else {
							flv.PutFlvTimestamp(tagHead, 0)
							_, err = writer.Write(tagHead)
							if err == nil {
								_, err = io.CopyN(writer, reader, int64(dataLen+4))
							}
						}
						seqAudioWritten = true
					} else {
						_, err = reader.Discard(int(dataLen) + 4)
					}
				case flv.FLV_TAG_TYPE_VIDEO:
					if !seqVideoWritten {
						// First video tag (presumably the sequence header):
						// always forwarded with timestamp 0.
						if pass == 0 {
							contentLength += uint64(11 + dataLen + 4)
							_, err = reader.Discard(int(dataLen) + 4)
						} else {
							flv.PutFlvTimestamp(tagHead, 0)
							_, err = writer.Write(tagHead)
							if err == nil {
								_, err = io.CopyN(writer, reader, int64(dataLen+4))
							}
						}
						seqVideoWritten = true
					} else {
						if lastTimestamp >= uint32(offsetTime.Milliseconds()) {
							data := make([]byte, dataLen+4)
							_, err = io.ReadFull(reader, data)
							if err == nil {
								frameType := (data[0] >> 4) & 0b0111
								idr := frameType == 1 || frameType == 4
								if idr {
									// First keyframe at/after the seek offset:
									// switch to copy mode from here on.
									init = true
									plugin.Debug("init", "lastTimestamp", lastTimestamp)
									if pass == 0 {
										filepositions = append(filepositions, contentLength)
										times = append(times, float64(lastTimestamp)/1000)
										contentLength += uint64(11 + dataLen + 4)
									} else {
										flv.PutFlvTimestamp(tagHead, 0)
										_, err = writer.Write(tagHead)
										if err == nil {
											_, err = writer.Write(data)
										}
									}
								}
							}
						} else {
							// Before the seek offset: drop the tag body.
							_, err = reader.Discard(int(dataLen) + 4)
						}
					}
				}
			}
			// Carry the last timestamp into the next file so its tags
			// continue the timeline where this file ended.
			offsetTimestamp = lastTimestamp
			file.Close()
		}
	}
	plugin.Info("FLV download completed")
}

View File

@@ -2,6 +2,7 @@ package flv
import (
"errors"
"io"
"m7s.live/v5"
"m7s.live/v5/pkg/util"
@@ -15,6 +16,10 @@ type Puller struct {
func (p *Puller) Run() (err error) {
reader := util.NewBufReader(p.ReadCloser)
publisher := p.PullJob.Publisher
if publisher == nil {
io.Copy(io.Discard, p.ReadCloser)
return
}
var hasAudio, hasVideo bool
var absTS uint32
var head util.Memory

View File

@@ -9,6 +9,7 @@ import (
"time"
m7s "m7s.live/v5"
"m7s.live/v5/pkg"
"m7s.live/v5/pkg/config"
"m7s.live/v5/pkg/task"
"m7s.live/v5/pkg/util"
@@ -47,6 +48,9 @@ func (p *RecordReader) Dispose() {
func (p *RecordReader) Run() (err error) {
pullJob := &p.PullJob
publisher := pullJob.Publisher
if publisher == nil {
return pkg.ErrDisabled
}
allocator := util.NewScalableMemoryAllocator(1 << 10)
var tagHeader [11]byte
var ts int64
@@ -60,6 +64,7 @@ func (p *RecordReader) Run() (err error) {
publisher.OnGetPosition = func() time.Time {
return realTime
}
for loop := 0; loop < p.Loop; loop++ {
nextStream:
for i, stream := range p.Streams {
@@ -85,15 +90,15 @@ func (p *RecordReader) Run() (err error) {
err = head.NewReader().ReadByteTo(&flvHead[0], &flvHead[1], &flvHead[2], &version, &flag)
hasAudio := (flag & 0x04) != 0
hasVideo := (flag & 0x01) != 0
if err != nil {
return
}
if !hasAudio {
publisher.NoAudio()
}
if !hasVideo {
publisher.NoVideo()
}
if err != nil {
return
}
if flvHead != [3]byte{'F', 'L', 'V'} {
return errors.New("not flv file")
}
@@ -194,7 +199,7 @@ func (p *RecordReader) Run() (err error) {
}
}
} else {
publisher.Info("script", name, obj)
p.Info("script", name, obj)
}
default:
err = fmt.Errorf("unknown tag type: %d", t)

View File

@@ -8,7 +8,6 @@ import (
"slices"
"time"
"gorm.io/gorm"
"m7s.live/v5"
"m7s.live/v5/pkg"
"m7s.live/v5/pkg/config"
@@ -144,7 +143,6 @@ func NewRecorder(conf config.Record) m7s.IRecorder {
type Recorder struct {
m7s.DefaultRecorder
stream m7s.RecordStream
}
var CustomFileName = func(job *m7s.RecordJob) string {
@@ -155,48 +153,21 @@ var CustomFileName = func(job *m7s.RecordJob) string {
}
func (r *Recorder) createStream(start time.Time) (err error) {
recordJob := &r.RecordJob
sub := recordJob.Subscriber
r.stream = m7s.RecordStream{
StartTime: start,
StreamPath: sub.StreamPath,
FilePath: CustomFileName(&r.RecordJob),
EventId: recordJob.EventId,
EventDesc: recordJob.EventDesc,
EventName: recordJob.EventName,
EventLevel: recordJob.EventLevel,
BeforeDuration: recordJob.BeforeDuration,
AfterDuration: recordJob.AfterDuration,
Mode: recordJob.Mode,
Type: "flv",
}
dir := filepath.Dir(r.stream.FilePath)
if err = os.MkdirAll(dir, 0755); err != nil {
return
}
if sub.Publisher.HasAudioTrack() {
r.stream.AudioCodec = sub.Publisher.AudioTrack.ICodecCtx.String()
}
if sub.Publisher.HasVideoTrack() {
r.stream.VideoCodec = sub.Publisher.VideoTrack.ICodecCtx.String()
}
if recordJob.Plugin.DB != nil {
recordJob.Plugin.DB.Save(&r.stream)
}
return
return r.CreateStream(start, CustomFileName)
}
func (r *Recorder) writeTailer(end time.Time) {
if r.stream.EndTime.After(r.stream.StartTime) {
if r.Event.EndTime.After(r.Event.StartTime) {
return
}
r.stream.EndTime = end
r.Event.EndTime = end
if r.RecordJob.Plugin.DB != nil {
r.RecordJob.Plugin.DB.Save(&r.stream)
writeMetaTagQueueTask.AddTask(&eventRecordCheck{
DB: r.RecordJob.Plugin.DB,
streamPath: r.stream.StreamPath,
})
if r.RecordJob.Event != nil {
r.RecordJob.Plugin.DB.Save(&r.Event)
} else {
r.RecordJob.Plugin.DB.Save(&r.Event.RecordStream)
}
writeMetaTagQueueTask.AddTask(m7s.NewEventRecordCheck(r.Event.Type, r.Event.StreamPath, r.RecordJob.Plugin.DB))
}
}
@@ -204,40 +175,6 @@ func (r *Recorder) Dispose() {
r.writeTailer(time.Now())
}
type eventRecordCheck struct {
task.Task
DB *gorm.DB
streamPath string
}
func (t *eventRecordCheck) Run() (err error) {
var eventRecordStreams []m7s.RecordStream
queryRecord := m7s.RecordStream{
EventLevel: m7s.EventLevelHigh,
Mode: m7s.RecordModeEvent,
Type: "flv",
}
t.DB.Where(&queryRecord).Find(&eventRecordStreams, "stream_path=?", t.streamPath) //搜索事件录像,且为重要事件(无法自动删除)
if len(eventRecordStreams) > 0 {
for _, recordStream := range eventRecordStreams {
var unimportantEventRecordStreams []m7s.RecordStream
queryRecord.EventLevel = m7s.EventLevelLow
query := `(start_time BETWEEN ? AND ?)
OR (end_time BETWEEN ? AND ?)
OR (? BETWEEN start_time AND end_time)
OR (? BETWEEN start_time AND end_time) AND stream_path=? `
t.DB.Where(&queryRecord).Where(query, recordStream.StartTime, recordStream.EndTime, recordStream.StartTime, recordStream.EndTime, recordStream.StartTime, recordStream.EndTime, recordStream.StreamPath).Find(&unimportantEventRecordStreams)
if len(unimportantEventRecordStreams) > 0 {
for _, unimportantEventRecordStream := range unimportantEventRecordStreams {
unimportantEventRecordStream.EventLevel = m7s.EventLevelHigh
t.DB.Save(&unimportantEventRecordStream)
}
}
}
}
return
}
func (r *Recorder) Run() (err error) {
var file *os.File
var filepositions []uint64
@@ -248,14 +185,14 @@ func (r *Recorder) Run() (err error) {
suber := ctx.Subscriber
noFragment := ctx.RecConf.Fragment == 0 || ctx.RecConf.Append
startTime := time.Now()
if ctx.BeforeDuration > 0 {
startTime = startTime.Add(-ctx.BeforeDuration)
if ctx.Event.BeforeDuration > 0 {
startTime = startTime.Add(-time.Duration(ctx.Event.BeforeDuration) * time.Millisecond)
}
if err = r.createStream(startTime); err != nil {
return
}
if noFragment {
file, err = os.OpenFile(r.stream.FilePath, os.O_CREATE|os.O_RDWR|util.Conditional(ctx.RecConf.Append, os.O_APPEND, os.O_TRUNC), 0666)
file, err = os.OpenFile(r.Event.FilePath, os.O_CREATE|os.O_RDWR|util.Conditional(ctx.RecConf.Append, os.O_APPEND, os.O_TRUNC), 0666)
if err != nil {
return
}
@@ -291,7 +228,7 @@ func (r *Recorder) Run() (err error) {
} else if ctx.RecConf.Fragment == 0 {
_, err = file.Write(FLVHead)
} else {
if file, err = os.OpenFile(r.stream.FilePath, os.O_CREATE|os.O_RDWR, 0666); err != nil {
if file, err = os.OpenFile(r.Event.FilePath, os.O_CREATE|os.O_RDWR, 0666); err != nil {
return
}
_, err = file.Write(FLVHead)
@@ -307,7 +244,7 @@ func (r *Recorder) Run() (err error) {
if err = r.createStream(time.Now()); err != nil {
return
}
if file, err = os.OpenFile(r.stream.FilePath, os.O_CREATE|os.O_RDWR, 0666); err != nil {
if file, err = os.OpenFile(r.Event.FilePath, os.O_CREATE|os.O_RDWR, 0666); err != nil {
return
}
_, err = file.Write(FLVHead)

View File

@@ -11,6 +11,8 @@ import (
"sync"
"time"
"gorm.io/gorm"
"github.com/emiago/sipgo"
"github.com/emiago/sipgo/sip"
"m7s.live/v5/pkg/util"
@@ -86,7 +88,8 @@ func (gb *GB28181Plugin) List(ctx context.Context, req *pb.GetDevicesRequest) (*
for _, c := range channels {
pbChannels = append(pbChannels, &pb.Channel{
DeviceId: c.ChannelID,
ParentId: c.ParentID,
ParentId: c.DeviceID,
ChannelId: c.ChannelID,
Name: c.Name,
Manufacturer: c.Manufacturer,
Model: c.Model,
@@ -432,10 +435,10 @@ func (gb *GB28181Plugin) SyncDevice(ctx context.Context, req *pb.SyncDeviceReque
if !ok && gb.DB != nil {
// 如果内存中没有且数据库存在,则从数据库查询
var device Device
if err := gb.DB.Where("id = ?", req.DeviceId).First(&device).Error; err == nil {
if err := gb.DB.Where("device_id = ?", req.DeviceId).First(&device).Error; err == nil {
d = &device
// 恢复设备的必要字段
d.Logger = gb.With("id", req.DeviceId)
d.Logger = gb.Logger.With("deviceid", req.DeviceId)
d.channels.L = new(sync.RWMutex)
d.plugin = gb
@@ -611,35 +614,47 @@ func (gb *GB28181Plugin) UpdateDevice(ctx context.Context, req *pb.Device) (*pb.
// 如果需要订阅目录,创建并启动目录订阅任务
if d.Online {
if d.CatalogSubscribeTask != nil {
if d.SubscribeCatalog > 0 {
if d.SubscribeCatalog > 0 {
if d.CatalogSubscribeTask != nil {
d.CatalogSubscribeTask.Ticker.Reset(time.Second * time.Duration(d.SubscribeCatalog))
d.CatalogSubscribeTask.Tick(nil)
} else {
catalogSubTask := NewCatalogSubscribeTask(d)
d.AddTask(catalogSubTask)
d.CatalogSubscribeTask.Tick(nil)
}
d.CatalogSubscribeTask.Tick(nil)
} else {
catalogSubTask := NewCatalogSubscribeTask(d)
d.AddTask(catalogSubTask)
d.CatalogSubscribeTask.Tick(nil)
if d.CatalogSubscribeTask != nil {
d.CatalogSubscribeTask.Stop(fmt.Errorf("catalog subscription disabled"))
}
}
if d.PositionSubscribeTask != nil {
if d.SubscribePosition > 0 {
if d.SubscribePosition > 0 {
if d.PositionSubscribeTask != nil {
d.PositionSubscribeTask.Ticker.Reset(time.Second * time.Duration(d.SubscribePosition))
d.PositionSubscribeTask.Tick(nil)
} else {
positionSubTask := NewPositionSubscribeTask(d)
d.AddTask(positionSubTask)
d.PositionSubscribeTask.Tick(nil)
}
d.PositionSubscribeTask.Tick(nil)
} else {
positionSubTask := NewPositionSubscribeTask(d)
d.AddTask(positionSubTask)
d.PositionSubscribeTask.Tick(nil)
if d.PositionSubscribeTask != nil {
d.PositionSubscribeTask.Stop(fmt.Errorf("position subscription disabled"))
}
}
if d.AlarmSubscribeTask != nil {
if d.SubscribeAlarm > 0 {
if d.SubscribeAlarm > 0 {
if d.AlarmSubscribeTask != nil {
d.AlarmSubscribeTask.Ticker.Reset(time.Second * time.Duration(d.SubscribeAlarm))
d.AlarmSubscribeTask.Tick(nil)
} else {
alarmSubTask := NewAlarmSubscribeTask(d)
d.AddTask(alarmSubTask)
d.AlarmSubscribeTask.Tick(nil)
}
d.AlarmSubscribeTask.Tick(nil)
} else {
alarmSubTask := NewAlarmSubscribeTask(d)
d.AddTask(alarmSubTask)
d.AlarmSubscribeTask.Tick(nil)
if d.AlarmSubscribeTask != nil {
d.AlarmSubscribeTask.Stop(fmt.Errorf("alarm subscription disabled"))
}
}
}
} else {
@@ -1142,7 +1157,7 @@ func (gb *GB28181Plugin) QueryRecord(ctx context.Context, req *pb.QueryRecordReq
return resp, nil
}
channel, ok := device.channels.Get(req.ChannelId)
channel, ok := device.channels.Get(req.DeviceId + "_" + req.ChannelId)
if !ok {
resp.Code = 404
resp.Message = "channel not found"
@@ -1271,32 +1286,36 @@ func (gb *GB28181Plugin) TestSip(ctx context.Context, req *pb.TestSipRequest) (*
// 创建一个临时设备用于测试
device := &Device{
DeviceId: "34020000002000000001",
SipIp: "192.168.1.17",
SipIp: "192.168.1.106",
Port: 5060,
IP: "192.168.1.102",
StreamMode: "TCP-PASSIVE",
}
//From: <sip:41010500002000000001@4101050000>;tag=4183af2ecc934758ad393dfe588f2dfd
// 初始化设备的SIP相关字段
device.fromHDR = sip.FromHeader{
Address: sip.Uri{
User: gb.Serial,
Host: gb.Realm,
User: "41010500002000000001",
Host: "4101050000",
},
Params: sip.NewParams(),
}
device.fromHDR.Params.Add("tag", sip.GenerateTagN(16))
device.fromHDR.Params.Add("tag", "4183af2ecc934758ad393dfe588f2dfd")
//Contact: <sip:41010500002000000001@192.168.1.106:5060>
device.contactHDR = sip.ContactHeader{
Address: sip.Uri{
User: gb.Serial,
Host: device.SipIp,
Port: device.Port,
User: "41010500002000000001",
Host: "192.168.1.106",
Port: 5060,
},
}
//Request-Line: INVITE sip:34020000001320000006@192.168.1.102:5060 SIP/2.0
// Method: INVITE
// Request-URI: sip:34020000001320000006@192.168.1.102:5060
// [Resent Packet: False]
// 初始化SIP客户端
device.client, _ = sipgo.NewClient(gb.ua, sipgo.WithClientLogger(zerolog.New(os.Stdout)), sipgo.WithClientHostname(device.SipIp))
device.client, _ = sipgo.NewClient(gb.ua, sipgo.WithClientLogger(zerolog.New(os.Stdout)), sipgo.WithClientHostname("192.168.1.106"))
if device.client == nil {
resp.Code = 500
resp.Message = "failed to create sip client"
@@ -1321,11 +1340,11 @@ func (gb *GB28181Plugin) TestSip(ctx context.Context, req *pb.TestSipRequest) (*
// 构建SDP消息体
sdpInfo := []string{
"v=0",
fmt.Sprintf("o=%s 0 0 IN IP4 %s", "34020000001320000004", device.SipIp),
fmt.Sprintf("o=%s 0 0 IN IP4 %s", "34020000001320000102", "192.168.1.106"),
"s=Play",
"c=IN IP4 " + device.SipIp,
"c=IN IP4 192.168.1.106",
"t=0 0",
"m=video 43970 TCP/RTP/AVP 96 97 98 99",
"m=video 40940 TCP/RTP/AVP 96 97 98 99",
"a=recvonly",
"a=rtpmap:96 PS/90000",
"a=rtpmap:98 H264/90000",
@@ -1333,36 +1352,40 @@ func (gb *GB28181Plugin) TestSip(ctx context.Context, req *pb.TestSipRequest) (*
"a=rtpmap:99 H265/90000",
"a=setup:passive",
"a=connection:new",
"y=0200005507",
"y=0105006213",
}
// 设置必需的头部
contentTypeHeader := sip.ContentTypeHeader("APPLICATION/SDP")
subjectHeader := sip.NewHeader("Subject", "34020000001320000006:0200005507,34020000002000000001:0")
//Subject: 34020000001320000006:0105006213,41010500002000000001:0
subjectHeader := sip.NewHeader("Subject", "34020000001320000006:0105006213,41010500002000000001:0")
//To: <sip:34020000001320000006@192.168.1.102:5060>
toHeader := sip.ToHeader{
Address: sip.Uri{
User: "34020000001320000006",
Host: device.IP,
Port: device.Port,
Host: "192.168.1.102",
Port: 5060,
},
}
userAgentHeader := sip.NewHeader("User-Agent", "WVP-Pro v2.7.3.20241218")
//Via: SIP/2.0/UDP 192.168.1.106:5060;branch=z9hG4bK9279674404;rport
viaHeader := sip.ViaHeader{
ProtocolName: "SIP",
ProtocolVersion: "2.0",
Transport: "UDP",
Host: device.SipIp,
Port: device.Port,
Host: "192.168.1.106",
Port: 5060,
Params: sip.HeaderParams(sip.NewParams()),
}
viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
viaHeader.Params.Add("branch", "z9hG4bK9279674404").Add("rport", "")
csqHeader := sip.CSeqHeader{
SeqNo: 13,
SeqNo: 3,
MethodName: "INVITE",
}
maxforward := sip.MaxForwardsHeader(70)
contentLengthHeader := sip.ContentLengthHeader(286)
//contentLengthHeader := sip.ContentLengthHeader(288)
request.AppendHeader(&contentTypeHeader)
request.AppendHeader(subjectHeader)
request.AppendHeader(&toHeader)
@@ -1374,7 +1397,7 @@ func (gb *GB28181Plugin) TestSip(ctx context.Context, req *pb.TestSipRequest) (*
// 创建会话并发送请求
dialogClientCache := sipgo.NewDialogClientCache(device.client, device.contactHDR)
session, err := dialogClientCache.Invite(gb, recipient, request.Body(), &csqHeader, &device.fromHDR, &toHeader, &viaHeader, &maxforward, userAgentHeader, &device.contactHDR, subjectHeader, &contentTypeHeader, &contentLengthHeader)
session, err := dialogClientCache.Invite(gb, recipient, request.Body(), &csqHeader, &device.fromHDR, &toHeader, &maxforward, userAgentHeader, &device.contactHDR, subjectHeader, &contentTypeHeader)
if err != nil {
resp.Code = 500
resp.Message = fmt.Sprintf("发送INVITE请求失败: %v", err)
@@ -1532,6 +1555,13 @@ func (gb *GB28181Plugin) AddPlatformChannel(ctx context.Context, req *pb.AddPlat
resp.Message = fmt.Sprintf("提交事务失败: %v", err)
return resp, nil
}
if platform, ok := gb.platforms.Get(req.PlatformId); !ok {
for _, channelId := range req.ChannelIds {
if channel, ok := gb.channels.Get(channelId); ok {
platform.channels.Set(channel)
}
}
}
resp.Code = 0
resp.Message = "success"
@@ -1592,7 +1622,7 @@ func (gb *GB28181Plugin) Recording(ctx context.Context, req *pb.RecordingRequest
}
// 从device.channels中查找实际通道
_, ok = actualDevice.channels.Get(result.ChannelID)
_, ok = actualDevice.channels.Get(result.DeviceID + "_" + result.ChannelID)
if !ok {
resp.Code = 404
resp.Message = "实际通道未找到"
@@ -1625,7 +1655,7 @@ func (gb *GB28181Plugin) Recording(ctx context.Context, req *pb.RecordingRequest
}
// 检查通道是否存在
_, ok = device.channels.Get(req.ChannelId)
_, ok = device.channels.Get(req.DeviceId + "_" + req.ChannelId)
if !ok {
resp.Code = 404
resp.Message = "通道未找到"
@@ -1711,7 +1741,7 @@ func (gb *GB28181Plugin) GetSnap(ctx context.Context, req *pb.GetSnapRequest) (*
}
// 从device.channels中查找实际通道
_, ok = actualDevice.channels.Get(result.ChannelID)
_, ok = actualDevice.channels.Get(result.DeviceID + "_" + result.ChannelID)
if !ok {
resp.Code = 404
resp.Message = "实际通道未找到"
@@ -1755,7 +1785,7 @@ func (gb *GB28181Plugin) GetSnap(ctx context.Context, req *pb.GetSnapRequest) (*
}
// 检查通道是否存在
_, ok = device.channels.Get(req.ChannelId)
_, ok = device.channels.Get(req.DeviceId + "_" + req.ChannelId)
if !ok {
resp.Code = 404
resp.Message = "通道未找到"
@@ -1843,8 +1873,8 @@ func (gb *GB28181Plugin) GetGroupChannels(ctx context.Context, req *pb.GetGroupC
Select(`
IFNULL(gc.id, 0) AS id,
dc.channel_id,
dc.device_id,
dc.name AS channel_name,
d.device_id AS device_id,
d.name AS device_name,
dc.status AS status,
CASE
@@ -1853,11 +1883,11 @@ func (gb *GB28181Plugin) GetGroupChannels(ctx context.Context, req *pb.GetGroupC
END AS in_group
`).
Joins("LEFT JOIN "+deviceTable+" AS d ON dc.device_id = d.device_id").
Joins("LEFT JOIN "+groupsChannelTable+" AS gc ON dc.channel_id = gc.channel_id AND gc.group_id = ?", req.GroupId)
Joins("LEFT JOIN "+groupsChannelTable+" AS gc ON dc.channel_id = gc.channel_id AND dc.device_id = gc.device_id AND gc.group_id = ?", req.GroupId)
// 如果有设备ID过滤条件
if req.DeviceId != "" {
baseQuery = baseQuery.Where("d.device_id = ?", req.DeviceId)
baseQuery = baseQuery.Where("dc.device_id = ?", req.DeviceId)
}
// 统计符合条件的通道总数
@@ -1873,7 +1903,7 @@ func (gb *GB28181Plugin) GetGroupChannels(ctx context.Context, req *pb.GetGroupC
query := baseQuery
// 添加排序
query = query.Order("channel_id ASC")
query = query.Order("dc.device_id ASC, dc.channel_id ASC")
// 如果指定了分页参数,则应用分页
if req.Page > 0 && req.Count > 0 {
@@ -1892,12 +1922,14 @@ func (gb *GB28181Plugin) GetGroupChannels(ctx context.Context, req *pb.GetGroupC
var pbGroupChannels []*pb.GroupChannel
for _, result := range results {
channelInfo := &pb.GroupChannel{
Id: int32(result.ID),
GroupId: req.GroupId,
ChannelId: result.ChannelID,
DeviceId: result.DeviceID,
ChannelName: result.ChannelName,
DeviceName: result.DeviceName,
Status: result.Status,
InGroup: result.InGroup, // 设置inGroup字段
InGroup: result.InGroup,
}
// 从内存中获取设备信息以获取传输协议
@@ -1905,13 +1937,6 @@ func (gb *GB28181Plugin) GetGroupChannels(ctx context.Context, req *pb.GetGroupC
channelInfo.StreamMode = device.StreamMode
}
if result.InGroup {
channelInfo.Id = int32(result.ID)
channelInfo.GroupId = int32(req.GroupId)
} else {
channelInfo.Id = 0
}
pbGroupChannels = append(pbGroupChannels, channelInfo)
}
@@ -2052,19 +2077,19 @@ func (gb *GB28181Plugin) getGroupChannels(groupId int32) ([]*pb.GroupChannel, er
InGroup bool `gorm:"column:in_group"`
}
// 构建查询
// 构建优化后的查询
query := gb.DB.Table(groupsChannelTable+" AS gc").
Select(`
gc.id AS id,
gc.channel_id AS channel_id,
gc.device_id AS device_id,
dc.name AS channel_name,
d.name AS device_name,
dc.status AS status,
ch.name AS channel_name,
dev.name AS device_name,
ch.status AS status,
true AS in_group
`).
Joins("LEFT JOIN "+deviceChannelTable+" AS dc ON gc.channel_id = dc.channel_id").
Joins("LEFT JOIN "+deviceTable+" AS d ON gc.device_id = d.device_id").
Joins("LEFT JOIN "+deviceChannelTable+" AS ch ON gc.device_id = ch.device_id AND gc.channel_id = ch.channel_id").
Joins("LEFT JOIN "+deviceTable+" AS dev ON ch.device_id = dev.device_id").
Where("gc.group_id = ?", groupId)
var results []Result
@@ -2077,7 +2102,7 @@ func (gb *GB28181Plugin) getGroupChannels(groupId int32) ([]*pb.GroupChannel, er
for _, result := range results {
channelInfo := &pb.GroupChannel{
Id: int32(result.ID),
GroupId: groupId,
GroupId: groupId, // 使用函数参数 groupId
ChannelId: result.ChannelID,
DeviceId: result.DeviceID,
ChannelName: result.ChannelName,
@@ -2460,12 +2485,9 @@ func (gb *GB28181Plugin) PlaybackPause(ctx context.Context, req *pb.PlaybackPaus
resp.Message = fmt.Sprintf("发送暂停请求失败: %v", err)
return resp, nil
}
gb.Server.Streams.Call(func() error {
if s, ok := gb.Server.Streams.Get(req.StreamPath); ok {
s.Pause()
}
return nil
})
if s, ok := gb.Server.Streams.SafeGet(req.StreamPath); ok {
s.Pause()
}
gb.Info("暂停回放",
"streampath", req.StreamPath)
@@ -2514,12 +2536,9 @@ func (gb *GB28181Plugin) PlaybackResume(ctx context.Context, req *pb.PlaybackRes
resp.Message = fmt.Sprintf("发送恢复请求失败: %v", err)
return resp, nil
}
gb.Server.Streams.Call(func() error {
if s, ok := gb.Server.Streams.Get(req.StreamPath); ok {
s.Resume()
}
return nil
})
if s, ok := gb.Server.Streams.SafeGet(req.StreamPath); ok {
s.Resume()
}
gb.Info("恢复回放",
"streampath", req.StreamPath)
@@ -2587,14 +2606,11 @@ func (gb *GB28181Plugin) PlaybackSpeed(ctx context.Context, req *pb.PlaybackSpee
// 发送请求
_, err := dialog.session.TransactionRequest(ctx, request)
gb.Server.Streams.Call(func() error {
if s, ok := gb.Server.Streams.Get(req.StreamPath); ok {
s.Speed = float64(req.Speed)
s.Scale = float64(req.Speed)
s.Info("set stream speed", "speed", req.Speed)
}
return nil
})
if s, ok := gb.Server.Streams.SafeGet(req.StreamPath); ok {
s.Speed = float64(req.Speed)
s.Scale = float64(req.Speed)
s.Info("set stream speed", "speed", req.Speed)
}
if err != nil {
resp.Code = 500
resp.Message = fmt.Sprintf("发送倍速请求失败: %v", err)
@@ -2818,62 +2834,54 @@ func (gb *GB28181Plugin) RemoveDevice(ctx context.Context, req *pb.RemoveDeviceR
return resp, nil
}
// 检查数据库连接
if gb.DB == nil {
resp.Code = 500
resp.Message = "数据库未初始化"
return resp, nil
}
// 开启事务
tx := gb.DB.Begin()
// 先从数据库中查找设备
var dbDevice Device
if err := tx.Where(&Device{DeviceId: req.Id}).First(&dbDevice).Error; err != nil {
tx.Rollback()
resp.Code = 404
resp.Message = fmt.Sprintf("设备不存在: %v", err)
return resp, nil
}
// 使用数据库中的 DeviceId 从内存中查找设备
if device, ok := gb.devices.Get(dbDevice.DeviceId); ok {
if device, ok := gb.devices.Get(req.Id); ok {
device.DeletedAt = gorm.DeletedAt{Time: time.Now(), Valid: true}
device.channels.Range(func(channel *Channel) bool {
channel.DeletedAt = gorm.DeletedAt{Time: time.Now(), Valid: true}
return true
})
// 停止设备相关任务
device.Stop(fmt.Errorf("device removed"))
device.WaitStopped()
// device.Stop() 会调用 Dispose(),其中已包含从 gb.devices 中移除设备的逻辑
// 开启数据库事务
tx := gb.DB.Begin()
if tx.Error != nil {
resp.Code = 500
resp.Message = "开启事务失败"
return resp, tx.Error
}
// 删除设备
if err := tx.Delete(&Device{DeviceId: req.Id}).Error; err != nil {
tx.Rollback()
resp.Code = 500
resp.Message = "删除设备失败"
return resp, err
}
// 删除设备关联的通道
if err := tx.Where("device_id = ?", req.Id).Delete(&gb28181.DeviceChannel{}).Error; err != nil {
tx.Rollback()
resp.Code = 500
resp.Message = "删除设备通道失败"
return resp, err
}
// 提交事务
if err := tx.Commit().Error; err != nil {
tx.Rollback()
resp.Code = 500
resp.Message = "提交事务失败"
return resp, err
}
resp.Code = 200
resp.Message = "设备删除成功"
}
// 删除设备关联的所有通道
if err := tx.Where(&gb28181.DeviceChannel{DeviceID: dbDevice.DeviceId}).Delete(&gb28181.DeviceChannel{}).Error; err != nil {
tx.Rollback()
resp.Code = 500
resp.Message = fmt.Sprintf("删除设备通道失败: %v", err)
return resp, nil
}
// 删除设备
if err := tx.Delete(&dbDevice).Error; err != nil {
tx.Rollback()
resp.Code = 500
resp.Message = fmt.Sprintf("删除设备失败: %v", err)
return resp, nil
}
// 提交事务
if err := tx.Commit().Error; err != nil {
resp.Code = 500
resp.Message = fmt.Sprintf("提交事务失败: %v", err)
return resp, nil
}
gb.Info("删除设备成功",
"deviceId", dbDevice.DeviceId,
"deviceName", dbDevice.Name)
resp.Code = 0
resp.Message = "success"
return resp, nil
}

View File

@@ -51,11 +51,11 @@ type Channel struct {
RecordReqs util.Collection[int, *RecordRequest]
PresetReqs util.Collection[int, *PresetRequest] // 预置位请求集合
*slog.Logger
gb28181.DeviceChannel
*gb28181.DeviceChannel
}
func (c *Channel) GetKey() string {
return c.ChannelID
return c.ID
}
type PullProxy struct {
@@ -75,7 +75,7 @@ func (p *PullProxy) Start() error {
streamPaths := strings.Split(p.GetStreamPath(), "/")
deviceId, channelId := streamPaths[0], streamPaths[1]
if device, ok := p.Plugin.GetHandler().(*GB28181Plugin).devices.Get(deviceId); ok {
if _, ok := device.channels.Get(channelId); ok {
if _, ok := device.channels.Get(deviceId + "_" + channelId); ok {
p.ChangeStatus(m7s.PullProxyStatusOnline)
}
}

View File

@@ -80,7 +80,7 @@ type Device struct {
fromHDR sip.FromHeader
toHDR sip.ToHeader
plugin *GB28181Plugin `gorm:"-:all"`
localPort int
LocalPort int
CatalogSubscribeTask *CatalogSubscribeTask `gorm:"-:all"`
PositionSubscribeTask *PositionSubscribeTask `gorm:"-:all"`
AlarmSubscribeTask *AlarmSubscribeTask `gorm:"-:all"`
@@ -92,18 +92,16 @@ func (d *Device) TableName() string {
func (d *Device) Dispose() {
if d.plugin.DB != nil {
d.plugin.DB.Save(d)
if d.channels.Length > 0 {
d.channels.Range(func(channel *Channel) bool {
d.plugin.DB.Save(channel.DeviceChannel)
//d.plugin.DB.Model(&gb28181.DeviceChannel{}).Where("device_id = ? AND device_db_id = ?", channel.DeviceId, d.ID).Updates(channel.DeviceChannel)
return true
})
} else {
// 如果没有通道,则直接更新通道状态为 OFF
d.plugin.DB.Model(&gb28181.DeviceChannel{}).Where("device_db_id = ?", d.ID).Update("status", "OFF")
}
d.plugin.DB.Save(d)
}
d.plugin.devices.RemoveByKey(d.DeviceId)
}
func (d *Device) GetKey() string {
@@ -140,6 +138,7 @@ func (r *CatalogRequest) IsComplete(channelsLength int) bool {
}
func (d *Device) onMessage(req *sip.Request, tx sip.ServerTransaction, msg *gb28181.Message) (err error) {
d.plugin.Trace("into onMessage,deviceid is ", d.DeviceId)
source := req.Source()
hostname, portStr, _ := net.SplitHostPort(source)
port, _ := strconv.Atoi(portStr)
@@ -160,6 +159,7 @@ func (d *Device) onMessage(req *sip.Request, tx sip.ServerTransaction, msg *gb28
case "Keepalive":
d.KeepaliveInterval = int(time.Since(d.KeepaliveTime).Seconds())
d.KeepaliveTime = time.Now()
d.Trace("into keeplive,deviceid is ", d.DeviceId, "d.KeepaliveTime is", d.KeepaliveTime)
if d.plugin.DB != nil {
if err := d.plugin.DB.Model(d).Updates(map[string]interface{}{
"keepalive_interval": d.KeepaliveInterval,
@@ -189,7 +189,7 @@ func (d *Device) onMessage(req *sip.Request, tx sip.ServerTransaction, msg *gb28
if d.plugin.DB != nil {
// 如果是第一个响应,先清空现有通道
if isFirst {
d.Debug("清空现有通道", "deviceId", d.DeviceId)
d.Trace("清空现有通道", "deviceId", d.DeviceId)
if err := d.plugin.DB.Where("device_id = ?", d.DeviceId).Delete(&gb28181.DeviceChannel{}).Error; err != nil {
d.Error("删除通道失败", "error", err, "deviceId", d.DeviceId)
}
@@ -213,7 +213,7 @@ func (d *Device) onMessage(req *sip.Request, tx sip.ServerTransaction, msg *gb28
// 更新当前设备的通道数
d.ChannelCount = msg.SumNum
d.UpdateTime = time.Now()
d.Debug("save channel", "deviceid", d.DeviceId, "channels count", d.channels.Length)
d.Trace("save channel", "deviceid", d.DeviceId, "channels count", d.channels.Length)
if err := d.plugin.DB.Model(d).Updates(map[string]interface{}{
"channel_count": d.ChannelCount,
"update_time": d.UpdateTime,
@@ -228,7 +228,7 @@ func (d *Device) onMessage(req *sip.Request, tx sip.ServerTransaction, msg *gb28
d.catalogReqs.RemoveByKey(msg.SN)
}
case "RecordInfo":
if channel, ok := d.channels.Get(msg.DeviceID); ok {
if channel, ok := d.channels.Get(d.DeviceId + "_" + msg.DeviceID); ok {
if req, ok := channel.RecordReqs.Get(msg.SN); ok {
// 添加响应并检查是否完成
if req.AddResponse(*msg) {
@@ -237,7 +237,7 @@ func (d *Device) onMessage(req *sip.Request, tx sip.ServerTransaction, msg *gb28
}
}
case "PresetQuery":
if channel, ok := d.channels.Get(msg.DeviceID); ok {
if channel, ok := d.channels.Get(d.DeviceId + "_" + msg.DeviceID); ok {
if req, ok := channel.PresetReqs.Get(msg.SN); ok {
// 添加预置位响应
req.Response = msg.PresetList.Item
@@ -323,7 +323,10 @@ func (d *Device) onMessage(req *sip.Request, tx sip.ServerTransaction, msg *gb28
}
case "DeviceInfo":
// 主设备信息
d.Name = msg.DeviceName
d.Info("DeviceInfo message", "body", req.Body(), "d.Name", d.Name, "d.DeviceId", d.DeviceId, "msg.DeviceName", msg.DeviceName)
if d.Name == "" && msg.DeviceName != "" {
d.Name = msg.DeviceName
}
d.Manufacturer = msg.Manufacturer
d.Model = msg.Model
d.Firmware = msg.Firmware
@@ -398,11 +401,12 @@ func (d *Device) onMessage(req *sip.Request, tx sip.ServerTransaction, msg *gb28
func (d *Device) send(req *sip.Request) (*sip.Response, error) {
d.SN++
d.Debug("send", "req", req.String())
d.Trace("send", "req", req.String())
return d.client.Do(context.Background(), req)
}
func (d *Device) Go() (err error) {
d.Trace("into device.Go,deviceid is ", d.DeviceId)
var response *sip.Response
// 初始化catalogReqs
@@ -420,7 +424,7 @@ func (d *Device) Go() (err error) {
if err != nil {
d.Error("catalog", "err", err)
} else {
d.Debug("catalog", "response", response.String())
d.Trace("catalog", "response", response.String())
}
// 创建并启动目录订阅任务
@@ -447,6 +451,7 @@ func (d *Device) Go() (err error) {
select {
case <-d.Done():
case <-keepLiveTick.C:
d.Trace("keepLiveTick,deviceid is", d.DeviceId, "d.KeepaliveTime is ", d.KeepaliveTime)
if timeDiff := time.Since(d.KeepaliveTime); timeDiff > time.Duration(3*keepaliveSeconds)*time.Second {
d.Online = false
d.Status = DeviceOfflineStatus
@@ -455,7 +460,7 @@ func (d *Device) Go() (err error) {
channel.Status = "OFF"
return true
})
d.Stop(fmt.Errorf("device keepalive timeout after %v", timeDiff))
d.Stop(fmt.Errorf("device keepalive timeout after %v,deviceid is %s", timeDiff, d.DeviceId))
return
}
case <-catalogTick.C:
@@ -467,7 +472,7 @@ func (d *Device) Go() (err error) {
if err != nil {
d.Error("catalog", "err", err)
} else {
d.Debug("catalogTick", "response", response.String())
d.Trace("catalogTick", "response", response.String())
}
//case event := <-d.eventChan:
// d.Debug("eventChan", "event", event)
@@ -519,7 +524,7 @@ func (d *Device) CreateRequest(Method sip.RequestMethod, Recipient any) *sip.Req
// ProtocolVersion: "2.0",
// Transport: "UDP",
// Host: d.SipIp,
// Port: d.localPort,
// Port: d.LocalPort,
// Params: sip.HeaderParams(sip.NewParams()),
//}
//viaHeader.Params.Add("branch", sip.GenerateBranchN(10)).Add("rport", "")
@@ -612,15 +617,16 @@ func (d *Device) frontEndCmdString(cmdCode int32, parameter1 int32, parameter2 i
}
func (d *Device) addOrUpdateChannel(c gb28181.DeviceChannel) {
if channel, ok := d.channels.Get(c.ChannelID); ok {
channel.DeviceChannel = c
if channel, ok := d.channels.Get(c.ID); ok {
channel.DeviceChannel = &c
} else {
channel = &Channel{
Device: d,
Logger: d.Logger.With("channel", c.ChannelID),
DeviceChannel: c,
Logger: d.Logger.With("channel", c.ID),
DeviceChannel: &c,
}
d.channels.Set(channel)
d.plugin.channels.Set(channel.DeviceChannel)
}
}

View File

@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"math/rand"
"net"
"net/url"
"strconv"
"strings"
@@ -30,10 +31,25 @@ type Dialog struct {
StreamMode string // 数据流传输模式UDP:udp传输/TCP-ACTIVEtcp主动模式/TCP-PASSIVEtcp被动模式
targetIP string // 目标设备的IP地址
targetPort int // 目标设备的端口
/**
子码流的配置,默认格式为:
stream=stream:0;stream=stream:1
GB28181-2022:
stream=streanumber:0;stream=streamnumber:1
大华为:
stream=streamprofile:0;stream=streamprofile:1
水星,tp-link:
stream=streamMode:main;stream=streamMode:sub
*/
stream string
}
func (d *Dialog) GetCallID() string {
return d.session.InviteRequest.CallID().Value()
if d.session != nil && d.session.InviteRequest != nil && d.session.InviteRequest.CallID() != nil {
return d.session.InviteRequest.CallID().Value()
} else {
return ""
}
}
func (d *Dialog) GetPullJob() *m7s.PullJob {
@@ -72,7 +88,7 @@ func (d *Dialog) Start() (err error) {
var device *Device
if deviceTmp, ok := d.gb.devices.Get(deviceId); ok {
device = deviceTmp
if channel, ok := deviceTmp.channels.Get(channelId); ok {
if channel, ok := deviceTmp.channels.Get(deviceId + "_" + channelId); ok {
d.Channel = channel
d.StreamMode = device.StreamMode
} else {
@@ -84,29 +100,34 @@ func (d *Dialog) Start() (err error) {
d.gb.dialogs.Set(d)
//defer d.gb.dialogs.Remove(d)
if d.gb.MediaPort.Valid() {
select {
case d.MediaPort = <-d.gb.tcpPorts:
default:
return fmt.Errorf("no available tcp port")
}
if d.gb.tcpPort > 0 {
d.MediaPort = d.gb.tcpPort
} else {
d.MediaPort = d.gb.MediaPort[0]
if d.gb.MediaPort.Valid() {
select {
case d.MediaPort = <-d.gb.tcpPorts:
default:
return fmt.Errorf("no available tcp port")
}
} else {
d.MediaPort = d.gb.MediaPort[0]
}
}
ssrc := d.CreateSSRC(d.gb.Serial)
d.Info("MediaIp is ", device.MediaIp)
// 构建 SDP 内容
sdpInfo := []string{
"v=0",
fmt.Sprintf("o=%s 0 0 IN IP4 %s", channelId, device.MediaIp),
fmt.Sprintf("o=%s 0 0 IN IP4 %s", channelId, device.SipIp),
fmt.Sprintf("s=%s", util.Conditional(d.IsLive(), "Play", "Playback")), // 根据是否有时间参数决定
}
// 非直播模式下添加u行保持在s=和c=之间
//if !d.IsLive() {
sdpInfo = append(sdpInfo, fmt.Sprintf("u=%s:0", channelId))
//}
if !d.IsLive() {
sdpInfo = append(sdpInfo, fmt.Sprintf("u=%s:0", channelId))
}
// 添加c行
sdpInfo = append(sdpInfo, "c=IN IP4 "+device.MediaIp)
@@ -115,7 +136,7 @@ func (d *Dialog) Start() (err error) {
if !d.IsLive() {
startTime, endTime, err := util.TimeRangeQueryParse(url.Values{"start": []string{d.start}, "end": []string{d.end}})
if err != nil {
d.Stop(errors.New("parse end time error"))
return errors.New("parse end time error")
}
sdpInfo = append(sdpInfo, fmt.Sprintf("t=%d %d", startTime.Unix(), endTime.Unix()))
} else {
@@ -134,7 +155,12 @@ func (d *Dialog) Start() (err error) {
}
sdpInfo = append(sdpInfo, mediaLine)
sdpInfo = append(sdpInfo, "a=recvonly")
if d.stream != "" {
sdpInfo = append(sdpInfo, "a="+d.stream)
}
sdpInfo = append(sdpInfo, "a=rtpmap:96 PS/90000")
//根据传输模式添加 setup 和 connection 属性
switch strings.ToUpper(device.StreamMode) {
@@ -149,14 +175,13 @@ func (d *Dialog) Start() (err error) {
"a=connection:new",
)
case "UDP":
d.Stop(errors.New("do not support udp mode"))
return errors.New("do not support udp mode")
default:
sdpInfo = append(sdpInfo,
"a=setup:passive",
"a=connection:new",
)
}
sdpInfo = append(sdpInfo, "a=rtpmap:96 PS/90000")
// 添加 SSRC
sdpInfo = append(sdpInfo, fmt.Sprintf("y=%s", ssrc))
@@ -185,7 +210,7 @@ func (d *Dialog) Start() (err error) {
ProtocolVersion: "2.0",
Transport: "UDP",
Host: device.MediaIp,
Port: device.localPort,
Port: device.LocalPort,
Params: sip.NewParams(),
}
viaHeader.Params.Add("branch", sip.GenerateBranchN(10)).Add("rport", "")
@@ -199,8 +224,8 @@ func (d *Dialog) Start() (err error) {
contactHDR := sip.ContactHeader{
Address: sip.Uri{
User: d.gb.Serial,
Host: device.SipIp,
Port: device.localPort,
Host: device.MediaIp,
Port: device.LocalPort,
},
}
@@ -208,12 +233,12 @@ func (d *Dialog) Start() (err error) {
Address: sip.Uri{
User: d.gb.Serial,
Host: device.MediaIp,
Port: device.localPort,
Port: device.LocalPort,
},
Params: sip.NewParams(),
}
fromHDR.Params.Add("tag", sip.GenerateTagN(32))
dialogClientCache := sipgo.NewDialogClientCache(device.client, device.contactHDR)
dialogClientCache := sipgo.NewDialogClientCache(device.client, contactHDR)
// 创建会话
d.gb.Info("start to invite,recipient:", recipient, " viaHeader:", viaHeader, " fromHDR:", fromHDR, " toHeader:", toHeader, " device.contactHDR:", device.contactHDR, "contactHDR:", contactHDR)
// 判断当前系统类型
@@ -223,18 +248,21 @@ func (d *Dialog) Start() (err error) {
d.session, err = dialogClientCache.Invite(d.gb, recipient, []byte(strings.Join(sdpInfo, "\r\n")+"\r\n"), &callID, &csqHeader, &fromHDR, &toHeader, &maxforward, userAgentHeader, subjectHeader, &contentTypeHeader)
//}
// 最后添加Content-Length头部
if err != nil {
return errors.New("dialog invite error" + err.Error())
}
return
}
func (d *Dialog) Run() (err error) {
d.Channel.Info("before WaitAnswer")
d.gb.Info("before WaitAnswer")
err = d.session.WaitAnswer(d.gb, sipgo.AnswerOptions{})
d.Channel.Info("after WaitAnswer")
d.gb.Info("after WaitAnswer")
if err != nil {
return
return errors.New("wait answer error" + err.Error())
}
inviteResponseBody := string(d.session.InviteResponse.Body())
d.Channel.Info("inviteResponse", "body", inviteResponseBody)
d.gb.Info("inviteResponse", "body", inviteResponseBody)
ds := strings.Split(inviteResponseBody, "\r\n")
for _, l := range ds {
if ls := strings.Split(l, "="); len(ls) > 1 {
@@ -244,7 +272,7 @@ func (d *Dialog) Run() (err error) {
if _ssrc, err := strconv.ParseInt(ls[1], 10, 0); err == nil {
d.SSRC = uint32(_ssrc)
} else {
d.gb.Error("read invite response y ", "err", err)
return errors.New("read invite respose y error" + err.Error())
}
}
case "c":
@@ -277,10 +305,26 @@ func (d *Dialog) Run() (err error) {
if d.StreamMode == "TCP-ACTIVE" {
pub.Receiver.ListenAddr = fmt.Sprintf("%s:%d", d.targetIP, d.targetPort)
} else {
if d.gb.tcpPort > 0 {
d.Info("into single port mode,use gb.tcpPort", d.gb.tcpPort)
if d.gb.netListener != nil {
d.Info("use gb.netListener", d.gb.netListener.Addr())
pub.Receiver.Listener = d.gb.netListener
} else {
d.Info("listen tcp4", fmt.Sprintf(":%d", d.gb.tcpPort))
pub.Receiver.Listener, _ = net.Listen("tcp4", fmt.Sprintf(":%d", d.gb.tcpPort))
d.gb.netListener = pub.Receiver.Listener
}
pub.Receiver.SSRC = d.SSRC
}
pub.Receiver.ListenAddr = fmt.Sprintf(":%d", d.MediaPort)
}
pub.Receiver.StreamMode = d.StreamMode
d.AddTask(&pub.Receiver)
startResult := pub.Receiver.WaitStarted()
if startResult != nil {
return fmt.Errorf("pub.Receiver.WaitStarted %s", startResult)
}
pub.Demux()
return
}
@@ -290,14 +334,20 @@ func (d *Dialog) GetKey() uint32 {
}
func (d *Dialog) Dispose() {
d.gb.tcpPorts <- d.MediaPort
err := d.session.Bye(d)
if err != nil {
d.Error("dialog bye bye err", err)
if d.gb.tcpPort == 0 {
// 如果没有设置tcp端口则将MediaPort设置为0表示不再使用
d.gb.tcpPorts <- d.MediaPort
}
err = d.session.Close()
if err != nil {
d.Error("dialog close session err", err)
d.Info("dialog dispose", "ssrc", d.SSRC, "mediaPort", d.MediaPort, "streamMode", d.StreamMode, "deviceId", d.Channel.DeviceID, "channelId", d.Channel.ChannelID)
if d.session != nil {
err := d.session.Bye(d)
if err != nil {
d.Error("dialog bye bye err", err)
}
err = d.session.Close()
if err != nil {
d.Error("dialog close session err", err)
}
}
d.gb.dialogs.Remove(d)
}

View File

@@ -70,7 +70,7 @@ func (d *ForwardDialog) Start() (err error) {
var device *Device
if deviceTmp, ok := d.gb.devices.Get(deviceId); ok {
device = deviceTmp
if channel, ok := deviceTmp.channels.Get(channelId); ok {
if channel, ok := deviceTmp.channels.Get(deviceId + "_" + channelId); ok {
d.channel = channel
} else {
return fmt.Errorf("channel %s not found", channelId)
@@ -191,7 +191,7 @@ func (d *ForwardDialog) Start() (err error) {
ProtocolVersion: "2.0",
Transport: "UDP",
Host: device.SipIp,
Port: device.localPort,
Port: device.LocalPort,
Params: sip.HeaderParams(sip.NewParams()),
}
viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
@@ -199,7 +199,7 @@ func (d *ForwardDialog) Start() (err error) {
Address: sip.Uri{
User: d.gb.Serial,
Host: device.MediaIp,
Port: device.localPort,
Port: device.LocalPort,
},
Params: sip.NewParams(),
}

File diff suppressed because it is too large Load Diff

View File

@@ -300,3 +300,7 @@ func (d *DeviceChannel) appendInfoContent(content *string) {
*content += " <SVCTimeSupportMode>" + strconv.Itoa(d.SVCTimeSupportMode) + "</SVCTimeSupportMode>\n"
}
}
func (d *DeviceChannel) GetKey() string {
return d.ID
}

View File

@@ -22,9 +22,7 @@ package gb28181
import (
"fmt"
"log/slog"
"net"
"os"
"strconv"
"strings"
"sync"
@@ -61,8 +59,7 @@ type RTPForwarder struct {
SendInterval time.Duration // 发送间隔,可用于限流
lastSendTime time.Time // 上次发送时间
stopChan chan struct{} // 停止信号通道
*slog.Logger
StreamMode string // 数据流传输模式UDP:udp传输/TCP-ACTIVEtcp主动模式/TCP-PASSIVEtcp被动模式
StreamMode string // 数据流传输模式UDP:udp传输/TCP-ACTIVEtcp主动模式/TCP-PASSIVEtcp被动模式
}
// NewRTPForwarder 创建一个新的RTP转发器
@@ -71,7 +68,6 @@ func NewRTPForwarder() *RTPForwarder {
FeedChan: make(chan []byte, 2000), // 增加缓冲区大小,减少丢包风险
SendInterval: time.Millisecond * 0, // 默认不限制发送间隔,最大速度转发
stopChan: make(chan struct{}),
Logger: slog.New(slog.NewTextHandler(os.Stdout, nil)),
}
ret.bufferPool = sync.Pool{
@@ -90,7 +86,7 @@ func (p *RTPForwarder) ReadRTP(rtpBuf util.Buffer) (err error) {
return
}
if p.Enabled(p, task.TraceLevel) {
if p.TraceEnabled() {
p.Trace("rtp", "len", rtpBuf.Len(), "seq", p.SequenceNumber, "payloadType", p.PayloadType, "ssrc", p.SSRC)
}
@@ -347,7 +343,7 @@ func (p *RTPForwarder) Demux() {
}
p.lastSendTime = time.Now()
if p.Enabled(p, task.TraceLevel) && p.ForwardCount%1000 == 0 {
if p.TraceEnabled() && p.ForwardCount%1000 == 0 {
p.Trace("forward rtp packet", "count", p.ForwardCount, "TCP", p.TCP, "TCPActive", p.TCPActive)
}
}

View File

@@ -66,3 +66,8 @@ type PlatformChannel struct {
func (*PlatformChannel) TableName() string {
return "gb28181_platform_channel"
}
func (p *PlatformChannel) GetKey() string {
return p.PlatformServerGBID + "_" + p.ChannelDBID
}

View File

@@ -9,43 +9,44 @@ import (
// 包含了平台的基本信息、SIP服务配置、设备信息、认证信息等。
// 用于存储和管理GB28181平台的所有相关参数。
type PlatformModel struct {
Enable bool `gorm:"column:enable" json:"enable"` // Enable表示该平台配置是否启用
Name string `gorm:"column:name;omitempty" json:"name"` // Name表示平台的名称
ServerGBID string `gorm:"primaryKey;column:server_gb_id;omitempty" json:"serverGBId"` // ServerGBID表示SIP服务器的国标编码
ServerGBDomain string `gorm:"column:server_gb_domain;omitempty" json:"serverGBDomain"` // ServerGBDomain表示SIP服务器的国标域
ServerIP string `gorm:"column:server_ip;omitempty" json:"serverIp"` // ServerIP表示SIP服务器的IP地址
ServerPort int `gorm:"column:server_port;omitempty" json:"serverPort"` // ServerPort表示SIP服务器的端口号
DeviceGBID string `gorm:"column:device_gb_id;omitempty" json:"deviceGBId"` // DeviceGBID表示设备的国标编号
DeviceIP string `gorm:"column:device_ip;omitempty" json:"deviceIp"` // DeviceIP表示设备的IP地址
DevicePort int `gorm:"column:device_port;omitempty" json:"devicePort"` // DevicePort表示设备的端口号
Username string `gorm:"column:username;omitempty" json:"username"` // Username表示SIP认证的用户名默认使用设备国标编号
Password string `gorm:"column:password;omitempty" json:"password"` // Password表示SIP认证的密码
Expires int `gorm:"column:expires;omitempty" json:"expires"` // Expires表示注册的过期时间单位为秒
KeepTimeout int `gorm:"column:keep_timeout;omitempty" json:"keepTimeout"` // KeepTimeout表示心跳超时时间单位为秒
Transport string `gorm:"column:transport;omitempty" json:"transport"` // Transport表示传输协议类型
CharacterSet string `gorm:"column:character_set;omitempty" json:"characterSet"` // CharacterSet表示字符集编码
PTZ bool `gorm:"column:ptz" json:"ptz"` // PTZ表示是否允许云台控制
RTCP bool `gorm:"column:rtcp" json:"rtcp"` // RTCP表示是否启用RTCP流保活
Status bool `gorm:"column:status" json:"status"` // Status表示平台当前的在线状态
ChannelCount int `gorm:"column:channel_count;omitempty" json:"channelCount"` // ChannelCount表示通道数量
CatalogSubscribe bool `gorm:"column:catalog_subscribe" json:"catalogSubscribe"` // CatalogSubscribe表示是否已订阅目录信息
AlarmSubscribe bool `gorm:"column:alarm_subscribe" json:"alarmSubscribe"` // AlarmSubscribe表示是否已订阅报警信息
MobilePositionSubscribe bool `gorm:"column:mobile_position_subscribe" json:"mobilePositionSubscribe"` // MobilePositionSubscribe表示是否已订阅移动位置信息
CatalogGroup int `gorm:"column:catalog_group;omitempty" json:"catalogGroup"` // CatalogGroup表示目录分组大小每次向上级发送通道数量
UpdateTime string `gorm:"column:update_time;omitempty" json:"updateTime"` // UpdateTime表示最后更新时间
CreateTime string `gorm:"column:create_time;omitempty" json:"createTime"` // CreateTime表示创建时间
AsMessageChannel bool `gorm:"column:as_message_channel" json:"asMessageChannel"` // AsMessageChannel表示是否作为消息通道使用
SendStreamIP string `gorm:"column:send_stream_ip;omitempty" json:"sendStreamIp"` // SendStreamIP表示点播回复200OK时使用的IP地址
AutoPushChannel bool `gorm:"column:auto_push_channel" json:"autoPushChannel"` // AutoPushChannel表示是否自动推送通道变化
CatalogWithPlatform int `gorm:"column:catalog_with_platform;omitempty" json:"catalogWithPlatform"` // CatalogWithPlatform表示目录信息是否包含平台信息(0:关闭,1:打开)
CatalogWithGroup int `gorm:"column:catalog_with_group;omitempty" json:"catalogWithGroup"` // CatalogWithGroup表示目录信息是否包含分组信息(0:关闭,1:打开)
CatalogWithRegion int `gorm:"column:catalog_with_region;omitempty" json:"catalogWithRegion"` // CatalogWithRegion表示目录信息是否包含行政区划(0:关闭,1:打开)
CivilCode string `gorm:"column:civil_code;omitempty" json:"civilCode"` // CivilCode表示行政区划代码
Manufacturer string `gorm:"column:manufacturer;omitempty" json:"manufacturer"` // Manufacturer表示平台厂商
Model string `gorm:"column:model;omitempty" json:"model"` // Model表示平台型号
Address string `gorm:"column:address;omitempty" json:"address"` // Address表示平台安装地址
RegisterWay int `gorm:"column:register_way;omitempty" json:"registerWay"` // RegisterWay表示注册方式(1:标准认证注册,2:口令认证,3:数字证书双向认证,4:数字证书单向认证)
Secrecy int `gorm:"column:secrecy;omitempty" json:"secrecy"` // Secrecy表示保密属性(0:不涉密,1:涉密)
Enable bool `gorm:"column:enable" json:"enable"` // Enable表示该平台配置是否启用
Name string `gorm:"column:name;omitempty" json:"name"` // Name表示平台的名称
ServerGBID string `gorm:"primaryKey;column:server_gb_id;omitempty" json:"serverGBId"` // ServerGBID表示SIP服务器的国标编码
ServerGBDomain string `gorm:"column:server_gb_domain;omitempty" json:"serverGBDomain"` // ServerGBDomain表示SIP服务器的国标域
ServerIP string `gorm:"column:server_ip;omitempty" json:"serverIp"` // ServerIP表示SIP服务器的IP地址
ServerPort int `gorm:"column:server_port;omitempty" json:"serverPort"` // ServerPort表示SIP服务器的端口号
DeviceGBID string `gorm:"column:device_gb_id;omitempty" json:"deviceGBId"` // DeviceGBID表示设备的国标编号
DeviceIP string `gorm:"column:device_ip;omitempty" json:"deviceIp"` // DeviceIP表示设备的IP地址
DevicePort int `gorm:"column:device_port;omitempty" json:"devicePort"` // DevicePort表示设备的端口号
Username string `gorm:"column:username;omitempty" json:"username"` // Username表示SIP认证的用户名默认使用设备国标编号
Password string `gorm:"column:password;omitempty" json:"password"` // Password表示SIP认证的密码
Expires int `gorm:"column:expires;omitempty" json:"expires"` // Expires表示注册的过期时间单位为秒
KeepTimeout int `gorm:"column:keep_timeout;omitempty" json:"keepTimeout"` // KeepTimeout表示心跳超时时间单位为秒
Transport string `gorm:"column:transport;omitempty" json:"transport"` // Transport表示传输协议类型
CharacterSet string `gorm:"column:character_set;omitempty" json:"characterSet"` // CharacterSet表示字符集编码
PTZ bool `gorm:"column:ptz" json:"ptz"` // PTZ表示是否允许云台控制
RTCP bool `gorm:"column:rtcp" json:"rtcp"` // RTCP表示是否启用RTCP流保活
Status bool `gorm:"column:status" json:"status"` // Status表示平台当前的在线状态
ChannelCount int `gorm:"column:channel_count;omitempty" json:"channelCount"` // ChannelCount表示通道数量
CatalogSubscribe bool `gorm:"column:catalog_subscribe" json:"catalogSubscribe"` // CatalogSubscribe表示是否已订阅目录信息
AlarmSubscribe bool `gorm:"column:alarm_subscribe" json:"alarmSubscribe"` // AlarmSubscribe表示是否已订阅报警信息
MobilePositionSubscribe bool `gorm:"column:mobile_position_subscribe" json:"mobilePositionSubscribe"` // MobilePositionSubscribe表示是否已订阅移动位置信息
CatalogGroup int `gorm:"column:catalog_group;omitempty" json:"catalogGroup"` // CatalogGroup表示目录分组大小每次向上级发送通道数量
UpdateTime string `gorm:"column:update_time;omitempty" json:"updateTime"` // UpdateTime表示最后更新时间
CreateTime string `gorm:"column:create_time;omitempty" json:"createTime"` // CreateTime表示创建时间
AsMessageChannel bool `gorm:"column:as_message_channel" json:"asMessageChannel"` // AsMessageChannel表示是否作为消息通道使用
SendStreamIP string `gorm:"column:send_stream_ip;omitempty" json:"sendStreamIp"` // SendStreamIP表示点播回复200OK时使用的IP地址
AutoPushChannel bool `gorm:"column:auto_push_channel" json:"autoPushChannel"` // AutoPushChannel表示是否自动推送通道变化
CatalogWithPlatform int `gorm:"column:catalog_with_platform;omitempty" json:"catalogWithPlatform"` // CatalogWithPlatform表示目录信息是否包含平台信息(0:关闭,1:打开)
CatalogWithGroup int `gorm:"column:catalog_with_group;omitempty" json:"catalogWithGroup"` // CatalogWithGroup表示目录信息是否包含分组信息(0:关闭,1:打开)
CatalogWithRegion int `gorm:"column:catalog_with_region;omitempty" json:"catalogWithRegion"` // CatalogWithRegion表示目录信息是否包含行政区划(0:关闭,1:打开)
CivilCode string `gorm:"column:civil_code;omitempty" json:"civilCode"` // CivilCode表示行政区划代码
Manufacturer string `gorm:"column:manufacturer;omitempty" json:"manufacturer"` // Manufacturer表示平台厂商
Model string `gorm:"column:model;omitempty" json:"model"` // Model表示平台型号
Address string `gorm:"column:address;omitempty" json:"address"` // Address表示平台安装地址
RegisterWay int `gorm:"column:register_way;omitempty" json:"registerWay"` // RegisterWay表示注册方式(1:标准认证注册,2:口令认证,3:数字证书双向认证,4:数字证书单向认证)
Secrecy int `gorm:"column:secrecy;omitempty" json:"secrecy"` // Secrecy表示保密属性(0:不涉密,1:涉密)
PlatformChannels []*PlatformChannel `gorm:"-:all"`
}
// TableName 指定数据库表名

View File

@@ -44,8 +44,9 @@ type Receiver struct {
psAudio PSAudio
RTPReader *rtp2.TCP
ListenAddr string
listener net.Listener
Listener net.Listener
StreamMode string // 数据流传输模式UDP:udp传输/TCP-ACTIVEtcp主动模式/TCP-PASSIVEtcp被动模式
SSRC uint32 // RTP SSRC
}
func NewPSPublisher(puber *m7s.Publisher) *PSPublisher {
@@ -147,13 +148,29 @@ func (p *Receiver) ReadRTP(rtp util.Buffer) (err error) {
p.Error("unmarshal error", "err", err)
return
}
// 如果设置了SSRC过滤只处理匹配的SSRC
if p.SSRC != 0 && p.SSRC != p.Packet.SSRC {
p.Info("into single port mode, ssrc mismatch", "expected", p.SSRC, "actual", p.Packet.SSRC)
if p.TraceEnabled() {
p.Trace("rtp ssrc mismatch, skip", "expected", p.SSRC, "actual", p.Packet.SSRC)
}
return nil
}
if lastSeq == 0 || p.SequenceNumber == lastSeq+1 {
if p.Enabled(p, task.TraceLevel) {
p.Trace("rtp", "len", rtp.Len(), "seq", p.SequenceNumber, "payloadType", p.PayloadType, "ssrc", p.SSRC)
if p.TraceEnabled() {
p.Trace("rtp", "len", rtp.Len(), "seq", p.SequenceNumber, "payloadType", p.PayloadType, "ssrc", p.Packet.SSRC)
}
copyData := make([]byte, len(p.Payload))
copy(copyData, p.Payload)
p.FeedChan <- copyData
select {
case p.FeedChan <- copyData:
// 成功发送数据
case <-p.Done():
// 任务已停止,返回错误
return task.ErrTaskComplete
}
return
}
return ErrRTPReceiveLost
@@ -166,18 +183,24 @@ func (p *Receiver) Start() (err error) {
return nil
}
// TCP被动模式
p.listener, err = net.Listen("tcp4", p.ListenAddr)
if err != nil {
p.Error("start listen", "err", err)
return errors.New("start listen,err" + err.Error())
if p.Listener == nil {
p.Info("start new listener", "addr", p.ListenAddr)
p.Listener, err = net.Listen("tcp4", p.ListenAddr)
if err != nil {
p.Error("start listen", "err", err)
return errors.New("start listen,err" + err.Error())
}
}
p.Info("start listen", "addr", p.ListenAddr)
return
}
func (p *Receiver) Dispose() {
if p.listener != nil {
p.listener.Close()
if p.SSRC == 0 {
p.Info("into multiport mode ,close listener ", p.SSRC)
if p.Listener != nil {
p.Listener.Close()
}
}
if p.RTPReader != nil {
p.RTPReader.Close()
@@ -210,7 +233,7 @@ func (p *Receiver) Go() error {
}
// TCP被动模式
p.Info("start accept")
conn, err := p.listener.Accept()
conn, err := p.Listener.Accept()
if err != nil {
p.Error("accept", "err", err)
return err

View File

@@ -3,6 +3,7 @@ package plugin_gb28181pro
import (
"context"
"fmt"
"m7s.live/v5/pkg/util"
"net/http"
"strconv"
"strings"
@@ -40,6 +41,7 @@ type Platform struct {
plugin *GB28181Plugin
ctx context.Context
unRegister bool
channels util.Collection[string, *gb28181.DeviceChannel] `gorm:"-:all"`
}
func NewPlatform(pm *gb28181.PlatformModel, plugin *GB28181Plugin, unRegister bool) *Platform {
@@ -49,7 +51,7 @@ func NewPlatform(pm *gb28181.PlatformModel, plugin *GB28181Plugin, unRegister bo
unRegister: unRegister,
}
p.ctx = context.Background()
client, err := sipgo.NewClient(p.plugin.ua, sipgo.WithClientHostname(p.PlatformModel.DeviceIP), sipgo.WithClientPort(p.PlatformModel.DevicePort))
client, err := sipgo.NewClient(p.plugin.ua, sipgo.WithClientHostname(p.PlatformModel.DeviceIP))
if err != nil {
p.Error("failed to create sip client: %v", err)
}
@@ -155,16 +157,16 @@ func (p *Platform) Keepalive() (*sipgo.DialogClientSession, error) {
}
req.AppendHeader(&toHeader)
viaHeader := sip.ViaHeader{
ProtocolName: "SIP",
ProtocolVersion: "2.0",
Transport: p.PlatformModel.Transport,
Host: p.PlatformModel.DeviceIP,
Port: p.PlatformModel.DevicePort,
Params: sip.NewParams(),
}
viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
req.AppendHeader(&viaHeader)
//viaHeader := sip.ViaHeader{
// ProtocolName: "SIP",
// ProtocolVersion: "2.0",
// Transport: p.PlatformModel.Transport,
// Host: p.PlatformModel.DeviceIP,
// Port: p.PlatformModel.DevicePort,
// Params: sip.NewParams(),
//}
//viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
//req.AppendHeader(&viaHeader)
req.SetBody(gb28181.BuildKeepAliveXML(p.SN, p.PlatformModel.DeviceGBID))
p.SN++
@@ -240,16 +242,16 @@ func (p *Platform) Register(isUnregister bool) error {
req.AppendHeader(&toHeader)
// 添加Via头部
viaHeader := sip.ViaHeader{
ProtocolName: "SIP",
ProtocolVersion: "2.0",
Transport: p.PlatformModel.Transport,
Host: p.PlatformModel.DeviceIP,
Port: p.PlatformModel.DevicePort,
Params: sip.NewParams(),
}
viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
req.AppendHeader(&viaHeader)
//viaHeader := sip.ViaHeader{
// ProtocolName: "SIP",
// ProtocolVersion: "2.0",
// Transport: p.PlatformModel.Transport,
// Host: p.PlatformModel.DeviceIP,
// Port: p.PlatformModel.DevicePort,
// Params: sip.NewParams(),
//}
//viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
//req.AppendHeader(&viaHeader)
req.AppendHeader(&p.MaxForwardsHDR)
@@ -333,6 +335,8 @@ func (p *Platform) Register(isUnregister bool) error {
newReq := req.Clone()
newReq.RemoveHeader("Via") // 必须由传输层重新生成
newReq.AppendHeader(sip.NewHeader("Authorization", cred.String()))
newReq.CSeq().SeqNo = uint32(p.SN) // 更新CSeq序号
p.SN++
// 发送认证请求
tx, err = p.Client.TransactionRequest(p.ctx, newReq, sipgo.ClientRequestAddVia)
@@ -457,14 +461,17 @@ func (p *Platform) handleCatalog(req *sip.Request, tx sip.ServerTransaction, msg
// 查询通道列表
var channels []gb28181.DeviceChannel
if p.plugin.DB != nil {
if err := p.plugin.DB.Table("gb28181_channel gc").
Select(`gc.*`).
Joins("left join gb28181_platform_channel gpc on gc.id=gpc.channel_db_id").
Where("gpc.platform_server_gb_id = ? and gc.status='ON'", p.PlatformModel.ServerGBID).
Find(&channels).Error; err != nil {
return fmt.Errorf("query channels error: %v", err)
}
//if p.plugin.DB != nil {
// if err := p.plugin.DB.Table("gb28181_channel gc").
// Select(`gc.*`).
// Joins("left join gb28181_platform_channel gpc on gc.id=gpc.channel_db_id").
// Where("gpc.platform_server_gb_id = ? and gc.status='ON'", p.PlatformModel.ServerGBID).
// Find(&channels).Error; err != nil {
// return fmt.Errorf("query channels error: %v", err)
// }
//}
for channel := range p.channels.Range {
channels = append(channels, *channel)
}
// 发送目录响应,无论是否有通道
@@ -506,16 +513,16 @@ func (p *Platform) sendCatalogResponse(req *sip.Request, sn string, fromTag stri
request.AppendHeader(&toHeader)
// 添加Via头部
viaHeader := sip.ViaHeader{
ProtocolName: "SIP",
ProtocolVersion: "2.0",
Transport: p.PlatformModel.Transport,
Host: p.PlatformModel.DeviceIP,
Port: p.PlatformModel.DevicePort,
Params: sip.NewParams(),
}
viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
request.AppendHeader(&viaHeader)
//viaHeader := sip.ViaHeader{
// ProtocolName: "SIP",
// ProtocolVersion: "2.0",
// Transport: p.PlatformModel.Transport,
// Host: p.PlatformModel.DeviceIP,
// Port: p.PlatformModel.DevicePort,
// Params: sip.NewParams(),
//}
//viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
//request.AppendHeader(&viaHeader)
request.SetTransport(req.Transport())
contentTypeHeader := sip.ContentTypeHeader("Application/MANSCDP+xml")
@@ -526,7 +533,7 @@ func (p *Platform) sendCatalogResponse(req *sip.Request, sn string, fromTag stri
<Response>
<CmdType>Catalog</CmdType>
<SN>%s</SN>
<DeviceId>%s</DeviceId>
<DeviceID>%s</DeviceID>
<SumNum>0</SumNum>
<DeviceList Num="0">
</DeviceList>
@@ -648,16 +655,16 @@ func (p *Platform) sendCatalogResponse(req *sip.Request, sn string, fromTag stri
request.AppendHeader(&toHeader)
// 添加Via头部
viaHeader := sip.ViaHeader{
ProtocolName: "SIP",
ProtocolVersion: "2.0",
Transport: p.PlatformModel.Transport,
Host: p.PlatformModel.DeviceIP,
Port: p.PlatformModel.DevicePort,
Params: sip.NewParams(),
}
viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
request.AppendHeader(&viaHeader)
//viaHeader := sip.ViaHeader{
// ProtocolName: "SIP",
// ProtocolVersion: "2.0",
// Transport: p.PlatformModel.Transport,
// Host: p.PlatformModel.DeviceIP,
// Port: p.PlatformModel.DevicePort,
// Params: sip.NewParams(),
//}
//viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
//request.AppendHeader(&viaHeader)
request.SetTransport(req.Transport())
contentTypeHeader := sip.ContentTypeHeader("Application/MANSCDP+xml")
@@ -669,7 +676,7 @@ func (p *Platform) sendCatalogResponse(req *sip.Request, sn string, fromTag stri
<Response>
<CmdType>Catalog</CmdType>
<SN>%s</SN>
<DeviceId>%s</DeviceId>
<DeviceID>%s</DeviceID>
<SumNum>%d</SumNum>
<DeviceList Num="1">
%s
@@ -807,7 +814,7 @@ func (p *Platform) buildChannelItem(channel gb28181.DeviceChannel) string {
}
return fmt.Sprintf(`<Item>
<DeviceId>%s</DeviceId>
<DeviceID>%s</DeviceID>
<Name>%s</Name>
<Manufacturer>%s</Manufacturer>
<Model>%s</Model>
@@ -882,16 +889,16 @@ func (p *Platform) handleDeviceControl(req *sip.Request, tx sip.ServerTransactio
request.AppendHeader(&toHeader)
// 添加Via头部
viaHeader := sip.ViaHeader{
ProtocolName: "SIP",
ProtocolVersion: "2.0",
Transport: device.Transport,
Host: device.SipIp,
Port: device.localPort,
Params: sip.NewParams(),
}
viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
request.AppendHeader(&viaHeader)
//viaHeader := sip.ViaHeader{
// ProtocolName: "SIP",
// ProtocolVersion: "2.0",
// Transport: device.Transport,
// Host: device.SipIp,
// Port: device.LocalPort,
// Params: sip.NewParams(),
//}
//viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
//request.AppendHeader(&viaHeader)
// 设置Content-Type
contentTypeHeader := sip.ContentTypeHeader("Application/MANSCDP+xml")
@@ -988,16 +995,16 @@ func (p *Platform) sendDeviceStatusResponse(req *sip.Request, device *Device, sn
request.AppendHeader(&toHeader)
// 添加Via头部
viaHeader := sip.ViaHeader{
ProtocolName: "SIP",
ProtocolVersion: "2.0",
Transport: p.PlatformModel.Transport,
Host: p.PlatformModel.DeviceIP,
Port: p.PlatformModel.DevicePort,
Params: sip.NewParams(),
}
viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
request.AppendHeader(&viaHeader)
//viaHeader := sip.ViaHeader{
// ProtocolName: "SIP",
// ProtocolVersion: "2.0",
// Transport: p.PlatformModel.Transport,
// Host: p.PlatformModel.DeviceIP,
// Port: p.PlatformModel.DevicePort,
// Params: sip.NewParams(),
//}
//viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
//request.AppendHeader(&viaHeader)
// 设置Content-Type
contentTypeHeader := sip.ContentTypeHeader("Application/MANSCDP+xml")
@@ -1037,7 +1044,7 @@ func (p *Platform) sendDeviceStatusResponse(req *sip.Request, device *Device, sn
<Response>
<CmdType>DeviceStatus</CmdType>
<SN>%s</SN>
<DeviceId>%s</DeviceId>
<DeviceID>%s</DeviceID>
<Result>OK</Result>
<Online>%s</Online>
<Status>%s</Status>
@@ -1136,16 +1143,16 @@ func (p *Platform) sendDeviceInfoResponse(req *sip.Request, device *Device, sn s
}
request.AppendHeader(&toHeader)
// 添加Via头部
viaHeader := sip.ViaHeader{
ProtocolName: "SIP",
ProtocolVersion: "2.0",
Transport: p.PlatformModel.Transport,
Host: p.PlatformModel.DeviceIP,
Port: p.PlatformModel.DevicePort,
Params: sip.NewParams(),
}
viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
request.AppendHeader(&viaHeader)
//viaHeader := sip.ViaHeader{
// ProtocolName: "SIP",
// ProtocolVersion: "2.0",
// Transport: p.PlatformModel.Transport,
// Host: p.PlatformModel.DeviceIP,
// Port: p.PlatformModel.DevicePort,
// Params: sip.NewParams(),
//}
//viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
//request.AppendHeader(&viaHeader)
contentTypeHeader := sip.ContentTypeHeader("Application/MANSCDP+xml")
request.AppendHeader(&contentTypeHeader)
@@ -1157,7 +1164,7 @@ func (p *Platform) sendDeviceInfoResponse(req *sip.Request, device *Device, sn s
<Response>
<CmdType>DeviceInfo</CmdType>
<SN>%s</SN>
<DeviceId>%s</DeviceId>
<DeviceID>%s</DeviceID>
<Result>OK</Result>
<DeviceName>%s</DeviceName>
<Manufacturer>%s</Manufacturer>
@@ -1171,7 +1178,7 @@ func (p *Platform) sendDeviceInfoResponse(req *sip.Request, device *Device, sn s
<Response>
<CmdType>DeviceInfo</CmdType>
<SN>%s</SN>
<DeviceId>%s</DeviceId>
<DeviceID>%s</DeviceID>
<Result>OK</Result>
<DeviceName>%s</DeviceName>
<Manufacturer>%s</Manufacturer>
@@ -1340,16 +1347,16 @@ func (p *Platform) handlePresetQuery(req *sip.Request, tx sip.ServerTransaction,
request.AppendHeader(&toHeader)
// 添加Via头部
viaHeader := sip.ViaHeader{
ProtocolName: "SIP",
ProtocolVersion: "2.0",
Transport: device.Transport,
Host: device.SipIp,
Port: device.localPort,
Params: sip.NewParams(),
}
viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
request.AppendHeader(&viaHeader)
//viaHeader := sip.ViaHeader{
// ProtocolName: "SIP",
// ProtocolVersion: "2.0",
// Transport: device.Transport,
// Host: device.SipIp,
// Port: device.LocalPort,
// Params: sip.NewParams(),
//}
//viaHeader.Params.Add("branch", sip.GenerateBranchN(16)).Add("rport", "")
//request.AppendHeader(&viaHeader)
// 设置Content-Type
contentTypeHeader := sip.ContentTypeHeader("Application/MANSCDP+xml")

View File

@@ -17,7 +17,7 @@ func (gb *GB28181Plugin) RecordInfoQuery(deviceID string, channelID string, star
return nil, fmt.Errorf("device not found: %s", deviceID)
}
channel, ok := device.channels.Get(channelID)
channel, ok := device.channels.Get(deviceID + "_" + channelID)
if !ok {
return nil, fmt.Errorf("channel not found: %s", channelID)
}

View File

@@ -0,0 +1,487 @@
package plugin_gb28181pro
import (
"errors"
"fmt"
"net"
"os"
"strconv"
"sync"
"time"
"github.com/emiago/sipgo"
"github.com/emiago/sipgo/sip"
myip "github.com/husanpao/ip"
"github.com/icholy/digest"
"github.com/rs/zerolog"
"gorm.io/gorm"
"m7s.live/v5"
"m7s.live/v5/pkg/task"
"m7s.live/v5/pkg/util"
)
// DeviceRegisterQueueTask is a task.Work queue keyed by device ID so that
// REGISTER requests from the same device are processed serially, in order.
type DeviceRegisterQueueTask struct {
	task.Work
	deviceId string // device ID taken from the SIP From header user part
}

// GetKey returns the queue key (the device ID) used to route work items.
func (queueTask *DeviceRegisterQueueTask) GetKey() string {
	return queueTask.deviceId
}
// registerHandlerTask processes one SIP REGISTER request as a one-shot task:
// digest authentication, then device registration, re-registration (recover)
// or unregistration bookkeeping.
type registerHandlerTask struct {
	task.Task
	gb *GB28181Plugin        // owning plugin: config, logger, device map, DB
	req *sip.Request         // the incoming REGISTER request
	tx sip.ServerTransaction // transaction used to send SIP responses
}
// getDevicePassword resolves the password used to authenticate a device:
// a non-empty device-specific password wins; otherwise the plugin-wide
// password from the configuration is used.
func (task *registerHandlerTask) getDevicePassword(device *Device) string {
	if device == nil {
		return task.gb.Password
	}
	if pwd := device.Password; pwd != "" {
		return pwd
	}
	return task.gb.Password
}
// Run handles one REGISTER request end to end: it looks up (or loads) the
// device, performs optional digest authentication, answers 200 OK, and then
// either unregisters (Expires: 0), recovers an already-known device, or
// stores a brand-new one.
func (task *registerHandlerTask) Run() (err error) {
	var password string
	var device *Device
	// NOTE(review): `recover` shadows the builtin recover() inside this
	// function; harmless here, but a different local name would be clearer.
	var recover = false
	from := task.req.From()
	if from == nil || from.Address.User == "" {
		task.gb.Error("OnRegister", "error", "no user")
		return
	}
	isUnregister := false
	deviceid := from.Address.User
	if existingDevice, exists := task.gb.devices.Get(deviceid); exists && existingDevice != nil {
		// Device is already in memory: this is a re-registration.
		device = existingDevice
		recover = true
	} else {
		// Try to load the device record from the database.
		device = &Device{DeviceId: deviceid}
		if task.gb.DB != nil {
			if err := task.gb.DB.First(device, Device{DeviceId: deviceid}).Error; err != nil {
				// Record-not-found is expected for a first-time device.
				if !errors.Is(err, gorm.ErrRecordNotFound) {
					task.gb.Error("OnRegister", "error", err)
				}
			}
		}
	}
	// Resolve the password (device-specific overrides plugin default).
	password = task.getDevicePassword(device)
	exp := task.req.GetHeader("Expires")
	if exp == nil {
		task.gb.Error("OnRegister", "error", "no expires")
		return
	}
	expSec, err := strconv.ParseInt(exp.Value(), 10, 32)
	if err != nil {
		task.gb.Error("OnRegister", "error", err.Error())
		return
	}
	// Expires: 0 on a REGISTER means the device is unregistering.
	if expSec == 0 {
		isUnregister = true
	}
	// Digest authentication is only enforced when a password is configured.
	if password != "" {
		h := task.req.GetHeader("Authorization")
		if h == nil {
			// No credentials yet: issue a 401 challenge with a fresh nonce.
			nonce := fmt.Sprintf("%d", time.Now().UnixMicro())
			chal := digest.Challenge{
				Realm: task.gb.Realm,
				Nonce: nonce,
				Opaque: "monibuca",
				Algorithm: "MD5",
				QOP: []string{"auth"},
			}
			res := sip.NewResponseFromRequest(task.req, sip.StatusUnauthorized, "Unauthorized", nil)
			res.AppendHeader(sip.NewHeader("WWW-Authenticate", chal.String()))
			task.gb.Debug("sending auth challenge", "nonce", nonce, "realm", task.gb.Realm)
			if err = task.tx.Respond(res); err != nil {
				task.gb.Error("respond Unauthorized", "error", err.Error())
			}
			return
		}
		// Parse the Authorization header into digest credentials.
		cred, err := digest.ParseCredentials(h.Value())
		if err != nil {
			task.gb.Error("parsing credentials failed", "error", err.Error())
			// NOTE(review): `err` is reassigned by Respond below, so on a
			// successful respond this returns nil, not the parse error —
			// confirm that is intended.
			if err = task.tx.Respond(sip.NewResponseFromRequest(task.req, sip.StatusUnauthorized, "Bad credentials", nil)); err != nil {
				task.gb.Error("respond Bad credentials", "error", err.Error())
			}
			return err
		}
		task.gb.Debug("received auth info",
			"username", cred.Username,
			"realm", cred.Realm,
			"nonce", cred.Nonce,
			"uri", cred.URI,
			"qop", cred.QOP,
			"nc", cred.Nc,
			"cnonce", cred.Cnonce,
			"response", cred.Response)
		// The device ID doubles as the digest username.
		if cred.Username != deviceid {
			task.gb.Error("username mismatch", "expected", deviceid, "got", cred.Username)
			if err = task.tx.Respond(sip.NewResponseFromRequest(task.req, sip.StatusForbidden, "Invalid username", nil)); err != nil {
				task.gb.Error("respond Invalid username", "error", err.Error())
			}
			return err
		}
		// Recompute the expected digest response from our copy of the password.
		opts := digest.Options{
			Method: "REGISTER",
			URI: cred.URI,
			Username: deviceid,
			Password: password,
			Cnonce: cred.Cnonce,
			Count: int(cred.Nc),
		}
		digCred, err := digest.Digest(&digest.Challenge{
			Realm: cred.Realm,
			Nonce: cred.Nonce,
			Opaque: cred.Opaque,
			Algorithm: cred.Algorithm,
			QOP: []string{cred.QOP},
		}, opts)
		if err != nil {
			task.gb.Error("calculating digest failed", "error", err.Error())
			if err = task.tx.Respond(sip.NewResponseFromRequest(task.req, sip.StatusUnauthorized, "Bad credentials", nil)); err != nil {
				task.gb.Error("respond Bad credentials", "error", err.Error())
			}
			return err
		}
		task.gb.Debug("calculated response info",
			"username", opts.Username,
			"uri", opts.URI,
			"qop", cred.QOP,
			"nc", cred.Nc,
			"cnonce", opts.Cnonce,
			"count", opts.Count,
			"response", digCred.Response)
		// Constant comparison of the client's response with ours.
		if cred.Response != digCred.Response {
			task.gb.Error("response mismatch",
				"expected", digCred.Response,
				"got", cred.Response,
				"method", opts.Method,
				"uri", opts.URI,
				"username", opts.Username)
			if err = task.tx.Respond(sip.NewResponseFromRequest(task.req, sip.StatusUnauthorized, "Invalid credentials", nil)); err != nil {
				task.gb.Error("respond Invalid credentials", "error", err.Error())
			}
			return err
		}
		task.gb.Debug("auth successful", "username", deviceid)
	}
	// Authentication passed (or not required): acknowledge the REGISTER.
	response := sip.NewResponseFromRequest(task.req, sip.StatusOK, "OK", nil)
	response.AppendHeader(sip.NewHeader("Expires", fmt.Sprintf("%d", expSec)))
	response.AppendHeader(sip.NewHeader("Date", time.Now().Local().Format(util.LocalTimeFormat)))
	response.AppendHeader(sip.NewHeader("Server", "M7S/"+m7s.Version))
	response.AppendHeader(sip.NewHeader("Allow", "INVITE,ACK,CANCEL,BYE,NOTIFY,OPTIONS,PRACK,UPDATE,REFER"))
	//hostname, portStr, _ := net.SplitHostPort(req.Source())
	//port, _ := strconv.Atoi(portStr)
	//response.AppendHeader(&sip.ContactHeader{
	// Address: sip.Uri{
	// User: deviceid,
	// Host: hostname,
	// Port: port,
	// },
	//})
	if err = task.tx.Respond(response); err != nil {
		task.gb.Error("respond OK", "error", err.Error())
	}
	if isUnregister { // unregister: mark the device offline and stop it
		if d, ok := task.gb.devices.Get(deviceid); ok {
			d.Online = false
			d.Status = DeviceOfflineStatus
			if task.gb.DB != nil {
				// Re-read the DB row so the in-memory device keeps its ID.
				var dbDevice Device
				if err := task.gb.DB.First(&dbDevice, Device{DeviceId: deviceid}).Error; err == nil {
					d.ID = dbDevice.ID
				}
				// Mark every channel of the device offline.
				d.channels.Range(func(channel *Channel) bool {
					channel.Status = "OFF"
					return true
				})
			}
			d.Stop(errors.New("unregister"))
		}
	} else {
		if recover {
			// Known device re-registering: refresh its network state.
			task.gb.Info("into recoverdevice", "deviceId", device.DeviceId)
			device.Status = DeviceOnlineStatus
			task.RecoverDevice(device, task.req)
		} else {
			// First registration (or loaded from DB): full store path.
			var newDevice *Device
			if device == nil {
				newDevice = &Device{DeviceId: deviceid}
			} else {
				newDevice = device
			}
			// NOTE(review): logs the whole From header as "deviceId";
			// `deviceid` would likely be the intended value.
			task.gb.Info("into StoreDevice", "deviceId", from)
			task.StoreDevice(deviceid, task.req, newDevice)
		}
	}
	task.gb.Info("registerHandlerTask start end", "deviceid", deviceid, "expires", expSec, "isUnregister", isUnregister)
	return nil
}
// RecoverDevice refreshes an already-known device after a re-REGISTER:
// it recomputes the device's network endpoints (source/destination IPs and
// ports, LAN/WAN mapping), rebuilds its SIP client, and persists the record.
// The statement order mirrors StoreDevice's IP-mapping logic and should stay
// in sync with it.
func (task *registerHandlerTask) RecoverDevice(d *Device, req *sip.Request) {
	from := req.From()
	source := req.Source()
	desc := req.Destination()
	myIP, myPortStr, _ := net.SplitHostPort(desc)
	sourceIP, sourcePortStr, _ := net.SplitHostPort(source)
	sourcePort, _ := strconv.Atoi(sourcePortStr)
	myPort, _ := strconv.Atoi(myPortStr)
	// If the device address is a private IP, prefer our LAN address.
	myIPParse := net.ParseIP(myIP)
	sourceIPParse := net.ParseIP(sourceIP)
	// Candidate local addresses: internal (LAN) and external (WAN) IPv4.
	myLanIP := myip.InternalIPv4()
	myWanIP := myip.ExternalIPv4()
	task.gb.Info("Start RecoverDevice", "source", source, "desc", desc, "myLanIP", myLanIP, "myWanIP", myWanIP)
	// Map the destination/source addresses onto LAN/WAN choices.
	if sourceIPParse != nil { // only when the source IP is valid
		if myIPParse == nil { // destination is a hostname, not an IP
			if sourceIPParse.IsPrivate() { // device came from a private IP
				myWanIP = myLanIP // advertise the LAN IP as our WAN IP
			}
		} else { // destination is an IP literal
			if sourceIPParse.IsPrivate() { // device came from a private IP
				myLanIP, myWanIP = myIP, myIP // use the destination IP for both
			}
		}
	}
	// Explicit configuration overrides the auto-detected addresses.
	if task.gb.MediaIP != "" {
		myWanIP = task.gb.MediaIP
	}
	if task.gb.SipIP != "" {
		myLanIP = task.gb.SipIP
	}
	// Where we send requests for this device.
	d.Recipient = sip.Uri{
		Host: sourceIP,
		Port: sourcePort,
		User: from.Address.User,
	}
	// Contact header advertised to the device.
	// NOTE(review): uses myIP (the request destination) here, while
	// StoreDevice uses myWanIP — confirm the difference is intentional.
	d.contactHDR = sip.ContactHeader{
		Address: sip.Uri{
			User: task.gb.Serial,
			Host: myIP,
			Port: myPort,
		},
	}
	d.SipIp = myLanIP
	d.StartTime = time.Now()
	d.IP = sourceIP
	d.Port = sourcePort
	d.HostAddress = d.IP + ":" + sourcePortStr
	d.Status = DeviceOnlineStatus
	d.UpdateTime = time.Now()
	d.RegisterTime = time.Now()
	d.Online = true
	d.client, _ = sipgo.NewClient(task.gb.ua, sipgo.WithClientLogger(zerolog.New(os.Stdout)), sipgo.WithClientHostname(d.SipIp))
	d.channels.L = new(sync.RWMutex)
	d.catalogReqs.L = new(sync.RWMutex)
	d.plugin = task.gb
	d.plugin.Info("RecoverDevice", "source", source, "desc", desc, "device.SipIp", myLanIP, "device.WanIP", myWanIP, "recipient", req.Recipient, "myPort", myPort)
	if task.gb.DB != nil {
		//var existing Device
		//if err := gb.DB.First(&existing, Device{DeviceId: d.DeviceId}).Error; err == nil {
		// d.ID = existing.ID // 保持原有的自增ID
		// gb.Info("RecoverDevice", "type", "更新设备", "deviceId", d.DeviceId)
		//} else {
		// gb.Info("RecoverDevice", "type", "新增设备", "deviceId", d.DeviceId)
		//}
		task.gb.DB.Save(d)
	}
	return
}
// StoreDevice fully initializes a newly registered device: network endpoints
// and LAN/WAN mapping, SIP headers (Recipient/Contact/From), SIP client,
// lifecycle hooks that link channels to pull proxies, and persistence.
// The IP-mapping section mirrors RecoverDevice and should stay in sync.
func (task *registerHandlerTask) StoreDevice(deviceid string, req *sip.Request, d *Device) {
	task.gb.Debug("deviceid is ", deviceid, "req.via() is ", req.Via(), "req.Source() is ", req.Source())
	source := req.Source()
	sourceIP, sourcePortStr, _ := net.SplitHostPort(source)
	sourcePort, _ := strconv.Atoi(sourcePortStr)
	desc := req.Destination()
	myIP, myPortStr, _ := net.SplitHostPort(desc)
	myPort, _ := strconv.Atoi(myPortStr)
	exp := req.GetHeader("Expires")
	if exp == nil {
		task.gb.Error("OnRegister", "error", "no expires")
		return
	}
	expSec, err := strconv.ParseInt(exp.Value(), 10, 32)
	if err != nil {
		task.gb.Error("OnRegister", "error", err.Error())
		return
	}
	// If myPort is not one of the configured SIP ports, fall back to the
	// first configured port (sipPorts[0]).
	if len(task.gb.sipPorts) > 0 {
		portFound := false
		for _, port := range task.gb.sipPorts {
			if port == myPort {
				portFound = true
				break
			}
		}
		if !portFound {
			myPort = task.gb.sipPorts[0]
			task.gb.Debug("StoreDevice", "使用默认端口替换", myPort)
		}
	}
	// If the device address is a private IP, prefer our LAN address.
	myIPParse := net.ParseIP(myIP)
	sourceIPParse := net.ParseIP(sourceIP)
	// Candidate local addresses: internal (LAN) and external (WAN) IPv4.
	myLanIP := myip.InternalIPv4()
	myWanIP := myip.ExternalIPv4()
	task.gb.Info("Start StoreDevice", "source", source, "desc", desc, "myLanIP", myLanIP, "myWanIP", myWanIP)
	// Map the destination/source addresses onto LAN/WAN choices.
	if sourceIPParse != nil { // only when the source IP is valid
		if myIPParse == nil { // destination is a hostname, not an IP
			if sourceIPParse.IsPrivate() { // device came from a private IP
				myWanIP = myLanIP // advertise the LAN IP as our WAN IP
			}
		} else { // destination is an IP literal
			if sourceIPParse.IsPrivate() { // device came from a private IP
				myLanIP, myWanIP = myIP, myIP // use the destination IP for both
			}
		}
	}
	// Explicit configuration overrides the auto-detected addresses.
	if task.gb.MediaIP != "" {
		myWanIP = task.gb.MediaIP
	}
	if task.gb.SipIP != "" {
		myLanIP = task.gb.SipIP
	}
	now := time.Now()
	d.CreateTime = now
	d.UpdateTime = now
	d.RegisterTime = now
	d.KeepaliveTime = now
	d.Status = DeviceOnlineStatus
	d.Online = true
	// NOTE(review): value is TCP passive although the original comment said
	// "default UDP" — confirm which default is intended.
	d.StreamMode = "TCP-PASSIVE"
	d.Charset = "GB2312" // default GB2312 charset
	d.GeoCoordSys = "WGS84" // default WGS84 coordinate system
	d.Transport = req.Transport() // transport protocol of the REGISTER
	d.IP = sourceIP
	d.Port = sourcePort
	d.HostAddress = sourceIP + ":" + sourcePortStr
	d.SipIp = myLanIP
	d.MediaIp = myWanIP
	d.Expires = int(expSec)
	d.eventChan = make(chan any, 10)
	// Where we send requests for this device.
	d.Recipient = sip.Uri{
		Host: sourceIP,
		Port: sourcePort,
		User: deviceid,
	}
	// Contact/From headers advertise our WAN address to the device.
	d.contactHDR = sip.ContactHeader{
		Address: sip.Uri{
			User: task.gb.Serial,
			Host: myWanIP,
			Port: myPort,
		},
	}
	d.fromHDR = sip.FromHeader{
		Address: sip.Uri{
			User: task.gb.Serial,
			Host: myWanIP,
			Port: myPort,
		},
		Params: sip.NewParams(),
	}
	d.plugin = task.gb
	d.LocalPort = myPort
	d.Logger = task.gb.Logger.With("deviceid", deviceid)
	d.fromHDR.Params.Add("tag", sip.GenerateTagN(16))
	d.client, _ = sipgo.NewClient(task.gb.ua, sipgo.WithClientLogger(zerolog.New(os.Stdout)), sipgo.WithClientHostname(d.SipIp))
	d.channels.L = new(sync.RWMutex)
	d.catalogReqs.L = new(sync.RWMutex)
	d.Info("StoreDevice", "source", source, "desc", desc, "device.SipIp", myLanIP, "device.WanIP", myWanIP, "req.Recipient", req.Recipient, "myPort", myPort, "d.Recipient", d.Recipient)
	// Derive a stable task ID from the device ID with a simple 31-based hash.
	var hash uint32
	for i := 0; i < len(d.DeviceId); i++ {
		ch := d.DeviceId[i]
		hash = hash*31 + uint32(ch)
	}
	d.Task.ID = hash
	d.OnStart(func() {
		task.gb.devices.Set(d)
		// When a channel appears, link it to a matching gb28181 pull proxy
		// (if one is configured for this device/channel) and mark it online.
		d.channels.OnAdd(func(c *Channel) {
			if absDevice, ok := task.gb.Server.PullProxies.Find(func(absDevice m7s.IPullProxy) bool {
				conf := absDevice.GetConfig()
				return conf.Type == "gb28181" && conf.URL == fmt.Sprintf("%s/%s", d.DeviceId, c.ChannelID)
			}); ok {
				c.PullProxyTask = absDevice.(*PullProxy)
				absDevice.ChangeStatus(m7s.PullProxyStatusOnline)
			}
		})
	})
	d.OnDispose(func() {
		// On teardown, mark the device and all proxied channels offline.
		d.Status = DeviceOfflineStatus
		if task.gb.devices.RemoveByKey(d.DeviceId) {
			for c := range d.channels.Range {
				if c.PullProxyTask != nil {
					c.PullProxyTask.ChangeStatus(m7s.PullProxyStatusOffline)
				}
			}
		}
	})
	task.gb.AddTask(d).WaitStarted()
	if task.gb.DB != nil {
		// Preserve the auto-increment ID when the device already exists.
		var existing Device
		if err := task.gb.DB.First(&existing, Device{DeviceId: d.DeviceId}).Error; err == nil {
			d.ID = existing.ID
			task.gb.DB.Save(d).Omit("create_time")
			task.gb.Info("StoreDevice", "type", "更新设备", "deviceId", d.DeviceId)
		} else {
			task.gb.DB.Save(d)
			task.gb.Info("StoreDevice", "type", "新增设备", "deviceId", d.DeviceId)
		}
	}
	return
}

Submodule plugin/gridb deleted from e0f8dbad92

682
plugin/hls/download.go Normal file
View File

@@ -0,0 +1,682 @@
package plugin_hls
import (
"bufio"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
"time"
m7s "m7s.live/v5"
"m7s.live/v5/pkg"
"m7s.live/v5/pkg/util"
hls "m7s.live/v5/plugin/hls/pkg"
mpegts "m7s.live/v5/plugin/hls/pkg/ts"
mp4 "m7s.live/v5/plugin/mp4/pkg"
"m7s.live/v5/plugin/mp4/pkg/box"
)
// requestParams holds the parsed parameters of a download request.
type requestParams struct {
	streamPath string        // stream path extracted from the URL
	startTime time.Time      // requested window start
	endTime time.Time        // requested window end
	timeRange time.Duration  // endTime - startTime
}

// fileInfo describes one recording file that intersects the request window.
type fileInfo struct {
	filePath string               // path of the recording on disk
	startTime time.Time           // recording start
	endTime time.Time             // recording end
	startOffsetTime time.Duration // offset of the requested start inside the file
	recordType string             // "ts", "mp4", "fmp4"
}
// parseRequestParams extracts the stream path and requested time range from
// a download request of the form "/download/<streamPath>.ts?start=…&end=…".
func (plugin *HLSPlugin) parseRequestParams(r *http.Request) (*requestParams, error) {
	// The stream path is the URL path minus the "/download/" prefix and
	// ".ts" suffix.
	path := strings.TrimPrefix(r.URL.Path, "/download/")
	path = strings.TrimSuffix(path, ".ts")
	// start/end query parameters delimit the requested recording window.
	begin, end, err := util.TimeRangeQueryParse(r.URL.Query())
	if err != nil {
		return nil, err
	}
	params := &requestParams{
		streamPath: path,
		startTime:  begin,
		endTime:    end,
		timeRange:  end.Sub(begin),
	}
	return params, nil
}
// queryRecordStreams loads recording entries for the requested stream from
// the database. Native HLS ("hls") records are preferred; when none match,
// it falls back to MP4 ("mp4"/"fmp4") records. The previously duplicated
// query-building code for the two lookups is folded into one helper.
func (plugin *HLSPlugin) queryRecordStreams(params *requestParams) ([]m7s.RecordStream, error) {
	// The lookup needs a database.
	if plugin.DB == nil {
		return nil, fmt.Errorf("database not available")
	}
	// findByTypes runs one ordered query for the given record types,
	// applying the optional time-range overlap filter.
	findByTypes := func(types []string) ([]m7s.RecordStream, error) {
		query := plugin.DB.Model(&m7s.RecordStream{}).Where("stream_path = ? AND type IN (?)", params.streamPath, types)
		if !params.startTime.IsZero() && !params.endTime.IsZero() {
			// A record overlaps the window if it spans the window start,
			// or if it starts inside the window.
			query = query.Where("(start_time <= ? AND end_time >= ?) OR (start_time >= ? AND start_time <= ?)",
				params.endTime, params.startTime, params.startTime, params.endTime)
		}
		var records []m7s.RecordStream
		err := query.Order("start_time ASC").Find(&records).Error
		return records, err
	}
	// First try HLS records.
	recordStreams, err := findByTypes([]string{"hls"})
	if err != nil {
		return nil, err
	}
	// No HLS records: fall back to MP4/fMP4 recordings.
	if len(recordStreams) == 0 {
		recordStreams, err = findByTypes([]string{"mp4", "fmp4"})
		if err != nil {
			return nil, err
		}
	}
	return recordStreams, nil
}
// buildFileInfoList converts database records into fileInfo entries, dropping
// files that are missing on disk or do not intersect the requested window.
// The boolean result reports whether at least one usable file was found.
func (plugin *HLSPlugin) buildFileInfoList(recordStreams []m7s.RecordStream, startTime, endTime time.Time) ([]*fileInfo, bool) {
	var result []*fileInfo
	for _, record := range recordStreams {
		// Skip records whose file no longer exists on disk.
		if !util.Exist(record.FilePath) {
			plugin.Warn("Record file not found", "filePath", record.FilePath)
			continue
		}
		begin, end := record.StartTime, record.EndTime
		// Skip records entirely outside the requested window.
		if end.Before(startTime) || begin.After(endTime) {
			continue
		}
		// Offset of the requested start inside this file (zero when the
		// request begins at or before the file's start).
		var offset time.Duration
		if startTime.After(begin) {
			offset = startTime.Sub(begin)
		}
		result = append(result, &fileInfo{
			filePath:        record.FilePath,
			startTime:       begin,
			endTime:         end,
			startOffsetTime: offset,
			recordType:      record.Type,
		})
	}
	return result, len(result) > 0
}
// hasOnlyMp4Records reports whether the list is non-empty and contains no
// native HLS ("hls") entries, i.e. the download must be served by MP4→TS
// conversion.
func (plugin *HLSPlugin) hasOnlyMp4Records(fileInfoList []*fileInfo) bool {
	// Start from "non-empty" and AND away as soon as an HLS entry shows up.
	onlyMp4 := len(fileInfoList) > 0
	for _, info := range fileInfoList {
		onlyMp4 = onlyMp4 && info.recordType != "hls"
	}
	return onlyMp4
}
// filterTsFiles returns only the native HLS TS entries from fileInfoList.
func (plugin *HLSPlugin) filterTsFiles(fileInfoList []*fileInfo) []*fileInfo {
	var tsOnly []*fileInfo
	for _, info := range fileInfoList {
		if info.recordType != "hls" {
			continue
		}
		tsOnly = append(tsOnly, info)
	}
	plugin.Debug("TS files filtered", "original", len(fileInfoList), "filtered", len(tsOnly))
	return tsOnly
}
// filterMp4Files returns only the MP4/fMP4 entries from fileInfoList.
func (plugin *HLSPlugin) filterMp4Files(fileInfoList []*fileInfo) []*fileInfo {
	var mp4Only []*fileInfo
	for _, info := range fileInfoList {
		switch info.recordType {
		case "mp4", "fmp4":
			mp4Only = append(mp4Only, info)
		}
	}
	plugin.Debug("MP4 files filtered", "original", len(fileInfoList), "filtered", len(mp4Only))
	return mp4Only
}
// processMp4ToTs converts MP4/fMP4 recordings to a single TS download.
// All files share one in-memory TS writer so the output is one continuous
// stream, which is flushed to the HTTP response at the end.
func (plugin *HLSPlugin) processMp4ToTs(w http.ResponseWriter, r *http.Request, fileInfoList []*fileInfo, params *requestParams) {
	plugin.Info("Converting MP4 records to TS", "count", len(fileInfoList))
	// Announce a TS attachment download.
	w.Header().Set("Content-Type", "video/mp2t")
	w.Header().Set("Content-Disposition", "attachment")
	// One TS writer outside the loop: every MP4 file feeds the same
	// TsInMemory so PAT/PMT are written once and timestamps stay coherent.
	tsWriter := &simpleTsWriter{
		TsInMemory: &hls.TsInMemory{},
		plugin: plugin,
	}
	// Demux each MP4 file in order, pushing samples into the TS writer via
	// the callbacks below.
	for _, info := range fileInfoList {
		// Bail out if the client went away.
		if r.Context().Err() != nil {
			return
		}
		plugin.Debug("Converting MP4 file to TS", "path", info.filePath)
		// DemuxerRange restricts output to the requested time window.
		demuxer := &mp4.DemuxerRange{
			StartTime: params.startTime,
			EndTime: params.endTime,
			Streams: []m7s.RecordStream{{
				FilePath: info.filePath,
				StartTime: info.startTime,
				EndTime: info.endTime,
				Type: info.recordType,
			}},
		}
		// Wire the demuxer output into the TS writer.
		demuxer.OnVideoExtraData = tsWriter.onVideoExtraData
		demuxer.OnAudioExtraData = tsWriter.onAudioExtraData
		demuxer.OnVideoSample = tsWriter.onVideoSample
		demuxer.OnAudioSample = tsWriter.onAudioSample
		err := demuxer.Demux(r.Context())
		if err != nil {
			plugin.Error("MP4 to TS conversion failed", "err", err, "file", info.filePath)
			// Only send an HTTP error if nothing has been written yet.
			if !tsWriter.hasWritten {
				http.Error(w, "Conversion failed", http.StatusInternalServerError)
			}
			return
		}
	}
	// Flush the accumulated TS packets to the response in one pass.
	_, err := tsWriter.WriteTo(w)
	if err != nil {
		plugin.Error("Failed to write TS data to response", "error", err)
		return
	}
	plugin.Info("MP4 to TS conversion completed")
}
// simpleTsWriter adapts MP4 demuxer callbacks to an in-memory TS muxer.
// It caches codec parameters (SPS/PPS for H.264) so key frames can be made
// self-describing in the Annex-B output.
type simpleTsWriter struct {
	*hls.TsInMemory                 // accumulates the muxed TS packets
	plugin *HLSPlugin               // for logging; checked for nil in some paths
	hasWritten bool                 // true once the PMT has been written
	spsData []byte                  // first SPS NAL, with Annex-B start code
	ppsData []byte                  // first PPS NAL, with Annex-B start code
	videoCodec box.MP4_CODEC_TYPE   // detected video codec
	audioCodec box.MP4_CODEC_TYPE   // detected audio codec
}
// WritePMT emits the PMT packet describing the detected audio/video codecs
// and marks the writer initialized so the PMT is only written once.
func (w *simpleTsWriter) WritePMT() {
	var vTag, aTag [4]byte
	// Four-character codec tags understood by the TS muxer.
	if w.videoCodec == box.MP4_CODEC_H264 {
		copy(vTag[:], "H264")
	} else if w.videoCodec == box.MP4_CODEC_H265 {
		copy(vTag[:], "H265")
	}
	if w.audioCodec == box.MP4_CODEC_AAC {
		copy(aTag[:], "MP4A")
	}
	w.WritePMTPacket(aTag, vTag)
	w.hasWritten = true
}
// onVideoExtraData caches the video codec type and, for H.264, extracts the
// first SPS and PPS NAL units from the AVCDecoderConfigurationRecord so they
// can be prepended (with Annex-B start codes) to key frames later.
//
// Record layout (ISO/IEC 14496-15): bytes 0-4 are version/profile/compat/
// level/lengthSizeMinusOne; byte 5 holds numSPS in its low 5 bits, followed
// by numSPS × (16-bit length + SPS NAL), then a numPPS byte and
// numPPS × (16-bit length + PPS NAL).
//
// Fix over the previous version: when numSPS > 1 the old loop `break`ed
// after storing the first SPS without advancing past the remaining SPS
// entries, so the PPS count was then read from the middle of an SPS. The
// parser below always advances past every unit while keeping only the first.
func (w *simpleTsWriter) onVideoExtraData(codecType box.MP4_CODEC_TYPE, data []byte) error {
	w.videoCodec = codecType
	// Only H.264 AVCC configs are parsed; anything shorter than the fixed
	// header cannot contain parameter sets.
	if codecType != box.MP4_CODEC_H264 || len(data) < 8 {
		return nil
	}
	if w.plugin != nil {
		w.plugin.Debug("Processing H264 extra data", "size", len(data))
	}
	startCode := []byte{0x00, 0x00, 0x00, 0x01}
	offset := 5 // skip the 5 fixed header bytes
	// readNALUs parses `count` length-prefixed NAL units starting at offset,
	// advancing offset past each one, and returns the first unit with an
	// Annex-B start code prepended (nil if none could be read).
	readNALUs := func(count int) (first []byte) {
		for i := 0; i < count; i++ {
			if offset+2 > len(data) {
				return
			}
			nalLen := int(data[offset])<<8 | int(data[offset+1])
			offset += 2
			if nalLen <= 0 || offset+nalLen > len(data) {
				return
			}
			if first == nil {
				first = make([]byte, 0, 4+nalLen)
				first = append(first, startCode...)
				first = append(first, data[offset:offset+nalLen]...)
			}
			offset += nalLen
		}
		return
	}
	// SPS section: count byte (low 5 bits), then the SPS units.
	numSPS := int(data[offset] & 0x1f)
	offset++
	if sps := readNALUs(numSPS); sps != nil {
		w.spsData = sps
		if w.plugin != nil {
			w.plugin.Debug("Extracted SPS", "length", len(sps)-4)
		}
	}
	// PPS section: count byte, then the PPS units.
	if offset < len(data) {
		numPPS := int(data[offset])
		offset++
		if pps := readNALUs(numPPS); pps != nil {
			w.ppsData = pps
			if w.plugin != nil {
				w.plugin.Debug("Extracted PPS", "length", len(pps)-4)
			}
		}
	}
	return nil
}
// onAudioExtraData records the audio codec type from the MP4 sample
// description; the extra data bytes themselves are only logged, not parsed.
// NOTE(review): unlike onVideoExtraData, w.plugin is not nil-checked here —
// confirm plugin is always set on this path.
func (w *simpleTsWriter) onAudioExtraData(codecType box.MP4_CODEC_TYPE, data []byte) error {
	w.audioCodec = codecType
	w.plugin.Debug("Processing audio extra data", "codec", codecType, "size", len(data))
	return nil
}
// onVideoSample converts one MP4 video sample (AVCC framing) to Annex-B and
// writes it to the TS stream, lazily emitting the PMT before the first frame.
func (w *simpleTsWriter) onVideoSample(codecType box.MP4_CODEC_TYPE, sample box.Sample) error {
	// The PMT must precede any media packet.
	if !w.hasWritten {
		w.WritePMT()
	}
	w.plugin.Debug("Processing video sample", "size", len(sample.Data), "keyFrame", sample.KeyFrame, "timestamp", sample.Timestamp)
	// Re-frame from AVCC (length-prefixed) to Annex-B (start codes);
	// key frames additionally get SPS/PPS prepended.
	annexBData, err := w.convertAVCCToAnnexB(sample.Data, sample.KeyFrame)
	if err != nil {
		w.plugin.Error("Failed to convert AVCC to Annex-B", "error", err)
		return err
	}
	if len(annexBData) == 0 {
		w.plugin.Warn("Empty Annex-B data after conversion")
		return nil
	}
	// PES frame metadata for the TS muxer.
	videoFrame := mpegts.MpegtsPESFrame{
		Pid: mpegts.PID_VIDEO,
		IsKeyFrame: sample.KeyFrame,
	}
	// Timestamps: MP4 sample time is scaled to the 90 kHz TS clock;
	// PTS = DTS + CTS (composition offset).
	annexBFrame := &pkg.AnnexB{
		PTS: (time.Duration(sample.Timestamp) + time.Duration(sample.CTS)) * 90,
		DTS: time.Duration(sample.Timestamp) * 90,
	}
	// Mark HEVC so the muxer uses the right stream type.
	if codecType == box.MP4_CODEC_H265 {
		annexBFrame.Hevc = true
	}
	annexBFrame.AppendOne(annexBData)
	// Emit the frame as TS packets.
	err = w.WriteVideoFrame(annexBFrame, &videoFrame)
	if err != nil {
		w.plugin.Error("Failed to write video frame", "error", err)
		return err
	}
	return nil
}
// convertAVCCToAnnexB re-frames an AVCC sample (NAL units prefixed by 4-byte
// big-endian lengths) into Annex-B (NAL units prefixed by 0x00000001 start
// codes). For key frames the cached SPS/PPS are prepended so the frame is
// independently decodable.
func (w *simpleTsWriter) convertAVCCToAnnexB(avccData []byte, isKeyFrame bool) ([]byte, error) {
	if len(avccData) == 0 {
		return nil, fmt.Errorf("empty AVCC data")
	}
	var annexBBuffer []byte
	// Key frames get the parameter sets first (already carry start codes).
	if isKeyFrame {
		if len(w.spsData) > 0 {
			annexBBuffer = append(annexBBuffer, w.spsData...)
			w.plugin.Debug("Added SPS to key frame", "spsSize", len(w.spsData))
		}
		if len(w.ppsData) > 0 {
			annexBBuffer = append(annexBBuffer, w.ppsData...)
			w.plugin.Debug("Added PPS to key frame", "ppsSize", len(w.ppsData))
		}
	}
	// Walk the AVCC payload NAL unit by NAL unit.
	offset := 0
	nalCount := 0
	for offset < len(avccData) {
		// Each unit: 4-byte length + NAL payload.
		if offset+4 > len(avccData) {
			break
		}
		// Big-endian 32-bit NAL length.
		nalLength := int(avccData[offset])<<24 |
			int(avccData[offset+1])<<16 |
			int(avccData[offset+2])<<8 |
			int(avccData[offset+3])
		offset += 4
		// Stop on a corrupt length rather than reading out of bounds.
		if nalLength <= 0 || offset+nalLength > len(avccData) {
			w.plugin.Warn("Invalid NAL length", "length", nalLength, "remaining", len(avccData)-offset)
			break
		}
		nalData := avccData[offset : offset+nalLength]
		offset += nalLength
		nalCount++
		if len(nalData) > 0 {
			// Low 5 bits of the first byte are the (H.264) NAL unit type.
			nalType := nalData[0] & 0x1f
			w.plugin.Debug("Converting NAL unit", "type", nalType, "length", nalLength)
			// Emit start code + NAL payload.
			annexBBuffer = append(annexBBuffer, []byte{0x00, 0x00, 0x00, 0x01}...)
			annexBBuffer = append(annexBBuffer, nalData...)
		}
	}
	if nalCount == 0 {
		return nil, fmt.Errorf("no NAL units found in AVCC data")
	}
	w.plugin.Debug("AVCC to Annex-B conversion completed",
		"inputSize", len(avccData),
		"outputSize", len(annexBBuffer),
		"nalUnits", nalCount)
	return annexBBuffer, nil
}
// onAudioSample writes one MP4 audio sample to the TS stream, lazily
// emitting the PMT first. AAC goes through the ADTS frame path; any other
// codec falls back to a raw PES packet.
func (w *simpleTsWriter) onAudioSample(codecType box.MP4_CODEC_TYPE, sample box.Sample) error {
	// The PMT must precede any media packet.
	if !w.hasWritten {
		w.WritePMT()
	}
	w.plugin.Debug("Processing audio sample", "codec", codecType, "size", len(sample.Data), "timestamp", sample.Timestamp)
	// PES frame metadata for the TS muxer.
	audioFrame := mpegts.MpegtsPESFrame{
		Pid: mpegts.PID_AUDIO,
	}
	switch codecType {
	case box.MP4_CODEC_AAC: // AAC → ADTS framing
		// Scale the MP4 sample time to the 90 kHz TS clock.
		adtsFrame := &pkg.ADTS{
			DTS: time.Duration(sample.Timestamp) * 90,
		}
		// Copy the payload into the frame's buffer.
		copy(adtsFrame.NextN(len(sample.Data)), sample.Data)
		err := w.WriteAudioFrame(adtsFrame, &audioFrame)
		if err != nil {
			w.plugin.Error("Failed to write audio frame", "error", err)
			return err
		}
	default:
		// Non-AAC audio: hand-build a minimal PES packet with PTS only.
		pesPacket := mpegts.MpegTsPESPacket{
			Header: mpegts.MpegTsPESHeader{
				PacketStartCodePrefix: 0x000001,
				StreamID: mpegts.STREAM_ID_AUDIO,
			},
		}
		// Optional-field flags: marker bits plus "PTS present".
		pesPacket.Header.ConstTen = 0x80
		pesPacket.Header.PtsDtsFlags = 0x80 // PTS only, no DTS
		pesPacket.Header.PesHeaderDataLength = 5
		// NOTE(review): PTS is the raw MP4 timestamp here, not scaled by 90
		// like the AAC path above — confirm the unit is intended.
		pesPacket.Header.Pts = uint64(sample.Timestamp)
		pesPacket.Buffers = append(pesPacket.Buffers, sample.Data)
		err := w.WritePESPacket(&audioFrame, pesPacket)
		if err != nil {
			w.plugin.Error("Failed to write audio PES packet", "error", err)
			return err
		}
	}
	return nil
}
// processTsFiles streams recorded TS files as one continuous download. The
// first file is copied verbatim; every later file has its PAT/PMT packets
// stripped so the concatenation remains a single coherent TS stream.
//
// Fixes over the previous version:
//   - Content-Length was set to the sum of raw file sizes while PAT/PMT
//     packets of all files after the first were skipped, so fewer bytes were
//     sent than announced (clients hang or report a truncated download). The
//     header is now only set for a single file, where nothing is stripped.
//   - http.Error was called after WriteHeader, which cannot change the
//     already-sent status; mid-stream failures are now only logged.
func (plugin *HLSPlugin) processTsFiles(w http.ResponseWriter, r *http.Request, fileInfoList []*fileInfo, params *requestParams) {
	plugin.Info("Processing TS files", "count", len(fileInfoList))
	// Announce a TS attachment download.
	w.Header().Set("Content-Type", "video/mp2t")
	w.Header().Set("Content-Disposition", "attachment")
	// Content-Length is only exact for a single, unmodified file; with
	// multiple files the stripped PAT/PMT packets make the true length
	// smaller than the file-size sum, so we let the server use chunked
	// transfer instead.
	if len(fileInfoList) == 1 {
		if stat, err := os.Stat(fileInfoList[0].filePath); err == nil {
			w.Header().Set("Content-Length", strconv.FormatInt(stat.Size(), 10))
		} else {
			plugin.Error("Failed to stat file", "path", fileInfoList[0].filePath, "err", err)
		}
	}
	w.WriteHeader(http.StatusOK)
	var writer io.Writer = w
	for i, info := range fileInfoList {
		// Stop if the client disconnected.
		if r.Context().Err() != nil {
			return
		}
		plugin.Debug("Processing TS file", "path", info.filePath)
		file, err := os.Open(info.filePath)
		if err != nil {
			// Headers are already sent; we can only log and abort the stream.
			plugin.Error("Failed to open file", "path", info.filePath, "err", err)
			return
		}
		reader := bufio.NewReader(file)
		if i == 0 {
			// First file: copy as-is, keeping its PAT/PMT tables.
			_, err = io.Copy(writer, reader)
		} else {
			// Later files: drop PAT/PMT so tables are not repeated mid-stream.
			err = plugin.copyTsFileSkipHeaders(writer, reader)
		}
		file.Close()
		if err != nil {
			plugin.Error("Failed to copy file", "path", info.filePath, "err", err)
			return
		}
	}
	plugin.Info("TS download completed")
}
// copyTsFileSkipHeaders streams 188-byte TS packets from reader to writer,
// dropping PAT and PMT packets so previously written tables stay in effect.
func (plugin *HLSPlugin) copyTsFileSkipHeaders(writer io.Writer, reader *bufio.Reader) error {
	packet := make([]byte, mpegts.TS_PACKET_SIZE)
	for {
		n, err := io.ReadFull(reader, packet)
		switch {
		case err == io.EOF || err == io.ErrUnexpectedEOF:
			// Clean end of input (possibly a truncated trailing packet).
			return nil
		case err != nil:
			return err
		case n != mpegts.TS_PACKET_SIZE:
			continue
		}
		// 0x47 is the mandatory TS sync byte; drop anything else.
		if packet[0] != 0x47 {
			continue
		}
		// PID: low 5 bits of byte 1 plus all of byte 2.
		pid := uint16(packet[1]&0x1f)<<8 | uint16(packet[2])
		// Skip table packets (PAT and PMT).
		if pid == mpegts.PID_PAT || pid == mpegts.PID_PMT {
			continue
		}
		if _, err := writer.Write(packet); err != nil {
			return err
		}
	}
}
// download serves a time-ranged recording as one TS file: it resolves the
// matching recording files and either stitches native TS segments together
// or converts MP4 recordings to TS on the fly.
func (plugin *HLSPlugin) download(w http.ResponseWriter, r *http.Request) {
	// Parse stream path and time window from the URL.
	params, err := plugin.parseRequestParams(r)
	if err != nil {
		plugin.Error("Failed to parse request params", "err", err)
		http.Error(w, "Invalid parameters", http.StatusBadRequest)
		return
	}
	plugin.Info("TS download request", "streamPath", params.streamPath, "timeRange", params.timeRange)
	// Look up recordings in the database.
	recordStreams, err := plugin.queryRecordStreams(params)
	if err != nil {
		plugin.Error("Failed to query record streams", "err", err)
		http.Error(w, "Database error", http.StatusInternalServerError)
		return
	}
	if len(recordStreams) == 0 {
		plugin.Warn("No records found", "streamPath", params.streamPath)
		http.Error(w, "No records found", http.StatusNotFound)
		return
	}
	// Keep only files that exist and overlap the requested window.
	fileInfoList, found := plugin.buildFileInfoList(recordStreams, params.startTime, params.endTime)
	if !found {
		plugin.Warn("No valid files found", "streamPath", params.streamPath)
		http.Error(w, "No valid files found", http.StatusNotFound)
		return
	}
	// Prefer native TS segments when any exist; otherwise convert MP4.
	if !plugin.hasOnlyMp4Records(fileInfoList) {
		if tsFiles := plugin.filterTsFiles(fileInfoList); len(tsFiles) > 0 {
			plugin.processTsFiles(w, r, tsFiles, params)
			return
		}
	}
	plugin.processMp4ToTs(w, r, plugin.filterMp4Files(fileInfoList), params)
}

View File

@@ -59,6 +59,7 @@ func (p *HLSPlugin) OnInit() (err error) {
func (p *HLSPlugin) RegisterHandler() map[string]http.HandlerFunc {
return map[string]http.HandlerFunc{
"/vod/{streamPath...}": p.vod,
"/download/{streamPath...}": p.download,
"/api/record/start/{streamPath...}": p.API_record_start,
"/api/record/stop/{id}": p.API_record_stop,
}
@@ -104,9 +105,8 @@ func (config *HLSPlugin) vod(w http.ResponseWriter, r *http.Request) {
playlist.Init()
for _, record := range records {
duration := record.EndTime.Sub(record.StartTime).Seconds()
playlist.WriteInf(hls.PlaylistInf{
Duration: duration,
Duration: float64(record.Duration) / 1000,
URL: fmt.Sprintf("/mp4/download/%s.fmp4?id=%d", streamPath, record.ID),
Title: record.StartTime.Format(time.RFC3339),
})
@@ -128,9 +128,8 @@ func (config *HLSPlugin) vod(w http.ResponseWriter, r *http.Request) {
playlist.Init()
for _, record := range records {
duration := record.EndTime.Sub(record.StartTime).Seconds()
playlist.WriteInf(hls.PlaylistInf{
Duration: duration,
Duration: float64(record.Duration) / 1000,
URL: record.FilePath,
})
}

View File

@@ -2,16 +2,13 @@ package hls
import (
"fmt"
"os"
"path/filepath"
"time"
"gorm.io/gorm"
"m7s.live/v5"
"m7s.live/v5/pkg"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/config"
"m7s.live/v5/pkg/task"
"m7s.live/v5/pkg/util"
mpegts "m7s.live/v5/plugin/hls/pkg/ts"
)
@@ -22,7 +19,6 @@ func NewRecorder(conf config.Record) m7s.IRecorder {
type Recorder struct {
m7s.DefaultRecorder
stream m7s.RecordStream
ts *TsInFile
pesAudio *mpegts.MpegtsPESFrame
pesVideo *mpegts.MpegtsPESFrame
@@ -39,81 +35,11 @@ var CustomFileName = func(job *m7s.RecordJob) string {
}
func (r *Recorder) createStream(start time.Time) (err error) {
recordJob := &r.RecordJob
sub := recordJob.Subscriber
r.stream = m7s.RecordStream{
StartTime: start,
StreamPath: sub.StreamPath,
FilePath: CustomFileName(&r.RecordJob),
EventId: recordJob.EventId,
EventDesc: recordJob.EventDesc,
EventName: recordJob.EventName,
EventLevel: recordJob.EventLevel,
BeforeDuration: recordJob.BeforeDuration,
AfterDuration: recordJob.AfterDuration,
Mode: recordJob.Mode,
Type: "hls",
}
dir := filepath.Dir(r.stream.FilePath)
dir = filepath.Clean(dir)
if err = os.MkdirAll(dir, 0755); err != nil {
r.Error("create directory failed", "err", err, "dir", dir)
return
}
if sub.Publisher.HasAudioTrack() {
r.stream.AudioCodec = sub.Publisher.AudioTrack.ICodecCtx.String()
}
if sub.Publisher.HasVideoTrack() {
r.stream.VideoCodec = sub.Publisher.VideoTrack.ICodecCtx.String()
}
if recordJob.Plugin.DB != nil {
recordJob.Plugin.DB.Save(&r.stream)
}
return
}
type eventRecordCheck struct {
task.Task
DB *gorm.DB
streamPath string
}
func (t *eventRecordCheck) Run() (err error) {
var eventRecordStreams []m7s.RecordStream
queryRecord := m7s.RecordStream{
EventLevel: m7s.EventLevelHigh,
Mode: m7s.RecordModeEvent,
Type: "hls",
}
t.DB.Where(&queryRecord).Find(&eventRecordStreams, "stream_path=?", t.streamPath) //搜索事件录像,且为重要事件(无法自动删除)
if len(eventRecordStreams) > 0 {
for _, recordStream := range eventRecordStreams {
var unimportantEventRecordStreams []m7s.RecordStream
queryRecord.EventLevel = m7s.EventLevelLow
query := `(start_time BETWEEN ? AND ?)
OR (end_time BETWEEN ? AND ?)
OR (? BETWEEN start_time AND end_time)
OR (? BETWEEN start_time AND end_time) AND stream_path=? `
t.DB.Where(&queryRecord).Where(query, recordStream.StartTime, recordStream.EndTime, recordStream.StartTime, recordStream.EndTime, recordStream.StartTime, recordStream.EndTime, recordStream.StreamPath).Find(&unimportantEventRecordStreams)
if len(unimportantEventRecordStreams) > 0 {
for _, unimportantEventRecordStream := range unimportantEventRecordStreams {
unimportantEventRecordStream.EventLevel = m7s.EventLevelHigh
t.DB.Save(&unimportantEventRecordStream)
}
}
}
}
return
return r.CreateStream(start, CustomFileName)
}
func (r *Recorder) writeTailer(end time.Time) {
if r.stream.EndTime.After(r.stream.StartTime) {
return
}
r.stream.EndTime = end
if r.RecordJob.Plugin.DB != nil {
r.RecordJob.Plugin.DB.Save(&r.stream)
}
r.WriteTail(end, nil)
}
func (r *Recorder) Dispose() {
@@ -131,9 +57,9 @@ func (r *Recorder) createNewTs() {
r.ts.Close()
}
var err error
r.ts, err = NewTsInFile(r.stream.FilePath)
r.ts, err = NewTsInFile(r.Event.FilePath)
if err != nil {
r.Error("create ts file failed", "err", err, "path", r.stream.FilePath)
r.Error("create ts file failed", "err", err, "path", r.Event.FilePath)
return
}
if oldPMT.Len() > 0 {
@@ -175,8 +101,8 @@ func (r *Recorder) Run() (err error) {
ctx := &r.RecordJob
suber := ctx.Subscriber
startTime := time.Now()
if ctx.BeforeDuration > 0 {
startTime = startTime.Add(-ctx.BeforeDuration)
if ctx.Event.BeforeDuration > 0 {
startTime = startTime.Add(-time.Duration(ctx.Event.BeforeDuration) * time.Millisecond)
}
// 创建第一个片段记录

View File

@@ -2,13 +2,14 @@ package plugin_monitor
import (
"encoding/json"
"os"
"strings"
"time"
"m7s.live/v5"
"m7s.live/v5/pkg/task"
"m7s.live/v5/plugin/monitor/pb"
monitor "m7s.live/v5/plugin/monitor/pkg"
"os"
"strings"
"time"
)
var _ = m7s.InstallPlugin[MonitorPlugin](&pb.Api_ServiceDesc, pb.RegisterApiHandler)
@@ -65,7 +66,7 @@ func (cfg *MonitorPlugin) OnInit() (err error) {
cfg.Plugin.Server.OnBeforeDispose(func() {
cfg.saveTask(cfg.Plugin.Server)
})
cfg.Plugin.Server.OnChildDispose(cfg.saveTask)
cfg.Plugin.Server.OnDescendantsDispose(cfg.saveTask)
}
return
}

View File

@@ -103,71 +103,104 @@ func (p *MP4Plugin) downloadSingleFile(stream *m7s.RecordStream, flag mp4.Flag,
}
}
// download 处理 MP4 文件下载请求
// 支持两种模式:
// 1. 单个文件下载:通过 id 参数指定特定的录制文件
// 2. 时间范围合并下载:根据时间范围合并多个录制文件
func (p *MP4Plugin) download(w http.ResponseWriter, r *http.Request) {
// 检查数据库连接
if p.DB == nil {
http.Error(w, pkg.ErrNoDB.Error(), http.StatusInternalServerError)
return
}
// 设置响应头为 MP4 视频格式
w.Header().Set("Content-Type", "video/mp4")
// 从路径中提取流路径,并检查是否为分片格式
streamPath := r.PathValue("streamPath")
var flag mp4.Flag
if strings.HasSuffix(streamPath, ".fmp4") {
// 分片 MP4 格式
flag = mp4.FLAG_FRAGMENT
streamPath = strings.TrimSuffix(streamPath, ".fmp4")
} else {
// 常规 MP4 格式
streamPath = strings.TrimSuffix(streamPath, ".mp4")
}
query := r.URL.Query()
var streams []m7s.RecordStream
// 处理单个文件下载请求
if id := query.Get("id"); id != "" {
// 设置下载文件名
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s_%s.mp4", streamPath, id))
// 从数据库查询指定 ID 的录制记录
p.DB.Find(&streams, "id=? AND stream_path=?", id, streamPath)
if len(streams) == 0 {
http.Error(w, "record not found", http.StatusNotFound)
return
}
// 下载单个文件
p.downloadSingleFile(&streams[0], flag, w, r)
return
}
// 合并多个 mp4
// 处理时间范围合并下载请求
// 解析时间范围参数
startTime, endTime, err := util.TimeRangeQueryParse(query)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
p.Info("download", "streamPath", streamPath, "start", startTime, "end", endTime)
// 设置合并下载的文件名,包含时间范围
w.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=%s_%s_%s.mp4", streamPath, startTime.Format("20060102150405"), endTime.Format("20060102150405")))
// 构建查询条件,查找指定时间范围内的录制记录
queryRecord := m7s.RecordStream{
Mode: m7s.RecordModeAuto,
Type: "mp4",
}
p.DB.Where(&queryRecord).Find(&streams, "end_time>? AND start_time<? AND stream_path=?", startTime, endTime, streamPath)
p.DB.Where(&queryRecord).Find(&streams, "event_id=0 AND end_time>? AND start_time<? AND stream_path=?", startTime, endTime, streamPath)
// 创建 MP4 混合器
muxer := mp4.NewMuxer(flag)
ftyp := muxer.CreateFTYPBox()
n := ftyp.Size()
muxer.CurrentOffset = int64(n)
var lastTs, tsOffset int64
var parts []*ContentPart
sampleOffset := muxer.CurrentOffset + mp4.BeforeMdatData
mdatOffset := sampleOffset
var audioTrack, videoTrack *mp4.Track
var file *os.File
var moov box.IBox
streamCount := len(streams)
// 初始化变量
var lastTs, tsOffset int64 // 时间戳偏移量,用于合并多个文件时保持时间连续性
var parts []*ContentPart // 内容片段列表
sampleOffset := muxer.CurrentOffset + mp4.BeforeMdatData // 样本数据偏移量
mdatOffset := sampleOffset // 媒体数据偏移量
var audioTrack, videoTrack *mp4.Track // 音频和视频轨道
var file *os.File // 当前处理的文件
var moov box.IBox // MOOV box包含元数据
streamCount := len(streams) // 流的总数
// Track ExtraData history for each track
// 轨道额外数据历史记录,用于处理编码参数变化的情况
type TrackHistory struct {
Track *mp4.Track
ExtraData []byte
}
var audioHistory, videoHistory []TrackHistory
// 添加音频轨道的函数
addAudioTrack := func(track *mp4.Track) {
t := muxer.AddTrack(track.Cid)
t.ExtraData = track.ExtraData
t.SampleSize = track.SampleSize
t.SampleRate = track.SampleRate
t.ChannelCount = track.ChannelCount
// 如果之前有音频轨道,继承其样本列表
if len(audioHistory) > 0 {
t.Samplelist = audioHistory[len(audioHistory)-1].Track.Samplelist
}
@@ -175,11 +208,13 @@ func (p *MP4Plugin) download(w http.ResponseWriter, r *http.Request) {
audioHistory = append(audioHistory, TrackHistory{Track: t, ExtraData: track.ExtraData})
}
// 添加视频轨道的函数
addVideoTrack := func(track *mp4.Track) {
t := muxer.AddTrack(track.Cid)
t.ExtraData = track.ExtraData
t.Width = track.Width
t.Height = track.Height
// 如果之前有视频轨道,继承其样本列表
if len(videoHistory) > 0 {
t.Samplelist = videoHistory[len(videoHistory)-1].Track.Samplelist
}
@@ -187,6 +222,7 @@ func (p *MP4Plugin) download(w http.ResponseWriter, r *http.Request) {
videoHistory = append(videoHistory, TrackHistory{Track: t, ExtraData: track.ExtraData})
}
// 智能添加轨道的函数,处理编码参数变化
addTrack := func(track *mp4.Track) {
var lastAudioTrack, lastVideoTrack *TrackHistory
if len(audioHistory) > 0 {
@@ -195,105 +231,150 @@ func (p *MP4Plugin) download(w http.ResponseWriter, r *http.Request) {
if len(videoHistory) > 0 {
lastVideoTrack = &videoHistory[len(videoHistory)-1]
}
if track.Cid.IsAudio() {
if lastAudioTrack == nil {
// 首次添加音频轨道
addAudioTrack(track)
} else if !bytes.Equal(lastAudioTrack.ExtraData, track.ExtraData) {
// 音频编码参数发生变化,检查是否已存在相同参数的轨道
for _, history := range audioHistory {
if bytes.Equal(history.ExtraData, track.ExtraData) {
// 找到相同参数的轨道,重用它
audioTrack = history.Track
audioTrack.Samplelist = audioHistory[len(audioHistory)-1].Track.Samplelist
return
}
}
// 创建新的音频轨道
addAudioTrack(track)
}
} else if track.Cid.IsVideo() {
if lastVideoTrack == nil {
// 首次添加视频轨道
addVideoTrack(track)
} else if !bytes.Equal(lastVideoTrack.ExtraData, track.ExtraData) {
// 视频编码参数发生变化,检查是否已存在相同参数的轨道
for _, history := range videoHistory {
if bytes.Equal(history.ExtraData, track.ExtraData) {
// 找到相同参数的轨道,重用它
videoTrack = history.Track
videoTrack.Samplelist = videoHistory[len(videoHistory)-1].Track.Samplelist
return
}
}
// 创建新的视频轨道
addVideoTrack(track)
}
}
}
// 遍历处理每个录制文件
for i, stream := range streams {
tsOffset = lastTs
tsOffset = lastTs // 设置时间戳偏移
// 打开录制文件
file, err = os.Open(stream.FilePath)
if err != nil {
return
}
p.Info("read", "file", file.Name())
// 创建解复用器并解析文件
demuxer := mp4.NewDemuxer(file)
err = demuxer.Demux()
if err != nil {
return
}
trackCount := len(demuxer.Tracks)
// 处理轨道信息
if i == 0 || flag == mp4.FLAG_FRAGMENT {
// 第一个文件或分片模式,添加所有轨道
for _, track := range demuxer.Tracks {
addTrack(track)
}
}
// 检查轨道数量是否发生变化
if trackCount != len(muxer.Tracks) {
if flag == mp4.FLAG_FRAGMENT {
// 分片模式下重新生成 MOOV box
moov = muxer.MakeMoov()
}
}
// 处理开始时间偏移(仅第一个文件)
if i == 0 {
startTimestamp := startTime.Sub(stream.StartTime).Milliseconds()
var startSample *box.Sample
if startSample, err = demuxer.SeekTime(uint64(startTimestamp)); err != nil {
tsOffset = 0
continue
if startTimestamp > 0 {
// 如果请求的开始时间晚于文件开始时间,需要定位到指定时间点
var startSample *box.Sample
if startSample, err = demuxer.SeekTime(uint64(startTimestamp)); err != nil {
continue
}
tsOffset = -int64(startSample.Timestamp)
}
tsOffset = -int64(startSample.Timestamp)
}
var part *ContentPart
// 遍历处理每个样本
for track, sample := range demuxer.RangeSample {
// 检查是否超出结束时间(仅最后一个文件)
if i == streamCount-1 && int64(sample.Timestamp) > endTime.Sub(stream.StartTime).Milliseconds() {
break
}
// 创建内容片段
if part == nil {
part = &ContentPart{
File: file,
Start: sample.Offset,
}
}
// 计算调整后的时间戳
lastTs = int64(sample.Timestamp + uint32(tsOffset))
fixSample := *sample
fixSample.Timestamp += uint32(tsOffset)
if flag == 0 {
// 常规 MP4 模式
fixSample.Offset = sampleOffset + (fixSample.Offset - part.Start)
part.Size += sample.Size
// 将样本添加到对应的轨道
if track.Cid.IsAudio() {
audioTrack.AddSampleEntry(fixSample)
} else if track.Cid.IsVideo() {
videoTrack.AddSampleEntry(fixSample)
}
} else {
// 分片 MP4 模式
// 读取样本数据
part.Seek(sample.Offset, io.SeekStart)
fixSample.Data = make([]byte, sample.Size)
part.Read(fixSample.Data)
// 创建分片
var moof, mdat box.IBox
if track.Cid.IsAudio() {
moof, mdat = muxer.CreateFlagment(audioTrack, fixSample)
} else if track.Cid.IsVideo() {
moof, mdat = muxer.CreateFlagment(videoTrack, fixSample)
}
// 添加分片到内容片段
if moof != nil {
part.boxies = append(part.boxies, moof, mdat)
part.Size += int(moof.Size() + mdat.Size())
}
}
}
// 更新偏移量并添加到片段列表
if part != nil {
sampleOffset += int64(part.Size)
parts = append(parts, part)
@@ -301,14 +382,21 @@ func (p *MP4Plugin) download(w http.ResponseWriter, r *http.Request) {
}
if flag == 0 {
// 常规 MP4 模式:生成完整的 MP4 文件
moovSize := muxer.MakeMoov().Size()
dataSize := uint64(sampleOffset - mdatOffset)
// 设置内容长度
w.Header().Set("Content-Length", fmt.Sprintf("%d", uint64(sampleOffset)+moovSize))
// 调整样本偏移量以适应 MOOV box
for _, track := range muxer.Tracks {
for i := range track.Samplelist {
track.Samplelist[i].Offset += int64(moovSize)
}
}
// 创建 MDAT box
mdatBox := box.CreateBaseBox(box.TypeMDAT, dataSize+box.BasicBoxLen)
var freeBox *box.FreeBox
@@ -318,11 +406,13 @@ func (p *MP4Plugin) download(w http.ResponseWriter, r *http.Request) {
var written, totalWritten int64
// 写入文件头部FTYP、MOOV、FREE、MDAT header
totalWritten, err = box.WriteTo(w, ftyp, muxer.MakeMoov(), freeBox, mdatBox)
if err != nil {
return
}
// 写入所有内容片段的数据
for _, part := range parts {
part.Seek(part.Start, io.SeekStart)
written, err = io.CopyN(w, part.File, int64(part.Size))
@@ -333,15 +423,21 @@ func (p *MP4Plugin) download(w http.ResponseWriter, r *http.Request) {
part.Close()
}
} else {
// 分片 MP4 模式:输出分片格式
var children []box.IBox
var totalSize uint64
// 添加文件头和所有分片
children = append(children, ftyp, moov)
totalSize += uint64(ftyp.Size() + moov.Size())
for _, part := range parts {
totalSize += uint64(part.Size)
children = append(children, part.boxies...)
part.Close()
}
// 设置内容长度并写入数据
w.Header().Set("Content-Length", fmt.Sprintf("%d", totalSize))
_, err = box.WriteTo(w, children...)
if err != nil {
@@ -361,49 +457,51 @@ func (p *MP4Plugin) StartRecord(ctx context.Context, req *mp4pb.ReqStartRecord)
filePath = req.FilePath
}
res = &mp4pb.ResponseStartRecord{}
p.Server.Records.Call(func() error {
_, recordExists = p.Server.Records.Find(func(job *m7s.RecordJob) bool {
return job.StreamPath == req.StreamPath && job.RecConf.FilePath == req.FilePath
})
return nil
_, recordExists = p.Server.Records.SafeFind(func(job *m7s.RecordJob) bool {
return job.StreamPath == req.StreamPath && job.RecConf.FilePath == req.FilePath
})
if recordExists {
err = pkg.ErrRecordExists
return
}
p.Server.Streams.Call(func() error {
if stream, ok := p.Server.Streams.Get(req.StreamPath); ok {
recordConf := config.Record{
Append: false,
Fragment: fragment,
FilePath: filePath,
recordConf := config.Record{
Append: false,
Fragment: fragment,
FilePath: filePath,
}
if stream, ok := p.Server.Streams.SafeGet(req.StreamPath); ok {
job := p.Record(stream, recordConf, nil)
res.Data = uint64(uintptr(unsafe.Pointer(job.GetTask())))
} else {
sub, err := p.Subscribe(ctx, req.StreamPath)
if err == nil && sub != nil {
if stream, ok := p.Server.Streams.SafeGet(req.StreamPath); ok {
job := p.Record(stream, recordConf, nil)
res.Data = uint64(uintptr(unsafe.Pointer(job.GetTask())))
} else {
err = pkg.ErrNotFound
}
job := p.Record(stream, recordConf, nil)
res.Data = uint64(uintptr(unsafe.Pointer(job.GetTask())))
} else {
err = pkg.ErrNotFound
}
return nil
})
}
return
}
func (p *MP4Plugin) StopRecord(ctx context.Context, req *mp4pb.ReqStopRecord) (res *mp4pb.ResponseStopRecord, err error) {
res = &mp4pb.ResponseStopRecord{}
var recordJob *m7s.RecordJob
p.Server.Records.Call(func() error {
recordJob, _ = p.Server.Records.Find(func(job *m7s.RecordJob) bool {
return job.StreamPath == req.StreamPath
})
if recordJob != nil {
t := recordJob.GetTask()
if t != nil {
res.Data = uint64(uintptr(unsafe.Pointer(t)))
t.Stop(task.ErrStopByUser)
}
}
return nil
recordJob, _ = p.Server.Records.SafeFind(func(job *m7s.RecordJob) bool {
return job.StreamPath == req.StreamPath
})
if recordJob != nil {
t := recordJob.GetTask()
if t != nil {
res.Data = uint64(uintptr(unsafe.Pointer(t)))
t.Stop(task.ErrStopByUser)
}
}
return
}
@@ -425,57 +523,53 @@ func (p *MP4Plugin) EventStart(ctx context.Context, req *mp4pb.ReqEventRecord) (
}
//recorder := p.Meta.Recorder(config.Record{})
var tmpJob *m7s.RecordJob
p.Server.Records.Call(func() error {
tmpJob, _ = p.Server.Records.Find(func(job *m7s.RecordJob) bool {
return job.StreamPath == req.StreamPath
})
return nil
tmpJob, _ = p.Server.Records.SafeFind(func(job *m7s.RecordJob) bool {
return job.StreamPath == req.StreamPath
})
if tmpJob == nil { //为空表示没有正在进行的录制,也就是没有自动录像,则进行正常的事件录像
p.Server.Streams.Call(func() error {
if stream, ok := p.Server.Streams.Get(req.StreamPath); ok {
recordConf := config.Record{
Append: false,
Fragment: 0,
FilePath: filepath.Join(p.EventRecordFilePath, stream.StreamPath, time.Now().Local().Format("2006-01-02-15-04-05")),
}
//recordJob := recorder.GetRecordJob()
var subconfig config.Subscribe
defaults.SetDefaults(&subconfig)
subconfig.BufferTime = beforeDuration
recordJob := p.Record(stream, recordConf, &subconfig)
recordJob.EventId = req.EventId
recordJob.EventLevel = req.EventLevel
recordJob.EventName = req.EventName
recordJob.EventDesc = req.EventDesc
recordJob.AfterDuration = afterDuration
recordJob.BeforeDuration = beforeDuration
recordJob.Mode = m7s.RecordModeEvent
if stream, ok := p.Server.Streams.SafeGet(req.StreamPath); ok {
recordConf := config.Record{
Append: false,
Fragment: 0,
FilePath: filepath.Join(p.EventRecordFilePath, stream.StreamPath, time.Now().Local().Format("2006-01-02-15-04-05")),
Mode: config.RecordModeEvent,
Event: &config.RecordEvent{
EventId: req.EventId,
EventLevel: req.EventLevel,
EventName: req.EventName,
EventDesc: req.EventDesc,
BeforeDuration: uint32(beforeDuration / time.Millisecond),
AfterDuration: uint32(afterDuration / time.Millisecond),
},
}
return nil
})
//recordJob := recorder.GetRecordJob()
var subconfig config.Subscribe
defaults.SetDefaults(&subconfig)
subconfig.BufferTime = beforeDuration
p.Record(stream, recordConf, &subconfig)
}
} else {
if tmpJob.AfterDuration != 0 { //当前有事件录像正在录制,则更新该录像的结束时间
tmpJob.AfterDuration = time.Duration(tmpJob.Subscriber.VideoReader.AbsTime)*time.Millisecond + afterDuration
if tmpJob.Event != nil { //当前有事件录像正在录制,则更新该录像的结束时间
tmpJob.Event.AfterDuration = tmpJob.Subscriber.VideoReader.AbsTime + uint32(afterDuration/time.Millisecond)
if p.DB != nil {
p.DB.Save(&tmpJob.Event)
}
} else { //当前有自动录像正在录制,则生成事件录像的记录,而不去生成事件录像的文件
recordStream := &m7s.RecordStream{
StreamPath: req.StreamPath,
newEvent := &config.RecordEvent{
EventId: req.EventId,
EventLevel: req.EventLevel,
EventDesc: req.EventDesc,
EventName: req.EventName,
Mode: m7s.RecordModeEvent,
BeforeDuration: beforeDuration,
AfterDuration: afterDuration,
Type: "mp4",
EventDesc: req.EventDesc,
BeforeDuration: uint32(beforeDuration / time.Millisecond),
AfterDuration: uint32(afterDuration / time.Millisecond),
}
now := time.Now()
startTime := now.Add(-beforeDuration)
endTime := now.Add(afterDuration)
recordStream.StartTime = startTime
recordStream.EndTime = endTime
if p.DB != nil {
p.DB.Save(&recordStream)
p.DB.Save(&m7s.EventRecordStream{
RecordEvent: newEvent,
RecordStream: m7s.RecordStream{
StreamPath: req.StreamPath,
},
})
}
}
}

1209
plugin/mp4/api_extract.go Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -37,9 +37,27 @@
| | | | | | sbgp | | sample-to-group |
| | | | | | sgpd | | sample group description |
| | | | | | subs | | sub-sample information |
| | | udta | | | | | user-data (track level)<br>轨道级别的用户数据容器 |
| | | | cprt | | | | copyright etc.<br>版权信息 |
| | | | titl | | | | title<br>标题 |
| | | | auth | | | | author<br>作者 |
| | mvex | | | | | | movie extends box |
| | | mehd | | | | | movie extends header box |
| | | trex | | | | ✓ | track extends defaults |
| | udta | | | | | | user-data (movie level)<br>电影级别的用户数据容器 |
| | | cprt | | | | | copyright etc.<br>版权信息 |
| | | titl | | | | | title<br>标题 |
| | | auth | | | | | author<br>作者 |
| | | albm | | | | | album<br>专辑 |
| | | yrrc | | | | | year<br>年份 |
| | | rtng | | | | | rating<br>评级 |
| | | clsf | | | | | classification<br>分类 |
| | | kywd | | | | | keywords<br>关键词 |
| | | loci | | | | | location information<br>位置信息 |
| | | dscp | | | | | description<br>描述 |
| | | perf | | | | | performer<br>表演者 |
| | | gnre | | | | | genre<br>类型 |
| | | meta | | | | | metadata atom<br>元数据原子 |
| | ipmc | | | | | | IPMP Control Box |
| moof | | | | | | | movie fragment |
| | mfhd | | | | | ✓ | movie fragment header |
@@ -54,8 +72,10 @@
| mdat | | | | | | | media data container |
| free | | | | | | | free space |
| skip | | | | | | | free space |
| | udta | | | | | | user-data |
| | | cprt | | | | | copyright etc. |
| udta | | | | | | | user-data (file level)<br>文件级别的用户数据容器 |
| | cprt | | | | | | copyright etc.<br>版权信息 |
| | titl | | | | | | title<br>标题 |
| | auth | | | | | | author<br>作者 |
| meta | | | | | | | metadata |
| | hdlr | | | | | ✓ | handler, declares the metadata (handler) type |
| | dinf | | | | | | data information box, container |

View File

@@ -91,9 +91,6 @@ func (p *DeleteRecordTask) deleteOldestFile() {
}
for _, filePath := range filePaths {
for p.getDiskOutOfSpace(filePath) {
queryRecord := m7s.RecordStream{
EventLevel: m7s.EventLevelLow, // 查询条件event_level = 1,非重要事件
}
var eventRecords []m7s.RecordStream
// 使用不同的方法进行路径匹配避免ESCAPE语法问题
// 解决方案用MySQL能理解的简单方式匹配路径前缀
@@ -103,7 +100,7 @@ func (p *DeleteRecordTask) deleteOldestFile() {
searchPattern := basePath + "%"
p.Info("deleteOldestFile", "searching with path pattern", searchPattern)
err := p.DB.Where(&queryRecord).Where("end_time IS NOT NULL").
err := p.DB.Where("event_id=0 AND end_time IS NOT NULL").
Where("file_path LIKE ?", searchPattern).
Order("end_time ASC").Find(&eventRecords).Error
if err == nil {
@@ -149,14 +146,11 @@ func (t *DeleteRecordTask) Tick(any) {
if t.RecordFileExpireDays <= 0 {
return
}
//搜索event_records表中event_level值为1的(非重要)数据并将其create_time与当前时间比对大于RecordFileExpireDays则进行删除数据库标记is_delete为1磁盘上删除录像文件
//搜索event_records表中event_id值为0的(非事件)录像并将其create_time与当前时间比对大于RecordFileExpireDays则进行删除数据库标记is_delete为1磁盘上删除录像文件
var eventRecords []m7s.RecordStream
expireTime := time.Now().AddDate(0, 0, -t.RecordFileExpireDays)
t.Debug("RecordFileExpireDays is set to auto delete oldestfile", "expireTime", expireTime.Format("2006-01-02 15:04:05"))
queryRecord := m7s.RecordStream{
EventLevel: m7s.EventLevelLow, // 查询条件event_level = low,非重要事件
}
err := t.DB.Where(&queryRecord).Find(&eventRecords, "end_time < ? AND end_time IS NOT NULL", expireTime).Error
err := t.DB.Find(&eventRecords, "event_id=0 AND end_time < ? AND end_time IS NOT NULL", expireTime).Error
if err == nil {
for _, record := range eventRecords {
t.Info("RecordFileExpireDays is set to auto delete oldestfile", "ID", record.ID, "create time", record.EndTime, "filepath", record.FilePath)

View File

@@ -76,7 +76,11 @@ var _ = m7s.InstallPlugin[MP4Plugin](m7s.PluginMeta{
func (p *MP4Plugin) RegisterHandler() map[string]http.HandlerFunc {
return map[string]http.HandlerFunc{
"/download/{streamPath...}": p.download,
"/download/{streamPath...}": p.download,
"/extractClip/{streamPath...}": p.extractClipToFileHandel,
"/extractCompressed/{streamPath...}": p.extractCompressedVideoHandel,
"/extractGop/{streamPath...}": p.extractGopVideoHandel,
"/snap/{streamPath...}": p.snapHandel,
}
}

View File

@@ -343,7 +343,25 @@ var (
TypeAUXV = f("auxv")
TypeHINT = f("hint")
TypeUDTA = f("udta")
TypeM7SP = f("m7sp") // Custom box type for M7S StreamPath
// Common metadata box types
TypeTITL = f("©nam") // Title
TypeART = f("©ART") // Artist/Author
TypeALB = f("©alb") // Album
TypeDAY = f("©day") // Date/Year
TypeCMT = f("©cmt") // Comment/Description
TypeGEN = f("©gen") // Genre
TypeCPRT = f("cprt") // Copyright
TypeENCO = f("©too") // Encoder/Tool
TypeWRT = f("©wrt") // Writer/Composer
TypePRD = f("©prd") // Producer
TypePRF = f("©prf") // Performer
TypeGRP = f("©grp") // Grouping
TypeLYR = f("©lyr") // Lyrics
TypeKEYW = f("keyw") // Keywords
TypeLOCI = f("loci") // Location Information
TypeRTNG = f("rtng") // Rating
TypeMETA_CUST = f("----") // Custom metadata (iTunes-style)
)
// aligned(8) class Box (unsigned int(32) boxtype, optional unsigned int(8)[16] extended_type) {

View File

@@ -0,0 +1,334 @@
package box
import (
"encoding/binary"
"io"
"time"
)
// Metadata holds the descriptive metadata fields that can be embedded in an
// MP4 file (title, authorship, rights, and free-form custom entries).
type Metadata struct {
	Title     string // Title of the work
	Artist    string // Artist / author
	Album     string // Album name
	Date      string // Date string (free-form)
	Comment   string // Comment / description
	Genre     string // Genre
	Copyright string // Copyright notice
	Encoder   string // Encoding tool
	Writer    string // Writer / composer
	Producer  string // Producer
	Performer string // Performer
	Grouping  string // Grouping
	Lyrics    string // Lyrics
	Keywords  string // Keywords
	Location  string // Location information
	// Rating (0-5). NOTE(review): CreateMetadataEntries never serializes
	// this field even though TypeRTNG exists — confirm whether rating
	// emission is intended.
	Rating uint8
	Custom map[string]string // Arbitrary custom key/value pairs
}
// Text Data Box - stores a single text metadata value; the serialized
// payload is the raw text bytes (see TextDataBox.WriteTo).
type TextDataBox struct {
	FullBox
	Text string
}

// Metadata Data Box - stores binary metadata together with a type
// indicator (iTunes-style "data" box).
type MetadataDataBox struct {
	FullBox
	DataType uint32 // Data type indicator
	Country  uint32 // Country code
	// Language code. NOTE(review): neither WriteTo nor Unmarshal touches
	// this field (the on-wire header is 8 bytes: type + country) — confirm
	// whether it should be serialized.
	Language uint32
	Data     []byte // Actual data
}

// Copyright Box - a 3-byte language code followed by a NUL-terminated
// copyright notice (see CreateCopyrightBox / CopyrightBox.WriteTo).
type CopyrightBox struct {
	FullBox
	Language [3]byte
	Notice   string
}

// Custom Metadata Box (iTunes-style "----" box) - a namespaced key/value
// metadata entry.
type CustomMetadataBox struct {
	BaseBox
	Mean string // Mean (namespace)
	Name string // Name (key)
	Data []byte // Data
}
// Create functions
// CreateTextDataBox builds a TextDataBox of the given type whose payload is
// the raw UTF-8 bytes of text; the size covers the full-box header plus the
// text.
func CreateTextDataBox(boxType BoxType, text string) *TextDataBox {
	b := &TextDataBox{Text: text}
	b.typ = boxType
	b.size = uint32(FullBoxLen + len(text))
	b.Version = 0
	b.Flags = [3]byte{}
	return b
}
// CreateMetadataDataBox builds an iTunes-style "data" box. The payload is
// an 8-byte header (4-byte type indicator plus 4-byte country word, with
// the language written as zero) followed by the raw data.
func CreateMetadataDataBox(dataType uint32, data []byte) *MetadataDataBox {
	const headerLen = 8
	b := &MetadataDataBox{DataType: dataType, Data: data}
	b.typ = f("data")
	b.size = uint32(FullBoxLen + headerLen + len(data))
	b.Version = 0
	b.Flags = [3]byte{}
	return b
}
// CreateCopyrightBox builds a "cprt" box carrying a 3-byte language code
// and a NUL-terminated copyright notice.
func CreateCopyrightBox(language [3]byte, notice string) *CopyrightBox {
	b := &CopyrightBox{Language: language, Notice: notice}
	b.typ = TypeCPRT
	// 3 bytes of language, the notice bytes, and one NUL terminator.
	b.size = uint32(FullBoxLen + 3 + len(notice) + 1)
	b.Version = 0
	b.Flags = [3]byte{}
	return b
}
// CreateCustomMetadataBox builds an iTunes-style "----" box holding a
// namespaced (mean/name) key and its raw data value.
func CreateCustomMetadataBox(mean, name string, data []byte) *CustomMetadataBox {
	// Payload: u32 mean length + mean, u32 name length + name, then data.
	payload := 4 + len(mean) + 4 + len(name) + len(data)
	return &CustomMetadataBox{
		BaseBox: BaseBox{
			typ:  TypeMETA_CUST,
			size: uint32(BasicBoxLen + payload),
		},
		Mean: mean,
		Name: name,
		Data: data,
	}
}
// WriteTo methods
// WriteTo serializes the box payload — the bare text bytes — into w.
func (box *TextDataBox) WriteTo(w io.Writer) (n int64, err error) {
	written, werr := io.WriteString(w, box.Text)
	return int64(written), werr
}
// WriteTo serializes the payload: an 8-byte header (type indicator, then
// country word; the language stays implicitly zero) followed by the data.
func (box *MetadataDataBox) WriteTo(w io.Writer) (n int64, err error) {
	header := make([]byte, 8)
	binary.BigEndian.PutUint32(header[:4], box.DataType)
	binary.BigEndian.PutUint32(header[4:], box.Country)
	for _, chunk := range [][]byte{header, box.Data} {
		written, werr := w.Write(chunk)
		n += int64(written)
		if werr != nil {
			return n, werr
		}
	}
	return n, nil
}
// WriteTo serializes the payload: the 3-byte language code followed by the
// NUL-terminated notice string.
func (box *CopyrightBox) WriteTo(w io.Writer) (n int64, err error) {
	chunks := [][]byte{
		box.Language[:],
		[]byte(box.Notice + "\x00"),
	}
	for _, chunk := range chunks {
		written, werr := w.Write(chunk)
		n += int64(written)
		if werr != nil {
			return n, werr
		}
	}
	return n, nil
}
// WriteTo serializes the payload: each of Mean and Name is emitted as a
// big-endian u32 length followed by its bytes, then the raw Data is
// appended.
func (box *CustomMetadataBox) WriteTo(w io.Writer) (n int64, err error) {
	// emit writes one chunk, accumulating the byte count and latest error;
	// it reports whether the write succeeded so callers can stop early.
	emit := func(p []byte) bool {
		written, werr := w.Write(p)
		n += int64(written)
		err = werr
		return werr == nil
	}
	var lenBuf [4]byte
	for _, s := range []string{box.Mean, box.Name} {
		binary.BigEndian.PutUint32(lenBuf[:], uint32(len(s)))
		if !emit(lenBuf[:]) || !emit([]byte(s)) {
			return n, err
		}
	}
	emit(box.Data)
	return n, err
}
// Unmarshal methods
// Unmarshal interprets the entire payload as the text value; an empty
// buffer yields an empty string and is not an error.
func (box *TextDataBox) Unmarshal(buf []byte) (IBox, error) {
	box.Text = string(buf)
	return box, nil
}
// Unmarshal parses the 8-byte header (type indicator, country word) and
// keeps the remaining bytes as the data payload. Data aliases buf's tail
// rather than copying it.
func (box *MetadataDataBox) Unmarshal(buf []byte) (IBox, error) {
	const headerLen = 8
	if len(buf) < headerLen {
		return nil, io.ErrShortBuffer
	}
	box.DataType = binary.BigEndian.Uint32(buf)
	box.Country = binary.BigEndian.Uint32(buf[4:])
	box.Data = buf[headerLen:]
	return box, nil
}
// Unmarshal parses a copyright payload: a 3-byte language code followed by
// a (normally NUL-terminated) notice string. If no terminator is present,
// the rest of the buffer is taken as the notice for robustness against
// non-conforming writers.
func (box *CopyrightBox) Unmarshal(buf []byte) (IBox, error) {
	if len(buf) < 4 {
		return nil, io.ErrShortBuffer
	}
	copy(box.Language[:], buf[:3])
	rest := buf[3:]
	// Bug fix: the previous fallback tested `box.Notice == ""` and then
	// re-read the buffer including the NUL byte, so a legitimately empty,
	// NUL-terminated notice came back as "\x00…". Track whether a
	// terminator was actually found instead of testing for emptiness.
	terminated := false
	for i, c := range rest {
		if c == 0 {
			box.Notice = string(rest[:i])
			terminated = true
			break
		}
	}
	if !terminated {
		box.Notice = string(rest)
	}
	return box, nil
}
// Unmarshal parses a "----" payload laid out as:
//
//	u32 mean length | mean bytes | u32 name length | name bytes | data
//
// Every length is validated against the remaining buffer before slicing.
// Data aliases the tail of buf rather than copying it.
func (box *CustomMetadataBox) Unmarshal(buf []byte) (IBox, error) {
	// 8 bytes is the minimum: two u32 length prefixes with empty strings.
	if len(buf) < 8 {
		return nil, io.ErrShortBuffer
	}
	offset := 0
	// Read mean length + mean
	meanLen := binary.BigEndian.Uint32(buf[offset:])
	offset += 4
	if offset+int(meanLen) > len(buf) {
		return nil, io.ErrShortBuffer
	}
	box.Mean = string(buf[offset : offset+int(meanLen)])
	offset += int(meanLen)
	// Read name length + name
	if offset+4 > len(buf) {
		return nil, io.ErrShortBuffer
	}
	nameLen := binary.BigEndian.Uint32(buf[offset:])
	offset += 4
	if offset+int(nameLen) > len(buf) {
		return nil, io.ErrShortBuffer
	}
	box.Name = string(buf[offset : offset+int(nameLen)])
	offset += int(nameLen)
	// Read remaining data
	box.Data = buf[offset:]
	return box, nil
}
// CreateMetadataEntries converts a Metadata struct into the list of leaf
// boxes representing it, skipping every empty field.
func CreateMetadataEntries(metadata *Metadata) []IBox {
	var entries []IBox
	// Plain text fields map 1:1 onto typed text data boxes; emission order
	// follows the struct's field order.
	textItems := []struct {
		typ  BoxType
		text string
	}{
		{TypeTITL, metadata.Title},
		{TypeART, metadata.Artist},
		{TypeALB, metadata.Album},
		{TypeDAY, metadata.Date},
		{TypeCMT, metadata.Comment},
		{TypeGEN, metadata.Genre},
		{TypeENCO, metadata.Encoder},
		{TypeWRT, metadata.Writer},
		{TypePRD, metadata.Producer},
		{TypePRF, metadata.Performer},
		{TypeGRP, metadata.Grouping},
		{TypeLYR, metadata.Lyrics},
		{TypeKEYW, metadata.Keywords},
		{TypeLOCI, metadata.Location},
	}
	for _, item := range textItems {
		if item.text != "" {
			entries = append(entries, CreateTextDataBox(item.typ, item.text))
		}
	}
	// Copyright uses its own box layout rather than a text data box; the
	// language code is the ISO 639-2 "undetermined" value.
	if metadata.Copyright != "" {
		entries = append(entries, CreateCopyrightBox([3]byte{'u', 'n', 'd'}, metadata.Copyright))
	}
	// Arbitrary user key/value pairs become iTunes-style "----" boxes.
	// (Map iteration order is unspecified, matching the original behavior.)
	for key, value := range metadata.Custom {
		entries = append(entries, CreateCustomMetadataBox("live.m7s.custom", key, []byte(value)))
	}
	// NOTE(review): Metadata.Rating is never emitted here although TypeRTNG
	// is declared — confirm whether that is intentional.
	return entries
}
// GetCurrentDateString returns today's local date formatted as YYYY-MM-DD.
func GetCurrentDateString() string {
	// time.DateOnly is the stdlib constant for the "2006-01-02" layout.
	return time.Now().Format(time.DateOnly)
}
// init registers the metadata box parsers with the global box registry:
// all plain-text metadata atoms share TextDataBox, the "data" atom carries
// typed payloads, cprt has its own layout, and the custom metadata atom
// stores mean/name/data triples.
func init() {
	RegisterBox[*TextDataBox](TypeTITL, TypeART, TypeALB, TypeDAY, TypeCMT, TypeGEN, TypeENCO, TypeWRT, TypePRD, TypePRF, TypeGRP, TypeLYR, TypeKEYW, TypeLOCI, TypeRTNG)
	// NOTE(review): f presumably builds a box type from the four-character
	// code "data" — confirm against its definition.
	RegisterBox[*MetadataDataBox](f("data"))
	RegisterBox[*CopyrightBox](TypeCPRT)
	RegisterBox[*CustomMetadataBox](TypeMETA_CUST)
}

View File

@@ -54,8 +54,16 @@ func (t *TrakBox) Unmarshal(buf []byte) (b IBox, err error) {
return t, err
}
// SampleCallback is invoked once per parsed sample; sampleIndex is the
// sample's position in the track's sample list. Callers in this package
// treat a non-nil return as advisory and continue parsing.
type SampleCallback func(sample *Sample, sampleIndex int) error
// ParseSamples parses the sample table and builds the sample list.
// It is a convenience wrapper around ParseSamplesWithCallback with no
// per-sample callback.
func (t *TrakBox) ParseSamples() (samplelist []Sample) {
	return t.ParseSamplesWithCallback(nil)
}
// ParseSamplesWithCallback parses the sample table and builds the sample list with optional callback
func (t *TrakBox) ParseSamplesWithCallback(callback SampleCallback) (samplelist []Sample) {
stbl := t.MDIA.MINF.STBL
var chunkOffsets []uint64
if stbl.STCO != nil {
@@ -150,6 +158,17 @@ func (t *TrakBox) ParseSamples() (samplelist []Sample) {
}
}
// 调用回调函数处理每个样本
if callback != nil {
for i := range samplelist {
if err := callback(&samplelist[i], i); err != nil {
// 如果回调返回错误,可以选择记录或处理,但不中断解析
// 这里为了保持向后兼容性,我们继续处理
continue
}
}
}
return samplelist
}

View File

@@ -12,12 +12,6 @@ type UserDataBox struct {
Entries []IBox
}
// Custom metadata box for storing stream path
type StreamPathBox struct {
FullBox
StreamPath string
}
// Create a new User Data Box
func CreateUserDataBox(entries ...IBox) *UserDataBox {
size := uint32(BasicBoxLen)
@@ -33,21 +27,6 @@ func CreateUserDataBox(entries ...IBox) *UserDataBox {
}
}
// CreateStreamPathBox builds the custom M7SP full box that stores a stream
// path as its raw payload; the declared size covers the full-box header
// plus the path bytes.
func CreateStreamPathBox(streamPath string) *StreamPathBox {
	return &StreamPathBox{
		FullBox: FullBox{
			BaseBox: BaseBox{
				typ:  TypeM7SP, // Custom box type for M7S StreamPath
				size: uint32(FullBoxLen + len(streamPath)),
			},
			Version: 0,
			Flags:   [3]byte{0, 0, 0},
		},
		StreamPath: streamPath,
	}
}
// WriteTo writes the UserDataBox to the given writer
func (box *UserDataBox) WriteTo(w io.Writer) (n int64, err error) {
return WriteTo(w, box.Entries...)
@@ -69,19 +48,6 @@ func (box *UserDataBox) Unmarshal(buf []byte) (IBox, error) {
return box, nil
}
// WriteTo writes the StreamPathBox payload (the raw stream-path bytes) to w.
// NOTE(review): only the payload is written here — presumably the box
// header is emitted by the surrounding box writer; confirm against it.
func (box *StreamPathBox) WriteTo(w io.Writer) (n int64, err error) {
	nn, err := w.Write([]byte(box.StreamPath))
	return int64(nn), err
}
// Unmarshal parses the given buffer into a StreamPathBox; the entire
// payload is taken verbatim as the stream path.
func (box *StreamPathBox) Unmarshal(buf []byte) (IBox, error) {
	box.StreamPath = string(buf)
	return box, nil
}
// init registers the user-data container box (udta) and the custom M7SP
// stream-path box with the global box registry.
func init() {
	RegisterBox[*UserDataBox](TypeUDTA)
	RegisterBox[*StreamPathBox](TypeM7SP)
}

View File

@@ -0,0 +1,121 @@
package mp4
import (
"context"
"os"
"time"
"m7s.live/v5"
"m7s.live/v5/plugin/mp4/pkg/box"
)
// DemuxerRange demuxes a sequence of recorded MP4 files restricted to the
// [StartTime, EndTime] window, delivering codec extra data and samples
// through the optional callbacks.
type DemuxerRange struct {
	StartTime, EndTime time.Time          // requested window; a zero EndTime removes the per-sample upper bound
	Streams            []m7s.RecordStream // recording segments to demux, processed in order
	// Callbacks; any may be nil, in which case that event is skipped.
	OnAudioExtraData func(codec box.MP4_CODEC_TYPE, data []byte) error
	OnVideoExtraData func(codec box.MP4_CODEC_TYPE, data []byte) error
	OnAudioSample    func(codec box.MP4_CODEC_TYPE, sample box.Sample) error
	OnVideoSample    func(codec box.MP4_CODEC_TYPE, sample box.Sample) error
}
// Demux iterates over the configured streams that overlap the requested
// time window and demuxes each file in order, invoking the callbacks for
// codec extra data and for every sample. Timestamps are rebased so they
// increase monotonically across consecutive files.
//
// Fixes relative to the original:
//   - each file is now closed as soon as its stream is processed (the
//     original used defer inside the loop, holding every file open until
//     Demux returned);
//   - a zero EndTime no longer skips every stream (the per-stream check now
//     matches the per-sample "!EndTime.IsZero()" guard);
//   - a file without an mdat box is skipped instead of panicking;
//   - timestamp rebasing is done in int64, avoiding uint32 wraparound.
func (d *DemuxerRange) Demux(ctx context.Context) error {
	var ts int64
	for _, stream := range d.Streams {
		// Skip segments entirely outside the requested window.
		if stream.EndTime.Before(d.StartTime) {
			continue
		}
		if !d.EndTime.IsZero() && stream.StartTime.After(d.EndTime) {
			continue
		}
		next, err := d.demuxStream(ctx, stream, ts)
		if err != nil {
			return err
		}
		ts = next
	}
	return nil
}

// demuxStream demuxes one recording file, feeding extra data and samples to
// the callbacks. ts is the running timestamp reached so far; the updated
// value is returned. The file is closed before this helper returns.
func (d *DemuxerRange) demuxStream(ctx context.Context, stream m7s.RecordStream, ts int64) (int64, error) {
	tsOffset := ts
	file, err := os.Open(stream.FilePath)
	if err != nil {
		// Best effort: an unreadable segment is skipped, as before.
		return ts, nil
	}
	defer file.Close()
	demuxer := NewDemuxer(file)
	if err = demuxer.Demux(); err != nil {
		return ts, err
	}
	// Emit each track's codec extra data (sequence headers) first.
	for _, track := range demuxer.Tracks {
		switch track.Cid {
		case box.MP4_CODEC_H264, box.MP4_CODEC_H265:
			if d.OnVideoExtraData != nil {
				if err := d.OnVideoExtraData(track.Cid, track.ExtraData); err != nil {
					return ts, err
				}
			}
		case box.MP4_CODEC_AAC, box.MP4_CODEC_G711A, box.MP4_CODEC_G711U:
			if d.OnAudioExtraData != nil {
				if err := d.OnAudioExtraData(track.Cid, track.ExtraData); err != nil {
					return ts, err
				}
			}
		}
	}
	// Seek inside the file so playback starts at StartTime; the negative
	// offset cancels the timestamps of the skipped samples.
	if !d.StartTime.IsZero() {
		startTimestamp := d.StartTime.Sub(stream.StartTime).Milliseconds()
		if startTimestamp < 0 {
			startTimestamp = 0
		}
		if startSample, err := demuxer.SeekTime(uint64(startTimestamp)); err == nil {
			tsOffset = -int64(startSample.Timestamp)
		} else {
			tsOffset = 0
		}
	}
	if demuxer.mdat == nil {
		// No media data box: nothing to read from this segment.
		return ts, nil
	}
	for track, sample := range demuxer.ReadSample {
		if ctx.Err() != nil {
			return ts, context.Cause(ctx)
		}
		// Stop once the sample falls past the requested end of the window.
		sampleTime := stream.StartTime.Add(time.Duration(sample.Timestamp) * time.Millisecond)
		if !d.EndTime.IsZero() && sampleTime.After(d.EndTime) {
			break
		}
		// Resolve the sample's payload inside the in-memory mdat data.
		sampleOffset := int(sample.Offset) - int(demuxer.mdatOffset)
		if sampleOffset < 0 || sampleOffset+sample.Size > len(demuxer.mdat.Data) {
			continue
		}
		sample.Data = demuxer.mdat.Data[sampleOffset : sampleOffset+sample.Size]
		// Rebase the timestamp, clamping at zero.
		if rebased := int64(sample.Timestamp) + tsOffset; rebased < 0 {
			ts = 0
		} else {
			ts = rebased
		}
		sample.Timestamp = uint32(ts)
		// Dispatch to the matching callback by codec family.
		switch track.Cid {
		case box.MP4_CODEC_H264, box.MP4_CODEC_H265:
			if d.OnVideoSample != nil {
				if err := d.OnVideoSample(track.Cid, sample); err != nil {
					return ts, err
				}
			}
		case box.MP4_CODEC_AAC, box.MP4_CODEC_G711A, box.MP4_CODEC_G711U:
			if d.OnAudioSample != nil {
				if err := d.OnAudioSample(track.Cid, sample); err != nil {
					return ts, err
				}
			}
		}
	}
	return ts, nil
}

View File

@@ -6,8 +6,10 @@ import (
"slices"
"m7s.live/v5/pkg"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/util"
"m7s.live/v5/plugin/mp4/pkg/box"
. "m7s.live/v5/plugin/mp4/pkg/box"
rtmp "m7s.live/v5/plugin/rtmp/pkg"
)
type (
@@ -30,7 +32,7 @@ type (
Number uint32
CryptByteBlock uint8
SkipByteBlock uint8
PsshBoxes []*PsshBox
PsshBoxes []*box.PsshBox
}
SubSamplePattern struct {
BytesClear uint16
@@ -43,16 +45,28 @@ type (
chunkoffset uint64
}
RTMPFrame struct {
Frame any // 可以是 *rtmp.RTMPVideo 或 *rtmp.RTMPAudio
}
Demuxer struct {
reader io.ReadSeeker
Tracks []*Track
ReadSampleIdx []uint32
IsFragment bool
// pssh []*PsshBox
moov *MoovBox
mdat *MediaDataBox
// pssh []*box.PsshBox
moov *box.MoovBox
mdat *box.MediaDataBox
mdatOffset uint64
QuicTime bool
// 预生成的 RTMP 帧
RTMPVideoSequence *rtmp.RTMPVideo
RTMPAudioSequence *rtmp.RTMPAudio
RTMPFrames []RTMPFrame
// RTMP 帧生成配置
RTMPAllocator *util.ScalableMemoryAllocator
}
)
@@ -63,6 +77,10 @@ func NewDemuxer(r io.ReadSeeker) *Demuxer {
}
func (d *Demuxer) Demux() (err error) {
return d.DemuxWithAllocator(nil)
}
func (d *Demuxer) DemuxWithAllocator(allocator *util.ScalableMemoryAllocator) (err error) {
// decodeVisualSampleEntry := func() (offset int, err error) {
// var encv VisualSampleEntry
@@ -96,7 +114,7 @@ func (d *Demuxer) Demux() (err error) {
// }
// return
// }
var b IBox
var b box.IBox
var offset uint64
for {
b, err = box.ReadFrom(d.reader)
@@ -107,53 +125,59 @@ func (d *Demuxer) Demux() (err error) {
return err
}
offset += b.Size()
switch box := b.(type) {
case *FileTypeBox:
if slices.Contains(box.CompatibleBrands, [4]byte{'q', 't', ' ', ' '}) {
switch boxData := b.(type) {
case *box.FileTypeBox:
if slices.Contains(boxData.CompatibleBrands, [4]byte{'q', 't', ' ', ' '}) {
d.QuicTime = true
}
case *FreeBox:
case *MediaDataBox:
d.mdat = box
d.mdatOffset = offset - b.Size() + uint64(box.HeaderSize())
case *MoovBox:
if box.MVEX != nil {
case *box.FreeBox:
case *box.MediaDataBox:
d.mdat = boxData
d.mdatOffset = offset - b.Size() + uint64(boxData.HeaderSize())
case *box.MoovBox:
if boxData.MVEX != nil {
d.IsFragment = true
}
for _, trak := range box.Tracks {
for _, trak := range boxData.Tracks {
track := &Track{}
track.TrackId = trak.TKHD.TrackID
track.Duration = uint32(trak.TKHD.Duration)
track.Timescale = trak.MDIA.MDHD.Timescale
track.Samplelist = trak.ParseSamples()
// 创建RTMP样本处理回调
var sampleCallback box.SampleCallback
if d.RTMPAllocator != nil {
sampleCallback = d.createRTMPSampleCallback(track, trak)
}
track.Samplelist = trak.ParseSamplesWithCallback(sampleCallback)
if len(trak.MDIA.MINF.STBL.STSD.Entries) > 0 {
entryBox := trak.MDIA.MINF.STBL.STSD.Entries[0]
switch entry := entryBox.(type) {
case *AudioSampleEntry:
case *box.AudioSampleEntry:
switch entry.Type() {
case TypeMP4A:
track.Cid = MP4_CODEC_AAC
case TypeALAW:
track.Cid = MP4_CODEC_G711A
case TypeULAW:
track.Cid = MP4_CODEC_G711U
case TypeOPUS:
track.Cid = MP4_CODEC_OPUS
case box.TypeMP4A:
track.Cid = box.MP4_CODEC_AAC
case box.TypeALAW:
track.Cid = box.MP4_CODEC_G711A
case box.TypeULAW:
track.Cid = box.MP4_CODEC_G711U
case box.TypeOPUS:
track.Cid = box.MP4_CODEC_OPUS
}
track.SampleRate = entry.Samplerate
track.ChannelCount = uint8(entry.ChannelCount)
track.SampleSize = entry.SampleSize
switch extra := entry.ExtraData.(type) {
case *ESDSBox:
track.Cid, track.ExtraData = DecodeESDescriptor(extra.Data)
case *box.ESDSBox:
track.Cid, track.ExtraData = box.DecodeESDescriptor(extra.Data)
}
case *VisualSampleEntry:
track.ExtraData = entry.ExtraData.(*DataBox).Data
case *box.VisualSampleEntry:
track.ExtraData = entry.ExtraData.(*box.DataBox).Data
switch entry.Type() {
case TypeAVC1:
track.Cid = MP4_CODEC_H264
case TypeHVC1:
track.Cid = MP4_CODEC_H265
case box.TypeAVC1:
track.Cid = box.MP4_CODEC_H264
case box.TypeHVC1, box.TypeHEV1:
track.Cid = box.MP4_CODEC_H265
}
track.Width = uint32(entry.Width)
track.Height = uint32(entry.Height)
@@ -161,9 +185,9 @@ func (d *Demuxer) Demux() (err error) {
}
d.Tracks = append(d.Tracks, track)
}
d.moov = box
case *MovieFragmentBox:
for _, traf := range box.TRAFs {
d.moov = boxData
case *box.MovieFragmentBox:
for _, traf := range boxData.TRAFs {
track := d.Tracks[traf.TFHD.TrackID-1]
track.defaultSize = traf.TFHD.DefaultSampleSize
track.defaultDuration = traf.TFHD.DefaultSampleDuration
@@ -171,6 +195,7 @@ func (d *Demuxer) Demux() (err error) {
}
}
d.ReadSampleIdx = make([]uint32, len(d.Tracks))
// for _, track := range d.Tracks {
// if len(track.Samplelist) > 0 {
// track.StartDts = uint64(track.Samplelist[0].DTS) * 1000 / uint64(track.Timescale)
@@ -180,7 +205,7 @@ func (d *Demuxer) Demux() (err error) {
return nil
}
func (d *Demuxer) SeekTime(dts uint64) (sample *Sample, err error) {
func (d *Demuxer) SeekTime(dts uint64) (sample *box.Sample, err error) {
var audioTrack, videoTrack *Track
for _, track := range d.Tracks {
if track.Cid.IsAudio() {
@@ -218,6 +243,54 @@ func (d *Demuxer) SeekTime(dts uint64) (sample *Sample, err error) {
return
}
// SeekTimePreIDR seeks to the first keyframe (IDR) at or before the given
// dts and positions every track's read index accordingly.
//
// When a video track exists, the video read index is moved to that keyframe
// and the audio read index is advanced to the first audio sample whose file
// offset is not before the keyframe's offset — an approximation of A/V
// alignment (see TODO). With only an audio track, a plain Seek is used.
//
// TODO: audio track synchronization has not been refined.
// Author: erroot, 2025-06-14 (translated from the original Chinese note).
func (d *Demuxer) SeekTimePreIDR(dts uint64) (sample *Sample, err error) {
	var audioTrack, videoTrack *Track
	for _, track := range d.Tracks {
		if track.Cid.IsAudio() {
			audioTrack = track
		} else if track.Cid.IsVideo() {
			videoTrack = track
		}
	}
	if videoTrack != nil {
		idx := videoTrack.SeekPreIDR(dts)
		if idx == -1 {
			return nil, errors.New("seek failed")
		}
		d.ReadSampleIdx[videoTrack.TrackId-1] = uint32(idx)
		sample = &videoTrack.Samplelist[idx]
		if audioTrack != nil {
			// Align audio by file offset: skip audio samples stored before
			// the chosen video keyframe.
			for i, sample := range audioTrack.Samplelist {
				if sample.Offset < int64(videoTrack.Samplelist[idx].Offset) {
					continue
				}
				d.ReadSampleIdx[audioTrack.TrackId-1] = uint32(i)
				break
			}
		}
	} else if audioTrack != nil {
		idx := audioTrack.Seek(dts)
		if idx == -1 {
			return nil, errors.New("seek failed")
		}
		d.ReadSampleIdx[audioTrack.TrackId-1] = uint32(idx)
		sample = &audioTrack.Samplelist[idx]
	} else {
		return nil, pkg.ErrNoTrack
	}
	return
}
// func (d *Demuxer) decodeTRUN(trun *TrackRunBox) {
// dataOffset := trun.Dataoffset
// nextDts := d.currentTrack.StartDts
@@ -377,10 +450,10 @@ func (d *Demuxer) SeekTime(dts uint64) (sample *Sample, err error) {
// return nil
// }
func (d *Demuxer) ReadSample(yield func(*Track, Sample) bool) {
func (d *Demuxer) ReadSample(yield func(*Track, box.Sample) bool) {
for {
maxdts := int64(-1)
minTsSample := Sample{Timestamp: uint32(maxdts)}
minTsSample := box.Sample{Timestamp: uint32(maxdts)}
var whichTrack *Track
whichTracki := 0
for i, track := range d.Tracks {
@@ -393,8 +466,8 @@ func (d *Demuxer) ReadSample(yield func(*Track, Sample) bool) {
whichTrack = track
whichTracki = i
} else {
dts1 := minTsSample.Timestamp * uint32(d.moov.MVHD.Timescale) / uint32(whichTrack.Timescale)
dts2 := track.Samplelist[idx].Timestamp * uint32(d.moov.MVHD.Timescale) / uint32(track.Timescale)
dts1 := uint64(minTsSample.Timestamp) * uint64(d.moov.MVHD.Timescale) / uint64(whichTrack.Timescale)
dts2 := uint64(track.Samplelist[idx].Timestamp) * uint64(d.moov.MVHD.Timescale) / uint64(track.Timescale)
if dts1 > dts2 {
minTsSample = track.Samplelist[idx]
whichTrack = track
@@ -414,9 +487,9 @@ func (d *Demuxer) ReadSample(yield func(*Track, Sample) bool) {
}
}
func (d *Demuxer) RangeSample(yield func(*Track, *Sample) bool) {
func (d *Demuxer) RangeSample(yield func(*Track, *box.Sample) bool) {
for {
var minTsSample *Sample
var minTsSample *box.Sample
var whichTrack *Track
whichTracki := 0
for i, track := range d.Tracks {
@@ -448,6 +521,244 @@ func (d *Demuxer) RangeSample(yield func(*Track, *Sample) bool) {
}
// GetMoovBox returns the Movie Box from the demuxer
func (d *Demuxer) GetMoovBox() *MoovBox {
func (d *Demuxer) GetMoovBox() *box.MoovBox {
return d.moov
}
// CreateRTMPSequenceFrame builds the RTMP sequence (decoder configuration)
// frame for a track from its codec extra data. Exactly one of
// videoSeq/audioSeq is non-nil depending on the codec; codecs not handled
// here yield (nil, nil, nil).
func (d *Demuxer) CreateRTMPSequenceFrame(track *Track, allocator *util.ScalableMemoryAllocator) (videoSeq *rtmp.RTMPVideo, audioSeq *rtmp.RTMPAudio, err error) {
	switch track.Cid {
	case box.MP4_CODEC_H264:
		videoSeq = &rtmp.RTMPVideo{}
		videoSeq.SetAllocator(allocator)
		// 0x17: keyframe + AVC; 0x00: sequence header; three zero CTS bytes.
		videoSeq.Append([]byte{0x17, 0x00, 0x00, 0x00, 0x00}, track.ExtraData)
	case box.MP4_CODEC_H265:
		videoSeq = &rtmp.RTMPVideo{}
		videoSeq.SetAllocator(allocator)
		// Enhanced-RTMP keyframe header with PacketTypeSequenceStart,
		// followed by the HEVC FourCC and the configuration record.
		videoSeq.Append([]byte{0b1001_0000 | rtmp.PacketTypeSequenceStart}, codec.FourCC_H265[:], track.ExtraData)
	case box.MP4_CODEC_AAC:
		audioSeq = &rtmp.RTMPAudio{}
		audioSeq.SetAllocator(allocator)
		// 0xaf: AAC audio tag header; 0x00: sequence header.
		audioSeq.Append([]byte{0xaf, 0x00}, track.ExtraData)
	}
	return
}
// ConvertSampleToRTMP converts one demuxed MP4 sample into an RTMP media
// frame. Exactly one of videoFrame/audioFrame is non-nil depending on the
// track's codec; unhandled codecs yield (nil, nil, nil). The sample's
// timestamp (in track timescale units) is converted to milliseconds and
// shifted by timestampOffset; sample.Data is attached as recycle bytes.
//
// Fix: the H264 branch wrote the FLV composition time as
// CTS>>24, CTS>>8, CTS. CompositionTime is a 24-bit big-endian field, so
// the correct shifts are 16/8/0 — the original silently worked only while
// CTS stayed below 65536.
func (d *Demuxer) ConvertSampleToRTMP(track *Track, sample box.Sample, allocator *util.ScalableMemoryAllocator, timestampOffset uint64) (videoFrame *rtmp.RTMPVideo, audioFrame *rtmp.RTMPAudio, err error) {
	// Track-timescale DTS -> milliseconds, plus the caller's offset.
	timestamp := uint32(uint64(sample.Timestamp)*1000/uint64(track.Timescale) + timestampOffset)
	switch track.Cid {
	case box.MP4_CODEC_H264:
		videoFrame = &rtmp.RTMPVideo{}
		videoFrame.SetAllocator(allocator)
		videoFrame.CTS = sample.CTS
		videoFrame.Timestamp = timestamp
		// FrameType|CodecID (0x17 key / 0x27 inter), AVCPacketType=1 (NALU),
		// then the 24-bit big-endian composition time.
		videoFrame.AppendOne([]byte{util.Conditional[byte](sample.KeyFrame, 0x17, 0x27), 0x01, byte(videoFrame.CTS >> 16), byte(videoFrame.CTS >> 8), byte(videoFrame.CTS)})
		videoFrame.AddRecycleBytes(sample.Data)
	case box.MP4_CODEC_H265:
		videoFrame = &rtmp.RTMPVideo{}
		videoFrame.SetAllocator(allocator)
		videoFrame.CTS = uint32(sample.CTS)
		videoFrame.Timestamp = timestamp
		// Enhanced-RTMP header: keyframe/inter flag plus packet type; the
		// 3-byte CTS is only present for PacketTypeCodedFrames.
		var head []byte
		var b0 byte = 0b1010_0000
		if sample.KeyFrame {
			b0 = 0b1001_0000
		}
		if videoFrame.CTS == 0 {
			head = videoFrame.NextN(5)
			head[0] = b0 | rtmp.PacketTypeCodedFramesX
		} else {
			head = videoFrame.NextN(8)
			head[0] = b0 | rtmp.PacketTypeCodedFrames
			util.PutBE(head[5:8], videoFrame.CTS) // cts
		}
		copy(head[1:], codec.FourCC_H265[:])
		videoFrame.AddRecycleBytes(sample.Data)
	case box.MP4_CODEC_AAC:
		audioFrame = &rtmp.RTMPAudio{}
		audioFrame.SetAllocator(allocator)
		audioFrame.Timestamp = timestamp
		// 0xaf: AAC tag header; 0x01: raw AAC frame.
		audioFrame.AppendOne([]byte{0xaf, 0x01})
		audioFrame.AddRecycleBytes(sample.Data)
	case box.MP4_CODEC_G711A:
		audioFrame = &rtmp.RTMPAudio{}
		audioFrame.SetAllocator(allocator)
		audioFrame.Timestamp = timestamp
		audioFrame.AppendOne([]byte{0x72}) // G.711 A-law tag header
		audioFrame.AddRecycleBytes(sample.Data)
	case box.MP4_CODEC_G711U:
		audioFrame = &rtmp.RTMPAudio{}
		audioFrame.SetAllocator(allocator)
		audioFrame.Timestamp = timestamp
		audioFrame.AppendOne([]byte{0x82}) // G.711 mu-law tag header
		audioFrame.AddRecycleBytes(sample.Data)
	}
	return
}
// GetRTMPSequenceFrames returns the pre-generated RTMP video and audio
// sequence frames; either may be nil if no matching track was processed.
func (d *Demuxer) GetRTMPSequenceFrames() (videoSeq *rtmp.RTMPVideo, audioSeq *rtmp.RTMPAudio) {
	return d.RTMPVideoSequence, d.RTMPAudioSequence
}
// IterateRTMPFrames iterates over the pre-generated RTMP frames, adding
// timestampOffset to each frame's timestamp before yielding it.
// NOTE(review): the offset is applied by mutating the stored frames in
// place, so repeated calls accumulate offsets, and frames visited before an
// early stop keep their shifted timestamps — confirm callers expect this.
func (d *Demuxer) IterateRTMPFrames(timestampOffset uint64, yield func(*RTMPFrame) bool) {
	for i := range d.RTMPFrames {
		frame := &d.RTMPFrames[i]
		// Apply the offset to whichever concrete frame type is stored.
		switch f := frame.Frame.(type) {
		case *rtmp.RTMPVideo:
			f.Timestamp += uint32(timestampOffset)
		case *rtmp.RTMPAudio:
			f.Timestamp += uint32(timestampOffset)
		}
		if !yield(frame) {
			return
		}
	}
}
// GetMaxTimestamp returns the largest timestamp among the pre-generated
// RTMP frames, or zero when no frames have been generated.
func (d *Demuxer) GetMaxTimestamp() uint64 {
	var highest uint64
	for i := range d.RTMPFrames {
		var ts uint64
		switch f := d.RTMPFrames[i].Frame.(type) {
		case *rtmp.RTMPVideo:
			ts = uint64(f.Timestamp)
		case *rtmp.RTMPAudio:
			ts = uint64(f.Timestamp)
		}
		if ts > highest {
			highest = ts
		}
	}
	return highest
}
// generateRTMPFrames pre-builds the RTMP sequence headers and every media
// frame for all tracks into d.RTMPFrames, sorted by timestamp. All sample
// payloads are read through d.reader and allocated from allocator, so the
// entire file's media data is held in memory afterwards.
// NOTE(review): slices.SortFunc is not stable, so frames with equal
// normalized timestamps may be reordered — confirm consumers tolerate this.
func (d *Demuxer) generateRTMPFrames(allocator *util.ScalableMemoryAllocator) (err error) {
	// Generate the sequence (decoder configuration) frames, one per media kind.
	for _, track := range d.Tracks {
		if track.Cid.IsVideo() && d.RTMPVideoSequence == nil {
			d.RTMPVideoSequence, _, err = d.CreateRTMPSequenceFrame(track, allocator)
			if err != nil {
				return err
			}
		} else if track.Cid.IsAudio() && d.RTMPAudioSequence == nil {
			_, d.RTMPAudioSequence, err = d.CreateRTMPSequenceFrame(track, allocator)
			if err != nil {
				return err
			}
		}
	}
	// Pre-generate all RTMP frames.
	d.RTMPFrames = make([]RTMPFrame, 0)
	// Collect every sample (with its payload) so they can be sorted by time.
	type sampleInfo struct {
		track       *Track
		sample      box.Sample
		sampleIndex uint32
		trackIndex  int
	}
	var allSamples []sampleInfo
	for trackIdx, track := range d.Tracks {
		for sampleIdx, sample := range track.Samplelist {
			// Read the sample payload from the file.
			if _, err = d.reader.Seek(sample.Offset, io.SeekStart); err != nil {
				return err
			}
			sample.Data = allocator.Malloc(sample.Size)
			if _, err = io.ReadFull(d.reader, sample.Data); err != nil {
				allocator.Free(sample.Data)
				return err
			}
			allSamples = append(allSamples, sampleInfo{
				track:       track,
				sample:      sample,
				sampleIndex: uint32(sampleIdx),
				trackIndex:  trackIdx,
			})
		}
	}
	// Sort samples by timestamp normalized to the movie timescale.
	slices.SortFunc(allSamples, func(a, b sampleInfo) int {
		timeA := uint64(a.sample.Timestamp) * uint64(d.moov.MVHD.Timescale) / uint64(a.track.Timescale)
		timeB := uint64(b.sample.Timestamp) * uint64(d.moov.MVHD.Timescale) / uint64(b.track.Timescale)
		if timeA < timeB {
			return -1
		} else if timeA > timeB {
			return 1
		}
		return 0
	})
	// Convert each sample into its RTMP frame, preserving the sorted order.
	for _, sampleInfo := range allSamples {
		videoFrame, audioFrame, err := d.ConvertSampleToRTMP(sampleInfo.track, sampleInfo.sample, allocator, 0)
		if err != nil {
			return err
		}
		if videoFrame != nil {
			d.RTMPFrames = append(d.RTMPFrames, RTMPFrame{Frame: videoFrame})
		}
		if audioFrame != nil {
			d.RTMPFrames = append(d.RTMPFrames, RTMPFrame{Frame: audioFrame})
		}
	}
	return nil
}
// createRTMPSampleCallback returns a box.SampleCallback that, for each
// parsed sample, reads its payload from d.reader (allocated from
// d.RTMPAllocator), converts it to an RTMP frame, and appends it to
// d.RTMPFrames. Calling this factory also creates the track's RTMP
// sequence frame if it has not been generated yet; sequence-frame creation
// errors are silently ignored here.
func (d *Demuxer) createRTMPSampleCallback(track *Track, trak *box.TrakBox) box.SampleCallback {
	// Generate the sequence (decoder configuration) frame first.
	if track.Cid.IsVideo() && d.RTMPVideoSequence == nil {
		videoSeq, _, err := d.CreateRTMPSequenceFrame(track, d.RTMPAllocator)
		if err == nil {
			d.RTMPVideoSequence = videoSeq
		}
	} else if track.Cid.IsAudio() && d.RTMPAudioSequence == nil {
		_, audioSeq, err := d.CreateRTMPSequenceFrame(track, d.RTMPAllocator)
		if err == nil {
			d.RTMPAudioSequence = audioSeq
		}
	}
	return func(sample *box.Sample, sampleIndex int) error {
		// Read the sample payload from the file.
		if _, err := d.reader.Seek(sample.Offset, io.SeekStart); err != nil {
			return err
		}
		sample.Data = d.RTMPAllocator.Malloc(sample.Size)
		if _, err := io.ReadFull(d.reader, sample.Data); err != nil {
			d.RTMPAllocator.Free(sample.Data)
			return err
		}
		// Convert the sample to its RTMP representation.
		videoFrame, audioFrame, err := d.ConvertSampleToRTMP(track, *sample, d.RTMPAllocator, 0)
		if err != nil {
			return err
		}
		// Collect the generated frames internally.
		if videoFrame != nil {
			d.RTMPFrames = append(d.RTMPFrames, RTMPFrame{Frame: videoFrame})
		}
		if audioFrame != nil {
			d.RTMPFrames = append(d.RTMPFrames, RTMPFrame{Frame: audioFrame})
		}
		return nil
	}
}

View File

@@ -29,7 +29,8 @@ type (
moov IBox
mdatOffset uint64
mdatSize uint64
StreamPath string // Added to store the stream path
StreamPath string // Added to store the stream path
Metadata *Metadata // 添加元数据支持
}
)
@@ -52,6 +53,7 @@ func NewMuxer(flag Flag) *Muxer {
Tracks: make(map[uint32]*Track),
Flag: flag,
fragDuration: 2000,
Metadata: &Metadata{Custom: make(map[string]string)},
}
}
@@ -59,6 +61,8 @@ func NewMuxer(flag Flag) *Muxer {
func NewMuxerWithStreamPath(flag Flag, streamPath string) *Muxer {
muxer := NewMuxer(flag)
muxer.StreamPath = streamPath
muxer.Metadata.Producer = "M7S Live"
muxer.Metadata.Album = streamPath
return muxer
}
@@ -232,10 +236,10 @@ func (m *Muxer) MakeMoov() IBox {
children = append(children, m.makeMvex())
}
// Add user data box with stream path if available
if m.StreamPath != "" {
streamPathBox := CreateStreamPathBox(m.StreamPath)
udta := CreateUserDataBox(streamPathBox)
// Add user data box with metadata if available
metadataEntries := CreateMetadataEntries(m.Metadata)
if len(metadataEntries) > 0 {
udta := CreateUserDataBox(metadataEntries...)
children = append(children, udta)
}
@@ -365,3 +369,82 @@ func (m *Muxer) WriteTrailer(file *os.File) (err error) {
func (m *Muxer) SetFragmentDuration(duration uint32) {
m.fragDuration = duration
}
// SetMetadata replaces the muxer's metadata set, ensuring the Custom map is
// initialized so AddCustomMetadata and MakeMoov can use it safely.
// A nil argument resets to an empty metadata set instead of panicking
// (the original dereferenced metadata unconditionally).
func (m *Muxer) SetMetadata(metadata *Metadata) {
	if metadata == nil {
		metadata = &Metadata{}
	}
	if metadata.Custom == nil {
		metadata.Custom = make(map[string]string)
	}
	m.Metadata = metadata
}
// SetTitle sets the title metadata; it is emitted with the other metadata
// entries when the moov box is built.
func (m *Muxer) SetTitle(title string) {
	m.Metadata.Title = title
}
// SetArtist sets the artist/author metadata.
func (m *Muxer) SetArtist(artist string) {
	m.Metadata.Artist = artist
}
// SetAlbum sets the album metadata (NewMuxerWithStreamPath pre-fills it
// with the stream path).
func (m *Muxer) SetAlbum(album string) {
	m.Metadata.Album = album
}
// SetComment sets the comment/description metadata.
func (m *Muxer) SetComment(comment string) {
	m.Metadata.Comment = comment
}
// SetGenre sets the genre metadata.
func (m *Muxer) SetGenre(genre string) {
	m.Metadata.Genre = genre
}
// SetCopyright sets the copyright metadata; it is written as a dedicated
// copyright box rather than a plain text entry.
func (m *Muxer) SetCopyright(copyright string) {
	m.Metadata.Copyright = copyright
}
// SetEncoder sets the encoder metadata.
func (m *Muxer) SetEncoder(encoder string) {
	m.Metadata.Encoder = encoder
}
// SetDate sets the date metadata. Expected format: YYYY-MM-DD (the value
// is stored as-is and not validated).
func (m *Muxer) SetDate(date string) {
	m.Metadata.Date = date
}
// SetCurrentDate sets the date metadata to today's date in YYYY-MM-DD form.
func (m *Muxer) SetCurrentDate() {
	m.Metadata.Date = GetCurrentDateString()
}
// AddCustomMetadata adds a custom key-value metadata entry, lazily
// initializing the Custom map so the call is safe even when SetMetadata
// received a struct without one.
func (m *Muxer) AddCustomMetadata(key, value string) {
	if m.Metadata.Custom == nil {
		m.Metadata.Custom = make(map[string]string)
	}
	m.Metadata.Custom[key] = value
}
// SetKeywords sets the keywords metadata.
func (m *Muxer) SetKeywords(keywords string) {
	m.Metadata.Keywords = keywords
}
// SetLocation sets the location metadata.
func (m *Muxer) SetLocation(location string) {
	m.Metadata.Location = location
}
// SetRating sets the rating metadata, clamped to the valid 0-5 range.
func (m *Muxer) SetRating(rating uint8) {
	m.Metadata.Rating = min(rating, 5)
}

View File

@@ -3,13 +3,12 @@ package mp4
import (
"errors"
"io"
"slices"
"strings"
"time"
m7s "m7s.live/v5"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/util"
"m7s.live/v5/plugin/mp4/pkg/box"
rtmp "m7s.live/v5/plugin/rtmp/pkg"
)
@@ -20,6 +19,10 @@ type HTTPReader struct {
func (p *HTTPReader) Run() (err error) {
pullJob := &p.PullJob
publisher := pullJob.Publisher
if publisher == nil {
io.Copy(io.Discard, p.ReadCloser)
return
}
allocator := util.NewScalableMemoryAllocator(1 << 10)
var demuxer *Demuxer
defer allocator.Recycle()
@@ -31,102 +34,113 @@ func (p *HTTPReader) Run() (err error) {
content, err = io.ReadAll(p.ReadCloser)
demuxer = NewDemuxer(strings.NewReader(string(content)))
}
if err = demuxer.Demux(); err != nil {
// 设置RTMP分配器以启用RTMP帧收集
demuxer.RTMPAllocator = allocator
if err = demuxer.DemuxWithAllocator(allocator); err != nil {
return
}
// 获取demuxer内部收集的RTMP帧
rtmpFrames := demuxer.RTMPFrames
// 按时间戳排序所有帧
slices.SortFunc(rtmpFrames, func(a, b RTMPFrame) int {
var timeA, timeB uint64
switch f := a.Frame.(type) {
case *rtmp.RTMPVideo:
timeA = uint64(f.Timestamp)
case *rtmp.RTMPAudio:
timeA = uint64(f.Timestamp)
}
switch f := b.Frame.(type) {
case *rtmp.RTMPVideo:
timeB = uint64(f.Timestamp)
case *rtmp.RTMPAudio:
timeB = uint64(f.Timestamp)
}
if timeA < timeB {
return -1
} else if timeA > timeB {
return 1
}
return 0
})
publisher.OnSeek = func(seekTime time.Time) {
p.Stop(errors.New("seek"))
pullJob.Args.Set(util.StartKey, seekTime.Local().Format(util.LocalTimeFormat))
pullJob.Connection.Args.Set(util.StartKey, seekTime.Local().Format(util.LocalTimeFormat))
newHTTPReader := &HTTPReader{}
pullJob.AddTask(newHTTPReader)
}
if pullJob.Args.Get(util.StartKey) != "" {
seekTime, _ := time.Parse(util.LocalTimeFormat, pullJob.Args.Get(util.StartKey))
if pullJob.Connection.Args.Get(util.StartKey) != "" {
seekTime, _ := time.Parse(util.LocalTimeFormat, pullJob.Connection.Args.Get(util.StartKey))
demuxer.SeekTime(uint64(seekTime.UnixMilli()))
}
for _, track := range demuxer.Tracks {
switch track.Cid {
case box.MP4_CODEC_H264:
var sequence rtmp.RTMPVideo
sequence.SetAllocator(allocator)
sequence.Append([]byte{0x17, 0x00, 0x00, 0x00, 0x00}, track.ExtraData)
err = publisher.WriteVideo(&sequence)
case box.MP4_CODEC_H265:
var sequence rtmp.RTMPVideo
sequence.SetAllocator(allocator)
sequence.Append([]byte{0b1001_0000 | rtmp.PacketTypeSequenceStart}, codec.FourCC_H265[:], track.ExtraData)
err = publisher.WriteVideo(&sequence)
case box.MP4_CODEC_AAC:
var sequence rtmp.RTMPAudio
sequence.SetAllocator(allocator)
sequence.Append([]byte{0xaf, 0x00}, track.ExtraData)
err = publisher.WriteAudio(&sequence)
// 读取预生成的 RTMP 序列帧
videoSeq, audioSeq := demuxer.GetRTMPSequenceFrames()
if videoSeq != nil {
err = publisher.WriteVideo(videoSeq)
if err != nil {
return err
}
}
for track, sample := range demuxer.ReadSample {
if p.IsStopped() {
break
if audioSeq != nil {
err = publisher.WriteAudio(audioSeq)
if err != nil {
return err
}
if _, err = demuxer.reader.Seek(sample.Offset, io.SeekStart); err != nil {
return
}
// 计算最大时间戳用于累计偏移
var maxTimestamp uint64
for _, frame := range rtmpFrames {
var timestamp uint64
switch f := frame.Frame.(type) {
case *rtmp.RTMPVideo:
timestamp = uint64(f.Timestamp)
case *rtmp.RTMPAudio:
timestamp = uint64(f.Timestamp)
}
sample.Data = allocator.Malloc(sample.Size)
if _, err = io.ReadFull(demuxer.reader, sample.Data); err != nil {
allocator.Free(sample.Data)
return
if timestamp > maxTimestamp {
maxTimestamp = timestamp
}
switch track.Cid {
case box.MP4_CODEC_H264:
var videoFrame rtmp.RTMPVideo
videoFrame.SetAllocator(allocator)
videoFrame.CTS = sample.CTS
videoFrame.Timestamp = sample.Timestamp * 1000 / track.Timescale
videoFrame.AppendOne([]byte{util.Conditional[byte](sample.KeyFrame, 0x17, 0x27), 0x01, byte(videoFrame.CTS >> 24), byte(videoFrame.CTS >> 8), byte(videoFrame.CTS)})
videoFrame.AddRecycleBytes(sample.Data)
err = publisher.WriteVideo(&videoFrame)
case box.MP4_CODEC_H265:
var videoFrame rtmp.RTMPVideo
videoFrame.SetAllocator(allocator)
videoFrame.CTS = uint32(sample.CTS)
videoFrame.Timestamp = sample.Timestamp * 1000 / track.Timescale
var head []byte
var b0 byte = 0b1010_0000
if sample.KeyFrame {
b0 = 0b1001_0000
}
var timestampOffset uint64
loop := p.PullJob.Loop
for {
// 使用预生成的 RTMP 帧进行播放
for _, frame := range rtmpFrames {
if p.IsStopped() {
return nil
}
if videoFrame.CTS == 0 {
head = videoFrame.NextN(5)
head[0] = b0 | rtmp.PacketTypeCodedFramesX
} else {
head = videoFrame.NextN(8)
head[0] = b0 | rtmp.PacketTypeCodedFrames
util.PutBE(head[5:8], videoFrame.CTS) // cts
// 应用时间戳偏移
switch f := frame.Frame.(type) {
case *rtmp.RTMPVideo:
f.Timestamp += uint32(timestampOffset)
err = publisher.WriteVideo(f)
case *rtmp.RTMPAudio:
f.Timestamp += uint32(timestampOffset)
err = publisher.WriteAudio(f)
}
if err != nil {
return err
}
copy(head[1:], codec.FourCC_H265[:])
videoFrame.AddRecycleBytes(sample.Data)
err = publisher.WriteVideo(&videoFrame)
case box.MP4_CODEC_AAC:
var audioFrame rtmp.RTMPAudio
audioFrame.SetAllocator(allocator)
audioFrame.Timestamp = sample.Timestamp * 1000 / track.Timescale
audioFrame.AppendOne([]byte{0xaf, 0x01})
audioFrame.AddRecycleBytes(sample.Data)
err = publisher.WriteAudio(&audioFrame)
case box.MP4_CODEC_G711A:
var audioFrame rtmp.RTMPAudio
audioFrame.SetAllocator(allocator)
audioFrame.Timestamp = sample.Timestamp * 1000 / track.Timescale
audioFrame.AppendOne([]byte{0x72})
audioFrame.AddRecycleBytes(sample.Data)
err = publisher.WriteAudio(&audioFrame)
case box.MP4_CODEC_G711U:
var audioFrame rtmp.RTMPAudio
audioFrame.SetAllocator(allocator)
audioFrame.Timestamp = sample.Timestamp * 1000 / track.Timescale
audioFrame.AppendOne([]byte{0x82})
audioFrame.AddRecycleBytes(sample.Data)
err = publisher.WriteAudio(&audioFrame)
}
if loop >= 0 {
loop--
if loop == -1 {
break
}
}
// 每次循环后累计时间戳偏移,确保下次循环的时间戳是递增的
timestampOffset += maxTimestamp + 1
}
return
}

View File

@@ -1,11 +1,11 @@
package mp4
import (
"os"
"strings"
"time"
m7s "m7s.live/v5"
"m7s.live/v5/pkg"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/config"
"m7s.live/v5/pkg/task"
@@ -39,152 +39,159 @@ func NewPuller(conf config.Pull) m7s.IPuller {
func (p *RecordReader) Run() (err error) {
pullJob := &p.PullJob
publisher := pullJob.Publisher
// allocator := util.NewScalableMemoryAllocator(1 << 10)
var ts, tsOffset int64
if publisher == nil {
return pkg.ErrDisabled
}
var realTime time.Time
// defer allocator.Recycle()
publisher.OnGetPosition = func() time.Time {
return realTime
}
for loop := 0; loop < p.Loop; loop++ {
nextStream:
for i, stream := range p.Streams {
tsOffset = ts
if p.File != nil {
p.File.Close()
// 简化的时间戳管理变量
var ts int64 // 当前时间戳
var tsOffset int64 // 时间戳偏移量
// 创建可复用的 DemuxerRange 实例
demuxerRange := &DemuxerRange{}
// 设置音视频额外数据回调(序列头)
demuxerRange.OnVideoExtraData = func(codecType box.MP4_CODEC_TYPE, data []byte) error {
switch codecType {
case box.MP4_CODEC_H264:
var sequence rtmp.RTMPVideo
sequence.Append([]byte{0x17, 0x00, 0x00, 0x00, 0x00}, data)
err = publisher.WriteVideo(&sequence)
case box.MP4_CODEC_H265:
var sequence rtmp.RTMPVideo
sequence.Append([]byte{0b1001_0000 | rtmp.PacketTypeSequenceStart}, codec.FourCC_H265[:], data)
err = publisher.WriteVideo(&sequence)
}
return err
}
demuxerRange.OnAudioExtraData = func(codecType box.MP4_CODEC_TYPE, data []byte) error {
if codecType == box.MP4_CODEC_AAC {
var sequence rtmp.RTMPAudio
sequence.Append([]byte{0xaf, 0x00}, data)
err = publisher.WriteAudio(&sequence)
}
return err
}
// 设置视频样本回调
demuxerRange.OnVideoSample = func(codecType box.MP4_CODEC_TYPE, sample box.Sample) error {
if publisher.Paused != nil {
publisher.Paused.Await()
}
// 检查是否需要跳转
if needSeek, seekErr := p.CheckSeek(); seekErr != nil {
return seekErr
} else if needSeek {
return pkg.ErrSkip
}
// 简化的时间戳处理
if int64(sample.Timestamp)+tsOffset < 0 {
ts = 0
} else {
ts = int64(sample.Timestamp) + tsOffset
}
// 更新实时时间
realTime = time.Now() // 这里可以根据需要调整为更精确的时间计算
// 根据编解码器类型处理视频帧
switch codecType {
case box.MP4_CODEC_H264:
var videoFrame rtmp.RTMPVideo
videoFrame.CTS = sample.CTS
videoFrame.Timestamp = uint32(ts)
videoFrame.Append([]byte{util.Conditional[byte](sample.KeyFrame, 0x17, 0x27), 0x01, byte(videoFrame.CTS >> 24), byte(videoFrame.CTS >> 8), byte(videoFrame.CTS)}, sample.Data)
err = publisher.WriteVideo(&videoFrame)
case box.MP4_CODEC_H265:
var videoFrame rtmp.RTMPVideo
videoFrame.CTS = sample.CTS
videoFrame.Timestamp = uint32(ts)
var head []byte
var b0 byte = 0b1010_0000
if sample.KeyFrame {
b0 = 0b1001_0000
}
p.File, err = os.Open(stream.FilePath)
if err != nil {
if videoFrame.CTS == 0 {
head = videoFrame.NextN(5)
head[0] = b0 | rtmp.PacketTypeCodedFramesX
} else {
head = videoFrame.NextN(8)
head[0] = b0 | rtmp.PacketTypeCodedFrames
util.PutBE(head[5:8], videoFrame.CTS) // cts
}
copy(head[1:], codec.FourCC_H265[:])
videoFrame.AppendOne(sample.Data)
err = publisher.WriteVideo(&videoFrame)
}
return err
}
// 设置音频样本回调
demuxerRange.OnAudioSample = func(codecType box.MP4_CODEC_TYPE, sample box.Sample) error {
if publisher.Paused != nil {
publisher.Paused.Await()
}
// 检查是否需要跳转
if needSeek, seekErr := p.CheckSeek(); seekErr != nil {
return seekErr
} else if needSeek {
return pkg.ErrSkip
}
// 简化的时间戳处理
if int64(sample.Timestamp)+tsOffset < 0 {
ts = 0
} else {
ts = int64(sample.Timestamp) + tsOffset
}
// 根据编解码器类型处理音频帧
switch codecType {
case box.MP4_CODEC_AAC:
var audioFrame rtmp.RTMPAudio
audioFrame.Timestamp = uint32(ts)
audioFrame.Append([]byte{0xaf, 0x01}, sample.Data)
err = publisher.WriteAudio(&audioFrame)
case box.MP4_CODEC_G711A:
var audioFrame rtmp.RTMPAudio
audioFrame.Timestamp = uint32(ts)
audioFrame.Append([]byte{0x72}, sample.Data)
err = publisher.WriteAudio(&audioFrame)
case box.MP4_CODEC_G711U:
var audioFrame rtmp.RTMPAudio
audioFrame.Timestamp = uint32(ts)
audioFrame.Append([]byte{0x82}, sample.Data)
err = publisher.WriteAudio(&audioFrame)
}
return err
}
for loop := 0; loop < p.Loop; loop++ {
// 每次循环时更新时间戳偏移量以保持连续性
tsOffset = ts
demuxerRange.StartTime = p.PullStartTime
if !p.PullEndTime.IsZero() {
demuxerRange.EndTime = p.PullEndTime
} else if p.MaxTS > 0 {
demuxerRange.EndTime = p.PullStartTime.Add(time.Duration(p.MaxTS) * time.Millisecond)
} else {
demuxerRange.EndTime = time.Now()
}
if err = demuxerRange.Demux(p.Context); err != nil {
if err == pkg.ErrSkip {
loop--
continue
}
p.demuxer = NewDemuxer(p.File)
if err = p.demuxer.Demux(); err != nil {
return
}
for _, track := range p.demuxer.Tracks {
switch track.Cid {
case box.MP4_CODEC_H264:
var sequence rtmp.RTMPVideo
// sequence.SetAllocator(allocator)
sequence.Append([]byte{0x17, 0x00, 0x00, 0x00, 0x00}, track.ExtraData)
err = publisher.WriteVideo(&sequence)
case box.MP4_CODEC_H265:
var sequence rtmp.RTMPVideo
// sequence.SetAllocator(allocator)
sequence.Append([]byte{0b1001_0000 | rtmp.PacketTypeSequenceStart}, codec.FourCC_H265[:], track.ExtraData)
err = publisher.WriteVideo(&sequence)
case box.MP4_CODEC_AAC:
var sequence rtmp.RTMPAudio
// sequence.SetAllocator(allocator)
sequence.Append([]byte{0xaf, 0x00}, track.ExtraData)
err = publisher.WriteAudio(&sequence)
}
}
if i == 0 {
startTimestamp := p.PullStartTime.Sub(stream.StartTime).Milliseconds()
if startTimestamp < 0 {
startTimestamp = 0
}
var startSample *box.Sample
if startSample, err = p.demuxer.SeekTime(uint64(startTimestamp)); err != nil {
tsOffset = 0
continue
}
tsOffset = -int64(startSample.Timestamp)
}
for track, sample := range p.demuxer.ReadSample {
if p.IsStopped() {
return p.StopReason()
}
if publisher.Paused != nil {
publisher.Paused.Await()
}
if needSeek, err := p.CheckSeek(); err != nil {
continue
} else if needSeek {
goto nextStream
}
// if _, err = p.demuxer.reader.Seek(sample.Offset, io.SeekStart); err != nil {
// return
// }
sampleOffset := int(sample.Offset) - int(p.demuxer.mdatOffset)
if sampleOffset < 0 || sampleOffset+sample.Size > len(p.demuxer.mdat.Data) {
return
}
sample.Data = p.demuxer.mdat.Data[sampleOffset : sampleOffset+sample.Size]
// sample.Data = allocator.Malloc(sample.Size)
// if _, err = io.ReadFull(p.demuxer.reader, sample.Data); err != nil {
// allocator.Free(sample.Data)
// return
// }
if int64(sample.Timestamp)+tsOffset < 0 {
ts = 0
} else {
ts = int64(sample.Timestamp + uint32(tsOffset))
}
realTime = stream.StartTime.Add(time.Duration(sample.Timestamp) * time.Millisecond)
if p.MaxTS > 0 && ts > p.MaxTS {
return
}
switch track.Cid {
case box.MP4_CODEC_H264:
var videoFrame rtmp.RTMPVideo
// videoFrame.SetAllocator(allocator)
videoFrame.CTS = sample.CTS
videoFrame.Timestamp = uint32(ts)
videoFrame.Append([]byte{util.Conditional[byte](sample.KeyFrame, 0x17, 0x27), 0x01, byte(videoFrame.CTS >> 24), byte(videoFrame.CTS >> 8), byte(videoFrame.CTS)}, sample.Data)
// videoFrame.AddRecycleBytes(sample.Data)
err = publisher.WriteVideo(&videoFrame)
case box.MP4_CODEC_H265:
var videoFrame rtmp.RTMPVideo
// videoFrame.SetAllocator(allocator)
videoFrame.CTS = sample.CTS
videoFrame.Timestamp = uint32(ts)
var head []byte
var b0 byte = 0b1010_0000
if sample.KeyFrame {
b0 = 0b1001_0000
}
if videoFrame.CTS == 0 {
head = videoFrame.NextN(5)
head[0] = b0 | rtmp.PacketTypeCodedFramesX
} else {
head = videoFrame.NextN(8)
head[0] = b0 | rtmp.PacketTypeCodedFrames
util.PutBE(head[5:8], videoFrame.CTS) // cts
}
copy(head[1:], codec.FourCC_H265[:])
videoFrame.AppendOne(sample.Data)
// videoFrame.AddRecycleBytes(sample.Data)
err = publisher.WriteVideo(&videoFrame)
case box.MP4_CODEC_AAC:
var audioFrame rtmp.RTMPAudio
// audioFrame.SetAllocator(allocator)
audioFrame.Timestamp = uint32(ts)
audioFrame.Append([]byte{0xaf, 0x01}, sample.Data)
// audioFrame.AddRecycleBytes(sample.Data)
err = publisher.WriteAudio(&audioFrame)
case box.MP4_CODEC_G711A:
var audioFrame rtmp.RTMPAudio
// audioFrame.SetAllocator(allocator)
audioFrame.Timestamp = uint32(ts)
audioFrame.Append([]byte{0x72}, sample.Data)
// audioFrame.AddRecycleBytes(sample.Data)
err = publisher.WriteAudio(&audioFrame)
case box.MP4_CODEC_G711U:
var audioFrame rtmp.RTMPAudio
// audioFrame.SetAllocator(allocator)
audioFrame.Timestamp = uint32(ts)
audioFrame.Append([]byte{0x82}, sample.Data)
// audioFrame.AddRecycleBytes(sample.Data)
err = publisher.WriteAudio(&audioFrame)
}
}
return err
}
}
return

View File

@@ -7,7 +7,6 @@ import (
"path/filepath"
"time"
"gorm.io/gorm"
m7s "m7s.live/v5"
"m7s.live/v5/pkg"
"m7s.live/v5/pkg/codec"
@@ -107,39 +106,6 @@ func (t *writeTrailerTask) Run() (err error) {
return
}
type eventRecordCheck struct {
task.Task
DB *gorm.DB
streamPath string
}
func (t *eventRecordCheck) Run() (err error) {
var eventRecordStreams []m7s.RecordStream
queryRecord := m7s.RecordStream{
EventLevel: m7s.EventLevelHigh,
Mode: m7s.RecordModeEvent,
Type: "mp4",
StreamPath: t.streamPath,
}
t.DB.Where(&queryRecord).Find(&eventRecordStreams) //搜索事件录像,且为重要事件(无法自动删除)
if len(eventRecordStreams) > 0 {
for _, recordStream := range eventRecordStreams {
var unimportantEventRecordStreams []m7s.RecordStream
queryRecord.EventLevel = m7s.EventLevelLow
queryRecord.Mode = m7s.RecordModeAuto
query := `start_time <= ? and end_time >= ?`
t.DB.Where(&queryRecord).Where(query, recordStream.EndTime, recordStream.StartTime).Find(&unimportantEventRecordStreams)
if len(unimportantEventRecordStreams) > 0 {
for _, unimportantEventRecordStream := range unimportantEventRecordStreams {
unimportantEventRecordStream.EventLevel = m7s.EventLevelHigh
t.DB.Save(&unimportantEventRecordStream)
}
}
}
}
return
}
func init() {
m7s.Servers.AddTask(&writeTrailerQueueTask)
}
@@ -150,20 +116,12 @@ func NewRecorder(conf config.Record) m7s.IRecorder {
type Recorder struct {
m7s.DefaultRecorder
muxer *Muxer
file *os.File
stream m7s.RecordStream
muxer *Muxer
file *os.File
}
func (r *Recorder) writeTailer(end time.Time) {
r.stream.EndTime = end
if r.RecordJob.Plugin.DB != nil {
r.RecordJob.Plugin.DB.Save(&r.stream)
writeTrailerQueueTask.AddTask(&eventRecordCheck{
DB: r.RecordJob.Plugin.DB,
streamPath: r.stream.StreamPath,
})
}
r.WriteTail(end, &writeTrailerQueueTask)
writeTrailerQueueTask.AddTask(&writeTrailerTask{
muxer: r.muxer,
file: r.file,
@@ -178,46 +136,7 @@ var CustomFileName = func(job *m7s.RecordJob) string {
}
func (r *Recorder) createStream(start time.Time) (err error) {
recordJob := &r.RecordJob
sub := recordJob.Subscriber
r.stream = m7s.RecordStream{
StartTime: start,
StreamPath: sub.StreamPath,
FilePath: CustomFileName(&r.RecordJob),
EventId: recordJob.EventId,
EventDesc: recordJob.EventDesc,
EventName: recordJob.EventName,
EventLevel: recordJob.EventLevel,
BeforeDuration: recordJob.BeforeDuration,
AfterDuration: recordJob.AfterDuration,
Mode: recordJob.Mode,
Type: "mp4",
}
dir := filepath.Dir(r.stream.FilePath)
if err = os.MkdirAll(dir, 0755); err != nil {
return
}
r.file, err = os.Create(r.stream.FilePath)
if err != nil {
return
}
if recordJob.RecConf.Type == "fmp4" {
r.stream.Type = "fmp4"
r.muxer = NewMuxerWithStreamPath(FLAG_FRAGMENT, r.stream.StreamPath)
} else {
r.muxer = NewMuxerWithStreamPath(0, r.stream.StreamPath)
}
r.muxer.WriteInitSegment(r.file)
if sub.Publisher.HasAudioTrack() {
r.stream.AudioCodec = sub.Publisher.AudioTrack.ICodecCtx.String()
}
if sub.Publisher.HasVideoTrack() {
r.stream.VideoCodec = sub.Publisher.VideoTrack.ICodecCtx.String()
}
if recordJob.Plugin.DB != nil {
recordJob.Plugin.DB.Save(&r.stream)
}
return
return r.CreateStream(start, CustomFileName)
}
func (r *Recorder) Dispose() {
@@ -231,17 +150,28 @@ func (r *Recorder) Run() (err error) {
sub := recordJob.Subscriber
var audioTrack, videoTrack *Track
startTime := time.Now()
if recordJob.BeforeDuration > 0 {
startTime = startTime.Add(-recordJob.BeforeDuration)
if recordJob.Event != nil {
startTime = startTime.Add(-time.Duration(recordJob.Event.BeforeDuration) * time.Millisecond)
}
err = r.createStream(startTime)
if err != nil {
return
}
r.file, err = os.Create(r.Event.FilePath)
if err != nil {
return
}
if recordJob.RecConf.Type == "fmp4" {
r.Event.Type = "fmp4"
r.muxer = NewMuxerWithStreamPath(FLAG_FRAGMENT, r.Event.StreamPath)
} else {
r.muxer = NewMuxerWithStreamPath(0, r.Event.StreamPath)
}
r.muxer.WriteInitSegment(r.file)
var at, vt *pkg.AVTrack
checkEventRecordStop := func(absTime uint32) (err error) {
if duration := int64(absTime); time.Duration(duration)*time.Millisecond >= recordJob.AfterDuration+recordJob.BeforeDuration {
if absTime >= recordJob.Event.AfterDuration+recordJob.Event.BeforeDuration {
r.RecordJob.Stop(task.ErrStopByUser)
}
return
@@ -269,8 +199,9 @@ func (r *Recorder) Run() (err error) {
}
return m7s.PlayBlock(sub, func(audio *pkg.RawAudio) error {
r.Event.Duration = sub.AudioReader.AbsTime
if sub.VideoReader == nil {
if recordJob.AfterDuration != 0 {
if recordJob.Event != nil {
err := checkEventRecordStop(sub.VideoReader.AbsTime)
if err != nil {
return err
@@ -313,8 +244,9 @@ func (r *Recorder) Run() (err error) {
Timestamp: uint32(dts),
})
}, func(video *rtmp.RTMPVideo) error {
r.Event.Duration = sub.VideoReader.AbsTime
if sub.VideoReader.Value.IDR {
if recordJob.AfterDuration != 0 {
if recordJob.Event != nil {
err := checkEventRecordStop(sub.VideoReader.AbsTime)
if err != nil {
return err

View File

@@ -102,6 +102,28 @@ func (track *Track) Seek(dts uint64) int {
return -1
}
// SeekPreIDR returns the index of the last keyframe sample at or before
// the given position, so playback can start on a decodable frame.
//
// dts is the target position in milliseconds. The scan walks the sample
// list in order, remembering the most recent keyframe (video tracks
// only), and stops once a sample's timestamp — converted from track
// timescale units to milliseconds — passes dts. If no keyframe precedes
// dts, index 0 is returned.
//
// Fix: the conversion sample.Timestamp*1000/Timescale was previously
// done in uint32, where Timestamp*1000 overflows (wraps after ~47s at a
// 90kHz timescale), breaking the comparison on longer files. The math
// is now carried out in uint64.
func (track *Track) SeekPreIDR(dts uint64) int {
	idx := 0
	isVideo := track.Cid.IsVideo()
	for i, sample := range track.Samplelist {
		if isVideo && sample.KeyFrame {
			idx = i
		}
		// Convert to milliseconds in 64-bit space to avoid uint32 overflow.
		if uint64(sample.Timestamp)*1000/uint64(track.Timescale) > dts {
			break
		}
	}
	return idx
}
func (track *Track) makeEdtsBox() *ContainerBox {
return CreateContainerBox(TypeEDTS, track.makeElstBox())
}

View File

@@ -1,6 +1,7 @@
package plugin_mp4
import (
"fmt"
"os"
"path/filepath"
"strings"
@@ -15,24 +16,22 @@ import (
// RecordRecoveryTask 从录像文件中恢复数据库记录的任务
type RecordRecoveryTask struct {
task.TickTask
task.Task
DB *gorm.DB
plugin *MP4Plugin
}
// GetTickInterval 设置任务执行间隔
func (t *RecordRecoveryTask) GetTickInterval() time.Duration {
return 24 * time.Hour // 默认每天执行一次
// RecoveryStats 恢复统计信息
type RecoveryStats struct {
TotalFiles int
SuccessCount int
FailureCount int
SkippedCount int
Errors []error
}
// Tick 执行任务
func (t *RecordRecoveryTask) Tick(any) {
t.Info("Starting record recovery task")
t.recoverRecordsFromFiles()
}
// recoverRecordsFromFiles 从文件系统中恢复录像记录
func (t *RecordRecoveryTask) recoverRecordsFromFiles() {
// Start 从文件系统中恢复录像记录
func (t *RecordRecoveryTask) Start() error {
// 获取所有录像目录
var recordDirs []string
if len(t.plugin.GetCommonConf().OnPub.Record) > 0 {
@@ -46,20 +45,60 @@ func (t *RecordRecoveryTask) recoverRecordsFromFiles() {
recordDirs = append(recordDirs, dirPath)
}
// 遍历所有录像目录
for _, dir := range recordDirs {
t.scanDirectory(dir)
if len(recordDirs) == 0 {
t.Info("No record directories configured, skipping recovery")
return nil
}
stats := &RecoveryStats{}
// 遍历所有录像目录,收集所有错误而不是在第一个错误时停止
for _, dir := range recordDirs {
dirStats, err := t.scanDirectory(dir)
if dirStats != nil {
stats.TotalFiles += dirStats.TotalFiles
stats.SuccessCount += dirStats.SuccessCount
stats.FailureCount += dirStats.FailureCount
stats.SkippedCount += dirStats.SkippedCount
stats.Errors = append(stats.Errors, dirStats.Errors...)
}
if err != nil {
stats.Errors = append(stats.Errors, fmt.Errorf("failed to scan directory %s: %w", dir, err))
}
}
// 记录统计信息
t.Info("Recovery completed",
"totalFiles", stats.TotalFiles,
"success", stats.SuccessCount,
"failed", stats.FailureCount,
"skipped", stats.SkippedCount,
"errors", len(stats.Errors))
// 如果有错误,返回一个汇总错误
if len(stats.Errors) > 0 {
var errorMsgs []string
for _, err := range stats.Errors {
errorMsgs = append(errorMsgs, err.Error())
}
return fmt.Errorf("recovery completed with %d errors: %s", len(stats.Errors), strings.Join(errorMsgs, "; "))
}
return nil
}
// scanDirectory 扫描目录中的MP4文件
func (t *RecordRecoveryTask) scanDirectory(dir string) {
func (t *RecordRecoveryTask) scanDirectory(dir string) (*RecoveryStats, error) {
t.Info("Scanning directory for MP4 files", "directory", dir)
stats := &RecoveryStats{}
// 递归遍历目录
err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
t.Error("Error accessing path", "path", path, "error", err)
stats.Errors = append(stats.Errors, fmt.Errorf("failed to access path %s: %w", path, err))
return nil // 继续遍历
}
@@ -73,33 +112,50 @@ func (t *RecordRecoveryTask) scanDirectory(dir string) {
return nil
}
stats.TotalFiles++
// 检查文件是否已经有记录
var count int64
t.DB.Model(&m7s.RecordStream{}).Where("file_path = ?", path).Count(&count)
if err := t.DB.Model(&m7s.RecordStream{}).Where("file_path = ?", path).Count(&count).Error; err != nil {
t.Error("Failed to check existing record", "file", path, "error", err)
stats.FailureCount++
stats.Errors = append(stats.Errors, fmt.Errorf("failed to check existing record for %s: %w", path, err))
return nil
}
if count > 0 {
// 已有记录,跳过
stats.SkippedCount++
return nil
}
// 解析MP4文件并创建记录
t.recoverRecordFromFile(path)
if err := t.recoverRecordFromFile(path); err != nil {
stats.FailureCount++
stats.Errors = append(stats.Errors, fmt.Errorf("failed to recover record from %s: %w", path, err))
} else {
stats.SuccessCount++
}
return nil
})
if err != nil {
t.Error("Error walking directory", "directory", dir, "error", err)
return stats, fmt.Errorf("failed to walk directory %s: %w", dir, err)
}
return stats, nil
}
// recoverRecordFromFile 从MP4文件中恢复记录
func (t *RecordRecoveryTask) recoverRecordFromFile(filePath string) {
func (t *RecordRecoveryTask) recoverRecordFromFile(filePath string) error {
t.Info("Recovering record from file", "file", filePath)
// 打开文件
file, err := os.Open(filePath)
if err != nil {
t.Error("Failed to open MP4 file", "file", filePath, "error", err)
return
return fmt.Errorf("failed to open MP4 file %s: %w", filePath, err)
}
defer file.Close()
@@ -108,14 +164,14 @@ func (t *RecordRecoveryTask) recoverRecordFromFile(filePath string) {
err = demuxer.Demux()
if err != nil {
t.Error("Failed to demux MP4 file", "file", filePath, "error", err)
return
return fmt.Errorf("failed to demux MP4 file %s: %w", filePath, err)
}
// 提取文件信息
fileInfo, err := file.Stat()
if err != nil {
t.Error("Failed to get file info", "file", filePath, "error", err)
return
return fmt.Errorf("failed to get file info for %s: %w", filePath, err)
}
// 尝试从MP4文件中提取流路径如果没有则从文件名和路径推断
@@ -129,8 +185,6 @@ func (t *RecordRecoveryTask) recoverRecordFromFile(filePath string) {
FilePath: filePath,
StreamPath: streamPath,
Type: "mp4",
Mode: m7s.RecordModeAuto, // 默认为自动录制模式
EventLevel: m7s.EventLevelLow, // 默认为低级别事件
}
// 设置开始和结束时间
@@ -151,10 +205,11 @@ func (t *RecordRecoveryTask) recoverRecordFromFile(filePath string) {
err = t.DB.Create(&record).Error
if err != nil {
t.Error("Failed to save record to database", "file", filePath, "error", err)
return
return fmt.Errorf("failed to save record to database for %s: %w", filePath, err)
}
t.Info("Successfully recovered record", "file", filePath, "streamPath", streamPath)
return nil
}
// extractStreamPathFromMP4 从MP4文件中提取流路径
@@ -163,8 +218,8 @@ func extractStreamPathFromMP4(demuxer *mp4.Demuxer) string {
moov := demuxer.GetMoovBox()
if moov != nil && moov.UDTA != nil {
for _, entry := range moov.UDTA.Entries {
if streamPathBox, ok := entry.(*box.StreamPathBox); ok {
return streamPathBox.StreamPath
if entry.Type() == box.TypeALB {
return entry.(*box.TextDataBox).Text
}
}
}

338
plugin/mp4/util.go Normal file
View File

@@ -0,0 +1,338 @@
package plugin_mp4
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"image"
"image/color"
"image/jpeg"
"io"
"log"
"os"
"os/exec"
mp4 "m7s.live/v5/plugin/mp4/pkg"
"m7s.live/v5/plugin/mp4/pkg/box"
)
func saveAsJPG(img image.Image, path string) error {
file, err := os.Create(path)
if err != nil {
return err
}
defer file.Close()
opt := jpeg.Options{Quality: 90}
return jpeg.Encode(file, img, &opt)
}
// ExtractH264SPSPPS pulls the SPS and PPS NAL units out of an
// AVCDecoderConfigurationRecord (avcC extradata, ISO/IEC 14496-15).
// If the record carries several SPS or PPS entries, the last one of
// each kind is returned. The returned slices alias extraData.
func ExtractH264SPSPPS(extraData []byte) (sps, pps []byte, err error) {
	if len(extraData) < 7 {
		return nil, nil, fmt.Errorf("extradata too short")
	}
	cursor := 6
	// The SPS entry count lives in the low 5 bits of byte 5.
	for remaining := int(extraData[5] & 0x1F); remaining > 0; remaining-- {
		if cursor+2 > len(extraData) {
			return nil, nil, fmt.Errorf("invalid sps length")
		}
		size := int(binary.BigEndian.Uint16(extraData[cursor : cursor+2]))
		cursor += 2
		if cursor+size > len(extraData) {
			return nil, nil, fmt.Errorf("sps data overflow")
		}
		sps = extraData[cursor : cursor+size]
		cursor += size
	}
	// A one-byte PPS entry count follows the SPS entries.
	if cursor >= len(extraData) {
		return nil, nil, fmt.Errorf("missing pps count")
	}
	ppsTotal := int(extraData[cursor])
	cursor++
	for j := 0; j < ppsTotal; j++ {
		if cursor+2 > len(extraData) {
			return nil, nil, fmt.Errorf("invalid pps length")
		}
		size := int(binary.BigEndian.Uint16(extraData[cursor : cursor+2]))
		cursor += 2
		if cursor+size > len(extraData) {
			return nil, nil, fmt.Errorf("pps data overflow")
		}
		pps = extraData[cursor : cursor+size]
		cursor += size
	}
	return sps, pps, nil
}
// ConvertAVCCH264ToAnnexB converts an AVCC-framed H.264 access unit
// (4-byte big-endian NAL length prefixes) into Annex B format
// (start-code prefixes), as required by ffmpeg's raw H.264 input.
//
// When *isFirst is true and an IDR slice (type 5) is encountered, the
// SPS/PPS extracted from extraData (avcC record) are inserted in front
// of it and *isFirst is cleared so the parameter sets are emitted once.
// IDR and SEI units get a 4-byte start code, all others a 3-byte one.
// Truncated length prefixes terminate the conversion with the output
// accumulated so far.
//
// Fix: a zero-length NAL unit (legal in the length-prefixed framing)
// previously panicked on nalu[0]; such units are now skipped.
func ConvertAVCCH264ToAnnexB(data []byte, extraData []byte, isFirst *bool) ([]byte, error) {
	var buf bytes.Buffer
	pos := 0
	for pos < len(data) {
		if pos+4 > len(data) {
			break // truncated length prefix: stop, keep what we have
		}
		nalSize := binary.BigEndian.Uint32(data[pos : pos+4])
		pos += 4
		nalStart := pos
		pos += int(nalSize)
		if pos > len(data) {
			break // length prefix points past the buffer
		}
		nalu := data[nalStart:pos]
		if len(nalu) == 0 {
			continue // guard: empty NALU would panic on nalu[0]
		}
		nalType := nalu[0] & 0x1F
		// Insert SPS/PPS once, directly before the first IDR slice.
		if *isFirst && nalType == 5 {
			sps, pps, err := ExtractH264SPSPPS(extraData)
			if err != nil {
				return nil, err
			}
			buf.Write([]byte{0x00, 0x00, 0x00, 0x01})
			buf.Write(sps)
			buf.Write([]byte{0x00, 0x00, 0x00, 0x01})
			buf.Write(pps)
			*isFirst = false // only before the first IDR frame
		}
		// IDR (5) / SEI (6) use a 4-byte start code, everything else 3 bytes.
		if nalType == 5 || nalType == 6 {
			buf.Write([]byte{0x00, 0x00, 0x00, 0x01})
		} else {
			buf.Write([]byte{0x00, 0x00, 0x01})
		}
		buf.Write(nalu)
	}
	return buf.Bytes(), nil
}
/*
AVCC-format difference between H.264 and H.265:
H.265 adds the video parameter set (VPS), which describes multi-layer
coding, timing and related information.
*/

// ExtractHEVCParams walks the parameter-set arrays of an
// HEVCDecoderConfigurationRecord (hvcC extradata, ISO/IEC 14496-15) and
// returns copies of the first VPS, SPS and PPS NAL units found.
// All three must be present, otherwise an error is returned.
func ExtractHEVCParams(extraData []byte) (vps, sps, pps []byte, err error) {
	if len(extraData) < 22 {
		return nil, nil, nil, errors.New("extra data too short")
	}
	// The fixed hvcC header occupies the first 22 bytes.
	p := 22
	if p+2 > len(extraData) {
		return nil, nil, nil, errors.New("invalid extra data")
	}
	arrayCount := int(extraData[p])
	p++
	for a := 0; a < arrayCount; a++ {
		if p+3 > len(extraData) {
			break
		}
		kind := extraData[p] & 0x3F // low 6 bits carry the NAL unit type
		p++
		entries := int(binary.BigEndian.Uint16(extraData[p:]))
		p += 2
		for e := 0; e < entries; e++ {
			if p+2 > len(extraData) {
				break
			}
			size := int(binary.BigEndian.Uint16(extraData[p:]))
			p += 2
			if p+size > len(extraData) {
				break
			}
			unit := extraData[p : p+size]
			p += size
			// Keep only the first parameter set of each kind.
			switch kind {
			case 32: // VPS
				if vps == nil {
					vps = append([]byte(nil), unit...)
				}
			case 33: // SPS
				if sps == nil {
					sps = append([]byte(nil), unit...)
				}
			case 34: // PPS
				if pps == nil {
					pps = append([]byte(nil), unit...)
				}
			}
		}
	}
	if vps == nil || sps == nil || pps == nil {
		return nil, nil, nil, errors.New("missing required parameter sets")
	}
	return vps, sps, pps, nil
}
// ConvertAVCCHEVCToAnnexB converts an HVCC-framed H.265 access unit
// (4-byte big-endian NAL length prefixes) into Annex B start-code
// format for ffmpeg's raw HEVC input.
//
// On the first call (*isFirst true) the VPS/SPS/PPS extracted from
// extraData are emitted up front and *isFirst is cleared, so the
// parameter sets are written exactly once per stream.
//
// Fixes vs. the original:
//   - *isFirst was never cleared, so VPS/SPS/PPS were re-inserted
//     before every frame (the H.264 twin clears the flag);
//   - a zero-length NAL unit panicked on nalu[0]; it is now skipped.
func ConvertAVCCHEVCToAnnexB(data []byte, extraData []byte, isFirst *bool) ([]byte, error) {
	var buf bytes.Buffer
	pos := 0
	// Insert VPS/SPS/PPS once, at the start of the stream.
	if *isFirst {
		vps, sps, pps, err := ExtractHEVCParams(extraData)
		if err != nil {
			return nil, err
		}
		buf.Write([]byte{0x00, 0x00, 0x00, 0x01})
		buf.Write(vps)
		buf.Write([]byte{0x00, 0x00, 0x00, 0x01})
		buf.Write(sps)
		buf.Write([]byte{0x00, 0x00, 0x00, 0x01})
		buf.Write(pps)
		*isFirst = false // fix: emit parameter sets only once
	}
	// Re-frame each NAL unit with a start code.
	for pos < len(data) {
		if pos+4 > len(data) {
			break // truncated length prefix
		}
		nalSize := binary.BigEndian.Uint32(data[pos : pos+4])
		pos += 4
		nalStart := pos
		pos += int(nalSize)
		if pos > len(data) {
			break // length prefix points past the buffer
		}
		nalu := data[nalStart:pos]
		if len(nalu) == 0 {
			continue // fix: empty NALU would panic on nalu[0]
		}
		// The H.265 NAL unit type sits in bits 1-6 of the first header byte.
		nalType := (nalu[0] >> 1) & 0x3F
		// IDR slices (19/20) and parameter sets (32-34) get 4-byte start codes.
		if nalType == 19 || nalType == 20 || nalType >= 32 && nalType <= 34 {
			buf.Write([]byte{0x00, 0x00, 0x00, 0x01})
		} else {
			buf.Write([]byte{0x00, 0x00, 0x01})
		}
		buf.Write(nalu)
	}
	return buf.Bytes(), nil
}
// Equivalent CLI invocation:
// ffmpeg -hide_banner -i gop.mp4 -vf "select=eq(n\,15)" -vframes 1 -f image2 -pix_fmt bgr24 output.bmp

// ProcessWithFFmpeg pipes the given GOP samples into an external ffmpeg
// process as an Annex B elementary stream and extracts the frame at
// position `index` as raw BGR24 pixels, returned as an image.Image.
//
// samples are the AVCC-framed video samples of one GOP; videoTrack
// supplies the codec id, extradata and frame dimensions. Requires the
// `ffmpeg` binary on PATH. Conversion errors on individual samples are
// ignored (best effort), write/read failures abort the extraction.
//
// Fix: the pixel loop previously indexed ffmpeg's output without any
// length validation, so a short or empty output (bad index, decode
// failure) panicked; the size is now checked explicitly.
func ProcessWithFFmpeg(samples []box.Sample, index int, videoTrack *mp4.Track) (image.Image, error) {
	cmd := exec.Command("ffmpeg",
		"-hide_banner",
		"-i", "pipe:0",
		"-vf", fmt.Sprintf("select=eq(n\\,%d)", index),
		"-vframes", "1",
		"-pix_fmt", "bgr24",
		"-f", "rawvideo",
		"pipe:1")
	stdin, err := cmd.StdinPipe()
	if err != nil {
		return nil, err
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		return nil, err
	}
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return nil, err
	}
	// Drain stderr so ffmpeg never blocks on a full pipe; log it for diagnosis.
	go func() {
		errOutput, _ := io.ReadAll(stderr)
		log.Printf("FFmpeg stderr: %s", errOutput)
	}()
	if err = cmd.Start(); err != nil {
		log.Printf("cmd.Start失败: %v", err)
		return nil, err
	}
	// Feed the samples as an Annex B stream in a separate goroutine so
	// writing stdin and reading stdout cannot deadlock each other.
	go func() {
		defer stdin.Close()
		isFirst := true
		for _, sample := range samples {
			var annexb []byte
			if videoTrack.Cid == box.MP4_CODEC_H264 {
				annexb, _ = ConvertAVCCH264ToAnnexB(sample.Data, videoTrack.ExtraData, &isFirst)
			} else {
				annexb, _ = ConvertAVCCHEVCToAnnexB(sample.Data, videoTrack.ExtraData, &isFirst)
			}
			if _, err := stdin.Write(annexb); err != nil {
				log.Printf("写入失败: %v", err)
				break
			}
		}
	}()
	// Collect the raw BGR24 frame from stdout.
	var buf bytes.Buffer
	if _, err = io.Copy(&buf, stdout); err != nil {
		log.Printf("读取失败: %v", err)
		return nil, err
	}
	if err = cmd.Wait(); err != nil {
		log.Printf("cmd.Wait失败: %v", err)
		return nil, err
	}
	data := buf.Bytes()
	width := int(videoTrack.Width)
	height := int(videoTrack.Height)
	log.Printf("ffmpeg size: %v,%v", width, height)
	// Guard against short output before indexing into it (fix for panic).
	if need := width * height * 3; len(data) < need {
		return nil, fmt.Errorf("ffmpeg output too short: got %d bytes, need %d for %dx%d bgr24", len(data), need, width, height)
	}
	// ffmpeg's rawvideo output is scanned top-to-bottom, row-major,
	// 3 bytes per pixel in B,G,R order.
	img := image.NewRGBA(image.Rect(0, 0, width, height))
	for y := 0; y < height; y++ {
		for x := 0; x < width; x++ {
			pos := (y*width + x) * 3
			img.Set(x, y, color.RGBA{
				R: data[pos+2],
				G: data[pos+1],
				B: data[pos],
				A: 255,
			})
		}
	}
	return img, nil
}

View File

@@ -40,7 +40,7 @@ type RTMPServer struct {
func (p *RTMPPlugin) OnTCPConnect(conn *net.TCPConn) task.ITask {
ret := &RTMPServer{conf: p}
ret.Init(conn)
ret.Logger = p.With("remote", conn.RemoteAddr().String())
ret.Logger = p.Logger.With("remote", conn.RemoteAddr().String())
return ret
}

View File

@@ -3,12 +3,13 @@ package rtmp
import (
"crypto/tls"
"errors"
"m7s.live/v5/pkg/config"
"m7s.live/v5/pkg/task"
"net"
"net/url"
"strings"
"m7s.live/v5/pkg/config"
"m7s.live/v5/pkg/task"
"m7s.live/v5"
)
@@ -28,7 +29,7 @@ func (c *Client) Start() (err error) {
return
}
ps := strings.Split(c.u.Path, "/")
if len(ps) < 3 {
if len(ps) < 2 {
return errors.New("illegal rtmp url")
}
isRtmps := c.u.Scheme == "rtmps"
@@ -53,7 +54,7 @@ func (c *Client) Start() (err error) {
return err
}
c.Init(conn)
c.Logger = c.Logger.With("local", conn.LocalAddr().String())
c.SetDescription("local", conn.LocalAddr().String())
c.Info("connect")
c.WriteChunkSize = c.chunkSize
c.AppName = strings.Join(ps[1:len(ps)-1], "/")
@@ -157,7 +158,9 @@ func (c *Client) Run() (err error) {
if len(args) > 0 {
m.StreamName += "?" + args.Encode()
}
c.Receivers[response.StreamId] = c.pullCtx.Publisher
if c.pullCtx.Publisher != nil {
c.Receivers[response.StreamId] = c.pullCtx.Publisher
}
err = c.SendMessage(RTMP_MSG_AMF0_COMMAND, m)
// if response, ok := msg.MsgData.(*ResponsePlayMessage); ok {
// if response.Object["code"] == "NetStream.Play.Start" {

View File

@@ -79,10 +79,10 @@ func (nc *NetConnection) Handshake(checkC2 bool) (err error) {
if len(C1) != C1S1_SIZE {
return errors.New("C1 Error")
}
var ts int
util.GetBE(C1[4:8], &ts)
var zero int
util.GetBE(C1[4:8], &zero)
if ts == 0 {
if zero == 0 {
return nc.simple_handshake(C1, checkC2)
}
@@ -92,12 +92,26 @@ func (nc *NetConnection) Handshake(checkC2 bool) (err error) {
func (nc *NetConnection) ClientHandshake() (err error) {
C0C1 := nc.mediaDataPool.NextN(C1S1_SIZE + 1)
defer nc.mediaDataPool.Recycle()
// 构造 C0
C0C1[0] = RTMP_HANDSHAKE_VERSION
// 构造 C1 使用简单握手格式
C1 := C0C1[1:]
// Time (4 bytes): 当前时间戳
util.PutBE(C1[0:4], time.Now().Unix()&0xFFFFFFFF)
// Zero (4 bytes): 必须为 0确保使用简单握手
util.PutBE(C1[4:8], 0)
// Random data (1528 bytes): 填充随机数据
for i := 8; i < C1S1_SIZE; i++ {
C1[i] = byte(rand.Int() % 256)
}
if _, err = nc.Write(C0C1); err == nil {
// read S0 S1
if _, err = io.ReadFull(nc.Conn, C0C1); err == nil {
if C0C1[0] != RTMP_HANDSHAKE_VERSION {
err = errors.New("S1 C1 Error")
err = errors.New("S0 Error")
// C2
} else if _, err = nc.Write(C0C1[1:]); err == nil {
_, err = io.ReadFull(nc.Conn, C0C1[1:]) // S2
@@ -222,13 +236,7 @@ func clientScheme(C1 []byte, schem int) (scheme int, challenge []byte, digest []
return 0, nil, nil, false, err
}
// ok
if bytes.Compare(digest, tmp_Hash) == 0 {
ok = true
} else {
ok = false
}
ok = bytes.Equal(digest, tmp_Hash)
// challenge scheme
challenge = C1[key_offset : key_offset+C1S1_KEY_DATA_SIZE]
scheme = schem

View File

@@ -5,6 +5,7 @@ import (
"net"
"runtime"
"sync/atomic"
"time"
"m7s.live/v5"
"m7s.live/v5/pkg/task"
@@ -128,6 +129,7 @@ func (nc *NetConnection) ResponseCreateStream(tid uint64, streamID uint32) error
// }
func (nc *NetConnection) readChunk() (msg *Chunk, err error) {
nc.SetReadDeadline(time.Now().Add(time.Second * 5)) // 设置读取超时时间为5秒
head, err := nc.ReadByte()
if err != nil {
return nil, err
@@ -313,6 +315,9 @@ func (nc *NetConnection) RecvMessage() (msg *Chunk, err error) {
}
}
}
if nc.IsStopped() {
err = nc.StopReason()
}
}
return
}
@@ -344,6 +349,7 @@ func (nc *NetConnection) SendMessage(t byte, msg RtmpMessage) (err error) {
if sid, ok := msg.(HaveStreamID); ok {
head.MessageStreamID = sid.GetStreamID()
}
nc.SetWriteDeadline(time.Now().Add(time.Second * 5)) // 设置写入超时时间为5秒
return nc.sendChunk(net.Buffers{nc.tmpBuf}, head, RTMP_CHUNK_HEAD_12)
}

View File

@@ -53,7 +53,7 @@ func (av *Sender) SendFrame(frame *RTMPData) (err error) {
// 后面开始,就是直接发送音视频数据,那么直接发送,不需要完整的块(Chunk Basic Header(1) + Chunk Message Header(7))
// 当Chunk Type为0时(即Chunk12),
if av.lastAbs == 0 {
av.SetTimestamp(frame.Timestamp)
av.SetTimestamp(1)
err = av.sendChunk(frame.Memory.Buffers, &av.ChunkHeader, RTMP_CHUNK_HEAD_12)
} else {
av.SetTimestamp(frame.Timestamp - av.lastAbs)

View File

@@ -31,8 +31,8 @@ func (avcc *RTMPVideo) filterH264(naluSizeLen int) {
reader := avcc.NewReader()
lenReader := reader.NewReader()
reader.Skip(5)
lenReader.Skip(5)
var afterFilter util.Memory
lenReader.RangeN(5, afterFilter.AppendOne)
allocator := avcc.GetAllocator()
var hasBadNalu bool
for {
@@ -49,7 +49,29 @@ func (avcc *RTMPVideo) filterH264(naluSizeLen int) {
reader.RangeN(int(naluLen), func(b []byte) {
naluBuffer = append(naluBuffer, b)
})
if badType := codec.ParseH264NALUType(naluBuffer[0][0]); badType > 9 {
badType := codec.ParseH264NALUType(naluBuffer[0][0])
// 替换之前打印 badType 的逻辑,解码并打印 SliceType
if badType == 5 { // NALU type for Coded slice of a non-IDR picture or Coded slice of an IDR picture
naluData := bytes.Join(naluBuffer, nil) // bytes 包已导入
if len(naluData) > 0 {
// h264parser 包已导入 as "github.com/deepch/vdk/codec/h264parser"
// ParseSliceHeaderFromNALU 返回的第一个值就是 SliceType
sliceType, err := h264parser.ParseSliceHeaderFromNALU(naluData)
if err == nil {
println("Decoded SliceType:", sliceType.String())
} else {
println("Error parsing H.264 slice header:", err.Error())
}
} else {
println("NALU data is empty, cannot parse H.264 slice header.")
}
}
switch badType {
case 5, 6, 7, 8, 1, 2, 3, 4:
afterFilter.Append(lenBuffer...)
afterFilter.Append(naluBuffer...)
default:
hasBadNalu = true
if allocator != nil {
for _, nalu := range lenBuffer {
@@ -59,9 +81,6 @@ func (avcc *RTMPVideo) filterH264(naluSizeLen int) {
allocator.Free(nalu)
}
}
} else {
afterFilter.Append(lenBuffer...)
afterFilter.Append(naluBuffer...)
}
}
if hasBadNalu {
@@ -135,17 +154,17 @@ func (avcc *RTMPVideo) Parse(t *AVTrack) (err error) {
err = parseSequence()
return
case PacketTypeCodedFrames:
switch ctx := t.ICodecCtx.(type) {
switch t.ICodecCtx.(type) {
case *H265Ctx:
if avcc.CTS, err = reader.ReadBE(3); err != nil {
return err
}
avcc.filterH265(int(ctx.RecordInfo.LengthSizeMinusOne) + 1)
// avcc.filterH265(int(ctx.RecordInfo.LengthSizeMinusOne) + 1)
case *AV1Ctx:
// return avcc.parseAV1(reader)
}
case PacketTypeCodedFramesX:
avcc.filterH265(int(t.ICodecCtx.(*H265Ctx).RecordInfo.LengthSizeMinusOne) + 1)
// avcc.filterH265(int(t.ICodecCtx.(*H265Ctx).RecordInfo.LengthSizeMinusOne) + 1)
}
} else {
b0, err = reader.ReadByte() //sequence frame flag
@@ -172,7 +191,7 @@ func (avcc *RTMPVideo) Parse(t *AVTrack) (err error) {
// case *H265Ctx:
// avcc.filterH265(int(ctx.RecordInfo.LengthSizeMinusOne) + 1)
// }
// if avcc.Size == 0 {
// if avcc.Size <= 5 {
// return ErrSkip
// }
}

View File

@@ -29,7 +29,7 @@ type RTSPPlugin struct {
func (p *RTSPPlugin) OnTCPConnect(conn *net.TCPConn) task.ITask {
ret := &RTSPServer{NetConnection: NewNetConnection(conn), conf: p}
ret.Logger = p.With("remote", conn.RemoteAddr().String())
ret.Logger = p.Logger.With("remote", conn.RemoteAddr().String())
return ret
}

View File

@@ -395,18 +395,9 @@ func (c *NetConnection) Receive(sendMode bool, onReceive func(byte, []byte) erro
// 如果回调返回错误,检查是否是丢弃错误
needToFree = (err != pkg.ErrDiscard)
}
continue
}
} else if onRTCP != nil { // 奇数通道RTCP数据
err := onRTCP(channelID, buf)
if err == nil {
// 如果回调返回nil表示内存被接管
needToFree = false
} else {
// 如果回调返回错误,检查是否是丢弃错误
needToFree = (err != pkg.ErrDiscard)
}
continue
onRTCP(channelID, buf) // 处理RTCP数据,及时释放内存
}
// 如果需要释放内存,则释放

Some files were not shown because too many files have changed in this diff Show More