mirror of
https://github.com/langhuihui/monibuca.git
synced 2025-12-24 13:48:04 +08:00
Compare commits
62 Commits
dev
...
refactor-f
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
479a0a79f6 | ||
|
|
b6ee2843b0 | ||
|
|
1a8e2bc816 | ||
|
|
bc0c761aa8 | ||
|
|
cabd0e3088 | ||
|
|
2034f068c0 | ||
|
|
eba62c4054 | ||
|
|
a070dc64f8 | ||
|
|
e10dfec816 | ||
|
|
96b9cbfc08 | ||
|
|
2bbee90a9f | ||
|
|
272def302a | ||
|
|
04843002bf | ||
|
|
e4810e9c55 | ||
|
|
15d830f1eb | ||
|
|
ad32f6f96e | ||
|
|
56c4ea5907 | ||
|
|
28c71545db | ||
|
|
17faf3f064 | ||
|
|
131af312f1 | ||
|
|
cf3b7dfabe | ||
|
|
584c2e9932 | ||
|
|
a7f04faa23 | ||
|
|
966153f873 | ||
|
|
4391ad2d8d | ||
|
|
747a5a1104 | ||
|
|
97d8de523d | ||
|
|
cad47aec5c | ||
|
|
baf3640b23 | ||
|
|
3d68712ff6 | ||
|
|
f06f43dbe9 | ||
|
|
75efcba311 | ||
|
|
6b58e2a9b5 | ||
|
|
7b6259ed67 | ||
|
|
0d3d86518d | ||
|
|
ac3ad009a7 | ||
|
|
5731c2e8da | ||
|
|
cf6153fa91 | ||
|
|
70e1ea51ac | ||
|
|
8f5a829900 | ||
|
|
10f4fe3fc6 | ||
|
|
3a2901fa5f | ||
|
|
55f5408f64 | ||
|
|
9e45c3eb71 | ||
|
|
01fa1f3ed8 | ||
|
|
830da3aaab | ||
|
|
5a04dc814d | ||
|
|
af5d2bc1f2 | ||
|
|
a3e0c1864e | ||
|
|
33d385d2bf | ||
|
|
29c47a8d08 | ||
|
|
5bf5e7bb20 | ||
|
|
4b74ea5841 | ||
|
|
43710fb017 | ||
|
|
962dda8d08 | ||
|
|
ec56bba75a | ||
|
|
b2b511d755 | ||
|
|
42acf47250 | ||
|
|
6206ee847d | ||
|
|
6cfdc03e4a | ||
|
|
b425b8da1f | ||
|
|
e105243cd5 |
5
.cursor/rules/monibuca.mdc
Normal file
5
.cursor/rules/monibuca.mdc
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
description: build pb
|
||||
alwaysApply: false
|
||||
---
|
||||
如果修改了 proto 文件需要编译,请使用 scripts 目录下的脚本来编译
|
||||
22
.github/workflows/go.yml
vendored
22
.github/workflows/go.yml
vendored
@@ -93,18 +93,16 @@ jobs:
|
||||
tar -zxvf bin/m7s_v5_linux_arm64.tar.gz
|
||||
mv m7s monibuca_arm64
|
||||
docker login -u langhuihui -p ${{ secrets.DOCKER_PASSWORD }}
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t langhuihui/monibuca:v5 --push .
|
||||
- name: docker push version tag
|
||||
if: success() && !contains(env.version, 'beta')
|
||||
run: |
|
||||
docker tag langhuihui/monibuca:v5 langhuihui/monibuca:${{ env.version }}
|
||||
docker push langhuihui/monibuca:${{ env.version }}
|
||||
if [[ "${{ env.version }}" == *"beta"* ]]; then
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t langhuihui/monibuca:v5 --push .
|
||||
else
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t langhuihui/monibuca:v5 -t langhuihui/monibuca:${{ env.version }} --push .
|
||||
fi
|
||||
- name: docker build lite version
|
||||
if: success() && startsWith(github.ref, 'refs/tags/')
|
||||
run: |
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -f DockerfileLite -t monibuca/v5:latest --push .
|
||||
- name: docker lite push version tag
|
||||
if: success() && !contains(env.version, 'beta')
|
||||
run: |
|
||||
docker tag monibuca/v5 monibuca/v5:${{ env.version }}
|
||||
docker push lmonibuca/v5:${{ env.version }}
|
||||
if [[ "${{ env.version }}" == *"beta"* ]]; then
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -f DockerfileLite -t monibuca/v5:latest --push .
|
||||
else
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -f DockerfileLite -t monibuca/v5:latest -t monibuca/v5:${{ env.version }} --push .
|
||||
fi
|
||||
5
.gitignore
vendored
5
.gitignore
vendored
@@ -19,4 +19,7 @@ __debug*
|
||||
example/default/*
|
||||
!example/default/main.go
|
||||
!example/default/config.yaml
|
||||
shutdown.sh
|
||||
shutdown.sh
|
||||
!example/test/test.db
|
||||
*.mp4
|
||||
shutdown.bat
|
||||
199
CLAUDE.md
Normal file
199
CLAUDE.md
Normal file
@@ -0,0 +1,199 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
Monibuca is a high-performance streaming server framework written in Go. It's designed to be a modular, scalable platform for real-time audio/video streaming with support for multiple protocols including RTMP, RTSP, HLS, WebRTC, GB28181, and more.
|
||||
|
||||
## Development Commands
|
||||
|
||||
### Building and Running
|
||||
|
||||
**Basic Run (with SQLite):**
|
||||
```bash
|
||||
cd example/default
|
||||
go run -tags sqlite main.go
|
||||
```
|
||||
|
||||
**Build Tags:**
|
||||
- `sqlite` - Enable SQLite database support
|
||||
- `sqliteCGO` - Enable SQLite with CGO
|
||||
- `mysql` - Enable MySQL database support
|
||||
- `postgres` - Enable PostgreSQL database support
|
||||
- `duckdb` - Enable DuckDB database support
|
||||
- `disable_rm` - Disable memory pool
|
||||
- `fasthttp` - Use fasthttp instead of net/http
|
||||
- `taskpanic` - Enable panics for testing
|
||||
|
||||
**Protocol Buffer Generation:**
|
||||
```bash
|
||||
# Generate all proto files
|
||||
sh scripts/protoc.sh
|
||||
|
||||
# Generate specific plugin proto
|
||||
sh scripts/protoc.sh plugin_name
|
||||
```
|
||||
|
||||
**Release Building:**
|
||||
```bash
|
||||
# Uses goreleaser configuration
|
||||
goreleaser build
|
||||
```
|
||||
|
||||
**Testing:**
|
||||
```bash
|
||||
go test ./...
|
||||
```
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
### Core Components
|
||||
|
||||
**Server (`server.go`):** Main server instance that manages plugins, streams, and configurations. Implements the central event loop and lifecycle management.
|
||||
|
||||
**Plugin System (`plugin.go`):** Modular architecture where functionality is provided through plugins. Each plugin implements the `IPlugin` interface and can provide:
|
||||
- Protocol handlers (RTMP, RTSP, etc.)
|
||||
- Media transformers
|
||||
- Pull/Push proxies
|
||||
- Recording capabilities
|
||||
- Custom HTTP endpoints
|
||||
|
||||
**Configuration System (`pkg/config/`):** Hierarchical configuration system with priority order: dynamic modifications > environment variables > config files > default YAML > global config > defaults.
|
||||
|
||||
**Task System (`pkg/task/`):** Asynchronous task management with dependency handling, lifecycle management, and graceful shutdown capabilities.
|
||||
|
||||
### Key Interfaces
|
||||
|
||||
**Publisher:** Handles incoming media streams and manages track information
|
||||
**Subscriber:** Handles outgoing media streams to clients
|
||||
**Puller:** Pulls streams from external sources
|
||||
**Pusher:** Pushes streams to external destinations
|
||||
**Transformer:** Processes/transcodes media streams
|
||||
**Recorder:** Records streams to storage
|
||||
|
||||
### Stream Processing Flow
|
||||
|
||||
1. **Publisher** receives media data and creates tracks
|
||||
2. **Tracks** handle audio/video data with specific codecs
|
||||
3. **Subscribers** attach to publishers to receive media
|
||||
4. **Transformers** can process streams between publishers and subscribers
|
||||
5. **Plugins** provide protocol-specific implementations
|
||||
|
||||
## Plugin Development
|
||||
|
||||
### Creating a Plugin
|
||||
|
||||
1. Implement the `IPlugin` interface
|
||||
2. Define plugin metadata using `PluginMeta`
|
||||
3. Register with `InstallPlugin[YourPluginType](meta)`
|
||||
4. Optionally implement protocol-specific interfaces:
|
||||
- `ITCPPlugin` for TCP servers
|
||||
- `IUDPPlugin` for UDP servers
|
||||
- `IQUICPlugin` for QUIC servers
|
||||
- `IRegisterHandler` for HTTP endpoints
|
||||
|
||||
### Plugin Lifecycle
|
||||
|
||||
1. **Init:** Configuration parsing and initialization
|
||||
2. **Start:** Network listeners and task registration
|
||||
3. **Run:** Active operation
|
||||
4. **Dispose:** Cleanup and shutdown
|
||||
|
||||
## Configuration Structure
|
||||
|
||||
### Global Configuration
|
||||
- HTTP/TCP/UDP/QUIC listeners
|
||||
- Database connections (SQLite, MySQL, PostgreSQL, DuckDB)
|
||||
- Authentication settings
|
||||
- Admin interface settings
|
||||
- Global stream alias mappings
|
||||
|
||||
### Plugin Configuration
|
||||
Each plugin can define its own configuration structure that gets merged with global settings.
|
||||
|
||||
## Database Integration
|
||||
|
||||
Supports multiple database backends:
|
||||
- **SQLite:** Default lightweight option
|
||||
- **MySQL:** Production deployments
|
||||
- **PostgreSQL:** Production deployments
|
||||
- **DuckDB:** Analytics use cases
|
||||
|
||||
Automatic migration is handled for core models including users, proxies, and stream aliases.
|
||||
|
||||
## Protocol Support
|
||||
|
||||
### Built-in Plugins
|
||||
- **RTMP:** Real-time messaging protocol
|
||||
- **RTSP:** Real-time streaming protocol
|
||||
- **HLS:** HTTP live streaming
|
||||
- **WebRTC:** Web real-time communication
|
||||
- **GB28181:** Chinese surveillance standard
|
||||
- **FLV:** Flash video format
|
||||
- **MP4:** MPEG-4 format
|
||||
- **SRT:** Secure reliable transport
|
||||
|
||||
## Authentication & Security
|
||||
|
||||
- JWT-based authentication for admin interface
|
||||
- Stream-level authentication with URL signing
|
||||
- Role-based access control (admin/user)
|
||||
- Webhook support for external auth integration
|
||||
|
||||
## Development Guidelines
|
||||
|
||||
### Code Style
|
||||
- Follow existing patterns and naming conventions
|
||||
- Use the task system for async operations
|
||||
- Implement proper error handling and logging
|
||||
- Use the configuration system for all settings
|
||||
|
||||
### Testing
|
||||
- Unit tests should be placed alongside source files
|
||||
- Integration tests can use the example configurations
|
||||
- Use the mock.py script for protocol testing
|
||||
|
||||
### Performance Considerations
|
||||
- Memory pool is enabled by default (disable with `disable_rm`)
|
||||
- Zero-copy design for media data where possible
|
||||
- Lock-free data structures for high concurrency
|
||||
- Efficient buffer management with ring buffers
|
||||
|
||||
## Debugging
|
||||
|
||||
### Built-in Debug Plugin
|
||||
- Performance monitoring and profiling
|
||||
- Real-time metrics via Prometheus endpoint (`/api/metrics`)
|
||||
- pprof integration for memory/cpu profiling
|
||||
|
||||
### Logging
|
||||
- Structured logging with zerolog
|
||||
- Configurable log levels
|
||||
- Log rotation support
|
||||
- Fatal crash logging
|
||||
|
||||
## Web Admin Interface
|
||||
|
||||
- Web-based admin UI served from `admin.zip`
|
||||
- RESTful API for all operations
|
||||
- Real-time stream monitoring
|
||||
- Configuration management
|
||||
- User management (when auth enabled)
|
||||
|
||||
## Common Issues
|
||||
|
||||
### Port Conflicts
|
||||
- Default HTTP port: 8080
|
||||
- Default gRPC port: 50051
|
||||
- Check plugin-specific port configurations
|
||||
|
||||
### Database Connection
|
||||
- Ensure proper build tags for database support
|
||||
- Check DSN configuration strings
|
||||
- Verify database file permissions
|
||||
|
||||
### Plugin Loading
|
||||
- Plugins are auto-discovered from imports
|
||||
- Check plugin enable/disable status
|
||||
- Verify configuration merging
|
||||
@@ -1,5 +1,45 @@
|
||||
# Monibuca v5.0.x Release Notes
|
||||
|
||||
## v5.0.4 (2025-08-15)
|
||||
|
||||
### 新增 / 改进 (Features & Improvements)
|
||||
- GB28181: 支持更新 channelName / channelId(eba62c4)
|
||||
- 定时任务(crontab): 初始化 SQL 支持(2bbee90)
|
||||
- Snap 插件: 支持批量抓图(272def3)
|
||||
- 管理后台: 支持自定义首页(15d830f)
|
||||
- 推/拉代理: 支持可选参数更新(ad32f6f)
|
||||
- 心跳/脉冲: pulse interval 允许为 0(17faf3f)
|
||||
- 告警上报: 通过 Hook 发送报警(baf3640)
|
||||
- 告警信息上报: 通过 Hook 发送 alarminfo(cad47ae)
|
||||
|
||||
## v5.0.3 (2025-06-27)
|
||||
|
||||
### 🎉 新功能 (New Features)
|
||||
|
||||
#### 录像与流媒体协议增强
|
||||
- **MP4/FLV录像优化**:多项修复和优化录像拉取、分片、写入、格式转换等功能,提升兼容性和稳定性。
|
||||
- **GB28181协议增强**:支持pullproxy代理GB28181流,完善平台配置、子码流播放、单独media port等能力。
|
||||
- **插件与配置系统**:插件初始化、配置加载、数据库适配等增强,支持获取全部配置yaml示例。
|
||||
- **WebRTC/HLS/RTMP协议适配**:WebRTC支持更多编解码器,HLS/RTMP协议兼容性提升。
|
||||
- **crontab计划录像**:定时任务插件支持计划录像,拉流代理支持禁用。
|
||||
|
||||
### 🐛 问题修复 (Bug Fixes)
|
||||
- **录像/流媒体相关**:修复mp4、flv、rtmp、hls等协议的多项bug,包括clone buffer、SQL语法、表结构适配等。
|
||||
- **GB28181/数据库**:修复注册、流订阅、表结构、SQL语法等问题,适配PostgreSQL。
|
||||
- **插件系统**:修复插件初始化、数据库对象赋值、配置加载等问题。
|
||||
|
||||
### 🛠️ 优化改进 (Improvements)
|
||||
- **代码结构重构**:重构mp4、record、插件等系统,提升可维护性。
|
||||
- **文档与示例**:完善文档说明,增加配置和API示例。
|
||||
- **Docker镜像**:优化tcpdump、ffmpeg等工具集成。
|
||||
|
||||
### 👥 贡献者 (Contributors)
|
||||
- langhuihui
|
||||
- pggiroro
|
||||
- banshan
|
||||
|
||||
---
|
||||
|
||||
## v5.0.2 (2025-06-05)
|
||||
|
||||
### 🎉 新功能 (New Features)
|
||||
|
||||
25
alarm.go
Normal file
25
alarm.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package m7s
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// AlarmInfo 报警信息实体,用于存储到数据库
|
||||
type AlarmInfo struct {
|
||||
ID uint `gorm:"primaryKey;autoIncrement" json:"id"` // 主键,自增ID
|
||||
ServerInfo string `gorm:"type:varchar(255);not null" json:"serverInfo"` // 服务器信息
|
||||
StreamName string `gorm:"type:varchar(255);index" json:"streamName"` // 流名称
|
||||
StreamPath string `gorm:"type:varchar(500)" json:"streamPath"` // 流的streampath
|
||||
AlarmName string `gorm:"type:varchar(255);not null" json:"alarmName"` // 报警名称
|
||||
AlarmDesc string `gorm:"type:varchar(500);not null" json:"alarmDesc"` // 报警描述
|
||||
AlarmType int `gorm:"not null;index" json:"alarmType"` // 报警类型(对应之前定义的常量)
|
||||
IsSent bool `gorm:"default:false" json:"isSent"` // 是否已成功发送
|
||||
CreatedAt time.Time `gorm:"autoCreateTime" json:"createdAt"` // 创建时间,报警时间
|
||||
UpdatedAt time.Time `gorm:"autoUpdateTime" json:"updatedAt"` // 更新时间
|
||||
FilePath string `gorm:"type:varchar(255)" json:"filePath"` // 文件路径
|
||||
}
|
||||
|
||||
// TableName 指定表名
|
||||
func (AlarmInfo) TableName() string {
|
||||
return "alarm_info"
|
||||
}
|
||||
8
alias.go
8
alias.go
@@ -48,7 +48,7 @@ func (s *Server) initStreamAlias() {
|
||||
|
||||
func (s *Server) GetStreamAlias(ctx context.Context, req *emptypb.Empty) (res *pb.StreamAliasListResponse, err error) {
|
||||
res = &pb.StreamAliasListResponse{}
|
||||
s.Streams.Call(func() error {
|
||||
s.CallOnStreamTask(func() {
|
||||
for alias := range s.AliasStreams.Range {
|
||||
info := &pb.StreamAlias{
|
||||
StreamPath: alias.StreamPath,
|
||||
@@ -62,18 +62,17 @@ func (s *Server) GetStreamAlias(ctx context.Context, req *emptypb.Empty) (res *p
|
||||
}
|
||||
res.Data = append(res.Data, info)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) SetStreamAlias(ctx context.Context, req *pb.SetStreamAliasRequest) (res *pb.SuccessResponse, err error) {
|
||||
res = &pb.SuccessResponse{}
|
||||
s.Streams.Call(func() error {
|
||||
s.CallOnStreamTask(func() {
|
||||
if req.StreamPath != "" {
|
||||
u, err := url.Parse(req.StreamPath)
|
||||
if err != nil {
|
||||
return err
|
||||
return
|
||||
}
|
||||
req.StreamPath = strings.TrimPrefix(u.Path, "/")
|
||||
publisher, canReplace := s.Streams.Get(req.StreamPath)
|
||||
@@ -159,7 +158,6 @@ func (s *Server) SetStreamAlias(ctx context.Context, req *pb.SetStreamAliasReque
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
292
api.go
292
api.go
@@ -12,6 +12,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5/pkg/config"
|
||||
"m7s.live/v5/pkg/task"
|
||||
|
||||
myip "github.com/husanpao/ip"
|
||||
@@ -25,7 +26,7 @@ import (
|
||||
"gopkg.in/yaml.v3"
|
||||
"m7s.live/v5/pb"
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/config"
|
||||
"m7s.live/v5/pkg/format"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
@@ -96,22 +97,13 @@ func (s *Server) api_Stream_AnnexB_(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
defer reader.StopRead()
|
||||
if reader.Value.Raw == nil {
|
||||
if err = reader.Value.Demux(publisher.VideoTrack.ICodecCtx); err != nil {
|
||||
http.Error(rw, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
}
|
||||
var annexb pkg.AnnexB
|
||||
var t pkg.AVTrack
|
||||
|
||||
t.ICodecCtx, t.SequenceFrame, err = annexb.ConvertCtx(publisher.VideoTrack.ICodecCtx)
|
||||
if t.ICodecCtx == nil {
|
||||
http.Error(rw, "unsupported codec", http.StatusInternalServerError)
|
||||
var annexb format.AnnexB
|
||||
err = pkg.ConvertFrameType(reader.Value.Wraps[0], &annexb)
|
||||
if err != nil {
|
||||
http.Error(rw, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
annexb.Mux(t.ICodecCtx, &reader.Value)
|
||||
_, err = annexb.WriteTo(rw)
|
||||
annexb.WriteTo(rw)
|
||||
}
|
||||
|
||||
func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err error) {
|
||||
@@ -158,6 +150,9 @@ func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err
|
||||
}
|
||||
res.Data.AudioTrack.SampleRate = uint32(t.ICodecCtx.(pkg.IAudioCodecCtx).GetSampleRate())
|
||||
res.Data.AudioTrack.Channels = uint32(t.ICodecCtx.(pkg.IAudioCodecCtx).GetChannels())
|
||||
if pub.State == PublisherStateInit {
|
||||
res.Data.State = int32(PublisherStateTrackAdded)
|
||||
}
|
||||
}
|
||||
}
|
||||
if t := pub.VideoTrack.AVTrack; t != nil {
|
||||
@@ -173,6 +168,9 @@ func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err
|
||||
}
|
||||
res.Data.VideoTrack.Width = uint32(t.ICodecCtx.(pkg.IVideoCodecCtx).Width())
|
||||
res.Data.VideoTrack.Height = uint32(t.ICodecCtx.(pkg.IVideoCodecCtx).Height())
|
||||
if pub.State == PublisherStateInit {
|
||||
res.Data.State = int32(PublisherStateTrackAdded)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -180,7 +178,7 @@ func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err
|
||||
|
||||
func (s *Server) StreamInfo(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.StreamInfoResponse, err error) {
|
||||
var recordings []*pb.RecordingDetail
|
||||
s.Records.SafeRange(func(record *RecordJob) bool {
|
||||
s.Records.Range(func(record *RecordJob) bool {
|
||||
if record.StreamPath == req.StreamPath {
|
||||
recordings = append(recordings, &pb.RecordingDetail{
|
||||
FilePath: record.RecConf.FilePath,
|
||||
@@ -220,11 +218,13 @@ func (s *Server) TaskTree(context.Context, *emptypb.Empty) (res *pb.TaskTreeResp
|
||||
StartTime: timestamppb.New(t.StartTime),
|
||||
Description: m.GetDescriptions(),
|
||||
StartReason: t.StartReason,
|
||||
Level: uint32(t.GetLevel()),
|
||||
}
|
||||
if job, ok := m.(task.IJob); ok {
|
||||
if blockedTask := job.Blocked(); blockedTask != nil {
|
||||
res.Blocked = fillData(blockedTask)
|
||||
}
|
||||
res.EventLoopRunning = job.EventLoopRunning()
|
||||
for t := range job.RangeSubTask {
|
||||
child := fillData(t)
|
||||
if child == nil {
|
||||
@@ -259,7 +259,7 @@ func (s *Server) RestartTask(ctx context.Context, req *pb.RequestWithId64) (resp
|
||||
|
||||
func (s *Server) GetRecording(ctx context.Context, req *emptypb.Empty) (resp *pb.RecordingListResponse, err error) {
|
||||
resp = &pb.RecordingListResponse{}
|
||||
s.Records.SafeRange(func(record *RecordJob) bool {
|
||||
s.Records.Range(func(record *RecordJob) bool {
|
||||
resp.Data = append(resp.Data, &pb.Recording{
|
||||
StreamPath: record.StreamPath,
|
||||
StartTime: timestamppb.New(record.StartTime),
|
||||
@@ -272,7 +272,7 @@ func (s *Server) GetRecording(ctx context.Context, req *emptypb.Empty) (resp *pb
|
||||
}
|
||||
|
||||
func (s *Server) GetSubscribers(context.Context, *pb.SubscribersRequest) (res *pb.SubscribersResponse, err error) {
|
||||
s.Streams.Call(func() error {
|
||||
s.CallOnStreamTask(func() {
|
||||
var subscribers []*pb.SubscriberSnapShot
|
||||
for subscriber := range s.Subscribers.Range {
|
||||
meta, _ := json.Marshal(subscriber.GetDescriptions())
|
||||
@@ -311,7 +311,6 @@ func (s *Server) GetSubscribers(context.Context, *pb.SubscribersRequest) (res *p
|
||||
Data: subscribers,
|
||||
Total: int32(s.Subscribers.Length),
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
@@ -331,7 +330,8 @@ func (s *Server) AudioTrackSnap(_ context.Context, req *pb.StreamSnapRequest) (r
|
||||
}
|
||||
}
|
||||
pub.AudioTrack.Ring.Do(func(v *pkg.AVFrame) {
|
||||
if len(v.Wraps) > 0 {
|
||||
if len(v.Wraps) > 0 && v.TryRLock() {
|
||||
defer v.RUnlock()
|
||||
var snap pb.TrackSnapShot
|
||||
snap.Sequence = v.Sequence
|
||||
snap.Timestamp = uint32(v.Timestamp / time.Millisecond)
|
||||
@@ -341,7 +341,7 @@ func (s *Server) AudioTrackSnap(_ context.Context, req *pb.StreamSnapRequest) (r
|
||||
data.RingDataSize += uint32(v.Wraps[0].GetSize())
|
||||
for i, wrap := range v.Wraps {
|
||||
snap.Wrap[i] = &pb.Wrap{
|
||||
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
|
||||
Timestamp: uint32(wrap.GetSample().Timestamp / time.Millisecond),
|
||||
Size: uint32(wrap.GetSize()),
|
||||
Data: wrap.String(),
|
||||
}
|
||||
@@ -382,7 +382,7 @@ func (s *Server) api_VideoTrack_SSE(rw http.ResponseWriter, r *http.Request) {
|
||||
snap.KeyFrame = frame.IDR
|
||||
for i, wrap := range frame.Wraps {
|
||||
snap.Wrap[i] = &pb.Wrap{
|
||||
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
|
||||
Timestamp: uint32(wrap.GetSample().Timestamp / time.Millisecond),
|
||||
Size: uint32(wrap.GetSize()),
|
||||
Data: wrap.String(),
|
||||
}
|
||||
@@ -415,7 +415,7 @@ func (s *Server) api_AudioTrack_SSE(rw http.ResponseWriter, r *http.Request) {
|
||||
snap.KeyFrame = frame.IDR
|
||||
for i, wrap := range frame.Wraps {
|
||||
snap.Wrap[i] = &pb.Wrap{
|
||||
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
|
||||
Timestamp: uint32(wrap.GetSample().Timestamp / time.Millisecond),
|
||||
Size: uint32(wrap.GetSize()),
|
||||
Data: wrap.String(),
|
||||
}
|
||||
@@ -441,7 +441,8 @@ func (s *Server) VideoTrackSnap(ctx context.Context, req *pb.StreamSnapRequest)
|
||||
}
|
||||
}
|
||||
pub.VideoTrack.Ring.Do(func(v *pkg.AVFrame) {
|
||||
if len(v.Wraps) > 0 {
|
||||
if len(v.Wraps) > 0 && v.TryRLock() {
|
||||
defer v.RUnlock()
|
||||
var snap pb.TrackSnapShot
|
||||
snap.Sequence = v.Sequence
|
||||
snap.Timestamp = uint32(v.Timestamp / time.Millisecond)
|
||||
@@ -451,7 +452,7 @@ func (s *Server) VideoTrackSnap(ctx context.Context, req *pb.StreamSnapRequest)
|
||||
data.RingDataSize += uint32(v.Wraps[0].GetSize())
|
||||
for i, wrap := range v.Wraps {
|
||||
snap.Wrap[i] = &pb.Wrap{
|
||||
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
|
||||
Timestamp: uint32(wrap.GetSample().Timestamp / time.Millisecond),
|
||||
Size: uint32(wrap.GetSize()),
|
||||
Data: wrap.String(),
|
||||
}
|
||||
@@ -484,29 +485,27 @@ func (s *Server) Shutdown(ctx context.Context, req *pb.RequestWithId) (res *pb.S
|
||||
}
|
||||
|
||||
func (s *Server) ChangeSubscribe(ctx context.Context, req *pb.ChangeSubscribeRequest) (res *pb.SuccessResponse, err error) {
|
||||
s.Streams.Call(func() error {
|
||||
s.CallOnStreamTask(func() {
|
||||
if subscriber, ok := s.Subscribers.Get(req.Id); ok {
|
||||
if pub, ok := s.Streams.Get(req.StreamPath); ok {
|
||||
subscriber.Publisher.RemoveSubscriber(subscriber)
|
||||
subscriber.StreamPath = req.StreamPath
|
||||
pub.AddSubscriber(subscriber)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
}
|
||||
err = pkg.ErrNotFound
|
||||
return nil
|
||||
})
|
||||
return &pb.SuccessResponse{}, err
|
||||
}
|
||||
|
||||
func (s *Server) StopSubscribe(ctx context.Context, req *pb.RequestWithId) (res *pb.SuccessResponse, err error) {
|
||||
s.Streams.Call(func() error {
|
||||
s.CallOnStreamTask(func() {
|
||||
if subscriber, ok := s.Subscribers.Get(req.Id); ok {
|
||||
subscriber.Stop(errors.New("stop by api"))
|
||||
} else {
|
||||
err = pkg.ErrNotFound
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return &pb.SuccessResponse{}, err
|
||||
}
|
||||
@@ -551,7 +550,7 @@ func (s *Server) StopPublish(ctx context.Context, req *pb.StreamSnapRequest) (re
|
||||
// /api/stream/list
|
||||
func (s *Server) StreamList(_ context.Context, req *pb.StreamListRequest) (res *pb.StreamListResponse, err error) {
|
||||
recordingMap := make(map[string][]*pb.RecordingDetail)
|
||||
for record := range s.Records.SafeRange {
|
||||
for record := range s.Records.Range {
|
||||
recordingMap[record.StreamPath] = append(recordingMap[record.StreamPath], &pb.RecordingDetail{
|
||||
FilePath: record.RecConf.FilePath,
|
||||
Mode: record.RecConf.Mode,
|
||||
@@ -575,14 +574,46 @@ func (s *Server) StreamList(_ context.Context, req *pb.StreamListRequest) (res *
|
||||
}
|
||||
|
||||
func (s *Server) WaitList(context.Context, *emptypb.Empty) (res *pb.StreamWaitListResponse, err error) {
|
||||
s.Streams.Call(func() error {
|
||||
s.CallOnStreamTask(func() {
|
||||
res = &pb.StreamWaitListResponse{
|
||||
List: make(map[string]int32),
|
||||
}
|
||||
for subs := range s.Waiting.Range {
|
||||
res.List[subs.StreamPath] = int32(subs.Length)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) GetSubscriptionProgress(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.SubscriptionProgressResponse, err error) {
|
||||
s.CallOnStreamTask(func() {
|
||||
if waitStream, ok := s.Waiting.Get(req.StreamPath); ok {
|
||||
progress := waitStream.Progress
|
||||
res = &pb.SubscriptionProgressResponse{
|
||||
Code: 0,
|
||||
Message: "success",
|
||||
Data: &pb.SubscriptionProgressData{
|
||||
CurrentStep: int32(progress.CurrentStep),
|
||||
},
|
||||
}
|
||||
// Convert steps
|
||||
for _, step := range progress.Steps {
|
||||
pbStep := &pb.Step{
|
||||
Name: step.Name,
|
||||
Description: step.Description,
|
||||
Error: step.Error,
|
||||
}
|
||||
if !step.StartedAt.IsZero() {
|
||||
pbStep.StartedAt = timestamppb.New(step.StartedAt)
|
||||
}
|
||||
if !step.CompletedAt.IsZero() {
|
||||
pbStep.CompletedAt = timestamppb.New(step.CompletedAt)
|
||||
}
|
||||
res.Data.Steps = append(res.Data.Steps, pbStep)
|
||||
}
|
||||
} else {
|
||||
err = pkg.ErrNotFound
|
||||
}
|
||||
})
|
||||
return
|
||||
}
|
||||
@@ -651,10 +682,10 @@ func (s *Server) Summary(context.Context, *emptypb.Empty) (res *pb.SummaryRespon
|
||||
netWorks = append(netWorks, info)
|
||||
}
|
||||
res.StreamCount = int32(s.Streams.Length)
|
||||
res.PullCount = int32(s.Pulls.Length)
|
||||
res.PushCount = int32(s.Pushs.Length)
|
||||
res.PullCount = int32(s.Pulls.Length())
|
||||
res.PushCount = int32(s.Pushs.Length())
|
||||
res.SubscribeCount = int32(s.Subscribers.Length)
|
||||
res.RecordCount = int32(s.Records.Length)
|
||||
res.RecordCount = int32(s.Records.Length())
|
||||
res.TransformCount = int32(s.Transforms.Length)
|
||||
res.NetWork = netWorks
|
||||
s.lastSummary = res
|
||||
@@ -736,7 +767,63 @@ func (s *Server) GetConfig(_ context.Context, req *pb.GetConfigRequest) (res *pb
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) GetRecordList(ctx context.Context, req *pb.ReqRecordList) (resp *pb.ResponseList, err error) {
|
||||
func (s *Server) GetRecordList(ctx context.Context, req *pb.ReqRecordList) (resp *pb.RecordResponseList, err error) {
|
||||
if s.DB == nil {
|
||||
err = pkg.ErrNoDB
|
||||
return
|
||||
}
|
||||
if req.PageSize == 0 {
|
||||
req.PageSize = 10
|
||||
}
|
||||
if req.PageNum == 0 {
|
||||
req.PageNum = 1
|
||||
}
|
||||
offset := (req.PageNum - 1) * req.PageSize // 计算偏移量
|
||||
var totalCount int64 //总条数
|
||||
|
||||
var result []*RecordStream
|
||||
query := s.DB.Model(&RecordStream{})
|
||||
if strings.Contains(req.StreamPath, "*") {
|
||||
query = query.Where("stream_path like ?", strings.ReplaceAll(req.StreamPath, "*", "%"))
|
||||
} else if req.StreamPath != "" {
|
||||
query = query.Where("stream_path = ?", req.StreamPath)
|
||||
}
|
||||
if req.Type != "" {
|
||||
query = query.Where("type = ?", req.Type)
|
||||
}
|
||||
startTime, endTime, err := util.TimeRangeQueryParse(url.Values{"range": []string{req.Range}, "start": []string{req.Start}, "end": []string{req.End}})
|
||||
if err == nil {
|
||||
if !startTime.IsZero() {
|
||||
query = query.Where("start_time >= ?", startTime)
|
||||
}
|
||||
if !endTime.IsZero() {
|
||||
query = query.Where("end_time <= ?", endTime)
|
||||
}
|
||||
}
|
||||
|
||||
query.Count(&totalCount)
|
||||
err = query.Offset(int(offset)).Limit(int(req.PageSize)).Order("start_time desc").Find(&result).Error
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp = &pb.RecordResponseList{
|
||||
Total: uint32(totalCount),
|
||||
PageNum: req.PageNum,
|
||||
PageSize: req.PageSize,
|
||||
}
|
||||
for _, recordFile := range result {
|
||||
resp.Data = append(resp.Data, &pb.RecordFile{
|
||||
Id: uint32(recordFile.ID),
|
||||
StartTime: timestamppb.New(recordFile.StartTime),
|
||||
EndTime: timestamppb.New(recordFile.EndTime),
|
||||
FilePath: recordFile.FilePath,
|
||||
StreamPath: recordFile.StreamPath,
|
||||
})
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) GetEventRecordList(ctx context.Context, req *pb.ReqRecordList) (resp *pb.EventRecordResponseList, err error) {
|
||||
if s.DB == nil {
|
||||
err = pkg.ErrNoDB
|
||||
return
|
||||
@@ -751,15 +838,12 @@ func (s *Server) GetRecordList(ctx context.Context, req *pb.ReqRecordList) (resp
|
||||
var totalCount int64 //总条数
|
||||
|
||||
var result []*EventRecordStream
|
||||
query := s.DB.Model(&RecordStream{})
|
||||
query := s.DB.Model(&EventRecordStream{})
|
||||
if strings.Contains(req.StreamPath, "*") {
|
||||
query = query.Where("stream_path like ?", strings.ReplaceAll(req.StreamPath, "*", "%"))
|
||||
} else if req.StreamPath != "" {
|
||||
query = query.Where("stream_path = ?", req.StreamPath)
|
||||
}
|
||||
if req.Mode != "" {
|
||||
query = query.Where("mode = ?", req.Mode)
|
||||
}
|
||||
if req.Type != "" {
|
||||
query = query.Where("type = ?", req.Type)
|
||||
}
|
||||
@@ -781,21 +865,22 @@ func (s *Server) GetRecordList(ctx context.Context, req *pb.ReqRecordList) (resp
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp = &pb.ResponseList{
|
||||
resp = &pb.EventRecordResponseList{
|
||||
Total: uint32(totalCount),
|
||||
PageNum: req.PageNum,
|
||||
PageSize: req.PageSize,
|
||||
}
|
||||
for _, recordFile := range result {
|
||||
resp.Data = append(resp.Data, &pb.RecordFile{
|
||||
resp.Data = append(resp.Data, &pb.EventRecordFile{
|
||||
Id: uint32(recordFile.ID),
|
||||
StartTime: timestamppb.New(recordFile.StartTime),
|
||||
EndTime: timestamppb.New(recordFile.EndTime),
|
||||
FilePath: recordFile.FilePath,
|
||||
StreamPath: recordFile.StreamPath,
|
||||
EventLevel: recordFile.EventLevel,
|
||||
EventDesc: recordFile.EventDesc,
|
||||
EventId: recordFile.EventId,
|
||||
EventName: recordFile.EventName,
|
||||
EventDesc: recordFile.EventDesc,
|
||||
})
|
||||
}
|
||||
return
|
||||
@@ -874,7 +959,7 @@ func (s *Server) DeleteRecord(ctx context.Context, req *pb.ReqRecordDelete) (res
|
||||
|
||||
func (s *Server) GetTransformList(ctx context.Context, req *emptypb.Empty) (res *pb.TransformListResponse, err error) {
|
||||
res = &pb.TransformListResponse{}
|
||||
s.Transforms.Call(func() error {
|
||||
s.Transforms.Call(func() {
|
||||
for transform := range s.Transforms.Range {
|
||||
info := &pb.Transform{
|
||||
StreamPath: transform.StreamPath,
|
||||
@@ -886,13 +971,126 @@ func (s *Server) GetTransformList(ctx context.Context, req *emptypb.Empty) (res
|
||||
result, err = yaml.Marshal(transform.TransformJob.Config)
|
||||
if err != nil {
|
||||
s.Error("marshal transform config failed", "error", err)
|
||||
return err
|
||||
return
|
||||
}
|
||||
info.Config = string(result)
|
||||
}
|
||||
res.Data = append(res.Data, info)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) GetAlarmList(ctx context.Context, req *pb.AlarmListRequest) (res *pb.AlarmListResponse, err error) {
|
||||
// 初始化响应对象
|
||||
res = &pb.AlarmListResponse{
|
||||
Code: 0,
|
||||
Message: "success",
|
||||
PageNum: req.PageNum,
|
||||
PageSize: req.PageSize,
|
||||
}
|
||||
|
||||
// 检查数据库连接是否可用
|
||||
if s.DB == nil {
|
||||
res.Code = 500
|
||||
res.Message = "数据库连接不可用"
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// 构建查询条件
|
||||
query := s.DB.Model(&AlarmInfo{})
|
||||
|
||||
// 添加时间范围过滤
|
||||
startTime, endTime, err := util.TimeRangeQueryParse(url.Values{
|
||||
"range": []string{req.Range},
|
||||
"start": []string{req.Start},
|
||||
"end": []string{req.End},
|
||||
})
|
||||
if err == nil {
|
||||
if !startTime.IsZero() {
|
||||
query = query.Where("created_at >= ?", startTime)
|
||||
}
|
||||
if !endTime.IsZero() {
|
||||
query = query.Where("created_at <= ?", endTime)
|
||||
}
|
||||
}
|
||||
|
||||
// 添加告警类型过滤
|
||||
if req.AlarmType != 0 {
|
||||
query = query.Where("alarm_type = ?", req.AlarmType)
|
||||
}
|
||||
|
||||
// 添加 StreamPath 过滤
|
||||
if req.StreamPath != "" {
|
||||
if strings.Contains(req.StreamPath, "*") {
|
||||
// 支持通配符搜索
|
||||
query = query.Where("stream_path LIKE ?", strings.ReplaceAll(req.StreamPath, "*", "%"))
|
||||
} else {
|
||||
query = query.Where("stream_path = ?", req.StreamPath)
|
||||
}
|
||||
}
|
||||
|
||||
// 添加 StreamName 过滤
|
||||
if req.StreamName != "" {
|
||||
if strings.Contains(req.StreamName, "*") {
|
||||
// 支持通配符搜索
|
||||
query = query.Where("stream_name LIKE ?", strings.ReplaceAll(req.StreamName, "*", "%"))
|
||||
} else {
|
||||
query = query.Where("stream_name = ?", req.StreamName)
|
||||
}
|
||||
}
|
||||
|
||||
// 计算总记录数
|
||||
var total int64
|
||||
if err = query.Count(&total).Error; err != nil {
|
||||
res.Code = 500
|
||||
res.Message = "查询告警信息总数失败: " + err.Error()
|
||||
return res, nil
|
||||
}
|
||||
res.Total = int32(total)
|
||||
|
||||
// 如果没有记录,直接返回
|
||||
if total == 0 {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// 处理分页参数
|
||||
if req.PageNum <= 0 {
|
||||
req.PageNum = 1
|
||||
}
|
||||
if req.PageSize <= 0 {
|
||||
req.PageSize = 10
|
||||
}
|
||||
|
||||
// 查询分页数据
|
||||
var alarmInfoList []AlarmInfo
|
||||
offset := (req.PageNum - 1) * req.PageSize
|
||||
if err = query.Order("created_at DESC").
|
||||
Offset(int(offset)).
|
||||
Limit(int(req.PageSize)).
|
||||
Find(&alarmInfoList).Error; err != nil {
|
||||
res.Code = 500
|
||||
res.Message = "查询告警信息失败: " + err.Error()
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// 转换为 protobuf 格式
|
||||
res.Data = make([]*pb.AlarmInfo, len(alarmInfoList))
|
||||
for i, alarm := range alarmInfoList {
|
||||
res.Data[i] = &pb.AlarmInfo{
|
||||
Id: uint32(alarm.ID),
|
||||
ServerInfo: alarm.ServerInfo,
|
||||
StreamName: alarm.StreamName,
|
||||
StreamPath: alarm.StreamPath,
|
||||
AlarmDesc: alarm.AlarmDesc,
|
||||
AlarmName: alarm.AlarmName,
|
||||
AlarmType: int32(alarm.AlarmType),
|
||||
IsSent: alarm.IsSent,
|
||||
CreatedAt: timestamppb.New(alarm.CreatedAt),
|
||||
UpdatedAt: timestamppb.New(alarm.UpdatedAt),
|
||||
FilePath: alarm.FilePath,
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
324
api_config.go
Normal file
324
api_config.go
Normal file
@@ -0,0 +1,324 @@
|
||||
package m7s
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
func getIndent(line string) int {
|
||||
return len(line) - len(strings.TrimLeft(line, " "))
|
||||
}
|
||||
|
||||
func addCommentsToYAML(yamlData []byte) []byte {
|
||||
lines := strings.Split(string(yamlData), "\n")
|
||||
var result strings.Builder
|
||||
var commentBuffer []string
|
||||
var keyLineBuffer string
|
||||
var keyLineIndent int
|
||||
inMultilineValue := false
|
||||
|
||||
for _, line := range lines {
|
||||
trimmedLine := strings.TrimSpace(line)
|
||||
indent := getIndent(line)
|
||||
|
||||
if strings.HasPrefix(trimmedLine, "_description:") {
|
||||
description := strings.TrimSpace(strings.TrimPrefix(trimmedLine, "_description:"))
|
||||
commentBuffer = append(commentBuffer, "# "+description)
|
||||
} else if strings.HasPrefix(trimmedLine, "_enum:") {
|
||||
enum := strings.TrimSpace(strings.TrimPrefix(trimmedLine, "_enum:"))
|
||||
commentBuffer = append(commentBuffer, "# 可选值: "+enum)
|
||||
} else if strings.HasPrefix(trimmedLine, "_value:") {
|
||||
valueStr := strings.TrimSpace(strings.TrimPrefix(trimmedLine, "_value:"))
|
||||
if valueStr != "" && valueStr != "{}" && valueStr != "[]" {
|
||||
// Single line value
|
||||
result.WriteString(strings.Repeat(" ", keyLineIndent))
|
||||
result.WriteString(keyLineBuffer)
|
||||
result.WriteString(": ")
|
||||
result.WriteString(valueStr)
|
||||
if len(commentBuffer) > 0 {
|
||||
result.WriteString(" ")
|
||||
for j, c := range commentBuffer {
|
||||
c = strings.TrimSpace(strings.TrimPrefix(c, "#"))
|
||||
result.WriteString("# " + c)
|
||||
if j < len(commentBuffer)-1 {
|
||||
result.WriteString(" ")
|
||||
}
|
||||
}
|
||||
}
|
||||
result.WriteString("\n")
|
||||
} else {
|
||||
// Multi-line value (struct/map)
|
||||
for _, comment := range commentBuffer {
|
||||
result.WriteString(strings.Repeat(" ", keyLineIndent))
|
||||
result.WriteString(comment)
|
||||
result.WriteString("\n")
|
||||
}
|
||||
result.WriteString(strings.Repeat(" ", keyLineIndent))
|
||||
result.WriteString(keyLineBuffer)
|
||||
result.WriteString(":")
|
||||
result.WriteString("\n")
|
||||
inMultilineValue = true
|
||||
}
|
||||
commentBuffer = nil
|
||||
keyLineBuffer = ""
|
||||
keyLineIndent = 0
|
||||
} else if strings.Contains(trimmedLine, ":") {
|
||||
// This is a key line
|
||||
if keyLineBuffer != "" { // flush previous key line
|
||||
result.WriteString(strings.Repeat(" ", keyLineIndent) + keyLineBuffer + ":\n")
|
||||
}
|
||||
inMultilineValue = false
|
||||
keyLineBuffer = strings.TrimSuffix(trimmedLine, ":")
|
||||
keyLineIndent = indent
|
||||
} else if inMultilineValue {
|
||||
// These are the lines of a multiline value
|
||||
if trimmedLine != "" {
|
||||
result.WriteString(line + "\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
if keyLineBuffer != "" {
|
||||
result.WriteString(strings.Repeat(" ", keyLineIndent) + keyLineBuffer + ":\n")
|
||||
}
|
||||
|
||||
// Final cleanup to remove empty lines and special keys
|
||||
finalOutput := []string{}
|
||||
for _, line := range strings.Split(result.String(), "\n") {
|
||||
trimmed := strings.TrimSpace(line)
|
||||
if trimmed == "" || strings.HasPrefix(trimmed, "_") {
|
||||
continue
|
||||
}
|
||||
finalOutput = append(finalOutput, line)
|
||||
}
|
||||
|
||||
return []byte(strings.Join(finalOutput, "\n"))
|
||||
}
|
||||
|
||||
func (s *Server) api_Config_YAML_All(rw http.ResponseWriter, r *http.Request) {
|
||||
query := r.URL.Query()
|
||||
filterName := query.Get("name")
|
||||
shouldMergeCommon := query.Get("common") != "false"
|
||||
|
||||
configSections := []struct {
|
||||
name string
|
||||
data any
|
||||
}{}
|
||||
|
||||
// 1. Get common config if it needs to be merged.
|
||||
var commonConfig map[string]any
|
||||
if shouldMergeCommon {
|
||||
if c, ok := extractStructConfig(reflect.ValueOf(s.Plugin.GetCommonConf())).(map[string]any); ok {
|
||||
commonConfig = c
|
||||
}
|
||||
}
|
||||
|
||||
// 2. Process global config.
|
||||
if filterName == "" || filterName == "global" {
|
||||
if globalConf, ok := extractStructConfig(reflect.ValueOf(s.ServerConfig)).(map[string]any); ok {
|
||||
if shouldMergeCommon && commonConfig != nil {
|
||||
mergedConf := make(map[string]any)
|
||||
for k, v := range commonConfig {
|
||||
mergedConf[k] = v
|
||||
}
|
||||
for k, v := range globalConf {
|
||||
mergedConf[k] = v // Global overrides common
|
||||
}
|
||||
configSections = append(configSections, struct {
|
||||
name string
|
||||
data any
|
||||
}{"global", mergedConf})
|
||||
} else {
|
||||
configSections = append(configSections, struct {
|
||||
name string
|
||||
data any
|
||||
}{"global", globalConf})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Process plugin configs.
|
||||
for _, meta := range plugins {
|
||||
if filterName != "" && !strings.EqualFold(meta.Name, filterName) {
|
||||
continue
|
||||
}
|
||||
name := strings.ToLower(meta.Name)
|
||||
configType := meta.Type
|
||||
if configType.Kind() == reflect.Ptr {
|
||||
configType = configType.Elem()
|
||||
}
|
||||
|
||||
if pluginConf, ok := extractStructConfig(reflect.New(configType)).(map[string]any); ok {
|
||||
pluginConf["enable"] = map[string]any{
|
||||
"_value": true,
|
||||
"_description": "在global配置disableall时能启用特定插件",
|
||||
}
|
||||
if shouldMergeCommon && commonConfig != nil {
|
||||
mergedConf := make(map[string]any)
|
||||
for k, v := range commonConfig {
|
||||
mergedConf[k] = v
|
||||
}
|
||||
for k, v := range pluginConf {
|
||||
mergedConf[k] = v // Plugin overrides common
|
||||
}
|
||||
configSections = append(configSections, struct {
|
||||
name string
|
||||
data any
|
||||
}{name, mergedConf})
|
||||
} else {
|
||||
configSections = append(configSections, struct {
|
||||
name string
|
||||
data any
|
||||
}{name, pluginConf})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 4. Serialize each section and combine.
|
||||
var yamlParts []string
|
||||
for _, section := range configSections {
|
||||
if section.data == nil {
|
||||
continue
|
||||
}
|
||||
partMap := map[string]any{section.name: section.data}
|
||||
partYAML, err := yaml.Marshal(partMap)
|
||||
if err != nil {
|
||||
http.Error(rw, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
yamlParts = append(yamlParts, string(partYAML))
|
||||
}
|
||||
|
||||
finalYAML := strings.Join(yamlParts, "")
|
||||
|
||||
rw.Header().Set("Content-Type", "text/yaml; charset=utf-8")
|
||||
rw.Write(addCommentsToYAML([]byte(finalYAML)))
|
||||
}
|
||||
|
||||
func extractStructConfig(v reflect.Value) any {
|
||||
if v.Kind() == reflect.Ptr {
|
||||
if v.IsNil() {
|
||||
return nil
|
||||
}
|
||||
v = v.Elem()
|
||||
}
|
||||
if v.Kind() != reflect.Struct {
|
||||
return nil
|
||||
}
|
||||
m := make(map[string]any)
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
field := v.Type().Field(i)
|
||||
if !field.IsExported() {
|
||||
continue
|
||||
}
|
||||
// Filter out Plugin and UnimplementedApiServer
|
||||
fieldType := field.Type
|
||||
if fieldType.Kind() == reflect.Ptr {
|
||||
fieldType = fieldType.Elem()
|
||||
}
|
||||
if fieldType.Name() == "Plugin" || fieldType.Name() == "UnimplementedApiServer" {
|
||||
continue
|
||||
}
|
||||
yamlTag := field.Tag.Get("yaml")
|
||||
if yamlTag == "-" {
|
||||
continue
|
||||
}
|
||||
fieldName := strings.Split(yamlTag, ",")[0]
|
||||
if fieldName == "" {
|
||||
fieldName = strings.ToLower(field.Name)
|
||||
}
|
||||
m[fieldName] = extractFieldConfig(field, v.Field(i))
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func extractFieldConfig(field reflect.StructField, value reflect.Value) any {
|
||||
result := make(map[string]any)
|
||||
description := field.Tag.Get("desc")
|
||||
enum := field.Tag.Get("enum")
|
||||
if description != "" {
|
||||
result["_description"] = description
|
||||
}
|
||||
if enum != "" {
|
||||
result["_enum"] = enum
|
||||
}
|
||||
|
||||
kind := value.Kind()
|
||||
if kind == reflect.Ptr {
|
||||
if value.IsNil() {
|
||||
value = reflect.New(value.Type().Elem())
|
||||
}
|
||||
value = value.Elem()
|
||||
kind = value.Kind()
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Struct:
|
||||
if dur, ok := value.Interface().(time.Duration); ok {
|
||||
result["_value"] = extractDurationConfig(field, dur)
|
||||
} else {
|
||||
result["_value"] = extractStructConfig(value)
|
||||
}
|
||||
case reflect.Map, reflect.Slice:
|
||||
if value.IsNil() {
|
||||
result["_value"] = make(map[string]any)
|
||||
if kind == reflect.Slice {
|
||||
result["_value"] = make([]any, 0)
|
||||
}
|
||||
} else {
|
||||
result["_value"] = value.Interface()
|
||||
}
|
||||
default:
|
||||
result["_value"] = extractBasicTypeConfig(field, value)
|
||||
}
|
||||
|
||||
if description == "" && enum == "" {
|
||||
return result["_value"]
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
func extractBasicTypeConfig(field reflect.StructField, value reflect.Value) any {
|
||||
if value.IsZero() {
|
||||
if defaultValue := field.Tag.Get("default"); defaultValue != "" {
|
||||
return parseDefaultValue(defaultValue, field.Type)
|
||||
}
|
||||
}
|
||||
return value.Interface()
|
||||
}
|
||||
|
||||
func extractDurationConfig(field reflect.StructField, value time.Duration) any {
|
||||
if value == 0 {
|
||||
if defaultValue := field.Tag.Get("default"); defaultValue != "" {
|
||||
return defaultValue
|
||||
}
|
||||
}
|
||||
return value.String()
|
||||
}
|
||||
|
||||
func parseDefaultValue(defaultValue string, t reflect.Type) any {
|
||||
switch t.Kind() {
|
||||
case reflect.String:
|
||||
return defaultValue
|
||||
case reflect.Bool:
|
||||
return defaultValue == "true"
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
if v, err := strconv.ParseInt(defaultValue, 10, 64); err == nil {
|
||||
return v
|
||||
}
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
if v, err := strconv.ParseUint(defaultValue, 10, 64); err == nil {
|
||||
return v
|
||||
}
|
||||
case reflect.Float32, reflect.Float64:
|
||||
if v, err := strconv.ParseFloat(defaultValue, 64); err == nil {
|
||||
return v
|
||||
}
|
||||
}
|
||||
return defaultValue
|
||||
}
|
||||
279
doc/arch/auth.md
Normal file
279
doc/arch/auth.md
Normal file
@@ -0,0 +1,279 @@
|
||||
# Stream Authentication Mechanism
|
||||
|
||||
Monibuca V5 provides a comprehensive stream authentication mechanism to control access permissions for publishing and subscribing to streams. The authentication mechanism supports multiple methods, including key-based signature authentication and custom authentication handlers.
|
||||
|
||||
## Authentication Principles
|
||||
|
||||
### 1. Authentication Flow Sequence Diagrams
|
||||
|
||||
#### Publishing Authentication Sequence Diagram
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant Client as Publishing Client
|
||||
participant Plugin as Plugin
|
||||
participant AuthHandler as Auth Handler
|
||||
participant Server as Server
|
||||
|
||||
Client->>Plugin: Publishing Request (streamPath, args)
|
||||
Plugin->>Plugin: Check EnableAuth && Type == PublishTypeServer
|
||||
|
||||
alt Authentication Enabled
|
||||
Plugin->>Plugin: Look for custom auth handler
|
||||
|
||||
alt Custom Handler Exists
|
||||
Plugin->>AuthHandler: onAuthPub(publisher)
|
||||
AuthHandler->>AuthHandler: Execute custom auth logic
|
||||
AuthHandler-->>Plugin: Auth result
|
||||
else Use Key-based Auth
|
||||
Plugin->>Plugin: Check if conf.Key exists
|
||||
alt Key Configured
|
||||
Plugin->>Plugin: auth(streamPath, key, secret, expire)
|
||||
Plugin->>Plugin: Validate timestamp
|
||||
Plugin->>Plugin: Validate secret length
|
||||
Plugin->>Plugin: Calculate MD5 signature
|
||||
Plugin->>Plugin: Compare signatures
|
||||
Plugin-->>Plugin: Auth result
|
||||
end
|
||||
end
|
||||
|
||||
alt Auth Failed
|
||||
Plugin-->>Client: Auth failed, reject publishing
|
||||
else Auth Success
|
||||
Plugin->>Server: Create Publisher and add to stream management
|
||||
Server-->>Plugin: Publishing successful
|
||||
Plugin-->>Client: Publishing established successfully
|
||||
end
|
||||
else Auth Disabled
|
||||
Plugin->>Server: Create Publisher directly
|
||||
Server-->>Plugin: Publishing successful
|
||||
Plugin-->>Client: Publishing established successfully
|
||||
end
|
||||
```
|
||||
|
||||
#### Subscribing Authentication Sequence Diagram
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant Client as Subscribing Client
|
||||
participant Plugin as Plugin
|
||||
participant AuthHandler as Auth Handler
|
||||
participant Server as Server
|
||||
|
||||
Client->>Plugin: Subscribing Request (streamPath, args)
|
||||
Plugin->>Plugin: Check EnableAuth && Type == SubscribeTypeServer
|
||||
|
||||
alt Authentication Enabled
|
||||
Plugin->>Plugin: Look for custom auth handler
|
||||
|
||||
alt Custom Handler Exists
|
||||
Plugin->>AuthHandler: onAuthSub(subscriber)
|
||||
AuthHandler->>AuthHandler: Execute custom auth logic
|
||||
AuthHandler-->>Plugin: Auth result
|
||||
else Use Key-based Auth
|
||||
Plugin->>Plugin: Check if conf.Key exists
|
||||
alt Key Configured
|
||||
Plugin->>Plugin: auth(streamPath, key, secret, expire)
|
||||
Plugin->>Plugin: Validate timestamp
|
||||
Plugin->>Plugin: Validate secret length
|
||||
Plugin->>Plugin: Calculate MD5 signature
|
||||
Plugin->>Plugin: Compare signatures
|
||||
Plugin-->>Plugin: Auth result
|
||||
end
|
||||
end
|
||||
|
||||
alt Auth Failed
|
||||
Plugin-->>Client: Auth failed, reject subscribing
|
||||
else Auth Success
|
||||
Plugin->>Server: Create Subscriber and wait for Publisher
|
||||
Server->>Server: Wait for stream publishing and track ready
|
||||
Server-->>Plugin: Subscribing ready
|
||||
Plugin-->>Client: Start streaming data transmission
|
||||
end
|
||||
else Auth Disabled
|
||||
Plugin->>Server: Create Subscriber directly
|
||||
Server-->>Plugin: Subscribing successful
|
||||
Plugin-->>Client: Start streaming data transmission
|
||||
end
|
||||
```
|
||||
|
||||
### 2. Authentication Trigger Points
|
||||
|
||||
Authentication is triggered in the following two scenarios:
|
||||
|
||||
- **Publishing Authentication**: Triggered when there's a publishing request in the `PublishWithConfig` method
|
||||
- **Subscribing Authentication**: Triggered when there's a subscribing request in the `SubscribeWithConfig` method
|
||||
|
||||
### 3. Authentication Condition Checks
|
||||
|
||||
Authentication is only executed when the following conditions are met simultaneously:
|
||||
|
||||
```go
|
||||
if p.config.EnableAuth && publisher.Type == PublishTypeServer
|
||||
```
|
||||
|
||||
- `EnableAuth`: Authentication is enabled in the plugin configuration
|
||||
- `Type == PublishTypeServer/SubscribeTypeServer`: Only authenticate server-type publishing/subscribing
|
||||
|
||||
### 4. Authentication Method Priority
|
||||
|
||||
The system executes authentication in the following priority order:
|
||||
|
||||
1. **Custom Authentication Handler** (Highest priority)
|
||||
2. **Key-based Signature Authentication**
|
||||
3. **No Authentication** (Default pass)
|
||||
|
||||
## Custom Authentication Handlers
|
||||
|
||||
### Publishing Authentication Handler
|
||||
|
||||
```go
|
||||
onAuthPub := p.Meta.OnAuthPub
|
||||
if onAuthPub == nil {
|
||||
onAuthPub = p.Server.Meta.OnAuthPub
|
||||
}
|
||||
if onAuthPub != nil {
|
||||
if err = onAuthPub(publisher).Await(); err != nil {
|
||||
p.Warn("auth failed", "error", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Authentication handler lookup order:
|
||||
1. Plugin-level authentication handler `p.Meta.OnAuthPub`
|
||||
2. Server-level authentication handler `p.Server.Meta.OnAuthPub`
|
||||
|
||||
### Subscribing Authentication Handler
|
||||
|
||||
```go
|
||||
onAuthSub := p.Meta.OnAuthSub
|
||||
if onAuthSub == nil {
|
||||
onAuthSub = p.Server.Meta.OnAuthSub
|
||||
}
|
||||
if onAuthSub != nil {
|
||||
if err = onAuthSub(subscriber).Await(); err != nil {
|
||||
p.Warn("auth failed", "error", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Key-based Signature Authentication
|
||||
|
||||
When there's no custom authentication handler, if a Key is configured, the system will use MD5-based signature authentication mechanism.
|
||||
|
||||
### Authentication Algorithm
|
||||
|
||||
```go
|
||||
func (p *Plugin) auth(streamPath string, key string, secret string, expire string) (err error) {
|
||||
// 1. Validate expiration time
|
||||
if unixTime, err := strconv.ParseInt(expire, 16, 64); err != nil || time.Now().Unix() > unixTime {
|
||||
return fmt.Errorf("auth failed expired")
|
||||
}
|
||||
|
||||
// 2. Validate secret length
|
||||
if len(secret) != 32 {
|
||||
return fmt.Errorf("auth failed secret length must be 32")
|
||||
}
|
||||
|
||||
// 3. Calculate the true secret
|
||||
trueSecret := md5.Sum([]byte(key + streamPath + expire))
|
||||
|
||||
// 4. Compare secrets
|
||||
if secret == hex.EncodeToString(trueSecret[:]) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("auth failed invalid secret")
|
||||
}
|
||||
```
|
||||
|
||||
### Signature Calculation Steps
|
||||
|
||||
1. **Construct signature string**: `key + streamPath + expire`
|
||||
2. **MD5 encryption**: Perform MD5 hash on the signature string
|
||||
3. **Hexadecimal encoding**: Convert MD5 result to 32-character hexadecimal string
|
||||
4. **Verify signature**: Compare calculation result with client-provided secret
|
||||
|
||||
### Parameter Description
|
||||
|
||||
| Parameter | Type | Description | Example |
|
||||
|-----------|------|-------------|---------|
|
||||
| key | string | Secret key set in configuration file | "mySecretKey" |
|
||||
| streamPath | string | Stream path | "live/test" |
|
||||
| expire | string | Expiration timestamp (hexadecimal) | "64a1b2c3" |
|
||||
| secret | string | Client-calculated signature (32-char hex) | "5d41402abc4b2a76b9719d911017c592" |
|
||||
|
||||
### Timestamp Handling
|
||||
|
||||
- Expiration time uses hexadecimal Unix timestamp
|
||||
- System validates if current time exceeds expiration time
|
||||
- Timestamp parsing failure or expiration will cause authentication failure
|
||||
|
||||
## API Key Generation
|
||||
|
||||
The system also provides API interfaces for key generation, supporting authentication needs for admin dashboard:
|
||||
|
||||
```go
|
||||
p.handle("/api/secret/{type}/{streamPath...}", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
// JWT Token validation
|
||||
authHeader := r.Header.Get("Authorization")
|
||||
tokenString := strings.TrimPrefix(authHeader, "Bearer ")
|
||||
_, err := p.Server.ValidateToken(tokenString)
|
||||
|
||||
// Generate publishing or subscribing key
|
||||
streamPath := r.PathValue("streamPath")
|
||||
t := r.PathValue("type")
|
||||
expire := r.URL.Query().Get("expire")
|
||||
|
||||
if t == "publish" {
|
||||
secret := md5.Sum([]byte(p.config.Publish.Key + streamPath + expire))
|
||||
rw.Write([]byte(hex.EncodeToString(secret[:])))
|
||||
} else if t == "subscribe" {
|
||||
secret := md5.Sum([]byte(p.config.Subscribe.Key + streamPath + expire))
|
||||
rw.Write([]byte(hex.EncodeToString(secret[:])))
|
||||
}
|
||||
}))
|
||||
```
|
||||
|
||||
## Configuration Examples
|
||||
|
||||
### Enable Authentication
|
||||
|
||||
```yaml
|
||||
# Plugin configuration
|
||||
rtmp:
|
||||
enableAuth: true
|
||||
publish:
|
||||
key: "your-publish-key"
|
||||
subscribe:
|
||||
key: "your-subscribe-key"
|
||||
```
|
||||
|
||||
### Publishing URL Example
|
||||
|
||||
```
|
||||
rtmp://localhost/live/test?secret=5d41402abc4b2a76b9719d911017c592&expire=64a1b2c3
|
||||
```
|
||||
|
||||
### Subscribing URL Example
|
||||
|
||||
```
|
||||
http://localhost:8080/flv/live/test.flv?secret=a1b2c3d4e5f6789012345678901234ab&expire=64a1b2c3
|
||||
```
|
||||
|
||||
## Security Considerations
|
||||
|
||||
1. **Key Protection**: Keys in configuration files should be properly secured to prevent leakage
|
||||
2. **Time Window**: Set reasonable expiration times to balance security and usability
|
||||
3. **HTTPS Transport**: Use HTTPS for transmitting authentication parameters in production
|
||||
4. **Logging**: Authentication failures are logged as warnings for security auditing
|
||||
|
||||
## Error Handling
|
||||
|
||||
Common causes of authentication failure:
|
||||
|
||||
- `auth failed expired`: Timestamp expired or format error
|
||||
- `auth failed secret length must be 32`: Incorrect secret length
|
||||
- `auth failed invalid secret`: Signature verification failed
|
||||
- `invalid token`: JWT verification failed during API key generation
|
||||
@@ -93,7 +93,7 @@ Plugins can add global middleware using the `AddMiddleware` method to handle all
|
||||
|
||||
Example code:
|
||||
```go
|
||||
func (p *YourPlugin) OnInit() {
|
||||
func (p *YourPlugin) Start() {
|
||||
// Add authentication middleware
|
||||
p.GetCommonConf().AddMiddleware(func(next http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
|
||||
### Plugin Development
|
||||
|
||||
[plugin/README.md](../plugin/README.md)
|
||||
[plugin/README.md](../../plugin/README.md)
|
||||
|
||||
## Task System
|
||||
|
||||
|
||||
@@ -116,7 +116,7 @@ type MyLogHandler struct {
|
||||
}
|
||||
|
||||
// Add handler during plugin initialization
|
||||
func (p *MyPlugin) OnInit() error {
|
||||
func (p *MyPlugin) Start() error {
|
||||
handler := &MyLogHandler{}
|
||||
p.Server.LogHandler.Add(handler)
|
||||
return nil
|
||||
|
||||
@@ -93,7 +93,7 @@ Plugins start through the `Plugin.Start` method, executing these operations in s
|
||||
- Start QUIC services (if implementing IQUICPlugin interface)
|
||||
|
||||
4. Plugin Initialization Callback
|
||||
- Call plugin's OnInit method
|
||||
- Call plugin's Start method
|
||||
- Handle initialization errors
|
||||
|
||||
5. Timer Task Setup
|
||||
@@ -109,7 +109,7 @@ The startup phase is crucial for plugins to begin providing services, with all p
|
||||
|
||||
### 4. Stop Phase (Stop)
|
||||
|
||||
The plugin stop phase is implemented through the `Plugin.OnStop` method and related stop handling logic, including:
|
||||
The plugin stop phase is implemented through the `Plugin.OnDispose` method and related stop handling logic, including:
|
||||
|
||||
1. Service Shutdown
|
||||
- Stop all network services (HTTP/HTTPS/TCP/UDP/QUIC)
|
||||
@@ -127,7 +127,7 @@ The plugin stop phase is implemented through the `Plugin.OnStop` method and rela
|
||||
- Trigger stop event notifications
|
||||
|
||||
4. Callback Processing
|
||||
- Call plugin's custom OnStop method
|
||||
- Call plugin's custom OnDispose method
|
||||
- Execute registered stop callback functions
|
||||
- Handle errors during stop process
|
||||
|
||||
@@ -143,7 +143,7 @@ The stop phase aims to ensure plugins can safely and cleanly stop running withou
|
||||
The plugin destroy phase is implemented through the `Plugin.Dispose` method, the final phase in a plugin's lifecycle, including:
|
||||
|
||||
1. Resource Release
|
||||
- Call plugin's OnStop method for stop processing
|
||||
- Call plugin's OnDispose method for stop processing
|
||||
- Remove from server's plugin list
|
||||
- Release all allocated system resources
|
||||
|
||||
|
||||
@@ -0,0 +1,279 @@
|
||||
# 流鉴权机制
|
||||
|
||||
Monibuca V5 提供了完善的流鉴权机制,用于控制推流和拉流的访问权限。鉴权机制支持多种方式,包括基于密钥的签名鉴权和自定义鉴权处理器。
|
||||
|
||||
## 鉴权原理
|
||||
|
||||
### 1. 鉴权流程时序图
|
||||
|
||||
#### 推流鉴权时序图
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant Client as 推流客户端
|
||||
participant Plugin as 插件
|
||||
participant AuthHandler as 鉴权处理器
|
||||
participant Server as 服务器
|
||||
|
||||
Client->>Plugin: 推流请求 (streamPath, args)
|
||||
Plugin->>Plugin: 检查 EnableAuth && Type == PublishTypeServer
|
||||
|
||||
alt 启用鉴权
|
||||
Plugin->>Plugin: 查找自定义鉴权处理器
|
||||
|
||||
alt 存在自定义处理器
|
||||
Plugin->>AuthHandler: onAuthPub(publisher)
|
||||
AuthHandler->>AuthHandler: 执行自定义鉴权逻辑
|
||||
AuthHandler-->>Plugin: 鉴权结果
|
||||
else 使用密钥鉴权
|
||||
Plugin->>Plugin: 检查 conf.Key 是否存在
|
||||
alt 配置了Key
|
||||
Plugin->>Plugin: auth(streamPath, key, secret, expire)
|
||||
Plugin->>Plugin: 验证时间戳
|
||||
Plugin->>Plugin: 验证secret长度
|
||||
Plugin->>Plugin: 计算MD5签名
|
||||
Plugin->>Plugin: 比较签名
|
||||
Plugin-->>Plugin: 鉴权结果
|
||||
end
|
||||
end
|
||||
|
||||
alt 鉴权失败
|
||||
Plugin-->>Client: 鉴权失败,拒绝推流
|
||||
else 鉴权成功
|
||||
Plugin->>Server: 创建Publisher并添加到流管理
|
||||
Server-->>Plugin: 推流成功
|
||||
Plugin-->>Client: 推流建立成功
|
||||
end
|
||||
else 未启用鉴权
|
||||
Plugin->>Server: 直接创建Publisher
|
||||
Server-->>Plugin: 推流成功
|
||||
Plugin-->>Client: 推流建立成功
|
||||
end
|
||||
```
|
||||
|
||||
#### 拉流鉴权时序图
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
participant Client as 拉流客户端
|
||||
participant Plugin as 插件
|
||||
participant AuthHandler as 鉴权处理器
|
||||
participant Server as 服务器
|
||||
|
||||
Client->>Plugin: 拉流请求 (streamPath, args)
|
||||
Plugin->>Plugin: 检查 EnableAuth && Type == SubscribeTypeServer
|
||||
|
||||
alt 启用鉴权
|
||||
Plugin->>Plugin: 查找自定义鉴权处理器
|
||||
|
||||
alt 存在自定义处理器
|
||||
Plugin->>AuthHandler: onAuthSub(subscriber)
|
||||
AuthHandler->>AuthHandler: 执行自定义鉴权逻辑
|
||||
AuthHandler-->>Plugin: 鉴权结果
|
||||
else 使用密钥鉴权
|
||||
Plugin->>Plugin: 检查 conf.Key 是否存在
|
||||
alt 配置了Key
|
||||
Plugin->>Plugin: auth(streamPath, key, secret, expire)
|
||||
Plugin->>Plugin: 验证时间戳
|
||||
Plugin->>Plugin: 验证secret长度
|
||||
Plugin->>Plugin: 计算MD5签名
|
||||
Plugin->>Plugin: 比较签名
|
||||
Plugin-->>Plugin: 鉴权结果
|
||||
end
|
||||
end
|
||||
|
||||
alt 鉴权失败
|
||||
Plugin-->>Client: 鉴权失败,拒绝拉流
|
||||
else 鉴权成功
|
||||
Plugin->>Server: 创建Subscriber并等待Publisher
|
||||
Server->>Server: 等待流发布和轨道就绪
|
||||
Server-->>Plugin: 拉流准备就绪
|
||||
Plugin-->>Client: 开始传输流数据
|
||||
end
|
||||
else 未启用鉴权
|
||||
Plugin->>Server: 直接创建Subscriber
|
||||
Server-->>Plugin: 拉流成功
|
||||
Plugin-->>Client: 开始传输流数据
|
||||
end
|
||||
```
|
||||
|
||||
### 2. 鉴权触发时机
|
||||
|
||||
鉴权在以下两种情况下触发:
|
||||
|
||||
- **推流鉴权**:当有推流请求时,在`PublishWithConfig`方法中触发
|
||||
- **拉流鉴权**:当有拉流请求时,在`SubscribeWithConfig`方法中触发
|
||||
|
||||
### 3. 鉴权条件判断
|
||||
|
||||
鉴权只在以下条件同时满足时才会执行:
|
||||
|
||||
```go
|
||||
if p.config.EnableAuth && publisher.Type == PublishTypeServer
|
||||
```
|
||||
|
||||
- `EnableAuth`:插件配置中启用了鉴权
|
||||
- `Type == PublishTypeServer/SubscribeTypeServer`:只对服务端类型的推流/拉流进行鉴权
|
||||
|
||||
### 4. 鉴权方式优先级
|
||||
|
||||
系统按以下优先级执行鉴权:
|
||||
|
||||
1. **自定义鉴权处理器**(最高优先级)
|
||||
2. **基于密钥的签名鉴权**
|
||||
3. **无鉴权**(默认通过)
|
||||
|
||||
## 自定义鉴权处理器
|
||||
|
||||
### 推流鉴权处理器
|
||||
|
||||
```go
|
||||
onAuthPub := p.Meta.OnAuthPub
|
||||
if onAuthPub == nil {
|
||||
onAuthPub = p.Server.Meta.OnAuthPub
|
||||
}
|
||||
if onAuthPub != nil {
|
||||
if err = onAuthPub(publisher).Await(); err != nil {
|
||||
p.Warn("auth failed", "error", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
鉴权处理器查找顺序:
|
||||
1. 插件级别的鉴权处理器 `p.Meta.OnAuthPub`
|
||||
2. 服务器级别的鉴权处理器 `p.Server.Meta.OnAuthPub`
|
||||
|
||||
### 拉流鉴权处理器
|
||||
|
||||
```go
|
||||
onAuthSub := p.Meta.OnAuthSub
|
||||
if onAuthSub == nil {
|
||||
onAuthSub = p.Server.Meta.OnAuthSub
|
||||
}
|
||||
if onAuthSub != nil {
|
||||
if err = onAuthSub(subscriber).Await(); err != nil {
|
||||
p.Warn("auth failed", "error", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 基于密钥的签名鉴权
|
||||
|
||||
当没有自定义鉴权处理器时,如果配置了Key,系统将使用基于MD5的签名鉴权机制。
|
||||
|
||||
### 鉴权算法
|
||||
|
||||
```go
|
||||
func (p *Plugin) auth(streamPath string, key string, secret string, expire string) (err error) {
|
||||
// 1. 验证过期时间
|
||||
if unixTime, err := strconv.ParseInt(expire, 16, 64); err != nil || time.Now().Unix() > unixTime {
|
||||
return fmt.Errorf("auth failed expired")
|
||||
}
|
||||
|
||||
// 2. 验证secret长度
|
||||
if len(secret) != 32 {
|
||||
return fmt.Errorf("auth failed secret length must be 32")
|
||||
}
|
||||
|
||||
// 3. 计算真实的secret
|
||||
trueSecret := md5.Sum([]byte(key + streamPath + expire))
|
||||
|
||||
// 4. 比较secret
|
||||
if secret == hex.EncodeToString(trueSecret[:]) {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("auth failed invalid secret")
|
||||
}
|
||||
```
|
||||
|
||||
### 签名计算步骤
|
||||
|
||||
1. **构造签名字符串**:`key + streamPath + expire`
|
||||
2. **MD5加密**:对签名字符串进行MD5哈希
|
||||
3. **十六进制编码**:将MD5结果转换为32位十六进制字符串
|
||||
4. **验证签名**:比较计算结果与客户端提供的secret
|
||||
|
||||
### 参数说明
|
||||
|
||||
| 参数 | 类型 | 说明 | 示例 |
|
||||
|------|------|------|------|
|
||||
| key | string | 密钥,在配置文件中设置 | "mySecretKey" |
|
||||
| streamPath | string | 流路径 | "live/test" |
|
||||
| expire | string | 过期时间戳(16进制) | "64a1b2c3" |
|
||||
| secret | string | 客户端计算的签名(32位十六进制) | "5d41402abc4b2a76b9719d911017c592" |
|
||||
|
||||
### 时间戳处理
|
||||
|
||||
- 过期时间使用16进制Unix时间戳
|
||||
- 系统会验证当前时间是否超过过期时间
|
||||
- 时间戳解析失败或已过期都会导致鉴权失败
|
||||
|
||||
## API密钥生成
|
||||
|
||||
系统还提供了API接口用于生成密钥,支持管理后台的鉴权需求:
|
||||
|
||||
```go
|
||||
p.handle("/api/secret/{type}/{streamPath...}", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
// JWT Token验证
|
||||
authHeader := r.Header.Get("Authorization")
|
||||
tokenString := strings.TrimPrefix(authHeader, "Bearer ")
|
||||
_, err := p.Server.ValidateToken(tokenString)
|
||||
|
||||
// 生成推流或拉流密钥
|
||||
streamPath := r.PathValue("streamPath")
|
||||
t := r.PathValue("type")
|
||||
expire := r.URL.Query().Get("expire")
|
||||
|
||||
if t == "publish" {
|
||||
secret := md5.Sum([]byte(p.config.Publish.Key + streamPath + expire))
|
||||
rw.Write([]byte(hex.EncodeToString(secret[:])))
|
||||
} else if t == "subscribe" {
|
||||
secret := md5.Sum([]byte(p.config.Subscribe.Key + streamPath + expire))
|
||||
rw.Write([]byte(hex.EncodeToString(secret[:])))
|
||||
}
|
||||
}))
|
||||
```
|
||||
|
||||
## 配置示例
|
||||
|
||||
### 启用鉴权
|
||||
|
||||
```yaml
|
||||
# 插件配置
|
||||
rtmp:
|
||||
enableAuth: true
|
||||
publish:
|
||||
key: "your-publish-key"
|
||||
subscribe:
|
||||
key: "your-subscribe-key"
|
||||
```
|
||||
|
||||
### 推流URL示例
|
||||
|
||||
```
|
||||
rtmp://localhost/live/test?secret=5d41402abc4b2a76b9719d911017c592&expire=64a1b2c3
|
||||
```
|
||||
|
||||
### 拉流URL示例
|
||||
|
||||
```
|
||||
http://localhost:8080/flv/live/test.flv?secret=a1b2c3d4e5f6789012345678901234ab&expire=64a1b2c3
|
||||
```
|
||||
|
||||
## 安全考虑
|
||||
|
||||
1. **密钥保护**:配置文件中的key应当妥善保管,避免泄露
|
||||
2. **时间窗口**:合理设置过期时间,平衡安全性和可用性
|
||||
3. **HTTPS传输**:生产环境建议使用HTTPS传输鉴权参数
|
||||
4. **日志记录**:鉴权失败会记录警告日志,便于安全审计
|
||||
|
||||
## 错误处理
|
||||
|
||||
鉴权失败的常见原因:
|
||||
|
||||
- `auth failed expired`:时间戳已过期或格式错误
|
||||
- `auth failed secret length must be 32`:secret长度不正确
|
||||
- `auth failed invalid secret`:签名验证失败
|
||||
- `invalid token`:API密钥生成时JWT验证失败
|
||||
@@ -93,7 +93,7 @@ func (p *YourPlugin) RegisterHandler() {
|
||||
|
||||
示例代码:
|
||||
```go
|
||||
func (p *YourPlugin) OnInit() {
|
||||
func (p *YourPlugin) Start() {
|
||||
// 添加认证中间件
|
||||
p.GetCommonConf().AddMiddleware(func(next http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
@@ -116,7 +116,7 @@ type MyLogHandler struct {
|
||||
}
|
||||
|
||||
// 在插件初始化时添加处理器
|
||||
func (p *MyPlugin) OnInit() error {
|
||||
func (p *MyPlugin) Start() error {
|
||||
handler := &MyLogHandler{}
|
||||
p.Server.LogHandler.Add(handler)
|
||||
return nil
|
||||
|
||||
@@ -109,7 +109,7 @@ Monibuca 采用插件化架构设计,通过插件机制来扩展功能。插
|
||||
|
||||
### 4. 停止阶段 (Stop)
|
||||
|
||||
插件的停止阶段通过 `Plugin.OnStop` 方法和相关的停止处理逻辑实现,主要包含以下步骤:
|
||||
插件的停止阶段通过 `Plugin.OnDispose` 方法和相关的停止处理逻辑实现,主要包含以下步骤:
|
||||
|
||||
1. 停止服务
|
||||
- 停止所有网络服务(HTTP/HTTPS/TCP/UDP/QUIC)
|
||||
|
||||
@@ -10,3 +10,5 @@ cascadeclient:
|
||||
onsub:
|
||||
pull:
|
||||
.*: m7s://$0
|
||||
flv:
|
||||
enable: true
|
||||
|
||||
@@ -9,7 +9,7 @@ transcode:
|
||||
transform:
|
||||
^live.+:
|
||||
input:
|
||||
mode: rtsp
|
||||
mode: pipe
|
||||
output:
|
||||
- target: rtmp://localhost/trans/$0/small
|
||||
conf: -loglevel debug -c:a aac -c:v h264 -vf scale=320:240
|
||||
|
||||
2
example/test/config.yaml
Normal file
2
example/test/config.yaml
Normal file
@@ -0,0 +1,2 @@
|
||||
global:
|
||||
log_level: debug
|
||||
40
example/test/main.go
Normal file
40
example/test/main.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
|
||||
"m7s.live/v5"
|
||||
_ "m7s.live/v5/plugin/cascade"
|
||||
|
||||
_ "m7s.live/v5/plugin/crypto"
|
||||
_ "m7s.live/v5/plugin/debug"
|
||||
_ "m7s.live/v5/plugin/flv"
|
||||
_ "m7s.live/v5/plugin/gb28181"
|
||||
_ "m7s.live/v5/plugin/hls"
|
||||
_ "m7s.live/v5/plugin/logrotate"
|
||||
_ "m7s.live/v5/plugin/monitor"
|
||||
_ "m7s.live/v5/plugin/mp4"
|
||||
_ "m7s.live/v5/plugin/onvif"
|
||||
_ "m7s.live/v5/plugin/preview"
|
||||
_ "m7s.live/v5/plugin/rtmp"
|
||||
_ "m7s.live/v5/plugin/rtp"
|
||||
_ "m7s.live/v5/plugin/rtsp"
|
||||
_ "m7s.live/v5/plugin/sei"
|
||||
_ "m7s.live/v5/plugin/snap"
|
||||
_ "m7s.live/v5/plugin/srt"
|
||||
_ "m7s.live/v5/plugin/stress"
|
||||
_ "m7s.live/v5/plugin/test"
|
||||
_ "m7s.live/v5/plugin/transcode"
|
||||
_ "m7s.live/v5/plugin/webrtc"
|
||||
_ "m7s.live/v5/plugin/webtransport"
|
||||
)
|
||||
|
||||
func main() {
|
||||
conf := flag.String("c", "config.yaml", "config file")
|
||||
flag.Parse()
|
||||
// ctx, _ := context.WithDeadline(context.Background(), time.Now().Add(time.Second*100))
|
||||
err := m7s.Run(context.Background(), *conf)
|
||||
fmt.Println(err)
|
||||
}
|
||||
40
go.mod
40
go.mod
@@ -29,14 +29,14 @@ require (
|
||||
github.com/mattn/go-sqlite3 v1.14.24
|
||||
github.com/mcuadros/go-defaults v1.2.0
|
||||
github.com/mozillazg/go-pinyin v0.20.0
|
||||
github.com/ncruces/go-sqlite3 v0.18.1
|
||||
github.com/ncruces/go-sqlite3/gormlite v0.18.0
|
||||
github.com/pion/interceptor v0.1.37
|
||||
github.com/pion/logging v0.2.2
|
||||
github.com/ncruces/go-sqlite3 v0.27.1
|
||||
github.com/ncruces/go-sqlite3/gormlite v0.24.0
|
||||
github.com/pion/interceptor v0.1.40
|
||||
github.com/pion/logging v0.2.4
|
||||
github.com/pion/rtcp v1.2.15
|
||||
github.com/pion/rtp v1.8.10
|
||||
github.com/pion/sdp/v3 v3.0.9
|
||||
github.com/pion/webrtc/v4 v4.0.7
|
||||
github.com/pion/rtp v1.8.21
|
||||
github.com/pion/sdp/v3 v3.0.15
|
||||
github.com/pion/webrtc/v4 v4.1.4
|
||||
github.com/quic-go/qpack v0.5.1
|
||||
github.com/quic-go/quic-go v0.50.1
|
||||
github.com/rs/zerolog v1.33.0
|
||||
@@ -47,7 +47,7 @@ require (
|
||||
github.com/vishvananda/netlink v1.1.0
|
||||
github.com/yapingcat/gomedia v0.0.0-20240601043430-920523f8e5c7
|
||||
golang.org/x/image v0.22.0
|
||||
golang.org/x/text v0.24.0
|
||||
golang.org/x/text v0.27.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
|
||||
google.golang.org/grpc v1.65.0
|
||||
google.golang.org/protobuf v1.34.2
|
||||
@@ -98,15 +98,15 @@ require (
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/ncruces/julianday v1.0.0 // indirect
|
||||
github.com/pion/datachannel v1.5.10 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.4 // indirect
|
||||
github.com/pion/ice/v4 v4.0.3 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.7 // indirect
|
||||
github.com/pion/ice/v4 v4.0.10 // indirect
|
||||
github.com/pion/mdns/v2 v2.0.7 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/sctp v1.8.35 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.4 // indirect
|
||||
github.com/pion/sctp v1.8.39 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.7 // indirect
|
||||
github.com/pion/stun/v3 v3.0.0 // indirect
|
||||
github.com/pion/transport/v3 v3.0.7 // indirect
|
||||
github.com/pion/turn/v4 v4.0.0 // indirect
|
||||
github.com/pion/turn/v4 v4.1.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
@@ -117,7 +117,7 @@ require (
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/spf13/cast v1.7.1 // indirect
|
||||
github.com/tetratelabs/wazero v1.8.0 // indirect
|
||||
github.com/tetratelabs/wazero v1.9.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
@@ -131,7 +131,7 @@ require (
|
||||
github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
golang.org/x/arch v0.8.0 // indirect
|
||||
golang.org/x/sync v0.13.0 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d // indirect
|
||||
)
|
||||
|
||||
@@ -149,11 +149,11 @@ require (
|
||||
github.com/prometheus/client_golang v1.20.4
|
||||
github.com/quangngotan95/go-m3u8 v0.1.0
|
||||
go.uber.org/mock v0.5.0 // indirect
|
||||
golang.org/x/crypto v0.37.0
|
||||
golang.org/x/crypto v0.40.0
|
||||
golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7
|
||||
golang.org/x/mod v0.19.0 // indirect
|
||||
golang.org/x/net v0.39.0
|
||||
golang.org/x/sys v0.32.0
|
||||
golang.org/x/tools v0.23.0 // indirect
|
||||
golang.org/x/mod v0.25.0 // indirect
|
||||
golang.org/x/net v0.41.0
|
||||
golang.org/x/sys v0.34.0
|
||||
golang.org/x/tools v0.34.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
87
go.sum
87
go.sum
@@ -189,10 +189,10 @@ github.com/mozillazg/go-pinyin v0.20.0 h1:BtR3DsxpApHfKReaPO1fCqF4pThRwH9uwvXzm+
|
||||
github.com/mozillazg/go-pinyin v0.20.0/go.mod h1:iR4EnMMRXkfpFVV5FMi4FNB6wGq9NV6uDWbUuPhP4Yc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/ncruces/go-sqlite3 v0.18.1 h1:iN8IMZV5EMxpH88NUac9vId23eTKNFUhP7jgY0EBbNc=
|
||||
github.com/ncruces/go-sqlite3 v0.18.1/go.mod h1:eEOyZnW1dGTJ+zDpMuzfYamEUBtdFz5zeYhqLBtHxvM=
|
||||
github.com/ncruces/go-sqlite3/gormlite v0.18.0 h1:KqP9a9wlX/Ba+yG+aeVX4pnNBNdaSO6xHdNDWzPxPnk=
|
||||
github.com/ncruces/go-sqlite3/gormlite v0.18.0/go.mod h1:RXeT1hknrz3A0tBDL6IfluDHuNkHdJeImn5TBMQg9zc=
|
||||
github.com/ncruces/go-sqlite3 v0.27.1 h1:suqlM7xhSyDVMV9RgX99MCPqt9mB6YOCzHZuiI36K34=
|
||||
github.com/ncruces/go-sqlite3 v0.27.1/go.mod h1:gpF5s+92aw2MbDmZK0ZOnCdFlpe11BH20CTspVqri0c=
|
||||
github.com/ncruces/go-sqlite3/gormlite v0.24.0 h1:81sHeq3CCdhjoqAB650n5wEdRlLO9VBvosArskcN3+c=
|
||||
github.com/ncruces/go-sqlite3/gormlite v0.24.0/go.mod h1:vXfVWdBfg7qOgqQqHpzUWl9LLswD0h+8mK4oouaV2oc=
|
||||
github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M=
|
||||
github.com/ncruces/julianday v1.0.0/go.mod h1:Dusn2KvZrrovOMJuOt0TNXL6tB7U2E8kvza5fFc9G7g=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
@@ -208,36 +208,36 @@ github.com/phsym/console-slog v0.3.1 h1:Fuzcrjr40xTc004S9Kni8XfNsk+qrptQmyR+wZw9
|
||||
github.com/phsym/console-slog v0.3.1/go.mod h1:oJskjp/X6e6c0mGpfP8ELkfKUsrkDifYRAqJQgmdDS0=
|
||||
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
|
||||
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
|
||||
github.com/pion/dtls/v3 v3.0.4 h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U=
|
||||
github.com/pion/dtls/v3 v3.0.4/go.mod h1:R373CsjxWqNPf6MEkfdy3aSe9niZvL/JaKlGeFphtMg=
|
||||
github.com/pion/ice/v4 v4.0.3 h1:9s5rI1WKzF5DRqhJ+Id8bls/8PzM7mau0mj1WZb4IXE=
|
||||
github.com/pion/ice/v4 v4.0.3/go.mod h1:VfHy0beAZ5loDT7BmJ2LtMtC4dbawIkkkejHPRZNB3Y=
|
||||
github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
|
||||
github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
|
||||
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
|
||||
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
|
||||
github.com/pion/dtls/v3 v3.0.7 h1:bItXtTYYhZwkPFk4t1n3Kkf5TDrfj6+4wG+CZR8uI9Q=
|
||||
github.com/pion/dtls/v3 v3.0.7/go.mod h1:uDlH5VPrgOQIw59irKYkMudSFprY9IEFCqz/eTz16f8=
|
||||
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
|
||||
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
|
||||
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
|
||||
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
|
||||
github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8=
|
||||
github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so=
|
||||
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
|
||||
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
|
||||
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
|
||||
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
|
||||
github.com/pion/rtp v1.8.10 h1:puphjdbjPB+L+NFaVuZ5h6bt1g5q4kFIoI+r5q/g0CU=
|
||||
github.com/pion/rtp v1.8.10/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4=
|
||||
github.com/pion/sctp v1.8.35 h1:qwtKvNK1Wc5tHMIYgTDJhfZk7vATGVHhXbUDfHbYwzA=
|
||||
github.com/pion/sctp v1.8.35/go.mod h1:EcXP8zCYVTRy3W9xtOF7wJm1L1aXfKRQzaM33SjQlzg=
|
||||
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
|
||||
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
|
||||
github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M=
|
||||
github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ=
|
||||
github.com/pion/rtp v1.8.21 h1:3yrOwmZFyUpcIosNcWRpQaU+UXIJ6yxLuJ8Bx0mw37Y=
|
||||
github.com/pion/rtp v1.8.21/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
|
||||
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
|
||||
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
|
||||
github.com/pion/sdp/v3 v3.0.15 h1:F0I1zds+K/+37ZrzdADmx2Q44OFDOPRLhPnNTaUX9hk=
|
||||
github.com/pion/sdp/v3 v3.0.15/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
|
||||
github.com/pion/srtp/v3 v3.0.7 h1:QUElw0A/FUg3MP8/KNMZB3i0m8F9XeMnTum86F7S4bs=
|
||||
github.com/pion/srtp/v3 v3.0.7/go.mod h1:qvnHeqbhT7kDdB+OGB05KA/P067G3mm7XBfLaLiaNF0=
|
||||
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
|
||||
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
|
||||
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
|
||||
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
|
||||
github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
|
||||
github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
|
||||
github.com/pion/webrtc/v4 v4.0.7 h1:aeq78uVnFZd2umXW0O9A2VFQYuS7+BZxWetQvSp2jPo=
|
||||
github.com/pion/webrtc/v4 v4.0.7/go.mod h1:oFVBBVSHU3vAEwSgnk3BuKCwAUwpDwQhko1EDwyZWbU=
|
||||
github.com/pion/turn/v4 v4.1.1 h1:9UnY2HB99tpDyz3cVVZguSxcqkJ1DsTSZ+8TGruh4fc=
|
||||
github.com/pion/turn/v4 v4.1.1/go.mod h1:2123tHk1O++vmjI5VSD0awT50NywDAq5A2NNNU4Jjs8=
|
||||
github.com/pion/webrtc/v4 v4.1.4 h1:/gK1ACGHXQmtyVVbJFQDxNoODg4eSRiFLB7t9r9pg8M=
|
||||
github.com/pion/webrtc/v4 v4.1.4/go.mod h1:Oab9npu1iZtQRMic3K3toYq5zFPvToe/QBw7dMI2ok4=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE=
|
||||
@@ -287,22 +287,15 @@ github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
|
||||
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/sunfish-shogi/bufseekio v0.0.0-20210207115823-a4185644b365/go.mod h1:dEzdXgvImkQ3WLI+0KQpmEx8T/C/ma9KeS3AfmU899I=
|
||||
github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g=
|
||||
github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs=
|
||||
github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I=
|
||||
github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
@@ -341,8 +334,8 @@ golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
|
||||
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
|
||||
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
|
||||
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7 h1:wDLEX9a7YQoKdKNQt88rtydkqDxeGaBUTnIYc3iG/mA=
|
||||
golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
@@ -350,17 +343,17 @@ golang.org/x/image v0.22.0 h1:UtK5yLUzilVrkjMAZAZ34DXGpASN8i8pj8g+O+yd10g=
|
||||
golang.org/x/image v0.22.0/go.mod h1:9hPFhljd4zZ1GNSIZJ49sqbp45GKK9t6w+iXvGqZUz4=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
|
||||
golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
|
||||
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
|
||||
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
||||
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -381,19 +374,19 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
|
||||
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
||||
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
||||
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
|
||||
golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
|
||||
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
|
||||
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=
|
||||
|
||||
1760
pb/global.pb.go
1760
pb/global.pb.go
File diff suppressed because it is too large
Load Diff
@@ -1401,7 +1401,7 @@ func local_request_Api_RemovePullProxy_1(ctx context.Context, marshaler runtime.
|
||||
}
|
||||
|
||||
func request_Api_UpdatePullProxy_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq PullProxyInfo
|
||||
var protoReq UpdatePullProxyRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
@@ -1414,7 +1414,7 @@ func request_Api_UpdatePullProxy_0(ctx context.Context, marshaler runtime.Marsha
|
||||
}
|
||||
|
||||
func local_request_Api_UpdatePullProxy_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq PullProxyInfo
|
||||
var protoReq UpdatePullProxyRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
@@ -1427,7 +1427,7 @@ func local_request_Api_UpdatePullProxy_0(ctx context.Context, marshaler runtime.
|
||||
}
|
||||
|
||||
func request_Api_UpdatePullProxy_1(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq PullProxyInfo
|
||||
var protoReq UpdatePullProxyRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
@@ -1440,7 +1440,7 @@ func request_Api_UpdatePullProxy_1(ctx context.Context, marshaler runtime.Marsha
|
||||
}
|
||||
|
||||
func local_request_Api_UpdatePullProxy_1(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq PullProxyInfo
|
||||
var protoReq UpdatePullProxyRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
@@ -1557,7 +1557,7 @@ func local_request_Api_RemovePushProxy_0(ctx context.Context, marshaler runtime.
|
||||
}
|
||||
|
||||
func request_Api_UpdatePushProxy_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq PushProxyInfo
|
||||
var protoReq UpdatePushProxyRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
@@ -1570,7 +1570,7 @@ func request_Api_UpdatePushProxy_0(ctx context.Context, marshaler runtime.Marsha
|
||||
}
|
||||
|
||||
func local_request_Api_UpdatePushProxy_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq PushProxyInfo
|
||||
var protoReq UpdatePushProxyRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
@@ -1708,6 +1708,96 @@ func local_request_Api_GetRecordList_0(ctx context.Context, marshaler runtime.Ma
|
||||
|
||||
}
|
||||
|
||||
var (
|
||||
filter_Api_GetEventRecordList_0 = &utilities.DoubleArray{Encoding: map[string]int{"type": 0, "streamPath": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}}
|
||||
)
|
||||
|
||||
func request_Api_GetEventRecordList_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq ReqRecordList
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["type"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "type")
|
||||
}
|
||||
|
||||
protoReq.Type, err = runtime.String(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "type", err)
|
||||
}
|
||||
|
||||
val, ok = pathParams["streamPath"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "streamPath")
|
||||
}
|
||||
|
||||
protoReq.StreamPath, err = runtime.String(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "streamPath", err)
|
||||
}
|
||||
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetEventRecordList_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.GetEventRecordList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Api_GetEventRecordList_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq ReqRecordList
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["type"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "type")
|
||||
}
|
||||
|
||||
protoReq.Type, err = runtime.String(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "type", err)
|
||||
}
|
||||
|
||||
val, ok = pathParams["streamPath"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "streamPath")
|
||||
}
|
||||
|
||||
protoReq.StreamPath, err = runtime.String(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "streamPath", err)
|
||||
}
|
||||
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetEventRecordList_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.GetEventRecordList(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_Api_GetRecordCatalog_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq ReqRecordCatalog
|
||||
var metadata runtime.ServerMetadata
|
||||
@@ -1840,6 +1930,94 @@ func local_request_Api_DeleteRecord_0(ctx context.Context, marshaler runtime.Mar
|
||||
|
||||
}
|
||||
|
||||
var (
|
||||
filter_Api_GetAlarmList_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
|
||||
)
|
||||
|
||||
func request_Api_GetAlarmList_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq AlarmListRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetAlarmList_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.GetAlarmList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Api_GetAlarmList_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq AlarmListRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetAlarmList_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.GetAlarmList(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_Api_GetSubscriptionProgress_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq StreamSnapRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["streamPath"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "streamPath")
|
||||
}
|
||||
|
||||
protoReq.StreamPath, err = runtime.String(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "streamPath", err)
|
||||
}
|
||||
|
||||
msg, err := client.GetSubscriptionProgress(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Api_GetSubscriptionProgress_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq StreamSnapRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["streamPath"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "streamPath")
|
||||
}
|
||||
|
||||
protoReq.StreamPath, err = runtime.String(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "streamPath", err)
|
||||
}
|
||||
|
||||
msg, err := server.GetSubscriptionProgress(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
// RegisterApiHandlerServer registers the http handlers for service Api to "mux".
|
||||
// UnaryRPC :call ApiServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
@@ -2896,6 +3074,31 @@ func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Api_GetEventRecordList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/global.Api/GetEventRecordList", runtime.WithHTTPPathPattern("/api/record/{type}/event/list/{streamPath=**}"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Api_GetEventRecordList_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Api_GetEventRecordList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Api_GetRecordCatalog_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -2946,6 +3149,56 @@ func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Api_GetAlarmList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/global.Api/GetAlarmList", runtime.WithHTTPPathPattern("/api/alarm/list"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Api_GetAlarmList_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Api_GetAlarmList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Api_GetSubscriptionProgress_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/global.Api/GetSubscriptionProgress", runtime.WithHTTPPathPattern("/api/stream/progress/{streamPath=**}"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_Api_GetSubscriptionProgress_0(annotatedContext, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Api_GetSubscriptionProgress_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -3911,6 +4164,28 @@ func RegisterApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Api_GetEventRecordList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/global.Api/GetEventRecordList", runtime.WithHTTPPathPattern("/api/record/{type}/event/list/{streamPath=**}"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Api_GetEventRecordList_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Api_GetEventRecordList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Api_GetRecordCatalog_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
@@ -3955,6 +4230,50 @@ func RegisterApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Api_GetAlarmList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/global.Api/GetAlarmList", runtime.WithHTTPPathPattern("/api/alarm/list"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Api_GetAlarmList_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Api_GetAlarmList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Api_GetSubscriptionProgress_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/global.Api/GetSubscriptionProgress", runtime.WithHTTPPathPattern("/api/stream/progress/{streamPath=**}"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_Api_GetSubscriptionProgress_0(annotatedContext, inboundMarshaler, client, req, pathParams)
|
||||
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Api_GetSubscriptionProgress_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -4043,9 +4362,15 @@ var (
|
||||
|
||||
pattern_Api_GetRecordList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 3, 0, 4, 1, 5, 4}, []string{"api", "record", "type", "list", "streamPath"}, ""))
|
||||
|
||||
pattern_Api_GetEventRecordList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 2, 4, 3, 0, 4, 1, 5, 5}, []string{"api", "record", "type", "event", "list", "streamPath"}, ""))
|
||||
|
||||
pattern_Api_GetRecordCatalog_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"api", "record", "type", "catalog"}, ""))
|
||||
|
||||
pattern_Api_DeleteRecord_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 3, 0, 4, 1, 5, 4}, []string{"api", "record", "type", "delete", "streamPath"}, ""))
|
||||
|
||||
pattern_Api_GetAlarmList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "alarm", "list"}, ""))
|
||||
|
||||
pattern_Api_GetSubscriptionProgress_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 3, 0, 4, 1, 5, 3}, []string{"api", "stream", "progress", "streamPath"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -4133,7 +4458,13 @@ var (
|
||||
|
||||
forward_Api_GetRecordList_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Api_GetEventRecordList_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Api_GetRecordCatalog_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Api_DeleteRecord_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Api_GetAlarmList_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Api_GetSubscriptionProgress_0 = runtime.ForwardResponseMessage
|
||||
)
|
||||
|
||||
137
pb/global.proto
137
pb/global.proto
@@ -181,7 +181,7 @@ service api {
|
||||
}
|
||||
};
|
||||
}
|
||||
rpc UpdatePullProxy (PullProxyInfo) returns (SuccessResponse) {
|
||||
rpc UpdatePullProxy (UpdatePullProxyRequest) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/proxy/pull/update"
|
||||
body: "*"
|
||||
@@ -208,7 +208,7 @@ service api {
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
rpc UpdatePushProxy (PushProxyInfo) returns (SuccessResponse) {
|
||||
rpc UpdatePushProxy (UpdatePushProxyRequest) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/proxy/push/update"
|
||||
body: "*"
|
||||
@@ -224,11 +224,16 @@ service api {
|
||||
get: "/api/transform/list"
|
||||
};
|
||||
}
|
||||
rpc GetRecordList (ReqRecordList) returns (ResponseList) {
|
||||
rpc GetRecordList (ReqRecordList) returns (RecordResponseList) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/record/{type}/list/{streamPath=**}"
|
||||
};
|
||||
}
|
||||
rpc GetEventRecordList (ReqRecordList) returns (EventRecordResponseList) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/record/{type}/event/list/{streamPath=**}"
|
||||
};
|
||||
}
|
||||
rpc GetRecordCatalog (ReqRecordCatalog) returns (ResponseCatalog) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/record/{type}/catalog"
|
||||
@@ -240,6 +245,16 @@ service api {
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
rpc GetAlarmList (AlarmListRequest) returns (AlarmListResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/alarm/list"
|
||||
};
|
||||
}
|
||||
rpc GetSubscriptionProgress (StreamSnapRequest) returns (SubscriptionProgressResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/stream/progress/{streamPath=**}"
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
message DisabledPluginsResponse {
|
||||
@@ -356,6 +371,8 @@ message TaskTreeData {
|
||||
TaskTreeData blocked = 8;
|
||||
uint64 pointer = 9;
|
||||
string startReason = 10;
|
||||
bool eventLoopRunning = 11;
|
||||
uint32 level = 12;
|
||||
}
|
||||
|
||||
message TaskTreeResponse {
|
||||
@@ -561,6 +578,22 @@ message PullProxyInfo {
|
||||
string streamPath = 16; // 流路径
|
||||
}
|
||||
|
||||
message UpdatePullProxyRequest {
|
||||
uint32 ID = 1;
|
||||
optional uint32 parentID = 2; // 父设备ID
|
||||
optional string name = 3; // 设备名称
|
||||
optional string type = 4; // 设备类型
|
||||
optional uint32 status = 5; // 设备状态
|
||||
optional string pullURL = 6; // 拉流地址
|
||||
optional bool pullOnStart = 7; // 启动时拉流
|
||||
optional bool stopOnIdle = 8; // 空闲时停止拉流
|
||||
optional bool audio = 9; // 是否拉取音频
|
||||
optional string description = 10; // 设备描述
|
||||
optional string recordPath = 11; // 录制路径
|
||||
optional google.protobuf.Duration recordFragment = 12; // 录制片段长度
|
||||
optional string streamPath = 13; // 流路径
|
||||
}
|
||||
|
||||
message PushProxyInfo {
|
||||
uint32 ID = 1;
|
||||
google.protobuf.Timestamp createTime = 2;
|
||||
@@ -577,6 +610,20 @@ message PushProxyInfo {
|
||||
string streamPath = 13; // 流路径
|
||||
}
|
||||
|
||||
message UpdatePushProxyRequest {
|
||||
uint32 ID = 1;
|
||||
optional uint32 parentID = 2; // 父设备ID
|
||||
optional string name = 3; // 设备名称
|
||||
optional string type = 4; // 设备类型
|
||||
optional uint32 status = 5; // 设备状态
|
||||
optional string pushURL = 6; // 推流地址
|
||||
optional bool pushOnStart = 7; // 启动时推流
|
||||
optional bool audio = 8; // 是否推音频
|
||||
optional string description = 9; // 设备描述
|
||||
optional uint32 rtt = 10; // 平均RTT
|
||||
optional string streamPath = 11; // 流路径
|
||||
}
|
||||
|
||||
message PushProxyListResponse {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
@@ -664,9 +711,8 @@ message ReqRecordList {
|
||||
string end = 4;
|
||||
uint32 pageNum = 5;
|
||||
uint32 pageSize = 6;
|
||||
string eventId = 7;
|
||||
string type = 8;
|
||||
string eventLevel = 9;
|
||||
string type = 7;
|
||||
string eventLevel = 8;
|
||||
}
|
||||
|
||||
message RecordFile {
|
||||
@@ -675,12 +721,21 @@ message RecordFile {
|
||||
string streamPath = 3;
|
||||
google.protobuf.Timestamp startTime = 4;
|
||||
google.protobuf.Timestamp endTime = 5;
|
||||
string eventLevel = 6;
|
||||
string eventName = 7;
|
||||
string eventDesc = 8;
|
||||
}
|
||||
|
||||
message ResponseList {
|
||||
message EventRecordFile {
|
||||
uint32 id = 1;
|
||||
string filePath = 2;
|
||||
string streamPath = 3;
|
||||
google.protobuf.Timestamp startTime = 4;
|
||||
google.protobuf.Timestamp endTime = 5;
|
||||
string eventId = 6;
|
||||
string eventLevel = 7;
|
||||
string eventName = 8;
|
||||
string eventDesc = 9;
|
||||
}
|
||||
|
||||
message RecordResponseList {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
uint32 total = 3;
|
||||
@@ -689,6 +744,15 @@ message ResponseList {
|
||||
repeated RecordFile data = 6;
|
||||
}
|
||||
|
||||
message EventRecordResponseList {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
uint32 total = 3;
|
||||
uint32 pageNum = 4;
|
||||
uint32 pageSize = 5;
|
||||
repeated EventRecordFile data = 6;
|
||||
}
|
||||
|
||||
message Catalog {
|
||||
string streamPath = 1;
|
||||
uint32 count = 2;
|
||||
@@ -719,4 +783,57 @@ message ResponseDelete {
|
||||
|
||||
message ReqRecordCatalog {
|
||||
string type = 1;
|
||||
}
|
||||
|
||||
message AlarmInfo {
|
||||
uint32 id = 1;
|
||||
string serverInfo = 2;
|
||||
string streamName = 3;
|
||||
string streamPath = 4;
|
||||
string alarmDesc = 5;
|
||||
string alarmName = 6;
|
||||
int32 alarmType = 7;
|
||||
bool isSent = 8;
|
||||
string filePath = 9;
|
||||
google.protobuf.Timestamp createdAt = 10;
|
||||
google.protobuf.Timestamp updatedAt = 11;
|
||||
}
|
||||
|
||||
message AlarmListRequest {
|
||||
int32 pageNum = 1;
|
||||
int32 pageSize = 2;
|
||||
string range = 3;
|
||||
string start = 4;
|
||||
string end = 5;
|
||||
int32 alarmType = 6;
|
||||
string streamPath = 7;
|
||||
string streamName = 8;
|
||||
}
|
||||
|
||||
message AlarmListResponse {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
int32 total = 3;
|
||||
int32 pageNum = 4;
|
||||
int32 pageSize = 5;
|
||||
repeated AlarmInfo data = 6;
|
||||
}
|
||||
|
||||
message Step {
|
||||
string name = 1;
|
||||
string description = 2;
|
||||
string error = 3;
|
||||
google.protobuf.Timestamp startedAt = 4;
|
||||
google.protobuf.Timestamp completedAt = 5;
|
||||
}
|
||||
|
||||
message SubscriptionProgressData {
|
||||
repeated Step steps = 1;
|
||||
int32 currentStep = 2;
|
||||
}
|
||||
|
||||
message SubscriptionProgressResponse {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
SubscriptionProgressData data = 3;
|
||||
}
|
||||
@@ -20,46 +20,49 @@ import (
|
||||
const _ = grpc.SupportPackageIsVersion9
|
||||
|
||||
const (
|
||||
Api_SysInfo_FullMethodName = "/global.api/SysInfo"
|
||||
Api_DisabledPlugins_FullMethodName = "/global.api/DisabledPlugins"
|
||||
Api_Summary_FullMethodName = "/global.api/Summary"
|
||||
Api_Shutdown_FullMethodName = "/global.api/Shutdown"
|
||||
Api_Restart_FullMethodName = "/global.api/Restart"
|
||||
Api_TaskTree_FullMethodName = "/global.api/TaskTree"
|
||||
Api_StopTask_FullMethodName = "/global.api/StopTask"
|
||||
Api_RestartTask_FullMethodName = "/global.api/RestartTask"
|
||||
Api_StreamList_FullMethodName = "/global.api/StreamList"
|
||||
Api_WaitList_FullMethodName = "/global.api/WaitList"
|
||||
Api_StreamInfo_FullMethodName = "/global.api/StreamInfo"
|
||||
Api_PauseStream_FullMethodName = "/global.api/PauseStream"
|
||||
Api_ResumeStream_FullMethodName = "/global.api/ResumeStream"
|
||||
Api_SetStreamSpeed_FullMethodName = "/global.api/SetStreamSpeed"
|
||||
Api_SeekStream_FullMethodName = "/global.api/SeekStream"
|
||||
Api_GetSubscribers_FullMethodName = "/global.api/GetSubscribers"
|
||||
Api_AudioTrackSnap_FullMethodName = "/global.api/AudioTrackSnap"
|
||||
Api_VideoTrackSnap_FullMethodName = "/global.api/VideoTrackSnap"
|
||||
Api_ChangeSubscribe_FullMethodName = "/global.api/ChangeSubscribe"
|
||||
Api_GetStreamAlias_FullMethodName = "/global.api/GetStreamAlias"
|
||||
Api_SetStreamAlias_FullMethodName = "/global.api/SetStreamAlias"
|
||||
Api_StopPublish_FullMethodName = "/global.api/StopPublish"
|
||||
Api_StopSubscribe_FullMethodName = "/global.api/StopSubscribe"
|
||||
Api_GetConfigFile_FullMethodName = "/global.api/GetConfigFile"
|
||||
Api_UpdateConfigFile_FullMethodName = "/global.api/UpdateConfigFile"
|
||||
Api_GetConfig_FullMethodName = "/global.api/GetConfig"
|
||||
Api_GetFormily_FullMethodName = "/global.api/GetFormily"
|
||||
Api_GetPullProxyList_FullMethodName = "/global.api/GetPullProxyList"
|
||||
Api_AddPullProxy_FullMethodName = "/global.api/AddPullProxy"
|
||||
Api_RemovePullProxy_FullMethodName = "/global.api/RemovePullProxy"
|
||||
Api_UpdatePullProxy_FullMethodName = "/global.api/UpdatePullProxy"
|
||||
Api_GetPushProxyList_FullMethodName = "/global.api/GetPushProxyList"
|
||||
Api_AddPushProxy_FullMethodName = "/global.api/AddPushProxy"
|
||||
Api_RemovePushProxy_FullMethodName = "/global.api/RemovePushProxy"
|
||||
Api_UpdatePushProxy_FullMethodName = "/global.api/UpdatePushProxy"
|
||||
Api_GetRecording_FullMethodName = "/global.api/GetRecording"
|
||||
Api_GetTransformList_FullMethodName = "/global.api/GetTransformList"
|
||||
Api_GetRecordList_FullMethodName = "/global.api/GetRecordList"
|
||||
Api_GetRecordCatalog_FullMethodName = "/global.api/GetRecordCatalog"
|
||||
Api_DeleteRecord_FullMethodName = "/global.api/DeleteRecord"
|
||||
Api_SysInfo_FullMethodName = "/global.api/SysInfo"
|
||||
Api_DisabledPlugins_FullMethodName = "/global.api/DisabledPlugins"
|
||||
Api_Summary_FullMethodName = "/global.api/Summary"
|
||||
Api_Shutdown_FullMethodName = "/global.api/Shutdown"
|
||||
Api_Restart_FullMethodName = "/global.api/Restart"
|
||||
Api_TaskTree_FullMethodName = "/global.api/TaskTree"
|
||||
Api_StopTask_FullMethodName = "/global.api/StopTask"
|
||||
Api_RestartTask_FullMethodName = "/global.api/RestartTask"
|
||||
Api_StreamList_FullMethodName = "/global.api/StreamList"
|
||||
Api_WaitList_FullMethodName = "/global.api/WaitList"
|
||||
Api_StreamInfo_FullMethodName = "/global.api/StreamInfo"
|
||||
Api_PauseStream_FullMethodName = "/global.api/PauseStream"
|
||||
Api_ResumeStream_FullMethodName = "/global.api/ResumeStream"
|
||||
Api_SetStreamSpeed_FullMethodName = "/global.api/SetStreamSpeed"
|
||||
Api_SeekStream_FullMethodName = "/global.api/SeekStream"
|
||||
Api_GetSubscribers_FullMethodName = "/global.api/GetSubscribers"
|
||||
Api_AudioTrackSnap_FullMethodName = "/global.api/AudioTrackSnap"
|
||||
Api_VideoTrackSnap_FullMethodName = "/global.api/VideoTrackSnap"
|
||||
Api_ChangeSubscribe_FullMethodName = "/global.api/ChangeSubscribe"
|
||||
Api_GetStreamAlias_FullMethodName = "/global.api/GetStreamAlias"
|
||||
Api_SetStreamAlias_FullMethodName = "/global.api/SetStreamAlias"
|
||||
Api_StopPublish_FullMethodName = "/global.api/StopPublish"
|
||||
Api_StopSubscribe_FullMethodName = "/global.api/StopSubscribe"
|
||||
Api_GetConfigFile_FullMethodName = "/global.api/GetConfigFile"
|
||||
Api_UpdateConfigFile_FullMethodName = "/global.api/UpdateConfigFile"
|
||||
Api_GetConfig_FullMethodName = "/global.api/GetConfig"
|
||||
Api_GetFormily_FullMethodName = "/global.api/GetFormily"
|
||||
Api_GetPullProxyList_FullMethodName = "/global.api/GetPullProxyList"
|
||||
Api_AddPullProxy_FullMethodName = "/global.api/AddPullProxy"
|
||||
Api_RemovePullProxy_FullMethodName = "/global.api/RemovePullProxy"
|
||||
Api_UpdatePullProxy_FullMethodName = "/global.api/UpdatePullProxy"
|
||||
Api_GetPushProxyList_FullMethodName = "/global.api/GetPushProxyList"
|
||||
Api_AddPushProxy_FullMethodName = "/global.api/AddPushProxy"
|
||||
Api_RemovePushProxy_FullMethodName = "/global.api/RemovePushProxy"
|
||||
Api_UpdatePushProxy_FullMethodName = "/global.api/UpdatePushProxy"
|
||||
Api_GetRecording_FullMethodName = "/global.api/GetRecording"
|
||||
Api_GetTransformList_FullMethodName = "/global.api/GetTransformList"
|
||||
Api_GetRecordList_FullMethodName = "/global.api/GetRecordList"
|
||||
Api_GetEventRecordList_FullMethodName = "/global.api/GetEventRecordList"
|
||||
Api_GetRecordCatalog_FullMethodName = "/global.api/GetRecordCatalog"
|
||||
Api_DeleteRecord_FullMethodName = "/global.api/DeleteRecord"
|
||||
Api_GetAlarmList_FullMethodName = "/global.api/GetAlarmList"
|
||||
Api_GetSubscriptionProgress_FullMethodName = "/global.api/GetSubscriptionProgress"
|
||||
)
|
||||
|
||||
// ApiClient is the client API for Api service.
|
||||
@@ -96,16 +99,19 @@ type ApiClient interface {
|
||||
GetPullProxyList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PullProxyListResponse, error)
|
||||
AddPullProxy(ctx context.Context, in *PullProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
RemovePullProxy(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
UpdatePullProxy(ctx context.Context, in *PullProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
UpdatePullProxy(ctx context.Context, in *UpdatePullProxyRequest, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
GetPushProxyList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PushProxyListResponse, error)
|
||||
AddPushProxy(ctx context.Context, in *PushProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
RemovePushProxy(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
UpdatePushProxy(ctx context.Context, in *PushProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
UpdatePushProxy(ctx context.Context, in *UpdatePushProxyRequest, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
GetRecording(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*RecordingListResponse, error)
|
||||
GetTransformList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*TransformListResponse, error)
|
||||
GetRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*ResponseList, error)
|
||||
GetRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*RecordResponseList, error)
|
||||
GetEventRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*EventRecordResponseList, error)
|
||||
GetRecordCatalog(ctx context.Context, in *ReqRecordCatalog, opts ...grpc.CallOption) (*ResponseCatalog, error)
|
||||
DeleteRecord(ctx context.Context, in *ReqRecordDelete, opts ...grpc.CallOption) (*ResponseDelete, error)
|
||||
GetAlarmList(ctx context.Context, in *AlarmListRequest, opts ...grpc.CallOption) (*AlarmListResponse, error)
|
||||
GetSubscriptionProgress(ctx context.Context, in *StreamSnapRequest, opts ...grpc.CallOption) (*SubscriptionProgressResponse, error)
|
||||
}
|
||||
|
||||
type apiClient struct {
|
||||
@@ -416,7 +422,7 @@ func (c *apiClient) RemovePullProxy(ctx context.Context, in *RequestWithId, opts
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) UpdatePullProxy(ctx context.Context, in *PullProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
func (c *apiClient) UpdatePullProxy(ctx context.Context, in *UpdatePullProxyRequest, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(SuccessResponse)
|
||||
err := c.cc.Invoke(ctx, Api_UpdatePullProxy_FullMethodName, in, out, cOpts...)
|
||||
@@ -456,7 +462,7 @@ func (c *apiClient) RemovePushProxy(ctx context.Context, in *RequestWithId, opts
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) UpdatePushProxy(ctx context.Context, in *PushProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
func (c *apiClient) UpdatePushProxy(ctx context.Context, in *UpdatePushProxyRequest, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(SuccessResponse)
|
||||
err := c.cc.Invoke(ctx, Api_UpdatePushProxy_FullMethodName, in, out, cOpts...)
|
||||
@@ -486,9 +492,9 @@ func (c *apiClient) GetTransformList(ctx context.Context, in *emptypb.Empty, opt
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*ResponseList, error) {
|
||||
func (c *apiClient) GetRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*RecordResponseList, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ResponseList)
|
||||
out := new(RecordResponseList)
|
||||
err := c.cc.Invoke(ctx, Api_GetRecordList_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -496,6 +502,16 @@ func (c *apiClient) GetRecordList(ctx context.Context, in *ReqRecordList, opts .
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetEventRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*EventRecordResponseList, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(EventRecordResponseList)
|
||||
err := c.cc.Invoke(ctx, Api_GetEventRecordList_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetRecordCatalog(ctx context.Context, in *ReqRecordCatalog, opts ...grpc.CallOption) (*ResponseCatalog, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(ResponseCatalog)
|
||||
@@ -516,6 +532,26 @@ func (c *apiClient) DeleteRecord(ctx context.Context, in *ReqRecordDelete, opts
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetAlarmList(ctx context.Context, in *AlarmListRequest, opts ...grpc.CallOption) (*AlarmListResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(AlarmListResponse)
|
||||
err := c.cc.Invoke(ctx, Api_GetAlarmList_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetSubscriptionProgress(ctx context.Context, in *StreamSnapRequest, opts ...grpc.CallOption) (*SubscriptionProgressResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(SubscriptionProgressResponse)
|
||||
err := c.cc.Invoke(ctx, Api_GetSubscriptionProgress_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// ApiServer is the server API for Api service.
|
||||
// All implementations must embed UnimplementedApiServer
|
||||
// for forward compatibility.
|
||||
@@ -550,16 +586,19 @@ type ApiServer interface {
|
||||
GetPullProxyList(context.Context, *emptypb.Empty) (*PullProxyListResponse, error)
|
||||
AddPullProxy(context.Context, *PullProxyInfo) (*SuccessResponse, error)
|
||||
RemovePullProxy(context.Context, *RequestWithId) (*SuccessResponse, error)
|
||||
UpdatePullProxy(context.Context, *PullProxyInfo) (*SuccessResponse, error)
|
||||
UpdatePullProxy(context.Context, *UpdatePullProxyRequest) (*SuccessResponse, error)
|
||||
GetPushProxyList(context.Context, *emptypb.Empty) (*PushProxyListResponse, error)
|
||||
AddPushProxy(context.Context, *PushProxyInfo) (*SuccessResponse, error)
|
||||
RemovePushProxy(context.Context, *RequestWithId) (*SuccessResponse, error)
|
||||
UpdatePushProxy(context.Context, *PushProxyInfo) (*SuccessResponse, error)
|
||||
UpdatePushProxy(context.Context, *UpdatePushProxyRequest) (*SuccessResponse, error)
|
||||
GetRecording(context.Context, *emptypb.Empty) (*RecordingListResponse, error)
|
||||
GetTransformList(context.Context, *emptypb.Empty) (*TransformListResponse, error)
|
||||
GetRecordList(context.Context, *ReqRecordList) (*ResponseList, error)
|
||||
GetRecordList(context.Context, *ReqRecordList) (*RecordResponseList, error)
|
||||
GetEventRecordList(context.Context, *ReqRecordList) (*EventRecordResponseList, error)
|
||||
GetRecordCatalog(context.Context, *ReqRecordCatalog) (*ResponseCatalog, error)
|
||||
DeleteRecord(context.Context, *ReqRecordDelete) (*ResponseDelete, error)
|
||||
GetAlarmList(context.Context, *AlarmListRequest) (*AlarmListResponse, error)
|
||||
GetSubscriptionProgress(context.Context, *StreamSnapRequest) (*SubscriptionProgressResponse, error)
|
||||
mustEmbedUnimplementedApiServer()
|
||||
}
|
||||
|
||||
@@ -660,7 +699,7 @@ func (UnimplementedApiServer) AddPullProxy(context.Context, *PullProxyInfo) (*Su
|
||||
func (UnimplementedApiServer) RemovePullProxy(context.Context, *RequestWithId) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RemovePullProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) UpdatePullProxy(context.Context, *PullProxyInfo) (*SuccessResponse, error) {
|
||||
func (UnimplementedApiServer) UpdatePullProxy(context.Context, *UpdatePullProxyRequest) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method UpdatePullProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetPushProxyList(context.Context, *emptypb.Empty) (*PushProxyListResponse, error) {
|
||||
@@ -672,7 +711,7 @@ func (UnimplementedApiServer) AddPushProxy(context.Context, *PushProxyInfo) (*Su
|
||||
func (UnimplementedApiServer) RemovePushProxy(context.Context, *RequestWithId) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RemovePushProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) UpdatePushProxy(context.Context, *PushProxyInfo) (*SuccessResponse, error) {
|
||||
func (UnimplementedApiServer) UpdatePushProxy(context.Context, *UpdatePushProxyRequest) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method UpdatePushProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetRecording(context.Context, *emptypb.Empty) (*RecordingListResponse, error) {
|
||||
@@ -681,15 +720,24 @@ func (UnimplementedApiServer) GetRecording(context.Context, *emptypb.Empty) (*Re
|
||||
func (UnimplementedApiServer) GetTransformList(context.Context, *emptypb.Empty) (*TransformListResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetTransformList not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetRecordList(context.Context, *ReqRecordList) (*ResponseList, error) {
|
||||
func (UnimplementedApiServer) GetRecordList(context.Context, *ReqRecordList) (*RecordResponseList, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetRecordList not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetEventRecordList(context.Context, *ReqRecordList) (*EventRecordResponseList, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetEventRecordList not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetRecordCatalog(context.Context, *ReqRecordCatalog) (*ResponseCatalog, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetRecordCatalog not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) DeleteRecord(context.Context, *ReqRecordDelete) (*ResponseDelete, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteRecord not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetAlarmList(context.Context, *AlarmListRequest) (*AlarmListResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetAlarmList not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetSubscriptionProgress(context.Context, *StreamSnapRequest) (*SubscriptionProgressResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetSubscriptionProgress not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) mustEmbedUnimplementedApiServer() {}
|
||||
func (UnimplementedApiServer) testEmbeddedByValue() {}
|
||||
|
||||
@@ -1252,7 +1300,7 @@ func _Api_RemovePullProxy_Handler(srv interface{}, ctx context.Context, dec func
|
||||
}
|
||||
|
||||
func _Api_UpdatePullProxy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PullProxyInfo)
|
||||
in := new(UpdatePullProxyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1264,7 +1312,7 @@ func _Api_UpdatePullProxy_Handler(srv interface{}, ctx context.Context, dec func
|
||||
FullMethod: Api_UpdatePullProxy_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).UpdatePullProxy(ctx, req.(*PullProxyInfo))
|
||||
return srv.(ApiServer).UpdatePullProxy(ctx, req.(*UpdatePullProxyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
@@ -1324,7 +1372,7 @@ func _Api_RemovePushProxy_Handler(srv interface{}, ctx context.Context, dec func
|
||||
}
|
||||
|
||||
func _Api_UpdatePushProxy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PushProxyInfo)
|
||||
in := new(UpdatePushProxyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1336,7 +1384,7 @@ func _Api_UpdatePushProxy_Handler(srv interface{}, ctx context.Context, dec func
|
||||
FullMethod: Api_UpdatePushProxy_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).UpdatePushProxy(ctx, req.(*PushProxyInfo))
|
||||
return srv.(ApiServer).UpdatePushProxy(ctx, req.(*UpdatePushProxyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
@@ -1395,6 +1443,24 @@ func _Api_GetRecordList_Handler(srv interface{}, ctx context.Context, dec func(i
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetEventRecordList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ReqRecordList)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).GetEventRecordList(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: Api_GetEventRecordList_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).GetEventRecordList(ctx, req.(*ReqRecordList))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetRecordCatalog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(ReqRecordCatalog)
|
||||
if err := dec(in); err != nil {
|
||||
@@ -1431,6 +1497,42 @@ func _Api_DeleteRecord_Handler(srv interface{}, ctx context.Context, dec func(in
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetAlarmList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(AlarmListRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).GetAlarmList(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: Api_GetAlarmList_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).GetAlarmList(ctx, req.(*AlarmListRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetSubscriptionProgress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(StreamSnapRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).GetSubscriptionProgress(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: Api_GetSubscriptionProgress_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).GetSubscriptionProgress(ctx, req.(*StreamSnapRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// Api_ServiceDesc is the grpc.ServiceDesc for Api service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
@@ -1590,6 +1692,10 @@ var Api_ServiceDesc = grpc.ServiceDesc{
|
||||
MethodName: "GetRecordList",
|
||||
Handler: _Api_GetRecordList_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetEventRecordList",
|
||||
Handler: _Api_GetEventRecordList_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetRecordCatalog",
|
||||
Handler: _Api_GetRecordCatalog_Handler,
|
||||
@@ -1598,6 +1704,14 @@ var Api_ServiceDesc = grpc.ServiceDesc{
|
||||
MethodName: "DeleteRecord",
|
||||
Handler: _Api_DeleteRecord_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetAlarmList",
|
||||
Handler: _Api_GetAlarmList_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetSubscriptionProgress",
|
||||
Handler: _Api_GetSubscriptionProgress_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "global.proto",
|
||||
|
||||
90
pkg/adts.go
90
pkg/adts.go
@@ -1,90 +0,0 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/deepch/vdk/codec/aacparser"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
var _ IAVFrame = (*ADTS)(nil)
|
||||
|
||||
type ADTS struct {
|
||||
DTS time.Duration
|
||||
util.RecyclableMemory
|
||||
}
|
||||
|
||||
func (A *ADTS) Parse(track *AVTrack) (err error) {
|
||||
if track.ICodecCtx == nil {
|
||||
var ctx = &codec.AACCtx{}
|
||||
var reader = A.NewReader()
|
||||
var adts []byte
|
||||
adts, err = reader.ReadBytes(7)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var hdrlen, framelen, samples int
|
||||
ctx.Config, hdrlen, framelen, samples, err = aacparser.ParseADTSHeader(adts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b := &bytes.Buffer{}
|
||||
aacparser.WriteMPEG4AudioConfig(b, ctx.Config)
|
||||
ctx.ConfigBytes = b.Bytes()
|
||||
track.ICodecCtx = ctx
|
||||
track.Info("ADTS", "hdrlen", hdrlen, "framelen", framelen, "samples", samples)
|
||||
}
|
||||
track.Value.Raw, err = A.Demux(track.ICodecCtx)
|
||||
return
|
||||
}
|
||||
|
||||
func (A *ADTS) ConvertCtx(ctx codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) {
|
||||
return ctx.GetBase(), nil, nil
|
||||
}
|
||||
|
||||
func (A *ADTS) Demux(ctx codec.ICodecCtx) (any, error) {
|
||||
var reader = A.NewReader()
|
||||
err := reader.Skip(7)
|
||||
var mem util.Memory
|
||||
reader.Range(mem.AppendOne)
|
||||
return mem, err
|
||||
}
|
||||
|
||||
func (A *ADTS) Mux(ctx codec.ICodecCtx, frame *AVFrame) {
|
||||
A.InitRecycleIndexes(1)
|
||||
A.DTS = frame.Timestamp * 90 / time.Millisecond
|
||||
aacCtx, ok := ctx.GetBase().(*codec.AACCtx)
|
||||
if !ok {
|
||||
A.Append(frame.Raw.(util.Memory).Buffers...)
|
||||
return
|
||||
}
|
||||
adts := A.NextN(7)
|
||||
raw := frame.Raw.(util.Memory)
|
||||
aacparser.FillADTSHeader(adts, aacCtx.Config, raw.Size/aacCtx.GetSampleSize(), raw.Size)
|
||||
A.Append(raw.Buffers...)
|
||||
}
|
||||
|
||||
func (A *ADTS) GetTimestamp() time.Duration {
|
||||
return A.DTS * time.Millisecond / 90
|
||||
}
|
||||
|
||||
func (A *ADTS) GetCTS() time.Duration {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (A *ADTS) GetSize() int {
|
||||
return A.Size
|
||||
}
|
||||
|
||||
func (A *ADTS) String() string {
|
||||
return fmt.Sprintf("ADTS{size:%d}", A.Size)
|
||||
}
|
||||
|
||||
func (A *ADTS) Dump(b byte, writer io.Writer) {
|
||||
//TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
182
pkg/annexb.go
182
pkg/annexb.go
@@ -1,182 +0,0 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/deepch/vdk/codec/h264parser"
|
||||
"github.com/deepch/vdk/codec/h265parser"
|
||||
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
var _ IAVFrame = (*AnnexB)(nil)
|
||||
|
||||
type AnnexB struct {
|
||||
Hevc bool
|
||||
PTS time.Duration
|
||||
DTS time.Duration
|
||||
util.RecyclableMemory
|
||||
}
|
||||
|
||||
func (a *AnnexB) Dump(t byte, w io.Writer) {
|
||||
m := a.GetAllocator().Borrow(4 + a.Size)
|
||||
binary.BigEndian.PutUint32(m, uint32(a.Size))
|
||||
a.CopyTo(m[4:])
|
||||
w.Write(m)
|
||||
}
|
||||
|
||||
// DecodeConfig implements pkg.IAVFrame.
|
||||
func (a *AnnexB) ConvertCtx(ctx codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) {
|
||||
return ctx.GetBase(), nil, nil
|
||||
}
|
||||
|
||||
// GetSize implements pkg.IAVFrame.
|
||||
func (a *AnnexB) GetSize() int {
|
||||
return a.Size
|
||||
}
|
||||
|
||||
func (a *AnnexB) GetTimestamp() time.Duration {
|
||||
return a.DTS * time.Millisecond / 90
|
||||
}
|
||||
|
||||
func (a *AnnexB) GetCTS() time.Duration {
|
||||
return (a.PTS - a.DTS) * time.Millisecond / 90
|
||||
}
|
||||
|
||||
// Parse implements pkg.IAVFrame.
|
||||
func (a *AnnexB) Parse(t *AVTrack) (err error) {
|
||||
if a.Hevc {
|
||||
if t.ICodecCtx == nil {
|
||||
t.ICodecCtx = &codec.H265Ctx{}
|
||||
}
|
||||
} else {
|
||||
if t.ICodecCtx == nil {
|
||||
t.ICodecCtx = &codec.H264Ctx{}
|
||||
}
|
||||
}
|
||||
if t.Value.Raw, err = a.Demux(t.ICodecCtx); err != nil {
|
||||
return
|
||||
}
|
||||
for _, nalu := range t.Value.Raw.(Nalus) {
|
||||
if a.Hevc {
|
||||
ctx := t.ICodecCtx.(*codec.H265Ctx)
|
||||
switch codec.ParseH265NALUType(nalu.Buffers[0][0]) {
|
||||
case h265parser.NAL_UNIT_VPS:
|
||||
ctx.RecordInfo.VPS = [][]byte{nalu.ToBytes()}
|
||||
case h265parser.NAL_UNIT_SPS:
|
||||
ctx.RecordInfo.SPS = [][]byte{nalu.ToBytes()}
|
||||
case h265parser.NAL_UNIT_PPS:
|
||||
ctx.RecordInfo.PPS = [][]byte{nalu.ToBytes()}
|
||||
ctx.CodecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(ctx.VPS(), ctx.SPS(), ctx.PPS())
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_CRA:
|
||||
t.Value.IDR = true
|
||||
}
|
||||
} else {
|
||||
ctx := t.ICodecCtx.(*codec.H264Ctx)
|
||||
switch codec.ParseH264NALUType(nalu.Buffers[0][0]) {
|
||||
case codec.NALU_SPS:
|
||||
ctx.RecordInfo.SPS = [][]byte{nalu.ToBytes()}
|
||||
if len(ctx.RecordInfo.PPS) > 0 {
|
||||
ctx.CodecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS(), ctx.PPS())
|
||||
}
|
||||
case codec.NALU_PPS:
|
||||
ctx.RecordInfo.PPS = [][]byte{nalu.ToBytes()}
|
||||
if len(ctx.RecordInfo.SPS) > 0 {
|
||||
ctx.CodecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS(), ctx.PPS())
|
||||
}
|
||||
case codec.NALU_IDR_Picture:
|
||||
t.Value.IDR = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// String implements pkg.IAVFrame.
|
||||
func (a *AnnexB) String() string {
|
||||
return fmt.Sprintf("%d %d", a.DTS, a.Memory.Size)
|
||||
}
|
||||
|
||||
// Demux implements pkg.IAVFrame.
|
||||
func (a *AnnexB) Demux(codecCtx codec.ICodecCtx) (ret any, err error) {
|
||||
var nalus Nalus
|
||||
var lastFourBytes [4]byte
|
||||
var b byte
|
||||
var shallow util.Memory
|
||||
shallow.Append(a.Buffers...)
|
||||
reader := shallow.NewReader()
|
||||
|
||||
gotNalu := func() {
|
||||
var nalu util.Memory
|
||||
for buf := range reader.ClipFront {
|
||||
nalu.AppendOne(buf)
|
||||
}
|
||||
nalus = append(nalus, nalu)
|
||||
|
||||
}
|
||||
|
||||
for {
|
||||
b, err = reader.ReadByte()
|
||||
if err == nil {
|
||||
copy(lastFourBytes[:], lastFourBytes[1:])
|
||||
lastFourBytes[3] = b
|
||||
var startCode = 0
|
||||
if lastFourBytes == codec.NALU_Delimiter2 {
|
||||
startCode = 4
|
||||
} else if [3]byte(lastFourBytes[1:]) == codec.NALU_Delimiter1 {
|
||||
startCode = 3
|
||||
}
|
||||
if startCode > 0 && reader.Offset() >= 3 {
|
||||
if reader.Offset() == 3 {
|
||||
startCode = 3
|
||||
}
|
||||
reader.Unread(startCode)
|
||||
if reader.Offset() > 0 {
|
||||
gotNalu()
|
||||
}
|
||||
reader.Skip(startCode)
|
||||
for range reader.ClipFront {
|
||||
}
|
||||
}
|
||||
} else if err == io.EOF {
|
||||
if reader.Offset() > 0 {
|
||||
gotNalu()
|
||||
}
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
}
|
||||
ret = nalus
|
||||
return
|
||||
}
|
||||
|
||||
func (a *AnnexB) Mux(codecCtx codec.ICodecCtx, frame *AVFrame) {
|
||||
a.DTS = frame.Timestamp * 90 / time.Millisecond
|
||||
a.PTS = a.DTS + frame.CTS*90/time.Millisecond
|
||||
a.InitRecycleIndexes(0)
|
||||
delimiter2 := codec.NALU_Delimiter2[:]
|
||||
a.AppendOne(delimiter2)
|
||||
if frame.IDR {
|
||||
switch ctx := codecCtx.(type) {
|
||||
case *codec.H264Ctx:
|
||||
a.Append(ctx.SPS(), delimiter2, ctx.PPS(), delimiter2)
|
||||
case *codec.H265Ctx:
|
||||
a.Append(ctx.SPS(), delimiter2, ctx.PPS(), delimiter2, ctx.VPS(), delimiter2)
|
||||
}
|
||||
}
|
||||
for i, nalu := range frame.Raw.(Nalus) {
|
||||
if i > 0 {
|
||||
a.AppendOne(codec.NALU_Delimiter1[:])
|
||||
}
|
||||
a.Append(nalu.Buffers...)
|
||||
}
|
||||
}
|
||||
219
pkg/annexb_reader.go
Normal file
219
pkg/annexb_reader.go
Normal file
@@ -0,0 +1,219 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
// AnnexBReader 专门用于读取 AnnexB 格式数据的读取器
|
||||
// 模仿 MemoryReader 结构,支持跨切片读取和动态数据管理
|
||||
type AnnexBReader struct {
|
||||
util.Memory // 存储数据的多段内存
|
||||
Length, offset0, offset1 int // 可读长度和当前读取位置
|
||||
}
|
||||
|
||||
// AppendBuffer 追加单个数据缓冲区
|
||||
func (r *AnnexBReader) AppendBuffer(buf []byte) {
|
||||
r.PushOne(buf)
|
||||
r.Length += len(buf)
|
||||
}
|
||||
|
||||
// ClipFront 剔除已读取的数据,释放内存
|
||||
func (r *AnnexBReader) ClipFront() {
|
||||
readOffset := r.Size - r.Length
|
||||
if readOffset == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// 剔除已完全读取的缓冲区(不回收内存)
|
||||
if r.offset0 > 0 {
|
||||
r.Buffers = r.Buffers[r.offset0:]
|
||||
r.Size -= readOffset
|
||||
r.offset0 = 0
|
||||
}
|
||||
|
||||
// 处理部分读取的缓冲区(不回收内存)
|
||||
if r.offset1 > 0 && len(r.Buffers) > 0 {
|
||||
buf := r.Buffers[0]
|
||||
r.Buffers[0] = buf[r.offset1:]
|
||||
r.Size -= r.offset1
|
||||
r.offset1 = 0
|
||||
}
|
||||
}
|
||||
|
||||
// FindStartCode 查找 NALU 起始码,返回起始码位置和长度
|
||||
func (r *AnnexBReader) FindStartCode() (pos int, startCodeLen int, found bool) {
|
||||
if r.Length < 3 {
|
||||
return 0, 0, false
|
||||
}
|
||||
|
||||
// 逐字节检查起始码
|
||||
for i := 0; i <= r.Length-3; i++ {
|
||||
// 优先检查 4 字节起始码
|
||||
if i <= r.Length-4 {
|
||||
if r.getByteAt(i) == 0x00 && r.getByteAt(i+1) == 0x00 &&
|
||||
r.getByteAt(i+2) == 0x00 && r.getByteAt(i+3) == 0x01 {
|
||||
return i, 4, true
|
||||
}
|
||||
}
|
||||
|
||||
// 检查 3 字节起始码(但要确保不是 4 字节起始码的一部分)
|
||||
if r.getByteAt(i) == 0x00 && r.getByteAt(i+1) == 0x00 && r.getByteAt(i+2) == 0x01 {
|
||||
// 确保这不是4字节起始码的一部分
|
||||
if i == 0 || r.getByteAt(i-1) != 0x00 {
|
||||
return i, 3, true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0, 0, false
|
||||
}
|
||||
|
||||
// getByteAt 获取指定位置的字节,不改变读取位置
|
||||
func (r *AnnexBReader) getByteAt(pos int) byte {
|
||||
if pos >= r.Length {
|
||||
return 0
|
||||
}
|
||||
|
||||
// 计算在哪个缓冲区和缓冲区内的位置
|
||||
currentPos := 0
|
||||
bufferIndex := r.offset0
|
||||
bufferOffset := r.offset1
|
||||
|
||||
for bufferIndex < len(r.Buffers) {
|
||||
buf := r.Buffers[bufferIndex]
|
||||
available := len(buf) - bufferOffset
|
||||
|
||||
if currentPos+available > pos {
|
||||
// 目标位置在当前缓冲区内
|
||||
return buf[bufferOffset+(pos-currentPos)]
|
||||
}
|
||||
|
||||
currentPos += available
|
||||
bufferIndex++
|
||||
bufferOffset = 0
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
type InvalidDataError struct {
|
||||
util.Memory
|
||||
}
|
||||
|
||||
func (e InvalidDataError) Error() string {
|
||||
return fmt.Sprintf("% 02X", e.ToBytes())
|
||||
}
|
||||
|
||||
// ReadNALU 读取一个完整的 NALU
|
||||
// withStart 用于接收“包含起始码”的内存段
|
||||
// withoutStart 用于接收“不包含起始码”的内存段
|
||||
// 允许 withStart 或 withoutStart 为 nil(表示调用方不需要该形式的数据)
|
||||
func (r *AnnexBReader) ReadNALU(withStart, withoutStart *util.Memory) error {
|
||||
r.ClipFront()
|
||||
// 定位到第一个起始码
|
||||
firstPos, startCodeLen, found := r.FindStartCode()
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
|
||||
// 跳过起始码之前的无效数据
|
||||
if firstPos > 0 {
|
||||
var invalidData util.Memory
|
||||
var reader util.MemoryReader
|
||||
reader.Memory = &r.Memory
|
||||
reader.RangeN(firstPos, invalidData.PushOne)
|
||||
return InvalidDataError{invalidData}
|
||||
}
|
||||
|
||||
// 为了查找下一个起始码,需要临时跳过当前起始码再查找
|
||||
saveOffset0, saveOffset1, saveLength := r.offset0, r.offset1, r.Length
|
||||
r.forward(startCodeLen)
|
||||
nextPosAfterStart, _, nextFound := r.FindStartCode()
|
||||
// 恢复到起始码起点
|
||||
r.offset0, r.offset1, r.Length = saveOffset0, saveOffset1, saveLength
|
||||
if !nextFound {
|
||||
return nil
|
||||
}
|
||||
|
||||
// 依次读取并填充输出,同时推进读取位置到 NALU 末尾(不消耗下一个起始码)
|
||||
remaining := startCodeLen + nextPosAfterStart
|
||||
// 需要在 withoutStart 中跳过的前缀(即起始码长度)
|
||||
skipForWithout := startCodeLen
|
||||
|
||||
for remaining > 0 && r.offset0 < len(r.Buffers) {
|
||||
buf := r.getCurrentBuffer()
|
||||
readLen := len(buf)
|
||||
if readLen > remaining {
|
||||
readLen = remaining
|
||||
}
|
||||
segment := buf[:readLen]
|
||||
|
||||
if withStart != nil {
|
||||
withStart.PushOne(segment)
|
||||
}
|
||||
|
||||
if withoutStart != nil {
|
||||
if skipForWithout >= readLen {
|
||||
// 本段全部属于起始码,跳过
|
||||
skipForWithout -= readLen
|
||||
} else {
|
||||
// 仅跳过起始码前缀,余下推入 withoutStart
|
||||
withoutStart.PushOne(segment[skipForWithout:])
|
||||
skipForWithout = 0
|
||||
}
|
||||
}
|
||||
|
||||
if readLen == len(buf) {
|
||||
r.skipCurrentBuffer()
|
||||
} else {
|
||||
r.forward(readLen)
|
||||
}
|
||||
remaining -= readLen
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getCurrentBuffer 获取当前读取位置的缓冲区
|
||||
func (r *AnnexBReader) getCurrentBuffer() []byte {
|
||||
if r.offset0 >= len(r.Buffers) {
|
||||
return nil
|
||||
}
|
||||
return r.Buffers[r.offset0][r.offset1:]
|
||||
}
|
||||
|
||||
// forward 向前移动读取位置
|
||||
func (r *AnnexBReader) forward(n int) {
|
||||
if n <= 0 || r.Length <= 0 {
|
||||
return
|
||||
}
|
||||
if n > r.Length { // 防御:不允许超出剩余长度
|
||||
n = r.Length
|
||||
}
|
||||
r.Length -= n
|
||||
for n > 0 && r.offset0 < len(r.Buffers) {
|
||||
cur := r.Buffers[r.offset0]
|
||||
remain := len(cur) - r.offset1
|
||||
if n < remain { // 仍在当前缓冲区内
|
||||
r.offset1 += n
|
||||
n = 0
|
||||
return
|
||||
}
|
||||
// 用掉当前缓冲区剩余部分,跳到下一个缓冲区起点
|
||||
n -= remain
|
||||
r.offset0++
|
||||
r.offset1 = 0
|
||||
}
|
||||
}
|
||||
|
||||
// skipCurrentBuffer 跳过当前缓冲区
|
||||
func (r *AnnexBReader) skipCurrentBuffer() {
|
||||
if r.offset0 < len(r.Buffers) {
|
||||
curBufLen := len(r.Buffers[r.offset0]) - r.offset1
|
||||
r.Length -= curBufLen
|
||||
r.offset0++
|
||||
r.offset1 = 0
|
||||
}
|
||||
}
|
||||
173
pkg/annexb_reader_test.go
Normal file
173
pkg/annexb_reader_test.go
Normal file
@@ -0,0 +1,173 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
_ "embed"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
func bytesFromMemory(m util.Memory) []byte {
|
||||
if m.Size == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make([]byte, 0, m.Size)
|
||||
for _, b := range m.Buffers {
|
||||
out = append(out, b...)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func TestAnnexBReader_ReadNALU_Basic(t *testing.T) {
|
||||
|
||||
var reader AnnexBReader
|
||||
|
||||
// 3 个 NALU,分别使用 4 字节、3 字节、4 字节起始码
|
||||
expected1 := []byte{0x67, 0x42, 0x00, 0x1E}
|
||||
expected2 := []byte{0x68, 0xCE, 0x3C, 0x80}
|
||||
expected3 := []byte{0x65, 0x88, 0x84, 0x00}
|
||||
|
||||
buf := append([]byte{0x00, 0x00, 0x00, 0x01}, expected1...)
|
||||
buf = append(buf, append([]byte{0x00, 0x00, 0x01}, expected2...)...)
|
||||
buf = append(buf, append([]byte{0x00, 0x00, 0x00, 0x01}, expected3...)...)
|
||||
|
||||
reader.AppendBuffer(append(buf, codec.NALU_Delimiter2[:]...))
|
||||
|
||||
// 读取并校验 3 个 NALU(不包含起始码)
|
||||
var n util.Memory
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("read nalu 1: %v", err)
|
||||
}
|
||||
if !bytes.Equal(bytesFromMemory(n), expected1) {
|
||||
t.Fatalf("nalu1 mismatch")
|
||||
}
|
||||
|
||||
n = util.Memory{}
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("read nalu 2: %v", err)
|
||||
}
|
||||
if !bytes.Equal(bytesFromMemory(n), expected2) {
|
||||
t.Fatalf("nalu2 mismatch")
|
||||
}
|
||||
|
||||
n = util.Memory{}
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("read nalu 3: %v", err)
|
||||
}
|
||||
if !bytes.Equal(bytesFromMemory(n), expected3) {
|
||||
t.Fatalf("nalu3 mismatch")
|
||||
}
|
||||
|
||||
// 再读一次应无更多起始码,返回 nil 错误且长度为 0
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("expected nil error when no more nalu, got: %v", err)
|
||||
}
|
||||
if reader.Length != 4 {
|
||||
t.Fatalf("expected length 0 after reading all, got %d", reader.Length)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAnnexBReader_AppendBuffer_MultiChunk_Random(t *testing.T) {
|
||||
|
||||
var reader AnnexBReader
|
||||
|
||||
rng := rand.New(rand.NewSource(1)) // 固定种子,保证可复现
|
||||
|
||||
// 生成随机 NALU(仅负载部分),并构造 AnnexB 数据(随机 3/4 字节起始码)
|
||||
numNALU := 12
|
||||
expectedPayloads := make([][]byte, 0, numNALU)
|
||||
fullStream := make([]byte, 0, 1024)
|
||||
|
||||
for i := 0; i < numNALU; i++ {
|
||||
payloadLen := 1 + rng.Intn(32)
|
||||
payload := make([]byte, payloadLen)
|
||||
for j := 0; j < payloadLen; j++ {
|
||||
payload[j] = byte(rng.Intn(256))
|
||||
}
|
||||
expectedPayloads = append(expectedPayloads, payload)
|
||||
|
||||
if rng.Intn(2) == 0 {
|
||||
fullStream = append(fullStream, 0x00, 0x00, 0x01)
|
||||
} else {
|
||||
fullStream = append(fullStream, 0x00, 0x00, 0x00, 0x01)
|
||||
}
|
||||
fullStream = append(fullStream, payload...)
|
||||
}
|
||||
fullStream = append(fullStream, codec.NALU_Delimiter2[:]...) // 结尾加个起始码,方便读取到最后一个 NALU
|
||||
// 随机切割为多段并 AppendBuffer
|
||||
for i := 0; i < len(fullStream); {
|
||||
// 每段长度 1..7 字节(或剩余长度)
|
||||
maxStep := 7
|
||||
remain := len(fullStream) - i
|
||||
step := 1 + rng.Intn(maxStep)
|
||||
if step > remain {
|
||||
step = remain
|
||||
}
|
||||
reader.AppendBuffer(fullStream[i : i+step])
|
||||
i += step
|
||||
}
|
||||
|
||||
// 依次读取并校验
|
||||
for idx, expected := range expectedPayloads {
|
||||
var n util.Memory
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("read nalu %d: %v", idx+1, err)
|
||||
}
|
||||
got := bytesFromMemory(n)
|
||||
if !bytes.Equal(got, expected) {
|
||||
t.Fatalf("nalu %d mismatch: expected %d bytes, got %d bytes", idx+1, len(expected), len(got))
|
||||
}
|
||||
}
|
||||
|
||||
// 没有更多 NALU
|
||||
var n util.Memory
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("expected nil error when no more nalu, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// 起始码跨越两个缓冲区的情况测试(例如 00 00 | 00 01)
|
||||
func TestAnnexBReader_StartCodeAcrossBuffers(t *testing.T) {
|
||||
var reader AnnexBReader
|
||||
// 构造一个 4 字节起始码被拆成两段的情况,后跟一个短 payload
|
||||
reader.AppendBuffer([]byte{0x00, 0x00})
|
||||
reader.AppendBuffer([]byte{0x00})
|
||||
reader.AppendBuffer([]byte{0x01, 0x11, 0x22, 0x33}) // payload: 11 22 33
|
||||
reader.AppendBuffer(codec.NALU_Delimiter2[:])
|
||||
var n util.Memory
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("read nalu: %v", err)
|
||||
}
|
||||
got := bytesFromMemory(n)
|
||||
expected := []byte{0x11, 0x22, 0x33}
|
||||
if !bytes.Equal(got, expected) {
|
||||
t.Fatalf("payload mismatch: expected %v got %v", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
//go:embed test.h264
|
||||
var annexbH264Sample []byte
|
||||
|
||||
var clipSizesH264 = [...]int{7823, 7157, 5137, 6268, 5958, 4573, 5661, 5589, 3917, 5207, 5347, 4111, 4755, 5199, 3761, 5014, 4981, 3736, 5075, 4889, 3739, 4701, 4655, 3471, 4086, 4428, 3309, 4388, 28, 8, 63974, 63976, 37544, 4945, 6525, 6974, 4874, 6317, 6141, 4455, 5833, 4105, 5407, 5479, 3741, 5142, 4939, 3745, 4945, 4857, 3518, 4624, 4930, 3649, 4846, 5020, 3293, 4588, 4571, 3430, 4844, 4822, 21223, 8461, 7188, 4882, 6108, 5870, 4432, 5389, 5466, 3726}
|
||||
|
||||
func TestAnnexBReader_EmbeddedAnnexB_H265(t *testing.T) {
|
||||
var reader AnnexBReader
|
||||
offset := 0
|
||||
for _, size := range clipSizesH264 {
|
||||
reader.AppendBuffer(annexbH264Sample[offset : offset+size])
|
||||
offset += size
|
||||
var nalu util.Memory
|
||||
if err := reader.ReadNALU(nil, &nalu); err != nil {
|
||||
t.Fatalf("read nalu: %v", err)
|
||||
} else {
|
||||
t.Logf("read nalu: %d bytes", nalu.Size)
|
||||
if nalu.Size > 0 {
|
||||
tryH264Type := codec.ParseH264NALUType(nalu.Buffers[0][0])
|
||||
t.Logf("tryH264Type: %d", tryH264Type)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -174,7 +174,9 @@ func (r *AVRingReader) ReadFrame(conf *config.Subscribe) (err error) {
|
||||
r.Delay = r.Track.LastValue.Sequence - r.Value.Sequence
|
||||
// fmt.Println(r.Delay)
|
||||
if r.Track.ICodecCtx != nil {
|
||||
r.Log(context.TODO(), task.TraceLevel, r.Track.FourCC().String(), "ts", r.Value.Timestamp, "delay", r.Delay, "bps", r.BPS)
|
||||
if r.Logger.Enabled(context.TODO(), task.TraceLevel) {
|
||||
r.Log(context.TODO(), task.TraceLevel, r.Track.FourCC().String(), "ts", r.Value.Timestamp, "delay", r.Delay, "bps", r.BPS)
|
||||
}
|
||||
} else {
|
||||
r.Warn("no codec")
|
||||
}
|
||||
189
pkg/avframe.go
189
pkg/avframe.go
@@ -1,8 +1,6 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -27,21 +25,28 @@ type (
|
||||
}
|
||||
// Source -> Parse -> Demux -> (ConvertCtx) -> Mux(GetAllocator) -> Recycle
|
||||
IAVFrame interface {
|
||||
GetAllocator() *util.ScalableMemoryAllocator
|
||||
SetAllocator(*util.ScalableMemoryAllocator)
|
||||
Parse(*AVTrack) error // get codec info, idr
|
||||
ConvertCtx(codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) // convert codec from source stream
|
||||
Demux(codec.ICodecCtx) (any, error) // demux to raw format
|
||||
Mux(codec.ICodecCtx, *AVFrame) // mux from raw format
|
||||
GetTimestamp() time.Duration
|
||||
GetCTS() time.Duration
|
||||
GetSample() *Sample
|
||||
GetSize() int
|
||||
CheckCodecChange() error
|
||||
Demux() error // demux to raw format
|
||||
Mux(*Sample) error // mux from origin format
|
||||
Recycle()
|
||||
String() string
|
||||
Dump(byte, io.Writer)
|
||||
}
|
||||
|
||||
Nalus []util.Memory
|
||||
ISequenceCodecCtx[T any] interface {
|
||||
GetSequenceFrame() T
|
||||
}
|
||||
BaseSample struct {
|
||||
Raw IRaw // 裸格式用于转换的中间格式
|
||||
IDR bool
|
||||
TS0, Timestamp, CTS time.Duration // 原始 TS、修正 TS、Composition Time Stamp
|
||||
}
|
||||
Sample struct {
|
||||
codec.ICodecCtx
|
||||
util.RecyclableMemory
|
||||
*BaseSample
|
||||
}
|
||||
Nalus = util.ReuseArray[util.Memory]
|
||||
|
||||
AudioData = util.Memory
|
||||
|
||||
@@ -49,38 +54,130 @@ type (
|
||||
|
||||
AVFrame struct {
|
||||
DataFrame
|
||||
IDR bool
|
||||
Timestamp time.Duration // 绝对时间戳
|
||||
CTS time.Duration // composition time stamp
|
||||
Wraps []IAVFrame // 封装格式
|
||||
*Sample
|
||||
Wraps []IAVFrame // 封装格式
|
||||
}
|
||||
IRaw interface {
|
||||
util.Resetter
|
||||
Count() int
|
||||
}
|
||||
|
||||
AVRing = util.Ring[AVFrame]
|
||||
DataFrame struct {
|
||||
sync.RWMutex
|
||||
discard bool
|
||||
Sequence uint32 // 在一个Track中的序号
|
||||
WriteTime time.Time // 写入时间,可用于比较两个帧的先后
|
||||
Raw any // 裸格式
|
||||
}
|
||||
)
|
||||
|
||||
var _ IAVFrame = (*AnnexB)(nil)
|
||||
func (sample *Sample) GetSize() int {
|
||||
return sample.Size
|
||||
}
|
||||
|
||||
func (frame *AVFrame) Clone() {
|
||||
func (sample *Sample) GetSample() *Sample {
|
||||
return sample
|
||||
}
|
||||
|
||||
func (sample *Sample) CheckCodecChange() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (sample *Sample) Demux() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sample *Sample) Mux(from *Sample) error {
|
||||
sample.ICodecCtx = from.GetBase()
|
||||
return nil
|
||||
}
|
||||
|
||||
func ConvertFrameType(from, to IAVFrame) (err error) {
|
||||
fromSampe, toSample := from.GetSample(), to.GetSample()
|
||||
if !fromSampe.HasRaw() {
|
||||
if err = from.Demux(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
toSample.SetAllocator(fromSampe.GetAllocator())
|
||||
toSample.BaseSample = fromSampe.BaseSample
|
||||
return to.Mux(fromSampe)
|
||||
}
|
||||
|
||||
func (b *BaseSample) HasRaw() bool {
|
||||
return b.Raw != nil && b.Raw.Count() > 0
|
||||
}
|
||||
|
||||
// 90Hz
|
||||
func (b *BaseSample) GetDTS() time.Duration {
|
||||
return b.Timestamp * 90 / time.Millisecond
|
||||
}
|
||||
|
||||
func (b *BaseSample) GetPTS() time.Duration {
|
||||
return (b.Timestamp + b.CTS) * 90 / time.Millisecond
|
||||
}
|
||||
|
||||
func (b *BaseSample) SetDTS(dts time.Duration) {
|
||||
b.Timestamp = dts * time.Millisecond / 90
|
||||
}
|
||||
|
||||
func (b *BaseSample) SetPTS(pts time.Duration) {
|
||||
b.CTS = pts*time.Millisecond/90 - b.Timestamp
|
||||
}
|
||||
|
||||
func (b *BaseSample) SetTS32(ts uint32) {
|
||||
b.Timestamp = time.Duration(ts) * time.Millisecond
|
||||
}
|
||||
|
||||
func (b *BaseSample) GetTS32() uint32 {
|
||||
return uint32(b.Timestamp / time.Millisecond)
|
||||
}
|
||||
|
||||
func (b *BaseSample) SetCTS32(ts uint32) {
|
||||
b.CTS = time.Duration(ts) * time.Millisecond
|
||||
}
|
||||
|
||||
func (b *BaseSample) GetCTS32() uint32 {
|
||||
return uint32(b.CTS / time.Millisecond)
|
||||
}
|
||||
|
||||
func (b *BaseSample) GetNalus() *util.ReuseArray[util.Memory] {
|
||||
if b.Raw == nil {
|
||||
b.Raw = &Nalus{}
|
||||
}
|
||||
return b.Raw.(*util.ReuseArray[util.Memory])
|
||||
}
|
||||
|
||||
func (b *BaseSample) GetAudioData() *AudioData {
|
||||
if b.Raw == nil {
|
||||
b.Raw = &AudioData{}
|
||||
}
|
||||
return b.Raw.(*AudioData)
|
||||
}
|
||||
|
||||
func (b *BaseSample) ParseAVCC(reader *util.MemoryReader, naluSizeLen int) error {
|
||||
array := b.GetNalus()
|
||||
for reader.Length > 0 {
|
||||
l, err := reader.ReadBE(naluSizeLen)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reader.RangeN(int(l), array.GetNextPointer().PushOne)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (frame *AVFrame) Reset() {
|
||||
frame.Timestamp = 0
|
||||
frame.IDR = false
|
||||
frame.CTS = 0
|
||||
frame.Raw = nil
|
||||
if len(frame.Wraps) > 0 {
|
||||
for _, wrap := range frame.Wraps {
|
||||
wrap.Recycle()
|
||||
}
|
||||
frame.Wraps = frame.Wraps[:0]
|
||||
frame.BaseSample.IDR = false
|
||||
frame.BaseSample.TS0 = 0
|
||||
frame.BaseSample.Timestamp = 0
|
||||
frame.BaseSample.CTS = 0
|
||||
if frame.Raw != nil {
|
||||
frame.Raw.Reset()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -89,11 +186,6 @@ func (frame *AVFrame) Discard() {
|
||||
frame.Reset()
|
||||
}
|
||||
|
||||
func (frame *AVFrame) Demux(codecCtx codec.ICodecCtx) (err error) {
|
||||
frame.Raw, err = frame.Wraps[0].Demux(codecCtx)
|
||||
return
|
||||
}
|
||||
|
||||
func (df *DataFrame) StartWrite() (success bool) {
|
||||
if df.discard {
|
||||
return
|
||||
@@ -110,31 +202,6 @@ func (df *DataFrame) Ready() {
|
||||
df.Unlock()
|
||||
}
|
||||
|
||||
func (nalus *Nalus) H264Type() codec.H264NALUType {
|
||||
return codec.ParseH264NALUType((*nalus)[0].Buffers[0][0])
|
||||
}
|
||||
|
||||
func (nalus *Nalus) H265Type() codec.H265NALUType {
|
||||
return codec.ParseH265NALUType((*nalus)[0].Buffers[0][0])
|
||||
}
|
||||
|
||||
func (nalus *Nalus) Append(bytes []byte) {
|
||||
*nalus = append(*nalus, util.Memory{Buffers: net.Buffers{bytes}, Size: len(bytes)})
|
||||
}
|
||||
|
||||
func (nalus *Nalus) ParseAVCC(reader *util.MemoryReader, naluSizeLen int) error {
|
||||
for reader.Length > 0 {
|
||||
l, err := reader.ReadBE(naluSizeLen)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var mem util.Memory
|
||||
reader.RangeN(int(l), mem.AppendOne)
|
||||
*nalus = append(*nalus, mem)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (obus *OBUs) ParseAVCC(reader *util.MemoryReader) error {
|
||||
var obuHeader av1.OBUHeader
|
||||
startLen := reader.Length
|
||||
@@ -159,7 +226,15 @@ func (obus *OBUs) ParseAVCC(reader *util.MemoryReader) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
(*AudioData)(obus).AppendOne(obu)
|
||||
(*AudioData)(obus).PushOne(obu)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (obus *OBUs) Reset() {
|
||||
((*util.Memory)(obus)).Reset()
|
||||
}
|
||||
|
||||
func (obus *OBUs) Count() int {
|
||||
return (*util.Memory)(obus).Count()
|
||||
}
|
||||
|
||||
@@ -27,6 +27,32 @@ type (
|
||||
}
|
||||
)
|
||||
|
||||
func NewAACCtxFromRecord(record []byte) (ret *AACCtx, err error) {
|
||||
ret = &AACCtx{}
|
||||
ret.CodecData, err = aacparser.NewCodecDataFromMPEG4AudioConfigBytes(record)
|
||||
return
|
||||
}
|
||||
|
||||
func NewPCMACtx() *PCMACtx {
|
||||
return &PCMACtx{
|
||||
AudioCtx: AudioCtx{
|
||||
SampleRate: 90000,
|
||||
Channels: 1,
|
||||
SampleSize: 16,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func NewPCMUCtx() *PCMUCtx {
|
||||
return &PCMUCtx{
|
||||
AudioCtx: AudioCtx{
|
||||
SampleRate: 90000,
|
||||
Channels: 1,
|
||||
SampleSize: 16,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *AudioCtx) GetRecord() []byte {
|
||||
return []byte{}
|
||||
}
|
||||
|
||||
@@ -112,6 +112,12 @@ type (
|
||||
}
|
||||
)
|
||||
|
||||
func NewH264CtxFromRecord(record []byte) (ret *H264Ctx, err error) {
|
||||
ret = &H264Ctx{}
|
||||
ret.CodecData, err = h264parser.NewCodecDataFromAVCDecoderConfRecord(record)
|
||||
return
|
||||
}
|
||||
|
||||
func (*H264Ctx) FourCC() FourCC {
|
||||
return FourCC_H264
|
||||
}
|
||||
|
||||
@@ -24,6 +24,15 @@ type (
|
||||
}
|
||||
)
|
||||
|
||||
func NewH265CtxFromRecord(record []byte) (ret *H265Ctx, err error) {
|
||||
ret = &H265Ctx{}
|
||||
ret.CodecData, err = h265parser.NewCodecDataFromAVCDecoderConfRecord(record)
|
||||
if err == nil {
|
||||
ret.RecordInfo.LengthSizeMinusOne = 3
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (ctx *H265Ctx) GetInfo() string {
|
||||
return fmt.Sprintf("fps: %d, resolution: %s", ctx.FPS(), ctx.Resolution())
|
||||
}
|
||||
|
||||
25
pkg/codec/h26x.go
Normal file
25
pkg/codec/h26x.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package codec
|
||||
|
||||
type H26XCtx struct {
|
||||
VPS, SPS, PPS []byte
|
||||
}
|
||||
|
||||
func (ctx *H26XCtx) FourCC() (f FourCC) {
|
||||
return
|
||||
}
|
||||
|
||||
func (ctx *H26XCtx) GetInfo() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ctx *H26XCtx) GetBase() ICodecCtx {
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (ctx *H26XCtx) GetRecord() []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ctx *H26XCtx) String() string {
|
||||
return ""
|
||||
}
|
||||
@@ -36,6 +36,22 @@ type Config struct {
|
||||
var (
|
||||
durationType = reflect.TypeOf(time.Duration(0))
|
||||
regexpType = reflect.TypeOf(Regexp{})
|
||||
basicTypes = []reflect.Kind{
|
||||
reflect.Bool,
|
||||
reflect.Int,
|
||||
reflect.Int8,
|
||||
reflect.Int16,
|
||||
reflect.Int32,
|
||||
reflect.Int64,
|
||||
reflect.Uint,
|
||||
reflect.Uint8,
|
||||
reflect.Uint16,
|
||||
reflect.Uint32,
|
||||
reflect.Uint64,
|
||||
reflect.Float32,
|
||||
reflect.Float64,
|
||||
reflect.String,
|
||||
}
|
||||
)
|
||||
|
||||
func (config *Config) Range(f func(key string, value Config)) {
|
||||
@@ -99,29 +115,29 @@ func (config *Config) Parse(s any, prefix ...string) {
|
||||
if t.Kind() == reflect.Pointer {
|
||||
t, v = t.Elem(), v.Elem()
|
||||
}
|
||||
|
||||
isStruct := t.Kind() == reflect.Struct && t != regexpType
|
||||
if isStruct {
|
||||
defaults.SetDefaults(v.Addr().Interface())
|
||||
}
|
||||
config.Ptr = v
|
||||
|
||||
if !v.IsValid() {
|
||||
fmt.Println("parse to ", prefix, config.name, s, "is not valid")
|
||||
return
|
||||
}
|
||||
|
||||
config.Default = v.Interface()
|
||||
|
||||
if l := len(prefix); l > 0 { // 读取环境变量
|
||||
name := strings.ToLower(prefix[l-1])
|
||||
if tag := config.tag.Get("default"); tag != "" {
|
||||
_, isUnmarshaler := v.Addr().Interface().(yaml.Unmarshaler)
|
||||
tag := config.tag.Get("default")
|
||||
if tag != "" && isUnmarshaler {
|
||||
v.Set(config.assign(name, tag))
|
||||
config.Default = v.Interface()
|
||||
}
|
||||
if envValue := os.Getenv(strings.Join(prefix, "_")); envValue != "" {
|
||||
v.Set(config.assign(name, envValue))
|
||||
config.Env = v.Interface()
|
||||
}
|
||||
}
|
||||
|
||||
if t.Kind() == reflect.Struct && t != regexpType {
|
||||
config.Default = v.Interface()
|
||||
if isStruct {
|
||||
for i, j := 0, t.NumField(); i < j; i++ {
|
||||
ft, fv := t.Field(i), v.Field(i)
|
||||
|
||||
@@ -315,16 +331,18 @@ func (config *Config) GetMap() map[string]any {
|
||||
|
||||
var regexPureNumber = regexp.MustCompile(`^\d+$`)
|
||||
|
||||
func (config *Config) assign(k string, v any) (target reflect.Value) {
|
||||
ft := config.Ptr.Type()
|
||||
|
||||
func unmarshal(ft reflect.Type, v any) (target reflect.Value) {
|
||||
source := reflect.ValueOf(v)
|
||||
|
||||
for _, t := range basicTypes {
|
||||
if source.Kind() == t && ft.Kind() == t {
|
||||
return source
|
||||
}
|
||||
}
|
||||
switch ft {
|
||||
case durationType:
|
||||
target = reflect.New(ft).Elem()
|
||||
if source.Type() == durationType {
|
||||
target.Set(source)
|
||||
return source
|
||||
} else if source.IsZero() || !source.IsValid() {
|
||||
target.SetInt(0)
|
||||
} else {
|
||||
@@ -332,7 +350,7 @@ func (config *Config) assign(k string, v any) (target reflect.Value) {
|
||||
if d, err := time.ParseDuration(timeStr); err == nil && !regexPureNumber.MatchString(timeStr) {
|
||||
target.SetInt(int64(d))
|
||||
} else {
|
||||
slog.Error("invalid duration value please add unit (s,m,h,d),eg: 100ms, 10s, 4m, 1h", "key", k, "value", source)
|
||||
slog.Error("invalid duration value please add unit (s,m,h,d),eg: 100ms, 10s, 4m, 1h", "value", timeStr)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -341,58 +359,69 @@ func (config *Config) assign(k string, v any) (target reflect.Value) {
|
||||
regexpStr := source.String()
|
||||
target.Set(reflect.ValueOf(Regexp{regexp.MustCompile(regexpStr)}))
|
||||
default:
|
||||
if ft.Kind() == reflect.Map {
|
||||
target = reflect.MakeMap(ft)
|
||||
if v != nil {
|
||||
tmpStruct := reflect.StructOf([]reflect.StructField{
|
||||
{
|
||||
Name: "Key",
|
||||
Type: ft.Key(),
|
||||
},
|
||||
})
|
||||
tmpValue := reflect.New(tmpStruct)
|
||||
for k, v := range v.(map[string]any) {
|
||||
_ = yaml.Unmarshal([]byte(fmt.Sprintf("key: %s", k)), tmpValue.Interface())
|
||||
var value reflect.Value
|
||||
if ft.Elem().Kind() == reflect.Struct {
|
||||
value = reflect.New(ft.Elem())
|
||||
defaults.SetDefaults(value.Interface())
|
||||
if reflect.TypeOf(v).Kind() != reflect.Map {
|
||||
value.Elem().Field(0).Set(reflect.ValueOf(v))
|
||||
} else {
|
||||
out, _ := yaml.Marshal(v)
|
||||
_ = yaml.Unmarshal(out, value.Interface())
|
||||
}
|
||||
value = value.Elem()
|
||||
} else {
|
||||
value = reflect.ValueOf(v)
|
||||
switch ft.Kind() {
|
||||
case reflect.Struct:
|
||||
newStruct := reflect.New(ft)
|
||||
defaults.SetDefaults(newStruct.Interface())
|
||||
if value, ok := v.(map[string]any); ok {
|
||||
for i := 0; i < ft.NumField(); i++ {
|
||||
key := strings.ToLower(ft.Field(i).Name)
|
||||
if vv, ok := value[key]; ok {
|
||||
newStruct.Elem().Field(i).Set(unmarshal(ft.Field(i).Type, vv))
|
||||
}
|
||||
target.SetMapIndex(tmpValue.Elem().Field(0), value)
|
||||
}
|
||||
} else {
|
||||
newStruct.Elem().Field(0).Set(unmarshal(ft.Field(0).Type, v))
|
||||
}
|
||||
return newStruct.Elem()
|
||||
case reflect.Map:
|
||||
if v != nil {
|
||||
target = reflect.MakeMap(ft)
|
||||
for k, v := range v.(map[string]any) {
|
||||
target.SetMapIndex(unmarshal(ft.Key(), k), unmarshal(ft.Elem(), v))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tmpStruct := reflect.StructOf([]reflect.StructField{
|
||||
{
|
||||
Name: strings.ToUpper(k),
|
||||
Type: ft,
|
||||
},
|
||||
})
|
||||
tmpValue := reflect.New(tmpStruct)
|
||||
case reflect.Slice:
|
||||
if v != nil {
|
||||
s := v.([]any)
|
||||
target = reflect.MakeSlice(ft, len(s), len(s))
|
||||
for i, v := range s {
|
||||
target.Index(i).Set(unmarshal(ft.Elem(), v))
|
||||
}
|
||||
}
|
||||
default:
|
||||
if v != nil {
|
||||
var out []byte
|
||||
var err error
|
||||
if vv, ok := v.(string); ok {
|
||||
out = []byte(fmt.Sprintf("%s: %s", k, vv))
|
||||
out = []byte(fmt.Sprintf("%s: %s", "value", vv))
|
||||
} else {
|
||||
out, _ = yaml.Marshal(map[string]any{k: v})
|
||||
out, err = yaml.Marshal(map[string]any{"value": v})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
_ = yaml.Unmarshal(out, tmpValue.Interface())
|
||||
tmpValue := reflect.New(reflect.StructOf([]reflect.StructField{
|
||||
{
|
||||
Name: "Value",
|
||||
Type: ft,
|
||||
},
|
||||
}))
|
||||
err = yaml.Unmarshal(out, tmpValue.Interface())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return tmpValue.Elem().Field(0)
|
||||
}
|
||||
target = tmpValue.Elem().Field(0)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (config *Config) assign(k string, v any) reflect.Value {
|
||||
return unmarshal(config.Ptr.Type(), v)
|
||||
}
|
||||
|
||||
func Parse(target any, conf map[string]any) {
|
||||
var c Config
|
||||
c.Parse(target)
|
||||
|
||||
@@ -49,6 +49,7 @@ func (task *ListenQuicWork) Start() (err error) {
|
||||
task.Error("listen quic error", err)
|
||||
return
|
||||
}
|
||||
task.OnStop(task.Listener.Close)
|
||||
task.Info("listen quic on", task.ListenAddr)
|
||||
return
|
||||
}
|
||||
@@ -63,7 +64,3 @@ func (task *ListenQuicWork) Go() error {
|
||||
task.AddTask(subTask)
|
||||
}
|
||||
}
|
||||
|
||||
func (task *ListenQuicWork) Dispose() {
|
||||
_ = task.Listener.Close()
|
||||
}
|
||||
|
||||
@@ -18,6 +18,7 @@ const (
|
||||
|
||||
RecordModeAuto RecordMode = "auto"
|
||||
RecordModeEvent RecordMode = "event"
|
||||
RecordModeTest RecordMode = "test"
|
||||
|
||||
HookOnServerKeepAlive HookType = "server_keep_alive"
|
||||
HookOnPublishStart HookType = "publish_start"
|
||||
@@ -32,9 +33,27 @@ const (
|
||||
HookOnRecordEnd HookType = "record_end"
|
||||
HookOnTransformStart HookType = "transform_start"
|
||||
HookOnTransformEnd HookType = "transform_end"
|
||||
HookOnSystemStart HookType = "system_start"
|
||||
HookDefault HookType = "default"
|
||||
|
||||
EventLevelLow EventLevel = "low"
|
||||
EventLevelHigh EventLevel = "high"
|
||||
|
||||
AlarmStorageException = 0x10010 // 存储异常
|
||||
AlarmStorageExceptionRecover = 0x10011 // 存储异常恢复
|
||||
AlarmPullOffline = 0x10012 // 拉流异常,触发一次报警。
|
||||
AlarmPullRecover = 0x10013 // 拉流恢复
|
||||
AlarmDiskSpaceFull = 0x10014 // 磁盘空间满,磁盘占有率,超出最大磁盘空间使用率,触发报警。
|
||||
AlarmStartupRunning = 0x10015 // 启动运行
|
||||
AlarmPublishOffline = 0x10016 // 发布者异常,触发一次报警。
|
||||
AlarmPublishRecover = 0x10017 // 发布者恢复
|
||||
AlarmSubscribeOffline = 0x10018 // 订阅者异常,触发一次报警。
|
||||
AlarmSubscribeRecover = 0x10019 // 订阅者恢复
|
||||
AlarmPushOffline = 0x10020 // 推流异常,触发一次报警。
|
||||
AlarmPushRecover = 0x10021 // 推流恢复
|
||||
AlarmTransformOffline = 0x10022 // 转换异常,触发一次报警。
|
||||
AlarmTransformRecover = 0x10023 // 转换恢复
|
||||
AlarmKeepAliveOnline = 0x10024 // 保活正常,触发一次报警。
|
||||
)
|
||||
|
||||
type (
|
||||
@@ -52,7 +71,7 @@ type (
|
||||
IdleTimeout time.Duration `desc:"空闲(无订阅)超时"` // 空闲(无订阅)超时
|
||||
PauseTimeout time.Duration `default:"30s" desc:"暂停超时时间"` // 暂停超时
|
||||
BufferTime time.Duration `desc:"缓冲时长,0代表取最近关键帧"` // 缓冲长度(单位:秒),0代表取最近关键帧
|
||||
Speed float64 `default:"1" desc:"发送速率"` // 发送速率,0 为不限速
|
||||
Speed float64 `desc:"发送速率"` // 发送速率,0 为不限速
|
||||
Scale float64 `default:"1" desc:"缩放倍数"` // 缩放倍数
|
||||
MaxFPS int `default:"60" desc:"最大FPS"` // 最大FPS
|
||||
Key string `desc:"发布鉴权key"` // 发布鉴权key
|
||||
@@ -70,17 +89,18 @@ type (
|
||||
SyncMode int `default:"1" desc:"同步模式" enum:"0:采用时间戳同步,1:采用写入时间同步"` // 0,采用时间戳同步,1,采用写入时间同步
|
||||
IFrameOnly bool `desc:"只要关键帧"` // 只要关键帧
|
||||
WaitTimeout time.Duration `default:"10s" desc:"等待流超时时间"` // 等待流超时
|
||||
WriteBufferSize int `desc:"写缓冲大小"` // 写缓冲大小
|
||||
Key string `desc:"订阅鉴权key"` // 订阅鉴权key
|
||||
SubType string `desc:"订阅类型"` // 订阅类型
|
||||
WaitTrack string `default:"video" desc:"等待轨道" enum:"audio:等待音频,video:等待视频,all:等待全部"`
|
||||
WriteBufferSize int `desc:"写缓冲大小"` // 写缓冲大小
|
||||
Key string `desc:"订阅鉴权key"` // 订阅鉴权key
|
||||
SubType string `desc:"订阅类型"` // 订阅类型
|
||||
}
|
||||
HTTPValues map[string][]string
|
||||
Pull struct {
|
||||
URL string `desc:"拉流地址"`
|
||||
Loop int `desc:"拉流循环次数,-1:无限循环"` // 拉流循环次数,-1 表示无限循环
|
||||
MaxRetry int `default:"-1" desc:"断开后自动重试次数,0:不重试,-1:无限重试"` // 断开后自动重拉,0 表示不自动重拉,-1 表示无限重拉,高于0 的数代表最大重拉次数
|
||||
RetryInterval time.Duration `default:"5s" desc:"重试间隔"` // 重试间隔
|
||||
Proxy string `desc:"代理地址"` // 代理地址
|
||||
Loop int `desc:"拉流循环次数,-1:无限循环"` // 拉流循环次数,-1 表示无限循环
|
||||
MaxRetry int `desc:"断开后自动重试次数,0:不重试,-1:无限重试"` // 断开后自动重拉,0 表示不自动重拉,-1 表示无限重拉,高于0 的数代表最大重拉次数
|
||||
RetryInterval time.Duration `default:"5s" desc:"重试间隔"` // 重试间隔
|
||||
Proxy string `desc:"代理地址"` // 代理地址
|
||||
Header HTTPValues
|
||||
Args HTTPValues `gorm:"-:all"` // 拉流参数
|
||||
TestMode int `desc:"测试模式,0:关闭,1:只拉流不发布"` // 测试模式
|
||||
@@ -105,6 +125,7 @@ type (
|
||||
Type string `desc:"录制类型"` // 录制类型 mp4、flv、hls、hlsv7
|
||||
FilePath string `desc:"录制文件路径"` // 录制文件路径
|
||||
Fragment time.Duration `desc:"分片时长"` // 分片时长
|
||||
RealTime bool `desc:"是否实时录制"` // 是否实时录制
|
||||
Append bool `desc:"是否追加录制"` // 是否追加录制
|
||||
Event *RecordEvent `json:"event" desc:"事件录像配置" gorm:"-"` // 事件录像配置
|
||||
}
|
||||
@@ -130,10 +151,11 @@ type (
|
||||
URL string // Webhook 地址
|
||||
Method string `default:"POST"` // HTTP 方法
|
||||
Headers map[string]string // 自定义请求头
|
||||
TimeoutSeconds int `default:"5"` // 超时时间(秒)
|
||||
RetryTimes int `default:"3"` // 重试次数
|
||||
RetryInterval time.Duration `default:"1s"` // 重试间隔
|
||||
Interval int `default:"60"` // 保活间隔(秒)
|
||||
TimeoutSeconds int `default:"5"` // 超时时间(秒)
|
||||
RetryTimes int `default:"3"` // 重试次数
|
||||
RetryInterval time.Duration `default:"1s"` // 重试间隔
|
||||
Interval int `default:"60"` // 保活间隔(秒)
|
||||
SaveAlarm bool `default:"false"` // 是否保存告警到数据库
|
||||
}
|
||||
Common struct {
|
||||
PublicIP string
|
||||
|
||||
@@ -9,14 +9,11 @@ import (
|
||||
|
||||
// User represents a user in the system
|
||||
type User struct {
|
||||
ID uint `gorm:"primarykey"`
|
||||
CreatedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
DeletedAt gorm.DeletedAt `gorm:"index"`
|
||||
Username string `gorm:"uniqueIndex;size:64"`
|
||||
Password string `gorm:"size:60"` // bcrypt hash
|
||||
Role string `gorm:"size:20;default:'user'"` // admin or user
|
||||
LastLogin time.Time `gorm:"type:datetime;default:CURRENT_TIMESTAMP"`
|
||||
gorm.Model
|
||||
Username string `gorm:"uniqueIndex;size:64"`
|
||||
Password string `gorm:"size:60"` // bcrypt hash
|
||||
Role string `gorm:"size:20;default:'user'"` // admin or user
|
||||
LastLogin time.Time `gorm:"type:timestamp;default:CURRENT_TIMESTAMP"`
|
||||
}
|
||||
|
||||
// BeforeCreate hook to hash password before saving
|
||||
|
||||
@@ -4,6 +4,7 @@ import "errors"
|
||||
|
||||
var (
|
||||
ErrNotFound = errors.New("not found")
|
||||
ErrDisposed = errors.New("disposed")
|
||||
ErrDisabled = errors.New("disabled")
|
||||
ErrStreamExist = errors.New("stream exist")
|
||||
ErrRecordExists = errors.New("record exists")
|
||||
|
||||
82
pkg/format/adts.go
Normal file
82
pkg/format/adts.go
Normal file
@@ -0,0 +1,82 @@
|
||||
package format
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/deepch/vdk/codec/aacparser"
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
)
|
||||
|
||||
var _ pkg.IAVFrame = (*Mpeg2Audio)(nil)
|
||||
|
||||
type Mpeg2Audio struct {
|
||||
pkg.Sample
|
||||
}
|
||||
|
||||
func (A *Mpeg2Audio) CheckCodecChange() (err error) {
|
||||
old := A.ICodecCtx
|
||||
if old == nil || old.FourCC().Is(codec.FourCC_MP4A) {
|
||||
var reader = A.NewReader()
|
||||
var adts []byte
|
||||
adts, err = reader.ReadBytes(7)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var hdrlen, framelen, samples int
|
||||
var conf aacparser.MPEG4AudioConfig
|
||||
conf, hdrlen, framelen, samples, err = aacparser.ParseADTSHeader(adts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
b := &bytes.Buffer{}
|
||||
aacparser.WriteMPEG4AudioConfig(b, conf)
|
||||
if old == nil || !bytes.Equal(b.Bytes(), old.GetRecord()) {
|
||||
var ctx = &codec.AACCtx{}
|
||||
ctx.ConfigBytes = b.Bytes()
|
||||
A.ICodecCtx = ctx
|
||||
if false {
|
||||
println("ADTS", "hdrlen", hdrlen, "framelen", framelen, "samples", samples, "config", ctx.Config)
|
||||
}
|
||||
// track.Info("ADTS", "hdrlen", hdrlen, "framelen", framelen, "samples", samples)
|
||||
} else {
|
||||
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (A *Mpeg2Audio) Demux() (err error) {
|
||||
var reader = A.NewReader()
|
||||
mem := A.GetAudioData()
|
||||
if A.ICodecCtx.FourCC().Is(codec.FourCC_MP4A) {
|
||||
err = reader.Skip(7)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
reader.Range(mem.PushOne)
|
||||
return
|
||||
}
|
||||
|
||||
func (A *Mpeg2Audio) Mux(frame *pkg.Sample) (err error) {
|
||||
if A.ICodecCtx == nil {
|
||||
A.ICodecCtx = frame.GetBase()
|
||||
}
|
||||
raw := frame.Raw.(*pkg.AudioData)
|
||||
aacCtx, ok := A.ICodecCtx.(*codec.AACCtx)
|
||||
if ok {
|
||||
A.InitRecycleIndexes(1)
|
||||
adts := A.NextN(7)
|
||||
aacparser.FillADTSHeader(adts, aacCtx.Config, raw.Size/aacCtx.GetSampleSize(), raw.Size)
|
||||
} else {
|
||||
A.InitRecycleIndexes(0)
|
||||
}
|
||||
A.Push(raw.Buffers...)
|
||||
return
|
||||
}
|
||||
|
||||
func (A *Mpeg2Audio) String() string {
|
||||
return fmt.Sprintf("ADTS{size:%d}", A.Size)
|
||||
}
|
||||
290
pkg/format/annexb.go
Normal file
290
pkg/format/annexb.go
Normal file
@@ -0,0 +1,290 @@
|
||||
package format
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"slices"
|
||||
|
||||
"github.com/deepch/vdk/codec/h264parser"
|
||||
"github.com/deepch/vdk/codec/h265parser"
|
||||
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
type AnnexB struct {
|
||||
pkg.Sample
|
||||
}
|
||||
|
||||
func (a *AnnexB) CheckCodecChange() (err error) {
|
||||
if !a.HasRaw() || a.ICodecCtx == nil {
|
||||
err = a.Demux()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if a.ICodecCtx == nil {
|
||||
return pkg.ErrSkip
|
||||
}
|
||||
var vps, sps, pps []byte
|
||||
a.IDR = false
|
||||
for nalu := range a.Raw.(*pkg.Nalus).RangePoint {
|
||||
if a.FourCC() == codec.FourCC_H265 {
|
||||
switch codec.ParseH265NALUType(nalu.Buffers[0][0]) {
|
||||
case h265parser.NAL_UNIT_VPS:
|
||||
vps = nalu.ToBytes()
|
||||
case h265parser.NAL_UNIT_SPS:
|
||||
sps = nalu.ToBytes()
|
||||
case h265parser.NAL_UNIT_PPS:
|
||||
pps = nalu.ToBytes()
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_CRA:
|
||||
a.IDR = true
|
||||
}
|
||||
} else {
|
||||
switch codec.ParseH264NALUType(nalu.Buffers[0][0]) {
|
||||
case codec.NALU_SPS:
|
||||
sps = nalu.ToBytes()
|
||||
case codec.NALU_PPS:
|
||||
pps = nalu.ToBytes()
|
||||
case codec.NALU_IDR_Picture:
|
||||
a.IDR = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if a.FourCC() == codec.FourCC_H265 {
|
||||
if vps != nil && sps != nil && pps != nil {
|
||||
var codecData h265parser.CodecData
|
||||
codecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(vps, sps, pps)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !bytes.Equal(codecData.Record, a.ICodecCtx.(*codec.H265Ctx).Record) {
|
||||
a.ICodecCtx = &codec.H265Ctx{
|
||||
CodecData: codecData,
|
||||
}
|
||||
}
|
||||
}
|
||||
if a.ICodecCtx.(*codec.H265Ctx).Record == nil {
|
||||
err = pkg.ErrSkip
|
||||
}
|
||||
} else {
|
||||
if sps != nil && pps != nil {
|
||||
var codecData h264parser.CodecData
|
||||
codecData, err = h264parser.NewCodecDataFromSPSAndPPS(sps, pps)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !bytes.Equal(codecData.Record, a.ICodecCtx.(*codec.H264Ctx).Record) {
|
||||
a.ICodecCtx = &codec.H264Ctx{
|
||||
CodecData: codecData,
|
||||
}
|
||||
}
|
||||
}
|
||||
if a.ICodecCtx.(*codec.H264Ctx).Record == nil {
|
||||
err = pkg.ErrSkip
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// String implements pkg.IAVFrame.
|
||||
func (a *AnnexB) String() string {
|
||||
return fmt.Sprintf("%d %d", a.Timestamp, a.Memory.Size)
|
||||
}
|
||||
|
||||
// Demux implements pkg.IAVFrame.
|
||||
func (a *AnnexB) Demux() (err error) {
|
||||
nalus := a.GetNalus()
|
||||
var lastFourBytes [4]byte
|
||||
var b byte
|
||||
var shallow util.Memory
|
||||
shallow.Push(a.Buffers...)
|
||||
reader := shallow.NewReader()
|
||||
gotNalu := func() {
|
||||
nalu := nalus.GetNextPointer()
|
||||
for buf := range reader.ClipFront {
|
||||
nalu.PushOne(buf)
|
||||
}
|
||||
if a.ICodecCtx == nil {
|
||||
naluType := codec.ParseH264NALUType(nalu.Buffers[0][0])
|
||||
switch naluType {
|
||||
case codec.NALU_Non_IDR_Picture,
|
||||
codec.NALU_IDR_Picture,
|
||||
codec.NALU_SEI,
|
||||
codec.NALU_SPS,
|
||||
codec.NALU_PPS,
|
||||
codec.NALU_Access_Unit_Delimiter:
|
||||
a.ICodecCtx = &codec.H264Ctx{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
b, err = reader.ReadByte()
|
||||
if err == nil {
|
||||
copy(lastFourBytes[:], lastFourBytes[1:])
|
||||
lastFourBytes[3] = b
|
||||
var startCode = 0
|
||||
if lastFourBytes == codec.NALU_Delimiter2 {
|
||||
startCode = 4
|
||||
} else if [3]byte(lastFourBytes[1:]) == codec.NALU_Delimiter1 {
|
||||
startCode = 3
|
||||
}
|
||||
if startCode > 0 && reader.Offset() >= 3 {
|
||||
if reader.Offset() == 3 {
|
||||
startCode = 3
|
||||
}
|
||||
reader.Unread(startCode)
|
||||
if reader.Offset() > 0 {
|
||||
gotNalu()
|
||||
}
|
||||
reader.Skip(startCode)
|
||||
for range reader.ClipFront {
|
||||
}
|
||||
}
|
||||
} else if err == io.EOF {
|
||||
if reader.Offset() > 0 {
|
||||
gotNalu()
|
||||
}
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (a *AnnexB) Mux(fromBase *pkg.Sample) (err error) {
|
||||
if a.ICodecCtx == nil {
|
||||
a.ICodecCtx = fromBase.GetBase()
|
||||
}
|
||||
a.InitRecycleIndexes(0)
|
||||
delimiter2 := codec.NALU_Delimiter2[:]
|
||||
a.PushOne(delimiter2)
|
||||
if fromBase.IDR {
|
||||
switch ctx := fromBase.GetBase().(type) {
|
||||
case *codec.H264Ctx:
|
||||
a.Push(ctx.SPS(), delimiter2, ctx.PPS(), delimiter2)
|
||||
case *codec.H265Ctx:
|
||||
a.Push(ctx.SPS(), delimiter2, ctx.PPS(), delimiter2, ctx.VPS(), delimiter2)
|
||||
}
|
||||
}
|
||||
for i, nalu := range *fromBase.Raw.(*pkg.Nalus) {
|
||||
if i > 0 {
|
||||
a.PushOne(codec.NALU_Delimiter1[:])
|
||||
}
|
||||
a.Push(nalu.Buffers...)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (a *AnnexB) Parse(reader *pkg.AnnexBReader) (hasFrame bool, err error) {
|
||||
nalus := a.BaseSample.GetNalus()
|
||||
for !hasFrame {
|
||||
nalu := nalus.GetNextPointer()
|
||||
reader.ReadNALU(&a.Memory, nalu)
|
||||
if nalu.Size == 0 {
|
||||
nalus.Reduce()
|
||||
return
|
||||
}
|
||||
tryH264Type := codec.ParseH264NALUType(nalu.Buffers[0][0])
|
||||
h265Type := codec.ParseH265NALUType(nalu.Buffers[0][0])
|
||||
if a.ICodecCtx == nil {
|
||||
a.ICodecCtx = &codec.H26XCtx{}
|
||||
}
|
||||
switch ctx := a.ICodecCtx.(type) {
|
||||
case *codec.H26XCtx:
|
||||
if tryH264Type == codec.NALU_SPS {
|
||||
ctx.SPS = nalu.ToBytes()
|
||||
nalus.Reduce()
|
||||
a.Recycle()
|
||||
} else if tryH264Type == codec.NALU_PPS {
|
||||
ctx.PPS = nalu.ToBytes()
|
||||
nalus.Reduce()
|
||||
a.Recycle()
|
||||
} else if h265Type == h265parser.NAL_UNIT_VPS {
|
||||
ctx.VPS = nalu.ToBytes()
|
||||
nalus.Reduce()
|
||||
a.Recycle()
|
||||
} else if h265Type == h265parser.NAL_UNIT_SPS {
|
||||
ctx.SPS = nalu.ToBytes()
|
||||
nalus.Reduce()
|
||||
a.Recycle()
|
||||
} else if h265Type == h265parser.NAL_UNIT_PPS {
|
||||
ctx.PPS = nalu.ToBytes()
|
||||
nalus.Reduce()
|
||||
a.Recycle()
|
||||
} else {
|
||||
if ctx.SPS != nil && ctx.PPS != nil && tryH264Type == codec.NALU_IDR_Picture {
|
||||
var codecData h264parser.CodecData
|
||||
codecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS, ctx.PPS)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
a.ICodecCtx = &codec.H264Ctx{
|
||||
CodecData: codecData,
|
||||
}
|
||||
*nalus = slices.Insert(*nalus, 0, util.NewMemory(ctx.SPS), util.NewMemory(ctx.PPS))
|
||||
delimiter2 := codec.NALU_Delimiter2[:]
|
||||
a.Buffers = slices.Insert(a.Buffers, 0, delimiter2, ctx.SPS, delimiter2, ctx.PPS)
|
||||
a.Size += 8 + len(ctx.SPS) + len(ctx.PPS)
|
||||
} else if ctx.VPS != nil && ctx.SPS != nil && ctx.PPS != nil && h265Type == h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL {
|
||||
var codecData h265parser.CodecData
|
||||
codecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(ctx.VPS, ctx.SPS, ctx.PPS)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
a.ICodecCtx = &codec.H265Ctx{
|
||||
CodecData: codecData,
|
||||
}
|
||||
*nalus = slices.Insert(*nalus, 0, util.NewMemory(ctx.VPS), util.NewMemory(ctx.SPS), util.NewMemory(ctx.PPS))
|
||||
delimiter2 := codec.NALU_Delimiter2[:]
|
||||
a.Buffers = slices.Insert(a.Buffers, 0, delimiter2, ctx.VPS, delimiter2, ctx.SPS, delimiter2, ctx.PPS)
|
||||
a.Size += 24 + len(ctx.VPS) + len(ctx.SPS) + len(ctx.PPS)
|
||||
} else {
|
||||
nalus.Reduce()
|
||||
a.Recycle()
|
||||
}
|
||||
}
|
||||
case *codec.H264Ctx:
|
||||
switch tryH264Type {
|
||||
case codec.NALU_IDR_Picture:
|
||||
a.IDR = true
|
||||
hasFrame = true
|
||||
case codec.NALU_Non_IDR_Picture:
|
||||
a.IDR = false
|
||||
hasFrame = true
|
||||
}
|
||||
case *codec.H265Ctx:
|
||||
switch h265Type {
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_CRA:
|
||||
a.IDR = true
|
||||
hasFrame = true
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_TRAIL_N,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_TRAIL_R,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_TSA_N,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_TSA_R,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_STSA_N,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_STSA_R,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_RADL_N,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_RADL_R,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_RASL_N,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_RASL_R:
|
||||
a.IDR = false
|
||||
hasFrame = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
309
pkg/format/ps/mpegps.go
Normal file
309
pkg/format/ps/mpegps.go
Normal file
@@ -0,0 +1,309 @@
|
||||
package mpegps
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5"
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/format"
|
||||
"m7s.live/v5/pkg/util"
|
||||
|
||||
mpegts "m7s.live/v5/pkg/format/ts"
|
||||
)
|
||||
|
||||
// MPEG-PS start codes (ISO/IEC 13818-1): the 0x000001 prefix followed by a
// pack/stream identifier byte, read here as one big-endian 32-bit value.
const (
	StartCodePS        = 0x000001ba // pack header
	StartCodeSYS       = 0x000001bb // system header
	StartCodeMAP       = 0x000001bc // program stream map (PSM)
	StartCodePadding   = 0x000001be // padding stream
	StartCodeVideo     = 0x000001e0 // video elementary stream 0
	StartCodeVideo1    = 0x000001e1 // video elementary stream 1
	StartCodeVideo2    = 0x000001e2 // video elementary stream 2
	StartCodeAudio     = 0x000001c0 // audio elementary stream 0
	PrivateStreamCode  = 0x000001bd // private stream 1
	MEPGProgramEndCode = 0x000001b9 // program end ("MEPG" spelling kept: exported name)
)
|
||||
|
||||
// PS header size constants.
const (
	PSPackHeaderSize   = 14     // PS pack header basic size
	PSSystemHeaderSize = 18     // PS system header basic size
	PSMHeaderSize      = 12     // PS map header basic size
	PESHeaderMinSize   = 9      // PES header minimum size
	MaxPESPayloadSize  = 0xFFEB // 0xFFFF - 14 (to leave room for headers)
)
|
||||
|
||||
// MpegPsDemuxer demultiplexes an MPEG program stream (MPEG-PS) into its
// elementary audio and video streams and forwards them to Publisher.
type MpegPsDemuxer struct {
	// stAudio/stVideo are the elementary stream types announced by the
	// program stream map (e.g. mpegts.STREAM_TYPE_H264); zero until a PSM
	// has been decoded.
	stAudio, stVideo byte
	Publisher        *m7s.Publisher
	Allocator        *util.ScalableMemoryAllocator
	// writer lazily creates the audio/video publish writers on first use.
	writer m7s.PublishWriter[*format.Mpeg2Audio, *format.AnnexB]
}
|
||||
|
||||
// Feed consumes an MPEG-PS byte stream from reader in a loop, dispatching on
// each 32-bit start code: pack headers are skipped, the program stream map is
// decoded to learn stream types, and video/audio PES packets are unpacked and
// written to the publisher. It returns the first read/parse error (including
// io.EOF when the input ends).
func (s *MpegPsDemuxer) Feed(reader *util.BufReader) (err error) {
	writer := &s.writer
	var payload util.Memory
	var pesHeader mpegts.MpegPESHeader
	// PES packets of one frame may be split; a PTS change marks a new frame.
	var lastVideoPts, lastAudioPts uint64
	var annexbReader pkg.AnnexBReader
	for {
		code, err := reader.ReadBE32(4)
		if err != nil {
			return err
		}
		switch code {
		case StartCodePS:
			// Pack header: skip the 9 fixed bytes after the start code, then
			// the stuffing bytes whose count is in the low 3 bits of the
			// 10th byte.
			var psl byte
			if err = reader.Skip(9); err != nil {
				return err
			}
			psl, err = reader.ReadByte()
			if err != nil {
				return err
			}
			psl &= 0x07
			if err = reader.Skip(int(psl)); err != nil {
				return err
			}
		case StartCodeVideo:
			payload, err = s.ReadPayload(reader)
			if err != nil {
				return err
			}
			if !s.Publisher.PubVideo {
				continue
			}
			if writer.PublishVideoWriter == nil {
				// NOTE(review): if no PSM was seen yet, s.stVideo is 0 and no
				// codec context is assigned here — confirm downstream handles
				// a nil ICodecCtx.
				writer.PublishVideoWriter = m7s.NewPublishVideoWriter[*format.AnnexB](s.Publisher, s.Allocator)
				switch s.stVideo {
				case mpegts.STREAM_TYPE_H264:
					writer.VideoFrame.ICodecCtx = &codec.H264Ctx{}
				case mpegts.STREAM_TYPE_H265:
					writer.VideoFrame.ICodecCtx = &codec.H265Ctx{}
				}
			}
			pes := writer.VideoFrame
			reader := payload.NewReader()
			pesHeader, err = mpegts.ReadPESHeader(&io.LimitedReader{R: &reader, N: int64(payload.Size)})
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to read PES header"))
			}
			// A new, different PTS starts a new video frame; flush the
			// previous one first if it holds data.
			if pesHeader.Pts != 0 && pesHeader.Pts != lastVideoPts {
				if pes.Size > 0 {
					err = writer.NextVideo()
					if err != nil {
						return errors.Join(err, fmt.Errorf("failed to get next video frame"))
					}
					pes = writer.VideoFrame
				}
				pes.SetDTS(time.Duration(pesHeader.Dts))
				pes.SetPTS(time.Duration(pesHeader.Pts))
				lastVideoPts = pesHeader.Pts
			}
			// Append the remaining PES payload (Annex-B data) to the frame.
			annexb := s.Allocator.Malloc(reader.Length)
			reader.Read(annexb)
			annexbReader.AppendBuffer(annexb)
			_, err = pes.Parse(&annexbReader)
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to parse annexb"))
			}
		case StartCodeAudio:
			payload, err = s.ReadPayload(reader)
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to read audio payload"))
			}
			// Audio needs a known stream type from the PSM before it can be
			// published; until then the payload is discarded.
			if s.stAudio == 0 || !s.Publisher.PubAudio {
				continue
			}
			if writer.PublishAudioWriter == nil {
				writer.PublishAudioWriter = m7s.NewPublishAudioWriter[*format.Mpeg2Audio](s.Publisher, s.Allocator)
				switch s.stAudio {
				case mpegts.STREAM_TYPE_AAC:
					writer.AudioFrame.ICodecCtx = &codec.AACCtx{}
				case mpegts.STREAM_TYPE_G711A:
					writer.AudioFrame.ICodecCtx = codec.NewPCMACtx()
				case mpegts.STREAM_TYPE_G711U:
					writer.AudioFrame.ICodecCtx = codec.NewPCMUCtx()
				}
			}
			pes := writer.AudioFrame
			reader := payload.NewReader()
			pesHeader, err = mpegts.ReadPESHeader(&io.LimitedReader{R: &reader, N: int64(payload.Size)})
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to read PES header"))
			}
			if pesHeader.Pts != 0 && pesHeader.Pts != lastAudioPts {
				if pes.Size > 0 {
					err = writer.NextAudio()
					if err != nil {
						return errors.Join(err, fmt.Errorf("failed to get next audio frame"))
					}
					pes = writer.AudioFrame
				}
				// Audio has no B-frames, so PTS is used for both timestamps.
				pes.SetDTS(time.Duration(pesHeader.Pts))
				pes.SetPTS(time.Duration(pesHeader.Pts))
				lastAudioPts = pesHeader.Pts
			}
			// Copy the remaining PES payload into the audio frame buffer.
			reader.Range(func(buf []byte) {
				copy(pes.NextN(len(buf)), buf)
			})
			// reader.Range(pes.PushOne)
		case StartCodeMAP:
			var psm util.Memory
			psm, err = s.ReadPayload(reader)
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to read program stream map"))
			}
			err = s.decProgramStreamMap(psm)
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to decode program stream map"))
			}
		default:
			// Unknown/unhandled stream: skip its length-prefixed payload.
			payloadlen, err := reader.ReadBE(2)
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to read payload length"))
			}
			reader.Skip(payloadlen)
		}
	}
}
|
||||
|
||||
func (s *MpegPsDemuxer) ReadPayload(reader *util.BufReader) (payload util.Memory, err error) {
|
||||
payloadlen, err := reader.ReadBE(2)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return reader.ReadBytes(payloadlen)
|
||||
}
|
||||
|
||||
func (s *MpegPsDemuxer) decProgramStreamMap(psm util.Memory) (err error) {
|
||||
var programStreamInfoLen, programStreamMapLen, elementaryStreamInfoLength uint32
|
||||
var streamType, elementaryStreamID byte
|
||||
reader := psm.NewReader()
|
||||
reader.Skip(2)
|
||||
programStreamInfoLen, err = reader.ReadBE(2)
|
||||
reader.Skip(int(programStreamInfoLen))
|
||||
programStreamMapLen, err = reader.ReadBE(2)
|
||||
for programStreamMapLen > 0 {
|
||||
streamType, err = reader.ReadByte()
|
||||
elementaryStreamID, err = reader.ReadByte()
|
||||
if elementaryStreamID >= 0xe0 && elementaryStreamID <= 0xef {
|
||||
s.stVideo = streamType
|
||||
|
||||
} else if elementaryStreamID >= 0xc0 && elementaryStreamID <= 0xdf {
|
||||
s.stAudio = streamType
|
||||
}
|
||||
elementaryStreamInfoLength, err = reader.ReadBE(2)
|
||||
reader.Skip(int(elementaryStreamInfoLength))
|
||||
programStreamMapLen -= 4 + elementaryStreamInfoLength
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MpegPSMuxer re-multiplexes a subscribed stream into MPEG-PS packets.
// Output bytes accumulate in Packet between the caller's onPacket callbacks
// (see Mux).
type MpegPSMuxer struct {
	*m7s.Subscriber
	Packet *util.RecyclableMemory // buffer holding the bytes of the packet currently being built
}
|
||||
|
||||
func (muxer *MpegPSMuxer) Mux(onPacket func() error) {
|
||||
var pesAudio, pesVideo *MpegpsPESFrame
|
||||
puber := muxer.Publisher
|
||||
var elementary_stream_map_length uint16
|
||||
if puber.HasAudioTrack() {
|
||||
elementary_stream_map_length += 4
|
||||
pesAudio = &MpegpsPESFrame{}
|
||||
pesAudio.StreamID = mpegts.STREAM_ID_AUDIO
|
||||
switch puber.AudioTrack.ICodecCtx.FourCC() {
|
||||
case codec.FourCC_ALAW:
|
||||
pesAudio.StreamType = mpegts.STREAM_TYPE_G711A
|
||||
case codec.FourCC_ULAW:
|
||||
pesAudio.StreamType = mpegts.STREAM_TYPE_G711U
|
||||
case codec.FourCC_MP4A:
|
||||
pesAudio.StreamType = mpegts.STREAM_TYPE_AAC
|
||||
}
|
||||
}
|
||||
if puber.HasVideoTrack() {
|
||||
elementary_stream_map_length += 4
|
||||
pesVideo = &MpegpsPESFrame{}
|
||||
pesVideo.StreamID = mpegts.STREAM_ID_VIDEO
|
||||
switch puber.VideoTrack.ICodecCtx.FourCC() {
|
||||
case codec.FourCC_H264:
|
||||
pesVideo.StreamType = mpegts.STREAM_TYPE_H264
|
||||
case codec.FourCC_H265:
|
||||
pesVideo.StreamType = mpegts.STREAM_TYPE_H265
|
||||
}
|
||||
}
|
||||
var outputBuffer util.Buffer = muxer.Packet.NextN(PSPackHeaderSize + PSMHeaderSize + int(elementary_stream_map_length))
|
||||
outputBuffer.Reset()
|
||||
MuxPSHeader(&outputBuffer)
|
||||
// System Header - 定义流的缓冲区信息
|
||||
// outputBuffer.WriteUint32(StartCodeSYS)
|
||||
// outputBuffer.WriteByte(0x00) // header_length high
|
||||
// outputBuffer.WriteByte(0x0C) // header_length low (12 bytes)
|
||||
// outputBuffer.WriteByte(0x80) // marker + rate_bound[21..15]
|
||||
// outputBuffer.WriteByte(0x62) // rate_bound[14..8]
|
||||
// outputBuffer.WriteByte(0x4E) // rate_bound[7..1] + marker
|
||||
// outputBuffer.WriteByte(0x01) // audio_bound + fixed_flag + CSPS_flag + system_audio_lock_flag + system_video_lock_flag + marker
|
||||
// outputBuffer.WriteByte(0x01) // video_bound + packet_rate_restriction_flag + reserved
|
||||
// outputBuffer.WriteByte(frame.StreamId) // stream_id
|
||||
// outputBuffer.WriteByte(0xC0) // '11' + P-STD_buffer_bound_scale
|
||||
// outputBuffer.WriteByte(0x20) // P-STD_buffer_size_bound low
|
||||
// outputBuffer.WriteByte(0x00) // P-STD_buffer_size_bound high
|
||||
// outputBuffer.WriteByte(0x00)
|
||||
// outputBuffer.WriteByte(0x00)
|
||||
// outputBuffer.WriteByte(0x00)
|
||||
|
||||
// PSM Header - 程序流映射,定义流类型
|
||||
outputBuffer.WriteUint32(StartCodeMAP)
|
||||
outputBuffer.WriteUint16(uint16(PSMHeaderSize) + elementary_stream_map_length - 6) // psm_length
|
||||
outputBuffer.WriteByte(0xE0) // current_next_indicator + reserved + psm_version
|
||||
outputBuffer.WriteByte(0xFF) // reserved + marker
|
||||
outputBuffer.WriteUint16(0) // program_stream_info_length
|
||||
|
||||
outputBuffer.WriteUint16(elementary_stream_map_length)
|
||||
if pesAudio != nil {
|
||||
outputBuffer.WriteByte(pesAudio.StreamType) // stream_type
|
||||
outputBuffer.WriteByte(pesAudio.StreamID) // elementary_stream_id
|
||||
outputBuffer.WriteUint16(0) // elementary_stream_info_length
|
||||
}
|
||||
if pesVideo != nil {
|
||||
outputBuffer.WriteByte(pesVideo.StreamType) // stream_type
|
||||
outputBuffer.WriteByte(pesVideo.StreamID) // elementary_stream_id
|
||||
outputBuffer.WriteUint16(0) // elementary_stream_info_length
|
||||
}
|
||||
onPacket()
|
||||
m7s.PlayBlock(muxer.Subscriber, func(audio *format.Mpeg2Audio) error {
|
||||
pesAudio.Pts = uint64(audio.GetPTS())
|
||||
pesAudio.WritePESPacket(audio.Memory, muxer.Packet)
|
||||
return onPacket()
|
||||
}, func(video *format.AnnexB) error {
|
||||
pesVideo.Pts = uint64(video.GetPTS())
|
||||
pesVideo.Dts = uint64(video.GetDTS())
|
||||
pesVideo.WritePESPacket(video.Memory, muxer.Packet)
|
||||
|
||||
return onPacket()
|
||||
})
|
||||
}
|
||||
|
||||
// MuxPSHeader appends a 14-byte MPEG-PS pack header (start code + SCR +
// program_mux_rate, stuffing_length 0) to outputBuffer. The SCR base is
// derived from wall-clock time converted to 90 kHz units.
func MuxPSHeader(outputBuffer *util.Buffer) {
	// Write the PS pack header - per the MPEG-2 program stream standard.
	// Pack start code: 0x000001BA
	outputBuffer.WriteUint32(StartCodePS)
	// SCR field (System Clock Reference) - mirrors the ps-muxer.go approach.
	// System clock reference in 90 kHz ticks.
	scr := uint64(time.Now().UnixMilli()) * 90
	outputBuffer.WriteByte(0x44 | byte((scr>>30)&0x07)) // '01' + SCR[32..30]
	outputBuffer.WriteByte(byte((scr >> 22) & 0xFF))    // SCR[29..22]
	outputBuffer.WriteByte(0x04 | byte((scr>>20)&0x03)) // marker + SCR[21..20]
	outputBuffer.WriteByte(byte((scr >> 12) & 0xFF))    // SCR[19..12]
	outputBuffer.WriteByte(0x04 | byte((scr>>10)&0x03)) // marker + SCR[11..10]
	outputBuffer.WriteByte(byte((scr >> 2) & 0xFF))     // SCR[9..2]
	outputBuffer.WriteByte(0x04 | byte(scr&0x03))       // marker + SCR[1..0]
	outputBuffer.WriteByte(0x01)                        // SCR_ext + marker
	outputBuffer.WriteByte(0x89)                        // program_mux_rate high
	outputBuffer.WriteByte(0xC8)                        // program_mux_rate low + markers + reserved + stuffing_length(0)
	// NOTE(review): this packs the SCR base MSB-first in a simplified layout
	// rather than the exact ISO 13818-1 bit interleaving - confirm target
	// receivers accept these SCR values before relying on them for timing.
}
|
||||
853
pkg/format/ps/mpegps_test.go
Normal file
853
pkg/format/ps/mpegps_test.go
Normal file
@@ -0,0 +1,853 @@
|
||||
package mpegps
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
// min returns the smaller of a and b.
// (Kept as a local helper for compatibility with Go versions that predate
// the built-in min of Go 1.21.)
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
||||
|
||||
// TestMpegPSConstants pins the values of the package's MPEG-PS constants so
// an accidental edit of a start code or header size is caught.
func TestMpegPSConstants(t *testing.T) {
	// Test that PS constants are properly defined
	t.Run("Constants", func(t *testing.T) {
		if StartCodePS != 0x000001ba {
			t.Errorf("Expected StartCodePS %x, got %x", 0x000001ba, StartCodePS)
		}

		if PSPackHeaderSize != 14 {
			t.Errorf("Expected PSPackHeaderSize %d, got %d", 14, PSPackHeaderSize)
		}

		if MaxPESPayloadSize != 0xFFEB {
			t.Errorf("Expected MaxPESPayloadSize %x, got %x", 0xFFEB, MaxPESPayloadSize)
		}
	})
}
|
||||
|
||||
// TestMuxPSHeader checks that MuxPSHeader emits exactly PSPackHeaderSize
// bytes beginning with the 0x000001BA pack start code.
func TestMuxPSHeader(t *testing.T) {
	// Test PS header generation
	t.Run("PSHeader", func(t *testing.T) {
		// Create a buffer for testing - initialize with length 0 to allow appending
		buffer := make([]byte, 0, PSPackHeaderSize)
		utilBuffer := util.Buffer(buffer)

		// Call MuxPSHeader
		MuxPSHeader(&utilBuffer)

		// Check the buffer length
		if len(utilBuffer) != PSPackHeaderSize {
			t.Errorf("Expected buffer length %d, got %d", PSPackHeaderSize, len(utilBuffer))
		}

		// Check PS start code (first 4 bytes should be 0x00 0x00 0x01 0xBA)
		expectedStartCode := []byte{0x00, 0x00, 0x01, 0xBA}
		if !bytes.Equal(utilBuffer[:4], expectedStartCode) {
			t.Errorf("Expected PS start code %x, got %x", expectedStartCode, utilBuffer[:4])
		}

		t.Logf("PS Header: %x", utilBuffer)
		t.Logf("Buffer length: %d", len(utilBuffer))
	})
}
|
||||
|
||||
// TestMpegpsPESFrame sanity-checks that MpegpsPESFrame fields (StreamType,
// Pts, Dts) round-trip the values they are assigned.
func TestMpegpsPESFrame(t *testing.T) {
	// Test MpegpsPESFrame basic functionality
	t.Run("PESFrame", func(t *testing.T) {
		// Create PES frame
		pesFrame := &MpegpsPESFrame{
			StreamType: 0x1B, // H.264
		}
		pesFrame.Pts = 90000 // 1 second in 90kHz clock
		pesFrame.Dts = 90000

		// Test basic properties
		if pesFrame.StreamType != 0x1B {
			t.Errorf("Expected stream type 0x1B, got %x", pesFrame.StreamType)
		}

		if pesFrame.Pts != 90000 {
			t.Errorf("Expected PTS %d, got %d", 90000, pesFrame.Pts)
		}

		if pesFrame.Dts != 90000 {
			t.Errorf("Expected DTS %d, got %d", 90000, pesFrame.Dts)
		}

		t.Logf("PES Frame: StreamType=%x, PTS=%d, DTS=%d", pesFrame.StreamType, pesFrame.Pts, pesFrame.Dts)
	})
}
|
||||
|
||||
// TestReadPayload verifies that MpegPsDemuxer.ReadPayload reads a 16-bit
// big-endian length prefix and returns exactly that many payload bytes.
func TestReadPayload(t *testing.T) {
	// Test ReadPayload functionality
	t.Run("ReadPayload", func(t *testing.T) {
		// Create test data with payload length and payload
		testData := []byte{
			0x00, 0x05, // Payload length = 5 bytes
			0x01, 0x02, 0x03, 0x04, 0x05, // Payload data
		}

		demuxer := &MpegPsDemuxer{}
		reader := util.NewBufReader(bytes.NewReader(testData))

		payload, err := demuxer.ReadPayload(reader)
		if err != nil {
			t.Fatalf("ReadPayload failed: %v", err)
		}

		if payload.Size != 5 {
			t.Errorf("Expected payload size 5, got %d", payload.Size)
		}

		expectedPayload := []byte{0x01, 0x02, 0x03, 0x04, 0x05}
		if !bytes.Equal(payload.ToBytes(), expectedPayload) {
			t.Errorf("Expected payload %x, got %x", expectedPayload, payload.ToBytes())
		}

		t.Logf("ReadPayload successful: %x", payload.ToBytes())
	})
}
|
||||
|
||||
// TestMpegPSMuxerBasic exercises the muxer building blocks without a full
// publisher/subscriber pipeline: it generates and validates the pack header
// and a hand-built PSM ("MuxBasic", with nested ReadPayload/PSHeader
// subtests that reuse the outer buffer), then writes a 5000-byte payload
// through WritePESPacket and parses the resulting PES packets back,
// checking that the payload sizes add up ("PESGeneration").
func TestMpegPSMuxerBasic(t *testing.T) {
	// Test MpegPSMuxer basic functionality
	t.Run("MuxBasic", func(t *testing.T) {

		// Test basic PS header generation without PlayBlock
		// This focuses on testing the header generation logic
		var outputBuffer util.Buffer = make([]byte, 0, 1024)
		outputBuffer.Reset()

		// Test PS header generation
		MuxPSHeader(&outputBuffer)

		// Add stuffing bytes as expected by the demuxer
		// The demuxer expects: 9 bytes + 1 stuffing length byte + stuffing bytes
		stuffingLength := byte(0x00) // No stuffing bytes
		outputBuffer.WriteByte(stuffingLength)

		// Verify PS header contains expected start code
		if len(outputBuffer) != PSPackHeaderSize+1 {
			t.Errorf("Expected PS header size %d, got %d", PSPackHeaderSize+1, len(outputBuffer))
		}

		// Check for PS start code
		if !bytes.Contains(outputBuffer, []byte{0x00, 0x00, 0x01, 0xBA}) {
			t.Error("PS header does not contain PS start code")
		}

		t.Logf("PS Header: %x", outputBuffer)
		t.Logf("PS Header size: %d bytes", len(outputBuffer))

		// Test PSM header generation
		var pesAudio, pesVideo *MpegpsPESFrame
		var elementary_stream_map_length uint16

		// Simulate audio stream
		hasAudio := true
		if hasAudio {
			elementary_stream_map_length += 4
			pesAudio = &MpegpsPESFrame{}
			pesAudio.StreamID = 0xC0   // MPEG audio
			pesAudio.StreamType = 0x0F // AAC
		}

		// Simulate video stream
		hasVideo := true
		if hasVideo {
			elementary_stream_map_length += 4
			pesVideo = &MpegpsPESFrame{}
			pesVideo.StreamID = 0xE0   // MPEG video
			pesVideo.StreamType = 0x1B // H.264
		}

		// Create PSM header with proper payload length
		psmData := make([]byte, 0, PSMHeaderSize+int(elementary_stream_map_length))
		psmBuffer := util.Buffer(psmData)
		psmBuffer.Reset()

		// Write PSM start code
		psmBuffer.WriteUint32(StartCodeMAP)
		psmLength := uint16(PSMHeaderSize + int(elementary_stream_map_length) - 6)
		psmBuffer.WriteUint16(psmLength) // psm_length
		psmBuffer.WriteByte(0xE0)        // current_next_indicator + reserved + psm_version
		psmBuffer.WriteByte(0xFF)        // reserved + marker
		psmBuffer.WriteUint16(0)         // program_stream_info_length

		psmBuffer.WriteUint16(elementary_stream_map_length)
		if pesAudio != nil {
			psmBuffer.WriteByte(pesAudio.StreamType) // stream_type
			psmBuffer.WriteByte(pesAudio.StreamID)   // elementary_stream_id
			psmBuffer.WriteUint16(0)                 // elementary_stream_info_length
		}
		if pesVideo != nil {
			psmBuffer.WriteByte(pesVideo.StreamType) // stream_type
			psmBuffer.WriteByte(pesVideo.StreamID)   // elementary_stream_id
			psmBuffer.WriteUint16(0)                 // elementary_stream_info_length
		}

		// Verify PSM header
		if len(psmBuffer) != PSMHeaderSize+int(elementary_stream_map_length) {
			t.Errorf("Expected PSM size %d, got %d", PSMHeaderSize+int(elementary_stream_map_length), len(psmBuffer))
		}

		// Check for PSM start code
		if !bytes.Contains(psmBuffer, []byte{0x00, 0x00, 0x01, 0xBC}) {
			t.Error("PSM header does not contain PSM start code")
		}

		t.Logf("PSM Header: %x", psmBuffer)
		t.Logf("PSM Header size: %d bytes", len(psmBuffer))

		// Test ReadPayload function directly
		t.Run("ReadPayload", func(t *testing.T) {
			// Create test payload data
			testPayload := []byte{0x01, 0x02, 0x03, 0x04, 0x05}

			// Create a packet with length prefix
			packetData := make([]byte, 0, 2+len(testPayload))
			packetData = append(packetData, byte(len(testPayload)>>8), byte(len(testPayload)))
			packetData = append(packetData, testPayload...)

			reader := util.NewBufReader(bytes.NewReader(packetData))
			demuxer := &MpegPsDemuxer{}

			// Test ReadPayload function
			payload, err := demuxer.ReadPayload(reader)
			if err != nil {
				t.Fatalf("ReadPayload failed: %v", err)
			}

			if payload.Size != len(testPayload) {
				t.Errorf("Expected payload size %d, got %d", len(testPayload), payload.Size)
			}

			if !bytes.Equal(payload.ToBytes(), testPayload) {
				t.Errorf("Expected payload %x, got %x", testPayload, payload.ToBytes())
			}

			t.Logf("ReadPayload test passed: %x", payload.ToBytes())
		})

		// Test basic demuxing with PS header only
		t.Run("PSHeader", func(t *testing.T) {
			// Create a simple test that just verifies the PS header structure
			// without trying to demux it (which expects more data)
			if len(outputBuffer) < 4 {
				t.Errorf("PS header too short: %d bytes", len(outputBuffer))
			}

			// Check that it starts with the correct start code
			if !bytes.HasPrefix(outputBuffer, []byte{0x00, 0x00, 0x01, 0xBA}) {
				t.Errorf("PS header does not start with correct start code: %x", outputBuffer[:4])
			}

			t.Logf("PS header structure test passed")
		})

		t.Logf("Basic mux/demux test completed successfully")
	})

	// Test basic PES packet generation without PlayBlock
	t.Run("PESGeneration", func(t *testing.T) {
		// Create a test that simulates PES packet generation
		// without requiring a full subscriber setup

		// Create test payload
		testPayload := make([]byte, 5000)
		for i := range testPayload {
			testPayload[i] = byte(i % 256)
		}

		// Create PES frame
		pesFrame := &MpegpsPESFrame{
			StreamType: 0x1B, // H.264
		}
		pesFrame.Pts = 90000
		pesFrame.Dts = 90000

		// Create allocator for testing
		allocator := util.NewScalableMemoryAllocator(1024 * 1024)
		packet := util.NewRecyclableMemory(allocator)

		// Write PES packet
		err := pesFrame.WritePESPacket(util.NewMemory(testPayload), &packet)
		if err != nil {
			t.Fatalf("WritePESPacket failed: %v", err)
		}

		// Verify packet was written
		packetData := packet.ToBytes()
		if len(packetData) == 0 {
			t.Fatal("No data was written to packet")
		}

		t.Logf("PES packet generated: %d bytes", len(packetData))
		t.Logf("Packet data (first 64 bytes): %x", packetData[:min(64, len(packetData))])

		// Verify PS header is present
		if !bytes.Contains(packetData, []byte{0x00, 0x00, 0x01, 0xBA}) {
			t.Error("PES packet does not contain PS start code")
		}

		// Test reading back the packet
		reader := util.NewBufReader(bytes.NewReader(packetData))

		// Skip PS header
		code, err := reader.ReadBE32(4)
		if err != nil {
			t.Fatalf("Failed to read start code: %v", err)
		}
		if code != StartCodePS {
			t.Errorf("Expected PS start code %x, got %x", StartCodePS, code)
		}

		// Skip PS header
		if err = reader.Skip(9); err != nil {
			t.Fatalf("Failed to skip PS header: %v", err)
		}
		psl, err := reader.ReadByte()
		if err != nil {
			t.Fatalf("Failed to read stuffing length: %v", err)
		}
		psl &= 0x07
		if err = reader.Skip(int(psl)); err != nil {
			t.Fatalf("Failed to skip stuffing bytes: %v", err)
		}

		// Read PES packets directly by parsing the PES structure
		totalPayloadSize := 0
		packetCount := 0

		for reader.Buffered() > 0 {
			// Read PES packet start code (0x00000100 + stream_id)
			pesStartCode, err := reader.ReadBE32(4)
			if err != nil {
				if err == io.EOF {
					break
				}
				t.Fatalf("Failed to read PES start code: %v", err)
			}

			// Check if it's a PES packet (starts with 0x000001)
			if pesStartCode&0xFFFFFF00 != 0x00000100 {
				t.Errorf("Invalid PES start code: %x", pesStartCode)
				break
			}

			// // streamID := byte(pesStartCode & 0xFF)
			t.Logf("PES packet %d: stream_id=0x%02x", packetCount+1, pesStartCode&0xFF)

			// Read PES packet length
			pesLength, err := reader.ReadBE(2)
			if err != nil {
				t.Fatalf("Failed to read PES length: %v", err)
			}

			// Read PES header
			// Skip the first byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags1: %v", err)
			}

			// Skip the second byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags2: %v", err)
			}

			// Read header data length
			headerDataLength, err := reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES header data length: %v", err)
			}

			// Skip header data
			if err = reader.Skip(int(headerDataLength)); err != nil {
				t.Fatalf("Failed to skip PES header data: %v", err)
			}

			// Calculate payload size
			payloadSize := pesLength - 3 - int(headerDataLength) // 3 = flags1 + flags2 + headerDataLength
			if payloadSize > 0 {
				// Read payload data
				payload, err := reader.ReadBytes(payloadSize)
				if err != nil {
					t.Fatalf("Failed to read PES payload: %v", err)
				}

				totalPayloadSize += payload.Size
				t.Logf("PES packet %d: %d bytes payload", packetCount+1, payload.Size)
			}

			packetCount++
		}

		// Verify total payload size matches
		if totalPayloadSize != len(testPayload) {
			t.Errorf("Expected total payload size %d, got %d", len(testPayload), totalPayloadSize)
		}

		t.Logf("PES generation test completed successfully: %d packets, total %d bytes", packetCount, totalPayloadSize)
	})
}
|
||||
|
||||
// TestPESPacketWriteRead round-trips a 1000-byte payload through
// MpegpsPESFrame.WritePESPacket, then parses the emitted bytes back
// (pack header, then each PES packet's start code, length, flags, header
// data and payload) and checks the reassembled payload size matches.
// Content equality is not asserted because the payload may be fragmented
// across multiple PES packets.
func TestPESPacketWriteRead(t *testing.T) {
	// Test PES packet writing and reading functionality
	t.Run("PESWriteRead", func(t *testing.T) {
		// Create test payload data
		testPayload := make([]byte, 1000)
		for i := range testPayload {
			testPayload[i] = byte(i % 256)
		}

		// Create PES frame
		pesFrame := &MpegpsPESFrame{
			StreamType: 0x1B, // H.264
		}
		pesFrame.Pts = 90000 // 1 second in 90kHz clock
		pesFrame.Dts = 90000

		// Create allocator for testing
		allocator := util.NewScalableMemoryAllocator(1024)
		packet := util.NewRecyclableMemory(allocator)

		// Write PES packet
		err := pesFrame.WritePESPacket(util.NewMemory(testPayload), &packet)
		if err != nil {
			t.Fatalf("WritePESPacket failed: %v", err)
		}

		// Verify that packet was written
		packetData := packet.ToBytes()
		if len(packetData) == 0 {
			t.Fatal("No data was written to packet")
		}

		t.Logf("PES packet written: %d bytes", len(packetData))
		t.Logf("Packet data (first 64 bytes): %x", packetData[:min(64, len(packetData))])

		// Verify PS header is present
		if !bytes.Contains(packetData, []byte{0x00, 0x00, 0x01, 0xBA}) {
			t.Error("PES packet does not contain PS start code")
		}

		// Now test reading the PES packet back
		reader := util.NewBufReader(bytes.NewReader(packetData))

		// Read and process the PS header
		code, err := reader.ReadBE32(4)
		if err != nil {
			t.Fatalf("Failed to read start code: %v", err)
		}
		if code != StartCodePS {
			t.Errorf("Expected PS start code %x, got %x", StartCodePS, code)
		}

		// Skip PS header (9 bytes + stuffing length)
		if err = reader.Skip(9); err != nil {
			t.Fatalf("Failed to skip PS header: %v", err)
		}
		psl, err := reader.ReadByte()
		if err != nil {
			t.Fatalf("Failed to read stuffing length: %v", err)
		}
		psl &= 0x07
		if err = reader.Skip(int(psl)); err != nil {
			t.Fatalf("Failed to skip stuffing bytes: %v", err)
		}

		// Read PES packet directly by parsing the PES structure
		totalPayloadSize := 0
		packetCount := 0

		for reader.Buffered() > 0 {
			// Read PES packet start code (0x00000100 + stream_id)
			pesStartCode, err := reader.ReadBE32(4)
			if err != nil {
				if err == io.EOF {
					break
				}
				t.Fatalf("Failed to read PES start code: %v", err)
			}

			// Check if it's a PES packet (starts with 0x000001)
			if pesStartCode&0xFFFFFF00 != 0x00000100 {
				t.Errorf("Invalid PES start code: %x", pesStartCode)
				break
			}

			// // streamID := byte(pesStartCode & 0xFF)
			t.Logf("PES packet %d: stream_id=0x%02x", packetCount+1, pesStartCode&0xFF)

			// Read PES packet length
			pesLength, err := reader.ReadBE(2)
			if err != nil {
				t.Fatalf("Failed to read PES length: %v", err)
			}

			// Read PES header
			// Skip the first byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags1: %v", err)
			}

			// Skip the second byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags2: %v", err)
			}

			// Read header data length
			headerDataLength, err := reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES header data length: %v", err)
			}

			// Skip header data
			if err = reader.Skip(int(headerDataLength)); err != nil {
				t.Fatalf("Failed to skip PES header data: %v", err)
			}

			// Calculate payload size
			payloadSize := pesLength - 3 - int(headerDataLength) // 3 = flags1 + flags2 + headerDataLength
			if payloadSize > 0 {
				// Read payload data
				payload, err := reader.ReadBytes(payloadSize)
				if err != nil {
					t.Fatalf("Failed to read PES payload: %v", err)
				}

				totalPayloadSize += payload.Size
				t.Logf("PES packet %d: %d bytes payload", packetCount+1, payload.Size)
			}

			packetCount++
		}

		t.Logf("PES payload read: %d bytes", totalPayloadSize)

		// Verify payload size
		if totalPayloadSize != len(testPayload) {
			t.Errorf("Expected payload size %d, got %d", len(testPayload), totalPayloadSize)
		}

		// Note: We can't easily verify the content because the payload is fragmented across multiple PES packets
		// But we can verify the total size is correct

		t.Logf("PES packet write-read test completed successfully")
	})
}
|
||||
|
||||
func TestLargePESPacket(t *testing.T) {
|
||||
// Test large PES packet handling (payload > 65535 bytes)
|
||||
t.Run("LargePESPacket", func(t *testing.T) {
|
||||
// Create large test payload (exceeds 65535 bytes)
|
||||
largePayload := make([]byte, 70000) // 70KB payload
|
||||
for i := range largePayload {
|
||||
largePayload[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Create PES frame
|
||||
pesFrame := &MpegpsPESFrame{
|
||||
StreamType: 0x1B, // H.264
|
||||
}
|
||||
pesFrame.Pts = 180000 // 2 seconds in 90kHz clock
|
||||
pesFrame.Dts = 180000
|
||||
|
||||
// Create allocator for testing
|
||||
allocator := util.NewScalableMemoryAllocator(1024*1024) // 1MB allocator
|
||||
packet := util.NewRecyclableMemory(allocator)
|
||||
|
||||
// Write large PES packet
|
||||
t.Logf("Writing large PES packet with %d bytes payload", len(largePayload))
|
||||
err := pesFrame.WritePESPacket(util.NewMemory(largePayload), &packet)
|
||||
if err != nil {
|
||||
t.Fatalf("WritePESPacket failed for large payload: %v", err)
|
||||
}
|
||||
|
||||
// Verify that packet was written
|
||||
packetData := packet.ToBytes()
|
||||
if len(packetData) == 0 {
|
||||
t.Fatal("No data was written to packet")
|
||||
}
|
||||
|
||||
t.Logf("Large PES packet written: %d bytes", len(packetData))
|
||||
|
||||
// Verify PS header is present
|
||||
if !bytes.Contains(packetData, []byte{0x00, 0x00, 0x01, 0xBA}) {
|
||||
t.Error("Large PES packet does not contain PS start code")
|
||||
}
|
||||
|
||||
// Count number of PES packets (should be multiple due to size limitation)
|
||||
pesCount := 0
|
||||
reader := util.NewBufReader(bytes.NewReader(packetData))
|
||||
|
||||
// Skip PS header
|
||||
code, err := reader.ReadBE32(4)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read start code: %v", err)
|
||||
}
|
||||
if code != StartCodePS {
|
||||
t.Errorf("Expected PS start code %x, got %x", StartCodePS, code)
|
||||
}
|
||||
|
||||
// Skip PS header
|
||||
if err = reader.Skip(9); err != nil {
|
||||
t.Fatalf("Failed to skip PS header: %v", err)
|
||||
}
|
||||
psl, err := reader.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read stuffing length: %v", err)
|
||||
}
|
||||
psl &= 0x07
|
||||
if err = reader.Skip(int(psl)); err != nil {
|
||||
t.Fatalf("Failed to skip stuffing bytes: %v", err)
|
||||
}
|
||||
|
||||
// Read and count PES packets
|
||||
totalPayloadSize := 0
|
||||
|
||||
for reader.Buffered() > 0 {
|
||||
// Read PES packet start code (0x00000100 + stream_id)
|
||||
pesStartCode, err := reader.ReadBE32(4)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
t.Fatalf("Failed to read PES start code: %v", err)
|
||||
}
|
||||
|
||||
// Check if it's a PES packet (starts with 0x000001)
|
||||
if pesStartCode&0xFFFFFF00 != 0x00000100 {
|
||||
t.Errorf("Invalid PES start code: %x", pesStartCode)
|
||||
break
|
||||
}
|
||||
|
||||
// streamID := byte(pesStartCode & 0xFF)
|
||||
|
||||
// Read PES packet length
|
||||
pesLength, err := reader.ReadBE(2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES length: %v", err)
|
||||
}
|
||||
|
||||
// Read PES header
|
||||
// Skip the first byte (flags)
|
||||
_, err = reader.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES flags1: %v", err)
|
||||
}
|
||||
|
||||
// Skip the second byte (flags)
|
||||
_, err = reader.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES flags2: %v", err)
|
||||
}
|
||||
|
||||
// Read header data length
|
||||
headerDataLength, err := reader.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES header data length: %v", err)
|
||||
}
|
||||
|
||||
// Skip header data
|
||||
if err = reader.Skip(int(headerDataLength)); err != nil {
|
||||
t.Fatalf("Failed to skip PES header data: %v", err)
|
||||
}
|
||||
|
||||
// Calculate payload size
|
||||
payloadSize := pesLength - 3 - int(headerDataLength) // 3 = flags1 + flags2 + headerDataLength
|
||||
if payloadSize > 0 {
|
||||
// Read payload data
|
||||
payload, err := reader.ReadBytes(payloadSize)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES payload: %v", err)
|
||||
}
|
||||
|
||||
totalPayloadSize += payload.Size
|
||||
t.Logf("PES packet %d: %d bytes payload", pesCount+1, payload.Size)
|
||||
}
|
||||
|
||||
pesCount++
|
||||
}
|
||||
|
||||
// Verify that we got multiple PES packets
|
||||
if pesCount < 2 {
|
||||
t.Errorf("Expected multiple PES packets for large payload, got %d", pesCount)
|
||||
}
|
||||
|
||||
// Verify total payload size
|
||||
if totalPayloadSize != len(largePayload) {
|
||||
t.Errorf("Expected total payload size %d, got %d", len(largePayload), totalPayloadSize)
|
||||
}
|
||||
|
||||
// Verify individual PES packet sizes don't exceed maximum
|
||||
maxPacketSize := MaxPESPayloadSize + PESHeaderMinSize
|
||||
if pesCount == 1 && len(packetData) > maxPacketSize {
|
||||
t.Errorf("Single PES packet exceeds maximum size: %d > %d", len(packetData), maxPacketSize)
|
||||
}
|
||||
|
||||
t.Logf("Large PES packet test completed successfully: %d packets, total %d bytes", pesCount, totalPayloadSize)
|
||||
})
|
||||
}
|
||||
|
||||
func TestPESPacketBoundaryConditions(t *testing.T) {
|
||||
// Test PES packet boundary conditions
|
||||
t.Run("BoundaryConditions", func(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
payloadSize int
|
||||
}{
|
||||
{"EmptyPayload", 0},
|
||||
{"SmallPayload", 1},
|
||||
{"ExactBoundary", MaxPESPayloadSize},
|
||||
{"JustOverBoundary", MaxPESPayloadSize + 1},
|
||||
{"MultipleBoundary", MaxPESPayloadSize * 2 + 100},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create test payload
|
||||
testPayload := make([]byte, tc.payloadSize)
|
||||
for i := range testPayload {
|
||||
testPayload[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Create PES frame
|
||||
pesFrame := &MpegpsPESFrame{
|
||||
StreamType: 0x1B, // H.264
|
||||
}
|
||||
pesFrame.Pts = uint64(tc.payloadSize) * 90 // Use payload size as PTS
|
||||
pesFrame.Dts = uint64(tc.payloadSize) * 90
|
||||
|
||||
// Create allocator for testing
|
||||
allocator := util.NewScalableMemoryAllocator(1024*1024)
|
||||
packet := util.NewRecyclableMemory(allocator)
|
||||
|
||||
// Write PES packet
|
||||
err := pesFrame.WritePESPacket(util.NewMemory(testPayload), &packet)
|
||||
if err != nil {
|
||||
t.Fatalf("WritePESPacket failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify that packet was written
|
||||
packetData := packet.ToBytes()
|
||||
if len(packetData) == 0 && tc.payloadSize > 0 {
|
||||
t.Fatal("No data was written to packet for non-empty payload")
|
||||
}
|
||||
|
||||
t.Logf("%s: %d bytes payload -> %d bytes packet", tc.name, tc.payloadSize, len(packetData))
|
||||
|
||||
// For non-empty payloads, verify we can read them back
|
||||
if tc.payloadSize > 0 {
|
||||
reader := util.NewBufReader(bytes.NewReader(packetData))
|
||||
|
||||
// Skip PS header
|
||||
code, err := reader.ReadBE32(4)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read start code: %v", err)
|
||||
}
|
||||
if code != StartCodePS {
|
||||
t.Errorf("Expected PS start code %x, got %x", StartCodePS, code)
|
||||
}
|
||||
|
||||
// Skip PS header
|
||||
if err = reader.Skip(9); err != nil {
|
||||
t.Fatalf("Failed to skip PS header: %v", err)
|
||||
}
|
||||
psl, err := reader.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read stuffing length: %v", err)
|
||||
}
|
||||
psl &= 0x07
|
||||
if err = reader.Skip(int(psl)); err != nil {
|
||||
t.Fatalf("Failed to skip stuffing bytes: %v", err)
|
||||
}
|
||||
|
||||
// Read PES packets
|
||||
totalPayloadSize := 0
|
||||
packetCount := 0
|
||||
|
||||
for reader.Buffered() > 0 {
|
||||
// Read PES packet start code (0x00000100 + stream_id)
|
||||
pesStartCode, err := reader.ReadBE32(4)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
t.Fatalf("Failed to read PES start code: %v", err)
|
||||
}
|
||||
|
||||
// Check if it's a PES packet (starts with 0x000001)
|
||||
if pesStartCode&0xFFFFFF00 != 0x00000100 {
|
||||
t.Errorf("Invalid PES start code: %x", pesStartCode)
|
||||
break
|
||||
}
|
||||
|
||||
// // streamID := byte(pesStartCode & 0xFF)
|
||||
|
||||
// Read PES packet length
|
||||
pesLength, err := reader.ReadBE(2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES length: %v", err)
|
||||
}
|
||||
|
||||
// Read PES header
|
||||
// Skip the first byte (flags)
|
||||
_, err = reader.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES flags1: %v", err)
|
||||
}
|
||||
|
||||
// Skip the second byte (flags)
|
||||
_, err = reader.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES flags2: %v", err)
|
||||
}
|
||||
|
||||
// Read header data length
|
||||
headerDataLength, err := reader.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES header data length: %v", err)
|
||||
}
|
||||
|
||||
// Skip header data
|
||||
if err = reader.Skip(int(headerDataLength)); err != nil {
|
||||
t.Fatalf("Failed to skip PES header data: %v", err)
|
||||
}
|
||||
|
||||
// Calculate payload size
|
||||
payloadSize := pesLength - 3 - int(headerDataLength) // 3 = flags1 + flags2 + headerDataLength
|
||||
if payloadSize > 0 {
|
||||
// Read payload data
|
||||
payload, err := reader.ReadBytes(payloadSize)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES payload: %v", err)
|
||||
}
|
||||
|
||||
totalPayloadSize += payload.Size
|
||||
}
|
||||
|
||||
packetCount++
|
||||
}
|
||||
|
||||
// Verify total payload size matches
|
||||
if totalPayloadSize != tc.payloadSize {
|
||||
t.Errorf("Expected total payload size %d, got %d", tc.payloadSize, totalPayloadSize)
|
||||
}
|
||||
|
||||
t.Logf("%s: Successfully read back %d PES packets", tc.name, packetCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
35
pkg/format/ps/pes.go
Normal file
35
pkg/format/ps/pes.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package mpegps
|
||||
|
||||
import (
|
||||
mpegts "m7s.live/v5/pkg/format/ts"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
type MpegpsPESFrame struct {
|
||||
StreamType byte // Stream type (e.g., video, audio)
|
||||
mpegts.MpegPESHeader
|
||||
}
|
||||
|
||||
func (frame *MpegpsPESFrame) WritePESPacket(payload util.Memory, allocator *util.RecyclableMemory) (err error) {
|
||||
frame.DataAlignmentIndicator = 1
|
||||
|
||||
pesReader := payload.NewReader()
|
||||
var outputMemory util.Buffer = allocator.NextN(PSPackHeaderSize)
|
||||
outputMemory.Reset()
|
||||
MuxPSHeader(&outputMemory)
|
||||
for pesReader.Length > 0 {
|
||||
currentPESPayload := min(pesReader.Length, MaxPESPayloadSize)
|
||||
var pesHeadItem util.Buffer
|
||||
pesHeadItem, err = frame.WritePESHeader(currentPESPayload)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
copy(allocator.NextN(pesHeadItem.Len()), pesHeadItem)
|
||||
// 申请输出缓冲
|
||||
outputMemory = allocator.NextN(currentPESPayload)
|
||||
pesReader.Read(outputMemory)
|
||||
frame.DataAlignmentIndicator = 0
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
131
pkg/format/raw.go
Normal file
131
pkg/format/raw.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package format
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/deepch/vdk/codec/h264parser"
|
||||
"github.com/deepch/vdk/codec/h265parser"
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
var _ pkg.IAVFrame = (*RawAudio)(nil)
|
||||
|
||||
type RawAudio struct {
|
||||
pkg.Sample
|
||||
}
|
||||
|
||||
func (r *RawAudio) GetSize() int {
|
||||
return r.Raw.(*util.Memory).Size
|
||||
}
|
||||
|
||||
func (r *RawAudio) Demux() error {
|
||||
r.Raw = &r.Memory
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RawAudio) Mux(from *pkg.Sample) (err error) {
|
||||
r.InitRecycleIndexes(0)
|
||||
r.Memory = *from.Raw.(*util.Memory)
|
||||
r.ICodecCtx = from.GetBase()
|
||||
return
|
||||
}
|
||||
|
||||
func (r *RawAudio) String() string {
|
||||
return fmt.Sprintf("RawAudio{FourCC: %s, Timestamp: %s, Size: %d}", r.FourCC(), r.Timestamp, r.Size)
|
||||
}
|
||||
|
||||
var _ pkg.IAVFrame = (*H26xFrame)(nil)
|
||||
|
||||
type H26xFrame struct {
|
||||
pkg.Sample
|
||||
}
|
||||
|
||||
func (h *H26xFrame) CheckCodecChange() (err error) {
|
||||
if h.ICodecCtx == nil {
|
||||
return pkg.ErrUnsupportCodec
|
||||
}
|
||||
var hasVideoFrame bool
|
||||
switch ctx := h.GetBase().(type) {
|
||||
case *codec.H264Ctx:
|
||||
var sps, pps []byte
|
||||
for nalu := range h.Raw.(*pkg.Nalus).RangePoint {
|
||||
switch codec.ParseH264NALUType(nalu.Buffers[0][0]) {
|
||||
case codec.NALU_SPS:
|
||||
sps = nalu.ToBytes()
|
||||
case codec.NALU_PPS:
|
||||
pps = nalu.ToBytes()
|
||||
case codec.NALU_IDR_Picture:
|
||||
h.IDR = true
|
||||
case codec.NALU_Non_IDR_Picture:
|
||||
hasVideoFrame = true
|
||||
}
|
||||
}
|
||||
if sps != nil && pps != nil {
|
||||
var codecData h264parser.CodecData
|
||||
codecData, err = h264parser.NewCodecDataFromSPSAndPPS(sps, pps)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !bytes.Equal(codecData.Record, ctx.Record) {
|
||||
h.ICodecCtx = &codec.H264Ctx{
|
||||
CodecData: codecData,
|
||||
}
|
||||
}
|
||||
}
|
||||
case *codec.H265Ctx:
|
||||
var vps, sps, pps []byte
|
||||
for nalu := range h.Raw.(*pkg.Nalus).RangePoint {
|
||||
switch codec.ParseH265NALUType(nalu.Buffers[0][0]) {
|
||||
case h265parser.NAL_UNIT_VPS:
|
||||
vps = nalu.ToBytes()
|
||||
case h265parser.NAL_UNIT_SPS:
|
||||
sps = nalu.ToBytes()
|
||||
case h265parser.NAL_UNIT_PPS:
|
||||
pps = nalu.ToBytes()
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_CRA:
|
||||
h.IDR = true
|
||||
case 1, 2, 3, 4, 5, 6, 7, 8, 9:
|
||||
hasVideoFrame = true
|
||||
}
|
||||
}
|
||||
if vps != nil && sps != nil && pps != nil {
|
||||
var codecData h265parser.CodecData
|
||||
codecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(vps, sps, pps)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !bytes.Equal(codecData.Record, ctx.Record) {
|
||||
h.ICodecCtx = &codec.H265Ctx{
|
||||
CodecData: codecData,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Return ErrSkip if no video frames are present (only metadata NALUs)
|
||||
if !hasVideoFrame && !h.IDR {
|
||||
return pkg.ErrSkip
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *H26xFrame) GetSize() (ret int) {
|
||||
switch raw := r.Raw.(type) {
|
||||
case *pkg.Nalus:
|
||||
for nalu := range raw.RangePoint {
|
||||
ret += nalu.Size
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (h *H26xFrame) String() string {
|
||||
return fmt.Sprintf("H26xFrame{FourCC: %s, Timestamp: %s, CTS: %s}", h.FourCC, h.Timestamp, h.CTS)
|
||||
}
|
||||
@@ -4,7 +4,11 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/format"
|
||||
"m7s.live/v5/pkg/util"
|
||||
//"sync"
|
||||
)
|
||||
@@ -101,22 +105,16 @@ const (
|
||||
//
|
||||
|
||||
type MpegTsStream struct {
|
||||
PAT MpegTsPAT // PAT表信息
|
||||
PMT MpegTsPMT // PMT表信息
|
||||
PESBuffer map[uint16]*MpegTsPESPacket
|
||||
PESChan chan *MpegTsPESPacket
|
||||
PAT MpegTsPAT // PAT表信息
|
||||
PMT MpegTsPMT // PMT表信息
|
||||
Publisher *m7s.Publisher
|
||||
Allocator *util.ScalableMemoryAllocator
|
||||
writer m7s.PublishWriter[*format.Mpeg2Audio, *VideoFrame]
|
||||
audioPID, videoPID, pmtPID uint16
|
||||
tsPacket [TS_PACKET_SIZE]byte
|
||||
}
|
||||
|
||||
// ios13818-1-CN.pdf 33/165
|
||||
//
|
||||
// TS
|
||||
//
|
||||
|
||||
// Packet == Header + Payload == 188 bytes
|
||||
type MpegTsPacket struct {
|
||||
Header MpegTsHeader
|
||||
Payload []byte
|
||||
}
|
||||
|
||||
// 前面32bit的数据即TS分组首部,它指出了这个分组的属性
|
||||
type MpegTsHeader struct {
|
||||
@@ -185,25 +183,6 @@ type MpegTsDescriptor struct {
|
||||
Data []byte
|
||||
}
|
||||
|
||||
func ReadTsPacket(r io.Reader) (packet MpegTsPacket, err error) {
|
||||
lr := &io.LimitedReader{R: r, N: TS_PACKET_SIZE}
|
||||
|
||||
// header
|
||||
packet.Header, err = ReadTsHeader(lr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// payload
|
||||
packet.Payload = make([]byte, lr.N)
|
||||
_, err = lr.Read(packet.Payload)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func ReadTsHeader(r io.Reader) (header MpegTsHeader, err error) {
|
||||
var h uint32
|
||||
|
||||
@@ -365,7 +344,7 @@ func ReadTsHeader(r io.Reader) (header MpegTsHeader, err error) {
|
||||
// Discard 是一个 io.Writer,对它进行的任何 Write 调用都将无条件成功
|
||||
// 但是ioutil.Discard不记录copy得到的数值
|
||||
// 用于发送需要读取但不想存储的数据,目的是耗尽读取端的数据
|
||||
if _, err = io.CopyN(ioutil.Discard, lr, int64(lr.N)); err != nil {
|
||||
if _, err = io.CopyN(io.Discard, lr, int64(lr.N)); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -440,138 +419,96 @@ func WriteTsHeader(w io.Writer, header MpegTsHeader) (written int, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
//
|
||||
//func (s *MpegTsStream) TestWrite(fileName string) error {
|
||||
//
|
||||
// if fileName != "" {
|
||||
// file, err := os.Create(fileName)
|
||||
// if err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
// defer file.Close()
|
||||
//
|
||||
// patTsHeader := []byte{0x47, 0x40, 0x00, 0x10}
|
||||
//
|
||||
// if err := WritePATPacket(file, patTsHeader, *s.pat); err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
//
|
||||
// // TODO:这里的pid应该是由PAT给的
|
||||
// pmtTsHeader := []byte{0x47, 0x41, 0x00, 0x10}
|
||||
//
|
||||
// if err := WritePMTPacket(file, pmtTsHeader, *s.pmt); err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// var videoFrame int
|
||||
// var audioFrame int
|
||||
// for {
|
||||
// tsPesPkt, ok := <-s.TsPesPktChan
|
||||
// if !ok {
|
||||
// fmt.Println("frame index, video , audio :", videoFrame, audioFrame)
|
||||
// break
|
||||
// }
|
||||
//
|
||||
// if tsPesPkt.PesPkt.Header.StreamID == STREAM_ID_AUDIO {
|
||||
// audioFrame++
|
||||
// }
|
||||
//
|
||||
// if tsPesPkt.PesPkt.Header.StreamID == STREAM_ID_VIDEO {
|
||||
// println(tsPesPkt.PesPkt.Header.Pts)
|
||||
// videoFrame++
|
||||
// }
|
||||
//
|
||||
// fmt.Sprintf("%s", tsPesPkt)
|
||||
//
|
||||
// // if err := WritePESPacket(file, tsPesPkt.TsPkt.Header, tsPesPkt.PesPkt); err != nil {
|
||||
// // return err
|
||||
// // }
|
||||
//
|
||||
// }
|
||||
//
|
||||
// return nil
|
||||
//}
|
||||
|
||||
func (s *MpegTsStream) ReadPAT(packet *MpegTsPacket, pr io.Reader) (err error) {
|
||||
// 首先找到PID==0x00的TS包(PAT)
|
||||
if PID_PAT == packet.Header.Pid {
|
||||
if len(packet.Payload) == 188 {
|
||||
pr = &util.Crc32Reader{R: pr, Crc32: 0xffffffff}
|
||||
}
|
||||
// Header + PSI + Paylod
|
||||
s.PAT, err = ReadPAT(pr)
|
||||
}
|
||||
return
|
||||
}
|
||||
func (s *MpegTsStream) ReadPMT(packet *MpegTsPacket, pr io.Reader) (err error) {
|
||||
// 在读取PAT中已经将所有频道节目信息(PMT_PID)保存了起来
|
||||
// 接着读取所有TS包里面的PID,找出PID==PMT_PID的TS包,就是PMT表
|
||||
for _, v := range s.PAT.Program {
|
||||
if v.ProgramMapPID == packet.Header.Pid {
|
||||
if len(packet.Payload) == 188 {
|
||||
pr = &util.Crc32Reader{R: pr, Crc32: 0xffffffff}
|
||||
}
|
||||
// Header + PSI + Paylod
|
||||
s.PMT, err = ReadPMT(pr)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
func (s *MpegTsStream) Feed(ts io.Reader) (err error) {
|
||||
writer := &s.writer
|
||||
var reader bytes.Reader
|
||||
var lr io.LimitedReader
|
||||
lr.R = &reader
|
||||
var tsHeader MpegTsHeader
|
||||
tsData := make([]byte, TS_PACKET_SIZE)
|
||||
for {
|
||||
_, err = io.ReadFull(ts, tsData)
|
||||
var pesHeader MpegPESHeader
|
||||
for !s.Publisher.IsStopped() {
|
||||
_, err = io.ReadFull(ts, s.tsPacket[:])
|
||||
if err == io.EOF {
|
||||
// 文件结尾 把最后面的数据发出去
|
||||
for _, pesPkt := range s.PESBuffer {
|
||||
if pesPkt != nil {
|
||||
s.PESChan <- pesPkt
|
||||
}
|
||||
}
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return
|
||||
}
|
||||
reader.Reset(tsData)
|
||||
reader.Reset(s.tsPacket[:])
|
||||
lr.N = TS_PACKET_SIZE
|
||||
if tsHeader, err = ReadTsHeader(&lr); err != nil {
|
||||
return
|
||||
}
|
||||
if tsHeader.Pid == PID_PAT {
|
||||
switch tsHeader.Pid {
|
||||
case PID_PAT:
|
||||
if s.PAT, err = ReadPAT(&lr); err != nil {
|
||||
return
|
||||
}
|
||||
s.pmtPID = s.PAT.Program[0].ProgramMapPID
|
||||
continue
|
||||
}
|
||||
if len(s.PMT.Stream) == 0 {
|
||||
for _, v := range s.PAT.Program {
|
||||
if v.ProgramMapPID == tsHeader.Pid {
|
||||
if s.PMT, err = ReadPMT(&lr); err != nil {
|
||||
return
|
||||
}
|
||||
for _, v := range s.PMT.Stream {
|
||||
s.PESBuffer[v.ElementaryPID] = nil
|
||||
}
|
||||
}
|
||||
case s.pmtPID:
|
||||
if len(s.PMT.Stream) != 0 {
|
||||
continue
|
||||
}
|
||||
} else if pesPkt, ok := s.PESBuffer[tsHeader.Pid]; ok {
|
||||
if tsHeader.PayloadUnitStartIndicator == 1 {
|
||||
if pesPkt != nil {
|
||||
s.PESChan <- pesPkt
|
||||
}
|
||||
pesPkt = &MpegTsPESPacket{}
|
||||
s.PESBuffer[tsHeader.Pid] = pesPkt
|
||||
if pesPkt.Header, err = ReadPESHeader(&lr); err != nil {
|
||||
return
|
||||
if s.PMT, err = ReadPMT(&lr); err != nil {
|
||||
return
|
||||
}
|
||||
for _, pmt := range s.PMT.Stream {
|
||||
switch pmt.StreamType {
|
||||
case STREAM_TYPE_H265:
|
||||
s.videoPID = pmt.ElementaryPID
|
||||
writer.PublishVideoWriter = m7s.NewPublishVideoWriter[*VideoFrame](s.Publisher, s.Allocator)
|
||||
writer.VideoFrame.ICodecCtx = &codec.H265Ctx{}
|
||||
case STREAM_TYPE_H264:
|
||||
s.videoPID = pmt.ElementaryPID
|
||||
writer.PublishVideoWriter = m7s.NewPublishVideoWriter[*VideoFrame](s.Publisher, s.Allocator)
|
||||
writer.VideoFrame.ICodecCtx = &codec.H264Ctx{}
|
||||
case STREAM_TYPE_AAC:
|
||||
s.audioPID = pmt.ElementaryPID
|
||||
writer.PublishAudioWriter = m7s.NewPublishAudioWriter[*format.Mpeg2Audio](s.Publisher, s.Allocator)
|
||||
writer.AudioFrame.ICodecCtx = &codec.AACCtx{}
|
||||
case STREAM_TYPE_G711A:
|
||||
s.audioPID = pmt.ElementaryPID
|
||||
writer.PublishAudioWriter = m7s.NewPublishAudioWriter[*format.Mpeg2Audio](s.Publisher, s.Allocator)
|
||||
writer.AudioFrame.ICodecCtx = codec.NewPCMACtx()
|
||||
case STREAM_TYPE_G711U:
|
||||
s.audioPID = pmt.ElementaryPID
|
||||
writer.PublishAudioWriter = m7s.NewPublishAudioWriter[*format.Mpeg2Audio](s.Publisher, s.Allocator)
|
||||
writer.AudioFrame.ICodecCtx = codec.NewPCMUCtx()
|
||||
}
|
||||
}
|
||||
io.Copy(&pesPkt.Payload, &lr)
|
||||
case s.audioPID:
|
||||
if tsHeader.PayloadUnitStartIndicator == 1 {
|
||||
if pesHeader, err = ReadPESHeader0(&lr); err != nil {
|
||||
return
|
||||
}
|
||||
if !s.Publisher.PubAudio {
|
||||
continue
|
||||
}
|
||||
if writer.AudioFrame.Size > 0 {
|
||||
if err = writer.NextAudio(); err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
writer.AudioFrame.SetDTS(time.Duration(pesHeader.Pts))
|
||||
}
|
||||
lr.Read(writer.AudioFrame.NextN(int(lr.N)))
|
||||
case s.videoPID:
|
||||
if tsHeader.PayloadUnitStartIndicator == 1 {
|
||||
if pesHeader, err = ReadPESHeader0(&lr); err != nil {
|
||||
return
|
||||
}
|
||||
if !s.Publisher.PubVideo {
|
||||
continue
|
||||
}
|
||||
if writer.VideoFrame.Size > 0 {
|
||||
if err = writer.NextVideo(); err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
writer.VideoFrame.SetDTS(time.Duration(pesHeader.Dts))
|
||||
writer.VideoFrame.SetPTS(time.Duration(pesHeader.Pts))
|
||||
|
||||
}
|
||||
lr.Read(writer.VideoFrame.NextN(int(lr.N)))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -2,39 +2,19 @@ package mpegts
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"m7s.live/v5/pkg/util"
|
||||
"net"
|
||||
)
|
||||
|
||||
// ios13818-1-CN.pdf 45/166
|
||||
//
|
||||
// PES
|
||||
//
|
||||
|
||||
// 每个传输流和节目流在逻辑上都是由 PES 包构造的
|
||||
type MpegTsPesStream struct {
|
||||
TsPkt MpegTsPacket
|
||||
PesPkt MpegTsPESPacket
|
||||
}
|
||||
|
||||
// PES--Packetized Elementary Streams (分组的ES),ES形成的分组称为PES分组,是用来传递ES的一种数据结构
|
||||
// 1110 xxxx 为视频流(0xE0)
|
||||
// 110x xxxx 为音频流(0xC0)
|
||||
type MpegTsPESPacket struct {
|
||||
Header MpegTsPESHeader
|
||||
Payload util.Buffer //从TS包中读取的数据
|
||||
Buffers net.Buffers //用于写TS包
|
||||
}
|
||||
|
||||
type MpegTsPESHeader struct {
|
||||
PacketStartCodePrefix uint32 // 24 bits 同跟随它的 stream_id 一起组成标识包起始端的包起始码.packet_start_code_prefix 为比特串"0000 0000 0000 0000 0000 0001"(0x000001)
|
||||
StreamID byte // 8 bits stream_id 指示基本流的类型和编号,如 stream_id 表 2-22 所定义的.传输流中,stream_id 可以设置为准确描述基本流类型的任何有效值,如表 2-22 所规定的.传输流中,基本流类型在 2.4.4 中所指示的节目特定信息中指定
|
||||
PesPacketLength uint16 // 16 bits 指示 PES 包中跟随该字段最后字节的字节数.0->指示 PES 包长度既未指示也未限定并且仅在这样的 PES 包中才被允许,该 PES 包的有效载荷由来自传输流包中所包含的视频基本流的字节组成
|
||||
|
||||
type MpegPESHeader struct {
|
||||
header [32]byte
|
||||
StreamID byte // 8 bits stream_id 指示基本流的类型和编号,如 stream_id 表 2-22 所定义的.传输流中,stream_id 可以设置为准确描述基本流类型的任何有效值,如表 2-22 所规定的.传输流中,基本流类型在 2.4.4 中所指示的节目特定信息中指定
|
||||
PesPacketLength uint16 // 16 bits 指示 PES 包中跟随该字段最后字节的字节数.0->指示 PES 包长度既未指示也未限定并且仅在这样的 PES 包中才被允许,该 PES 包的有效载荷由来自传输流包中所包含的视频基本流的字节组成
|
||||
MpegTsOptionalPESHeader
|
||||
|
||||
PayloadLength uint64 // 这个不是标准文档里面的字段,是自己添加的,方便计算
|
||||
}
|
||||
|
||||
// 可选的PES Header = MpegTsOptionalPESHeader + stuffing bytes(0xFF) m * 8
|
||||
@@ -99,23 +79,35 @@ type MpegTsOptionalPESHeader struct {
|
||||
// pts_dts_Flags == "11" -> PTS + DTS
|
||||
|
||||
type MpegtsPESFrame struct {
|
||||
Pid uint16
|
||||
IsKeyFrame bool
|
||||
ContinuityCounter byte
|
||||
ProgramClockReferenceBase uint64
|
||||
Pid uint16
|
||||
IsKeyFrame bool
|
||||
ContinuityCounter byte
|
||||
MpegPESHeader
|
||||
}
|
||||
|
||||
func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
|
||||
var flags uint8
|
||||
var length uint
|
||||
func CreatePESWriters() (pesAudio, pesVideo MpegtsPESFrame) {
|
||||
pesAudio, pesVideo = MpegtsPESFrame{
|
||||
Pid: PID_AUDIO,
|
||||
}, MpegtsPESFrame{
|
||||
Pid: PID_VIDEO,
|
||||
}
|
||||
pesAudio.DataAlignmentIndicator = 1
|
||||
pesVideo.DataAlignmentIndicator = 1
|
||||
pesAudio.StreamID = STREAM_ID_AUDIO
|
||||
pesVideo.StreamID = STREAM_ID_VIDEO
|
||||
return
|
||||
}
|
||||
|
||||
func ReadPESHeader0(r *io.LimitedReader) (header MpegPESHeader, err error) {
|
||||
var length uint
|
||||
var packetStartCodePrefix uint32
|
||||
// packetStartCodePrefix(24) (0x000001)
|
||||
header.PacketStartCodePrefix, err = util.ReadByteToUint24(r, true)
|
||||
packetStartCodePrefix, err = util.ReadByteToUint24(r, true)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if header.PacketStartCodePrefix != 0x0000001 {
|
||||
if packetStartCodePrefix != 0x0000001 {
|
||||
err = errors.New("read PacketStartCodePrefix is not 0x0000001")
|
||||
return
|
||||
}
|
||||
@@ -141,18 +133,27 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
|
||||
if length == 0 {
|
||||
length = 1 << 31
|
||||
}
|
||||
var header1 MpegPESHeader
|
||||
header1, err = ReadPESHeader(r)
|
||||
if err == nil {
|
||||
if header.PesPacketLength == 0 {
|
||||
header1.PesPacketLength = uint16(r.N)
|
||||
}
|
||||
header1.StreamID = header.StreamID
|
||||
return header1, nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// lrPacket 和 lrHeader 位置指针是在同一位置的
|
||||
lrPacket := &io.LimitedReader{R: r, N: int64(length)}
|
||||
lrHeader := lrPacket
|
||||
|
||||
func ReadPESHeader(lrPacket *io.LimitedReader) (header MpegPESHeader, err error) {
|
||||
var flags uint8
|
||||
// constTen(2)
|
||||
// pes_ScramblingControl(2)
|
||||
// pes_Priority(1)
|
||||
// dataAlignmentIndicator(1)
|
||||
// copyright(1)
|
||||
// originalOrCopy(1)
|
||||
flags, err = util.ReadByteToUint8(lrHeader)
|
||||
flags, err = util.ReadByteToUint8(lrPacket)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -171,7 +172,7 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
|
||||
// additionalCopyInfoFlag(1)
|
||||
// pes_CRCFlag(1)
|
||||
// pes_ExtensionFlag(1)
|
||||
flags, err = util.ReadByteToUint8(lrHeader)
|
||||
flags, err = util.ReadByteToUint8(lrPacket)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -185,14 +186,14 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
|
||||
header.PesExtensionFlag = flags & 0x01
|
||||
|
||||
// pes_HeaderDataLength(8)
|
||||
header.PesHeaderDataLength, err = util.ReadByteToUint8(lrHeader)
|
||||
header.PesHeaderDataLength, err = util.ReadByteToUint8(lrPacket)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
length = uint(header.PesHeaderDataLength)
|
||||
length := uint(header.PesHeaderDataLength)
|
||||
|
||||
lrHeader = &io.LimitedReader{R: lrHeader, N: int64(length)}
|
||||
lrHeader := &io.LimitedReader{R: lrPacket, N: int64(length)}
|
||||
|
||||
// 00 -> PES 包头中既无任何PTS 字段也无任何DTS 字段存在
|
||||
// 10 -> PES 包头中PTS 字段存在
|
||||
@@ -219,6 +220,8 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
|
||||
}
|
||||
|
||||
header.Dts = util.GetPtsDts(dts)
|
||||
} else {
|
||||
header.Dts = header.Pts
|
||||
}
|
||||
|
||||
// reserved(2) + escr_Base1(3) + marker_bit(1) +
|
||||
@@ -336,48 +339,31 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
|
||||
}
|
||||
}
|
||||
|
||||
// 2的16次方,16个字节
|
||||
if lrPacket.N < 65536 {
|
||||
// 这里得到的其实是负载长度,因为已经偏移过了Header部分.
|
||||
//header.pes_PacketLength = uint16(lrPacket.N)
|
||||
header.PayloadLength = uint64(lrPacket.N)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func WritePESHeader(w io.Writer, header MpegTsPESHeader) (written int, err error) {
|
||||
if header.PacketStartCodePrefix != 0x0000001 {
|
||||
err = errors.New("write PacketStartCodePrefix is not 0x0000001")
|
||||
return
|
||||
func (header *MpegPESHeader) WritePESHeader(esSize int) (w util.Buffer, err error) {
|
||||
if header.DataAlignmentIndicator == 1 {
|
||||
if header.Pts == header.Dts {
|
||||
header.PtsDtsFlags = 0x80
|
||||
header.PesHeaderDataLength = 5
|
||||
} else {
|
||||
header.PtsDtsFlags = 0xC0
|
||||
header.PesHeaderDataLength = 10
|
||||
}
|
||||
} else {
|
||||
header.PtsDtsFlags = 0
|
||||
header.PesHeaderDataLength = 0
|
||||
}
|
||||
|
||||
// packetStartCodePrefix(24) (0x000001)
|
||||
if err = util.WriteUint24ToByte(w, header.PacketStartCodePrefix, true); err != nil {
|
||||
return
|
||||
pktLength := esSize + int(header.PesHeaderDataLength) + 3
|
||||
if pktLength > 0xffff {
|
||||
pktLength = 0
|
||||
}
|
||||
header.PesPacketLength = uint16(pktLength)
|
||||
|
||||
written += 3
|
||||
|
||||
// streamID(8)
|
||||
if err = util.WriteUint8ToByte(w, header.StreamID); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
written += 1
|
||||
|
||||
// pes_PacketLength(16)
|
||||
// PES包长度可能为0,这个时候,需要自己去算
|
||||
// 0 <= len <= 65535
|
||||
if err = util.WriteUint16ToByte(w, header.PesPacketLength, true); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//fmt.Println("Length :", payloadLength)
|
||||
//fmt.Println("PES Packet Length :", header.pes_PacketLength)
|
||||
|
||||
written += 2
|
||||
|
||||
w = header.header[:0]
|
||||
w.WriteUint32(0x00000100 | uint32(header.StreamID))
|
||||
w.WriteUint16(header.PesPacketLength)
|
||||
// constTen(2)
|
||||
// pes_ScramblingControl(2)
|
||||
// pes_Priority(1)
|
||||
@@ -385,18 +371,9 @@ func WritePESHeader(w io.Writer, header MpegTsPESHeader) (written int, err error
|
||||
// copyright(1)
|
||||
// originalOrCopy(1)
|
||||
// 1000 0001
|
||||
if header.ConstTen != 0x80 {
|
||||
err = errors.New("pes header ConstTen != 0x80")
|
||||
return
|
||||
}
|
||||
|
||||
flags := header.ConstTen | header.PesScramblingControl | header.PesPriority | header.DataAlignmentIndicator | header.Copyright | header.OriginalOrCopy
|
||||
if err = util.WriteUint8ToByte(w, flags); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
written += 1
|
||||
|
||||
flags := 0x80 | header.PesScramblingControl | header.PesPriority | header.DataAlignmentIndicator | header.Copyright | header.OriginalOrCopy
|
||||
w.WriteByte(flags)
|
||||
// pts_dts_Flags(2)
|
||||
// escr_Flag(1)
|
||||
// es_RateFlag(1)
|
||||
@@ -405,19 +382,8 @@ func WritePESHeader(w io.Writer, header MpegTsPESHeader) (written int, err error
|
||||
// pes_CRCFlag(1)
|
||||
// pes_ExtensionFlag(1)
|
||||
sevenFlags := header.PtsDtsFlags | header.EscrFlag | header.EsRateFlag | header.DsmTrickModeFlag | header.AdditionalCopyInfoFlag | header.PesCRCFlag | header.PesExtensionFlag
|
||||
if err = util.WriteUint8ToByte(w, sevenFlags); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
written += 1
|
||||
|
||||
// pes_HeaderDataLength(8)
|
||||
if err = util.WriteUint8ToByte(w, header.PesHeaderDataLength); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
written += 1
|
||||
|
||||
w.WriteByte(sevenFlags)
|
||||
w.WriteByte(header.PesHeaderDataLength)
|
||||
// PtsDtsFlags == 192(11), 128(10), 64(01)禁用, 0(00)
|
||||
if header.PtsDtsFlags&0x80 != 0 {
|
||||
// PTS和DTS都存在(11),否则只有PTS(10)
|
||||
@@ -425,30 +391,121 @@ func WritePESHeader(w io.Writer, header MpegTsPESHeader) (written int, err error
|
||||
// 11:PTS和DTS
|
||||
// PTS(33) + 4 + 3
|
||||
pts := util.PutPtsDts(header.Pts) | 3<<36
|
||||
if err = util.WriteUint40ToByte(w, pts, true); err != nil {
|
||||
if err = util.WriteUint40ToByte(&w, pts, true); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
written += 5
|
||||
|
||||
// DTS(33) + 4 + 3
|
||||
dts := util.PutPtsDts(header.Dts) | 1<<36
|
||||
if err = util.WriteUint40ToByte(w, dts, true); err != nil {
|
||||
if err = util.WriteUint40ToByte(&w, dts, true); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
written += 5
|
||||
} else {
|
||||
// 10:只有PTS
|
||||
// PTS(33) + 4 + 3
|
||||
pts := util.PutPtsDts(header.Pts) | 2<<36
|
||||
if err = util.WriteUint40ToByte(w, pts, true); err != nil {
|
||||
if err = util.WriteUint40ToByte(&w, pts, true); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
written += 5
|
||||
func (frame *MpegtsPESFrame) WritePESPacket(payload util.Memory, allocator *util.RecyclableMemory) (err error) {
|
||||
var pesHeadItem util.Buffer
|
||||
pesHeadItem, err = frame.WritePESHeader(payload.Size)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
pesBuffers := util.NewMemory(pesHeadItem)
|
||||
payload.Range(pesBuffers.PushOne)
|
||||
pesPktLength := int64(pesBuffers.Size)
|
||||
pesReader := pesBuffers.NewReader()
|
||||
var tsHeaderLength int
|
||||
for i := 0; pesPktLength > 0; i++ {
|
||||
var buffer util.Buffer = allocator.NextN(TS_PACKET_SIZE)
|
||||
bwTsHeader := &buffer
|
||||
bwTsHeader.Reset()
|
||||
tsHeader := MpegTsHeader{
|
||||
SyncByte: 0x47,
|
||||
TransportErrorIndicator: 0,
|
||||
PayloadUnitStartIndicator: 0,
|
||||
TransportPriority: 0,
|
||||
Pid: frame.Pid,
|
||||
TransportScramblingControl: 0,
|
||||
AdaptionFieldControl: 1,
|
||||
ContinuityCounter: frame.ContinuityCounter,
|
||||
}
|
||||
|
||||
frame.ContinuityCounter++
|
||||
frame.ContinuityCounter = frame.ContinuityCounter % 16
|
||||
|
||||
// 每一帧的开头,当含有pcr的时候,包含调整字段
|
||||
if i == 0 {
|
||||
tsHeader.PayloadUnitStartIndicator = 1
|
||||
|
||||
// 当PCRFlag为1的时候,包含调整字段
|
||||
if frame.IsKeyFrame {
|
||||
tsHeader.AdaptionFieldControl = 0x03
|
||||
tsHeader.AdaptationFieldLength = 7
|
||||
tsHeader.PCRFlag = 1
|
||||
tsHeader.RandomAccessIndicator = 1
|
||||
tsHeader.ProgramClockReferenceBase = frame.Pts
|
||||
}
|
||||
}
|
||||
|
||||
// 每一帧的结尾,当不满足188个字节的时候,包含调整字段
|
||||
if pesPktLength < TS_PACKET_SIZE-4 {
|
||||
var tsStuffingLength uint8
|
||||
|
||||
tsHeader.AdaptionFieldControl = 0x03
|
||||
tsHeader.AdaptationFieldLength = uint8(TS_PACKET_SIZE - 4 - 1 - pesPktLength)
|
||||
|
||||
// TODO:如果第一个TS包也是最后一个TS包,是不是需要考虑这个情况?
|
||||
// MpegTsHeader最少占6个字节.(前4个走字节 + AdaptationFieldLength(1 byte) + 3个指示符5个标志位(1 byte))
|
||||
if tsHeader.AdaptationFieldLength >= 1 {
|
||||
tsStuffingLength = tsHeader.AdaptationFieldLength - 1
|
||||
} else {
|
||||
tsStuffingLength = 0
|
||||
}
|
||||
// error
|
||||
tsHeaderLength, err = WriteTsHeader(bwTsHeader, tsHeader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if tsStuffingLength > 0 {
|
||||
if _, err = bwTsHeader.Write(Stuffing[:tsStuffingLength]); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
tsHeaderLength += int(tsStuffingLength)
|
||||
} else {
|
||||
|
||||
tsHeaderLength, err = WriteTsHeader(bwTsHeader, tsHeader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
tsPayloadLength := TS_PACKET_SIZE - tsHeaderLength
|
||||
|
||||
//fmt.Println("tsPayloadLength :", tsPayloadLength)
|
||||
|
||||
// 这里不断的减少PES包
|
||||
written, _ := io.CopyN(bwTsHeader, &pesReader, int64(tsPayloadLength))
|
||||
// tmp := tsHeaderByte[3] << 2
|
||||
// tmp = tmp >> 6
|
||||
// if tmp == 2 {
|
||||
// fmt.Println("fuck you mother.")
|
||||
// }
|
||||
pesPktLength -= written
|
||||
tsPktByteLen := bwTsHeader.Len()
|
||||
|
||||
if tsPktByteLen != TS_PACKET_SIZE {
|
||||
err = errors.New(fmt.Sprintf("%s, packet size=%d", "TS_PACKET_SIZE != 188,", tsPktByteLen))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
return nil
|
||||
}
|
||||
@@ -1,9 +1,11 @@
|
||||
package mpegts
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
@@ -179,50 +181,56 @@ func WritePSI(w io.Writer, pt uint32, psi MpegTsPSI, data []byte) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
cw := &util.Crc32Writer{W: w, Crc32: 0xffffffff}
|
||||
// 使用buffer收集所有需要计算CRC32的数据
|
||||
bw := &bytes.Buffer{}
|
||||
|
||||
// table id(8)
|
||||
if err = util.WriteUint8ToByte(cw, tableId); err != nil {
|
||||
if err = util.WriteUint8ToByte(bw, tableId); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// sectionSyntaxIndicator(1) + zero(1) + reserved1(2) + sectionLength(12)
|
||||
// sectionLength 前两个字节固定为00
|
||||
// 1 0 11 sectionLength
|
||||
if err = util.WriteUint16ToByte(cw, sectionSyntaxIndicatorAndSectionLength, true); err != nil {
|
||||
if err = util.WriteUint16ToByte(bw, sectionSyntaxIndicatorAndSectionLength, true); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// PAT TransportStreamID(16) or PMT ProgramNumber(16)
|
||||
if err = util.WriteUint16ToByte(cw, transportStreamIdOrProgramNumber, true); err != nil {
|
||||
if err = util.WriteUint16ToByte(bw, transportStreamIdOrProgramNumber, true); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// reserved2(2) + versionNumber(5) + currentNextIndicator(1)
|
||||
// 0x3 << 6 -> 1100 0000
|
||||
// 0x3 << 6 | 1 -> 1100 0001
|
||||
if err = util.WriteUint8ToByte(cw, versionNumberAndCurrentNextIndicator); err != nil {
|
||||
if err = util.WriteUint8ToByte(bw, versionNumberAndCurrentNextIndicator); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// sectionNumber(8)
|
||||
if err = util.WriteUint8ToByte(cw, sectionNumber); err != nil {
|
||||
if err = util.WriteUint8ToByte(bw, sectionNumber); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// lastSectionNumber(8)
|
||||
if err = util.WriteUint8ToByte(cw, lastSectionNumber); err != nil {
|
||||
if err = util.WriteUint8ToByte(bw, lastSectionNumber); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// data
|
||||
if _, err = cw.Write(data); err != nil {
|
||||
if _, err = bw.Write(data); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// crc32
|
||||
crc32 := util.BigLittleSwap(uint(cw.Crc32))
|
||||
if err = util.WriteUint32ToByte(cw, uint32(crc32), true); err != nil {
|
||||
// 写入PSI数据
|
||||
if _, err = w.Write(bw.Bytes()); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// 使用MPEG-TS CRC32算法计算CRC32
|
||||
crc32 := GetCRC32(bw.Bytes())
|
||||
if err = util.WriteUint32ToByte(w, crc32, true); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
20
pkg/format/ts/video.go
Normal file
20
pkg/format/ts/video.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package mpegts
|
||||
|
||||
import (
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/format"
|
||||
)
|
||||
|
||||
type VideoFrame struct {
|
||||
format.AnnexB
|
||||
}
|
||||
|
||||
func (a *VideoFrame) Mux(fromBase *pkg.Sample) (err error) {
|
||||
if fromBase.GetBase().FourCC().Is(codec.FourCC_H265) {
|
||||
a.PushOne(codec.AudNalu)
|
||||
} else {
|
||||
a.PushOne(codec.NALU_AUD_BYTE)
|
||||
}
|
||||
return a.AnnexB.Mux(fromBase)
|
||||
}
|
||||
133
pkg/port.go
133
pkg/port.go
@@ -13,7 +13,6 @@ type (
|
||||
Port struct {
|
||||
Protocol string
|
||||
Ports [2]int
|
||||
Map [2]int // 映射端口范围,通常用于 NAT 或端口转发
|
||||
}
|
||||
IPort interface {
|
||||
IsTCP() bool
|
||||
@@ -23,23 +22,10 @@ type (
|
||||
)
|
||||
|
||||
func (p Port) String() string {
|
||||
var result string
|
||||
if p.Ports[0] == p.Ports[1] {
|
||||
result = p.Protocol + ":" + strconv.Itoa(p.Ports[0])
|
||||
} else {
|
||||
result = p.Protocol + ":" + strconv.Itoa(p.Ports[0]) + "-" + strconv.Itoa(p.Ports[1])
|
||||
return p.Protocol + ":" + strconv.Itoa(p.Ports[0])
|
||||
}
|
||||
|
||||
// 如果有端口映射,添加映射信息
|
||||
if p.HasMapping() {
|
||||
if p.Map[0] == p.Map[1] {
|
||||
result += ":" + strconv.Itoa(p.Map[0])
|
||||
} else {
|
||||
result += ":" + strconv.Itoa(p.Map[0]) + "-" + strconv.Itoa(p.Map[1])
|
||||
}
|
||||
}
|
||||
|
||||
return result
|
||||
return p.Protocol + ":" + strconv.Itoa(p.Ports[0]) + "-" + strconv.Itoa(p.Ports[1])
|
||||
}
|
||||
|
||||
func (p Port) IsTCP() bool {
|
||||
@@ -54,36 +40,6 @@ func (p Port) IsRange() bool {
|
||||
return p.Ports[0] != p.Ports[1]
|
||||
}
|
||||
|
||||
func (p Port) HasMapping() bool {
|
||||
return p.Map[0] > 0 || p.Map[1] > 0
|
||||
}
|
||||
|
||||
func (p Port) IsRangeMapping() bool {
|
||||
return p.HasMapping() && p.Map[0] != p.Map[1]
|
||||
}
|
||||
|
||||
// ParsePort2 解析端口配置字符串并返回对应的端口类型实例
|
||||
// 根据协议类型和端口范围返回不同的类型:
|
||||
// - TCP单端口:返回 TCPPort
|
||||
// - TCP端口范围:返回 TCPRangePort
|
||||
// - UDP单端口:返回 UDPPort
|
||||
// - UDP端口范围:返回 UDPRangePort
|
||||
//
|
||||
// 参数:
|
||||
//
|
||||
// conf - 端口配置字符串,格式:protocol:port 或 protocol:port1-port2
|
||||
//
|
||||
// 返回值:
|
||||
//
|
||||
// ret - 端口实例 (TCPPort/UDPPort/TCPRangePort/UDPRangePort)
|
||||
// err - 解析错误
|
||||
//
|
||||
// 示例:
|
||||
//
|
||||
// ParsePort2("tcp:8080") // 返回 TCPPort(8080)
|
||||
// ParsePort2("tcp:8080-8090") // 返回 TCPRangePort([2]int{8080, 8090})
|
||||
// ParsePort2("udp:5000") // 返回 UDPPort(5000)
|
||||
// ParsePort2("udp:5000-5010") // 返回 UDPRangePort([2]int{5000, 5010})
|
||||
func ParsePort2(conf string) (ret any, err error) {
|
||||
var port Port
|
||||
port, err = ParsePort(conf)
|
||||
@@ -102,84 +58,10 @@ func ParsePort2(conf string) (ret any, err error) {
|
||||
return UDPPort(port.Ports[0]), nil
|
||||
}
|
||||
|
||||
// ParsePort 解析端口配置字符串为 Port 结构体
|
||||
// 支持协议前缀、端口号/端口范围以及端口映射的解析
|
||||
//
|
||||
// 参数:
|
||||
//
|
||||
// conf - 端口配置字符串,格式:
|
||||
// - "protocol:port" 单端口,如 "tcp:8080"
|
||||
// - "protocol:port1-port2" 端口范围,如 "tcp:8080-8090"
|
||||
// - "protocol:port:mapPort" 单端口映射,如 "tcp:8080:9090"
|
||||
// - "protocol:port:mapPort1-mapPort2" 单端口映射到端口范围,如 "tcp:8080:9000-9010"
|
||||
// - "protocol:port1-port2:mapPort1-mapPort2" 端口范围映射,如 "tcp:8080-8090:9000-9010"
|
||||
//
|
||||
// 返回值:
|
||||
//
|
||||
// ret - Port 结构体,包含协议、端口和映射端口信息
|
||||
// err - 解析错误
|
||||
//
|
||||
// 注意:
|
||||
// - 如果端口范围中 min > max,会自动交换顺序
|
||||
// - 单端口时,Ports[0] 和 Ports[1] 值相同
|
||||
// - 端口映射时,Map[0] 和 Map[1] 存储映射的目标端口范围
|
||||
// - 单个映射端口时,Map[0] 和 Map[1] 值相同
|
||||
//
|
||||
// 示例:
|
||||
//
|
||||
// ParsePort("tcp:8080") // Port{Protocol:"tcp", Ports:[2]int{8080, 8080}, Map:[2]int{0, 0}}
|
||||
// ParsePort("tcp:8080-8090") // Port{Protocol:"tcp", Ports:[2]int{8080, 8090}, Map:[2]int{0, 0}}
|
||||
// ParsePort("tcp:8080:9090") // Port{Protocol:"tcp", Ports:[2]int{8080, 8080}, Map:[2]int{9090, 9090}}
|
||||
// ParsePort("tcp:8080:9000-9010") // Port{Protocol:"tcp", Ports:[2]int{8080, 8080}, Map:[2]int{9000, 9010}}
|
||||
// ParsePort("tcp:8080-8090:9000-9010") // Port{Protocol:"tcp", Ports:[2]int{8080, 8090}, Map:[2]int{9000, 9010}}
|
||||
// ParsePort("udp:5000") // Port{Protocol:"udp", Ports:[2]int{5000, 5000}, Map:[2]int{0, 0}}
|
||||
// ParsePort("udp:5010-5000") // Port{Protocol:"udp", Ports:[2]int{5000, 5010}, Map:[2]int{0, 0}}
|
||||
func ParsePort(conf string) (ret Port, err error) {
|
||||
var port, mapPort string
|
||||
var port string
|
||||
var min, max int
|
||||
|
||||
// 按冒号分割,支持端口映射
|
||||
parts := strings.Split(conf, ":")
|
||||
if len(parts) < 2 || len(parts) > 3 {
|
||||
err = strconv.ErrSyntax
|
||||
return
|
||||
}
|
||||
|
||||
ret.Protocol = parts[0]
|
||||
port = parts[1]
|
||||
|
||||
// 处理端口映射
|
||||
if len(parts) == 3 {
|
||||
mapPort = parts[2]
|
||||
// 解析映射端口,支持单端口和端口范围
|
||||
if mapRange := strings.Split(mapPort, "-"); len(mapRange) == 2 {
|
||||
// 映射端口范围
|
||||
var mapMin, mapMax int
|
||||
mapMin, err = strconv.Atoi(mapRange[0])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
mapMax, err = strconv.Atoi(mapRange[1])
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if mapMin < mapMax {
|
||||
ret.Map[0], ret.Map[1] = mapMin, mapMax
|
||||
} else {
|
||||
ret.Map[0], ret.Map[1] = mapMax, mapMin
|
||||
}
|
||||
} else {
|
||||
// 单个映射端口
|
||||
var mapPortNum int
|
||||
mapPortNum, err = strconv.Atoi(mapPort)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
ret.Map[0], ret.Map[1] = mapPortNum, mapPortNum
|
||||
}
|
||||
}
|
||||
|
||||
// 处理端口范围
|
||||
ret.Protocol, port, _ = strings.Cut(conf, ":")
|
||||
if r := strings.Split(port, "-"); len(r) == 2 {
|
||||
min, err = strconv.Atoi(r[0])
|
||||
if err != nil {
|
||||
@@ -194,12 +76,7 @@ func ParsePort(conf string) (ret Port, err error) {
|
||||
} else {
|
||||
ret.Ports[0], ret.Ports[1] = max, min
|
||||
}
|
||||
} else {
|
||||
var p int
|
||||
p, err = strconv.Atoi(port)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else if p, err := strconv.Atoi(port); err == nil {
|
||||
ret.Ports[0], ret.Ports[1] = p, p
|
||||
}
|
||||
return
|
||||
|
||||
370
pkg/port_test.go
370
pkg/port_test.go
@@ -1,370 +0,0 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestParsePort(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expected Port
|
||||
hasError bool
|
||||
}{
|
||||
{
|
||||
name: "TCP单端口",
|
||||
input: "tcp:8080",
|
||||
expected: Port{
|
||||
Protocol: "tcp",
|
||||
Ports: [2]int{8080, 8080},
|
||||
Map: [2]int{0, 0},
|
||||
},
|
||||
hasError: false,
|
||||
},
|
||||
{
|
||||
name: "TCP端口范围",
|
||||
input: "tcp:8080-8090",
|
||||
expected: Port{
|
||||
Protocol: "tcp",
|
||||
Ports: [2]int{8080, 8090},
|
||||
Map: [2]int{0, 0},
|
||||
},
|
||||
hasError: false,
|
||||
},
|
||||
{
|
||||
name: "TCP端口范围(反序)",
|
||||
input: "tcp:8090-8080",
|
||||
expected: Port{
|
||||
Protocol: "tcp",
|
||||
Ports: [2]int{8080, 8090},
|
||||
Map: [2]int{0, 0},
|
||||
},
|
||||
hasError: false,
|
||||
},
|
||||
{
|
||||
name: "TCP单端口映射到单端口",
|
||||
input: "tcp:8080:9090",
|
||||
expected: Port{
|
||||
Protocol: "tcp",
|
||||
Ports: [2]int{8080, 8080},
|
||||
Map: [2]int{9090, 9090},
|
||||
},
|
||||
hasError: false,
|
||||
},
|
||||
{
|
||||
name: "TCP单端口映射到端口范围",
|
||||
input: "tcp:8080:9000-9010",
|
||||
expected: Port{
|
||||
Protocol: "tcp",
|
||||
Ports: [2]int{8080, 8080},
|
||||
Map: [2]int{9000, 9010},
|
||||
},
|
||||
hasError: false,
|
||||
},
|
||||
{
|
||||
name: "TCP端口范围映射到端口范围",
|
||||
input: "tcp:8080-8090:9000-9010",
|
||||
expected: Port{
|
||||
Protocol: "tcp",
|
||||
Ports: [2]int{8080, 8090},
|
||||
Map: [2]int{9000, 9010},
|
||||
},
|
||||
hasError: false,
|
||||
},
|
||||
{
|
||||
name: "UDP单端口",
|
||||
input: "udp:5000",
|
||||
expected: Port{
|
||||
Protocol: "udp",
|
||||
Ports: [2]int{5000, 5000},
|
||||
Map: [2]int{0, 0},
|
||||
},
|
||||
hasError: false,
|
||||
},
|
||||
{
|
||||
name: "UDP端口范围",
|
||||
input: "udp:5000-5010",
|
||||
expected: Port{
|
||||
Protocol: "udp",
|
||||
Ports: [2]int{5000, 5010},
|
||||
Map: [2]int{0, 0},
|
||||
},
|
||||
hasError: false,
|
||||
},
|
||||
{
|
||||
name: "UDP端口映射",
|
||||
input: "udp:5000:6000",
|
||||
expected: Port{
|
||||
Protocol: "udp",
|
||||
Ports: [2]int{5000, 5000},
|
||||
Map: [2]int{6000, 6000},
|
||||
},
|
||||
hasError: false,
|
||||
},
|
||||
{
|
||||
name: "UDP端口范围映射(映射范围反序)",
|
||||
input: "udp:5000-5010:6010-6000",
|
||||
expected: Port{
|
||||
Protocol: "udp",
|
||||
Ports: [2]int{5000, 5010},
|
||||
Map: [2]int{6000, 6010},
|
||||
},
|
||||
hasError: false,
|
||||
},
|
||||
// 错误情况
|
||||
{
|
||||
name: "缺少协议",
|
||||
input: "8080",
|
||||
expected: Port{},
|
||||
hasError: true,
|
||||
},
|
||||
{
|
||||
name: "过多冒号",
|
||||
input: "tcp:8080:9090:extra",
|
||||
expected: Port{},
|
||||
hasError: true,
|
||||
},
|
||||
{
|
||||
name: "无效端口号",
|
||||
input: "tcp:abc",
|
||||
expected: Port{},
|
||||
hasError: true,
|
||||
},
|
||||
{
|
||||
name: "无效映射端口号",
|
||||
input: "tcp:8080:abc",
|
||||
expected: Port{},
|
||||
hasError: true,
|
||||
},
|
||||
{
|
||||
name: "无效端口范围",
|
||||
input: "tcp:8080-abc",
|
||||
expected: Port{},
|
||||
hasError: true,
|
||||
},
|
||||
{
|
||||
name: "无效映射端口范围",
|
||||
input: "tcp:8080:9000-abc",
|
||||
expected: Port{},
|
||||
hasError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := ParsePort(tt.input)
|
||||
|
||||
if tt.hasError {
|
||||
if err == nil {
|
||||
t.Errorf("期望有错误,但没有错误")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("意外的错误: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if result.Protocol != tt.expected.Protocol {
|
||||
t.Errorf("协议不匹配: 期望 %s, 得到 %s", tt.expected.Protocol, result.Protocol)
|
||||
}
|
||||
|
||||
if result.Ports != tt.expected.Ports {
|
||||
t.Errorf("端口不匹配: 期望 %v, 得到 %v", tt.expected.Ports, result.Ports)
|
||||
}
|
||||
|
||||
if result.Map != tt.expected.Map {
|
||||
t.Errorf("映射端口不匹配: 期望 %v, 得到 %v", tt.expected.Map, result.Map)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPortMethods(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
port Port
|
||||
expectTCP bool
|
||||
expectUDP bool
|
||||
expectRange bool
|
||||
expectMapping bool
|
||||
expectRangeMap bool
|
||||
expectString string
|
||||
}{
|
||||
{
|
||||
name: "TCP单端口",
|
||||
port: Port{
|
||||
Protocol: "tcp",
|
||||
Ports: [2]int{8080, 8080},
|
||||
Map: [2]int{0, 0},
|
||||
},
|
||||
expectTCP: true,
|
||||
expectUDP: false,
|
||||
expectRange: false,
|
||||
expectMapping: false,
|
||||
expectRangeMap: false,
|
||||
expectString: "tcp:8080",
|
||||
},
|
||||
{
|
||||
name: "TCP端口范围",
|
||||
port: Port{
|
||||
Protocol: "tcp",
|
||||
Ports: [2]int{8080, 8090},
|
||||
Map: [2]int{0, 0},
|
||||
},
|
||||
expectTCP: true,
|
||||
expectUDP: false,
|
||||
expectRange: true,
|
||||
expectMapping: false,
|
||||
expectRangeMap: false,
|
||||
expectString: "tcp:8080-8090",
|
||||
},
|
||||
{
|
||||
name: "TCP单端口映射",
|
||||
port: Port{
|
||||
Protocol: "tcp",
|
||||
Ports: [2]int{8080, 8080},
|
||||
Map: [2]int{9090, 9090},
|
||||
},
|
||||
expectTCP: true,
|
||||
expectUDP: false,
|
||||
expectRange: false,
|
||||
expectMapping: true,
|
||||
expectRangeMap: false,
|
||||
expectString: "tcp:8080:9090",
|
||||
},
|
||||
{
|
||||
name: "TCP端口范围映射",
|
||||
port: Port{
|
||||
Protocol: "tcp",
|
||||
Ports: [2]int{8080, 8090},
|
||||
Map: [2]int{9000, 9010},
|
||||
},
|
||||
expectTCP: true,
|
||||
expectUDP: false,
|
||||
expectRange: true,
|
||||
expectMapping: true,
|
||||
expectRangeMap: true,
|
||||
expectString: "tcp:8080-8090:9000-9010",
|
||||
},
|
||||
{
|
||||
name: "UDP单端口映射到端口范围",
|
||||
port: Port{
|
||||
Protocol: "udp",
|
||||
Ports: [2]int{5000, 5000},
|
||||
Map: [2]int{6000, 6010},
|
||||
},
|
||||
expectTCP: false,
|
||||
expectUDP: true,
|
||||
expectRange: false,
|
||||
expectMapping: true,
|
||||
expectRangeMap: true,
|
||||
expectString: "udp:5000:6000-6010",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.port.IsTCP() != tt.expectTCP {
|
||||
t.Errorf("IsTCP(): 期望 %v, 得到 %v", tt.expectTCP, tt.port.IsTCP())
|
||||
}
|
||||
|
||||
if tt.port.IsUDP() != tt.expectUDP {
|
||||
t.Errorf("IsUDP(): 期望 %v, 得到 %v", tt.expectUDP, tt.port.IsUDP())
|
||||
}
|
||||
|
||||
if tt.port.IsRange() != tt.expectRange {
|
||||
t.Errorf("IsRange(): 期望 %v, 得到 %v", tt.expectRange, tt.port.IsRange())
|
||||
}
|
||||
|
||||
if tt.port.HasMapping() != tt.expectMapping {
|
||||
t.Errorf("HasMapping(): 期望 %v, 得到 %v", tt.expectMapping, tt.port.HasMapping())
|
||||
}
|
||||
|
||||
if tt.port.IsRangeMapping() != tt.expectRangeMap {
|
||||
t.Errorf("IsRangeMapping(): 期望 %v, 得到 %v", tt.expectRangeMap, tt.port.IsRangeMapping())
|
||||
}
|
||||
|
||||
if tt.port.String() != tt.expectString {
|
||||
t.Errorf("String(): 期望 %s, 得到 %s", tt.expectString, tt.port.String())
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParsePort2(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
expectedType string
|
||||
hasError bool
|
||||
}{
|
||||
{
|
||||
name: "TCP单端口",
|
||||
input: "tcp:8080",
|
||||
expectedType: "TCPPort",
|
||||
hasError: false,
|
||||
},
|
||||
{
|
||||
name: "TCP端口范围",
|
||||
input: "tcp:8080-8090",
|
||||
expectedType: "TCPRangePort",
|
||||
hasError: false,
|
||||
},
|
||||
{
|
||||
name: "UDP单端口",
|
||||
input: "udp:5000",
|
||||
expectedType: "UDPPort",
|
||||
hasError: false,
|
||||
},
|
||||
{
|
||||
name: "UDP端口范围",
|
||||
input: "udp:5000-5010",
|
||||
expectedType: "UDPRangePort",
|
||||
hasError: false,
|
||||
},
|
||||
{
|
||||
name: "无效输入",
|
||||
input: "invalid",
|
||||
hasError: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result, err := ParsePort2(tt.input)
|
||||
|
||||
if tt.hasError {
|
||||
if err == nil {
|
||||
t.Errorf("期望有错误,但没有错误")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("意外的错误: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
switch tt.expectedType {
|
||||
case "TCPPort":
|
||||
if _, ok := result.(TCPPort); !ok {
|
||||
t.Errorf("期望类型 TCPPort, 得到 %T", result)
|
||||
}
|
||||
case "TCPRangePort":
|
||||
if _, ok := result.(TCPRangePort); !ok {
|
||||
t.Errorf("期望类型 TCPRangePort, 得到 %T", result)
|
||||
}
|
||||
case "UDPPort":
|
||||
if _, ok := result.(UDPPort); !ok {
|
||||
t.Errorf("期望类型 UDPPort, 得到 %T", result)
|
||||
}
|
||||
case "UDPRangePort":
|
||||
if _, ok := result.(UDPRangePort); !ok {
|
||||
t.Errorf("期望类型 UDPRangePort, 得到 %T", result)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
236
pkg/raw.go
236
pkg/raw.go
@@ -1,236 +0,0 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/deepch/vdk/codec/aacparser"
|
||||
"github.com/deepch/vdk/codec/h264parser"
|
||||
"github.com/deepch/vdk/codec/h265parser"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
var _ IAVFrame = (*RawAudio)(nil)
|
||||
|
||||
type RawAudio struct {
|
||||
codec.FourCC
|
||||
Timestamp time.Duration
|
||||
util.RecyclableMemory
|
||||
}
|
||||
|
||||
func (r *RawAudio) Parse(track *AVTrack) (err error) {
|
||||
if track.ICodecCtx == nil {
|
||||
switch r.FourCC {
|
||||
case codec.FourCC_MP4A:
|
||||
ctx := &codec.AACCtx{}
|
||||
ctx.CodecData, err = aacparser.NewCodecDataFromMPEG4AudioConfigBytes(r.ToBytes())
|
||||
track.ICodecCtx = ctx
|
||||
case codec.FourCC_ALAW:
|
||||
track.ICodecCtx = &codec.PCMACtx{
|
||||
AudioCtx: codec.AudioCtx{
|
||||
SampleRate: 8000,
|
||||
Channels: 1,
|
||||
SampleSize: 8,
|
||||
},
|
||||
}
|
||||
case codec.FourCC_ULAW:
|
||||
track.ICodecCtx = &codec.PCMUCtx{
|
||||
AudioCtx: codec.AudioCtx{
|
||||
SampleRate: 8000,
|
||||
Channels: 1,
|
||||
SampleSize: 8,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *RawAudio) ConvertCtx(ctx codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) {
|
||||
c := ctx.GetBase()
|
||||
if c.FourCC().Is(codec.FourCC_MP4A) {
|
||||
seq := &RawAudio{
|
||||
FourCC: codec.FourCC_MP4A,
|
||||
Timestamp: r.Timestamp,
|
||||
}
|
||||
seq.SetAllocator(r.GetAllocator())
|
||||
seq.Memory.Append(c.GetRecord())
|
||||
return c, seq, nil
|
||||
}
|
||||
return c, nil, nil
|
||||
}
|
||||
|
||||
func (r *RawAudio) Demux(ctx codec.ICodecCtx) (any, error) {
|
||||
return r.Memory, nil
|
||||
}
|
||||
|
||||
func (r *RawAudio) Mux(ctx codec.ICodecCtx, frame *AVFrame) {
|
||||
r.InitRecycleIndexes(0)
|
||||
r.FourCC = ctx.FourCC()
|
||||
r.Memory = frame.Raw.(util.Memory)
|
||||
r.Timestamp = frame.Timestamp
|
||||
}
|
||||
|
||||
func (r *RawAudio) GetTimestamp() time.Duration {
|
||||
return r.Timestamp
|
||||
}
|
||||
|
||||
func (r *RawAudio) GetCTS() time.Duration {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *RawAudio) GetSize() int {
|
||||
return r.Size
|
||||
}
|
||||
|
||||
func (r *RawAudio) String() string {
|
||||
return fmt.Sprintf("RawAudio{FourCC: %s, Timestamp: %s, Size: %d}", r.FourCC, r.Timestamp, r.Size)
|
||||
}
|
||||
|
||||
func (r *RawAudio) Dump(b byte, writer io.Writer) {
|
||||
//TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
var _ IAVFrame = (*H26xFrame)(nil)
|
||||
|
||||
type H26xFrame struct {
|
||||
codec.FourCC
|
||||
Timestamp time.Duration
|
||||
CTS time.Duration
|
||||
Nalus
|
||||
util.RecyclableMemory
|
||||
}
|
||||
|
||||
func (h *H26xFrame) Parse(track *AVTrack) (err error) {
|
||||
var hasVideoFrame bool
|
||||
|
||||
switch h.FourCC {
|
||||
case codec.FourCC_H264:
|
||||
var ctx *codec.H264Ctx
|
||||
if track.ICodecCtx != nil {
|
||||
ctx = track.ICodecCtx.GetBase().(*codec.H264Ctx)
|
||||
}
|
||||
for _, nalu := range h.Nalus {
|
||||
switch codec.ParseH264NALUType(nalu.Buffers[0][0]) {
|
||||
case h264parser.NALU_SPS:
|
||||
ctx = &codec.H264Ctx{}
|
||||
track.ICodecCtx = ctx
|
||||
ctx.RecordInfo.SPS = [][]byte{nalu.ToBytes()}
|
||||
if ctx.SPSInfo, err = h264parser.ParseSPS(ctx.SPS()); err != nil {
|
||||
return
|
||||
}
|
||||
case h264parser.NALU_PPS:
|
||||
ctx.RecordInfo.PPS = [][]byte{nalu.ToBytes()}
|
||||
ctx.CodecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS(), ctx.PPS())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
case codec.NALU_IDR_Picture:
|
||||
track.Value.IDR = true
|
||||
hasVideoFrame = true
|
||||
case codec.NALU_Non_IDR_Picture:
|
||||
hasVideoFrame = true
|
||||
}
|
||||
}
|
||||
case codec.FourCC_H265:
|
||||
var ctx *codec.H265Ctx
|
||||
if track.ICodecCtx != nil {
|
||||
ctx = track.ICodecCtx.GetBase().(*codec.H265Ctx)
|
||||
}
|
||||
for _, nalu := range h.Nalus {
|
||||
switch codec.ParseH265NALUType(nalu.Buffers[0][0]) {
|
||||
case h265parser.NAL_UNIT_VPS:
|
||||
ctx = &codec.H265Ctx{}
|
||||
ctx.RecordInfo.VPS = [][]byte{nalu.ToBytes()}
|
||||
track.ICodecCtx = ctx
|
||||
case h265parser.NAL_UNIT_SPS:
|
||||
ctx.RecordInfo.SPS = [][]byte{nalu.ToBytes()}
|
||||
if ctx.SPSInfo, err = h265parser.ParseSPS(ctx.SPS()); err != nil {
|
||||
return
|
||||
}
|
||||
case h265parser.NAL_UNIT_PPS:
|
||||
ctx.RecordInfo.PPS = [][]byte{nalu.ToBytes()}
|
||||
ctx.CodecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(ctx.VPS(), ctx.SPS(), ctx.PPS())
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_CRA:
|
||||
track.Value.IDR = true
|
||||
hasVideoFrame = true
|
||||
case 0, 1, 2, 3, 4, 5, 6, 7, 8, 9:
|
||||
hasVideoFrame = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Return ErrSkip if no video frames are present (only metadata NALUs)
|
||||
if !hasVideoFrame {
|
||||
return ErrSkip
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (h *H26xFrame) ConvertCtx(ctx codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) {
|
||||
switch c := ctx.GetBase().(type) {
|
||||
case *codec.H264Ctx:
|
||||
return c, &H26xFrame{
|
||||
FourCC: codec.FourCC_H264,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory(c.SPS()),
|
||||
util.NewMemory(c.PPS()),
|
||||
},
|
||||
}, nil
|
||||
case *codec.H265Ctx:
|
||||
return c, &H26xFrame{
|
||||
FourCC: codec.FourCC_H265,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory(c.VPS()),
|
||||
util.NewMemory(c.SPS()),
|
||||
util.NewMemory(c.PPS()),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
return ctx.GetBase(), nil, nil
|
||||
}
|
||||
|
||||
func (h *H26xFrame) Demux(ctx codec.ICodecCtx) (any, error) {
|
||||
return h.Nalus, nil
|
||||
}
|
||||
|
||||
func (h *H26xFrame) Mux(ctx codec.ICodecCtx, frame *AVFrame) {
|
||||
h.FourCC = ctx.FourCC()
|
||||
h.Nalus = frame.Raw.(Nalus)
|
||||
h.Timestamp = frame.Timestamp
|
||||
h.CTS = frame.CTS
|
||||
}
|
||||
|
||||
func (h *H26xFrame) GetTimestamp() time.Duration {
|
||||
return h.Timestamp
|
||||
}
|
||||
|
||||
func (h *H26xFrame) GetCTS() time.Duration {
|
||||
return h.CTS
|
||||
}
|
||||
|
||||
func (h *H26xFrame) GetSize() int {
|
||||
var size int
|
||||
for _, nalu := range h.Nalus {
|
||||
size += nalu.Size
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
func (h *H26xFrame) String() string {
|
||||
return fmt.Sprintf("H26xFrame{FourCC: %s, Timestamp: %s, CTS: %s}", h.FourCC, h.Timestamp, h.CTS)
|
||||
}
|
||||
|
||||
func (h *H26xFrame) Dump(b byte, writer io.Writer) {
|
||||
//TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
157
pkg/raw_test.go
157
pkg/raw_test.go
@@ -1,157 +0,0 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
func TestH26xFrame_Parse_VideoFrameDetection(t *testing.T) {
|
||||
// Test H264 IDR Picture (should not skip)
|
||||
t.Run("H264_IDR_Picture", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H264,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x65}), // IDR Picture NALU type
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
err := frame.Parse(track)
|
||||
if err == ErrSkip {
|
||||
t.Error("Expected H264 IDR frame to not be skipped, but got ErrSkip")
|
||||
}
|
||||
if !track.Value.IDR {
|
||||
t.Error("Expected IDR flag to be set for H264 IDR frame")
|
||||
}
|
||||
})
|
||||
|
||||
// Test H264 Non-IDR Picture (should not skip)
|
||||
t.Run("H264_Non_IDR_Picture", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H264,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x21}), // Non-IDR Picture NALU type
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
err := frame.Parse(track)
|
||||
if err == ErrSkip {
|
||||
t.Error("Expected H264 Non-IDR frame to not be skipped, but got ErrSkip")
|
||||
}
|
||||
})
|
||||
|
||||
// Test H264 metadata only (should skip)
|
||||
t.Run("H264_SPS_Only", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H264,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x67}), // SPS NALU type
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
err := frame.Parse(track)
|
||||
if err != ErrSkip {
|
||||
t.Errorf("Expected H264 SPS-only frame to be skipped, but got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test H264 PPS only (should skip)
|
||||
t.Run("H264_PPS_Only", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H264,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x68}), // PPS NALU type
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
err := frame.Parse(track)
|
||||
if err != ErrSkip {
|
||||
t.Errorf("Expected H264 PPS-only frame to be skipped, but got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test H265 IDR slice (should not skip)
|
||||
t.Run("H265_IDR_Slice", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H265,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x4E, 0x01}), // IDR_W_RADL slice type (19 << 1 = 38 = 0x26, so first byte should be 0x4C, but let's use a simpler approach)
|
||||
// Using NAL_UNIT_CODED_SLICE_IDR_W_RADL which should be type 19
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
|
||||
// Let's use the correct byte pattern for H265 IDR slice
|
||||
// NAL_UNIT_CODED_SLICE_IDR_W_RADL = 19
|
||||
// H265 header: (type << 1) | layer_id_bit
|
||||
idrSliceByte := byte(19 << 1) // 19 * 2 = 38 = 0x26
|
||||
frame.Nalus[0] = util.NewMemory([]byte{idrSliceByte})
|
||||
|
||||
err := frame.Parse(track)
|
||||
if err == ErrSkip {
|
||||
t.Error("Expected H265 IDR slice to not be skipped, but got ErrSkip")
|
||||
}
|
||||
if !track.Value.IDR {
|
||||
t.Error("Expected IDR flag to be set for H265 IDR slice")
|
||||
}
|
||||
})
|
||||
|
||||
// Test H265 metadata only (should skip)
|
||||
t.Run("H265_VPS_Only", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H265,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x40, 0x01}), // VPS NALU type (32 << 1 = 64 = 0x40)
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
err := frame.Parse(track)
|
||||
if err != ErrSkip {
|
||||
t.Errorf("Expected H265 VPS-only frame to be skipped, but got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test mixed H264 frame with SPS and IDR (should not skip)
|
||||
t.Run("H264_Mixed_SPS_And_IDR", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H264,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x67}), // SPS NALU type
|
||||
util.NewMemory([]byte{0x65}), // IDR Picture NALU type
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
err := frame.Parse(track)
|
||||
if err == ErrSkip {
|
||||
t.Error("Expected H264 mixed SPS+IDR frame to not be skipped, but got ErrSkip")
|
||||
}
|
||||
if !track.Value.IDR {
|
||||
t.Error("Expected IDR flag to be set for H264 mixed frame with IDR")
|
||||
}
|
||||
})
|
||||
|
||||
// Test mixed H265 frame with VPS and IDR (should not skip)
|
||||
t.Run("H265_Mixed_VPS_And_IDR", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H265,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x40, 0x01}), // VPS NALU type (32 << 1)
|
||||
util.NewMemory([]byte{0x4C, 0x01}), // IDR_W_RADL slice type (19 << 1)
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
|
||||
// Fix the IDR slice byte for H265
|
||||
idrSliceByte := byte(19 << 1) // NAL_UNIT_CODED_SLICE_IDR_W_RADL = 19
|
||||
frame.Nalus[1] = util.NewMemory([]byte{idrSliceByte, 0x01})
|
||||
|
||||
err := frame.Parse(track)
|
||||
if err == ErrSkip {
|
||||
t.Error("Expected H265 mixed VPS+IDR frame to not be skipped, but got ErrSkip")
|
||||
}
|
||||
if !track.Value.IDR {
|
||||
t.Error("Expected IDR flag to be set for H265 mixed frame with IDR")
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -3,6 +3,7 @@ package pkg
|
||||
import (
|
||||
"log/slog"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5/pkg/task"
|
||||
@@ -21,6 +22,7 @@ type RingWriter struct {
|
||||
Size int
|
||||
LastValue *AVFrame
|
||||
SLogger *slog.Logger
|
||||
status atomic.Int32 // 0: init, 1: writing, 2: disposed
|
||||
}
|
||||
|
||||
func NewRingWriter(sizeRange util.Range[int]) (rb *RingWriter) {
|
||||
@@ -90,7 +92,9 @@ func (rb *RingWriter) reduce(size int) {
|
||||
|
||||
func (rb *RingWriter) Dispose() {
|
||||
rb.SLogger.Debug("dispose")
|
||||
rb.Value.Ready()
|
||||
if rb.status.Add(-1) == -1 { // normal dispose
|
||||
rb.Value.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (rb *RingWriter) GetIDR() *util.Ring[AVFrame] {
|
||||
@@ -185,18 +189,70 @@ func (rb *RingWriter) Step() (normal bool) {
|
||||
|
||||
rb.LastValue = &rb.Value
|
||||
nextSeq := rb.LastValue.Sequence + 1
|
||||
if normal = next.Value.StartWrite(); normal {
|
||||
next.Value.Reset()
|
||||
rb.Ring = next
|
||||
} else {
|
||||
rb.reduce(1) //抛弃还有订阅者的节点
|
||||
rb.Ring = rb.glow(1, "refill") //补充一个新节点
|
||||
normal = rb.Value.StartWrite()
|
||||
if !normal {
|
||||
panic("RingWriter.Step")
|
||||
|
||||
/*
|
||||
|
||||
sequenceDiagram
|
||||
autonumber
|
||||
participant Caller as Caller
|
||||
participant RW as RingWriter
|
||||
participant Val as AVFrame.Value
|
||||
|
||||
Note over RW: status initial = 0 (idle)
|
||||
|
||||
Caller->>RW: Step()
|
||||
activate RW
|
||||
RW->>RW: status.Add(1) (0→1)
|
||||
alt entered writing (result == 1)
|
||||
Note over RW: writing
|
||||
RW->>Val: StartWrite()
|
||||
RW->>Val: Reset()
|
||||
opt Dispose during write
|
||||
Caller->>RW: Dispose()
|
||||
RW->>RW: status.Add(-1) (1→0)
|
||||
end
|
||||
RW->>RW: status.Add(-1) at end of Step
|
||||
alt returns 0 (write completed)
|
||||
RW->>Val: Ready()
|
||||
else returns -1 (disposed during write)
|
||||
RW->>Val: Unlock()
|
||||
end
|
||||
else not entered
|
||||
Note over RW: Step aborted (already disposed/busy)
|
||||
end
|
||||
deactivate RW
|
||||
|
||||
Caller->>RW: Dispose()
|
||||
activate RW
|
||||
RW->>RW: status.Add(-1)
|
||||
alt returns -1 (idle dispose)
|
||||
RW->>Val: Unlock()
|
||||
else returns 0 (dispose during write)
|
||||
Note over RW: Unlock will occur at Step end (no Ready)
|
||||
end
|
||||
deactivate RW
|
||||
|
||||
Note over RW: States: -1 (disposed), 0 (idle), 1 (writing)
|
||||
|
||||
*/
|
||||
if rb.status.Add(1) == 1 {
|
||||
if normal = next.Value.StartWrite(); normal {
|
||||
next.Value.Reset()
|
||||
rb.Ring = next
|
||||
} else {
|
||||
rb.reduce(1) //抛弃还有订阅者的节点
|
||||
rb.Ring = rb.glow(1, "refill") //补充一个新节点
|
||||
normal = rb.Value.StartWrite()
|
||||
if !normal {
|
||||
panic("RingWriter.Step")
|
||||
}
|
||||
}
|
||||
rb.Value.Sequence = nextSeq
|
||||
if rb.status.Add(-1) == 0 {
|
||||
rb.LastValue.Ready()
|
||||
} else {
|
||||
rb.Value.Unlock()
|
||||
}
|
||||
}
|
||||
rb.Value.Sequence = nextSeq
|
||||
rb.LastValue.Ready()
|
||||
return
|
||||
}
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"log/slog"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
func TestRing(t *testing.T) {
|
||||
@@ -13,7 +15,7 @@ func TestRing(t *testing.T) {
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Second*5)
|
||||
go t.Run("writer", func(t *testing.T) {
|
||||
for i := 0; ctx.Err() == nil; i++ {
|
||||
w.Value.Raw = i
|
||||
w.Value.Raw = &util.Memory{}
|
||||
normal := w.Step()
|
||||
t.Log("write", i, normal)
|
||||
time.Sleep(time.Millisecond * 50)
|
||||
@@ -76,7 +78,7 @@ func BenchmarkRing(b *testing.B) {
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Second*5)
|
||||
go func() {
|
||||
for i := 0; ctx.Err() == nil; i++ {
|
||||
w.Value.Raw = i
|
||||
w.Value.Raw = &util.Memory{}
|
||||
w.Step()
|
||||
time.Sleep(time.Millisecond * 50)
|
||||
}
|
||||
|
||||
21
pkg/steps.go
Normal file
21
pkg/steps.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package pkg
|
||||
|
||||
// StepName is a typed alias for all workflow step identifiers.
|
||||
type StepName string
|
||||
|
||||
// StepDef defines a step with typed name and description.
|
||||
type StepDef struct {
|
||||
Name StepName
|
||||
Description string
|
||||
}
|
||||
|
||||
// Standard, cross-plugin step name constants for pull/publish workflows.
|
||||
// Plugin-specific step names should be defined in their respective plugin packages.
|
||||
const (
|
||||
StepPublish StepName = "publish"
|
||||
StepURLParsing StepName = "url_parsing"
|
||||
StepConnection StepName = "connection"
|
||||
StepHandshake StepName = "handshake"
|
||||
StepParsing StepName = "parsing"
|
||||
StepStreaming StepName = "streaming"
|
||||
)
|
||||
59
pkg/task/README.md
Normal file
59
pkg/task/README.md
Normal file
@@ -0,0 +1,59 @@
|
||||
# 任务系统概要
|
||||
|
||||
# 任务的启动
|
||||
|
||||
任务通过调用父任务的 AddTask 来启动,此时会进入队列中等待启动,父任务的 EventLoop 会接受到子任务,然后调用子任务的 Start 方法进行启动操作
|
||||
|
||||
## EventLoop 的初始化
|
||||
为了节省资源,EventLoop 在没有子任务时不会创建协程,一直等到有子任务时才会创建,并且如果这个子任务也是一个空的 Job(即没有 Start、Run、Go)则仍然不会创建协程。
|
||||
|
||||
## EventLoop 停止
|
||||
为了节省资源,当 EventLoop 中没有待执行的子任务时,需要退出协程。EventLoop 会在以下情况退出:
|
||||
|
||||
1. 没有待处理的任务且没有活跃的子任务,且父任务的 keepalive() 返回 false
|
||||
2. EventLoop 的状态被设置为停止状态(-1)
|
||||
|
||||
# 任务的停止
|
||||
|
||||
## 主动停止某个任务
|
||||
|
||||
调用任务的 Stop 方法即可停止某个任务,此时该任务会由其父任务的 eventLoop 检测到 context 取消信号然后开始执行任务的 dispose 来进行销毁
|
||||
|
||||
## 任务的意外退出
|
||||
|
||||
当任务的 Run 返回错误,或者 context 被取消时,任务会退出,最终流程会同主动停止一样
|
||||
|
||||
## 父任务停止
|
||||
|
||||
当父任务停止并销毁时,会按照以下步骤处理子任务:
|
||||
|
||||
### 步骤
|
||||
|
||||
1. **设置 EventLoop 的状态为停止状态**:调用 `stop()` 方法设置 status = -1,防止继续添加子任务
|
||||
2. **激活 EventLoop 处理剩余任务**:调用 `active()` 方法,即使状态为 -1 也能处理剩余的子任务
|
||||
3. **停止所有子任务**:调用所有子任务的 Stop 方法
|
||||
4. **等待子任务销毁完成**:等待 EventLoop 处理完所有子任务的销毁工作
|
||||
|
||||
### 设计要点
|
||||
|
||||
- EventLoop 的 `active()` 方法允许在状态为 -1 时调用,以确保剩余的子任务能被正确处理
|
||||
- 使用互斥锁保护状态转换,避免竞态条件
|
||||
- 先停止再处理剩余任务,确保不会添加新的子任务
|
||||
|
||||
## 竞态条件处理
|
||||
|
||||
为了确保任务系统的线程安全,我们采取了以下措施:
|
||||
|
||||
### 状态管理
|
||||
- 使用 `sync.RWMutex` 保护 EventLoop 的状态转换
|
||||
- `add()` 方法使用读锁检查状态,防止在停止后添加新任务
|
||||
- `stop()` 方法使用写锁设置状态,确保原子性
|
||||
|
||||
### EventLoop 生命周期
|
||||
- EventLoop 只有在状态从 0(ready)转换到 1(running)时才启动新的 goroutine
|
||||
- 即使状态为 -1(stopped),`active()` 方法仍可被调用以处理剩余任务
|
||||
- 使用 `hasPending` 标志和互斥锁跟踪待处理任务,避免频繁检查 channel 长度
|
||||
|
||||
### 任务添加
|
||||
- 添加任务时会检查 EventLoop 状态,如果已停止则返回 `ErrDisposed`
|
||||
- 使用 `pendingMux` 保护 `hasPending` 标志,避免竞态条件
|
||||
@@ -1,34 +0,0 @@
|
||||
package task
|
||||
|
||||
type CallBackTask struct {
|
||||
Task
|
||||
startHandler func() error
|
||||
disposeHandler func()
|
||||
}
|
||||
|
||||
func (t *CallBackTask) GetTaskType() TaskType {
|
||||
return TASK_TYPE_CALL
|
||||
}
|
||||
|
||||
func (t *CallBackTask) Start() error {
|
||||
return t.startHandler()
|
||||
}
|
||||
|
||||
func (t *CallBackTask) Dispose() {
|
||||
if t.disposeHandler != nil {
|
||||
t.disposeHandler()
|
||||
}
|
||||
}
|
||||
|
||||
func CreateTaskByCallBack(start func() error, dispose func()) *CallBackTask {
|
||||
var task CallBackTask
|
||||
task.startHandler = func() error {
|
||||
err := start()
|
||||
if err == nil && dispose == nil {
|
||||
err = ErrTaskComplete
|
||||
}
|
||||
return err
|
||||
}
|
||||
task.disposeHandler = dispose
|
||||
return &task
|
||||
}
|
||||
@@ -42,6 +42,9 @@ func (t *TickTask) GetTickInterval() time.Duration {
|
||||
func (t *TickTask) Start() (err error) {
|
||||
t.Ticker = time.NewTicker(t.handler.(ITickTask).GetTickInterval())
|
||||
t.SignalChan = t.Ticker.C
|
||||
t.OnStop(func() {
|
||||
t.Ticker.Reset(time.Millisecond)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
167
pkg/task/event_loop.go
Normal file
167
pkg/task/event_loop.go
Normal file
@@ -0,0 +1,167 @@
|
||||
package task
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type Singleton[T comparable] struct {
|
||||
instance atomic.Value
|
||||
mux sync.Mutex
|
||||
}
|
||||
|
||||
func (s *Singleton[T]) Load() T {
|
||||
return s.instance.Load().(T)
|
||||
}
|
||||
|
||||
func (s *Singleton[T]) Get(newF func() T) T {
|
||||
ch := s.instance.Load() //fast
|
||||
if ch == nil { // slow
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
if ch = s.instance.Load(); ch == nil {
|
||||
ch = newF()
|
||||
s.instance.Store(ch)
|
||||
}
|
||||
}
|
||||
return ch.(T)
|
||||
}
|
||||
|
||||
type EventLoop struct {
|
||||
cases []reflect.SelectCase
|
||||
children []ITask
|
||||
addSub Singleton[chan any]
|
||||
running atomic.Bool
|
||||
}
|
||||
|
||||
func (e *EventLoop) getInput() chan any {
|
||||
return e.addSub.Get(func() chan any {
|
||||
return make(chan any, 20)
|
||||
})
|
||||
}
|
||||
|
||||
func (e *EventLoop) active(mt *Job) {
|
||||
if mt.parent != nil {
|
||||
mt.parent.eventLoop.active(mt.parent)
|
||||
}
|
||||
if e.running.CompareAndSwap(false, true) {
|
||||
go e.run(mt)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *EventLoop) add(mt *Job, sub any) (err error) {
|
||||
shouldActive := true
|
||||
switch sub.(type) {
|
||||
case TaskStarter, TaskBlock, TaskGo:
|
||||
case IJob:
|
||||
shouldActive = false
|
||||
}
|
||||
select {
|
||||
case e.getInput() <- sub:
|
||||
if shouldActive || mt.IsStopped() {
|
||||
e.active(mt)
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
return ErrTooManyChildren
|
||||
}
|
||||
}
|
||||
|
||||
func (e *EventLoop) run(mt *Job) {
|
||||
mt.Debug("event loop start", "jobId", mt.GetTaskID(), "type", mt.GetOwnerType())
|
||||
ch := e.getInput()
|
||||
e.cases = []reflect.SelectCase{{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)}}
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
mt.Error("job panic", "err", err, "stack", string(debug.Stack()))
|
||||
if !ThrowPanic {
|
||||
mt.Stop(errors.Join(err.(error), ErrPanic))
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
mt.Debug("event loop exit", "jobId", mt.GetTaskID(), "type", mt.GetOwnerType())
|
||||
if !mt.handler.keepalive() {
|
||||
if mt.blocked != nil {
|
||||
mt.Stop(errors.Join(mt.blocked.StopReason(), ErrAutoStop))
|
||||
} else {
|
||||
mt.Stop(ErrAutoStop)
|
||||
}
|
||||
}
|
||||
mt.blocked = nil
|
||||
}()
|
||||
|
||||
// Main event loop - only exit when no more events AND no children
|
||||
for {
|
||||
if len(ch) == 0 && len(e.children) == 0 {
|
||||
if e.running.CompareAndSwap(true, false) {
|
||||
if len(ch) > 0 { // if add before running set to false
|
||||
e.active(mt)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
mt.blocked = nil
|
||||
if chosen, rev, ok := reflect.Select(e.cases); chosen == 0 {
|
||||
if !ok {
|
||||
mt.Debug("job addSub channel closed, exiting", "taskId", mt.GetTaskID())
|
||||
mt.Stop(ErrAutoStop)
|
||||
return
|
||||
}
|
||||
switch v := rev.Interface().(type) {
|
||||
case func():
|
||||
v()
|
||||
case ITask:
|
||||
if len(e.cases) >= 65535 {
|
||||
mt.Warn("task children too many, may cause performance issue", "count", len(e.cases), "taskId", mt.GetTaskID(), "taskType", mt.GetTaskType(), "ownerType", mt.GetOwnerType())
|
||||
v.Stop(ErrTooManyChildren)
|
||||
continue
|
||||
}
|
||||
if mt.blocked = v; v.start() {
|
||||
e.cases = append(e.cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(v.GetSignal())})
|
||||
e.children = append(e.children, v)
|
||||
mt.onChildStart(v)
|
||||
} else {
|
||||
mt.removeChild(v)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
taskIndex := chosen - 1
|
||||
child := e.children[taskIndex]
|
||||
mt.blocked = child
|
||||
switch tt := mt.blocked.(type) {
|
||||
case IChannelTask:
|
||||
if tt.IsStopped() {
|
||||
switch ttt := tt.(type) {
|
||||
case ITickTask:
|
||||
ttt.GetTicker().Stop()
|
||||
}
|
||||
mt.onChildDispose(child)
|
||||
mt.removeChild(child)
|
||||
e.children = slices.Delete(e.children, taskIndex, taskIndex+1)
|
||||
e.cases = slices.Delete(e.cases, chosen, chosen+1)
|
||||
} else {
|
||||
tt.Tick(rev.Interface())
|
||||
}
|
||||
default:
|
||||
if !ok {
|
||||
if mt.onChildDispose(child); child.checkRetry(child.StopReason()) {
|
||||
if child.reset(); child.start() {
|
||||
e.cases[chosen].Chan = reflect.ValueOf(child.GetSignal())
|
||||
mt.onChildStart(child)
|
||||
continue
|
||||
}
|
||||
}
|
||||
mt.removeChild(child)
|
||||
e.children = slices.Delete(e.children, taskIndex, taskIndex+1)
|
||||
e.cases = slices.Delete(e.cases, chosen, chosen+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
261
pkg/task/job.go
261
pkg/task/job.go
@@ -2,13 +2,9 @@ package task
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@@ -32,15 +28,12 @@ func GetNextTaskID() uint32 {
|
||||
// Job include tasks
|
||||
type Job struct {
|
||||
Task
|
||||
cases []reflect.SelectCase
|
||||
addSub chan ITask
|
||||
children []ITask
|
||||
lazyRun sync.Once
|
||||
eventLoopLock sync.Mutex
|
||||
childrenDisposed chan struct{}
|
||||
children sync.Map
|
||||
descendantsDisposeListeners []func(ITask)
|
||||
descendantsStartListeners []func(ITask)
|
||||
blocked ITask
|
||||
eventLoop EventLoop
|
||||
Size atomic.Int32
|
||||
}
|
||||
|
||||
func (*Job) GetTaskType() TaskType {
|
||||
@@ -55,19 +48,18 @@ func (mt *Job) Blocked() ITask {
|
||||
return mt.blocked
|
||||
}
|
||||
|
||||
func (mt *Job) waitChildrenDispose() {
|
||||
blocked := mt.blocked
|
||||
defer func() {
|
||||
// 忽略由于在任务关闭过程中可能存在竞态条件,当父任务关闭时子任务可能已经被释放。
|
||||
if err := recover(); err != nil {
|
||||
mt.Debug("waitChildrenDispose panic", "err", err)
|
||||
}
|
||||
mt.addSub <- nil
|
||||
<-mt.childrenDisposed
|
||||
}()
|
||||
if blocked != nil {
|
||||
blocked.Stop(mt.StopReason())
|
||||
}
|
||||
func (mt *Job) EventLoopRunning() bool {
|
||||
return mt.eventLoop.running.Load()
|
||||
}
|
||||
|
||||
func (mt *Job) waitChildrenDispose(stopReason error) {
|
||||
mt.eventLoop.active(mt)
|
||||
mt.children.Range(func(key, value any) bool {
|
||||
child := value.(ITask)
|
||||
child.Stop(stopReason)
|
||||
child.WaitStopped()
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (mt *Job) OnDescendantsDispose(listener func(ITask)) {
|
||||
@@ -84,12 +76,21 @@ func (mt *Job) onDescendantsDispose(descendants ITask) {
|
||||
}
|
||||
|
||||
func (mt *Job) onChildDispose(child ITask) {
|
||||
if child.GetTaskType() != TASK_TYPE_CALL || child.GetOwnerType() != "CallBack" {
|
||||
mt.onDescendantsDispose(child)
|
||||
}
|
||||
mt.onDescendantsDispose(child)
|
||||
child.dispose()
|
||||
}
|
||||
|
||||
func (mt *Job) removeChild(child ITask) {
|
||||
value, loaded := mt.children.LoadAndDelete(child.getKey())
|
||||
if loaded {
|
||||
if value != child {
|
||||
panic("remove child")
|
||||
}
|
||||
remains := mt.Size.Add(-1)
|
||||
mt.Debug("remove child", "id", child.GetTaskID(), "remains", remains)
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *Job) OnDescendantsStart(listener func(ITask)) {
|
||||
mt.descendantsStartListeners = append(mt.descendantsStartListeners, listener)
|
||||
}
|
||||
@@ -104,166 +105,98 @@ func (mt *Job) onDescendantsStart(descendants ITask) {
|
||||
}
|
||||
|
||||
func (mt *Job) onChildStart(child ITask) {
|
||||
if child.GetTaskType() != TASK_TYPE_CALL || child.GetOwnerType() != "CallBack" {
|
||||
mt.onDescendantsStart(child)
|
||||
}
|
||||
mt.onDescendantsStart(child)
|
||||
}
|
||||
|
||||
func (mt *Job) RangeSubTask(callback func(task ITask) bool) {
|
||||
for _, task := range mt.children {
|
||||
callback(task)
|
||||
}
|
||||
mt.children.Range(func(key, value any) bool {
|
||||
callback(value.(ITask))
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (mt *Job) AddDependTask(t ITask, opt ...any) (task *Task) {
|
||||
mt.Depend(t)
|
||||
t.Using(mt)
|
||||
opt = append(opt, 1)
|
||||
return mt.AddTask(t, opt...)
|
||||
}
|
||||
|
||||
func (mt *Job) AddTask(t ITask, opt ...any) (task *Task) {
|
||||
if task = t.GetTask(); t != task.handler { // first add
|
||||
for _, o := range opt {
|
||||
switch v := o.(type) {
|
||||
case context.Context:
|
||||
task.parentCtx = v
|
||||
case Description:
|
||||
task.SetDescriptions(v)
|
||||
case RetryConfig:
|
||||
task.retry = v
|
||||
case *slog.Logger:
|
||||
task.Logger = v
|
||||
}
|
||||
}
|
||||
task.parent = mt
|
||||
task.handler = t
|
||||
switch t.(type) {
|
||||
case TaskStarter, TaskBlock, TaskGo:
|
||||
// need start now
|
||||
case IJob:
|
||||
// lazy start
|
||||
return
|
||||
func (mt *Job) initContext(task *Task, opt ...any) {
|
||||
callDepth := 2
|
||||
for _, o := range opt {
|
||||
switch v := o.(type) {
|
||||
case context.Context:
|
||||
task.parentCtx = v
|
||||
case Description:
|
||||
task.SetDescriptions(v)
|
||||
case RetryConfig:
|
||||
task.retry = v
|
||||
case *slog.Logger:
|
||||
task.Logger = v
|
||||
case int:
|
||||
callDepth += v
|
||||
}
|
||||
}
|
||||
_, file, line, ok := runtime.Caller(1)
|
||||
|
||||
_, file, line, ok := runtime.Caller(callDepth)
|
||||
if ok {
|
||||
task.StartReason = fmt.Sprintf("%s:%d", strings.TrimPrefix(file, sourceFilePathPrefix), line)
|
||||
}
|
||||
|
||||
mt.lazyRun.Do(func() {
|
||||
if mt.eventLoopLock.TryLock() {
|
||||
defer mt.eventLoopLock.Unlock()
|
||||
if mt.parent != nil && mt.Context == nil {
|
||||
mt.parent.AddTask(mt.handler) // second add, lazy start
|
||||
}
|
||||
mt.childrenDisposed = make(chan struct{})
|
||||
mt.addSub = make(chan ITask, 20)
|
||||
go mt.run()
|
||||
}
|
||||
})
|
||||
if task.Context == nil {
|
||||
if task.parentCtx == nil {
|
||||
task.parentCtx = mt.Context
|
||||
}
|
||||
task.level = mt.level + 1
|
||||
if task.ID == 0 {
|
||||
task.ID = GetNextTaskID()
|
||||
}
|
||||
task.Context, task.CancelCauseFunc = context.WithCancelCause(task.parentCtx)
|
||||
task.startup = util.NewPromise(task.Context)
|
||||
task.shutdown = util.NewPromise(context.Background())
|
||||
task.handler = t
|
||||
if task.Logger == nil {
|
||||
task.Logger = mt.Logger
|
||||
}
|
||||
task.parent = mt
|
||||
if task.parentCtx == nil {
|
||||
task.parentCtx = mt.Context
|
||||
}
|
||||
task.level = mt.level + 1
|
||||
if task.ID == 0 {
|
||||
task.ID = GetNextTaskID()
|
||||
}
|
||||
task.Context, task.CancelCauseFunc = context.WithCancelCause(task.parentCtx)
|
||||
task.startup = util.NewPromise(task.Context)
|
||||
task.shutdown = util.NewPromise(context.Background())
|
||||
if task.Logger == nil {
|
||||
task.Logger = mt.Logger
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *Job) AddTask(t ITask, opt ...any) (task *Task) {
|
||||
task = t.GetTask()
|
||||
task.handler = t
|
||||
mt.initContext(task, opt...)
|
||||
if mt.IsStopped() {
|
||||
task.startup.Reject(mt.StopReason())
|
||||
return
|
||||
}
|
||||
if len(mt.addSub) > 10 {
|
||||
mt.Warn("task wait list too many", "count", len(mt.addSub), "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType(), "parent", mt.GetOwnerType())
|
||||
actual, loaded := mt.children.LoadOrStore(t.getKey(), t)
|
||||
if loaded {
|
||||
task.startup.Reject(ExistTaskError{
|
||||
Task: actual.(ITask),
|
||||
})
|
||||
return
|
||||
}
|
||||
mt.addSub <- t
|
||||
var err error
|
||||
defer func() {
|
||||
if err != nil {
|
||||
mt.children.Delete(t.getKey())
|
||||
task.startup.Reject(err)
|
||||
}
|
||||
}()
|
||||
if err = mt.eventLoop.add(mt, t); err != nil {
|
||||
return
|
||||
}
|
||||
if mt.IsStopped() {
|
||||
err = mt.StopReason()
|
||||
return
|
||||
}
|
||||
remains := mt.Size.Add(1)
|
||||
mt.Debug("child added", "id", task.ID, "remains", remains)
|
||||
return
|
||||
}
|
||||
|
||||
func (mt *Job) Call(callback func() error, args ...any) {
|
||||
mt.Post(callback, args...).WaitStarted()
|
||||
}
|
||||
|
||||
func (mt *Job) Post(callback func() error, args ...any) *Task {
|
||||
task := CreateTaskByCallBack(callback, nil)
|
||||
if len(args) > 0 {
|
||||
task.SetDescription(OwnerTypeKey, args[0])
|
||||
}
|
||||
return mt.AddTask(task)
|
||||
}
|
||||
|
||||
func (mt *Job) run() {
|
||||
mt.cases = []reflect.SelectCase{{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(mt.addSub)}}
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
mt.Error("job panic", "err", err, "stack", string(debug.Stack()))
|
||||
if !ThrowPanic {
|
||||
mt.Stop(errors.Join(err.(error), ErrPanic))
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
stopReason := mt.StopReason()
|
||||
for _, task := range mt.children {
|
||||
task.Stop(stopReason)
|
||||
mt.onChildDispose(task)
|
||||
}
|
||||
mt.children = nil
|
||||
close(mt.childrenDisposed)
|
||||
}()
|
||||
for {
|
||||
mt.blocked = nil
|
||||
if chosen, rev, ok := reflect.Select(mt.cases); chosen == 0 {
|
||||
if rev.IsNil() {
|
||||
mt.Debug("job addSub channel closed, exiting", "taskId", mt.GetTaskID())
|
||||
return
|
||||
}
|
||||
if mt.blocked = rev.Interface().(ITask); mt.blocked.start() {
|
||||
mt.children = append(mt.children, mt.blocked)
|
||||
mt.cases = append(mt.cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(mt.blocked.GetSignal())})
|
||||
mt.onChildStart(mt.blocked)
|
||||
}
|
||||
} else {
|
||||
taskIndex := chosen - 1
|
||||
mt.blocked = mt.children[taskIndex]
|
||||
switch tt := mt.blocked.(type) {
|
||||
case IChannelTask:
|
||||
if tt.IsStopped() {
|
||||
switch ttt := tt.(type) {
|
||||
case ITickTask:
|
||||
ttt.GetTicker().Stop()
|
||||
}
|
||||
mt.onChildDispose(mt.blocked)
|
||||
mt.children = slices.Delete(mt.children, taskIndex, taskIndex+1)
|
||||
mt.cases = slices.Delete(mt.cases, chosen, chosen+1)
|
||||
} else {
|
||||
tt.Tick(rev.Interface())
|
||||
}
|
||||
default:
|
||||
if !ok {
|
||||
if mt.onChildDispose(mt.blocked); mt.blocked.checkRetry(mt.blocked.StopReason()) {
|
||||
if mt.blocked.reset(); mt.blocked.start() {
|
||||
mt.cases[chosen].Chan = reflect.ValueOf(mt.blocked.GetSignal())
|
||||
mt.onChildStart(mt.blocked)
|
||||
continue
|
||||
}
|
||||
}
|
||||
mt.children = slices.Delete(mt.children, taskIndex, taskIndex+1)
|
||||
mt.cases = slices.Delete(mt.cases, chosen, chosen+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !mt.handler.keepalive() && len(mt.children) == 0 {
|
||||
mt.Stop(ErrAutoStop)
|
||||
}
|
||||
func (mt *Job) Call(callback func()) {
|
||||
if mt.Size.Load() <= 0 {
|
||||
callback()
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithCancel(mt)
|
||||
_ = mt.eventLoop.add(mt, func() { callback(); cancel() })
|
||||
<-ctx.Done()
|
||||
}
|
||||
|
||||
@@ -2,12 +2,21 @@ package task
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
. "m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
var ErrExist = errors.New("exist")
|
||||
|
||||
type ExistTaskError struct {
|
||||
Task ITask
|
||||
}
|
||||
|
||||
func (e ExistTaskError) Error() string {
|
||||
return fmt.Sprintf("%v exist", e.Task.getKey())
|
||||
}
|
||||
|
||||
type ManagerItem[K comparable] interface {
|
||||
ITask
|
||||
GetKey() K
|
||||
@@ -30,15 +39,25 @@ func (m *Manager[K, T]) Add(ctx T, opt ...any) *Task {
|
||||
m.Remove(ctx)
|
||||
m.Debug("remove", "key", ctx.GetKey(), "count", m.Length)
|
||||
})
|
||||
opt = append(opt, 1)
|
||||
return m.AddTask(ctx, opt...)
|
||||
}
|
||||
|
||||
func (m *Manager[K, T]) SafeHas(key K) (ok bool) {
|
||||
if m.L == nil {
|
||||
m.Call(func() {
|
||||
ok = m.Collection.Has(key)
|
||||
})
|
||||
return ok
|
||||
}
|
||||
return m.Collection.Has(key)
|
||||
}
|
||||
|
||||
// SafeGet 用于不同协程获取元素,防止并发请求
|
||||
func (m *Manager[K, T]) SafeGet(key K) (item T, ok bool) {
|
||||
if m.L == nil {
|
||||
m.Call(func() error {
|
||||
m.Call(func() {
|
||||
item, ok = m.Collection.Get(key)
|
||||
return nil
|
||||
})
|
||||
} else {
|
||||
item, ok = m.Collection.Get(key)
|
||||
@@ -49,9 +68,8 @@ func (m *Manager[K, T]) SafeGet(key K) (item T, ok bool) {
|
||||
// SafeRange 用于不同协程获取元素,防止并发请求
|
||||
func (m *Manager[K, T]) SafeRange(f func(T) bool) {
|
||||
if m.L == nil {
|
||||
m.Call(func() error {
|
||||
m.Call(func() {
|
||||
m.Collection.Range(f)
|
||||
return nil
|
||||
})
|
||||
} else {
|
||||
m.Collection.Range(f)
|
||||
@@ -61,9 +79,8 @@ func (m *Manager[K, T]) SafeRange(f func(T) bool) {
|
||||
// SafeFind 用于不同协程获取元素,防止并发请求
|
||||
func (m *Manager[K, T]) SafeFind(f func(T) bool) (item T, ok bool) {
|
||||
if m.L == nil {
|
||||
m.Call(func() error {
|
||||
m.Call(func() {
|
||||
item, ok = m.Collection.Find(f)
|
||||
return nil
|
||||
})
|
||||
} else {
|
||||
item, ok = m.Collection.Find(f)
|
||||
|
||||
@@ -3,4 +3,4 @@
|
||||
|
||||
package task
|
||||
|
||||
var ThrowPanic = true
|
||||
var ThrowPanic = true
|
||||
|
||||
@@ -22,15 +22,20 @@ func (o *OSSignal) Start() error {
|
||||
signalChan := make(chan os.Signal, 1)
|
||||
signal.Notify(signalChan, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
|
||||
o.SignalChan = signalChan
|
||||
o.OnStop(func() {
|
||||
signal.Stop(signalChan)
|
||||
close(signalChan)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *OSSignal) Tick(any) {
|
||||
println("OSSignal Tick")
|
||||
go o.root.Shutdown()
|
||||
}
|
||||
|
||||
type RootManager[K comparable, T ManagerItem[K]] struct {
|
||||
Manager[K, T]
|
||||
WorkCollection[K, T]
|
||||
}
|
||||
|
||||
func (m *RootManager[K, T]) Init() {
|
||||
|
||||
176
pkg/task/task.go
176
pkg/task/task.go
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"maps"
|
||||
"reflect"
|
||||
@@ -21,13 +22,16 @@ const TraceLevel = slog.Level(-8)
|
||||
const OwnerTypeKey = "ownerType"
|
||||
|
||||
var (
|
||||
ErrAutoStop = errors.New("auto stop")
|
||||
ErrRetryRunOut = errors.New("retry out")
|
||||
ErrStopByUser = errors.New("stop by user")
|
||||
ErrRestart = errors.New("restart")
|
||||
ErrTaskComplete = errors.New("complete")
|
||||
ErrExit = errors.New("exit")
|
||||
ErrPanic = errors.New("panic")
|
||||
ErrAutoStop = errors.New("auto stop")
|
||||
ErrRetryRunOut = errors.New("retry out")
|
||||
ErrStopByUser = errors.New("stop by user")
|
||||
ErrRestart = errors.New("restart")
|
||||
ErrTaskComplete = errors.New("complete")
|
||||
ErrTimeout = errors.New("timeout")
|
||||
ErrExit = errors.New("exit")
|
||||
ErrPanic = errors.New("panic")
|
||||
ErrTooManyChildren = errors.New("too many children in job")
|
||||
ErrDisposed = errors.New("disposed")
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -45,7 +49,6 @@ const (
|
||||
TASK_TYPE_JOB
|
||||
TASK_TYPE_Work
|
||||
TASK_TYPE_CHANNEL
|
||||
TASK_TYPE_CALL
|
||||
)
|
||||
|
||||
type (
|
||||
@@ -71,14 +74,15 @@ type (
|
||||
SetDescription(key string, value any)
|
||||
SetDescriptions(value Description)
|
||||
SetRetry(maxRetry int, retryInterval time.Duration)
|
||||
Depend(ITask)
|
||||
Using(resource ...any)
|
||||
OnStop(any)
|
||||
OnStart(func())
|
||||
OnBeforeDispose(func())
|
||||
OnDispose(func())
|
||||
GetState() TaskState
|
||||
GetLevel() byte
|
||||
WaitStopped() error
|
||||
WaitStarted() error
|
||||
getKey() any
|
||||
}
|
||||
IJob interface {
|
||||
ITask
|
||||
@@ -88,8 +92,8 @@ type (
|
||||
OnDescendantsDispose(func(ITask))
|
||||
OnDescendantsStart(func(ITask))
|
||||
Blocked() ITask
|
||||
Call(func() error, ...any)
|
||||
Post(func() error, ...any) *Task
|
||||
EventLoopRunning() bool
|
||||
Call(func())
|
||||
}
|
||||
IChannelTask interface {
|
||||
ITask
|
||||
@@ -121,15 +125,18 @@ type (
|
||||
Logger *slog.Logger
|
||||
context.Context
|
||||
context.CancelCauseFunc
|
||||
handler ITask
|
||||
retry RetryConfig
|
||||
afterStartListeners, beforeDisposeListeners, afterDisposeListeners []func()
|
||||
description sync.Map
|
||||
startup, shutdown *util.Promise
|
||||
parent *Job
|
||||
parentCtx context.Context
|
||||
state TaskState
|
||||
level byte
|
||||
handler ITask
|
||||
retry RetryConfig
|
||||
afterStartListeners, afterDisposeListeners []func()
|
||||
closeOnStop []any
|
||||
resources []any
|
||||
stopOnce sync.Once
|
||||
description sync.Map
|
||||
startup, shutdown *util.Promise
|
||||
parent *Job
|
||||
parentCtx context.Context
|
||||
state TaskState
|
||||
level byte
|
||||
}
|
||||
)
|
||||
|
||||
@@ -183,12 +190,19 @@ func (task *Task) GetKey() uint32 {
|
||||
return task.ID
|
||||
}
|
||||
|
||||
func (task *Task) getKey() any {
|
||||
return reflect.ValueOf(task.handler).MethodByName("GetKey").Call(nil)[0].Interface()
|
||||
}
|
||||
|
||||
func (task *Task) WaitStarted() error {
|
||||
if task.startup == nil {
|
||||
return nil
|
||||
}
|
||||
return task.startup.Await()
|
||||
}
|
||||
|
||||
func (task *Task) WaitStopped() (err error) {
|
||||
err = task.startup.Await()
|
||||
err = task.WaitStarted()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -229,33 +243,50 @@ func (task *Task) Stop(err error) {
|
||||
task.Error("task stop with nil error", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType(), "parent", task.GetParent().GetOwnerType())
|
||||
panic("task stop with nil error")
|
||||
}
|
||||
if task.CancelCauseFunc != nil {
|
||||
if tt := task.handler.GetTaskType(); tt != TASK_TYPE_CALL {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
task.Debug("task stop", "caller", fmt.Sprintf("%s:%d", strings.TrimPrefix(file, sourceFilePathPrefix), line), "reason", err, "elapsed", time.Since(task.StartTime), "taskId", task.ID, "taskType", tt, "ownerType", task.GetOwnerType())
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
task.stopOnce.Do(func() {
|
||||
if task.CancelCauseFunc != nil {
|
||||
msg := "task stop"
|
||||
if task.startup.IsRejected() {
|
||||
msg = "task start failed"
|
||||
}
|
||||
task.Debug(msg, "caller", fmt.Sprintf("%s:%d", strings.TrimPrefix(file, sourceFilePathPrefix), line), "reason", err, "elapsed", time.Since(task.StartTime), "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType())
|
||||
task.CancelCauseFunc(err)
|
||||
}
|
||||
task.CancelCauseFunc(err)
|
||||
}
|
||||
task.stop()
|
||||
})
|
||||
}
|
||||
|
||||
func (task *Task) Depend(t ITask) {
|
||||
t.OnDispose(func() {
|
||||
task.Stop(t.StopReason())
|
||||
})
|
||||
func (task *Task) stop() {
|
||||
for _, resource := range task.closeOnStop {
|
||||
switch v := resource.(type) {
|
||||
case func():
|
||||
v()
|
||||
case func() error:
|
||||
v()
|
||||
case ITask:
|
||||
v.Stop(task.StopReason())
|
||||
}
|
||||
}
|
||||
task.closeOnStop = task.closeOnStop[:0]
|
||||
}
|
||||
|
||||
func (task *Task) OnStart(listener func()) {
|
||||
task.afterStartListeners = append(task.afterStartListeners, listener)
|
||||
}
|
||||
|
||||
func (task *Task) OnBeforeDispose(listener func()) {
|
||||
task.beforeDisposeListeners = append(task.beforeDisposeListeners, listener)
|
||||
}
|
||||
|
||||
func (task *Task) OnDispose(listener func()) {
|
||||
task.afterDisposeListeners = append(task.afterDisposeListeners, listener)
|
||||
}
|
||||
|
||||
func (task *Task) Using(resource ...any) {
|
||||
task.resources = append(task.resources, resource...)
|
||||
}
|
||||
|
||||
func (task *Task) OnStop(resource any) {
|
||||
task.closeOnStop = append(task.closeOnStop, resource)
|
||||
}
|
||||
|
||||
func (task *Task) GetSignal() any {
|
||||
return task.Done()
|
||||
}
|
||||
@@ -300,9 +331,7 @@ func (task *Task) start() bool {
|
||||
}
|
||||
for {
|
||||
task.StartTime = time.Now()
|
||||
if tt := task.handler.GetTaskType(); tt != TASK_TYPE_CALL {
|
||||
task.Debug("task start", "taskId", task.ID, "taskType", tt, "ownerType", task.GetOwnerType(), "reason", task.StartReason)
|
||||
}
|
||||
task.Debug("task start", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType(), "reason", task.StartReason)
|
||||
task.state = TASK_STATE_STARTING
|
||||
if v, ok := task.handler.(TaskStarter); ok {
|
||||
err = v.Start()
|
||||
@@ -350,6 +379,7 @@ func (task *Task) start() bool {
|
||||
}
|
||||
|
||||
func (task *Task) reset() {
|
||||
task.stopOnce = sync.Once{}
|
||||
task.Context, task.CancelCauseFunc = context.WithCancelCause(task.parentCtx)
|
||||
task.shutdown = util.NewPromise(context.Background())
|
||||
task.startup = util.NewPromise(task.Context)
|
||||
@@ -363,6 +393,10 @@ func (task *Task) GetDescriptions() map[string]string {
|
||||
})
|
||||
}
|
||||
|
||||
func (task *Task) GetDescription(key string) (any, bool) {
|
||||
return task.description.Load(key)
|
||||
}
|
||||
|
||||
func (task *Task) SetDescription(key string, value any) {
|
||||
task.description.Store(key, value)
|
||||
}
|
||||
@@ -380,41 +414,41 @@ func (task *Task) SetDescriptions(value Description) {
|
||||
func (task *Task) dispose() {
|
||||
taskType, ownerType := task.handler.GetTaskType(), task.GetOwnerType()
|
||||
if task.state < TASK_STATE_STARTED {
|
||||
if taskType != TASK_TYPE_CALL {
|
||||
task.Debug("task dispose canceled", "taskId", task.ID, "taskType", taskType, "ownerType", ownerType, "state", task.state)
|
||||
}
|
||||
task.Debug("task dispose canceled", "taskId", task.ID, "taskType", taskType, "ownerType", ownerType, "state", task.state)
|
||||
return
|
||||
}
|
||||
reason := task.StopReason()
|
||||
task.state = TASK_STATE_DISPOSING
|
||||
if taskType != TASK_TYPE_CALL {
|
||||
yargs := []any{"reason", reason, "taskId", task.ID, "taskType", taskType, "ownerType", ownerType}
|
||||
task.Debug("task dispose", yargs...)
|
||||
defer task.Debug("task disposed", yargs...)
|
||||
}
|
||||
befores := len(task.beforeDisposeListeners)
|
||||
for i, listener := range task.beforeDisposeListeners {
|
||||
task.SetDescription("disposeProcess", fmt.Sprintf("b:%d/%d", i, befores))
|
||||
listener()
|
||||
}
|
||||
yargs := []any{"reason", reason, "taskId", task.ID, "taskType", taskType, "ownerType", ownerType}
|
||||
task.Debug("task dispose", yargs...)
|
||||
defer task.Debug("task disposed", yargs...)
|
||||
if job, ok := task.handler.(IJob); ok {
|
||||
mt := job.getJob()
|
||||
task.SetDescription("disposeProcess", "wait children")
|
||||
mt.eventLoopLock.Lock()
|
||||
if mt.addSub != nil {
|
||||
mt.waitChildrenDispose()
|
||||
mt.lazyRun = sync.Once{}
|
||||
}
|
||||
mt.eventLoopLock.Unlock()
|
||||
mt.waitChildrenDispose(reason)
|
||||
}
|
||||
task.SetDescription("disposeProcess", "self")
|
||||
if v, ok := task.handler.(TaskDisposal); ok {
|
||||
v.Dispose()
|
||||
}
|
||||
task.shutdown.Fulfill(reason)
|
||||
afters := len(task.afterDisposeListeners)
|
||||
task.SetDescription("disposeProcess", "resources")
|
||||
task.stopOnce.Do(task.stop)
|
||||
for _, resource := range task.resources {
|
||||
switch v := resource.(type) {
|
||||
case func():
|
||||
v()
|
||||
case ITask:
|
||||
v.Stop(task.StopReason())
|
||||
case util.Recyclable:
|
||||
v.Recycle()
|
||||
case io.Closer:
|
||||
v.Close()
|
||||
}
|
||||
}
|
||||
task.resources = task.resources[:0]
|
||||
for i, listener := range task.afterDisposeListeners {
|
||||
task.SetDescription("disposeProcess", fmt.Sprintf("a:%d/%d", i, afters))
|
||||
task.SetDescription("disposeProcess", fmt.Sprintf("a:%d/%d", i, len(task.afterDisposeListeners)))
|
||||
listener()
|
||||
}
|
||||
task.SetDescription("disposeProcess", "done")
|
||||
@@ -482,3 +516,25 @@ func (task *Task) Error(msg string, args ...any) {
|
||||
func (task *Task) TraceEnabled() bool {
|
||||
return task.Logger.Enabled(task.Context, TraceLevel)
|
||||
}
|
||||
|
||||
func (task *Task) RunTask(t ITask, opt ...any) (err error) {
|
||||
tt := t.GetTask()
|
||||
tt.handler = t
|
||||
mt := task.parent
|
||||
if job, ok := task.handler.(IJob); ok {
|
||||
mt = job.getJob()
|
||||
}
|
||||
mt.initContext(tt, opt...)
|
||||
if mt.IsStopped() {
|
||||
err = mt.StopReason()
|
||||
task.startup.Reject(err)
|
||||
return
|
||||
}
|
||||
task.OnStop(t)
|
||||
started := tt.start()
|
||||
<-tt.Done()
|
||||
if started {
|
||||
tt.dispose()
|
||||
}
|
||||
return tt.StopReason()
|
||||
}
|
||||
|
||||
@@ -24,9 +24,12 @@ func Test_AddTask_AddsTaskSuccessfully(t *testing.T) {
|
||||
var task Task
|
||||
root.AddTask(&task)
|
||||
_ = task.WaitStarted()
|
||||
if len(root.children) != 1 {
|
||||
t.Errorf("expected 1 child task, got %d", len(root.children))
|
||||
}
|
||||
root.RangeSubTask(func(t ITask) bool {
|
||||
if t.GetTaskID() == task.GetTaskID() {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
type retryDemoTask struct {
|
||||
@@ -51,9 +54,9 @@ func Test_RetryTask(t *testing.T) {
|
||||
|
||||
func Test_Call_ExecutesCallback(t *testing.T) {
|
||||
called := false
|
||||
root.Call(func() error {
|
||||
root.Call(func() {
|
||||
called = true
|
||||
return nil
|
||||
return
|
||||
})
|
||||
if !called {
|
||||
t.Errorf("expected callback to be called")
|
||||
@@ -162,6 +165,24 @@ func Test_StartFail(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_Block(t *testing.T) {
|
||||
var task Task
|
||||
block := make(chan struct{})
|
||||
var job Job
|
||||
task.OnStart(func() {
|
||||
task.OnStop(func() {
|
||||
close(block)
|
||||
})
|
||||
<-block
|
||||
})
|
||||
time.AfterFunc(time.Second*2, func() {
|
||||
job.Stop(ErrTaskComplete)
|
||||
})
|
||||
root.AddTask(&job)
|
||||
job.AddTask(&task)
|
||||
job.WaitStopped()
|
||||
}
|
||||
|
||||
//
|
||||
//type DemoTask struct {
|
||||
// Task
|
||||
|
||||
@@ -11,3 +11,57 @@ func (m *Work) keepalive() bool {
|
||||
func (*Work) GetTaskType() TaskType {
|
||||
return TASK_TYPE_Work
|
||||
}
|
||||
|
||||
type WorkCollection[K comparable, T interface {
|
||||
ITask
|
||||
GetKey() K
|
||||
}] struct {
|
||||
Work
|
||||
}
|
||||
|
||||
func (c *WorkCollection[K, T]) Find(f func(T) bool) (item T, ok bool) {
|
||||
c.RangeSubTask(func(task ITask) bool {
|
||||
if v, _ok := task.(T); _ok && f(v) {
|
||||
item = v
|
||||
ok = true
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (c *WorkCollection[K, T]) Get(key K) (item T, ok bool) {
|
||||
var value any
|
||||
value, ok = c.children.Load(key)
|
||||
if ok {
|
||||
item, ok = value.(T)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *WorkCollection[K, T]) Range(f func(T) bool) {
|
||||
c.RangeSubTask(func(task ITask) bool {
|
||||
if v, ok := task.(T); ok && !f(v) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (c *WorkCollection[K, T]) Has(key K) (ok bool) {
|
||||
_, ok = c.children.Load(key)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *WorkCollection[K, T]) ToList() (list []T) {
|
||||
c.Range(func(t T) bool {
|
||||
list = append(list, t)
|
||||
return true
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (c *WorkCollection[K, T]) Length() int {
|
||||
return int(c.Size.Load())
|
||||
}
|
||||
|
||||
BIN
pkg/test.h264
Normal file
BIN
pkg/test.h264
Normal file
Binary file not shown.
63
pkg/track.go
63
pkg/track.go
@@ -51,14 +51,12 @@ type (
|
||||
LastDropLevelChange time.Time
|
||||
DropFrameLevel int // 0: no drop, 1: drop P-frame, 2: drop all
|
||||
}
|
||||
|
||||
AVTrack struct {
|
||||
Track
|
||||
*RingWriter
|
||||
codec.ICodecCtx
|
||||
Allocator *util.ScalableMemoryAllocator
|
||||
SequenceFrame IAVFrame
|
||||
WrapIndex int
|
||||
Allocator *util.ScalableMemoryAllocator
|
||||
WrapIndex int
|
||||
TsTamer
|
||||
SpeedController
|
||||
DropController
|
||||
@@ -71,11 +69,13 @@ func NewAVTrack(args ...any) (t *AVTrack) {
|
||||
switch v := arg.(type) {
|
||||
case IAVFrame:
|
||||
t.FrameType = reflect.TypeOf(v)
|
||||
t.Allocator = v.GetAllocator()
|
||||
sample := v.GetSample()
|
||||
t.Allocator = sample.GetAllocator()
|
||||
t.ICodecCtx = sample.ICodecCtx
|
||||
case reflect.Type:
|
||||
t.FrameType = v
|
||||
case *slog.Logger:
|
||||
t.Logger = v
|
||||
t.Logger = v.With("frameType", t.FrameType.String())
|
||||
case *AVTrack:
|
||||
t.Logger = v.Logger.With("subtrack", t.FrameType.String())
|
||||
t.RingWriter = v.RingWriter
|
||||
@@ -118,9 +118,25 @@ func (t *AVTrack) AddBytesIn(n int) {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *AVTrack) AcceptFrame(data IAVFrame) {
|
||||
func (t *AVTrack) FixTimestamp(data *Sample, scale float64) {
|
||||
t.AddBytesIn(data.Size)
|
||||
data.Timestamp = t.Tame(data.Timestamp, t.FPS, scale)
|
||||
}
|
||||
|
||||
func (t *AVTrack) NewFrame(avFrame *AVFrame) (frame IAVFrame) {
|
||||
frame = reflect.New(t.FrameType.Elem()).Interface().(IAVFrame)
|
||||
if avFrame.Sample == nil {
|
||||
avFrame.Sample = frame.GetSample()
|
||||
}
|
||||
if avFrame.BaseSample == nil {
|
||||
avFrame.BaseSample = &BaseSample{}
|
||||
}
|
||||
frame.GetSample().BaseSample = avFrame.BaseSample
|
||||
return
|
||||
}
|
||||
|
||||
func (t *AVTrack) AcceptFrame() {
|
||||
t.acceptFrameCount++
|
||||
t.Value.Wraps = append(t.Value.Wraps, data)
|
||||
}
|
||||
|
||||
func (t *AVTrack) changeDropFrameLevel(newLevel int) {
|
||||
@@ -230,23 +246,28 @@ func (t *AVTrack) AddPausedTime(d time.Duration) {
|
||||
t.pausedTime += d
|
||||
}
|
||||
|
||||
func (s *SpeedController) speedControl(speed float64, ts time.Duration) {
|
||||
if speed != s.speed || s.beginTime.IsZero() {
|
||||
s.speed = speed
|
||||
s.beginTime = time.Now()
|
||||
s.beginTimestamp = ts
|
||||
s.pausedTime = 0
|
||||
func (t *AVTrack) speedControl(speed float64, ts time.Duration) {
|
||||
if speed != t.speed || t.beginTime.IsZero() {
|
||||
t.speed = speed
|
||||
t.beginTime = time.Now()
|
||||
t.beginTimestamp = ts
|
||||
t.pausedTime = 0
|
||||
} else {
|
||||
elapsed := time.Since(s.beginTime) - s.pausedTime
|
||||
elapsed := time.Since(t.beginTime) - t.pausedTime
|
||||
if speed == 0 {
|
||||
s.Delta = ts - elapsed
|
||||
t.Delta = ts - elapsed
|
||||
if t.Logger.Enabled(t.ready, task.TraceLevel) {
|
||||
t.Trace("speed 0", "ts", ts, "elapsed", elapsed, "delta", t.Delta)
|
||||
}
|
||||
return
|
||||
}
|
||||
should := time.Duration(float64(ts-s.beginTimestamp) / speed)
|
||||
s.Delta = should - elapsed
|
||||
// fmt.Println(speed, elapsed, should, s.Delta)
|
||||
if s.Delta > threshold {
|
||||
time.Sleep(min(s.Delta, time.Millisecond*500))
|
||||
should := time.Duration(float64(ts-t.beginTimestamp) / speed)
|
||||
t.Delta = should - elapsed
|
||||
if t.Delta > threshold {
|
||||
if t.Logger.Enabled(t.ready, task.TraceLevel) {
|
||||
t.Trace("speed control", "speed", speed, "elapsed", elapsed, "should", should, "delta", t.Delta)
|
||||
}
|
||||
time.Sleep(min(t.Delta, time.Millisecond*500))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
63
pkg/util/buddy_disable.go
Normal file
63
pkg/util/buddy_disable.go
Normal file
@@ -0,0 +1,63 @@
|
||||
//go:build !enable_buddy
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var pool0, pool1, pool2 sync.Pool
|
||||
|
||||
func init() {
|
||||
pool0.New = func() any {
|
||||
ret := createMemoryAllocator(defaultBufSize)
|
||||
ret.recycle = func() {
|
||||
pool0.Put(ret)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
pool1.New = func() any {
|
||||
ret := createMemoryAllocator(1 << MinPowerOf2)
|
||||
ret.recycle = func() {
|
||||
pool1.Put(ret)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
pool2.New = func() any {
|
||||
ret := createMemoryAllocator(1 << (MinPowerOf2 + 2))
|
||||
ret.recycle = func() {
|
||||
pool2.Put(ret)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
}
|
||||
|
||||
func createMemoryAllocator(size int) *MemoryAllocator {
|
||||
memory := make([]byte, size)
|
||||
ret := &MemoryAllocator{
|
||||
allocator: NewAllocator(size),
|
||||
Size: size,
|
||||
memory: memory,
|
||||
start: int64(uintptr(unsafe.Pointer(&memory[0]))),
|
||||
}
|
||||
ret.allocator.Init(size)
|
||||
return ret
|
||||
}
|
||||
|
||||
func GetMemoryAllocator(size int) (ret *MemoryAllocator) {
|
||||
switch size {
|
||||
case defaultBufSize:
|
||||
ret = pool0.Get().(*MemoryAllocator)
|
||||
ret.allocator.Init(size)
|
||||
case 1 << MinPowerOf2:
|
||||
ret = pool1.Get().(*MemoryAllocator)
|
||||
ret.allocator.Init(size)
|
||||
case 1 << (MinPowerOf2 + 2):
|
||||
ret = pool2.Get().(*MemoryAllocator)
|
||||
ret.allocator.Init(size)
|
||||
default:
|
||||
ret = createMemoryAllocator(size)
|
||||
}
|
||||
return
|
||||
}
|
||||
44
pkg/util/buddy_enable.go
Normal file
44
pkg/util/buddy_enable.go
Normal file
@@ -0,0 +1,44 @@
|
||||
//go:build enable_buddy
|
||||
|
||||
package util
|
||||
|
||||
import "unsafe"
|
||||
|
||||
func createMemoryAllocator(size int, buddy *Buddy, offset int) *MemoryAllocator {
|
||||
ret := &MemoryAllocator{
|
||||
allocator: NewAllocator(size),
|
||||
Size: size,
|
||||
memory: buddy.memoryPool[offset : offset+size],
|
||||
start: buddy.poolStart + int64(offset),
|
||||
recycle: func() {
|
||||
buddy.Free(offset >> MinPowerOf2)
|
||||
},
|
||||
}
|
||||
ret.allocator.Init(size)
|
||||
return ret
|
||||
}
|
||||
|
||||
func GetMemoryAllocator(size int) (ret *MemoryAllocator) {
|
||||
if size < BuddySize {
|
||||
requiredSize := size >> MinPowerOf2
|
||||
// 循环尝试从池中获取可用的 buddy
|
||||
for {
|
||||
buddy := GetBuddy()
|
||||
defer PutBuddy(buddy)
|
||||
offset, err := buddy.Alloc(requiredSize)
|
||||
if err == nil {
|
||||
// 分配成功,使用这个 buddy
|
||||
return createMemoryAllocator(size, buddy, offset<<MinPowerOf2)
|
||||
}
|
||||
}
|
||||
}
|
||||
// 池中的 buddy 都无法分配或大小不够,使用系统内存
|
||||
memory := make([]byte, size)
|
||||
start := int64(uintptr(unsafe.Pointer(&memory[0])))
|
||||
return &MemoryAllocator{
|
||||
allocator: NewAllocator(size),
|
||||
Size: size,
|
||||
memory: memory,
|
||||
start: start,
|
||||
}
|
||||
}
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"io"
|
||||
"net"
|
||||
"net/textproto"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -15,8 +14,8 @@ type BufReader struct {
|
||||
buf MemoryReader
|
||||
totalRead int
|
||||
BufLen int
|
||||
Mouth chan []byte
|
||||
feedData func() error
|
||||
Dump *os.File
|
||||
}
|
||||
|
||||
func NewBufReaderWithBufLen(reader io.Reader, bufLen int) (r *BufReader) {
|
||||
@@ -62,8 +61,10 @@ func NewBufReaderBuffersChan(feedChan chan net.Buffers) (r *BufReader) {
|
||||
return
|
||||
}
|
||||
|
||||
func NewBufReaderChan(feedChan chan []byte) (r *BufReader) {
|
||||
func NewBufReaderChan(bufferSize int) (r *BufReader) {
|
||||
feedChan := make(chan []byte, bufferSize)
|
||||
r = &BufReader{
|
||||
Mouth: feedChan,
|
||||
feedData: func() error {
|
||||
data, ok := <-feedChan
|
||||
if !ok {
|
||||
@@ -81,6 +82,15 @@ func NewBufReaderChan(feedChan chan []byte) (r *BufReader) {
|
||||
return
|
||||
}
|
||||
|
||||
func (r *BufReader) Feed(data []byte) bool {
|
||||
select {
|
||||
case r.Mouth <- data:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func NewBufReader(reader io.Reader) (r *BufReader) {
|
||||
return NewBufReaderWithBufLen(reader, defaultBufSize)
|
||||
}
|
||||
@@ -90,6 +100,9 @@ func (r *BufReader) Recycle() {
|
||||
if r.Allocator != nil {
|
||||
r.Allocator.Recycle()
|
||||
}
|
||||
if r.Mouth != nil {
|
||||
close(r.Mouth)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *BufReader) Buffered() int {
|
||||
@@ -176,9 +189,6 @@ func (r *BufReader) ReadRange(n int, yield func([]byte)) (err error) {
|
||||
func (r *BufReader) Read(to []byte) (n int, err error) {
|
||||
n = len(to)
|
||||
err = r.ReadNto(n, to)
|
||||
if r.Dump != nil {
|
||||
r.Dump.Write(to)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -199,7 +209,7 @@ func (r *BufReader) ReadString(n int) (s string, err error) {
|
||||
}
|
||||
|
||||
func (r *BufReader) ReadBytes(n int) (mem Memory, err error) {
|
||||
err = r.ReadRange(n, mem.AppendOne)
|
||||
err = r.ReadRange(n, mem.PushOne)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ func TestReadBytesTo(t *testing.T) {
|
||||
s := RandomString(100)
|
||||
t.Logf("s:%s", s)
|
||||
var m Memory
|
||||
m.AppendOne([]byte(s))
|
||||
m.PushOne([]byte(s))
|
||||
r := m.NewReader()
|
||||
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
var total []byte
|
||||
@@ -34,7 +34,7 @@ func TestReadBytesTo(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
buf := make([]byte, i)
|
||||
n := r.ReadBytesTo(buf)
|
||||
n, _ := r.Read(buf)
|
||||
t.Logf("n:%d buf:%s", n, string(buf))
|
||||
total = append(total, buf[:n]...)
|
||||
if n == 0 {
|
||||
|
||||
@@ -101,23 +101,6 @@ func (c *Collection[K, T]) RemoveByKey(key K) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// func (c *Collection[K, T]) GetOrCreate(key K) (item T, find bool) {
|
||||
// if c.L != nil {
|
||||
// c.L.Lock()
|
||||
// defer c.L.Unlock()
|
||||
// }
|
||||
// if c.m != nil {
|
||||
// item, find = c.m[key]
|
||||
// return item, find
|
||||
// }
|
||||
// for _, item = range c.Items {
|
||||
// if item.GetKey() == key {
|
||||
// return item, true
|
||||
// }
|
||||
// }
|
||||
// item = reflect.New(reflect.TypeOf(item).Elem()).Interface().(T)
|
||||
// return
|
||||
// }
|
||||
func (c *Collection[K, T]) Has(key K) bool {
|
||||
_, ok := c.Get(key)
|
||||
return ok
|
||||
@@ -169,10 +152,6 @@ func (c *Collection[K, T]) Search(f func(T) bool) func(yield func(item T) bool)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Collection[K, T]) GetKey() K {
|
||||
return c.Items[0].GetKey()
|
||||
}
|
||||
|
||||
func (c *Collection[K, T]) Clear() {
|
||||
if c.L != nil {
|
||||
c.L.Lock()
|
||||
|
||||
60
pkg/util/http_ws_writer.go
Normal file
60
pkg/util/http_ws_writer.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/gobwas/ws/wsutil"
|
||||
)
|
||||
|
||||
type HTTP_WS_Writer struct {
|
||||
io.Writer
|
||||
Conn net.Conn
|
||||
ContentType string
|
||||
WriteTimeout time.Duration
|
||||
IsWebSocket bool
|
||||
buffer []byte
|
||||
}
|
||||
|
||||
func (m *HTTP_WS_Writer) Write(p []byte) (n int, err error) {
|
||||
if m.IsWebSocket {
|
||||
m.buffer = append(m.buffer, p...)
|
||||
return len(p), nil
|
||||
}
|
||||
if m.Conn != nil && m.WriteTimeout > 0 {
|
||||
m.Conn.SetWriteDeadline(time.Now().Add(m.WriteTimeout))
|
||||
}
|
||||
return m.Writer.Write(p)
|
||||
}
|
||||
|
||||
func (m *HTTP_WS_Writer) Flush() (err error) {
|
||||
if m.IsWebSocket {
|
||||
if m.WriteTimeout > 0 {
|
||||
m.Conn.SetWriteDeadline(time.Now().Add(m.WriteTimeout))
|
||||
}
|
||||
err = wsutil.WriteServerBinary(m.Conn, m.buffer)
|
||||
m.buffer = m.buffer[:0]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (m *HTTP_WS_Writer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if m.Conn == nil {
|
||||
w.Header().Set("Transfer-Encoding", "chunked")
|
||||
w.Header().Set("Content-Type", m.ContentType)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
if hijacker, ok := w.(http.Hijacker); ok && m.WriteTimeout > 0 {
|
||||
m.Conn, _, _ = hijacker.Hijack()
|
||||
m.Conn.SetWriteDeadline(time.Now().Add(m.WriteTimeout))
|
||||
m.Writer = m.Conn
|
||||
} else {
|
||||
m.Writer = w
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
} else {
|
||||
m.IsWebSocket = true
|
||||
m.Writer = m.Conn
|
||||
}
|
||||
}
|
||||
@@ -16,6 +16,10 @@ type ReadWriteSeekCloser interface {
|
||||
io.Closer
|
||||
}
|
||||
|
||||
type Recyclable interface {
|
||||
Recycle()
|
||||
}
|
||||
|
||||
type Object = map[string]any
|
||||
|
||||
func Conditional[T any](cond bool, t, f T) T {
|
||||
@@ -70,3 +74,59 @@ func Exist(filename string) bool {
|
||||
_, err := os.Stat(filename)
|
||||
return err == nil || os.IsExist(err)
|
||||
}
|
||||
|
||||
type ReuseArray[T any] []T
|
||||
|
||||
func (s *ReuseArray[T]) GetNextPointer() (r *T) {
|
||||
ss := *s
|
||||
l := len(ss)
|
||||
if cap(ss) > l {
|
||||
ss = ss[:l+1]
|
||||
} else {
|
||||
var new T
|
||||
ss = append(ss, new)
|
||||
}
|
||||
*s = ss
|
||||
r = &((ss)[l])
|
||||
if resetter, ok := any(r).(Resetter); ok {
|
||||
resetter.Reset()
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (s ReuseArray[T]) RangePoint(f func(yield *T) bool) {
|
||||
for i := range len(s) {
|
||||
if !f(&s[i]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ReuseArray[T]) Reset() {
|
||||
*s = (*s)[:0]
|
||||
}
|
||||
|
||||
func (s *ReuseArray[T]) Reduce() ReuseArray[T] {
|
||||
ss := *s
|
||||
ss = ss[:len(ss)-1]
|
||||
*s = ss
|
||||
return ss
|
||||
}
|
||||
|
||||
func (s *ReuseArray[T]) Remove(item *T) bool {
|
||||
for i := range *s {
|
||||
if &(*s)[i] == item {
|
||||
*s = append((*s)[:i], (*s)[i+1:]...)
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *ReuseArray[T]) Count() int {
|
||||
return len(*s)
|
||||
}
|
||||
|
||||
type Resetter interface {
|
||||
Reset()
|
||||
}
|
||||
|
||||
103
pkg/util/mem.go
103
pkg/util/mem.go
@@ -1,7 +1,110 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"slices"
|
||||
)
|
||||
|
||||
const (
|
||||
MaxBlockSize = 1 << 22
|
||||
BuddySize = MaxBlockSize << 7
|
||||
MinPowerOf2 = 10
|
||||
)
|
||||
|
||||
type Memory struct {
|
||||
Size int
|
||||
Buffers [][]byte
|
||||
}
|
||||
|
||||
func NewMemory(buf []byte) Memory {
|
||||
return Memory{
|
||||
Buffers: net.Buffers{buf},
|
||||
Size: len(buf),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) WriteTo(w io.Writer) (n int64, err error) {
|
||||
copy := net.Buffers(slices.Clone(m.Buffers))
|
||||
return copy.WriteTo(w)
|
||||
}
|
||||
|
||||
func (m *Memory) Reset() {
|
||||
m.Buffers = m.Buffers[:0]
|
||||
m.Size = 0
|
||||
}
|
||||
|
||||
func (m *Memory) UpdateBuffer(index int, buf []byte) {
|
||||
if index < 0 {
|
||||
index = len(m.Buffers) + index
|
||||
}
|
||||
m.Size = len(buf) - len(m.Buffers[index])
|
||||
m.Buffers[index] = buf
|
||||
}
|
||||
|
||||
func (m *Memory) CopyFrom(b *Memory) {
|
||||
buf := make([]byte, b.Size)
|
||||
b.CopyTo(buf)
|
||||
m.PushOne(buf)
|
||||
}
|
||||
|
||||
func (m *Memory) Equal(b *Memory) bool {
|
||||
if m.Size != b.Size || len(m.Buffers) != len(b.Buffers) {
|
||||
return false
|
||||
}
|
||||
for i, buf := range m.Buffers {
|
||||
if !slices.Equal(buf, b.Buffers[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Memory) CopyTo(buf []byte) {
|
||||
for _, b := range m.Buffers {
|
||||
l := len(b)
|
||||
copy(buf, b)
|
||||
buf = buf[l:]
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) ToBytes() []byte {
|
||||
buf := make([]byte, m.Size)
|
||||
m.CopyTo(buf)
|
||||
return buf
|
||||
}
|
||||
|
||||
func (m *Memory) PushOne(b []byte) {
|
||||
m.Buffers = append(m.Buffers, b)
|
||||
m.Size += len(b)
|
||||
}
|
||||
|
||||
func (m *Memory) Push(b ...[]byte) {
|
||||
m.Buffers = append(m.Buffers, b...)
|
||||
for _, level0 := range b {
|
||||
m.Size += len(level0)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) Append(mm Memory) *Memory {
|
||||
m.Buffers = append(m.Buffers, mm.Buffers...)
|
||||
m.Size += mm.Size
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *Memory) Count() int {
|
||||
return len(m.Buffers)
|
||||
}
|
||||
|
||||
func (m *Memory) Range(yield func([]byte)) {
|
||||
for i := range m.Count() {
|
||||
yield(m.Buffers[i])
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) NewReader() MemoryReader {
|
||||
return MemoryReader{
|
||||
Memory: m,
|
||||
Length: m.Size,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,93 +2,23 @@ package util
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"slices"
|
||||
)
|
||||
|
||||
type Memory struct {
|
||||
Size int
|
||||
net.Buffers
|
||||
}
|
||||
|
||||
type MemoryReader struct {
|
||||
*Memory
|
||||
Length int
|
||||
offset0 int
|
||||
offset1 int
|
||||
Length, offset0, offset1 int
|
||||
}
|
||||
|
||||
func NewReadableBuffersFromBytes(b ...[]byte) *MemoryReader {
|
||||
func NewReadableBuffersFromBytes(b ...[]byte) MemoryReader {
|
||||
buf := &Memory{Buffers: b}
|
||||
for _, level0 := range b {
|
||||
buf.Size += len(level0)
|
||||
}
|
||||
return &MemoryReader{Memory: buf, Length: buf.Size}
|
||||
return MemoryReader{Memory: buf, Length: buf.Size}
|
||||
}
|
||||
|
||||
func NewMemory(buf []byte) Memory {
|
||||
return Memory{
|
||||
Buffers: net.Buffers{buf},
|
||||
Size: len(buf),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) UpdateBuffer(index int, buf []byte) {
|
||||
if index < 0 {
|
||||
index = len(m.Buffers) + index
|
||||
}
|
||||
m.Size = len(buf) - len(m.Buffers[index])
|
||||
m.Buffers[index] = buf
|
||||
}
|
||||
|
||||
func (m *Memory) CopyFrom(b *Memory) {
|
||||
buf := make([]byte, b.Size)
|
||||
b.CopyTo(buf)
|
||||
m.AppendOne(buf)
|
||||
}
|
||||
|
||||
func (m *Memory) CopyTo(buf []byte) {
|
||||
for _, b := range m.Buffers {
|
||||
l := len(b)
|
||||
copy(buf, b)
|
||||
buf = buf[l:]
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) ToBytes() []byte {
|
||||
buf := make([]byte, m.Size)
|
||||
m.CopyTo(buf)
|
||||
return buf
|
||||
}
|
||||
|
||||
func (m *Memory) AppendOne(b []byte) {
|
||||
m.Buffers = append(m.Buffers, b)
|
||||
m.Size += len(b)
|
||||
}
|
||||
|
||||
func (m *Memory) Append(b ...[]byte) {
|
||||
m.Buffers = append(m.Buffers, b...)
|
||||
for _, level0 := range b {
|
||||
m.Size += len(level0)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) Count() int {
|
||||
return len(m.Buffers)
|
||||
}
|
||||
|
||||
func (m *Memory) Range(yield func([]byte)) {
|
||||
for i := range m.Count() {
|
||||
yield(m.Buffers[i])
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) NewReader() *MemoryReader {
|
||||
var reader MemoryReader
|
||||
reader.Memory = m
|
||||
reader.Length = m.Size
|
||||
return &reader
|
||||
}
|
||||
var _ io.Reader = (*MemoryReader)(nil)
|
||||
|
||||
func (r *MemoryReader) Offset() int {
|
||||
return r.Size - r.Length
|
||||
@@ -108,9 +38,9 @@ func (r *MemoryReader) MoveToEnd() {
|
||||
r.Length = 0
|
||||
}
|
||||
|
||||
func (r *MemoryReader) ReadBytesTo(buf []byte) (actual int) {
|
||||
func (r *MemoryReader) Read(buf []byte) (actual int, err error) {
|
||||
if r.Length == 0 {
|
||||
return 0
|
||||
return 0, io.EOF
|
||||
}
|
||||
n := len(buf)
|
||||
curBuf := r.GetCurrent()
|
||||
@@ -142,6 +72,7 @@ func (r *MemoryReader) ReadBytesTo(buf []byte) (actual int) {
|
||||
actual += curBufLen
|
||||
r.skipBuf()
|
||||
if r.Length == 0 && n > 0 {
|
||||
err = io.EOF
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -204,6 +135,9 @@ func (r *MemoryReader) getCurrentBufLen() int {
|
||||
return len(r.Memory.Buffers[r.offset0]) - r.offset1
|
||||
}
|
||||
func (r *MemoryReader) Skip(n int) error {
|
||||
if n <= 0 {
|
||||
return nil
|
||||
}
|
||||
if n > r.Length {
|
||||
return io.EOF
|
||||
}
|
||||
@@ -248,8 +182,8 @@ func (r *MemoryReader) ReadBytes(n int) ([]byte, error) {
|
||||
return nil, io.EOF
|
||||
}
|
||||
b := make([]byte, n)
|
||||
actual := r.ReadBytesTo(b)
|
||||
return b[:actual], nil
|
||||
actual, err := r.Read(b)
|
||||
return b[:actual], err
|
||||
}
|
||||
|
||||
func (r *MemoryReader) ReadBE(n int) (num uint32, err error) {
|
||||
@@ -22,13 +22,13 @@ func NewPromiseWithTimeout(ctx context.Context, timeout time.Duration) *Promise
|
||||
p := &Promise{}
|
||||
p.Context, p.CancelCauseFunc = context.WithCancelCause(ctx)
|
||||
p.timer = time.AfterFunc(timeout, func() {
|
||||
p.CancelCauseFunc(ErrTimeout)
|
||||
p.CancelCauseFunc(errTimeout)
|
||||
})
|
||||
return p
|
||||
}
|
||||
|
||||
var ErrResolve = errors.New("promise resolved")
|
||||
var ErrTimeout = errors.New("promise timeout")
|
||||
var errTimeout = errors.New("promise timeout")
|
||||
|
||||
func (p *Promise) Resolve() {
|
||||
p.Fulfill(nil)
|
||||
@@ -47,6 +47,10 @@ func (p *Promise) Await() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Promise) IsRejected() bool {
|
||||
return context.Cause(p.Context) != ErrResolve
|
||||
}
|
||||
|
||||
func (p *Promise) Fulfill(err error) {
|
||||
if p.timer != nil {
|
||||
p.timer.Stop()
|
||||
|
||||
@@ -4,12 +4,26 @@ package util
|
||||
|
||||
import (
|
||||
"io"
|
||||
"slices"
|
||||
)
|
||||
|
||||
type RecyclableMemory struct {
|
||||
Memory
|
||||
}
|
||||
|
||||
func NewRecyclableMemory(allocator *ScalableMemoryAllocator) RecyclableMemory {
|
||||
return RecyclableMemory{}
|
||||
}
|
||||
|
||||
func (r *RecyclableMemory) Clone() RecyclableMemory {
|
||||
return RecyclableMemory{
|
||||
Memory: Memory{
|
||||
Buffers: slices.Clone(r.Buffers),
|
||||
Size: r.Size,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RecyclableMemory) InitRecycleIndexes(max int) {
|
||||
}
|
||||
|
||||
|
||||
@@ -15,8 +15,14 @@ type RecyclableMemory struct {
|
||||
recycleIndexes []int
|
||||
}
|
||||
|
||||
func NewRecyclableMemory(allocator *ScalableMemoryAllocator) RecyclableMemory {
|
||||
return RecyclableMemory{allocator: allocator}
|
||||
}
|
||||
|
||||
func (r *RecyclableMemory) InitRecycleIndexes(max int) {
|
||||
r.recycleIndexes = make([]int, 0, max)
|
||||
if r.recycleIndexes == nil {
|
||||
r.recycleIndexes = make([]int, 0, max)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RecyclableMemory) GetAllocator() *ScalableMemoryAllocator {
|
||||
@@ -28,7 +34,7 @@ func (r *RecyclableMemory) NextN(size int) (memory []byte) {
|
||||
if r.recycleIndexes != nil {
|
||||
r.recycleIndexes = append(r.recycleIndexes, r.Count())
|
||||
}
|
||||
r.AppendOne(memory)
|
||||
r.PushOne(memory)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -36,7 +42,7 @@ func (r *RecyclableMemory) AddRecycleBytes(b []byte) {
|
||||
if r.recycleIndexes != nil {
|
||||
r.recycleIndexes = append(r.recycleIndexes, r.Count())
|
||||
}
|
||||
r.AppendOne(b)
|
||||
r.PushOne(b)
|
||||
}
|
||||
|
||||
func (r *RecyclableMemory) SetAllocator(allocator *ScalableMemoryAllocator) {
|
||||
@@ -54,6 +60,7 @@ func (r *RecyclableMemory) Recycle() {
|
||||
r.allocator.Free(buf)
|
||||
}
|
||||
}
|
||||
r.Reset()
|
||||
}
|
||||
|
||||
type MemoryAllocator struct {
|
||||
@@ -61,54 +68,14 @@ type MemoryAllocator struct {
|
||||
start int64
|
||||
memory []byte
|
||||
Size int
|
||||
buddy *Buddy
|
||||
}
|
||||
|
||||
// createMemoryAllocator 创建并初始化 MemoryAllocator
|
||||
func createMemoryAllocator(size int, buddy *Buddy, offset int) *MemoryAllocator {
|
||||
ret := &MemoryAllocator{
|
||||
allocator: NewAllocator(size),
|
||||
buddy: buddy,
|
||||
Size: size,
|
||||
memory: buddy.memoryPool[offset : offset+size],
|
||||
start: buddy.poolStart + int64(offset),
|
||||
}
|
||||
ret.allocator.Init(size)
|
||||
return ret
|
||||
}
|
||||
|
||||
func GetMemoryAllocator(size int) (ret *MemoryAllocator) {
|
||||
if size < BuddySize {
|
||||
requiredSize := size >> MinPowerOf2
|
||||
// 循环尝试从池中获取可用的 buddy
|
||||
for {
|
||||
buddy := GetBuddy()
|
||||
offset, err := buddy.Alloc(requiredSize)
|
||||
PutBuddy(buddy)
|
||||
if err == nil {
|
||||
// 分配成功,使用这个 buddy
|
||||
return createMemoryAllocator(size, buddy, offset<<MinPowerOf2)
|
||||
}
|
||||
}
|
||||
}
|
||||
// 池中的 buddy 都无法分配或大小不够,使用系统内存
|
||||
memory := make([]byte, size)
|
||||
start := int64(uintptr(unsafe.Pointer(&memory[0])))
|
||||
return &MemoryAllocator{
|
||||
allocator: NewAllocator(size),
|
||||
Size: size,
|
||||
memory: memory,
|
||||
start: start,
|
||||
}
|
||||
recycle func()
|
||||
}
|
||||
|
||||
func (ma *MemoryAllocator) Recycle() {
|
||||
ma.allocator.Recycle()
|
||||
if ma.buddy != nil {
|
||||
_ = ma.buddy.Free(int((ma.buddy.poolStart - ma.start) >> MinPowerOf2))
|
||||
ma.buddy = nil
|
||||
if ma.recycle != nil {
|
||||
ma.recycle()
|
||||
}
|
||||
ma.memory = nil
|
||||
}
|
||||
|
||||
func (ma *MemoryAllocator) Find(size int) (memory []byte) {
|
||||
|
||||
379
plugin.go
379
plugin.go
@@ -6,6 +6,7 @@ import (
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -18,6 +19,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"m7s.live/v5/pkg/task"
|
||||
|
||||
"github.com/quic-go/quic-go"
|
||||
@@ -25,8 +28,8 @@ import (
|
||||
gatewayRuntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
myip "github.com/husanpao/ip"
|
||||
"google.golang.org/grpc"
|
||||
"gopkg.in/yaml.v3"
|
||||
"gorm.io/gorm"
|
||||
|
||||
. "m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/config"
|
||||
"m7s.live/v5/pkg/db"
|
||||
@@ -63,9 +66,7 @@ type (
|
||||
|
||||
IPlugin interface {
|
||||
task.IJob
|
||||
OnInit() error
|
||||
OnStop()
|
||||
Pull(string, config.Pull, *config.Publish)
|
||||
Pull(string, config.Pull, *config.Publish) (*PullJob, error)
|
||||
Push(string, config.Push, *config.Subscribe)
|
||||
Transform(*Publisher, config.Transform)
|
||||
OnPublish(*Publisher)
|
||||
@@ -161,27 +162,46 @@ func (plugin *PluginMeta) Init(s *Server, userConfig map[string]any) (p *Plugin)
|
||||
return
|
||||
}
|
||||
}
|
||||
if err := s.AddTask(instance).WaitStarted(); err != nil {
|
||||
if err = s.AddTask(instance).WaitStarted(); err != nil {
|
||||
p.disable(instance.StopReason().Error())
|
||||
return
|
||||
}
|
||||
if err = p.listen(); err != nil {
|
||||
p.Stop(err)
|
||||
p.disable(err.Error())
|
||||
return
|
||||
}
|
||||
if p.Meta.ServiceDesc != nil && s.grpcServer != nil {
|
||||
s.grpcServer.RegisterService(p.Meta.ServiceDesc, p.handler)
|
||||
if p.Meta.RegisterGRPCHandler != nil {
|
||||
if err = p.Meta.RegisterGRPCHandler(p.Context, s.config.HTTP.GetGRPCMux(), s.grpcClientConn); err != nil {
|
||||
p.Stop(err)
|
||||
p.disable(fmt.Sprintf("grpc %v", err))
|
||||
return
|
||||
} else {
|
||||
p.Info("grpc handler registered")
|
||||
}
|
||||
}
|
||||
}
|
||||
if p.config.Hook != nil {
|
||||
if hook, ok := p.config.Hook[config.HookOnServerKeepAlive]; ok && hook.Interval > 0 {
|
||||
p.AddTask(&ServerKeepAliveTask{plugin: p})
|
||||
}
|
||||
}
|
||||
var handlers map[string]http.HandlerFunc
|
||||
if v, ok := instance.(IRegisterHandler); ok {
|
||||
handlers = v.RegisterHandler()
|
||||
}
|
||||
p.registerHandler(handlers)
|
||||
p.OnDispose(func() {
|
||||
s.Plugins.Remove(p)
|
||||
})
|
||||
s.Plugins.Add(p)
|
||||
return
|
||||
}
|
||||
|
||||
// InstallPlugin 安装插件
|
||||
func InstallPlugin[C iPlugin](options ...any) error {
|
||||
var meta PluginMeta
|
||||
for _, option := range options {
|
||||
if m, ok := option.(PluginMeta); ok {
|
||||
meta = m
|
||||
}
|
||||
}
|
||||
func InstallPlugin[C iPlugin](meta PluginMeta) error {
|
||||
var c *C
|
||||
meta.Type = reflect.TypeOf(c).Elem()
|
||||
if meta.Name == "" {
|
||||
@@ -196,30 +216,6 @@ func InstallPlugin[C iPlugin](options ...any) error {
|
||||
meta.Version = "dev"
|
||||
}
|
||||
}
|
||||
for _, option := range options {
|
||||
switch v := option.(type) {
|
||||
case OnExitHandler:
|
||||
meta.OnExit = v
|
||||
case DefaultYaml:
|
||||
meta.DefaultYaml = v
|
||||
case PullerFactory:
|
||||
meta.NewPuller = v
|
||||
case PusherFactory:
|
||||
meta.NewPusher = v
|
||||
case RecorderFactory:
|
||||
meta.NewRecorder = v
|
||||
case TransformerFactory:
|
||||
meta.NewTransformer = v
|
||||
case AuthPublisher:
|
||||
meta.OnAuthPub = v
|
||||
case AuthSubscriber:
|
||||
meta.OnAuthSub = v
|
||||
case *grpc.ServiceDesc:
|
||||
meta.ServiceDesc = v
|
||||
case func(context.Context, *gatewayRuntime.ServeMux, *grpc.ClientConn) error:
|
||||
meta.RegisterGRPCHandler = v
|
||||
}
|
||||
}
|
||||
plugins = append(plugins, meta)
|
||||
return nil
|
||||
}
|
||||
@@ -279,39 +275,6 @@ func (p *Plugin) disable(reason string) {
|
||||
p.Server.disabledPlugins = append(p.Server.disabledPlugins, p)
|
||||
}
|
||||
|
||||
func (p *Plugin) Start() (err error) {
|
||||
s := p.Server
|
||||
|
||||
if err = p.listen(); err != nil {
|
||||
return
|
||||
}
|
||||
if err = p.handler.OnInit(); err != nil {
|
||||
return
|
||||
}
|
||||
if p.Meta.ServiceDesc != nil && s.grpcServer != nil {
|
||||
s.grpcServer.RegisterService(p.Meta.ServiceDesc, p.handler)
|
||||
if p.Meta.RegisterGRPCHandler != nil {
|
||||
if err = p.Meta.RegisterGRPCHandler(p.Context, s.config.HTTP.GetGRPCMux(), s.grpcClientConn); err != nil {
|
||||
p.disable(fmt.Sprintf("grpc %v", err))
|
||||
return
|
||||
} else {
|
||||
p.Info("grpc handler registered")
|
||||
}
|
||||
}
|
||||
}
|
||||
if p.config.Hook != nil {
|
||||
if hook, ok := p.config.Hook[config.HookOnServerKeepAlive]; ok && hook.Interval > 0 {
|
||||
p.AddTask(&ServerKeepAliveTask{plugin: p})
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Plugin) Dispose() {
|
||||
p.handler.OnStop()
|
||||
p.Server.Plugins.Remove(p)
|
||||
}
|
||||
|
||||
func (p *Plugin) listen() (err error) {
|
||||
httpConf := &p.config.HTTP
|
||||
|
||||
@@ -371,13 +334,11 @@ func (p *Plugin) listen() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Plugin) OnInit() error {
|
||||
return nil
|
||||
type WebHookQueueTask struct {
|
||||
task.Work
|
||||
}
|
||||
|
||||
func (p *Plugin) OnStop() {
|
||||
|
||||
}
|
||||
var webHookQueueTask WebHookQueueTask
|
||||
|
||||
type WebHookTask struct {
|
||||
task.Task
|
||||
@@ -386,6 +347,7 @@ type WebHookTask struct {
|
||||
conf config.Webhook
|
||||
data any
|
||||
jsonData []byte
|
||||
alarm AlarmInfo
|
||||
}
|
||||
|
||||
func (t *WebHookTask) Start() error {
|
||||
@@ -393,10 +355,58 @@ func (t *WebHookTask) Start() error {
|
||||
return task.ErrTaskComplete
|
||||
}
|
||||
|
||||
var err error
|
||||
t.jsonData, err = json.Marshal(t.data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal webhook data: %w", err)
|
||||
// 处理AlarmInfo数据
|
||||
if t.data != nil {
|
||||
// 获取主机名和IP地址
|
||||
hostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
hostname = "unknown"
|
||||
}
|
||||
|
||||
// 获取本机IP地址
|
||||
var ipAddr string
|
||||
addrs, err := net.InterfaceAddrs()
|
||||
if err == nil {
|
||||
for _, addr := range addrs {
|
||||
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
|
||||
if ipnet.IP.To4() != nil {
|
||||
ipAddr = ipnet.IP.String()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if ipAddr == "" {
|
||||
ipAddr = "unknown"
|
||||
}
|
||||
|
||||
// 直接使用t.data作为AlarmInfo
|
||||
alarmInfo, ok := t.data.(AlarmInfo)
|
||||
if !ok {
|
||||
return fmt.Errorf("data is not of type AlarmInfo")
|
||||
}
|
||||
|
||||
// 更新服务器信息
|
||||
if alarmInfo.ServerInfo == "" {
|
||||
alarmInfo.ServerInfo = fmt.Sprintf("%s (%s)", hostname, ipAddr)
|
||||
}
|
||||
|
||||
// 确保时间戳已设置
|
||||
if alarmInfo.CreatedAt.IsZero() {
|
||||
alarmInfo.CreatedAt = time.Now()
|
||||
}
|
||||
if alarmInfo.UpdatedAt.IsZero() {
|
||||
alarmInfo.UpdatedAt = time.Now()
|
||||
}
|
||||
|
||||
// 将AlarmInfo序列化为JSON
|
||||
jsonData, err := json.Marshal(alarmInfo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal AlarmInfo to json: %w", err)
|
||||
}
|
||||
|
||||
t.jsonData = jsonData
|
||||
t.alarm = alarmInfo
|
||||
}
|
||||
|
||||
t.SetRetry(t.conf.RetryTimes, t.conf.RetryInterval)
|
||||
@@ -404,6 +414,20 @@ func (t *WebHookTask) Start() error {
|
||||
}
|
||||
|
||||
func (t *WebHookTask) Go() error {
|
||||
// 检查是否需要保存告警到数据库
|
||||
var dbID uint
|
||||
if t.conf.SaveAlarm && t.plugin.DB != nil {
|
||||
// 默认 IsSent 为 false
|
||||
t.alarm.IsSent = false
|
||||
if err := t.plugin.DB.Create(&t.alarm).Error; err != nil {
|
||||
t.plugin.Error("保存告警到数据库失败", "error", err)
|
||||
} else {
|
||||
dbID = t.alarm.ID
|
||||
t.plugin.Info(""+
|
||||
"", "id", dbID)
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(t.conf.Method, t.conf.URL, bytes.NewBuffer(t.jsonData))
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -420,28 +444,38 @@ func (t *WebHookTask) Go() error {
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.plugin.Error("webhook request failed", "error", err)
|
||||
t.plugin.Error("webhook请求失败", "error", err)
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// 如果发送成功且已保存到数据库,则更新IsSent字段为true
|
||||
if resp.StatusCode >= 200 && resp.StatusCode < 300 && t.conf.SaveAlarm && t.plugin.DB != nil && dbID > 0 {
|
||||
t.alarm.IsSent = true
|
||||
if err := t.plugin.DB.Model(&AlarmInfo{}).Where("id = ?", dbID).Update("is_sent", true).Error; err != nil {
|
||||
t.plugin.Error("更新告警发送状态失败", "error", err)
|
||||
} else {
|
||||
t.plugin.Info("告警发送状态已更新", "id", dbID, "is_sent", true)
|
||||
}
|
||||
return task.ErrTaskComplete
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
|
||||
return task.ErrTaskComplete
|
||||
}
|
||||
|
||||
err = fmt.Errorf("webhook request failed with status: %d", resp.StatusCode)
|
||||
t.plugin.Error("webhook response error", "status", resp.StatusCode)
|
||||
err = fmt.Errorf("webhook请求失败,状态码:%d", resp.StatusCode)
|
||||
t.plugin.Error("webhook响应错误", "状态码", resp.StatusCode)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *Plugin) SendWebhook(hookType config.HookType, data any) *task.Task {
|
||||
func (p *Plugin) SendWebhook(conf config.Webhook, data any) *task.Task {
|
||||
webhookTask := &WebHookTask{
|
||||
plugin: p,
|
||||
hookType: hookType,
|
||||
conf: p.config.Hook[hookType],
|
||||
data: data,
|
||||
plugin: p,
|
||||
conf: conf,
|
||||
data: data,
|
||||
}
|
||||
return p.AddTask(webhookTask)
|
||||
return webHookQueueTask.AddTask(webhookTask)
|
||||
}
|
||||
|
||||
// TODO: use alias stream
|
||||
@@ -514,7 +548,11 @@ func (p *Plugin) OnSubscribe(streamPath string, args url.Values) {
|
||||
if p.Meta.NewPuller != nil && reg.MatchString(streamPath) {
|
||||
conf.Args = config.HTTPValues(args)
|
||||
conf.URL = reg.Replace(streamPath, conf.URL)
|
||||
p.handler.Pull(streamPath, conf, nil)
|
||||
if job, err := p.handler.Pull(streamPath, conf, nil); err == nil {
|
||||
if w, ok := p.Server.Waiting.Get(streamPath); ok {
|
||||
job.Progress = &w.Progress
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -536,8 +574,19 @@ func (p *Plugin) OnSubscribe(streamPath string, args url.Values) {
|
||||
// }
|
||||
//}
|
||||
}
|
||||
|
||||
func (p *Plugin) PublishWithConfig(ctx context.Context, streamPath string, conf config.Publish) (publisher *Publisher, err error) {
|
||||
publisher = createPublisher(p, streamPath, conf)
|
||||
publisher = &Publisher{Publish: conf}
|
||||
publisher.Type = conf.PubType
|
||||
publisher.ID = task.GetNextTaskID()
|
||||
publisher.Plugin = p
|
||||
if conf.PublishTimeout > 0 {
|
||||
publisher.TimeoutTimer = time.NewTimer(conf.PublishTimeout)
|
||||
} else {
|
||||
publisher.TimeoutTimer = time.NewTimer(time.Hour * 24 * 365)
|
||||
}
|
||||
publisher.Logger = p.Logger.With("streamPath", streamPath, "pId", publisher.ID)
|
||||
publisher.Init(streamPath, &publisher.Publish)
|
||||
if p.config.EnableAuth && publisher.Type == PublishTypeServer {
|
||||
onAuthPub := p.Meta.OnAuthPub
|
||||
if onAuthPub == nil {
|
||||
@@ -555,35 +604,40 @@ func (p *Plugin) PublishWithConfig(ctx context.Context, streamPath string, conf
|
||||
}
|
||||
}
|
||||
}
|
||||
err = p.Server.Streams.AddTask(publisher, ctx).WaitStarted()
|
||||
if err == nil {
|
||||
if sender := p.getHookSender(config.HookOnPublishEnd); sender != nil {
|
||||
publisher.OnDispose(func() {
|
||||
webhookData := map[string]interface{}{
|
||||
"event": config.HookOnPublishEnd,
|
||||
"streamPath": publisher.StreamPath,
|
||||
"publishId": publisher.ID,
|
||||
"reason": publisher.StopReason().Error(),
|
||||
"timestamp": time.Now().Unix(),
|
||||
}
|
||||
sender(config.HookOnPublishEnd, webhookData)
|
||||
})
|
||||
}
|
||||
if sender := p.getHookSender(config.HookOnPublishStart); sender != nil {
|
||||
webhookData := map[string]interface{}{
|
||||
"event": config.HookOnPublishStart,
|
||||
"streamPath": publisher.StreamPath,
|
||||
"args": publisher.Args,
|
||||
"publishId": publisher.ID,
|
||||
"remoteAddr": publisher.RemoteAddr,
|
||||
"type": publisher.Type,
|
||||
"pluginName": p.Meta.Name,
|
||||
"timestamp": time.Now().Unix(),
|
||||
for {
|
||||
err = p.Server.Streams.Add(publisher, ctx).WaitStarted()
|
||||
if err == nil {
|
||||
if sender, webhook := p.getHookSender(config.HookOnPublishEnd); sender != nil {
|
||||
publisher.OnDispose(func() {
|
||||
alarmInfo := AlarmInfo{
|
||||
AlarmName: string(config.HookOnPublishEnd),
|
||||
AlarmDesc: publisher.StopReason().Error(),
|
||||
AlarmType: config.AlarmPublishOffline,
|
||||
StreamPath: publisher.StreamPath,
|
||||
}
|
||||
sender(webhook, alarmInfo)
|
||||
})
|
||||
}
|
||||
sender(config.HookOnPublishStart, webhookData)
|
||||
if sender, webhook := p.getHookSender(config.HookOnPublishStart); sender != nil {
|
||||
alarmInfo := AlarmInfo{
|
||||
AlarmName: string(config.HookOnPublishStart),
|
||||
AlarmType: config.AlarmPublishRecover,
|
||||
StreamPath: publisher.StreamPath,
|
||||
}
|
||||
sender(webhook, alarmInfo)
|
||||
}
|
||||
return
|
||||
} else if oldStream := new(task.ExistTaskError); errors.As(err, oldStream) {
|
||||
if conf.KickExist {
|
||||
publisher.takeOver(oldStream.Task.(*Publisher))
|
||||
oldStream.Task.WaitStopped()
|
||||
} else {
|
||||
return nil, ErrStreamExist
|
||||
}
|
||||
} else {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Plugin) Publish(ctx context.Context, streamPath string) (publisher *Publisher, err error) {
|
||||
@@ -613,39 +667,32 @@ func (p *Plugin) SubscribeWithConfig(ctx context.Context, streamPath string, con
|
||||
if err == nil {
|
||||
select {
|
||||
case <-subscriber.waitPublishDone:
|
||||
err = subscriber.Publisher.WaitTrack()
|
||||
waitAudio := conf.WaitTrack == "all" || strings.Contains(conf.WaitTrack, "audio")
|
||||
waitVideo := conf.WaitTrack == "all" || strings.Contains(conf.WaitTrack, "video")
|
||||
err = subscriber.Publisher.WaitTrack(waitAudio, waitVideo)
|
||||
case <-subscriber.Done():
|
||||
err = subscriber.StopReason()
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
if sender := p.getHookSender(config.HookOnSubscribeEnd); sender != nil {
|
||||
if sender, webhook := p.getHookSender(config.HookOnSubscribeEnd); sender != nil {
|
||||
subscriber.OnDispose(func() {
|
||||
webhookData := map[string]interface{}{
|
||||
"event": config.HookOnSubscribeEnd,
|
||||
"streamPath": subscriber.StreamPath,
|
||||
"subscriberId": subscriber.ID,
|
||||
"reason": subscriber.StopReason().Error(),
|
||||
"timestamp": time.Now().Unix(),
|
||||
alarmInfo := AlarmInfo{
|
||||
AlarmName: string(config.HookOnSubscribeEnd),
|
||||
AlarmDesc: subscriber.StopReason().Error(),
|
||||
AlarmType: config.AlarmSubscribeOffline,
|
||||
StreamPath: subscriber.StreamPath,
|
||||
}
|
||||
if subscriber.Publisher != nil {
|
||||
webhookData["publishId"] = subscriber.Publisher.ID
|
||||
}
|
||||
sender(config.HookOnSubscribeEnd, webhookData)
|
||||
sender(webhook, alarmInfo)
|
||||
})
|
||||
}
|
||||
if sender := p.getHookSender(config.HookOnSubscribeStart); sender != nil {
|
||||
webhookData := map[string]interface{}{
|
||||
"event": config.HookOnSubscribeStart,
|
||||
"streamPath": subscriber.StreamPath,
|
||||
"publishId": subscriber.Publisher.ID,
|
||||
"subscriberId": subscriber.ID,
|
||||
"remoteAddr": subscriber.RemoteAddr,
|
||||
"type": subscriber.Type,
|
||||
"args": subscriber.Args,
|
||||
"timestamp": time.Now().Unix(),
|
||||
if sender, webhook := p.getHookSender(config.HookOnSubscribeStart); sender != nil {
|
||||
alarmInfo := AlarmInfo{
|
||||
AlarmName: string(config.HookOnSubscribeStart),
|
||||
AlarmType: config.AlarmSubscribeRecover,
|
||||
StreamPath: subscriber.StreamPath,
|
||||
}
|
||||
sender(config.HookOnSubscribeStart, webhookData)
|
||||
sender(webhook, alarmInfo)
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -655,12 +702,14 @@ func (p *Plugin) Subscribe(ctx context.Context, streamPath string) (subscriber *
|
||||
return p.SubscribeWithConfig(ctx, streamPath, p.config.Subscribe)
|
||||
}
|
||||
|
||||
func (p *Plugin) Pull(streamPath string, conf config.Pull, pubConf *config.Publish) {
|
||||
func (p *Plugin) Pull(streamPath string, conf config.Pull, pubConf *config.Publish) (job *PullJob, err error) {
|
||||
puller := p.Meta.NewPuller(conf)
|
||||
if puller == nil {
|
||||
return
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
puller.GetPullJob().Init(puller, p, streamPath, conf, pubConf)
|
||||
job = puller.GetPullJob()
|
||||
job.Init(puller, p, streamPath, conf, pubConf)
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Plugin) Push(streamPath string, conf config.Push, subConf *config.Subscribe) {
|
||||
@@ -671,14 +720,13 @@ func (p *Plugin) Push(streamPath string, conf config.Push, subConf *config.Subsc
|
||||
func (p *Plugin) Record(pub *Publisher, conf config.Record, subConf *config.Subscribe) *RecordJob {
|
||||
recorder := p.Meta.NewRecorder(conf)
|
||||
job := recorder.GetRecordJob().Init(recorder, p, pub.StreamPath, conf, subConf)
|
||||
job.Depend(pub)
|
||||
pub.Using(job)
|
||||
return job
|
||||
}
|
||||
|
||||
func (p *Plugin) Transform(pub *Publisher, conf config.Transform) {
|
||||
transformer := p.Meta.NewTransformer()
|
||||
job := transformer.GetTransformJob().Init(transformer, p, pub, conf)
|
||||
job.Depend(pub)
|
||||
pub.Using(transformer.GetTransformJob().Init(transformer, p, pub, conf))
|
||||
}
|
||||
|
||||
func (p *Plugin) registerHandler(handlers map[string]http.HandlerFunc) {
|
||||
@@ -716,10 +764,11 @@ func (p *Plugin) registerHandler(handlers map[string]http.HandlerFunc) {
|
||||
streamPath := r.PathValue("streamPath")
|
||||
t := r.PathValue("type")
|
||||
expire := r.URL.Query().Get("expire")
|
||||
if t == "publish" {
|
||||
switch t {
|
||||
case "publish":
|
||||
secret := md5.Sum([]byte(p.config.Publish.Key + streamPath + expire))
|
||||
rw.Write([]byte(hex.EncodeToString(secret[:])))
|
||||
} else if t == "subscribe" {
|
||||
case "subscribe":
|
||||
secret := md5.Sum([]byte(p.config.Subscribe.Key + streamPath + expire))
|
||||
rw.Write([]byte(hex.EncodeToString(secret[:])))
|
||||
}
|
||||
@@ -757,13 +806,21 @@ func (p *Plugin) handle(pattern string, handler http.Handler) {
|
||||
p.Server.apiList = append(p.Server.apiList, pattern)
|
||||
}
|
||||
|
||||
func (p *Plugin) getHookSender(hookType config.HookType) (sender func(hookType config.HookType, data any) *task.Task) {
|
||||
func (p *Plugin) getHookSender(hookType config.HookType) (sender func(webhook config.Webhook, data any) *task.Task, conf config.Webhook) {
|
||||
if p.config.Hook != nil {
|
||||
if _, ok := p.config.Hook[hookType]; ok {
|
||||
sender = p.SendWebhook
|
||||
conf = p.config.Hook[hookType]
|
||||
} else if _, ok := p.config.Hook[config.HookDefault]; ok {
|
||||
sender = p.SendWebhook
|
||||
conf = p.config.Hook[config.HookDefault]
|
||||
} else if p.Server.config.Hook != nil {
|
||||
if _, ok := p.Server.config.Hook[hookType]; ok {
|
||||
conf = p.config.Hook[hookType]
|
||||
sender = p.Server.SendWebhook
|
||||
} else if _, ok := p.Server.config.Hook[config.HookDefault]; ok {
|
||||
sender = p.Server.SendWebhook
|
||||
conf = p.config.Hook[config.HookDefault]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -780,19 +837,25 @@ func (t *ServerKeepAliveTask) GetTickInterval() time.Duration {
|
||||
}
|
||||
|
||||
func (t *ServerKeepAliveTask) Tick(now any) {
|
||||
sender := t.plugin.getHookSender(config.HookOnServerKeepAlive)
|
||||
sender, webhook := t.plugin.getHookSender(config.HookOnServerKeepAlive)
|
||||
if sender == nil {
|
||||
return
|
||||
}
|
||||
s := t.plugin.Server
|
||||
webhookData := map[string]interface{}{
|
||||
"event": config.HookOnServerKeepAlive,
|
||||
"timestamp": time.Now().Unix(),
|
||||
"streams": s.Streams.Length,
|
||||
"subscribers": s.Subscribers.Length,
|
||||
"publisherCount": s.Streams.Length,
|
||||
"subscriberCount": s.Subscribers.Length,
|
||||
"uptime": time.Since(s.StartTime).Seconds(),
|
||||
//s := t.plugin.Server
|
||||
alarmInfo := AlarmInfo{
|
||||
AlarmName: string(config.HookOnServerKeepAlive),
|
||||
AlarmType: config.AlarmKeepAliveOnline,
|
||||
StreamPath: "",
|
||||
}
|
||||
sender(config.HookOnServerKeepAlive, webhookData)
|
||||
sender(webhook, alarmInfo)
|
||||
//webhookData := map[string]interface{}{
|
||||
// "event": config.HookOnServerKeepAlive,
|
||||
// "timestamp": time.Now().Unix(),
|
||||
// "streams": s.Streams.Length,
|
||||
// "subscribers": s.Subscribers.Length,
|
||||
// "publisherCount": s.Streams.Length,
|
||||
// "subscriberCount": s.Subscribers.Length,
|
||||
// "uptime": time.Since(s.StartTime).Seconds(),
|
||||
//}
|
||||
//sender(webhook, webhookData)
|
||||
}
|
||||
|
||||
@@ -53,14 +53,16 @@ Example:
|
||||
const defaultConfig = m7s.DefaultYaml(`tcp:
|
||||
listenaddr: :5554`)
|
||||
|
||||
var _ = m7s.InstallPlugin[MyPlugin](defaultConfig)
|
||||
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
|
||||
DefaultYaml: defaultConfig,
|
||||
})
|
||||
```
|
||||
|
||||
## 3. Implement Event Callbacks (Optional)
|
||||
|
||||
### Initialization Callback
|
||||
```go
|
||||
func (config *MyPlugin) OnInit() (err error) {
|
||||
func (config *MyPlugin) Start() (err error) {
|
||||
// Initialize things
|
||||
return
|
||||
}
|
||||
@@ -121,22 +123,25 @@ func (config *MyPlugin) test1(rw http.ResponseWriter, r *http.Request) {
|
||||
Push client needs to implement IPusher interface and pass the creation method to InstallPlugin.
|
||||
```go
|
||||
type Pusher struct {
|
||||
pullCtx m7s.PullJob
|
||||
task.Task
|
||||
pushJob m7s.PushJob
|
||||
}
|
||||
|
||||
func (c *Pusher) GetPullJob() *m7s.PullJob {
|
||||
return &c.pullCtx
|
||||
func (c *Pusher) GetPushJob() *m7s.PushJob {
|
||||
return &c.pushJob
|
||||
}
|
||||
|
||||
func NewPusher(_ config.Push) m7s.IPusher {
|
||||
return &Pusher{}
|
||||
}
|
||||
var _ = m7s.InstallPlugin[MyPlugin](NewPusher)
|
||||
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
|
||||
NewPusher: NewPusher,
|
||||
})
|
||||
```
|
||||
|
||||
### Implement Pull Client
|
||||
Pull client needs to implement IPuller interface and pass the creation method to InstallPlugin.
|
||||
The following Puller inherits from m7s.HTTPFilePuller for basic file and HTTP pulling:
|
||||
The following Puller inherits from m7s.HTTPFilePuller for basic file and HTTP pulling. You need to override the Start method for specific pulling logic:
|
||||
```go
|
||||
type Puller struct {
|
||||
m7s.HTTPFilePuller
|
||||
@@ -145,7 +150,9 @@ type Puller struct {
|
||||
func NewPuller(_ config.Pull) m7s.IPuller {
|
||||
return &Puller{}
|
||||
}
|
||||
var _ = m7s.InstallPlugin[MyPlugin](NewPuller)
|
||||
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
|
||||
NewPuller: NewPuller,
|
||||
})
|
||||
```
|
||||
|
||||
## 6. Implement gRPC Service
|
||||
@@ -226,7 +233,10 @@ import (
|
||||
"m7s.live/v5/plugin/myplugin/pb"
|
||||
)
|
||||
|
||||
var _ = m7s.InstallPlugin[MyPlugin](&pb.Api_ServiceDesc, pb.RegisterApiHandler)
|
||||
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
|
||||
ServiceDesc: &pb.Api_ServiceDesc,
|
||||
RegisterGRPCHandler: pb.RegisterApiHandler,
|
||||
})
|
||||
|
||||
type MyPlugin struct {
|
||||
pb.UnimplementedApiServer
|
||||
@@ -257,33 +267,25 @@ After obtaining the `publisher`, you can publish audio/video data using `publish
|
||||
If existing audio/video data formats don't meet your needs, you can define custom formats by implementing this interface:
|
||||
```go
|
||||
IAVFrame interface {
|
||||
GetAllocator() *util.ScalableMemoryAllocator
|
||||
SetAllocator(*util.ScalableMemoryAllocator)
|
||||
Parse(*AVTrack) error
|
||||
ConvertCtx(codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error)
|
||||
Demux(codec.ICodecCtx) (any, error)
|
||||
Mux(codec.ICodecCtx, *AVFrame)
|
||||
GetTimestamp() time.Duration
|
||||
GetCTS() time.Duration
|
||||
GetSample() *Sample
|
||||
GetSize() int
|
||||
CheckCodecChange() error
|
||||
Demux() error // demux to raw format
|
||||
Mux(*Sample) error // mux from origin format
|
||||
Recycle()
|
||||
String() string
|
||||
Dump(byte, io.Writer)
|
||||
}
|
||||
```
|
||||
> Define separate types for audio and video
|
||||
|
||||
- GetAllocator/SetAllocator: Automatically implemented when embedding RecyclableMemory
|
||||
- Parse: Identifies key frames, sequence frames, and other important information
|
||||
- ConvertCtx: Called when protocol conversion is needed
|
||||
- Demux: Called when audio/video data needs to be demuxed
|
||||
- Mux: Called when audio/video data needs to be muxed
|
||||
- Recycle: Automatically implemented when embedding RecyclableMemory
|
||||
- String: Prints audio/video data information
|
||||
The methods serve the following purposes:
|
||||
- GetSample: Gets the Sample object containing codec context and raw data
|
||||
- GetSize: Gets the size of audio/video data
|
||||
- GetTimestamp: Gets the timestamp in nanoseconds
|
||||
- GetCTS: Gets the Composition Time Stamp in nanoseconds (PTS = DTS+CTS)
|
||||
- Dump: Prints binary audio/video data
|
||||
- CheckCodecChange: Checks if the codec has changed
|
||||
- Demux: Demuxes audio/video data to raw format for use by other formats
|
||||
- Mux: Muxes from original format to custom audio/video data format
|
||||
- Recycle: Recycles resources, automatically implemented when embedding RecyclableMemory
|
||||
- String: Prints audio/video data information
|
||||
|
||||
## 8. Subscribing to Streams
|
||||
```go
|
||||
|
||||
@@ -51,12 +51,14 @@ type MyPlugin struct {
|
||||
const defaultConfig = m7s.DefaultYaml(`tcp:
|
||||
listenaddr: :5554`)
|
||||
|
||||
var _ = m7s.InstallPlugin[MyPlugin](defaultConfig)
|
||||
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
|
||||
DefaultYaml: defaultConfig,
|
||||
})
|
||||
```
|
||||
## 3. 实现事件回调(可选)
|
||||
### 初始化回调
|
||||
```go
|
||||
func (config *MyPlugin) OnInit() (err error) {
|
||||
func (config *MyPlugin) Start() (err error) {
|
||||
// 初始化一些东西
|
||||
return
|
||||
}
|
||||
@@ -113,26 +115,29 @@ func (config *MyPlugin) test1(rw http.ResponseWriter, r *http.Request) {
|
||||
## 5. 实现推拉流客户端
|
||||
|
||||
### 实现推流客户端
|
||||
推流客户端就是想要实现一个 IPusher,然后将创建 IPusher 的方法传入 InstallPlugin 中。
|
||||
推流客户端需要实现 IPusher 接口,然后将创建 IPusher 的方法传入 InstallPlugin 中。
|
||||
```go
|
||||
type Pusher struct {
|
||||
pullCtx m7s.PullJob
|
||||
task.Task
|
||||
pushJob m7s.PushJob
|
||||
}
|
||||
|
||||
func (c *Pusher) GetPullJob() *m7s.PullJob {
|
||||
return &c.pullCtx
|
||||
func (c *Pusher) GetPushJob() *m7s.PushJob {
|
||||
return &c.pushJob
|
||||
}
|
||||
|
||||
func NewPusher(_ config.Push) m7s.IPusher {
|
||||
return &Pusher{}
|
||||
}
|
||||
var _ = m7s.InstallPlugin[MyPlugin](NewPusher)
|
||||
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
|
||||
NewPusher: NewPusher,
|
||||
})
|
||||
|
||||
```
|
||||
|
||||
### 实现拉流客户端
|
||||
拉流客户端就是想要实现一个 IPuller,然后将创建 IPuller 的方法传入 InstallPlugin 中。
|
||||
下面这个 Puller 继承了 m7s.HTTPFilePuller,可以实现基本的文件和 HTTP拉流。具体拉流逻辑需要覆盖 Run 方法。
|
||||
拉流客户端需要实现 IPuller 接口,然后将创建 IPuller 的方法传入 InstallPlugin 中。
|
||||
下面这个 Puller 继承了 m7s.HTTPFilePuller,可以实现基本的文件和 HTTP拉流。具体拉流逻辑需要覆盖 Start 方法。
|
||||
```go
|
||||
type Puller struct {
|
||||
m7s.HTTPFilePuller
|
||||
@@ -141,7 +146,9 @@ type Puller struct {
|
||||
func NewPuller(_ config.Pull) m7s.IPuller {
|
||||
return &Puller{}
|
||||
}
|
||||
var _ = m7s.InstallPlugin[MyPlugin](NewPuller)
|
||||
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
|
||||
NewPuller: NewPuller,
|
||||
})
|
||||
```
|
||||
|
||||
## 6. 实现gRPC服务
|
||||
@@ -221,7 +228,10 @@ import (
|
||||
"m7s.live/v5/plugin/myplugin/pb"
|
||||
)
|
||||
|
||||
var _ = m7s.InstallPlugin[MyPlugin](&pb.Api_ServiceDesc, pb.RegisterApiHandler)
|
||||
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
|
||||
ServiceDesc: &pb.Api_ServiceDesc,
|
||||
RegisterGRPCHandler: pb.RegisterApiHandler,
|
||||
})
|
||||
|
||||
type MyPlugin struct {
|
||||
pb.UnimplementedApiServer
|
||||
@@ -253,35 +263,25 @@ publisher, err = p.Publish(streamPath, connectInfo)
|
||||
但需要满足转换格式的要求。即需要实现下面这个接口:
|
||||
```go
|
||||
IAVFrame interface {
|
||||
GetAllocator() *util.ScalableMemoryAllocator
|
||||
SetAllocator(*util.ScalableMemoryAllocator)
|
||||
Parse(*AVTrack) error // get codec info, idr
|
||||
ConvertCtx(codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) // convert codec from source stream
|
||||
Demux(codec.ICodecCtx) (any, error) // demux to raw format
|
||||
Mux(codec.ICodecCtx, *AVFrame) // mux from raw format
|
||||
GetTimestamp() time.Duration
|
||||
GetCTS() time.Duration
|
||||
GetSample() *Sample
|
||||
GetSize() int
|
||||
CheckCodecChange() error
|
||||
Demux() error // demux to raw format
|
||||
Mux(*Sample) error // mux from origin format
|
||||
Recycle()
|
||||
String() string
|
||||
Dump(byte, io.Writer)
|
||||
}
|
||||
```
|
||||
> 音频和视频需要定义两个不同的类型
|
||||
|
||||
其中 `Parse` 方法用于解析音视频数据,`ConvertCtx` 方法用于转换音视频数据格式的上下文,`Demux` 方法用于解封装音视频数据,`Mux` 方法用于封装音视频数据,`Recycle` 方法用于回收资源。
|
||||
- GetAllocator 方法用于获取内存分配器。(嵌入 RecyclableMemory 会自动实现)
|
||||
- SetAllocator 方法用于设置内存分配器。(嵌入 RecyclableMemory 会自动实现)
|
||||
- Parse方法主要从数据中识别关键帧,序列帧等重要信息。
|
||||
- ConvertCtx 会在需要转换协议的时候调用,传入原始的协议上下文,返回新的协议上下文(即自定义格式的上下文)。
|
||||
- Demux 会在需要解封装音视频数据的时候调用,传入协议上下文,返回解封装后的音视频数据,用于给其他格式封装使用。
|
||||
- Mux 会在需要封装音视频数据的时候调用,传入协议上下文和解封装后的音视频数据,用于封装成自定义格式的音视频数据。
|
||||
- Recycle 方法会在嵌入 RecyclableMemory 时自动实现,无需手动实现。
|
||||
- String 方法用于打印音视频数据的信息。
|
||||
其中各方法的作用如下:
|
||||
- GetSample 方法用于获取音视频数据的Sample对象,包含编解码上下文和原始数据。
|
||||
- GetSize 方法用于获取音视频数据的大小。
|
||||
- GetTimestamp 方法用于获取音视频数据的时间戳(单位:纳秒)。
|
||||
- GetCTS 方法用于获取音视频数据的Composition Time Stamp(单位:纳秒)。PTS = DTS+CTS
|
||||
- Dump 方法用于打印音视频数据的二进制数据。
|
||||
- CheckCodecChange 方法用于检查编解码器是否发生变化。
|
||||
- Demux 方法用于解封装音视频数据到裸格式,用于给其他格式封装使用。
|
||||
- Mux 方法用于从原始格式封装成自定义格式的音视频数据。
|
||||
- Recycle 方法用于回收资源,会在嵌入 RecyclableMemory 时自动实现。
|
||||
- String 方法用于打印音视频数据的信息。
|
||||
|
||||
### 6. 订阅流
|
||||
```go
|
||||
|
||||
@@ -19,10 +19,12 @@ type CascadeClientPlugin struct {
|
||||
AutoPush bool `desc:"自动推流到上级"` //自动推流到上级
|
||||
Server string `desc:"上级服务器"` // TODO: support multiple servers
|
||||
Secret string `desc:"连接秘钥"`
|
||||
conn quic.Connection
|
||||
client *CascadeClient
|
||||
}
|
||||
|
||||
var _ = m7s.InstallPlugin[CascadeClientPlugin](cascade.NewCascadePuller)
|
||||
var _ = m7s.InstallPlugin[CascadeClientPlugin](m7s.PluginMeta{
|
||||
NewPuller: cascade.NewCascadePuller,
|
||||
})
|
||||
|
||||
type CascadeClient struct {
|
||||
task.Work
|
||||
@@ -79,7 +81,7 @@ func (task *CascadeClient) Run() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (c *CascadeClientPlugin) OnInit() (err error) {
|
||||
func (c *CascadeClientPlugin) Start() (err error) {
|
||||
if c.Secret == "" && c.Server == "" {
|
||||
return nil
|
||||
}
|
||||
@@ -88,14 +90,17 @@ func (c *CascadeClientPlugin) OnInit() (err error) {
|
||||
}
|
||||
connectTask.SetRetry(-1, time.Second)
|
||||
c.AddTask(&connectTask)
|
||||
c.client = &connectTask
|
||||
return
|
||||
}
|
||||
|
||||
func (c *CascadeClientPlugin) Pull(streamPath string, conf config.Pull, pub *config.Publish) {
|
||||
func (c *CascadeClientPlugin) Pull(streamPath string, conf config.Pull, pub *config.Publish) (job *m7s.PullJob, err error) {
|
||||
puller := &cascade.Puller{
|
||||
Connection: c.conn,
|
||||
Connection: c.client.Connection,
|
||||
}
|
||||
puller.GetPullJob().Init(puller, &c.Plugin, streamPath, conf, pub)
|
||||
job = puller.GetPullJob()
|
||||
job.Init(puller, &c.Plugin, streamPath, conf, pub)
|
||||
return
|
||||
}
|
||||
|
||||
//func (c *CascadeClientPlugin) Start() {
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
|
||||
"github.com/quic-go/quic-go"
|
||||
"m7s.live/v5"
|
||||
"m7s.live/v5/pkg/config"
|
||||
flv "m7s.live/v5/plugin/flv/pkg"
|
||||
)
|
||||
|
||||
@@ -17,7 +18,7 @@ func (p *Puller) GetPullJob() *m7s.PullJob {
|
||||
return &p.PullJob
|
||||
}
|
||||
|
||||
func NewCascadePuller() m7s.IPuller {
|
||||
func NewCascadePuller(config.Pull) m7s.IPuller {
|
||||
return &Puller{}
|
||||
}
|
||||
|
||||
|
||||
@@ -29,7 +29,7 @@ type CascadeServerPlugin struct {
|
||||
clients util.Collection[uint, *cascade.Instance]
|
||||
}
|
||||
|
||||
func (c *CascadeServerPlugin) OnInit() (err error) {
|
||||
func (c *CascadeServerPlugin) Start() (err error) {
|
||||
if c.GetCommonConf().Quic.ListenAddr == "" {
|
||||
return pkg.ErrNotListen
|
||||
}
|
||||
@@ -50,8 +50,12 @@ func (c *CascadeServerPlugin) OnInit() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
var _ = m7s.InstallPlugin[CascadeServerPlugin](m7s.DefaultYaml(`quic:
|
||||
listenaddr: :44944`), &pb.Server_ServiceDesc, pb.RegisterServerHandler)
|
||||
var _ = m7s.InstallPlugin[CascadeServerPlugin](m7s.PluginMeta{
|
||||
DefaultYaml: `quic:
|
||||
listenaddr: :44944`,
|
||||
ServiceDesc: &pb.Server_ServiceDesc,
|
||||
RegisterGRPCHandler: pb.RegisterServerHandler,
|
||||
})
|
||||
|
||||
type CascadeServer struct {
|
||||
task.Work
|
||||
|
||||
170
plugin/crontab/README.md
Normal file
170
plugin/crontab/README.md
Normal file
@@ -0,0 +1,170 @@
|
||||
# M7S Crontab 插件说明文档
|
||||
|
||||
## 1. 插件概述
|
||||
|
||||
Crontab 插件是 M7S 流媒体服务器的一个扩展组件,主要用于实现基于时间计划的自动录制功能。该插件允许用户创建定时录制计划,根据预设的时间表自动开始和停止流媒体的录制,支持灵活的周期性录制设置。
|
||||
|
||||
## 2. 核心功能
|
||||
|
||||
- **定时录制计划管理**:创建、更新、删除和查询录制计划
|
||||
- **流路径关联**:将录制计划与特定的流路径关联
|
||||
- **时间表设置**:通过 168 位的二进制字符串(7天×24小时)定义每周的录制时间段
|
||||
- **自动录制控制**:根据时间计划自动开始和停止录制
|
||||
- **状态监控**:查询当前正在执行和计划中的录制任务状态
|
||||
|
||||
## 3. 文件结构与功能
|
||||
|
||||
### 3.1 主要文件
|
||||
|
||||
#### 3.1.1 `index.go`
|
||||
|
||||
插件的入口文件,定义了 `CrontabPlugin` 结构体和初始化方法。
|
||||
|
||||
- **主要结构**:
|
||||
- `CrontabPlugin`:插件的主结构体,包含插件基础功能、API服务器实现和数据集合
|
||||
- **主要功能**:
|
||||
- `OnInit()`:插件初始化函数,负责数据库迁移、加载已有录制计划并创建相应的定时任务
|
||||
|
||||
#### 3.1.2 `crontab.go`
|
||||
|
||||
定义了定时任务的核心逻辑和执行流程。
|
||||
|
||||
- **主要结构**:
|
||||
- `TimeSlot`:表示一个时间段,包含开始和结束时间
|
||||
- `Crontab`:定时任务调度器,负责根据计划执行录制操作
|
||||
- **主要功能**:
|
||||
- `Start()`:初始化定时任务
|
||||
- `Run()`:阻塞运行定时任务,循环检查并执行录制操作
|
||||
- `Dispose()`:停止定时任务
|
||||
- `getNextTimeSlot()`:计算下一个需要执行的时间段
|
||||
- `startRecording()`:开始录制流
|
||||
- `stopRecording()`:停止录制流
|
||||
|
||||
#### 3.1.3 `api.go`
|
||||
|
||||
实现了插件的 API 接口,提供了与前端交互的功能。
|
||||
|
||||
- **主要功能**:
|
||||
- 录制计划管理:`List()`, `Add()`, `Update()`, `Remove()`
|
||||
- 录制计划流关联管理:`ListRecordPlanStreams()`, `AddRecordPlanStream()`, `UpdateRecordPlanStream()`, `RemoveRecordPlanStream()`
|
||||
- 计划解析:`ParsePlanTime()`
|
||||
- 状态查询:`GetCrontabStatus()`
|
||||
- **辅助功能**:
|
||||
- 时间计算:`getWeekdayName()`, `getWeekdayIndex()`, `getNextDateForWeekday()`
|
||||
- 时间段计算:`calculateTimeSlots()`, `getNextTimeSlotFromNow()`
|
||||
|
||||
### 3.2 子目录文件
|
||||
|
||||
#### 3.2.1 `pkg` 目录
|
||||
|
||||
包含数据模型定义和数据库操作相关功能。
|
||||
|
||||
- **`recordplan.go`**:
|
||||
- 定义 `RecordPlan` 结构体,表示录制计划的数据模型
|
||||
- 包含计划ID、名称、时间表和启用状态等字段
|
||||
|
||||
- **`recordplanstream.go`**:
|
||||
- 定义 `RecordPlanStream` 结构体,表示录制计划与流路径的关联
|
||||
- 提供数据库查询的辅助函数,如按流路径模糊查询、按创建时间排序等
|
||||
|
||||
#### 3.2.2 `pb` 目录
|
||||
|
||||
包含 Protocol Buffers 定义和生成的代码,用于 API 接口和数据传输。
|
||||
|
||||
- **`crontab.proto`**:
|
||||
- 定义了插件的 API 服务接口
|
||||
- 定义了各种请求和响应消息结构
|
||||
- 包含 HTTP 路由映射配置
|
||||
|
||||
- **`crontab.pb.go`, `crontab.pb.gw.go`, `crontab_grpc.pb.go`**:
|
||||
- 由 Protocol Buffers 编译器自动生成的 Go 代码
|
||||
- 实现了消息序列化/反序列化和 gRPC 服务接口
|
||||
|
||||
## 4. 工作流程
|
||||
|
||||
### 4.1 插件初始化流程
|
||||
|
||||
1. 插件启动时,`OnInit()` 方法被调用
|
||||
2. 执行数据库迁移,确保必要的表结构存在
|
||||
3. 从数据库加载所有录制计划和关联的流信息
|
||||
4. 对于已启用的计划,创建并启动相应的定时任务
|
||||
|
||||
### 4.2 录制计划执行流程
|
||||
|
||||
1. 定时任务启动后,进入 `Run()` 方法的循环
|
||||
2. 通过 `getNextTimeSlot()` 计算下一个需要执行的时间段
|
||||
3. 设置定时器等待到达开始时间
|
||||
4. 到达开始时间后,调用 `startRecording()` 开始录制
|
||||
5. 设置定时器等待到达结束时间
|
||||
6. 到达结束时间后,调用 `stopRecording()` 停止录制
|
||||
7. 循环继续,计算下一个时间段
|
||||
|
||||
### 4.3 API 交互流程
|
||||
|
||||
1. 前端通过 HTTP/gRPC 接口与插件交互
|
||||
2. 可以创建、更新、删除录制计划和流关联
|
||||
3. 可以查询当前正在执行和计划中的录制任务状态
|
||||
4. 可以解析计划字符串,获取时间段信息
|
||||
|
||||
## 5. 关键概念
|
||||
|
||||
### 5.1 录制计划 (RecordPlan)
|
||||
|
||||
录制计划定义了何时进行录制的时间表。每个计划包含:
|
||||
- **ID**:唯一标识符
|
||||
- **名称**:计划名称
|
||||
- **时间表**:168位的二进制字符串,表示一周中每个小时是否进行录制
|
||||
- **启用状态**:是否启用该计划
|
||||
|
||||
### 5.2 录制计划流 (RecordPlanStream)
|
||||
|
||||
将录制计划与特定的流路径关联,定义了录制的具体参数:
|
||||
- **计划ID**:关联的录制计划ID
|
||||
- **流路径**:要录制的流的路径
|
||||
- **分片设置**:录制文件的分片参数
|
||||
- **文件路径**:录制文件的保存路径
|
||||
- **启用状态**:是否启用该关联
|
||||
|
||||
### 5.3 时间表格式
|
||||
|
||||
时间表使用 168 位的二进制字符串表示一周中的每个小时是否进行录制:
|
||||
- 每天 24 小时,一周 7 天,共 168 小时
|
||||
- 字符串中的每一位对应一个小时,'1' 表示录制,'0' 表示不录制
|
||||
- 字符串按周日到周六的顺序排列,每天 24 位
|
||||
|
||||
例如:
|
||||
- 全为 '0':一周中不进行任何录制
|
||||
- 前 24 位为 '1',其余为 '0':仅在周日全天录制
|
||||
- 每天的第 9 位到第 17 位为 '1':每天上午 9 点到下午 5 点录制
|
||||
|
||||
## 6. 使用场景
|
||||
|
||||
1. **定期节目录制**:适用于每周固定时间播出的节目自动录制
|
||||
2. **工作时间监控**:仅在工作时间段自动录制监控视频
|
||||
3. **带宽管理**:在网络带宽充足的时间段进行录制,避开高峰期
|
||||
4. **存储优化**:只录制有价值的时间段,节省存储空间
|
||||
|
||||
## 7. API 接口说明
|
||||
|
||||
### 7.1 录制计划管理
|
||||
|
||||
- **列表查询**:`GET /plan/api/list`
|
||||
- **添加计划**:`POST /plan/api/add`
|
||||
- **更新计划**:`POST /plan/api/update/{id}`
|
||||
- **删除计划**:`POST /plan/api/remove/{id}`
|
||||
|
||||
### 7.2 录制计划流管理
|
||||
|
||||
- **列表查询**:`GET /planstream/api/list`
|
||||
- **添加关联**:`POST /planstream/api/add`
|
||||
- **更新关联**:`POST /planstream/api/update`
|
||||
- **删除关联**:`POST /planstream/api/remove/{planId}/{streamPath}`
|
||||
|
||||
### 7.3 其他接口
|
||||
|
||||
- **解析计划**:`GET /plan/api/parse/{plan}`
|
||||
- **状态查询**:`GET /crontab/api/status`
|
||||
|
||||
## 8. 总结
|
||||
|
||||
Crontab 插件为 M7S 流媒体服务器提供了强大的定时录制功能,通过灵活的时间表设置和流路径关联,实现了自动化的录制控制。该插件适用于需要定期录制特定时间段流媒体内容的场景,能有效节省人力和存储资源。
|
||||
@@ -2,6 +2,7 @@ package plugin_crontab
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -327,11 +328,9 @@ func (ct *CrontabPlugin) ListRecordPlanStreams(ctx context.Context, req *cronpb.
|
||||
}
|
||||
|
||||
func (ct *CrontabPlugin) AddRecordPlanStream(ctx context.Context, req *cronpb.PlanStream) (*cronpb.Response, error) {
|
||||
if req.PlanId == 0 {
|
||||
return &cronpb.Response{
|
||||
Code: 400,
|
||||
Message: "record_plan_id is required",
|
||||
}, nil
|
||||
planId := 1
|
||||
if req.PlanId > 0 {
|
||||
planId = int(req.PlanId)
|
||||
}
|
||||
|
||||
if strings.TrimSpace(req.StreamPath) == "" {
|
||||
@@ -342,7 +341,7 @@ func (ct *CrontabPlugin) AddRecordPlanStream(ctx context.Context, req *cronpb.Pl
|
||||
}
|
||||
|
||||
// 从内存中获取录制计划
|
||||
plan, ok := ct.recordPlans.Get(uint(req.PlanId))
|
||||
plan, ok := ct.recordPlans.Get(uint(planId))
|
||||
if !ok {
|
||||
return &cronpb.Response{
|
||||
Code: 404,
|
||||
@@ -353,7 +352,7 @@ func (ct *CrontabPlugin) AddRecordPlanStream(ctx context.Context, req *cronpb.Pl
|
||||
// 检查是否已存在相同的记录
|
||||
var count int64
|
||||
searchModel := pkg.RecordPlanStream{
|
||||
PlanID: uint(req.PlanId),
|
||||
PlanID: uint(planId),
|
||||
StreamPath: req.StreamPath,
|
||||
}
|
||||
if err := ct.DB.Model(&searchModel).Where(&searchModel).Count(&count).Error; err != nil {
|
||||
@@ -370,10 +369,16 @@ func (ct *CrontabPlugin) AddRecordPlanStream(ctx context.Context, req *cronpb.Pl
|
||||
}, nil
|
||||
}
|
||||
|
||||
fragment := "60s"
|
||||
|
||||
if req.Fragment != "" {
|
||||
fragment = req.Fragment
|
||||
}
|
||||
|
||||
stream := &pkg.RecordPlanStream{
|
||||
PlanID: uint(req.PlanId),
|
||||
StreamPath: req.StreamPath,
|
||||
Fragment: req.Fragment,
|
||||
Fragment: fragment,
|
||||
FilePath: req.FilePath,
|
||||
Enable: req.Enable,
|
||||
RecordType: req.RecordType,
|
||||
@@ -406,11 +411,9 @@ func (ct *CrontabPlugin) AddRecordPlanStream(ctx context.Context, req *cronpb.Pl
|
||||
}
|
||||
|
||||
func (ct *CrontabPlugin) UpdateRecordPlanStream(ctx context.Context, req *cronpb.PlanStream) (*cronpb.Response, error) {
|
||||
if req.PlanId == 0 {
|
||||
return &cronpb.Response{
|
||||
Code: 400,
|
||||
Message: "record_plan_id is required",
|
||||
}, nil
|
||||
planId := 1
|
||||
if req.PlanId > 0 {
|
||||
planId = int(req.PlanId)
|
||||
}
|
||||
|
||||
if strings.TrimSpace(req.StreamPath) == "" {
|
||||
@@ -423,7 +426,7 @@ func (ct *CrontabPlugin) UpdateRecordPlanStream(ctx context.Context, req *cronpb
|
||||
// 检查记录是否存在
|
||||
var existingStream pkg.RecordPlanStream
|
||||
searchModel := pkg.RecordPlanStream{
|
||||
PlanID: uint(req.PlanId),
|
||||
PlanID: uint(planId),
|
||||
StreamPath: req.StreamPath,
|
||||
}
|
||||
if err := ct.DB.Where(&searchModel).First(&existingStream).Error; err != nil {
|
||||
@@ -524,7 +527,7 @@ func (ct *CrontabPlugin) RemoveRecordPlanStream(ctx context.Context, req *cronpb
|
||||
// 停止所有相关的定时任务
|
||||
ct.crontabs.Range(func(crontab *Crontab) bool {
|
||||
if crontab.RecordPlanStream.StreamPath == req.StreamPath && crontab.RecordPlan.ID == uint(req.PlanId) {
|
||||
crontab.Stop(nil)
|
||||
crontab.Stop(errors.New("remove record plan"))
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
@@ -359,13 +359,16 @@ func (cron *Crontab) startRecording() {
|
||||
|
||||
// 发送开始录制请求
|
||||
resp, err := http.Post(fmt.Sprintf("http://%s/mp4/api/start/%s", addr, cron.StreamPath), "application/json", bytes.NewBuffer(jsonBody))
|
||||
cron.Debug("record request", "url is ", fmt.Sprintf("http://%s/mp4/api/start/%s", addr, cron.StreamPath), "jsonBody is ", string(jsonBody))
|
||||
if err != nil {
|
||||
time.Sleep(time.Second)
|
||||
cron.Error("开始录制失败: %v", err)
|
||||
return
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
time.Sleep(time.Second)
|
||||
cron.Error("开始录制失败,HTTP状态码: %d", resp.StatusCode)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -22,7 +22,7 @@ var _ = m7s.InstallPlugin[CrontabPlugin](m7s.PluginMeta{
|
||||
RegisterGRPCHandler: pb.RegisterApiHandler,
|
||||
})
|
||||
|
||||
func (ct *CrontabPlugin) OnInit() (err error) {
|
||||
func (ct *CrontabPlugin) Start() (err error) {
|
||||
if ct.DB == nil {
|
||||
ct.Error("DB is nil")
|
||||
} else {
|
||||
@@ -32,6 +32,9 @@ func (ct *CrontabPlugin) OnInit() (err error) {
|
||||
}
|
||||
ct.Info("init database success")
|
||||
|
||||
// 初始化默认录制计划(工作日和周末计划)
|
||||
ct.InitDefaultPlans()
|
||||
|
||||
// 查询所有录制计划
|
||||
var plans []pkg.RecordPlan
|
||||
if err = ct.DB.Find(&plans).Error; err != nil {
|
||||
|
||||
114
plugin/crontab/init_data.go
Normal file
114
plugin/crontab/init_data.go
Normal file
@@ -0,0 +1,114 @@
|
||||
package plugin_crontab
|
||||
|
||||
import (
|
||||
"gorm.io/gorm"
|
||||
"strings"
|
||||
|
||||
"m7s.live/v5/plugin/crontab/pkg"
|
||||
)
|
||||
|
||||
// InitDefaultPlans 初始化默认的录制计划
|
||||
// 包括工作日录制计划和周末录制计划
|
||||
func (ct *CrontabPlugin) InitDefaultPlans() {
|
||||
|
||||
// 创建全天24小时录制计划(七天全天录制)的计划字符串
|
||||
allDayPlanStr := buildPlanString(true, true, true, true, true, true, true) // 周日到周六
|
||||
|
||||
// 检查是否已存在相同内容的工作日录制计划
|
||||
var count int64
|
||||
if err := ct.DB.Model(&pkg.RecordPlan{}).Where("plan = ?", allDayPlanStr).Count(&count).Error; err != nil {
|
||||
ct.Error("检查24小时录制计划失败: %v", err)
|
||||
} else if count == 0 {
|
||||
// 不存在相同内容的计划,创建新计划
|
||||
workdayPlan := &pkg.RecordPlan{
|
||||
Model: gorm.Model{ID: 1},
|
||||
Name: "七天全天录制计划",
|
||||
Plan: allDayPlanStr,
|
||||
Enable: true,
|
||||
}
|
||||
|
||||
if err := ct.DB.Create(workdayPlan).Error; err != nil {
|
||||
ct.Error("创建七天全天录制计划失败: %v", err)
|
||||
} else {
|
||||
ct.Info("成功创建七天全天录制计划")
|
||||
// 添加到内存中
|
||||
ct.recordPlans.Add(workdayPlan)
|
||||
}
|
||||
} else {
|
||||
ct.Info("已存在相同内容的七天全天录制计划,跳过创建")
|
||||
}
|
||||
|
||||
// 创建工作日录制计划(周一到周五全天录制)的计划字符串
|
||||
workdayPlanStr := buildPlanString(false, true, true, true, true, true, false) // 周一到周五
|
||||
|
||||
// 检查是否已存在相同内容的工作日录制计划
|
||||
if err := ct.DB.Model(&pkg.RecordPlan{}).Where("plan = ?", workdayPlanStr).Count(&count).Error; err != nil {
|
||||
ct.Error("检查工作日录制计划失败: %v", err)
|
||||
} else if count == 0 {
|
||||
// 不存在相同内容的计划,创建新计划
|
||||
workdayPlan := &pkg.RecordPlan{
|
||||
Model: gorm.Model{ID: 2},
|
||||
Name: "工作日录制计划",
|
||||
Plan: workdayPlanStr,
|
||||
Enable: true,
|
||||
}
|
||||
|
||||
if err := ct.DB.Create(workdayPlan).Error; err != nil {
|
||||
ct.Error("创建工作日录制计划失败: %v", err)
|
||||
} else {
|
||||
ct.Info("成功创建工作日录制计划")
|
||||
// 添加到内存中
|
||||
ct.recordPlans.Add(workdayPlan)
|
||||
}
|
||||
} else {
|
||||
ct.Info("已存在相同内容的工作日录制计划,跳过创建")
|
||||
}
|
||||
|
||||
// 创建周末录制计划(周六和周日全天录制)的计划字符串
|
||||
weekendPlanStr := buildPlanString(true, false, false, false, false, false, true) // 周日和周六
|
||||
|
||||
// 检查是否已存在相同内容的周末录制计划
|
||||
if err := ct.DB.Model(&pkg.RecordPlan{}).Where("plan = ?", weekendPlanStr).Count(&count).Error; err != nil {
|
||||
ct.Error("检查周末录制计划失败: %v", err)
|
||||
} else if count == 0 {
|
||||
// 不存在相同内容的计划,创建新计划
|
||||
weekendPlan := &pkg.RecordPlan{
|
||||
Model: gorm.Model{ID: 3},
|
||||
Name: "周末录制计划",
|
||||
Plan: weekendPlanStr,
|
||||
Enable: true,
|
||||
}
|
||||
|
||||
if err := ct.DB.Create(weekendPlan).Error; err != nil {
|
||||
ct.Error("创建周末录制计划失败: %v", err)
|
||||
} else {
|
||||
ct.Info("成功创建周末录制计划")
|
||||
// 添加到内存中
|
||||
ct.recordPlans.Add(weekendPlan)
|
||||
}
|
||||
} else {
|
||||
ct.Info("已存在相同内容的周末录制计划,跳过创建")
|
||||
}
|
||||
}
|
||||
|
||||
// buildPlanString 构建计划字符串
|
||||
// 参数分别表示:周日、周一、周二、周三、周四、周五、周六是否录制
|
||||
// 返回168位的计划字符串,每天24小时,一周7天
|
||||
func buildPlanString(sun, mon, tue, wed, thu, fri, sat bool) string {
|
||||
var planBuilder strings.Builder
|
||||
|
||||
// 按照周日、周一、...、周六的顺序
|
||||
days := []bool{sun, mon, tue, wed, thu, fri, sat}
|
||||
|
||||
for _, record := range days {
|
||||
if record {
|
||||
// 该天录制,24小时都为1
|
||||
planBuilder.WriteString(strings.Repeat("1", 24))
|
||||
} else {
|
||||
// 该天不录制,24小时都为0
|
||||
planBuilder.WriteString(strings.Repeat("0", 24))
|
||||
}
|
||||
}
|
||||
|
||||
return planBuilder.String()
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user