Compare commits

...

96 Commits

Author SHA1 Message Date
langhuihui
479a0a79f6 refactor: frame converter and mp4 track improvements
- Refactor frame converter implementation
- Update mp4 track to use ICodex
- General refactoring and code improvements

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-08-28 13:57:58 +08:00
pggiroro
b6ee2843b0 fix: platform register failed 2025-08-22 15:17:33 +08:00
pggiroro
1a8e2bc816 fix: device offline when device unregister,fix delete device failed 2025-08-21 22:43:08 +08:00
pggiroro
bc0c761aa8 feat: batch snap from mp4 file 2025-08-21 22:43:08 +08:00
langhuihui
cabd0e3088 fix: all config plugin name to lowcase 2025-08-19 17:29:32 +08:00
yangjinxing123
2034f068c0 fix: Deadlock issue caused by device logout (#315)
Co-authored-by: yjx <yjx>
2025-08-15 15:28:23 +08:00
pggiroro
eba62c4054 feat: gb28181 support update channel name,channelid 2025-08-06 18:07:27 +08:00
pggiroro
a070dc64f8 fix: continue delete file when delete file that does not exist 2025-08-06 18:07:27 +08:00
langhuihui
e10dfec816 fix: remove pullproxy have to stop pulljob 2025-08-05 09:41:02 +08:00
pggiroro
96b9cbfc08 fix: gb28181 update use taskManager 2025-08-03 20:35:52 +08:00
pggiroro
2bbee90a9f feat: crontab init sql 2025-08-03 20:35:52 +08:00
pggiroro
272def302a feat: plugin snap support batch snap 2025-08-03 20:35:52 +08:00
pggiroro
04843002bf fix: platform get channel info from memory 2025-07-25 22:19:15 +08:00
pggiroro
e4810e9c55 fix: delete oldest mp4 file 2025-07-23 17:10:02 +08:00
langhuihui
15d830f1eb feat: add custom admin home page 2025-07-21 19:00:13 +08:00
langhuihui
ad32f6f96e feat: update pull or push proxy with optional args 2025-07-20 15:14:14 +08:00
pggiroro
56c4ea5907 fix: api getDevices,getDevice,getChannels 2025-07-11 23:11:27 +08:00
pggiroro
28c71545db fix: groupchannel page select 2025-07-11 18:03:39 +08:00
langhuihui
17faf3f064 feat: pulse interval can be 0 2025-07-11 16:23:00 +08:00
pggiroro
131af312f1 fix: improve webhook task 2025-07-08 21:39:14 +08:00
pggiroro
cf3b7dfabe fix: improve packet replayer 2025-07-08 21:39:14 +08:00
pggiroro
584c2e9932 fix: dialogs.getKey change to string(callid) 2025-07-07 09:14:42 +08:00
pggiroro
a7f04faa23 fix: search hls use type "ts" in db 2025-07-07 09:14:42 +08:00
pggiroro
966153f873 fix: dialog.getKey() change from ssrc to callid;data source of api device/list from db change to memory 2025-07-06 23:06:38 +08:00
pggiroro
4391ad2d8d fix: alarminfo add alarmName 2025-07-06 23:06:38 +08:00
langhuihui
747a5a1104 fix: hls record ts 2025-07-06 10:03:35 +08:00
langhuihui
97d8de523d fix: hls play record ts 2025-07-03 19:52:49 +08:00
pggiroro
cad47aec5c feat: send alarminfo through hook 2025-07-02 21:49:11 +08:00
pggiroro
baf3640b23 feat: send alarm through hook 2025-07-01 11:09:59 +08:00
langhuihui
3d68712ff6 fix: docker action 2025-07-01 08:43:50 +08:00
langhuihui
f06f43dbe9 docs: update v5.0.3 release note, covering major enhancements and fixes for recording, protocols, plugins, and configuration. 2025-06-30 22:38:48 +08:00
langhuihui
75efcba311 fix: mp4 pull record file 2025-06-26 16:36:04 +08:00
pggiroro
6b58e2a9b5 fix: flv record 2025-06-24 20:37:04 +08:00
pggiroro
7b6259ed67 feat: add pullproxy support gb28181 type 2025-06-23 23:39:55 +08:00
langhuihui
0d3d86518d fix: record write time 2025-06-23 16:48:31 +08:00
langhuihui
ac3ad009a7 feat: suber wait video default 2025-06-23 09:00:02 +08:00
langhuihui
5731c2e8da fix: rtmp clone buffers 2025-06-22 23:10:57 +08:00
langhuihui
cf6153fa91 feat: add get all config yaml example 2025-06-22 22:29:09 +08:00
pggiroro
70e1ea51ac fix: Send RTMP data based on timestamps in data of tcpdump file. 2025-06-22 21:37:55 +08:00
langhuihui
8f5a829900 feat: add wait track conf to subscribe 2025-06-21 21:55:17 +08:00
langhuihui
10f4fe3fc6 fix: pull proxy check to pull when on sub 2025-06-20 08:10:58 +08:00
langhuihui
3a2901fa5f fix: correct SQL syntax for event level comparison in eventRecordCheck 2025-06-19 23:17:57 +08:00
dexter
55f5408f64 feat: cut mp4 when avcc changed 2025-06-19 10:46:20 +00:00
dexter
9e45c3eb71 fix: remove event_id from normal record table query 2025-06-19 02:25:15 +00:00
langhuihui
01fa1f3ed8 gifix: replay cap script 2025-06-17 23:51:19 +08:00
langhuihui
830da3aaab fix: mp4 demuxer 2025-06-17 20:22:51 +08:00
langhuihui
5a04dc814d fix: event record check 2025-06-17 19:32:53 +08:00
langhuihui
af5d2bc1f2 fix: set record type 2025-06-17 18:34:10 +08:00
langhuihui
a3e0c1864e feat: add ping pong to batchv2 2025-06-17 14:03:37 +08:00
langhuihui
33d385d2bf fix: record bug 2025-06-17 11:36:32 +08:00
langhuihui
29c47a8d08 fix: hls demo page 2025-06-17 11:26:11 +08:00
langhuihui
5bf5e7bb20 feat: mp4 conert to ts format 2025-06-17 11:09:35 +08:00
langhuihui
4b74ea5841 doc: auth 2025-06-17 09:41:36 +08:00
langhuihui
43710fb017 fix: record 2025-06-16 22:41:55 +08:00
langhuihui
962dda8d08 refactor: mp4 and record system 2025-06-16 20:28:49 +08:00
erroot
ec56bba75a Erroot v5 (#286)
* 插件数据库不同时,新建DB 对象赋值给插件

* MP4 plugin adds extraction, clips, images, compressed video, GOP clicp

* remove mp4/util panic code
2025-06-16 08:29:14 +08:00
pggiroro
b2b511d755 fix: user.LastLogin set gorm type:timestamp, gb28181 api GetGroupChannels modify 2025-06-15 22:19:14 +08:00
pggiroro
42acf47250 feature: gb28181 support single mediaport 2025-06-15 16:58:52 +08:00
langhuihui
6206ee847d fix: record table fit pg database 2025-06-15 15:58:12 +08:00
langhuihui
6cfdc03e4a fix: user mode fit pg database 2025-06-15 15:21:21 +08:00
pggiroro
b425b8da1f fix: ignore RecordEvent in gorm 2025-06-13 12:52:57 +08:00
langhuihui
e105243cd5 refactor: record 2025-06-13 12:52:57 +08:00
langhuihui
20ec6c55cd fix: plugin init error 2025-06-12 15:08:47 +08:00
langhuihui
e478a1972e fix: webrtc batch bug 2025-06-12 14:21:59 +08:00
langhuihui
94be02cd79 feat: consider pull proxy disable status 2025-06-12 13:50:47 +08:00
langhuihui
bacda6f5a0 feat: webrtc fit client codecs 2025-06-12 12:49:36 +08:00
langhuihui
61fae4cc97 fix: webrtc h265 subscribe 2025-06-11 23:43:22 +08:00
pggiroro
e0752242b2 feat: crontab support record with plan like nvr 2025-06-11 22:18:45 +08:00
pggiroro
23f2ed39a1 fix: gb28181 check from.Address.User when onRegister,delete device from db when device is not register 2025-06-11 22:18:45 +08:00
erroot
0b731e468b 插件数据库不同时,新建DB 对象赋值给插件 2025-06-11 21:43:34 +08:00
langhuihui
4fe1472117 refactor: init plugin faild do not register http handle 2025-06-11 13:57:45 +08:00
langhuihui
a8b3a644c3 feat: record recover 2025-06-10 20:16:39 +08:00
pggiroro
4f0a097dac feat: crontab support plat with streampath in database 2025-06-08 21:01:36 +08:00
pggiroro
4df3de00af fix: gb28181 subscriber and invite sdp 2025-06-08 10:40:17 +08:00
langhuihui
9c16905f28 feat: add evn check to debug plugin 2025-06-07 21:07:28 +08:00
pggiroro
0470f78ed7 fix: register to up platform change cseq when need password, get deviceinfo do not update device name when name is not nil in db,return error when DB is nil in Oninit 2025-06-06 22:45:50 +08:00
pggiroro
7282f1f44d fix: add platform from config.yaml,add example into default/config.yaml 2025-06-06 09:03:58 +08:00
pggiroro
67186cd669 fix: subscribe stream before start mp4 record 2025-06-06 09:03:58 +08:00
pggiroro
09e9761083 feat: Added the association feature between plan and streampath, which has not been tested yet. 2025-06-06 09:03:58 +08:00
langhuihui
4acdc19beb feat: add duration to record 2025-06-05 23:51:33 +08:00
langhuihui
80e19726d4 fix: use safeGet insteadof Call and get
feat: multi buddy support
2025-06-05 20:33:59 +08:00
langhuihui
8ff14931fe feat: disable replay protection on tcp webrtc 2025-06-04 23:02:24 +08:00
pggiroro
9c7dc7e628 fix: modify gb.Logger.With 2025-06-04 20:39:49 +08:00
pggiroro
75791fe93f feat: gb28181 support add platform and platform channel from config.yaml 2025-06-04 20:36:48 +08:00
langhuihui
cf218215ff fix: tcp read block 2025-06-04 14:13:28 +08:00
langhuihui
dbf820b845 feat: downlad flv format from mp4 record file 2025-06-03 17:20:58 +08:00
langhuihui
86b9969954 feat: config support more format 2025-06-03 09:06:43 +08:00
langhuihui
b3143e8c14 fix: mp4 download 2025-06-02 22:31:25 +08:00
langhuihui
7f859e6139 fix: mp4 recovery 2025-06-02 21:12:02 +08:00
pggiroro
6eb2941087 fix: use task.Manager to resolve register handler 2025-06-02 20:09:22 +08:00
pggiroro
e8b4cea007 fix: plan.length is 168 2025-06-02 20:09:22 +08:00
pggiroro
3949773e63 fix: update config.yaml add comment about autoinvite,mediaip,sipip 2025-06-02 20:09:22 +08:00
langhuihui
d67279a404 feat: add raw check no frame 2025-05-30 14:01:18 +08:00
langhuihui
043c62f38f feat: add loop read mp4 2025-05-29 20:25:26 +08:00
pggiroro
acf9f0c677 fix: gb28181 make invite sdp mediaip or sipip correct;linux remove viaheader in sip request 2025-05-28 09:22:34 +08:00
langhuihui
49d1e7c784 feat: add s3 plugin 2025-05-28 08:40:53 +08:00
293 changed files with 34652 additions and 11815 deletions

View File

@@ -0,0 +1,5 @@
---
description: build pb
alwaysApply: false
---
如果修改了 proto 文件需要编译,请使用 scripts 目录下的脚本来编译

View File

@@ -93,9 +93,16 @@ jobs:
tar -zxvf bin/m7s_v5_linux_arm64.tar.gz
mv m7s monibuca_arm64
docker login -u langhuihui -p ${{ secrets.DOCKER_PASSWORD }}
docker buildx build --platform linux/amd64,linux/arm64 -t langhuihui/monibuca:v5 --push .
- name: docker push version tag
if: success() && !contains(env.version, 'beta')
if [[ "${{ env.version }}" == *"beta"* ]]; then
docker buildx build --platform linux/amd64,linux/arm64 -t langhuihui/monibuca:v5 --push .
else
docker buildx build --platform linux/amd64,linux/arm64 -t langhuihui/monibuca:v5 -t langhuihui/monibuca:${{ env.version }} --push .
fi
- name: docker build lite version
if: success() && startsWith(github.ref, 'refs/tags/')
run: |
docker tag langhuihui/monibuca:v5 langhuihui/monibuca:${{ env.version }}
docker push langhuihui/monibuca:${{ env.version }}
if [[ "${{ env.version }}" == *"beta"* ]]; then
docker buildx build --platform linux/amd64,linux/arm64 -f DockerfileLite -t monibuca/v5:latest --push .
else
docker buildx build --platform linux/amd64,linux/arm64 -f DockerfileLite -t monibuca/v5:latest -t monibuca/v5:${{ env.version }} --push .
fi

5
.gitignore vendored
View File

@@ -19,4 +19,7 @@ __debug*
example/default/*
!example/default/main.go
!example/default/config.yaml
shutdown.sh
shutdown.sh
!example/test/test.db
*.mp4
shutdown.bat

199
CLAUDE.md Normal file
View File

@@ -0,0 +1,199 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Project Overview
Monibuca is a high-performance streaming server framework written in Go. It's designed to be a modular, scalable platform for real-time audio/video streaming with support for multiple protocols including RTMP, RTSP, HLS, WebRTC, GB28181, and more.
## Development Commands
### Building and Running
**Basic Run (with SQLite):**
```bash
cd example/default
go run -tags sqlite main.go
```
**Build Tags:**
- `sqlite` - Enable SQLite database support
- `sqliteCGO` - Enable SQLite with CGO
- `mysql` - Enable MySQL database support
- `postgres` - Enable PostgreSQL database support
- `duckdb` - Enable DuckDB database support
- `disable_rm` - Disable memory pool
- `fasthttp` - Use fasthttp instead of net/http
- `taskpanic` - Enable panics for testing
**Protocol Buffer Generation:**
```bash
# Generate all proto files
sh scripts/protoc.sh
# Generate specific plugin proto
sh scripts/protoc.sh plugin_name
```
**Release Building:**
```bash
# Uses goreleaser configuration
goreleaser build
```
**Testing:**
```bash
go test ./...
```
## Architecture Overview
### Core Components
**Server (`server.go`):** Main server instance that manages plugins, streams, and configurations. Implements the central event loop and lifecycle management.
**Plugin System (`plugin.go`):** Modular architecture where functionality is provided through plugins. Each plugin implements the `IPlugin` interface and can provide:
- Protocol handlers (RTMP, RTSP, etc.)
- Media transformers
- Pull/Push proxies
- Recording capabilities
- Custom HTTP endpoints
**Configuration System (`pkg/config/`):** Hierarchical configuration system with priority order: dynamic modifications > environment variables > config files > default YAML > global config > defaults.
**Task System (`pkg/task/`):** Asynchronous task management with dependency handling, lifecycle management, and graceful shutdown capabilities.
### Key Interfaces
**Publisher:** Handles incoming media streams and manages track information
**Subscriber:** Handles outgoing media streams to clients
**Puller:** Pulls streams from external sources
**Pusher:** Pushes streams to external destinations
**Transformer:** Processes/transcodes media streams
**Recorder:** Records streams to storage
### Stream Processing Flow
1. **Publisher** receives media data and creates tracks
2. **Tracks** handle audio/video data with specific codecs
3. **Subscribers** attach to publishers to receive media
4. **Transformers** can process streams between publishers and subscribers
5. **Plugins** provide protocol-specific implementations
## Plugin Development
### Creating a Plugin
1. Implement the `IPlugin` interface
2. Define plugin metadata using `PluginMeta`
3. Register with `InstallPlugin[YourPluginType](meta)`
4. Optionally implement protocol-specific interfaces:
- `ITCPPlugin` for TCP servers
- `IUDPPlugin` for UDP servers
- `IQUICPlugin` for QUIC servers
- `IRegisterHandler` for HTTP endpoints
### Plugin Lifecycle
1. **Init:** Configuration parsing and initialization
2. **Start:** Network listeners and task registration
3. **Run:** Active operation
4. **Dispose:** Cleanup and shutdown
## Configuration Structure
### Global Configuration
- HTTP/TCP/UDP/QUIC listeners
- Database connections (SQLite, MySQL, PostgreSQL, DuckDB)
- Authentication settings
- Admin interface settings
- Global stream alias mappings
### Plugin Configuration
Each plugin can define its own configuration structure that gets merged with global settings.
## Database Integration
Supports multiple database backends:
- **SQLite:** Default lightweight option
- **MySQL:** Production deployments
- **PostgreSQL:** Production deployments
- **DuckDB:** Analytics use cases
Automatic migration is handled for core models including users, proxies, and stream aliases.
## Protocol Support
### Built-in Plugins
- **RTMP:** Real-time messaging protocol
- **RTSP:** Real-time streaming protocol
- **HLS:** HTTP live streaming
- **WebRTC:** Web real-time communication
- **GB28181:** Chinese surveillance standard
- **FLV:** Flash video format
- **MP4:** MPEG-4 format
- **SRT:** Secure reliable transport
## Authentication & Security
- JWT-based authentication for admin interface
- Stream-level authentication with URL signing
- Role-based access control (admin/user)
- Webhook support for external auth integration
## Development Guidelines
### Code Style
- Follow existing patterns and naming conventions
- Use the task system for async operations
- Implement proper error handling and logging
- Use the configuration system for all settings
### Testing
- Unit tests should be placed alongside source files
- Integration tests can use the example configurations
- Use the mock.py script for protocol testing
### Performance Considerations
- Memory pool is enabled by default (disable with `disable_rm`)
- Zero-copy design for media data where possible
- Lock-free data structures for high concurrency
- Efficient buffer management with ring buffers
## Debugging
### Built-in Debug Plugin
- Performance monitoring and profiling
- Real-time metrics via Prometheus endpoint (`/api/metrics`)
- pprof integration for memory/cpu profiling
### Logging
- Structured logging with zerolog
- Configurable log levels
- Log rotation support
- Fatal crash logging
## Web Admin Interface
- Web-based admin UI served from `admin.zip`
- RESTful API for all operations
- Real-time stream monitoring
- Configuration management
- User management (when auth enabled)
## Common Issues
### Port Conflicts
- Default HTTP port: 8080
- Default gRPC port: 50051
- Check plugin-specific port configurations
### Database Connection
- Ensure proper build tags for database support
- Check DSN configuration strings
- Verify database file permissions
### Plugin Loading
- Plugins are auto-discovered from imports
- Check plugin enable/disable status
- Verify configuration merging

31
DockerfileLite Normal file
View File

@@ -0,0 +1,31 @@
# Running Stage
FROM alpine:latest
WORKDIR /monibuca
# Copy the pre-compiled binary from the build context
# The GitHub Actions workflow prepares 'monibuca_linux' in the context root
# Both architectures are copied; the RUN step below keeps only the matching one.
COPY monibuca_amd64 ./monibuca_amd64
COPY monibuca_arm64 ./monibuca_arm64
COPY admin.zip ./admin.zip
# Copy the configuration file from the build context
COPY example/default/config.yaml /etc/monibuca/config.yaml
# Export necessary ports
EXPOSE 6000 8080 8443 1935 554 5060 9000-20000
EXPOSE 5060/udp 44944/udp
# Keep only the binary matching the image architecture, renaming it to the
# common name 'monibuca_linux' expected by ENTRYPOINT; discard the other one.
# NOTE(review): uname -m reflects the build host unless buildx emulation is
# used per platform — presumably this runs under `docker buildx build
# --platform ...`; confirm against the CI workflow.
RUN if [ "$(uname -m)" = "aarch64" ]; then \
    mv ./monibuca_arm64 ./monibuca_linux; \
    rm ./monibuca_amd64; \
    else \
    mv ./monibuca_amd64 ./monibuca_linux; \
    rm ./monibuca_arm64; \
    fi
ENTRYPOINT [ "./monibuca_linux"]
# Default config path; can be overridden by passing different args at run time.
CMD ["-c", "/etc/monibuca/config.yaml"]

151
RELEASE_NOTES_5.0.x_CN.md Normal file
View File

@@ -0,0 +1,151 @@
# Monibuca v5.0.x Release Notes
## v5.0.4 (2025-08-15)
### 新增 / 改进 (Features & Improvements)
- GB28181: 支持更新 channelName / channelId (eba62c4)
- 定时任务(crontab): 初始化 SQL 支持 (2bbee90)
- Snap 插件: 支持批量抓图 (272def3)
- 管理后台: 支持自定义首页 (15d830f)
- 推/拉代理: 支持可选参数更新 (ad32f6f)
- 心跳/脉冲: pulse interval 允许为 0 (17faf3f)
- 告警上报: 通过 Hook 发送报警 (baf3640)
- 告警信息上报: 通过 Hook 发送 alarminfo (cad47ae)
## v5.0.3 (2025-06-27)
### 🎉 新功能 (New Features)
#### 录像与流媒体协议增强
- **MP4/FLV录像优化**:多项修复和优化录像拉取、分片、写入、格式转换等功能,提升兼容性和稳定性。
- **GB28181协议增强**:支持pullproxy代理GB28181流,完善平台配置、子码流播放、单独media port等能力。
- **插件与配置系统**:插件初始化、配置加载、数据库适配等增强,支持获取全部配置yaml示例。
- **WebRTC/HLS/RTMP协议适配**:WebRTC支持更多编解码器,HLS/RTMP协议兼容性提升。
- **crontab计划录像**:定时任务插件支持计划录像,拉流代理支持禁用。
### 🐛 问题修复 (Bug Fixes)
- **录像/流媒体相关**:修复mp4、flv、rtmp、hls等协议的多项bug,包括clone buffer、SQL语法、表结构适配等。
- **GB28181/数据库**:修复注册、流订阅、表结构、SQL语法等问题,适配PostgreSQL。
- **插件系统**:修复插件初始化、数据库对象赋值、配置加载等问题。
### 🛠️ 优化改进 (Improvements)
- **代码结构重构**重构mp4、record、插件等系统提升可维护性。
- **文档与示例**完善文档说明增加配置和API示例。
- **Docker镜像**优化tcpdump、ffmpeg等工具集成。
### 👥 贡献者 (Contributors)
- langhuihui
- pggiroro
- banshan
---
## v5.0.2 (2025-06-05)
### 🎉 新功能 (New Features)
#### 核心功能
- **降低延迟** - 禁用了TCP WebRTC的重放保护功能降低了延迟
- **配置系统增强** - 支持更多配置格式(支持配置项中插入`-``_`和大写字母),提升配置灵活性
- **原始数据检查** - 新增原始数据无帧检查功能,提升数据处理稳定性
- **MP4循环读取** - 支持MP4文件循环读取功能通过配置 pull 配置下的 `loop` 配置)
- **S3插件** - 新增S3存储插件支持云存储集成
- **TCP读写缓冲配置** - 新增TCP连接读写缓冲区配置选项针对高并发下的吞吐能力增强
- **拉流测试模式** - 新增拉流测试模式选项(可以选择拉流时不发布),便于调试和测试
- **SEI API格式扩展** - 扩展SEI API支持更多数据格式
- **Hook扩展** - 新增更多Hook回调点增强扩展性
- **定时任务插件** - 新增crontab定时任务插件
- **服务器抓包** - 新增服务器抓包功能(调用`tcpdump`支持TCP和UDP协议,API 说明见 [tcpdump](https://api.monibuca.com/api-301117332)
#### GB28181协议增强
- **平台配置支持** - GB28181现在支持从config.yaml中添加平台和平台通道配置
- **子码流播放** - 支持GB28181子码流播放功能
- **SDP优化** - 优化invite SDP中的mediaip和sipip处理
- **本地端口保存** - 修复GB28181本地端口保存到数据库的问题
#### MP4功能增强
- **FLV格式下载** - 支持从MP4录制文件下载FLV格式
- **下载功能修复** - 修复MP4下载功能的相关问题
- **恢复功能修复** - 修复MP4恢复功能
### 🐛 问题修复 (Bug Fixes)
#### 网络通信
- **TCP读取阻塞** - 修复TCP读取阻塞问题增加了读取超时设置
- **RTSP内存泄漏** - 修复RTSP协议的内存泄漏问题
- **RTSP音视频标识** - 修复RTSP无音频或视频标识的问题
#### GB28181协议
- **任务管理** - 使用task.Manager解决注册处理器的问题
- **计划长度** - 修复plan.length为168的问题
- **注册频率** - 修复GB28181注册过快导致启动过多任务的问题
- **联系信息** - 修复GB28181获取错误联系信息的问题
#### RTMP协议
- **时间戳处理** - 修复RTMP时间戳开头跳跃问题
### 🛠️ 优化改进 (Improvements)
#### Docker支持
- **tcpdump工具** - Docker镜像中新增tcpdump网络诊断工具
#### Linux平台优化
- **SIP请求优化** - Linux平台移除SIP请求中的viaheader
### 👥 贡献者 (Contributors)
- langhuihui
- pggiroro
- banshan
---
## v5.0.1 (2025-05-21)
### 🎉 新功能 (New Features)
#### WebRTC增强
- **H265支持** - 新增WebRTC对H265编码的支持提升视频质量和压缩效率
#### GB28181协议增强
- **订阅功能扩展** - GB28181模块现在支持订阅报警、移动位置、目录信息
- **通知请求** - 支持接收通知请求,增强与设备的交互能力
#### Docker优化
- **FFmpeg集成** - Docker镜像中新增FFmpeg工具支持更多音视频处理场景
- **多架构支持** - 新增Docker多架构构建支持
### 🐛 问题修复 (Bug Fixes)
#### Docker相关
- **构建问题** - 修复Docker构建过程中的多个问题
- **构建优化** - 优化Docker构建流程提升构建效率
#### RTMP协议
- **时间戳处理** - 修复RTMP第一个chunk类型3需要添加时间戳的问题
#### GB28181协议
- **路径匹配** - 修复GB28181模块中播放流路径的正则表达式匹配问题
#### MP4处理
- **stsz box** - 修复stsz box采样大小的问题
- **G711音频** - 修复拉取MP4文件时读取G711音频的问题
- **H265解析** - 修复H265 MP4文件解析问题
### 🛠️ 优化改进 (Improvements)
#### 代码质量
- **错误处理** - 新增maxcount错误处理机制
- **文档更新** - 更新README文档和go.mod配置
#### 构建系统
- **ARM架构** - 减少JavaScript代码优化ARM架构Docker构建
- **构建标签** - 移除Docker中不必要的构建标签
### 📦 其他更新 (Other Updates)
- **MCP相关** - 更新Model Context Protocol相关功能
- **依赖更新** - 更新项目依赖和模块配置
### 👥 贡献者 (Contributors)
- langhuihui
---

25
alarm.go Normal file
View File

@@ -0,0 +1,25 @@
package m7s
import (
"time"
)
// AlarmInfo is the alarm record entity persisted to the database.
type AlarmInfo struct {
	ID         uint      `gorm:"primaryKey;autoIncrement" json:"id"`           // auto-incrementing primary key
	ServerInfo string    `gorm:"type:varchar(255);not null" json:"serverInfo"` // server information
	StreamName string    `gorm:"type:varchar(255);index" json:"streamName"`    // stream name
	StreamPath string    `gorm:"type:varchar(500)" json:"streamPath"`          // streampath of the stream
	AlarmName  string    `gorm:"type:varchar(255);not null" json:"alarmName"`  // alarm name
	AlarmDesc  string    `gorm:"type:varchar(500);not null" json:"alarmDesc"`  // alarm description
	AlarmType  int       `gorm:"not null;index" json:"alarmType"`              // alarm type (corresponds to previously defined constants)
	IsSent     bool      `gorm:"default:false" json:"isSent"`                  // whether the alarm has been sent successfully
	CreatedAt  time.Time `gorm:"autoCreateTime" json:"createdAt"`              // creation time, i.e. the alarm time
	UpdatedAt  time.Time `gorm:"autoUpdateTime" json:"updatedAt"`              // last update time
	FilePath   string    `gorm:"type:varchar(255)" json:"filePath"`            // associated file path
}
// TableName tells GORM to map AlarmInfo to the "alarm_info" table.
func (AlarmInfo) TableName() string {
	return "alarm_info"
}

View File

@@ -48,7 +48,7 @@ func (s *Server) initStreamAlias() {
func (s *Server) GetStreamAlias(ctx context.Context, req *emptypb.Empty) (res *pb.StreamAliasListResponse, err error) {
res = &pb.StreamAliasListResponse{}
s.Streams.Call(func() error {
s.CallOnStreamTask(func() {
for alias := range s.AliasStreams.Range {
info := &pb.StreamAlias{
StreamPath: alias.StreamPath,
@@ -62,18 +62,17 @@ func (s *Server) GetStreamAlias(ctx context.Context, req *emptypb.Empty) (res *p
}
res.Data = append(res.Data, info)
}
return nil
})
return
}
func (s *Server) SetStreamAlias(ctx context.Context, req *pb.SetStreamAliasRequest) (res *pb.SuccessResponse, err error) {
res = &pb.SuccessResponse{}
s.Streams.Call(func() error {
s.CallOnStreamTask(func() {
if req.StreamPath != "" {
u, err := url.Parse(req.StreamPath)
if err != nil {
return err
return
}
req.StreamPath = strings.TrimPrefix(u.Path, "/")
publisher, canReplace := s.Streams.Get(req.StreamPath)
@@ -159,7 +158,6 @@ func (s *Server) SetStreamAlias(ctx context.Context, req *pb.SetStreamAliasReque
}
}
}
return nil
})
return
}

422
api.go
View File

@@ -7,12 +7,12 @@ import (
"net/http"
"net/url"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"time"
"m7s.live/v5/pkg/config"
"m7s.live/v5/pkg/task"
myip "github.com/husanpao/ip"
@@ -26,7 +26,7 @@ import (
"gopkg.in/yaml.v3"
"m7s.live/v5/pb"
"m7s.live/v5/pkg"
"m7s.live/v5/pkg/config"
"m7s.live/v5/pkg/format"
"m7s.live/v5/pkg/util"
)
@@ -97,22 +97,13 @@ func (s *Server) api_Stream_AnnexB_(rw http.ResponseWriter, r *http.Request) {
return
}
defer reader.StopRead()
if reader.Value.Raw == nil {
if err = reader.Value.Demux(publisher.VideoTrack.ICodecCtx); err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError)
return
}
}
var annexb pkg.AnnexB
var t pkg.AVTrack
t.ICodecCtx, t.SequenceFrame, err = annexb.ConvertCtx(publisher.VideoTrack.ICodecCtx)
if t.ICodecCtx == nil {
http.Error(rw, "unsupported codec", http.StatusInternalServerError)
var annexb format.AnnexB
err = pkg.ConvertFrameType(reader.Value.Wraps[0], &annexb)
if err != nil {
http.Error(rw, err.Error(), http.StatusInternalServerError)
return
}
annexb.Mux(t.ICodecCtx, &reader.Value)
_, err = annexb.WriteTo(rw)
annexb.WriteTo(rw)
}
func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err error) {
@@ -159,6 +150,9 @@ func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err
}
res.Data.AudioTrack.SampleRate = uint32(t.ICodecCtx.(pkg.IAudioCodecCtx).GetSampleRate())
res.Data.AudioTrack.Channels = uint32(t.ICodecCtx.(pkg.IAudioCodecCtx).GetChannels())
if pub.State == PublisherStateInit {
res.Data.State = int32(PublisherStateTrackAdded)
}
}
}
if t := pub.VideoTrack.AVTrack; t != nil {
@@ -174,6 +168,9 @@ func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err
}
res.Data.VideoTrack.Width = uint32(t.ICodecCtx.(pkg.IVideoCodecCtx).Width())
res.Data.VideoTrack.Height = uint32(t.ICodecCtx.(pkg.IVideoCodecCtx).Height())
if pub.State == PublisherStateInit {
res.Data.State = int32(PublisherStateTrackAdded)
}
}
}
return
@@ -181,19 +178,17 @@ func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err
func (s *Server) StreamInfo(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.StreamInfoResponse, err error) {
var recordings []*pb.RecordingDetail
s.Records.Call(func() error {
for record := range s.Records.Range {
if record.StreamPath == req.StreamPath {
recordings = append(recordings, &pb.RecordingDetail{
FilePath: record.RecConf.FilePath,
Mode: record.Mode,
Fragment: durationpb.New(record.RecConf.Fragment),
Append: record.RecConf.Append,
PluginName: record.Plugin.Meta.Name,
})
}
s.Records.Range(func(record *RecordJob) bool {
if record.StreamPath == req.StreamPath {
recordings = append(recordings, &pb.RecordingDetail{
FilePath: record.RecConf.FilePath,
Mode: record.RecConf.Mode,
Fragment: durationpb.New(record.RecConf.Fragment),
Append: record.RecConf.Append,
PluginName: record.Plugin.Meta.Name,
})
}
return nil
return true
})
if pub, ok := s.Streams.SafeGet(req.StreamPath); ok {
res, err = s.getStreamInfo(pub)
@@ -223,11 +218,13 @@ func (s *Server) TaskTree(context.Context, *emptypb.Empty) (res *pb.TaskTreeResp
StartTime: timestamppb.New(t.StartTime),
Description: m.GetDescriptions(),
StartReason: t.StartReason,
Level: uint32(t.GetLevel()),
}
if job, ok := m.(task.IJob); ok {
if blockedTask := job.Blocked(); blockedTask != nil {
res.Blocked = fillData(blockedTask)
}
res.EventLoopRunning = job.EventLoopRunning()
for t := range job.RangeSubTask {
child := fillData(t)
if child == nil {
@@ -261,23 +258,21 @@ func (s *Server) RestartTask(ctx context.Context, req *pb.RequestWithId64) (resp
}
func (s *Server) GetRecording(ctx context.Context, req *emptypb.Empty) (resp *pb.RecordingListResponse, err error) {
s.Records.Call(func() error {
resp = &pb.RecordingListResponse{}
for record := range s.Records.Range {
resp.Data = append(resp.Data, &pb.Recording{
StreamPath: record.StreamPath,
StartTime: timestamppb.New(record.StartTime),
Type: reflect.TypeOf(record.recorder).String(),
Pointer: uint64(record.GetTaskPointer()),
})
}
return nil
resp = &pb.RecordingListResponse{}
s.Records.Range(func(record *RecordJob) bool {
resp.Data = append(resp.Data, &pb.Recording{
StreamPath: record.StreamPath,
StartTime: timestamppb.New(record.StartTime),
Type: reflect.TypeOf(record.recorder).String(),
Pointer: uint64(record.GetTaskPointer()),
})
return true
})
return
}
func (s *Server) GetSubscribers(context.Context, *pb.SubscribersRequest) (res *pb.SubscribersResponse, err error) {
s.Streams.Call(func() error {
s.CallOnStreamTask(func() {
var subscribers []*pb.SubscriberSnapShot
for subscriber := range s.Subscribers.Range {
meta, _ := json.Marshal(subscriber.GetDescriptions())
@@ -316,7 +311,6 @@ func (s *Server) GetSubscribers(context.Context, *pb.SubscribersRequest) (res *p
Data: subscribers,
Total: int32(s.Subscribers.Length),
}
return nil
})
return
}
@@ -336,7 +330,8 @@ func (s *Server) AudioTrackSnap(_ context.Context, req *pb.StreamSnapRequest) (r
}
}
pub.AudioTrack.Ring.Do(func(v *pkg.AVFrame) {
if len(v.Wraps) > 0 {
if len(v.Wraps) > 0 && v.TryRLock() {
defer v.RUnlock()
var snap pb.TrackSnapShot
snap.Sequence = v.Sequence
snap.Timestamp = uint32(v.Timestamp / time.Millisecond)
@@ -346,7 +341,7 @@ func (s *Server) AudioTrackSnap(_ context.Context, req *pb.StreamSnapRequest) (r
data.RingDataSize += uint32(v.Wraps[0].GetSize())
for i, wrap := range v.Wraps {
snap.Wrap[i] = &pb.Wrap{
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
Timestamp: uint32(wrap.GetSample().Timestamp / time.Millisecond),
Size: uint32(wrap.GetSize()),
Data: wrap.String(),
}
@@ -387,7 +382,7 @@ func (s *Server) api_VideoTrack_SSE(rw http.ResponseWriter, r *http.Request) {
snap.KeyFrame = frame.IDR
for i, wrap := range frame.Wraps {
snap.Wrap[i] = &pb.Wrap{
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
Timestamp: uint32(wrap.GetSample().Timestamp / time.Millisecond),
Size: uint32(wrap.GetSize()),
Data: wrap.String(),
}
@@ -420,7 +415,7 @@ func (s *Server) api_AudioTrack_SSE(rw http.ResponseWriter, r *http.Request) {
snap.KeyFrame = frame.IDR
for i, wrap := range frame.Wraps {
snap.Wrap[i] = &pb.Wrap{
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
Timestamp: uint32(wrap.GetSample().Timestamp / time.Millisecond),
Size: uint32(wrap.GetSize()),
Data: wrap.String(),
}
@@ -446,7 +441,8 @@ func (s *Server) VideoTrackSnap(ctx context.Context, req *pb.StreamSnapRequest)
}
}
pub.VideoTrack.Ring.Do(func(v *pkg.AVFrame) {
if len(v.Wraps) > 0 {
if len(v.Wraps) > 0 && v.TryRLock() {
defer v.RUnlock()
var snap pb.TrackSnapShot
snap.Sequence = v.Sequence
snap.Timestamp = uint32(v.Timestamp / time.Millisecond)
@@ -456,7 +452,7 @@ func (s *Server) VideoTrackSnap(ctx context.Context, req *pb.StreamSnapRequest)
data.RingDataSize += uint32(v.Wraps[0].GetSize())
for i, wrap := range v.Wraps {
snap.Wrap[i] = &pb.Wrap{
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
Timestamp: uint32(wrap.GetSample().Timestamp / time.Millisecond),
Size: uint32(wrap.GetSize()),
Data: wrap.String(),
}
@@ -489,126 +485,135 @@ func (s *Server) Shutdown(ctx context.Context, req *pb.RequestWithId) (res *pb.S
}
func (s *Server) ChangeSubscribe(ctx context.Context, req *pb.ChangeSubscribeRequest) (res *pb.SuccessResponse, err error) {
s.Streams.Call(func() error {
s.CallOnStreamTask(func() {
if subscriber, ok := s.Subscribers.Get(req.Id); ok {
if pub, ok := s.Streams.SafeGet(req.StreamPath); ok {
if pub, ok := s.Streams.Get(req.StreamPath); ok {
subscriber.Publisher.RemoveSubscriber(subscriber)
subscriber.StreamPath = req.StreamPath
pub.AddSubscriber(subscriber)
return nil
return
}
}
err = pkg.ErrNotFound
return nil
})
return &pb.SuccessResponse{}, err
}
func (s *Server) StopSubscribe(ctx context.Context, req *pb.RequestWithId) (res *pb.SuccessResponse, err error) {
s.Streams.Call(func() error {
s.CallOnStreamTask(func() {
if subscriber, ok := s.Subscribers.Get(req.Id); ok {
subscriber.Stop(errors.New("stop by api"))
} else {
err = pkg.ErrNotFound
}
return nil
})
return &pb.SuccessResponse{}, err
}
func (s *Server) PauseStream(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.SuccessResponse, err error) {
s.Streams.Call(func() error {
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Pause()
}
return nil
})
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Pause()
}
return &pb.SuccessResponse{}, err
}
func (s *Server) ResumeStream(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.SuccessResponse, err error) {
s.Streams.Call(func() error {
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Resume()
}
return nil
})
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Resume()
}
return &pb.SuccessResponse{}, err
}
func (s *Server) SetStreamSpeed(ctx context.Context, req *pb.SetStreamSpeedRequest) (res *pb.SuccessResponse, err error) {
s.Streams.Call(func() error {
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Speed = float64(req.Speed)
s.Scale = float64(req.Speed)
s.Info("set stream speed", "speed", req.Speed)
}
return nil
})
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Speed = float64(req.Speed)
s.Scale = float64(req.Speed)
s.Info("set stream speed", "speed", req.Speed)
}
return &pb.SuccessResponse{}, err
}
func (s *Server) SeekStream(ctx context.Context, req *pb.SeekStreamRequest) (res *pb.SuccessResponse, err error) {
s.Streams.Call(func() error {
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Seek(time.Unix(int64(req.TimeStamp), 0))
}
return nil
})
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Seek(time.Unix(int64(req.TimeStamp), 0))
}
return &pb.SuccessResponse{}, err
}
func (s *Server) StopPublish(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.SuccessResponse, err error) {
s.Streams.Call(func() error {
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Stop(task.ErrStopByUser)
}
return nil
})
if s, ok := s.Streams.SafeGet(req.StreamPath); ok {
s.Stop(task.ErrStopByUser)
}
return &pb.SuccessResponse{}, err
}
// /api/stream/list
func (s *Server) StreamList(_ context.Context, req *pb.StreamListRequest) (res *pb.StreamListResponse, err error) {
recordingMap := make(map[string][]*pb.RecordingDetail)
s.Records.Call(func() error {
for record := range s.Records.Range {
recordingMap[record.StreamPath] = append(recordingMap[record.StreamPath], &pb.RecordingDetail{
FilePath: record.RecConf.FilePath,
Mode: record.Mode,
Fragment: durationpb.New(record.RecConf.Fragment),
Append: record.RecConf.Append,
PluginName: record.Plugin.Meta.Name,
Pointer: uint64(record.GetTaskPointer()),
})
for record := range s.Records.Range {
recordingMap[record.StreamPath] = append(recordingMap[record.StreamPath], &pb.RecordingDetail{
FilePath: record.RecConf.FilePath,
Mode: record.RecConf.Mode,
Fragment: durationpb.New(record.RecConf.Fragment),
Append: record.RecConf.Append,
PluginName: record.Plugin.Meta.Name,
Pointer: uint64(record.GetTaskPointer()),
})
}
var streams []*pb.StreamInfo
for publisher := range s.Streams.SafeRange {
info, err := s.getStreamInfo(publisher)
if err != nil {
continue
}
return nil
})
s.Streams.Call(func() error {
var streams []*pb.StreamInfo
for publisher := range s.Streams.Range {
info, err := s.getStreamInfo(publisher)
if err != nil {
continue
}
info.Data.Recording = recordingMap[info.Data.Path]
streams = append(streams, info.Data)
}
res = &pb.StreamListResponse{Data: streams, Total: int32(s.Streams.Length), PageNum: req.PageNum, PageSize: req.PageSize}
return nil
})
info.Data.Recording = recordingMap[info.Data.Path]
streams = append(streams, info.Data)
}
res = &pb.StreamListResponse{Data: streams, Total: int32(s.Streams.Length), PageNum: req.PageNum, PageSize: req.PageSize}
return
}
func (s *Server) WaitList(context.Context, *emptypb.Empty) (res *pb.StreamWaitListResponse, err error) {
s.Streams.Call(func() error {
s.CallOnStreamTask(func() {
res = &pb.StreamWaitListResponse{
List: make(map[string]int32),
}
for subs := range s.Waiting.Range {
res.List[subs.StreamPath] = int32(subs.Length)
}
return nil
})
return
}
// GetSubscriptionProgress reports the step-by-step progress of a subscription
// that is still waiting for its stream, identified by req.StreamPath.
// The lookup runs on the stream task goroutine via CallOnStreamTask; when no
// waiting subscription exists for the path, pkg.ErrNotFound is returned.
func (s *Server) GetSubscriptionProgress(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.SubscriptionProgressResponse, err error) {
	s.CallOnStreamTask(func() {
		waitStream, ok := s.Waiting.Get(req.StreamPath)
		if !ok {
			err = pkg.ErrNotFound
			return
		}
		progress := waitStream.Progress
		data := &pb.SubscriptionProgressData{
			CurrentStep: int32(progress.CurrentStep),
		}
		// Convert each recorded step; zero timestamps stay unset in protobuf.
		for _, step := range progress.Steps {
			pbStep := &pb.Step{
				Name:        step.Name,
				Description: step.Description,
				Error:       step.Error,
			}
			if !step.StartedAt.IsZero() {
				pbStep.StartedAt = timestamppb.New(step.StartedAt)
			}
			if !step.CompletedAt.IsZero() {
				pbStep.CompletedAt = timestamppb.New(step.CompletedAt)
			}
			data.Steps = append(data.Steps, pbStep)
		}
		res = &pb.SubscriptionProgressResponse{
			Code:    0,
			Message: "success",
			Data:    data,
		}
	})
	return
}
@@ -677,10 +682,10 @@ func (s *Server) Summary(context.Context, *emptypb.Empty) (res *pb.SummaryRespon
netWorks = append(netWorks, info)
}
res.StreamCount = int32(s.Streams.Length)
res.PullCount = int32(s.Pulls.Length)
res.PushCount = int32(s.Pushs.Length)
res.PullCount = int32(s.Pulls.Length())
res.PushCount = int32(s.Pushs.Length())
res.SubscribeCount = int32(s.Subscribers.Length)
res.RecordCount = int32(s.Records.Length)
res.RecordCount = int32(s.Records.Length())
res.TransformCount = int32(s.Transforms.Length)
res.NetWork = netWorks
s.lastSummary = res
@@ -718,7 +723,7 @@ func (s *Server) GetConfigFile(_ context.Context, req *emptypb.Empty) (res *pb.G
func (s *Server) UpdateConfigFile(_ context.Context, req *pb.UpdateConfigFileRequest) (res *pb.SuccessResponse, err error) {
if s.configFileContent != nil {
s.configFileContent = []byte(req.Content)
os.WriteFile(filepath.Join(ExecDir, s.conf.(string)), s.configFileContent, 0644)
os.WriteFile(s.configFilePath, s.configFileContent, 0644)
res = &pb.SuccessResponse{}
} else {
err = pkg.ErrNotFound
@@ -762,7 +767,7 @@ func (s *Server) GetConfig(_ context.Context, req *pb.GetConfigRequest) (res *pb
return
}
func (s *Server) GetRecordList(ctx context.Context, req *pb.ReqRecordList) (resp *pb.ResponseList, err error) {
func (s *Server) GetRecordList(ctx context.Context, req *pb.ReqRecordList) (resp *pb.RecordResponseList, err error) {
if s.DB == nil {
err = pkg.ErrNoDB
return
@@ -783,8 +788,61 @@ func (s *Server) GetRecordList(ctx context.Context, req *pb.ReqRecordList) (resp
} else if req.StreamPath != "" {
query = query.Where("stream_path = ?", req.StreamPath)
}
if req.Mode != "" {
query = query.Where("mode = ?", req.Mode)
if req.Type != "" {
query = query.Where("type = ?", req.Type)
}
startTime, endTime, err := util.TimeRangeQueryParse(url.Values{"range": []string{req.Range}, "start": []string{req.Start}, "end": []string{req.End}})
if err == nil {
if !startTime.IsZero() {
query = query.Where("start_time >= ?", startTime)
}
if !endTime.IsZero() {
query = query.Where("end_time <= ?", endTime)
}
}
query.Count(&totalCount)
err = query.Offset(int(offset)).Limit(int(req.PageSize)).Order("start_time desc").Find(&result).Error
if err != nil {
return
}
resp = &pb.RecordResponseList{
Total: uint32(totalCount),
PageNum: req.PageNum,
PageSize: req.PageSize,
}
for _, recordFile := range result {
resp.Data = append(resp.Data, &pb.RecordFile{
Id: uint32(recordFile.ID),
StartTime: timestamppb.New(recordFile.StartTime),
EndTime: timestamppb.New(recordFile.EndTime),
FilePath: recordFile.FilePath,
StreamPath: recordFile.StreamPath,
})
}
return
}
func (s *Server) GetEventRecordList(ctx context.Context, req *pb.ReqRecordList) (resp *pb.EventRecordResponseList, err error) {
if s.DB == nil {
err = pkg.ErrNoDB
return
}
if req.PageSize == 0 {
req.PageSize = 10
}
if req.PageNum == 0 {
req.PageNum = 1
}
offset := (req.PageNum - 1) * req.PageSize // 计算偏移量
var totalCount int64 //总条数
var result []*EventRecordStream
query := s.DB.Model(&EventRecordStream{})
if strings.Contains(req.StreamPath, "*") {
query = query.Where("stream_path like ?", strings.ReplaceAll(req.StreamPath, "*", "%"))
} else if req.StreamPath != "" {
query = query.Where("stream_path = ?", req.StreamPath)
}
if req.Type != "" {
query = query.Where("type = ?", req.Type)
@@ -807,21 +865,22 @@ func (s *Server) GetRecordList(ctx context.Context, req *pb.ReqRecordList) (resp
if err != nil {
return
}
resp = &pb.ResponseList{
resp = &pb.EventRecordResponseList{
Total: uint32(totalCount),
PageNum: req.PageNum,
PageSize: req.PageSize,
}
for _, recordFile := range result {
resp.Data = append(resp.Data, &pb.RecordFile{
resp.Data = append(resp.Data, &pb.EventRecordFile{
Id: uint32(recordFile.ID),
StartTime: timestamppb.New(recordFile.StartTime),
EndTime: timestamppb.New(recordFile.EndTime),
FilePath: recordFile.FilePath,
StreamPath: recordFile.StreamPath,
EventLevel: recordFile.EventLevel,
EventDesc: recordFile.EventDesc,
EventId: recordFile.EventId,
EventName: recordFile.EventName,
EventDesc: recordFile.EventDesc,
})
}
return
@@ -900,7 +959,7 @@ func (s *Server) DeleteRecord(ctx context.Context, req *pb.ReqRecordDelete) (res
func (s *Server) GetTransformList(ctx context.Context, req *emptypb.Empty) (res *pb.TransformListResponse, err error) {
res = &pb.TransformListResponse{}
s.Transforms.Call(func() error {
s.Transforms.Call(func() {
for transform := range s.Transforms.Range {
info := &pb.Transform{
StreamPath: transform.StreamPath,
@@ -912,13 +971,126 @@ func (s *Server) GetTransformList(ctx context.Context, req *emptypb.Empty) (res
result, err = yaml.Marshal(transform.TransformJob.Config)
if err != nil {
s.Error("marshal transform config failed", "error", err)
return err
return
}
info.Config = string(result)
}
res.Data = append(res.Data, info)
}
return nil
})
return
}
// GetAlarmList returns one page of persisted alarm records, optionally
// filtered by creation-time range, alarm type, stream path and stream name
// (the last two support "*" wildcards, translated to SQL LIKE patterns).
//
// NOTE(review): failures are reported in-band via res.Code/res.Message
// (HTTP-style codes) while the error return stays nil, so callers must
// inspect the response code rather than the gRPC error.
func (s *Server) GetAlarmList(ctx context.Context, req *pb.AlarmListRequest) (res *pb.AlarmListResponse, err error) {
	// Initialize the response object, echoing the paging parameters.
	res = &pb.AlarmListResponse{
		Code:     0,
		Message:  "success",
		PageNum:  req.PageNum,
		PageSize: req.PageSize,
	}
	// Check whether the database connection is available.
	if s.DB == nil {
		res.Code = 500
		res.Message = "数据库连接不可用"
		return res, nil
	}
	// Build the base query.
	query := s.DB.Model(&AlarmInfo{})
	// Add the creation-time range filter. A parse error silently skips the
	// filter (best-effort, consistent with the record-list endpoints).
	startTime, endTime, err := util.TimeRangeQueryParse(url.Values{
		"range": []string{req.Range},
		"start": []string{req.Start},
		"end":   []string{req.End},
	})
	if err == nil {
		if !startTime.IsZero() {
			query = query.Where("created_at >= ?", startTime)
		}
		if !endTime.IsZero() {
			query = query.Where("created_at <= ?", endTime)
		}
	}
	// Add the alarm-type filter (0 means no filtering).
	if req.AlarmType != 0 {
		query = query.Where("alarm_type = ?", req.AlarmType)
	}
	// Add the StreamPath filter.
	if req.StreamPath != "" {
		if strings.Contains(req.StreamPath, "*") {
			// Wildcard search: "*" maps to the SQL "%" wildcard.
			query = query.Where("stream_path LIKE ?", strings.ReplaceAll(req.StreamPath, "*", "%"))
		} else {
			query = query.Where("stream_path = ?", req.StreamPath)
		}
	}
	// Add the StreamName filter.
	if req.StreamName != "" {
		if strings.Contains(req.StreamName, "*") {
			// Wildcard search: "*" maps to the SQL "%" wildcard.
			query = query.Where("stream_name LIKE ?", strings.ReplaceAll(req.StreamName, "*", "%"))
		} else {
			query = query.Where("stream_name = ?", req.StreamName)
		}
	}
	// Count the total number of matching records.
	var total int64
	if err = query.Count(&total).Error; err != nil {
		res.Code = 500
		res.Message = "查询告警信息总数失败: " + err.Error()
		return res, nil
	}
	res.Total = int32(total)
	// Return immediately when there are no matching records.
	if total == 0 {
		return res, nil
	}
	// Normalize the paging parameters.
	if req.PageNum <= 0 {
		req.PageNum = 1
	}
	if req.PageSize <= 0 {
		req.PageSize = 10
	}
	// Fetch the requested page, newest first.
	var alarmInfoList []AlarmInfo
	offset := (req.PageNum - 1) * req.PageSize
	if err = query.Order("created_at DESC").
		Offset(int(offset)).
		Limit(int(req.PageSize)).
		Find(&alarmInfoList).Error; err != nil {
		res.Code = 500
		res.Message = "查询告警信息失败: " + err.Error()
		return res, nil
	}
	// Convert the rows to the protobuf representation.
	res.Data = make([]*pb.AlarmInfo, len(alarmInfoList))
	for i, alarm := range alarmInfoList {
		res.Data[i] = &pb.AlarmInfo{
			Id:         uint32(alarm.ID),
			ServerInfo: alarm.ServerInfo,
			StreamName: alarm.StreamName,
			StreamPath: alarm.StreamPath,
			AlarmDesc:  alarm.AlarmDesc,
			AlarmName:  alarm.AlarmName,
			AlarmType:  int32(alarm.AlarmType),
			IsSent:     alarm.IsSent,
			CreatedAt:  timestamppb.New(alarm.CreatedAt),
			UpdatedAt:  timestamppb.New(alarm.UpdatedAt),
			FilePath:   alarm.FilePath,
		}
	}
	return res, nil
}

324
api_config.go Normal file
View File

@@ -0,0 +1,324 @@
package m7s
import (
"net/http"
"reflect"
"strconv"
"strings"
"time"
"gopkg.in/yaml.v3"
)
// getIndent reports how many leading space characters line has.
func getIndent(line string) int {
	for i, r := range line {
		if r != ' ' {
			return i
		}
	}
	// The line is empty or consists entirely of spaces.
	return len(line)
}
// addCommentsToYAML rewrites YAML generated from the config metadata maps
// (see extractStructConfig / extractFieldConfig) into plain, commented YAML:
// the special "_description" and "_enum" entries become "#" comments and the
// "_value" entry becomes the key's actual value. Scalar values are emitted as
// a single "key: value # comment" line, while struct/map values keep the
// comments on the lines directly above the key. A final pass drops blank
// lines and any leftover lines that still start with "_".
func addCommentsToYAML(yamlData []byte) []byte {
	lines := strings.Split(string(yamlData), "\n")
	var result strings.Builder
	var commentBuffer []string // comments collected for the pending key
	var keyLineBuffer string   // key name seen but not yet written out
	var keyLineIndent int      // indent column of the pending key
	inMultilineValue := false  // true while copying a struct/map value body
	for _, line := range lines {
		trimmedLine := strings.TrimSpace(line)
		indent := getIndent(line)
		if strings.HasPrefix(trimmedLine, "_description:") {
			// Buffer the description; it is flushed when _value is reached.
			description := strings.TrimSpace(strings.TrimPrefix(trimmedLine, "_description:"))
			commentBuffer = append(commentBuffer, "# "+description)
		} else if strings.HasPrefix(trimmedLine, "_enum:") {
			// "可选值" = allowed values.
			enum := strings.TrimSpace(strings.TrimPrefix(trimmedLine, "_enum:"))
			commentBuffer = append(commentBuffer, "# 可选值: "+enum)
		} else if strings.HasPrefix(trimmedLine, "_value:") {
			valueStr := strings.TrimSpace(strings.TrimPrefix(trimmedLine, "_value:"))
			if valueStr != "" && valueStr != "{}" && valueStr != "[]" {
				// Single line value
				result.WriteString(strings.Repeat(" ", keyLineIndent))
				result.WriteString(keyLineBuffer)
				result.WriteString(": ")
				result.WriteString(valueStr)
				if len(commentBuffer) > 0 {
					// Append every buffered comment inline after the value.
					result.WriteString(" ")
					for j, c := range commentBuffer {
						c = strings.TrimSpace(strings.TrimPrefix(c, "#"))
						result.WriteString("# " + c)
						if j < len(commentBuffer)-1 {
							result.WriteString(" ")
						}
					}
				}
				result.WriteString("\n")
			} else {
				// Multi-line value (struct/map): comments go above the key.
				for _, comment := range commentBuffer {
					result.WriteString(strings.Repeat(" ", keyLineIndent))
					result.WriteString(comment)
					result.WriteString("\n")
				}
				result.WriteString(strings.Repeat(" ", keyLineIndent))
				result.WriteString(keyLineBuffer)
				result.WriteString(":")
				result.WriteString("\n")
				inMultilineValue = true
			}
			commentBuffer = nil
			keyLineBuffer = ""
			keyLineIndent = 0
		} else if strings.Contains(trimmedLine, ":") {
			// This is a key line
			if keyLineBuffer != "" { // flush previous key line
				result.WriteString(strings.Repeat(" ", keyLineIndent) + keyLineBuffer + ":\n")
			}
			inMultilineValue = false
			keyLineBuffer = strings.TrimSuffix(trimmedLine, ":")
			keyLineIndent = indent
		} else if inMultilineValue {
			// These are the lines of a multiline value
			if trimmedLine != "" {
				result.WriteString(line + "\n")
			}
		}
	}
	// Flush a trailing key that never received a _value entry.
	if keyLineBuffer != "" {
		result.WriteString(strings.Repeat(" ", keyLineIndent) + keyLineBuffer + ":\n")
	}
	// Final cleanup to remove empty lines and special keys
	finalOutput := []string{}
	for _, line := range strings.Split(result.String(), "\n") {
		trimmed := strings.TrimSpace(line)
		if trimmed == "" || strings.HasPrefix(trimmed, "_") {
			continue
		}
		finalOutput = append(finalOutput, line)
	}
	return []byte(strings.Join(finalOutput, "\n"))
}
// api_Config_YAML_All serves the aggregated configuration of the server and
// every registered plugin as annotated YAML (comments are injected by
// addCommentsToYAML).
//
// Query parameters:
//   - name:   when non-empty, only the section whose name matches (or
//     "global") is emitted; matching is case-insensitive.
//   - common: unless set to "false", the common plugin configuration is
//     merged underneath each section, with section values taking precedence.
func (s *Server) api_Config_YAML_All(rw http.ResponseWriter, r *http.Request) {
	type section struct {
		name string
		data any
	}
	q := r.URL.Query()
	filterName := q.Get("name")
	mergeCommon := q.Get("common") != "false"

	// Common config used as the base layer of every merged section.
	var commonConf map[string]any
	if mergeCommon {
		if c, ok := extractStructConfig(reflect.ValueOf(s.Plugin.GetCommonConf())).(map[string]any); ok {
			commonConf = c
		}
	}
	// overlay layers conf on top of the common config when merging is
	// active; otherwise conf is returned unchanged.
	overlay := func(conf map[string]any) any {
		if !mergeCommon || commonConf == nil {
			return conf
		}
		merged := make(map[string]any, len(commonConf)+len(conf))
		for k, v := range commonConf {
			merged[k] = v
		}
		for k, v := range conf {
			merged[k] = v // section-specific value overrides common
		}
		return merged
	}

	sections := []section{}
	// Global server config comes first.
	if filterName == "" || filterName == "global" {
		if globalConf, ok := extractStructConfig(reflect.ValueOf(s.ServerConfig)).(map[string]any); ok {
			sections = append(sections, section{"global", overlay(globalConf)})
		}
	}
	// One section per registered plugin.
	for _, meta := range plugins {
		if filterName != "" && !strings.EqualFold(meta.Name, filterName) {
			continue
		}
		configType := meta.Type
		if configType.Kind() == reflect.Ptr {
			configType = configType.Elem()
		}
		pluginConf, ok := extractStructConfig(reflect.New(configType)).(map[string]any)
		if !ok {
			continue
		}
		pluginConf["enable"] = map[string]any{
			"_value":       true,
			"_description": "在global配置disableall时能启用特定插件",
		}
		sections = append(sections, section{strings.ToLower(meta.Name), overlay(pluginConf)})
	}

	// Serialize every section before writing anything, so a marshal failure
	// can still produce a clean HTTP error response.
	var out strings.Builder
	for _, sec := range sections {
		if sec.data == nil {
			continue
		}
		partYAML, err := yaml.Marshal(map[string]any{sec.name: sec.data})
		if err != nil {
			http.Error(rw, err.Error(), http.StatusInternalServerError)
			return
		}
		out.Write(partYAML)
	}
	rw.Header().Set("Content-Type", "text/yaml; charset=utf-8")
	rw.Write(addCommentsToYAML([]byte(out.String())))
}
// extractStructConfig converts a (possibly pointer-to-) struct value into a
// map keyed by each field's yaml name, with values produced by
// extractFieldConfig. Nil pointers and non-struct values yield nil.
// Unexported fields, fields tagged `yaml:"-"`, and the framework's embedded
// Plugin / UnimplementedApiServer members are skipped.
func extractStructConfig(v reflect.Value) any {
	if v.Kind() == reflect.Ptr {
		if v.IsNil() {
			return nil
		}
		v = v.Elem()
	}
	if v.Kind() != reflect.Struct {
		return nil
	}
	out := make(map[string]any)
	structType := v.Type()
	for i := 0; i < structType.NumField(); i++ {
		sf := structType.Field(i)
		if !sf.IsExported() {
			continue
		}
		// Skip framework plumbing embedded in config structs.
		ft := sf.Type
		if ft.Kind() == reflect.Ptr {
			ft = ft.Elem()
		}
		switch ft.Name() {
		case "Plugin", "UnimplementedApiServer":
			continue
		}
		tag := sf.Tag.Get("yaml")
		if tag == "-" {
			continue
		}
		// The yaml key is the tag's first comma-separated part, falling
		// back to the lowercased field name.
		key, _, _ := strings.Cut(tag, ",")
		if key == "" {
			key = strings.ToLower(sf.Name)
		}
		out[key] = extractFieldConfig(sf, v.Field(i))
	}
	return out
}
// extractFieldConfig describes a single struct field as config metadata.
// The `desc` and `enum` struct tags become "_description" / "_enum" entries
// and the field's (possibly defaulted) value is stored under "_value"; when
// neither tag is present the bare value is returned instead of the wrapper
// map. Nil pointers are replaced with a zero value of the element type so
// the structure can still be described.
func extractFieldConfig(field reflect.StructField, value reflect.Value) any {
	result := make(map[string]any)
	description := field.Tag.Get("desc")
	enum := field.Tag.Get("enum")
	if description != "" {
		result["_description"] = description
	}
	if enum != "" {
		result["_enum"] = enum
	}
	kind := value.Kind()
	if kind == reflect.Ptr {
		if value.IsNil() {
			value = reflect.New(value.Type().Elem())
		}
		value = value.Elem()
		kind = value.Kind()
	}
	// time.Duration's Kind is Int64, not Struct, so it must be detected
	// before the kind switch; checking it inside the Struct case (as the
	// previous code did) could never succeed, leaving durations rendered as
	// raw nanosecond integers instead of strings like "1m30s".
	if dur, ok := value.Interface().(time.Duration); ok {
		result["_value"] = extractDurationConfig(field, dur)
	} else {
		switch kind {
		case reflect.Struct:
			result["_value"] = extractStructConfig(value)
		case reflect.Map, reflect.Slice:
			if value.IsNil() {
				// Represent empty containers explicitly rather than as null.
				result["_value"] = make(map[string]any)
				if kind == reflect.Slice {
					result["_value"] = make([]any, 0)
				}
			} else {
				result["_value"] = value.Interface()
			}
		default:
			result["_value"] = extractBasicTypeConfig(field, value)
		}
	}
	if description == "" && enum == "" {
		return result["_value"]
	}
	return result
}
// extractBasicTypeConfig returns a scalar field's current value, falling
// back to the parsed `default` struct tag when the value is the zero value
// for its type.
func extractBasicTypeConfig(field reflect.StructField, value reflect.Value) any {
	if !value.IsZero() {
		return value.Interface()
	}
	if def := field.Tag.Get("default"); def != "" {
		return parseDefaultValue(def, field.Type)
	}
	return value.Interface()
}
func extractDurationConfig(field reflect.StructField, value time.Duration) any {
if value == 0 {
if defaultValue := field.Tag.Get("default"); defaultValue != "" {
return defaultValue
}
}
return value.String()
}
func parseDefaultValue(defaultValue string, t reflect.Type) any {
switch t.Kind() {
case reflect.String:
return defaultValue
case reflect.Bool:
return defaultValue == "true"
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if v, err := strconv.ParseInt(defaultValue, 10, 64); err == nil {
return v
}
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
if v, err := strconv.ParseUint(defaultValue, 10, 64); err == nil {
return v
}
case reflect.Float32, reflect.Float64:
if v, err := strconv.ParseFloat(defaultValue, 64); err == nil {
return v
}
}
return defaultValue
}

279
doc/arch/auth.md Normal file
View File

@@ -0,0 +1,279 @@
# Stream Authentication Mechanism
Monibuca V5 provides a comprehensive stream authentication mechanism to control access permissions for publishing and subscribing to streams. The authentication mechanism supports multiple methods, including key-based signature authentication and custom authentication handlers.
## Authentication Principles
### 1. Authentication Flow Sequence Diagrams
#### Publishing Authentication Sequence Diagram
```mermaid
sequenceDiagram
participant Client as Publishing Client
participant Plugin as Plugin
participant AuthHandler as Auth Handler
participant Server as Server
Client->>Plugin: Publishing Request (streamPath, args)
Plugin->>Plugin: Check EnableAuth && Type == PublishTypeServer
alt Authentication Enabled
Plugin->>Plugin: Look for custom auth handler
alt Custom Handler Exists
Plugin->>AuthHandler: onAuthPub(publisher)
AuthHandler->>AuthHandler: Execute custom auth logic
AuthHandler-->>Plugin: Auth result
else Use Key-based Auth
Plugin->>Plugin: Check if conf.Key exists
alt Key Configured
Plugin->>Plugin: auth(streamPath, key, secret, expire)
Plugin->>Plugin: Validate timestamp
Plugin->>Plugin: Validate secret length
Plugin->>Plugin: Calculate MD5 signature
Plugin->>Plugin: Compare signatures
Plugin-->>Plugin: Auth result
end
end
alt Auth Failed
Plugin-->>Client: Auth failed, reject publishing
else Auth Success
Plugin->>Server: Create Publisher and add to stream management
Server-->>Plugin: Publishing successful
Plugin-->>Client: Publishing established successfully
end
else Auth Disabled
Plugin->>Server: Create Publisher directly
Server-->>Plugin: Publishing successful
Plugin-->>Client: Publishing established successfully
end
```
#### Subscribing Authentication Sequence Diagram
```mermaid
sequenceDiagram
participant Client as Subscribing Client
participant Plugin as Plugin
participant AuthHandler as Auth Handler
participant Server as Server
Client->>Plugin: Subscribing Request (streamPath, args)
Plugin->>Plugin: Check EnableAuth && Type == SubscribeTypeServer
alt Authentication Enabled
Plugin->>Plugin: Look for custom auth handler
alt Custom Handler Exists
Plugin->>AuthHandler: onAuthSub(subscriber)
AuthHandler->>AuthHandler: Execute custom auth logic
AuthHandler-->>Plugin: Auth result
else Use Key-based Auth
Plugin->>Plugin: Check if conf.Key exists
alt Key Configured
Plugin->>Plugin: auth(streamPath, key, secret, expire)
Plugin->>Plugin: Validate timestamp
Plugin->>Plugin: Validate secret length
Plugin->>Plugin: Calculate MD5 signature
Plugin->>Plugin: Compare signatures
Plugin-->>Plugin: Auth result
end
end
alt Auth Failed
Plugin-->>Client: Auth failed, reject subscribing
else Auth Success
Plugin->>Server: Create Subscriber and wait for Publisher
Server->>Server: Wait for stream publishing and track ready
Server-->>Plugin: Subscribing ready
Plugin-->>Client: Start streaming data transmission
end
else Auth Disabled
Plugin->>Server: Create Subscriber directly
Server-->>Plugin: Subscribing successful
Plugin-->>Client: Start streaming data transmission
end
```
### 2. Authentication Trigger Points
Authentication is triggered in the following two scenarios:
- **Publishing Authentication**: Triggered when there's a publishing request in the `PublishWithConfig` method
- **Subscribing Authentication**: Triggered when there's a subscribing request in the `SubscribeWithConfig` method
### 3. Authentication Condition Checks
Authentication is only executed when the following conditions are met simultaneously:
```go
if p.config.EnableAuth && publisher.Type == PublishTypeServer
```
- `EnableAuth`: Authentication is enabled in the plugin configuration
- `Type == PublishTypeServer/SubscribeTypeServer`: Only authenticate server-type publishing/subscribing
### 4. Authentication Method Priority
The system executes authentication in the following priority order:
1. **Custom Authentication Handler** (Highest priority)
2. **Key-based Signature Authentication**
3. **No Authentication** (Default pass)
## Custom Authentication Handlers
### Publishing Authentication Handler
```go
onAuthPub := p.Meta.OnAuthPub
if onAuthPub == nil {
onAuthPub = p.Server.Meta.OnAuthPub
}
if onAuthPub != nil {
if err = onAuthPub(publisher).Await(); err != nil {
p.Warn("auth failed", "error", err)
return
}
}
```
Authentication handler lookup order:
1. Plugin-level authentication handler `p.Meta.OnAuthPub`
2. Server-level authentication handler `p.Server.Meta.OnAuthPub`
### Subscribing Authentication Handler
```go
onAuthSub := p.Meta.OnAuthSub
if onAuthSub == nil {
onAuthSub = p.Server.Meta.OnAuthSub
}
if onAuthSub != nil {
if err = onAuthSub(subscriber).Await(); err != nil {
p.Warn("auth failed", "error", err)
return
}
}
```
## Key-based Signature Authentication
When there's no custom authentication handler, if a Key is configured, the system will use MD5-based signature authentication mechanism.
### Authentication Algorithm
```go
func (p *Plugin) auth(streamPath string, key string, secret string, expire string) (err error) {
// 1. Validate expiration time
if unixTime, err := strconv.ParseInt(expire, 16, 64); err != nil || time.Now().Unix() > unixTime {
return fmt.Errorf("auth failed expired")
}
// 2. Validate secret length
if len(secret) != 32 {
return fmt.Errorf("auth failed secret length must be 32")
}
// 3. Calculate the true secret
trueSecret := md5.Sum([]byte(key + streamPath + expire))
// 4. Compare secrets
if secret == hex.EncodeToString(trueSecret[:]) {
return nil
}
return fmt.Errorf("auth failed invalid secret")
}
```
### Signature Calculation Steps
1. **Construct signature string**: `key + streamPath + expire`
2. **MD5 encryption**: Perform MD5 hash on the signature string
3. **Hexadecimal encoding**: Convert MD5 result to 32-character hexadecimal string
4. **Verify signature**: Compare calculation result with client-provided secret
### Parameter Description
| Parameter | Type | Description | Example |
|-----------|------|-------------|---------|
| key | string | Secret key set in configuration file | "mySecretKey" |
| streamPath | string | Stream path | "live/test" |
| expire | string | Expiration timestamp (hexadecimal) | "64a1b2c3" |
| secret | string | Client-calculated signature (32-char hex) | "5d41402abc4b2a76b9719d911017c592" |
### Timestamp Handling
- Expiration time uses hexadecimal Unix timestamp
- System validates if current time exceeds expiration time
- Timestamp parsing failure or expiration will cause authentication failure
## API Key Generation
The system also provides API interfaces for key generation, supporting authentication needs for admin dashboard:
```go
p.handle("/api/secret/{type}/{streamPath...}", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
// JWT Token validation
authHeader := r.Header.Get("Authorization")
tokenString := strings.TrimPrefix(authHeader, "Bearer ")
_, err := p.Server.ValidateToken(tokenString)
// Generate publishing or subscribing key
streamPath := r.PathValue("streamPath")
t := r.PathValue("type")
expire := r.URL.Query().Get("expire")
if t == "publish" {
secret := md5.Sum([]byte(p.config.Publish.Key + streamPath + expire))
rw.Write([]byte(hex.EncodeToString(secret[:])))
} else if t == "subscribe" {
secret := md5.Sum([]byte(p.config.Subscribe.Key + streamPath + expire))
rw.Write([]byte(hex.EncodeToString(secret[:])))
}
}))
```
## Configuration Examples
### Enable Authentication
```yaml
# Plugin configuration
rtmp:
enableAuth: true
publish:
key: "your-publish-key"
subscribe:
key: "your-subscribe-key"
```
### Publishing URL Example
```
rtmp://localhost/live/test?secret=5d41402abc4b2a76b9719d911017c592&expire=64a1b2c3
```
### Subscribing URL Example
```
http://localhost:8080/flv/live/test.flv?secret=a1b2c3d4e5f6789012345678901234ab&expire=64a1b2c3
```
## Security Considerations
1. **Key Protection**: Keys in configuration files should be properly secured to prevent leakage
2. **Time Window**: Set reasonable expiration times to balance security and usability
3. **HTTPS Transport**: Use HTTPS for transmitting authentication parameters in production
4. **Logging**: Authentication failures are logged as warnings for security auditing
## Error Handling
Common causes of authentication failure:
- `auth failed expired`: Timestamp expired or format error
- `auth failed secret length must be 32`: Incorrect secret length
- `auth failed invalid secret`: Signature verification failed
- `invalid token`: JWT verification failed during API key generation

View File

@@ -93,7 +93,7 @@ Plugins can add global middleware using the `AddMiddleware` method to handle all
Example code:
```go
func (p *YourPlugin) OnInit() {
func (p *YourPlugin) Start() {
// Add authentication middleware
p.GetCommonConf().AddMiddleware(func(next http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {

View File

@@ -26,7 +26,7 @@
### Plugin Development
[plugin/README.md](../plugin/README.md)
[plugin/README.md](../../plugin/README.md)
## Task System

View File

@@ -116,7 +116,7 @@ type MyLogHandler struct {
}
// Add handler during plugin initialization
func (p *MyPlugin) OnInit() error {
func (p *MyPlugin) Start() error {
handler := &MyLogHandler{}
p.Server.LogHandler.Add(handler)
return nil

View File

@@ -93,7 +93,7 @@ Plugins start through the `Plugin.Start` method, executing these operations in s
- Start QUIC services (if implementing IQUICPlugin interface)
4. Plugin Initialization Callback
- Call plugin's OnInit method
- Call plugin's Start method
- Handle initialization errors
5. Timer Task Setup
@@ -109,7 +109,7 @@ The startup phase is crucial for plugins to begin providing services, with all p
### 4. Stop Phase (Stop)
The plugin stop phase is implemented through the `Plugin.OnStop` method and related stop handling logic, including:
The plugin stop phase is implemented through the `Plugin.OnDispose` method and related stop handling logic, including:
1. Service Shutdown
- Stop all network services (HTTP/HTTPS/TCP/UDP/QUIC)
@@ -127,7 +127,7 @@ The plugin stop phase is implemented through the `Plugin.OnStop` method and rela
- Trigger stop event notifications
4. Callback Processing
- Call plugin's custom OnStop method
- Call plugin's custom OnDispose method
- Execute registered stop callback functions
- Handle errors during stop process
@@ -143,7 +143,7 @@ The stop phase aims to ensure plugins can safely and cleanly stop running withou
The plugin destroy phase is implemented through the `Plugin.Dispose` method, the final phase in a plugin's lifecycle, including:
1. Resource Release
- Call plugin's OnStop method for stop processing
- Call plugin's OnDispose method for stop processing
- Remove from server's plugin list
- Release all allocated system resources

View File

@@ -0,0 +1,279 @@
# 流鉴权机制
Monibuca V5 提供了完善的流鉴权机制,用于控制推流和拉流的访问权限。鉴权机制支持多种方式,包括基于密钥的签名鉴权和自定义鉴权处理器。
## 鉴权原理
### 1. 鉴权流程时序图
#### 推流鉴权时序图
```mermaid
sequenceDiagram
participant Client as 推流客户端
participant Plugin as 插件
participant AuthHandler as 鉴权处理器
participant Server as 服务器
Client->>Plugin: 推流请求 (streamPath, args)
Plugin->>Plugin: 检查 EnableAuth && Type == PublishTypeServer
alt 启用鉴权
Plugin->>Plugin: 查找自定义鉴权处理器
alt 存在自定义处理器
Plugin->>AuthHandler: onAuthPub(publisher)
AuthHandler->>AuthHandler: 执行自定义鉴权逻辑
AuthHandler-->>Plugin: 鉴权结果
else 使用密钥鉴权
Plugin->>Plugin: 检查 conf.Key 是否存在
alt 配置了Key
Plugin->>Plugin: auth(streamPath, key, secret, expire)
Plugin->>Plugin: 验证时间戳
Plugin->>Plugin: 验证secret长度
Plugin->>Plugin: 计算MD5签名
Plugin->>Plugin: 比较签名
Plugin-->>Plugin: 鉴权结果
end
end
alt 鉴权失败
Plugin-->>Client: 鉴权失败,拒绝推流
else 鉴权成功
Plugin->>Server: 创建Publisher并添加到流管理
Server-->>Plugin: 推流成功
Plugin-->>Client: 推流建立成功
end
else 未启用鉴权
Plugin->>Server: 直接创建Publisher
Server-->>Plugin: 推流成功
Plugin-->>Client: 推流建立成功
end
```
#### 拉流鉴权时序图
```mermaid
sequenceDiagram
participant Client as 拉流客户端
participant Plugin as 插件
participant AuthHandler as 鉴权处理器
participant Server as 服务器
Client->>Plugin: 拉流请求 (streamPath, args)
Plugin->>Plugin: 检查 EnableAuth && Type == SubscribeTypeServer
alt 启用鉴权
Plugin->>Plugin: 查找自定义鉴权处理器
alt 存在自定义处理器
Plugin->>AuthHandler: onAuthSub(subscriber)
AuthHandler->>AuthHandler: 执行自定义鉴权逻辑
AuthHandler-->>Plugin: 鉴权结果
else 使用密钥鉴权
Plugin->>Plugin: 检查 conf.Key 是否存在
alt 配置了Key
Plugin->>Plugin: auth(streamPath, key, secret, expire)
Plugin->>Plugin: 验证时间戳
Plugin->>Plugin: 验证secret长度
Plugin->>Plugin: 计算MD5签名
Plugin->>Plugin: 比较签名
Plugin-->>Plugin: 鉴权结果
end
end
alt 鉴权失败
Plugin-->>Client: 鉴权失败,拒绝拉流
else 鉴权成功
Plugin->>Server: 创建Subscriber并等待Publisher
Server->>Server: 等待流发布和轨道就绪
Server-->>Plugin: 拉流准备就绪
Plugin-->>Client: 开始传输流数据
end
else 未启用鉴权
Plugin->>Server: 直接创建Subscriber
Server-->>Plugin: 拉流成功
Plugin-->>Client: 开始传输流数据
end
```
### 2. 鉴权触发时机
鉴权在以下两种情况下触发:
- **推流鉴权**:当有推流请求时,在`PublishWithConfig`方法中触发
- **拉流鉴权**:当有拉流请求时,在`SubscribeWithConfig`方法中触发
### 3. 鉴权条件判断
鉴权只在以下条件同时满足时才会执行:
```go
if p.config.EnableAuth && publisher.Type == PublishTypeServer
```
- `EnableAuth`:插件配置中启用了鉴权
- `Type == PublishTypeServer/SubscribeTypeServer`:只对服务端类型的推流/拉流进行鉴权
### 4. 鉴权方式优先级
系统按以下优先级执行鉴权:
1. **自定义鉴权处理器**(最高优先级)
2. **基于密钥的签名鉴权**
3. **无鉴权**(默认通过)
## 自定义鉴权处理器
### 推流鉴权处理器
```go
onAuthPub := p.Meta.OnAuthPub
if onAuthPub == nil {
onAuthPub = p.Server.Meta.OnAuthPub
}
if onAuthPub != nil {
if err = onAuthPub(publisher).Await(); err != nil {
p.Warn("auth failed", "error", err)
return
}
}
```
鉴权处理器查找顺序:
1. 插件级别的鉴权处理器 `p.Meta.OnAuthPub`
2. 服务器级别的鉴权处理器 `p.Server.Meta.OnAuthPub`
### 拉流鉴权处理器
```go
onAuthSub := p.Meta.OnAuthSub
if onAuthSub == nil {
onAuthSub = p.Server.Meta.OnAuthSub
}
if onAuthSub != nil {
if err = onAuthSub(subscriber).Await(); err != nil {
p.Warn("auth failed", "error", err)
return
}
}
```
## 基于密钥的签名鉴权
当没有自定义鉴权处理器时,如果配置了 Key,系统将使用基于 MD5 的签名鉴权机制。
### 鉴权算法
```go
// auth validates a signed stream request. The caller supplies the stream
// path, the configured key, the client-provided secret, and a hex-encoded
// expiry timestamp; access is granted only when the secret equals the
// hex MD5 of key + streamPath + expire and the timestamp is still valid.
func (p *Plugin) auth(streamPath string, key string, secret string, expire string) (err error) {
// 1. Validate the expiry: expire must parse as a base-16 Unix timestamp
// that has not yet passed. A parse failure is treated the same as expired.
if unixTime, err := strconv.ParseInt(expire, 16, 64); err != nil || time.Now().Unix() > unixTime {
return fmt.Errorf("auth failed expired")
}
// 2. Validate the secret length: a hex-encoded MD5 digest is always 32 chars.
if len(secret) != 32 {
return fmt.Errorf("auth failed secret length must be 32")
}
// 3. Compute the expected secret from key + streamPath + expire.
trueSecret := md5.Sum([]byte(key + streamPath + expire))
// 4. Compare the client-supplied secret against the expected digest.
// NOTE(review): plain == is not a constant-time comparison; consider
// crypto/subtle.ConstantTimeCompare to avoid timing side channels — confirm.
if secret == hex.EncodeToString(trueSecret[:]) {
return nil
}
return fmt.Errorf("auth failed invalid secret")
}
```
### 签名计算步骤
1. **构造签名字符串**:`key + streamPath + expire`
2. **MD5 加密**:对签名字符串进行 MD5 哈希
3. **十六进制编码**:将 MD5 结果转换为 32 位十六进制字符串
4. **验证签名**:比较计算结果与客户端提供的 secret
### 参数说明
| 参数 | 类型 | 说明 | 示例 |
|------|------|------|------|
| key | string | 密钥,在配置文件中设置 | "mySecretKey" |
| streamPath | string | 流路径 | "live/test" |
| expire | string | 过期时间戳16进制 | "64a1b2c3" |
| secret | string | 客户端计算的签名32位十六进制 | "5d41402abc4b2a76b9719d911017c592" |
### 时间戳处理
- 过期时间使用16进制Unix时间戳
- 系统会验证当前时间是否超过过期时间
- 时间戳解析失败或已过期都会导致鉴权失败
## API密钥生成
系统还提供了API接口用于生成密钥支持管理后台的鉴权需求
```go
p.handle("/api/secret/{type}/{streamPath...}", http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
// JWT Token验证
authHeader := r.Header.Get("Authorization")
tokenString := strings.TrimPrefix(authHeader, "Bearer ")
_, err := p.Server.ValidateToken(tokenString)
// 生成推流或拉流密钥
streamPath := r.PathValue("streamPath")
t := r.PathValue("type")
expire := r.URL.Query().Get("expire")
if t == "publish" {
secret := md5.Sum([]byte(p.config.Publish.Key + streamPath + expire))
rw.Write([]byte(hex.EncodeToString(secret[:])))
} else if t == "subscribe" {
secret := md5.Sum([]byte(p.config.Subscribe.Key + streamPath + expire))
rw.Write([]byte(hex.EncodeToString(secret[:])))
}
}))
```
## 配置示例
### 启用鉴权
```yaml
# 插件配置
rtmp:
enableAuth: true
publish:
key: "your-publish-key"
subscribe:
key: "your-subscribe-key"
```
### 推流URL示例
```
rtmp://localhost/live/test?secret=5d41402abc4b2a76b9719d911017c592&expire=64a1b2c3
```
### 拉流URL示例
```
http://localhost:8080/flv/live/test.flv?secret=a1b2c3d4e5f6789012345678901234ab&expire=64a1b2c3
```
## 安全考虑
1. **密钥保护**配置文件中的key应当妥善保管避免泄露
2. **时间窗口**:合理设置过期时间,平衡安全性和可用性
3. **HTTPS传输**生产环境建议使用HTTPS传输鉴权参数
4. **日志记录**:鉴权失败会记录警告日志,便于安全审计
## 错误处理
鉴权失败的常见原因:
- `auth failed expired`:时间戳已过期或格式错误
- `auth failed secret length must be 32`secret长度不正确
- `auth failed invalid secret`:签名验证失败
- `invalid token`API密钥生成时JWT验证失败

View File

@@ -93,7 +93,7 @@ func (p *YourPlugin) RegisterHandler() {
示例代码:
```go
func (p *YourPlugin) OnInit() {
func (p *YourPlugin) Start() {
// 添加认证中间件
p.GetCommonConf().AddMiddleware(func(next http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {

View File

@@ -116,7 +116,7 @@ type MyLogHandler struct {
}
// 在插件初始化时添加处理器
func (p *MyPlugin) OnInit() error {
func (p *MyPlugin) Start() error {
handler := &MyLogHandler{}
p.Server.LogHandler.Add(handler)
return nil

View File

@@ -109,7 +109,7 @@ Monibuca 采用插件化架构设计,通过插件机制来扩展功能。插
### 4. 停止阶段 (Stop)
插件的停止阶段通过 `Plugin.OnStop` 方法和相关的停止处理逻辑实现,主要包含以下步骤:
插件的停止阶段通过 `Plugin.OnDispose` 方法和相关的停止处理逻辑实现,主要包含以下步骤:
1. 停止服务
- 停止所有网络服务HTTP/HTTPS/TCP/UDP/QUIC

View File

@@ -10,3 +10,5 @@ cascadeclient:
onsub:
pull:
.*: m7s://$0
flv:
enable: true

View File

@@ -9,7 +9,7 @@ transcode:
transform:
^live.+:
input:
mode: rtsp
mode: pipe
output:
- target: rtmp://localhost/trans/$0/small
conf: -loglevel debug -c:a aac -c:v h264 -vf scale=320:240

View File

@@ -8,20 +8,40 @@ srt:
listenaddr: :6000
passphrase: foobarfoobar
gb28181:
enable: false
autoinvite: false
mediaip: 192.168.1.21 #流媒体收流IP
sipip: 192.168.1.21 #SIP通讯IP
enable: false # 是否启用GB28181协议
autoinvite: false #建议使用false开启后会自动邀请设备推流
mediaip: 192.168.1.21 #流媒体收流IP,外网情况下使用公网IP,内网情况下使用网卡IP,不要用127.0.0.1
sipip: 192.168.1.21 #SIP通讯IP,不管公网还是内网都使用本机网卡IP,不要用127.0.0.1
sip:
listenaddr:
- udp::5060
# pull:
# live/test: dump/34020000001320000001
onsub:
pull:
^\d{20}/\d{20}$: $0
^gb_\d+/(.+)$: $1
# .* : $0
platforms:
- enable: false #是否启用平台
name: "测试平台" #平台名称
servergbid: "34020000002000000002" #上级平台GBID
servergbdomain: "3402000000" #上级平台GB域
serverip: 192.168.1.106 #上级平台IP
serverport: 5061 #上级平台端口
devicegbid: "34020000002000000001" #本平台设备GBID
deviceip: 192.168.1.106 #本平台设备IP
deviceport: 5060 #本平台设备端口
username: "34020000002000000001" #SIP账号
password: "123456" #SIP密码
expires: 3600 #注册有效期,单位秒
keeptimeout: 60 #注册保持超时时间,单位秒
civilCode: "340200" #行政区划代码
manufacturer: "Monibuca" #设备制造商
model: "GB28181" #设备型号
address: "江苏南京" #设备地址
register_way: 1
platformchannels:
- platformservergbid: "34020000002000000002" #上级平台GBID
channeldbid: "34020000001110000003_34020000001320000005" #通道DBID,格式为设备ID_通道ID
mp4:
# enable: false
# publish:

2
example/test/config.yaml Normal file
View File

@@ -0,0 +1,2 @@
global:
log_level: debug

40
example/test/main.go Normal file
View File

@@ -0,0 +1,40 @@
// Command test boots an m7s media server with the full plugin set enabled
// via side-effect imports.
package main

import (
	"context"
	"flag"
	"fmt"

	"m7s.live/v5"
	_ "m7s.live/v5/plugin/cascade"
	_ "m7s.live/v5/plugin/crypto"
	_ "m7s.live/v5/plugin/debug"
	_ "m7s.live/v5/plugin/flv"
	_ "m7s.live/v5/plugin/gb28181"
	_ "m7s.live/v5/plugin/hls"
	_ "m7s.live/v5/plugin/logrotate"
	_ "m7s.live/v5/plugin/monitor"
	_ "m7s.live/v5/plugin/mp4"
	_ "m7s.live/v5/plugin/onvif"
	_ "m7s.live/v5/plugin/preview"
	_ "m7s.live/v5/plugin/rtmp"
	_ "m7s.live/v5/plugin/rtp"
	_ "m7s.live/v5/plugin/rtsp"
	_ "m7s.live/v5/plugin/sei"
	_ "m7s.live/v5/plugin/snap"
	_ "m7s.live/v5/plugin/srt"
	_ "m7s.live/v5/plugin/stress"
	_ "m7s.live/v5/plugin/test"
	_ "m7s.live/v5/plugin/transcode"
	_ "m7s.live/v5/plugin/webrtc"
	_ "m7s.live/v5/plugin/webtransport"
)

// main parses the -c flag (config file path, default "config.yaml"),
// runs the server until it exits, then prints whatever Run returned.
func main() {
	configPath := flag.String("c", "config.yaml", "config file")
	flag.Parse()
	// Run blocks for the lifetime of the server; the result (possibly nil)
	// is printed unconditionally, matching the original behavior.
	err := m7s.Run(context.Background(), *configPath)
	fmt.Println(err)
}

45
go.mod
View File

@@ -6,6 +6,7 @@ require (
github.com/IOTechSystems/onvif v1.2.0
github.com/VictoriaMetrics/VictoriaMetrics v1.102.0
github.com/asavie/xdp v0.3.3
github.com/aws/aws-sdk-go v1.55.7
github.com/beevik/etree v1.4.1
github.com/bluenviron/gohlslib v1.4.0
github.com/c0deltin/duckdb-driver v0.1.0
@@ -28,14 +29,14 @@ require (
github.com/mattn/go-sqlite3 v1.14.24
github.com/mcuadros/go-defaults v1.2.0
github.com/mozillazg/go-pinyin v0.20.0
github.com/ncruces/go-sqlite3 v0.18.1
github.com/ncruces/go-sqlite3/gormlite v0.18.0
github.com/pion/interceptor v0.1.37
github.com/pion/logging v0.2.2
github.com/ncruces/go-sqlite3 v0.27.1
github.com/ncruces/go-sqlite3/gormlite v0.24.0
github.com/pion/interceptor v0.1.40
github.com/pion/logging v0.2.4
github.com/pion/rtcp v1.2.15
github.com/pion/rtp v1.8.10
github.com/pion/sdp/v3 v3.0.9
github.com/pion/webrtc/v4 v4.0.7
github.com/pion/rtp v1.8.21
github.com/pion/sdp/v3 v3.0.15
github.com/pion/webrtc/v4 v4.1.4
github.com/quic-go/qpack v0.5.1
github.com/quic-go/quic-go v0.50.1
github.com/rs/zerolog v1.33.0
@@ -46,13 +47,13 @@ require (
github.com/vishvananda/netlink v1.1.0
github.com/yapingcat/gomedia v0.0.0-20240601043430-920523f8e5c7
golang.org/x/image v0.22.0
golang.org/x/text v0.24.0
golang.org/x/text v0.27.0
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
google.golang.org/grpc v1.65.0
google.golang.org/protobuf v1.34.2
gorm.io/driver/mysql v1.5.7
gorm.io/driver/postgres v1.5.9
gorm.io/gorm v1.25.11
gorm.io/gorm v1.30.0
)
require (
@@ -84,6 +85,7 @@ require (
github.com/jackc/puddle/v2 v2.2.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
@@ -96,15 +98,15 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/ncruces/julianday v1.0.0 // indirect
github.com/pion/datachannel v1.5.10 // indirect
github.com/pion/dtls/v3 v3.0.4 // indirect
github.com/pion/ice/v4 v4.0.3 // indirect
github.com/pion/dtls/v3 v3.0.7 // indirect
github.com/pion/ice/v4 v4.0.10 // indirect
github.com/pion/mdns/v2 v2.0.7 // indirect
github.com/pion/randutil v0.1.0 // indirect
github.com/pion/sctp v1.8.35 // indirect
github.com/pion/srtp/v3 v3.0.4 // indirect
github.com/pion/sctp v1.8.39 // indirect
github.com/pion/srtp/v3 v3.0.7 // indirect
github.com/pion/stun/v3 v3.0.0 // indirect
github.com/pion/transport/v3 v3.0.7 // indirect
github.com/pion/turn/v4 v4.0.0 // indirect
github.com/pion/turn/v4 v4.1.1 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
github.com/prometheus/client_model v0.6.1 // indirect
@@ -115,7 +117,7 @@ require (
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/cast v1.7.1 // indirect
github.com/tetratelabs/wazero v1.8.0 // indirect
github.com/tetratelabs/wazero v1.9.0 // indirect
github.com/tklauser/go-sysconf v0.3.12 // indirect
github.com/tklauser/numcpus v0.6.1 // indirect
github.com/valyala/bytebufferpool v1.0.0 // indirect
@@ -129,7 +131,7 @@ require (
github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
golang.org/x/arch v0.8.0 // indirect
golang.org/x/sync v0.13.0 // indirect
golang.org/x/sync v0.16.0 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d // indirect
)
@@ -142,17 +144,16 @@ require (
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect
github.com/gorilla/websocket v1.5.1
github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd
github.com/mark3labs/mcp-go v0.27.0
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
github.com/phsym/console-slog v0.3.1
github.com/prometheus/client_golang v1.20.4
github.com/quangngotan95/go-m3u8 v0.1.0
go.uber.org/mock v0.5.0 // indirect
golang.org/x/crypto v0.37.0
golang.org/x/crypto v0.40.0
golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7
golang.org/x/mod v0.19.0 // indirect
golang.org/x/net v0.39.0
golang.org/x/sys v0.32.0
golang.org/x/tools v0.23.0 // indirect
golang.org/x/mod v0.25.0 // indirect
golang.org/x/net v0.41.0
golang.org/x/sys v0.34.0
golang.org/x/tools v0.34.0 // indirect
gopkg.in/yaml.v3 v3.0.1
)

97
go.sum
View File

@@ -25,6 +25,8 @@ github.com/asticode/go-astikit v0.30.0 h1:DkBkRQRIxYcknlaU7W7ksNfn4gMFsB0tqMJflx
github.com/asticode/go-astikit v0.30.0/go.mod h1:h4ly7idim1tNhaVkdVBeXQZEE3L0xblP7fCWbgwipF0=
github.com/asticode/go-astits v1.13.0 h1:XOgkaadfZODnyZRR5Y0/DWkA9vrkLLPLeeOvDwfKZ1c=
github.com/asticode/go-astits v1.13.0/go.mod h1:QSHmknZ51pf6KJdHKZHJTLlMegIrhega3LPWz3ND/iI=
github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/beevik/etree v1.4.1 h1:PmQJDDYahBGNKDcpdX8uPy1xRCwoCGVUiW669MEirVI=
github.com/beevik/etree v1.4.1/go.mod h1:gPNJNaBGVZ9AwsidazFZyygnd+0pAU38N4D+WemwKNs=
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c h1:8XZeJrs4+ZYhJeJ2aZxADI2tGADS15AzIF8MQ8XAhT4=
@@ -139,6 +141,10 @@ github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
@@ -183,10 +189,10 @@ github.com/mozillazg/go-pinyin v0.20.0 h1:BtR3DsxpApHfKReaPO1fCqF4pThRwH9uwvXzm+
github.com/mozillazg/go-pinyin v0.20.0/go.mod h1:iR4EnMMRXkfpFVV5FMi4FNB6wGq9NV6uDWbUuPhP4Yc=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/ncruces/go-sqlite3 v0.18.1 h1:iN8IMZV5EMxpH88NUac9vId23eTKNFUhP7jgY0EBbNc=
github.com/ncruces/go-sqlite3 v0.18.1/go.mod h1:eEOyZnW1dGTJ+zDpMuzfYamEUBtdFz5zeYhqLBtHxvM=
github.com/ncruces/go-sqlite3/gormlite v0.18.0 h1:KqP9a9wlX/Ba+yG+aeVX4pnNBNdaSO6xHdNDWzPxPnk=
github.com/ncruces/go-sqlite3/gormlite v0.18.0/go.mod h1:RXeT1hknrz3A0tBDL6IfluDHuNkHdJeImn5TBMQg9zc=
github.com/ncruces/go-sqlite3 v0.27.1 h1:suqlM7xhSyDVMV9RgX99MCPqt9mB6YOCzHZuiI36K34=
github.com/ncruces/go-sqlite3 v0.27.1/go.mod h1:gpF5s+92aw2MbDmZK0ZOnCdFlpe11BH20CTspVqri0c=
github.com/ncruces/go-sqlite3/gormlite v0.24.0 h1:81sHeq3CCdhjoqAB650n5wEdRlLO9VBvosArskcN3+c=
github.com/ncruces/go-sqlite3/gormlite v0.24.0/go.mod h1:vXfVWdBfg7qOgqQqHpzUWl9LLswD0h+8mK4oouaV2oc=
github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M=
github.com/ncruces/julianday v1.0.0/go.mod h1:Dusn2KvZrrovOMJuOt0TNXL6tB7U2E8kvza5fFc9G7g=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
@@ -202,36 +208,36 @@ github.com/phsym/console-slog v0.3.1 h1:Fuzcrjr40xTc004S9Kni8XfNsk+qrptQmyR+wZw9
github.com/phsym/console-slog v0.3.1/go.mod h1:oJskjp/X6e6c0mGpfP8ELkfKUsrkDifYRAqJQgmdDS0=
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
github.com/pion/dtls/v3 v3.0.4 h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U=
github.com/pion/dtls/v3 v3.0.4/go.mod h1:R373CsjxWqNPf6MEkfdy3aSe9niZvL/JaKlGeFphtMg=
github.com/pion/ice/v4 v4.0.3 h1:9s5rI1WKzF5DRqhJ+Id8bls/8PzM7mau0mj1WZb4IXE=
github.com/pion/ice/v4 v4.0.3/go.mod h1:VfHy0beAZ5loDT7BmJ2LtMtC4dbawIkkkejHPRZNB3Y=
github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
github.com/pion/dtls/v3 v3.0.7 h1:bItXtTYYhZwkPFk4t1n3Kkf5TDrfj6+4wG+CZR8uI9Q=
github.com/pion/dtls/v3 v3.0.7/go.mod h1:uDlH5VPrgOQIw59irKYkMudSFprY9IEFCqz/eTz16f8=
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8=
github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so=
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
github.com/pion/rtp v1.8.10 h1:puphjdbjPB+L+NFaVuZ5h6bt1g5q4kFIoI+r5q/g0CU=
github.com/pion/rtp v1.8.10/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4=
github.com/pion/sctp v1.8.35 h1:qwtKvNK1Wc5tHMIYgTDJhfZk7vATGVHhXbUDfHbYwzA=
github.com/pion/sctp v1.8.35/go.mod h1:EcXP8zCYVTRy3W9xtOF7wJm1L1aXfKRQzaM33SjQlzg=
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M=
github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ=
github.com/pion/rtp v1.8.21 h1:3yrOwmZFyUpcIosNcWRpQaU+UXIJ6yxLuJ8Bx0mw37Y=
github.com/pion/rtp v1.8.21/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
github.com/pion/sdp/v3 v3.0.15 h1:F0I1zds+K/+37ZrzdADmx2Q44OFDOPRLhPnNTaUX9hk=
github.com/pion/sdp/v3 v3.0.15/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
github.com/pion/srtp/v3 v3.0.7 h1:QUElw0A/FUg3MP8/KNMZB3i0m8F9XeMnTum86F7S4bs=
github.com/pion/srtp/v3 v3.0.7/go.mod h1:qvnHeqbhT7kDdB+OGB05KA/P067G3mm7XBfLaLiaNF0=
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
github.com/pion/webrtc/v4 v4.0.7 h1:aeq78uVnFZd2umXW0O9A2VFQYuS7+BZxWetQvSp2jPo=
github.com/pion/webrtc/v4 v4.0.7/go.mod h1:oFVBBVSHU3vAEwSgnk3BuKCwAUwpDwQhko1EDwyZWbU=
github.com/pion/turn/v4 v4.1.1 h1:9UnY2HB99tpDyz3cVVZguSxcqkJ1DsTSZ+8TGruh4fc=
github.com/pion/turn/v4 v4.1.1/go.mod h1:2123tHk1O++vmjI5VSD0awT50NywDAq5A2NNNU4Jjs8=
github.com/pion/webrtc/v4 v4.1.4 h1:/gK1ACGHXQmtyVVbJFQDxNoODg4eSRiFLB7t9r9pg8M=
github.com/pion/webrtc/v4 v4.1.4/go.mod h1:Oab9npu1iZtQRMic3K3toYq5zFPvToe/QBw7dMI2ok4=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE=
@@ -281,22 +287,15 @@ github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/sunfish-shogi/bufseekio v0.0.0-20210207115823-a4185644b365/go.mod h1:dEzdXgvImkQ3WLI+0KQpmEx8T/C/ma9KeS3AfmU899I=
github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g=
github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs=
github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I=
github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM=
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
@@ -335,8 +334,8 @@ golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7 h1:wDLEX9a7YQoKdKNQt88rtydkqDxeGaBUTnIYc3iG/mA=
golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -344,17 +343,17 @@ golang.org/x/image v0.22.0 h1:UtK5yLUzilVrkjMAZAZ34DXGpASN8i8pj8g+O+yd10g=
golang.org/x/image v0.22.0/go.mod h1:9hPFhljd4zZ1GNSIZJ49sqbp45GKK9t6w+iXvGqZUz4=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -375,19 +374,19 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=
@@ -417,8 +416,8 @@ gorm.io/driver/mysql v1.5.7/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkD
gorm.io/driver/postgres v1.5.9 h1:DkegyItji119OlcaLjqN11kHoUgZ/j13E0jkJZgD6A8=
gorm.io/driver/postgres v1.5.9/go.mod h1:DX3GReXH+3FPWGrrgffdvCk3DQ1dwDPdmbenSkweRGI=
gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8=
gorm.io/gorm v1.25.11 h1:/Wfyg1B/je1hnDx3sMkX+gAlxrlZpn6X0BXRlwXlvHg=
gorm.io/gorm v1.25.11/go.mod h1:xh7N7RHfYlNc5EmcI/El95gXusucDrQnHXe0+CgWcLQ=
gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs=
gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=

File diff suppressed because it is too large Load Diff

View File

@@ -1401,7 +1401,7 @@ func local_request_Api_RemovePullProxy_1(ctx context.Context, marshaler runtime.
}
func request_Api_UpdatePullProxy_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq PullProxyInfo
var protoReq UpdatePullProxyRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
@@ -1414,7 +1414,7 @@ func request_Api_UpdatePullProxy_0(ctx context.Context, marshaler runtime.Marsha
}
func local_request_Api_UpdatePullProxy_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq PullProxyInfo
var protoReq UpdatePullProxyRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
@@ -1427,7 +1427,7 @@ func local_request_Api_UpdatePullProxy_0(ctx context.Context, marshaler runtime.
}
func request_Api_UpdatePullProxy_1(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq PullProxyInfo
var protoReq UpdatePullProxyRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
@@ -1440,7 +1440,7 @@ func request_Api_UpdatePullProxy_1(ctx context.Context, marshaler runtime.Marsha
}
func local_request_Api_UpdatePullProxy_1(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq PullProxyInfo
var protoReq UpdatePullProxyRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
@@ -1557,7 +1557,7 @@ func local_request_Api_RemovePushProxy_0(ctx context.Context, marshaler runtime.
}
func request_Api_UpdatePushProxy_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq PushProxyInfo
var protoReq UpdatePushProxyRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
@@ -1570,7 +1570,7 @@ func request_Api_UpdatePushProxy_0(ctx context.Context, marshaler runtime.Marsha
}
func local_request_Api_UpdatePushProxy_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq PushProxyInfo
var protoReq UpdatePushProxyRequest
var metadata runtime.ServerMetadata
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
@@ -1708,6 +1708,96 @@ func local_request_Api_GetRecordList_0(ctx context.Context, marshaler runtime.Ma
}
// filter_Api_GetEventRecordList_0 marks the path parameters ("type",
// "streamPath") so they are skipped when query parameters are populated
// into the request message.
var (
filter_Api_GetEventRecordList_0 = &utilities.DoubleArray{Encoding: map[string]int{"type": 0, "streamPath": 1}, Base: []int{1, 1, 2, 0, 0}, Check: []int{0, 1, 1, 2, 3}}
)
// request_Api_GetEventRecordList_0 is generated grpc-gateway glue: it builds
// a ReqRecordList from the HTTP path parameters "type" and "streamPath" plus
// any query parameters, then forwards the call to the remote gRPC ApiClient,
// capturing header/trailer metadata for the HTTP response.
func request_Api_GetEventRecordList_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReqRecordList
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
// Required path parameter: type.
val, ok = pathParams["type"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "type")
}
protoReq.Type, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "type", err)
}
// Required path parameter: streamPath (may contain slashes via {streamPath...}).
val, ok = pathParams["streamPath"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "streamPath")
}
protoReq.StreamPath, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "streamPath", err)
}
// Merge remaining query-string values into the message, excluding the
// path parameters already bound above.
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetEventRecordList_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.GetEventRecordList(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
// local_request_Api_GetEventRecordList_0 is generated grpc-gateway glue for
// the in-process case: identical request decoding to the client variant, but
// it invokes the local ApiServer implementation directly instead of going
// over a gRPC connection.
func local_request_Api_GetEventRecordList_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReqRecordList
var metadata runtime.ServerMetadata
var (
val string
ok bool
err error
_ = err
)
// Required path parameter: type.
val, ok = pathParams["type"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "type")
}
protoReq.Type, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "type", err)
}
// Required path parameter: streamPath.
val, ok = pathParams["streamPath"]
if !ok {
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "streamPath")
}
protoReq.StreamPath, err = runtime.String(val)
if err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "streamPath", err)
}
// Merge remaining query-string values, excluding already-bound path params.
if err := req.ParseForm(); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Api_GetEventRecordList_0); err != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.GetEventRecordList(ctx, &protoReq)
return msg, metadata, err
}
func request_Api_GetRecordCatalog_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq ReqRecordCatalog
var metadata runtime.ServerMetadata
@@ -1840,6 +1930,94 @@ func local_request_Api_DeleteRecord_0(ctx context.Context, marshaler runtime.Mar
}
var (
	// filter_Api_GetAlarmList_0 is an empty filter: GetAlarmList binds no
	// path parameters, so every field of the request may be populated from
	// the URL query string.
	filter_Api_GetAlarmList_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)
// request_Api_GetAlarmList_0 adapts an HTTP request into a GetAlarmList
// gRPC call on the remote Api client. All request fields come from the URL
// query string; header and trailer metadata are captured and returned.
func request_Api_GetAlarmList_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var (
		in AlarmListRequest
		md runtime.ServerMetadata
	)
	if parseErr := req.ParseForm(); parseErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", parseErr)
	}
	if popErr := runtime.PopulateQueryParameters(&in, req.Form, filter_Api_GetAlarmList_0); popErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", popErr)
	}
	resp, callErr := client.GetAlarmList(ctx, &in, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD))
	return resp, md, callErr
}
// local_request_Api_GetAlarmList_0 adapts an HTTP request into a direct
// (in-process) GetAlarmList call on the Api server, populating the request
// entirely from the URL query string.
func local_request_Api_GetAlarmList_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var (
		in AlarmListRequest
		md runtime.ServerMetadata
	)
	if parseErr := req.ParseForm(); parseErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", parseErr)
	}
	if popErr := runtime.PopulateQueryParameters(&in, req.Form, filter_Api_GetAlarmList_0); popErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "%v", popErr)
	}
	resp, callErr := server.GetAlarmList(ctx, &in)
	return resp, md, callErr
}
// request_Api_GetSubscriptionProgress_0 adapts an HTTP request into a
// GetSubscriptionProgress gRPC call on the remote Api client. Only the
// "streamPath" path parameter is bound; header and trailer metadata are
// captured and returned with the response.
func request_Api_GetSubscriptionProgress_0(ctx context.Context, marshaler runtime.Marshaler, client ApiClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var (
		in StreamSnapRequest
		md runtime.ServerMetadata
	)
	raw, present := pathParams["streamPath"]
	if !present {
		return nil, md, status.Errorf(codes.InvalidArgument, "missing parameter %s", "streamPath")
	}
	parsed, convErr := runtime.String(raw)
	if convErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "streamPath", convErr)
	}
	in.StreamPath = parsed
	resp, callErr := client.GetSubscriptionProgress(ctx, &in, grpc.Header(&md.HeaderMD), grpc.Trailer(&md.TrailerMD))
	return resp, md, callErr
}
// local_request_Api_GetSubscriptionProgress_0 adapts an HTTP request into
// a direct (in-process) GetSubscriptionProgress call on the Api server,
// binding only the "streamPath" path parameter.
func local_request_Api_GetSubscriptionProgress_0(ctx context.Context, marshaler runtime.Marshaler, server ApiServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var (
		in StreamSnapRequest
		md runtime.ServerMetadata
	)
	raw, present := pathParams["streamPath"]
	if !present {
		return nil, md, status.Errorf(codes.InvalidArgument, "missing parameter %s", "streamPath")
	}
	parsed, convErr := runtime.String(raw)
	if convErr != nil {
		return nil, md, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "streamPath", convErr)
	}
	in.StreamPath = parsed
	resp, callErr := server.GetSubscriptionProgress(ctx, &in)
	return resp, md, callErr
}
// RegisterApiHandlerServer registers the http handlers for service Api to "mux".
// UnaryRPC :call ApiServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@@ -2896,6 +3074,31 @@ func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server
})
mux.Handle("GET", pattern_Api_GetEventRecordList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/global.Api/GetEventRecordList", runtime.WithHTTPPathPattern("/api/record/{type}/event/list/{streamPath=**}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_Api_GetEventRecordList_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetEventRecordList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_Api_GetRecordCatalog_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -2946,6 +3149,56 @@ func RegisterApiHandlerServer(ctx context.Context, mux *runtime.ServeMux, server
})
mux.Handle("GET", pattern_Api_GetAlarmList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/global.Api/GetAlarmList", runtime.WithHTTPPathPattern("/api/alarm/list"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_Api_GetAlarmList_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetAlarmList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_Api_GetSubscriptionProgress_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/global.Api/GetSubscriptionProgress", runtime.WithHTTPPathPattern("/api/stream/progress/{streamPath=**}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_Api_GetSubscriptionProgress_0(annotatedContext, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetSubscriptionProgress_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@@ -3911,6 +4164,28 @@ func RegisterApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client
})
mux.Handle("GET", pattern_Api_GetEventRecordList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/global.Api/GetEventRecordList", runtime.WithHTTPPathPattern("/api/record/{type}/event/list/{streamPath=**}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Api_GetEventRecordList_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetEventRecordList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_Api_GetRecordCatalog_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
@@ -3955,6 +4230,50 @@ func RegisterApiHandlerClient(ctx context.Context, mux *runtime.ServeMux, client
})
mux.Handle("GET", pattern_Api_GetAlarmList_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/global.Api/GetAlarmList", runtime.WithHTTPPathPattern("/api/alarm/list"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Api_GetAlarmList_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetAlarmList_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("GET", pattern_Api_GetSubscriptionProgress_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
var err error
var annotatedContext context.Context
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/global.Api/GetSubscriptionProgress", runtime.WithHTTPPathPattern("/api/stream/progress/{streamPath=**}"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Api_GetSubscriptionProgress_0(annotatedContext, inboundMarshaler, client, req, pathParams)
annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
if err != nil {
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
return
}
forward_Api_GetSubscriptionProgress_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
@@ -4043,9 +4362,15 @@ var (
pattern_Api_GetRecordList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 3, 0, 4, 1, 5, 4}, []string{"api", "record", "type", "list", "streamPath"}, ""))
pattern_Api_GetEventRecordList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 2, 4, 3, 0, 4, 1, 5, 5}, []string{"api", "record", "type", "event", "list", "streamPath"}, ""))
pattern_Api_GetRecordCatalog_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"api", "record", "type", "catalog"}, ""))
pattern_Api_DeleteRecord_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3, 3, 0, 4, 1, 5, 4}, []string{"api", "record", "type", "delete", "streamPath"}, ""))
pattern_Api_GetAlarmList_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "alarm", "list"}, ""))
pattern_Api_GetSubscriptionProgress_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 3, 0, 4, 1, 5, 3}, []string{"api", "stream", "progress", "streamPath"}, ""))
)
var (
@@ -4133,7 +4458,13 @@ var (
forward_Api_GetRecordList_0 = runtime.ForwardResponseMessage
forward_Api_GetEventRecordList_0 = runtime.ForwardResponseMessage
forward_Api_GetRecordCatalog_0 = runtime.ForwardResponseMessage
forward_Api_DeleteRecord_0 = runtime.ForwardResponseMessage
forward_Api_GetAlarmList_0 = runtime.ForwardResponseMessage
forward_Api_GetSubscriptionProgress_0 = runtime.ForwardResponseMessage
)

View File

@@ -181,7 +181,7 @@ service api {
}
};
}
rpc UpdatePullProxy (PullProxyInfo) returns (SuccessResponse) {
rpc UpdatePullProxy (UpdatePullProxyRequest) returns (SuccessResponse) {
option (google.api.http) = {
post: "/api/proxy/pull/update"
body: "*"
@@ -208,7 +208,7 @@ service api {
body: "*"
};
}
rpc UpdatePushProxy (PushProxyInfo) returns (SuccessResponse) {
rpc UpdatePushProxy (UpdatePushProxyRequest) returns (SuccessResponse) {
option (google.api.http) = {
post: "/api/proxy/push/update"
body: "*"
@@ -224,11 +224,16 @@ service api {
get: "/api/transform/list"
};
}
rpc GetRecordList (ReqRecordList) returns (ResponseList) {
rpc GetRecordList (ReqRecordList) returns (RecordResponseList) {
option (google.api.http) = {
get: "/api/record/{type}/list/{streamPath=**}"
};
}
rpc GetEventRecordList (ReqRecordList) returns (EventRecordResponseList) {
option (google.api.http) = {
get: "/api/record/{type}/event/list/{streamPath=**}"
};
}
rpc GetRecordCatalog (ReqRecordCatalog) returns (ResponseCatalog) {
option (google.api.http) = {
get: "/api/record/{type}/catalog"
@@ -240,6 +245,16 @@ service api {
body: "*"
};
}
rpc GetAlarmList (AlarmListRequest) returns (AlarmListResponse) {
option (google.api.http) = {
get: "/api/alarm/list"
};
}
rpc GetSubscriptionProgress (StreamSnapRequest) returns (SubscriptionProgressResponse) {
option (google.api.http) = {
get: "/api/stream/progress/{streamPath=**}"
};
}
}
message DisabledPluginsResponse {
@@ -356,6 +371,8 @@ message TaskTreeData {
TaskTreeData blocked = 8;
uint64 pointer = 9;
string startReason = 10;
bool eventLoopRunning = 11;
uint32 level = 12;
}
message TaskTreeResponse {
@@ -561,6 +578,22 @@ message PullProxyInfo {
string streamPath = 16; // 流路径
}
// UpdatePullProxyRequest carries a partial update for a pull proxy: only
// the optional fields that are set are applied; ID selects the proxy.
message UpdatePullProxyRequest {
  uint32 ID = 1;
  optional uint32 parentID = 2; // parent device ID
  optional string name = 3; // device name
  optional string type = 4; // device type
  optional uint32 status = 5; // device status
  optional string pullURL = 6; // pull stream URL
  optional bool pullOnStart = 7; // start pulling on startup
  optional bool stopOnIdle = 8; // stop pulling when idle
  optional bool audio = 9; // whether to pull audio
  optional string description = 10; // device description
  optional string recordPath = 11; // recording path
  optional google.protobuf.Duration recordFragment = 12; // recording fragment duration
  optional string streamPath = 13; // stream path
}
message PushProxyInfo {
uint32 ID = 1;
google.protobuf.Timestamp createTime = 2;
@@ -577,6 +610,20 @@ message PushProxyInfo {
string streamPath = 13; // 流路径
}
// UpdatePushProxyRequest carries a partial update for a push proxy: only
// the optional fields that are set are applied; ID selects the proxy.
message UpdatePushProxyRequest {
  uint32 ID = 1;
  optional uint32 parentID = 2; // parent device ID
  optional string name = 3; // device name
  optional string type = 4; // device type
  optional uint32 status = 5; // device status
  optional string pushURL = 6; // push stream URL
  optional bool pushOnStart = 7; // start pushing on startup
  optional bool audio = 8; // whether to push audio
  optional string description = 9; // device description
  optional uint32 rtt = 10; // average RTT
  optional string streamPath = 11; // stream path
}
message PushProxyListResponse {
int32 code = 1;
string message = 2;
@@ -664,9 +711,8 @@ message ReqRecordList {
string end = 4;
uint32 pageNum = 5;
uint32 pageSize = 6;
string mode = 7;
string type = 8;
string eventLevel = 9;
string type = 7;
string eventLevel = 8;
}
message RecordFile {
@@ -675,12 +721,21 @@ message RecordFile {
string streamPath = 3;
google.protobuf.Timestamp startTime = 4;
google.protobuf.Timestamp endTime = 5;
string eventLevel = 6;
string eventName = 7;
string eventDesc = 8;
}
message ResponseList {
// EventRecordFile describes a single event-triggered recording file,
// returned by GetEventRecordList. It extends the plain RecordFile shape
// with an eventId field.
message EventRecordFile {
  uint32 id = 1;
  string filePath = 2;
  string streamPath = 3;
  google.protobuf.Timestamp startTime = 4;
  google.protobuf.Timestamp endTime = 5;
  string eventId = 6;
  string eventLevel = 7;
  string eventName = 8;
  string eventDesc = 9;
}
message RecordResponseList {
int32 code = 1;
string message = 2;
uint32 total = 3;
@@ -689,6 +744,15 @@ message ResponseList {
repeated RecordFile data = 6;
}
// EventRecordResponseList is the paged response of GetEventRecordList:
// a status code/message plus one page of EventRecordFile entries.
message EventRecordResponseList {
  int32 code = 1;
  string message = 2;
  uint32 total = 3;
  uint32 pageNum = 4;
  uint32 pageSize = 5;
  repeated EventRecordFile data = 6;
}
message Catalog {
string streamPath = 1;
uint32 count = 2;
@@ -719,4 +783,57 @@ message ResponseDelete {
message ReqRecordCatalog {
string type = 1;
}
// AlarmInfo is a single alarm record as returned by GetAlarmList.
message AlarmInfo {
  uint32 id = 1;
  string serverInfo = 2;
  string streamName = 3;
  string streamPath = 4;
  string alarmDesc = 5;
  string alarmName = 6;
  int32 alarmType = 7;
  bool isSent = 8; // presumably whether the alarm notification was dispatched — confirm against server code
  string filePath = 9;
  google.protobuf.Timestamp createdAt = 10;
  google.protobuf.Timestamp updatedAt = 11;
}
// AlarmListRequest is the paged query for GET /api/alarm/list. All fields
// are populated from the URL query string.
message AlarmListRequest {
  int32 pageNum = 1;
  int32 pageSize = 2;
  string range = 3; // NOTE(review): relation between range and start/end not visible here — confirm
  string start = 4;
  string end = 5;
  int32 alarmType = 6;
  string streamPath = 7;
  string streamName = 8;
}
// AlarmListResponse is the paged response of GetAlarmList: a status
// code/message plus one page of AlarmInfo entries.
message AlarmListResponse {
  int32 code = 1;
  string message = 2;
  int32 total = 3;
  int32 pageNum = 4;
  int32 pageSize = 5;
  repeated AlarmInfo data = 6;
}
// Step is one stage in a subscription's progress timeline (see
// SubscriptionProgressData).
message Step {
  string name = 1;
  string description = 2;
  string error = 3;
  google.protobuf.Timestamp startedAt = 4;
  google.protobuf.Timestamp completedAt = 5;
}
// SubscriptionProgressData lists the steps of a subscription and the index
// of the step currently in progress.
message SubscriptionProgressData {
  repeated Step steps = 1;
  int32 currentStep = 2;
}
// SubscriptionProgressResponse is the response of GetSubscriptionProgress:
// a status code/message plus the progress payload.
message SubscriptionProgressResponse {
  int32 code = 1;
  string message = 2;
  SubscriptionProgressData data = 3;
}

View File

@@ -20,46 +20,49 @@ import (
const _ = grpc.SupportPackageIsVersion9
// Full method names for the global.api service, as used by grpc.Invoke and
// interceptors. This rendering of the diff had lost its add/remove markers,
// leaving both the old and new constant sets in the block (duplicate
// declarations, e.g. Api_SysInfo_FullMethodName twice) — deduplicated here
// to the complete newer set, which includes the three added methods
// (GetEventRecordList, GetAlarmList, GetSubscriptionProgress).
const (
	Api_SysInfo_FullMethodName                 = "/global.api/SysInfo"
	Api_DisabledPlugins_FullMethodName         = "/global.api/DisabledPlugins"
	Api_Summary_FullMethodName                 = "/global.api/Summary"
	Api_Shutdown_FullMethodName                = "/global.api/Shutdown"
	Api_Restart_FullMethodName                 = "/global.api/Restart"
	Api_TaskTree_FullMethodName                = "/global.api/TaskTree"
	Api_StopTask_FullMethodName                = "/global.api/StopTask"
	Api_RestartTask_FullMethodName             = "/global.api/RestartTask"
	Api_StreamList_FullMethodName              = "/global.api/StreamList"
	Api_WaitList_FullMethodName                = "/global.api/WaitList"
	Api_StreamInfo_FullMethodName              = "/global.api/StreamInfo"
	Api_PauseStream_FullMethodName             = "/global.api/PauseStream"
	Api_ResumeStream_FullMethodName            = "/global.api/ResumeStream"
	Api_SetStreamSpeed_FullMethodName          = "/global.api/SetStreamSpeed"
	Api_SeekStream_FullMethodName              = "/global.api/SeekStream"
	Api_GetSubscribers_FullMethodName          = "/global.api/GetSubscribers"
	Api_AudioTrackSnap_FullMethodName          = "/global.api/AudioTrackSnap"
	Api_VideoTrackSnap_FullMethodName          = "/global.api/VideoTrackSnap"
	Api_ChangeSubscribe_FullMethodName         = "/global.api/ChangeSubscribe"
	Api_GetStreamAlias_FullMethodName          = "/global.api/GetStreamAlias"
	Api_SetStreamAlias_FullMethodName          = "/global.api/SetStreamAlias"
	Api_StopPublish_FullMethodName             = "/global.api/StopPublish"
	Api_StopSubscribe_FullMethodName           = "/global.api/StopSubscribe"
	Api_GetConfigFile_FullMethodName           = "/global.api/GetConfigFile"
	Api_UpdateConfigFile_FullMethodName        = "/global.api/UpdateConfigFile"
	Api_GetConfig_FullMethodName               = "/global.api/GetConfig"
	Api_GetFormily_FullMethodName              = "/global.api/GetFormily"
	Api_GetPullProxyList_FullMethodName        = "/global.api/GetPullProxyList"
	Api_AddPullProxy_FullMethodName            = "/global.api/AddPullProxy"
	Api_RemovePullProxy_FullMethodName         = "/global.api/RemovePullProxy"
	Api_UpdatePullProxy_FullMethodName         = "/global.api/UpdatePullProxy"
	Api_GetPushProxyList_FullMethodName        = "/global.api/GetPushProxyList"
	Api_AddPushProxy_FullMethodName            = "/global.api/AddPushProxy"
	Api_RemovePushProxy_FullMethodName         = "/global.api/RemovePushProxy"
	Api_UpdatePushProxy_FullMethodName         = "/global.api/UpdatePushProxy"
	Api_GetRecording_FullMethodName            = "/global.api/GetRecording"
	Api_GetTransformList_FullMethodName        = "/global.api/GetTransformList"
	Api_GetRecordList_FullMethodName           = "/global.api/GetRecordList"
	Api_GetEventRecordList_FullMethodName      = "/global.api/GetEventRecordList"
	Api_GetRecordCatalog_FullMethodName        = "/global.api/GetRecordCatalog"
	Api_DeleteRecord_FullMethodName            = "/global.api/DeleteRecord"
	Api_GetAlarmList_FullMethodName            = "/global.api/GetAlarmList"
	Api_GetSubscriptionProgress_FullMethodName = "/global.api/GetSubscriptionProgress"
)
// ApiClient is the client API for Api service.
@@ -96,16 +99,19 @@ type ApiClient interface {
GetPullProxyList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PullProxyListResponse, error)
AddPullProxy(ctx context.Context, in *PullProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
RemovePullProxy(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error)
UpdatePullProxy(ctx context.Context, in *PullProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
UpdatePullProxy(ctx context.Context, in *UpdatePullProxyRequest, opts ...grpc.CallOption) (*SuccessResponse, error)
GetPushProxyList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PushProxyListResponse, error)
AddPushProxy(ctx context.Context, in *PushProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
RemovePushProxy(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error)
UpdatePushProxy(ctx context.Context, in *PushProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
UpdatePushProxy(ctx context.Context, in *UpdatePushProxyRequest, opts ...grpc.CallOption) (*SuccessResponse, error)
GetRecording(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*RecordingListResponse, error)
GetTransformList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*TransformListResponse, error)
GetRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*ResponseList, error)
GetRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*RecordResponseList, error)
GetEventRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*EventRecordResponseList, error)
GetRecordCatalog(ctx context.Context, in *ReqRecordCatalog, opts ...grpc.CallOption) (*ResponseCatalog, error)
DeleteRecord(ctx context.Context, in *ReqRecordDelete, opts ...grpc.CallOption) (*ResponseDelete, error)
GetAlarmList(ctx context.Context, in *AlarmListRequest, opts ...grpc.CallOption) (*AlarmListResponse, error)
GetSubscriptionProgress(ctx context.Context, in *StreamSnapRequest, opts ...grpc.CallOption) (*SubscriptionProgressResponse, error)
}
type apiClient struct {
@@ -416,7 +422,7 @@ func (c *apiClient) RemovePullProxy(ctx context.Context, in *RequestWithId, opts
return out, nil
}
func (c *apiClient) UpdatePullProxy(ctx context.Context, in *PullProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error) {
func (c *apiClient) UpdatePullProxy(ctx context.Context, in *UpdatePullProxyRequest, opts ...grpc.CallOption) (*SuccessResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SuccessResponse)
err := c.cc.Invoke(ctx, Api_UpdatePullProxy_FullMethodName, in, out, cOpts...)
@@ -456,7 +462,7 @@ func (c *apiClient) RemovePushProxy(ctx context.Context, in *RequestWithId, opts
return out, nil
}
func (c *apiClient) UpdatePushProxy(ctx context.Context, in *PushProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error) {
func (c *apiClient) UpdatePushProxy(ctx context.Context, in *UpdatePushProxyRequest, opts ...grpc.CallOption) (*SuccessResponse, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SuccessResponse)
err := c.cc.Invoke(ctx, Api_UpdatePushProxy_FullMethodName, in, out, cOpts...)
@@ -486,9 +492,9 @@ func (c *apiClient) GetTransformList(ctx context.Context, in *emptypb.Empty, opt
return out, nil
}
func (c *apiClient) GetRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*ResponseList, error) {
func (c *apiClient) GetRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*RecordResponseList, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ResponseList)
out := new(RecordResponseList)
err := c.cc.Invoke(ctx, Api_GetRecordList_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
@@ -496,6 +502,16 @@ func (c *apiClient) GetRecordList(ctx context.Context, in *ReqRecordList, opts .
return out, nil
}
// GetEventRecordList invokes global.api/GetEventRecordList on the underlying
// connection and decodes the reply into an EventRecordResponseList.
func (c *apiClient) GetEventRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*EventRecordResponseList, error) {
	callOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	reply := new(EventRecordResponseList)
	if invokeErr := c.cc.Invoke(ctx, Api_GetEventRecordList_FullMethodName, in, reply, callOpts...); invokeErr != nil {
		return nil, invokeErr
	}
	return reply, nil
}
func (c *apiClient) GetRecordCatalog(ctx context.Context, in *ReqRecordCatalog, opts ...grpc.CallOption) (*ResponseCatalog, error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ResponseCatalog)
@@ -516,6 +532,26 @@ func (c *apiClient) DeleteRecord(ctx context.Context, in *ReqRecordDelete, opts
return out, nil
}
// GetAlarmList invokes global.api/GetAlarmList on the underlying connection
// and decodes the reply into an AlarmListResponse.
func (c *apiClient) GetAlarmList(ctx context.Context, in *AlarmListRequest, opts ...grpc.CallOption) (*AlarmListResponse, error) {
	callOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	reply := new(AlarmListResponse)
	if invokeErr := c.cc.Invoke(ctx, Api_GetAlarmList_FullMethodName, in, reply, callOpts...); invokeErr != nil {
		return nil, invokeErr
	}
	return reply, nil
}
// GetSubscriptionProgress invokes global.api/GetSubscriptionProgress on the
// underlying connection and decodes the reply.
func (c *apiClient) GetSubscriptionProgress(ctx context.Context, in *StreamSnapRequest, opts ...grpc.CallOption) (*SubscriptionProgressResponse, error) {
	callOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
	reply := new(SubscriptionProgressResponse)
	if invokeErr := c.cc.Invoke(ctx, Api_GetSubscriptionProgress_FullMethodName, in, reply, callOpts...); invokeErr != nil {
		return nil, invokeErr
	}
	return reply, nil
}
// ApiServer is the server API for Api service.
// All implementations must embed UnimplementedApiServer
// for forward compatibility.
@@ -550,16 +586,19 @@ type ApiServer interface {
GetPullProxyList(context.Context, *emptypb.Empty) (*PullProxyListResponse, error)
AddPullProxy(context.Context, *PullProxyInfo) (*SuccessResponse, error)
RemovePullProxy(context.Context, *RequestWithId) (*SuccessResponse, error)
UpdatePullProxy(context.Context, *PullProxyInfo) (*SuccessResponse, error)
UpdatePullProxy(context.Context, *UpdatePullProxyRequest) (*SuccessResponse, error)
GetPushProxyList(context.Context, *emptypb.Empty) (*PushProxyListResponse, error)
AddPushProxy(context.Context, *PushProxyInfo) (*SuccessResponse, error)
RemovePushProxy(context.Context, *RequestWithId) (*SuccessResponse, error)
UpdatePushProxy(context.Context, *PushProxyInfo) (*SuccessResponse, error)
UpdatePushProxy(context.Context, *UpdatePushProxyRequest) (*SuccessResponse, error)
GetRecording(context.Context, *emptypb.Empty) (*RecordingListResponse, error)
GetTransformList(context.Context, *emptypb.Empty) (*TransformListResponse, error)
GetRecordList(context.Context, *ReqRecordList) (*ResponseList, error)
GetRecordList(context.Context, *ReqRecordList) (*RecordResponseList, error)
GetEventRecordList(context.Context, *ReqRecordList) (*EventRecordResponseList, error)
GetRecordCatalog(context.Context, *ReqRecordCatalog) (*ResponseCatalog, error)
DeleteRecord(context.Context, *ReqRecordDelete) (*ResponseDelete, error)
GetAlarmList(context.Context, *AlarmListRequest) (*AlarmListResponse, error)
GetSubscriptionProgress(context.Context, *StreamSnapRequest) (*SubscriptionProgressResponse, error)
mustEmbedUnimplementedApiServer()
}
@@ -660,7 +699,7 @@ func (UnimplementedApiServer) AddPullProxy(context.Context, *PullProxyInfo) (*Su
func (UnimplementedApiServer) RemovePullProxy(context.Context, *RequestWithId) (*SuccessResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RemovePullProxy not implemented")
}
func (UnimplementedApiServer) UpdatePullProxy(context.Context, *PullProxyInfo) (*SuccessResponse, error) {
func (UnimplementedApiServer) UpdatePullProxy(context.Context, *UpdatePullProxyRequest) (*SuccessResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdatePullProxy not implemented")
}
func (UnimplementedApiServer) GetPushProxyList(context.Context, *emptypb.Empty) (*PushProxyListResponse, error) {
@@ -672,7 +711,7 @@ func (UnimplementedApiServer) AddPushProxy(context.Context, *PushProxyInfo) (*Su
func (UnimplementedApiServer) RemovePushProxy(context.Context, *RequestWithId) (*SuccessResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RemovePushProxy not implemented")
}
func (UnimplementedApiServer) UpdatePushProxy(context.Context, *PushProxyInfo) (*SuccessResponse, error) {
func (UnimplementedApiServer) UpdatePushProxy(context.Context, *UpdatePushProxyRequest) (*SuccessResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method UpdatePushProxy not implemented")
}
func (UnimplementedApiServer) GetRecording(context.Context, *emptypb.Empty) (*RecordingListResponse, error) {
@@ -681,15 +720,24 @@ func (UnimplementedApiServer) GetRecording(context.Context, *emptypb.Empty) (*Re
func (UnimplementedApiServer) GetTransformList(context.Context, *emptypb.Empty) (*TransformListResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetTransformList not implemented")
}
func (UnimplementedApiServer) GetRecordList(context.Context, *ReqRecordList) (*ResponseList, error) {
func (UnimplementedApiServer) GetRecordList(context.Context, *ReqRecordList) (*RecordResponseList, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetRecordList not implemented")
}
func (UnimplementedApiServer) GetEventRecordList(context.Context, *ReqRecordList) (*EventRecordResponseList, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetEventRecordList not implemented")
}
func (UnimplementedApiServer) GetRecordCatalog(context.Context, *ReqRecordCatalog) (*ResponseCatalog, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetRecordCatalog not implemented")
}
func (UnimplementedApiServer) DeleteRecord(context.Context, *ReqRecordDelete) (*ResponseDelete, error) {
return nil, status.Errorf(codes.Unimplemented, "method DeleteRecord not implemented")
}
func (UnimplementedApiServer) GetAlarmList(context.Context, *AlarmListRequest) (*AlarmListResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetAlarmList not implemented")
}
func (UnimplementedApiServer) GetSubscriptionProgress(context.Context, *StreamSnapRequest) (*SubscriptionProgressResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method GetSubscriptionProgress not implemented")
}
func (UnimplementedApiServer) mustEmbedUnimplementedApiServer() {}
func (UnimplementedApiServer) testEmbeddedByValue() {}
@@ -1252,7 +1300,7 @@ func _Api_RemovePullProxy_Handler(srv interface{}, ctx context.Context, dec func
}
func _Api_UpdatePullProxy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PullProxyInfo)
in := new(UpdatePullProxyRequest)
if err := dec(in); err != nil {
return nil, err
}
@@ -1264,7 +1312,7 @@ func _Api_UpdatePullProxy_Handler(srv interface{}, ctx context.Context, dec func
FullMethod: Api_UpdatePullProxy_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ApiServer).UpdatePullProxy(ctx, req.(*PullProxyInfo))
return srv.(ApiServer).UpdatePullProxy(ctx, req.(*UpdatePullProxyRequest))
}
return interceptor(ctx, in, info, handler)
}
@@ -1324,7 +1372,7 @@ func _Api_RemovePushProxy_Handler(srv interface{}, ctx context.Context, dec func
}
func _Api_UpdatePushProxy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PushProxyInfo)
in := new(UpdatePushProxyRequest)
if err := dec(in); err != nil {
return nil, err
}
@@ -1336,7 +1384,7 @@ func _Api_UpdatePushProxy_Handler(srv interface{}, ctx context.Context, dec func
FullMethod: Api_UpdatePushProxy_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ApiServer).UpdatePushProxy(ctx, req.(*PushProxyInfo))
return srv.(ApiServer).UpdatePushProxy(ctx, req.(*UpdatePushProxyRequest))
}
return interceptor(ctx, in, info, handler)
}
@@ -1395,6 +1443,24 @@ func _Api_GetRecordList_Handler(srv interface{}, ctx context.Context, dec func(i
return interceptor(ctx, in, info, handler)
}
func _Api_GetEventRecordList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReqRecordList)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ApiServer).GetEventRecordList(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Api_GetEventRecordList_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ApiServer).GetEventRecordList(ctx, req.(*ReqRecordList))
}
return interceptor(ctx, in, info, handler)
}
func _Api_GetRecordCatalog_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReqRecordCatalog)
if err := dec(in); err != nil {
@@ -1431,6 +1497,42 @@ func _Api_DeleteRecord_Handler(srv interface{}, ctx context.Context, dec func(in
return interceptor(ctx, in, info, handler)
}
func _Api_GetAlarmList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(AlarmListRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ApiServer).GetAlarmList(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Api_GetAlarmList_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ApiServer).GetAlarmList(ctx, req.(*AlarmListRequest))
}
return interceptor(ctx, in, info, handler)
}
func _Api_GetSubscriptionProgress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StreamSnapRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(ApiServer).GetSubscriptionProgress(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: Api_GetSubscriptionProgress_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(ApiServer).GetSubscriptionProgress(ctx, req.(*StreamSnapRequest))
}
return interceptor(ctx, in, info, handler)
}
// Api_ServiceDesc is the grpc.ServiceDesc for Api service.
// It's only intended for direct use with grpc.RegisterService,
// and not to be introspected or modified (even as a copy)
@@ -1590,6 +1692,10 @@ var Api_ServiceDesc = grpc.ServiceDesc{
MethodName: "GetRecordList",
Handler: _Api_GetRecordList_Handler,
},
{
MethodName: "GetEventRecordList",
Handler: _Api_GetEventRecordList_Handler,
},
{
MethodName: "GetRecordCatalog",
Handler: _Api_GetRecordCatalog_Handler,
@@ -1598,6 +1704,14 @@ var Api_ServiceDesc = grpc.ServiceDesc{
MethodName: "DeleteRecord",
Handler: _Api_DeleteRecord_Handler,
},
{
MethodName: "GetAlarmList",
Handler: _Api_GetAlarmList_Handler,
},
{
MethodName: "GetSubscriptionProgress",
Handler: _Api_GetSubscriptionProgress_Handler,
},
},
Streams: []grpc.StreamDesc{},
Metadata: "global.proto",

View File

@@ -1,90 +0,0 @@
package pkg
import (
"bytes"
"fmt"
"io"
"time"
"github.com/deepch/vdk/codec/aacparser"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/util"
)
var _ IAVFrame = (*ADTS)(nil)
// ADTS is an IAVFrame implementation carrying one AAC frame in ADTS
// (Audio Data Transport Stream) framing: a 7-byte header followed by
// the raw AAC payload.
type ADTS struct {
	DTS time.Duration // decode timestamp in 90 kHz ticks
	util.RecyclableMemory
}
// Parse implements IAVFrame: on first use it derives the AAC codec
// context (MPEG-4 AudioSpecificConfig) from this frame's ADTS header,
// then demuxes the payload into track.Value.Raw.
func (A *ADTS) Parse(track *AVTrack) (err error) {
	if track.ICodecCtx == nil {
		var ctx = &codec.AACCtx{}
		var reader = A.NewReader()
		var adts []byte
		// The fixed ADTS header is 7 bytes (CRC-less variant).
		adts, err = reader.ReadBytes(7)
		if err != nil {
			return err
		}
		var hdrlen, framelen, samples int
		ctx.Config, hdrlen, framelen, samples, err = aacparser.ParseADTSHeader(adts)
		if err != nil {
			return err
		}
		// Serialize the parsed config back to raw AudioSpecificConfig bytes
		// for containers (MP4/FLV) that need the record form.
		b := &bytes.Buffer{}
		aacparser.WriteMPEG4AudioConfig(b, ctx.Config)
		ctx.ConfigBytes = b.Bytes()
		track.ICodecCtx = ctx
		track.Info("ADTS", "hdrlen", hdrlen, "framelen", framelen, "samples", samples)
	}
	// Strip the header and store the raw AAC payload on the track.
	track.Value.Raw, err = A.Demux(track.ICodecCtx)
	return
}
// ConvertCtx implements IAVFrame: ADTS needs no codec-specific context
// of its own, so the source's base context is reused unchanged.
func (A *ADTS) ConvertCtx(ctx codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) {
	return ctx.GetBase(), nil, nil
}
// Demux strips the 7-byte ADTS header and returns the remaining raw
// AAC payload as a util.Memory of zero-copy views.
//
// Fix: a failed Skip (frame shorter than a full header) now returns
// immediately instead of also handing back a half-built payload
// alongside the error.
func (A *ADTS) Demux(ctx codec.ICodecCtx) (any, error) {
	var reader = A.NewReader()
	if err := reader.Skip(7); err != nil {
		return nil, err
	}
	var mem util.Memory
	reader.Range(mem.AppendOne)
	return mem, nil
}
// Mux implements IAVFrame: it wraps the raw AAC payload from frame
// into ADTS framing by prepending a freshly filled 7-byte header.
func (A *ADTS) Mux(ctx codec.ICodecCtx, frame *AVFrame) {
	A.InitRecycleIndexes(1)
	// Convert the absolute timestamp to the 90 kHz clock.
	A.DTS = frame.Timestamp * 90 / time.Millisecond
	aacCtx, ok := ctx.GetBase().(*codec.AACCtx)
	if !ok {
		// Not AAC (e.g. G.711): pass the payload through without a header.
		A.Append(frame.Raw.(util.Memory).Buffers...)
		return
	}
	adts := A.NextN(7)
	raw := frame.Raw.(util.Memory)
	aacparser.FillADTSHeader(adts, aacCtx.Config, raw.Size/aacCtx.GetSampleSize(), raw.Size)
	A.Append(raw.Buffers...)
}
// GetTimestamp converts the stored 90 kHz DTS back into a time.Duration.
func (A *ADTS) GetTimestamp() (ts time.Duration) {
	ts = A.DTS * time.Millisecond / 90
	return
}
// GetCTS reports the composition-time offset; audio frames have none.
func (A *ADTS) GetCTS() (cts time.Duration) {
	return
}
// GetSize returns the total byte size of the framed data.
func (A *ADTS) GetSize() (n int) {
	n = A.Size
	return
}
// String renders a short debug description of the frame.
func (A *ADTS) String() (s string) {
	s = fmt.Sprintf("ADTS{size:%d}", A.Size)
	return
}
// Dump is part of the IAVFrame interface but is not implemented for
// ADTS; calling it always panics.
func (A *ADTS) Dump(b byte, writer io.Writer) {
	//TODO implement me
	panic("implement me")
}

View File

@@ -1,182 +0,0 @@
package pkg
import (
"encoding/binary"
"fmt"
"io"
"time"
"github.com/deepch/vdk/codec/h264parser"
"github.com/deepch/vdk/codec/h265parser"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/util"
)
var _ IAVFrame = (*AnnexB)(nil)
// AnnexB is an IAVFrame implementation holding an H.264/H.265
// elementary stream in Annex-B byte-stream form (NALUs separated by
// 00 00 01 / 00 00 00 01 start codes).
type AnnexB struct {
	Hevc bool // true when the payload is H.265/HEVC
	PTS time.Duration // presentation timestamp in 90 kHz ticks
	DTS time.Duration // decode timestamp in 90 kHz ticks
	util.RecyclableMemory
}
// Dump writes the frame to w as one length-prefixed record: a 4-byte
// big-endian payload size followed by the payload bytes. The t
// parameter (frame type tag) is currently unused.
func (a *AnnexB) Dump(t byte, w io.Writer) {
	m := a.GetAllocator().Borrow(4 + a.Size)
	binary.BigEndian.PutUint32(m, uint32(a.Size))
	a.CopyTo(m[4:])
	// NOTE(review): the write error is ignored — acceptable for a debug
	// dump, but confirm callers never rely on completion.
	w.Write(m)
}
// ConvertCtx implements pkg.IAVFrame: AnnexB needs no codec-specific
// conversion, so it returns the base context unchanged.
func (a *AnnexB) ConvertCtx(ctx codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) {
	return ctx.GetBase(), nil, nil
}
// GetSize implements pkg.IAVFrame; it reports the payload size in bytes.
func (a *AnnexB) GetSize() (n int) {
	n = a.Size
	return
}
// GetTimestamp converts the 90 kHz DTS into a time.Duration.
func (a *AnnexB) GetTimestamp() (ts time.Duration) {
	ts = a.DTS * time.Millisecond / 90
	return
}
// GetCTS returns the composition-time offset (PTS minus DTS) as a duration.
func (a *AnnexB) GetCTS() (cts time.Duration) {
	cts = (a.PTS - a.DTS) * time.Millisecond / 90
	return
}
// Parse implements pkg.IAVFrame: it lazily creates the track's codec
// context, demuxes the Annex-B payload into NALUs, then scans them to
// capture parameter sets (VPS/SPS/PPS) and flag keyframes (IDR).
func (a *AnnexB) Parse(t *AVTrack) (err error) {
	// Install an empty codec context of the right flavour on first use.
	if a.Hevc {
		if t.ICodecCtx == nil {
			t.ICodecCtx = &codec.H265Ctx{}
		}
	} else {
		if t.ICodecCtx == nil {
			t.ICodecCtx = &codec.H264Ctx{}
		}
	}
	if t.Value.Raw, err = a.Demux(t.ICodecCtx); err != nil {
		return
	}
	for _, nalu := range t.Value.Raw.(Nalus) {
		if a.Hevc {
			ctx := t.ICodecCtx.(*codec.H265Ctx)
			// The NALU type lives in the first byte of the first buffer.
			switch codec.ParseH265NALUType(nalu.Buffers[0][0]) {
			case h265parser.NAL_UNIT_VPS:
				ctx.RecordInfo.VPS = [][]byte{nalu.ToBytes()}
			case h265parser.NAL_UNIT_SPS:
				ctx.RecordInfo.SPS = [][]byte{nalu.ToBytes()}
			case h265parser.NAL_UNIT_PPS:
				// NOTE(review): assumes VPS and SPS were already captured —
				// confirm upstream always delivers them before the PPS.
				ctx.RecordInfo.PPS = [][]byte{nalu.ToBytes()}
				ctx.CodecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(ctx.VPS(), ctx.SPS(), ctx.PPS())
			case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
				h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
				h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
				h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
				h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
				h265parser.NAL_UNIT_CODED_SLICE_CRA:
				t.Value.IDR = true
			}
		} else {
			ctx := t.ICodecCtx.(*codec.H264Ctx)
			switch codec.ParseH264NALUType(nalu.Buffers[0][0]) {
			case codec.NALU_SPS:
				ctx.RecordInfo.SPS = [][]byte{nalu.ToBytes()}
				// Only (re)build CodecData once both parameter sets exist.
				if len(ctx.RecordInfo.PPS) > 0 {
					ctx.CodecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS(), ctx.PPS())
				}
			case codec.NALU_PPS:
				ctx.RecordInfo.PPS = [][]byte{nalu.ToBytes()}
				if len(ctx.RecordInfo.SPS) > 0 {
					ctx.CodecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS(), ctx.PPS())
				}
			case codec.NALU_IDR_Picture:
				t.Value.IDR = true
			}
		}
	}
	return
}
// String implements pkg.IAVFrame; debug form "<dts> <size>".
func (a *AnnexB) String() string {
	return fmt.Sprintf("%d %d", a.DTS, a.Memory.Size)
}
// Demux implements pkg.IAVFrame: it splits the Annex-B byte stream
// into individual NALUs by scanning for 3-/4-byte start codes and
// returns a Nalus slice of zero-copy views into the frame's buffers.
func (a *AnnexB) Demux(codecCtx codec.ICodecCtx) (ret any, err error) {
	var nalus Nalus
	var lastFourBytes [4]byte // sliding window over the last 4 bytes read
	var b byte
	var shallow util.Memory
	shallow.Append(a.Buffers...) // shallow copy so scanning never mutates a
	reader := shallow.NewReader()
	// gotNalu flushes everything consumed so far into one NALU.
	gotNalu := func() {
		var nalu util.Memory
		for buf := range reader.ClipFront {
			nalu.AppendOne(buf)
		}
		nalus = append(nalus, nalu)
	}
	for {
		b, err = reader.ReadByte()
		if err == nil {
			// Slide the window and test it against both delimiters.
			copy(lastFourBytes[:], lastFourBytes[1:])
			lastFourBytes[3] = b
			var startCode = 0
			if lastFourBytes == codec.NALU_Delimiter2 {
				startCode = 4
			} else if [3]byte(lastFourBytes[1:]) == codec.NALU_Delimiter1 {
				startCode = 3
			}
			if startCode > 0 && reader.Offset() >= 3 {
				if reader.Offset() == 3 {
					// Only 3 bytes consumed so far: the leading delimiter is 3 bytes.
					startCode = 3
				}
				// Rewind over the delimiter, emit the NALU before it, then
				// consume the delimiter and drop the clipped data.
				reader.Unread(startCode)
				if reader.Offset() > 0 {
					gotNalu()
				}
				reader.Skip(startCode)
				for range reader.ClipFront {
				}
			}
		} else if err == io.EOF {
			// Flush the trailing NALU (the stream ends without a delimiter).
			if reader.Offset() > 0 {
				gotNalu()
			}
			err = nil
			break
			// NOTE(review): an error other than io.EOF would loop forever;
			// presumably ReadByte can only fail with io.EOF here — confirm.
		}
	}
	ret = nalus
	return
}
// Mux implements pkg.IAVFrame: it renders raw NALUs into an Annex-B
// byte stream, prefixing parameter sets on keyframes so decoders can
// join mid-stream.
func (a *AnnexB) Mux(codecCtx codec.ICodecCtx, frame *AVFrame) {
	// Convert to the 90 kHz clock used for DTS/PTS.
	a.DTS = frame.Timestamp * 90 / time.Millisecond
	a.PTS = a.DTS + frame.CTS*90/time.Millisecond
	a.InitRecycleIndexes(0)
	delimiter2 := codec.NALU_Delimiter2[:]
	a.AppendOne(delimiter2)
	if frame.IDR {
		// Prepend SPS/PPS (and VPS for HEVC) ahead of the keyframe.
		switch ctx := codecCtx.(type) {
		case *codec.H264Ctx:
			a.Append(ctx.SPS(), delimiter2, ctx.PPS(), delimiter2)
		case *codec.H265Ctx:
			a.Append(ctx.SPS(), delimiter2, ctx.PPS(), delimiter2, ctx.VPS(), delimiter2)
		}
	}
	for i, nalu := range frame.Raw.(Nalus) {
		if i > 0 {
			// Separate subsequent NALUs with the short 3-byte delimiter.
			a.AppendOne(codec.NALU_Delimiter1[:])
		}
		a.Append(nalu.Buffers...)
	}
}

219
pkg/annexb_reader.go Normal file
View File

@@ -0,0 +1,219 @@
package pkg
import (
"fmt"
"m7s.live/v5/pkg/util"
)
// AnnexBReader is a reader specialized for Annex-B formatted data.
// It mirrors the MemoryReader design: it reads across slice boundaries
// and accepts data appended incrementally.
type AnnexBReader struct {
	util.Memory // multi-segment backing storage
	Length, offset0, offset1 int // readable byte count; buffer index and intra-buffer offset of the read position
}
// AppendBuffer feeds one more data buffer into the reader, growing the
// readable window by its length.
func (r *AnnexBReader) AppendBuffer(data []byte) {
	n := len(data)
	r.PushOne(data)
	r.Length += n
}
// ClipFront drops the already-consumed prefix of the backing buffers so
// that memory can be reclaimed. It is a no-op when nothing has been
// read since the last clip.
//
// Fix: the fully-consumed-buffer branch previously subtracted the whole
// consumed byte count (Size-Length) from Size even though the second
// branch subtracts the partially-read prefix (offset1) again, so Size
// was double-decremented by offset1 whenever offset0 > 0 and
// offset1 > 0 simultaneously. Size now shrinks by exactly the bytes
// removed from the buffers.
func (r *AnnexBReader) ClipFront() {
	if r.Size == r.Length {
		return // nothing consumed yet
	}
	// Drop buffers that have been read in full (memory is not recycled here).
	if r.offset0 > 0 {
		var dropped int
		for _, buf := range r.Buffers[:r.offset0] {
			dropped += len(buf)
		}
		r.Buffers = r.Buffers[r.offset0:]
		r.Size -= dropped
		r.offset0 = 0
	}
	// Trim the partially-read prefix of the first remaining buffer.
	if r.offset1 > 0 && len(r.Buffers) > 0 {
		r.Buffers[0] = r.Buffers[0][r.offset1:]
		r.Size -= r.offset1
		r.offset1 = 0
	}
}
// FindStartCode scans the readable window for a NALU start code and
// returns its byte position, its length (3 or 4), and whether one was
// found.
func (r *AnnexBReader) FindStartCode() (pos int, startCodeLen int, found bool) {
	if r.Length < 3 {
		return 0, 0, false
	}
	// Byte-by-byte scan over the logical window.
	for i := 0; i <= r.Length-3; i++ {
		// Prefer the 4-byte start code 00 00 00 01 when it fits.
		if i <= r.Length-4 {
			if r.getByteAt(i) == 0x00 && r.getByteAt(i+1) == 0x00 &&
				r.getByteAt(i+2) == 0x00 && r.getByteAt(i+3) == 0x01 {
				return i, 4, true
			}
		}
		// Then the 3-byte start code 00 00 01 — but only when it is not
		// the tail of a 4-byte start code.
		if r.getByteAt(i) == 0x00 && r.getByteAt(i+1) == 0x00 && r.getByteAt(i+2) == 0x01 {
			// Ensure this is not part of a 4-byte start code.
			if i == 0 || r.getByteAt(i-1) != 0x00 {
				return i, 3, true
			}
		}
	}
	return 0, 0, false
}
// getByteAt returns the byte at logical position pos (relative to the
// current read position) without consuming anything. Out-of-range
// positions yield 0.
func (r *AnnexBReader) getByteAt(pos int) byte {
	if pos >= r.Length {
		return 0
	}
	// Walk the buffers starting at the read position to locate pos.
	currentPos := 0
	bufferIndex := r.offset0
	bufferOffset := r.offset1
	for bufferIndex < len(r.Buffers) {
		buf := r.Buffers[bufferIndex]
		available := len(buf) - bufferOffset
		if currentPos+available > pos {
			// The target position falls inside this buffer.
			return buf[bufferOffset+(pos-currentPos)]
		}
		currentPos += available
		bufferIndex++
		bufferOffset = 0
	}
	return 0
}
// InvalidDataError reports bytes found before the first start code,
// which therefore cannot belong to any NALU.
type InvalidDataError struct {
	util.Memory // the skipped invalid bytes
}
// Error renders the invalid bytes as space-separated hex.
func (e InvalidDataError) Error() string {
	return fmt.Sprintf("% 02X", e.ToBytes())
}
// ReadNALU reads one complete NALU from the buffered data.
// withStart receives the memory segments including the start code;
// withoutStart receives the segments without it. Either may be nil
// when the caller does not need that form.
//
// It returns nil without filling the outputs when no complete NALU is
// available yet (no start code, or no terminating start code), and an
// InvalidDataError when garbage precedes the first start code.
func (r *AnnexBReader) ReadNALU(withStart, withoutStart *util.Memory) error {
	r.ClipFront()
	// Locate the first start code.
	firstPos, startCodeLen, found := r.FindStartCode()
	if !found {
		return nil
	}
	// Report any junk bytes sitting before the start code.
	if firstPos > 0 {
		var invalidData util.Memory
		var reader util.MemoryReader
		reader.Memory = &r.Memory
		reader.RangeN(firstPos, invalidData.PushOne)
		return InvalidDataError{invalidData}
	}
	// Temporarily step over the current start code to find the next one,
	// then restore the read position to the start-code origin.
	saveOffset0, saveOffset1, saveLength := r.offset0, r.offset1, r.Length
	r.forward(startCodeLen)
	nextPosAfterStart, _, nextFound := r.FindStartCode()
	r.offset0, r.offset1, r.Length = saveOffset0, saveOffset1, saveLength
	if !nextFound {
		// The NALU is not terminated yet; wait for more data.
		return nil
	}
	// Copy the NALU into the outputs and advance the read position to
	// its end, leaving the next start code unconsumed.
	remaining := startCodeLen + nextPosAfterStart
	// Prefix still to skip for the withoutStart view (the start code).
	skipForWithout := startCodeLen
	for remaining > 0 && r.offset0 < len(r.Buffers) {
		buf := r.getCurrentBuffer()
		readLen := len(buf)
		if readLen > remaining {
			readLen = remaining
		}
		segment := buf[:readLen]
		if withStart != nil {
			withStart.PushOne(segment)
		}
		if withoutStart != nil {
			if skipForWithout >= readLen {
				// The whole segment is start-code bytes; skip it.
				skipForWithout -= readLen
			} else {
				// Skip only the start-code prefix, keep the rest.
				withoutStart.PushOne(segment[skipForWithout:])
				skipForWithout = 0
			}
		}
		if readLen == len(buf) {
			r.skipCurrentBuffer()
		} else {
			r.forward(readLen)
		}
		remaining -= readLen
	}
	return nil
}
// getCurrentBuffer returns the unread tail of the buffer at the current
// read position, or nil when every buffer has been consumed.
func (r *AnnexBReader) getCurrentBuffer() []byte {
	if r.offset0 < len(r.Buffers) {
		return r.Buffers[r.offset0][r.offset1:]
	}
	return nil
}
// forward advances the read position by n bytes, crossing buffer
// boundaries as needed.
func (r *AnnexBReader) forward(n int) {
	if n <= 0 || r.Length <= 0 {
		return
	}
	if n > r.Length { // defensive: never move past the readable window
		n = r.Length
	}
	r.Length -= n
	for n > 0 && r.offset0 < len(r.Buffers) {
		cur := r.Buffers[r.offset0]
		remain := len(cur) - r.offset1
		if n < remain { // still inside the current buffer
			r.offset1 += n
			n = 0
			return
		}
		// Consume the rest of this buffer and move to the next one's start.
		n -= remain
		r.offset0++
		r.offset1 = 0
	}
}
// skipCurrentBuffer consumes whatever remains of the buffer at the read
// position and moves to the start of the next one.
func (r *AnnexBReader) skipCurrentBuffer() {
	if r.offset0 >= len(r.Buffers) {
		return
	}
	r.Length -= len(r.Buffers[r.offset0]) - r.offset1
	r.offset0++
	r.offset1 = 0
}

173
pkg/annexb_reader_test.go Normal file
View File

@@ -0,0 +1,173 @@
package pkg
import (
"bytes"
_ "embed"
"math/rand"
"testing"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/util"
)
// bytesFromMemory flattens a util.Memory into one contiguous byte slice.
func bytesFromMemory(m util.Memory) []byte {
	if m.Size == 0 {
		return nil
	}
	flat := make([]byte, 0, m.Size)
	for _, chunk := range m.Buffers {
		flat = append(flat, chunk...)
	}
	return flat
}
// TestAnnexBReader_ReadNALU_Basic feeds three NALUs (with mixed 4- and
// 3-byte start codes) in one buffer and checks each payload round-trips.
func TestAnnexBReader_ReadNALU_Basic(t *testing.T) {
	var reader AnnexBReader
	// Three NALUs framed by 4-, 3- and 4-byte start codes respectively.
	expected1 := []byte{0x67, 0x42, 0x00, 0x1E}
	expected2 := []byte{0x68, 0xCE, 0x3C, 0x80}
	expected3 := []byte{0x65, 0x88, 0x84, 0x00}
	buf := append([]byte{0x00, 0x00, 0x00, 0x01}, expected1...)
	buf = append(buf, append([]byte{0x00, 0x00, 0x01}, expected2...)...)
	buf = append(buf, append([]byte{0x00, 0x00, 0x00, 0x01}, expected3...)...)
	reader.AppendBuffer(append(buf, codec.NALU_Delimiter2[:]...))
	// Read and verify the 3 NALUs (without start codes).
	var n util.Memory
	if err := reader.ReadNALU(nil, &n); err != nil {
		t.Fatalf("read nalu 1: %v", err)
	}
	if !bytes.Equal(bytesFromMemory(n), expected1) {
		t.Fatalf("nalu1 mismatch")
	}
	n = util.Memory{}
	if err := reader.ReadNALU(nil, &n); err != nil {
		t.Fatalf("read nalu 2: %v", err)
	}
	if !bytes.Equal(bytesFromMemory(n), expected2) {
		t.Fatalf("nalu2 mismatch")
	}
	n = util.Memory{}
	if err := reader.ReadNALU(nil, &n); err != nil {
		t.Fatalf("read nalu 3: %v", err)
	}
	if !bytes.Equal(bytesFromMemory(n), expected3) {
		t.Fatalf("nalu3 mismatch")
	}
	// A further read finds no complete NALU and must return a nil error;
	// only the 4-byte trailing delimiter remains buffered, hence Length 4.
	if err := reader.ReadNALU(nil, &n); err != nil {
		t.Fatalf("expected nil error when no more nalu, got: %v", err)
	}
	if reader.Length != 4 {
		t.Fatalf("expected length 0 after reading all, got %d", reader.Length)
	}
}
// TestAnnexBReader_AppendBuffer_MultiChunk_Random streams a randomly
// generated Annex-B sequence into the reader in random-sized chunks and
// verifies every payload is recovered intact and in order.
func TestAnnexBReader_AppendBuffer_MultiChunk_Random(t *testing.T) {
	var reader AnnexBReader
	rng := rand.New(rand.NewSource(1)) // fixed seed for reproducibility
	// Generate random NALU payloads framed with random 3/4-byte start codes.
	numNALU := 12
	expectedPayloads := make([][]byte, 0, numNALU)
	fullStream := make([]byte, 0, 1024)
	for i := 0; i < numNALU; i++ {
		payloadLen := 1 + rng.Intn(32)
		payload := make([]byte, payloadLen)
		for j := 0; j < payloadLen; j++ {
			payload[j] = byte(rng.Intn(256))
		}
		expectedPayloads = append(expectedPayloads, payload)
		if rng.Intn(2) == 0 {
			fullStream = append(fullStream, 0x00, 0x00, 0x01)
		} else {
			fullStream = append(fullStream, 0x00, 0x00, 0x00, 0x01)
		}
		fullStream = append(fullStream, payload...)
	}
	fullStream = append(fullStream, codec.NALU_Delimiter2[:]...) // trailing start code so the last NALU is terminated
	// Split the stream into random chunks of 1..7 bytes and append them.
	for i := 0; i < len(fullStream); {
		maxStep := 7
		remain := len(fullStream) - i
		step := 1 + rng.Intn(maxStep)
		if step > remain {
			step = remain
		}
		reader.AppendBuffer(fullStream[i : i+step])
		i += step
	}
	// Read back and verify each payload.
	for idx, expected := range expectedPayloads {
		var n util.Memory
		if err := reader.ReadNALU(nil, &n); err != nil {
			t.Fatalf("read nalu %d: %v", idx+1, err)
		}
		got := bytesFromMemory(n)
		if !bytes.Equal(got, expected) {
			t.Fatalf("nalu %d mismatch: expected %d bytes, got %d bytes", idx+1, len(expected), len(got))
		}
	}
	// No further complete NALU is available.
	var n util.Memory
	if err := reader.ReadNALU(nil, &n); err != nil {
		t.Fatalf("expected nil error when no more nalu, got: %v", err)
	}
}
// TestAnnexBReader_StartCodeAcrossBuffers checks that a start code
// split across appended buffers (e.g. 00 00 | 00 01) is still found.
func TestAnnexBReader_StartCodeAcrossBuffers(t *testing.T) {
	var reader AnnexBReader
	// A 4-byte start code split over two appends, then a short payload.
	reader.AppendBuffer([]byte{0x00, 0x00})
	reader.AppendBuffer([]byte{0x00})
	reader.AppendBuffer([]byte{0x01, 0x11, 0x22, 0x33}) // payload: 11 22 33
	reader.AppendBuffer(codec.NALU_Delimiter2[:])
	var n util.Memory
	if err := reader.ReadNALU(nil, &n); err != nil {
		t.Fatalf("read nalu: %v", err)
	}
	got := bytesFromMemory(n)
	expected := []byte{0x11, 0x22, 0x33}
	if !bytes.Equal(got, expected) {
		t.Fatalf("payload mismatch: expected %v got %v", expected, got)
	}
}
//go:embed test.h264
var annexbH264Sample []byte
var clipSizesH264 = [...]int{7823, 7157, 5137, 6268, 5958, 4573, 5661, 5589, 3917, 5207, 5347, 4111, 4755, 5199, 3761, 5014, 4981, 3736, 5075, 4889, 3739, 4701, 4655, 3471, 4086, 4428, 3309, 4388, 28, 8, 63974, 63976, 37544, 4945, 6525, 6974, 4874, 6317, 6141, 4455, 5833, 4105, 5407, 5479, 3741, 5142, 4939, 3745, 4945, 4857, 3518, 4624, 4930, 3649, 4846, 5020, 3293, 4588, 4571, 3430, 4844, 4822, 21223, 8461, 7188, 4882, 6108, 5870, 4432, 5389, 5466, 3726}
// TestAnnexBReader_EmbeddedAnnexB_H265 replays an embedded Annex-B
// capture, appending one recorded clip at a time and reading one NALU
// per clip.
// NOTE(review): despite the H265 in its name, the sample file and the
// type probe below are H.264 — consider renaming the test.
func TestAnnexBReader_EmbeddedAnnexB_H265(t *testing.T) {
	var reader AnnexBReader
	offset := 0
	for _, size := range clipSizesH264 {
		reader.AppendBuffer(annexbH264Sample[offset : offset+size])
		offset += size
		var nalu util.Memory
		if err := reader.ReadNALU(nil, &nalu); err != nil {
			t.Fatalf("read nalu: %v", err)
		} else {
			t.Logf("read nalu: %d bytes", nalu.Size)
			if nalu.Size > 0 {
				tryH264Type := codec.ParseH264NALUType(nalu.Buffers[0][0])
				t.Logf("tryH264Type: %d", tryH264Type)
			}
		}
	}
}

View File

@@ -174,7 +174,9 @@ func (r *AVRingReader) ReadFrame(conf *config.Subscribe) (err error) {
r.Delay = r.Track.LastValue.Sequence - r.Value.Sequence
// fmt.Println(r.Delay)
if r.Track.ICodecCtx != nil {
r.Log(context.TODO(), task.TraceLevel, r.Track.FourCC().String(), "ts", r.Value.Timestamp, "delay", r.Delay, "bps", r.BPS)
if r.Logger.Enabled(context.TODO(), task.TraceLevel) {
r.Log(context.TODO(), task.TraceLevel, r.Track.FourCC().String(), "ts", r.Value.Timestamp, "delay", r.Delay, "bps", r.BPS)
}
} else {
r.Warn("no codec")
}

View File

@@ -1,8 +1,6 @@
package pkg
import (
"io"
"net"
"sync"
"time"
@@ -27,21 +25,28 @@ type (
}
// Source -> Parse -> Demux -> (ConvertCtx) -> Mux(GetAllocator) -> Recycle
IAVFrame interface {
GetAllocator() *util.ScalableMemoryAllocator
SetAllocator(*util.ScalableMemoryAllocator)
Parse(*AVTrack) error // get codec info, idr
ConvertCtx(codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) // convert codec from source stream
Demux(codec.ICodecCtx) (any, error) // demux to raw format
Mux(codec.ICodecCtx, *AVFrame) // mux from raw format
GetTimestamp() time.Duration
GetCTS() time.Duration
GetSample() *Sample
GetSize() int
CheckCodecChange() error
Demux() error // demux to raw format
Mux(*Sample) error // mux from origin format
Recycle()
String() string
Dump(byte, io.Writer)
}
Nalus []util.Memory
ISequenceCodecCtx[T any] interface {
GetSequenceFrame() T
}
BaseSample struct {
Raw IRaw // 裸格式用于转换的中间格式
IDR bool
TS0, Timestamp, CTS time.Duration // 原始 TS、修正 TS、Composition Time Stamp
}
Sample struct {
codec.ICodecCtx
util.RecyclableMemory
*BaseSample
}
Nalus = util.ReuseArray[util.Memory]
AudioData = util.Memory
@@ -49,38 +54,130 @@ type (
AVFrame struct {
DataFrame
IDR bool
Timestamp time.Duration // 绝对时间戳
CTS time.Duration // composition time stamp
Wraps []IAVFrame // 封装格式
*Sample
Wraps []IAVFrame // 封装格式
}
IRaw interface {
util.Resetter
Count() int
}
AVRing = util.Ring[AVFrame]
DataFrame struct {
sync.RWMutex
discard bool
Sequence uint32 // 在一个Track中的序号
WriteTime time.Time // 写入时间,可用于比较两个帧的先后
Raw any // 裸格式
}
)
var _ IAVFrame = (*AnnexB)(nil)
// GetSize returns the number of bytes held by the sample's memory.
func (sample *Sample) GetSize() int {
	return sample.Size
}
func (frame *AVFrame) Clone() {
// GetSample satisfies the IAVFrame accessor by returning the sample itself.
func (sample *Sample) GetSample() *Sample {
	return sample
}
// CheckCodecChange is a no-op by default; wrapper formats that can
// detect an in-band codec change override it.
func (sample *Sample) CheckCodecChange() (err error) {
	return
}
// Demux is a no-op by default: the base Sample has no container
// framing to unwrap.
func (sample *Sample) Demux() error {
	return nil
}
// Mux adopts the source frame's base codec context; the base Sample
// adds no container framing of its own.
func (sample *Sample) Mux(from *Sample) error {
	sample.ICodecCtx = from.GetBase()
	return nil
}
// ConvertFrameType converts between two IAVFrame wrappers: it demuxes
// the source into its raw form when necessary, shares the source's
// allocator and base sample state with the target, then muxes.
func ConvertFrameType(from, to IAVFrame) (err error) {
	fromSample, toSample := from.GetSample(), to.GetSample()
	if !fromSample.HasRaw() {
		if err = from.Demux(); err != nil {
			return
		}
	}
	toSample.SetAllocator(fromSample.GetAllocator())
	toSample.BaseSample = fromSample.BaseSample
	return to.Mux(fromSample)
}
// HasRaw reports whether demuxed raw data is present and non-empty.
func (b *BaseSample) HasRaw() bool {
	return b.Raw != nil && b.Raw.Count() > 0
}
// Conversions between time.Duration and the 90 kHz clock used by MPEG
// transport formats.
func (b *BaseSample) GetDTS() time.Duration {
	return b.Timestamp * 90 / time.Millisecond
}
// GetPTS returns the presentation time (DTS + composition offset) in 90 kHz ticks.
func (b *BaseSample) GetPTS() time.Duration {
	return (b.Timestamp + b.CTS) * 90 / time.Millisecond
}
// SetDTS stores a 90 kHz decode timestamp as a duration.
func (b *BaseSample) SetDTS(dts time.Duration) {
	b.Timestamp = dts * time.Millisecond / 90
}
// SetPTS derives CTS from a 90 kHz presentation timestamp; SetDTS must
// have been applied first since CTS is computed against Timestamp.
func (b *BaseSample) SetPTS(pts time.Duration) {
	b.CTS = pts*time.Millisecond/90 - b.Timestamp
}
// SetTS32 sets the timestamp from a 32-bit millisecond value.
func (b *BaseSample) SetTS32(ts uint32) {
	b.Timestamp = time.Duration(ts) * time.Millisecond
}
// GetTS32 returns the timestamp as truncated 32-bit milliseconds.
func (b *BaseSample) GetTS32() uint32 {
	return uint32(b.Timestamp / time.Millisecond)
}
// SetCTS32 sets the composition offset from 32-bit milliseconds.
func (b *BaseSample) SetCTS32(ts uint32) {
	b.CTS = time.Duration(ts) * time.Millisecond
}
// GetCTS32 returns the composition offset as 32-bit milliseconds.
func (b *BaseSample) GetCTS32() uint32 {
	return uint32(b.CTS / time.Millisecond)
}
// GetNalus returns the raw payload as a reusable NALU array, lazily
// allocating it on first use.
// NOTE(review): the type assertion panics if Raw already holds a
// different type — callers must not mix audio and video accessors.
func (b *BaseSample) GetNalus() *util.ReuseArray[util.Memory] {
	if b.Raw == nil {
		b.Raw = &Nalus{}
	}
	return b.Raw.(*util.ReuseArray[util.Memory])
}
// GetAudioData returns the raw payload as audio memory, lazily
// allocating it on first use.
func (b *BaseSample) GetAudioData() *AudioData {
	if b.Raw == nil {
		b.Raw = &AudioData{}
	}
	return b.Raw.(*AudioData)
}
// ParseAVCC splits an AVCC-framed payload (length-prefixed NALUs) into
// the sample's NALU array. naluSizeLen is the width of the length
// prefix in bytes, as declared by the decoder configuration record.
func (b *BaseSample) ParseAVCC(reader *util.MemoryReader, naluSizeLen int) error {
	array := b.GetNalus()
	for reader.Length > 0 {
		l, err := reader.ReadBE(naluSizeLen)
		if err != nil {
			return err
		}
		reader.RangeN(int(l), array.GetNextPointer().PushOne)
	}
	return nil
}
func (frame *AVFrame) Reset() {
frame.Timestamp = 0
frame.IDR = false
frame.CTS = 0
frame.Raw = nil
if len(frame.Wraps) > 0 {
for _, wrap := range frame.Wraps {
wrap.Recycle()
}
frame.Wraps = frame.Wraps[:0]
frame.BaseSample.IDR = false
frame.BaseSample.TS0 = 0
frame.BaseSample.Timestamp = 0
frame.BaseSample.CTS = 0
if frame.Raw != nil {
frame.Raw.Reset()
}
}
}
@@ -89,11 +186,6 @@ func (frame *AVFrame) Discard() {
frame.Reset()
}
func (frame *AVFrame) Demux(codecCtx codec.ICodecCtx) (err error) {
frame.Raw, err = frame.Wraps[0].Demux(codecCtx)
return
}
func (df *DataFrame) StartWrite() (success bool) {
if df.discard {
return
@@ -110,31 +202,6 @@ func (df *DataFrame) Ready() {
df.Unlock()
}
func (nalus *Nalus) H264Type() codec.H264NALUType {
return codec.ParseH264NALUType((*nalus)[0].Buffers[0][0])
}
func (nalus *Nalus) H265Type() codec.H265NALUType {
return codec.ParseH265NALUType((*nalus)[0].Buffers[0][0])
}
func (nalus *Nalus) Append(bytes []byte) {
*nalus = append(*nalus, util.Memory{Buffers: net.Buffers{bytes}, Size: len(bytes)})
}
func (nalus *Nalus) ParseAVCC(reader *util.MemoryReader, naluSizeLen int) error {
for reader.Length > 0 {
l, err := reader.ReadBE(naluSizeLen)
if err != nil {
return err
}
var mem util.Memory
reader.RangeN(int(l), mem.AppendOne)
*nalus = append(*nalus, mem)
}
return nil
}
func (obus *OBUs) ParseAVCC(reader *util.MemoryReader) error {
var obuHeader av1.OBUHeader
startLen := reader.Length
@@ -159,7 +226,15 @@ func (obus *OBUs) ParseAVCC(reader *util.MemoryReader) error {
if err != nil {
return err
}
(*AudioData)(obus).AppendOne(obu)
(*AudioData)(obus).PushOne(obu)
}
return nil
}
// Reset clears the underlying memory so the OBU list can be reused.
func (obus *OBUs) Reset() {
	((*util.Memory)(obus)).Reset()
}
// Count reports how many buffers (OBUs) are currently stored.
func (obus *OBUs) Count() int {
	return (*util.Memory)(obus).Count()
}

View File

@@ -27,6 +27,32 @@ type (
}
)
// NewAACCtxFromRecord builds an AAC codec context from an MPEG-4
// AudioSpecificConfig record.
func NewAACCtxFromRecord(record []byte) (ret *AACCtx, err error) {
	ret = &AACCtx{}
	ret.CodecData, err = aacparser.NewCodecDataFromMPEG4AudioConfigBytes(record)
	return
}
// NewPCMACtx returns a default codec context for G.711 A-law audio.
// NOTE(review): SampleRate 90000 looks like the RTP *video* clock;
// G.711 normally runs at 8000 Hz — confirm this is intentional.
func NewPCMACtx() *PCMACtx {
	return &PCMACtx{
		AudioCtx: AudioCtx{
			SampleRate: 90000,
			Channels: 1,
			SampleSize: 16,
		},
	}
}
// NewPCMUCtx returns a default codec context for G.711 µ-law audio.
// NOTE(review): SampleRate 90000 looks like the RTP *video* clock;
// G.711 normally runs at 8000 Hz — confirm this is intentional.
func NewPCMUCtx() *PCMUCtx {
	return &PCMUCtx{
		AudioCtx: AudioCtx{
			SampleRate: 90000,
			Channels: 1,
			SampleSize: 16,
		},
	}
}
// GetRecord returns the codec configuration record; plain audio
// contexts have none, so an empty slice is returned.
func (ctx *AudioCtx) GetRecord() []byte {
	return []byte{}
}

View File

@@ -112,6 +112,12 @@ type (
}
)
// NewH264CtxFromRecord builds an H.264 codec context from an avcC
// (AVCDecoderConfigurationRecord) blob.
func NewH264CtxFromRecord(record []byte) (ret *H264Ctx, err error) {
	ret = &H264Ctx{}
	ret.CodecData, err = h264parser.NewCodecDataFromAVCDecoderConfRecord(record)
	return
}
func (*H264Ctx) FourCC() FourCC {
return FourCC_H264
}

View File

@@ -24,6 +24,15 @@ type (
}
)
// NewH265CtxFromRecord builds an H.265 codec context from a decoder
// configuration record. On success the NALU length field width is
// forced to 4 bytes (LengthSizeMinusOne = 3).
func NewH265CtxFromRecord(record []byte) (ret *H265Ctx, err error) {
	ret = &H265Ctx{}
	ret.CodecData, err = h265parser.NewCodecDataFromAVCDecoderConfRecord(record)
	if err == nil {
		ret.RecordInfo.LengthSizeMinusOne = 3
	}
	return
}
func (ctx *H265Ctx) GetInfo() string {
return fmt.Sprintf("fps: %d, resolution: %s", ctx.FPS(), ctx.Resolution())
}
@@ -41,5 +50,11 @@ func (h265 *H265Ctx) GetRecord() []byte {
}
func (h265 *H265Ctx) String() string {
return fmt.Sprintf("hvc1.%02X%02X%02X", h265.RecordInfo.AVCProfileIndication, h265.RecordInfo.ProfileCompatibility, h265.RecordInfo.AVCLevelIndication)
// 根据 HEVC 标准格式hvc1.profile.compatibility.level.constraints
profile := h265.RecordInfo.AVCProfileIndication
compatibility := h265.RecordInfo.ProfileCompatibility
level := h265.RecordInfo.AVCLevelIndication
// 简单实现,使用可用字段模拟 HEVC 格式
return fmt.Sprintf("hvc1.%d.%X.L%d.00", profile, compatibility, level)
}

25
pkg/codec/h26x.go Normal file
View File

@@ -0,0 +1,25 @@
package codec
// H26XCtx is a minimal codec context for H.264/H.265 streams that only
// carries the raw parameter-set NALUs collected so far.
type H26XCtx struct {
	VPS, SPS, PPS []byte
}
// FourCC returns the zero value: this placeholder has no codec tag.
func (ctx *H26XCtx) FourCC() (f FourCC) {
	return
}
// GetInfo returns an empty description.
func (ctx *H26XCtx) GetInfo() string {
	return ""
}
// GetBase returns the context itself; nothing is wrapped.
func (ctx *H26XCtx) GetBase() ICodecCtx {
	return ctx
}
// GetRecord returns nil: no decoder configuration record is available.
func (ctx *H26XCtx) GetRecord() []byte {
	return nil
}
// String returns an empty codec name.
func (ctx *H26XCtx) String() string {
	return ""
}

View File

@@ -36,6 +36,22 @@ type Config struct {
var (
durationType = reflect.TypeOf(time.Duration(0))
regexpType = reflect.TypeOf(Regexp{})
basicTypes = []reflect.Kind{
reflect.Bool,
reflect.Int,
reflect.Int8,
reflect.Int16,
reflect.Int32,
reflect.Int64,
reflect.Uint,
reflect.Uint8,
reflect.Uint16,
reflect.Uint32,
reflect.Uint64,
reflect.Float32,
reflect.Float64,
reflect.String,
}
)
func (config *Config) Range(f func(key string, value Config)) {
@@ -99,29 +115,29 @@ func (config *Config) Parse(s any, prefix ...string) {
if t.Kind() == reflect.Pointer {
t, v = t.Elem(), v.Elem()
}
isStruct := t.Kind() == reflect.Struct && t != regexpType
if isStruct {
defaults.SetDefaults(v.Addr().Interface())
}
config.Ptr = v
if !v.IsValid() {
fmt.Println("parse to ", prefix, config.name, s, "is not valid")
return
}
config.Default = v.Interface()
if l := len(prefix); l > 0 { // 读取环境变量
name := strings.ToLower(prefix[l-1])
if tag := config.tag.Get("default"); tag != "" {
_, isUnmarshaler := v.Addr().Interface().(yaml.Unmarshaler)
tag := config.tag.Get("default")
if tag != "" && isUnmarshaler {
v.Set(config.assign(name, tag))
config.Default = v.Interface()
}
if envValue := os.Getenv(strings.Join(prefix, "_")); envValue != "" {
v.Set(config.assign(name, envValue))
config.Env = v.Interface()
}
}
if t.Kind() == reflect.Struct && t != regexpType {
config.Default = v.Interface()
if isStruct {
for i, j := 0, t.NumField(); i < j; i++ {
ft, fv := t.Field(i), v.Field(i)
@@ -208,6 +224,9 @@ func (config *Config) ParseUserFile(conf map[string]any) {
}
config.File = conf
for k, v := range conf {
k = strings.ReplaceAll(k, "-", "")
k = strings.ReplaceAll(k, "_", "")
k = strings.ToLower(k)
if config.Has(k) {
if prop := config.Get(k); prop.props != nil {
if v != nil {
@@ -312,16 +331,18 @@ func (config *Config) GetMap() map[string]any {
var regexPureNumber = regexp.MustCompile(`^\d+$`)
func (config *Config) assign(k string, v any) (target reflect.Value) {
ft := config.Ptr.Type()
func unmarshal(ft reflect.Type, v any) (target reflect.Value) {
source := reflect.ValueOf(v)
for _, t := range basicTypes {
if source.Kind() == t && ft.Kind() == t {
return source
}
}
switch ft {
case durationType:
target = reflect.New(ft).Elem()
if source.Type() == durationType {
target.Set(source)
return source
} else if source.IsZero() || !source.IsValid() {
target.SetInt(0)
} else {
@@ -329,7 +350,7 @@ func (config *Config) assign(k string, v any) (target reflect.Value) {
if d, err := time.ParseDuration(timeStr); err == nil && !regexPureNumber.MatchString(timeStr) {
target.SetInt(int64(d))
} else {
slog.Error("invalid duration value please add unit (s,m,h,d)eg: 100ms, 10s, 4m, 1h", "key", k, "value", source)
slog.Error("invalid duration value please add unit (s,m,h,d)eg: 100ms, 10s, 4m, 1h", "value", timeStr)
os.Exit(1)
}
}
@@ -338,58 +359,69 @@ func (config *Config) assign(k string, v any) (target reflect.Value) {
regexpStr := source.String()
target.Set(reflect.ValueOf(Regexp{regexp.MustCompile(regexpStr)}))
default:
if ft.Kind() == reflect.Map {
target = reflect.MakeMap(ft)
if v != nil {
tmpStruct := reflect.StructOf([]reflect.StructField{
{
Name: "Key",
Type: ft.Key(),
},
})
tmpValue := reflect.New(tmpStruct)
for k, v := range v.(map[string]any) {
_ = yaml.Unmarshal([]byte(fmt.Sprintf("key: %s", k)), tmpValue.Interface())
var value reflect.Value
if ft.Elem().Kind() == reflect.Struct {
value = reflect.New(ft.Elem())
defaults.SetDefaults(value.Interface())
if reflect.TypeOf(v).Kind() != reflect.Map {
value.Elem().Field(0).Set(reflect.ValueOf(v))
} else {
out, _ := yaml.Marshal(v)
_ = yaml.Unmarshal(out, value.Interface())
}
value = value.Elem()
} else {
value = reflect.ValueOf(v)
switch ft.Kind() {
case reflect.Struct:
newStruct := reflect.New(ft)
defaults.SetDefaults(newStruct.Interface())
if value, ok := v.(map[string]any); ok {
for i := 0; i < ft.NumField(); i++ {
key := strings.ToLower(ft.Field(i).Name)
if vv, ok := value[key]; ok {
newStruct.Elem().Field(i).Set(unmarshal(ft.Field(i).Type, vv))
}
target.SetMapIndex(tmpValue.Elem().Field(0), value)
}
} else {
newStruct.Elem().Field(0).Set(unmarshal(ft.Field(0).Type, v))
}
return newStruct.Elem()
case reflect.Map:
if v != nil {
target = reflect.MakeMap(ft)
for k, v := range v.(map[string]any) {
target.SetMapIndex(unmarshal(ft.Key(), k), unmarshal(ft.Elem(), v))
}
}
} else {
tmpStruct := reflect.StructOf([]reflect.StructField{
{
Name: strings.ToUpper(k),
Type: ft,
},
})
tmpValue := reflect.New(tmpStruct)
case reflect.Slice:
if v != nil {
s := v.([]any)
target = reflect.MakeSlice(ft, len(s), len(s))
for i, v := range s {
target.Index(i).Set(unmarshal(ft.Elem(), v))
}
}
default:
if v != nil {
var out []byte
var err error
if vv, ok := v.(string); ok {
out = []byte(fmt.Sprintf("%s: %s", k, vv))
out = []byte(fmt.Sprintf("%s: %s", "value", vv))
} else {
out, _ = yaml.Marshal(map[string]any{k: v})
out, err = yaml.Marshal(map[string]any{"value": v})
if err != nil {
panic(err)
}
}
_ = yaml.Unmarshal(out, tmpValue.Interface())
tmpValue := reflect.New(reflect.StructOf([]reflect.StructField{
{
Name: "Value",
Type: ft,
},
}))
err = yaml.Unmarshal(out, tmpValue.Interface())
if err != nil {
panic(err)
}
return tmpValue.Elem().Field(0)
}
target = tmpValue.Elem().Field(0)
}
}
return
}
func (config *Config) assign(k string, v any) reflect.Value {
return unmarshal(config.Ptr.Type(), v)
}
func Parse(target any, conf map[string]any) {
var c Config
c.Parse(target)

View File

@@ -49,6 +49,7 @@ func (task *ListenQuicWork) Start() (err error) {
task.Error("listen quic error", err)
return
}
task.OnStop(task.Listener.Close)
task.Info("listen quic on", task.ListenAddr)
return
}
@@ -63,7 +64,3 @@ func (task *ListenQuicWork) Go() error {
task.AddTask(subTask)
}
}
func (task *ListenQuicWork) Dispose() {
_ = task.Listener.Close()
}

View File

@@ -41,6 +41,7 @@ type TCP struct {
ListenNum int `desc:"同时并行监听数量0为CPU核心数量"` //同时并行监听数量0为CPU核心数量
NoDelay bool `desc:"是否禁用Nagle算法"` //是否禁用Nagle算法
WriteBuffer int `desc:"写缓冲区大小"` //写缓冲区大小
ReadBuffer int `desc:"读缓冲区大小"` //读缓冲区大小
KeepAlive bool `desc:"是否启用KeepAlive"` //是否启用KeepAlive
AutoListen bool `default:"true" desc:"是否自动监听"`
}
@@ -148,6 +149,12 @@ func (task *ListenTCPWork) listen(handler TCPHandler) {
continue
}
}
if task.ReadBuffer > 0 {
if err := tcpConn.SetReadBuffer(task.ReadBuffer); err != nil {
task.Error("failed to set read buffer", "error", err)
continue
}
}
tempDelay = 0
subTask := handler(tcpConn)
task.AddTask(subTask)

View File

@@ -16,6 +16,10 @@ const (
RelayModeRelay = "relay"
RelayModeMix = "mix"
RecordModeAuto RecordMode = "auto"
RecordModeEvent RecordMode = "event"
RecordModeTest RecordMode = "test"
HookOnServerKeepAlive HookType = "server_keep_alive"
HookOnPublishStart HookType = "publish_start"
HookOnPublishEnd HookType = "publish_end"
@@ -29,11 +33,34 @@ const (
HookOnRecordEnd HookType = "record_end"
HookOnTransformStart HookType = "transform_start"
HookOnTransformEnd HookType = "transform_end"
HookOnSystemStart HookType = "system_start"
HookDefault HookType = "default"
EventLevelLow EventLevel = "low"
EventLevelHigh EventLevel = "high"
AlarmStorageException = 0x10010 // 存储异常
AlarmStorageExceptionRecover = 0x10011 // 存储异常恢复
AlarmPullOffline = 0x10012 // 拉流异常,触发一次报警。
AlarmPullRecover = 0x10013 // 拉流恢复
AlarmDiskSpaceFull = 0x10014 // 磁盘空间满,磁盘占有率,超出最大磁盘空间使用率,触发报警。
AlarmStartupRunning = 0x10015 // 启动运行
AlarmPublishOffline = 0x10016 // 发布者异常,触发一次报警。
AlarmPublishRecover = 0x10017 // 发布者恢复
AlarmSubscribeOffline = 0x10018 // 订阅者异常,触发一次报警。
AlarmSubscribeRecover = 0x10019 // 订阅者恢复
AlarmPushOffline = 0x10020 // 推流异常,触发一次报警。
AlarmPushRecover = 0x10021 // 推流恢复
AlarmTransformOffline = 0x10022 // 转换异常,触发一次报警。
AlarmTransformRecover = 0x10023 // 转换恢复
AlarmKeepAliveOnline = 0x10024 // 保活正常,触发一次报警。
)
type (
HookType string
Publish struct {
EventLevel = string
RecordMode = string
HookType string
Publish struct {
MaxCount int `default:"0" desc:"最大发布者数量"` // 最大发布者数量
PubAudio bool `default:"true" desc:"是否发布音频"`
PubVideo bool `default:"true" desc:"是否发布视频"`
@@ -44,7 +71,7 @@ type (
IdleTimeout time.Duration `desc:"空闲(无订阅)超时"` // 空闲(无订阅)超时
PauseTimeout time.Duration `default:"30s" desc:"暂停超时时间"` // 暂停超时
BufferTime time.Duration `desc:"缓冲时长0代表取最近关键帧"` // 缓冲长度(单位:秒)0代表取最近关键帧
Speed float64 `default:"1" desc:"发送速率"` // 发送速率0 为不限速
Speed float64 `desc:"发送速率"` // 发送速率0 为不限速
Scale float64 `default:"1" desc:"缩放倍数"` // 缩放倍数
MaxFPS int `default:"60" desc:"最大FPS"` // 最大FPS
Key string `desc:"发布鉴权key"` // 发布鉴权key
@@ -62,16 +89,18 @@ type (
SyncMode int `default:"1" desc:"同步模式" enum:"0:采用时间戳同步,1:采用写入时间同步"` // 0采用时间戳同步1采用写入时间同步
IFrameOnly bool `desc:"只要关键帧"` // 只要关键帧
WaitTimeout time.Duration `default:"10s" desc:"等待流超时时间"` // 等待流超时
WriteBufferSize int `desc:"写缓冲大小"` // 写缓冲大小
Key string `desc:"订阅鉴权key"` // 订阅鉴权key
SubType string `desc:"订阅类型"` // 订阅类型
WaitTrack string `default:"video" desc:"等待轨道" enum:"audio:等待音频,video:等待视频,all:等待全部"`
WriteBufferSize int `desc:"写缓冲大小"` // 写缓冲大小
Key string `desc:"订阅鉴权key"` // 订阅鉴权key
SubType string `desc:"订阅类型"` // 订阅类型
}
HTTPValues map[string][]string
Pull struct {
URL string `desc:"拉流地址"`
MaxRetry int `default:"-1" desc:"断开后自动重试次数,0:不重试,-1:无限重试"` // 断开后自动重拉,0 表示不自动重拉,-1 表示无限重拉高于0 的数代表最大重拉次数
RetryInterval time.Duration `default:"5s" desc:"重试间隔"` // 重试间隔
Proxy string `desc:"代理地址"` // 代理地址
Loop int `desc:"拉流循环次数,-1:无限循环"` // 拉流循环次数,-1 表示无限循环
MaxRetry int `desc:"断开后自动重试次数,0:不重试,-1:无限重试"` // 断开后自动重拉,0 表示不自动重拉,-1 表示无限重拉高于0 的数代表最大重拉次数
RetryInterval time.Duration `default:"5s" desc:"重试间隔"` // 重试间隔
Proxy string `desc:"代理地址"` // 代理地址
Header HTTPValues
Args HTTPValues `gorm:"-:all"` // 拉流参数
TestMode int `desc:"测试模式,0:关闭,1:只拉流不发布"` // 测试模式
@@ -83,11 +112,22 @@ type (
Proxy string `desc:"代理地址"` // 代理地址
Header HTTPValues
}
RecordEvent struct {
EventId string
BeforeDuration uint32 `json:"beforeDuration" desc:"事件前缓存时长" gorm:"comment:事件前缓存时长;default:30000"`
AfterDuration uint32 `json:"afterDuration" desc:"事件后缓存时长" gorm:"comment:事件后缓存时长;default:30000"`
EventDesc string `json:"eventDesc" desc:"事件描述" gorm:"type:varchar(255);comment:事件描述"`
EventLevel EventLevel `json:"eventLevel" desc:"事件级别" gorm:"type:varchar(255);comment:事件级别,high表示重要事件无法删除且表示无需自动删除,low表示非重要事件,达到自动删除时间后,自动删除;default:'low'"`
EventName string `json:"eventName" desc:"事件名称" gorm:"type:varchar(255);comment:事件名称"`
}
Record struct {
Type string `desc:"录制类型"` // 录制类型 mp4、flv、hls、hlsv7
FilePath string `desc:"录制文件路径"` // 录制文件路径
Fragment time.Duration `desc:"分片时长"` // 分片时长
Append bool `desc:"是否追加录制"` // 是否追加录制
Mode RecordMode `json:"mode" desc:"事件类型,auto=连续录像模式event=事件录像模式" gorm:"type:varchar(255);comment:事件类型,auto=连续录像模式event=事件录像模式;default:'auto'"`
Type string `desc:"录制类型"` // 录制类型 mp4、flv、hls、hlsv7
FilePath string `desc:"录制文件路径"` // 录制文件路径
Fragment time.Duration `desc:"分片时长"` // 分片时长
RealTime bool `desc:"是否实时录制"` // 是否实时录制
Append bool `desc:"是否追加录制"` // 是否追加录制
Event *RecordEvent `json:"event" desc:"事件录像配置" gorm:"-"` // 事件录像配置
}
TransfromOutput struct {
Target string `desc:"转码目标"` // 转码目标
@@ -111,10 +151,11 @@ type (
URL string // Webhook 地址
Method string `default:"POST"` // HTTP 方法
Headers map[string]string // 自定义请求头
TimeoutSeconds int `default:"5"` // 超时时间(秒)
RetryTimes int `default:"3"` // 重试次数
RetryInterval time.Duration `default:"1s"` // 重试间隔
Interval int `default:"60"` // 保活间隔(秒)
TimeoutSeconds int `default:"5"` // 超时时间(秒)
RetryTimes int `default:"3"` // 重试次数
RetryInterval time.Duration `default:"1s"` // 重试间隔
Interval int `default:"60"` // 保活间隔(秒)
SaveAlarm bool `default:"false"` // 是否保存告警到数据库
}
Common struct {
PublicIP string

View File

@@ -9,14 +9,11 @@ import (
// User represents a user in the system
type User struct {
ID uint `gorm:"primarykey"`
CreatedAt time.Time
UpdatedAt time.Time
DeletedAt gorm.DeletedAt `gorm:"index"`
Username string `gorm:"uniqueIndex;size:64"`
Password string `gorm:"size:60"` // bcrypt hash
Role string `gorm:"size:20;default:'user'"` // admin or user
LastLogin time.Time `gorm:"type:datetime;default:CURRENT_TIMESTAMP"`
gorm.Model
Username string `gorm:"uniqueIndex;size:64"`
Password string `gorm:"size:60"` // bcrypt hash
Role string `gorm:"size:20;default:'user'"` // admin or user
LastLogin time.Time `gorm:"type:timestamp;default:CURRENT_TIMESTAMP"`
}
// BeforeCreate hook to hash password before saving

View File

@@ -4,6 +4,7 @@ import "errors"
var (
ErrNotFound = errors.New("not found")
ErrDisposed = errors.New("disposed")
ErrDisabled = errors.New("disabled")
ErrStreamExist = errors.New("stream exist")
ErrRecordExists = errors.New("record exists")

82
pkg/format/adts.go Normal file
View File

@@ -0,0 +1,82 @@
package format
import (
"bytes"
"fmt"
"github.com/deepch/vdk/codec/aacparser"
"m7s.live/v5/pkg"
"m7s.live/v5/pkg/codec"
)
// Compile-time assertion that Mpeg2Audio implements pkg.IAVFrame.
var _ pkg.IAVFrame = (*Mpeg2Audio)(nil)

// Mpeg2Audio carries an MPEG-2 audio elementary-stream frame — ADTS-framed
// AAC, or a raw payload for other codecs — built on pkg.Sample.
type Mpeg2Audio struct {
	pkg.Sample
}
// CheckCodecChange parses the leading ADTS header of an AAC frame and, when
// the derived AudioSpecificConfig differs from the current codec context (or
// no context is set yet), installs a fresh codec.AACCtx. Frames whose
// context is non-AAC are left untouched.
//
// Returns a read/parse error if the 7-byte ADTS header cannot be obtained or
// decoded.
func (A *Mpeg2Audio) CheckCodecChange() (err error) {
	old := A.ICodecCtx
	// Only AAC streams are ADTS-framed; skip parsing for other codecs.
	if old == nil || old.FourCC().Is(codec.FourCC_MP4A) {
		var reader = A.NewReader()
		var adts []byte
		adts, err = reader.ReadBytes(7) // fixed ADTS header length
		if err != nil {
			return
		}
		var conf aacparser.MPEG4AudioConfig
		// Header length, frame length and sample count are not used here.
		conf, _, _, _, err = aacparser.ParseADTSHeader(adts)
		if err != nil {
			return
		}
		b := &bytes.Buffer{}
		aacparser.WriteMPEG4AudioConfig(b, conf)
		// Swap in a new context only when the config bytes actually changed.
		if old == nil || !bytes.Equal(b.Bytes(), old.GetRecord()) {
			var ctx = &codec.AACCtx{}
			ctx.ConfigBytes = b.Bytes()
			A.ICodecCtx = ctx
		}
	}
	return
}
// Demux extracts the raw audio payload from the frame, stripping the 7-byte
// ADTS header when the codec is AAC.
func (A *Mpeg2Audio) Demux() error {
	r := A.NewReader()
	out := A.GetAudioData()
	// AAC payloads are preceded by a fixed ADTS header that must not be
	// copied into the raw audio data.
	if A.ICodecCtx.FourCC().Is(codec.FourCC_MP4A) {
		if err := r.Skip(7); err != nil {
			return err
		}
	}
	r.Range(out.PushOne)
	return nil
}
// Mux encodes a demuxed audio sample into MPEG-2 audio framing: for AAC a
// 7-byte ADTS header is written before the payload, other codecs are copied
// through unchanged.
func (A *Mpeg2Audio) Mux(frame *pkg.Sample) (err error) {
	// Inherit the codec context from the source frame on first use.
	if A.ICodecCtx == nil {
		A.ICodecCtx = frame.GetBase()
	}
	raw := frame.Raw.(*pkg.AudioData)
	aacCtx, ok := A.ICodecCtx.(*codec.AACCtx)
	if ok {
		// NOTE(review): InitRecycleIndexes(1) presumably marks the one
		// locally-allocated buffer (the ADTS header) as recyclable — confirm.
		A.InitRecycleIndexes(1)
		adts := A.NextN(7)
		aacparser.FillADTSHeader(adts, aacCtx.Config, raw.Size/aacCtx.GetSampleSize(), raw.Size)
	} else {
		A.InitRecycleIndexes(0)
	}
	A.Push(raw.Buffers...)
	return
}
// String returns a short debug description including the frame's byte size.
func (A *Mpeg2Audio) String() string {
	return fmt.Sprintf("ADTS{size:%d}", A.Size)
}

290
pkg/format/annexb.go Normal file
View File

@@ -0,0 +1,290 @@
package format
import (
"bytes"
"fmt"
"io"
"slices"
"github.com/deepch/vdk/codec/h264parser"
"github.com/deepch/vdk/codec/h265parser"
"m7s.live/v5/pkg"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/util"
)
// AnnexB is a video frame in Annex-B byte-stream format (NAL units separated
// by 00 00 01 / 00 00 00 01 start codes), built on pkg.Sample.
type AnnexB struct {
	pkg.Sample
}
// CheckCodecChange scans the frame's NAL units for parameter sets and, when
// a complete set (SPS/PPS, plus VPS for H.265) is present and differs from
// the current codec context's record, installs a fresh codec context. It
// also sets a.IDR from the slice types seen. Returns pkg.ErrSkip while no
// usable codec record exists yet.
func (a *AnnexB) CheckCodecChange() (err error) {
	// Ensure the frame has been demuxed into NAL units before inspection.
	if !a.HasRaw() || a.ICodecCtx == nil {
		err = a.Demux()
		if err != nil {
			return
		}
	}
	// Demux may leave the context nil (no recognizable NALU) — nothing to do.
	if a.ICodecCtx == nil {
		return pkg.ErrSkip
	}
	var vps, sps, pps []byte
	a.IDR = false
	for nalu := range a.Raw.(*pkg.Nalus).RangePoint {
		if a.FourCC() == codec.FourCC_H265 {
			// H.265: collect parameter sets and detect IRAP slices.
			switch codec.ParseH265NALUType(nalu.Buffers[0][0]) {
			case h265parser.NAL_UNIT_VPS:
				vps = nalu.ToBytes()
			case h265parser.NAL_UNIT_SPS:
				sps = nalu.ToBytes()
			case h265parser.NAL_UNIT_PPS:
				pps = nalu.ToBytes()
			case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
				h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
				h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
				h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
				h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
				h265parser.NAL_UNIT_CODED_SLICE_CRA:
				a.IDR = true
			}
		} else {
			// H.264: collect SPS/PPS and detect IDR pictures.
			switch codec.ParseH264NALUType(nalu.Buffers[0][0]) {
			case codec.NALU_SPS:
				sps = nalu.ToBytes()
			case codec.NALU_PPS:
				pps = nalu.ToBytes()
			case codec.NALU_IDR_Picture:
				a.IDR = true
			}
		}
	}
	if a.FourCC() == codec.FourCC_H265 {
		if vps != nil && sps != nil && pps != nil {
			var codecData h265parser.CodecData
			codecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(vps, sps, pps)
			if err != nil {
				return
			}
			// NOTE(review): the type assertions below assume ICodecCtx is
			// already a *codec.H265Ctx; any other concrete type would panic —
			// confirm the upstream invariant.
			if !bytes.Equal(codecData.Record, a.ICodecCtx.(*codec.H265Ctx).Record) {
				a.ICodecCtx = &codec.H265Ctx{
					CodecData: codecData,
				}
			}
		}
		if a.ICodecCtx.(*codec.H265Ctx).Record == nil {
			err = pkg.ErrSkip
		}
	} else {
		if sps != nil && pps != nil {
			var codecData h264parser.CodecData
			codecData, err = h264parser.NewCodecDataFromSPSAndPPS(sps, pps)
			if err != nil {
				return
			}
			// NOTE(review): same *codec.H264Ctx assertion assumption as above.
			if !bytes.Equal(codecData.Record, a.ICodecCtx.(*codec.H264Ctx).Record) {
				a.ICodecCtx = &codec.H264Ctx{
					CodecData: codecData,
				}
			}
		}
		if a.ICodecCtx.(*codec.H264Ctx).Record == nil {
			err = pkg.ErrSkip
		}
	}
	return
}
// String implements pkg.IAVFrame; it reports the frame timestamp and total
// byte size for debugging.
func (a *AnnexB) String() string {
	return fmt.Sprintf("%d %d", a.Timestamp, a.Memory.Size)
}
// Demux implements pkg.IAVFrame. It scans the Annex-B byte stream for
// 3- and 4-byte start codes and splits the buffers into individual NAL
// units, stored into the frame's Nalus. On the first recognizable H.264
// NALU type it seeds an empty H264Ctx so later stages know the codec.
func (a *AnnexB) Demux() (err error) {
	nalus := a.GetNalus()
	var lastFourBytes [4]byte
	var b byte
	// Work on a shallow copy so the original buffers are left untouched.
	var shallow util.Memory
	shallow.Push(a.Buffers...)
	reader := shallow.NewReader()
	// gotNalu flushes everything read so far (up to the current start code)
	// into a new NAL unit.
	gotNalu := func() {
		nalu := nalus.GetNextPointer()
		for buf := range reader.ClipFront {
			nalu.PushOne(buf)
		}
		if a.ICodecCtx == nil {
			// Seed a codec context from the first recognizable H.264 NALU type.
			naluType := codec.ParseH264NALUType(nalu.Buffers[0][0])
			switch naluType {
			case codec.NALU_Non_IDR_Picture,
				codec.NALU_IDR_Picture,
				codec.NALU_SEI,
				codec.NALU_SPS,
				codec.NALU_PPS,
				codec.NALU_Access_Unit_Delimiter:
				a.ICodecCtx = &codec.H264Ctx{}
			}
		}
	}
	for {
		b, err = reader.ReadByte()
		if err == nil {
			// Maintain a sliding window of the last four bytes to spot
			// 00 00 00 01 (4-byte) and 00 00 01 (3-byte) start codes.
			copy(lastFourBytes[:], lastFourBytes[1:])
			lastFourBytes[3] = b
			var startCode = 0
			if lastFourBytes == codec.NALU_Delimiter2 {
				startCode = 4
			} else if [3]byte(lastFourBytes[1:]) == codec.NALU_Delimiter1 {
				startCode = 3
			}
			if startCode > 0 && reader.Offset() >= 3 {
				// At offset 3 only a 3-byte start code can have been read.
				if reader.Offset() == 3 {
					startCode = 3
				}
				reader.Unread(startCode)
				if reader.Offset() > 0 {
					gotNalu()
				}
				reader.Skip(startCode)
				// Drop the consumed start code from the pending clip region.
				for range reader.ClipFront {
				}
			}
		} else if err == io.EOF {
			// Flush the trailing NAL unit, if any, and finish cleanly.
			if reader.Offset() > 0 {
				gotNalu()
			}
			err = nil
			break
		}
	}
	return
}
// Mux serializes a demuxed sample into Annex-B format: a leading 4-byte
// start code, parameter sets re-inserted before IDR frames, and NAL units
// separated by 3-byte start codes.
func (a *AnnexB) Mux(fromBase *pkg.Sample) (err error) {
	// Inherit the codec context from the source frame on first use.
	if a.ICodecCtx == nil {
		a.ICodecCtx = fromBase.GetBase()
	}
	a.InitRecycleIndexes(0)
	delimiter2 := codec.NALU_Delimiter2[:]
	a.PushOne(delimiter2)
	// Prepend parameter sets so an IDR frame is independently decodable.
	if fromBase.IDR {
		switch ctx := fromBase.GetBase().(type) {
		case *codec.H264Ctx:
			a.Push(ctx.SPS(), delimiter2, ctx.PPS(), delimiter2)
		case *codec.H265Ctx:
			a.Push(ctx.SPS(), delimiter2, ctx.PPS(), delimiter2, ctx.VPS(), delimiter2)
		}
	}
	for i, nalu := range *fromBase.Raw.(*pkg.Nalus) {
		// Subsequent NAL units are separated by the short 3-byte start code.
		if i > 0 {
			a.PushOne(codec.NALU_Delimiter1[:])
		}
		a.Push(nalu.Buffers...)
	}
	return
}
// Parse incrementally consumes NAL units from an AnnexBReader. While the
// codec is still unknown (H26XCtx placeholder) it accumulates VPS/SPS/PPS;
// once a complete parameter set plus an IDR slice arrives it promotes the
// context to a concrete H264Ctx/H265Ctx and re-inserts the parameter sets in
// front of the frame. With a concrete context it flags IDR/non-IDR slices
// and reports hasFrame when a full picture has been gathered.
func (a *AnnexB) Parse(reader *pkg.AnnexBReader) (hasFrame bool, err error) {
	nalus := a.BaseSample.GetNalus()
	for !hasFrame {
		nalu := nalus.GetNextPointer()
		reader.ReadNALU(&a.Memory, nalu)
		// An empty NALU means the reader has no complete unit buffered yet.
		if nalu.Size == 0 {
			nalus.Reduce()
			return
		}
		// The first byte is interpreted both ways until the codec is known.
		tryH264Type := codec.ParseH264NALUType(nalu.Buffers[0][0])
		h265Type := codec.ParseH265NALUType(nalu.Buffers[0][0])
		if a.ICodecCtx == nil {
			a.ICodecCtx = &codec.H26XCtx{}
		}
		switch ctx := a.ICodecCtx.(type) {
		case *codec.H26XCtx:
			// Codec not yet determined: stash parameter sets, discard the
			// NALU from the frame (it will be re-inserted on promotion).
			if tryH264Type == codec.NALU_SPS {
				ctx.SPS = nalu.ToBytes()
				nalus.Reduce()
				a.Recycle()
			} else if tryH264Type == codec.NALU_PPS {
				ctx.PPS = nalu.ToBytes()
				nalus.Reduce()
				a.Recycle()
			} else if h265Type == h265parser.NAL_UNIT_VPS {
				ctx.VPS = nalu.ToBytes()
				nalus.Reduce()
				a.Recycle()
			} else if h265Type == h265parser.NAL_UNIT_SPS {
				ctx.SPS = nalu.ToBytes()
				nalus.Reduce()
				a.Recycle()
			} else if h265Type == h265parser.NAL_UNIT_PPS {
				ctx.PPS = nalu.ToBytes()
				nalus.Reduce()
				a.Recycle()
			} else {
				if ctx.SPS != nil && ctx.PPS != nil && tryH264Type == codec.NALU_IDR_Picture {
					// Promote to H.264: build codec data and put SPS/PPS back
					// in front of the pending frame.
					var codecData h264parser.CodecData
					codecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS, ctx.PPS)
					if err != nil {
						return
					}
					a.ICodecCtx = &codec.H264Ctx{
						CodecData: codecData,
					}
					*nalus = slices.Insert(*nalus, 0, util.NewMemory(ctx.SPS), util.NewMemory(ctx.PPS))
					delimiter2 := codec.NALU_Delimiter2[:]
					a.Buffers = slices.Insert(a.Buffers, 0, delimiter2, ctx.SPS, delimiter2, ctx.PPS)
					// 8 = two 4-byte start codes added alongside SPS and PPS.
					a.Size += 8 + len(ctx.SPS) + len(ctx.PPS)
				} else if ctx.VPS != nil && ctx.SPS != nil && ctx.PPS != nil && h265Type == h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL {
					// Promote to H.265 analogously, with VPS as well.
					var codecData h265parser.CodecData
					codecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(ctx.VPS, ctx.SPS, ctx.PPS)
					if err != nil {
						return
					}
					a.ICodecCtx = &codec.H265Ctx{
						CodecData: codecData,
					}
					*nalus = slices.Insert(*nalus, 0, util.NewMemory(ctx.VPS), util.NewMemory(ctx.SPS), util.NewMemory(ctx.PPS))
					delimiter2 := codec.NALU_Delimiter2[:]
					a.Buffers = slices.Insert(a.Buffers, 0, delimiter2, ctx.VPS, delimiter2, ctx.SPS, delimiter2, ctx.PPS)
					// NOTE(review): 24 accounts for the inserted start codes;
					// three 4-byte codes would be 12 — confirm intended value.
					a.Size += 24 + len(ctx.VPS) + len(ctx.SPS) + len(ctx.PPS)
				} else {
					// Not enough information yet: drop the NALU.
					nalus.Reduce()
					a.Recycle()
				}
			}
		case *codec.H264Ctx:
			switch tryH264Type {
			case codec.NALU_IDR_Picture:
				a.IDR = true
				hasFrame = true
			case codec.NALU_Non_IDR_Picture:
				a.IDR = false
				hasFrame = true
			}
		case *codec.H265Ctx:
			switch h265Type {
			case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
				h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
				h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
				h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
				h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
				h265parser.NAL_UNIT_CODED_SLICE_CRA:
				a.IDR = true
				hasFrame = true
			case h265parser.NAL_UNIT_CODED_SLICE_TRAIL_N,
				h265parser.NAL_UNIT_CODED_SLICE_TRAIL_R,
				h265parser.NAL_UNIT_CODED_SLICE_TSA_N,
				h265parser.NAL_UNIT_CODED_SLICE_TSA_R,
				h265parser.NAL_UNIT_CODED_SLICE_STSA_N,
				h265parser.NAL_UNIT_CODED_SLICE_STSA_R,
				h265parser.NAL_UNIT_CODED_SLICE_RADL_N,
				h265parser.NAL_UNIT_CODED_SLICE_RADL_R,
				h265parser.NAL_UNIT_CODED_SLICE_RASL_N,
				h265parser.NAL_UNIT_CODED_SLICE_RASL_R:
				a.IDR = false
				hasFrame = true
			}
		}
	}
	return
}

309
pkg/format/ps/mpegps.go Normal file
View File

@@ -0,0 +1,309 @@
package mpegps
import (
"errors"
"fmt"
"io"
"time"
"m7s.live/v5"
"m7s.live/v5/pkg"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/format"
"m7s.live/v5/pkg/util"
mpegts "m7s.live/v5/pkg/format/ts"
)
// MPEG program stream start codes (32-bit, big-endian as they appear on the
// wire).
const (
	StartCodePS        = 0x000001ba // pack header
	StartCodeSYS       = 0x000001bb // system header
	StartCodeMAP       = 0x000001bc // program stream map (PSM)
	StartCodePadding   = 0x000001be // padding stream
	StartCodeVideo     = 0x000001e0 // video elementary stream 0
	StartCodeVideo1    = 0x000001e1 // video elementary stream 1
	StartCodeVideo2    = 0x000001e2 // video elementary stream 2
	StartCodeAudio     = 0x000001c0 // audio elementary stream 0
	PrivateStreamCode  = 0x000001bd // private stream 1
	MEPGProgramEndCode = 0x000001b9 // program end code
)
// PS packet header size constants.
const (
	PSPackHeaderSize   = 14     // PS pack header basic size
	PSSystemHeaderSize = 18     // PS system header basic size
	PSMHeaderSize      = 12     // PS map header basic size
	PESHeaderMinSize   = 9      // PES header minimum size
	MaxPESPayloadSize  = 0xFFEB // 0xFFFF - 14 (to leave room for headers)
)
// MpegPsDemuxer splits an MPEG program stream into audio and video
// elementary streams and publishes them through an m7s Publisher.
type MpegPsDemuxer struct {
	stAudio, stVideo byte // stream_type values learned from the program stream map
	Publisher        *m7s.Publisher
	Allocator        *util.ScalableMemoryAllocator
	writer           m7s.PublishWriter[*format.Mpeg2Audio, *format.AnnexB]
}
// Feed consumes an MPEG program stream from reader until a read error (or
// EOF) occurs, dispatching on each 4-byte start code: pack headers are
// skipped, the PSM sets the audio/video stream types, and PES packets are
// split on PTS changes into frames pushed to the publish writer.
func (s *MpegPsDemuxer) Feed(reader *util.BufReader) (err error) {
	writer := &s.writer
	var payload util.Memory
	var pesHeader mpegts.MpegPESHeader
	var lastVideoPts, lastAudioPts uint64
	var annexbReader pkg.AnnexBReader
	for {
		code, err := reader.ReadBE32(4)
		if err != nil {
			return err
		}
		switch code {
		case StartCodePS:
			// Pack header: skip the fixed 9 bytes, then the variable
			// stuffing whose length lives in the low 3 bits of the next byte.
			var psl byte
			if err = reader.Skip(9); err != nil {
				return err
			}
			psl, err = reader.ReadByte()
			if err != nil {
				return err
			}
			psl &= 0x07
			if err = reader.Skip(int(psl)); err != nil {
				return err
			}
		case StartCodeVideo:
			payload, err = s.ReadPayload(reader)
			if err != nil {
				return err
			}
			if !s.Publisher.PubVideo {
				continue
			}
			// Lazily create the video writer and seed the codec context from
			// the stream type announced by the PSM.
			if writer.PublishVideoWriter == nil {
				writer.PublishVideoWriter = m7s.NewPublishVideoWriter[*format.AnnexB](s.Publisher, s.Allocator)
				switch s.stVideo {
				case mpegts.STREAM_TYPE_H264:
					writer.VideoFrame.ICodecCtx = &codec.H264Ctx{}
				case mpegts.STREAM_TYPE_H265:
					writer.VideoFrame.ICodecCtx = &codec.H265Ctx{}
				}
			}
			pes := writer.VideoFrame
			reader := payload.NewReader()
			pesHeader, err = mpegts.ReadPESHeader(&io.LimitedReader{R: &reader, N: int64(payload.Size)})
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to read PES header"))
			}
			// A new PTS marks a frame boundary: flush the accumulated frame
			// before stamping the next one.
			if pesHeader.Pts != 0 && pesHeader.Pts != lastVideoPts {
				if pes.Size > 0 {
					err = writer.NextVideo()
					if err != nil {
						return errors.Join(err, fmt.Errorf("failed to get next video frame"))
					}
					pes = writer.VideoFrame
				}
				pes.SetDTS(time.Duration(pesHeader.Dts))
				pes.SetPTS(time.Duration(pesHeader.Pts))
				lastVideoPts = pesHeader.Pts
			}
			// Copy the ES payload and run the incremental Annex-B parser.
			annexb := s.Allocator.Malloc(reader.Length)
			reader.Read(annexb)
			annexbReader.AppendBuffer(annexb)
			_, err = pes.Parse(&annexbReader)
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to parse annexb"))
			}
		case StartCodeAudio:
			payload, err = s.ReadPayload(reader)
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to read audio payload"))
			}
			// Ignore audio until the PSM has told us the stream type.
			if s.stAudio == 0 || !s.Publisher.PubAudio {
				continue
			}
			if writer.PublishAudioWriter == nil {
				writer.PublishAudioWriter = m7s.NewPublishAudioWriter[*format.Mpeg2Audio](s.Publisher, s.Allocator)
				switch s.stAudio {
				case mpegts.STREAM_TYPE_AAC:
					writer.AudioFrame.ICodecCtx = &codec.AACCtx{}
				case mpegts.STREAM_TYPE_G711A:
					writer.AudioFrame.ICodecCtx = codec.NewPCMACtx()
				case mpegts.STREAM_TYPE_G711U:
					writer.AudioFrame.ICodecCtx = codec.NewPCMUCtx()
				}
			}
			pes := writer.AudioFrame
			reader := payload.NewReader()
			pesHeader, err = mpegts.ReadPESHeader(&io.LimitedReader{R: &reader, N: int64(payload.Size)})
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to read PES header"))
			}
			if pesHeader.Pts != 0 && pesHeader.Pts != lastAudioPts {
				if pes.Size > 0 {
					err = writer.NextAudio()
					if err != nil {
						return errors.Join(err, fmt.Errorf("failed to get next audio frame"))
					}
					pes = writer.AudioFrame
				}
				// Audio has no B-frames, so DTS is taken equal to PTS.
				pes.SetDTS(time.Duration(pesHeader.Pts))
				pes.SetPTS(time.Duration(pesHeader.Pts))
				lastAudioPts = pesHeader.Pts
			}
			reader.Range(func(buf []byte) {
				copy(pes.NextN(len(buf)), buf)
			})
			// reader.Range(pes.PushOne)
		case StartCodeMAP:
			var psm util.Memory
			psm, err = s.ReadPayload(reader)
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to read program stream map"))
			}
			err = s.decProgramStreamMap(psm)
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to decode program stream map"))
			}
		default:
			// Unknown start code: skip its length-prefixed payload.
			payloadlen, err := reader.ReadBE(2)
			if err != nil {
				return errors.Join(err, fmt.Errorf("failed to read payload length"))
			}
			// NOTE(review): Skip's error is discarded here — confirm that is
			// acceptable; the next ReadBE32 would fail anyway on a bad stream.
			reader.Skip(payloadlen)
		}
	}
}
// ReadPayload reads a 16-bit big-endian length prefix and then returns that
// many bytes from the reader as a util.Memory view.
func (s *MpegPsDemuxer) ReadPayload(reader *util.BufReader) (payload util.Memory, err error) {
	size, e := reader.ReadBE(2)
	if e != nil {
		err = e
		return
	}
	return reader.ReadBytes(size)
}
// decProgramStreamMap parses a program stream map and records the
// stream_type for the first video (0xE0–0xEF) and audio (0xC0–0xDF)
// elementary stream IDs found.
//
// Fixes over the previous version: read errors were assigned to err but
// never checked (the function always returned nil, so a truncated PSM was
// silently accepted), and the uint32 remaining-length counter could
// underflow on a malformed map, spinning until the reader ran dry. Errors
// are now propagated and the loop stops when the declared lengths are
// inconsistent.
func (s *MpegPsDemuxer) decProgramStreamMap(psm util.Memory) (err error) {
	var programStreamInfoLen, programStreamMapLen, elementaryStreamInfoLength uint32
	var streamType, elementaryStreamID byte
	reader := psm.NewReader()
	// Skip current_next_indicator/version and marker bytes.
	if err = reader.Skip(2); err != nil {
		return
	}
	if programStreamInfoLen, err = reader.ReadBE(2); err != nil {
		return
	}
	if err = reader.Skip(int(programStreamInfoLen)); err != nil {
		return
	}
	if programStreamMapLen, err = reader.ReadBE(2); err != nil {
		return
	}
	// Each entry is 4 fixed bytes plus a variable descriptor area.
	for programStreamMapLen >= 4 {
		if streamType, err = reader.ReadByte(); err != nil {
			return
		}
		if elementaryStreamID, err = reader.ReadByte(); err != nil {
			return
		}
		if elementaryStreamID >= 0xe0 && elementaryStreamID <= 0xef {
			s.stVideo = streamType
		} else if elementaryStreamID >= 0xc0 && elementaryStreamID <= 0xdf {
			s.stAudio = streamType
		}
		if elementaryStreamInfoLength, err = reader.ReadBE(2); err != nil {
			return
		}
		if err = reader.Skip(int(elementaryStreamInfoLength)); err != nil {
			return
		}
		// Guard against underflow when the entry claims more bytes than the
		// map has left.
		consumed := 4 + elementaryStreamInfoLength
		if consumed > programStreamMapLen {
			break
		}
		programStreamMapLen -= consumed
	}
	return nil
}
// MpegPSMuxer wraps a subscriber and packs its audio/video frames into an
// MPEG program stream, accumulating output in Packet.
type MpegPSMuxer struct {
	*m7s.Subscriber
	Packet *util.RecyclableMemory
}
// Mux writes the PS pack header and program stream map for the publisher's
// tracks, then blocks playing the subscription, emitting one PES packet per
// frame; onPacket is invoked after each packet is appended to muxer.Packet.
func (muxer *MpegPSMuxer) Mux(onPacket func() error) {
	var pesAudio, pesVideo *MpegpsPESFrame
	puber := muxer.Publisher
	var elementary_stream_map_length uint16
	// Map each present track's codec to its MPEG-TS/PS stream type.
	if puber.HasAudioTrack() {
		elementary_stream_map_length += 4
		pesAudio = &MpegpsPESFrame{}
		pesAudio.StreamID = mpegts.STREAM_ID_AUDIO
		switch puber.AudioTrack.ICodecCtx.FourCC() {
		case codec.FourCC_ALAW:
			pesAudio.StreamType = mpegts.STREAM_TYPE_G711A
		case codec.FourCC_ULAW:
			pesAudio.StreamType = mpegts.STREAM_TYPE_G711U
		case codec.FourCC_MP4A:
			pesAudio.StreamType = mpegts.STREAM_TYPE_AAC
		}
	}
	if puber.HasVideoTrack() {
		elementary_stream_map_length += 4
		pesVideo = &MpegpsPESFrame{}
		pesVideo.StreamID = mpegts.STREAM_ID_VIDEO
		switch puber.VideoTrack.ICodecCtx.FourCC() {
		case codec.FourCC_H264:
			pesVideo.StreamType = mpegts.STREAM_TYPE_H264
		case codec.FourCC_H265:
			pesVideo.StreamType = mpegts.STREAM_TYPE_H265
		}
	}
	var outputBuffer util.Buffer = muxer.Packet.NextN(PSPackHeaderSize + PSMHeaderSize + int(elementary_stream_map_length))
	outputBuffer.Reset()
	MuxPSHeader(&outputBuffer)
	// System Header — would define per-stream buffer bounds (kept for reference).
	// outputBuffer.WriteUint32(StartCodeSYS)
	// outputBuffer.WriteByte(0x00) // header_length high
	// outputBuffer.WriteByte(0x0C) // header_length low (12 bytes)
	// outputBuffer.WriteByte(0x80) // marker + rate_bound[21..15]
	// outputBuffer.WriteByte(0x62) // rate_bound[14..8]
	// outputBuffer.WriteByte(0x4E) // rate_bound[7..1] + marker
	// outputBuffer.WriteByte(0x01) // audio_bound + fixed_flag + CSPS_flag + system_audio_lock_flag + system_video_lock_flag + marker
	// outputBuffer.WriteByte(0x01) // video_bound + packet_rate_restriction_flag + reserved
	// outputBuffer.WriteByte(frame.StreamId) // stream_id
	// outputBuffer.WriteByte(0xC0) // '11' + P-STD_buffer_bound_scale
	// outputBuffer.WriteByte(0x20) // P-STD_buffer_size_bound low
	// outputBuffer.WriteByte(0x00) // P-STD_buffer_size_bound high
	// outputBuffer.WriteByte(0x00)
	// outputBuffer.WriteByte(0x00)
	// outputBuffer.WriteByte(0x00)
	// PSM header — program stream map declaring the stream types.
	outputBuffer.WriteUint32(StartCodeMAP)
	outputBuffer.WriteUint16(uint16(PSMHeaderSize) + elementary_stream_map_length - 6) // psm_length
	outputBuffer.WriteByte(0xE0)                                                      // current_next_indicator + reserved + psm_version
	outputBuffer.WriteByte(0xFF)                                                      // reserved + marker
	outputBuffer.WriteUint16(0)                                                       // program_stream_info_length
	outputBuffer.WriteUint16(elementary_stream_map_length)
	if pesAudio != nil {
		outputBuffer.WriteByte(pesAudio.StreamType) // stream_type
		outputBuffer.WriteByte(pesAudio.StreamID)   // elementary_stream_id
		outputBuffer.WriteUint16(0)                 // elementary_stream_info_length
	}
	if pesVideo != nil {
		outputBuffer.WriteByte(pesVideo.StreamType) // stream_type
		outputBuffer.WriteByte(pesVideo.StreamID)   // elementary_stream_id
		outputBuffer.WriteUint16(0)                 // elementary_stream_info_length
	}
	// NOTE(review): this onPacket error is discarded, unlike the ones below —
	// confirm whether a failed header emit should abort before PlayBlock.
	onPacket()
	m7s.PlayBlock(muxer.Subscriber, func(audio *format.Mpeg2Audio) error {
		pesAudio.Pts = uint64(audio.GetPTS())
		pesAudio.WritePESPacket(audio.Memory, muxer.Packet)
		return onPacket()
	}, func(video *format.AnnexB) error {
		pesVideo.Pts = uint64(video.GetPTS())
		pesVideo.Dts = uint64(video.GetDTS())
		pesVideo.WritePESPacket(video.Memory, muxer.Packet)
		return onPacket()
	})
}
// MuxPSHeader writes a 14-byte MPEG-PS pack header (start code, SCR and
// program_mux_rate) into outputBuffer, per the MPEG-2 program stream layout.
func MuxPSHeader(outputBuffer *util.Buffer) {
	// Pack start code: 0x000001BA
	outputBuffer.WriteUint32(StartCodePS)
	// SCR (System Clock Reference): 90 kHz ticks derived from wall-clock time,
	// spread across the marker-bit layout of the pack header.
	scr := uint64(time.Now().UnixMilli()) * 90
	outputBuffer.WriteByte(0x44 | byte((scr>>30)&0x07)) // '01' + SCR[32..30]
	outputBuffer.WriteByte(byte((scr >> 22) & 0xFF))    // SCR[29..22]
	outputBuffer.WriteByte(0x04 | byte((scr>>20)&0x03)) // marker + SCR[21..20]
	outputBuffer.WriteByte(byte((scr >> 12) & 0xFF))    // SCR[19..12]
	outputBuffer.WriteByte(0x04 | byte((scr>>10)&0x03)) // marker + SCR[11..10]
	outputBuffer.WriteByte(byte((scr >> 2) & 0xFF))     // SCR[9..2]
	outputBuffer.WriteByte(0x04 | byte(scr&0x03))       // marker + SCR[1..0]
	outputBuffer.WriteByte(0x01)                        // SCR_ext + marker
	outputBuffer.WriteByte(0x89)                        // program_mux_rate high
	outputBuffer.WriteByte(0xC8)                        // program_mux_rate low + markers + reserved + stuffing_length(0)
}

View File

@@ -0,0 +1,853 @@
package mpegps
import (
"bytes"
"io"
"testing"
"m7s.live/v5/pkg/util"
)
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
// TestMpegPSConstants verifies the MPEG-PS start-code and size constants
// match the values mandated by the format.
func TestMpegPSConstants(t *testing.T) {
	// Test that PS constants are properly defined
	t.Run("Constants", func(t *testing.T) {
		if StartCodePS != 0x000001ba {
			t.Errorf("Expected StartCodePS %x, got %x", 0x000001ba, StartCodePS)
		}
		if PSPackHeaderSize != 14 {
			t.Errorf("Expected PSPackHeaderSize %d, got %d", 14, PSPackHeaderSize)
		}
		if MaxPESPayloadSize != 0xFFEB {
			t.Errorf("Expected MaxPESPayloadSize %x, got %x", 0xFFEB, MaxPESPayloadSize)
		}
	})
}
// TestMuxPSHeader checks that MuxPSHeader emits exactly PSPackHeaderSize
// bytes beginning with the 0x000001BA pack start code.
func TestMuxPSHeader(t *testing.T) {
	// Test PS header generation
	t.Run("PSHeader", func(t *testing.T) {
		// Create a buffer for testing - initialize with length 0 to allow appending
		buffer := make([]byte, 0, PSPackHeaderSize)
		utilBuffer := util.Buffer(buffer)
		// Call MuxPSHeader
		MuxPSHeader(&utilBuffer)
		// Check the buffer length
		if len(utilBuffer) != PSPackHeaderSize {
			t.Errorf("Expected buffer length %d, got %d", PSPackHeaderSize, len(utilBuffer))
		}
		// Check PS start code (first 4 bytes should be 0x00 0x00 0x01 0xBA)
		expectedStartCode := []byte{0x00, 0x00, 0x01, 0xBA}
		if !bytes.Equal(utilBuffer[:4], expectedStartCode) {
			t.Errorf("Expected PS start code %x, got %x", expectedStartCode, utilBuffer[:4])
		}
		t.Logf("PS Header: %x", utilBuffer)
		t.Logf("Buffer length: %d", len(utilBuffer))
	})
}
// TestMpegpsPESFrame exercises basic field assignment on MpegpsPESFrame
// (stream type, PTS, DTS).
func TestMpegpsPESFrame(t *testing.T) {
	// Test MpegpsPESFrame basic functionality
	t.Run("PESFrame", func(t *testing.T) {
		// Create PES frame
		pesFrame := &MpegpsPESFrame{
			StreamType: 0x1B, // H.264
		}
		pesFrame.Pts = 90000 // 1 second in 90kHz clock
		pesFrame.Dts = 90000
		// Test basic properties
		if pesFrame.StreamType != 0x1B {
			t.Errorf("Expected stream type 0x1B, got %x", pesFrame.StreamType)
		}
		if pesFrame.Pts != 90000 {
			t.Errorf("Expected PTS %d, got %d", 90000, pesFrame.Pts)
		}
		if pesFrame.Dts != 90000 {
			t.Errorf("Expected DTS %d, got %d", 90000, pesFrame.Dts)
		}
		t.Logf("PES Frame: StreamType=%x, PTS=%d, DTS=%d", pesFrame.StreamType, pesFrame.Pts, pesFrame.Dts)
	})
}
// TestReadPayload feeds a length-prefixed buffer to ReadPayload and checks
// both the reported size and the returned bytes.
func TestReadPayload(t *testing.T) {
	// Test ReadPayload functionality
	t.Run("ReadPayload", func(t *testing.T) {
		// Create test data with payload length and payload
		testData := []byte{
			0x00, 0x05, // Payload length = 5 bytes
			0x01, 0x02, 0x03, 0x04, 0x05, // Payload data
		}
		demuxer := &MpegPsDemuxer{}
		reader := util.NewBufReader(bytes.NewReader(testData))
		payload, err := demuxer.ReadPayload(reader)
		if err != nil {
			t.Fatalf("ReadPayload failed: %v", err)
		}
		if payload.Size != 5 {
			t.Errorf("Expected payload size 5, got %d", payload.Size)
		}
		expectedPayload := []byte{0x01, 0x02, 0x03, 0x04, 0x05}
		if !bytes.Equal(payload.ToBytes(), expectedPayload) {
			t.Errorf("Expected payload %x, got %x", expectedPayload, payload.ToBytes())
		}
		t.Logf("ReadPayload successful: %x", payload.ToBytes())
	})
}
// TestMpegPSMuxerBasic exercises the individual building blocks of the PS
// muxer (pack header, PSM table, PES packetization) without going through a
// full publisher/subscriber PlayBlock pipeline.
func TestMpegPSMuxerBasic(t *testing.T) {
	// Test MpegPSMuxer basic functionality
	t.Run("MuxBasic", func(t *testing.T) {
		// Test basic PS header generation without PlayBlock
		// This focuses on testing the header generation logic
		var outputBuffer util.Buffer = make([]byte, 0, 1024)
		outputBuffer.Reset()
		// Test PS header generation
		MuxPSHeader(&outputBuffer)
		// Add stuffing bytes as expected by the demuxer
		// The demuxer expects: 9 bytes + 1 stuffing length byte + stuffing bytes
		stuffingLength := byte(0x00) // No stuffing bytes
		outputBuffer.WriteByte(stuffingLength)
		// Verify PS header contains expected start code
		if len(outputBuffer) != PSPackHeaderSize+1 {
			t.Errorf("Expected PS header size %d, got %d", PSPackHeaderSize+1, len(outputBuffer))
		}
		// Check for PS start code (pack_start_code 0x000001BA)
		if !bytes.Contains(outputBuffer, []byte{0x00, 0x00, 0x01, 0xBA}) {
			t.Error("PS header does not contain PS start code")
		}
		t.Logf("PS Header: %x", outputBuffer)
		t.Logf("PS Header size: %d bytes", len(outputBuffer))
		// Test PSM header generation
		var pesAudio, pesVideo *MpegpsPESFrame
		var elementary_stream_map_length uint16
		// Simulate audio stream: each stream contributes 4 bytes to the map
		hasAudio := true
		if hasAudio {
			elementary_stream_map_length += 4
			pesAudio = &MpegpsPESFrame{}
			pesAudio.StreamID = 0xC0   // MPEG audio
			pesAudio.StreamType = 0x0F // AAC
		}
		// Simulate video stream
		hasVideo := true
		if hasVideo {
			elementary_stream_map_length += 4
			pesVideo = &MpegpsPESFrame{}
			pesVideo.StreamID = 0xE0   // MPEG video
			pesVideo.StreamType = 0x1B // H.264
		}
		// Create PSM header with proper payload length
		psmData := make([]byte, 0, PSMHeaderSize+int(elementary_stream_map_length))
		psmBuffer := util.Buffer(psmData)
		psmBuffer.Reset()
		// Write PSM start code
		psmBuffer.WriteUint32(StartCodeMAP)
		// psm_length counts everything after the 6-byte start-code+length prefix
		psmLength := uint16(PSMHeaderSize + int(elementary_stream_map_length) - 6)
		psmBuffer.WriteUint16(psmLength) // psm_length
		psmBuffer.WriteByte(0xE0)        // current_next_indicator + reserved + psm_version
		psmBuffer.WriteByte(0xFF)        // reserved + marker
		psmBuffer.WriteUint16(0)         // program_stream_info_length
		psmBuffer.WriteUint16(elementary_stream_map_length)
		if pesAudio != nil {
			psmBuffer.WriteByte(pesAudio.StreamType) // stream_type
			psmBuffer.WriteByte(pesAudio.StreamID)   // elementary_stream_id
			psmBuffer.WriteUint16(0)                 // elementary_stream_info_length
		}
		if pesVideo != nil {
			psmBuffer.WriteByte(pesVideo.StreamType) // stream_type
			psmBuffer.WriteByte(pesVideo.StreamID)   // elementary_stream_id
			psmBuffer.WriteUint16(0)                 // elementary_stream_info_length
		}
		// Verify PSM header
		if len(psmBuffer) != PSMHeaderSize+int(elementary_stream_map_length) {
			t.Errorf("Expected PSM size %d, got %d", PSMHeaderSize+int(elementary_stream_map_length), len(psmBuffer))
		}
		// Check for PSM start code (0x000001BC)
		if !bytes.Contains(psmBuffer, []byte{0x00, 0x00, 0x01, 0xBC}) {
			t.Error("PSM header does not contain PSM start code")
		}
		t.Logf("PSM Header: %x", psmBuffer)
		t.Logf("PSM Header size: %d bytes", len(psmBuffer))
		// Test ReadPayload function directly
		t.Run("ReadPayload", func(t *testing.T) {
			// Create test payload data
			testPayload := []byte{0x01, 0x02, 0x03, 0x04, 0x05}
			// Create a packet with a 16-bit big-endian length prefix
			packetData := make([]byte, 0, 2+len(testPayload))
			packetData = append(packetData, byte(len(testPayload)>>8), byte(len(testPayload)))
			packetData = append(packetData, testPayload...)
			reader := util.NewBufReader(bytes.NewReader(packetData))
			demuxer := &MpegPsDemuxer{}
			// Test ReadPayload function
			payload, err := demuxer.ReadPayload(reader)
			if err != nil {
				t.Fatalf("ReadPayload failed: %v", err)
			}
			if payload.Size != len(testPayload) {
				t.Errorf("Expected payload size %d, got %d", len(testPayload), payload.Size)
			}
			if !bytes.Equal(payload.ToBytes(), testPayload) {
				t.Errorf("Expected payload %x, got %x", testPayload, payload.ToBytes())
			}
			t.Logf("ReadPayload test passed: %x", payload.ToBytes())
		})
		// Test basic demuxing with PS header only
		t.Run("PSHeader", func(t *testing.T) {
			// Create a simple test that just verifies the PS header structure
			// without trying to demux it (which expects more data)
			if len(outputBuffer) < 4 {
				t.Errorf("PS header too short: %d bytes", len(outputBuffer))
			}
			// Check that it starts with the correct start code
			if !bytes.HasPrefix(outputBuffer, []byte{0x00, 0x00, 0x01, 0xBA}) {
				t.Errorf("PS header does not start with correct start code: %x", outputBuffer[:4])
			}
			t.Logf("PS header structure test passed")
		})
		t.Logf("Basic mux/demux test completed successfully")
	})
	// Test basic PES packet generation without PlayBlock
	t.Run("PESGeneration", func(t *testing.T) {
		// Create a test that simulates PES packet generation
		// without requiring a full subscriber setup
		// Create test payload with a repeating byte pattern
		testPayload := make([]byte, 5000)
		for i := range testPayload {
			testPayload[i] = byte(i % 256)
		}
		// Create PES frame
		pesFrame := &MpegpsPESFrame{
			StreamType: 0x1B, // H.264
		}
		pesFrame.Pts = 90000
		pesFrame.Dts = 90000
		// Create allocator for testing
		allocator := util.NewScalableMemoryAllocator(1024*1024)
		packet := util.NewRecyclableMemory(allocator)
		// Write PES packet
		err := pesFrame.WritePESPacket(util.NewMemory(testPayload), &packet)
		if err != nil {
			t.Fatalf("WritePESPacket failed: %v", err)
		}
		// Verify packet was written
		packetData := packet.ToBytes()
		if len(packetData) == 0 {
			t.Fatal("No data was written to packet")
		}
		t.Logf("PES packet generated: %d bytes", len(packetData))
		t.Logf("Packet data (first 64 bytes): %x", packetData[:min(64, len(packetData))])
		// Verify PS header is present
		if !bytes.Contains(packetData, []byte{0x00, 0x00, 0x01, 0xBA}) {
			t.Error("PES packet does not contain PS start code")
		}
		// Test reading back the packet
		reader := util.NewBufReader(bytes.NewReader(packetData))
		// Skip PS header
		code, err := reader.ReadBE32(4)
		if err != nil {
			t.Fatalf("Failed to read start code: %v", err)
		}
		if code != StartCodePS {
			t.Errorf("Expected PS start code %x, got %x", StartCodePS, code)
		}
		// Skip PS header (fixed 9 bytes, then variable-length stuffing)
		if err = reader.Skip(9); err != nil {
			t.Fatalf("Failed to skip PS header: %v", err)
		}
		psl, err := reader.ReadByte()
		if err != nil {
			t.Fatalf("Failed to read stuffing length: %v", err)
		}
		// Only the low 3 bits carry the stuffing byte count
		psl &= 0x07
		if err = reader.Skip(int(psl)); err != nil {
			t.Fatalf("Failed to skip stuffing bytes: %v", err)
		}
		// Read PES packets directly by parsing the PES structure
		totalPayloadSize := 0
		packetCount := 0
		for reader.Buffered() > 0 {
			// Read PES packet start code (0x00000100 + stream_id)
			pesStartCode, err := reader.ReadBE32(4)
			if err != nil {
				if err == io.EOF {
					break
				}
				t.Fatalf("Failed to read PES start code: %v", err)
			}
			// Check if it's a PES packet (starts with 0x000001)
			if pesStartCode&0xFFFFFF00 != 0x00000100 {
				t.Errorf("Invalid PES start code: %x", pesStartCode)
				break
			}
			// // streamID := byte(pesStartCode & 0xFF)
			t.Logf("PES packet %d: stream_id=0x%02x", packetCount+1, pesStartCode&0xFF)
			// Read PES packet length
			pesLength, err := reader.ReadBE(2)
			if err != nil {
				t.Fatalf("Failed to read PES length: %v", err)
			}
			// Read PES header
			// Skip the first byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags1: %v", err)
			}
			// Skip the second byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags2: %v", err)
			}
			// Read header data length
			headerDataLength, err := reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES header data length: %v", err)
			}
			// Skip header data (PTS/DTS etc.)
			if err = reader.Skip(int(headerDataLength)); err != nil {
				t.Fatalf("Failed to skip PES header data: %v", err)
			}
			// Calculate payload size
			payloadSize := pesLength - 3 - int(headerDataLength) // 3 = flags1 + flags2 + headerDataLength
			if payloadSize > 0 {
				// Read payload data
				payload, err := reader.ReadBytes(payloadSize)
				if err != nil {
					t.Fatalf("Failed to read PES payload: %v", err)
				}
				totalPayloadSize += payload.Size
				t.Logf("PES packet %d: %d bytes payload", packetCount+1, payload.Size)
			}
			packetCount++
		}
		// Verify total payload size matches
		if totalPayloadSize != len(testPayload) {
			t.Errorf("Expected total payload size %d, got %d", len(testPayload), totalPayloadSize)
		}
		t.Logf("PES generation test completed successfully: %d packets, total %d bytes", packetCount, totalPayloadSize)
	})
}
// TestPESPacketWriteRead writes a 1000-byte payload via WritePESPacket and
// parses the resulting PS pack back, verifying the reassembled payload size.
func TestPESPacketWriteRead(t *testing.T) {
	// Test PES packet writing and reading functionality
	t.Run("PESWriteRead", func(t *testing.T) {
		// Create test payload data with a repeating byte pattern
		testPayload := make([]byte, 1000)
		for i := range testPayload {
			testPayload[i] = byte(i % 256)
		}
		// Create PES frame
		pesFrame := &MpegpsPESFrame{
			StreamType: 0x1B, // H.264
		}
		pesFrame.Pts = 90000 // 1 second in 90kHz clock
		pesFrame.Dts = 90000
		// Create allocator for testing
		allocator := util.NewScalableMemoryAllocator(1024)
		packet := util.NewRecyclableMemory(allocator)
		// Write PES packet
		err := pesFrame.WritePESPacket(util.NewMemory(testPayload), &packet)
		if err != nil {
			t.Fatalf("WritePESPacket failed: %v", err)
		}
		// Verify that packet was written
		packetData := packet.ToBytes()
		if len(packetData) == 0 {
			t.Fatal("No data was written to packet")
		}
		t.Logf("PES packet written: %d bytes", len(packetData))
		t.Logf("Packet data (first 64 bytes): %x", packetData[:min(64, len(packetData))])
		// Verify PS header is present
		if !bytes.Contains(packetData, []byte{0x00, 0x00, 0x01, 0xBA}) {
			t.Error("PES packet does not contain PS start code")
		}
		// Now test reading the PES packet back
		reader := util.NewBufReader(bytes.NewReader(packetData))
		// Read and process the PS header
		code, err := reader.ReadBE32(4)
		if err != nil {
			t.Fatalf("Failed to read start code: %v", err)
		}
		if code != StartCodePS {
			t.Errorf("Expected PS start code %x, got %x", StartCodePS, code)
		}
		// Skip PS header (9 bytes + stuffing length)
		if err = reader.Skip(9); err != nil {
			t.Fatalf("Failed to skip PS header: %v", err)
		}
		psl, err := reader.ReadByte()
		if err != nil {
			t.Fatalf("Failed to read stuffing length: %v", err)
		}
		// Only the low 3 bits carry the stuffing byte count
		psl &= 0x07
		if err = reader.Skip(int(psl)); err != nil {
			t.Fatalf("Failed to skip stuffing bytes: %v", err)
		}
		// Read PES packet directly by parsing the PES structure
		totalPayloadSize := 0
		packetCount := 0
		for reader.Buffered() > 0 {
			// Read PES packet start code (0x00000100 + stream_id)
			pesStartCode, err := reader.ReadBE32(4)
			if err != nil {
				if err == io.EOF {
					break
				}
				t.Fatalf("Failed to read PES start code: %v", err)
			}
			// Check if it's a PES packet (starts with 0x000001)
			if pesStartCode&0xFFFFFF00 != 0x00000100 {
				t.Errorf("Invalid PES start code: %x", pesStartCode)
				break
			}
			// // streamID := byte(pesStartCode & 0xFF)
			t.Logf("PES packet %d: stream_id=0x%02x", packetCount+1, pesStartCode&0xFF)
			// Read PES packet length
			pesLength, err := reader.ReadBE(2)
			if err != nil {
				t.Fatalf("Failed to read PES length: %v", err)
			}
			// Read PES header
			// Skip the first byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags1: %v", err)
			}
			// Skip the second byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags2: %v", err)
			}
			// Read header data length
			headerDataLength, err := reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES header data length: %v", err)
			}
			// Skip header data (PTS/DTS etc.)
			if err = reader.Skip(int(headerDataLength)); err != nil {
				t.Fatalf("Failed to skip PES header data: %v", err)
			}
			// Calculate payload size
			payloadSize := pesLength - 3 - int(headerDataLength) // 3 = flags1 + flags2 + headerDataLength
			if payloadSize > 0 {
				// Read payload data
				payload, err := reader.ReadBytes(payloadSize)
				if err != nil {
					t.Fatalf("Failed to read PES payload: %v", err)
				}
				totalPayloadSize += payload.Size
				t.Logf("PES packet %d: %d bytes payload", packetCount+1, payload.Size)
			}
			packetCount++
		}
		t.Logf("PES payload read: %d bytes", totalPayloadSize)
		// Verify payload size
		if totalPayloadSize != len(testPayload) {
			t.Errorf("Expected payload size %d, got %d", len(testPayload), totalPayloadSize)
		}
		// Note: We can't easily verify the content because the payload is fragmented across multiple PES packets
		// But we can verify the total size is correct
		t.Logf("PES packet write-read test completed successfully")
	})
}
// TestLargePESPacket verifies that a payload larger than 65535 bytes is
// fragmented into multiple PES packets and that the fragments reassemble
// to the original total size.
func TestLargePESPacket(t *testing.T) {
	// Test large PES packet handling (payload > 65535 bytes)
	t.Run("LargePESPacket", func(t *testing.T) {
		// Create large test payload (exceeds 65535 bytes)
		largePayload := make([]byte, 70000) // 70KB payload
		for i := range largePayload {
			largePayload[i] = byte(i % 256)
		}
		// Create PES frame
		pesFrame := &MpegpsPESFrame{
			StreamType: 0x1B, // H.264
		}
		pesFrame.Pts = 180000 // 2 seconds in 90kHz clock
		pesFrame.Dts = 180000
		// Create allocator for testing
		allocator := util.NewScalableMemoryAllocator(1024*1024) // 1MB allocator
		packet := util.NewRecyclableMemory(allocator)
		// Write large PES packet
		t.Logf("Writing large PES packet with %d bytes payload", len(largePayload))
		err := pesFrame.WritePESPacket(util.NewMemory(largePayload), &packet)
		if err != nil {
			t.Fatalf("WritePESPacket failed for large payload: %v", err)
		}
		// Verify that packet was written
		packetData := packet.ToBytes()
		if len(packetData) == 0 {
			t.Fatal("No data was written to packet")
		}
		t.Logf("Large PES packet written: %d bytes", len(packetData))
		// Verify PS header is present
		if !bytes.Contains(packetData, []byte{0x00, 0x00, 0x01, 0xBA}) {
			t.Error("Large PES packet does not contain PS start code")
		}
		// Count number of PES packets (should be multiple due to size limitation)
		pesCount := 0
		reader := util.NewBufReader(bytes.NewReader(packetData))
		// Skip PS header
		code, err := reader.ReadBE32(4)
		if err != nil {
			t.Fatalf("Failed to read start code: %v", err)
		}
		if code != StartCodePS {
			t.Errorf("Expected PS start code %x, got %x", StartCodePS, code)
		}
		// Skip PS header
		if err = reader.Skip(9); err != nil {
			t.Fatalf("Failed to skip PS header: %v", err)
		}
		psl, err := reader.ReadByte()
		if err != nil {
			t.Fatalf("Failed to read stuffing length: %v", err)
		}
		// Only the low 3 bits carry the stuffing byte count
		psl &= 0x07
		if err = reader.Skip(int(psl)); err != nil {
			t.Fatalf("Failed to skip stuffing bytes: %v", err)
		}
		// Read and count PES packets
		totalPayloadSize := 0
		for reader.Buffered() > 0 {
			// Read PES packet start code (0x00000100 + stream_id)
			pesStartCode, err := reader.ReadBE32(4)
			if err != nil {
				if err == io.EOF {
					break
				}
				t.Fatalf("Failed to read PES start code: %v", err)
			}
			// Check if it's a PES packet (starts with 0x000001)
			if pesStartCode&0xFFFFFF00 != 0x00000100 {
				t.Errorf("Invalid PES start code: %x", pesStartCode)
				break
			}
			// streamID := byte(pesStartCode & 0xFF)
			// Read PES packet length
			pesLength, err := reader.ReadBE(2)
			if err != nil {
				t.Fatalf("Failed to read PES length: %v", err)
			}
			// Read PES header
			// Skip the first byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags1: %v", err)
			}
			// Skip the second byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags2: %v", err)
			}
			// Read header data length
			headerDataLength, err := reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES header data length: %v", err)
			}
			// Skip header data (PTS/DTS etc.)
			if err = reader.Skip(int(headerDataLength)); err != nil {
				t.Fatalf("Failed to skip PES header data: %v", err)
			}
			// Calculate payload size
			payloadSize := pesLength - 3 - int(headerDataLength) // 3 = flags1 + flags2 + headerDataLength
			if payloadSize > 0 {
				// Read payload data
				payload, err := reader.ReadBytes(payloadSize)
				if err != nil {
					t.Fatalf("Failed to read PES payload: %v", err)
				}
				totalPayloadSize += payload.Size
				t.Logf("PES packet %d: %d bytes payload", pesCount+1, payload.Size)
			}
			pesCount++
		}
		// Verify that we got multiple PES packets
		if pesCount < 2 {
			t.Errorf("Expected multiple PES packets for large payload, got %d", pesCount)
		}
		// Verify total payload size
		if totalPayloadSize != len(largePayload) {
			t.Errorf("Expected total payload size %d, got %d", len(largePayload), totalPayloadSize)
		}
		// Verify individual PES packet sizes don't exceed maximum
		maxPacketSize := MaxPESPayloadSize + PESHeaderMinSize
		if pesCount == 1 && len(packetData) > maxPacketSize {
			t.Errorf("Single PES packet exceeds maximum size: %d > %d", len(packetData), maxPacketSize)
		}
		t.Logf("Large PES packet test completed successfully: %d packets, total %d bytes", pesCount, totalPayloadSize)
	})
}
// TestPESPacketBoundaryConditions exercises WritePESPacket at payload sizes
// around the single-PES fragmentation boundary (empty, one byte, exactly
// MaxPESPayloadSize, just over, and multiple fragments).
func TestPESPacketBoundaryConditions(t *testing.T) {
	// Test PES packet boundary conditions
	t.Run("BoundaryConditions", func(t *testing.T) {
		testCases := []struct {
			name        string
			payloadSize int
		}{
			{"EmptyPayload", 0},
			{"SmallPayload", 1},
			{"ExactBoundary", MaxPESPayloadSize},
			{"JustOverBoundary", MaxPESPayloadSize + 1},
			{"MultipleBoundary", MaxPESPayloadSize*2 + 100},
		}
		for _, tc := range testCases {
			t.Run(tc.name, func(t *testing.T) {
				// Create test payload with a repeating byte pattern
				testPayload := make([]byte, tc.payloadSize)
				for i := range testPayload {
					testPayload[i] = byte(i % 256)
				}
				// Create PES frame
				pesFrame := &MpegpsPESFrame{
					StreamType: 0x1B, // H.264
				}
				pesFrame.Pts = uint64(tc.payloadSize) * 90 // Use payload size as PTS
				pesFrame.Dts = uint64(tc.payloadSize) * 90
				// Create allocator for testing
				allocator := util.NewScalableMemoryAllocator(1024*1024)
				packet := util.NewRecyclableMemory(allocator)
				// Write PES packet
				err := pesFrame.WritePESPacket(util.NewMemory(testPayload), &packet)
				if err != nil {
					t.Fatalf("WritePESPacket failed: %v", err)
				}
				// Verify that packet was written
				packetData := packet.ToBytes()
				if len(packetData) == 0 && tc.payloadSize > 0 {
					t.Fatal("No data was written to packet for non-empty payload")
				}
				t.Logf("%s: %d bytes payload -> %d bytes packet", tc.name, tc.payloadSize, len(packetData))
				// For non-empty payloads, verify we can read them back
				if tc.payloadSize > 0 {
					reader := util.NewBufReader(bytes.NewReader(packetData))
					// Skip PS header
					code, err := reader.ReadBE32(4)
					if err != nil {
						t.Fatalf("Failed to read start code: %v", err)
					}
					if code != StartCodePS {
						t.Errorf("Expected PS start code %x, got %x", StartCodePS, code)
					}
					// Skip PS header
					if err = reader.Skip(9); err != nil {
						t.Fatalf("Failed to skip PS header: %v", err)
					}
					psl, err := reader.ReadByte()
					if err != nil {
						t.Fatalf("Failed to read stuffing length: %v", err)
					}
					// Only the low 3 bits carry the stuffing byte count
					psl &= 0x07
					if err = reader.Skip(int(psl)); err != nil {
						t.Fatalf("Failed to skip stuffing bytes: %v", err)
					}
					// Read PES packets
					totalPayloadSize := 0
					packetCount := 0
					for reader.Buffered() > 0 {
						// Read PES packet start code (0x00000100 + stream_id)
						pesStartCode, err := reader.ReadBE32(4)
						if err != nil {
							if err == io.EOF {
								break
							}
							t.Fatalf("Failed to read PES start code: %v", err)
						}
						// Check if it's a PES packet (starts with 0x000001)
						if pesStartCode&0xFFFFFF00 != 0x00000100 {
							t.Errorf("Invalid PES start code: %x", pesStartCode)
							break
						}
						// // streamID := byte(pesStartCode & 0xFF)
						// Read PES packet length
						pesLength, err := reader.ReadBE(2)
						if err != nil {
							t.Fatalf("Failed to read PES length: %v", err)
						}
						// Read PES header
						// Skip the first byte (flags)
						_, err = reader.ReadByte()
						if err != nil {
							t.Fatalf("Failed to read PES flags1: %v", err)
						}
						// Skip the second byte (flags)
						_, err = reader.ReadByte()
						if err != nil {
							t.Fatalf("Failed to read PES flags2: %v", err)
						}
						// Read header data length
						headerDataLength, err := reader.ReadByte()
						if err != nil {
							t.Fatalf("Failed to read PES header data length: %v", err)
						}
						// Skip header data (PTS/DTS etc.)
						if err = reader.Skip(int(headerDataLength)); err != nil {
							t.Fatalf("Failed to skip PES header data: %v", err)
						}
						// Calculate payload size
						payloadSize := pesLength - 3 - int(headerDataLength) // 3 = flags1 + flags2 + headerDataLength
						if payloadSize > 0 {
							// Read payload data
							payload, err := reader.ReadBytes(payloadSize)
							if err != nil {
								t.Fatalf("Failed to read PES payload: %v", err)
							}
							totalPayloadSize += payload.Size
						}
						packetCount++
					}
					// Verify total payload size matches
					if totalPayloadSize != tc.payloadSize {
						t.Errorf("Expected total payload size %d, got %d", tc.payloadSize, totalPayloadSize)
					}
					t.Logf("%s: Successfully read back %d PES packets", tc.name, packetCount)
				}
			})
		}
	})
}

35
pkg/format/ps/pes.go Normal file
View File

@@ -0,0 +1,35 @@
package mpegps
import (
mpegts "m7s.live/v5/pkg/format/ts"
"m7s.live/v5/pkg/util"
)
// MpegpsPESFrame is a PES frame within an MPEG program stream: the generic
// PES header plus the program-stream stream_type of the elementary stream.
type MpegpsPESFrame struct {
	StreamType byte // Stream type (e.g., video, audio)
	mpegts.MpegPESHeader
}
// WritePESPacket serializes payload into allocator as one PS pack: a PS pack
// header followed by one or more PES packets, fragmenting the payload so each
// PES packet carries at most MaxPESPayloadSize bytes. All output bytes are
// reserved from the allocator so the caller can recycle them as one unit.
func (frame *MpegpsPESFrame) WritePESPacket(payload util.Memory, allocator *util.RecyclableMemory) (err error) {
	// The first fragment of the frame is flagged as data-aligned.
	frame.DataAlignmentIndicator = 1
	pesReader := payload.NewReader()
	// Reserve the pack-header region up front; Reset drops the length but
	// keeps the capacity so MuxPSHeader fills the reserved bytes in place.
	var outputMemory util.Buffer = allocator.NextN(PSPackHeaderSize)
	outputMemory.Reset()
	MuxPSHeader(&outputMemory)
	for pesReader.Length > 0 {
		// Fragment size for this iteration (last fragment may be shorter).
		currentPESPayload := min(pesReader.Length, MaxPESPayloadSize)
		var pesHeadItem util.Buffer
		pesHeadItem, err = frame.WritePESHeader(currentPESPayload)
		if err != nil {
			return
		}
		// Copy the PES header into freshly reserved output space.
		copy(allocator.NextN(pesHeadItem.Len()), pesHeadItem)
		// Reserve the output buffer for this fragment's payload bytes.
		outputMemory = allocator.NextN(currentPESPayload)
		pesReader.Read(outputMemory)
		// Subsequent fragments of the same frame are not data-aligned.
		frame.DataAlignmentIndicator = 0
	}
	return nil
}

131
pkg/format/raw.go Normal file
View File

@@ -0,0 +1,131 @@
package format
import (
"bytes"
"fmt"
"github.com/deepch/vdk/codec/h264parser"
"github.com/deepch/vdk/codec/h265parser"
"m7s.live/v5/pkg"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/util"
)
var _ pkg.IAVFrame = (*RawAudio)(nil) // compile-time interface check

// RawAudio is an audio frame whose payload is kept as raw memory, without
// any container-specific framing.
type RawAudio struct {
	pkg.Sample
}
// GetSize reports the byte length of the raw audio payload.
func (r *RawAudio) GetSize() int {
	mem := r.Raw.(*util.Memory)
	return mem.Size
}
// Demux exposes the frame's backing memory as the raw payload; raw audio
// needs no actual demuxing.
func (r *RawAudio) Demux() error {
	r.Raw = &r.Memory
	return nil
}
// Mux adopts the raw memory and base codec context of another sample.
// NOTE(review): InitRecycleIndexes(0) appears to reset recycling bookkeeping
// before the memory is replaced — confirm against util.RecyclableMemory.
func (r *RawAudio) Mux(from *pkg.Sample) (err error) {
	r.InitRecycleIndexes(0)
	r.Memory = *from.Raw.(*util.Memory)
	r.ICodecCtx = from.GetBase()
	return
}
// String implements fmt.Stringer, summarizing codec, timestamp and payload
// size for logs and debugging.
func (r *RawAudio) String() string {
	return fmt.Sprintf("RawAudio{FourCC: %s, Timestamp: %s, Size: %d}", r.FourCC(), r.Timestamp, r.Size)
}
var _ pkg.IAVFrame = (*H26xFrame)(nil) // compile-time interface check

// H26xFrame is a video frame holding H.264/H.265 NAL units in the embedded
// Sample's Raw field (as *pkg.Nalus).
type H26xFrame struct {
	pkg.Sample
}
// CheckCodecChange scans the frame's NAL units to detect an in-band
// parameter-set change (SPS/PPS for H.264, VPS/SPS/PPS for H.265) and to
// classify the frame. It replaces h.ICodecCtx with a freshly parsed codec
// context when the parameter sets differ from the current extradata, sets
// h.IDR on key frames, and returns pkg.ErrSkip when the frame contains no
// video slices at all (only metadata NALUs).
func (h *H26xFrame) CheckCodecChange() (err error) {
	if h.ICodecCtx == nil {
		return pkg.ErrUnsupportCodec
	}
	var hasVideoFrame bool
	switch ctx := h.GetBase().(type) {
	case *codec.H264Ctx:
		var sps, pps []byte
		for nalu := range h.Raw.(*pkg.Nalus).RangePoint {
			// The NALU type lives in the first byte of each unit.
			switch codec.ParseH264NALUType(nalu.Buffers[0][0]) {
			case codec.NALU_SPS:
				sps = nalu.ToBytes()
			case codec.NALU_PPS:
				pps = nalu.ToBytes()
			case codec.NALU_IDR_Picture:
				h.IDR = true
			case codec.NALU_Non_IDR_Picture:
				hasVideoFrame = true
			}
		}
		if sps != nil && pps != nil {
			var codecData h264parser.CodecData
			codecData, err = h264parser.NewCodecDataFromSPSAndPPS(sps, pps)
			if err != nil {
				return
			}
			// Only swap the codec context when the extradata actually changed.
			if !bytes.Equal(codecData.Record, ctx.Record) {
				h.ICodecCtx = &codec.H264Ctx{
					CodecData: codecData,
				}
			}
		}
	case *codec.H265Ctx:
		var vps, sps, pps []byte
		for nalu := range h.Raw.(*pkg.Nalus).RangePoint {
			switch codec.ParseH265NALUType(nalu.Buffers[0][0]) {
			case h265parser.NAL_UNIT_VPS:
				vps = nalu.ToBytes()
			case h265parser.NAL_UNIT_SPS:
				sps = nalu.ToBytes()
			case h265parser.NAL_UNIT_PPS:
				pps = nalu.ToBytes()
			case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
				h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
				h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
				h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
				h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
				h265parser.NAL_UNIT_CODED_SLICE_CRA:
				h.IDR = true
			// NOTE(review): 1-9 presumably cover the non-IRAP coded slice
			// types (TRAIL/TSA/STSA/RADL/RASL) — confirm against the spec.
			case 1, 2, 3, 4, 5, 6, 7, 8, 9:
				hasVideoFrame = true
			}
		}
		if vps != nil && sps != nil && pps != nil {
			var codecData h265parser.CodecData
			codecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(vps, sps, pps)
			if err != nil {
				return
			}
			// Only swap the codec context when the extradata actually changed.
			if !bytes.Equal(codecData.Record, ctx.Record) {
				h.ICodecCtx = &codec.H265Ctx{
					CodecData: codecData,
				}
			}
		}
	}
	// Return ErrSkip if no video frames are present (only metadata NALUs)
	if !hasVideoFrame && !h.IDR {
		return pkg.ErrSkip
	}
	return
}
// GetSize returns the total size in bytes of all NAL units in the frame.
// A frame whose Raw is not *pkg.Nalus reports size 0.
// Receiver renamed r -> h for consistency with the other H26xFrame methods
// (staticcheck ST1016: consistent receiver names per type).
func (h *H26xFrame) GetSize() (ret int) {
	switch raw := h.Raw.(type) {
	case *pkg.Nalus:
		for nalu := range raw.RangePoint {
			ret += nalu.Size
		}
	}
	return
}
// String implements fmt.Stringer, summarizing codec, timestamp and CTS for
// logs and debugging.
// Fix: the original passed the method value h.FourCC (a func) to %s instead
// of calling it, which go vet's printf check flags and which would print a
// function value rather than the codec tag; sibling RawAudio.String calls
// r.FourCC().
func (h *H26xFrame) String() string {
	return fmt.Sprintf("H26xFrame{FourCC: %s, Timestamp: %s, CTS: %s}", h.FourCC(), h.Timestamp, h.CTS)
}

View File

@@ -4,7 +4,11 @@ import (
"bytes"
"errors"
"io"
"io/ioutil"
"time"
"m7s.live/v5"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/format"
"m7s.live/v5/pkg/util"
//"sync"
)
@@ -101,22 +105,16 @@ const (
//
type MpegTsStream struct {
PAT MpegTsPAT // PAT表信息
PMT MpegTsPMT // PMT表信息
PESBuffer map[uint16]*MpegTsPESPacket
PESChan chan *MpegTsPESPacket
PAT MpegTsPAT // PAT表信息
PMT MpegTsPMT // PMT表信息
Publisher *m7s.Publisher
Allocator *util.ScalableMemoryAllocator
writer m7s.PublishWriter[*format.Mpeg2Audio, *VideoFrame]
audioPID, videoPID, pmtPID uint16
tsPacket [TS_PACKET_SIZE]byte
}
// MpegTsPacket is a single 188-byte transport-stream packet.
// ios13818-1-CN.pdf 33/165
//
// TS
//
// Packet == Header + Payload == 188 bytes
type MpegTsPacket struct {
	Header  MpegTsHeader
	Payload []byte
}
// 前面32bit的数据即TS分组首部,它指出了这个分组的属性
type MpegTsHeader struct {
@@ -185,25 +183,6 @@ type MpegTsDescriptor struct {
Data []byte
}
// ReadTsPacket reads one 188-byte TS packet from r: the TS header followed
// by whatever remains of the 188 bytes as payload.
func ReadTsPacket(r io.Reader) (packet MpegTsPacket, err error) {
	// Bound all reads to a single TS packet.
	lr := &io.LimitedReader{R: r, N: TS_PACKET_SIZE}
	// header
	packet.Header, err = ReadTsHeader(lr)
	if err != nil {
		return
	}
	// payload: the bytes the header parsing left of the 188-byte budget.
	packet.Payload = make([]byte, lr.N)
	// Fix: a bare lr.Read may return fewer bytes than len(packet.Payload)
	// without an error (short read); io.ReadFull guarantees the payload is
	// filled or reports ErrUnexpectedEOF.
	_, err = io.ReadFull(lr, packet.Payload)
	if err != nil {
		return
	}
	return
}
func ReadTsHeader(r io.Reader) (header MpegTsHeader, err error) {
var h uint32
@@ -365,7 +344,7 @@ func ReadTsHeader(r io.Reader) (header MpegTsHeader, err error) {
// Discard 是一个 io.Writer,对它进行的任何 Write 调用都将无条件成功
// 但是ioutil.Discard不记录copy得到的数值
// 用于发送需要读取但不想存储的数据,目的是耗尽读取端的数据
if _, err = io.CopyN(ioutil.Discard, lr, int64(lr.N)); err != nil {
if _, err = io.CopyN(io.Discard, lr, int64(lr.N)); err != nil {
return
}
}
@@ -440,138 +419,96 @@ func WriteTsHeader(w io.Writer, header MpegTsHeader) (written int, err error) {
return
}
//
//func (s *MpegTsStream) TestWrite(fileName string) error {
//
// if fileName != "" {
// file, err := os.Create(fileName)
// if err != nil {
// panic(err)
// }
// defer file.Close()
//
// patTsHeader := []byte{0x47, 0x40, 0x00, 0x10}
//
// if err := WritePATPacket(file, patTsHeader, *s.pat); err != nil {
// panic(err)
// }
//
// // TODO:这里的pid应该是由PAT给的
// pmtTsHeader := []byte{0x47, 0x41, 0x00, 0x10}
//
// if err := WritePMTPacket(file, pmtTsHeader, *s.pmt); err != nil {
// panic(err)
// }
// }
//
// var videoFrame int
// var audioFrame int
// for {
// tsPesPkt, ok := <-s.TsPesPktChan
// if !ok {
// fmt.Println("frame index, video , audio :", videoFrame, audioFrame)
// break
// }
//
// if tsPesPkt.PesPkt.Header.StreamID == STREAM_ID_AUDIO {
// audioFrame++
// }
//
// if tsPesPkt.PesPkt.Header.StreamID == STREAM_ID_VIDEO {
// println(tsPesPkt.PesPkt.Header.Pts)
// videoFrame++
// }
//
// fmt.Sprintf("%s", tsPesPkt)
//
// // if err := WritePESPacket(file, tsPesPkt.TsPkt.Header, tsPesPkt.PesPkt); err != nil {
// // return err
// // }
//
// }
//
// return nil
//}
// ReadPAT parses the Program Association Table from packet when it carries
// the PAT PID (0x00), storing the result in s.PAT.
func (s *MpegTsStream) ReadPAT(packet *MpegTsPacket, pr io.Reader) (err error) {
	// First locate the TS packet with PID == 0x00 (the PAT).
	if PID_PAT == packet.Header.Pid {
		if len(packet.Payload) == 188 {
			// Full-size payload: wrap the reader so a CRC32 is computed
			// while the table is read.
			pr = &util.Crc32Reader{R: pr, Crc32: 0xffffffff}
		}
		// Header + PSI + Payload
		s.PAT, err = ReadPAT(pr)
	}
	return
}
// ReadPMT parses the Program Map Table from packet when its PID matches one
// of the program-map PIDs announced by the PAT, storing the result in s.PMT.
func (s *MpegTsStream) ReadPMT(packet *MpegTsPacket, pr io.Reader) (err error) {
	// The PAT pass recorded each program's PMT PID (ProgramMapPID); the PMT
	// is the TS packet whose PID equals one of them.
	for _, v := range s.PAT.Program {
		if v.ProgramMapPID == packet.Header.Pid {
			if len(packet.Payload) == 188 {
				// Full-size payload: wrap the reader so a CRC32 is computed
				// while the table is read.
				pr = &util.Crc32Reader{R: pr, Crc32: 0xffffffff}
			}
			// Header + PSI + Payload
			s.PMT, err = ReadPMT(pr)
		}
	}
	return
}
func (s *MpegTsStream) Feed(ts io.Reader) (err error) {
writer := &s.writer
var reader bytes.Reader
var lr io.LimitedReader
lr.R = &reader
var tsHeader MpegTsHeader
tsData := make([]byte, TS_PACKET_SIZE)
for {
_, err = io.ReadFull(ts, tsData)
var pesHeader MpegPESHeader
for !s.Publisher.IsStopped() {
_, err = io.ReadFull(ts, s.tsPacket[:])
if err == io.EOF {
// 文件结尾 把最后面的数据发出去
for _, pesPkt := range s.PESBuffer {
if pesPkt != nil {
s.PESChan <- pesPkt
}
}
return nil
} else if err != nil {
return
}
reader.Reset(tsData)
reader.Reset(s.tsPacket[:])
lr.N = TS_PACKET_SIZE
if tsHeader, err = ReadTsHeader(&lr); err != nil {
return
}
if tsHeader.Pid == PID_PAT {
switch tsHeader.Pid {
case PID_PAT:
if s.PAT, err = ReadPAT(&lr); err != nil {
return
}
s.pmtPID = s.PAT.Program[0].ProgramMapPID
continue
}
if len(s.PMT.Stream) == 0 {
for _, v := range s.PAT.Program {
if v.ProgramMapPID == tsHeader.Pid {
if s.PMT, err = ReadPMT(&lr); err != nil {
return
}
for _, v := range s.PMT.Stream {
s.PESBuffer[v.ElementaryPID] = nil
}
}
case s.pmtPID:
if len(s.PMT.Stream) != 0 {
continue
}
} else if pesPkt, ok := s.PESBuffer[tsHeader.Pid]; ok {
if tsHeader.PayloadUnitStartIndicator == 1 {
if pesPkt != nil {
s.PESChan <- pesPkt
}
pesPkt = &MpegTsPESPacket{}
s.PESBuffer[tsHeader.Pid] = pesPkt
if pesPkt.Header, err = ReadPESHeader(&lr); err != nil {
return
if s.PMT, err = ReadPMT(&lr); err != nil {
return
}
for _, pmt := range s.PMT.Stream {
switch pmt.StreamType {
case STREAM_TYPE_H265:
s.videoPID = pmt.ElementaryPID
writer.PublishVideoWriter = m7s.NewPublishVideoWriter[*VideoFrame](s.Publisher, s.Allocator)
writer.VideoFrame.ICodecCtx = &codec.H265Ctx{}
case STREAM_TYPE_H264:
s.videoPID = pmt.ElementaryPID
writer.PublishVideoWriter = m7s.NewPublishVideoWriter[*VideoFrame](s.Publisher, s.Allocator)
writer.VideoFrame.ICodecCtx = &codec.H264Ctx{}
case STREAM_TYPE_AAC:
s.audioPID = pmt.ElementaryPID
writer.PublishAudioWriter = m7s.NewPublishAudioWriter[*format.Mpeg2Audio](s.Publisher, s.Allocator)
writer.AudioFrame.ICodecCtx = &codec.AACCtx{}
case STREAM_TYPE_G711A:
s.audioPID = pmt.ElementaryPID
writer.PublishAudioWriter = m7s.NewPublishAudioWriter[*format.Mpeg2Audio](s.Publisher, s.Allocator)
writer.AudioFrame.ICodecCtx = codec.NewPCMACtx()
case STREAM_TYPE_G711U:
s.audioPID = pmt.ElementaryPID
writer.PublishAudioWriter = m7s.NewPublishAudioWriter[*format.Mpeg2Audio](s.Publisher, s.Allocator)
writer.AudioFrame.ICodecCtx = codec.NewPCMUCtx()
}
}
io.Copy(&pesPkt.Payload, &lr)
case s.audioPID:
if tsHeader.PayloadUnitStartIndicator == 1 {
if pesHeader, err = ReadPESHeader0(&lr); err != nil {
return
}
if !s.Publisher.PubAudio {
continue
}
if writer.AudioFrame.Size > 0 {
if err = writer.NextAudio(); err != nil {
continue
}
}
writer.AudioFrame.SetDTS(time.Duration(pesHeader.Pts))
}
lr.Read(writer.AudioFrame.NextN(int(lr.N)))
case s.videoPID:
if tsHeader.PayloadUnitStartIndicator == 1 {
if pesHeader, err = ReadPESHeader0(&lr); err != nil {
return
}
if !s.Publisher.PubVideo {
continue
}
if writer.VideoFrame.Size > 0 {
if err = writer.NextVideo(); err != nil {
continue
}
}
writer.VideoFrame.SetDTS(time.Duration(pesHeader.Dts))
writer.VideoFrame.SetPTS(time.Duration(pesHeader.Pts))
}
lr.Read(writer.VideoFrame.NextN(int(lr.N)))
}
}
return
}

View File

@@ -2,39 +2,19 @@ package mpegts
import (
"errors"
"fmt"
"io"
"m7s.live/v5/pkg/util"
"net"
)
// ios13818-1-CN.pdf 45/166
//
// PES
//
// 每个传输流和节目流在逻辑上都是由 PES 包构造的
type MpegTsPesStream struct {
TsPkt MpegTsPacket
PesPkt MpegTsPESPacket
}
// PES--Packetized Elementary Streams (分组的ES),ES形成的分组称为PES分组,是用来传递ES的一种数据结构
// 1110 xxxx 为视频流(0xE0)
// 110x xxxx 为音频流(0xC0)
type MpegTsPESPacket struct {
Header MpegTsPESHeader
Payload util.Buffer //从TS包中读取的数据
Buffers net.Buffers //用于写TS包
}
type MpegTsPESHeader struct {
PacketStartCodePrefix uint32 // 24 bits 同跟随它的 stream_id 一起组成标识包起始端的包起始码.packet_start_code_prefix 为比特串"0000 0000 0000 0000 0000 0001"(0x000001)
StreamID byte // 8 bits stream_id 指示基本流的类型和编号,如 stream_id 表 2-22 所定义的.传输流中,stream_id 可以设置为准确描述基本流类型的任何有效值,如表 2-22 所规定的.传输流中,基本流类型在 2.4.4 中所指示的节目特定信息中指定
PesPacketLength uint16 // 16 bits 指示 PES 包中跟随该字段最后字节的字节数.0->指示 PES 包长度既未指示也未限定并且仅在这样的 PES 包中才被允许,该 PES 包的有效载荷由来自传输流包中所包含的视频基本流的字节组成
type MpegPESHeader struct {
header [32]byte
StreamID byte // 8 bits stream_id 指示基本流的类型和编号,如 stream_id 表 2-22 所定义的.传输流中,stream_id 可以设置为准确描述基本流类型的任何有效值,如表 2-22 所规定的.传输流中,基本流类型在 2.4.4 中所指示的节目特定信息中指定
PesPacketLength uint16 // 16 bits 指示 PES 包中跟随该字段最后字节的字节数.0->指示 PES 包长度既未指示也未限定并且仅在这样的 PES 包中才被允许,该 PES 包的有效载荷由来自传输流包中所包含的视频基本流的字节组成
MpegTsOptionalPESHeader
PayloadLength uint64 // 这个不是标准文档里面的字段,是自己添加的,方便计算
}
// 可选的PES Header = MpegTsOptionalPESHeader + stuffing bytes(0xFF) m * 8
@@ -99,23 +79,35 @@ type MpegTsOptionalPESHeader struct {
// pts_dts_Flags == "11" -> PTS + DTS
type MpegtsPESFrame struct {
Pid uint16
IsKeyFrame bool
ContinuityCounter byte
ProgramClockReferenceBase uint64
Pid uint16
IsKeyFrame bool
ContinuityCounter byte
MpegPESHeader
}
func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
var flags uint8
var length uint
func CreatePESWriters() (pesAudio, pesVideo MpegtsPESFrame) {
pesAudio, pesVideo = MpegtsPESFrame{
Pid: PID_AUDIO,
}, MpegtsPESFrame{
Pid: PID_VIDEO,
}
pesAudio.DataAlignmentIndicator = 1
pesVideo.DataAlignmentIndicator = 1
pesAudio.StreamID = STREAM_ID_AUDIO
pesVideo.StreamID = STREAM_ID_VIDEO
return
}
func ReadPESHeader0(r *io.LimitedReader) (header MpegPESHeader, err error) {
var length uint
var packetStartCodePrefix uint32
// packetStartCodePrefix(24) (0x000001)
header.PacketStartCodePrefix, err = util.ReadByteToUint24(r, true)
packetStartCodePrefix, err = util.ReadByteToUint24(r, true)
if err != nil {
return
}
if header.PacketStartCodePrefix != 0x0000001 {
if packetStartCodePrefix != 0x0000001 {
err = errors.New("read PacketStartCodePrefix is not 0x0000001")
return
}
@@ -141,18 +133,27 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
if length == 0 {
length = 1 << 31
}
var header1 MpegPESHeader
header1, err = ReadPESHeader(r)
if err == nil {
if header.PesPacketLength == 0 {
header1.PesPacketLength = uint16(r.N)
}
header1.StreamID = header.StreamID
return header1, nil
}
return
}
// lrPacket 和 lrHeader 位置指针是在同一位置的
lrPacket := &io.LimitedReader{R: r, N: int64(length)}
lrHeader := lrPacket
func ReadPESHeader(lrPacket *io.LimitedReader) (header MpegPESHeader, err error) {
var flags uint8
// constTen(2)
// pes_ScramblingControl(2)
// pes_Priority(1)
// dataAlignmentIndicator(1)
// copyright(1)
// originalOrCopy(1)
flags, err = util.ReadByteToUint8(lrHeader)
flags, err = util.ReadByteToUint8(lrPacket)
if err != nil {
return
}
@@ -171,7 +172,7 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
// additionalCopyInfoFlag(1)
// pes_CRCFlag(1)
// pes_ExtensionFlag(1)
flags, err = util.ReadByteToUint8(lrHeader)
flags, err = util.ReadByteToUint8(lrPacket)
if err != nil {
return
}
@@ -185,14 +186,14 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
header.PesExtensionFlag = flags & 0x01
// pes_HeaderDataLength(8)
header.PesHeaderDataLength, err = util.ReadByteToUint8(lrHeader)
header.PesHeaderDataLength, err = util.ReadByteToUint8(lrPacket)
if err != nil {
return
}
length = uint(header.PesHeaderDataLength)
length := uint(header.PesHeaderDataLength)
lrHeader = &io.LimitedReader{R: lrHeader, N: int64(length)}
lrHeader := &io.LimitedReader{R: lrPacket, N: int64(length)}
// 00 -> PES 包头中既无任何PTS 字段也无任何DTS 字段存在
// 10 -> PES 包头中PTS 字段存在
@@ -219,6 +220,8 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
}
header.Dts = util.GetPtsDts(dts)
} else {
header.Dts = header.Pts
}
// reserved(2) + escr_Base1(3) + marker_bit(1) +
@@ -336,48 +339,31 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
}
}
// 2的16次方,16个字节
if lrPacket.N < 65536 {
// 这里得到的其实是负载长度,因为已经偏移过了Header部分.
//header.pes_PacketLength = uint16(lrPacket.N)
header.PayloadLength = uint64(lrPacket.N)
}
return
}
func WritePESHeader(w io.Writer, header MpegTsPESHeader) (written int, err error) {
if header.PacketStartCodePrefix != 0x0000001 {
err = errors.New("write PacketStartCodePrefix is not 0x0000001")
return
func (header *MpegPESHeader) WritePESHeader(esSize int) (w util.Buffer, err error) {
if header.DataAlignmentIndicator == 1 {
if header.Pts == header.Dts {
header.PtsDtsFlags = 0x80
header.PesHeaderDataLength = 5
} else {
header.PtsDtsFlags = 0xC0
header.PesHeaderDataLength = 10
}
} else {
header.PtsDtsFlags = 0
header.PesHeaderDataLength = 0
}
// packetStartCodePrefix(24) (0x000001)
if err = util.WriteUint24ToByte(w, header.PacketStartCodePrefix, true); err != nil {
return
pktLength := esSize + int(header.PesHeaderDataLength) + 3
if pktLength > 0xffff {
pktLength = 0
}
header.PesPacketLength = uint16(pktLength)
written += 3
// streamID(8)
if err = util.WriteUint8ToByte(w, header.StreamID); err != nil {
return
}
written += 1
// pes_PacketLength(16)
// PES包长度可能为0,这个时候,需要自己去算
// 0 <= len <= 65535
if err = util.WriteUint16ToByte(w, header.PesPacketLength, true); err != nil {
return
}
//fmt.Println("Length :", payloadLength)
//fmt.Println("PES Packet Length :", header.pes_PacketLength)
written += 2
w = header.header[:0]
w.WriteUint32(0x00000100 | uint32(header.StreamID))
w.WriteUint16(header.PesPacketLength)
// constTen(2)
// pes_ScramblingControl(2)
// pes_Priority(1)
@@ -385,18 +371,9 @@ func WritePESHeader(w io.Writer, header MpegTsPESHeader) (written int, err error
// copyright(1)
// originalOrCopy(1)
// 1000 0001
if header.ConstTen != 0x80 {
err = errors.New("pes header ConstTen != 0x80")
return
}
flags := header.ConstTen | header.PesScramblingControl | header.PesPriority | header.DataAlignmentIndicator | header.Copyright | header.OriginalOrCopy
if err = util.WriteUint8ToByte(w, flags); err != nil {
return
}
written += 1
flags := 0x80 | header.PesScramblingControl | header.PesPriority | header.DataAlignmentIndicator | header.Copyright | header.OriginalOrCopy
w.WriteByte(flags)
// pts_dts_Flags(2)
// escr_Flag(1)
// es_RateFlag(1)
@@ -405,19 +382,8 @@ func WritePESHeader(w io.Writer, header MpegTsPESHeader) (written int, err error
// pes_CRCFlag(1)
// pes_ExtensionFlag(1)
sevenFlags := header.PtsDtsFlags | header.EscrFlag | header.EsRateFlag | header.DsmTrickModeFlag | header.AdditionalCopyInfoFlag | header.PesCRCFlag | header.PesExtensionFlag
if err = util.WriteUint8ToByte(w, sevenFlags); err != nil {
return
}
written += 1
// pes_HeaderDataLength(8)
if err = util.WriteUint8ToByte(w, header.PesHeaderDataLength); err != nil {
return
}
written += 1
w.WriteByte(sevenFlags)
w.WriteByte(header.PesHeaderDataLength)
// PtsDtsFlags == 192(11), 128(10), 64(01)禁用, 0(00)
if header.PtsDtsFlags&0x80 != 0 {
// PTS和DTS都存在(11),否则只有PTS(10)
@@ -425,30 +391,121 @@ func WritePESHeader(w io.Writer, header MpegTsPESHeader) (written int, err error
// 11:PTS和DTS
// PTS(33) + 4 + 3
pts := util.PutPtsDts(header.Pts) | 3<<36
if err = util.WriteUint40ToByte(w, pts, true); err != nil {
if err = util.WriteUint40ToByte(&w, pts, true); err != nil {
return
}
written += 5
// DTS(33) + 4 + 3
dts := util.PutPtsDts(header.Dts) | 1<<36
if err = util.WriteUint40ToByte(w, dts, true); err != nil {
if err = util.WriteUint40ToByte(&w, dts, true); err != nil {
return
}
written += 5
} else {
// 10:只有PTS
// PTS(33) + 4 + 3
pts := util.PutPtsDts(header.Pts) | 2<<36
if err = util.WriteUint40ToByte(w, pts, true); err != nil {
if err = util.WriteUint40ToByte(&w, pts, true); err != nil {
return
}
}
}
return
}
written += 5
func (frame *MpegtsPESFrame) WritePESPacket(payload util.Memory, allocator *util.RecyclableMemory) (err error) {
var pesHeadItem util.Buffer
pesHeadItem, err = frame.WritePESHeader(payload.Size)
if err != nil {
return
}
pesBuffers := util.NewMemory(pesHeadItem)
payload.Range(pesBuffers.PushOne)
pesPktLength := int64(pesBuffers.Size)
pesReader := pesBuffers.NewReader()
var tsHeaderLength int
for i := 0; pesPktLength > 0; i++ {
var buffer util.Buffer = allocator.NextN(TS_PACKET_SIZE)
bwTsHeader := &buffer
bwTsHeader.Reset()
tsHeader := MpegTsHeader{
SyncByte: 0x47,
TransportErrorIndicator: 0,
PayloadUnitStartIndicator: 0,
TransportPriority: 0,
Pid: frame.Pid,
TransportScramblingControl: 0,
AdaptionFieldControl: 1,
ContinuityCounter: frame.ContinuityCounter,
}
frame.ContinuityCounter++
frame.ContinuityCounter = frame.ContinuityCounter % 16
// 每一帧的开头,当含有pcr的时候,包含调整字段
if i == 0 {
tsHeader.PayloadUnitStartIndicator = 1
// 当PCRFlag为1的时候,包含调整字段
if frame.IsKeyFrame {
tsHeader.AdaptionFieldControl = 0x03
tsHeader.AdaptationFieldLength = 7
tsHeader.PCRFlag = 1
tsHeader.RandomAccessIndicator = 1
tsHeader.ProgramClockReferenceBase = frame.Pts
}
}
// 每一帧的结尾,当不满足188个字节的时候,包含调整字段
if pesPktLength < TS_PACKET_SIZE-4 {
var tsStuffingLength uint8
tsHeader.AdaptionFieldControl = 0x03
tsHeader.AdaptationFieldLength = uint8(TS_PACKET_SIZE - 4 - 1 - pesPktLength)
// TODO:如果第一个TS包也是最后一个TS包,是不是需要考虑这个情况?
// MpegTsHeader最少占6个字节.(前4个走字节 + AdaptationFieldLength(1 byte) + 3个指示符5个标志位(1 byte))
if tsHeader.AdaptationFieldLength >= 1 {
tsStuffingLength = tsHeader.AdaptationFieldLength - 1
} else {
tsStuffingLength = 0
}
// error
tsHeaderLength, err = WriteTsHeader(bwTsHeader, tsHeader)
if err != nil {
return
}
if tsStuffingLength > 0 {
if _, err = bwTsHeader.Write(Stuffing[:tsStuffingLength]); err != nil {
return
}
}
tsHeaderLength += int(tsStuffingLength)
} else {
tsHeaderLength, err = WriteTsHeader(bwTsHeader, tsHeader)
if err != nil {
return
}
}
tsPayloadLength := TS_PACKET_SIZE - tsHeaderLength
//fmt.Println("tsPayloadLength :", tsPayloadLength)
// 这里不断的减少PES包
written, _ := io.CopyN(bwTsHeader, &pesReader, int64(tsPayloadLength))
// tmp := tsHeaderByte[3] << 2
// tmp = tmp >> 6
// if tmp == 2 {
// fmt.Println("fuck you mother.")
// }
pesPktLength -= written
tsPktByteLen := bwTsHeader.Len()
if tsPktByteLen != TS_PACKET_SIZE {
err = errors.New(fmt.Sprintf("%s, packet size=%d", "TS_PACKET_SIZE != 188,", tsPktByteLen))
return
}
}
return
return nil
}

View File

@@ -1,9 +1,11 @@
package mpegts
import (
"bytes"
"errors"
"fmt"
"io"
"m7s.live/v5/pkg/util"
)
@@ -179,50 +181,56 @@ func WritePSI(w io.Writer, pt uint32, psi MpegTsPSI, data []byte) (err error) {
return
}
cw := &util.Crc32Writer{W: w, Crc32: 0xffffffff}
// 使用buffer收集所有需要计算CRC32的数据
bw := &bytes.Buffer{}
// table id(8)
if err = util.WriteUint8ToByte(cw, tableId); err != nil {
if err = util.WriteUint8ToByte(bw, tableId); err != nil {
return
}
// sectionSyntaxIndicator(1) + zero(1) + reserved1(2) + sectionLength(12)
// sectionLength 前两个字节固定为00
// 1 0 11 sectionLength
if err = util.WriteUint16ToByte(cw, sectionSyntaxIndicatorAndSectionLength, true); err != nil {
if err = util.WriteUint16ToByte(bw, sectionSyntaxIndicatorAndSectionLength, true); err != nil {
return
}
// PAT TransportStreamID(16) or PMT ProgramNumber(16)
if err = util.WriteUint16ToByte(cw, transportStreamIdOrProgramNumber, true); err != nil {
if err = util.WriteUint16ToByte(bw, transportStreamIdOrProgramNumber, true); err != nil {
return
}
// reserved2(2) + versionNumber(5) + currentNextIndicator(1)
// 0x3 << 6 -> 1100 0000
// 0x3 << 6 | 1 -> 1100 0001
if err = util.WriteUint8ToByte(cw, versionNumberAndCurrentNextIndicator); err != nil {
if err = util.WriteUint8ToByte(bw, versionNumberAndCurrentNextIndicator); err != nil {
return
}
// sectionNumber(8)
if err = util.WriteUint8ToByte(cw, sectionNumber); err != nil {
if err = util.WriteUint8ToByte(bw, sectionNumber); err != nil {
return
}
// lastSectionNumber(8)
if err = util.WriteUint8ToByte(cw, lastSectionNumber); err != nil {
if err = util.WriteUint8ToByte(bw, lastSectionNumber); err != nil {
return
}
// data
if _, err = cw.Write(data); err != nil {
if _, err = bw.Write(data); err != nil {
return
}
// crc32
crc32 := util.BigLittleSwap(uint(cw.Crc32))
if err = util.WriteUint32ToByte(cw, uint32(crc32), true); err != nil {
// 写入PSI数据
if _, err = w.Write(bw.Bytes()); err != nil {
return
}
// 使用MPEG-TS CRC32算法计算CRC32
crc32 := GetCRC32(bw.Bytes())
if err = util.WriteUint32ToByte(w, crc32, true); err != nil {
return
}

20
pkg/format/ts/video.go Normal file
View File

@@ -0,0 +1,20 @@
package mpegts
import (
"m7s.live/v5/pkg"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/format"
)
// VideoFrame adapts a video sample for MPEG-TS output: it prepends an
// access-unit delimiter (AUD) NALU before handing the sample to the
// embedded Annex-B muxer.
type VideoFrame struct {
	format.AnnexB
}

// Mux prepends the codec-appropriate AUD NALU, then delegates to the
// embedded AnnexB muxer for the actual payload conversion.
func (a *VideoFrame) Mux(fromBase *pkg.Sample) (err error) {
	if fromBase.GetBase().FourCC().Is(codec.FourCC_H265) {
		// codec.AudNalu — presumably the H.265 AUD NALU bytes; TODO confirm in codec pkg.
		a.PushOne(codec.AudNalu)
	} else {
		// codec.NALU_AUD_BYTE — presumably the H.264 AUD byte sequence; TODO confirm.
		a.PushOne(codec.NALU_AUD_BYTE)
	}
	return a.AnnexB.Mux(fromBase)
}

View File

@@ -1,221 +0,0 @@
package pkg
import (
"fmt"
"github.com/deepch/vdk/codec/aacparser"
"github.com/deepch/vdk/codec/h264parser"
"github.com/deepch/vdk/codec/h265parser"
"io"
"m7s.live/v5/pkg/codec"
"m7s.live/v5/pkg/util"
"time"
)
// Compile-time assertion that RawAudio implements IAVFrame.
var _ IAVFrame = (*RawAudio)(nil)

// RawAudio carries an un-containered audio payload (AAC config/frames,
// G.711 A-law/µ-law) together with its codec id and timestamp.
type RawAudio struct {
	codec.FourCC
	Timestamp time.Duration
	util.RecyclableMemory
}

// Parse lazily initializes the track's codec context from this frame:
// AAC parses the payload as an MPEG-4 AudioSpecificConfig; PCMA/PCMU
// get fixed 8 kHz / mono / 8-bit contexts. No-op when the track already
// has a context, and silently leaves it nil for unknown FourCCs.
func (r *RawAudio) Parse(track *AVTrack) (err error) {
	if track.ICodecCtx == nil {
		switch r.FourCC {
		case codec.FourCC_MP4A:
			ctx := &codec.AACCtx{}
			// NOTE: ctx is stored on the track even if parsing errored; err is returned to the caller.
			ctx.CodecData, err = aacparser.NewCodecDataFromMPEG4AudioConfigBytes(r.ToBytes())
			track.ICodecCtx = ctx
		case codec.FourCC_ALAW:
			track.ICodecCtx = &codec.PCMACtx{
				AudioCtx: codec.AudioCtx{
					SampleRate: 8000,
					Channels:   1,
					SampleSize: 8,
				},
			}
		case codec.FourCC_ULAW:
			track.ICodecCtx = &codec.PCMUCtx{
				AudioCtx: codec.AudioCtx{
					SampleRate: 8000,
					Channels:   1,
					SampleSize: 8,
				},
			}
		}
	}
	return
}

// ConvertCtx returns the base codec context and, for AAC, a sequence
// frame holding the codec record (extradata) so downstream consumers
// receive the config before media frames. Non-AAC codecs need no
// sequence frame.
func (r *RawAudio) ConvertCtx(ctx codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) {
	c := ctx.GetBase()
	if c.FourCC().Is(codec.FourCC_MP4A) {
		seq := &RawAudio{
			FourCC:    codec.FourCC_MP4A,
			Timestamp: r.Timestamp,
		}
		seq.SetAllocator(r.GetAllocator())
		seq.Memory.Append(c.GetRecord())
		return c, seq, nil
	}
	return c, nil, nil
}

// Demux exposes the raw payload memory; raw audio needs no unpacking.
func (r *RawAudio) Demux(ctx codec.ICodecCtx) (any, error) {
	return r.Memory, nil
}

// Mux copies the frame's raw memory, codec id and timestamp into this
// wrapper. InitRecycleIndexes(0) marks the memory as borrowed (nothing
// of our own to recycle).
func (r *RawAudio) Mux(ctx codec.ICodecCtx, frame *AVFrame) {
	r.InitRecycleIndexes(0)
	r.FourCC = ctx.FourCC()
	r.Memory = frame.Raw.(util.Memory)
	r.Timestamp = frame.Timestamp
}

// GetTimestamp returns the frame's presentation timestamp.
func (r *RawAudio) GetTimestamp() time.Duration {
	return r.Timestamp
}

// GetCTS returns the composition-time offset; always zero for audio.
func (r *RawAudio) GetCTS() time.Duration {
	return 0
}

// GetSize returns the payload size in bytes.
func (r *RawAudio) GetSize() int {
	return r.Size
}

// String describes the frame for logging.
func (r *RawAudio) String() string {
	return fmt.Sprintf("RawAudio{FourCC: %s, Timestamp: %s, Size: %d}", r.FourCC, r.Timestamp, r.Size)
}

// Dump is unimplemented; calling it panics.
func (r *RawAudio) Dump(b byte, writer io.Writer) {
	//TODO implement me
	panic("implement me")
}
// Compile-time assertion that H26xFrame implements IAVFrame.
var _ IAVFrame = (*H26xFrame)(nil)

// H26xFrame carries H.264/H.265 NALUs with timing information.
type H26xFrame struct {
	codec.FourCC
	Timestamp time.Duration // presentation timestamp
	CTS       time.Duration // composition-time offset
	Nalus
	util.RecyclableMemory
}

// Parse scans the frame's NALUs to (re)build the track codec context
// from in-band parameter sets (SPS/PPS for H.264, VPS/SPS/PPS for
// H.265) and flags IDR/random-access pictures on the track value.
func (h *H26xFrame) Parse(track *AVTrack) (err error) {
	switch h.FourCC {
	case codec.FourCC_H264:
		var ctx *codec.H264Ctx
		if track.ICodecCtx != nil {
			ctx = track.ICodecCtx.GetBase().(*codec.H264Ctx)
		}
		for _, nalu := range h.Nalus {
			// NALU type is taken from the first byte of the first buffer.
			switch codec.ParseH264NALUType(nalu.Buffers[0][0]) {
			case h264parser.NALU_SPS:
				// A new SPS starts a fresh context.
				ctx = &codec.H264Ctx{}
				track.ICodecCtx = ctx
				ctx.RecordInfo.SPS = [][]byte{nalu.ToBytes()}
				if ctx.SPSInfo, err = h264parser.ParseSPS(ctx.SPS()); err != nil {
					return
				}
			case h264parser.NALU_PPS:
				// NOTE(review): assumes an SPS was seen first; a PPS arriving
				// with ctx still nil would panic — confirm stream ordering.
				ctx.RecordInfo.PPS = [][]byte{nalu.ToBytes()}
				ctx.CodecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS(), ctx.PPS())
				if err != nil {
					return
				}
			case codec.NALU_IDR_Picture:
				track.Value.IDR = true
			}
		}
	case codec.FourCC_H265:
		var ctx *codec.H265Ctx
		if track.ICodecCtx != nil {
			ctx = track.ICodecCtx.GetBase().(*codec.H265Ctx)
		}
		for _, nalu := range h.Nalus {
			switch codec.ParseH265NALUType(nalu.Buffers[0][0]) {
			case h265parser.NAL_UNIT_VPS:
				// A new VPS starts a fresh context.
				ctx = &codec.H265Ctx{}
				ctx.RecordInfo.VPS = [][]byte{nalu.ToBytes()}
				track.ICodecCtx = ctx
			case h265parser.NAL_UNIT_SPS:
				// NOTE(review): assumes a VPS was seen first; SPS with a nil ctx
				// would panic — confirm stream ordering.
				ctx.RecordInfo.SPS = [][]byte{nalu.ToBytes()}
				if ctx.SPSInfo, err = h265parser.ParseSPS(ctx.SPS()); err != nil {
					return
				}
			case h265parser.NAL_UNIT_PPS:
				ctx.RecordInfo.PPS = [][]byte{nalu.ToBytes()}
				// NOTE(review): error from NewCodecDataFromVPSAndSPSAndPPS is
				// assigned but only returned at function end, unlike the H.264 branch.
				ctx.CodecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(ctx.VPS(), ctx.SPS(), ctx.PPS())
			case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
				h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
				h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
				h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
				h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
				h265parser.NAL_UNIT_CODED_SLICE_CRA:
				// Any random-access slice type counts as an IDR for the track.
				track.Value.IDR = true
			}
		}
	}
	return
}

// ConvertCtx returns the base context plus a parameter-set frame
// (SPS/PPS, and VPS for H.265) so consumers receive decoder config
// before media frames. Other codecs get no sequence frame.
func (h *H26xFrame) ConvertCtx(ctx codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) {
	switch c := ctx.GetBase().(type) {
	case *codec.H264Ctx:
		return c, &H26xFrame{
			FourCC: codec.FourCC_H264,
			Nalus: []util.Memory{
				util.NewMemory(c.SPS()),
				util.NewMemory(c.PPS()),
			},
		}, nil
	case *codec.H265Ctx:
		return c, &H26xFrame{
			FourCC: codec.FourCC_H265,
			Nalus: []util.Memory{
				util.NewMemory(c.VPS()),
				util.NewMemory(c.SPS()),
				util.NewMemory(c.PPS()),
			},
		}, nil
	}
	return ctx.GetBase(), nil, nil
}

// Demux exposes the NALU list; no further unpacking is needed.
func (h *H26xFrame) Demux(ctx codec.ICodecCtx) (any, error) {
	return h.Nalus, nil
}

// Mux copies the frame's NALUs, codec id and timing into this wrapper.
func (h *H26xFrame) Mux(ctx codec.ICodecCtx, frame *AVFrame) {
	h.FourCC = ctx.FourCC()
	h.Nalus = frame.Raw.(Nalus)
	h.Timestamp = frame.Timestamp
	h.CTS = frame.CTS
}

// GetTimestamp returns the frame's presentation timestamp.
func (h *H26xFrame) GetTimestamp() time.Duration {
	return h.Timestamp
}

// GetCTS returns the composition-time offset (PTS - DTS).
func (h *H26xFrame) GetCTS() time.Duration {
	return h.CTS
}

// GetSize sums the sizes of all NALUs in the frame.
func (h *H26xFrame) GetSize() int {
	var size int
	for _, nalu := range h.Nalus {
		size += nalu.Size
	}
	return size
}

// String describes the frame for logging.
func (h *H26xFrame) String() string {
	return fmt.Sprintf("H26xFrame{FourCC: %s, Timestamp: %s, CTS: %s}", h.FourCC, h.Timestamp, h.CTS)
}

// Dump is unimplemented; calling it panics.
func (h *H26xFrame) Dump(b byte, writer io.Writer) {
	//TODO implement me
	panic("implement me")
}

View File

@@ -3,6 +3,7 @@ package pkg
import (
"log/slog"
"sync"
"sync/atomic"
"time"
"m7s.live/v5/pkg/task"
@@ -21,6 +22,7 @@ type RingWriter struct {
Size int
LastValue *AVFrame
SLogger *slog.Logger
status atomic.Int32 // 0: init, 1: writing, 2: disposed
}
func NewRingWriter(sizeRange util.Range[int]) (rb *RingWriter) {
@@ -90,7 +92,9 @@ func (rb *RingWriter) reduce(size int) {
func (rb *RingWriter) Dispose() {
rb.SLogger.Debug("dispose")
rb.Value.Ready()
if rb.status.Add(-1) == -1 { // normal dispose
rb.Value.Unlock()
}
}
func (rb *RingWriter) GetIDR() *util.Ring[AVFrame] {
@@ -185,18 +189,70 @@ func (rb *RingWriter) Step() (normal bool) {
rb.LastValue = &rb.Value
nextSeq := rb.LastValue.Sequence + 1
if normal = next.Value.StartWrite(); normal {
next.Value.Reset()
rb.Ring = next
} else {
rb.reduce(1) //抛弃还有订阅者的节点
rb.Ring = rb.glow(1, "refill") //补充一个新节点
normal = rb.Value.StartWrite()
if !normal {
panic("RingWriter.Step")
/*
sequenceDiagram
autonumber
participant Caller as Caller
participant RW as RingWriter
participant Val as AVFrame.Value
Note over RW: status initial = 0 (idle)
Caller->>RW: Step()
activate RW
RW->>RW: status.Add(1) (0→1)
alt entered writing (result == 1)
Note over RW: writing
RW->>Val: StartWrite()
RW->>Val: Reset()
opt Dispose during write
Caller->>RW: Dispose()
RW->>RW: status.Add(-1) (1→0)
end
RW->>RW: status.Add(-1) at end of Step
alt returns 0 (write completed)
RW->>Val: Ready()
else returns -1 (disposed during write)
RW->>Val: Unlock()
end
else not entered
Note over RW: Step aborted (already disposed/busy)
end
deactivate RW
Caller->>RW: Dispose()
activate RW
RW->>RW: status.Add(-1)
alt returns -1 (idle dispose)
RW->>Val: Unlock()
else returns 0 (dispose during write)
Note over RW: Unlock will occur at Step end (no Ready)
end
deactivate RW
Note over RW: States: -1 (disposed), 0 (idle), 1 (writing)
*/
if rb.status.Add(1) == 1 {
if normal = next.Value.StartWrite(); normal {
next.Value.Reset()
rb.Ring = next
} else {
rb.reduce(1) //抛弃还有订阅者的节点
rb.Ring = rb.glow(1, "refill") //补充一个新节点
normal = rb.Value.StartWrite()
if !normal {
panic("RingWriter.Step")
}
}
rb.Value.Sequence = nextSeq
if rb.status.Add(-1) == 0 {
rb.LastValue.Ready()
} else {
rb.Value.Unlock()
}
}
rb.Value.Sequence = nextSeq
rb.LastValue.Ready()
return
}

View File

@@ -5,6 +5,8 @@ import (
"log/slog"
"testing"
"time"
"m7s.live/v5/pkg/util"
)
func TestRing(t *testing.T) {
@@ -13,7 +15,7 @@ func TestRing(t *testing.T) {
ctx, _ := context.WithTimeout(context.Background(), time.Second*5)
go t.Run("writer", func(t *testing.T) {
for i := 0; ctx.Err() == nil; i++ {
w.Value.Raw = i
w.Value.Raw = &util.Memory{}
normal := w.Step()
t.Log("write", i, normal)
time.Sleep(time.Millisecond * 50)
@@ -76,7 +78,7 @@ func BenchmarkRing(b *testing.B) {
ctx, _ := context.WithTimeout(context.Background(), time.Second*5)
go func() {
for i := 0; ctx.Err() == nil; i++ {
w.Value.Raw = i
w.Value.Raw = &util.Memory{}
w.Step()
time.Sleep(time.Millisecond * 50)
}

21
pkg/steps.go Normal file
View File

@@ -0,0 +1,21 @@
package pkg
// StepName is a typed alias for all workflow step identifiers.
type StepName string

// StepDef defines a step with typed name and description.
type StepDef struct {
	Name        StepName // step identifier
	Description string   // human-readable explanation of the step
}

// Standard, cross-plugin step name constants for pull/publish workflows.
// Plugin-specific step names should be defined in their respective plugin packages.
const (
	StepPublish    StepName = "publish"     // publishing the stream
	StepURLParsing StepName = "url_parsing" // parsing the source/target URL
	StepConnection StepName = "connection"  // establishing the network connection
	StepHandshake  StepName = "handshake"   // protocol handshake phase
	StepParsing    StepName = "parsing"     // parsing the media/protocol data
	StepStreaming  StepName = "streaming"   // steady-state media streaming
)

59
pkg/task/README.md Normal file
View File

@@ -0,0 +1,59 @@
# 任务系统概要
# 任务的启动
任务通过调用父任务的 AddTask 来启动,此时会进入队列中等待启动,父任务的 EventLoop 会接受到子任务,然后调用子任务的 Start 方法进行启动操作
## EventLoop 的初始化
为了节省资源,EventLoop 在没有子任务时不会创建协程,一直等到有子任务时才会创建;并且如果这个子任务也是一个空的 Job(即没有 Start、Run、Go),则仍然不会创建协程。
## EventLoop 停止
为了节省资源,当 EventLoop 中没有待执行的子任务时需要退出协程。EventLoop 会在以下情况退出:
1. 没有待处理的任务且没有活跃的子任务,且父任务的 keepalive() 返回 false
2. EventLoop 的状态被设置为停止状态(-1)
# 任务的停止
## 主动停止某个任务
调用任务的 Stop 方法即可停止某个任务,此时该任务会由其父任务的 eventLoop 检测到 context 取消信号然后开始执行任务的 dispose 来进行销毁
## 任务的意外退出
当任务的 Run 返回错误,或者 context 被取消时,任务会退出,最终流程会同主动停止一样
## 父任务停止
当父任务停止并销毁时,会按照以下步骤处理子任务:
### 步骤
1. **设置 EventLoop 的状态为停止状态**:调用 `stop()` 方法设置 status = -1,防止继续添加子任务
2. **激活 EventLoop 处理剩余任务**:调用 `active()` 方法,即使状态为 -1 也能处理剩余的子任务
3. **停止所有子任务**:调用所有子任务的 Stop 方法
4. **等待子任务销毁完成**:等待 EventLoop 处理完所有子任务的销毁工作
### 设计要点
- EventLoop 的 `active()` 方法允许在状态为 -1 时调用,以确保剩余的子任务能被正确处理
- 使用互斥锁保护状态转换,避免竞态条件
- 先停止再处理剩余任务,确保不会添加新的子任务
## 竞态条件处理
为了确保任务系统的线程安全,我们采取了以下措施:
### 状态管理
- 使用 `sync.RWMutex` 保护 EventLoop 的状态转换
- `add()` 方法使用读锁检查状态,防止在停止后添加新任务
- `stop()` 方法使用写锁设置状态,确保原子性
### EventLoop 生命周期
- EventLoop 只有在状态从 0(ready)转换到 1(running)时才启动新的 goroutine
- 即使状态为 -1(stopped),`active()` 方法仍可被调用以处理剩余任务
- 使用 `hasPending` 标志和互斥锁跟踪待处理任务,避免频繁检查 channel 长度
### 任务添加
- 添加任务时会检查 EventLoop 状态,如果已停止则返回 `ErrDisposed`
- 使用 `pendingMux` 保护 `hasPending` 标志,避免竞态条件

View File

@@ -1,34 +0,0 @@
package task
// CallBackTask adapts a pair of plain functions (start / dispose) into
// a Task so they can be scheduled on a Job's event loop.
type CallBackTask struct {
	Task
	startHandler   func() error // invoked by Start
	disposeHandler func()       // invoked by Dispose; may be nil
}

// GetTaskType reports this task as a callback-style task.
func (t *CallBackTask) GetTaskType() TaskType {
	return TASK_TYPE_CALL
}

// Start runs the wrapped start handler and returns its error.
func (t *CallBackTask) Start() error {
	return t.startHandler()
}

// Dispose runs the wrapped dispose handler, if one was provided.
func (t *CallBackTask) Dispose() {
	if t.disposeHandler != nil {
		t.disposeHandler()
	}
}

// CreateTaskByCallBack builds a CallBackTask from start and dispose.
// When dispose is nil and start succeeds, the start handler returns
// ErrTaskComplete so the task finishes immediately instead of lingering
// (there is nothing left to tear down).
func CreateTaskByCallBack(start func() error, dispose func()) *CallBackTask {
	var task CallBackTask
	task.startHandler = func() error {
		err := start()
		if err == nil && dispose == nil {
			err = ErrTaskComplete
		}
		return err
	}
	task.disposeHandler = dispose
	return &task
}

View File

@@ -42,6 +42,9 @@ func (t *TickTask) GetTickInterval() time.Duration {
func (t *TickTask) Start() (err error) {
t.Ticker = time.NewTicker(t.handler.(ITickTask).GetTickInterval())
t.SignalChan = t.Ticker.C
t.OnStop(func() {
t.Ticker.Reset(time.Millisecond)
})
return
}

167
pkg/task/event_loop.go Normal file
View File

@@ -0,0 +1,167 @@
package task
import (
"errors"
"reflect"
"runtime/debug"
"slices"
"sync"
"sync/atomic"
)
// Singleton lazily creates and caches a single value of type T.
// The zero value is ready to use. Reads take a lock-free fast path via
// atomic.Value; first-time initialization is serialized by the mutex
// (double-checked locking), so the factory runs at most once.
type Singleton[T comparable] struct {
	instance atomic.Value
	mux      sync.Mutex
}

// Load returns the cached value. It panics if the value has never been
// initialized via Get (the type assertion on an empty atomic.Value).
func (s *Singleton[T]) Load() T {
	return s.instance.Load().(T)
}

// Get returns the cached value, invoking newF to create it on first
// use. Concurrent first calls are serialized; exactly one newF result
// is ever stored.
func (s *Singleton[T]) Get(newF func() T) T {
	// Fast path: already initialized, no locking needed.
	if v := s.instance.Load(); v != nil {
		return v.(T)
	}
	// Slow path: lock and re-check before creating, so racing callers
	// don't each run the factory.
	s.mux.Lock()
	defer s.mux.Unlock()
	if v := s.instance.Load(); v != nil {
		return v.(T)
	}
	created := newF()
	s.instance.Store(created)
	return created
}
// EventLoop multiplexes a Job's child tasks over a single goroutine
// using reflect.Select. The goroutine is created lazily (see active)
// and exits when there is nothing left to do, to save resources.
type EventLoop struct {
	cases    []reflect.SelectCase // select set: index 0 is the input channel, the rest mirror children
	children []ITask              // started children, parallel to cases[1:]
	addSub   Singleton[chan any]  // lazily-created input channel (receives ITask or func())
	running  atomic.Bool          // true while the run goroutine is alive
}

// getInput returns the shared input channel, creating it (buffered to
// 20 entries) on first use.
func (e *EventLoop) getInput() chan any {
	return e.addSub.Get(func() chan any {
		return make(chan any, 20)
	})
}
// active ensures the loop goroutine for mt is running, first waking
// every ancestor job's loop so the whole chain can process events.
// The CompareAndSwap guarantees at most one run goroutine per loop.
func (e *EventLoop) active(mt *Job) {
	if mt.parent != nil {
		mt.parent.eventLoop.active(mt.parent)
	}
	if e.running.CompareAndSwap(false, true) {
		go e.run(mt)
	}
}
// add queues sub (an ITask or a func() callback) onto mt's event loop
// without blocking. It returns ErrTooManyChildren when the input
// buffer is full.
func (e *EventLoop) add(mt *Job, sub any) (err error) {
	shouldActive := true
	switch sub.(type) {
	case TaskStarter, TaskBlock, TaskGo:
		// Has real work to run: wake the loop immediately.
	case IJob:
		// An empty job: start its loop lazily, only when it gains children.
		// NOTE(review): a value implementing both IJob and one of the
		// interfaces above matches the first case — confirm intended.
		shouldActive = false
	}
	select {
	case e.getInput() <- sub:
		// Also wake the loop when the job is stopped so the queued item
		// still gets drained (and rejected/disposed) instead of leaking.
		if shouldActive || mt.IsStopped() {
			e.active(mt)
		}
		return nil
	default:
		return ErrTooManyChildren
	}
}
// run is the event-loop goroutine body for job mt. It selects over the
// input channel (cases[0]) and every started child's signal channel,
// starting/ticking/disposing/retrying children as events arrive. It
// exits when there are no pending inputs and no live children; the
// deferred block recovers panics and auto-stops non-keepalive jobs.
func (e *EventLoop) run(mt *Job) {
	mt.Debug("event loop start", "jobId", mt.GetTaskID(), "type", mt.GetOwnerType())
	ch := e.getInput()
	// cases[0] is always the input channel; children append after it.
	e.cases = []reflect.SelectCase{{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)}}
	defer func() {
		err := recover()
		if err != nil {
			mt.Error("job panic", "err", err, "stack", string(debug.Stack()))
			if !ThrowPanic {
				mt.Stop(errors.Join(err.(error), ErrPanic))
			} else {
				panic(err)
			}
		}
		mt.Debug("event loop exit", "jobId", mt.GetTaskID(), "type", mt.GetOwnerType())
		// A job that does not keep itself alive stops once its loop drains,
		// propagating the blocking child's stop reason when there is one.
		if !mt.handler.keepalive() {
			if mt.blocked != nil {
				mt.Stop(errors.Join(mt.blocked.StopReason(), ErrAutoStop))
			} else {
				mt.Stop(ErrAutoStop)
			}
		}
		mt.blocked = nil
	}()
	// Main event loop - only exit when no more events AND no children
	for {
		if len(ch) == 0 && len(e.children) == 0 {
			if e.running.CompareAndSwap(true, false) {
				// Re-check after flipping running to false: an add() racing
				// with shutdown may have queued an item in between.
				if len(ch) > 0 { // if add before running set to false
					e.active(mt)
				}
				return
			}
		}
		mt.blocked = nil
		if chosen, rev, ok := reflect.Select(e.cases); chosen == 0 {
			// Event on the input channel: a new child task or a callback.
			if !ok {
				mt.Debug("job addSub channel closed, exiting", "taskId", mt.GetTaskID())
				mt.Stop(ErrAutoStop)
				return
			}
			switch v := rev.Interface().(type) {
			case func():
				// Posted callback: run inline on the loop goroutine.
				v()
			case ITask:
				// Guard the reflect.Select case limit (65536 cases max).
				if len(e.cases) >= 65535 {
					mt.Warn("task children too many, may cause performance issue", "count", len(e.cases), "taskId", mt.GetTaskID(), "taskType", mt.GetTaskType(), "ownerType", mt.GetOwnerType())
					v.Stop(ErrTooManyChildren)
					continue
				}
				if mt.blocked = v; v.start() {
					e.cases = append(e.cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(v.GetSignal())})
					e.children = append(e.children, v)
					mt.onChildStart(v)
				} else {
					// start() failed: drop it from the job's child map.
					mt.removeChild(v)
				}
			}
		} else {
			// Event from a child's signal channel.
			taskIndex := chosen - 1
			child := e.children[taskIndex]
			mt.blocked = child
			switch tt := mt.blocked.(type) {
			case IChannelTask:
				if tt.IsStopped() {
					// Tick tasks stop their ticker before disposal.
					switch ttt := tt.(type) {
					case ITickTask:
						ttt.GetTicker().Stop()
					}
					mt.onChildDispose(child)
					mt.removeChild(child)
					e.children = slices.Delete(e.children, taskIndex, taskIndex+1)
					e.cases = slices.Delete(e.cases, chosen, chosen+1)
				} else {
					// Forward the received value as a tick.
					tt.Tick(rev.Interface())
				}
			default:
				if !ok {
					// Child's signal channel closed: dispose, then retry in
					// place (reusing its select slot) if its policy allows.
					if mt.onChildDispose(child); child.checkRetry(child.StopReason()) {
						if child.reset(); child.start() {
							e.cases[chosen].Chan = reflect.ValueOf(child.GetSignal())
							mt.onChildStart(child)
							continue
						}
					}
					mt.removeChild(child)
					e.children = slices.Delete(e.children, taskIndex, taskIndex+1)
					e.cases = slices.Delete(e.cases, chosen, chosen+1)
				}
			}
		}
	}
}

View File

@@ -2,13 +2,9 @@ package task
import (
"context"
"errors"
"fmt"
"log/slog"
"reflect"
"runtime"
"runtime/debug"
"slices"
"strings"
"sync"
"sync/atomic"
@@ -32,15 +28,12 @@ func GetNextTaskID() uint32 {
// Job include tasks
type Job struct {
Task
cases []reflect.SelectCase
addSub chan ITask
children []ITask
lazyRun sync.Once
eventLoopLock sync.Mutex
childrenDisposed chan struct{}
children sync.Map
descendantsDisposeListeners []func(ITask)
descendantsStartListeners []func(ITask)
blocked ITask
eventLoop EventLoop
Size atomic.Int32
}
func (*Job) GetTaskType() TaskType {
@@ -55,18 +48,18 @@ func (mt *Job) Blocked() ITask {
return mt.blocked
}
func (mt *Job) waitChildrenDispose() {
defer func() {
// 忽略由于在任务关闭过程中可能存在竞态条件,当父任务关闭时子任务可能已经被释放。
if err := recover(); err != nil {
mt.Debug("waitChildrenDispose panic", "err", err)
}
mt.addSub <- nil
<-mt.childrenDisposed
}()
if blocked := mt.blocked; blocked != nil {
blocked.Stop(mt.StopReason())
}
func (mt *Job) EventLoopRunning() bool {
return mt.eventLoop.running.Load()
}
func (mt *Job) waitChildrenDispose(stopReason error) {
mt.eventLoop.active(mt)
mt.children.Range(func(key, value any) bool {
child := value.(ITask)
child.Stop(stopReason)
child.WaitStopped()
return true
})
}
func (mt *Job) OnDescendantsDispose(listener func(ITask)) {
@@ -83,12 +76,21 @@ func (mt *Job) onDescendantsDispose(descendants ITask) {
}
func (mt *Job) onChildDispose(child ITask) {
if child.GetTaskType() != TASK_TYPE_CALL || child.GetOwnerType() != "CallBack" {
mt.onDescendantsDispose(child)
}
mt.onDescendantsDispose(child)
child.dispose()
}
func (mt *Job) removeChild(child ITask) {
value, loaded := mt.children.LoadAndDelete(child.getKey())
if loaded {
if value != child {
panic("remove child")
}
remains := mt.Size.Add(-1)
mt.Debug("remove child", "id", child.GetTaskID(), "remains", remains)
}
}
func (mt *Job) OnDescendantsStart(listener func(ITask)) {
mt.descendantsStartListeners = append(mt.descendantsStartListeners, listener)
}
@@ -103,169 +105,98 @@ func (mt *Job) onDescendantsStart(descendants ITask) {
}
func (mt *Job) onChildStart(child ITask) {
if child.GetTaskType() != TASK_TYPE_CALL || child.GetOwnerType() != "CallBack" {
mt.onDescendantsStart(child)
}
mt.onDescendantsStart(child)
}
func (mt *Job) RangeSubTask(callback func(task ITask) bool) {
for _, task := range mt.children {
callback(task)
}
mt.children.Range(func(key, value any) bool {
callback(value.(ITask))
return true
})
}
func (mt *Job) AddDependTask(t ITask, opt ...any) (task *Task) {
mt.Depend(t)
t.Using(mt)
opt = append(opt, 1)
return mt.AddTask(t, opt...)
}
func (mt *Job) AddTask(t ITask, opt ...any) (task *Task) {
if task = t.GetTask(); t != task.handler { // first add
for _, o := range opt {
switch v := o.(type) {
case context.Context:
task.parentCtx = v
case Description:
task.SetDescriptions(v)
case RetryConfig:
task.retry = v
case *slog.Logger:
task.Logger = v
}
}
task.parent = mt
task.handler = t
switch t.(type) {
case TaskStarter, TaskBlock, TaskGo:
// need start now
case IJob:
// lazy start
return
func (mt *Job) initContext(task *Task, opt ...any) {
callDepth := 2
for _, o := range opt {
switch v := o.(type) {
case context.Context:
task.parentCtx = v
case Description:
task.SetDescriptions(v)
case RetryConfig:
task.retry = v
case *slog.Logger:
task.Logger = v
case int:
callDepth += v
}
}
_, file, line, ok := runtime.Caller(1)
_, file, line, ok := runtime.Caller(callDepth)
if ok {
task.StartReason = fmt.Sprintf("%s:%d", strings.TrimPrefix(file, sourceFilePathPrefix), line)
}
mt.lazyRun.Do(func() {
if mt.eventLoopLock.TryLock() {
defer mt.eventLoopLock.Unlock()
if mt.parent != nil && mt.Context == nil {
mt.parent.AddTask(mt.handler) // second add, lazy start
}
mt.childrenDisposed = make(chan struct{})
mt.addSub = make(chan ITask, 20)
go mt.run()
}
})
if task.Context == nil {
if task.parentCtx == nil {
task.parentCtx = mt.Context
}
task.level = mt.level + 1
if task.ID == 0 {
task.ID = GetNextTaskID()
}
task.Context, task.CancelCauseFunc = context.WithCancelCause(task.parentCtx)
task.startup = util.NewPromise(task.Context)
task.shutdown = util.NewPromise(context.Background())
task.handler = t
if task.Logger == nil {
task.Logger = mt.Logger
}
task.parent = mt
if task.parentCtx == nil {
task.parentCtx = mt.Context
}
task.level = mt.level + 1
if task.ID == 0 {
task.ID = GetNextTaskID()
}
task.Context, task.CancelCauseFunc = context.WithCancelCause(task.parentCtx)
task.startup = util.NewPromise(task.Context)
task.shutdown = util.NewPromise(context.Background())
if task.Logger == nil {
task.Logger = mt.Logger
}
}
func (mt *Job) AddTask(t ITask, opt ...any) (task *Task) {
task = t.GetTask()
task.handler = t
mt.initContext(task, opt...)
if mt.IsStopped() {
task.startup.Reject(mt.StopReason())
return
}
if len(mt.addSub) > 10 {
if mt.Logger != nil {
mt.Warn("task wait list too many", "count", len(mt.addSub), "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType(), "parent", mt.GetOwnerType())
}
actual, loaded := mt.children.LoadOrStore(t.getKey(), t)
if loaded {
task.startup.Reject(ExistTaskError{
Task: actual.(ITask),
})
return
}
mt.addSub <- t
var err error
defer func() {
if err != nil {
mt.children.Delete(t.getKey())
task.startup.Reject(err)
}
}()
if err = mt.eventLoop.add(mt, t); err != nil {
return
}
if mt.IsStopped() {
err = mt.StopReason()
return
}
remains := mt.Size.Add(1)
mt.Debug("child added", "id", task.ID, "remains", remains)
return
}
func (mt *Job) Call(callback func() error, args ...any) {
mt.Post(callback, args...).WaitStarted()
}
func (mt *Job) Post(callback func() error, args ...any) *Task {
task := CreateTaskByCallBack(callback, nil)
if len(args) > 0 {
task.SetDescription(OwnerTypeKey, args[0])
}
return mt.AddTask(task)
}
func (mt *Job) run() {
mt.cases = []reflect.SelectCase{{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(mt.addSub)}}
defer func() {
err := recover()
if err != nil {
if mt.Logger != nil {
mt.Logger.Error("job panic", "err", err, "stack", string(debug.Stack()))
}
if !ThrowPanic {
mt.Stop(errors.Join(err.(error), ErrPanic))
} else {
panic(err)
}
}
stopReason := mt.StopReason()
for _, task := range mt.children {
task.Stop(stopReason)
mt.onChildDispose(task)
}
mt.children = nil
close(mt.childrenDisposed)
}()
for {
mt.blocked = nil
if chosen, rev, ok := reflect.Select(mt.cases); chosen == 0 {
if rev.IsNil() {
return
}
if mt.blocked = rev.Interface().(ITask); mt.blocked.start() {
mt.children = append(mt.children, mt.blocked)
mt.cases = append(mt.cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(mt.blocked.GetSignal())})
mt.onChildStart(mt.blocked)
}
} else {
taskIndex := chosen - 1
mt.blocked = mt.children[taskIndex]
switch tt := mt.blocked.(type) {
case IChannelTask:
if tt.IsStopped() {
switch ttt := tt.(type) {
case ITickTask:
ttt.GetTicker().Stop()
}
mt.onChildDispose(mt.blocked)
mt.children = slices.Delete(mt.children, taskIndex, taskIndex+1)
mt.cases = slices.Delete(mt.cases, chosen, chosen+1)
} else {
tt.Tick(rev.Interface())
}
default:
if !ok {
if mt.onChildDispose(mt.blocked); mt.blocked.checkRetry(mt.blocked.StopReason()) {
if mt.blocked.reset(); mt.blocked.start() {
mt.cases[chosen].Chan = reflect.ValueOf(mt.blocked.GetSignal())
mt.onChildStart(mt.blocked)
continue
}
}
mt.children = slices.Delete(mt.children, taskIndex, taskIndex+1)
mt.cases = slices.Delete(mt.cases, chosen, chosen+1)
}
}
}
if !mt.handler.keepalive() && len(mt.children) == 0 {
mt.Stop(ErrAutoStop)
}
// Call executes callback on the job's event loop and blocks until it has
// run, serializing it against the job's children.
//
// Fix: cancel is now also deferred. Previously it was only invoked inside
// the queued closure, so if the event loop never executed the callback the
// derived context leaked until mt itself was done (go vet's lostcancel).
// cancel is idempotent, so the double call is harmless.
func (mt *Job) Call(callback func()) {
	// Fast path: no children means nothing contends with the callback.
	if mt.Size.Load() <= 0 {
		callback()
		return
	}
	// Deriving from mt means Done also fires when the job stops, so this
	// cannot deadlock even if the callback is never scheduled.
	ctx, cancel := context.WithCancel(mt)
	defer cancel()
	_ = mt.eventLoop.add(mt, func() { callback(); cancel() })
	<-ctx.Done()
}

View File

@@ -2,12 +2,21 @@ package task
import (
"errors"
"fmt"
. "m7s.live/v5/pkg/util"
)
var ErrExist = errors.New("exist")
// ExistTaskError reports that a task with the same key has already been
// added to a job; it is delivered by rejecting the new task's startup.
type ExistTaskError struct {
	Task ITask // the task already registered under the conflicting key
}

// Error implements the error interface using the existing task's key.
func (e ExistTaskError) Error() string {
	return fmt.Sprintf("%v exist", e.Task.getKey())
}
type ManagerItem[K comparable] interface {
ITask
GetKey() K
@@ -24,25 +33,31 @@ func (m *Manager[K, T]) Add(ctx T, opt ...any) *Task {
ctx.Stop(ErrExist)
return
}
if m.Logger != nil {
m.Logger.Debug("add", "key", ctx.GetKey(), "count", m.Length)
}
m.Debug("add", "key", ctx.GetKey(), "count", m.Length)
})
ctx.OnDispose(func() {
m.Remove(ctx)
if m.Logger != nil {
m.Logger.Debug("remove", "key", ctx.GetKey(), "count", m.Length)
}
m.Debug("remove", "key", ctx.GetKey(), "count", m.Length)
})
opt = append(opt, 1)
return m.AddTask(ctx, opt...)
}
// SafeHas reports whether key exists. Without an explicit lock (m.L == nil)
// the lookup is serialized through the manager's event loop via Call;
// otherwise the collection is consulted directly.
func (m *Manager[K, T]) SafeHas(key K) (ok bool) {
	if m.L != nil {
		return m.Collection.Has(key)
	}
	m.Call(func() {
		ok = m.Collection.Has(key)
	})
	return
}
// SafeGet 用于不同协程获取元素,防止并发请求
func (m *Manager[K, T]) SafeGet(key K) (item T, ok bool) {
if m.L == nil {
m.Call(func() error {
m.Call(func() {
item, ok = m.Collection.Get(key)
return nil
})
} else {
item, ok = m.Collection.Get(key)
@@ -53,9 +68,8 @@ func (m *Manager[K, T]) SafeGet(key K) (item T, ok bool) {
// SafeRange 用于不同协程获取元素,防止并发请求
func (m *Manager[K, T]) SafeRange(f func(T) bool) {
if m.L == nil {
m.Call(func() error {
m.Call(func() {
m.Collection.Range(f)
return nil
})
} else {
m.Collection.Range(f)
@@ -65,9 +79,8 @@ func (m *Manager[K, T]) SafeRange(f func(T) bool) {
// SafeFind 用于不同协程获取元素,防止并发请求
func (m *Manager[K, T]) SafeFind(f func(T) bool) (item T, ok bool) {
if m.L == nil {
m.Call(func() error {
m.Call(func() {
item, ok = m.Collection.Find(f)
return nil
})
} else {
item, ok = m.Collection.Find(f)

View File

@@ -3,4 +3,4 @@
package task
var ThrowPanic = true
var ThrowPanic = true

View File

@@ -22,15 +22,20 @@ func (o *OSSignal) Start() error {
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
o.SignalChan = signalChan
o.OnStop(func() {
signal.Stop(signalChan)
close(signalChan)
})
return nil
}
func (o *OSSignal) Tick(any) {
println("OSSignal Tick")
go o.root.Shutdown()
}
type RootManager[K comparable, T ManagerItem[K]] struct {
Manager[K, T]
WorkCollection[K, T]
}
func (m *RootManager[K, T]) Init() {

View File

@@ -4,9 +4,11 @@ import (
"context"
"errors"
"fmt"
"io"
"log/slog"
"maps"
"reflect"
"runtime"
"runtime/debug"
"strings"
"sync"
@@ -20,13 +22,16 @@ const TraceLevel = slog.Level(-8)
const OwnerTypeKey = "ownerType"
var (
ErrAutoStop = errors.New("auto stop")
ErrRetryRunOut = errors.New("retry out")
ErrStopByUser = errors.New("stop by user")
ErrRestart = errors.New("restart")
ErrTaskComplete = errors.New("complete")
ErrExit = errors.New("exit")
ErrPanic = errors.New("panic")
ErrAutoStop = errors.New("auto stop")
ErrRetryRunOut = errors.New("retry out")
ErrStopByUser = errors.New("stop by user")
ErrRestart = errors.New("restart")
ErrTaskComplete = errors.New("complete")
ErrTimeout = errors.New("timeout")
ErrExit = errors.New("exit")
ErrPanic = errors.New("panic")
ErrTooManyChildren = errors.New("too many children in job")
ErrDisposed = errors.New("disposed")
)
const (
@@ -44,7 +49,6 @@ const (
TASK_TYPE_JOB
TASK_TYPE_Work
TASK_TYPE_CHANNEL
TASK_TYPE_CALL
)
type (
@@ -70,14 +74,15 @@ type (
SetDescription(key string, value any)
SetDescriptions(value Description)
SetRetry(maxRetry int, retryInterval time.Duration)
Depend(ITask)
Using(resource ...any)
OnStop(any)
OnStart(func())
OnBeforeDispose(func())
OnDispose(func())
GetState() TaskState
GetLevel() byte
WaitStopped() error
WaitStarted() error
getKey() any
}
IJob interface {
ITask
@@ -87,8 +92,8 @@ type (
OnDescendantsDispose(func(ITask))
OnDescendantsStart(func(ITask))
Blocked() ITask
Call(func() error, ...any)
Post(func() error, ...any) *Task
EventLoopRunning() bool
Call(func())
}
IChannelTask interface {
ITask
@@ -117,18 +122,21 @@ type (
ID uint32
StartTime time.Time
StartReason string
*slog.Logger
Logger *slog.Logger
context.Context
context.CancelCauseFunc
handler ITask
retry RetryConfig
afterStartListeners, beforeDisposeListeners, afterDisposeListeners []func()
description sync.Map
startup, shutdown *util.Promise
parent *Job
parentCtx context.Context
state TaskState
level byte
handler ITask
retry RetryConfig
afterStartListeners, afterDisposeListeners []func()
closeOnStop []any
resources []any
stopOnce sync.Once
description sync.Map
startup, shutdown *util.Promise
parent *Job
parentCtx context.Context
state TaskState
level byte
}
)
@@ -182,12 +190,19 @@ func (task *Task) GetKey() uint32 {
return task.ID
}
// getKey invokes the handler's GetKey method through reflection and returns
// the result boxed as any, letting heterogeneous child tasks be indexed by
// their own key type. Panics if the handler has no GetKey method (the zero
// reflect.Value's Call panics).
func (task *Task) getKey() any {
	return reflect.ValueOf(task.handler).MethodByName("GetKey").Call(nil)[0].Interface()
}
// WaitStarted blocks until the task has started, returning the startup
// error if it failed. A task that was never scheduled has no startup
// promise and is treated as already started.
func (task *Task) WaitStarted() error {
	p := task.startup
	if p == nil {
		return nil
	}
	return p.Await()
}
func (task *Task) WaitStopped() (err error) {
err = task.startup.Await()
err = task.WaitStarted()
if err != nil {
return err
}
@@ -198,7 +213,11 @@ func (task *Task) WaitStopped() (err error) {
}
func (task *Task) Trace(msg string, fields ...any) {
task.Log(task.Context, TraceLevel, msg, fields...)
if task.Logger == nil {
slog.Default().Log(task.Context, TraceLevel, msg, fields...)
return
}
task.Logger.Log(task.Context, TraceLevel, msg, fields...)
}
func (task *Task) IsStopped() bool {
@@ -224,32 +243,50 @@ func (task *Task) Stop(err error) {
task.Error("task stop with nil error", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType(), "parent", task.GetParent().GetOwnerType())
panic("task stop with nil error")
}
if task.CancelCauseFunc != nil {
if tt := task.handler.GetTaskType(); task.Logger != nil && tt != TASK_TYPE_CALL {
task.Debug("task stop", "reason", err, "elapsed", time.Since(task.StartTime), "taskId", task.ID, "taskType", tt, "ownerType", task.GetOwnerType())
_, file, line, _ := runtime.Caller(1)
task.stopOnce.Do(func() {
if task.CancelCauseFunc != nil {
msg := "task stop"
if task.startup.IsRejected() {
msg = "task start failed"
}
task.Debug(msg, "caller", fmt.Sprintf("%s:%d", strings.TrimPrefix(file, sourceFilePathPrefix), line), "reason", err, "elapsed", time.Since(task.StartTime), "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType())
task.CancelCauseFunc(err)
}
task.CancelCauseFunc(err)
}
task.stop()
})
}
func (task *Task) Depend(t ITask) {
t.OnDispose(func() {
task.Stop(t.StopReason())
})
// stop runs every resource registered through OnStop, in registration
// order, then clears the list (keeping capacity so a restarted task can
// reuse it). Supported resource kinds: plain funcs, error-returning funcs,
// and child tasks, which are stopped with this task's stop reason.
func (task *Task) stop() {
	for _, resource := range task.closeOnStop {
		switch v := resource.(type) {
		case func():
			v()
		case func() error:
			// NOTE(review): the returned error is discarded — confirm no
			// caller depends on observing it.
			v()
		case ITask:
			v.Stop(task.StopReason())
		}
	}
	task.closeOnStop = task.closeOnStop[:0]
}
// OnStart registers listener to run after the task has started.
func (task *Task) OnStart(listener func()) {
	task.afterStartListeners = append(task.afterStartListeners, listener)
}
// OnBeforeDispose registers listener to run before the task is disposed.
func (task *Task) OnBeforeDispose(listener func()) {
	task.beforeDisposeListeners = append(task.beforeDisposeListeners, listener)
}
// OnDispose registers listener to run after the task has been disposed.
func (task *Task) OnDispose(listener func()) {
	task.afterDisposeListeners = append(task.afterDisposeListeners, listener)
}
// Using attaches resources to be released when the task is disposed (see
// dispose: funcs are invoked, ITasks stopped, Recyclables recycled,
// io.Closers closed).
func (task *Task) Using(resource ...any) {
	task.resources = append(task.resources, resource...)
}
// OnStop attaches a resource to be released as soon as the task is stopped
// (handled in stop: func / func() error / ITask).
func (task *Task) OnStop(resource any) {
	task.closeOnStop = append(task.closeOnStop, resource)
}
// GetSignal returns the channel the parent job selects on to observe this
// task's termination (the context's Done channel).
func (task *Task) GetSignal() any {
	return task.Done()
}
@@ -264,12 +301,10 @@ func (task *Task) checkRetry(err error) bool {
if task.retry.MaxRetry < 0 || task.retry.RetryCount < task.retry.MaxRetry {
task.retry.RetryCount++
task.SetDescription("retryCount", task.retry.RetryCount)
if task.Logger != nil {
if task.retry.MaxRetry < 0 {
task.Warn(fmt.Sprintf("retry %d/∞", task.retry.RetryCount), "taskId", task.ID)
} else {
task.Warn(fmt.Sprintf("retry %d/%d", task.retry.RetryCount, task.retry.MaxRetry), "taskId", task.ID)
}
if task.retry.MaxRetry < 0 {
task.Warn(fmt.Sprintf("retry %d/∞", task.retry.RetryCount), "taskId", task.ID)
} else {
task.Warn(fmt.Sprintf("retry %d/%d", task.retry.RetryCount, task.retry.MaxRetry), "taskId", task.ID)
}
if delta := time.Since(task.StartTime); delta < task.retry.RetryInterval {
time.Sleep(task.retry.RetryInterval - delta)
@@ -277,9 +312,7 @@ func (task *Task) checkRetry(err error) bool {
return true
} else {
if task.retry.MaxRetry > 0 {
if task.Logger != nil {
task.Warn(fmt.Sprintf("max retry %d failed", task.retry.MaxRetry))
}
task.Warn(fmt.Sprintf("max retry %d failed", task.retry.MaxRetry))
return false
}
}
@@ -292,17 +325,13 @@ func (task *Task) start() bool {
defer func() {
if r := recover(); r != nil {
err = errors.New(fmt.Sprint(r))
if task.Logger != nil {
task.Error("panic", "error", err, "stack", string(debug.Stack()))
}
task.Error("panic", "error", err, "stack", string(debug.Stack()))
}
}()
}
for {
task.StartTime = time.Now()
if tt := task.handler.GetTaskType(); task.Logger != nil && tt != TASK_TYPE_CALL {
task.Debug("task start", "taskId", task.ID, "taskType", tt, "ownerType", task.GetOwnerType(), "reason", task.StartReason)
}
task.Debug("task start", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType(), "reason", task.StartReason)
task.state = TASK_STATE_STARTING
if v, ok := task.handler.(TaskStarter); ok {
err = v.Start()
@@ -322,9 +351,7 @@ func (task *Task) start() bool {
task.ResetRetryCount()
if runHandler, ok := task.handler.(TaskBlock); ok {
task.state = TASK_STATE_RUNNING
if task.Logger != nil {
task.Debug("task run", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType())
}
task.Debug("task run", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType())
err = runHandler.Run()
if err == nil {
err = ErrTaskComplete
@@ -335,9 +362,7 @@ func (task *Task) start() bool {
if err == nil {
if goHandler, ok := task.handler.(TaskGo); ok {
task.state = TASK_STATE_GOING
if task.Logger != nil {
task.Debug("task go", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType())
}
task.Debug("task go", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType())
go task.run(goHandler.Go)
}
return true
@@ -354,6 +379,7 @@ func (task *Task) start() bool {
}
func (task *Task) reset() {
task.stopOnce = sync.Once{}
task.Context, task.CancelCauseFunc = context.WithCancelCause(task.parentCtx)
task.shutdown = util.NewPromise(context.Background())
task.startup = util.NewPromise(task.Context)
@@ -367,6 +393,10 @@ func (task *Task) GetDescriptions() map[string]string {
})
}
func (task *Task) GetDescription(key string) (any, bool) {
return task.description.Load(key)
}
func (task *Task) SetDescription(key string, value any) {
task.description.Store(key, value)
}
@@ -384,43 +414,41 @@ func (task *Task) SetDescriptions(value Description) {
func (task *Task) dispose() {
taskType, ownerType := task.handler.GetTaskType(), task.GetOwnerType()
if task.state < TASK_STATE_STARTED {
if task.Logger != nil && taskType != TASK_TYPE_CALL {
task.Debug("task dispose canceled", "taskId", task.ID, "taskType", taskType, "ownerType", ownerType, "state", task.state)
}
task.Debug("task dispose canceled", "taskId", task.ID, "taskType", taskType, "ownerType", ownerType, "state", task.state)
return
}
reason := task.StopReason()
task.state = TASK_STATE_DISPOSING
if task.Logger != nil {
if taskType != TASK_TYPE_CALL {
yargs := []any{"reason", reason, "taskId", task.ID, "taskType", taskType, "ownerType", ownerType}
task.Debug("task dispose", yargs...)
defer task.Debug("task disposed", yargs...)
}
}
befores := len(task.beforeDisposeListeners)
for i, listener := range task.beforeDisposeListeners {
task.SetDescription("disposeProcess", fmt.Sprintf("b:%d/%d", i, befores))
listener()
}
yargs := []any{"reason", reason, "taskId", task.ID, "taskType", taskType, "ownerType", ownerType}
task.Debug("task dispose", yargs...)
defer task.Debug("task disposed", yargs...)
if job, ok := task.handler.(IJob); ok {
mt := job.getJob()
task.SetDescription("disposeProcess", "wait children")
mt.eventLoopLock.Lock()
if mt.addSub != nil {
mt.waitChildrenDispose()
mt.lazyRun = sync.Once{}
}
mt.eventLoopLock.Unlock()
mt.waitChildrenDispose(reason)
}
task.SetDescription("disposeProcess", "self")
if v, ok := task.handler.(TaskDisposal); ok {
v.Dispose()
}
task.shutdown.Fulfill(reason)
afters := len(task.afterDisposeListeners)
task.SetDescription("disposeProcess", "resources")
task.stopOnce.Do(task.stop)
for _, resource := range task.resources {
switch v := resource.(type) {
case func():
v()
case ITask:
v.Stop(task.StopReason())
case util.Recyclable:
v.Recycle()
case io.Closer:
v.Close()
}
}
task.resources = task.resources[:0]
for i, listener := range task.afterDisposeListeners {
task.SetDescription("disposeProcess", fmt.Sprintf("a:%d/%d", i, afters))
task.SetDescription("disposeProcess", fmt.Sprintf("a:%d/%d", i, len(task.afterDisposeListeners)))
listener()
}
task.SetDescription("disposeProcess", "done")
@@ -441,9 +469,7 @@ func (task *Task) run(handler func() error) {
if !ThrowPanic {
if r := recover(); r != nil {
err = errors.New(fmt.Sprint(r))
if task.Logger != nil {
task.Error("panic", "error", err, "stack", string(debug.Stack()))
}
task.Error("panic", "error", err, "stack", string(debug.Stack()))
}
}
if err == nil {
@@ -454,3 +480,61 @@ func (task *Task) run(handler func() error) {
}()
err = handler()
}
// Debug logs at debug level through the task's logger, falling back to the
// process-wide default logger when none is configured.
func (task *Task) Debug(msg string, args ...any) {
	logger := task.Logger
	if logger == nil {
		logger = slog.Default()
	}
	logger.Debug(msg, args...)
}
// Info logs at info level through the task's logger, falling back to the
// process-wide default logger when none is configured.
func (task *Task) Info(msg string, args ...any) {
	logger := task.Logger
	if logger == nil {
		logger = slog.Default()
	}
	logger.Info(msg, args...)
}
// Warn logs at warn level through the task's logger, falling back to the
// process-wide default logger when none is configured.
func (task *Task) Warn(msg string, args ...any) {
	logger := task.Logger
	if logger == nil {
		logger = slog.Default()
	}
	logger.Warn(msg, args...)
}
// Error logs at error level through the task's logger, falling back to the
// process-wide default logger when none is configured.
func (task *Task) Error(msg string, args ...any) {
	logger := task.Logger
	if logger == nil {
		logger = slog.Default()
	}
	logger.Error(msg, args...)
}
// TraceEnabled reports whether trace-level logging is enabled for this task.
// Fix: guard against a nil Logger by falling back to slog.Default(), matching
// the sibling Debug/Info/Warn/Error helpers; previously this panicked when
// no logger was configured.
func (task *Task) TraceEnabled() bool {
	if task.Logger == nil {
		return slog.Default().Enabled(task.Context, TraceLevel)
	}
	return task.Logger.Enabled(task.Context, TraceLevel)
}
// RunTask runs t synchronously as a child of this task's job: it starts t,
// waits for it to finish, disposes it if it started, and returns its stop
// reason. The subtask is also registered via OnStop so it is stopped if the
// receiver stops first.
//
// Fix: when the job is already stopped, reject the *new* subtask's startup
// promise (tt.startup) — the previous code rejected the receiver's own
// startup, leaving callers waiting on tt.WaitStarted forever.
func (task *Task) RunTask(t ITask, opt ...any) (err error) {
	tt := t.GetTask()
	tt.handler = t
	// Resolve the owning job: the receiver itself when it is a job,
	// otherwise its parent.
	mt := task.parent
	if job, ok := task.handler.(IJob); ok {
		mt = job.getJob()
	}
	mt.initContext(tt, opt...)
	if mt.IsStopped() {
		err = mt.StopReason()
		tt.startup.Reject(err)
		return
	}
	task.OnStop(t)
	started := tt.start()
	<-tt.Done()
	if started {
		tt.dispose()
	}
	return tt.StopReason()
}

View File

@@ -24,9 +24,12 @@ func Test_AddTask_AddsTaskSuccessfully(t *testing.T) {
var task Task
root.AddTask(&task)
_ = task.WaitStarted()
if len(root.children) != 1 {
t.Errorf("expected 1 child task, got %d", len(root.children))
}
root.RangeSubTask(func(t ITask) bool {
if t.GetTaskID() == task.GetTaskID() {
return false
}
return true
})
}
type retryDemoTask struct {
@@ -51,9 +54,9 @@ func Test_RetryTask(t *testing.T) {
func Test_Call_ExecutesCallback(t *testing.T) {
called := false
root.Call(func() error {
root.Call(func() {
called = true
return nil
return
})
if !called {
t.Errorf("expected callback to be called")
@@ -162,6 +165,24 @@ func Test_StartFail(t *testing.T) {
}
}
func Test_Block(t *testing.T) {
var task Task
block := make(chan struct{})
var job Job
task.OnStart(func() {
task.OnStop(func() {
close(block)
})
<-block
})
time.AfterFunc(time.Second*2, func() {
job.Stop(ErrTaskComplete)
})
root.AddTask(&job)
job.AddTask(&task)
job.WaitStopped()
}
//
//type DemoTask struct {
// Task

View File

@@ -11,3 +11,57 @@ func (m *Work) keepalive() bool {
func (*Work) GetTaskType() TaskType {
return TASK_TYPE_Work
}
// WorkCollection is a Work whose children are addressable by a typed key K
// and exposed to callers as values of the element type T.
type WorkCollection[K comparable, T interface {
	ITask
	GetKey() K
}] struct {
	Work
}

// Find returns the first child of type T accepted by predicate f.
func (c *WorkCollection[K, T]) Find(f func(T) bool) (item T, ok bool) {
	c.RangeSubTask(func(task ITask) bool {
		v, isT := task.(T)
		if !isT || !f(v) {
			return true // keep scanning
		}
		item, ok = v, true
		return false // match found: stop iteration
	})
	return
}

// Get looks up a child by key and asserts it to T.
func (c *WorkCollection[K, T]) Get(key K) (item T, ok bool) {
	if value, loaded := c.children.Load(key); loaded {
		item, ok = value.(T)
	}
	return
}

// Range visits every child of type T until f returns false.
func (c *WorkCollection[K, T]) Range(f func(T) bool) {
	c.RangeSubTask(func(task ITask) bool {
		if v, isT := task.(T); isT {
			return f(v)
		}
		return true
	})
}

// Has reports whether a child with the given key exists.
func (c *WorkCollection[K, T]) Has(key K) (ok bool) {
	_, ok = c.children.Load(key)
	return
}

// ToList snapshots the children of type T into a freshly allocated slice.
func (c *WorkCollection[K, T]) ToList() (list []T) {
	c.Range(func(item T) bool {
		list = append(list, item)
		return true
	})
	return
}

// Length returns the current number of children.
func (c *WorkCollection[K, T]) Length() int {
	return int(c.Size.Load())
}

BIN
pkg/test.h264 Normal file

Binary file not shown.

View File

@@ -51,14 +51,12 @@ type (
LastDropLevelChange time.Time
DropFrameLevel int // 0: no drop, 1: drop P-frame, 2: drop all
}
AVTrack struct {
Track
*RingWriter
codec.ICodecCtx
Allocator *util.ScalableMemoryAllocator
SequenceFrame IAVFrame
WrapIndex int
Allocator *util.ScalableMemoryAllocator
WrapIndex int
TsTamer
SpeedController
DropController
@@ -71,11 +69,13 @@ func NewAVTrack(args ...any) (t *AVTrack) {
switch v := arg.(type) {
case IAVFrame:
t.FrameType = reflect.TypeOf(v)
t.Allocator = v.GetAllocator()
sample := v.GetSample()
t.Allocator = sample.GetAllocator()
t.ICodecCtx = sample.ICodecCtx
case reflect.Type:
t.FrameType = v
case *slog.Logger:
t.Logger = v
t.Logger = v.With("frameType", t.FrameType.String())
case *AVTrack:
t.Logger = v.Logger.With("subtrack", t.FrameType.String())
t.RingWriter = v.RingWriter
@@ -118,9 +118,25 @@ func (t *AVTrack) AddBytesIn(n int) {
}
}
func (t *AVTrack) AcceptFrame(data IAVFrame) {
func (t *AVTrack) FixTimestamp(data *Sample, scale float64) {
t.AddBytesIn(data.Size)
data.Timestamp = t.Tame(data.Timestamp, t.FPS, scale)
}
func (t *AVTrack) NewFrame(avFrame *AVFrame) (frame IAVFrame) {
frame = reflect.New(t.FrameType.Elem()).Interface().(IAVFrame)
if avFrame.Sample == nil {
avFrame.Sample = frame.GetSample()
}
if avFrame.BaseSample == nil {
avFrame.BaseSample = &BaseSample{}
}
frame.GetSample().BaseSample = avFrame.BaseSample
return
}
func (t *AVTrack) AcceptFrame() {
t.acceptFrameCount++
t.Value.Wraps = append(t.Value.Wraps, data)
}
func (t *AVTrack) changeDropFrameLevel(newLevel int) {
@@ -230,23 +246,28 @@ func (t *AVTrack) AddPausedTime(d time.Duration) {
t.pausedTime += d
}
func (s *SpeedController) speedControl(speed float64, ts time.Duration) {
if speed != s.speed || s.beginTime.IsZero() {
s.speed = speed
s.beginTime = time.Now()
s.beginTimestamp = ts
s.pausedTime = 0
func (t *AVTrack) speedControl(speed float64, ts time.Duration) {
if speed != t.speed || t.beginTime.IsZero() {
t.speed = speed
t.beginTime = time.Now()
t.beginTimestamp = ts
t.pausedTime = 0
} else {
elapsed := time.Since(s.beginTime) - s.pausedTime
elapsed := time.Since(t.beginTime) - t.pausedTime
if speed == 0 {
s.Delta = ts - elapsed
t.Delta = ts - elapsed
if t.Logger.Enabled(t.ready, task.TraceLevel) {
t.Trace("speed 0", "ts", ts, "elapsed", elapsed, "delta", t.Delta)
}
return
}
should := time.Duration(float64(ts-s.beginTimestamp) / speed)
s.Delta = should - elapsed
// fmt.Println(speed, elapsed, should, s.Delta)
if s.Delta > threshold {
time.Sleep(min(s.Delta, time.Millisecond*500))
should := time.Duration(float64(ts-t.beginTimestamp) / speed)
t.Delta = should - elapsed
if t.Delta > threshold {
if t.Logger.Enabled(t.ready, task.TraceLevel) {
t.Trace("speed control", "speed", speed, "elapsed", elapsed, "should", should, "delta", t.Delta)
}
time.Sleep(min(t.Delta, time.Millisecond*500))
}
}
}

View File

@@ -2,33 +2,55 @@ package util
import (
"errors"
"sync"
"unsafe"
)
type Buddy struct {
size int
longests []int
size int
longests [BuddySize>>(MinPowerOf2-1) - 1]int
memoryPool [BuddySize]byte
poolStart int64
lock sync.Mutex // 保护 longests 数组的并发访问
}
var (
InValidParameterErr = errors.New("buddy: invalid parameter")
NotFoundErr = errors.New("buddy: can't find block")
buddyPool = sync.Pool{
New: func() interface{} {
return NewBuddy()
},
}
)
// GetBuddy 从池中获取一个 Buddy 实例
func GetBuddy() *Buddy {
buddy := buddyPool.Get().(*Buddy)
return buddy
}
// PutBuddy 将 Buddy 实例放回池中
func PutBuddy(b *Buddy) {
buddyPool.Put(b)
}
// NewBuddy creates a buddy instance.
// If the parameter isn't valid, return the nil and error as well
func NewBuddy(size int) *Buddy {
if !isPowerOf2(size) {
size = fixSize(size)
func NewBuddy() *Buddy {
size := BuddySize >> MinPowerOf2
ret := &Buddy{
size: size,
}
nodeCount := 2*size - 1
longests := make([]int, nodeCount)
for nodeSize, i := 2*size, 0; i < nodeCount; i++ {
for nodeSize, i := 2*size, 0; i < len(ret.longests); i++ {
if isPowerOf2(i + 1) {
nodeSize /= 2
}
longests[i] = nodeSize
ret.longests[i] = nodeSize
}
return &Buddy{size, longests}
ret.poolStart = int64(uintptr(unsafe.Pointer(&ret.memoryPool[0])))
return ret
}
// Alloc find a unused block according to the size
@@ -42,6 +64,8 @@ func (b *Buddy) Alloc(size int) (offset int, err error) {
if !isPowerOf2(size) {
size = fixSize(size)
}
b.lock.Lock()
defer b.lock.Unlock()
if size > b.longests[0] {
err = NotFoundErr
return
@@ -70,6 +94,8 @@ func (b *Buddy) Free(offset int) error {
if offset < 0 || offset >= b.size {
return InValidParameterErr
}
b.lock.Lock()
defer b.lock.Unlock()
nodeSize := 1
index := offset + b.size - 1
for ; b.longests[index] != 0; index = parent(index) {

63
pkg/util/buddy_disable.go Normal file
View File

@@ -0,0 +1,63 @@
//go:build !enable_buddy
package util
import (
"sync"
"unsafe"
)
var pool0, pool1, pool2 sync.Pool
func init() {
pool0.New = func() any {
ret := createMemoryAllocator(defaultBufSize)
ret.recycle = func() {
pool0.Put(ret)
}
return ret
}
pool1.New = func() any {
ret := createMemoryAllocator(1 << MinPowerOf2)
ret.recycle = func() {
pool1.Put(ret)
}
return ret
}
pool2.New = func() any {
ret := createMemoryAllocator(1 << (MinPowerOf2 + 2))
ret.recycle = func() {
pool2.Put(ret)
}
return ret
}
}
// createMemoryAllocator builds a MemoryAllocator backed by a fresh size-byte
// slice from the Go heap, recording the slice's base address so offsets can
// be mapped back to pointers.
func createMemoryAllocator(size int) *MemoryAllocator {
	memory := make([]byte, size)
	ret := &MemoryAllocator{
		allocator: NewAllocator(size),
		Size:      size,
		memory:    memory,
		start:     int64(uintptr(unsafe.Pointer(&memory[0]))),
	}
	// NOTE(review): NewAllocator(size) followed by another Init(size) — looks
	// like a double initialization; confirm NewAllocator does not already init.
	ret.allocator.Init(size)
	return ret
}
// GetMemoryAllocator returns an allocator for size bytes, reusing one of
// the three fixed-size pools when size matches, otherwise allocating a
// fresh one. Pooled allocators are re-initialized before reuse.
func GetMemoryAllocator(size int) (ret *MemoryAllocator) {
	var p *sync.Pool
	switch size {
	case defaultBufSize:
		p = &pool0
	case 1 << MinPowerOf2:
		p = &pool1
	case 1 << (MinPowerOf2 + 2):
		p = &pool2
	default:
		// No pool for this size: build a one-off allocator.
		return createMemoryAllocator(size)
	}
	ret = p.Get().(*MemoryAllocator)
	ret.allocator.Init(size)
	return
}

44
pkg/util/buddy_enable.go Normal file
View File

@@ -0,0 +1,44 @@
//go:build enable_buddy
package util
import "unsafe"
// createMemoryAllocator carves a size-byte window out of buddy's backing
// pool starting at byte offset and wraps it in a MemoryAllocator. The
// recycle callback returns the region to the buddy (offset converted back
// to buddy block units via MinPowerOf2).
func createMemoryAllocator(size int, buddy *Buddy, offset int) *MemoryAllocator {
	ret := &MemoryAllocator{
		allocator: NewAllocator(size),
		Size:      size,
		memory:    buddy.memoryPool[offset : offset+size],
		start:     buddy.poolStart + int64(offset),
		recycle: func() {
			buddy.Free(offset >> MinPowerOf2)
		},
	}
	// NOTE(review): NewAllocator(size) followed by another Init(size) — looks
	// like a double initialization; confirm NewAllocator does not already init.
	ret.allocator.Init(size)
	return ret
}
// GetMemoryAllocator returns an allocator for size bytes. Small requests
// (below BuddySize) are carved out of pooled Buddy arenas; anything else —
// or a request the pooled buddies cannot satisfy — is backed by a plain
// heap slice.
//
// Fixes: the previous version looped forever with a defer inside the loop,
// which (a) accumulated one deferred PutBuddy per iteration and (b) could
// spin indefinitely because sync.Pool may hand back the same fragmented
// buddy on every Get. Attempts are now bounded, the buddy is returned to
// the pool immediately after the allocation attempt (it stays usable — the
// arena is shared and lock-protected), and on exhaustion we fall back to
// system memory.
func GetMemoryAllocator(size int) (ret *MemoryAllocator) {
	if size < BuddySize {
		requiredSize := size >> MinPowerOf2
		// Bounded number of attempts against pooled buddies; a fresh buddy
		// from pool.New can always satisfy a request this small, so in
		// practice this succeeds quickly.
		for attempts := 0; attempts < 4; attempts++ {
			buddy := GetBuddy()
			offset, err := buddy.Alloc(requiredSize)
			PutBuddy(buddy)
			if err == nil {
				return createMemoryAllocator(size, buddy, offset<<MinPowerOf2)
			}
		}
	}
	// Too large for a buddy arena, or the pooled buddies are fragmented:
	// use system memory.
	memory := make([]byte, size)
	start := int64(uintptr(unsafe.Pointer(&memory[0])))
	return &MemoryAllocator{
		allocator: NewAllocator(size),
		Size:      size,
		memory:    memory,
		start:     start,
	}
}

View File

@@ -4,7 +4,6 @@ import (
"io"
"net"
"net/textproto"
"os"
"strings"
)
@@ -15,8 +14,8 @@ type BufReader struct {
buf MemoryReader
totalRead int
BufLen int
Mouth chan []byte
feedData func() error
Dump *os.File
}
func NewBufReaderWithBufLen(reader io.Reader, bufLen int) (r *BufReader) {
@@ -62,8 +61,10 @@ func NewBufReaderBuffersChan(feedChan chan net.Buffers) (r *BufReader) {
return
}
func NewBufReaderChan(feedChan chan []byte) (r *BufReader) {
func NewBufReaderChan(bufferSize int) (r *BufReader) {
feedChan := make(chan []byte, bufferSize)
r = &BufReader{
Mouth: feedChan,
feedData: func() error {
data, ok := <-feedChan
if !ok {
@@ -81,6 +82,15 @@ func NewBufReaderChan(feedChan chan []byte) (r *BufReader) {
return
}
func (r *BufReader) Feed(data []byte) bool {
select {
case r.Mouth <- data:
return true
default:
return false
}
}
func NewBufReader(reader io.Reader) (r *BufReader) {
return NewBufReaderWithBufLen(reader, defaultBufSize)
}
@@ -90,6 +100,9 @@ func (r *BufReader) Recycle() {
if r.Allocator != nil {
r.Allocator.Recycle()
}
if r.Mouth != nil {
close(r.Mouth)
}
}
func (r *BufReader) Buffered() int {
@@ -176,9 +189,6 @@ func (r *BufReader) ReadRange(n int, yield func([]byte)) (err error) {
func (r *BufReader) Read(to []byte) (n int, err error) {
n = len(to)
err = r.ReadNto(n, to)
if r.Dump != nil {
r.Dump.Write(to)
}
return
}
@@ -199,7 +209,7 @@ func (r *BufReader) ReadString(n int) (s string, err error) {
}
func (r *BufReader) ReadBytes(n int) (mem Memory, err error) {
err = r.ReadRange(n, mem.AppendOne)
err = r.ReadRange(n, mem.PushOne)
return
}

View File

@@ -24,7 +24,7 @@ func TestReadBytesTo(t *testing.T) {
s := RandomString(100)
t.Logf("s:%s", s)
var m Memory
m.AppendOne([]byte(s))
m.PushOne([]byte(s))
r := m.NewReader()
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
var total []byte
@@ -34,7 +34,7 @@ func TestReadBytesTo(t *testing.T) {
continue
}
buf := make([]byte, i)
n := r.ReadBytesTo(buf)
n, _ := r.Read(buf)
t.Logf("n:%d buf:%s", n, string(buf))
total = append(total, buf[:n]...)
if n == 0 {

View File

@@ -101,23 +101,6 @@ func (c *Collection[K, T]) RemoveByKey(key K) bool {
return false
}
// func (c *Collection[K, T]) GetOrCreate(key K) (item T, find bool) {
// if c.L != nil {
// c.L.Lock()
// defer c.L.Unlock()
// }
// if c.m != nil {
// item, find = c.m[key]
// return item, find
// }
// for _, item = range c.Items {
// if item.GetKey() == key {
// return item, true
// }
// }
// item = reflect.New(reflect.TypeOf(item).Elem()).Interface().(T)
// return
// }
func (c *Collection[K, T]) Has(key K) bool {
_, ok := c.Get(key)
return ok
@@ -169,10 +152,6 @@ func (c *Collection[K, T]) Search(f func(T) bool) func(yield func(item T) bool)
}
}
func (c *Collection[K, T]) GetKey() K {
return c.Items[0].GetKey()
}
func (c *Collection[K, T]) Clear() {
if c.L != nil {
c.L.Lock()

View File

@@ -0,0 +1,60 @@
package util
import (
"io"
"net"
"net/http"
"time"
"github.com/gobwas/ws/wsutil"
)
// HTTP_WS_Writer streams binary data to either a plain HTTP response or a
// WebSocket connection, depending on how ServeHTTP wired it up.
type HTTP_WS_Writer struct {
	io.Writer                  // destination: hijacked conn or the ResponseWriter
	Conn         net.Conn      // underlying connection when hijacked or websocket
	ContentType  string        // Content-Type header sent on the plain-HTTP path
	WriteTimeout time.Duration // per-write deadline; 0 disables deadlines
	IsWebSocket  bool          // when true, Write buffers and Flush sends one binary frame
	buffer       []byte        // pending websocket payload accumulated by Write
}
// Write forwards p to the underlying writer. In websocket mode the bytes
// are only accumulated; they are sent as a single binary frame by Flush.
// On the plain-HTTP path a write deadline is armed first when configured.
func (m *HTTP_WS_Writer) Write(p []byte) (n int, err error) {
	if m.IsWebSocket {
		m.buffer = append(m.buffer, p...)
		return len(p), nil
	}
	if t := m.WriteTimeout; t > 0 && m.Conn != nil {
		m.Conn.SetWriteDeadline(time.Now().Add(t))
	}
	return m.Writer.Write(p)
}
// Flush sends the buffered payload as one websocket binary frame and resets
// the buffer (keeping its capacity). On the plain-HTTP path it is a no-op.
func (m *HTTP_WS_Writer) Flush() (err error) {
	if !m.IsWebSocket {
		return nil
	}
	if m.WriteTimeout > 0 {
		m.Conn.SetWriteDeadline(time.Now().Add(m.WriteTimeout))
	}
	err = wsutil.WriteServerBinary(m.Conn, m.buffer)
	m.buffer = m.buffer[:0]
	return
}
// ServeHTTP selects the output path. With no pre-set Conn it answers as a
// chunked HTTP stream, hijacking the connection when possible (so write
// deadlines can be applied); with a pre-set Conn it switches to websocket
// mode, where Write buffers and Flush frames the data.
//
// Fix: the Hijack error was previously discarded — on failure m.Writer was
// assigned a nil net.Conn and every later Write panicked. A failed hijack
// now falls back to writing through the ResponseWriter.
func (m *HTTP_WS_Writer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	if m.Conn == nil {
		w.Header().Set("Transfer-Encoding", "chunked")
		w.Header().Set("Content-Type", m.ContentType)
		w.WriteHeader(http.StatusOK)
		// Hijack only when a write timeout is configured: deadlines need the
		// raw connection.
		if hijacker, ok := w.(http.Hijacker); ok && m.WriteTimeout > 0 {
			if conn, _, err := hijacker.Hijack(); err == nil && conn != nil {
				m.Conn = conn
				m.Conn.SetWriteDeadline(time.Now().Add(m.WriteTimeout))
				m.Writer = m.Conn
				return
			}
			// Hijack failed: fall through to the plain ResponseWriter path.
		}
		m.Writer = w
		w.(http.Flusher).Flush()
	} else {
		m.IsWebSocket = true
		m.Writer = m.Conn
	}
}

View File

@@ -16,6 +16,10 @@ type ReadWriteSeekCloser interface {
io.Closer
}
type Recyclable interface {
Recycle()
}
type Object = map[string]any
func Conditional[T any](cond bool, t, f T) T {
@@ -70,3 +74,59 @@ func Exist(filename string) bool {
_, err := os.Stat(filename)
return err == nil || os.IsExist(err)
}
type ReuseArray[T any] []T
func (s *ReuseArray[T]) GetNextPointer() (r *T) {
ss := *s
l := len(ss)
if cap(ss) > l {
ss = ss[:l+1]
} else {
var new T
ss = append(ss, new)
}
*s = ss
r = &((ss)[l])
if resetter, ok := any(r).(Resetter); ok {
resetter.Reset()
}
return r
}
func (s ReuseArray[T]) RangePoint(f func(yield *T) bool) {
for i := range len(s) {
if !f(&s[i]) {
return
}
}
}
func (s *ReuseArray[T]) Reset() {
*s = (*s)[:0]
}
func (s *ReuseArray[T]) Reduce() ReuseArray[T] {
ss := *s
ss = ss[:len(ss)-1]
*s = ss
return ss
}
func (s *ReuseArray[T]) Remove(item *T) bool {
for i := range *s {
if &(*s)[i] == item {
*s = append((*s)[:i], (*s)[i+1:]...)
return true
}
}
return false
}
func (s *ReuseArray[T]) Count() int {
return len(*s)
}
type Resetter interface {
Reset()
}

View File

@@ -1,7 +1,110 @@
package util
import (
"io"
"net"
"slices"
)
const (
	MaxBlockSize = 1 << 22           // 4 MiB — largest single block size
	BuddySize    = MaxBlockSize << 7 // 512 MiB — total bytes managed per Buddy arena; TODO confirm this sizing is intended
	MinPowerOf2  = 10                // block granularity exponent: minimum unit is 1<<10 = 1 KiB
)
type Memory struct {
Size int
Buffers [][]byte
}
func NewMemory(buf []byte) Memory {
return Memory{
Buffers: net.Buffers{buf},
Size: len(buf),
}
}
func (m *Memory) WriteTo(w io.Writer) (n int64, err error) {
copy := net.Buffers(slices.Clone(m.Buffers))
return copy.WriteTo(w)
}
func (m *Memory) Reset() {
m.Buffers = m.Buffers[:0]
m.Size = 0
}
func (m *Memory) UpdateBuffer(index int, buf []byte) {
if index < 0 {
index = len(m.Buffers) + index
}
m.Size = len(buf) - len(m.Buffers[index])
m.Buffers[index] = buf
}
func (m *Memory) CopyFrom(b *Memory) {
buf := make([]byte, b.Size)
b.CopyTo(buf)
m.PushOne(buf)
}
func (m *Memory) Equal(b *Memory) bool {
if m.Size != b.Size || len(m.Buffers) != len(b.Buffers) {
return false
}
for i, buf := range m.Buffers {
if !slices.Equal(buf, b.Buffers[i]) {
return false
}
}
return true
}
func (m *Memory) CopyTo(buf []byte) {
for _, b := range m.Buffers {
l := len(b)
copy(buf, b)
buf = buf[l:]
}
}
func (m *Memory) ToBytes() []byte {
buf := make([]byte, m.Size)
m.CopyTo(buf)
return buf
}
func (m *Memory) PushOne(b []byte) {
m.Buffers = append(m.Buffers, b)
m.Size += len(b)
}
func (m *Memory) Push(b ...[]byte) {
m.Buffers = append(m.Buffers, b...)
for _, level0 := range b {
m.Size += len(level0)
}
}
func (m *Memory) Append(mm Memory) *Memory {
m.Buffers = append(m.Buffers, mm.Buffers...)
m.Size += mm.Size
return m
}
func (m *Memory) Count() int {
return len(m.Buffers)
}
// Range invokes yield once per buffer, in order. The buffer count is
// captured once up front, matching range-over-int semantics.
func (m *Memory) Range(yield func([]byte)) {
	n := m.Count()
	for i := 0; i < n; i++ {
		yield(m.Buffers[i])
	}
}
// NewReader returns a MemoryReader positioned at the start of m, with the
// entire Size remaining to be read.
func (m *Memory) NewReader() MemoryReader {
	var r MemoryReader
	r.Memory = m
	r.Length = m.Size
	return r
}

View File

@@ -2,93 +2,23 @@ package util
import (
"io"
"net"
"slices"
)
type Memory struct {
Size int
net.Buffers
}
type MemoryReader struct {
*Memory
Length int
offset0 int
offset1 int
Length, offset0, offset1 int
}
func NewReadableBuffersFromBytes(b ...[]byte) *MemoryReader {
func NewReadableBuffersFromBytes(b ...[]byte) MemoryReader {
buf := &Memory{Buffers: b}
for _, level0 := range b {
buf.Size += len(level0)
}
return &MemoryReader{Memory: buf, Length: buf.Size}
return MemoryReader{Memory: buf, Length: buf.Size}
}
func NewMemory(buf []byte) Memory {
return Memory{
Buffers: net.Buffers{buf},
Size: len(buf),
}
}
// UpdateBuffer replaces the buffer at index (negative values count from
// the end) and adjusts Size by the length difference.
func (m *Memory) UpdateBuffer(index int, buf []byte) {
	if index < 0 {
		index = len(m.Buffers) + index
	}
	// Fix: adjust the running total instead of overwriting it with the delta.
	m.Size += len(buf) - len(m.Buffers[index])
	m.Buffers[index] = buf
}
// CopyFrom deep-copies b's bytes into one new buffer and appends it to m.
func (m *Memory) CopyFrom(b *Memory) {
	buf := make([]byte, b.Size)
	b.CopyTo(buf)
	m.AppendOne(buf)
}
// CopyTo copies all buffered bytes into buf, front to back. buf must be
// at least m.Size bytes long or the re-slice below panics.
func (m *Memory) CopyTo(buf []byte) {
	for _, b := range m.Buffers {
		l := len(b)
		copy(buf, b)
		buf = buf[l:]
	}
}
// ToBytes flattens the buffer list into a single freshly allocated slice.
func (m *Memory) ToBytes() []byte {
	buf := make([]byte, m.Size)
	m.CopyTo(buf)
	return buf
}
// AppendOne appends a single buffer (shared, not copied) and grows Size
// by its length.
func (m *Memory) AppendOne(b []byte) {
	m.Buffers = append(m.Buffers, b)
	m.Size += len(b)
}
// Append appends any number of buffers and grows Size by their total
// length.
func (m *Memory) Append(b ...[]byte) {
	m.Buffers = append(m.Buffers, b...)
	for _, level0 := range b {
		m.Size += len(level0)
	}
}
// Count reports how many buffers are currently held.
func (m *Memory) Count() int {
	return len(m.Buffers)
}
// Range calls yield once per buffer, in order. The count is evaluated
// once by range-over-int, so buffers appended by yield are not visited.
func (m *Memory) Range(yield func([]byte)) {
	for i := range m.Count() {
		yield(m.Buffers[i])
	}
}
// NewReader returns a reader positioned at the start of m with the full
// Size available to read.
func (m *Memory) NewReader() *MemoryReader {
	var reader MemoryReader
	reader.Memory = m
	reader.Length = m.Size
	return &reader
}
var _ io.Reader = (*MemoryReader)(nil)
func (r *MemoryReader) Offset() int {
return r.Size - r.Length
@@ -108,9 +38,9 @@ func (r *MemoryReader) MoveToEnd() {
r.Length = 0
}
func (r *MemoryReader) ReadBytesTo(buf []byte) (actual int) {
func (r *MemoryReader) Read(buf []byte) (actual int, err error) {
if r.Length == 0 {
return 0
return 0, io.EOF
}
n := len(buf)
curBuf := r.GetCurrent()
@@ -142,6 +72,7 @@ func (r *MemoryReader) ReadBytesTo(buf []byte) (actual int) {
actual += curBufLen
r.skipBuf()
if r.Length == 0 && n > 0 {
err = io.EOF
return
}
}
@@ -204,6 +135,9 @@ func (r *MemoryReader) getCurrentBufLen() int {
return len(r.Memory.Buffers[r.offset0]) - r.offset1
}
func (r *MemoryReader) Skip(n int) error {
if n <= 0 {
return nil
}
if n > r.Length {
return io.EOF
}
@@ -248,8 +182,8 @@ func (r *MemoryReader) ReadBytes(n int) ([]byte, error) {
return nil, io.EOF
}
b := make([]byte, n)
actual := r.ReadBytesTo(b)
return b[:actual], nil
actual, err := r.Read(b)
return b[:actual], err
}
func (r *MemoryReader) ReadBE(n int) (num uint32, err error) {

View File

@@ -22,13 +22,13 @@ func NewPromiseWithTimeout(ctx context.Context, timeout time.Duration) *Promise
p := &Promise{}
p.Context, p.CancelCauseFunc = context.WithCancelCause(ctx)
p.timer = time.AfterFunc(timeout, func() {
p.CancelCauseFunc(ErrTimeout)
p.CancelCauseFunc(errTimeout)
})
return p
}
var ErrResolve = errors.New("promise resolved")
var ErrTimeout = errors.New("promise timeout")
var errTimeout = errors.New("promise timeout")
func (p *Promise) Resolve() {
p.Fulfill(nil)
@@ -47,6 +47,10 @@ func (p *Promise) Await() (err error) {
return
}
// IsRejected reports whether the promise settled with anything other than
// the ErrResolve sentinel (i.e. it was rejected or timed out).
// NOTE(review): a still-pending promise has context.Cause == nil, which
// also compares unequal to ErrResolve and so reads as rejected — confirm
// callers only consult this after completion.
func (p *Promise) IsRejected() bool {
	return context.Cause(p.Context) != ErrResolve
}
func (p *Promise) Fulfill(err error) {
if p.timer != nil {
p.timer.Stop()

View File

@@ -4,12 +4,26 @@ package util
import (
"io"
"slices"
)
type RecyclableMemory struct {
Memory
}
// NewRecyclableMemory constructs an empty RecyclableMemory. The allocator
// parameter is accepted for signature compatibility but is ignored here:
// this variant of the type carries no allocator state.
func NewRecyclableMemory(allocator *ScalableMemoryAllocator) RecyclableMemory {
	return RecyclableMemory{}
}
// Clone returns a copy whose buffer list can be appended to independently.
// Only the slice of buffer headers is cloned; the underlying byte data is
// still shared with the receiver.
func (r *RecyclableMemory) Clone() RecyclableMemory {
	return RecyclableMemory{
		Memory: Memory{
			Buffers: slices.Clone(r.Buffers),
			Size:    r.Size,
		},
	}
}
// InitRecycleIndexes is a deliberate no-op in this variant: with no
// allocator there is nothing to track for recycling, but the method is
// kept so existing callers compile unchanged.
func (r *RecyclableMemory) InitRecycleIndexes(max int) {
}

View File

@@ -3,11 +3,9 @@
package util
import (
"container/list"
"fmt"
"io"
"slices"
"sync"
"unsafe"
)
@@ -17,8 +15,14 @@ type RecyclableMemory struct {
recycleIndexes []int
}
func NewRecyclableMemory(allocator *ScalableMemoryAllocator) RecyclableMemory {
return RecyclableMemory{allocator: allocator}
}
func (r *RecyclableMemory) InitRecycleIndexes(max int) {
r.recycleIndexes = make([]int, 0, max)
if r.recycleIndexes == nil {
r.recycleIndexes = make([]int, 0, max)
}
}
func (r *RecyclableMemory) GetAllocator() *ScalableMemoryAllocator {
@@ -30,7 +34,7 @@ func (r *RecyclableMemory) NextN(size int) (memory []byte) {
if r.recycleIndexes != nil {
r.recycleIndexes = append(r.recycleIndexes, r.Count())
}
r.AppendOne(memory)
r.PushOne(memory)
return
}
@@ -38,7 +42,7 @@ func (r *RecyclableMemory) AddRecycleBytes(b []byte) {
if r.recycleIndexes != nil {
r.recycleIndexes = append(r.recycleIndexes, r.Count())
}
r.AppendOne(b)
r.PushOne(b)
}
func (r *RecyclableMemory) SetAllocator(allocator *ScalableMemoryAllocator) {
@@ -56,55 +60,22 @@ func (r *RecyclableMemory) Recycle() {
r.allocator.Free(buf)
}
}
r.Reset()
}
var (
memoryPool [BuddySize]byte
buddy = NewBuddy(BuddySize >> MinPowerOf2)
lock sync.Mutex
poolStart = int64(uintptr(unsafe.Pointer(&memoryPool[0])))
blockPool = list.New()
//EnableCheckSize bool = false
)
type MemoryAllocator struct {
allocator *Allocator
start int64
memory []byte
Size int
}
func GetMemoryAllocator(size int) (ret *MemoryAllocator) {
lock.Lock()
offset, err := buddy.Alloc(size >> MinPowerOf2)
if blockPool.Len() > 0 {
ret = blockPool.Remove(blockPool.Front()).(*MemoryAllocator)
} else {
ret = &MemoryAllocator{
allocator: NewAllocator(size),
}
}
lock.Unlock()
ret.Size = size
ret.allocator.Init(size)
if err != nil {
ret.memory = make([]byte, size)
ret.start = int64(uintptr(unsafe.Pointer(&ret.memory[0])))
return
}
offset = offset << MinPowerOf2
ret.memory = memoryPool[offset : offset+size]
ret.start = poolStart + int64(offset)
return
recycle func()
}
func (ma *MemoryAllocator) Recycle() {
ma.allocator.Recycle()
lock.Lock()
blockPool.PushBack(ma)
_ = buddy.Free(int((poolStart - ma.start) >> MinPowerOf2))
ma.memory = nil
lock.Unlock()
if ma.recycle != nil {
ma.recycle()
}
}
func (ma *MemoryAllocator) Find(size int) (memory []byte) {

414
plugin.go
View File

@@ -6,6 +6,7 @@ import (
"crypto/md5"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"net"
"net/http"
@@ -18,6 +19,8 @@ import (
"strings"
"time"
"gopkg.in/yaml.v3"
"m7s.live/v5/pkg/task"
"github.com/quic-go/quic-go"
@@ -25,8 +28,8 @@ import (
gatewayRuntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
myip "github.com/husanpao/ip"
"google.golang.org/grpc"
"gopkg.in/yaml.v3"
"gorm.io/gorm"
. "m7s.live/v5/pkg"
"m7s.live/v5/pkg/config"
"m7s.live/v5/pkg/db"
@@ -63,9 +66,7 @@ type (
IPlugin interface {
task.IJob
OnInit() error
OnStop()
Pull(string, config.Pull, *config.Publish)
Pull(string, config.Pull, *config.Publish) (*PullJob, error)
Push(string, config.Push, *config.Subscribe)
Transform(*Publisher, config.Transform)
OnPublish(*Publisher)
@@ -133,24 +134,9 @@ func (plugin *PluginMeta) Init(s *Server, userConfig map[string]any) (p *Plugin)
finalConfig, _ := yaml.Marshal(p.Config.GetMap())
p.Logger.Handler().(*MultiLogHandler).SetLevel(ParseLevel(p.config.LogLevel))
p.Debug("config", "detail", string(finalConfig))
if s.DisableAll {
p.Disabled = true
}
if userConfig["enable"] == false {
p.Disabled = true
} else if userConfig["enable"] == true {
p.Disabled = false
}
if p.Disabled {
if userConfig["enable"] == false || (s.DisableAll && userConfig["enable"] != true) {
p.disable("config")
p.Warn("plugin disabled")
return
} else {
var handlers map[string]http.HandlerFunc
if v, ok := instance.(IRegisterHandler); ok {
handlers = v.RegisterHandler()
}
p.registerHandler(handlers)
}
p.Info("init", "version", plugin.Version)
var err error
@@ -158,7 +144,7 @@ func (plugin *PluginMeta) Init(s *Server, userConfig map[string]any) (p *Plugin)
p.DB = s.DB
} else if p.config.DSN != "" {
if factory, ok := db.Factory[p.config.DBType]; ok {
s.DB, err = gorm.Open(factory(p.config.DSN), &gorm.Config{})
p.DB, err = gorm.Open(factory(p.config.DSN), &gorm.Config{})
if err != nil {
s.Error("failed to connect database", "error", err, "dsn", s.config.DSN, "type", s.config.DBType)
p.disable(fmt.Sprintf("database %v", err))
@@ -171,19 +157,51 @@ func (plugin *PluginMeta) Init(s *Server, userConfig map[string]any) (p *Plugin)
p.disable(fmt.Sprintf("auto migrate record stream failed %v", err))
return
}
if err = p.DB.AutoMigrate(&EventRecordStream{}); err != nil {
p.disable(fmt.Sprintf("auto migrate event record stream failed %v", err))
return
}
}
s.AddTask(instance)
if err = s.AddTask(instance).WaitStarted(); err != nil {
p.disable(instance.StopReason().Error())
return
}
if err = p.listen(); err != nil {
p.Stop(err)
p.disable(err.Error())
return
}
if p.Meta.ServiceDesc != nil && s.grpcServer != nil {
s.grpcServer.RegisterService(p.Meta.ServiceDesc, p.handler)
if p.Meta.RegisterGRPCHandler != nil {
if err = p.Meta.RegisterGRPCHandler(p.Context, s.config.HTTP.GetGRPCMux(), s.grpcClientConn); err != nil {
p.Stop(err)
p.disable(fmt.Sprintf("grpc %v", err))
return
} else {
p.Info("grpc handler registered")
}
}
}
if p.config.Hook != nil {
if hook, ok := p.config.Hook[config.HookOnServerKeepAlive]; ok && hook.Interval > 0 {
p.AddTask(&ServerKeepAliveTask{plugin: p})
}
}
var handlers map[string]http.HandlerFunc
if v, ok := instance.(IRegisterHandler); ok {
handlers = v.RegisterHandler()
}
p.registerHandler(handlers)
p.OnDispose(func() {
s.Plugins.Remove(p)
})
s.Plugins.Add(p)
return
}
// InstallPlugin 安装插件
func InstallPlugin[C iPlugin](options ...any) error {
var meta PluginMeta
for _, option := range options {
if m, ok := option.(PluginMeta); ok {
meta = m
}
}
func InstallPlugin[C iPlugin](meta PluginMeta) error {
var c *C
meta.Type = reflect.TypeOf(c).Elem()
if meta.Name == "" {
@@ -198,30 +216,6 @@ func InstallPlugin[C iPlugin](options ...any) error {
meta.Version = "dev"
}
}
for _, option := range options {
switch v := option.(type) {
case OnExitHandler:
meta.OnExit = v
case DefaultYaml:
meta.DefaultYaml = v
case PullerFactory:
meta.NewPuller = v
case PusherFactory:
meta.NewPusher = v
case RecorderFactory:
meta.NewRecorder = v
case TransformerFactory:
meta.NewTransformer = v
case AuthPublisher:
meta.OnAuthPub = v
case AuthSubscriber:
meta.OnAuthSub = v
case *grpc.ServiceDesc:
meta.ServiceDesc = v
case func(context.Context, *gatewayRuntime.ServeMux, *grpc.ClientConn) error:
meta.RegisterGRPCHandler = v
}
}
plugins = append(plugins, meta)
return nil
}
@@ -277,44 +271,10 @@ func (p *Plugin) GetPublicIP(netcardIP string) string {
func (p *Plugin) disable(reason string) {
p.Disabled = true
p.SetDescription("disableReason", reason)
p.Warn("plugin disabled")
p.Server.disabledPlugins = append(p.Server.disabledPlugins, p)
}
func (p *Plugin) Start() (err error) {
s := p.Server
if p.Meta.ServiceDesc != nil && s.grpcServer != nil {
s.grpcServer.RegisterService(p.Meta.ServiceDesc, p.handler)
if p.Meta.RegisterGRPCHandler != nil {
if err = p.Meta.RegisterGRPCHandler(p.Context, s.config.HTTP.GetGRPCMux(), s.grpcClientConn); err != nil {
p.disable(fmt.Sprintf("grpc %v", err))
return
} else {
p.Info("grpc handler registered")
}
}
}
s.Plugins.Add(p)
if err = p.listen(); err != nil {
p.disable(fmt.Sprintf("listen %v", err))
return
}
if err = p.handler.OnInit(); err != nil {
p.disable(fmt.Sprintf("init %v", err))
return
}
if p.config.Hook != nil {
if hook, ok := p.config.Hook[config.HookOnServerKeepAlive]; ok && hook.Interval > 0 {
p.AddTask(&ServerKeepAliveTask{plugin: p})
}
}
return
}
func (p *Plugin) Dispose() {
p.handler.OnStop()
p.Server.Plugins.Remove(p)
}
func (p *Plugin) listen() (err error) {
httpConf := &p.config.HTTP
@@ -374,13 +334,11 @@ func (p *Plugin) listen() (err error) {
return
}
func (p *Plugin) OnInit() error {
return nil
type WebHookQueueTask struct {
task.Work
}
func (p *Plugin) OnStop() {
}
var webHookQueueTask WebHookQueueTask
type WebHookTask struct {
task.Task
@@ -389,6 +347,7 @@ type WebHookTask struct {
conf config.Webhook
data any
jsonData []byte
alarm AlarmInfo
}
func (t *WebHookTask) Start() error {
@@ -396,10 +355,58 @@ func (t *WebHookTask) Start() error {
return task.ErrTaskComplete
}
var err error
t.jsonData, err = json.Marshal(t.data)
if err != nil {
return fmt.Errorf("marshal webhook data: %w", err)
// 处理AlarmInfo数据
if t.data != nil {
// 获取主机名和IP地址
hostname, err := os.Hostname()
if err != nil {
hostname = "unknown"
}
// 获取本机IP地址
var ipAddr string
addrs, err := net.InterfaceAddrs()
if err == nil {
for _, addr := range addrs {
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
ipAddr = ipnet.IP.String()
break
}
}
}
}
if ipAddr == "" {
ipAddr = "unknown"
}
// 直接使用t.data作为AlarmInfo
alarmInfo, ok := t.data.(AlarmInfo)
if !ok {
return fmt.Errorf("data is not of type AlarmInfo")
}
// 更新服务器信息
if alarmInfo.ServerInfo == "" {
alarmInfo.ServerInfo = fmt.Sprintf("%s (%s)", hostname, ipAddr)
}
// 确保时间戳已设置
if alarmInfo.CreatedAt.IsZero() {
alarmInfo.CreatedAt = time.Now()
}
if alarmInfo.UpdatedAt.IsZero() {
alarmInfo.UpdatedAt = time.Now()
}
// 将AlarmInfo序列化为JSON
jsonData, err := json.Marshal(alarmInfo)
if err != nil {
return fmt.Errorf("marshal AlarmInfo to json: %w", err)
}
t.jsonData = jsonData
t.alarm = alarmInfo
}
t.SetRetry(t.conf.RetryTimes, t.conf.RetryInterval)
@@ -407,6 +414,20 @@ func (t *WebHookTask) Start() error {
}
func (t *WebHookTask) Go() error {
// 检查是否需要保存告警到数据库
var dbID uint
if t.conf.SaveAlarm && t.plugin.DB != nil {
// 默认 IsSent 为 false
t.alarm.IsSent = false
if err := t.plugin.DB.Create(&t.alarm).Error; err != nil {
t.plugin.Error("保存告警到数据库失败", "error", err)
} else {
dbID = t.alarm.ID
t.plugin.Info(""+
"", "id", dbID)
}
}
req, err := http.NewRequest(t.conf.Method, t.conf.URL, bytes.NewBuffer(t.jsonData))
if err != nil {
return err
@@ -423,28 +444,38 @@ func (t *WebHookTask) Go() error {
resp, err := client.Do(req)
if err != nil {
t.plugin.Error("webhook request failed", "error", err)
t.plugin.Error("webhook请求失败", "error", err)
return err
}
defer resp.Body.Close()
// 如果发送成功且已保存到数据库则更新IsSent字段为true
if resp.StatusCode >= 200 && resp.StatusCode < 300 && t.conf.SaveAlarm && t.plugin.DB != nil && dbID > 0 {
t.alarm.IsSent = true
if err := t.plugin.DB.Model(&AlarmInfo{}).Where("id = ?", dbID).Update("is_sent", true).Error; err != nil {
t.plugin.Error("更新告警发送状态失败", "error", err)
} else {
t.plugin.Info("告警发送状态已更新", "id", dbID, "is_sent", true)
}
return task.ErrTaskComplete
}
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
return task.ErrTaskComplete
}
err = fmt.Errorf("webhook request failed with status: %d", resp.StatusCode)
t.plugin.Error("webhook response error", "status", resp.StatusCode)
err = fmt.Errorf("webhook请求失败,状态码:%d", resp.StatusCode)
t.plugin.Error("webhook响应错误", "状态码", resp.StatusCode)
return err
}
func (p *Plugin) SendWebhook(hookType config.HookType, data any) *task.Task {
func (p *Plugin) SendWebhook(conf config.Webhook, data any) *task.Task {
webhookTask := &WebHookTask{
plugin: p,
hookType: hookType,
conf: p.config.Hook[hookType],
data: data,
plugin: p,
conf: conf,
data: data,
}
return p.AddTask(webhookTask)
return webHookQueueTask.AddTask(webhookTask)
}
// TODO: use alias stream
@@ -517,7 +548,11 @@ func (p *Plugin) OnSubscribe(streamPath string, args url.Values) {
if p.Meta.NewPuller != nil && reg.MatchString(streamPath) {
conf.Args = config.HTTPValues(args)
conf.URL = reg.Replace(streamPath, conf.URL)
p.handler.Pull(streamPath, conf, nil)
if job, err := p.handler.Pull(streamPath, conf, nil); err == nil {
if w, ok := p.Server.Waiting.Get(streamPath); ok {
job.Progress = &w.Progress
}
}
}
}
@@ -539,8 +574,19 @@ func (p *Plugin) OnSubscribe(streamPath string, args url.Values) {
// }
//}
}
func (p *Plugin) PublishWithConfig(ctx context.Context, streamPath string, conf config.Publish) (publisher *Publisher, err error) {
publisher = createPublisher(p, streamPath, conf)
publisher = &Publisher{Publish: conf}
publisher.Type = conf.PubType
publisher.ID = task.GetNextTaskID()
publisher.Plugin = p
if conf.PublishTimeout > 0 {
publisher.TimeoutTimer = time.NewTimer(conf.PublishTimeout)
} else {
publisher.TimeoutTimer = time.NewTimer(time.Hour * 24 * 365)
}
publisher.Logger = p.Logger.With("streamPath", streamPath, "pId", publisher.ID)
publisher.Init(streamPath, &publisher.Publish)
if p.config.EnableAuth && publisher.Type == PublishTypeServer {
onAuthPub := p.Meta.OnAuthPub
if onAuthPub == nil {
@@ -558,35 +604,40 @@ func (p *Plugin) PublishWithConfig(ctx context.Context, streamPath string, conf
}
}
}
err = p.Server.Streams.AddTask(publisher, ctx).WaitStarted()
if err == nil {
if sender := p.getHookSender(config.HookOnPublishEnd); sender != nil {
publisher.OnDispose(func() {
webhookData := map[string]interface{}{
"event": config.HookOnPublishEnd,
"streamPath": publisher.StreamPath,
"publishId": publisher.ID,
"reason": publisher.StopReason().Error(),
"timestamp": time.Now().Unix(),
}
sender(config.HookOnPublishEnd, webhookData)
})
}
if sender := p.getHookSender(config.HookOnPublishStart); sender != nil {
webhookData := map[string]interface{}{
"event": config.HookOnPublishStart,
"streamPath": publisher.StreamPath,
"args": publisher.Args,
"publishId": publisher.ID,
"remoteAddr": publisher.RemoteAddr,
"type": publisher.Type,
"pluginName": p.Meta.Name,
"timestamp": time.Now().Unix(),
for {
err = p.Server.Streams.Add(publisher, ctx).WaitStarted()
if err == nil {
if sender, webhook := p.getHookSender(config.HookOnPublishEnd); sender != nil {
publisher.OnDispose(func() {
alarmInfo := AlarmInfo{
AlarmName: string(config.HookOnPublishEnd),
AlarmDesc: publisher.StopReason().Error(),
AlarmType: config.AlarmPublishOffline,
StreamPath: publisher.StreamPath,
}
sender(webhook, alarmInfo)
})
}
sender(config.HookOnPublishStart, webhookData)
if sender, webhook := p.getHookSender(config.HookOnPublishStart); sender != nil {
alarmInfo := AlarmInfo{
AlarmName: string(config.HookOnPublishStart),
AlarmType: config.AlarmPublishRecover,
StreamPath: publisher.StreamPath,
}
sender(webhook, alarmInfo)
}
return
} else if oldStream := new(task.ExistTaskError); errors.As(err, oldStream) {
if conf.KickExist {
publisher.takeOver(oldStream.Task.(*Publisher))
oldStream.Task.WaitStopped()
} else {
return nil, ErrStreamExist
}
} else {
return
}
}
return
}
func (p *Plugin) Publish(ctx context.Context, streamPath string) (publisher *Publisher, err error) {
@@ -616,39 +667,32 @@ func (p *Plugin) SubscribeWithConfig(ctx context.Context, streamPath string, con
if err == nil {
select {
case <-subscriber.waitPublishDone:
err = subscriber.Publisher.WaitTrack()
waitAudio := conf.WaitTrack == "all" || strings.Contains(conf.WaitTrack, "audio")
waitVideo := conf.WaitTrack == "all" || strings.Contains(conf.WaitTrack, "video")
err = subscriber.Publisher.WaitTrack(waitAudio, waitVideo)
case <-subscriber.Done():
err = subscriber.StopReason()
}
}
if err == nil {
if sender := p.getHookSender(config.HookOnSubscribeEnd); sender != nil {
if sender, webhook := p.getHookSender(config.HookOnSubscribeEnd); sender != nil {
subscriber.OnDispose(func() {
webhookData := map[string]interface{}{
"event": config.HookOnSubscribeEnd,
"streamPath": subscriber.StreamPath,
"subscriberId": subscriber.ID,
"reason": subscriber.StopReason().Error(),
"timestamp": time.Now().Unix(),
alarmInfo := AlarmInfo{
AlarmName: string(config.HookOnSubscribeEnd),
AlarmDesc: subscriber.StopReason().Error(),
AlarmType: config.AlarmSubscribeOffline,
StreamPath: subscriber.StreamPath,
}
if subscriber.Publisher != nil {
webhookData["publishId"] = subscriber.Publisher.ID
}
sender(config.HookOnSubscribeEnd, webhookData)
sender(webhook, alarmInfo)
})
}
if sender := p.getHookSender(config.HookOnSubscribeStart); sender != nil {
webhookData := map[string]interface{}{
"event": config.HookOnSubscribeStart,
"streamPath": subscriber.StreamPath,
"publishId": subscriber.Publisher.ID,
"subscriberId": subscriber.ID,
"remoteAddr": subscriber.RemoteAddr,
"type": subscriber.Type,
"args": subscriber.Args,
"timestamp": time.Now().Unix(),
if sender, webhook := p.getHookSender(config.HookOnSubscribeStart); sender != nil {
alarmInfo := AlarmInfo{
AlarmName: string(config.HookOnSubscribeStart),
AlarmType: config.AlarmSubscribeRecover,
StreamPath: subscriber.StreamPath,
}
sender(config.HookOnSubscribeStart, webhookData)
sender(webhook, alarmInfo)
}
}
return
@@ -658,12 +702,14 @@ func (p *Plugin) Subscribe(ctx context.Context, streamPath string) (subscriber *
return p.SubscribeWithConfig(ctx, streamPath, p.config.Subscribe)
}
func (p *Plugin) Pull(streamPath string, conf config.Pull, pubConf *config.Publish) {
func (p *Plugin) Pull(streamPath string, conf config.Pull, pubConf *config.Publish) (job *PullJob, err error) {
puller := p.Meta.NewPuller(conf)
if puller == nil {
return
return nil, ErrNotFound
}
puller.GetPullJob().Init(puller, p, streamPath, conf, pubConf)
job = puller.GetPullJob()
job.Init(puller, p, streamPath, conf, pubConf)
return
}
func (p *Plugin) Push(streamPath string, conf config.Push, subConf *config.Subscribe) {
@@ -674,14 +720,13 @@ func (p *Plugin) Push(streamPath string, conf config.Push, subConf *config.Subsc
func (p *Plugin) Record(pub *Publisher, conf config.Record, subConf *config.Subscribe) *RecordJob {
recorder := p.Meta.NewRecorder(conf)
job := recorder.GetRecordJob().Init(recorder, p, pub.StreamPath, conf, subConf)
job.Depend(pub)
pub.Using(job)
return job
}
func (p *Plugin) Transform(pub *Publisher, conf config.Transform) {
transformer := p.Meta.NewTransformer()
job := transformer.GetTransformJob().Init(transformer, p, pub, conf)
job.Depend(pub)
pub.Using(transformer.GetTransformJob().Init(transformer, p, pub, conf))
}
func (p *Plugin) registerHandler(handlers map[string]http.HandlerFunc) {
@@ -719,10 +764,11 @@ func (p *Plugin) registerHandler(handlers map[string]http.HandlerFunc) {
streamPath := r.PathValue("streamPath")
t := r.PathValue("type")
expire := r.URL.Query().Get("expire")
if t == "publish" {
switch t {
case "publish":
secret := md5.Sum([]byte(p.config.Publish.Key + streamPath + expire))
rw.Write([]byte(hex.EncodeToString(secret[:])))
} else if t == "subscribe" {
case "subscribe":
secret := md5.Sum([]byte(p.config.Subscribe.Key + streamPath + expire))
rw.Write([]byte(hex.EncodeToString(secret[:])))
}
@@ -760,13 +806,21 @@ func (p *Plugin) handle(pattern string, handler http.Handler) {
p.Server.apiList = append(p.Server.apiList, pattern)
}
func (p *Plugin) getHookSender(hookType config.HookType) (sender func(hookType config.HookType, data any) *task.Task) {
func (p *Plugin) getHookSender(hookType config.HookType) (sender func(webhook config.Webhook, data any) *task.Task, conf config.Webhook) {
if p.config.Hook != nil {
if _, ok := p.config.Hook[hookType]; ok {
sender = p.SendWebhook
conf = p.config.Hook[hookType]
} else if _, ok := p.config.Hook[config.HookDefault]; ok {
sender = p.SendWebhook
conf = p.config.Hook[config.HookDefault]
} else if p.Server.config.Hook != nil {
if _, ok := p.Server.config.Hook[hookType]; ok {
conf = p.config.Hook[hookType]
sender = p.Server.SendWebhook
} else if _, ok := p.Server.config.Hook[config.HookDefault]; ok {
sender = p.Server.SendWebhook
conf = p.config.Hook[config.HookDefault]
}
}
}
@@ -783,19 +837,25 @@ func (t *ServerKeepAliveTask) GetTickInterval() time.Duration {
}
func (t *ServerKeepAliveTask) Tick(now any) {
sender := t.plugin.getHookSender(config.HookOnServerKeepAlive)
sender, webhook := t.plugin.getHookSender(config.HookOnServerKeepAlive)
if sender == nil {
return
}
s := t.plugin.Server
webhookData := map[string]interface{}{
"event": config.HookOnServerKeepAlive,
"timestamp": time.Now().Unix(),
"streams": s.Streams.Length,
"subscribers": s.Subscribers.Length,
"publisherCount": s.Streams.Length,
"subscriberCount": s.Subscribers.Length,
"uptime": time.Since(s.StartTime).Seconds(),
//s := t.plugin.Server
alarmInfo := AlarmInfo{
AlarmName: string(config.HookOnServerKeepAlive),
AlarmType: config.AlarmKeepAliveOnline,
StreamPath: "",
}
sender(config.HookOnServerKeepAlive, webhookData)
sender(webhook, alarmInfo)
//webhookData := map[string]interface{}{
// "event": config.HookOnServerKeepAlive,
// "timestamp": time.Now().Unix(),
// "streams": s.Streams.Length,
// "subscribers": s.Subscribers.Length,
// "publisherCount": s.Streams.Length,
// "subscriberCount": s.Subscribers.Length,
// "uptime": time.Since(s.StartTime).Seconds(),
//}
//sender(webhook, webhookData)
}

View File

@@ -53,14 +53,16 @@ Example:
const defaultConfig = m7s.DefaultYaml(`tcp:
listenaddr: :5554`)
var _ = m7s.InstallPlugin[MyPlugin](defaultConfig)
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
DefaultYaml: defaultConfig,
})
```
## 3. Implement Event Callbacks (Optional)
### Initialization Callback
```go
func (config *MyPlugin) OnInit() (err error) {
func (config *MyPlugin) Start() (err error) {
// Initialize things
return
}
@@ -121,22 +123,25 @@ func (config *MyPlugin) test1(rw http.ResponseWriter, r *http.Request) {
Push client needs to implement IPusher interface and pass the creation method to InstallPlugin.
```go
type Pusher struct {
pullCtx m7s.PullJob
task.Task
pushJob m7s.PushJob
}
func (c *Pusher) GetPullJob() *m7s.PullJob {
return &c.pullCtx
func (c *Pusher) GetPushJob() *m7s.PushJob {
return &c.pushJob
}
func NewPusher(_ config.Push) m7s.IPusher {
return &Pusher{}
}
var _ = m7s.InstallPlugin[MyPlugin](NewPusher)
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
NewPusher: NewPusher,
})
```
### Implement Pull Client
Pull client needs to implement IPuller interface and pass the creation method to InstallPlugin.
The following Puller inherits from m7s.HTTPFilePuller for basic file and HTTP pulling:
The following Puller inherits from m7s.HTTPFilePuller for basic file and HTTP pulling. You need to override the Start method for specific pulling logic:
```go
type Puller struct {
m7s.HTTPFilePuller
@@ -145,7 +150,9 @@ type Puller struct {
func NewPuller(_ config.Pull) m7s.IPuller {
return &Puller{}
}
var _ = m7s.InstallPlugin[MyPlugin](NewPuller)
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
NewPuller: NewPuller,
})
```
## 6. Implement gRPC Service
@@ -226,7 +233,10 @@ import (
"m7s.live/v5/plugin/myplugin/pb"
)
var _ = m7s.InstallPlugin[MyPlugin](&pb.Api_ServiceDesc, pb.RegisterApiHandler)
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
ServiceDesc: &pb.Api_ServiceDesc,
RegisterGRPCHandler: pb.RegisterApiHandler,
})
type MyPlugin struct {
pb.UnimplementedApiServer
@@ -257,33 +267,25 @@ After obtaining the `publisher`, you can publish audio/video data using `publish
If existing audio/video data formats don't meet your needs, you can define custom formats by implementing this interface:
```go
IAVFrame interface {
GetAllocator() *util.ScalableMemoryAllocator
SetAllocator(*util.ScalableMemoryAllocator)
Parse(*AVTrack) error
ConvertCtx(codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error)
Demux(codec.ICodecCtx) (any, error)
Mux(codec.ICodecCtx, *AVFrame)
GetTimestamp() time.Duration
GetCTS() time.Duration
GetSample() *Sample
GetSize() int
CheckCodecChange() error
Demux() error // demux to raw format
Mux(*Sample) error // mux from origin format
Recycle()
String() string
Dump(byte, io.Writer)
}
```
> Define separate types for audio and video
- GetAllocator/SetAllocator: Automatically implemented when embedding RecyclableMemory
- Parse: Identifies key frames, sequence frames, and other important information
- ConvertCtx: Called when protocol conversion is needed
- Demux: Called when audio/video data needs to be demuxed
- Mux: Called when audio/video data needs to be muxed
- Recycle: Automatically implemented when embedding RecyclableMemory
- String: Prints audio/video data information
The methods serve the following purposes:
- GetSample: Gets the Sample object containing codec context and raw data
- GetSize: Gets the size of audio/video data
- GetTimestamp: Gets the timestamp in nanoseconds
- GetCTS: Gets the Composition Time Stamp in nanoseconds (PTS = DTS+CTS)
- Dump: Prints binary audio/video data
- CheckCodecChange: Checks if the codec has changed
- Demux: Demuxes audio/video data to raw format for use by other formats
- Mux: Muxes from original format to custom audio/video data format
- Recycle: Recycles resources, automatically implemented when embedding RecyclableMemory
- String: Prints audio/video data information
## 8. Subscribing to Streams
```go

View File

@@ -51,12 +51,14 @@ type MyPlugin struct {
const defaultConfig = m7s.DefaultYaml(`tcp:
listenaddr: :5554`)
var _ = m7s.InstallPlugin[MyPlugin](defaultConfig)
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
DefaultYaml: defaultConfig,
})
```
## 3. 实现事件回调(可选)
### 初始化回调
```go
func (config *MyPlugin) OnInit() (err error) {
func (config *MyPlugin) Start() (err error) {
// 初始化一些东西
return
}
@@ -113,26 +115,29 @@ func (config *MyPlugin) test1(rw http.ResponseWriter, r *http.Request) {
## 5. 实现推拉流客户端
### 实现推流客户端
推流客户端就是想要实现一个 IPusher然后将创建 IPusher 的方法传入 InstallPlugin 中。
推流客户端要实现 IPusher 接口,然后将创建 IPusher 的方法传入 InstallPlugin 中。
```go
type Pusher struct {
pullCtx m7s.PullJob
task.Task
pushJob m7s.PushJob
}
func (c *Pusher) GetPullJob() *m7s.PullJob {
return &c.pullCtx
func (c *Pusher) GetPushJob() *m7s.PushJob {
return &c.pushJob
}
func NewPusher(_ config.Push) m7s.IPusher {
return &Pusher{}
}
var _ = m7s.InstallPlugin[MyPlugin](NewPusher)
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
NewPusher: NewPusher,
})
```
### 实现拉流客户端
拉流客户端就是想要实现一个 IPuller然后将创建 IPuller 的方法传入 InstallPlugin 中。
下面这个 Puller 继承了 m7s.HTTPFilePuller可以实现基本的文件和 HTTP拉流。具体拉流逻辑需要覆盖 Run 方法。
拉流客户端要实现 IPuller 接口,然后将创建 IPuller 的方法传入 InstallPlugin 中。
下面这个 Puller 继承了 m7s.HTTPFilePuller可以实现基本的文件和 HTTP拉流。具体拉流逻辑需要覆盖 Start 方法。
```go
type Puller struct {
m7s.HTTPFilePuller
@@ -141,7 +146,9 @@ type Puller struct {
func NewPuller(_ config.Pull) m7s.IPuller {
return &Puller{}
}
var _ = m7s.InstallPlugin[MyPlugin](NewPuller)
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
NewPuller: NewPuller,
})
```
## 6. 实现gRPC服务
@@ -221,7 +228,10 @@ import (
"m7s.live/v5/plugin/myplugin/pb"
)
var _ = m7s.InstallPlugin[MyPlugin](&pb.Api_ServiceDesc, pb.RegisterApiHandler)
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
ServiceDesc: &pb.Api_ServiceDesc,
RegisterGRPCHandler: pb.RegisterApiHandler,
})
type MyPlugin struct {
pb.UnimplementedApiServer
@@ -253,35 +263,25 @@ publisher, err = p.Publish(streamPath, connectInfo)
但需要满足转换格式的要求。即需要实现下面这个接口:
```go
IAVFrame interface {
GetAllocator() *util.ScalableMemoryAllocator
SetAllocator(*util.ScalableMemoryAllocator)
Parse(*AVTrack) error // get codec info, idr
ConvertCtx(codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) // convert codec from source stream
Demux(codec.ICodecCtx) (any, error) // demux to raw format
Mux(codec.ICodecCtx, *AVFrame) // mux from raw format
GetTimestamp() time.Duration
GetCTS() time.Duration
GetSample() *Sample
GetSize() int
CheckCodecChange() error
Demux() error // demux to raw format
Mux(*Sample) error // mux from origin format
Recycle()
String() string
Dump(byte, io.Writer)
}
```
> 音频和视频需要定义两个不同的类型
其中 `Parse` 方法用于解析音视频数据,`ConvertCtx` 方法用于转换音视频数据格式的上下文,`Demux` 方法用于解封装音视频数据,`Mux` 方法用于封装音视频数据,`Recycle` 方法用于回收资源。
- GetAllocator 方法用于获取内存分配器。(嵌入 RecyclableMemory 会自动实现)
- SetAllocator 方法用于设置内存分配器。(嵌入 RecyclableMemory 会自动实现)
- Parse方法主要从数据中识别关键帧序列帧等重要信息。
- ConvertCtx 会在需要转换协议的时候调用,传入原始的协议上下文,返回新的协议上下文(即自定义格式的上下文)。
- Demux 会在需要解封装音视频数据的时候调用,传入协议上下文,返回解封装后的音视频数据,用于给其他格式封装使用。
- Mux 会在需要封装音视频数据的时候调用,传入协议上下文和解封装后的音视频数据,用于封装成自定义格式的音视频数据。
- Recycle 方法会在嵌入 RecyclableMemory 时自动实现,无需手动实现。
- String 方法用于打印音视频数据的信息。
其中各方法的作用如下:
- GetSample 方法用于获取音视频数据的Sample对象包含编解码上下文和原始数据。
- GetSize 方法用于获取音视频数据的大小。
- GetTimestamp 方法用于获取音视频数据的时间戳(单位:纳秒)
- GetCTS 方法用于获取音视频数据的Composition Time Stamp(单位:纳秒)。PTS = DTS+CTS
- Dump 方法用于打印音视频数据的二进制数据。
- CheckCodecChange 方法用于检查编解码器是否发生变化
- Demux 方法用于解封装音视频数据到裸格式,用于给其他格式封装使用。
- Mux 方法用于从原始格式封装成自定义格式的音视频数据。
- Recycle 方法用于回收资源,会在嵌入 RecyclableMemory 时自动实现。
- String 方法用于打印音视频数据的信息。
### 6. 订阅流
```go

View File

@@ -19,10 +19,12 @@ type CascadeClientPlugin struct {
AutoPush bool `desc:"自动推流到上级"` //自动推流到上级
Server string `desc:"上级服务器"` // TODO: support multiple servers
Secret string `desc:"连接秘钥"`
conn quic.Connection
client *CascadeClient
}
var _ = m7s.InstallPlugin[CascadeClientPlugin](cascade.NewCascadePuller)
var _ = m7s.InstallPlugin[CascadeClientPlugin](m7s.PluginMeta{
NewPuller: cascade.NewCascadePuller,
})
type CascadeClient struct {
task.Work
@@ -79,7 +81,7 @@ func (task *CascadeClient) Run() (err error) {
return
}
func (c *CascadeClientPlugin) OnInit() (err error) {
func (c *CascadeClientPlugin) Start() (err error) {
if c.Secret == "" && c.Server == "" {
return nil
}
@@ -88,14 +90,17 @@ func (c *CascadeClientPlugin) OnInit() (err error) {
}
connectTask.SetRetry(-1, time.Second)
c.AddTask(&connectTask)
c.client = &connectTask
return
}
func (c *CascadeClientPlugin) Pull(streamPath string, conf config.Pull, pub *config.Publish) {
func (c *CascadeClientPlugin) Pull(streamPath string, conf config.Pull, pub *config.Publish) (job *m7s.PullJob, err error) {
puller := &cascade.Puller{
Connection: c.conn,
Connection: c.client.Connection,
}
puller.GetPullJob().Init(puller, &c.Plugin, streamPath, conf, pub)
job = puller.GetPullJob()
job.Init(puller, &c.Plugin, streamPath, conf, pub)
return
}
//func (c *CascadeClientPlugin) Start() {

View File

@@ -5,6 +5,7 @@ import (
"github.com/quic-go/quic-go"
"m7s.live/v5"
"m7s.live/v5/pkg/config"
flv "m7s.live/v5/plugin/flv/pkg"
)
@@ -17,7 +18,7 @@ func (p *Puller) GetPullJob() *m7s.PullJob {
return &p.PullJob
}
func NewCascadePuller() m7s.IPuller {
func NewCascadePuller(config.Pull) m7s.IPuller {
return &Puller{}
}

View File

@@ -29,7 +29,7 @@ type CascadeServerPlugin struct {
clients util.Collection[uint, *cascade.Instance]
}
func (c *CascadeServerPlugin) OnInit() (err error) {
func (c *CascadeServerPlugin) Start() (err error) {
if c.GetCommonConf().Quic.ListenAddr == "" {
return pkg.ErrNotListen
}
@@ -50,8 +50,12 @@ func (c *CascadeServerPlugin) OnInit() (err error) {
return
}
var _ = m7s.InstallPlugin[CascadeServerPlugin](m7s.DefaultYaml(`quic:
listenaddr: :44944`), &pb.Server_ServiceDesc, pb.RegisterServerHandler)
var _ = m7s.InstallPlugin[CascadeServerPlugin](m7s.PluginMeta{
DefaultYaml: `quic:
listenaddr: :44944`,
ServiceDesc: &pb.Server_ServiceDesc,
RegisterGRPCHandler: pb.RegisterServerHandler,
})
type CascadeServer struct {
task.Work

170
plugin/crontab/README.md Normal file
View File

@@ -0,0 +1,170 @@
# M7S Crontab 插件说明文档
## 1. 插件概述
Crontab 插件是 M7S 流媒体服务器的一个扩展组件,主要用于实现基于时间计划的自动录制功能。该插件允许用户创建定时录制计划,根据预设的时间表自动开始和停止流媒体的录制,支持灵活的周期性录制设置。
## 2. 核心功能
- **定时录制计划管理**:创建、更新、删除和查询录制计划
- **流路径关联**:将录制计划与特定的流路径关联
- **时间表设置**:通过 168 位的二进制字符串(7天×24小时)定义每周的录制时间段
- **自动录制控制**:根据时间计划自动开始和停止录制
- **状态监控**:查询当前正在执行和计划中的录制任务状态
## 3. 文件结构与功能
### 3.1 主要文件
#### 3.1.1 `index.go`
插件的入口文件,定义了 `CrontabPlugin` 结构体和初始化方法。
- **主要结构**
- `CrontabPlugin`:插件的主结构体,包含插件基础功能、API服务器实现和数据集合
- **主要功能**
- `OnInit()`:插件初始化函数,负责数据库迁移、加载已有录制计划并创建相应的定时任务
#### 3.1.2 `crontab.go`
定义了定时任务的核心逻辑和执行流程。
- **主要结构**
- `TimeSlot`:表示一个时间段,包含开始和结束时间
- `Crontab`:定时任务调度器,负责根据计划执行录制操作
- **主要功能**
- `Start()`:初始化定时任务
- `Run()`:阻塞运行定时任务,循环检查并执行录制操作
- `Dispose()`:停止定时任务
- `getNextTimeSlot()`:计算下一个需要执行的时间段
- `startRecording()`:开始录制流
- `stopRecording()`:停止录制流
#### 3.1.3 `api.go`
实现了插件的 API 接口,提供了与前端交互的功能。
- **主要功能**
- 录制计划管理:`List()`, `Add()`, `Update()`, `Remove()`
- 录制计划流关联管理:`ListRecordPlanStreams()`, `AddRecordPlanStream()`, `UpdateRecordPlanStream()`, `RemoveRecordPlanStream()`
- 计划解析:`ParsePlanTime()`
- 状态查询:`GetCrontabStatus()`
- **辅助功能**
- 时间计算:`getWeekdayName()`, `getWeekdayIndex()`, `getNextDateForWeekday()`
- 时间段计算:`calculateTimeSlots()`, `getNextTimeSlotFromNow()`
### 3.2 子目录文件
#### 3.2.1 `pkg` 目录
包含数据模型定义和数据库操作相关功能。
- **`recordplan.go`**
- 定义 `RecordPlan` 结构体,表示录制计划的数据模型
- 包含计划ID、名称、时间表和启用状态等字段
- **`recordplanstream.go`**
- 定义 `RecordPlanStream` 结构体,表示录制计划与流路径的关联
- 提供数据库查询的辅助函数,如按流路径模糊查询、按创建时间排序等
#### 3.2.2 `pb` 目录
包含 Protocol Buffers 定义和生成的代码,用于 API 接口和数据传输。
- **`crontab.proto`**
- 定义了插件的 API 服务接口
- 定义了各种请求和响应消息结构
- 包含 HTTP 路由映射配置
- **`crontab.pb.go`, `crontab.pb.gw.go`, `crontab_grpc.pb.go`**
- 由 Protocol Buffers 编译器自动生成的 Go 代码
- 实现了消息序列化/反序列化和 gRPC 服务接口
## 4. 工作流程
### 4.1 插件初始化流程
1. 插件启动时,`OnInit()` 方法被调用
2. 执行数据库迁移,确保必要的表结构存在
3. 从数据库加载所有录制计划和关联的流信息
4. 对于已启用的计划,创建并启动相应的定时任务
### 4.2 录制计划执行流程
1. 定时任务启动后,进入 `Run()` 方法的循环
2. 通过 `getNextTimeSlot()` 计算下一个需要执行的时间段
3. 设置定时器等待到达开始时间
4. 到达开始时间后,调用 `startRecording()` 开始录制
5. 设置定时器等待到达结束时间
6. 到达结束时间后,调用 `stopRecording()` 停止录制
7. 循环继续,计算下一个时间段
### 4.3 API 交互流程
1. 前端通过 HTTP/gRPC 接口与插件交互
2. 可以创建、更新、删除录制计划和流关联
3. 可以查询当前正在执行和计划中的录制任务状态
4. 可以解析计划字符串,获取时间段信息
## 5. 关键概念
### 5.1 录制计划 (RecordPlan)
录制计划定义了何时进行录制的时间表。每个计划包含:
- **ID**:唯一标识符
- **名称**:计划名称
- **时间表**:168位的二进制字符串,表示一周中每个小时是否进行录制
- **启用状态**:是否启用该计划
### 5.2 录制计划流 (RecordPlanStream)
将录制计划与特定的流路径关联,定义了录制的具体参数:
- **计划ID**:关联的录制计划ID
- **流路径**:要录制的流的路径
- **分片设置**:录制文件的分片参数
- **文件路径**:录制文件的保存路径
- **启用状态**:是否启用该关联
### 5.3 时间表格式
时间表使用 168 位的二进制字符串表示一周中的每个小时是否进行录制:
- 每天 24 小时,一周 7 天,共 168 小时
- 字符串中的每一位对应一个小时,'1' 表示录制,'0' 表示不录制
- 字符串按周日到周六的顺序排列,每天 24 位
例如:
- 全为 '0':一周中不进行任何录制
- 前 24 位为 '1',其余为 '0':仅在周日全天录制
- 每天的第 9 位到第 17 位为 '1':每天上午 9 点到下午 5 点录制
## 6. 使用场景
1. **定期节目录制**:适用于每周固定时间播出的节目自动录制
2. **工作时间监控**:仅在工作时间段自动录制监控视频
3. **带宽管理**:在网络带宽充足的时间段进行录制,避开高峰期
4. **存储优化**:只录制有价值的时间段,节省存储空间
## 7. API 接口说明
### 7.1 录制计划管理
- **列表查询**`GET /plan/api/list`
- **添加计划**`POST /plan/api/add`
- **更新计划**`POST /plan/api/update/{id}`
- **删除计划**`POST /plan/api/remove/{id}`
### 7.2 录制计划流管理
- **列表查询**`GET /planstream/api/list`
- **添加关联**`POST /planstream/api/add`
- **更新关联**`POST /planstream/api/update`
- **删除关联**`POST /planstream/api/remove/{planId}/{streamPath}`
### 7.3 其他接口
- **解析计划**`GET /plan/api/parse/{plan}`
- **状态查询**`GET /crontab/api/status`
## 8. 总结
Crontab 插件为 M7S 流媒体服务器提供了强大的定时录制功能,通过灵活的时间表设置和流路径关联,实现了自动化的录制控制。该插件适用于需要定期录制特定时间段流媒体内容的场景,能有效节省人力和存储资源。

View File

@@ -2,7 +2,11 @@ package plugin_crontab
import (
"context"
"errors"
"fmt"
"sort"
"strings"
"time"
"google.golang.org/protobuf/types/known/timestamppb"
cronpb "m7s.live/v5/plugin/crontab/pb"
@@ -17,34 +21,29 @@ func (ct *CrontabPlugin) List(ctx context.Context, req *cronpb.ReqPlanList) (*cr
req.PageSize = 10
}
var total int64
var plans []pkg.RecordPlan
// 从内存中获取所有计划
plans := ct.recordPlans.Items
total := len(plans)
query := ct.DB.Model(&pkg.RecordPlan{})
result := query.Count(&total)
if result.Error != nil {
return &cronpb.PlanResponseList{
Code: 500,
Message: result.Error.Error(),
}, nil
// 计算分页
start := int(req.PageNum-1) * int(req.PageSize)
end := start + int(req.PageSize)
if start >= total {
start = total
}
if end > total {
end = total
}
offset := (req.PageNum - 1) * req.PageSize
result = query.Order("id desc").Offset(int(offset)).Limit(int(req.PageSize)).Find(&plans)
if result.Error != nil {
return &cronpb.PlanResponseList{
Code: 500,
Message: result.Error.Error(),
}, nil
}
// 获取当前页的数据
pagePlans := plans[start:end]
data := make([]*cronpb.Plan, 0, len(plans))
for _, plan := range plans {
data := make([]*cronpb.Plan, 0, len(pagePlans))
for _, plan := range pagePlans {
data = append(data, &cronpb.Plan{
Id: uint32(plan.ID),
Name: plan.Name,
Enable: plan.Enabled,
Enable: plan.Enable,
CreateTime: timestamppb.New(plan.CreatedAt),
UpdateTime: timestamppb.New(plan.UpdatedAt),
Plan: plan.Plan,
@@ -94,9 +93,9 @@ func (ct *CrontabPlugin) Add(ctx context.Context, req *cronpb.Plan) (*cronpb.Res
}
plan := &pkg.RecordPlan{
Name: req.Name,
Plan: req.Plan,
Enabled: req.Enable,
Name: req.Name,
Plan: req.Plan,
Enable: req.Enable,
}
if err := ct.DB.Create(plan).Error; err != nil {
@@ -106,6 +105,9 @@ func (ct *CrontabPlugin) Add(ctx context.Context, req *cronpb.Plan) (*cronpb.Res
}, nil
}
// 添加到内存中
ct.recordPlans.Add(plan)
return &cronpb.Response{
Code: 0,
Message: "success",
@@ -160,10 +162,14 @@ func (ct *CrontabPlugin) Update(ctx context.Context, req *cronpb.Plan) (*cronpb.
}, nil
}
// 处理 enable 状态变更
enableChanged := existingPlan.Enable != req.Enable
// 更新记录
updates := map[string]interface{}{
"name": req.Name,
"plan": req.Plan,
"enabled": req.Enable,
"name": req.Name,
"plan": req.Plan,
"enable": req.Enable,
}
if err := ct.DB.Model(&existingPlan).Updates(updates).Error; err != nil {
@@ -173,6 +179,45 @@ func (ct *CrontabPlugin) Update(ctx context.Context, req *cronpb.Plan) (*cronpb.
}, nil
}
// 更新内存中的记录
existingPlan.Name = req.Name
existingPlan.Plan = req.Plan
existingPlan.Enable = req.Enable
ct.recordPlans.Set(&existingPlan)
// 处理 enable 状态变更后的操作
if enableChanged {
if req.Enable {
// 从 false 变为 true需要创建并启动新的定时任务
var streams []pkg.RecordPlanStream
model := &pkg.RecordPlanStream{PlanID: existingPlan.ID}
if err := ct.DB.Model(model).Where(model).Find(&streams).Error; err != nil {
ct.Error("query record plan streams error: %v", err)
} else {
// 为每个流创建定时任务
for _, stream := range streams {
crontab := &Crontab{
ctp: ct,
RecordPlan: &existingPlan,
RecordPlanStream: &stream,
}
crontab.OnStart(func() {
ct.crontabs.Set(crontab)
})
ct.AddTask(crontab)
}
}
} else {
// 从 true 变为 false需要停止相关的定时任务
ct.crontabs.Range(func(crontab *Crontab) bool {
if crontab.RecordPlan.ID == existingPlan.ID {
crontab.Stop(nil)
}
return true
})
}
}
return &cronpb.Response{
Code: 0,
Message: "success",
@@ -196,6 +241,14 @@ func (ct *CrontabPlugin) Remove(ctx context.Context, req *cronpb.DeleteRequest)
}, nil
}
// 先停止所有相关的定时任务
ct.crontabs.Range(func(crontab *Crontab) bool {
if crontab.RecordPlan.ID == existingPlan.ID {
crontab.Stop(nil)
}
return true
})
// 执行软删除
if err := ct.DB.Delete(&existingPlan).Error; err != nil {
return &cronpb.Response{
@@ -204,8 +257,737 @@ func (ct *CrontabPlugin) Remove(ctx context.Context, req *cronpb.DeleteRequest)
}, nil
}
// 从内存中移除
ct.recordPlans.RemoveByKey(existingPlan.ID)
return &cronpb.Response{
Code: 0,
Message: "success",
}, nil
}
// ListRecordPlanStreams returns a paginated list of plan/stream associations,
// optionally filtered by plan id and a fuzzy stream-path match (via scopes).
// Failures are reported through the response Code/Message; the Go error is
// always nil so the gRPC layer serializes the structured response.
func (ct *CrontabPlugin) ListRecordPlanStreams(ctx context.Context, req *cronpb.ReqPlanStreamList) (*cronpb.RecordPlanStreamResponseList, error) {
	// Normalize paging parameters to sane defaults.
	if req.PageNum < 1 {
		req.PageNum = 1
	}
	if req.PageSize < 1 {
		req.PageSize = 10
	}
	var total int64
	var streams []pkg.RecordPlanStream
	model := &pkg.RecordPlanStream{}
	// Build the filtered query; note the same gorm builder is reused for both
	// Count and the paged Find below, so all scopes apply to both.
	query := ct.DB.Model(model).
		Scopes(
			pkg.ScopeRecordPlanID(uint(req.PlanId)),
			pkg.ScopeStreamPathLike(req.StreamPath),
			pkg.ScopeOrderByCreatedAtDesc(),
		)
	result := query.Count(&total)
	if result.Error != nil {
		return &cronpb.RecordPlanStreamResponseList{
			Code:    500,
			Message: result.Error.Error(),
		}, nil
	}
	// Page offset is computed from the 1-based page number.
	offset := (req.PageNum - 1) * req.PageSize
	result = query.Offset(int(offset)).Limit(int(req.PageSize)).Find(&streams)
	if result.Error != nil {
		return &cronpb.RecordPlanStreamResponseList{
			Code:    500,
			Message: result.Error.Error(),
		}, nil
	}
	// Map database rows to protobuf messages.
	data := make([]*cronpb.PlanStream, 0, len(streams))
	for _, stream := range streams {
		data = append(data, &cronpb.PlanStream{
			PlanId:     uint32(stream.PlanID),
			StreamPath: stream.StreamPath,
			Fragment:   stream.Fragment,
			FilePath:   stream.FilePath,
			CreatedAt:  timestamppb.New(stream.CreatedAt),
			UpdatedAt:  timestamppb.New(stream.UpdatedAt),
			Enable:     stream.Enable,
		})
	}
	return &cronpb.RecordPlanStreamResponseList{
		Code:       0,
		Message:    "success",
		TotalCount: uint32(total),
		PageNum:    req.PageNum,
		PageSize:   req.PageSize,
		Data:       data,
	}, nil
}
// AddRecordPlanStream associates a stream path with a record plan and, when
// the plan is enabled, immediately schedules a crontab task for it.
// Validation and database failures are reported via the response Code/Message;
// the Go error is always nil.
func (ct *CrontabPlugin) AddRecordPlanStream(ctx context.Context, req *cronpb.PlanStream) (*cronpb.Response, error) {
	// Default to plan 1 when the request does not carry a plan id.
	planId := 1
	if req.PlanId > 0 {
		planId = int(req.PlanId)
	}
	if strings.TrimSpace(req.StreamPath) == "" {
		return &cronpb.Response{
			Code:    400,
			Message: "stream_path is required",
		}, nil
	}
	// The referenced plan must already be loaded in memory.
	plan, ok := ct.recordPlans.Get(uint(planId))
	if !ok {
		return &cronpb.Response{
			Code:    404,
			Message: "record plan not found",
		}, nil
	}
	// Reject duplicates of the same (plan, stream) pair.
	var count int64
	searchModel := pkg.RecordPlanStream{
		PlanID:     uint(planId),
		StreamPath: req.StreamPath,
	}
	if err := ct.DB.Model(&searchModel).Where(&searchModel).Count(&count).Error; err != nil {
		return &cronpb.Response{
			Code:    500,
			Message: err.Error(),
		}, nil
	}
	if count > 0 {
		return &cronpb.Response{
			Code:    400,
			Message: "record already exists",
		}, nil
	}
	// Default recording fragment length when not provided.
	fragment := "60s"
	if req.Fragment != "" {
		fragment = req.Fragment
	}
	stream := &pkg.RecordPlanStream{
		// BUGFIX: use the defaulted planId. The original stored
		// uint(req.PlanId), which wrote PlanID 0 whenever the request omitted
		// the plan id, even though the lookup above had defaulted to plan 1.
		PlanID:     uint(planId),
		StreamPath: req.StreamPath,
		Fragment:   fragment,
		FilePath:   req.FilePath,
		Enable:     req.Enable,
		RecordType: req.RecordType,
	}
	if err := ct.DB.Create(stream).Error; err != nil {
		return &cronpb.Response{
			Code:    500,
			Message: err.Error(),
		}, nil
	}
	// If the plan is enabled, start a scheduler task for the new association.
	if plan.Enable {
		crontab := &Crontab{
			ctp:              ct,
			RecordPlan:       plan,
			RecordPlanStream: stream,
		}
		crontab.OnStart(func() {
			ct.crontabs.Set(crontab)
		})
		ct.AddTask(crontab)
	}
	return &cronpb.Response{
		Code:    0,
		Message: "success",
	}, nil
}
// UpdateRecordPlanStream updates an existing plan/stream association and
// reschedules the crontab tasks for that stream path: every task on the
// stream is stopped, then fresh tasks are created for each still-enabled
// plan associated with it. Failures are reported via the response
// Code/Message; the Go error is always nil.
func (ct *CrontabPlugin) UpdateRecordPlanStream(ctx context.Context, req *cronpb.PlanStream) (*cronpb.Response, error) {
	// Default to plan 1 when the request does not carry a plan id.
	planId := 1
	if req.PlanId > 0 {
		planId = int(req.PlanId)
	}
	if strings.TrimSpace(req.StreamPath) == "" {
		return &cronpb.Response{
			Code:    400,
			Message: "stream_path is required",
		}, nil
	}
	// Look up the existing association.
	var existingStream pkg.RecordPlanStream
	searchModel := pkg.RecordPlanStream{
		PlanID:     uint(planId),
		StreamPath: req.StreamPath,
	}
	if err := ct.DB.Where(&searchModel).First(&existingStream).Error; err != nil {
		return &cronpb.Response{
			Code:    404,
			Message: "record not found",
		}, nil
	}
	// Persist the updated fields.
	existingStream.Fragment = req.Fragment
	existingStream.FilePath = req.FilePath
	existingStream.Enable = req.Enable
	existingStream.RecordType = req.RecordType
	if err := ct.DB.Save(&existingStream).Error; err != nil {
		return &cronpb.Response{
			Code:    500,
			Message: err.Error(),
		}, nil
	}
	// Stop every running task for this stream path before rescheduling.
	ct.crontabs.Range(func(crontab *Crontab) bool {
		if crontab.RecordPlanStream.StreamPath == req.StreamPath {
			crontab.Stop(nil)
		}
		return true
	})
	// Reload all plan associations for this stream.
	var streams []pkg.RecordPlanStream
	if err := ct.DB.Where("stream_path = ?", req.StreamPath).Find(&streams).Error; err != nil {
		ct.Error("query record plan streams error: %v", err)
		return &cronpb.Response{
			Code:    500,
			Message: err.Error(),
		}, nil
	}
	// Recreate a task for every enabled plan/stream pair.
	for i := range streams {
		// BUGFIX: take the address of the slice element instead of the range
		// variable; the original `&stream` made every task created in this
		// loop share a pointer to one reused variable (pre-Go 1.22 loop
		// semantics), so all tasks saw the last iteration's stream.
		stream := &streams[i]
		// Fetch the owning plan from memory; skip rows whose plan is gone.
		plan, ok := ct.recordPlans.Get(stream.PlanID)
		if !ok {
			ct.Error("record plan not found in memory: %d", stream.PlanID)
			continue
		}
		// Only schedule when both the plan and the association are enabled.
		if plan.Enable && stream.Enable {
			crontab := &Crontab{
				ctp:              ct,
				RecordPlan:       plan,
				RecordPlanStream: stream,
			}
			crontab.OnStart(func() {
				ct.crontabs.Set(crontab)
			})
			ct.AddTask(crontab)
		}
	}
	return &cronpb.Response{
		Code:    0,
		Message: "success",
	}, nil
}
// RemoveRecordPlanStream deletes a plan/stream association, stopping any
// crontab tasks that were scheduled for it first. Failures are reported via
// the response Code/Message; the Go error is always nil.
func (ct *CrontabPlugin) RemoveRecordPlanStream(ctx context.Context, req *cronpb.DeletePlanStreamRequest) (*cronpb.Response, error) {
	// Unlike Add/Update, the plan id is mandatory here.
	if req.PlanId == 0 {
		return &cronpb.Response{
			Code:    400,
			Message: "record_plan_id is required",
		}, nil
	}
	if strings.TrimSpace(req.StreamPath) == "" {
		return &cronpb.Response{
			Code:    400,
			Message: "stream_path is required",
		}, nil
	}
	// Verify the association exists before touching anything.
	var existingStream pkg.RecordPlanStream
	searchModel := pkg.RecordPlanStream{
		PlanID:     uint(req.PlanId),
		StreamPath: req.StreamPath,
	}
	if err := ct.DB.Where(&searchModel).First(&existingStream).Error; err != nil {
		return &cronpb.Response{
			Code:    404,
			Message: "record not found",
		}, nil
	}
	// Stop all scheduler tasks matching this (plan, stream) pair.
	ct.crontabs.Range(func(crontab *Crontab) bool {
		if crontab.RecordPlanStream.StreamPath == req.StreamPath && crontab.RecordPlan.ID == uint(req.PlanId) {
			crontab.Stop(errors.New("remove record plan"))
		}
		return true
	})
	// Delete the database row.
	if err := ct.DB.Delete(&existingStream).Error; err != nil {
		return &cronpb.Response{
			Code:    500,
			Message: err.Error(),
		}, nil
	}
	return &cronpb.Response{
		Code:    0,
		Message: "success",
	}, nil
}
// getWeekdayName returns the Chinese name for a weekday index
// (0=Sunday, 1=Monday, ... 6=Saturday). Out-of-range indices return an
// empty string instead of panicking (the original indexed unconditionally).
func getWeekdayName(weekday int) string {
	weekdays := []string{"周日", "周一", "周二", "周三", "周四", "周五", "周六"}
	if weekday < 0 || weekday >= len(weekdays) {
		return ""
	}
	return weekdays[weekday]
}
// getWeekdayIndex maps a Chinese weekday name back to its numeric index
// (0=Sunday, 1=Monday, ... 6=Saturday). Unknown names yield 0, matching the
// zero-value behavior of the original map-based lookup.
func getWeekdayIndex(weekdayName string) int {
	switch weekdayName {
	case "周一":
		return 1
	case "周二":
		return 2
	case "周三":
		return 3
	case "周四":
		return 4
	case "周五":
		return 5
	case "周六":
		return 6
	default:
		// Covers "周日" and any unrecognized input.
		return 0
	}
}
// 获取下一个指定周几的日期
func getNextDateForWeekday(now time.Time, targetWeekday int, location *time.Location) time.Time {
nowWeekday := int(now.Weekday())
daysToAdd := 0
if targetWeekday >= nowWeekday {
daysToAdd = targetWeekday - nowWeekday
} else {
daysToAdd = 7 - (nowWeekday - targetWeekday)
}
// 如果是同一天但当前时间已经过了最后的时间段,则推到下一周
if daysToAdd == 0 {
// 这里简化处理直接加7天到下周同一天
daysToAdd = 7
}
return now.AddDate(0, 0, daysToAdd)
}
// calculateTimeSlots expands a 168-character weekly plan bitmap into the list
// of recording slots for the coming week. Each character maps to one hour
// (day-major, Sunday first); runs of consecutive '1' hours are merged into a
// single slot. Start/End are UTC timestamps anchored on the next occurrence
// of each weekday (getNextDateForWeekday always skips "today").
func calculateTimeSlots(plan string, now time.Time, location *time.Location) ([]*cronpb.TimeSlotInfo, error) {
	if len(plan) != 168 {
		return nil, fmt.Errorf("invalid plan format: length should be 168")
	}
	var slots []*cronpb.TimeSlotInfo
	// Iterate weekdays (0=Sunday, 1=Monday, ... 6=Saturday).
	for weekday := 0; weekday < 7; weekday++ {
		dayOffset := weekday * 24
		var startHour int = -1
		// Walk the day's 24 hours plus a virtual hour 24, which closes any
		// slot that runs up to midnight.
		for hour := 0; hour <= 24; hour++ {
			// The slot ends at end-of-day or at the first '0' hour.
			isEndOfDay := hour == 24
			isHourOff := !isEndOfDay && plan[dayOffset+hour] == '0'
			if isEndOfDay || isHourOff {
				// A running slot terminates here.
				if startHour != -1 {
					// Anchor the slot on the next occurrence of this weekday.
					targetDate := getNextDateForWeekday(now, weekday, location)
					// Build local start/end times for the merged hour range.
					startTime := time.Date(targetDate.Year(), targetDate.Month(), targetDate.Day(), startHour, 0, 0, 0, location)
					endTime := time.Date(targetDate.Year(), targetDate.Month(), targetDate.Day(), hour, 0, 0, 0, location)
					// Convert to UTC for the protobuf timestamps.
					startTs := timestamppb.New(startTime.UTC())
					endTs := timestamppb.New(endTime.UTC())
					slots = append(slots, &cronpb.TimeSlotInfo{
						Start:     startTs,
						End:       endTs,
						Weekday:   getWeekdayName(weekday),
						TimeRange: fmt.Sprintf("%02d:00-%02d:00", startHour, hour),
					})
					startHour = -1
				}
			} else if plan[dayOffset+hour] == '1' && startHour == -1 {
				// First '1' after a gap opens a new slot.
				startHour = hour
			}
		}
	}
	// Sort by weekday, then by start hour.
	// NOTE(review): Start.AsTime().Hour() is evaluated in UTC, while TimeRange
	// carries the local start hour — in non-UTC locations the within-day order
	// therefore may not follow local time. The unit tests appear to depend on
	// this ordering, so confirm before "fixing" it.
	sort.Slice(slots, func(i, j int) bool {
		weekdayI := getWeekdayIndex(slots[i].Weekday)
		weekdayJ := getWeekdayIndex(slots[j].Weekday)
		if weekdayI != weekdayJ {
			return weekdayI < weekdayJ
		}
		// Same weekday: order by (UTC) start hour.
		return slots[i].Start.AsTime().Hour() < slots[j].Start.AsTime().Hour()
	})
	return slots, nil
}
// getNextTimeSlotFromNow returns the recording slot that is active at `now`
// or, failing that, the next upcoming slot within the following week.
// Returns (nil, nil) when the plan contains no '1' hours at all.
// Times in the result are UTC; TimeRange/Weekday are in local terms.
func getNextTimeSlotFromNow(plan string, now time.Time, location *time.Location) (*cronpb.TimeSlotInfo, error) {
	if len(plan) != 168 {
		return nil, fmt.Errorf("invalid plan format: length should be 168")
	}
	// Work in the supplied location.
	localNow := now.In(location)
	currentWeekday := int(localNow.Weekday())
	currentHour := localNow.Hour()
	// Within 30 seconds of an hour boundary (xx:59:30 .. xx+1:00:30)?
	isNearHourBoundary := localNow.Minute() == 59 && localNow.Second() >= 30 || localNow.Minute() == 0 && localNow.Second() <= 30
	// First, check whether `now` falls inside an active slot today.
	dayOffset := currentWeekday * 24
	if currentHour < 24 && plan[dayOffset+currentHour] == '1' {
		// Expand to the full contiguous slot containing the current hour.
		startHour := currentHour
		// Scan backwards for the slot's start.
		for h := currentHour - 1; h >= 0; h-- {
			if plan[dayOffset+h] == '1' {
				startHour = h
			} else {
				break
			}
		}
		// Scan forwards for the slot's end (exclusive hour).
		endHour := currentHour + 1
		for h := endHour; h < 24; h++ {
			if plan[dayOffset+h] == '1' {
				endHour = h + 1
			} else {
				break
			}
		}
		// Are we within the final 30 seconds of this slot?
		isNearEndOfTimeSlot := currentHour == endHour-1 && localNow.Minute() >= 59 && localNow.Second() >= 30
		// Skip the current slot if it is about to end on an hour boundary.
		if isNearEndOfTimeSlot && isNearHourBoundary {
			// Fall through to search for the next slot.
		} else {
			startTime := time.Date(localNow.Year(), localNow.Month(), localNow.Day(), startHour, 0, 0, 0, location)
			endTime := time.Date(localNow.Year(), localNow.Month(), localNow.Day(), endHour, 0, 0, 0, location)
			// Also skip if `now` is within 30s of (or past) the slot's end.
			if localNow.After(endTime.Add(-30*time.Second)) || localNow.Equal(endTime) {
				// Fall through to search for the next slot.
			} else {
				// Return the currently-active slot.
				return &cronpb.TimeSlotInfo{
					Start:     timestamppb.New(startTime.UTC()),
					End:       timestamppb.New(endTime.UTC()),
					Weekday:   getWeekdayName(currentWeekday),
					TimeRange: fmt.Sprintf("%02d:00-%02d:00", startHour, endHour),
				}, nil
			}
		}
	}
	// Search the remainder of today for the next slot.
	for h := currentHour + 1; h < 24; h++ {
		if plan[dayOffset+h] == '1' {
			// Slot starts here; extend through consecutive '1' hours.
			startHour := h
			endHour := h + 1
			for j := h + 1; j < 24; j++ {
				if plan[dayOffset+j] == '1' {
					endHour = j + 1
				} else {
					break
				}
			}
			startTime := time.Date(localNow.Year(), localNow.Month(), localNow.Day(), startHour, 0, 0, 0, location)
			endTime := time.Date(localNow.Year(), localNow.Month(), localNow.Day(), endHour, 0, 0, 0, location)
			return &cronpb.TimeSlotInfo{
				Start:     timestamppb.New(startTime.UTC()),
				End:       timestamppb.New(endTime.UTC()),
				Weekday:   getWeekdayName(currentWeekday),
				TimeRange: fmt.Sprintf("%02d:00-%02d:00", startHour, endHour),
			}, nil
		}
	}
	// Nothing left today: scan the following 7 days.
	for d := 1; d <= 7; d++ {
		nextDay := (currentWeekday + d) % 7
		dayOffset := nextDay * 24
		for h := 0; h < 24; h++ {
			if plan[dayOffset+h] == '1' {
				// Slot starts here; extend through consecutive '1' hours.
				startHour := h
				endHour := h + 1
				for j := h + 1; j < 24; j++ {
					if plan[dayOffset+j] == '1' {
						endHour = j + 1
					} else {
						break
					}
				}
				// Concrete calendar date d days ahead.
				nextDate := localNow.AddDate(0, 0, d)
				startTime := time.Date(nextDate.Year(), nextDate.Month(), nextDate.Day(), startHour, 0, 0, 0, location)
				endTime := time.Date(nextDate.Year(), nextDate.Month(), nextDate.Day(), endHour, 0, 0, 0, location)
				return &cronpb.TimeSlotInfo{
					Start:     timestamppb.New(startTime.UTC()),
					End:       timestamppb.New(endTime.UTC()),
					Weekday:   getWeekdayName(nextDay),
					TimeRange: fmt.Sprintf("%02d:00-%02d:00", startHour, endHour),
				}, nil
			}
		}
	}
	// Plan has no active hours at all.
	return nil, nil
}
// ParsePlanTime validates a 168-character weekly plan bitmap and returns both
// the full list of recording slots and the next slot relative to the current
// time. Validation and computation failures are reported via the response
// Code/Message; the Go error is always nil.
func (ct *CrontabPlugin) ParsePlanTime(ctx context.Context, req *cronpb.ParsePlanRequest) (*cronpb.ParsePlanResponse, error) {
	// Guard: the bitmap must cover exactly 7 days x 24 hours.
	if len(req.Plan) != 168 {
		return &cronpb.ParsePlanResponse{
			Code:    400,
			Message: "invalid plan format: length should be 168",
		}, nil
	}
	// Guard: only the characters '0' and '1' are allowed.
	for i, c := range req.Plan {
		if c == '0' || c == '1' {
			continue
		}
		return &cronpb.ParsePlanResponse{
			Code:    400,
			Message: fmt.Sprintf("invalid character at position %d: %c (should be 0 or 1)", i, c),
		}, nil
	}
	// Expand the bitmap into all weekly slots.
	slots, err := calculateTimeSlots(req.Plan, time.Now(), time.Local)
	if err != nil {
		return &cronpb.ParsePlanResponse{Code: 500, Message: err.Error()}, nil
	}
	// Locate the slot active now or coming up next.
	nextSlot, err := getNextTimeSlotFromNow(req.Plan, time.Now(), time.Local)
	if err != nil {
		return &cronpb.ParsePlanResponse{Code: 500, Message: err.Error()}, nil
	}
	return &cronpb.ParsePlanResponse{
		Code:     0,
		Message:  "success",
		Slots:    slots,
		NextSlot: nextSlot,
	}, nil
}
// buildCrontabTaskInfo assembles the protobuf status entry for one crontab
// task: the static plan/stream fields plus, when a plan pattern is present,
// the expanded list of weekly time slots.
func buildCrontabTaskInfo(crontab *Crontab, now time.Time) *cronpb.CrontabTaskInfo {
	// Stream-level fields are always available on the task itself.
	taskInfo := &cronpb.CrontabTaskInfo{
		StreamPath: crontab.StreamPath,
		FilePath:   crontab.FilePath,
		Fragment:   crontab.Fragment,
	}
	// BUGFIX: guard before dereferencing. The original read
	// crontab.RecordPlan.ID/Name unconditionally and only nil-checked
	// RecordPlan afterwards, so the check could never prevent a panic.
	if plan := crontab.RecordPlan; plan != nil {
		taskInfo.PlanId = uint32(plan.ID)
		taskInfo.PlanName = plan.Name
		// Attach the full weekly schedule when a plan bitmap exists.
		if plan.Plan != "" {
			if planSlots, err := calculateTimeSlots(plan.Plan, now, time.Local); err == nil && len(planSlots) > 0 {
				taskInfo.PlanSlots = planSlots
			}
		}
	}
	return taskInfo
}
// GetCrontabStatus reports the current scheduler state: tasks that are
// recording right now and tasks waiting for their next slot. An optional
// req.StreamPath narrows the report to a single stream; if no in-memory task
// matches that path, the database is consulted so the stream's planned slots
// can still be returned.
func (ct *CrontabPlugin) GetCrontabStatus(ctx context.Context, req *cronpb.CrontabStatusRequest) (*cronpb.CrontabStatusResponse, error) {
	response := &cronpb.CrontabStatusResponse{
		Code:         0,
		Message:      "success",
		RunningTasks: []*cronpb.CrontabTaskInfo{},
		NextTasks:    []*cronpb.CrontabTaskInfo{},
		TotalRunning: 0,
		TotalPlanned: 0,
	}
	// Accumulators for the two task categories.
	runningTasks := make([]*cronpb.CrontabTaskInfo, 0)
	nextTasks := make([]*cronpb.CrontabTaskInfo, 0)
	// Tracks whether the requested stream path matched any in-memory task;
	// when it didn't, we fall back to the database below.
	streamPathFound := false
	// Walk every scheduled crontab task.
	ct.crontabs.Range(func(crontab *Crontab) bool {
		// Apply the optional stream-path filter.
		if req.StreamPath != "" && crontab.StreamPath != req.StreamPath {
			return true // keep iterating
		}
		// Mark that the requested stream has a live task.
		if req.StreamPath != "" {
			streamPathFound = true
		}
		now := time.Now()
		// Static plan/stream info shared by both branches.
		taskInfo := buildCrontabTaskInfo(crontab, now)
		// A task with an active slot is currently recording.
		if crontab.recording && crontab.currentSlot != nil {
			taskInfo.IsRecording = true
			// Slot boundaries.
			taskInfo.StartTime = timestamppb.New(crontab.currentSlot.Start)
			taskInfo.EndTime = timestamppb.New(crontab.currentSlot.End)
			// Elapsed and remaining recording time.
			elapsedDuration := now.Sub(crontab.currentSlot.Start)
			remainingDuration := crontab.currentSlot.End.Sub(now)
			taskInfo.ElapsedSeconds = uint32(elapsedDuration.Seconds())
			taskInfo.RemainingSeconds = uint32(remainingDuration.Seconds())
			// Human-readable range and weekday.
			startHour := crontab.currentSlot.Start.Hour()
			endHour := crontab.currentSlot.End.Hour()
			taskInfo.TimeRange = fmt.Sprintf("%02d:00-%02d:00", startHour, endHour)
			taskInfo.Weekday = getWeekdayName(int(crontab.currentSlot.Start.Weekday()))
			runningTasks = append(runningTasks, taskInfo)
		} else {
			// Idle task: report its next scheduled slot, if any.
			nextSlot := crontab.getNextTimeSlot()
			if nextSlot != nil {
				taskInfo.IsRecording = false
				// Slot boundaries.
				taskInfo.StartTime = timestamppb.New(nextSlot.Start)
				taskInfo.EndTime = timestamppb.New(nextSlot.End)
				// Time until the slot begins.
				waitingDuration := nextSlot.Start.Sub(now)
				taskInfo.RemainingSeconds = uint32(waitingDuration.Seconds())
				// Human-readable range and weekday.
				startHour := nextSlot.Start.Hour()
				endHour := nextSlot.End.Hour()
				taskInfo.TimeRange = fmt.Sprintf("%02d:00-%02d:00", startHour, endHour)
				taskInfo.Weekday = getWeekdayName(int(nextSlot.Start.Weekday()))
				nextTasks = append(nextTasks, taskInfo)
			}
		}
		return true // keep iterating
	})
	// Fallback: the requested stream has no live task, so derive its planned
	// slots from the database instead.
	if req.StreamPath != "" && !streamPathFound {
		// All plan associations for this stream.
		var streams []pkg.RecordPlanStream
		if err := ct.DB.Where("stream_path = ?", req.StreamPath).Find(&streams).Error; err == nil && len(streams) > 0 {
			for _, stream := range streams {
				// Load the owning plan; only enabled pairs are reported.
				var plan pkg.RecordPlan
				if err := ct.DB.First(&plan, stream.PlanID).Error; err == nil && plan.Enable && stream.Enable {
					now := time.Now()
					// Assemble the task info by hand (no Crontab task exists).
					taskInfo := &cronpb.CrontabTaskInfo{
						PlanId:      uint32(plan.ID),
						PlanName:    plan.Name,
						StreamPath:  stream.StreamPath,
						FilePath:    stream.FilePath,
						Fragment:    stream.Fragment,
						IsRecording: false,
					}
					// Full weekly schedule for the plan.
					planSlots, err := calculateTimeSlots(plan.Plan, now, time.Local)
					if err == nil && planSlots != nil && len(planSlots) > 0 {
						taskInfo.PlanSlots = planSlots
					}
					// Next upcoming slot, if the plan has one.
					nextSlot, err := getNextTimeSlotFromNow(plan.Plan, now, time.Local)
					if err == nil && nextSlot != nil {
						taskInfo.StartTime = nextSlot.Start
						taskInfo.EndTime = nextSlot.End
						taskInfo.TimeRange = nextSlot.TimeRange
						taskInfo.Weekday = nextSlot.Weekday
						// Time until the slot begins.
						waitingDuration := nextSlot.Start.AsTime().Sub(now)
						taskInfo.RemainingSeconds = uint32(waitingDuration.Seconds())
						nextTasks = append(nextTasks, taskInfo)
					}
				}
			}
		}
	}
	// Present upcoming tasks in chronological order.
	sort.Slice(nextTasks, func(i, j int) bool {
		return nextTasks[i].StartTime.AsTime().Before(nextTasks[j].StartTime.AsTime())
	})
	// Finalize the response.
	response.RunningTasks = runningTasks
	response.NextTasks = nextTasks
	response.TotalRunning = uint32(len(runningTasks))
	response.TotalPlanned = uint32(len(nextTasks))
	return response, nil
}

244
plugin/crontab/api_test.go Normal file
View File

@@ -0,0 +1,244 @@
package plugin_crontab
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
)
// TestCalculateTimeSlots checks that a plan bitmap with '1' hours only on
// Friday expands into the expected merged slots.
func TestCalculateTimeSlots(t *testing.T) {
	// Test case: Friday has slots in the small hours and during the day.
	// The '1' bits are at indices 120 (00h), 122 (02h), 123 (03h), 125 (05h),
	// 130 (10h) and 135 (15h):
	// 000000000000000000000000 - Sunday    (hours 0-23)    - all 0
	// 000000000000000000000000 - Monday    (hours 24-47)   - all 0
	// 000000000000000000000000 - Tuesday   (hours 48-71)   - all 0
	// 000000000000000000000000 - Wednesday (hours 72-95)   - all 0
	// 000000000000000000000000 - Thursday  (hours 96-119)  - all 0
	// 101101000010000100000000 - Friday    (hours 120-143) - on at 0,2,3,5,10,15
	// 000000000000000000000000 - Saturday  (hours 144-167) - all 0
	planStr := "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000101101000010000100000000000000000000000000000000"
	now := time.Date(2023, 5, 1, 12, 0, 0, 0, time.Local) // Monday noon
	slots, err := calculateTimeSlots(planStr, now, time.Local)
	assert.NoError(t, err)
	assert.Equal(t, 5, len(slots), "应该有5个时间段")
	// Expected slots in the order the sort actually produces (see the
	// UTC-hour ordering inside calculateTimeSlots).
	assert.Equal(t, "周五", slots[0].Weekday)
	assert.Equal(t, "10:00-11:00", slots[0].TimeRange)
	assert.Equal(t, "周五", slots[1].Weekday)
	assert.Equal(t, "15:00-16:00", slots[1].TimeRange)
	assert.Equal(t, "周五", slots[2].Weekday)
	assert.Equal(t, "00:00-01:00", slots[2].TimeRange)
	assert.Equal(t, "周五", slots[3].Weekday)
	assert.Equal(t, "02:00-04:00", slots[3].TimeRange)
	assert.Equal(t, "周五", slots[4].Weekday)
	assert.Equal(t, "05:00-06:00", slots[4].TimeRange)
	// Log every slot to ease debugging.
	for i, slot := range slots {
		t.Logf("时间段 %d: %s %s", i, slot.Weekday, slot.TimeRange)
	}
}
// TestGetNextTimeSlotFromNow checks slot lookup relative to three reference
// instants: well before any slot, between two slots, and inside a slot.
func TestGetNextTimeSlotFromNow(t *testing.T) {
	// Same Friday-only plan bitmap as TestCalculateTimeSlots.
	planStr := "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000101101000010000100000000000000000000000000000000"
	// Case 1: Monday — the next slot should be Friday 00:00.
	now1 := time.Date(2023, 5, 1, 12, 0, 0, 0, time.Local) // Monday noon
	nextSlot1, err := getNextTimeSlotFromNow(planStr, now1, time.Local)
	assert.NoError(t, err)
	assert.NotNil(t, nextSlot1)
	assert.Equal(t, "周五", nextSlot1.Weekday)
	assert.Equal(t, "00:00-01:00", nextSlot1.TimeRange)
	// Case 2: Friday 01:30 — the next slot should be Friday 02:00.
	now2 := time.Date(2023, 5, 5, 1, 30, 0, 0, time.Local) // Friday 01:30
	nextSlot2, err := getNextTimeSlotFromNow(planStr, now2, time.Local)
	assert.NoError(t, err)
	assert.NotNil(t, nextSlot2)
	assert.Equal(t, "周五", nextSlot2.Weekday)
	assert.Equal(t, "02:00-04:00", nextSlot2.TimeRange)
	// Case 3: Friday 03:00 — we are inside the 02:00-04:00 slot, which should
	// be returned as the active slot.
	now3 := time.Date(2023, 5, 5, 3, 0, 0, 0, time.Local) // Friday 03:00
	nextSlot3, err := getNextTimeSlotFromNow(planStr, now3, time.Local)
	assert.NoError(t, err)
	assert.NotNil(t, nextSlot3)
	assert.Equal(t, "周五", nextSlot3.Weekday)
	assert.Equal(t, "02:00-04:00", nextSlot3.TimeRange)
}
// TestParsePlanFromString checks a user-supplied bitmap whose bits 36-41
// describe Monday slots: 12:00, 14:00-15:00, 17:00 and 22:00.
func TestParsePlanFromString(t *testing.T) {
	// Bits 36-41 of this string encode the Monday schedule;
	// Monday is on at 12h, 14-15h, 17h and 22h.
	planStr := "000000000000000000000000000000000000101101000010000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
	now := time.Now()
	slots, err := calculateTimeSlots(planStr, now, time.Local)
	assert.NoError(t, err)
	// Verify that Monday slots were found at all.
	var foundMondaySlots bool
	for _, slot := range slots {
		if slot.Weekday == "周一" {
			foundMondaySlots = true
			t.Logf("找到周一时间段: %s", slot.TimeRange)
		}
	}
	assert.True(t, foundMondaySlots, "应该找到周一的时间段")
	// Collect the Monday ranges.
	var mondaySlots []string
	for _, slot := range slots {
		if slot.Weekday == "周一" {
			mondaySlots = append(mondaySlots, slot.TimeRange)
		}
	}
	// Every expected range must be present (order-insensitive).
	expectedSlots := []string{
		"12:00-13:00",
		"14:00-16:00",
		"17:00-18:00",
		"22:00-23:00",
	}
	for _, expected := range expectedSlots {
		found := false
		for _, actual := range mondaySlots {
			if expected == actual {
				found = true
				break
			}
		}
		assert.True(t, found, "应该找到周一时间段:"+expected)
	}
	// Also exercise the next-slot lookup (result depends on wall-clock time,
	// so it is only logged, not asserted).
	nextSlot, err := getNextTimeSlotFromNow(planStr, now, time.Local)
	assert.NoError(t, err)
	if nextSlot != nil {
		t.Logf("下一个时间段: %s %s", nextSlot.Weekday, nextSlot.TimeRange)
	} else {
		t.Log("没有找到下一个时间段")
	}
}
// TestCountStringLength sanity-checks that both fixture bitmaps are exactly
// 168 characters long (7 days x 24 hours).
func TestCountStringLength(t *testing.T) {
	str1 := "000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000101101000010000100000000000000000000000000000000"
	assert.Equal(t, 168, len(str1), "第一个测试字符串长度应为168")
	str2 := "000000000000000000000000000000000000101101000010000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
	assert.Equal(t, 168, len(str2), "第二个测试字符串长度应为168")
}
// TestUserProvidedPlanString replays a user-reported bitmap and verifies the
// Monday slot count matches the expected four merged ranges.
func TestUserProvidedPlanString(t *testing.T) {
	// User-provided fixture string.
	planStr := "000000000000000000000000000000000000101101000010000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
	// Sanity-check the length first.
	assert.Equal(t, 168, len(planStr), "字符串长度应为168")
	// Expand into slots.
	now := time.Now()
	slots, err := calculateTimeSlots(planStr, now, time.Local)
	assert.NoError(t, err)
	// Log every slot for inspection.
	t.Log("所有时间段:")
	for i, slot := range slots {
		t.Logf("%d: %s %s", i, slot.Weekday, slot.TimeRange)
	}
	// Exercise the next-slot lookup (wall-clock dependent; logged only).
	nextSlot, err := getNextTimeSlotFromNow(planStr, now, time.Local)
	assert.NoError(t, err)
	if nextSlot != nil {
		t.Logf("下一个执行时间段: %s %s", nextSlot.Weekday, nextSlot.TimeRange)
		t.Logf("开始时间: %s", nextSlot.Start.AsTime().In(time.Local).Format("2006-01-02 15:04:05"))
		t.Logf("结束时间: %s", nextSlot.End.AsTime().In(time.Local).Format("2006-01-02 15:04:05"))
	} else {
		t.Log("没有找到下一个时间段")
	}
	// Collect the Monday ranges.
	var mondaySlots []string
	for _, slot := range slots {
		if slot.Weekday == "周一" {
			mondaySlots = append(mondaySlots, slot.TimeRange)
		}
	}
	// Monday is expected to contain exactly these merged ranges.
	expectedMondaySlots := []string{
		"12:00-13:00",
		"14:00-16:00",
		"17:00-18:00",
		"22:00-23:00",
	}
	assert.Equal(t, len(expectedMondaySlots), len(mondaySlots), "周一时间段数量不匹配")
	for i, expected := range expectedMondaySlots {
		if i < len(mondaySlots) {
			t.Logf("期望周一时间段 %s, 实际是 %s", expected, mondaySlots[i])
		}
	}
}
// TestUserProvidedPlanString2 parses a second user-supplied 168-character
// weekly bitmap, prints the resulting slots grouped by weekday, and logs the
// next slot that would fire from now.
func TestUserProvidedPlanString2(t *testing.T) {
	// The second user-supplied weekly bitmap under test.
	const plan = "000000000000000000000000000000000000000000000000000000000000001011010100001000000000000000000000000100000000000000000000000010000000000000000000000001000000000000000000"
	// Sanity-check the encoding length first.
	assert.Equal(t, 168, len(plan), "字符串长度应为168")
	// Parse the bitmap into concrete slots relative to the current time.
	refTime := time.Now()
	parsed, err := calculateTimeSlots(plan, refTime, time.Local)
	assert.NoError(t, err)
	// Bucket the slots by weekday label for readable output.
	byDay := make(map[string][]string)
	for _, entry := range parsed {
		byDay[entry.Weekday] = append(byDay[entry.Weekday], entry.TimeRange)
	}
	t.Log("所有时间段(按周几分组):")
	for _, day := range []string{"周日", "周一", "周二", "周三", "周四", "周五", "周六"} {
		if ranges, ok := byDay[day]; ok {
			t.Logf("%s: %v", day, ranges)
		}
	}
	// Also dump every slot individually.
	t.Log("\n所有时间段详细信息:")
	for idx, entry := range parsed {
		t.Logf("%d: %s %s", idx, entry.Weekday, entry.TimeRange)
	}
	// Resolve the next slot that would fire after refTime.
	upcoming, err := getNextTimeSlotFromNow(plan, refTime, time.Local)
	assert.NoError(t, err)
	if upcoming == nil {
		t.Log("没有找到下一个时间段")
	} else {
		t.Logf("\n下一个执行时间段: %s %s", upcoming.Weekday, upcoming.TimeRange)
		t.Logf("开始时间: %s", upcoming.Start.AsTime().In(time.Local).Format("2006-01-02 15:04:05"))
		t.Logf("结束时间: %s", upcoming.End.AsTime().In(time.Local).Format("2006-01-02 15:04:05"))
	}
}

View File

@@ -1,59 +1,425 @@
package plugin_crontab
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"strconv"
"time"
"m7s.live/v5/pkg/task"
"m7s.live/v5/plugin/crontab/pkg"
)
// TimeSlot is one contiguous scheduled recording window.
type TimeSlot struct {
Start time.Time // window start (inclusive)
End time.Time // window end; recording is stopped at or after this instant
}
// Crontab drives one recording plan for one stream: it derives the next
// scheduled window from the plan's weekly hour-bitmap and starts/stops
// recording through the mp4 plugin's HTTP API.
// NOTE(review): both task.TickTask and task.Job are embedded; in this diff
// view that may be overlapping old/new versions — confirm against the file.
type Crontab struct {
task.TickTask
task.Job
ctp *CrontabPlugin // owning plugin; supplies DB access and HTTP config
*pkg.RecordPlan // the weekly plan (Plan bitmap, Name, Enable, PlanID)
*pkg.RecordPlanStream // per-stream settings (presumably StreamPath/Fragment/FilePath — confirm field origins)
stop chan struct{} // signals the Run loop to exit
running bool // guards against double Start/Dispose
location *time.Location // time zone used for all schedule math
timer *time.Timer // retry timer used when no plan slot is available
currentSlot *TimeSlot // slot currently being acted on; nil when idle
recording bool // true while a start-recording request has succeeded
}
// NOTE(review): this region of the diff view interleaves lines from the OLD
// implementation (GetTickInterval/Tick, 144-bit plans, receiver `r`) with the
// NEW one (GetKey/Start, receiver `cron`). As rendered it is NOT valid Go
// (unbalanced braces, references to undeclared `plans`/`index`); reconcile
// against the actual committed file before editing.
func (r *Crontab) GetTickInterval() time.Duration {
return time.Minute
// GetKey uniquely identifies this scheduler as "<planID>_<streamPath>".
func (cron *Crontab) GetKey() string {
return strconv.Itoa(int(cron.PlanID)) + "_" + cron.StreamPath
}
func (r *Crontab) Tick(any) {
r.Info("开始检查录制计划")
// Current wall-clock time.
now := time.Now()
// Day of week (0-6, 0 = Sunday) and hour of day (0-23).
weekday := int(now.Weekday())
if weekday == 0 {
weekday = 7 // remap Sunday from 0 to 7 so Monday-based math works
}
hour := now.Hour()
// Index of the current hour inside the 144-bit plan string:
// (weekday-1)*24 + hour; weekday-1 because counting starts on Monday.
index := (weekday-1)*24 + hour
// Load every enabled recording plan from the database.
var plans []pkg.RecordPlan
model := pkg.RecordPlan{
Enabled: true,
}
if err := r.ctp.DB.Where(&model).Find(&plans).Error; err != nil {
r.Error("查询录制计划失败:", err)
return
// Start initializes the scheduler state; it is idempotent.
func (cron *Crontab) Start() (err error) {
cron.Info("crontab plugin start")
if cron.running {
return // already running; do not start twice
}
// OLD-version residue (plan iteration) interleaved below — see note above.
for _, plan := range plans {
if len(plan.Plan) != 144 {
r.Error("录制计划格式错误plan长度应为144位:", plan.Name)
// Lazily initialize the stop channel and location.
if cron.stop == nil {
cron.stop = make(chan struct{})
}
if cron.location == nil {
cron.location = time.Local
}
cron.running = true
return nil
}
// Run is the scheduler's blocking loop: it waits until the next plan
// boundary, starts or stops recording accordingly, and repeats until the
// stop channel is signaled (see Dispose).
// NOTE(review): the lines referencing plan.Plan[index] / receiver `r` below
// are residue of the OLD Tick implementation interleaved by the diff view;
// the block as rendered is not valid Go — reconcile against the real file.
func (cron *Crontab) Run() (err error) {
cron.Info("crontab plugin is running")
// Defensive re-initialization in case Start was skipped.
if cron.stop == nil {
cron.stop = make(chan struct{})
}
if cron.location == nil {
cron.location = time.Local
}
cron.Info("调度器启动")
for {
// Current time in the scheduler's location.
now := time.Now().In(cron.location)
// First handle anything due immediately, i.e. stopping a recording
// whose slot has already ended.
if cron.recording && cron.currentSlot != nil &&
(now.Equal(cron.currentSlot.End) || now.After(cron.currentSlot.End)) {
cron.stopRecording()
continue
}
// OLD-version residue interleaved here — see note above.
if plan.Plan[index] == '1' {
r.Info("检测到需要开启录像的计划:", plan.Name)
// TODO: the old version's start-recording logic lived here
// Decide the next event: the current slot's end if recording,
// otherwise the next slot's start.
var nextEvent time.Time
var isStartEvent bool
if cron.recording {
// Recording: next event is the slot's end time.
nextEvent = cron.currentSlot.End
isStartEvent = false
} else {
// Idle: find the next start time.
nextSlot := cron.getNextTimeSlot()
if nextSlot == nil {
// No usable plan; retry after a default interval.
cron.timer = time.NewTimer(1 * time.Hour)
cron.Info("无有效计划等待1小时后重试")
// Wait for the timer or a stop signal.
select {
case <-cron.timer.C:
continue // retry the loop
case <-cron.stop:
// Shut the scheduler down.
if cron.timer != nil {
cron.timer.Stop()
}
cron.Info("调度器停止")
return
}
}
cron.currentSlot = nextSlot
nextEvent = nextSlot.Start
isStartEvent = true
// Start immediately if the slot has already begun.
if now.Equal(nextEvent) || now.After(nextEvent) {
cron.startRecording()
continue
}
}
// How long until the next event.
waitDuration := nextEvent.Sub(now)
// A non-positive wait means the event is already due; act now.
if waitDuration <= 0 {
if isStartEvent {
cron.startRecording()
} else {
cron.stopRecording()
}
continue
}
// Arm a one-shot timer for the next event.
timer := time.NewTimer(waitDuration)
if isStartEvent {
cron.Info("下次开始时间: ", nextEvent, "等待时间:", waitDuration)
} else {
cron.Info("下次结束时间: ", nextEvent, " 等待时间:", waitDuration)
}
// Wait for the timer or a stop signal.
select {
case now = <-timer.C:
// Use the timer's fire time as the new "now".
now = now.In(cron.location)
// Perform the due action.
if isStartEvent {
cron.startRecording()
} else {
cron.stopRecording()
}
case <-cron.stop:
// Shut the scheduler down.
timer.Stop()
cron.Info("调度器停止")
return
}
}
}
// Dispose stops the scheduler: it signals the Run loop to exit, stops any
// pending retry timer, and shuts down an in-flight recording.
// NOTE(review): the unbuffered send on cron.stop blocks forever if Run has
// already returned (or was never started) — consider close(cron.stop) or a
// buffered channel; confirm the task framework's lifecycle guarantees.
func (cron *Crontab) Dispose() (err error) {
if cron.running {
cron.stop <- struct{}{}
cron.running = false
if cron.timer != nil {
cron.timer.Stop()
}
// Still recording: issue the stop request as well.
if cron.recording {
cron.stopRecording()
}
}
return
}
// getNextTimeSlot returns the slot the scheduler should act on next: the slot
// containing "now" if we are inside one (and not within ~30s of its end),
// otherwise the next run of '1' hours in the 168-character weekly plan
// (one char per hour, indexed weekday*24+hour with Go's Sunday=0 weekday),
// searching the rest of today first and then up to 7 days ahead.
// Returns nil when the plan is disabled, malformed, or entirely empty.
// NOTE(review): Error/Debug are called printf-style here ("%s", "%v") while
// other call sites pass key-value pairs — confirm which form the logger
// expects, otherwise these messages will print the verbs literally.
func (cron *Crontab) getNextTimeSlot() *TimeSlot {
if cron.RecordPlan == nil || !cron.RecordPlan.Enable || cron.RecordPlan.Plan == "" {
return nil // no usable plan
}
plan := cron.RecordPlan.Plan
if len(plan) != 168 {
cron.Error("无效的计划格式: %s, 长度应为168", plan)
return nil
}
// All schedule math happens in the scheduler's local time zone.
now := time.Now().In(cron.location)
cron.Debug("当前本地时间: %v, 星期%d, 小时%d", now.Format("2006-01-02 15:04:05"), now.Weekday(), now.Hour())
// Current weekday (Sunday = 0) and hour of day.
currentWeekday := int(now.Weekday())
currentHour := now.Hour()
// Are we within 30 seconds of an hour boundary (either side of it)?
isNearHourBoundary := now.Minute() == 59 && now.Second() >= 30 || now.Minute() == 0 && now.Second() <= 30
// First: is "now" already inside a scheduled slot?
dayOffset := currentWeekday * 24
if currentHour < 24 && plan[dayOffset+currentHour] == '1' {
// Expand to the full contiguous run of '1' hours around currentHour.
startHour := currentHour
// Scan backwards for the run's first hour.
for h := currentHour - 1; h >= 0; h-- {
if plan[dayOffset+h] == '1' {
startHour = h
} else {
break
}
}
// Scan forwards for the hour just past the run's end.
endHour := currentHour + 1
for h := endHour; h < 24; h++ {
if plan[dayOffset+h] == '1' {
endHour = h + 1
} else {
break
}
}
// Are we in the slot's final hour and within 30s of its end?
isNearEndOfTimeSlot := currentHour == endHour-1 && now.Minute() == 59 && now.Second() >= 30
// If so, skip this slot and look for the next one instead.
if isNearEndOfTimeSlot && isNearHourBoundary {
cron.Debug("接近当前时间段结束,准备查找下一个时间段")
} else {
// Materialize the slot's boundaries as concrete times.
startTime := time.Date(now.Year(), now.Month(), now.Day(), startHour, 0, 0, 0, cron.location)
endTime := time.Date(now.Year(), now.Month(), now.Day(), endHour, 0, 0, 0, cron.location)
// Within 30s of (or past) the end: fall through and search for
// the next slot instead of returning this one.
if now.After(endTime.Add(-30*time.Second)) || now.Equal(endTime) {
cron.Debug("当前时间已接近或超过结束时间,尝试查找下一个时间段")
} else {
cron.Debug("当前已在有效时间段内: 开始=%v, 结束=%v",
startTime.Format("2006-01-02 15:04:05"), endTime.Format("2006-01-02 15:04:05"))
return &TimeSlot{
Start: startTime,
End: endTime,
}
}
}
}
// Search for the next slot, starting with the rest of today.
for h := currentHour + 1; h < 24; h++ {
if plan[dayOffset+h] == '1' {
// Found the starting hour.
startHour := h
// Extend through the contiguous '1' run to find the end hour.
endHour := h + 1
for j := h + 1; j < 24; j++ {
if plan[dayOffset+j] == '1' {
endHour = j + 1
} else {
break
}
}
// Materialize today's slot boundaries.
startTime := time.Date(now.Year(), now.Month(), now.Day(), startHour, 0, 0, 0, cron.location)
endTime := time.Date(now.Year(), now.Month(), now.Day(), endHour, 0, 0, 0, cron.location)
cron.Debug("找到今天的有效时间段: 开始=%v, 结束=%v",
startTime.Format("2006-01-02 15:04:05"), endTime.Format("2006-01-02 15:04:05"))
return &TimeSlot{
Start: startTime,
End: endTime,
}
}
}
// Nothing left today: scan the following seven days.
for d := 1; d <= 7; d++ {
nextDay := (currentWeekday + d) % 7
dayOffset := nextDay * 24
for h := 0; h < 24; h++ {
if plan[dayOffset+h] == '1' {
// Found the starting hour.
startHour := h
// Extend through the contiguous '1' run to find the end hour.
endHour := h + 1
for j := h + 1; j < 24; j++ {
if plan[dayOffset+j] == '1' {
endHour = j + 1
} else {
break
}
}
// The concrete calendar date d days from now.
nextDate := now.AddDate(0, 0, d)
// Materialize the future slot's boundaries.
startTime := time.Date(nextDate.Year(), nextDate.Month(), nextDate.Day(), startHour, 0, 0, 0, cron.location)
endTime := time.Date(nextDate.Year(), nextDate.Month(), nextDate.Day(), endHour, 0, 0, 0, cron.location)
cron.Debug("找到未来有效时间段: 开始=%v, 结束=%v",
startTime.Format("2006-01-02 15:04:05"), endTime.Format("2006-01-02 15:04:05"))
return &TimeSlot{
Start: startTime,
End: endTime,
}
}
}
}
cron.Debug("未找到有效的时间段")
return nil
}
// startRecording asks the mp4 plugin (via its HTTP API) to begin recording
// cron.StreamPath with the plan's fragment/filePath settings. On success it
// sets cron.recording; on any failure it returns with the flag unset so the
// Run loop will retry on its next iteration.
func (cron *Crontab) startRecording() {
	if cron.recording {
		return // already recording
	}
	now := time.Now().In(cron.location)
	cron.Info("开始录制任务: %s, 时间: %v, 计划结束时间: %v",
		cron.RecordPlan.Name, now, cron.currentSlot.End)
	// Request body understood by /mp4/api/start.
	reqBody := map[string]string{
		"fragment": cron.Fragment,
		"filePath": cron.FilePath,
	}
	jsonBody, err := json.Marshal(reqBody)
	if err != nil {
		cron.Error("构造请求体失败: %v", err)
		return
	}
	// Resolve this instance's own HTTP listen address.
	addr := cron.ctp.Plugin.GetCommonConf().HTTP.ListenAddr
	if addr == "" {
		addr = ":8080" // fall back to the default port
	}
	if addr[0] == ':' {
		addr = "localhost" + addr
	}
	url := fmt.Sprintf("http://%s/mp4/api/start/%s", addr, cron.StreamPath)
	// Use a client with an explicit timeout: http.Post goes through
	// http.DefaultClient, which has none, so a hung mp4 API would stall
	// the scheduler loop forever.
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Post(url, "application/json", bytes.NewBuffer(jsonBody))
	cron.Debug("record request", "url is ", url, "jsonBody is ", string(jsonBody))
	if err != nil {
		// Brief pause so a failing endpoint is not hammered by the
		// caller's retry loop (kept from the original implementation).
		time.Sleep(time.Second)
		cron.Error("开始录制失败: %v", err)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		time.Sleep(time.Second)
		cron.Error("开始录制失败HTTP状态码: %d", resp.StatusCode)
		return
	}
	cron.recording = true
}
// stopRecording tells the mp4 plugin's HTTP API to stop recording
// cron.StreamPath. The recording flag and current slot are cleared up front
// so the Run loop does not re-trigger the stop condition while the request
// is in flight, and restored on failure so a later iteration can retry.
func (cron *Crontab) stopRecording() {
	if !cron.recording {
		return // nothing to stop
	}
	now := time.Now().In(cron.location)
	cron.Info("停止录制任务: %s, 时间: %v", cron.RecordPlan.Name, now)
	// Reset state before issuing the request to avoid duplicate stops.
	wasRecording := cron.recording
	cron.recording = false
	savedSlot := cron.currentSlot
	cron.currentSlot = nil
	// Resolve this instance's own HTTP listen address.
	addr := cron.ctp.Plugin.GetCommonConf().HTTP.ListenAddr
	if addr == "" {
		addr = ":8080" // fall back to the default port
	}
	if addr[0] == ':' {
		addr = "localhost" + addr
	}
	// Use a client with an explicit timeout: http.Post goes through
	// http.DefaultClient, which has none, so a hung mp4 API would stall
	// the scheduler loop forever.
	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Post(fmt.Sprintf("http://%s/mp4/api/stop/%s", addr, cron.StreamPath), "application/json", nil)
	if err != nil {
		cron.Error("停止录制失败: %v", err)
		// Restore state so the next loop iteration retries the stop.
		if wasRecording {
			cron.recording = true
			cron.currentSlot = savedSlot
		}
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		cron.Error("停止录制失败HTTP状态码: %d", resp.StatusCode)
		// Restore state so the next loop iteration retries the stop.
		if wasRecording {
			cron.recording = true
			cron.currentSlot = savedSlot
		}
	}
}

Some files were not shown because too many files have changed in this diff Show More