mirror of
https://github.com/langhuihui/monibuca.git
synced 2025-09-27 18:42:09 +08:00
Compare commits
46 Commits
Author | SHA1 | Date | |
---|---|---|---|
![]() |
fe3ac4e3c5 | ||
![]() |
438a8ddee1 | ||
![]() |
4e68cfccba | ||
![]() |
21b3bd053a | ||
![]() |
b5c339de6b | ||
![]() |
2311931432 | ||
![]() |
f60c9fd421 | ||
![]() |
7ad6136f23 | ||
![]() |
2499963c39 | ||
![]() |
fd089aab9b | ||
![]() |
93bcdfbec2 | ||
![]() |
7bc993a9ed | ||
![]() |
f1e3714729 | ||
![]() |
9869f8110d | ||
![]() |
0786b80cff | ||
![]() |
abafc80494 | ||
![]() |
7d181bf661 | ||
![]() |
8a9fffb987 | ||
![]() |
b6ee2843b0 | ||
![]() |
1a8e2bc816 | ||
![]() |
bc0c761aa8 | ||
![]() |
cabd0e3088 | ||
![]() |
2034f068c0 | ||
![]() |
eba62c4054 | ||
![]() |
a070dc64f8 | ||
![]() |
e10dfec816 | ||
![]() |
96b9cbfc08 | ||
![]() |
2bbee90a9f | ||
![]() |
272def302a | ||
![]() |
04843002bf | ||
![]() |
e4810e9c55 | ||
![]() |
15d830f1eb | ||
![]() |
ad32f6f96e | ||
![]() |
56c4ea5907 | ||
![]() |
28c71545db | ||
![]() |
17faf3f064 | ||
![]() |
131af312f1 | ||
![]() |
cf3b7dfabe | ||
![]() |
584c2e9932 | ||
![]() |
a7f04faa23 | ||
![]() |
966153f873 | ||
![]() |
4391ad2d8d | ||
![]() |
747a5a1104 | ||
![]() |
97d8de523d | ||
![]() |
cad47aec5c | ||
![]() |
baf3640b23 |
5
.cursor/rules/monibuca.mdc
Normal file
5
.cursor/rules/monibuca.mdc
Normal file
@@ -0,0 +1,5 @@
|
||||
---
|
||||
description: build pb
|
||||
alwaysApply: false
|
||||
---
|
||||
如果修改了 proto 文件需要编译,请使用 scripts 目录下的脚本来编译
|
2
.github/workflows/go.yml
vendored
2
.github/workflows/go.yml
vendored
@@ -24,7 +24,7 @@ jobs:
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.23.4
|
||||
go-version: 1.25.0
|
||||
|
||||
- name: Cache Go modules
|
||||
uses: actions/cache@v4
|
||||
|
101
.github/workflows/iflow.yml
vendored
Normal file
101
.github/workflows/iflow.yml
vendored
Normal file
@@ -0,0 +1,101 @@
|
||||
name: '🏷️ iFLOW CLI Automated Issue Triage'
|
||||
|
||||
on:
|
||||
issues:
|
||||
types:
|
||||
- 'opened'
|
||||
- 'reopened'
|
||||
issue_comment:
|
||||
types:
|
||||
- 'created'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
issue_number:
|
||||
description: 'issue number to triage'
|
||||
required: true
|
||||
type: 'number'
|
||||
|
||||
concurrency:
|
||||
group: '${{ github.workflow }}-${{ github.event.issue.number }}'
|
||||
cancel-in-progress: true
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: 'bash'
|
||||
|
||||
permissions:
|
||||
contents: 'read'
|
||||
issues: 'write'
|
||||
statuses: 'write'
|
||||
|
||||
jobs:
|
||||
triage-issue:
|
||||
if: |-
|
||||
github.event_name == 'issues' ||
|
||||
github.event_name == 'workflow_dispatch' ||
|
||||
(
|
||||
github.event_name == 'issue_comment' &&
|
||||
contains(github.event.comment.body, '@iflow-cli /triage') &&
|
||||
contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.comment.author_association)
|
||||
)
|
||||
timeout-minutes: 5
|
||||
runs-on: 'ubuntu-latest'
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: 'Run iFlow CLI Issue Triage'
|
||||
uses: vibe-ideas/iflow-cli-action@main
|
||||
id: 'iflow_cli_issue_triage'
|
||||
env:
|
||||
GITHUB_TOKEN: '${{ secrets.GITHUB_TOKEN }}'
|
||||
ISSUE_TITLE: '${{ github.event.issue.title }}'
|
||||
ISSUE_BODY: '${{ github.event.issue.body }}'
|
||||
ISSUE_NUMBER: '${{ github.event.issue.number }}'
|
||||
REPOSITORY: '${{ github.repository }}'
|
||||
with:
|
||||
api_key: ${{ secrets.IFLOW_API_KEY }}
|
||||
timeout: "3600"
|
||||
extra_args: "--debug"
|
||||
prompt: |
|
||||
## Role
|
||||
|
||||
You are an issue triage assistant. Analyze the current GitHub issue
|
||||
and apply the most appropriate existing labels. Use the available
|
||||
tools to gather information; do not ask for information to be
|
||||
provided.
|
||||
|
||||
## Steps
|
||||
|
||||
1. Run: `gh label list` to get all available labels.
|
||||
2. Review the issue title and body provided in the environment
|
||||
variables: "${ISSUE_TITLE}" and "${ISSUE_BODY}".
|
||||
3. Classify issues by their kind (bug, enhancement, documentation,
|
||||
cleanup, etc) and their priority (p0, p1, p2, p3). Set the
|
||||
labels according to the format `kind/*` and `priority/*` patterns.
|
||||
4. Apply the selected labels to this issue using:
|
||||
`gh issue edit "${ISSUE_NUMBER}" --add-label "label1,label2"`
|
||||
5. If the "status/needs-triage" label is present, remove it using:
|
||||
`gh issue edit "${ISSUE_NUMBER}" --remove-label "status/needs-triage"`
|
||||
|
||||
## Guidelines
|
||||
|
||||
- Only use labels that already exist in the repository
|
||||
- Do not add comments or modify the issue content
|
||||
- Triage only the current issue
|
||||
- Assign all applicable labels based on the issue content
|
||||
- Reference all shell variables as "${VAR}" (with quotes and braces)
|
||||
|
||||
- name: 'Post Issue Triage Failure Comment'
|
||||
if: |-
|
||||
${{ failure() && steps.iflow_cli_issue_triage.outcome == 'failure' }}
|
||||
uses: 'actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea'
|
||||
with:
|
||||
github-token: '${{ secrets.GITHUB_TOKEN }}'
|
||||
script: |-
|
||||
github.rest.issues.createComment({
|
||||
owner: '${{ github.repository }}'.split('/')[0],
|
||||
repo: '${{ github.repository }}'.split('/')[1],
|
||||
issue_number: '${{ github.event.issue.number }}',
|
||||
body: 'There is a problem with the iFlow CLI issue triaging. Please check the [action logs](${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}) for details.'
|
||||
})
|
5
.gitignore
vendored
5
.gitignore
vendored
@@ -19,4 +19,7 @@ __debug*
|
||||
example/default/*
|
||||
!example/default/main.go
|
||||
!example/default/config.yaml
|
||||
shutdown.sh
|
||||
shutdown.sh
|
||||
!example/test/test.db
|
||||
*.mp4
|
||||
shutdown.bat
|
369
CLAUDE.md
Normal file
369
CLAUDE.md
Normal file
@@ -0,0 +1,369 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
Monibuca is a high-performance streaming server framework written in Go. It's designed to be a modular, scalable platform for real-time audio/video streaming with support for multiple protocols including RTMP, RTSP, HLS, WebRTC, GB28181, and more.
|
||||
|
||||
## Development Commands
|
||||
|
||||
### Building and Running
|
||||
|
||||
**Basic Run (with SQLite):**
|
||||
```bash
|
||||
cd example/default
|
||||
go run -tags sqlite main.go
|
||||
```
|
||||
|
||||
**Build Tags:**
|
||||
- `sqlite` - Enable SQLite database support
|
||||
- `sqliteCGO` - Enable SQLite with CGO
|
||||
- `mysql` - Enable MySQL database support
|
||||
- `postgres` - Enable PostgreSQL database support
|
||||
- `duckdb` - Enable DuckDB database support
|
||||
- `disable_rm` - Disable memory pool
|
||||
- `fasthttp` - Use fasthttp instead of net/http
|
||||
- `taskpanic` - Enable panics for testing
|
||||
|
||||
**Protocol Buffer Generation:**
|
||||
```bash
|
||||
# Generate all proto files
|
||||
sh scripts/protoc.sh
|
||||
|
||||
# Generate specific plugin proto
|
||||
sh scripts/protoc.sh plugin_name
|
||||
```
|
||||
|
||||
**Release Building:**
|
||||
```bash
|
||||
# Uses goreleaser configuration
|
||||
goreleaser build
|
||||
```
|
||||
|
||||
**Testing:**
|
||||
```bash
|
||||
go test ./...
|
||||
```
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
### Core Components
|
||||
|
||||
**Server (`server.go`):** Main server instance that manages plugins, streams, and configurations. Implements the central event loop and lifecycle management.
|
||||
|
||||
**Plugin System (`plugin.go`):** Modular architecture where functionality is provided through plugins. Each plugin implements the `IPlugin` interface and can provide:
|
||||
- Protocol handlers (RTMP, RTSP, etc.)
|
||||
- Media transformers
|
||||
- Pull/Push proxies
|
||||
- Recording capabilities
|
||||
- Custom HTTP endpoints
|
||||
|
||||
**Configuration System (`pkg/config/`):** Hierarchical configuration system with priority order: dynamic modifications > environment variables > config files > default YAML > global config > defaults.
|
||||
|
||||
**Task System (`pkg/task/`):** Advanced asynchronous task management system with multiple layers:
|
||||
- **Task:** Basic unit of work with lifecycle management (Start/Run/Dispose)
|
||||
- **Job:** Container that manages multiple child tasks and provides event loops
|
||||
- **Work:** Special type of Job that acts as a persistent queue manager (keepalive=true)
|
||||
- **Channel:** Event-driven task for handling continuous data streams
|
||||
|
||||
### Task System Deep Dive
|
||||
|
||||
#### Task Hierarchy and Lifecycle
|
||||
```
|
||||
Work (Queue Manager)
|
||||
└── Job (Container with Event Loop)
|
||||
└── Task (Basic Work Unit)
|
||||
├── Start() - Initialization phase
|
||||
├── Run() - Main execution phase
|
||||
└── Dispose() - Cleanup phase
|
||||
```
|
||||
|
||||
#### Queue-based Asynchronous Processing
|
||||
The Task system supports sophisticated queue-based processing patterns:
|
||||
|
||||
1. **Work as Queue Manager:** Work instances stay alive indefinitely and manage queues of tasks
|
||||
2. **Task Queuing:** Use `workInstance.AddTask(task, logger)` to queue tasks
|
||||
3. **Automatic Lifecycle:** Tasks are automatically started, executed, and disposed
|
||||
4. **Error Handling:** Built-in retry mechanisms and error propagation
|
||||
|
||||
**Example Pattern (from S3 plugin):**
|
||||
```go
|
||||
type UploadQueueTask struct {
|
||||
task.Work // Persistent queue manager
|
||||
}
|
||||
|
||||
type FileUploadTask struct {
|
||||
task.Task // Individual work item
|
||||
// ... task-specific fields
|
||||
}
|
||||
|
||||
// Initialize queue manager (typically in init())
|
||||
var uploadQueueTask UploadQueueTask
|
||||
m7s.Servers.AddTask(&uploadQueueTask)
|
||||
|
||||
// Queue individual tasks
|
||||
uploadQueueTask.AddTask(&FileUploadTask{...}, logger)
|
||||
```
|
||||
|
||||
#### Cross-Plugin Task Cooperation
|
||||
Tasks can coordinate across different plugins through:
|
||||
|
||||
1. **Global Instance Pattern:** Plugins expose global instances for cross-plugin access
|
||||
2. **Event-based Triggers:** One plugin triggers tasks in another plugin
|
||||
3. **Shared Queue Managers:** Multiple plugins can use the same Work instance
|
||||
|
||||
**Example (MP4 → S3 Integration):**
|
||||
```go
|
||||
// In MP4 plugin: trigger S3 upload after recording completes
|
||||
s3plugin.TriggerUpload(filePath, deleteAfter)
|
||||
|
||||
// S3 plugin receives trigger and queues upload task
|
||||
func TriggerUpload(filePath string, deleteAfter bool) {
|
||||
if s3PluginInstance != nil {
|
||||
s3PluginInstance.QueueUpload(filePath, objectKey, deleteAfter)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Key Interfaces
|
||||
|
||||
**Publisher:** Handles incoming media streams and manages track information
|
||||
**Subscriber:** Handles outgoing media streams to clients
|
||||
**Puller:** Pulls streams from external sources
|
||||
**Pusher:** Pushes streams to external destinations
|
||||
**Transformer:** Processes/transcodes media streams
|
||||
**Recorder:** Records streams to storage
|
||||
|
||||
### Stream Processing Flow
|
||||
|
||||
1. **Publisher** receives media data and creates tracks
|
||||
2. **Tracks** handle audio/video data with specific codecs
|
||||
3. **Subscribers** attach to publishers to receive media
|
||||
4. **Transformers** can process streams between publishers and subscribers
|
||||
5. **Plugins** provide protocol-specific implementations
|
||||
|
||||
### Post-Recording Workflow
|
||||
|
||||
Monibuca implements a sophisticated post-recording processing pipeline:
|
||||
|
||||
1. **Recording Completion:** MP4 recorder finishes writing stream data
|
||||
2. **Trailer Writing:** Asynchronous task moves MOOV box to file beginning for web compatibility
|
||||
3. **File Optimization:** Temporary file operations ensure atomic updates
|
||||
4. **External Storage Integration:** Automatic upload to S3-compatible services
|
||||
5. **Cleanup:** Optional local file deletion after successful upload
|
||||
|
||||
This workflow uses queue-based task processing to avoid blocking the main recording pipeline.
|
||||
|
||||
## Plugin Development
|
||||
|
||||
### Creating a Plugin
|
||||
|
||||
1. Implement the `IPlugin` interface
|
||||
2. Define plugin metadata using `PluginMeta`
|
||||
3. Register with `InstallPlugin[YourPluginType](meta)`
|
||||
4. Optionally implement protocol-specific interfaces:
|
||||
- `ITCPPlugin` for TCP servers
|
||||
- `IUDPPlugin` for UDP servers
|
||||
- `IQUICPlugin` for QUIC servers
|
||||
- `IRegisterHandler` for HTTP endpoints
|
||||
|
||||
### Plugin Lifecycle
|
||||
|
||||
1. **Init:** Configuration parsing and initialization
|
||||
2. **Start:** Network listeners and task registration
|
||||
3. **Run:** Active operation
|
||||
4. **Dispose:** Cleanup and shutdown
|
||||
|
||||
### Cross-Plugin Communication Patterns
|
||||
|
||||
#### 1. Global Instance Pattern
|
||||
```go
|
||||
// Expose global instance for cross-plugin access
|
||||
var s3PluginInstance *S3Plugin
|
||||
|
||||
func (p *S3Plugin) Start() error {
|
||||
s3PluginInstance = p // Set global instance
|
||||
// ... rest of start logic
|
||||
}
|
||||
|
||||
// Provide public API functions
|
||||
func TriggerUpload(filePath string, deleteAfter bool) {
|
||||
if s3PluginInstance != nil {
|
||||
s3PluginInstance.QueueUpload(filePath, objectKey, deleteAfter)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### 2. Event-Driven Integration
|
||||
```go
|
||||
// In one plugin: trigger event after completion
|
||||
if t.filePath != "" {
|
||||
t.Info("MP4 file processing completed, triggering S3 upload")
|
||||
s3plugin.TriggerUpload(t.filePath, false)
|
||||
}
|
||||
```
|
||||
|
||||
#### 3. Shared Queue Managers
|
||||
Multiple plugins can share Work instances for coordinated processing.
|
||||
|
||||
### Asynchronous Task Development Best Practices
|
||||
|
||||
#### 1. Implement Task Interfaces
|
||||
```go
|
||||
type MyTask struct {
|
||||
task.Task
|
||||
// ... custom fields
|
||||
}
|
||||
|
||||
func (t *MyTask) Start() error {
|
||||
// Initialize resources, validate inputs
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *MyTask) Run() error {
|
||||
// Main work execution
|
||||
// Return task.ErrTaskComplete for successful completion
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
#### 2. Use Work for Queue Management
|
||||
```go
|
||||
type MyQueueManager struct {
|
||||
task.Work
|
||||
}
|
||||
|
||||
var myQueue MyQueueManager
|
||||
|
||||
func init() {
|
||||
m7s.Servers.AddTask(&myQueue)
|
||||
}
|
||||
|
||||
// Queue tasks from anywhere
|
||||
myQueue.AddTask(&MyTask{...}, logger)
|
||||
```
|
||||
|
||||
#### 3. Error Handling and Retry
|
||||
- Tasks automatically support retry mechanisms
|
||||
- Use `task.SetRetry(maxRetry, interval)` for custom retry behavior
|
||||
- Return `task.ErrTaskComplete` for successful completion
|
||||
- Return other errors to trigger retry or failure handling
|
||||
|
||||
## Configuration Structure
|
||||
|
||||
### Global Configuration
|
||||
- HTTP/TCP/UDP/QUIC listeners
|
||||
- Database connections (SQLite, MySQL, PostgreSQL, DuckDB)
|
||||
- Authentication settings
|
||||
- Admin interface settings
|
||||
- Global stream alias mappings
|
||||
|
||||
### Plugin Configuration
|
||||
Each plugin can define its own configuration structure that gets merged with global settings.
|
||||
|
||||
## Database Integration
|
||||
|
||||
Supports multiple database backends:
|
||||
- **SQLite:** Default lightweight option
|
||||
- **MySQL:** Production deployments
|
||||
- **PostgreSQL:** Production deployments
|
||||
- **DuckDB:** Analytics use cases
|
||||
|
||||
Automatic migration is handled for core models including users, proxies, and stream aliases.
|
||||
|
||||
## Protocol Support
|
||||
|
||||
### Built-in Plugins
|
||||
- **RTMP:** Real-time messaging protocol
|
||||
- **RTSP:** Real-time streaming protocol
|
||||
- **HLS:** HTTP live streaming
|
||||
- **WebRTC:** Web real-time communication
|
||||
- **GB28181:** Chinese surveillance standard
|
||||
- **FLV:** Flash video format
|
||||
- **MP4:** MPEG-4 format with post-processing capabilities
|
||||
- **SRT:** Secure reliable transport
|
||||
- **S3:** File upload integration with AWS S3/MinIO compatibility
|
||||
|
||||
## Authentication & Security
|
||||
|
||||
- JWT-based authentication for admin interface
|
||||
- Stream-level authentication with URL signing
|
||||
- Role-based access control (admin/user)
|
||||
- Webhook support for external auth integration
|
||||
|
||||
## Development Guidelines
|
||||
|
||||
### Code Style
|
||||
- Follow existing patterns and naming conventions
|
||||
- Use the task system for async operations
|
||||
- Implement proper error handling and logging
|
||||
- Use the configuration system for all settings
|
||||
|
||||
### Testing
|
||||
- Unit tests should be placed alongside source files
|
||||
- Integration tests can use the example configurations
|
||||
- Use the mock.py script for protocol testing
|
||||
|
||||
### Async Task Development
|
||||
- Always use Work instances for queue management
|
||||
- Implement proper Start/Run lifecycle in tasks
|
||||
- Use global instance pattern for cross-plugin communication
|
||||
- Handle errors gracefully with appropriate retry strategies
|
||||
|
||||
### Performance Considerations
|
||||
- Memory pool is enabled by default (disable with `disable_rm`)
|
||||
- Zero-copy design for media data where possible
|
||||
- Lock-free data structures for high concurrency
|
||||
- Efficient buffer management with ring buffers
|
||||
- Queue-based processing prevents blocking main threads
|
||||
|
||||
## Debugging
|
||||
|
||||
### Built-in Debug Plugin
|
||||
- Performance monitoring and profiling
|
||||
- Real-time metrics via Prometheus endpoint (`/api/metrics`)
|
||||
- pprof integration for memory/cpu profiling
|
||||
|
||||
### Logging
|
||||
- Structured logging with zerolog
|
||||
- Configurable log levels
|
||||
- Log rotation support
|
||||
- Fatal crash logging
|
||||
|
||||
### Task System Debugging
|
||||
- Tasks automatically include detailed logging with task IDs and types
|
||||
- Use `task.Debug/Info/Warn/Error` methods for consistent logging
|
||||
- Task state and progress can be monitored through descriptions
|
||||
- Event loop status and queue lengths are logged automatically
|
||||
|
||||
## Web Admin Interface
|
||||
|
||||
- Web-based admin UI served from `admin.zip`
|
||||
- RESTful API for all operations
|
||||
- Real-time stream monitoring
|
||||
- Configuration management
|
||||
- User management (when auth enabled)
|
||||
|
||||
## Common Issues
|
||||
|
||||
### Port Conflicts
|
||||
- Default HTTP port: 8080
|
||||
- Default gRPC port: 50051
|
||||
- Check plugin-specific port configurations
|
||||
|
||||
### Database Connection
|
||||
- Ensure proper build tags for database support
|
||||
- Check DSN configuration strings
|
||||
- Verify database file permissions
|
||||
|
||||
### Plugin Loading
|
||||
- Plugins are auto-discovered from imports
|
||||
- Check plugin enable/disable status
|
||||
- Verify configuration merging
|
||||
|
||||
### Task System Issues
|
||||
- Ensure Work instances are added to server during initialization
|
||||
- Check task queue status if tasks aren't executing
|
||||
- Verify proper error handling in task implementation
|
||||
- Monitor task retry counts and failure reasons in logs
|
92
GEMINI.md
Normal file
92
GEMINI.md
Normal file
@@ -0,0 +1,92 @@
|
||||
# Gemini Context: Monibuca Project
|
||||
|
||||
This document provides a summary of the Monibuca project to give context for AI-assisted development.
|
||||
|
||||
## Project Overview
|
||||
|
||||
Monibuca is a modular, high-performance streaming media server framework written in Go. Its core design is lightweight and plugin-based, allowing developers to extend functionality by adding or developing plugins for different streaming protocols and features. The project's module path is `m7s.live/v4`.
|
||||
|
||||
The architecture is centered around a core engine (`m7s.live/v4`) that manages plugins, streams, and the main event loop. Functionality is added by importing plugins, which register themselves with the core engine.
|
||||
|
||||
**Key Technologies:**
|
||||
- **Language:** Go
|
||||
- **Architecture:** Plugin-based
|
||||
- **APIs:** RESTful HTTP API, gRPC API
|
||||
|
||||
**Supported Protocols (based on plugins):**
|
||||
- RTMP
|
||||
- RTSP
|
||||
- HLS
|
||||
- FLV
|
||||
- WebRTC
|
||||
- GB28181
|
||||
- SRT
|
||||
- And more...
|
||||
|
||||
## Building and Running
|
||||
|
||||
### Build
|
||||
To build the server, run the following command from the project root:
|
||||
```bash
|
||||
go build -v .
|
||||
```
|
||||
|
||||
### Test
|
||||
To run the test suite:
|
||||
```bash
|
||||
go test -v ./...
|
||||
```
|
||||
|
||||
### Running the Server
|
||||
The server is typically run by creating a `main.go` file that imports the core engine and the desired plugins.
|
||||
|
||||
**Example `main.go`:**
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"m7s.live/v4"
|
||||
// Import desired plugins to register them
|
||||
_ "m7s.live/plugin/rtmp/v4"
|
||||
_ "m7s.live/plugin/rtsp/v4"
|
||||
_ "m7s.live/plugin/hls/v4"
|
||||
_ "m7s.live/plugin/webrtc/v4"
|
||||
)
|
||||
|
||||
func main() {
|
||||
m7s.Run()
|
||||
}
|
||||
```
|
||||
The server is executed by running `go run main.go`. Configuration is managed through a `config.yaml` file in the same directory.
|
||||
|
||||
### Docker
|
||||
The project includes a `Dockerfile` to build and run in a container.
|
||||
```bash
|
||||
# Build the image
|
||||
docker build -t monibuca .
|
||||
|
||||
# Run the container
|
||||
docker run -p 8080:8080 monibuca
|
||||
```
|
||||
|
||||
## Development Conventions
|
||||
|
||||
### Project Structure
|
||||
- `server.go`: Core engine logic.
|
||||
- `plugin/`: Contains individual plugins for different protocols and features.
|
||||
- `pkg/`: Shared packages and utilities used across the project.
|
||||
- `pb/`: Protobuf definitions for the gRPC API.
|
||||
- `example/`: Example implementations and configurations.
|
||||
- `doc/`: Project documentation.
|
||||
|
||||
### Plugin System
|
||||
The primary way to add functionality is by creating or enabling plugins. A plugin is a Go package that registers itself with the core engine upon import (using the `init()` function). This modular approach keeps the core small and allows for custom builds with only the necessary features.
|
||||
|
||||
### API
|
||||
- **RESTful API:** Defined in `api.go`, provides HTTP endpoints for controlling and monitoring the server.
|
||||
- **gRPC API:** Defined in the `pb/` directory using protobuf. `protoc.sh` is used to generate the Go code from the `.proto` files.
|
||||
|
||||
### Code Style and CI
|
||||
- The project uses `golangci-lint` for linting, as seen in the `.github/workflows/go.yml` file.
|
||||
- Static analysis is configured via `staticcheck.conf` and `qodana.yaml`.
|
||||
- All code should be formatted with `gofmt`.
|
124
IFLOW.md
Normal file
124
IFLOW.md
Normal file
@@ -0,0 +1,124 @@
|
||||
# Monibuca v5 项目概述
|
||||
|
||||
Monibuca 是一个使用纯 Go 语言开发的、高度可扩展的高性能流媒体服务器开发框架。它旨在提供高并发、低延迟的流媒体处理能力,并支持多种流媒体协议和功能。
|
||||
|
||||
## 核心特性
|
||||
|
||||
* **高性能**: 采用无锁设计、部分手动内存管理和多核计算。
|
||||
* **低延迟**: 实现零等待转发,全链路亚秒级延迟。
|
||||
* **模块化**: 按需加载,无限扩展性。
|
||||
* **灵活性**: 高度可配置,适应各种流媒体场景。
|
||||
* **可扩展性**: 支持分布式部署,轻松应对大规模场景。
|
||||
* **调试友好**: 内置调试插件,实时性能监控与分析。
|
||||
* **媒体处理**: 支持截图、转码、SEI 数据处理。
|
||||
* **集群能力**: 内置级联和房间管理。
|
||||
* **预览功能**: 支持视频预览、多屏预览、自定义屏幕布局。
|
||||
* **安全性**: 提供加密传输和流认证。
|
||||
* **性能监控**: 支持压力测试和性能指标收集(集成在测试插件中)。
|
||||
* **日志管理**: 日志轮转、自动清理、自定义扩展。
|
||||
* **录制与回放**: 支持 MP4、HLS、FLV 格式,支持倍速、寻址、暂停。
|
||||
* **动态时移**: 动态缓存设计,支持直播时移回放。
|
||||
* **远程调用**: 支持 gRPC 接口,实现跨语言集成。
|
||||
* **流别名**: 支持动态流别名,灵活的多流管理。
|
||||
* **AI 能力**: 集成推理引擎,支持 ONNX 模型,支持自定义前后处理。
|
||||
* **WebHook**: 订阅流生命周期事件,用于业务系统集成。
|
||||
* **私有协议**: 支持自定义私有协议以满足特殊业务需求。
|
||||
|
||||
## 支持的协议
|
||||
|
||||
* RTMP
|
||||
* RTSP
|
||||
* HTTP-FLV
|
||||
* WS-FLV
|
||||
* HLS
|
||||
* WebRTC
|
||||
* GB28181
|
||||
* ONVIF
|
||||
* SRT
|
||||
|
||||
## 技术架构
|
||||
|
||||
Monibuca 基于插件化架构设计,核心功能通过插件扩展。主要组件包括:
|
||||
|
||||
* **Server**: 核心服务器,负责管理流、插件、任务等。
|
||||
* **Plugin**: 插件系统,提供各种功能扩展。
|
||||
* **Publisher**: 流发布者,负责接收和管理流数据。
|
||||
* **Subscriber**: 流订阅者,负责消费流数据。
|
||||
* **Task**: 任务系统,用于管理异步任务和生命周期。
|
||||
* **Config**: 配置系统,支持多层级配置(环境变量、配置文件、默认值等)。
|
||||
|
||||
## 构建与运行
|
||||
|
||||
### 前提条件
|
||||
|
||||
* Go 1.23 或更高版本
|
||||
* 对流媒体协议有基本了解
|
||||
|
||||
### 运行默认配置
|
||||
|
||||
```bash
|
||||
cd example/default
|
||||
go run -tags sqlite main.go
|
||||
```
|
||||
|
||||
### 构建标签
|
||||
|
||||
可以使用以下构建标签来自定义构建:
|
||||
|
||||
| 构建标签 | 描述 |
|
||||
| :--- | :--- |
|
||||
| `disable_rm` | 禁用内存池 |
|
||||
| `sqlite` | 启用 sqlite DB |
|
||||
| `sqliteCGO` | 启用 sqlite cgo 版本 DB |
|
||||
| `mysql` | 启用 mysql DB |
|
||||
| `postgres` | 启用 postgres DB |
|
||||
| `duckdb` | 启用 duckdb DB |
|
||||
| `taskpanic` | 抛出 panic,用于测试 |
|
||||
| `fasthttp` | 启用 fasthttp 服务器而不是 net/http |
|
||||
|
||||
### Web UI
|
||||
|
||||
将 `admin.zip` 文件(不要解压)放在与配置文件相同的目录中。然后访问 http://localhost:8080 即可访问 UI。
|
||||
|
||||
## 开发约定
|
||||
|
||||
### 项目结构
|
||||
|
||||
* `example/`: 包含各种使用示例。
|
||||
* `pkg/`: 核心库代码。
|
||||
* `plugin/`: 各种功能插件。
|
||||
* `pb/`: Protocol Buffer 生成的代码。
|
||||
* `doc/`: 项目文档。
|
||||
* `scripts/`: 脚本文件。
|
||||
|
||||
### 配置
|
||||
|
||||
* 使用 YAML 格式进行配置。
|
||||
* 支持多层级配置覆盖(环境变量 > 配置文件 > 默认值)。
|
||||
* 插件配置通常以插件名小写作为前缀。
|
||||
|
||||
### 日志
|
||||
|
||||
* 使用 `slog` 进行日志记录。
|
||||
* 支持不同日志级别(debug, info, warn, error, trace)。
|
||||
* 插件可以有自己的日志记录器。
|
||||
|
||||
### 插件开发
|
||||
|
||||
* 插件需要实现 `IPlugin` 接口。
|
||||
* 通过 `InstallPlugin` 函数注册插件。
|
||||
* 插件可以注册 HTTP 处理函数、gRPC 服务等。
|
||||
* 插件可以有自己的配置结构体。
|
||||
|
||||
### 任务系统
|
||||
|
||||
* 使用 `task` 包管理异步任务。
|
||||
* 任务具有生命周期管理(启动、停止、销毁)。
|
||||
* 任务可以有父子关系,形成任务树。
|
||||
* 支持任务重试机制。
|
||||
|
||||
### 测试
|
||||
|
||||
* 使用 Go 标准测试包 `testing`。
|
||||
* 在 `test/` 目录下编写集成测试。
|
||||
* 使用 `example/test` 目录进行功能测试。
|
@@ -61,7 +61,7 @@ Monibuca is a powerful streaming server framework written entirely in Go. It's d
|
||||
- 🔄 **Cluster Capability** - Built-in cascade and room management
|
||||
- 🎮 **Preview Features** - Supports video preview, multi-screen preview, custom screen layouts
|
||||
- 🔐 **Security** - Provides encrypted transmission and stream authentication
|
||||
- 📊 **Performance Monitoring** - Supports stress testing and performance metrics collection
|
||||
- 📊 **Performance Monitoring** - Supports stress testing and performance metrics collection (integrated in test plugin)
|
||||
- 📝 **Log Management** - Log rotation, auto cleanup, custom extensions
|
||||
- 🎬 **Recording & Playback** - Supports MP4, HLS, FLV formats, speed control, seeking, pause
|
||||
- ⏱️ **Dynamic Time-Shift** - Dynamic cache design, supports live time-shift playback
|
||||
|
@@ -1,5 +1,17 @@
|
||||
# Monibuca v5.0.x Release Notes
|
||||
|
||||
## v5.0.4 (2025-08-15)
|
||||
|
||||
### 新增 / 改进 (Features & Improvements)
|
||||
- GB28181: 支持更新 channelName / channelId(eba62c4)
|
||||
- 定时任务(crontab): 初始化 SQL 支持(2bbee90)
|
||||
- Snap 插件: 支持批量抓图(272def3)
|
||||
- 管理后台: 支持自定义首页(15d830f)
|
||||
- 推/拉代理: 支持可选参数更新(ad32f6f)
|
||||
- 心跳/脉冲: pulse interval 允许为 0(17faf3f)
|
||||
- 告警上报: 通过 Hook 发送报警(baf3640)
|
||||
- 告警信息上报: 通过 Hook 发送 alarminfo(cad47ae)
|
||||
|
||||
## v5.0.3 (2025-06-27)
|
||||
|
||||
### 🎉 新功能 (New Features)
|
||||
|
25
alarm.go
Normal file
25
alarm.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package m7s
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// AlarmInfo 报警信息实体,用于存储到数据库
|
||||
type AlarmInfo struct {
|
||||
ID uint `gorm:"primaryKey;autoIncrement" json:"id"` // 主键,自增ID
|
||||
ServerInfo string `gorm:"type:varchar(255);not null" json:"serverInfo"` // 服务器信息
|
||||
StreamName string `gorm:"type:varchar(255);index" json:"streamName"` // 流名称
|
||||
StreamPath string `gorm:"type:varchar(500)" json:"streamPath"` // 流的streampath
|
||||
AlarmName string `gorm:"type:varchar(255);not null" json:"alarmName"` // 报警名称
|
||||
AlarmDesc string `gorm:"type:varchar(500);not null" json:"alarmDesc"` // 报警描述
|
||||
AlarmType int `gorm:"not null;index" json:"alarmType"` // 报警类型(对应之前定义的常量)
|
||||
IsSent bool `gorm:"default:false" json:"isSent"` // 是否已成功发送
|
||||
CreatedAt time.Time `gorm:"autoCreateTime" json:"createdAt"` // 创建时间,报警时间
|
||||
UpdatedAt time.Time `gorm:"autoUpdateTime" json:"updatedAt"` // 更新时间
|
||||
FilePath string `gorm:"type:varchar(255)" json:"filePath"` // 文件路径
|
||||
}
|
||||
|
||||
// TableName 指定表名
|
||||
func (AlarmInfo) TableName() string {
|
||||
return "alarm_info"
|
||||
}
|
8
alias.go
8
alias.go
@@ -48,7 +48,7 @@ func (s *Server) initStreamAlias() {
|
||||
|
||||
func (s *Server) GetStreamAlias(ctx context.Context, req *emptypb.Empty) (res *pb.StreamAliasListResponse, err error) {
|
||||
res = &pb.StreamAliasListResponse{}
|
||||
s.Streams.Call(func() error {
|
||||
s.CallOnStreamTask(func() {
|
||||
for alias := range s.AliasStreams.Range {
|
||||
info := &pb.StreamAlias{
|
||||
StreamPath: alias.StreamPath,
|
||||
@@ -62,18 +62,17 @@ func (s *Server) GetStreamAlias(ctx context.Context, req *emptypb.Empty) (res *p
|
||||
}
|
||||
res.Data = append(res.Data, info)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) SetStreamAlias(ctx context.Context, req *pb.SetStreamAliasRequest) (res *pb.SuccessResponse, err error) {
|
||||
res = &pb.SuccessResponse{}
|
||||
s.Streams.Call(func() error {
|
||||
s.CallOnStreamTask(func() {
|
||||
if req.StreamPath != "" {
|
||||
u, err := url.Parse(req.StreamPath)
|
||||
if err != nil {
|
||||
return err
|
||||
return
|
||||
}
|
||||
req.StreamPath = strings.TrimPrefix(u.Path, "/")
|
||||
publisher, canReplace := s.Streams.Get(req.StreamPath)
|
||||
@@ -159,7 +158,6 @@ func (s *Server) SetStreamAlias(ctx context.Context, req *pb.SetStreamAliasReque
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
329
api.go
329
api.go
@@ -12,6 +12,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5/pkg/config"
|
||||
"m7s.live/v5/pkg/task"
|
||||
|
||||
myip "github.com/husanpao/ip"
|
||||
@@ -25,7 +26,7 @@ import (
|
||||
"gopkg.in/yaml.v3"
|
||||
"m7s.live/v5/pb"
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/config"
|
||||
"m7s.live/v5/pkg/format"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
@@ -96,9 +97,8 @@ func (s *Server) api_Stream_AnnexB_(rw http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
defer reader.StopRead()
|
||||
var annexb *pkg.AnnexB
|
||||
var converter = pkg.NewAVFrameConvert[*pkg.AnnexB](publisher.VideoTrack.AVTrack, nil)
|
||||
annexb, err = converter.ConvertFromAVFrame(&reader.Value)
|
||||
var annexb format.AnnexB
|
||||
err = pkg.ConvertFrameType(reader.Value.Wraps[0], &annexb)
|
||||
if err != nil {
|
||||
http.Error(rw, err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
@@ -150,6 +150,9 @@ func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err
|
||||
}
|
||||
res.Data.AudioTrack.SampleRate = uint32(t.ICodecCtx.(pkg.IAudioCodecCtx).GetSampleRate())
|
||||
res.Data.AudioTrack.Channels = uint32(t.ICodecCtx.(pkg.IAudioCodecCtx).GetChannels())
|
||||
if pub.State == PublisherStateInit {
|
||||
res.Data.State = int32(PublisherStateTrackAdded)
|
||||
}
|
||||
}
|
||||
}
|
||||
if t := pub.VideoTrack.AVTrack; t != nil {
|
||||
@@ -165,6 +168,9 @@ func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err
|
||||
}
|
||||
res.Data.VideoTrack.Width = uint32(t.ICodecCtx.(pkg.IVideoCodecCtx).Width())
|
||||
res.Data.VideoTrack.Height = uint32(t.ICodecCtx.(pkg.IVideoCodecCtx).Height())
|
||||
if pub.State == PublisherStateInit {
|
||||
res.Data.State = int32(PublisherStateTrackAdded)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -172,7 +178,7 @@ func (s *Server) getStreamInfo(pub *Publisher) (res *pb.StreamInfoResponse, err
|
||||
|
||||
func (s *Server) StreamInfo(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.StreamInfoResponse, err error) {
|
||||
var recordings []*pb.RecordingDetail
|
||||
s.Records.SafeRange(func(record *RecordJob) bool {
|
||||
s.Records.Range(func(record *RecordJob) bool {
|
||||
if record.StreamPath == req.StreamPath {
|
||||
recordings = append(recordings, &pb.RecordingDetail{
|
||||
FilePath: record.RecConf.FilePath,
|
||||
@@ -212,11 +218,13 @@ func (s *Server) TaskTree(context.Context, *emptypb.Empty) (res *pb.TaskTreeResp
|
||||
StartTime: timestamppb.New(t.StartTime),
|
||||
Description: m.GetDescriptions(),
|
||||
StartReason: t.StartReason,
|
||||
Level: uint32(t.GetLevel()),
|
||||
}
|
||||
if job, ok := m.(task.IJob); ok {
|
||||
if blockedTask := job.Blocked(); blockedTask != nil {
|
||||
res.Blocked = fillData(blockedTask)
|
||||
}
|
||||
res.EventLoopRunning = job.EventLoopRunning()
|
||||
for t := range job.RangeSubTask {
|
||||
child := fillData(t)
|
||||
if child == nil {
|
||||
@@ -251,7 +259,7 @@ func (s *Server) RestartTask(ctx context.Context, req *pb.RequestWithId64) (resp
|
||||
|
||||
func (s *Server) GetRecording(ctx context.Context, req *emptypb.Empty) (resp *pb.RecordingListResponse, err error) {
|
||||
resp = &pb.RecordingListResponse{}
|
||||
s.Records.SafeRange(func(record *RecordJob) bool {
|
||||
s.Records.Range(func(record *RecordJob) bool {
|
||||
resp.Data = append(resp.Data, &pb.Recording{
|
||||
StreamPath: record.StreamPath,
|
||||
StartTime: timestamppb.New(record.StartTime),
|
||||
@@ -264,7 +272,7 @@ func (s *Server) GetRecording(ctx context.Context, req *emptypb.Empty) (resp *pb
|
||||
}
|
||||
|
||||
func (s *Server) GetSubscribers(context.Context, *pb.SubscribersRequest) (res *pb.SubscribersResponse, err error) {
|
||||
s.Streams.Call(func() error {
|
||||
s.CallOnStreamTask(func() {
|
||||
var subscribers []*pb.SubscriberSnapShot
|
||||
for subscriber := range s.Subscribers.Range {
|
||||
meta, _ := json.Marshal(subscriber.GetDescriptions())
|
||||
@@ -303,7 +311,6 @@ func (s *Server) GetSubscribers(context.Context, *pb.SubscribersRequest) (res *p
|
||||
Data: subscribers,
|
||||
Total: int32(s.Subscribers.Length),
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
@@ -323,7 +330,8 @@ func (s *Server) AudioTrackSnap(_ context.Context, req *pb.StreamSnapRequest) (r
|
||||
}
|
||||
}
|
||||
pub.AudioTrack.Ring.Do(func(v *pkg.AVFrame) {
|
||||
if len(v.Wraps) > 0 {
|
||||
if len(v.Wraps) > 0 && v.TryRLock() {
|
||||
defer v.RUnlock()
|
||||
var snap pb.TrackSnapShot
|
||||
snap.Sequence = v.Sequence
|
||||
snap.Timestamp = uint32(v.Timestamp / time.Millisecond)
|
||||
@@ -333,7 +341,7 @@ func (s *Server) AudioTrackSnap(_ context.Context, req *pb.StreamSnapRequest) (r
|
||||
data.RingDataSize += uint32(v.Wraps[0].GetSize())
|
||||
for i, wrap := range v.Wraps {
|
||||
snap.Wrap[i] = &pb.Wrap{
|
||||
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
|
||||
Timestamp: uint32(wrap.GetSample().Timestamp / time.Millisecond),
|
||||
Size: uint32(wrap.GetSize()),
|
||||
Data: wrap.String(),
|
||||
}
|
||||
@@ -374,7 +382,7 @@ func (s *Server) api_VideoTrack_SSE(rw http.ResponseWriter, r *http.Request) {
|
||||
snap.KeyFrame = frame.IDR
|
||||
for i, wrap := range frame.Wraps {
|
||||
snap.Wrap[i] = &pb.Wrap{
|
||||
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
|
||||
Timestamp: uint32(wrap.GetSample().Timestamp / time.Millisecond),
|
||||
Size: uint32(wrap.GetSize()),
|
||||
Data: wrap.String(),
|
||||
}
|
||||
@@ -407,7 +415,7 @@ func (s *Server) api_AudioTrack_SSE(rw http.ResponseWriter, r *http.Request) {
|
||||
snap.KeyFrame = frame.IDR
|
||||
for i, wrap := range frame.Wraps {
|
||||
snap.Wrap[i] = &pb.Wrap{
|
||||
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
|
||||
Timestamp: uint32(wrap.GetSample().Timestamp / time.Millisecond),
|
||||
Size: uint32(wrap.GetSize()),
|
||||
Data: wrap.String(),
|
||||
}
|
||||
@@ -433,7 +441,8 @@ func (s *Server) VideoTrackSnap(ctx context.Context, req *pb.StreamSnapRequest)
|
||||
}
|
||||
}
|
||||
pub.VideoTrack.Ring.Do(func(v *pkg.AVFrame) {
|
||||
if len(v.Wraps) > 0 {
|
||||
if len(v.Wraps) > 0 && v.TryRLock() {
|
||||
defer v.RUnlock()
|
||||
var snap pb.TrackSnapShot
|
||||
snap.Sequence = v.Sequence
|
||||
snap.Timestamp = uint32(v.Timestamp / time.Millisecond)
|
||||
@@ -443,7 +452,7 @@ func (s *Server) VideoTrackSnap(ctx context.Context, req *pb.StreamSnapRequest)
|
||||
data.RingDataSize += uint32(v.Wraps[0].GetSize())
|
||||
for i, wrap := range v.Wraps {
|
||||
snap.Wrap[i] = &pb.Wrap{
|
||||
Timestamp: uint32(wrap.GetTimestamp() / time.Millisecond),
|
||||
Timestamp: uint32(wrap.GetSample().Timestamp / time.Millisecond),
|
||||
Size: uint32(wrap.GetSize()),
|
||||
Data: wrap.String(),
|
||||
}
|
||||
@@ -476,29 +485,27 @@ func (s *Server) Shutdown(ctx context.Context, req *pb.RequestWithId) (res *pb.S
|
||||
}
|
||||
|
||||
func (s *Server) ChangeSubscribe(ctx context.Context, req *pb.ChangeSubscribeRequest) (res *pb.SuccessResponse, err error) {
|
||||
s.Streams.Call(func() error {
|
||||
s.CallOnStreamTask(func() {
|
||||
if subscriber, ok := s.Subscribers.Get(req.Id); ok {
|
||||
if pub, ok := s.Streams.Get(req.StreamPath); ok {
|
||||
subscriber.Publisher.RemoveSubscriber(subscriber)
|
||||
subscriber.StreamPath = req.StreamPath
|
||||
pub.AddSubscriber(subscriber)
|
||||
return nil
|
||||
return
|
||||
}
|
||||
}
|
||||
err = pkg.ErrNotFound
|
||||
return nil
|
||||
})
|
||||
return &pb.SuccessResponse{}, err
|
||||
}
|
||||
|
||||
func (s *Server) StopSubscribe(ctx context.Context, req *pb.RequestWithId) (res *pb.SuccessResponse, err error) {
|
||||
s.Streams.Call(func() error {
|
||||
s.CallOnStreamTask(func() {
|
||||
if subscriber, ok := s.Subscribers.Get(req.Id); ok {
|
||||
subscriber.Stop(errors.New("stop by api"))
|
||||
} else {
|
||||
err = pkg.ErrNotFound
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return &pb.SuccessResponse{}, err
|
||||
}
|
||||
@@ -543,7 +550,7 @@ func (s *Server) StopPublish(ctx context.Context, req *pb.StreamSnapRequest) (re
|
||||
// /api/stream/list
|
||||
func (s *Server) StreamList(_ context.Context, req *pb.StreamListRequest) (res *pb.StreamListResponse, err error) {
|
||||
recordingMap := make(map[string][]*pb.RecordingDetail)
|
||||
for record := range s.Records.SafeRange {
|
||||
for record := range s.Records.Range {
|
||||
recordingMap[record.StreamPath] = append(recordingMap[record.StreamPath], &pb.RecordingDetail{
|
||||
FilePath: record.RecConf.FilePath,
|
||||
Mode: record.RecConf.Mode,
|
||||
@@ -567,14 +574,46 @@ func (s *Server) StreamList(_ context.Context, req *pb.StreamListRequest) (res *
|
||||
}
|
||||
|
||||
func (s *Server) WaitList(context.Context, *emptypb.Empty) (res *pb.StreamWaitListResponse, err error) {
|
||||
s.Streams.Call(func() error {
|
||||
s.CallOnStreamTask(func() {
|
||||
res = &pb.StreamWaitListResponse{
|
||||
List: make(map[string]int32),
|
||||
}
|
||||
for subs := range s.Waiting.Range {
|
||||
res.List[subs.StreamPath] = int32(subs.Length)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) GetSubscriptionProgress(ctx context.Context, req *pb.StreamSnapRequest) (res *pb.SubscriptionProgressResponse, err error) {
|
||||
s.CallOnStreamTask(func() {
|
||||
if waitStream, ok := s.Waiting.Get(req.StreamPath); ok {
|
||||
progress := waitStream.Progress
|
||||
res = &pb.SubscriptionProgressResponse{
|
||||
Code: 0,
|
||||
Message: "success",
|
||||
Data: &pb.SubscriptionProgressData{
|
||||
CurrentStep: int32(progress.CurrentStep),
|
||||
},
|
||||
}
|
||||
// Convert steps
|
||||
for _, step := range progress.Steps {
|
||||
pbStep := &pb.Step{
|
||||
Name: step.Name,
|
||||
Description: step.Description,
|
||||
Error: step.Error,
|
||||
}
|
||||
if !step.StartedAt.IsZero() {
|
||||
pbStep.StartedAt = timestamppb.New(step.StartedAt)
|
||||
}
|
||||
if !step.CompletedAt.IsZero() {
|
||||
pbStep.CompletedAt = timestamppb.New(step.CompletedAt)
|
||||
}
|
||||
res.Data.Steps = append(res.Data.Steps, pbStep)
|
||||
}
|
||||
} else {
|
||||
err = pkg.ErrNotFound
|
||||
}
|
||||
})
|
||||
return
|
||||
}
|
||||
@@ -643,10 +682,10 @@ func (s *Server) Summary(context.Context, *emptypb.Empty) (res *pb.SummaryRespon
|
||||
netWorks = append(netWorks, info)
|
||||
}
|
||||
res.StreamCount = int32(s.Streams.Length)
|
||||
res.PullCount = int32(s.Pulls.Length)
|
||||
res.PushCount = int32(s.Pushs.Length)
|
||||
res.PullCount = int32(s.Pulls.Length())
|
||||
res.PushCount = int32(s.Pushs.Length())
|
||||
res.SubscribeCount = int32(s.Subscribers.Length)
|
||||
res.RecordCount = int32(s.Records.Length)
|
||||
res.RecordCount = int32(s.Records.Length())
|
||||
res.TransformCount = int32(s.Transforms.Length)
|
||||
res.NetWork = netWorks
|
||||
s.lastSummary = res
|
||||
@@ -920,7 +959,7 @@ func (s *Server) DeleteRecord(ctx context.Context, req *pb.ReqRecordDelete) (res
|
||||
|
||||
func (s *Server) GetTransformList(ctx context.Context, req *emptypb.Empty) (res *pb.TransformListResponse, err error) {
|
||||
res = &pb.TransformListResponse{}
|
||||
s.Transforms.Call(func() error {
|
||||
s.Transforms.Call(func() {
|
||||
for transform := range s.Transforms.Range {
|
||||
info := &pb.Transform{
|
||||
StreamPath: transform.StreamPath,
|
||||
@@ -932,13 +971,247 @@ func (s *Server) GetTransformList(ctx context.Context, req *emptypb.Empty) (res
|
||||
result, err = yaml.Marshal(transform.TransformJob.Config)
|
||||
if err != nil {
|
||||
s.Error("marshal transform config failed", "error", err)
|
||||
return err
|
||||
return
|
||||
}
|
||||
info.Config = string(result)
|
||||
}
|
||||
res.Data = append(res.Data, info)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (s *Server) StartPull(ctx context.Context, req *pb.GlobalPullRequest) (res *pb.SuccessResponse, err error) {
|
||||
// 创建拉流配置
|
||||
pullConfig := config.Pull{
|
||||
URL: req.RemoteURL,
|
||||
TestMode: int(req.TestMode),
|
||||
}
|
||||
|
||||
// 使用请求中的流路径,如果未提供则生成默认路径
|
||||
streamPath := req.StreamPath
|
||||
protocol := req.Protocol
|
||||
|
||||
// 如果没有提供protocol,则从URL推测
|
||||
if protocol == "" {
|
||||
u, err := url.Parse(req.RemoteURL)
|
||||
if err == nil {
|
||||
switch {
|
||||
case strings.HasPrefix(u.Scheme, "rtmp"):
|
||||
protocol = "rtmp"
|
||||
case strings.HasPrefix(u.Scheme, "rtsp"):
|
||||
protocol = "rtsp"
|
||||
case strings.HasPrefix(u.Scheme, "srt"):
|
||||
protocol = "srt"
|
||||
case strings.HasPrefix(u.Scheme, "whep"):
|
||||
protocol = "webrtc"
|
||||
case strings.HasPrefix(u.Scheme, "http"):
|
||||
if strings.Contains(u.Path, ".m3u8") {
|
||||
protocol = "hls"
|
||||
} else if strings.Contains(u.Path, ".flv") {
|
||||
protocol = "flv"
|
||||
} else if strings.Contains(u.Path, ".mp4") {
|
||||
protocol = "mp4"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if streamPath == "" {
|
||||
if protocol == "" {
|
||||
streamPath = "pull/unknown"
|
||||
} else {
|
||||
streamPath = "pull/" + protocol
|
||||
}
|
||||
}
|
||||
|
||||
// 根据protocol找到对应的plugin进行pull
|
||||
if protocol != "" {
|
||||
for p := range s.Plugins.Range {
|
||||
if strings.EqualFold(p.Meta.Name, protocol) {
|
||||
pubConfig := p.GetCommonConf().Publish
|
||||
|
||||
// 设置发布配置参数
|
||||
if req.PubAudio != nil {
|
||||
pubConfig.PubAudio = *req.PubAudio
|
||||
}
|
||||
if req.PubVideo != nil {
|
||||
pubConfig.PubVideo = *req.PubVideo
|
||||
}
|
||||
if req.DelayCloseTimeout != nil {
|
||||
pubConfig.DelayCloseTimeout = req.DelayCloseTimeout.AsDuration()
|
||||
}
|
||||
if req.Speed != nil {
|
||||
pubConfig.Speed = *req.Speed
|
||||
}
|
||||
if req.MaxCount != nil {
|
||||
pubConfig.MaxCount = int(*req.MaxCount)
|
||||
}
|
||||
if req.KickExist != nil {
|
||||
pubConfig.KickExist = *req.KickExist
|
||||
}
|
||||
if req.PublishTimeout != nil {
|
||||
pubConfig.PublishTimeout = req.PublishTimeout.AsDuration()
|
||||
}
|
||||
if req.WaitCloseTimeout != nil {
|
||||
pubConfig.WaitCloseTimeout = req.WaitCloseTimeout.AsDuration()
|
||||
}
|
||||
if req.IdleTimeout != nil {
|
||||
pubConfig.IdleTimeout = req.IdleTimeout.AsDuration()
|
||||
}
|
||||
if req.PauseTimeout != nil {
|
||||
pubConfig.PauseTimeout = req.PauseTimeout.AsDuration()
|
||||
}
|
||||
if req.BufferTime != nil {
|
||||
pubConfig.BufferTime = req.BufferTime.AsDuration()
|
||||
}
|
||||
if req.Scale != nil {
|
||||
pubConfig.Scale = *req.Scale
|
||||
}
|
||||
if req.MaxFPS != nil {
|
||||
pubConfig.MaxFPS = int(*req.MaxFPS)
|
||||
}
|
||||
if req.Key != nil {
|
||||
pubConfig.Key = *req.Key
|
||||
}
|
||||
if req.RelayMode != nil {
|
||||
pubConfig.RelayMode = *req.RelayMode
|
||||
}
|
||||
if req.PubType != nil {
|
||||
pubConfig.PubType = *req.PubType
|
||||
}
|
||||
if req.Dump != nil {
|
||||
pubConfig.Dump = *req.Dump
|
||||
}
|
||||
|
||||
_, err = p.Pull(streamPath, pullConfig, &pubConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pb.SuccessResponse{
|
||||
Code: 0,
|
||||
Message: "success",
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &pb.SuccessResponse{
|
||||
Code: 0,
|
||||
Message: "success",
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Server) GetAlarmList(ctx context.Context, req *pb.AlarmListRequest) (res *pb.AlarmListResponse, err error) {
|
||||
// 初始化响应对象
|
||||
res = &pb.AlarmListResponse{
|
||||
Code: 0,
|
||||
Message: "success",
|
||||
PageNum: req.PageNum,
|
||||
PageSize: req.PageSize,
|
||||
}
|
||||
|
||||
// 检查数据库连接是否可用
|
||||
if s.DB == nil {
|
||||
res.Code = 500
|
||||
res.Message = "数据库连接不可用"
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// 构建查询条件
|
||||
query := s.DB.Model(&AlarmInfo{})
|
||||
|
||||
// 添加时间范围过滤
|
||||
startTime, endTime, err := util.TimeRangeQueryParse(url.Values{
|
||||
"range": []string{req.Range},
|
||||
"start": []string{req.Start},
|
||||
"end": []string{req.End},
|
||||
})
|
||||
if err == nil {
|
||||
if !startTime.IsZero() {
|
||||
query = query.Where("created_at >= ?", startTime)
|
||||
}
|
||||
if !endTime.IsZero() {
|
||||
query = query.Where("created_at <= ?", endTime)
|
||||
}
|
||||
}
|
||||
|
||||
// 添加告警类型过滤
|
||||
if req.AlarmType != 0 {
|
||||
query = query.Where("alarm_type = ?", req.AlarmType)
|
||||
}
|
||||
|
||||
// 添加 StreamPath 过滤
|
||||
if req.StreamPath != "" {
|
||||
if strings.Contains(req.StreamPath, "*") {
|
||||
// 支持通配符搜索
|
||||
query = query.Where("stream_path LIKE ?", strings.ReplaceAll(req.StreamPath, "*", "%"))
|
||||
} else {
|
||||
query = query.Where("stream_path = ?", req.StreamPath)
|
||||
}
|
||||
}
|
||||
|
||||
// 添加 StreamName 过滤
|
||||
if req.StreamName != "" {
|
||||
if strings.Contains(req.StreamName, "*") {
|
||||
// 支持通配符搜索
|
||||
query = query.Where("stream_name LIKE ?", strings.ReplaceAll(req.StreamName, "*", "%"))
|
||||
} else {
|
||||
query = query.Where("stream_name = ?", req.StreamName)
|
||||
}
|
||||
}
|
||||
|
||||
// 计算总记录数
|
||||
var total int64
|
||||
if err = query.Count(&total).Error; err != nil {
|
||||
res.Code = 500
|
||||
res.Message = "查询告警信息总数失败: " + err.Error()
|
||||
return res, nil
|
||||
}
|
||||
res.Total = int32(total)
|
||||
|
||||
// 如果没有记录,直接返回
|
||||
if total == 0 {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// 处理分页参数
|
||||
if req.PageNum <= 0 {
|
||||
req.PageNum = 1
|
||||
}
|
||||
if req.PageSize <= 0 {
|
||||
req.PageSize = 10
|
||||
}
|
||||
|
||||
// 查询分页数据
|
||||
var alarmInfoList []AlarmInfo
|
||||
offset := (req.PageNum - 1) * req.PageSize
|
||||
if err = query.Order("created_at DESC").
|
||||
Offset(int(offset)).
|
||||
Limit(int(req.PageSize)).
|
||||
Find(&alarmInfoList).Error; err != nil {
|
||||
res.Code = 500
|
||||
res.Message = "查询告警信息失败: " + err.Error()
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// 转换为 protobuf 格式
|
||||
res.Data = make([]*pb.AlarmInfo, len(alarmInfoList))
|
||||
for i, alarm := range alarmInfoList {
|
||||
res.Data[i] = &pb.AlarmInfo{
|
||||
Id: uint32(alarm.ID),
|
||||
ServerInfo: alarm.ServerInfo,
|
||||
StreamName: alarm.StreamName,
|
||||
StreamPath: alarm.StreamPath,
|
||||
AlarmDesc: alarm.AlarmDesc,
|
||||
AlarmName: alarm.AlarmName,
|
||||
AlarmType: int32(alarm.AlarmType),
|
||||
IsSent: alarm.IsSent,
|
||||
CreatedAt: timestamppb.New(alarm.CreatedAt),
|
||||
UpdatedAt: timestamppb.New(alarm.UpdatedAt),
|
||||
FilePath: alarm.FilePath,
|
||||
}
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
@@ -143,10 +143,10 @@ func (s *Server) api_Config_YAML_All(rw http.ResponseWriter, r *http.Request) {
|
||||
|
||||
// 3. Process plugin configs.
|
||||
for _, meta := range plugins {
|
||||
if filterName != "" && meta.Name != filterName {
|
||||
if filterName != "" && !strings.EqualFold(meta.Name, filterName) {
|
||||
continue
|
||||
}
|
||||
|
||||
name := strings.ToLower(meta.Name)
|
||||
configType := meta.Type
|
||||
if configType.Kind() == reflect.Ptr {
|
||||
configType = configType.Elem()
|
||||
@@ -168,12 +168,12 @@ func (s *Server) api_Config_YAML_All(rw http.ResponseWriter, r *http.Request) {
|
||||
configSections = append(configSections, struct {
|
||||
name string
|
||||
data any
|
||||
}{meta.Name, mergedConf})
|
||||
}{name, mergedConf})
|
||||
} else {
|
||||
configSections = append(configSections, struct {
|
||||
name string
|
||||
data any
|
||||
}{meta.Name, pluginConf})
|
||||
}{name, pluginConf})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@@ -93,7 +93,7 @@ Plugins can add global middleware using the `AddMiddleware` method to handle all
|
||||
|
||||
Example code:
|
||||
```go
|
||||
func (p *YourPlugin) OnInit() {
|
||||
func (p *YourPlugin) Start() {
|
||||
// Add authentication middleware
|
||||
p.GetCommonConf().AddMiddleware(func(next http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
|
@@ -116,7 +116,7 @@ type MyLogHandler struct {
|
||||
}
|
||||
|
||||
// Add handler during plugin initialization
|
||||
func (p *MyPlugin) OnInit() error {
|
||||
func (p *MyPlugin) Start() error {
|
||||
handler := &MyLogHandler{}
|
||||
p.Server.LogHandler.Add(handler)
|
||||
return nil
|
||||
|
@@ -93,7 +93,7 @@ Plugins start through the `Plugin.Start` method, executing these operations in s
|
||||
- Start QUIC services (if implementing IQUICPlugin interface)
|
||||
|
||||
4. Plugin Initialization Callback
|
||||
- Call plugin's OnInit method
|
||||
- Call plugin's Start method
|
||||
- Handle initialization errors
|
||||
|
||||
5. Timer Task Setup
|
||||
@@ -109,7 +109,7 @@ The startup phase is crucial for plugins to begin providing services, with all p
|
||||
|
||||
### 4. Stop Phase (Stop)
|
||||
|
||||
The plugin stop phase is implemented through the `Plugin.OnStop` method and related stop handling logic, including:
|
||||
The plugin stop phase is implemented through the `Plugin.OnDispose` method and related stop handling logic, including:
|
||||
|
||||
1. Service Shutdown
|
||||
- Stop all network services (HTTP/HTTPS/TCP/UDP/QUIC)
|
||||
@@ -127,7 +127,7 @@ The plugin stop phase is implemented through the `Plugin.OnStop` method and rela
|
||||
- Trigger stop event notifications
|
||||
|
||||
4. Callback Processing
|
||||
- Call plugin's custom OnStop method
|
||||
- Call plugin's custom OnDispose method
|
||||
- Execute registered stop callback functions
|
||||
- Handle errors during stop process
|
||||
|
||||
@@ -143,7 +143,7 @@ The stop phase aims to ensure plugins can safely and cleanly stop running withou
|
||||
The plugin destroy phase is implemented through the `Plugin.Dispose` method, the final phase in a plugin's lifecycle, including:
|
||||
|
||||
1. Resource Release
|
||||
- Call plugin's OnStop method for stop processing
|
||||
- Call plugin's OnDispose method for stop processing
|
||||
- Remove from server's plugin list
|
||||
- Release all allocated system resources
|
||||
|
||||
|
@@ -57,7 +57,7 @@ monibuca/
|
||||
│ ├── debug/ # 调试插件
|
||||
│ ├── cascade/ # 级联插件
|
||||
│ ├── logrotate/ # 日志轮转插件
|
||||
│ ├── stress/ # 压力测试插件
|
||||
│ ├── test/ # 测试插件(包含压力测试功能)
|
||||
│ ├── vmlog/ # 虚拟内存日志插件
|
||||
│ ├── preview/ # 预览插件
|
||||
│ └── transcode/ # 转码插件
|
||||
|
@@ -93,7 +93,7 @@ func (p *YourPlugin) RegisterHandler() {
|
||||
|
||||
示例代码:
|
||||
```go
|
||||
func (p *YourPlugin) OnInit() {
|
||||
func (p *YourPlugin) Start() {
|
||||
// 添加认证中间件
|
||||
p.GetCommonConf().AddMiddleware(func(next http.HandlerFunc) http.HandlerFunc {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
|
@@ -116,7 +116,7 @@ type MyLogHandler struct {
|
||||
}
|
||||
|
||||
// 在插件初始化时添加处理器
|
||||
func (p *MyPlugin) OnInit() error {
|
||||
func (p *MyPlugin) Start() error {
|
||||
handler := &MyLogHandler{}
|
||||
p.Server.LogHandler.Add(handler)
|
||||
return nil
|
||||
|
@@ -109,7 +109,7 @@ Monibuca 采用插件化架构设计,通过插件机制来扩展功能。插
|
||||
|
||||
### 4. 停止阶段 (Stop)
|
||||
|
||||
插件的停止阶段通过 `Plugin.OnStop` 方法和相关的停止处理逻辑实现,主要包含以下步骤:
|
||||
插件的停止阶段通过 `Plugin.OnDispose` 方法和相关的停止处理逻辑实现,主要包含以下步骤:
|
||||
|
||||
1. 停止服务
|
||||
- 停止所有网络服务(HTTP/HTTPS/TCP/UDP/QUIC)
|
||||
|
@@ -10,3 +10,5 @@ cascadeclient:
|
||||
onsub:
|
||||
pull:
|
||||
.*: m7s://$0
|
||||
flv:
|
||||
enable: true
|
||||
|
@@ -9,7 +9,7 @@ transcode:
|
||||
transform:
|
||||
^live.+:
|
||||
input:
|
||||
mode: rtsp
|
||||
mode: pipe
|
||||
output:
|
||||
- target: rtmp://localhost/trans/$0/small
|
||||
conf: -loglevel debug -c:a aac -c:v h264 -vf scale=320:240
|
||||
|
@@ -4,12 +4,15 @@ import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5"
|
||||
_ "m7s.live/v5/plugin/debug"
|
||||
_ "m7s.live/v5/plugin/flv"
|
||||
_ "m7s.live/v5/plugin/gb28181"
|
||||
_ "m7s.live/v5/plugin/logrotate"
|
||||
_ "m7s.live/v5/plugin/monitor"
|
||||
_ "m7s.live/v5/plugin/mp4"
|
||||
mp4 "m7s.live/v5/plugin/mp4/pkg"
|
||||
_ "m7s.live/v5/plugin/preview"
|
||||
@@ -17,12 +20,9 @@ import (
|
||||
_ "m7s.live/v5/plugin/rtsp"
|
||||
_ "m7s.live/v5/plugin/sei"
|
||||
_ "m7s.live/v5/plugin/srt"
|
||||
_ "m7s.live/v5/plugin/stress"
|
||||
_ "m7s.live/v5/plugin/test"
|
||||
_ "m7s.live/v5/plugin/transcode"
|
||||
_ "m7s.live/v5/plugin/webrtc"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
@@ -1,27 +1,31 @@
|
||||
global:
|
||||
location:
|
||||
"^/hdl/(.*)": "/flv/$1"
|
||||
"^/hdl/(.*)": "/flv/$1" # 兼容 v4
|
||||
"^/stress/(.*)": "/test/$1" # 5.0.x
|
||||
"^/monitor/(.*)": "/debug/$1" # 5.0.x
|
||||
loglevel: debug
|
||||
admin:
|
||||
enablelogin: false
|
||||
debug:
|
||||
enableTaskHistory: true #是否启用任务历史记录
|
||||
srt:
|
||||
listenaddr: :6000
|
||||
passphrase: foobarfoobar
|
||||
gb28181:
|
||||
enable: false # 是否启用GB28181协议
|
||||
enable: false # 是否启用GB28181协议
|
||||
autoinvite: false #建议使用false,开启后会自动邀请设备推流
|
||||
mediaip: 192.168.1.21 #流媒体收流IP,外网情况下使用公网IP,内网情况下使用网卡IP,不要用127.0.0.1
|
||||
sipip: 192.168.1.21 #SIP通讯IP,不管公网还是内网都使用本机网卡IP,不要用127.0.0.1
|
||||
mediaip: 192.168.1.21 #流媒体收流IP,外网情况下使用公网IP,内网情况下使用网卡IP,不要用127.0.0.1
|
||||
sipip: 192.168.1.21 #SIP通讯IP,不管公网还是内网都使用本机网卡IP,不要用127.0.0.1
|
||||
sip:
|
||||
listenaddr:
|
||||
- udp::5060
|
||||
- udp::5060
|
||||
onsub:
|
||||
pull:
|
||||
^\d{20}/\d{20}$: $0
|
||||
^gb_\d+/(.+)$: $1
|
||||
# .* : $0
|
||||
# .* : $0
|
||||
platforms:
|
||||
- enable: false #是否启用平台
|
||||
- enable: false #是否启用平台
|
||||
name: "测试平台" #平台名称
|
||||
servergbid: "34020000002000000002" #上级平台GBID
|
||||
servergbdomain: "3402000000" #上级平台GB域
|
||||
@@ -51,7 +55,6 @@ mp4:
|
||||
# ^live/.+:
|
||||
# fragment: 10s
|
||||
# filepath: record/$0
|
||||
# type: fmp4
|
||||
# pull:
|
||||
# live/test: /Users/dexter/Movies/1744963190.mp4
|
||||
onsub:
|
||||
@@ -86,47 +89,51 @@ hls:
|
||||
# onpub:
|
||||
# transform:
|
||||
# .* : 5s x 3
|
||||
#rtsp:
|
||||
# pull:
|
||||
# live/test: rtsp://admin:1qaz2wsx3EDC@giroro.tpddns.cn:1554/Streaming/Channels/101
|
||||
# live/test: rtsp://admin:1qaz2wsx3EDC@localhost:8554/live/test
|
||||
|
||||
s3:
|
||||
enable: false
|
||||
auto: true # 启用自动上传
|
||||
deleteAfterUpload: false # 上传后保留本地文件
|
||||
endpoint: "storage-dev.xiding.tech"
|
||||
accessKeyId: "xidinguser"
|
||||
secretAccessKey: "U2FsdGVkX1/7uyvj0trCzSNFsfDZ66dMSAEZjNlvW1c="
|
||||
bucket: "vidu-media-bucket"
|
||||
pathPrefix: "recordings"
|
||||
forcePathStyle: true
|
||||
useSSL: true
|
||||
|
||||
rtsp:
|
||||
# pull:
|
||||
# live/test: rtsp://admin:1qaz2wsx3EDC@58.212.158.30/Streaming/Channels/101
|
||||
# live/test: rtsp://admin:1qaz2wsx3EDC@localhost:8554/live/test
|
||||
webrtc:
|
||||
publish:
|
||||
pubaudio: false
|
||||
port: udp:9000-9100
|
||||
snap:
|
||||
enable: false
|
||||
onpub:
|
||||
transform:
|
||||
.+:
|
||||
output:
|
||||
- watermark:
|
||||
text: "abcd" # 水印文字内容
|
||||
fontpath: /Users/dexter/Library/Fonts/MapleMono-NF-CN-Medium.ttf # 水印字体文件路径
|
||||
fontcolor: "rgba(255,165,0,1)" # 水印字体颜色,支持rgba格式
|
||||
fontsize: 36 # 水印字体大小
|
||||
offsetx: 0 # 水印位置X偏移
|
||||
offsety: 0 # 水印位置Y偏移
|
||||
timeinterval: 1s # 截图时间间隔
|
||||
savepath: "snaps" # 截图保存路径
|
||||
iframeinterval: 3 # 间隔多少帧截图
|
||||
querytimedelta: 3 # 查询截图时允许的最大时间差(秒)
|
||||
|
||||
crypto:
|
||||
enable: false
|
||||
isstatic: false
|
||||
algo: aes_ctr # 加密算法 支持 aes_ctr xor_c
|
||||
encryptlen: 1024
|
||||
secret:
|
||||
key: your key
|
||||
iv: your iv
|
||||
onpub:
|
||||
transform:
|
||||
.* : $0
|
||||
- watermark:
|
||||
text: "abcd" # 水印文字内容
|
||||
fontpath: /Users/dexter/Library/Fonts/MapleMono-NF-CN-Medium.ttf # 水印字体文件路径
|
||||
fontcolor: "rgba(255,165,0,1)" # 水印字体颜色,支持rgba格式
|
||||
fontsize: 36 # 水印字体大小
|
||||
offsetx: 0 # 水印位置X偏移
|
||||
offsety: 0 # 水印位置Y偏移
|
||||
timeinterval: 1s # 截图时间间隔
|
||||
savepath: "snaps" # 截图保存路径
|
||||
iframeinterval: 3 # 间隔多少帧截图
|
||||
querytimedelta: 3 # 查询截图时允许的最大时间差(秒)
|
||||
onvif:
|
||||
enable: false
|
||||
discoverinterval: 3 # 发现设备的间隔,单位秒,默认30秒,建议比rtsp插件的重连间隔大点
|
||||
autopull: true
|
||||
autoadd: true
|
||||
interfaces: # 设备发现指定网卡,以及该网卡对应IP段的全局默认账号密码,支持多网卡
|
||||
- interfacename: 以太网 # 网卡名称 或者"以太网" "eth0"等,使用ipconfig 或者 ifconfig 查看网卡名称
|
||||
- interfacename: 以太网 # 网卡名称 或者"以太网" "eth0"等,使用ipconfig 或者 ifconfig 查看网卡名称
|
||||
username: admin # onvif 账号
|
||||
password: admin # onvif 密码
|
||||
# - interfacename: WLAN 2 # 网卡2
|
||||
@@ -138,4 +145,4 @@ onvif:
|
||||
# password: '123'
|
||||
# - ip: 192.168.1.2
|
||||
# username: admin
|
||||
# password: '456'
|
||||
# password: '456'
|
||||
|
@@ -7,13 +7,11 @@ import (
|
||||
"m7s.live/v5"
|
||||
_ "m7s.live/v5/plugin/cascade"
|
||||
|
||||
_ "m7s.live/v5/plugin/crypto"
|
||||
_ "m7s.live/v5/plugin/debug"
|
||||
_ "m7s.live/v5/plugin/flv"
|
||||
_ "m7s.live/v5/plugin/gb28181"
|
||||
_ "m7s.live/v5/plugin/hls"
|
||||
_ "m7s.live/v5/plugin/logrotate"
|
||||
_ "m7s.live/v5/plugin/monitor"
|
||||
_ "m7s.live/v5/plugin/mp4"
|
||||
_ "m7s.live/v5/plugin/onvif"
|
||||
_ "m7s.live/v5/plugin/preview"
|
||||
@@ -22,7 +20,7 @@ import (
|
||||
_ "m7s.live/v5/plugin/sei"
|
||||
_ "m7s.live/v5/plugin/snap"
|
||||
_ "m7s.live/v5/plugin/srt"
|
||||
_ "m7s.live/v5/plugin/stress"
|
||||
_ "m7s.live/v5/plugin/test"
|
||||
_ "m7s.live/v5/plugin/transcode"
|
||||
_ "m7s.live/v5/plugin/webrtc"
|
||||
_ "m7s.live/v5/plugin/webtransport"
|
||||
|
@@ -3,15 +3,15 @@ package main
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
|
||||
"m7s.live/v5"
|
||||
_ "m7s.live/v5/plugin/cascade"
|
||||
_ "m7s.live/v5/plugin/debug"
|
||||
_ "m7s.live/v5/plugin/flv"
|
||||
_ "m7s.live/v5/plugin/logrotate"
|
||||
_ "m7s.live/v5/plugin/monitor"
|
||||
_ "m7s.live/v5/plugin/rtmp"
|
||||
_ "m7s.live/v5/plugin/rtsp"
|
||||
_ "m7s.live/v5/plugin/stress"
|
||||
_ "m7s.live/v5/plugin/test"
|
||||
_ "m7s.live/v5/plugin/webrtc"
|
||||
)
|
||||
|
||||
|
@@ -16,7 +16,6 @@ import (
|
||||
_ "m7s.live/v5/plugin/flv"
|
||||
_ "m7s.live/v5/plugin/gb28181"
|
||||
_ "m7s.live/v5/plugin/logrotate"
|
||||
_ "m7s.live/v5/plugin/monitor"
|
||||
_ "m7s.live/v5/plugin/mp4"
|
||||
mp4 "m7s.live/v5/plugin/mp4/pkg"
|
||||
_ "m7s.live/v5/plugin/preview"
|
||||
@@ -24,7 +23,7 @@ import (
|
||||
_ "m7s.live/v5/plugin/rtsp"
|
||||
_ "m7s.live/v5/plugin/sei"
|
||||
_ "m7s.live/v5/plugin/srt"
|
||||
_ "m7s.live/v5/plugin/stress"
|
||||
_ "m7s.live/v5/plugin/test"
|
||||
_ "m7s.live/v5/plugin/transcode"
|
||||
_ "m7s.live/v5/plugin/webrtc"
|
||||
)
|
||||
|
2
example/test/config.yaml
Normal file
2
example/test/config.yaml
Normal file
@@ -0,0 +1,2 @@
|
||||
global:
|
||||
log_level: debug
|
37
example/test/main.go
Normal file
37
example/test/main.go
Normal file
@@ -0,0 +1,37 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
|
||||
"m7s.live/v5"
|
||||
_ "m7s.live/v5/plugin/cascade"
|
||||
|
||||
_ "m7s.live/v5/plugin/debug"
|
||||
_ "m7s.live/v5/plugin/flv"
|
||||
_ "m7s.live/v5/plugin/gb28181"
|
||||
_ "m7s.live/v5/plugin/hls"
|
||||
_ "m7s.live/v5/plugin/logrotate"
|
||||
_ "m7s.live/v5/plugin/mp4"
|
||||
_ "m7s.live/v5/plugin/onvif"
|
||||
_ "m7s.live/v5/plugin/preview"
|
||||
_ "m7s.live/v5/plugin/rtmp"
|
||||
_ "m7s.live/v5/plugin/rtp"
|
||||
_ "m7s.live/v5/plugin/rtsp"
|
||||
_ "m7s.live/v5/plugin/sei"
|
||||
_ "m7s.live/v5/plugin/snap"
|
||||
_ "m7s.live/v5/plugin/srt"
|
||||
_ "m7s.live/v5/plugin/test"
|
||||
_ "m7s.live/v5/plugin/transcode"
|
||||
_ "m7s.live/v5/plugin/webrtc"
|
||||
_ "m7s.live/v5/plugin/webtransport"
|
||||
)
|
||||
|
||||
func main() {
|
||||
conf := flag.String("c", "config.yaml", "config file")
|
||||
flag.Parse()
|
||||
// ctx, _ := context.WithDeadline(context.Background(), time.Now().Add(time.Second*100))
|
||||
err := m7s.Run(context.Background(), *conf)
|
||||
fmt.Println(err)
|
||||
}
|
@@ -1,126 +0,0 @@
|
||||
// Copyright 2019 Asavie Technologies Ltd. All rights reserved.
|
||||
//
|
||||
// Use of this source code is governed by a BSD-style license
|
||||
// that can be found in the LICENSE file in the root of the source
|
||||
// tree.
|
||||
|
||||
/*
|
||||
dumpframes demostrates how to receive frames from a network link using
|
||||
github.com/asavie/xdp package, it sets up an XDP socket attached to a
|
||||
particular network link and dumps all frames it receives to standard output.
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
|
||||
"github.com/asavie/xdp"
|
||||
"github.com/asavie/xdp/examples/dumpframes/ebpf"
|
||||
"github.com/google/gopacket"
|
||||
"github.com/google/gopacket/layers"
|
||||
)
|
||||
|
||||
func main() {
|
||||
var linkName string
|
||||
var queueID int
|
||||
var protocol int64
|
||||
|
||||
log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds)
|
||||
|
||||
flag.StringVar(&linkName, "linkname", "enp3s0", "The network link on which rebroadcast should run on.")
|
||||
flag.IntVar(&queueID, "queueid", 0, "The ID of the Rx queue to which to attach to on the network link.")
|
||||
flag.Int64Var(&protocol, "ip-proto", 0, "If greater than 0 and less than or equal to 255, limit xdp bpf_redirect_map to packets with the specified IP protocol number.")
|
||||
flag.Parse()
|
||||
|
||||
interfaces, err := net.Interfaces()
|
||||
if err != nil {
|
||||
fmt.Printf("error: failed to fetch the list of network interfaces on the system: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
Ifindex := -1
|
||||
for _, iface := range interfaces {
|
||||
if iface.Name == linkName {
|
||||
Ifindex = iface.Index
|
||||
break
|
||||
}
|
||||
}
|
||||
if Ifindex == -1 {
|
||||
fmt.Printf("error: couldn't find a suitable network interface to attach to\n")
|
||||
return
|
||||
}
|
||||
|
||||
var program *xdp.Program
|
||||
|
||||
// Create a new XDP eBPF program and attach it to our chosen network link.
|
||||
if protocol == 0 {
|
||||
program, err = xdp.NewProgram(queueID + 1)
|
||||
} else {
|
||||
program, err = ebpf.NewIPProtoProgram(uint32(protocol), nil)
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Printf("error: failed to create xdp program: %v\n", err)
|
||||
return
|
||||
}
|
||||
defer program.Close()
|
||||
if err := program.Attach(Ifindex); err != nil {
|
||||
fmt.Printf("error: failed to attach xdp program to interface: %v\n", err)
|
||||
return
|
||||
}
|
||||
defer program.Detach(Ifindex)
|
||||
|
||||
// Create and initialize an XDP socket attached to our chosen network
|
||||
// link.
|
||||
xsk, err := xdp.NewSocket(Ifindex, queueID, nil)
|
||||
if err != nil {
|
||||
fmt.Printf("error: failed to create an XDP socket: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Register our XDP socket file descriptor with the eBPF program so it can be redirected packets
|
||||
if err := program.Register(queueID, xsk.FD()); err != nil {
|
||||
fmt.Printf("error: failed to register socket in BPF map: %v\n", err)
|
||||
return
|
||||
}
|
||||
defer program.Unregister(queueID)
|
||||
|
||||
for {
|
||||
// If there are any free slots on the Fill queue...
|
||||
if n := xsk.NumFreeFillSlots(); n > 0 {
|
||||
// ...then fetch up to that number of not-in-use
|
||||
// descriptors and push them onto the Fill ring queue
|
||||
// for the kernel to fill them with the received
|
||||
// frames.
|
||||
xsk.Fill(xsk.GetDescs(n, true))
|
||||
}
|
||||
|
||||
// Wait for receive - meaning the kernel has
|
||||
// produced one or more descriptors filled with a received
|
||||
// frame onto the Rx ring queue.
|
||||
log.Printf("waiting for frame(s) to be received...")
|
||||
numRx, _, err := xsk.Poll(-1)
|
||||
if err != nil {
|
||||
fmt.Printf("error: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
if numRx > 0 {
|
||||
// Consume the descriptors filled with received frames
|
||||
// from the Rx ring queue.
|
||||
rxDescs := xsk.Receive(numRx)
|
||||
|
||||
// Print the received frames and also modify them
|
||||
// in-place replacing the destination MAC address with
|
||||
// broadcast address.
|
||||
for i := 0; i < len(rxDescs); i++ {
|
||||
pktData := xsk.GetFrame(rxDescs[i])
|
||||
pkt := gopacket.NewPacket(pktData, layers.LayerTypeEthernet, gopacket.Default)
|
||||
log.Printf("received frame:\n%s%+v", hex.Dump(pktData[:]), pkt)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
40
go.mod
40
go.mod
@@ -29,14 +29,14 @@ require (
|
||||
github.com/mattn/go-sqlite3 v1.14.24
|
||||
github.com/mcuadros/go-defaults v1.2.0
|
||||
github.com/mozillazg/go-pinyin v0.20.0
|
||||
github.com/ncruces/go-sqlite3 v0.18.1
|
||||
github.com/ncruces/go-sqlite3/gormlite v0.18.0
|
||||
github.com/pion/interceptor v0.1.37
|
||||
github.com/pion/logging v0.2.2
|
||||
github.com/ncruces/go-sqlite3 v0.27.1
|
||||
github.com/ncruces/go-sqlite3/gormlite v0.24.0
|
||||
github.com/pion/interceptor v0.1.40
|
||||
github.com/pion/logging v0.2.4
|
||||
github.com/pion/rtcp v1.2.15
|
||||
github.com/pion/rtp v1.8.10
|
||||
github.com/pion/sdp/v3 v3.0.9
|
||||
github.com/pion/webrtc/v4 v4.0.7
|
||||
github.com/pion/rtp v1.8.21
|
||||
github.com/pion/sdp/v3 v3.0.15
|
||||
github.com/pion/webrtc/v4 v4.1.4
|
||||
github.com/quic-go/qpack v0.5.1
|
||||
github.com/quic-go/quic-go v0.50.1
|
||||
github.com/rs/zerolog v1.33.0
|
||||
@@ -47,7 +47,7 @@ require (
|
||||
github.com/vishvananda/netlink v1.1.0
|
||||
github.com/yapingcat/gomedia v0.0.0-20240601043430-920523f8e5c7
|
||||
golang.org/x/image v0.22.0
|
||||
golang.org/x/text v0.24.0
|
||||
golang.org/x/text v0.27.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d
|
||||
google.golang.org/grpc v1.65.0
|
||||
google.golang.org/protobuf v1.34.2
|
||||
@@ -98,15 +98,15 @@ require (
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/ncruces/julianday v1.0.0 // indirect
|
||||
github.com/pion/datachannel v1.5.10 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.4 // indirect
|
||||
github.com/pion/ice/v4 v4.0.3 // indirect
|
||||
github.com/pion/dtls/v3 v3.0.7 // indirect
|
||||
github.com/pion/ice/v4 v4.0.10 // indirect
|
||||
github.com/pion/mdns/v2 v2.0.7 // indirect
|
||||
github.com/pion/randutil v0.1.0 // indirect
|
||||
github.com/pion/sctp v1.8.35 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.4 // indirect
|
||||
github.com/pion/sctp v1.8.39 // indirect
|
||||
github.com/pion/srtp/v3 v3.0.7 // indirect
|
||||
github.com/pion/stun/v3 v3.0.0 // indirect
|
||||
github.com/pion/transport/v3 v3.0.7 // indirect
|
||||
github.com/pion/turn/v4 v4.0.0 // indirect
|
||||
github.com/pion/turn/v4 v4.1.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
@@ -117,7 +117,7 @@ require (
|
||||
github.com/shoenig/go-m1cpu v0.1.6 // indirect
|
||||
github.com/sirupsen/logrus v1.9.3 // indirect
|
||||
github.com/spf13/cast v1.7.1 // indirect
|
||||
github.com/tetratelabs/wazero v1.8.0 // indirect
|
||||
github.com/tetratelabs/wazero v1.9.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.12 // indirect
|
||||
github.com/tklauser/numcpus v0.6.1 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
@@ -131,7 +131,7 @@ require (
|
||||
github.com/yosida95/uritemplate/v3 v3.0.2 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.4 // indirect
|
||||
golang.org/x/arch v0.8.0 // indirect
|
||||
golang.org/x/sync v0.13.0 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d // indirect
|
||||
)
|
||||
|
||||
@@ -149,11 +149,11 @@ require (
|
||||
github.com/prometheus/client_golang v1.20.4
|
||||
github.com/quangngotan95/go-m3u8 v0.1.0
|
||||
go.uber.org/mock v0.5.0 // indirect
|
||||
golang.org/x/crypto v0.37.0
|
||||
golang.org/x/crypto v0.40.0
|
||||
golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7
|
||||
golang.org/x/mod v0.19.0 // indirect
|
||||
golang.org/x/net v0.39.0
|
||||
golang.org/x/sys v0.32.0
|
||||
golang.org/x/tools v0.23.0 // indirect
|
||||
golang.org/x/mod v0.25.0 // indirect
|
||||
golang.org/x/net v0.41.0
|
||||
golang.org/x/sys v0.34.0
|
||||
golang.org/x/tools v0.34.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
87
go.sum
87
go.sum
@@ -189,10 +189,10 @@ github.com/mozillazg/go-pinyin v0.20.0 h1:BtR3DsxpApHfKReaPO1fCqF4pThRwH9uwvXzm+
|
||||
github.com/mozillazg/go-pinyin v0.20.0/go.mod h1:iR4EnMMRXkfpFVV5FMi4FNB6wGq9NV6uDWbUuPhP4Yc=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/ncruces/go-sqlite3 v0.18.1 h1:iN8IMZV5EMxpH88NUac9vId23eTKNFUhP7jgY0EBbNc=
|
||||
github.com/ncruces/go-sqlite3 v0.18.1/go.mod h1:eEOyZnW1dGTJ+zDpMuzfYamEUBtdFz5zeYhqLBtHxvM=
|
||||
github.com/ncruces/go-sqlite3/gormlite v0.18.0 h1:KqP9a9wlX/Ba+yG+aeVX4pnNBNdaSO6xHdNDWzPxPnk=
|
||||
github.com/ncruces/go-sqlite3/gormlite v0.18.0/go.mod h1:RXeT1hknrz3A0tBDL6IfluDHuNkHdJeImn5TBMQg9zc=
|
||||
github.com/ncruces/go-sqlite3 v0.27.1 h1:suqlM7xhSyDVMV9RgX99MCPqt9mB6YOCzHZuiI36K34=
|
||||
github.com/ncruces/go-sqlite3 v0.27.1/go.mod h1:gpF5s+92aw2MbDmZK0ZOnCdFlpe11BH20CTspVqri0c=
|
||||
github.com/ncruces/go-sqlite3/gormlite v0.24.0 h1:81sHeq3CCdhjoqAB650n5wEdRlLO9VBvosArskcN3+c=
|
||||
github.com/ncruces/go-sqlite3/gormlite v0.24.0/go.mod h1:vXfVWdBfg7qOgqQqHpzUWl9LLswD0h+8mK4oouaV2oc=
|
||||
github.com/ncruces/julianday v1.0.0 h1:fH0OKwa7NWvniGQtxdJRxAgkBMolni2BjDHaWTxqt7M=
|
||||
github.com/ncruces/julianday v1.0.0/go.mod h1:Dusn2KvZrrovOMJuOt0TNXL6tB7U2E8kvza5fFc9G7g=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
@@ -208,36 +208,36 @@ github.com/phsym/console-slog v0.3.1 h1:Fuzcrjr40xTc004S9Kni8XfNsk+qrptQmyR+wZw9
|
||||
github.com/phsym/console-slog v0.3.1/go.mod h1:oJskjp/X6e6c0mGpfP8ELkfKUsrkDifYRAqJQgmdDS0=
|
||||
github.com/pion/datachannel v1.5.10 h1:ly0Q26K1i6ZkGf42W7D4hQYR90pZwzFOjTq5AuCKk4o=
|
||||
github.com/pion/datachannel v1.5.10/go.mod h1:p/jJfC9arb29W7WrxyKbepTU20CFgyx5oLo8Rs4Py/M=
|
||||
github.com/pion/dtls/v3 v3.0.4 h1:44CZekewMzfrn9pmGrj5BNnTMDCFwr+6sLH+cCuLM7U=
|
||||
github.com/pion/dtls/v3 v3.0.4/go.mod h1:R373CsjxWqNPf6MEkfdy3aSe9niZvL/JaKlGeFphtMg=
|
||||
github.com/pion/ice/v4 v4.0.3 h1:9s5rI1WKzF5DRqhJ+Id8bls/8PzM7mau0mj1WZb4IXE=
|
||||
github.com/pion/ice/v4 v4.0.3/go.mod h1:VfHy0beAZ5loDT7BmJ2LtMtC4dbawIkkkejHPRZNB3Y=
|
||||
github.com/pion/interceptor v0.1.37 h1:aRA8Zpab/wE7/c0O3fh1PqY0AJI3fCSEM5lRWJVorwI=
|
||||
github.com/pion/interceptor v0.1.37/go.mod h1:JzxbJ4umVTlZAf+/utHzNesY8tmRkM2lVmkS82TTj8Y=
|
||||
github.com/pion/logging v0.2.2 h1:M9+AIj/+pxNsDfAT64+MAVgJO0rsyLnoJKCqf//DoeY=
|
||||
github.com/pion/logging v0.2.2/go.mod h1:k0/tDVsRCX2Mb2ZEmTqNa7CWsQPc+YYCB7Q+5pahoms=
|
||||
github.com/pion/dtls/v3 v3.0.7 h1:bItXtTYYhZwkPFk4t1n3Kkf5TDrfj6+4wG+CZR8uI9Q=
|
||||
github.com/pion/dtls/v3 v3.0.7/go.mod h1:uDlH5VPrgOQIw59irKYkMudSFprY9IEFCqz/eTz16f8=
|
||||
github.com/pion/ice/v4 v4.0.10 h1:P59w1iauC/wPk9PdY8Vjl4fOFL5B+USq1+xbDcN6gT4=
|
||||
github.com/pion/ice/v4 v4.0.10/go.mod h1:y3M18aPhIxLlcO/4dn9X8LzLLSma84cx6emMSu14FGw=
|
||||
github.com/pion/interceptor v0.1.40 h1:e0BjnPcGpr2CFQgKhrQisBU7V3GXK6wrfYrGYaU6Jq4=
|
||||
github.com/pion/interceptor v0.1.40/go.mod h1:Z6kqH7M/FYirg3frjGJ21VLSRJGBXB/KqaTIrdqnOic=
|
||||
github.com/pion/logging v0.2.4 h1:tTew+7cmQ+Mc1pTBLKH2puKsOvhm32dROumOZ655zB8=
|
||||
github.com/pion/logging v0.2.4/go.mod h1:DffhXTKYdNZU+KtJ5pyQDjvOAh/GsNSyv1lbkFbe3so=
|
||||
github.com/pion/mdns/v2 v2.0.7 h1:c9kM8ewCgjslaAmicYMFQIde2H9/lrZpjBkN8VwoVtM=
|
||||
github.com/pion/mdns/v2 v2.0.7/go.mod h1:vAdSYNAT0Jy3Ru0zl2YiW3Rm/fJCwIeM0nToenfOJKA=
|
||||
github.com/pion/randutil v0.1.0 h1:CFG1UdESneORglEsnimhUjf33Rwjubwj6xfiOXBa3mA=
|
||||
github.com/pion/randutil v0.1.0/go.mod h1:XcJrSMMbbMRhASFVOlj/5hQial/Y8oH/HVo7TBZq+j8=
|
||||
github.com/pion/rtcp v1.2.15 h1:LZQi2JbdipLOj4eBjK4wlVoQWfrZbh3Q6eHtWtJBZBo=
|
||||
github.com/pion/rtcp v1.2.15/go.mod h1:jlGuAjHMEXwMUHK78RgX0UmEJFV4zUKOFHR7OP+D3D0=
|
||||
github.com/pion/rtp v1.8.10 h1:puphjdbjPB+L+NFaVuZ5h6bt1g5q4kFIoI+r5q/g0CU=
|
||||
github.com/pion/rtp v1.8.10/go.mod h1:8uMBJj32Pa1wwx8Fuv/AsFhn8jsgw+3rUC2PfoBZ8p4=
|
||||
github.com/pion/sctp v1.8.35 h1:qwtKvNK1Wc5tHMIYgTDJhfZk7vATGVHhXbUDfHbYwzA=
|
||||
github.com/pion/sctp v1.8.35/go.mod h1:EcXP8zCYVTRy3W9xtOF7wJm1L1aXfKRQzaM33SjQlzg=
|
||||
github.com/pion/sdp/v3 v3.0.9 h1:pX++dCHoHUwq43kuwf3PyJfHlwIj4hXA7Vrifiq0IJY=
|
||||
github.com/pion/sdp/v3 v3.0.9/go.mod h1:B5xmvENq5IXJimIO4zfp6LAe1fD9N+kFv+V/1lOdz8M=
|
||||
github.com/pion/srtp/v3 v3.0.4 h1:2Z6vDVxzrX3UHEgrUyIGM4rRouoC7v+NiF1IHtp9B5M=
|
||||
github.com/pion/srtp/v3 v3.0.4/go.mod h1:1Jx3FwDoxpRaTh1oRV8A/6G1BnFL+QI82eK4ms8EEJQ=
|
||||
github.com/pion/rtp v1.8.21 h1:3yrOwmZFyUpcIosNcWRpQaU+UXIJ6yxLuJ8Bx0mw37Y=
|
||||
github.com/pion/rtp v1.8.21/go.mod h1:bAu2UFKScgzyFqvUKmbvzSdPr+NGbZtv6UB2hesqXBk=
|
||||
github.com/pion/sctp v1.8.39 h1:PJma40vRHa3UTO3C4MyeJDQ+KIobVYRZQZ0Nt7SjQnE=
|
||||
github.com/pion/sctp v1.8.39/go.mod h1:cNiLdchXra8fHQwmIoqw0MbLLMs+f7uQ+dGMG2gWebE=
|
||||
github.com/pion/sdp/v3 v3.0.15 h1:F0I1zds+K/+37ZrzdADmx2Q44OFDOPRLhPnNTaUX9hk=
|
||||
github.com/pion/sdp/v3 v3.0.15/go.mod h1:88GMahN5xnScv1hIMTqLdu/cOcUkj6a9ytbncwMCq2E=
|
||||
github.com/pion/srtp/v3 v3.0.7 h1:QUElw0A/FUg3MP8/KNMZB3i0m8F9XeMnTum86F7S4bs=
|
||||
github.com/pion/srtp/v3 v3.0.7/go.mod h1:qvnHeqbhT7kDdB+OGB05KA/P067G3mm7XBfLaLiaNF0=
|
||||
github.com/pion/stun/v3 v3.0.0 h1:4h1gwhWLWuZWOJIJR9s2ferRO+W3zA/b6ijOI6mKzUw=
|
||||
github.com/pion/stun/v3 v3.0.0/go.mod h1:HvCN8txt8mwi4FBvS3EmDghW6aQJ24T+y+1TKjB5jyU=
|
||||
github.com/pion/transport/v3 v3.0.7 h1:iRbMH05BzSNwhILHoBoAPxoB9xQgOaJk+591KC9P1o0=
|
||||
github.com/pion/transport/v3 v3.0.7/go.mod h1:YleKiTZ4vqNxVwh77Z0zytYi7rXHl7j6uPLGhhz9rwo=
|
||||
github.com/pion/turn/v4 v4.0.0 h1:qxplo3Rxa9Yg1xXDxxH8xaqcyGUtbHYw4QSCvmFWvhM=
|
||||
github.com/pion/turn/v4 v4.0.0/go.mod h1:MuPDkm15nYSklKpN8vWJ9W2M0PlyQZqYt1McGuxG7mA=
|
||||
github.com/pion/webrtc/v4 v4.0.7 h1:aeq78uVnFZd2umXW0O9A2VFQYuS7+BZxWetQvSp2jPo=
|
||||
github.com/pion/webrtc/v4 v4.0.7/go.mod h1:oFVBBVSHU3vAEwSgnk3BuKCwAUwpDwQhko1EDwyZWbU=
|
||||
github.com/pion/turn/v4 v4.1.1 h1:9UnY2HB99tpDyz3cVVZguSxcqkJ1DsTSZ+8TGruh4fc=
|
||||
github.com/pion/turn/v4 v4.1.1/go.mod h1:2123tHk1O++vmjI5VSD0awT50NywDAq5A2NNNU4Jjs8=
|
||||
github.com/pion/webrtc/v4 v4.1.4 h1:/gK1ACGHXQmtyVVbJFQDxNoODg4eSRiFLB7t9r9pg8M=
|
||||
github.com/pion/webrtc/v4 v4.1.4/go.mod h1:Oab9npu1iZtQRMic3K3toYq5zFPvToe/QBw7dMI2ok4=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.4.0/go.mod h1:NWz/XGvpEW1FyYQ7fCx4dqYBLlfTcE+A9FLAkNKqjFE=
|
||||
@@ -287,22 +287,15 @@ github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
|
||||
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/sunfish-shogi/bufseekio v0.0.0-20210207115823-a4185644b365/go.mod h1:dEzdXgvImkQ3WLI+0KQpmEx8T/C/ma9KeS3AfmU899I=
|
||||
github.com/tetratelabs/wazero v1.8.0 h1:iEKu0d4c2Pd+QSRieYbnQC9yiFlMS9D+Jr0LsRmcF4g=
|
||||
github.com/tetratelabs/wazero v1.8.0/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs=
|
||||
github.com/tetratelabs/wazero v1.9.0 h1:IcZ56OuxrtaEz8UYNRHBrUa9bYeX9oVY93KspZZBf/I=
|
||||
github.com/tetratelabs/wazero v1.9.0/go.mod h1:TSbcXCfFP0L2FGkRPxHphadXPjo1T6W+CseNNY7EkjM=
|
||||
github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
|
||||
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
|
||||
github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
|
||||
@@ -341,8 +334,8 @@ golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
|
||||
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
|
||||
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
|
||||
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7 h1:wDLEX9a7YQoKdKNQt88rtydkqDxeGaBUTnIYc3iG/mA=
|
||||
golang.org/x/exp v0.0.0-20240716175740-e3f259677ff7/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
|
||||
golang.org/x/image v0.0.0-20191009234506-e7c1f5e7dbb8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
@@ -350,17 +343,17 @@ golang.org/x/image v0.22.0 h1:UtK5yLUzilVrkjMAZAZ34DXGpASN8i8pj8g+O+yd10g=
|
||||
golang.org/x/image v0.22.0/go.mod h1:9hPFhljd4zZ1GNSIZJ49sqbp45GKK9t6w+iXvGqZUz4=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
|
||||
golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/mod v0.25.0 h1:n7a+ZbQKQA/Ysbyb0/6IbB1H/X41mKgbhfv7AfG/44w=
|
||||
golang.org/x/mod v0.25.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
|
||||
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
|
||||
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
|
||||
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
|
||||
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
|
||||
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
@@ -381,19 +374,19 @@ golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
|
||||
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
|
||||
golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
|
||||
golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
|
||||
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
|
||||
golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
|
||||
golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
|
||||
golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240711142825-46eb208f015d h1:kHjw/5UfflP/L5EbledDrcG4C2597RtymmGRZvHiCuY=
|
||||
|
@@ -1,7 +1,7 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.36.6
|
||||
// protoc v5.29.3
|
||||
// protoc v6.31.1
|
||||
// source: auth.proto
|
||||
|
||||
package pb
|
||||
|
168
pb/auth.pb.gw.go
168
pb/auth.pb.gw.go
@@ -10,6 +10,7 @@ package pb
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
@@ -24,116 +25,118 @@ import (
|
||||
)
|
||||
|
||||
// Suppress "imported and not used" errors
|
||||
var _ codes.Code
|
||||
var _ io.Reader
|
||||
var _ status.Status
|
||||
var _ = runtime.String
|
||||
var _ = utilities.NewDoubleArray
|
||||
var _ = metadata.Join
|
||||
var (
|
||||
_ codes.Code
|
||||
_ io.Reader
|
||||
_ status.Status
|
||||
_ = errors.New
|
||||
_ = runtime.String
|
||||
_ = utilities.NewDoubleArray
|
||||
_ = metadata.Join
|
||||
)
|
||||
|
||||
func request_Auth_Login_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq LoginRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
var (
|
||||
protoReq LoginRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.Login(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Auth_Login_0(ctx context.Context, marshaler runtime.Marshaler, server AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq LoginRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
var (
|
||||
protoReq LoginRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.Login(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_Auth_Logout_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq LogoutRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
var (
|
||||
protoReq LogoutRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
msg, err := client.Logout(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Auth_Logout_0(ctx context.Context, marshaler runtime.Marshaler, server AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq LogoutRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
var (
|
||||
protoReq LogoutRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := marshaler.NewDecoder(req.Body).Decode(&protoReq); err != nil && !errors.Is(err, io.EOF) {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.Logout(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
var (
|
||||
filter_Auth_GetUserInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
|
||||
)
|
||||
var filter_Auth_GetUserInfo_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
|
||||
|
||||
func request_Auth_GetUserInfo_0(ctx context.Context, marshaler runtime.Marshaler, client AuthClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq UserInfoRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
protoReq UserInfoRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if req.Body != nil {
|
||||
_, _ = io.Copy(io.Discard, req.Body)
|
||||
}
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Auth_GetUserInfo_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := client.GetUserInfo(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_Auth_GetUserInfo_0(ctx context.Context, marshaler runtime.Marshaler, server AuthServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq UserInfoRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
protoReq UserInfoRequest
|
||||
metadata runtime.ServerMetadata
|
||||
)
|
||||
if err := req.ParseForm(); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Auth_GetUserInfo_0); err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
msg, err := server.GetUserInfo(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
// RegisterAuthHandlerServer registers the http handlers for service Auth to "mux".
|
||||
// UnaryRPC :call AuthServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterAuthHandlerFromEndpoint instead.
|
||||
// GRPC interceptors will not work for this type of registration. To use interceptors, you must use the "runtime.WithMiddlewares" option in the "runtime.NewServeMux" call.
|
||||
func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, server AuthServer) error {
|
||||
|
||||
mux.Handle("POST", pattern_Auth_Login_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
mux.Handle(http.MethodPost, pattern_Auth_Login_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.Auth/Login", runtime.WithHTTPPathPattern("/api/auth/login"))
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.Auth/Login", runtime.WithHTTPPathPattern("/api/auth/login"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@@ -145,20 +148,15 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Auth_Login_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_Auth_Logout_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
mux.Handle(http.MethodPost, pattern_Auth_Logout_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.Auth/Logout", runtime.WithHTTPPathPattern("/api/auth/logout"))
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.Auth/Logout", runtime.WithHTTPPathPattern("/api/auth/logout"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@@ -170,20 +168,15 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Auth_Logout_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Auth_GetUserInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
mux.Handle(http.MethodGet, pattern_Auth_GetUserInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.Auth/GetUserInfo", runtime.WithHTTPPathPattern("/api/auth/userinfo"))
|
||||
annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/pb.Auth/GetUserInfo", runtime.WithHTTPPathPattern("/api/auth/userinfo"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@@ -195,9 +188,7 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Auth_GetUserInfo_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
@@ -206,25 +197,24 @@ func RegisterAuthHandlerServer(ctx context.Context, mux *runtime.ServeMux, serve
|
||||
// RegisterAuthHandlerFromEndpoint is same as RegisterAuthHandler but
|
||||
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
|
||||
func RegisterAuthHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
|
||||
conn, err := grpc.DialContext(ctx, endpoint, opts...)
|
||||
conn, err := grpc.NewClient(endpoint, opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
return
|
||||
}
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
if cerr := conn.Close(); cerr != nil {
|
||||
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
grpclog.Errorf("Failed to close conn to %s: %v", endpoint, cerr)
|
||||
}
|
||||
}()
|
||||
}()
|
||||
|
||||
return RegisterAuthHandler(ctx, mux, conn)
|
||||
}
|
||||
|
||||
@@ -238,16 +228,13 @@ func RegisterAuthHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.
|
||||
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AuthClient".
|
||||
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AuthClient"
|
||||
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
|
||||
// "AuthClient" to call the correct interceptors.
|
||||
// "AuthClient" to call the correct interceptors. This client ignores the HTTP middlewares.
|
||||
func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AuthClient) error {
|
||||
|
||||
mux.Handle("POST", pattern_Auth_Login_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
mux.Handle(http.MethodPost, pattern_Auth_Login_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.Auth/Login", runtime.WithHTTPPathPattern("/api/auth/login"))
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/pb.Auth/Login", runtime.WithHTTPPathPattern("/api/auth/login"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@@ -258,18 +245,13 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Auth_Login_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_Auth_Logout_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
mux.Handle(http.MethodPost, pattern_Auth_Logout_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.Auth/Logout", runtime.WithHTTPPathPattern("/api/auth/logout"))
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/pb.Auth/Logout", runtime.WithHTTPPathPattern("/api/auth/logout"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@@ -280,18 +262,13 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Auth_Logout_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_Auth_GetUserInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
mux.Handle(http.MethodGet, pattern_Auth_GetUserInfo_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
var err error
|
||||
var annotatedContext context.Context
|
||||
annotatedContext, err = runtime.AnnotateContext(ctx, mux, req, "/pb.Auth/GetUserInfo", runtime.WithHTTPPathPattern("/api/auth/userinfo"))
|
||||
annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/pb.Auth/GetUserInfo", runtime.WithHTTPPathPattern("/api/auth/userinfo"))
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
@@ -302,26 +279,19 @@ func RegisterAuthHandlerClient(ctx context.Context, mux *runtime.ServeMux, clien
|
||||
runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_Auth_GetUserInfo_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
pattern_Auth_Login_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "auth", "login"}, ""))
|
||||
|
||||
pattern_Auth_Logout_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "auth", "logout"}, ""))
|
||||
|
||||
pattern_Auth_Login_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "auth", "login"}, ""))
|
||||
pattern_Auth_Logout_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "auth", "logout"}, ""))
|
||||
pattern_Auth_GetUserInfo_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"api", "auth", "userinfo"}, ""))
|
||||
)
|
||||
|
||||
var (
|
||||
forward_Auth_Login_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Auth_Logout_0 = runtime.ForwardResponseMessage
|
||||
|
||||
forward_Auth_Login_0 = runtime.ForwardResponseMessage
|
||||
forward_Auth_Logout_0 = runtime.ForwardResponseMessage
|
||||
forward_Auth_GetUserInfo_0 = runtime.ForwardResponseMessage
|
||||
)
|
||||
|
@@ -1,7 +1,7 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.5.1
|
||||
// - protoc v5.29.3
|
||||
// - protoc v6.31.1
|
||||
// source: auth.proto
|
||||
|
||||
package pb
|
||||
|
1806
pb/global.pb.go
1806
pb/global.pb.go
File diff suppressed because it is too large
Load Diff
2782
pb/global.pb.gw.go
2782
pb/global.pb.gw.go
File diff suppressed because it is too large
Load Diff
134
pb/global.proto
134
pb/global.proto
@@ -181,7 +181,7 @@ service api {
|
||||
}
|
||||
};
|
||||
}
|
||||
rpc UpdatePullProxy (PullProxyInfo) returns (SuccessResponse) {
|
||||
rpc UpdatePullProxy (UpdatePullProxyRequest) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/proxy/pull/update"
|
||||
body: "*"
|
||||
@@ -208,7 +208,7 @@ service api {
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
rpc UpdatePushProxy (PushProxyInfo) returns (SuccessResponse) {
|
||||
rpc UpdatePushProxy (UpdatePushProxyRequest) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/proxy/push/update"
|
||||
body: "*"
|
||||
@@ -245,6 +245,23 @@ service api {
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
rpc GetAlarmList (AlarmListRequest) returns (AlarmListResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/alarm/list"
|
||||
};
|
||||
}
|
||||
rpc GetSubscriptionProgress (StreamSnapRequest) returns (SubscriptionProgressResponse) {
|
||||
option (google.api.http) = {
|
||||
get: "/api/stream/progress/{streamPath=**}"
|
||||
};
|
||||
}
|
||||
|
||||
rpc StartPull (GlobalPullRequest) returns (SuccessResponse) {
|
||||
option (google.api.http) = {
|
||||
post: "/api/stream/pull"
|
||||
body: "*"
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
message DisabledPluginsResponse {
|
||||
@@ -361,6 +378,8 @@ message TaskTreeData {
|
||||
TaskTreeData blocked = 8;
|
||||
uint64 pointer = 9;
|
||||
string startReason = 10;
|
||||
bool eventLoopRunning = 11;
|
||||
uint32 level = 12;
|
||||
}
|
||||
|
||||
message TaskTreeResponse {
|
||||
@@ -564,6 +583,24 @@ message PullProxyInfo {
|
||||
google.protobuf.Duration recordFragment = 14; // 录制片段长度
|
||||
uint32 rtt = 15; // 平均RTT
|
||||
string streamPath = 16; // 流路径
|
||||
google.protobuf.Duration checkInterval = 17; // 检查间隔
|
||||
}
|
||||
|
||||
message UpdatePullProxyRequest {
|
||||
uint32 ID = 1;
|
||||
optional uint32 parentID = 2; // 父设备ID
|
||||
optional string name = 3; // 设备名称
|
||||
optional string type = 4; // 设备类型
|
||||
optional uint32 status = 5; // 设备状态
|
||||
optional string pullURL = 6; // 拉流地址
|
||||
optional bool pullOnStart = 7; // 启动时拉流
|
||||
optional bool stopOnIdle = 8; // 空闲时停止拉流
|
||||
optional bool audio = 9; // 是否拉取音频
|
||||
optional string description = 10; // 设备描述
|
||||
optional string recordPath = 11; // 录制路径
|
||||
optional google.protobuf.Duration recordFragment = 12; // 录制片段长度
|
||||
optional string streamPath = 13; // 流路径
|
||||
optional google.protobuf.Duration checkInterval = 14; // 检查间隔
|
||||
}
|
||||
|
||||
message PushProxyInfo {
|
||||
@@ -582,6 +619,20 @@ message PushProxyInfo {
|
||||
string streamPath = 13; // 流路径
|
||||
}
|
||||
|
||||
message UpdatePushProxyRequest {
|
||||
uint32 ID = 1;
|
||||
optional uint32 parentID = 2; // 父设备ID
|
||||
optional string name = 3; // 设备名称
|
||||
optional string type = 4; // 设备类型
|
||||
optional uint32 status = 5; // 设备状态
|
||||
optional string pushURL = 6; // 推流地址
|
||||
optional bool pushOnStart = 7; // 启动时推流
|
||||
optional bool audio = 8; // 是否推音频
|
||||
optional string description = 9; // 设备描述
|
||||
optional uint32 rtt = 10; // 平均RTT
|
||||
optional string streamPath = 11; // 流路径
|
||||
}
|
||||
|
||||
message PushProxyListResponse {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
@@ -741,4 +792,83 @@ message ResponseDelete {
|
||||
|
||||
message ReqRecordCatalog {
|
||||
string type = 1;
|
||||
}
|
||||
|
||||
message AlarmInfo {
|
||||
uint32 id = 1;
|
||||
string serverInfo = 2;
|
||||
string streamName = 3;
|
||||
string streamPath = 4;
|
||||
string alarmDesc = 5;
|
||||
string alarmName = 6;
|
||||
int32 alarmType = 7;
|
||||
bool isSent = 8;
|
||||
string filePath = 9;
|
||||
google.protobuf.Timestamp createdAt = 10;
|
||||
google.protobuf.Timestamp updatedAt = 11;
|
||||
}
|
||||
|
||||
message AlarmListRequest {
|
||||
int32 pageNum = 1;
|
||||
int32 pageSize = 2;
|
||||
string range = 3;
|
||||
string start = 4;
|
||||
string end = 5;
|
||||
int32 alarmType = 6;
|
||||
string streamPath = 7;
|
||||
string streamName = 8;
|
||||
}
|
||||
|
||||
message AlarmListResponse {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
int32 total = 3;
|
||||
int32 pageNum = 4;
|
||||
int32 pageSize = 5;
|
||||
repeated AlarmInfo data = 6;
|
||||
}
|
||||
|
||||
message Step {
|
||||
string name = 1;
|
||||
string description = 2;
|
||||
string error = 3;
|
||||
google.protobuf.Timestamp startedAt = 4;
|
||||
google.protobuf.Timestamp completedAt = 5;
|
||||
}
|
||||
|
||||
message SubscriptionProgressData {
|
||||
repeated Step steps = 1;
|
||||
int32 currentStep = 2;
|
||||
}
|
||||
|
||||
message SubscriptionProgressResponse {
|
||||
int32 code = 1;
|
||||
string message = 2;
|
||||
SubscriptionProgressData data = 3;
|
||||
}
|
||||
|
||||
message GlobalPullRequest {
|
||||
string remoteURL = 1;
|
||||
string protocol = 2;
|
||||
int32 testMode = 3; // 0: pull, 1: pull without publish
|
||||
string streamPath = 4; // 流路径
|
||||
|
||||
// Publish configuration
|
||||
optional bool pubAudio = 5;
|
||||
optional bool pubVideo = 6;
|
||||
optional google.protobuf.Duration delayCloseTimeout = 7; // 延迟自动关闭(无订阅时)
|
||||
optional double speed = 8; // 发送速率
|
||||
optional int32 maxCount = 9; // 最大发布者数量
|
||||
optional bool kickExist = 10; // 是否踢掉已经存在的发布者
|
||||
optional google.protobuf.Duration publishTimeout = 11; // 发布无数据超时
|
||||
optional google.protobuf.Duration waitCloseTimeout = 12; // 延迟自动关闭(等待重连)
|
||||
optional google.protobuf.Duration idleTimeout = 13; // 空闲(无订阅)超时
|
||||
optional google.protobuf.Duration pauseTimeout = 14; // 暂停超时时间
|
||||
optional google.protobuf.Duration bufferTime = 15; // 缓冲时长,0代表取最近关键帧
|
||||
optional double scale = 16; // 缩放倍数
|
||||
optional int32 maxFPS = 17; // 最大FPS
|
||||
optional string key = 18; // 发布鉴权key
|
||||
optional string relayMode = 19; // 转发模式
|
||||
optional string pubType = 20; // 发布类型
|
||||
optional bool dump = 21; // 是否dump
|
||||
}
|
@@ -1,7 +1,7 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
// versions:
|
||||
// - protoc-gen-go-grpc v1.5.1
|
||||
// - protoc v5.29.3
|
||||
// - protoc v6.31.1
|
||||
// source: global.proto
|
||||
|
||||
package pb
|
||||
@@ -20,47 +20,50 @@ import (
|
||||
const _ = grpc.SupportPackageIsVersion9
|
||||
|
||||
const (
|
||||
Api_SysInfo_FullMethodName = "/global.api/SysInfo"
|
||||
Api_DisabledPlugins_FullMethodName = "/global.api/DisabledPlugins"
|
||||
Api_Summary_FullMethodName = "/global.api/Summary"
|
||||
Api_Shutdown_FullMethodName = "/global.api/Shutdown"
|
||||
Api_Restart_FullMethodName = "/global.api/Restart"
|
||||
Api_TaskTree_FullMethodName = "/global.api/TaskTree"
|
||||
Api_StopTask_FullMethodName = "/global.api/StopTask"
|
||||
Api_RestartTask_FullMethodName = "/global.api/RestartTask"
|
||||
Api_StreamList_FullMethodName = "/global.api/StreamList"
|
||||
Api_WaitList_FullMethodName = "/global.api/WaitList"
|
||||
Api_StreamInfo_FullMethodName = "/global.api/StreamInfo"
|
||||
Api_PauseStream_FullMethodName = "/global.api/PauseStream"
|
||||
Api_ResumeStream_FullMethodName = "/global.api/ResumeStream"
|
||||
Api_SetStreamSpeed_FullMethodName = "/global.api/SetStreamSpeed"
|
||||
Api_SeekStream_FullMethodName = "/global.api/SeekStream"
|
||||
Api_GetSubscribers_FullMethodName = "/global.api/GetSubscribers"
|
||||
Api_AudioTrackSnap_FullMethodName = "/global.api/AudioTrackSnap"
|
||||
Api_VideoTrackSnap_FullMethodName = "/global.api/VideoTrackSnap"
|
||||
Api_ChangeSubscribe_FullMethodName = "/global.api/ChangeSubscribe"
|
||||
Api_GetStreamAlias_FullMethodName = "/global.api/GetStreamAlias"
|
||||
Api_SetStreamAlias_FullMethodName = "/global.api/SetStreamAlias"
|
||||
Api_StopPublish_FullMethodName = "/global.api/StopPublish"
|
||||
Api_StopSubscribe_FullMethodName = "/global.api/StopSubscribe"
|
||||
Api_GetConfigFile_FullMethodName = "/global.api/GetConfigFile"
|
||||
Api_UpdateConfigFile_FullMethodName = "/global.api/UpdateConfigFile"
|
||||
Api_GetConfig_FullMethodName = "/global.api/GetConfig"
|
||||
Api_GetFormily_FullMethodName = "/global.api/GetFormily"
|
||||
Api_GetPullProxyList_FullMethodName = "/global.api/GetPullProxyList"
|
||||
Api_AddPullProxy_FullMethodName = "/global.api/AddPullProxy"
|
||||
Api_RemovePullProxy_FullMethodName = "/global.api/RemovePullProxy"
|
||||
Api_UpdatePullProxy_FullMethodName = "/global.api/UpdatePullProxy"
|
||||
Api_GetPushProxyList_FullMethodName = "/global.api/GetPushProxyList"
|
||||
Api_AddPushProxy_FullMethodName = "/global.api/AddPushProxy"
|
||||
Api_RemovePushProxy_FullMethodName = "/global.api/RemovePushProxy"
|
||||
Api_UpdatePushProxy_FullMethodName = "/global.api/UpdatePushProxy"
|
||||
Api_GetRecording_FullMethodName = "/global.api/GetRecording"
|
||||
Api_GetTransformList_FullMethodName = "/global.api/GetTransformList"
|
||||
Api_GetRecordList_FullMethodName = "/global.api/GetRecordList"
|
||||
Api_GetEventRecordList_FullMethodName = "/global.api/GetEventRecordList"
|
||||
Api_GetRecordCatalog_FullMethodName = "/global.api/GetRecordCatalog"
|
||||
Api_DeleteRecord_FullMethodName = "/global.api/DeleteRecord"
|
||||
Api_SysInfo_FullMethodName = "/global.api/SysInfo"
|
||||
Api_DisabledPlugins_FullMethodName = "/global.api/DisabledPlugins"
|
||||
Api_Summary_FullMethodName = "/global.api/Summary"
|
||||
Api_Shutdown_FullMethodName = "/global.api/Shutdown"
|
||||
Api_Restart_FullMethodName = "/global.api/Restart"
|
||||
Api_TaskTree_FullMethodName = "/global.api/TaskTree"
|
||||
Api_StopTask_FullMethodName = "/global.api/StopTask"
|
||||
Api_RestartTask_FullMethodName = "/global.api/RestartTask"
|
||||
Api_StreamList_FullMethodName = "/global.api/StreamList"
|
||||
Api_WaitList_FullMethodName = "/global.api/WaitList"
|
||||
Api_StreamInfo_FullMethodName = "/global.api/StreamInfo"
|
||||
Api_PauseStream_FullMethodName = "/global.api/PauseStream"
|
||||
Api_ResumeStream_FullMethodName = "/global.api/ResumeStream"
|
||||
Api_SetStreamSpeed_FullMethodName = "/global.api/SetStreamSpeed"
|
||||
Api_SeekStream_FullMethodName = "/global.api/SeekStream"
|
||||
Api_GetSubscribers_FullMethodName = "/global.api/GetSubscribers"
|
||||
Api_AudioTrackSnap_FullMethodName = "/global.api/AudioTrackSnap"
|
||||
Api_VideoTrackSnap_FullMethodName = "/global.api/VideoTrackSnap"
|
||||
Api_ChangeSubscribe_FullMethodName = "/global.api/ChangeSubscribe"
|
||||
Api_GetStreamAlias_FullMethodName = "/global.api/GetStreamAlias"
|
||||
Api_SetStreamAlias_FullMethodName = "/global.api/SetStreamAlias"
|
||||
Api_StopPublish_FullMethodName = "/global.api/StopPublish"
|
||||
Api_StopSubscribe_FullMethodName = "/global.api/StopSubscribe"
|
||||
Api_GetConfigFile_FullMethodName = "/global.api/GetConfigFile"
|
||||
Api_UpdateConfigFile_FullMethodName = "/global.api/UpdateConfigFile"
|
||||
Api_GetConfig_FullMethodName = "/global.api/GetConfig"
|
||||
Api_GetFormily_FullMethodName = "/global.api/GetFormily"
|
||||
Api_GetPullProxyList_FullMethodName = "/global.api/GetPullProxyList"
|
||||
Api_AddPullProxy_FullMethodName = "/global.api/AddPullProxy"
|
||||
Api_RemovePullProxy_FullMethodName = "/global.api/RemovePullProxy"
|
||||
Api_UpdatePullProxy_FullMethodName = "/global.api/UpdatePullProxy"
|
||||
Api_GetPushProxyList_FullMethodName = "/global.api/GetPushProxyList"
|
||||
Api_AddPushProxy_FullMethodName = "/global.api/AddPushProxy"
|
||||
Api_RemovePushProxy_FullMethodName = "/global.api/RemovePushProxy"
|
||||
Api_UpdatePushProxy_FullMethodName = "/global.api/UpdatePushProxy"
|
||||
Api_GetRecording_FullMethodName = "/global.api/GetRecording"
|
||||
Api_GetTransformList_FullMethodName = "/global.api/GetTransformList"
|
||||
Api_GetRecordList_FullMethodName = "/global.api/GetRecordList"
|
||||
Api_GetEventRecordList_FullMethodName = "/global.api/GetEventRecordList"
|
||||
Api_GetRecordCatalog_FullMethodName = "/global.api/GetRecordCatalog"
|
||||
Api_DeleteRecord_FullMethodName = "/global.api/DeleteRecord"
|
||||
Api_GetAlarmList_FullMethodName = "/global.api/GetAlarmList"
|
||||
Api_GetSubscriptionProgress_FullMethodName = "/global.api/GetSubscriptionProgress"
|
||||
Api_StartPull_FullMethodName = "/global.api/StartPull"
|
||||
)
|
||||
|
||||
// ApiClient is the client API for Api service.
|
||||
@@ -97,17 +100,20 @@ type ApiClient interface {
|
||||
GetPullProxyList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PullProxyListResponse, error)
|
||||
AddPullProxy(ctx context.Context, in *PullProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
RemovePullProxy(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
UpdatePullProxy(ctx context.Context, in *PullProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
UpdatePullProxy(ctx context.Context, in *UpdatePullProxyRequest, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
GetPushProxyList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*PushProxyListResponse, error)
|
||||
AddPushProxy(ctx context.Context, in *PushProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
RemovePushProxy(ctx context.Context, in *RequestWithId, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
UpdatePushProxy(ctx context.Context, in *PushProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
UpdatePushProxy(ctx context.Context, in *UpdatePushProxyRequest, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
GetRecording(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*RecordingListResponse, error)
|
||||
GetTransformList(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*TransformListResponse, error)
|
||||
GetRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*RecordResponseList, error)
|
||||
GetEventRecordList(ctx context.Context, in *ReqRecordList, opts ...grpc.CallOption) (*EventRecordResponseList, error)
|
||||
GetRecordCatalog(ctx context.Context, in *ReqRecordCatalog, opts ...grpc.CallOption) (*ResponseCatalog, error)
|
||||
DeleteRecord(ctx context.Context, in *ReqRecordDelete, opts ...grpc.CallOption) (*ResponseDelete, error)
|
||||
GetAlarmList(ctx context.Context, in *AlarmListRequest, opts ...grpc.CallOption) (*AlarmListResponse, error)
|
||||
GetSubscriptionProgress(ctx context.Context, in *StreamSnapRequest, opts ...grpc.CallOption) (*SubscriptionProgressResponse, error)
|
||||
StartPull(ctx context.Context, in *GlobalPullRequest, opts ...grpc.CallOption) (*SuccessResponse, error)
|
||||
}
|
||||
|
||||
type apiClient struct {
|
||||
@@ -418,7 +424,7 @@ func (c *apiClient) RemovePullProxy(ctx context.Context, in *RequestWithId, opts
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) UpdatePullProxy(ctx context.Context, in *PullProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
func (c *apiClient) UpdatePullProxy(ctx context.Context, in *UpdatePullProxyRequest, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(SuccessResponse)
|
||||
err := c.cc.Invoke(ctx, Api_UpdatePullProxy_FullMethodName, in, out, cOpts...)
|
||||
@@ -458,7 +464,7 @@ func (c *apiClient) RemovePushProxy(ctx context.Context, in *RequestWithId, opts
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) UpdatePushProxy(ctx context.Context, in *PushProxyInfo, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
func (c *apiClient) UpdatePushProxy(ctx context.Context, in *UpdatePushProxyRequest, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(SuccessResponse)
|
||||
err := c.cc.Invoke(ctx, Api_UpdatePushProxy_FullMethodName, in, out, cOpts...)
|
||||
@@ -528,6 +534,36 @@ func (c *apiClient) DeleteRecord(ctx context.Context, in *ReqRecordDelete, opts
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetAlarmList(ctx context.Context, in *AlarmListRequest, opts ...grpc.CallOption) (*AlarmListResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(AlarmListResponse)
|
||||
err := c.cc.Invoke(ctx, Api_GetAlarmList_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) GetSubscriptionProgress(ctx context.Context, in *StreamSnapRequest, opts ...grpc.CallOption) (*SubscriptionProgressResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(SubscriptionProgressResponse)
|
||||
err := c.cc.Invoke(ctx, Api_GetSubscriptionProgress_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *apiClient) StartPull(ctx context.Context, in *GlobalPullRequest, opts ...grpc.CallOption) (*SuccessResponse, error) {
|
||||
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
|
||||
out := new(SuccessResponse)
|
||||
err := c.cc.Invoke(ctx, Api_StartPull_FullMethodName, in, out, cOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// ApiServer is the server API for Api service.
|
||||
// All implementations must embed UnimplementedApiServer
|
||||
// for forward compatibility.
|
||||
@@ -562,17 +598,20 @@ type ApiServer interface {
|
||||
GetPullProxyList(context.Context, *emptypb.Empty) (*PullProxyListResponse, error)
|
||||
AddPullProxy(context.Context, *PullProxyInfo) (*SuccessResponse, error)
|
||||
RemovePullProxy(context.Context, *RequestWithId) (*SuccessResponse, error)
|
||||
UpdatePullProxy(context.Context, *PullProxyInfo) (*SuccessResponse, error)
|
||||
UpdatePullProxy(context.Context, *UpdatePullProxyRequest) (*SuccessResponse, error)
|
||||
GetPushProxyList(context.Context, *emptypb.Empty) (*PushProxyListResponse, error)
|
||||
AddPushProxy(context.Context, *PushProxyInfo) (*SuccessResponse, error)
|
||||
RemovePushProxy(context.Context, *RequestWithId) (*SuccessResponse, error)
|
||||
UpdatePushProxy(context.Context, *PushProxyInfo) (*SuccessResponse, error)
|
||||
UpdatePushProxy(context.Context, *UpdatePushProxyRequest) (*SuccessResponse, error)
|
||||
GetRecording(context.Context, *emptypb.Empty) (*RecordingListResponse, error)
|
||||
GetTransformList(context.Context, *emptypb.Empty) (*TransformListResponse, error)
|
||||
GetRecordList(context.Context, *ReqRecordList) (*RecordResponseList, error)
|
||||
GetEventRecordList(context.Context, *ReqRecordList) (*EventRecordResponseList, error)
|
||||
GetRecordCatalog(context.Context, *ReqRecordCatalog) (*ResponseCatalog, error)
|
||||
DeleteRecord(context.Context, *ReqRecordDelete) (*ResponseDelete, error)
|
||||
GetAlarmList(context.Context, *AlarmListRequest) (*AlarmListResponse, error)
|
||||
GetSubscriptionProgress(context.Context, *StreamSnapRequest) (*SubscriptionProgressResponse, error)
|
||||
StartPull(context.Context, *GlobalPullRequest) (*SuccessResponse, error)
|
||||
mustEmbedUnimplementedApiServer()
|
||||
}
|
||||
|
||||
@@ -673,7 +712,7 @@ func (UnimplementedApiServer) AddPullProxy(context.Context, *PullProxyInfo) (*Su
|
||||
func (UnimplementedApiServer) RemovePullProxy(context.Context, *RequestWithId) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RemovePullProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) UpdatePullProxy(context.Context, *PullProxyInfo) (*SuccessResponse, error) {
|
||||
func (UnimplementedApiServer) UpdatePullProxy(context.Context, *UpdatePullProxyRequest) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method UpdatePullProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetPushProxyList(context.Context, *emptypb.Empty) (*PushProxyListResponse, error) {
|
||||
@@ -685,7 +724,7 @@ func (UnimplementedApiServer) AddPushProxy(context.Context, *PushProxyInfo) (*Su
|
||||
func (UnimplementedApiServer) RemovePushProxy(context.Context, *RequestWithId) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method RemovePushProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) UpdatePushProxy(context.Context, *PushProxyInfo) (*SuccessResponse, error) {
|
||||
func (UnimplementedApiServer) UpdatePushProxy(context.Context, *UpdatePushProxyRequest) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method UpdatePushProxy not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetRecording(context.Context, *emptypb.Empty) (*RecordingListResponse, error) {
|
||||
@@ -706,6 +745,15 @@ func (UnimplementedApiServer) GetRecordCatalog(context.Context, *ReqRecordCatalo
|
||||
func (UnimplementedApiServer) DeleteRecord(context.Context, *ReqRecordDelete) (*ResponseDelete, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method DeleteRecord not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetAlarmList(context.Context, *AlarmListRequest) (*AlarmListResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetAlarmList not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) GetSubscriptionProgress(context.Context, *StreamSnapRequest) (*SubscriptionProgressResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method GetSubscriptionProgress not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) StartPull(context.Context, *GlobalPullRequest) (*SuccessResponse, error) {
|
||||
return nil, status.Errorf(codes.Unimplemented, "method StartPull not implemented")
|
||||
}
|
||||
func (UnimplementedApiServer) mustEmbedUnimplementedApiServer() {}
|
||||
func (UnimplementedApiServer) testEmbeddedByValue() {}
|
||||
|
||||
@@ -1268,7 +1316,7 @@ func _Api_RemovePullProxy_Handler(srv interface{}, ctx context.Context, dec func
|
||||
}
|
||||
|
||||
func _Api_UpdatePullProxy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PullProxyInfo)
|
||||
in := new(UpdatePullProxyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1280,7 +1328,7 @@ func _Api_UpdatePullProxy_Handler(srv interface{}, ctx context.Context, dec func
|
||||
FullMethod: Api_UpdatePullProxy_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).UpdatePullProxy(ctx, req.(*PullProxyInfo))
|
||||
return srv.(ApiServer).UpdatePullProxy(ctx, req.(*UpdatePullProxyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
@@ -1340,7 +1388,7 @@ func _Api_RemovePushProxy_Handler(srv interface{}, ctx context.Context, dec func
|
||||
}
|
||||
|
||||
func _Api_UpdatePushProxy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PushProxyInfo)
|
||||
in := new(UpdatePushProxyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -1352,7 +1400,7 @@ func _Api_UpdatePushProxy_Handler(srv interface{}, ctx context.Context, dec func
|
||||
FullMethod: Api_UpdatePushProxy_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).UpdatePushProxy(ctx, req.(*PushProxyInfo))
|
||||
return srv.(ApiServer).UpdatePushProxy(ctx, req.(*UpdatePushProxyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
@@ -1465,6 +1513,60 @@ func _Api_DeleteRecord_Handler(srv interface{}, ctx context.Context, dec func(in
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetAlarmList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(AlarmListRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).GetAlarmList(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: Api_GetAlarmList_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).GetAlarmList(ctx, req.(*AlarmListRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_GetSubscriptionProgress_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(StreamSnapRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).GetSubscriptionProgress(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: Api_GetSubscriptionProgress_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).GetSubscriptionProgress(ctx, req.(*StreamSnapRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Api_StartPull_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(GlobalPullRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(ApiServer).StartPull(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: Api_StartPull_FullMethodName,
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(ApiServer).StartPull(ctx, req.(*GlobalPullRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
// Api_ServiceDesc is the grpc.ServiceDesc for Api service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
@@ -1636,6 +1738,18 @@ var Api_ServiceDesc = grpc.ServiceDesc{
|
||||
MethodName: "DeleteRecord",
|
||||
Handler: _Api_DeleteRecord_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetAlarmList",
|
||||
Handler: _Api_GetAlarmList_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "GetSubscriptionProgress",
|
||||
Handler: _Api_GetSubscriptionProgress_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "StartPull",
|
||||
Handler: _Api_StartPull_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "global.proto",
|
||||
|
90
pkg/adts.go
90
pkg/adts.go
@@ -1,90 +0,0 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/deepch/vdk/codec/aacparser"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
var _ IAVFrame = (*ADTS)(nil)
|
||||
|
||||
type ADTS struct {
|
||||
DTS time.Duration
|
||||
util.RecyclableMemory
|
||||
}
|
||||
|
||||
func (A *ADTS) Parse(track *AVTrack) (err error) {
|
||||
if track.ICodecCtx == nil {
|
||||
var ctx = &codec.AACCtx{}
|
||||
var reader = A.NewReader()
|
||||
var adts []byte
|
||||
adts, err = reader.ReadBytes(7)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var hdrlen, framelen, samples int
|
||||
ctx.Config, hdrlen, framelen, samples, err = aacparser.ParseADTSHeader(adts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b := &bytes.Buffer{}
|
||||
aacparser.WriteMPEG4AudioConfig(b, ctx.Config)
|
||||
ctx.ConfigBytes = b.Bytes()
|
||||
track.ICodecCtx = ctx
|
||||
track.Info("ADTS", "hdrlen", hdrlen, "framelen", framelen, "samples", samples)
|
||||
}
|
||||
track.Value.Raw, err = A.Demux(track.ICodecCtx)
|
||||
return
|
||||
}
|
||||
|
||||
func (A *ADTS) ConvertCtx(ctx codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) {
|
||||
return ctx.GetBase(), nil, nil
|
||||
}
|
||||
|
||||
func (A *ADTS) Demux(ctx codec.ICodecCtx) (any, error) {
|
||||
var reader = A.NewReader()
|
||||
err := reader.Skip(7)
|
||||
var mem util.Memory
|
||||
reader.Range(mem.AppendOne)
|
||||
return mem, err
|
||||
}
|
||||
|
||||
func (A *ADTS) Mux(ctx codec.ICodecCtx, frame *AVFrame) {
|
||||
A.InitRecycleIndexes(1)
|
||||
A.DTS = frame.Timestamp * 90 / time.Millisecond
|
||||
aacCtx, ok := ctx.GetBase().(*codec.AACCtx)
|
||||
if !ok {
|
||||
A.Append(frame.Raw.(util.Memory).Buffers...)
|
||||
return
|
||||
}
|
||||
adts := A.NextN(7)
|
||||
raw := frame.Raw.(util.Memory)
|
||||
aacparser.FillADTSHeader(adts, aacCtx.Config, raw.Size/aacCtx.GetSampleSize(), raw.Size)
|
||||
A.Append(raw.Buffers...)
|
||||
}
|
||||
|
||||
func (A *ADTS) GetTimestamp() time.Duration {
|
||||
return A.DTS * time.Millisecond / 90
|
||||
}
|
||||
|
||||
func (A *ADTS) GetCTS() time.Duration {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (A *ADTS) GetSize() int {
|
||||
return A.Size
|
||||
}
|
||||
|
||||
func (A *ADTS) String() string {
|
||||
return fmt.Sprintf("ADTS{size:%d}", A.Size)
|
||||
}
|
||||
|
||||
func (A *ADTS) Dump(b byte, writer io.Writer) {
|
||||
//TODO implement me
|
||||
panic("implement me")
|
||||
}
|
182
pkg/annexb.go
182
pkg/annexb.go
@@ -1,182 +0,0 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/deepch/vdk/codec/h264parser"
|
||||
"github.com/deepch/vdk/codec/h265parser"
|
||||
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
var _ IAVFrame = (*AnnexB)(nil)
|
||||
|
||||
type AnnexB struct {
|
||||
Hevc bool
|
||||
PTS time.Duration
|
||||
DTS time.Duration
|
||||
util.RecyclableMemory
|
||||
}
|
||||
|
||||
func (a *AnnexB) Dump(t byte, w io.Writer) {
|
||||
m := a.GetAllocator().Borrow(4 + a.Size)
|
||||
binary.BigEndian.PutUint32(m, uint32(a.Size))
|
||||
a.CopyTo(m[4:])
|
||||
w.Write(m)
|
||||
}
|
||||
|
||||
// DecodeConfig implements pkg.IAVFrame.
|
||||
func (a *AnnexB) ConvertCtx(ctx codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) {
|
||||
return ctx.GetBase(), nil, nil
|
||||
}
|
||||
|
||||
// GetSize implements pkg.IAVFrame.
|
||||
func (a *AnnexB) GetSize() int {
|
||||
return a.Size
|
||||
}
|
||||
|
||||
func (a *AnnexB) GetTimestamp() time.Duration {
|
||||
return a.DTS * time.Millisecond / 90
|
||||
}
|
||||
|
||||
func (a *AnnexB) GetCTS() time.Duration {
|
||||
return (a.PTS - a.DTS) * time.Millisecond / 90
|
||||
}
|
||||
|
||||
// Parse implements pkg.IAVFrame.
|
||||
func (a *AnnexB) Parse(t *AVTrack) (err error) {
|
||||
if a.Hevc {
|
||||
if t.ICodecCtx == nil {
|
||||
t.ICodecCtx = &codec.H265Ctx{}
|
||||
}
|
||||
} else {
|
||||
if t.ICodecCtx == nil {
|
||||
t.ICodecCtx = &codec.H264Ctx{}
|
||||
}
|
||||
}
|
||||
if t.Value.Raw, err = a.Demux(t.ICodecCtx); err != nil {
|
||||
return
|
||||
}
|
||||
for _, nalu := range t.Value.Raw.(Nalus) {
|
||||
if a.Hevc {
|
||||
ctx := t.ICodecCtx.(*codec.H265Ctx)
|
||||
switch codec.ParseH265NALUType(nalu.Buffers[0][0]) {
|
||||
case h265parser.NAL_UNIT_VPS:
|
||||
ctx.RecordInfo.VPS = [][]byte{nalu.ToBytes()}
|
||||
case h265parser.NAL_UNIT_SPS:
|
||||
ctx.RecordInfo.SPS = [][]byte{nalu.ToBytes()}
|
||||
case h265parser.NAL_UNIT_PPS:
|
||||
ctx.RecordInfo.PPS = [][]byte{nalu.ToBytes()}
|
||||
ctx.CodecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(ctx.VPS(), ctx.SPS(), ctx.PPS())
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_CRA:
|
||||
t.Value.IDR = true
|
||||
}
|
||||
} else {
|
||||
ctx := t.ICodecCtx.(*codec.H264Ctx)
|
||||
switch codec.ParseH264NALUType(nalu.Buffers[0][0]) {
|
||||
case codec.NALU_SPS:
|
||||
ctx.RecordInfo.SPS = [][]byte{nalu.ToBytes()}
|
||||
if len(ctx.RecordInfo.PPS) > 0 {
|
||||
ctx.CodecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS(), ctx.PPS())
|
||||
}
|
||||
case codec.NALU_PPS:
|
||||
ctx.RecordInfo.PPS = [][]byte{nalu.ToBytes()}
|
||||
if len(ctx.RecordInfo.SPS) > 0 {
|
||||
ctx.CodecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS(), ctx.PPS())
|
||||
}
|
||||
case codec.NALU_IDR_Picture:
|
||||
t.Value.IDR = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// String implements pkg.IAVFrame.
|
||||
func (a *AnnexB) String() string {
|
||||
return fmt.Sprintf("%d %d", a.DTS, a.Memory.Size)
|
||||
}
|
||||
|
||||
// Demux implements pkg.IAVFrame.
|
||||
func (a *AnnexB) Demux(codecCtx codec.ICodecCtx) (ret any, err error) {
|
||||
var nalus Nalus
|
||||
var lastFourBytes [4]byte
|
||||
var b byte
|
||||
var shallow util.Memory
|
||||
shallow.Append(a.Buffers...)
|
||||
reader := shallow.NewReader()
|
||||
|
||||
gotNalu := func() {
|
||||
var nalu util.Memory
|
||||
for buf := range reader.ClipFront {
|
||||
nalu.AppendOne(buf)
|
||||
}
|
||||
nalus = append(nalus, nalu)
|
||||
|
||||
}
|
||||
|
||||
for {
|
||||
b, err = reader.ReadByte()
|
||||
if err == nil {
|
||||
copy(lastFourBytes[:], lastFourBytes[1:])
|
||||
lastFourBytes[3] = b
|
||||
var startCode = 0
|
||||
if lastFourBytes == codec.NALU_Delimiter2 {
|
||||
startCode = 4
|
||||
} else if [3]byte(lastFourBytes[1:]) == codec.NALU_Delimiter1 {
|
||||
startCode = 3
|
||||
}
|
||||
if startCode > 0 && reader.Offset() >= 3 {
|
||||
if reader.Offset() == 3 {
|
||||
startCode = 3
|
||||
}
|
||||
reader.Unread(startCode)
|
||||
if reader.Offset() > 0 {
|
||||
gotNalu()
|
||||
}
|
||||
reader.Skip(startCode)
|
||||
for range reader.ClipFront {
|
||||
}
|
||||
}
|
||||
} else if err == io.EOF {
|
||||
if reader.Offset() > 0 {
|
||||
gotNalu()
|
||||
}
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
}
|
||||
ret = nalus
|
||||
return
|
||||
}
|
||||
|
||||
func (a *AnnexB) Mux(codecCtx codec.ICodecCtx, frame *AVFrame) {
|
||||
a.DTS = frame.Timestamp * 90 / time.Millisecond
|
||||
a.PTS = a.DTS + frame.CTS*90/time.Millisecond
|
||||
a.InitRecycleIndexes(0)
|
||||
delimiter2 := codec.NALU_Delimiter2[:]
|
||||
a.AppendOne(delimiter2)
|
||||
if frame.IDR {
|
||||
switch ctx := codecCtx.(type) {
|
||||
case *codec.H264Ctx:
|
||||
a.Append(ctx.SPS(), delimiter2, ctx.PPS(), delimiter2)
|
||||
case *codec.H265Ctx:
|
||||
a.Append(ctx.SPS(), delimiter2, ctx.PPS(), delimiter2, ctx.VPS(), delimiter2)
|
||||
}
|
||||
}
|
||||
for i, nalu := range frame.Raw.(Nalus) {
|
||||
if i > 0 {
|
||||
a.AppendOne(codec.NALU_Delimiter1[:])
|
||||
}
|
||||
a.Append(nalu.Buffers...)
|
||||
}
|
||||
}
|
219
pkg/annexb_reader.go
Normal file
219
pkg/annexb_reader.go
Normal file
@@ -0,0 +1,219 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
// AnnexBReader 专门用于读取 AnnexB 格式数据的读取器
|
||||
// 模仿 MemoryReader 结构,支持跨切片读取和动态数据管理
|
||||
type AnnexBReader struct {
|
||||
util.Memory // 存储数据的多段内存
|
||||
Length, offset0, offset1 int // 可读长度和当前读取位置
|
||||
}
|
||||
|
||||
// AppendBuffer 追加单个数据缓冲区
|
||||
func (r *AnnexBReader) AppendBuffer(buf []byte) {
|
||||
r.PushOne(buf)
|
||||
r.Length += len(buf)
|
||||
}
|
||||
|
||||
// ClipFront 剔除已读取的数据,释放内存
|
||||
func (r *AnnexBReader) ClipFront() {
|
||||
readOffset := r.Size - r.Length
|
||||
if readOffset == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
// 剔除已完全读取的缓冲区(不回收内存)
|
||||
if r.offset0 > 0 {
|
||||
r.Buffers = r.Buffers[r.offset0:]
|
||||
r.Size -= readOffset
|
||||
r.offset0 = 0
|
||||
}
|
||||
|
||||
// 处理部分读取的缓冲区(不回收内存)
|
||||
if r.offset1 > 0 && len(r.Buffers) > 0 {
|
||||
buf := r.Buffers[0]
|
||||
r.Buffers[0] = buf[r.offset1:]
|
||||
r.Size -= r.offset1
|
||||
r.offset1 = 0
|
||||
}
|
||||
}
|
||||
|
||||
// FindStartCode 查找 NALU 起始码,返回起始码位置和长度
|
||||
func (r *AnnexBReader) FindStartCode() (pos int, startCodeLen int, found bool) {
|
||||
if r.Length < 3 {
|
||||
return 0, 0, false
|
||||
}
|
||||
|
||||
// 逐字节检查起始码
|
||||
for i := 0; i <= r.Length-3; i++ {
|
||||
// 优先检查 4 字节起始码
|
||||
if i <= r.Length-4 {
|
||||
if r.getByteAt(i) == 0x00 && r.getByteAt(i+1) == 0x00 &&
|
||||
r.getByteAt(i+2) == 0x00 && r.getByteAt(i+3) == 0x01 {
|
||||
return i, 4, true
|
||||
}
|
||||
}
|
||||
|
||||
// 检查 3 字节起始码(但要确保不是 4 字节起始码的一部分)
|
||||
if r.getByteAt(i) == 0x00 && r.getByteAt(i+1) == 0x00 && r.getByteAt(i+2) == 0x01 {
|
||||
// 确保这不是4字节起始码的一部分
|
||||
if i == 0 || r.getByteAt(i-1) != 0x00 {
|
||||
return i, 3, true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 0, 0, false
|
||||
}
|
||||
|
||||
// getByteAt 获取指定位置的字节,不改变读取位置
|
||||
func (r *AnnexBReader) getByteAt(pos int) byte {
|
||||
if pos >= r.Length {
|
||||
return 0
|
||||
}
|
||||
|
||||
// 计算在哪个缓冲区和缓冲区内的位置
|
||||
currentPos := 0
|
||||
bufferIndex := r.offset0
|
||||
bufferOffset := r.offset1
|
||||
|
||||
for bufferIndex < len(r.Buffers) {
|
||||
buf := r.Buffers[bufferIndex]
|
||||
available := len(buf) - bufferOffset
|
||||
|
||||
if currentPos+available > pos {
|
||||
// 目标位置在当前缓冲区内
|
||||
return buf[bufferOffset+(pos-currentPos)]
|
||||
}
|
||||
|
||||
currentPos += available
|
||||
bufferIndex++
|
||||
bufferOffset = 0
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
type InvalidDataError struct {
|
||||
util.Memory
|
||||
}
|
||||
|
||||
func (e InvalidDataError) Error() string {
|
||||
return fmt.Sprintf("% 02X", e.ToBytes())
|
||||
}
|
||||
|
||||
// ReadNALU 读取一个完整的 NALU
|
||||
// withStart 用于接收“包含起始码”的内存段
|
||||
// withoutStart 用于接收“不包含起始码”的内存段
|
||||
// 允许 withStart 或 withoutStart 为 nil(表示调用方不需要该形式的数据)
|
||||
func (r *AnnexBReader) ReadNALU(withStart, withoutStart *util.Memory) error {
|
||||
r.ClipFront()
|
||||
// 定位到第一个起始码
|
||||
firstPos, startCodeLen, found := r.FindStartCode()
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
|
||||
// 跳过起始码之前的无效数据
|
||||
if firstPos > 0 {
|
||||
var invalidData util.Memory
|
||||
var reader util.MemoryReader
|
||||
reader.Memory = &r.Memory
|
||||
reader.RangeN(firstPos, invalidData.PushOne)
|
||||
return InvalidDataError{invalidData}
|
||||
}
|
||||
|
||||
// 为了查找下一个起始码,需要临时跳过当前起始码再查找
|
||||
saveOffset0, saveOffset1, saveLength := r.offset0, r.offset1, r.Length
|
||||
r.forward(startCodeLen)
|
||||
nextPosAfterStart, _, nextFound := r.FindStartCode()
|
||||
// 恢复到起始码起点
|
||||
r.offset0, r.offset1, r.Length = saveOffset0, saveOffset1, saveLength
|
||||
if !nextFound {
|
||||
return nil
|
||||
}
|
||||
|
||||
// 依次读取并填充输出,同时推进读取位置到 NALU 末尾(不消耗下一个起始码)
|
||||
remaining := startCodeLen + nextPosAfterStart
|
||||
// 需要在 withoutStart 中跳过的前缀(即起始码长度)
|
||||
skipForWithout := startCodeLen
|
||||
|
||||
for remaining > 0 && r.offset0 < len(r.Buffers) {
|
||||
buf := r.getCurrentBuffer()
|
||||
readLen := len(buf)
|
||||
if readLen > remaining {
|
||||
readLen = remaining
|
||||
}
|
||||
segment := buf[:readLen]
|
||||
|
||||
if withStart != nil {
|
||||
withStart.PushOne(segment)
|
||||
}
|
||||
|
||||
if withoutStart != nil {
|
||||
if skipForWithout >= readLen {
|
||||
// 本段全部属于起始码,跳过
|
||||
skipForWithout -= readLen
|
||||
} else {
|
||||
// 仅跳过起始码前缀,余下推入 withoutStart
|
||||
withoutStart.PushOne(segment[skipForWithout:])
|
||||
skipForWithout = 0
|
||||
}
|
||||
}
|
||||
|
||||
if readLen == len(buf) {
|
||||
r.skipCurrentBuffer()
|
||||
} else {
|
||||
r.forward(readLen)
|
||||
}
|
||||
remaining -= readLen
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getCurrentBuffer 获取当前读取位置的缓冲区
|
||||
func (r *AnnexBReader) getCurrentBuffer() []byte {
|
||||
if r.offset0 >= len(r.Buffers) {
|
||||
return nil
|
||||
}
|
||||
return r.Buffers[r.offset0][r.offset1:]
|
||||
}
|
||||
|
||||
// forward 向前移动读取位置
|
||||
func (r *AnnexBReader) forward(n int) {
|
||||
if n <= 0 || r.Length <= 0 {
|
||||
return
|
||||
}
|
||||
if n > r.Length { // 防御:不允许超出剩余长度
|
||||
n = r.Length
|
||||
}
|
||||
r.Length -= n
|
||||
for n > 0 && r.offset0 < len(r.Buffers) {
|
||||
cur := r.Buffers[r.offset0]
|
||||
remain := len(cur) - r.offset1
|
||||
if n < remain { // 仍在当前缓冲区内
|
||||
r.offset1 += n
|
||||
n = 0
|
||||
return
|
||||
}
|
||||
// 用掉当前缓冲区剩余部分,跳到下一个缓冲区起点
|
||||
n -= remain
|
||||
r.offset0++
|
||||
r.offset1 = 0
|
||||
}
|
||||
}
|
||||
|
||||
// skipCurrentBuffer 跳过当前缓冲区
|
||||
func (r *AnnexBReader) skipCurrentBuffer() {
|
||||
if r.offset0 < len(r.Buffers) {
|
||||
curBufLen := len(r.Buffers[r.offset0]) - r.offset1
|
||||
r.Length -= curBufLen
|
||||
r.offset0++
|
||||
r.offset1 = 0
|
||||
}
|
||||
}
|
173
pkg/annexb_reader_test.go
Normal file
173
pkg/annexb_reader_test.go
Normal file
@@ -0,0 +1,173 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
_ "embed"
|
||||
"math/rand"
|
||||
"testing"
|
||||
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
func bytesFromMemory(m util.Memory) []byte {
|
||||
if m.Size == 0 {
|
||||
return nil
|
||||
}
|
||||
out := make([]byte, 0, m.Size)
|
||||
for _, b := range m.Buffers {
|
||||
out = append(out, b...)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func TestAnnexBReader_ReadNALU_Basic(t *testing.T) {
|
||||
|
||||
var reader AnnexBReader
|
||||
|
||||
// 3 个 NALU,分别使用 4 字节、3 字节、4 字节起始码
|
||||
expected1 := []byte{0x67, 0x42, 0x00, 0x1E}
|
||||
expected2 := []byte{0x68, 0xCE, 0x3C, 0x80}
|
||||
expected3 := []byte{0x65, 0x88, 0x84, 0x00}
|
||||
|
||||
buf := append([]byte{0x00, 0x00, 0x00, 0x01}, expected1...)
|
||||
buf = append(buf, append([]byte{0x00, 0x00, 0x01}, expected2...)...)
|
||||
buf = append(buf, append([]byte{0x00, 0x00, 0x00, 0x01}, expected3...)...)
|
||||
|
||||
reader.AppendBuffer(append(buf, codec.NALU_Delimiter2[:]...))
|
||||
|
||||
// 读取并校验 3 个 NALU(不包含起始码)
|
||||
var n util.Memory
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("read nalu 1: %v", err)
|
||||
}
|
||||
if !bytes.Equal(bytesFromMemory(n), expected1) {
|
||||
t.Fatalf("nalu1 mismatch")
|
||||
}
|
||||
|
||||
n = util.Memory{}
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("read nalu 2: %v", err)
|
||||
}
|
||||
if !bytes.Equal(bytesFromMemory(n), expected2) {
|
||||
t.Fatalf("nalu2 mismatch")
|
||||
}
|
||||
|
||||
n = util.Memory{}
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("read nalu 3: %v", err)
|
||||
}
|
||||
if !bytes.Equal(bytesFromMemory(n), expected3) {
|
||||
t.Fatalf("nalu3 mismatch")
|
||||
}
|
||||
|
||||
// 再读一次应无更多起始码,返回 nil 错误且长度为 0
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("expected nil error when no more nalu, got: %v", err)
|
||||
}
|
||||
if reader.Length != 4 {
|
||||
t.Fatalf("expected length 0 after reading all, got %d", reader.Length)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAnnexBReader_AppendBuffer_MultiChunk_Random(t *testing.T) {
|
||||
|
||||
var reader AnnexBReader
|
||||
|
||||
rng := rand.New(rand.NewSource(1)) // 固定种子,保证可复现
|
||||
|
||||
// 生成随机 NALU(仅负载部分),并构造 AnnexB 数据(随机 3/4 字节起始码)
|
||||
numNALU := 12
|
||||
expectedPayloads := make([][]byte, 0, numNALU)
|
||||
fullStream := make([]byte, 0, 1024)
|
||||
|
||||
for i := 0; i < numNALU; i++ {
|
||||
payloadLen := 1 + rng.Intn(32)
|
||||
payload := make([]byte, payloadLen)
|
||||
for j := 0; j < payloadLen; j++ {
|
||||
payload[j] = byte(rng.Intn(256))
|
||||
}
|
||||
expectedPayloads = append(expectedPayloads, payload)
|
||||
|
||||
if rng.Intn(2) == 0 {
|
||||
fullStream = append(fullStream, 0x00, 0x00, 0x01)
|
||||
} else {
|
||||
fullStream = append(fullStream, 0x00, 0x00, 0x00, 0x01)
|
||||
}
|
||||
fullStream = append(fullStream, payload...)
|
||||
}
|
||||
fullStream = append(fullStream, codec.NALU_Delimiter2[:]...) // 结尾加个起始码,方便读取到最后一个 NALU
|
||||
// 随机切割为多段并 AppendBuffer
|
||||
for i := 0; i < len(fullStream); {
|
||||
// 每段长度 1..7 字节(或剩余长度)
|
||||
maxStep := 7
|
||||
remain := len(fullStream) - i
|
||||
step := 1 + rng.Intn(maxStep)
|
||||
if step > remain {
|
||||
step = remain
|
||||
}
|
||||
reader.AppendBuffer(fullStream[i : i+step])
|
||||
i += step
|
||||
}
|
||||
|
||||
// 依次读取并校验
|
||||
for idx, expected := range expectedPayloads {
|
||||
var n util.Memory
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("read nalu %d: %v", idx+1, err)
|
||||
}
|
||||
got := bytesFromMemory(n)
|
||||
if !bytes.Equal(got, expected) {
|
||||
t.Fatalf("nalu %d mismatch: expected %d bytes, got %d bytes", idx+1, len(expected), len(got))
|
||||
}
|
||||
}
|
||||
|
||||
// 没有更多 NALU
|
||||
var n util.Memory
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("expected nil error when no more nalu, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// 起始码跨越两个缓冲区的情况测试(例如 00 00 | 00 01)
|
||||
func TestAnnexBReader_StartCodeAcrossBuffers(t *testing.T) {
|
||||
var reader AnnexBReader
|
||||
// 构造一个 4 字节起始码被拆成两段的情况,后跟一个短 payload
|
||||
reader.AppendBuffer([]byte{0x00, 0x00})
|
||||
reader.AppendBuffer([]byte{0x00})
|
||||
reader.AppendBuffer([]byte{0x01, 0x11, 0x22, 0x33}) // payload: 11 22 33
|
||||
reader.AppendBuffer(codec.NALU_Delimiter2[:])
|
||||
var n util.Memory
|
||||
if err := reader.ReadNALU(nil, &n); err != nil {
|
||||
t.Fatalf("read nalu: %v", err)
|
||||
}
|
||||
got := bytesFromMemory(n)
|
||||
expected := []byte{0x11, 0x22, 0x33}
|
||||
if !bytes.Equal(got, expected) {
|
||||
t.Fatalf("payload mismatch: expected %v got %v", expected, got)
|
||||
}
|
||||
}
|
||||
|
||||
//go:embed test.h264
|
||||
var annexbH264Sample []byte
|
||||
|
||||
var clipSizesH264 = [...]int{7823, 7157, 5137, 6268, 5958, 4573, 5661, 5589, 3917, 5207, 5347, 4111, 4755, 5199, 3761, 5014, 4981, 3736, 5075, 4889, 3739, 4701, 4655, 3471, 4086, 4428, 3309, 4388, 28, 8, 63974, 63976, 37544, 4945, 6525, 6974, 4874, 6317, 6141, 4455, 5833, 4105, 5407, 5479, 3741, 5142, 4939, 3745, 4945, 4857, 3518, 4624, 4930, 3649, 4846, 5020, 3293, 4588, 4571, 3430, 4844, 4822, 21223, 8461, 7188, 4882, 6108, 5870, 4432, 5389, 5466, 3726}
|
||||
|
||||
func TestAnnexBReader_EmbeddedAnnexB_H265(t *testing.T) {
|
||||
var reader AnnexBReader
|
||||
offset := 0
|
||||
for _, size := range clipSizesH264 {
|
||||
reader.AppendBuffer(annexbH264Sample[offset : offset+size])
|
||||
offset += size
|
||||
var nalu util.Memory
|
||||
if err := reader.ReadNALU(nil, &nalu); err != nil {
|
||||
t.Fatalf("read nalu: %v", err)
|
||||
} else {
|
||||
t.Logf("read nalu: %d bytes", nalu.Size)
|
||||
if nalu.Size > 0 {
|
||||
tryH264Type := codec.ParseH264NALUType(nalu.Buffers[0][0])
|
||||
t.Logf("tryH264Type: %d", tryH264Type)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@@ -174,7 +174,9 @@ func (r *AVRingReader) ReadFrame(conf *config.Subscribe) (err error) {
|
||||
r.Delay = r.Track.LastValue.Sequence - r.Value.Sequence
|
||||
// fmt.Println(r.Delay)
|
||||
if r.Track.ICodecCtx != nil {
|
||||
r.Log(context.TODO(), task.TraceLevel, r.Track.FourCC().String(), "ts", r.Value.Timestamp, "delay", r.Delay, "bps", r.BPS)
|
||||
if r.Logger.Enabled(context.TODO(), task.TraceLevel) {
|
||||
r.Log(context.TODO(), task.TraceLevel, r.Track.FourCC().String(), "ts", r.Value.Timestamp, "delay", r.Delay, "bps", r.BPS)
|
||||
}
|
||||
} else {
|
||||
r.Warn("no codec")
|
||||
}
|
189
pkg/avframe.go
189
pkg/avframe.go
@@ -1,8 +1,6 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -27,21 +25,28 @@ type (
|
||||
}
|
||||
// Source -> Parse -> Demux -> (ConvertCtx) -> Mux(GetAllocator) -> Recycle
|
||||
IAVFrame interface {
|
||||
GetAllocator() *util.ScalableMemoryAllocator
|
||||
SetAllocator(*util.ScalableMemoryAllocator)
|
||||
Parse(*AVTrack) error // get codec info, idr
|
||||
ConvertCtx(codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) // convert codec from source stream
|
||||
Demux(codec.ICodecCtx) (any, error) // demux to raw format
|
||||
Mux(codec.ICodecCtx, *AVFrame) // mux from raw format
|
||||
GetTimestamp() time.Duration
|
||||
GetCTS() time.Duration
|
||||
GetSample() *Sample
|
||||
GetSize() int
|
||||
CheckCodecChange() error
|
||||
Demux() error // demux to raw format
|
||||
Mux(*Sample) error // mux from origin format
|
||||
Recycle()
|
||||
String() string
|
||||
Dump(byte, io.Writer)
|
||||
}
|
||||
|
||||
Nalus []util.Memory
|
||||
ISequenceCodecCtx[T any] interface {
|
||||
GetSequenceFrame() T
|
||||
}
|
||||
BaseSample struct {
|
||||
Raw IRaw // 裸格式用于转换的中间格式
|
||||
IDR bool
|
||||
TS0, Timestamp, CTS time.Duration // 原始 TS、修正 TS、Composition Time Stamp
|
||||
}
|
||||
Sample struct {
|
||||
codec.ICodecCtx
|
||||
util.RecyclableMemory
|
||||
*BaseSample
|
||||
}
|
||||
Nalus = util.ReuseArray[util.Memory]
|
||||
|
||||
AudioData = util.Memory
|
||||
|
||||
@@ -49,36 +54,130 @@ type (
|
||||
|
||||
AVFrame struct {
|
||||
DataFrame
|
||||
IDR bool
|
||||
Timestamp time.Duration // 绝对时间戳
|
||||
CTS time.Duration // composition time stamp
|
||||
Wraps []IAVFrame // 封装格式
|
||||
*Sample
|
||||
Wraps []IAVFrame // 封装格式
|
||||
}
|
||||
IRaw interface {
|
||||
util.Resetter
|
||||
Count() int
|
||||
}
|
||||
|
||||
AVRing = util.Ring[AVFrame]
|
||||
DataFrame struct {
|
||||
sync.RWMutex
|
||||
discard bool
|
||||
Sequence uint32 // 在一个Track中的序号
|
||||
WriteTime time.Time // 写入时间,可用于比较两个帧的先后
|
||||
Raw any // 裸格式
|
||||
}
|
||||
)
|
||||
|
||||
func (frame *AVFrame) Clone() {
|
||||
func (sample *Sample) GetSize() int {
|
||||
return sample.Size
|
||||
}
|
||||
|
||||
func (sample *Sample) GetSample() *Sample {
|
||||
return sample
|
||||
}
|
||||
|
||||
func (sample *Sample) CheckCodecChange() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (sample *Sample) Demux() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sample *Sample) Mux(from *Sample) error {
|
||||
sample.ICodecCtx = from.GetBase()
|
||||
return nil
|
||||
}
|
||||
|
||||
func ConvertFrameType(from, to IAVFrame) (err error) {
|
||||
fromSampe, toSample := from.GetSample(), to.GetSample()
|
||||
if !fromSampe.HasRaw() {
|
||||
if err = from.Demux(); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
toSample.SetAllocator(fromSampe.GetAllocator())
|
||||
toSample.BaseSample = fromSampe.BaseSample
|
||||
return to.Mux(fromSampe)
|
||||
}
|
||||
|
||||
func (b *BaseSample) HasRaw() bool {
|
||||
return b.Raw != nil && b.Raw.Count() > 0
|
||||
}
|
||||
|
||||
// 90Hz
|
||||
func (b *BaseSample) GetDTS() time.Duration {
|
||||
return b.Timestamp * 90 / time.Millisecond
|
||||
}
|
||||
|
||||
func (b *BaseSample) GetPTS() time.Duration {
|
||||
return (b.Timestamp + b.CTS) * 90 / time.Millisecond
|
||||
}
|
||||
|
||||
func (b *BaseSample) SetDTS(dts time.Duration) {
|
||||
b.Timestamp = dts * time.Millisecond / 90
|
||||
}
|
||||
|
||||
func (b *BaseSample) SetPTS(pts time.Duration) {
|
||||
b.CTS = pts*time.Millisecond/90 - b.Timestamp
|
||||
}
|
||||
|
||||
func (b *BaseSample) SetTS32(ts uint32) {
|
||||
b.Timestamp = time.Duration(ts) * time.Millisecond
|
||||
}
|
||||
|
||||
func (b *BaseSample) GetTS32() uint32 {
|
||||
return uint32(b.Timestamp / time.Millisecond)
|
||||
}
|
||||
|
||||
func (b *BaseSample) SetCTS32(ts uint32) {
|
||||
b.CTS = time.Duration(ts) * time.Millisecond
|
||||
}
|
||||
|
||||
func (b *BaseSample) GetCTS32() uint32 {
|
||||
return uint32(b.CTS / time.Millisecond)
|
||||
}
|
||||
|
||||
func (b *BaseSample) GetNalus() *Nalus {
|
||||
if b.Raw == nil {
|
||||
b.Raw = &Nalus{}
|
||||
}
|
||||
return b.Raw.(*Nalus)
|
||||
}
|
||||
|
||||
func (b *BaseSample) GetAudioData() *AudioData {
|
||||
if b.Raw == nil {
|
||||
b.Raw = &AudioData{}
|
||||
}
|
||||
return b.Raw.(*AudioData)
|
||||
}
|
||||
|
||||
func (b *BaseSample) ParseAVCC(reader *util.MemoryReader, naluSizeLen int) error {
|
||||
array := b.GetNalus()
|
||||
for reader.Length > 0 {
|
||||
l, err := reader.ReadBE(naluSizeLen)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
reader.RangeN(int(l), array.GetNextPointer().PushOne)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (frame *AVFrame) Reset() {
|
||||
frame.Timestamp = 0
|
||||
frame.IDR = false
|
||||
frame.CTS = 0
|
||||
frame.Raw = nil
|
||||
if len(frame.Wraps) > 0 {
|
||||
for _, wrap := range frame.Wraps {
|
||||
wrap.Recycle()
|
||||
}
|
||||
frame.Wraps = frame.Wraps[:0]
|
||||
frame.BaseSample.IDR = false
|
||||
frame.BaseSample.TS0 = 0
|
||||
frame.BaseSample.Timestamp = 0
|
||||
frame.BaseSample.CTS = 0
|
||||
if frame.Raw != nil {
|
||||
frame.Raw.Reset()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -87,11 +186,6 @@ func (frame *AVFrame) Discard() {
|
||||
frame.Reset()
|
||||
}
|
||||
|
||||
func (frame *AVFrame) Demux(codecCtx codec.ICodecCtx) (err error) {
|
||||
frame.Raw, err = frame.Wraps[0].Demux(codecCtx)
|
||||
return
|
||||
}
|
||||
|
||||
func (df *DataFrame) StartWrite() (success bool) {
|
||||
if df.discard {
|
||||
return
|
||||
@@ -108,31 +202,6 @@ func (df *DataFrame) Ready() {
|
||||
df.Unlock()
|
||||
}
|
||||
|
||||
func (nalus *Nalus) H264Type() codec.H264NALUType {
|
||||
return codec.ParseH264NALUType((*nalus)[0].Buffers[0][0])
|
||||
}
|
||||
|
||||
func (nalus *Nalus) H265Type() codec.H265NALUType {
|
||||
return codec.ParseH265NALUType((*nalus)[0].Buffers[0][0])
|
||||
}
|
||||
|
||||
func (nalus *Nalus) Append(bytes []byte) {
|
||||
*nalus = append(*nalus, util.Memory{Buffers: net.Buffers{bytes}, Size: len(bytes)})
|
||||
}
|
||||
|
||||
func (nalus *Nalus) ParseAVCC(reader *util.MemoryReader, naluSizeLen int) error {
|
||||
for reader.Length > 0 {
|
||||
l, err := reader.ReadBE(naluSizeLen)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var mem util.Memory
|
||||
reader.RangeN(int(l), mem.AppendOne)
|
||||
*nalus = append(*nalus, mem)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (obus *OBUs) ParseAVCC(reader *util.MemoryReader) error {
|
||||
var obuHeader av1.OBUHeader
|
||||
startLen := reader.Length
|
||||
@@ -157,7 +226,15 @@ func (obus *OBUs) ParseAVCC(reader *util.MemoryReader) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
(*AudioData)(obus).AppendOne(obu)
|
||||
(*AudioData)(obus).PushOne(obu)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (obus *OBUs) Reset() {
|
||||
((*util.Memory)(obus)).Reset()
|
||||
}
|
||||
|
||||
func (obus *OBUs) Count() int {
|
||||
return (*util.Memory)(obus).Count()
|
||||
}
|
||||
|
@@ -1,74 +0,0 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
type AVFrameConvert[T IAVFrame] struct {
|
||||
FromTrack, ToTrack *AVTrack
|
||||
lastFromCodecCtx codec.ICodecCtx
|
||||
}
|
||||
|
||||
func NewAVFrameConvert[T IAVFrame](fromTrack *AVTrack, toTrack *AVTrack) *AVFrameConvert[T] {
|
||||
ret := &AVFrameConvert[T]{}
|
||||
ret.FromTrack = fromTrack
|
||||
ret.ToTrack = toTrack
|
||||
if ret.FromTrack == nil {
|
||||
ret.FromTrack = &AVTrack{
|
||||
RingWriter: &RingWriter{
|
||||
Ring: util.NewRing[AVFrame](1),
|
||||
},
|
||||
}
|
||||
}
|
||||
if ret.ToTrack == nil {
|
||||
ret.ToTrack = &AVTrack{
|
||||
RingWriter: &RingWriter{
|
||||
Ring: util.NewRing[AVFrame](1),
|
||||
},
|
||||
}
|
||||
var to T
|
||||
ret.ToTrack.FrameType = reflect.TypeOf(to).Elem()
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (c *AVFrameConvert[T]) ConvertFromAVFrame(avFrame *AVFrame) (to T, err error) {
|
||||
to = reflect.New(c.ToTrack.FrameType).Interface().(T)
|
||||
if c.ToTrack.ICodecCtx == nil {
|
||||
if c.ToTrack.ICodecCtx, c.ToTrack.SequenceFrame, err = to.ConvertCtx(c.FromTrack.ICodecCtx); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if err = avFrame.Demux(c.FromTrack.ICodecCtx); err != nil {
|
||||
return
|
||||
}
|
||||
to.SetAllocator(avFrame.Wraps[0].GetAllocator())
|
||||
to.Mux(c.ToTrack.ICodecCtx, avFrame)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *AVFrameConvert[T]) Convert(frame IAVFrame) (to T, err error) {
|
||||
to = reflect.New(c.ToTrack.FrameType).Interface().(T)
|
||||
// Not From Publisher
|
||||
if c.FromTrack.LastValue == nil {
|
||||
err = frame.Parse(c.FromTrack)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if c.ToTrack.ICodecCtx == nil || c.lastFromCodecCtx != c.FromTrack.ICodecCtx {
|
||||
if c.ToTrack.ICodecCtx, c.ToTrack.SequenceFrame, err = to.ConvertCtx(c.FromTrack.ICodecCtx); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
c.lastFromCodecCtx = c.FromTrack.ICodecCtx
|
||||
if c.FromTrack.Value.Raw, err = frame.Demux(c.FromTrack.ICodecCtx); err != nil {
|
||||
return
|
||||
}
|
||||
to.SetAllocator(frame.GetAllocator())
|
||||
to.Mux(c.ToTrack.ICodecCtx, &c.FromTrack.Value)
|
||||
return
|
||||
}
|
@@ -27,6 +27,32 @@ type (
|
||||
}
|
||||
)
|
||||
|
||||
func NewAACCtxFromRecord(record []byte) (ret *AACCtx, err error) {
|
||||
ret = &AACCtx{}
|
||||
ret.CodecData, err = aacparser.NewCodecDataFromMPEG4AudioConfigBytes(record)
|
||||
return
|
||||
}
|
||||
|
||||
func NewPCMACtx() *PCMACtx {
|
||||
return &PCMACtx{
|
||||
AudioCtx: AudioCtx{
|
||||
SampleRate: 90000,
|
||||
Channels: 1,
|
||||
SampleSize: 16,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func NewPCMUCtx() *PCMUCtx {
|
||||
return &PCMUCtx{
|
||||
AudioCtx: AudioCtx{
|
||||
SampleRate: 90000,
|
||||
Channels: 1,
|
||||
SampleSize: 16,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (ctx *AudioCtx) GetRecord() []byte {
|
||||
return []byte{}
|
||||
}
|
||||
|
@@ -112,6 +112,12 @@ type (
|
||||
}
|
||||
)
|
||||
|
||||
func NewH264CtxFromRecord(record []byte) (ret *H264Ctx, err error) {
|
||||
ret = &H264Ctx{}
|
||||
ret.CodecData, err = h264parser.NewCodecDataFromAVCDecoderConfRecord(record)
|
||||
return
|
||||
}
|
||||
|
||||
func (*H264Ctx) FourCC() FourCC {
|
||||
return FourCC_H264
|
||||
}
|
||||
|
@@ -24,6 +24,15 @@ type (
|
||||
}
|
||||
)
|
||||
|
||||
func NewH265CtxFromRecord(record []byte) (ret *H265Ctx, err error) {
|
||||
ret = &H265Ctx{}
|
||||
ret.CodecData, err = h265parser.NewCodecDataFromAVCDecoderConfRecord(record)
|
||||
if err == nil {
|
||||
ret.RecordInfo.LengthSizeMinusOne = 3
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (ctx *H265Ctx) GetInfo() string {
|
||||
return fmt.Sprintf("fps: %d, resolution: %s", ctx.FPS(), ctx.Resolution())
|
||||
}
|
||||
|
25
pkg/codec/h26x.go
Normal file
25
pkg/codec/h26x.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package codec
|
||||
|
||||
type H26XCtx struct {
|
||||
VPS, SPS, PPS []byte
|
||||
}
|
||||
|
||||
func (ctx *H26XCtx) FourCC() (f FourCC) {
|
||||
return
|
||||
}
|
||||
|
||||
func (ctx *H26XCtx) GetInfo() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (ctx *H26XCtx) GetBase() ICodecCtx {
|
||||
return ctx
|
||||
}
|
||||
|
||||
func (ctx *H26XCtx) GetRecord() []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ctx *H26XCtx) String() string {
|
||||
return ""
|
||||
}
|
@@ -36,6 +36,22 @@ type Config struct {
|
||||
var (
|
||||
durationType = reflect.TypeOf(time.Duration(0))
|
||||
regexpType = reflect.TypeOf(Regexp{})
|
||||
basicTypes = []reflect.Kind{
|
||||
reflect.Bool,
|
||||
reflect.Int,
|
||||
reflect.Int8,
|
||||
reflect.Int16,
|
||||
reflect.Int32,
|
||||
reflect.Int64,
|
||||
reflect.Uint,
|
||||
reflect.Uint8,
|
||||
reflect.Uint16,
|
||||
reflect.Uint32,
|
||||
reflect.Uint64,
|
||||
reflect.Float32,
|
||||
reflect.Float64,
|
||||
reflect.String,
|
||||
}
|
||||
)
|
||||
|
||||
func (config *Config) Range(f func(key string, value Config)) {
|
||||
@@ -99,29 +115,29 @@ func (config *Config) Parse(s any, prefix ...string) {
|
||||
if t.Kind() == reflect.Pointer {
|
||||
t, v = t.Elem(), v.Elem()
|
||||
}
|
||||
|
||||
isStruct := t.Kind() == reflect.Struct && t != regexpType
|
||||
if isStruct {
|
||||
defaults.SetDefaults(v.Addr().Interface())
|
||||
}
|
||||
config.Ptr = v
|
||||
|
||||
if !v.IsValid() {
|
||||
fmt.Println("parse to ", prefix, config.name, s, "is not valid")
|
||||
return
|
||||
}
|
||||
|
||||
config.Default = v.Interface()
|
||||
|
||||
if l := len(prefix); l > 0 { // 读取环境变量
|
||||
name := strings.ToLower(prefix[l-1])
|
||||
if tag := config.tag.Get("default"); tag != "" {
|
||||
_, isUnmarshaler := v.Addr().Interface().(yaml.Unmarshaler)
|
||||
tag := config.tag.Get("default")
|
||||
if tag != "" && isUnmarshaler {
|
||||
v.Set(config.assign(name, tag))
|
||||
config.Default = v.Interface()
|
||||
}
|
||||
if envValue := os.Getenv(strings.Join(prefix, "_")); envValue != "" {
|
||||
v.Set(config.assign(name, envValue))
|
||||
config.Env = v.Interface()
|
||||
}
|
||||
}
|
||||
|
||||
if t.Kind() == reflect.Struct && t != regexpType {
|
||||
config.Default = v.Interface()
|
||||
if isStruct {
|
||||
for i, j := 0, t.NumField(); i < j; i++ {
|
||||
ft, fv := t.Field(i), v.Field(i)
|
||||
|
||||
@@ -315,16 +331,18 @@ func (config *Config) GetMap() map[string]any {
|
||||
|
||||
var regexPureNumber = regexp.MustCompile(`^\d+$`)
|
||||
|
||||
func (config *Config) assign(k string, v any) (target reflect.Value) {
|
||||
ft := config.Ptr.Type()
|
||||
|
||||
func unmarshal(ft reflect.Type, v any) (target reflect.Value) {
|
||||
source := reflect.ValueOf(v)
|
||||
|
||||
for _, t := range basicTypes {
|
||||
if source.Kind() == t && ft.Kind() == t {
|
||||
return source
|
||||
}
|
||||
}
|
||||
switch ft {
|
||||
case durationType:
|
||||
target = reflect.New(ft).Elem()
|
||||
if source.Type() == durationType {
|
||||
target.Set(source)
|
||||
return source
|
||||
} else if source.IsZero() || !source.IsValid() {
|
||||
target.SetInt(0)
|
||||
} else {
|
||||
@@ -332,7 +350,7 @@ func (config *Config) assign(k string, v any) (target reflect.Value) {
|
||||
if d, err := time.ParseDuration(timeStr); err == nil && !regexPureNumber.MatchString(timeStr) {
|
||||
target.SetInt(int64(d))
|
||||
} else {
|
||||
slog.Error("invalid duration value please add unit (s,m,h,d),eg: 100ms, 10s, 4m, 1h", "key", k, "value", source)
|
||||
slog.Error("invalid duration value please add unit (s,m,h,d),eg: 100ms, 10s, 4m, 1h", "value", timeStr)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -341,58 +359,69 @@ func (config *Config) assign(k string, v any) (target reflect.Value) {
|
||||
regexpStr := source.String()
|
||||
target.Set(reflect.ValueOf(Regexp{regexp.MustCompile(regexpStr)}))
|
||||
default:
|
||||
if ft.Kind() == reflect.Map {
|
||||
target = reflect.MakeMap(ft)
|
||||
if v != nil {
|
||||
tmpStruct := reflect.StructOf([]reflect.StructField{
|
||||
{
|
||||
Name: "Key",
|
||||
Type: ft.Key(),
|
||||
},
|
||||
})
|
||||
tmpValue := reflect.New(tmpStruct)
|
||||
for k, v := range v.(map[string]any) {
|
||||
_ = yaml.Unmarshal([]byte(fmt.Sprintf("key: %s", k)), tmpValue.Interface())
|
||||
var value reflect.Value
|
||||
if ft.Elem().Kind() == reflect.Struct {
|
||||
value = reflect.New(ft.Elem())
|
||||
defaults.SetDefaults(value.Interface())
|
||||
if reflect.TypeOf(v).Kind() != reflect.Map {
|
||||
value.Elem().Field(0).Set(reflect.ValueOf(v))
|
||||
} else {
|
||||
out, _ := yaml.Marshal(v)
|
||||
_ = yaml.Unmarshal(out, value.Interface())
|
||||
}
|
||||
value = value.Elem()
|
||||
} else {
|
||||
value = reflect.ValueOf(v)
|
||||
switch ft.Kind() {
|
||||
case reflect.Struct:
|
||||
newStruct := reflect.New(ft)
|
||||
defaults.SetDefaults(newStruct.Interface())
|
||||
if value, ok := v.(map[string]any); ok {
|
||||
for i := 0; i < ft.NumField(); i++ {
|
||||
key := strings.ToLower(ft.Field(i).Name)
|
||||
if vv, ok := value[key]; ok {
|
||||
newStruct.Elem().Field(i).Set(unmarshal(ft.Field(i).Type, vv))
|
||||
}
|
||||
target.SetMapIndex(tmpValue.Elem().Field(0), value)
|
||||
}
|
||||
} else {
|
||||
newStruct.Elem().Field(0).Set(unmarshal(ft.Field(0).Type, v))
|
||||
}
|
||||
return newStruct.Elem()
|
||||
case reflect.Map:
|
||||
if v != nil {
|
||||
target = reflect.MakeMap(ft)
|
||||
for k, v := range v.(map[string]any) {
|
||||
target.SetMapIndex(unmarshal(ft.Key(), k), unmarshal(ft.Elem(), v))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tmpStruct := reflect.StructOf([]reflect.StructField{
|
||||
{
|
||||
Name: strings.ToUpper(k),
|
||||
Type: ft,
|
||||
},
|
||||
})
|
||||
tmpValue := reflect.New(tmpStruct)
|
||||
case reflect.Slice:
|
||||
if v != nil {
|
||||
s := v.([]any)
|
||||
target = reflect.MakeSlice(ft, len(s), len(s))
|
||||
for i, v := range s {
|
||||
target.Index(i).Set(unmarshal(ft.Elem(), v))
|
||||
}
|
||||
}
|
||||
default:
|
||||
if v != nil {
|
||||
var out []byte
|
||||
var err error
|
||||
if vv, ok := v.(string); ok {
|
||||
out = []byte(fmt.Sprintf("%s: %s", k, vv))
|
||||
out = []byte(fmt.Sprintf("%s: %s", "value", vv))
|
||||
} else {
|
||||
out, _ = yaml.Marshal(map[string]any{k: v})
|
||||
out, err = yaml.Marshal(map[string]any{"value": v})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
_ = yaml.Unmarshal(out, tmpValue.Interface())
|
||||
tmpValue := reflect.New(reflect.StructOf([]reflect.StructField{
|
||||
{
|
||||
Name: "Value",
|
||||
Type: ft,
|
||||
},
|
||||
}))
|
||||
err = yaml.Unmarshal(out, tmpValue.Interface())
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return tmpValue.Elem().Field(0)
|
||||
}
|
||||
target = tmpValue.Elem().Field(0)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (config *Config) assign(k string, v any) reflect.Value {
|
||||
return unmarshal(config.Ptr.Type(), v)
|
||||
}
|
||||
|
||||
func Parse(target any, conf map[string]any) {
|
||||
var c Config
|
||||
c.Parse(target)
|
||||
|
@@ -49,6 +49,7 @@ func (task *ListenQuicWork) Start() (err error) {
|
||||
task.Error("listen quic error", err)
|
||||
return
|
||||
}
|
||||
task.OnStop(task.Listener.Close)
|
||||
task.Info("listen quic on", task.ListenAddr)
|
||||
return
|
||||
}
|
||||
@@ -63,7 +64,3 @@ func (task *ListenQuicWork) Go() error {
|
||||
task.AddTask(subTask)
|
||||
}
|
||||
}
|
||||
|
||||
func (task *ListenQuicWork) Dispose() {
|
||||
_ = task.Listener.Close()
|
||||
}
|
||||
|
@@ -18,6 +18,7 @@ const (
|
||||
|
||||
RecordModeAuto RecordMode = "auto"
|
||||
RecordModeEvent RecordMode = "event"
|
||||
RecordModeTest RecordMode = "test"
|
||||
|
||||
HookOnServerKeepAlive HookType = "server_keep_alive"
|
||||
HookOnPublishStart HookType = "publish_start"
|
||||
@@ -32,15 +33,33 @@ const (
|
||||
HookOnRecordEnd HookType = "record_end"
|
||||
HookOnTransformStart HookType = "transform_start"
|
||||
HookOnTransformEnd HookType = "transform_end"
|
||||
HookOnSystemStart HookType = "system_start"
|
||||
HookDefault HookType = "default"
|
||||
|
||||
EventLevelLow EventLevel = "low"
|
||||
EventLevelHigh EventLevel = "high"
|
||||
|
||||
AlarmStorageException = 0x10010 // 存储异常
|
||||
AlarmStorageExceptionRecover = 0x10011 // 存储异常恢复
|
||||
AlarmPullOffline = 0x10012 // 拉流异常,触发一次报警。
|
||||
AlarmPullRecover = 0x10013 // 拉流恢复
|
||||
AlarmDiskSpaceFull = 0x10014 // 磁盘空间满,磁盘占有率,超出最大磁盘空间使用率,触发报警。
|
||||
AlarmStartupRunning = 0x10015 // 启动运行
|
||||
AlarmPublishOffline = 0x10016 // 发布者异常,触发一次报警。
|
||||
AlarmPublishRecover = 0x10017 // 发布者恢复
|
||||
AlarmSubscribeOffline = 0x10018 // 订阅者异常,触发一次报警。
|
||||
AlarmSubscribeRecover = 0x10019 // 订阅者恢复
|
||||
AlarmPushOffline = 0x10020 // 推流异常,触发一次报警。
|
||||
AlarmPushRecover = 0x10021 // 推流恢复
|
||||
AlarmTransformOffline = 0x10022 // 转换异常,触发一次报警。
|
||||
AlarmTransformRecover = 0x10023 // 转换恢复
|
||||
AlarmKeepAliveOnline = 0x10024 // 保活正常,触发一次报警。
|
||||
)
|
||||
|
||||
type (
|
||||
EventLevel = string
|
||||
RecordMode = string
|
||||
HookType string
|
||||
HookType = string
|
||||
Publish struct {
|
||||
MaxCount int `default:"0" desc:"最大发布者数量"` // 最大发布者数量
|
||||
PubAudio bool `default:"true" desc:"是否发布音频"`
|
||||
@@ -52,7 +71,7 @@ type (
|
||||
IdleTimeout time.Duration `desc:"空闲(无订阅)超时"` // 空闲(无订阅)超时
|
||||
PauseTimeout time.Duration `default:"30s" desc:"暂停超时时间"` // 暂停超时
|
||||
BufferTime time.Duration `desc:"缓冲时长,0代表取最近关键帧"` // 缓冲长度(单位:秒),0代表取最近关键帧
|
||||
Speed float64 `default:"1" desc:"发送速率"` // 发送速率,0 为不限速
|
||||
Speed float64 `desc:"发送速率"` // 发送速率,0 为不限速
|
||||
Scale float64 `default:"1" desc:"缩放倍数"` // 缩放倍数
|
||||
MaxFPS int `default:"60" desc:"最大FPS"` // 最大FPS
|
||||
Key string `desc:"发布鉴权key"` // 发布鉴权key
|
||||
@@ -78,10 +97,10 @@ type (
|
||||
HTTPValues map[string][]string
|
||||
Pull struct {
|
||||
URL string `desc:"拉流地址"`
|
||||
Loop int `desc:"拉流循环次数,-1:无限循环"` // 拉流循环次数,-1 表示无限循环
|
||||
MaxRetry int `default:"-1" desc:"断开后自动重试次数,0:不重试,-1:无限重试"` // 断开后自动重拉,0 表示不自动重拉,-1 表示无限重拉,高于0 的数代表最大重拉次数
|
||||
RetryInterval time.Duration `default:"5s" desc:"重试间隔"` // 重试间隔
|
||||
Proxy string `desc:"代理地址"` // 代理地址
|
||||
Loop int `desc:"拉流循环次数,-1:无限循环"` // 拉流循环次数,-1 表示无限循环
|
||||
MaxRetry int `desc:"断开后自动重试次数,0:不重试,-1:无限重试"` // 断开后自动重拉,0 表示不自动重拉,-1 表示无限重拉,高于0 的数代表最大重拉次数
|
||||
RetryInterval time.Duration `default:"5s" desc:"重试间隔"` // 重试间隔
|
||||
Proxy string `desc:"代理地址"` // 代理地址
|
||||
Header HTTPValues
|
||||
Args HTTPValues `gorm:"-:all"` // 拉流参数
|
||||
TestMode int `desc:"测试模式,0:关闭,1:只拉流不发布"` // 测试模式
|
||||
@@ -106,6 +125,7 @@ type (
|
||||
Type string `desc:"录制类型"` // 录制类型 mp4、flv、hls、hlsv7
|
||||
FilePath string `desc:"录制文件路径"` // 录制文件路径
|
||||
Fragment time.Duration `desc:"分片时长"` // 分片时长
|
||||
RealTime bool `desc:"是否实时录制"` // 是否实时录制
|
||||
Append bool `desc:"是否追加录制"` // 是否追加录制
|
||||
Event *RecordEvent `json:"event" desc:"事件录像配置" gorm:"-"` // 事件录像配置
|
||||
}
|
||||
@@ -131,10 +151,11 @@ type (
|
||||
URL string // Webhook 地址
|
||||
Method string `default:"POST"` // HTTP 方法
|
||||
Headers map[string]string // 自定义请求头
|
||||
TimeoutSeconds int `default:"5"` // 超时时间(秒)
|
||||
RetryTimes int `default:"3"` // 重试次数
|
||||
RetryInterval time.Duration `default:"1s"` // 重试间隔
|
||||
Interval int `default:"60"` // 保活间隔(秒)
|
||||
TimeoutSeconds int `default:"5"` // 超时时间(秒)
|
||||
RetryTimes int `default:"3"` // 重试次数
|
||||
RetryInterval time.Duration `default:"1s"` // 重试间隔
|
||||
Interval int `default:"60"` // 保活间隔(秒)
|
||||
SaveAlarm bool `default:"false"` // 是否保存告警到数据库
|
||||
}
|
||||
Common struct {
|
||||
PublicIP string
|
||||
|
@@ -4,6 +4,7 @@ import "errors"
|
||||
|
||||
var (
|
||||
ErrNotFound = errors.New("not found")
|
||||
ErrDisposed = errors.New("disposed")
|
||||
ErrDisabled = errors.New("disabled")
|
||||
ErrStreamExist = errors.New("stream exist")
|
||||
ErrRecordExists = errors.New("record exists")
|
||||
|
82
pkg/format/adts.go
Normal file
82
pkg/format/adts.go
Normal file
@@ -0,0 +1,82 @@
|
||||
package format
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/deepch/vdk/codec/aacparser"
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
)
|
||||
|
||||
var _ pkg.IAVFrame = (*Mpeg2Audio)(nil)
|
||||
|
||||
type Mpeg2Audio struct {
|
||||
pkg.Sample
|
||||
}
|
||||
|
||||
func (A *Mpeg2Audio) CheckCodecChange() (err error) {
|
||||
old := A.ICodecCtx
|
||||
if old == nil || old.FourCC().Is(codec.FourCC_MP4A) {
|
||||
var reader = A.NewReader()
|
||||
var adts []byte
|
||||
adts, err = reader.ReadBytes(7)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var hdrlen, framelen, samples int
|
||||
var conf aacparser.MPEG4AudioConfig
|
||||
conf, hdrlen, framelen, samples, err = aacparser.ParseADTSHeader(adts)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
b := &bytes.Buffer{}
|
||||
aacparser.WriteMPEG4AudioConfig(b, conf)
|
||||
if old == nil || !bytes.Equal(b.Bytes(), old.GetRecord()) {
|
||||
var ctx = &codec.AACCtx{}
|
||||
ctx.ConfigBytes = b.Bytes()
|
||||
A.ICodecCtx = ctx
|
||||
if false {
|
||||
println("ADTS", "hdrlen", hdrlen, "framelen", framelen, "samples", samples, "config", ctx.Config)
|
||||
}
|
||||
// track.Info("ADTS", "hdrlen", hdrlen, "framelen", framelen, "samples", samples)
|
||||
} else {
|
||||
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (A *Mpeg2Audio) Demux() (err error) {
|
||||
var reader = A.NewReader()
|
||||
mem := A.GetAudioData()
|
||||
if A.ICodecCtx.FourCC().Is(codec.FourCC_MP4A) {
|
||||
err = reader.Skip(7)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
reader.Range(mem.PushOne)
|
||||
return
|
||||
}
|
||||
|
||||
func (A *Mpeg2Audio) Mux(frame *pkg.Sample) (err error) {
|
||||
if A.ICodecCtx == nil {
|
||||
A.ICodecCtx = frame.GetBase()
|
||||
}
|
||||
raw := frame.Raw.(*pkg.AudioData)
|
||||
aacCtx, ok := A.ICodecCtx.(*codec.AACCtx)
|
||||
if ok {
|
||||
A.InitRecycleIndexes(1)
|
||||
adts := A.NextN(7)
|
||||
aacparser.FillADTSHeader(adts, aacCtx.Config, raw.Size/aacCtx.GetSampleSize(), raw.Size)
|
||||
} else {
|
||||
A.InitRecycleIndexes(0)
|
||||
}
|
||||
A.Push(raw.Buffers...)
|
||||
return
|
||||
}
|
||||
|
||||
func (A *Mpeg2Audio) String() string {
|
||||
return fmt.Sprintf("ADTS{size:%d}", A.Size)
|
||||
}
|
290
pkg/format/annexb.go
Normal file
290
pkg/format/annexb.go
Normal file
@@ -0,0 +1,290 @@
|
||||
package format
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"slices"
|
||||
|
||||
"github.com/deepch/vdk/codec/h264parser"
|
||||
"github.com/deepch/vdk/codec/h265parser"
|
||||
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
type AnnexB struct {
|
||||
pkg.Sample
|
||||
}
|
||||
|
||||
func (a *AnnexB) CheckCodecChange() (err error) {
|
||||
if !a.HasRaw() || a.ICodecCtx == nil {
|
||||
err = a.Demux()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if a.ICodecCtx == nil {
|
||||
return pkg.ErrSkip
|
||||
}
|
||||
var vps, sps, pps []byte
|
||||
a.IDR = false
|
||||
for nalu := range a.Raw.(*pkg.Nalus).RangePoint {
|
||||
if a.FourCC() == codec.FourCC_H265 {
|
||||
switch codec.ParseH265NALUType(nalu.Buffers[0][0]) {
|
||||
case h265parser.NAL_UNIT_VPS:
|
||||
vps = nalu.ToBytes()
|
||||
case h265parser.NAL_UNIT_SPS:
|
||||
sps = nalu.ToBytes()
|
||||
case h265parser.NAL_UNIT_PPS:
|
||||
pps = nalu.ToBytes()
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_CRA:
|
||||
a.IDR = true
|
||||
}
|
||||
} else {
|
||||
switch codec.ParseH264NALUType(nalu.Buffers[0][0]) {
|
||||
case codec.NALU_SPS:
|
||||
sps = nalu.ToBytes()
|
||||
case codec.NALU_PPS:
|
||||
pps = nalu.ToBytes()
|
||||
case codec.NALU_IDR_Picture:
|
||||
a.IDR = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if a.FourCC() == codec.FourCC_H265 {
|
||||
if vps != nil && sps != nil && pps != nil {
|
||||
var codecData h265parser.CodecData
|
||||
codecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(vps, sps, pps)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !bytes.Equal(codecData.Record, a.ICodecCtx.(*codec.H265Ctx).Record) {
|
||||
a.ICodecCtx = &codec.H265Ctx{
|
||||
CodecData: codecData,
|
||||
}
|
||||
}
|
||||
}
|
||||
if a.ICodecCtx.(*codec.H265Ctx).Record == nil {
|
||||
err = pkg.ErrSkip
|
||||
}
|
||||
} else {
|
||||
if sps != nil && pps != nil {
|
||||
var codecData h264parser.CodecData
|
||||
codecData, err = h264parser.NewCodecDataFromSPSAndPPS(sps, pps)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !bytes.Equal(codecData.Record, a.ICodecCtx.(*codec.H264Ctx).Record) {
|
||||
a.ICodecCtx = &codec.H264Ctx{
|
||||
CodecData: codecData,
|
||||
}
|
||||
}
|
||||
}
|
||||
if a.ICodecCtx.(*codec.H264Ctx).Record == nil {
|
||||
err = pkg.ErrSkip
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// String implements pkg.IAVFrame.
|
||||
func (a *AnnexB) String() string {
|
||||
return fmt.Sprintf("%d %d", a.Timestamp, a.Memory.Size)
|
||||
}
|
||||
|
||||
// Demux implements pkg.IAVFrame.
|
||||
func (a *AnnexB) Demux() (err error) {
|
||||
nalus := a.GetNalus()
|
||||
var lastFourBytes [4]byte
|
||||
var b byte
|
||||
var shallow util.Memory
|
||||
shallow.Push(a.Buffers...)
|
||||
reader := shallow.NewReader()
|
||||
gotNalu := func() {
|
||||
nalu := nalus.GetNextPointer()
|
||||
for buf := range reader.ClipFront {
|
||||
nalu.PushOne(buf)
|
||||
}
|
||||
if a.ICodecCtx == nil {
|
||||
naluType := codec.ParseH264NALUType(nalu.Buffers[0][0])
|
||||
switch naluType {
|
||||
case codec.NALU_Non_IDR_Picture,
|
||||
codec.NALU_IDR_Picture,
|
||||
codec.NALU_SEI,
|
||||
codec.NALU_SPS,
|
||||
codec.NALU_PPS,
|
||||
codec.NALU_Access_Unit_Delimiter:
|
||||
a.ICodecCtx = &codec.H264Ctx{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
b, err = reader.ReadByte()
|
||||
if err == nil {
|
||||
copy(lastFourBytes[:], lastFourBytes[1:])
|
||||
lastFourBytes[3] = b
|
||||
var startCode = 0
|
||||
if lastFourBytes == codec.NALU_Delimiter2 {
|
||||
startCode = 4
|
||||
} else if [3]byte(lastFourBytes[1:]) == codec.NALU_Delimiter1 {
|
||||
startCode = 3
|
||||
}
|
||||
if startCode > 0 && reader.Offset() >= 3 {
|
||||
if reader.Offset() == 3 {
|
||||
startCode = 3
|
||||
}
|
||||
reader.Unread(startCode)
|
||||
if reader.Offset() > 0 {
|
||||
gotNalu()
|
||||
}
|
||||
reader.Skip(startCode)
|
||||
for range reader.ClipFront {
|
||||
}
|
||||
}
|
||||
} else if err == io.EOF {
|
||||
if reader.Offset() > 0 {
|
||||
gotNalu()
|
||||
}
|
||||
err = nil
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (a *AnnexB) Mux(fromBase *pkg.Sample) (err error) {
|
||||
if a.ICodecCtx == nil {
|
||||
a.ICodecCtx = fromBase.GetBase()
|
||||
}
|
||||
a.InitRecycleIndexes(0)
|
||||
delimiter2 := codec.NALU_Delimiter2[:]
|
||||
a.PushOne(delimiter2)
|
||||
if fromBase.IDR {
|
||||
switch ctx := fromBase.GetBase().(type) {
|
||||
case *codec.H264Ctx:
|
||||
a.Push(ctx.SPS(), delimiter2, ctx.PPS(), delimiter2)
|
||||
case *codec.H265Ctx:
|
||||
a.Push(ctx.SPS(), delimiter2, ctx.PPS(), delimiter2, ctx.VPS(), delimiter2)
|
||||
}
|
||||
}
|
||||
for i, nalu := range *fromBase.Raw.(*pkg.Nalus) {
|
||||
if i > 0 {
|
||||
a.PushOne(codec.NALU_Delimiter1[:])
|
||||
}
|
||||
a.Push(nalu.Buffers...)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (a *AnnexB) Parse(reader *pkg.AnnexBReader) (hasFrame bool, err error) {
|
||||
nalus := a.BaseSample.GetNalus()
|
||||
for !hasFrame {
|
||||
nalu := nalus.GetNextPointer()
|
||||
reader.ReadNALU(&a.Memory, nalu)
|
||||
if nalu.Size == 0 {
|
||||
nalus.Reduce()
|
||||
return
|
||||
}
|
||||
tryH264Type := codec.ParseH264NALUType(nalu.Buffers[0][0])
|
||||
h265Type := codec.ParseH265NALUType(nalu.Buffers[0][0])
|
||||
if a.ICodecCtx == nil {
|
||||
a.ICodecCtx = &codec.H26XCtx{}
|
||||
}
|
||||
switch ctx := a.ICodecCtx.(type) {
|
||||
case *codec.H26XCtx:
|
||||
if tryH264Type == codec.NALU_SPS {
|
||||
ctx.SPS = nalu.ToBytes()
|
||||
nalus.Reduce()
|
||||
a.Recycle()
|
||||
} else if tryH264Type == codec.NALU_PPS {
|
||||
ctx.PPS = nalu.ToBytes()
|
||||
nalus.Reduce()
|
||||
a.Recycle()
|
||||
} else if h265Type == h265parser.NAL_UNIT_VPS {
|
||||
ctx.VPS = nalu.ToBytes()
|
||||
nalus.Reduce()
|
||||
a.Recycle()
|
||||
} else if h265Type == h265parser.NAL_UNIT_SPS {
|
||||
ctx.SPS = nalu.ToBytes()
|
||||
nalus.Reduce()
|
||||
a.Recycle()
|
||||
} else if h265Type == h265parser.NAL_UNIT_PPS {
|
||||
ctx.PPS = nalu.ToBytes()
|
||||
nalus.Reduce()
|
||||
a.Recycle()
|
||||
} else {
|
||||
if ctx.SPS != nil && ctx.PPS != nil && tryH264Type == codec.NALU_IDR_Picture {
|
||||
var codecData h264parser.CodecData
|
||||
codecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS, ctx.PPS)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
a.ICodecCtx = &codec.H264Ctx{
|
||||
CodecData: codecData,
|
||||
}
|
||||
*nalus = slices.Insert(*nalus, 0, util.NewMemory(ctx.SPS), util.NewMemory(ctx.PPS))
|
||||
delimiter2 := codec.NALU_Delimiter2[:]
|
||||
a.Buffers = slices.Insert(a.Buffers, 0, delimiter2, ctx.SPS, delimiter2, ctx.PPS)
|
||||
a.Size += 8 + len(ctx.SPS) + len(ctx.PPS)
|
||||
} else if ctx.VPS != nil && ctx.SPS != nil && ctx.PPS != nil && h265Type == h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL {
|
||||
var codecData h265parser.CodecData
|
||||
codecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(ctx.VPS, ctx.SPS, ctx.PPS)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
a.ICodecCtx = &codec.H265Ctx{
|
||||
CodecData: codecData,
|
||||
}
|
||||
*nalus = slices.Insert(*nalus, 0, util.NewMemory(ctx.VPS), util.NewMemory(ctx.SPS), util.NewMemory(ctx.PPS))
|
||||
delimiter2 := codec.NALU_Delimiter2[:]
|
||||
a.Buffers = slices.Insert(a.Buffers, 0, delimiter2, ctx.VPS, delimiter2, ctx.SPS, delimiter2, ctx.PPS)
|
||||
a.Size += 24 + len(ctx.VPS) + len(ctx.SPS) + len(ctx.PPS)
|
||||
} else {
|
||||
nalus.Reduce()
|
||||
a.Recycle()
|
||||
}
|
||||
}
|
||||
case *codec.H264Ctx:
|
||||
switch tryH264Type {
|
||||
case codec.NALU_IDR_Picture:
|
||||
a.IDR = true
|
||||
hasFrame = true
|
||||
case codec.NALU_Non_IDR_Picture:
|
||||
a.IDR = false
|
||||
hasFrame = true
|
||||
}
|
||||
case *codec.H265Ctx:
|
||||
switch h265Type {
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_CRA:
|
||||
a.IDR = true
|
||||
hasFrame = true
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_TRAIL_N,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_TRAIL_R,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_TSA_N,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_TSA_R,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_STSA_N,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_STSA_R,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_RADL_N,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_RADL_R,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_RASL_N,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_RASL_R:
|
||||
a.IDR = false
|
||||
hasFrame = true
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
309
pkg/format/ps/mpegps.go
Normal file
309
pkg/format/ps/mpegps.go
Normal file
@@ -0,0 +1,309 @@
|
||||
package mpegps
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5"
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/format"
|
||||
"m7s.live/v5/pkg/util"
|
||||
|
||||
mpegts "m7s.live/v5/pkg/format/ts"
|
||||
)
|
||||
|
||||
const (
|
||||
StartCodePS = 0x000001ba
|
||||
StartCodeSYS = 0x000001bb
|
||||
StartCodeMAP = 0x000001bc
|
||||
StartCodePadding = 0x000001be
|
||||
StartCodeVideo = 0x000001e0
|
||||
StartCodeVideo1 = 0x000001e1
|
||||
StartCodeVideo2 = 0x000001e2
|
||||
StartCodeAudio = 0x000001c0
|
||||
PrivateStreamCode = 0x000001bd
|
||||
MEPGProgramEndCode = 0x000001b9
|
||||
)
|
||||
|
||||
// PS包头常量
|
||||
const (
|
||||
PSPackHeaderSize = 14 // PS pack header basic size
|
||||
PSSystemHeaderSize = 18 // PS system header basic size
|
||||
PSMHeaderSize = 12 // PS map header basic size
|
||||
PESHeaderMinSize = 9 // PES header minimum size
|
||||
MaxPESPayloadSize = 0xFFEB // 0xFFFF - 14 (to leave room for headers)
|
||||
)
|
||||
|
||||
type MpegPsDemuxer struct {
|
||||
stAudio, stVideo byte
|
||||
Publisher *m7s.Publisher
|
||||
Allocator *util.ScalableMemoryAllocator
|
||||
writer m7s.PublishWriter[*format.Mpeg2Audio, *format.AnnexB]
|
||||
}
|
||||
|
||||
func (s *MpegPsDemuxer) Feed(reader *util.BufReader) (err error) {
|
||||
writer := &s.writer
|
||||
var payload util.Memory
|
||||
var pesHeader mpegts.MpegPESHeader
|
||||
var lastVideoPts, lastAudioPts uint64
|
||||
var annexbReader pkg.AnnexBReader
|
||||
for {
|
||||
code, err := reader.ReadBE32(4)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch code {
|
||||
case StartCodePS:
|
||||
var psl byte
|
||||
if err = reader.Skip(9); err != nil {
|
||||
return err
|
||||
}
|
||||
psl, err = reader.ReadByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
psl &= 0x07
|
||||
if err = reader.Skip(int(psl)); err != nil {
|
||||
return err
|
||||
}
|
||||
case StartCodeVideo:
|
||||
payload, err = s.ReadPayload(reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !s.Publisher.PubVideo {
|
||||
continue
|
||||
}
|
||||
if writer.PublishVideoWriter == nil {
|
||||
writer.PublishVideoWriter = m7s.NewPublishVideoWriter[*format.AnnexB](s.Publisher, s.Allocator)
|
||||
switch s.stVideo {
|
||||
case mpegts.STREAM_TYPE_H264:
|
||||
writer.VideoFrame.ICodecCtx = &codec.H264Ctx{}
|
||||
case mpegts.STREAM_TYPE_H265:
|
||||
writer.VideoFrame.ICodecCtx = &codec.H265Ctx{}
|
||||
}
|
||||
}
|
||||
pes := writer.VideoFrame
|
||||
reader := payload.NewReader()
|
||||
pesHeader, err = mpegts.ReadPESHeader(&io.LimitedReader{R: &reader, N: int64(payload.Size)})
|
||||
if err != nil {
|
||||
return errors.Join(err, fmt.Errorf("failed to read PES header"))
|
||||
}
|
||||
if pesHeader.Pts != 0 && pesHeader.Pts != lastVideoPts {
|
||||
if pes.Size > 0 {
|
||||
err = writer.NextVideo()
|
||||
if err != nil {
|
||||
return errors.Join(err, fmt.Errorf("failed to get next video frame"))
|
||||
}
|
||||
pes = writer.VideoFrame
|
||||
}
|
||||
pes.SetDTS(time.Duration(pesHeader.Dts))
|
||||
pes.SetPTS(time.Duration(pesHeader.Pts))
|
||||
lastVideoPts = pesHeader.Pts
|
||||
}
|
||||
annexb := s.Allocator.Malloc(reader.Length)
|
||||
reader.Read(annexb)
|
||||
annexbReader.AppendBuffer(annexb)
|
||||
_, err = pes.Parse(&annexbReader)
|
||||
if err != nil {
|
||||
return errors.Join(err, fmt.Errorf("failed to parse annexb"))
|
||||
}
|
||||
case StartCodeAudio:
|
||||
payload, err = s.ReadPayload(reader)
|
||||
if err != nil {
|
||||
return errors.Join(err, fmt.Errorf("failed to read audio payload"))
|
||||
}
|
||||
if s.stAudio == 0 || !s.Publisher.PubAudio {
|
||||
continue
|
||||
}
|
||||
if writer.PublishAudioWriter == nil {
|
||||
writer.PublishAudioWriter = m7s.NewPublishAudioWriter[*format.Mpeg2Audio](s.Publisher, s.Allocator)
|
||||
switch s.stAudio {
|
||||
case mpegts.STREAM_TYPE_AAC:
|
||||
writer.AudioFrame.ICodecCtx = &codec.AACCtx{}
|
||||
case mpegts.STREAM_TYPE_G711A:
|
||||
writer.AudioFrame.ICodecCtx = codec.NewPCMACtx()
|
||||
case mpegts.STREAM_TYPE_G711U:
|
||||
writer.AudioFrame.ICodecCtx = codec.NewPCMUCtx()
|
||||
}
|
||||
}
|
||||
pes := writer.AudioFrame
|
||||
reader := payload.NewReader()
|
||||
pesHeader, err = mpegts.ReadPESHeader(&io.LimitedReader{R: &reader, N: int64(payload.Size)})
|
||||
if err != nil {
|
||||
return errors.Join(err, fmt.Errorf("failed to read PES header"))
|
||||
}
|
||||
if pesHeader.Pts != 0 && pesHeader.Pts != lastAudioPts {
|
||||
if pes.Size > 0 {
|
||||
err = writer.NextAudio()
|
||||
if err != nil {
|
||||
return errors.Join(err, fmt.Errorf("failed to get next audio frame"))
|
||||
}
|
||||
pes = writer.AudioFrame
|
||||
}
|
||||
pes.SetDTS(time.Duration(pesHeader.Pts))
|
||||
pes.SetPTS(time.Duration(pesHeader.Pts))
|
||||
lastAudioPts = pesHeader.Pts
|
||||
}
|
||||
reader.Range(func(buf []byte) {
|
||||
copy(pes.NextN(len(buf)), buf)
|
||||
})
|
||||
// reader.Range(pes.PushOne)
|
||||
case StartCodeMAP:
|
||||
var psm util.Memory
|
||||
psm, err = s.ReadPayload(reader)
|
||||
if err != nil {
|
||||
return errors.Join(err, fmt.Errorf("failed to read program stream map"))
|
||||
}
|
||||
err = s.decProgramStreamMap(psm)
|
||||
if err != nil {
|
||||
return errors.Join(err, fmt.Errorf("failed to decode program stream map"))
|
||||
}
|
||||
default:
|
||||
payloadlen, err := reader.ReadBE(2)
|
||||
if err != nil {
|
||||
return errors.Join(err, fmt.Errorf("failed to read payload length"))
|
||||
}
|
||||
reader.Skip(payloadlen)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *MpegPsDemuxer) ReadPayload(reader *util.BufReader) (payload util.Memory, err error) {
|
||||
payloadlen, err := reader.ReadBE(2)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
return reader.ReadBytes(payloadlen)
|
||||
}
|
||||
|
||||
func (s *MpegPsDemuxer) decProgramStreamMap(psm util.Memory) (err error) {
|
||||
var programStreamInfoLen, programStreamMapLen, elementaryStreamInfoLength uint32
|
||||
var streamType, elementaryStreamID byte
|
||||
reader := psm.NewReader()
|
||||
reader.Skip(2)
|
||||
programStreamInfoLen, err = reader.ReadBE(2)
|
||||
reader.Skip(int(programStreamInfoLen))
|
||||
programStreamMapLen, err = reader.ReadBE(2)
|
||||
for programStreamMapLen > 0 {
|
||||
streamType, err = reader.ReadByte()
|
||||
elementaryStreamID, err = reader.ReadByte()
|
||||
if elementaryStreamID >= 0xe0 && elementaryStreamID <= 0xef {
|
||||
s.stVideo = streamType
|
||||
|
||||
} else if elementaryStreamID >= 0xc0 && elementaryStreamID <= 0xdf {
|
||||
s.stAudio = streamType
|
||||
}
|
||||
elementaryStreamInfoLength, err = reader.ReadBE(2)
|
||||
reader.Skip(int(elementaryStreamInfoLength))
|
||||
programStreamMapLen -= 4 + elementaryStreamInfoLength
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MpegPSMuxer multiplexes the frames of a subscribed stream into an MPEG
// program stream (PS).
type MpegPSMuxer struct {
	*m7s.Subscriber
	// Packet is the output buffer the pack header, PSM and PES bytes are
	// written into; Mux hands it to the caller via the onPacket callback.
	Packet *util.RecyclableMemory
}
|
||||
|
||||
func (muxer *MpegPSMuxer) Mux(onPacket func() error) {
|
||||
var pesAudio, pesVideo *MpegpsPESFrame
|
||||
puber := muxer.Publisher
|
||||
var elementary_stream_map_length uint16
|
||||
if puber.HasAudioTrack() {
|
||||
elementary_stream_map_length += 4
|
||||
pesAudio = &MpegpsPESFrame{}
|
||||
pesAudio.StreamID = mpegts.STREAM_ID_AUDIO
|
||||
switch puber.AudioTrack.ICodecCtx.FourCC() {
|
||||
case codec.FourCC_ALAW:
|
||||
pesAudio.StreamType = mpegts.STREAM_TYPE_G711A
|
||||
case codec.FourCC_ULAW:
|
||||
pesAudio.StreamType = mpegts.STREAM_TYPE_G711U
|
||||
case codec.FourCC_MP4A:
|
||||
pesAudio.StreamType = mpegts.STREAM_TYPE_AAC
|
||||
}
|
||||
}
|
||||
if puber.HasVideoTrack() {
|
||||
elementary_stream_map_length += 4
|
||||
pesVideo = &MpegpsPESFrame{}
|
||||
pesVideo.StreamID = mpegts.STREAM_ID_VIDEO
|
||||
switch puber.VideoTrack.ICodecCtx.FourCC() {
|
||||
case codec.FourCC_H264:
|
||||
pesVideo.StreamType = mpegts.STREAM_TYPE_H264
|
||||
case codec.FourCC_H265:
|
||||
pesVideo.StreamType = mpegts.STREAM_TYPE_H265
|
||||
}
|
||||
}
|
||||
var outputBuffer util.Buffer = muxer.Packet.NextN(PSPackHeaderSize + PSMHeaderSize + int(elementary_stream_map_length))
|
||||
outputBuffer.Reset()
|
||||
MuxPSHeader(&outputBuffer)
|
||||
// System Header - 定义流的缓冲区信息
|
||||
// outputBuffer.WriteUint32(StartCodeSYS)
|
||||
// outputBuffer.WriteByte(0x00) // header_length high
|
||||
// outputBuffer.WriteByte(0x0C) // header_length low (12 bytes)
|
||||
// outputBuffer.WriteByte(0x80) // marker + rate_bound[21..15]
|
||||
// outputBuffer.WriteByte(0x62) // rate_bound[14..8]
|
||||
// outputBuffer.WriteByte(0x4E) // rate_bound[7..1] + marker
|
||||
// outputBuffer.WriteByte(0x01) // audio_bound + fixed_flag + CSPS_flag + system_audio_lock_flag + system_video_lock_flag + marker
|
||||
// outputBuffer.WriteByte(0x01) // video_bound + packet_rate_restriction_flag + reserved
|
||||
// outputBuffer.WriteByte(frame.StreamId) // stream_id
|
||||
// outputBuffer.WriteByte(0xC0) // '11' + P-STD_buffer_bound_scale
|
||||
// outputBuffer.WriteByte(0x20) // P-STD_buffer_size_bound low
|
||||
// outputBuffer.WriteByte(0x00) // P-STD_buffer_size_bound high
|
||||
// outputBuffer.WriteByte(0x00)
|
||||
// outputBuffer.WriteByte(0x00)
|
||||
// outputBuffer.WriteByte(0x00)
|
||||
|
||||
// PSM Header - 程序流映射,定义流类型
|
||||
outputBuffer.WriteUint32(StartCodeMAP)
|
||||
outputBuffer.WriteUint16(uint16(PSMHeaderSize) + elementary_stream_map_length - 6) // psm_length
|
||||
outputBuffer.WriteByte(0xE0) // current_next_indicator + reserved + psm_version
|
||||
outputBuffer.WriteByte(0xFF) // reserved + marker
|
||||
outputBuffer.WriteUint16(0) // program_stream_info_length
|
||||
|
||||
outputBuffer.WriteUint16(elementary_stream_map_length)
|
||||
if pesAudio != nil {
|
||||
outputBuffer.WriteByte(pesAudio.StreamType) // stream_type
|
||||
outputBuffer.WriteByte(pesAudio.StreamID) // elementary_stream_id
|
||||
outputBuffer.WriteUint16(0) // elementary_stream_info_length
|
||||
}
|
||||
if pesVideo != nil {
|
||||
outputBuffer.WriteByte(pesVideo.StreamType) // stream_type
|
||||
outputBuffer.WriteByte(pesVideo.StreamID) // elementary_stream_id
|
||||
outputBuffer.WriteUint16(0) // elementary_stream_info_length
|
||||
}
|
||||
onPacket()
|
||||
m7s.PlayBlock(muxer.Subscriber, func(audio *format.Mpeg2Audio) error {
|
||||
pesAudio.Pts = uint64(audio.GetPTS())
|
||||
pesAudio.WritePESPacket(audio.Memory, muxer.Packet)
|
||||
return onPacket()
|
||||
}, func(video *format.AnnexB) error {
|
||||
pesVideo.Pts = uint64(video.GetPTS())
|
||||
pesVideo.Dts = uint64(video.GetDTS())
|
||||
pesVideo.WritePESPacket(video.Memory, muxer.Packet)
|
||||
|
||||
return onPacket()
|
||||
})
|
||||
}
|
||||
|
||||
func MuxPSHeader(outputBuffer *util.Buffer) {
|
||||
// 写入PS Pack Header - 参考MPEG-2程序流标准
|
||||
// Pack start code: 0x000001BA
|
||||
outputBuffer.WriteUint32(StartCodePS)
|
||||
// SCR字段 (System Clock Reference) - 参考ps-muxer.go的实现
|
||||
// 系统时钟参考
|
||||
scr := uint64(time.Now().UnixMilli()) * 90
|
||||
outputBuffer.WriteByte(0x44 | byte((scr>>30)&0x07)) // '01' + SCR[32..30]
|
||||
outputBuffer.WriteByte(byte((scr >> 22) & 0xFF)) // SCR[29..22]
|
||||
outputBuffer.WriteByte(0x04 | byte((scr>>20)&0x03)) // marker + SCR[21..20]
|
||||
outputBuffer.WriteByte(byte((scr >> 12) & 0xFF)) // SCR[19..12]
|
||||
outputBuffer.WriteByte(0x04 | byte((scr>>10)&0x03)) // marker + SCR[11..10]
|
||||
outputBuffer.WriteByte(byte((scr >> 2) & 0xFF)) // SCR[9..2]
|
||||
outputBuffer.WriteByte(0x04 | byte(scr&0x03)) // marker + SCR[1..0]
|
||||
outputBuffer.WriteByte(0x01) // SCR_ext + marker
|
||||
outputBuffer.WriteByte(0x89) // program_mux_rate high
|
||||
outputBuffer.WriteByte(0xC8) // program_mux_rate low + markers + reserved + stuffing_length(0)
|
||||
}
|
853
pkg/format/ps/mpegps_test.go
Normal file
853
pkg/format/ps/mpegps_test.go
Normal file
@@ -0,0 +1,853 @@
|
||||
package mpegps
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
// min returns the smaller of a and b.
func min(a, b int) int {
	if b < a {
		return b
	}
	return a
}
|
||||
|
||||
func TestMpegPSConstants(t *testing.T) {
|
||||
// Test that PS constants are properly defined
|
||||
t.Run("Constants", func(t *testing.T) {
|
||||
if StartCodePS != 0x000001ba {
|
||||
t.Errorf("Expected StartCodePS %x, got %x", 0x000001ba, StartCodePS)
|
||||
}
|
||||
|
||||
if PSPackHeaderSize != 14 {
|
||||
t.Errorf("Expected PSPackHeaderSize %d, got %d", 14, PSPackHeaderSize)
|
||||
}
|
||||
|
||||
if MaxPESPayloadSize != 0xFFEB {
|
||||
t.Errorf("Expected MaxPESPayloadSize %x, got %x", 0xFFEB, MaxPESPayloadSize)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestMuxPSHeader(t *testing.T) {
|
||||
// Test PS header generation
|
||||
t.Run("PSHeader", func(t *testing.T) {
|
||||
// Create a buffer for testing - initialize with length 0 to allow appending
|
||||
buffer := make([]byte, 0, PSPackHeaderSize)
|
||||
utilBuffer := util.Buffer(buffer)
|
||||
|
||||
// Call MuxPSHeader
|
||||
MuxPSHeader(&utilBuffer)
|
||||
|
||||
// Check the buffer length
|
||||
if len(utilBuffer) != PSPackHeaderSize {
|
||||
t.Errorf("Expected buffer length %d, got %d", PSPackHeaderSize, len(utilBuffer))
|
||||
}
|
||||
|
||||
// Check PS start code (first 4 bytes should be 0x00 0x00 0x01 0xBA)
|
||||
expectedStartCode := []byte{0x00, 0x00, 0x01, 0xBA}
|
||||
if !bytes.Equal(utilBuffer[:4], expectedStartCode) {
|
||||
t.Errorf("Expected PS start code %x, got %x", expectedStartCode, utilBuffer[:4])
|
||||
}
|
||||
|
||||
t.Logf("PS Header: %x", utilBuffer)
|
||||
t.Logf("Buffer length: %d", len(utilBuffer))
|
||||
})
|
||||
}
|
||||
|
||||
func TestMpegpsPESFrame(t *testing.T) {
|
||||
// Test MpegpsPESFrame basic functionality
|
||||
t.Run("PESFrame", func(t *testing.T) {
|
||||
// Create PES frame
|
||||
pesFrame := &MpegpsPESFrame{
|
||||
StreamType: 0x1B, // H.264
|
||||
}
|
||||
pesFrame.Pts = 90000 // 1 second in 90kHz clock
|
||||
pesFrame.Dts = 90000
|
||||
|
||||
// Test basic properties
|
||||
if pesFrame.StreamType != 0x1B {
|
||||
t.Errorf("Expected stream type 0x1B, got %x", pesFrame.StreamType)
|
||||
}
|
||||
|
||||
if pesFrame.Pts != 90000 {
|
||||
t.Errorf("Expected PTS %d, got %d", 90000, pesFrame.Pts)
|
||||
}
|
||||
|
||||
if pesFrame.Dts != 90000 {
|
||||
t.Errorf("Expected DTS %d, got %d", 90000, pesFrame.Dts)
|
||||
}
|
||||
|
||||
t.Logf("PES Frame: StreamType=%x, PTS=%d, DTS=%d", pesFrame.StreamType, pesFrame.Pts, pesFrame.Dts)
|
||||
})
|
||||
}
|
||||
|
||||
func TestReadPayload(t *testing.T) {
|
||||
// Test ReadPayload functionality
|
||||
t.Run("ReadPayload", func(t *testing.T) {
|
||||
// Create test data with payload length and payload
|
||||
testData := []byte{
|
||||
0x00, 0x05, // Payload length = 5 bytes
|
||||
0x01, 0x02, 0x03, 0x04, 0x05, // Payload data
|
||||
}
|
||||
|
||||
demuxer := &MpegPsDemuxer{}
|
||||
reader := util.NewBufReader(bytes.NewReader(testData))
|
||||
|
||||
payload, err := demuxer.ReadPayload(reader)
|
||||
if err != nil {
|
||||
t.Fatalf("ReadPayload failed: %v", err)
|
||||
}
|
||||
|
||||
if payload.Size != 5 {
|
||||
t.Errorf("Expected payload size 5, got %d", payload.Size)
|
||||
}
|
||||
|
||||
expectedPayload := []byte{0x01, 0x02, 0x03, 0x04, 0x05}
|
||||
if !bytes.Equal(payload.ToBytes(), expectedPayload) {
|
||||
t.Errorf("Expected payload %x, got %x", expectedPayload, payload.ToBytes())
|
||||
}
|
||||
|
||||
t.Logf("ReadPayload successful: %x", payload.ToBytes())
|
||||
})
|
||||
}
|
||||
|
||||
// TestMpegPSMuxerBasic covers the stand-alone building blocks of the muxer
// without a live subscriber: pack-header generation (plus an explicit
// stuffing-length byte), hand-built PSM generation, ReadPayload, and PES
// packet generation followed by re-parsing of a 5000-byte payload.
func TestMpegPSMuxerBasic(t *testing.T) {
	// Test MpegPSMuxer basic functionality
	t.Run("MuxBasic", func(t *testing.T) {

		// Test basic PS header generation without PlayBlock
		// This focuses on testing the header generation logic
		var outputBuffer util.Buffer = make([]byte, 0, 1024)
		outputBuffer.Reset()

		// Test PS header generation
		MuxPSHeader(&outputBuffer)

		// Add stuffing bytes as expected by the demuxer
		// The demuxer expects: 9 bytes + 1 stuffing length byte + stuffing bytes
		stuffingLength := byte(0x00) // No stuffing bytes
		outputBuffer.WriteByte(stuffingLength)

		// Verify PS header contains expected start code
		// (+1 accounts for the stuffing-length byte appended above)
		if len(outputBuffer) != PSPackHeaderSize+1 {
			t.Errorf("Expected PS header size %d, got %d", PSPackHeaderSize+1, len(outputBuffer))
		}

		// Check for PS start code
		if !bytes.Contains(outputBuffer, []byte{0x00, 0x00, 0x01, 0xBA}) {
			t.Error("PS header does not contain PS start code")
		}

		t.Logf("PS Header: %x", outputBuffer)
		t.Logf("PS Header size: %d bytes", len(outputBuffer))

		// Test PSM header generation
		var pesAudio, pesVideo *MpegpsPESFrame
		var elementary_stream_map_length uint16

		// Simulate audio stream (4 bytes per stream entry in the PSM map)
		hasAudio := true
		if hasAudio {
			elementary_stream_map_length += 4
			pesAudio = &MpegpsPESFrame{}
			pesAudio.StreamID = 0xC0   // MPEG audio
			pesAudio.StreamType = 0x0F // AAC
		}

		// Simulate video stream
		hasVideo := true
		if hasVideo {
			elementary_stream_map_length += 4
			pesVideo = &MpegpsPESFrame{}
			pesVideo.StreamID = 0xE0   // MPEG video
			pesVideo.StreamType = 0x1B // H.264
		}

		// Create PSM header with proper payload length
		psmData := make([]byte, 0, PSMHeaderSize+int(elementary_stream_map_length))
		psmBuffer := util.Buffer(psmData)
		psmBuffer.Reset()

		// Write PSM start code
		psmBuffer.WriteUint32(StartCodeMAP)
		// psm_length excludes the start code and the length field itself (6 bytes)
		psmLength := uint16(PSMHeaderSize + int(elementary_stream_map_length) - 6)
		psmBuffer.WriteUint16(psmLength) // psm_length
		psmBuffer.WriteByte(0xE0)        // current_next_indicator + reserved + psm_version
		psmBuffer.WriteByte(0xFF)        // reserved + marker
		psmBuffer.WriteUint16(0)         // program_stream_info_length

		psmBuffer.WriteUint16(elementary_stream_map_length)
		if pesAudio != nil {
			psmBuffer.WriteByte(pesAudio.StreamType) // stream_type
			psmBuffer.WriteByte(pesAudio.StreamID)   // elementary_stream_id
			psmBuffer.WriteUint16(0)                 // elementary_stream_info_length
		}
		if pesVideo != nil {
			psmBuffer.WriteByte(pesVideo.StreamType) // stream_type
			psmBuffer.WriteByte(pesVideo.StreamID)   // elementary_stream_id
			psmBuffer.WriteUint16(0)                 // elementary_stream_info_length
		}

		// Verify PSM header
		if len(psmBuffer) != PSMHeaderSize+int(elementary_stream_map_length) {
			t.Errorf("Expected PSM size %d, got %d", PSMHeaderSize+int(elementary_stream_map_length), len(psmBuffer))
		}

		// Check for PSM start code
		if !bytes.Contains(psmBuffer, []byte{0x00, 0x00, 0x01, 0xBC}) {
			t.Error("PSM header does not contain PSM start code")
		}

		t.Logf("PSM Header: %x", psmBuffer)
		t.Logf("PSM Header size: %d bytes", len(psmBuffer))

		// Test ReadPayload function directly
		t.Run("ReadPayload", func(t *testing.T) {
			// Create test payload data
			testPayload := []byte{0x01, 0x02, 0x03, 0x04, 0x05}

			// Create a packet with length prefix
			packetData := make([]byte, 0, 2+len(testPayload))
			packetData = append(packetData, byte(len(testPayload)>>8), byte(len(testPayload)))
			packetData = append(packetData, testPayload...)

			reader := util.NewBufReader(bytes.NewReader(packetData))
			demuxer := &MpegPsDemuxer{}

			// Test ReadPayload function
			payload, err := demuxer.ReadPayload(reader)
			if err != nil {
				t.Fatalf("ReadPayload failed: %v", err)
			}

			if payload.Size != len(testPayload) {
				t.Errorf("Expected payload size %d, got %d", len(testPayload), payload.Size)
			}

			if !bytes.Equal(payload.ToBytes(), testPayload) {
				t.Errorf("Expected payload %x, got %x", testPayload, payload.ToBytes())
			}

			t.Logf("ReadPayload test passed: %x", payload.ToBytes())
		})

		// Test basic demuxing with PS header only
		t.Run("PSHeader", func(t *testing.T) {
			// Create a simple test that just verifies the PS header structure
			// without trying to demux it (which expects more data)
			if len(outputBuffer) < 4 {
				t.Errorf("PS header too short: %d bytes", len(outputBuffer))
			}

			// Check that it starts with the correct start code
			if !bytes.HasPrefix(outputBuffer, []byte{0x00, 0x00, 0x01, 0xBA}) {
				t.Errorf("PS header does not start with correct start code: %x", outputBuffer[:4])
			}

			t.Logf("PS header structure test passed")
		})

		t.Logf("Basic mux/demux test completed successfully")
	})

	// Test basic PES packet generation without PlayBlock
	t.Run("PESGeneration", func(t *testing.T) {
		// Create a test that simulates PES packet generation
		// without requiring a full subscriber setup

		// Create test payload
		testPayload := make([]byte, 5000)
		for i := range testPayload {
			testPayload[i] = byte(i % 256)
		}

		// Create PES frame
		pesFrame := &MpegpsPESFrame{
			StreamType: 0x1B, // H.264
		}
		pesFrame.Pts = 90000
		pesFrame.Dts = 90000

		// Create allocator for testing
		allocator := util.NewScalableMemoryAllocator(1024 * 1024)
		packet := util.NewRecyclableMemory(allocator)

		// Write PES packet
		err := pesFrame.WritePESPacket(util.NewMemory(testPayload), &packet)
		if err != nil {
			t.Fatalf("WritePESPacket failed: %v", err)
		}

		// Verify packet was written
		packetData := packet.ToBytes()
		if len(packetData) == 0 {
			t.Fatal("No data was written to packet")
		}

		t.Logf("PES packet generated: %d bytes", len(packetData))
		t.Logf("Packet data (first 64 bytes): %x", packetData[:min(64, len(packetData))])

		// Verify PS header is present
		if !bytes.Contains(packetData, []byte{0x00, 0x00, 0x01, 0xBA}) {
			t.Error("PES packet does not contain PS start code")
		}

		// Test reading back the packet
		reader := util.NewBufReader(bytes.NewReader(packetData))

		// Skip PS header
		code, err := reader.ReadBE32(4)
		if err != nil {
			t.Fatalf("Failed to read start code: %v", err)
		}
		if code != StartCodePS {
			t.Errorf("Expected PS start code %x, got %x", StartCodePS, code)
		}

		// Skip PS header
		if err = reader.Skip(9); err != nil {
			t.Fatalf("Failed to skip PS header: %v", err)
		}
		psl, err := reader.ReadByte()
		if err != nil {
			t.Fatalf("Failed to read stuffing length: %v", err)
		}
		// The stuffing length is carried in the low 3 bits of the last
		// pack-header byte.
		psl &= 0x07
		if err = reader.Skip(int(psl)); err != nil {
			t.Fatalf("Failed to skip stuffing bytes: %v", err)
		}

		// Read PES packets directly by parsing the PES structure
		totalPayloadSize := 0
		packetCount := 0

		for reader.Buffered() > 0 {
			// Read PES packet start code (0x00000100 + stream_id)
			pesStartCode, err := reader.ReadBE32(4)
			if err != nil {
				if err == io.EOF {
					break
				}
				t.Fatalf("Failed to read PES start code: %v", err)
			}

			// Check if it's a PES packet (starts with 0x000001)
			if pesStartCode&0xFFFFFF00 != 0x00000100 {
				t.Errorf("Invalid PES start code: %x", pesStartCode)
				break
			}

			// // streamID := byte(pesStartCode & 0xFF)
			t.Logf("PES packet %d: stream_id=0x%02x", packetCount+1, pesStartCode&0xFF)

			// Read PES packet length
			pesLength, err := reader.ReadBE(2)
			if err != nil {
				t.Fatalf("Failed to read PES length: %v", err)
			}

			// Read PES header
			// Skip the first byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags1: %v", err)
			}

			// Skip the second byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags2: %v", err)
			}

			// Read header data length
			headerDataLength, err := reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES header data length: %v", err)
			}

			// Skip header data
			if err = reader.Skip(int(headerDataLength)); err != nil {
				t.Fatalf("Failed to skip PES header data: %v", err)
			}

			// Calculate payload size
			payloadSize := pesLength - 3 - int(headerDataLength) // 3 = flags1 + flags2 + headerDataLength
			if payloadSize > 0 {
				// Read payload data
				payload, err := reader.ReadBytes(payloadSize)
				if err != nil {
					t.Fatalf("Failed to read PES payload: %v", err)
				}

				totalPayloadSize += payload.Size
				t.Logf("PES packet %d: %d bytes payload", packetCount+1, payload.Size)
			}

			packetCount++
		}

		// Verify total payload size matches
		if totalPayloadSize != len(testPayload) {
			t.Errorf("Expected total payload size %d, got %d", len(testPayload), totalPayloadSize)
		}

		t.Logf("PES generation test completed successfully: %d packets, total %d bytes", packetCount, totalPayloadSize)
	})
}
|
||||
|
||||
// TestPESPacketWriteRead writes a 1000-byte payload as a PES packet
// (preceded by a PS pack header) and parses the result back, verifying the
// start codes and that the summed PES payload sizes equal the input length.
func TestPESPacketWriteRead(t *testing.T) {
	// Test PES packet writing and reading functionality
	t.Run("PESWriteRead", func(t *testing.T) {
		// Create test payload data
		testPayload := make([]byte, 1000)
		for i := range testPayload {
			testPayload[i] = byte(i % 256)
		}

		// Create PES frame
		pesFrame := &MpegpsPESFrame{
			StreamType: 0x1B, // H.264
		}
		pesFrame.Pts = 90000 // 1 second in 90kHz clock
		pesFrame.Dts = 90000

		// Create allocator for testing
		allocator := util.NewScalableMemoryAllocator(1024)
		packet := util.NewRecyclableMemory(allocator)

		// Write PES packet
		err := pesFrame.WritePESPacket(util.NewMemory(testPayload), &packet)
		if err != nil {
			t.Fatalf("WritePESPacket failed: %v", err)
		}

		// Verify that packet was written
		packetData := packet.ToBytes()
		if len(packetData) == 0 {
			t.Fatal("No data was written to packet")
		}

		t.Logf("PES packet written: %d bytes", len(packetData))
		t.Logf("Packet data (first 64 bytes): %x", packetData[:min(64, len(packetData))])

		// Verify PS header is present
		if !bytes.Contains(packetData, []byte{0x00, 0x00, 0x01, 0xBA}) {
			t.Error("PES packet does not contain PS start code")
		}

		// Now test reading the PES packet back
		reader := util.NewBufReader(bytes.NewReader(packetData))

		// Read and process the PS header
		code, err := reader.ReadBE32(4)
		if err != nil {
			t.Fatalf("Failed to read start code: %v", err)
		}
		if code != StartCodePS {
			t.Errorf("Expected PS start code %x, got %x", StartCodePS, code)
		}

		// Skip PS header (9 bytes + stuffing length)
		if err = reader.Skip(9); err != nil {
			t.Fatalf("Failed to skip PS header: %v", err)
		}
		psl, err := reader.ReadByte()
		if err != nil {
			t.Fatalf("Failed to read stuffing length: %v", err)
		}
		// Stuffing length lives in the low 3 bits of the last pack-header byte.
		psl &= 0x07
		if err = reader.Skip(int(psl)); err != nil {
			t.Fatalf("Failed to skip stuffing bytes: %v", err)
		}

		// Read PES packet directly by parsing the PES structure
		totalPayloadSize := 0
		packetCount := 0

		for reader.Buffered() > 0 {
			// Read PES packet start code (0x00000100 + stream_id)
			pesStartCode, err := reader.ReadBE32(4)
			if err != nil {
				if err == io.EOF {
					break
				}
				t.Fatalf("Failed to read PES start code: %v", err)
			}

			// Check if it's a PES packet (starts with 0x000001)
			if pesStartCode&0xFFFFFF00 != 0x00000100 {
				t.Errorf("Invalid PES start code: %x", pesStartCode)
				break
			}

			// // streamID := byte(pesStartCode & 0xFF)
			t.Logf("PES packet %d: stream_id=0x%02x", packetCount+1, pesStartCode&0xFF)

			// Read PES packet length
			pesLength, err := reader.ReadBE(2)
			if err != nil {
				t.Fatalf("Failed to read PES length: %v", err)
			}

			// Read PES header
			// Skip the first byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags1: %v", err)
			}

			// Skip the second byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags2: %v", err)
			}

			// Read header data length
			headerDataLength, err := reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES header data length: %v", err)
			}

			// Skip header data
			if err = reader.Skip(int(headerDataLength)); err != nil {
				t.Fatalf("Failed to skip PES header data: %v", err)
			}

			// Calculate payload size
			payloadSize := pesLength - 3 - int(headerDataLength) // 3 = flags1 + flags2 + headerDataLength
			if payloadSize > 0 {
				// Read payload data
				payload, err := reader.ReadBytes(payloadSize)
				if err != nil {
					t.Fatalf("Failed to read PES payload: %v", err)
				}

				totalPayloadSize += payload.Size
				t.Logf("PES packet %d: %d bytes payload", packetCount+1, payload.Size)
			}

			packetCount++
		}

		t.Logf("PES payload read: %d bytes", totalPayloadSize)

		// Verify payload size
		if totalPayloadSize != len(testPayload) {
			t.Errorf("Expected payload size %d, got %d", len(testPayload), totalPayloadSize)
		}

		// Note: We can't easily verify the content because the payload is fragmented across multiple PES packets
		// But we can verify the total size is correct

		t.Logf("PES packet write-read test completed successfully")
	})
}
|
||||
|
||||
// TestLargePESPacket feeds a 70000-byte payload (larger than the 16-bit PES
// length field allows) through WritePESPacket and verifies that it is
// fragmented into multiple PES packets whose payloads sum back to the
// original size.
func TestLargePESPacket(t *testing.T) {
	// Test large PES packet handling (payload > 65535 bytes)
	t.Run("LargePESPacket", func(t *testing.T) {
		// Create large test payload (exceeds 65535 bytes)
		largePayload := make([]byte, 70000) // 70KB payload
		for i := range largePayload {
			largePayload[i] = byte(i % 256)
		}

		// Create PES frame
		pesFrame := &MpegpsPESFrame{
			StreamType: 0x1B, // H.264
		}
		pesFrame.Pts = 180000 // 2 seconds in 90kHz clock
		pesFrame.Dts = 180000

		// Create allocator for testing
		allocator := util.NewScalableMemoryAllocator(1024 * 1024) // 1MB allocator
		packet := util.NewRecyclableMemory(allocator)

		// Write large PES packet
		t.Logf("Writing large PES packet with %d bytes payload", len(largePayload))
		err := pesFrame.WritePESPacket(util.NewMemory(largePayload), &packet)
		if err != nil {
			t.Fatalf("WritePESPacket failed for large payload: %v", err)
		}

		// Verify that packet was written
		packetData := packet.ToBytes()
		if len(packetData) == 0 {
			t.Fatal("No data was written to packet")
		}

		t.Logf("Large PES packet written: %d bytes", len(packetData))

		// Verify PS header is present
		if !bytes.Contains(packetData, []byte{0x00, 0x00, 0x01, 0xBA}) {
			t.Error("Large PES packet does not contain PS start code")
		}

		// Count number of PES packets (should be multiple due to size limitation)
		pesCount := 0
		reader := util.NewBufReader(bytes.NewReader(packetData))

		// Skip PS header
		code, err := reader.ReadBE32(4)
		if err != nil {
			t.Fatalf("Failed to read start code: %v", err)
		}
		if code != StartCodePS {
			t.Errorf("Expected PS start code %x, got %x", StartCodePS, code)
		}

		// Skip PS header
		if err = reader.Skip(9); err != nil {
			t.Fatalf("Failed to skip PS header: %v", err)
		}
		psl, err := reader.ReadByte()
		if err != nil {
			t.Fatalf("Failed to read stuffing length: %v", err)
		}
		// Stuffing length lives in the low 3 bits of the last pack-header byte.
		psl &= 0x07
		if err = reader.Skip(int(psl)); err != nil {
			t.Fatalf("Failed to skip stuffing bytes: %v", err)
		}

		// Read and count PES packets
		totalPayloadSize := 0

		for reader.Buffered() > 0 {
			// Read PES packet start code (0x00000100 + stream_id)
			pesStartCode, err := reader.ReadBE32(4)
			if err != nil {
				if err == io.EOF {
					break
				}
				t.Fatalf("Failed to read PES start code: %v", err)
			}

			// Check if it's a PES packet (starts with 0x000001)
			if pesStartCode&0xFFFFFF00 != 0x00000100 {
				t.Errorf("Invalid PES start code: %x", pesStartCode)
				break
			}

			// streamID := byte(pesStartCode & 0xFF)

			// Read PES packet length
			pesLength, err := reader.ReadBE(2)
			if err != nil {
				t.Fatalf("Failed to read PES length: %v", err)
			}

			// Read PES header
			// Skip the first byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags1: %v", err)
			}

			// Skip the second byte (flags)
			_, err = reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES flags2: %v", err)
			}

			// Read header data length
			headerDataLength, err := reader.ReadByte()
			if err != nil {
				t.Fatalf("Failed to read PES header data length: %v", err)
			}

			// Skip header data
			if err = reader.Skip(int(headerDataLength)); err != nil {
				t.Fatalf("Failed to skip PES header data: %v", err)
			}

			// Calculate payload size
			payloadSize := pesLength - 3 - int(headerDataLength) // 3 = flags1 + flags2 + headerDataLength
			if payloadSize > 0 {
				// Read payload data
				payload, err := reader.ReadBytes(payloadSize)
				if err != nil {
					t.Fatalf("Failed to read PES payload: %v", err)
				}

				totalPayloadSize += payload.Size
				t.Logf("PES packet %d: %d bytes payload", pesCount+1, payload.Size)
			}

			pesCount++
		}

		// Verify that we got multiple PES packets
		if pesCount < 2 {
			t.Errorf("Expected multiple PES packets for large payload, got %d", pesCount)
		}

		// Verify total payload size
		if totalPayloadSize != len(largePayload) {
			t.Errorf("Expected total payload size %d, got %d", len(largePayload), totalPayloadSize)
		}

		// Verify individual PES packet sizes don't exceed maximum
		maxPacketSize := MaxPESPayloadSize + PESHeaderMinSize
		if pesCount == 1 && len(packetData) > maxPacketSize {
			t.Errorf("Single PES packet exceeds maximum size: %d > %d", len(packetData), maxPacketSize)
		}

		t.Logf("Large PES packet test completed successfully: %d packets, total %d bytes", pesCount, totalPayloadSize)
	})
}
|
||||
|
||||
func TestPESPacketBoundaryConditions(t *testing.T) {
|
||||
// Test PES packet boundary conditions
|
||||
t.Run("BoundaryConditions", func(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
payloadSize int
|
||||
}{
|
||||
{"EmptyPayload", 0},
|
||||
{"SmallPayload", 1},
|
||||
{"ExactBoundary", MaxPESPayloadSize},
|
||||
{"JustOverBoundary", MaxPESPayloadSize + 1},
|
||||
{"MultipleBoundary", MaxPESPayloadSize * 2 + 100},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
// Create test payload
|
||||
testPayload := make([]byte, tc.payloadSize)
|
||||
for i := range testPayload {
|
||||
testPayload[i] = byte(i % 256)
|
||||
}
|
||||
|
||||
// Create PES frame
|
||||
pesFrame := &MpegpsPESFrame{
|
||||
StreamType: 0x1B, // H.264
|
||||
}
|
||||
pesFrame.Pts = uint64(tc.payloadSize) * 90 // Use payload size as PTS
|
||||
pesFrame.Dts = uint64(tc.payloadSize) * 90
|
||||
|
||||
// Create allocator for testing
|
||||
allocator := util.NewScalableMemoryAllocator(1024*1024)
|
||||
packet := util.NewRecyclableMemory(allocator)
|
||||
|
||||
// Write PES packet
|
||||
err := pesFrame.WritePESPacket(util.NewMemory(testPayload), &packet)
|
||||
if err != nil {
|
||||
t.Fatalf("WritePESPacket failed: %v", err)
|
||||
}
|
||||
|
||||
// Verify that packet was written
|
||||
packetData := packet.ToBytes()
|
||||
if len(packetData) == 0 && tc.payloadSize > 0 {
|
||||
t.Fatal("No data was written to packet for non-empty payload")
|
||||
}
|
||||
|
||||
t.Logf("%s: %d bytes payload -> %d bytes packet", tc.name, tc.payloadSize, len(packetData))
|
||||
|
||||
// For non-empty payloads, verify we can read them back
|
||||
if tc.payloadSize > 0 {
|
||||
reader := util.NewBufReader(bytes.NewReader(packetData))
|
||||
|
||||
// Skip PS header
|
||||
code, err := reader.ReadBE32(4)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read start code: %v", err)
|
||||
}
|
||||
if code != StartCodePS {
|
||||
t.Errorf("Expected PS start code %x, got %x", StartCodePS, code)
|
||||
}
|
||||
|
||||
// Skip PS header
|
||||
if err = reader.Skip(9); err != nil {
|
||||
t.Fatalf("Failed to skip PS header: %v", err)
|
||||
}
|
||||
psl, err := reader.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read stuffing length: %v", err)
|
||||
}
|
||||
psl &= 0x07
|
||||
if err = reader.Skip(int(psl)); err != nil {
|
||||
t.Fatalf("Failed to skip stuffing bytes: %v", err)
|
||||
}
|
||||
|
||||
// Read PES packets
|
||||
totalPayloadSize := 0
|
||||
packetCount := 0
|
||||
|
||||
for reader.Buffered() > 0 {
|
||||
// Read PES packet start code (0x00000100 + stream_id)
|
||||
pesStartCode, err := reader.ReadBE32(4)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
t.Fatalf("Failed to read PES start code: %v", err)
|
||||
}
|
||||
|
||||
// Check if it's a PES packet (starts with 0x000001)
|
||||
if pesStartCode&0xFFFFFF00 != 0x00000100 {
|
||||
t.Errorf("Invalid PES start code: %x", pesStartCode)
|
||||
break
|
||||
}
|
||||
|
||||
// // streamID := byte(pesStartCode & 0xFF)
|
||||
|
||||
// Read PES packet length
|
||||
pesLength, err := reader.ReadBE(2)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES length: %v", err)
|
||||
}
|
||||
|
||||
// Read PES header
|
||||
// Skip the first byte (flags)
|
||||
_, err = reader.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES flags1: %v", err)
|
||||
}
|
||||
|
||||
// Skip the second byte (flags)
|
||||
_, err = reader.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES flags2: %v", err)
|
||||
}
|
||||
|
||||
// Read header data length
|
||||
headerDataLength, err := reader.ReadByte()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES header data length: %v", err)
|
||||
}
|
||||
|
||||
// Skip header data
|
||||
if err = reader.Skip(int(headerDataLength)); err != nil {
|
||||
t.Fatalf("Failed to skip PES header data: %v", err)
|
||||
}
|
||||
|
||||
// Calculate payload size
|
||||
payloadSize := pesLength - 3 - int(headerDataLength) // 3 = flags1 + flags2 + headerDataLength
|
||||
if payloadSize > 0 {
|
||||
// Read payload data
|
||||
payload, err := reader.ReadBytes(payloadSize)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read PES payload: %v", err)
|
||||
}
|
||||
|
||||
totalPayloadSize += payload.Size
|
||||
}
|
||||
|
||||
packetCount++
|
||||
}
|
||||
|
||||
// Verify total payload size matches
|
||||
if totalPayloadSize != tc.payloadSize {
|
||||
t.Errorf("Expected total payload size %d, got %d", tc.payloadSize, totalPayloadSize)
|
||||
}
|
||||
|
||||
t.Logf("%s: Successfully read back %d PES packets", tc.name, packetCount)
|
||||
}
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
35
pkg/format/ps/pes.go
Normal file
35
pkg/format/ps/pes.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package mpegps
|
||||
|
||||
import (
|
||||
mpegts "m7s.live/v5/pkg/format/ts"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
type MpegpsPESFrame struct {
|
||||
StreamType byte // Stream type (e.g., video, audio)
|
||||
mpegts.MpegPESHeader
|
||||
}
|
||||
|
||||
func (frame *MpegpsPESFrame) WritePESPacket(payload util.Memory, allocator *util.RecyclableMemory) (err error) {
|
||||
frame.DataAlignmentIndicator = 1
|
||||
|
||||
pesReader := payload.NewReader()
|
||||
var outputMemory util.Buffer = allocator.NextN(PSPackHeaderSize)
|
||||
outputMemory.Reset()
|
||||
MuxPSHeader(&outputMemory)
|
||||
for pesReader.Length > 0 {
|
||||
currentPESPayload := min(pesReader.Length, MaxPESPayloadSize)
|
||||
var pesHeadItem util.Buffer
|
||||
pesHeadItem, err = frame.WritePESHeader(currentPESPayload)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
copy(allocator.NextN(pesHeadItem.Len()), pesHeadItem)
|
||||
// 申请输出缓冲
|
||||
outputMemory = allocator.NextN(currentPESPayload)
|
||||
pesReader.Read(outputMemory)
|
||||
frame.DataAlignmentIndicator = 0
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
131
pkg/format/raw.go
Normal file
131
pkg/format/raw.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package format
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/deepch/vdk/codec/h264parser"
|
||||
"github.com/deepch/vdk/codec/h265parser"
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
var _ pkg.IAVFrame = (*RawAudio)(nil)
|
||||
|
||||
type RawAudio struct {
|
||||
pkg.Sample
|
||||
}
|
||||
|
||||
func (r *RawAudio) GetSize() int {
|
||||
return r.Raw.(*util.Memory).Size
|
||||
}
|
||||
|
||||
func (r *RawAudio) Demux() error {
|
||||
r.Raw = &r.Memory
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RawAudio) Mux(from *pkg.Sample) (err error) {
|
||||
r.InitRecycleIndexes(0)
|
||||
r.Memory = *from.Raw.(*util.Memory)
|
||||
r.ICodecCtx = from.GetBase()
|
||||
return
|
||||
}
|
||||
|
||||
func (r *RawAudio) String() string {
|
||||
return fmt.Sprintf("RawAudio{FourCC: %s, Timestamp: %s, Size: %d}", r.FourCC(), r.Timestamp, r.Size)
|
||||
}
|
||||
|
||||
var _ pkg.IAVFrame = (*H26xFrame)(nil)
|
||||
|
||||
type H26xFrame struct {
|
||||
pkg.Sample
|
||||
}
|
||||
|
||||
func (h *H26xFrame) CheckCodecChange() (err error) {
|
||||
if h.ICodecCtx == nil {
|
||||
return pkg.ErrUnsupportCodec
|
||||
}
|
||||
var hasVideoFrame bool
|
||||
switch ctx := h.GetBase().(type) {
|
||||
case *codec.H264Ctx:
|
||||
var sps, pps []byte
|
||||
for nalu := range h.Raw.(*pkg.Nalus).RangePoint {
|
||||
switch codec.ParseH264NALUType(nalu.Buffers[0][0]) {
|
||||
case codec.NALU_SPS:
|
||||
sps = nalu.ToBytes()
|
||||
case codec.NALU_PPS:
|
||||
pps = nalu.ToBytes()
|
||||
case codec.NALU_IDR_Picture:
|
||||
h.IDR = true
|
||||
case codec.NALU_Non_IDR_Picture:
|
||||
hasVideoFrame = true
|
||||
}
|
||||
}
|
||||
if sps != nil && pps != nil {
|
||||
var codecData h264parser.CodecData
|
||||
codecData, err = h264parser.NewCodecDataFromSPSAndPPS(sps, pps)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !bytes.Equal(codecData.Record, ctx.Record) {
|
||||
h.ICodecCtx = &codec.H264Ctx{
|
||||
CodecData: codecData,
|
||||
}
|
||||
}
|
||||
}
|
||||
case *codec.H265Ctx:
|
||||
var vps, sps, pps []byte
|
||||
for nalu := range h.Raw.(*pkg.Nalus).RangePoint {
|
||||
switch codec.ParseH265NALUType(nalu.Buffers[0][0]) {
|
||||
case h265parser.NAL_UNIT_VPS:
|
||||
vps = nalu.ToBytes()
|
||||
case h265parser.NAL_UNIT_SPS:
|
||||
sps = nalu.ToBytes()
|
||||
case h265parser.NAL_UNIT_PPS:
|
||||
pps = nalu.ToBytes()
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_CRA:
|
||||
h.IDR = true
|
||||
case 1, 2, 3, 4, 5, 6, 7, 8, 9:
|
||||
hasVideoFrame = true
|
||||
}
|
||||
}
|
||||
if vps != nil && sps != nil && pps != nil {
|
||||
var codecData h265parser.CodecData
|
||||
codecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(vps, sps, pps)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if !bytes.Equal(codecData.Record, ctx.Record) {
|
||||
h.ICodecCtx = &codec.H265Ctx{
|
||||
CodecData: codecData,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Return ErrSkip if no video frames are present (only metadata NALUs)
|
||||
if !hasVideoFrame && !h.IDR {
|
||||
return pkg.ErrSkip
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *H26xFrame) GetSize() (ret int) {
|
||||
switch raw := r.Raw.(type) {
|
||||
case *pkg.Nalus:
|
||||
for nalu := range raw.RangePoint {
|
||||
ret += nalu.Size
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (h *H26xFrame) String() string {
|
||||
return fmt.Sprintf("H26xFrame{FourCC: %s, Timestamp: %s, CTS: %s}", h.FourCC, h.Timestamp, h.CTS)
|
||||
}
|
@@ -4,7 +4,11 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/format"
|
||||
"m7s.live/v5/pkg/util"
|
||||
//"sync"
|
||||
)
|
||||
@@ -101,22 +105,16 @@ const (
|
||||
//
|
||||
|
||||
type MpegTsStream struct {
|
||||
PAT MpegTsPAT // PAT表信息
|
||||
PMT MpegTsPMT // PMT表信息
|
||||
PESBuffer map[uint16]*MpegTsPESPacket
|
||||
PESChan chan *MpegTsPESPacket
|
||||
PAT MpegTsPAT // PAT表信息
|
||||
PMT MpegTsPMT // PMT表信息
|
||||
Publisher *m7s.Publisher
|
||||
Allocator *util.ScalableMemoryAllocator
|
||||
writer m7s.PublishWriter[*format.Mpeg2Audio, *VideoFrame]
|
||||
audioPID, videoPID, pmtPID uint16
|
||||
tsPacket [TS_PACKET_SIZE]byte
|
||||
}
|
||||
|
||||
// ios13818-1-CN.pdf 33/165
|
||||
//
|
||||
// TS
|
||||
//
|
||||
|
||||
// Packet == Header + Payload == 188 bytes
|
||||
type MpegTsPacket struct {
|
||||
Header MpegTsHeader
|
||||
Payload []byte
|
||||
}
|
||||
|
||||
// 前面32bit的数据即TS分组首部,它指出了这个分组的属性
|
||||
type MpegTsHeader struct {
|
||||
@@ -185,25 +183,6 @@ type MpegTsDescriptor struct {
|
||||
Data []byte
|
||||
}
|
||||
|
||||
func ReadTsPacket(r io.Reader) (packet MpegTsPacket, err error) {
|
||||
lr := &io.LimitedReader{R: r, N: TS_PACKET_SIZE}
|
||||
|
||||
// header
|
||||
packet.Header, err = ReadTsHeader(lr)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// payload
|
||||
packet.Payload = make([]byte, lr.N)
|
||||
_, err = lr.Read(packet.Payload)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func ReadTsHeader(r io.Reader) (header MpegTsHeader, err error) {
|
||||
var h uint32
|
||||
|
||||
@@ -365,7 +344,7 @@ func ReadTsHeader(r io.Reader) (header MpegTsHeader, err error) {
|
||||
// Discard 是一个 io.Writer,对它进行的任何 Write 调用都将无条件成功
|
||||
// 但是ioutil.Discard不记录copy得到的数值
|
||||
// 用于发送需要读取但不想存储的数据,目的是耗尽读取端的数据
|
||||
if _, err = io.CopyN(ioutil.Discard, lr, int64(lr.N)); err != nil {
|
||||
if _, err = io.CopyN(io.Discard, lr, int64(lr.N)); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -440,138 +419,96 @@ func WriteTsHeader(w io.Writer, header MpegTsHeader) (written int, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
//
|
||||
//func (s *MpegTsStream) TestWrite(fileName string) error {
|
||||
//
|
||||
// if fileName != "" {
|
||||
// file, err := os.Create(fileName)
|
||||
// if err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
// defer file.Close()
|
||||
//
|
||||
// patTsHeader := []byte{0x47, 0x40, 0x00, 0x10}
|
||||
//
|
||||
// if err := WritePATPacket(file, patTsHeader, *s.pat); err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
//
|
||||
// // TODO:这里的pid应该是由PAT给的
|
||||
// pmtTsHeader := []byte{0x47, 0x41, 0x00, 0x10}
|
||||
//
|
||||
// if err := WritePMTPacket(file, pmtTsHeader, *s.pmt); err != nil {
|
||||
// panic(err)
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// var videoFrame int
|
||||
// var audioFrame int
|
||||
// for {
|
||||
// tsPesPkt, ok := <-s.TsPesPktChan
|
||||
// if !ok {
|
||||
// fmt.Println("frame index, video , audio :", videoFrame, audioFrame)
|
||||
// break
|
||||
// }
|
||||
//
|
||||
// if tsPesPkt.PesPkt.Header.StreamID == STREAM_ID_AUDIO {
|
||||
// audioFrame++
|
||||
// }
|
||||
//
|
||||
// if tsPesPkt.PesPkt.Header.StreamID == STREAM_ID_VIDEO {
|
||||
// println(tsPesPkt.PesPkt.Header.Pts)
|
||||
// videoFrame++
|
||||
// }
|
||||
//
|
||||
// fmt.Sprintf("%s", tsPesPkt)
|
||||
//
|
||||
// // if err := WritePESPacket(file, tsPesPkt.TsPkt.Header, tsPesPkt.PesPkt); err != nil {
|
||||
// // return err
|
||||
// // }
|
||||
//
|
||||
// }
|
||||
//
|
||||
// return nil
|
||||
//}
|
||||
|
||||
func (s *MpegTsStream) ReadPAT(packet *MpegTsPacket, pr io.Reader) (err error) {
|
||||
// 首先找到PID==0x00的TS包(PAT)
|
||||
if PID_PAT == packet.Header.Pid {
|
||||
if len(packet.Payload) == 188 {
|
||||
pr = &util.Crc32Reader{R: pr, Crc32: 0xffffffff}
|
||||
}
|
||||
// Header + PSI + Paylod
|
||||
s.PAT, err = ReadPAT(pr)
|
||||
}
|
||||
return
|
||||
}
|
||||
func (s *MpegTsStream) ReadPMT(packet *MpegTsPacket, pr io.Reader) (err error) {
|
||||
// 在读取PAT中已经将所有频道节目信息(PMT_PID)保存了起来
|
||||
// 接着读取所有TS包里面的PID,找出PID==PMT_PID的TS包,就是PMT表
|
||||
for _, v := range s.PAT.Program {
|
||||
if v.ProgramMapPID == packet.Header.Pid {
|
||||
if len(packet.Payload) == 188 {
|
||||
pr = &util.Crc32Reader{R: pr, Crc32: 0xffffffff}
|
||||
}
|
||||
// Header + PSI + Paylod
|
||||
s.PMT, err = ReadPMT(pr)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
func (s *MpegTsStream) Feed(ts io.Reader) (err error) {
|
||||
writer := &s.writer
|
||||
var reader bytes.Reader
|
||||
var lr io.LimitedReader
|
||||
lr.R = &reader
|
||||
var tsHeader MpegTsHeader
|
||||
tsData := make([]byte, TS_PACKET_SIZE)
|
||||
for {
|
||||
_, err = io.ReadFull(ts, tsData)
|
||||
var pesHeader MpegPESHeader
|
||||
for !s.Publisher.IsStopped() {
|
||||
_, err = io.ReadFull(ts, s.tsPacket[:])
|
||||
if err == io.EOF {
|
||||
// 文件结尾 把最后面的数据发出去
|
||||
for _, pesPkt := range s.PESBuffer {
|
||||
if pesPkt != nil {
|
||||
s.PESChan <- pesPkt
|
||||
}
|
||||
}
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return
|
||||
}
|
||||
reader.Reset(tsData)
|
||||
reader.Reset(s.tsPacket[:])
|
||||
lr.N = TS_PACKET_SIZE
|
||||
if tsHeader, err = ReadTsHeader(&lr); err != nil {
|
||||
return
|
||||
}
|
||||
if tsHeader.Pid == PID_PAT {
|
||||
switch tsHeader.Pid {
|
||||
case PID_PAT:
|
||||
if s.PAT, err = ReadPAT(&lr); err != nil {
|
||||
return
|
||||
}
|
||||
s.pmtPID = s.PAT.Program[0].ProgramMapPID
|
||||
continue
|
||||
}
|
||||
if len(s.PMT.Stream) == 0 {
|
||||
for _, v := range s.PAT.Program {
|
||||
if v.ProgramMapPID == tsHeader.Pid {
|
||||
if s.PMT, err = ReadPMT(&lr); err != nil {
|
||||
return
|
||||
}
|
||||
for _, v := range s.PMT.Stream {
|
||||
s.PESBuffer[v.ElementaryPID] = nil
|
||||
}
|
||||
}
|
||||
case s.pmtPID:
|
||||
if len(s.PMT.Stream) != 0 {
|
||||
continue
|
||||
}
|
||||
} else if pesPkt, ok := s.PESBuffer[tsHeader.Pid]; ok {
|
||||
if tsHeader.PayloadUnitStartIndicator == 1 {
|
||||
if pesPkt != nil {
|
||||
s.PESChan <- pesPkt
|
||||
}
|
||||
pesPkt = &MpegTsPESPacket{}
|
||||
s.PESBuffer[tsHeader.Pid] = pesPkt
|
||||
if pesPkt.Header, err = ReadPESHeader(&lr); err != nil {
|
||||
return
|
||||
if s.PMT, err = ReadPMT(&lr); err != nil {
|
||||
return
|
||||
}
|
||||
for _, pmt := range s.PMT.Stream {
|
||||
switch pmt.StreamType {
|
||||
case STREAM_TYPE_H265:
|
||||
s.videoPID = pmt.ElementaryPID
|
||||
writer.PublishVideoWriter = m7s.NewPublishVideoWriter[*VideoFrame](s.Publisher, s.Allocator)
|
||||
writer.VideoFrame.ICodecCtx = &codec.H265Ctx{}
|
||||
case STREAM_TYPE_H264:
|
||||
s.videoPID = pmt.ElementaryPID
|
||||
writer.PublishVideoWriter = m7s.NewPublishVideoWriter[*VideoFrame](s.Publisher, s.Allocator)
|
||||
writer.VideoFrame.ICodecCtx = &codec.H264Ctx{}
|
||||
case STREAM_TYPE_AAC:
|
||||
s.audioPID = pmt.ElementaryPID
|
||||
writer.PublishAudioWriter = m7s.NewPublishAudioWriter[*format.Mpeg2Audio](s.Publisher, s.Allocator)
|
||||
writer.AudioFrame.ICodecCtx = &codec.AACCtx{}
|
||||
case STREAM_TYPE_G711A:
|
||||
s.audioPID = pmt.ElementaryPID
|
||||
writer.PublishAudioWriter = m7s.NewPublishAudioWriter[*format.Mpeg2Audio](s.Publisher, s.Allocator)
|
||||
writer.AudioFrame.ICodecCtx = codec.NewPCMACtx()
|
||||
case STREAM_TYPE_G711U:
|
||||
s.audioPID = pmt.ElementaryPID
|
||||
writer.PublishAudioWriter = m7s.NewPublishAudioWriter[*format.Mpeg2Audio](s.Publisher, s.Allocator)
|
||||
writer.AudioFrame.ICodecCtx = codec.NewPCMUCtx()
|
||||
}
|
||||
}
|
||||
io.Copy(&pesPkt.Payload, &lr)
|
||||
case s.audioPID:
|
||||
if tsHeader.PayloadUnitStartIndicator == 1 {
|
||||
if pesHeader, err = ReadPESHeader0(&lr); err != nil {
|
||||
return
|
||||
}
|
||||
if !s.Publisher.PubAudio {
|
||||
continue
|
||||
}
|
||||
if writer.AudioFrame.Size > 0 {
|
||||
if err = writer.NextAudio(); err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
writer.AudioFrame.SetDTS(time.Duration(pesHeader.Pts))
|
||||
}
|
||||
lr.Read(writer.AudioFrame.NextN(int(lr.N)))
|
||||
case s.videoPID:
|
||||
if tsHeader.PayloadUnitStartIndicator == 1 {
|
||||
if pesHeader, err = ReadPESHeader0(&lr); err != nil {
|
||||
return
|
||||
}
|
||||
if !s.Publisher.PubVideo {
|
||||
continue
|
||||
}
|
||||
if writer.VideoFrame.Size > 0 {
|
||||
if err = writer.NextVideo(); err != nil {
|
||||
continue
|
||||
}
|
||||
}
|
||||
writer.VideoFrame.SetDTS(time.Duration(pesHeader.Dts))
|
||||
writer.VideoFrame.SetPTS(time.Duration(pesHeader.Pts))
|
||||
|
||||
}
|
||||
lr.Read(writer.VideoFrame.NextN(int(lr.N)))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
@@ -2,39 +2,19 @@ package mpegts
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"m7s.live/v5/pkg/util"
|
||||
"net"
|
||||
)
|
||||
|
||||
// ios13818-1-CN.pdf 45/166
|
||||
//
|
||||
// PES
|
||||
//
|
||||
|
||||
// 每个传输流和节目流在逻辑上都是由 PES 包构造的
|
||||
type MpegTsPesStream struct {
|
||||
TsPkt MpegTsPacket
|
||||
PesPkt MpegTsPESPacket
|
||||
}
|
||||
|
||||
// PES--Packetized Elementary Streams (分组的ES),ES形成的分组称为PES分组,是用来传递ES的一种数据结构
|
||||
// 1110 xxxx 为视频流(0xE0)
|
||||
// 110x xxxx 为音频流(0xC0)
|
||||
type MpegTsPESPacket struct {
|
||||
Header MpegTsPESHeader
|
||||
Payload util.Buffer //从TS包中读取的数据
|
||||
Buffers net.Buffers //用于写TS包
|
||||
}
|
||||
|
||||
type MpegTsPESHeader struct {
|
||||
PacketStartCodePrefix uint32 // 24 bits 同跟随它的 stream_id 一起组成标识包起始端的包起始码.packet_start_code_prefix 为比特串"0000 0000 0000 0000 0000 0001"(0x000001)
|
||||
StreamID byte // 8 bits stream_id 指示基本流的类型和编号,如 stream_id 表 2-22 所定义的.传输流中,stream_id 可以设置为准确描述基本流类型的任何有效值,如表 2-22 所规定的.传输流中,基本流类型在 2.4.4 中所指示的节目特定信息中指定
|
||||
PesPacketLength uint16 // 16 bits 指示 PES 包中跟随该字段最后字节的字节数.0->指示 PES 包长度既未指示也未限定并且仅在这样的 PES 包中才被允许,该 PES 包的有效载荷由来自传输流包中所包含的视频基本流的字节组成
|
||||
|
||||
type MpegPESHeader struct {
|
||||
header [32]byte
|
||||
StreamID byte // 8 bits stream_id 指示基本流的类型和编号,如 stream_id 表 2-22 所定义的.传输流中,stream_id 可以设置为准确描述基本流类型的任何有效值,如表 2-22 所规定的.传输流中,基本流类型在 2.4.4 中所指示的节目特定信息中指定
|
||||
PesPacketLength uint16 // 16 bits 指示 PES 包中跟随该字段最后字节的字节数.0->指示 PES 包长度既未指示也未限定并且仅在这样的 PES 包中才被允许,该 PES 包的有效载荷由来自传输流包中所包含的视频基本流的字节组成
|
||||
MpegTsOptionalPESHeader
|
||||
|
||||
PayloadLength uint64 // 这个不是标准文档里面的字段,是自己添加的,方便计算
|
||||
}
|
||||
|
||||
// 可选的PES Header = MpegTsOptionalPESHeader + stuffing bytes(0xFF) m * 8
|
||||
@@ -99,23 +79,35 @@ type MpegTsOptionalPESHeader struct {
|
||||
// pts_dts_Flags == "11" -> PTS + DTS
|
||||
|
||||
type MpegtsPESFrame struct {
|
||||
Pid uint16
|
||||
IsKeyFrame bool
|
||||
ContinuityCounter byte
|
||||
ProgramClockReferenceBase uint64
|
||||
Pid uint16
|
||||
IsKeyFrame bool
|
||||
ContinuityCounter byte
|
||||
MpegPESHeader
|
||||
}
|
||||
|
||||
func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
|
||||
var flags uint8
|
||||
var length uint
|
||||
func CreatePESWriters() (pesAudio, pesVideo MpegtsPESFrame) {
|
||||
pesAudio, pesVideo = MpegtsPESFrame{
|
||||
Pid: PID_AUDIO,
|
||||
}, MpegtsPESFrame{
|
||||
Pid: PID_VIDEO,
|
||||
}
|
||||
pesAudio.DataAlignmentIndicator = 1
|
||||
pesVideo.DataAlignmentIndicator = 1
|
||||
pesAudio.StreamID = STREAM_ID_AUDIO
|
||||
pesVideo.StreamID = STREAM_ID_VIDEO
|
||||
return
|
||||
}
|
||||
|
||||
func ReadPESHeader0(r *io.LimitedReader) (header MpegPESHeader, err error) {
|
||||
var length uint
|
||||
var packetStartCodePrefix uint32
|
||||
// packetStartCodePrefix(24) (0x000001)
|
||||
header.PacketStartCodePrefix, err = util.ReadByteToUint24(r, true)
|
||||
packetStartCodePrefix, err = util.ReadByteToUint24(r, true)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if header.PacketStartCodePrefix != 0x0000001 {
|
||||
if packetStartCodePrefix != 0x0000001 {
|
||||
err = errors.New("read PacketStartCodePrefix is not 0x0000001")
|
||||
return
|
||||
}
|
||||
@@ -141,18 +133,27 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
|
||||
if length == 0 {
|
||||
length = 1 << 31
|
||||
}
|
||||
var header1 MpegPESHeader
|
||||
header1, err = ReadPESHeader(r)
|
||||
if err == nil {
|
||||
if header.PesPacketLength == 0 {
|
||||
header1.PesPacketLength = uint16(r.N)
|
||||
}
|
||||
header1.StreamID = header.StreamID
|
||||
return header1, nil
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// lrPacket 和 lrHeader 位置指针是在同一位置的
|
||||
lrPacket := &io.LimitedReader{R: r, N: int64(length)}
|
||||
lrHeader := lrPacket
|
||||
|
||||
func ReadPESHeader(lrPacket *io.LimitedReader) (header MpegPESHeader, err error) {
|
||||
var flags uint8
|
||||
// constTen(2)
|
||||
// pes_ScramblingControl(2)
|
||||
// pes_Priority(1)
|
||||
// dataAlignmentIndicator(1)
|
||||
// copyright(1)
|
||||
// originalOrCopy(1)
|
||||
flags, err = util.ReadByteToUint8(lrHeader)
|
||||
flags, err = util.ReadByteToUint8(lrPacket)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -171,7 +172,7 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
|
||||
// additionalCopyInfoFlag(1)
|
||||
// pes_CRCFlag(1)
|
||||
// pes_ExtensionFlag(1)
|
||||
flags, err = util.ReadByteToUint8(lrHeader)
|
||||
flags, err = util.ReadByteToUint8(lrPacket)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
@@ -185,14 +186,14 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
|
||||
header.PesExtensionFlag = flags & 0x01
|
||||
|
||||
// pes_HeaderDataLength(8)
|
||||
header.PesHeaderDataLength, err = util.ReadByteToUint8(lrHeader)
|
||||
header.PesHeaderDataLength, err = util.ReadByteToUint8(lrPacket)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
length = uint(header.PesHeaderDataLength)
|
||||
length := uint(header.PesHeaderDataLength)
|
||||
|
||||
lrHeader = &io.LimitedReader{R: lrHeader, N: int64(length)}
|
||||
lrHeader := &io.LimitedReader{R: lrPacket, N: int64(length)}
|
||||
|
||||
// 00 -> PES 包头中既无任何PTS 字段也无任何DTS 字段存在
|
||||
// 10 -> PES 包头中PTS 字段存在
|
||||
@@ -219,6 +220,8 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
|
||||
}
|
||||
|
||||
header.Dts = util.GetPtsDts(dts)
|
||||
} else {
|
||||
header.Dts = header.Pts
|
||||
}
|
||||
|
||||
// reserved(2) + escr_Base1(3) + marker_bit(1) +
|
||||
@@ -336,48 +339,31 @@ func ReadPESHeader(r io.Reader) (header MpegTsPESHeader, err error) {
|
||||
}
|
||||
}
|
||||
|
||||
// 2的16次方,16个字节
|
||||
if lrPacket.N < 65536 {
|
||||
// 这里得到的其实是负载长度,因为已经偏移过了Header部分.
|
||||
//header.pes_PacketLength = uint16(lrPacket.N)
|
||||
header.PayloadLength = uint64(lrPacket.N)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func WritePESHeader(w io.Writer, header MpegTsPESHeader) (written int, err error) {
|
||||
if header.PacketStartCodePrefix != 0x0000001 {
|
||||
err = errors.New("write PacketStartCodePrefix is not 0x0000001")
|
||||
return
|
||||
func (header *MpegPESHeader) WritePESHeader(esSize int) (w util.Buffer, err error) {
|
||||
if header.DataAlignmentIndicator == 1 {
|
||||
if header.Pts == header.Dts {
|
||||
header.PtsDtsFlags = 0x80
|
||||
header.PesHeaderDataLength = 5
|
||||
} else {
|
||||
header.PtsDtsFlags = 0xC0
|
||||
header.PesHeaderDataLength = 10
|
||||
}
|
||||
} else {
|
||||
header.PtsDtsFlags = 0
|
||||
header.PesHeaderDataLength = 0
|
||||
}
|
||||
|
||||
// packetStartCodePrefix(24) (0x000001)
|
||||
if err = util.WriteUint24ToByte(w, header.PacketStartCodePrefix, true); err != nil {
|
||||
return
|
||||
pktLength := esSize + int(header.PesHeaderDataLength) + 3
|
||||
if pktLength > 0xffff {
|
||||
pktLength = 0
|
||||
}
|
||||
header.PesPacketLength = uint16(pktLength)
|
||||
|
||||
written += 3
|
||||
|
||||
// streamID(8)
|
||||
if err = util.WriteUint8ToByte(w, header.StreamID); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
written += 1
|
||||
|
||||
// pes_PacketLength(16)
|
||||
// PES包长度可能为0,这个时候,需要自己去算
|
||||
// 0 <= len <= 65535
|
||||
if err = util.WriteUint16ToByte(w, header.PesPacketLength, true); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
//fmt.Println("Length :", payloadLength)
|
||||
//fmt.Println("PES Packet Length :", header.pes_PacketLength)
|
||||
|
||||
written += 2
|
||||
|
||||
w = header.header[:0]
|
||||
w.WriteUint32(0x00000100 | uint32(header.StreamID))
|
||||
w.WriteUint16(header.PesPacketLength)
|
||||
// constTen(2)
|
||||
// pes_ScramblingControl(2)
|
||||
// pes_Priority(1)
|
||||
@@ -385,18 +371,9 @@ func WritePESHeader(w io.Writer, header MpegTsPESHeader) (written int, err error
|
||||
// copyright(1)
|
||||
// originalOrCopy(1)
|
||||
// 1000 0001
|
||||
if header.ConstTen != 0x80 {
|
||||
err = errors.New("pes header ConstTen != 0x80")
|
||||
return
|
||||
}
|
||||
|
||||
flags := header.ConstTen | header.PesScramblingControl | header.PesPriority | header.DataAlignmentIndicator | header.Copyright | header.OriginalOrCopy
|
||||
if err = util.WriteUint8ToByte(w, flags); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
written += 1
|
||||
|
||||
flags := 0x80 | header.PesScramblingControl | header.PesPriority | header.DataAlignmentIndicator | header.Copyright | header.OriginalOrCopy
|
||||
w.WriteByte(flags)
|
||||
// pts_dts_Flags(2)
|
||||
// escr_Flag(1)
|
||||
// es_RateFlag(1)
|
||||
@@ -405,19 +382,8 @@ func WritePESHeader(w io.Writer, header MpegTsPESHeader) (written int, err error
|
||||
// pes_CRCFlag(1)
|
||||
// pes_ExtensionFlag(1)
|
||||
sevenFlags := header.PtsDtsFlags | header.EscrFlag | header.EsRateFlag | header.DsmTrickModeFlag | header.AdditionalCopyInfoFlag | header.PesCRCFlag | header.PesExtensionFlag
|
||||
if err = util.WriteUint8ToByte(w, sevenFlags); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
written += 1
|
||||
|
||||
// pes_HeaderDataLength(8)
|
||||
if err = util.WriteUint8ToByte(w, header.PesHeaderDataLength); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
written += 1
|
||||
|
||||
w.WriteByte(sevenFlags)
|
||||
w.WriteByte(header.PesHeaderDataLength)
|
||||
// PtsDtsFlags == 192(11), 128(10), 64(01)禁用, 0(00)
|
||||
if header.PtsDtsFlags&0x80 != 0 {
|
||||
// PTS和DTS都存在(11),否则只有PTS(10)
|
||||
@@ -425,30 +391,121 @@ func WritePESHeader(w io.Writer, header MpegTsPESHeader) (written int, err error
|
||||
// 11:PTS和DTS
|
||||
// PTS(33) + 4 + 3
|
||||
pts := util.PutPtsDts(header.Pts) | 3<<36
|
||||
if err = util.WriteUint40ToByte(w, pts, true); err != nil {
|
||||
if err = util.WriteUint40ToByte(&w, pts, true); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
written += 5
|
||||
|
||||
// DTS(33) + 4 + 3
|
||||
dts := util.PutPtsDts(header.Dts) | 1<<36
|
||||
if err = util.WriteUint40ToByte(w, dts, true); err != nil {
|
||||
if err = util.WriteUint40ToByte(&w, dts, true); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
written += 5
|
||||
} else {
|
||||
// 10:只有PTS
|
||||
// PTS(33) + 4 + 3
|
||||
pts := util.PutPtsDts(header.Pts) | 2<<36
|
||||
if err = util.WriteUint40ToByte(w, pts, true); err != nil {
|
||||
if err = util.WriteUint40ToByte(&w, pts, true); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
written += 5
|
||||
func (frame *MpegtsPESFrame) WritePESPacket(payload util.Memory, allocator *util.RecyclableMemory) (err error) {
|
||||
var pesHeadItem util.Buffer
|
||||
pesHeadItem, err = frame.WritePESHeader(payload.Size)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
pesBuffers := util.NewMemory(pesHeadItem)
|
||||
payload.Range(pesBuffers.PushOne)
|
||||
pesPktLength := int64(pesBuffers.Size)
|
||||
pesReader := pesBuffers.NewReader()
|
||||
var tsHeaderLength int
|
||||
for i := 0; pesPktLength > 0; i++ {
|
||||
var buffer util.Buffer = allocator.NextN(TS_PACKET_SIZE)
|
||||
bwTsHeader := &buffer
|
||||
bwTsHeader.Reset()
|
||||
tsHeader := MpegTsHeader{
|
||||
SyncByte: 0x47,
|
||||
TransportErrorIndicator: 0,
|
||||
PayloadUnitStartIndicator: 0,
|
||||
TransportPriority: 0,
|
||||
Pid: frame.Pid,
|
||||
TransportScramblingControl: 0,
|
||||
AdaptionFieldControl: 1,
|
||||
ContinuityCounter: frame.ContinuityCounter,
|
||||
}
|
||||
|
||||
frame.ContinuityCounter++
|
||||
frame.ContinuityCounter = frame.ContinuityCounter % 16
|
||||
|
||||
// 每一帧的开头,当含有pcr的时候,包含调整字段
|
||||
if i == 0 {
|
||||
tsHeader.PayloadUnitStartIndicator = 1
|
||||
|
||||
// 当PCRFlag为1的时候,包含调整字段
|
||||
if frame.IsKeyFrame {
|
||||
tsHeader.AdaptionFieldControl = 0x03
|
||||
tsHeader.AdaptationFieldLength = 7
|
||||
tsHeader.PCRFlag = 1
|
||||
tsHeader.RandomAccessIndicator = 1
|
||||
tsHeader.ProgramClockReferenceBase = frame.Pts
|
||||
}
|
||||
}
|
||||
|
||||
// 每一帧的结尾,当不满足188个字节的时候,包含调整字段
|
||||
if pesPktLength < TS_PACKET_SIZE-4 {
|
||||
var tsStuffingLength uint8
|
||||
|
||||
tsHeader.AdaptionFieldControl = 0x03
|
||||
tsHeader.AdaptationFieldLength = uint8(TS_PACKET_SIZE - 4 - 1 - pesPktLength)
|
||||
|
||||
// TODO:如果第一个TS包也是最后一个TS包,是不是需要考虑这个情况?
|
||||
// MpegTsHeader最少占6个字节.(前4个走字节 + AdaptationFieldLength(1 byte) + 3个指示符5个标志位(1 byte))
|
||||
if tsHeader.AdaptationFieldLength >= 1 {
|
||||
tsStuffingLength = tsHeader.AdaptationFieldLength - 1
|
||||
} else {
|
||||
tsStuffingLength = 0
|
||||
}
|
||||
// error
|
||||
tsHeaderLength, err = WriteTsHeader(bwTsHeader, tsHeader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if tsStuffingLength > 0 {
|
||||
if _, err = bwTsHeader.Write(Stuffing[:tsStuffingLength]); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
tsHeaderLength += int(tsStuffingLength)
|
||||
} else {
|
||||
|
||||
tsHeaderLength, err = WriteTsHeader(bwTsHeader, tsHeader)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
tsPayloadLength := TS_PACKET_SIZE - tsHeaderLength
|
||||
|
||||
//fmt.Println("tsPayloadLength :", tsPayloadLength)
|
||||
|
||||
// 这里不断的减少PES包
|
||||
written, _ := io.CopyN(bwTsHeader, &pesReader, int64(tsPayloadLength))
|
||||
// tmp := tsHeaderByte[3] << 2
|
||||
// tmp = tmp >> 6
|
||||
// if tmp == 2 {
|
||||
// fmt.Println("fuck you mother.")
|
||||
// }
|
||||
pesPktLength -= written
|
||||
tsPktByteLen := bwTsHeader.Len()
|
||||
|
||||
if tsPktByteLen != TS_PACKET_SIZE {
|
||||
err = errors.New(fmt.Sprintf("%s, packet size=%d", "TS_PACKET_SIZE != 188,", tsPktByteLen))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
return nil
|
||||
}
|
20
pkg/format/ts/video.go
Normal file
20
pkg/format/ts/video.go
Normal file
@@ -0,0 +1,20 @@
|
||||
package mpegts
|
||||
|
||||
import (
|
||||
"m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/format"
|
||||
)
|
||||
|
||||
type VideoFrame struct {
|
||||
format.AnnexB
|
||||
}
|
||||
|
||||
func (a *VideoFrame) Mux(fromBase *pkg.Sample) (err error) {
|
||||
if fromBase.GetBase().FourCC().Is(codec.FourCC_H265) {
|
||||
a.PushOne(codec.AudNalu)
|
||||
} else {
|
||||
a.PushOne(codec.NALU_AUD_BYTE)
|
||||
}
|
||||
return a.AnnexB.Mux(fromBase)
|
||||
}
|
236
pkg/raw.go
236
pkg/raw.go
@@ -1,236 +0,0 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"time"
|
||||
|
||||
"github.com/deepch/vdk/codec/aacparser"
|
||||
"github.com/deepch/vdk/codec/h264parser"
|
||||
"github.com/deepch/vdk/codec/h265parser"
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
var _ IAVFrame = (*RawAudio)(nil)
|
||||
|
||||
type RawAudio struct {
|
||||
codec.FourCC
|
||||
Timestamp time.Duration
|
||||
util.RecyclableMemory
|
||||
}
|
||||
|
||||
func (r *RawAudio) Parse(track *AVTrack) (err error) {
|
||||
if track.ICodecCtx == nil {
|
||||
switch r.FourCC {
|
||||
case codec.FourCC_MP4A:
|
||||
ctx := &codec.AACCtx{}
|
||||
ctx.CodecData, err = aacparser.NewCodecDataFromMPEG4AudioConfigBytes(r.ToBytes())
|
||||
track.ICodecCtx = ctx
|
||||
case codec.FourCC_ALAW:
|
||||
track.ICodecCtx = &codec.PCMACtx{
|
||||
AudioCtx: codec.AudioCtx{
|
||||
SampleRate: 8000,
|
||||
Channels: 1,
|
||||
SampleSize: 8,
|
||||
},
|
||||
}
|
||||
case codec.FourCC_ULAW:
|
||||
track.ICodecCtx = &codec.PCMUCtx{
|
||||
AudioCtx: codec.AudioCtx{
|
||||
SampleRate: 8000,
|
||||
Channels: 1,
|
||||
SampleSize: 8,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *RawAudio) ConvertCtx(ctx codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) {
|
||||
c := ctx.GetBase()
|
||||
if c.FourCC().Is(codec.FourCC_MP4A) {
|
||||
seq := &RawAudio{
|
||||
FourCC: codec.FourCC_MP4A,
|
||||
Timestamp: r.Timestamp,
|
||||
}
|
||||
seq.SetAllocator(r.GetAllocator())
|
||||
seq.Memory.Append(c.GetRecord())
|
||||
return c, seq, nil
|
||||
}
|
||||
return c, nil, nil
|
||||
}
|
||||
|
||||
func (r *RawAudio) Demux(ctx codec.ICodecCtx) (any, error) {
|
||||
return r.Memory, nil
|
||||
}
|
||||
|
||||
func (r *RawAudio) Mux(ctx codec.ICodecCtx, frame *AVFrame) {
|
||||
r.InitRecycleIndexes(0)
|
||||
r.FourCC = ctx.FourCC()
|
||||
r.Memory = frame.Raw.(util.Memory)
|
||||
r.Timestamp = frame.Timestamp
|
||||
}
|
||||
|
||||
func (r *RawAudio) GetTimestamp() time.Duration {
|
||||
return r.Timestamp
|
||||
}
|
||||
|
||||
func (r *RawAudio) GetCTS() time.Duration {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *RawAudio) GetSize() int {
|
||||
return r.Size
|
||||
}
|
||||
|
||||
func (r *RawAudio) String() string {
|
||||
return fmt.Sprintf("RawAudio{FourCC: %s, Timestamp: %s, Size: %d}", r.FourCC, r.Timestamp, r.Size)
|
||||
}
|
||||
|
||||
func (r *RawAudio) Dump(b byte, writer io.Writer) {
|
||||
//TODO implement me
|
||||
panic("implement me")
|
||||
}
|
||||
|
||||
var _ IAVFrame = (*H26xFrame)(nil)
|
||||
|
||||
type H26xFrame struct {
|
||||
codec.FourCC
|
||||
Timestamp time.Duration
|
||||
CTS time.Duration
|
||||
Nalus
|
||||
util.RecyclableMemory
|
||||
}
|
||||
|
||||
func (h *H26xFrame) Parse(track *AVTrack) (err error) {
|
||||
var hasVideoFrame bool
|
||||
|
||||
switch h.FourCC {
|
||||
case codec.FourCC_H264:
|
||||
var ctx *codec.H264Ctx
|
||||
if track.ICodecCtx != nil {
|
||||
ctx = track.ICodecCtx.GetBase().(*codec.H264Ctx)
|
||||
}
|
||||
for _, nalu := range h.Nalus {
|
||||
switch codec.ParseH264NALUType(nalu.Buffers[0][0]) {
|
||||
case h264parser.NALU_SPS:
|
||||
ctx = &codec.H264Ctx{}
|
||||
track.ICodecCtx = ctx
|
||||
ctx.RecordInfo.SPS = [][]byte{nalu.ToBytes()}
|
||||
if ctx.SPSInfo, err = h264parser.ParseSPS(ctx.SPS()); err != nil {
|
||||
return
|
||||
}
|
||||
case h264parser.NALU_PPS:
|
||||
ctx.RecordInfo.PPS = [][]byte{nalu.ToBytes()}
|
||||
ctx.CodecData, err = h264parser.NewCodecDataFromSPSAndPPS(ctx.SPS(), ctx.PPS())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
case codec.NALU_IDR_Picture:
|
||||
track.Value.IDR = true
|
||||
hasVideoFrame = true
|
||||
case codec.NALU_Non_IDR_Picture:
|
||||
hasVideoFrame = true
|
||||
}
|
||||
}
|
||||
case codec.FourCC_H265:
|
||||
var ctx *codec.H265Ctx
|
||||
if track.ICodecCtx != nil {
|
||||
ctx = track.ICodecCtx.GetBase().(*codec.H265Ctx)
|
||||
}
|
||||
for _, nalu := range h.Nalus {
|
||||
switch codec.ParseH265NALUType(nalu.Buffers[0][0]) {
|
||||
case h265parser.NAL_UNIT_VPS:
|
||||
ctx = &codec.H265Ctx{}
|
||||
ctx.RecordInfo.VPS = [][]byte{nalu.ToBytes()}
|
||||
track.ICodecCtx = ctx
|
||||
case h265parser.NAL_UNIT_SPS:
|
||||
ctx.RecordInfo.SPS = [][]byte{nalu.ToBytes()}
|
||||
if ctx.SPSInfo, err = h265parser.ParseSPS(ctx.SPS()); err != nil {
|
||||
return
|
||||
}
|
||||
case h265parser.NAL_UNIT_PPS:
|
||||
ctx.RecordInfo.PPS = [][]byte{nalu.ToBytes()}
|
||||
ctx.CodecData, err = h265parser.NewCodecDataFromVPSAndSPSAndPPS(ctx.VPS(), ctx.SPS(), ctx.PPS())
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_BLA_W_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_BLA_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_IDR_N_LP,
|
||||
h265parser.NAL_UNIT_CODED_SLICE_CRA:
|
||||
track.Value.IDR = true
|
||||
hasVideoFrame = true
|
||||
case 0, 1, 2, 3, 4, 5, 6, 7, 8, 9:
|
||||
hasVideoFrame = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Return ErrSkip if no video frames are present (only metadata NALUs)
|
||||
if !hasVideoFrame {
|
||||
return ErrSkip
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (h *H26xFrame) ConvertCtx(ctx codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error) {
|
||||
switch c := ctx.GetBase().(type) {
|
||||
case *codec.H264Ctx:
|
||||
return c, &H26xFrame{
|
||||
FourCC: codec.FourCC_H264,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory(c.SPS()),
|
||||
util.NewMemory(c.PPS()),
|
||||
},
|
||||
}, nil
|
||||
case *codec.H265Ctx:
|
||||
return c, &H26xFrame{
|
||||
FourCC: codec.FourCC_H265,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory(c.VPS()),
|
||||
util.NewMemory(c.SPS()),
|
||||
util.NewMemory(c.PPS()),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
return ctx.GetBase(), nil, nil
|
||||
}
|
||||
|
||||
func (h *H26xFrame) Demux(ctx codec.ICodecCtx) (any, error) {
|
||||
return h.Nalus, nil
|
||||
}
|
||||
|
||||
func (h *H26xFrame) Mux(ctx codec.ICodecCtx, frame *AVFrame) {
|
||||
h.FourCC = ctx.FourCC()
|
||||
h.Nalus = frame.Raw.(Nalus)
|
||||
h.Timestamp = frame.Timestamp
|
||||
h.CTS = frame.CTS
|
||||
}
|
||||
|
||||
func (h *H26xFrame) GetTimestamp() time.Duration {
|
||||
return h.Timestamp
|
||||
}
|
||||
|
||||
func (h *H26xFrame) GetCTS() time.Duration {
|
||||
return h.CTS
|
||||
}
|
||||
|
||||
func (h *H26xFrame) GetSize() int {
|
||||
var size int
|
||||
for _, nalu := range h.Nalus {
|
||||
size += nalu.Size
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
func (h *H26xFrame) String() string {
|
||||
return fmt.Sprintf("H26xFrame{FourCC: %s, Timestamp: %s, CTS: %s}", h.FourCC, h.Timestamp, h.CTS)
|
||||
}
|
||||
|
||||
func (h *H26xFrame) Dump(b byte, writer io.Writer) {
|
||||
//TODO implement me
|
||||
panic("implement me")
|
||||
}
|
157
pkg/raw_test.go
157
pkg/raw_test.go
@@ -1,157 +0,0 @@
|
||||
package pkg
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"m7s.live/v5/pkg/codec"
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
func TestH26xFrame_Parse_VideoFrameDetection(t *testing.T) {
|
||||
// Test H264 IDR Picture (should not skip)
|
||||
t.Run("H264_IDR_Picture", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H264,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x65}), // IDR Picture NALU type
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
err := frame.Parse(track)
|
||||
if err == ErrSkip {
|
||||
t.Error("Expected H264 IDR frame to not be skipped, but got ErrSkip")
|
||||
}
|
||||
if !track.Value.IDR {
|
||||
t.Error("Expected IDR flag to be set for H264 IDR frame")
|
||||
}
|
||||
})
|
||||
|
||||
// Test H264 Non-IDR Picture (should not skip)
|
||||
t.Run("H264_Non_IDR_Picture", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H264,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x21}), // Non-IDR Picture NALU type
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
err := frame.Parse(track)
|
||||
if err == ErrSkip {
|
||||
t.Error("Expected H264 Non-IDR frame to not be skipped, but got ErrSkip")
|
||||
}
|
||||
})
|
||||
|
||||
// Test H264 metadata only (should skip)
|
||||
t.Run("H264_SPS_Only", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H264,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x67}), // SPS NALU type
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
err := frame.Parse(track)
|
||||
if err != ErrSkip {
|
||||
t.Errorf("Expected H264 SPS-only frame to be skipped, but got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test H264 PPS only (should skip)
|
||||
t.Run("H264_PPS_Only", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H264,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x68}), // PPS NALU type
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
err := frame.Parse(track)
|
||||
if err != ErrSkip {
|
||||
t.Errorf("Expected H264 PPS-only frame to be skipped, but got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test H265 IDR slice (should not skip)
|
||||
t.Run("H265_IDR_Slice", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H265,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x4E, 0x01}), // IDR_W_RADL slice type (19 << 1 = 38 = 0x26, so first byte should be 0x4C, but let's use a simpler approach)
|
||||
// Using NAL_UNIT_CODED_SLICE_IDR_W_RADL which should be type 19
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
|
||||
// Let's use the correct byte pattern for H265 IDR slice
|
||||
// NAL_UNIT_CODED_SLICE_IDR_W_RADL = 19
|
||||
// H265 header: (type << 1) | layer_id_bit
|
||||
idrSliceByte := byte(19 << 1) // 19 * 2 = 38 = 0x26
|
||||
frame.Nalus[0] = util.NewMemory([]byte{idrSliceByte})
|
||||
|
||||
err := frame.Parse(track)
|
||||
if err == ErrSkip {
|
||||
t.Error("Expected H265 IDR slice to not be skipped, but got ErrSkip")
|
||||
}
|
||||
if !track.Value.IDR {
|
||||
t.Error("Expected IDR flag to be set for H265 IDR slice")
|
||||
}
|
||||
})
|
||||
|
||||
// Test H265 metadata only (should skip)
|
||||
t.Run("H265_VPS_Only", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H265,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x40, 0x01}), // VPS NALU type (32 << 1 = 64 = 0x40)
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
err := frame.Parse(track)
|
||||
if err != ErrSkip {
|
||||
t.Errorf("Expected H265 VPS-only frame to be skipped, but got: %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Test mixed H264 frame with SPS and IDR (should not skip)
|
||||
t.Run("H264_Mixed_SPS_And_IDR", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H264,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x67}), // SPS NALU type
|
||||
util.NewMemory([]byte{0x65}), // IDR Picture NALU type
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
err := frame.Parse(track)
|
||||
if err == ErrSkip {
|
||||
t.Error("Expected H264 mixed SPS+IDR frame to not be skipped, but got ErrSkip")
|
||||
}
|
||||
if !track.Value.IDR {
|
||||
t.Error("Expected IDR flag to be set for H264 mixed frame with IDR")
|
||||
}
|
||||
})
|
||||
|
||||
// Test mixed H265 frame with VPS and IDR (should not skip)
|
||||
t.Run("H265_Mixed_VPS_And_IDR", func(t *testing.T) {
|
||||
frame := &H26xFrame{
|
||||
FourCC: codec.FourCC_H265,
|
||||
Nalus: []util.Memory{
|
||||
util.NewMemory([]byte{0x40, 0x01}), // VPS NALU type (32 << 1)
|
||||
util.NewMemory([]byte{0x4C, 0x01}), // IDR_W_RADL slice type (19 << 1)
|
||||
},
|
||||
}
|
||||
track := &AVTrack{}
|
||||
|
||||
// Fix the IDR slice byte for H265
|
||||
idrSliceByte := byte(19 << 1) // NAL_UNIT_CODED_SLICE_IDR_W_RADL = 19
|
||||
frame.Nalus[1] = util.NewMemory([]byte{idrSliceByte, 0x01})
|
||||
|
||||
err := frame.Parse(track)
|
||||
if err == ErrSkip {
|
||||
t.Error("Expected H265 mixed VPS+IDR frame to not be skipped, but got ErrSkip")
|
||||
}
|
||||
if !track.Value.IDR {
|
||||
t.Error("Expected IDR flag to be set for H265 mixed frame with IDR")
|
||||
}
|
||||
})
|
||||
}
|
@@ -3,6 +3,7 @@ package pkg
|
||||
import (
|
||||
"log/slog"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5/pkg/task"
|
||||
@@ -21,6 +22,7 @@ type RingWriter struct {
|
||||
Size int
|
||||
LastValue *AVFrame
|
||||
SLogger *slog.Logger
|
||||
status atomic.Int32 // 0: init, 1: writing, 2: disposed
|
||||
}
|
||||
|
||||
func NewRingWriter(sizeRange util.Range[int]) (rb *RingWriter) {
|
||||
@@ -90,7 +92,9 @@ func (rb *RingWriter) reduce(size int) {
|
||||
|
||||
func (rb *RingWriter) Dispose() {
|
||||
rb.SLogger.Debug("dispose")
|
||||
rb.Value.Ready()
|
||||
if rb.status.Add(-1) == -1 { // normal dispose
|
||||
rb.Value.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
func (rb *RingWriter) GetIDR() *util.Ring[AVFrame] {
|
||||
@@ -185,18 +189,70 @@ func (rb *RingWriter) Step() (normal bool) {
|
||||
|
||||
rb.LastValue = &rb.Value
|
||||
nextSeq := rb.LastValue.Sequence + 1
|
||||
if normal = next.Value.StartWrite(); normal {
|
||||
next.Value.Reset()
|
||||
rb.Ring = next
|
||||
} else {
|
||||
rb.reduce(1) //抛弃还有订阅者的节点
|
||||
rb.Ring = rb.glow(1, "refill") //补充一个新节点
|
||||
normal = rb.Value.StartWrite()
|
||||
if !normal {
|
||||
panic("RingWriter.Step")
|
||||
|
||||
/*
|
||||
|
||||
sequenceDiagram
|
||||
autonumber
|
||||
participant Caller as Caller
|
||||
participant RW as RingWriter
|
||||
participant Val as AVFrame.Value
|
||||
|
||||
Note over RW: status initial = 0 (idle)
|
||||
|
||||
Caller->>RW: Step()
|
||||
activate RW
|
||||
RW->>RW: status.Add(1) (0→1)
|
||||
alt entered writing (result == 1)
|
||||
Note over RW: writing
|
||||
RW->>Val: StartWrite()
|
||||
RW->>Val: Reset()
|
||||
opt Dispose during write
|
||||
Caller->>RW: Dispose()
|
||||
RW->>RW: status.Add(-1) (1→0)
|
||||
end
|
||||
RW->>RW: status.Add(-1) at end of Step
|
||||
alt returns 0 (write completed)
|
||||
RW->>Val: Ready()
|
||||
else returns -1 (disposed during write)
|
||||
RW->>Val: Unlock()
|
||||
end
|
||||
else not entered
|
||||
Note over RW: Step aborted (already disposed/busy)
|
||||
end
|
||||
deactivate RW
|
||||
|
||||
Caller->>RW: Dispose()
|
||||
activate RW
|
||||
RW->>RW: status.Add(-1)
|
||||
alt returns -1 (idle dispose)
|
||||
RW->>Val: Unlock()
|
||||
else returns 0 (dispose during write)
|
||||
Note over RW: Unlock will occur at Step end (no Ready)
|
||||
end
|
||||
deactivate RW
|
||||
|
||||
Note over RW: States: -1 (disposed), 0 (idle), 1 (writing)
|
||||
|
||||
*/
|
||||
if rb.status.Add(1) == 1 {
|
||||
if normal = next.Value.StartWrite(); normal {
|
||||
next.Value.Reset()
|
||||
rb.Ring = next
|
||||
} else {
|
||||
rb.reduce(1) //抛弃还有订阅者的节点
|
||||
rb.Ring = rb.glow(1, "refill") //补充一个新节点
|
||||
normal = rb.Value.StartWrite()
|
||||
if !normal {
|
||||
panic("RingWriter.Step")
|
||||
}
|
||||
}
|
||||
rb.Value.Sequence = nextSeq
|
||||
if rb.status.Add(-1) == 0 {
|
||||
rb.LastValue.Ready()
|
||||
} else {
|
||||
rb.Value.Unlock()
|
||||
}
|
||||
}
|
||||
rb.Value.Sequence = nextSeq
|
||||
rb.LastValue.Ready()
|
||||
return
|
||||
}
|
||||
|
@@ -5,6 +5,8 @@ import (
|
||||
"log/slog"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
func TestRing(t *testing.T) {
|
||||
@@ -13,7 +15,7 @@ func TestRing(t *testing.T) {
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Second*5)
|
||||
go t.Run("writer", func(t *testing.T) {
|
||||
for i := 0; ctx.Err() == nil; i++ {
|
||||
w.Value.Raw = i
|
||||
w.Value.Raw = &util.Memory{}
|
||||
normal := w.Step()
|
||||
t.Log("write", i, normal)
|
||||
time.Sleep(time.Millisecond * 50)
|
||||
@@ -76,7 +78,7 @@ func BenchmarkRing(b *testing.B) {
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Second*5)
|
||||
go func() {
|
||||
for i := 0; ctx.Err() == nil; i++ {
|
||||
w.Value.Raw = i
|
||||
w.Value.Raw = &util.Memory{}
|
||||
w.Step()
|
||||
time.Sleep(time.Millisecond * 50)
|
||||
}
|
||||
|
21
pkg/steps.go
Normal file
21
pkg/steps.go
Normal file
@@ -0,0 +1,21 @@
|
||||
package pkg
|
||||
|
||||
// StepName is a typed alias for all workflow step identifiers.
|
||||
type StepName string
|
||||
|
||||
// StepDef defines a step with typed name and description.
|
||||
type StepDef struct {
|
||||
Name StepName
|
||||
Description string
|
||||
}
|
||||
|
||||
// Standard, cross-plugin step name constants for pull/publish workflows.
|
||||
// Plugin-specific step names should be defined in their respective plugin packages.
|
||||
const (
|
||||
StepPublish StepName = "publish"
|
||||
StepURLParsing StepName = "url_parsing"
|
||||
StepConnection StepName = "connection"
|
||||
StepHandshake StepName = "handshake"
|
||||
StepParsing StepName = "parsing"
|
||||
StepStreaming StepName = "streaming"
|
||||
)
|
59
pkg/task/README.md
Normal file
59
pkg/task/README.md
Normal file
@@ -0,0 +1,59 @@
|
||||
# 任务系统概要
|
||||
|
||||
# 任务的启动
|
||||
|
||||
任务通过调用父任务的 AddTask 来启动,此时会进入队列中等待启动,父任务的 EventLoop 会接受到子任务,然后调用子任务的 Start 方法进行启动操作
|
||||
|
||||
## EventLoop 的初始化
|
||||
为了节省资源,EventLoop 在没有子任务时不会创建协程,一直等到有子任务时才会创建,并且如果这个子任务也是一个空的 Job(即没有 Start、Run、Go)则仍然不会创建协程。
|
||||
|
||||
## EventLoop 停止
|
||||
为了节省资源,当 EventLoop 中没有待执行的子任务时,需要退出协程。EventLoop 会在以下情况退出:
|
||||
|
||||
1. 没有待处理的任务且没有活跃的子任务,且父任务的 keepalive() 返回 false
|
||||
2. EventLoop 的状态被设置为停止状态(-1)
|
||||
|
||||
# 任务的停止
|
||||
|
||||
## 主动停止某个任务
|
||||
|
||||
调用任务的 Stop 方法即可停止某个任务,此时该任务会由其父任务的 eventLoop 检测到 context 取消信号然后开始执行任务的 dispose 来进行销毁
|
||||
|
||||
## 任务的意外退出
|
||||
|
||||
当任务的 Run 返回错误,或者 context 被取消时,任务会退出,最终流程会同主动停止一样
|
||||
|
||||
## 父任务停止
|
||||
|
||||
当父任务停止并销毁时,会按照以下步骤处理子任务:
|
||||
|
||||
### 步骤
|
||||
|
||||
1. **设置 EventLoop 的状态为停止状态**:调用 `stop()` 方法设置 status = -1,防止继续添加子任务
|
||||
2. **激活 EventLoop 处理剩余任务**:调用 `active()` 方法,即使状态为 -1 也能处理剩余的子任务
|
||||
3. **停止所有子任务**:调用所有子任务的 Stop 方法
|
||||
4. **等待子任务销毁完成**:等待 EventLoop 处理完所有子任务的销毁工作
|
||||
|
||||
### 设计要点
|
||||
|
||||
- EventLoop 的 `active()` 方法允许在状态为 -1 时调用,以确保剩余的子任务能被正确处理
|
||||
- 使用互斥锁保护状态转换,避免竞态条件
|
||||
- 先停止再处理剩余任务,确保不会添加新的子任务
|
||||
|
||||
## 竞态条件处理
|
||||
|
||||
为了确保任务系统的线程安全,我们采取了以下措施:
|
||||
|
||||
### 状态管理
|
||||
- 使用 `sync.RWMutex` 保护 EventLoop 的状态转换
|
||||
- `add()` 方法使用读锁检查状态,防止在停止后添加新任务
|
||||
- `stop()` 方法使用写锁设置状态,确保原子性
|
||||
|
||||
### EventLoop 生命周期
|
||||
- EventLoop 只有在状态从 0(ready)转换到 1(running)时才启动新的 goroutine
|
||||
- 即使状态为 -1(stopped),`active()` 方法仍可被调用以处理剩余任务
|
||||
- 使用 `hasPending` 标志和互斥锁跟踪待处理任务,避免频繁检查 channel 长度
|
||||
|
||||
### 任务添加
|
||||
- 添加任务时会检查 EventLoop 状态,如果已停止则返回 `ErrDisposed`
|
||||
- 使用 `pendingMux` 保护 `hasPending` 标志,避免竞态条件
|
@@ -1,34 +0,0 @@
|
||||
package task
|
||||
|
||||
type CallBackTask struct {
|
||||
Task
|
||||
startHandler func() error
|
||||
disposeHandler func()
|
||||
}
|
||||
|
||||
func (t *CallBackTask) GetTaskType() TaskType {
|
||||
return TASK_TYPE_CALL
|
||||
}
|
||||
|
||||
func (t *CallBackTask) Start() error {
|
||||
return t.startHandler()
|
||||
}
|
||||
|
||||
func (t *CallBackTask) Dispose() {
|
||||
if t.disposeHandler != nil {
|
||||
t.disposeHandler()
|
||||
}
|
||||
}
|
||||
|
||||
func CreateTaskByCallBack(start func() error, dispose func()) *CallBackTask {
|
||||
var task CallBackTask
|
||||
task.startHandler = func() error {
|
||||
err := start()
|
||||
if err == nil && dispose == nil {
|
||||
err = ErrTaskComplete
|
||||
}
|
||||
return err
|
||||
}
|
||||
task.disposeHandler = dispose
|
||||
return &task
|
||||
}
|
@@ -42,6 +42,9 @@ func (t *TickTask) GetTickInterval() time.Duration {
|
||||
func (t *TickTask) Start() (err error) {
|
||||
t.Ticker = time.NewTicker(t.handler.(ITickTask).GetTickInterval())
|
||||
t.SignalChan = t.Ticker.C
|
||||
t.OnStop(func() {
|
||||
t.Ticker.Reset(time.Millisecond)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
|
167
pkg/task/event_loop.go
Normal file
167
pkg/task/event_loop.go
Normal file
@@ -0,0 +1,167 @@
|
||||
package task
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"reflect"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
type Singleton[T comparable] struct {
|
||||
instance atomic.Value
|
||||
mux sync.Mutex
|
||||
}
|
||||
|
||||
func (s *Singleton[T]) Load() T {
|
||||
return s.instance.Load().(T)
|
||||
}
|
||||
|
||||
func (s *Singleton[T]) Get(newF func() T) T {
|
||||
ch := s.instance.Load() //fast
|
||||
if ch == nil { // slow
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
if ch = s.instance.Load(); ch == nil {
|
||||
ch = newF()
|
||||
s.instance.Store(ch)
|
||||
}
|
||||
}
|
||||
return ch.(T)
|
||||
}
|
||||
|
||||
type EventLoop struct {
|
||||
cases []reflect.SelectCase
|
||||
children []ITask
|
||||
addSub Singleton[chan any]
|
||||
running atomic.Bool
|
||||
}
|
||||
|
||||
func (e *EventLoop) getInput() chan any {
|
||||
return e.addSub.Get(func() chan any {
|
||||
return make(chan any, 20)
|
||||
})
|
||||
}
|
||||
|
||||
func (e *EventLoop) active(mt *Job) {
|
||||
if mt.parent != nil {
|
||||
mt.parent.eventLoop.active(mt.parent)
|
||||
}
|
||||
if e.running.CompareAndSwap(false, true) {
|
||||
go e.run(mt)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *EventLoop) add(mt *Job, sub any) (err error) {
|
||||
shouldActive := true
|
||||
switch sub.(type) {
|
||||
case TaskStarter, TaskBlock, TaskGo:
|
||||
case IJob:
|
||||
shouldActive = false
|
||||
}
|
||||
select {
|
||||
case e.getInput() <- sub:
|
||||
if shouldActive || mt.IsStopped() {
|
||||
e.active(mt)
|
||||
}
|
||||
return nil
|
||||
default:
|
||||
return ErrTooManyChildren
|
||||
}
|
||||
}
|
||||
|
||||
func (e *EventLoop) run(mt *Job) {
|
||||
mt.Debug("event loop start", "jobId", mt.GetTaskID(), "type", mt.GetOwnerType())
|
||||
ch := e.getInput()
|
||||
e.cases = []reflect.SelectCase{{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(ch)}}
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
mt.Error("job panic", "err", err, "stack", string(debug.Stack()))
|
||||
if !ThrowPanic {
|
||||
mt.Stop(errors.Join(err.(error), ErrPanic))
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
mt.Debug("event loop exit", "jobId", mt.GetTaskID(), "type", mt.GetOwnerType())
|
||||
if !mt.handler.keepalive() {
|
||||
if mt.blocked != nil {
|
||||
mt.Stop(errors.Join(mt.blocked.StopReason(), ErrAutoStop))
|
||||
} else {
|
||||
mt.Stop(ErrAutoStop)
|
||||
}
|
||||
}
|
||||
mt.blocked = nil
|
||||
}()
|
||||
|
||||
// Main event loop - only exit when no more events AND no children
|
||||
for {
|
||||
if len(ch) == 0 && len(e.children) == 0 {
|
||||
if e.running.CompareAndSwap(true, false) {
|
||||
if len(ch) > 0 { // if add before running set to false
|
||||
e.active(mt)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
mt.blocked = nil
|
||||
if chosen, rev, ok := reflect.Select(e.cases); chosen == 0 {
|
||||
if !ok {
|
||||
mt.Debug("job addSub channel closed, exiting", "taskId", mt.GetTaskID())
|
||||
mt.Stop(ErrAutoStop)
|
||||
return
|
||||
}
|
||||
switch v := rev.Interface().(type) {
|
||||
case func():
|
||||
v()
|
||||
case ITask:
|
||||
if len(e.cases) >= 65535 {
|
||||
mt.Warn("task children too many, may cause performance issue", "count", len(e.cases), "taskId", mt.GetTaskID(), "taskType", mt.GetTaskType(), "ownerType", mt.GetOwnerType())
|
||||
v.Stop(ErrTooManyChildren)
|
||||
continue
|
||||
}
|
||||
if mt.blocked = v; v.start() {
|
||||
e.cases = append(e.cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(v.GetSignal())})
|
||||
e.children = append(e.children, v)
|
||||
mt.onChildStart(v)
|
||||
} else {
|
||||
mt.removeChild(v)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
taskIndex := chosen - 1
|
||||
child := e.children[taskIndex]
|
||||
mt.blocked = child
|
||||
switch tt := mt.blocked.(type) {
|
||||
case IChannelTask:
|
||||
if tt.IsStopped() {
|
||||
switch ttt := tt.(type) {
|
||||
case ITickTask:
|
||||
ttt.GetTicker().Stop()
|
||||
}
|
||||
mt.onChildDispose(child)
|
||||
mt.removeChild(child)
|
||||
e.children = slices.Delete(e.children, taskIndex, taskIndex+1)
|
||||
e.cases = slices.Delete(e.cases, chosen, chosen+1)
|
||||
} else {
|
||||
tt.Tick(rev.Interface())
|
||||
}
|
||||
default:
|
||||
if !ok {
|
||||
if mt.onChildDispose(child); child.checkRetry(child.StopReason()) {
|
||||
if child.reset(); child.start() {
|
||||
e.cases[chosen].Chan = reflect.ValueOf(child.GetSignal())
|
||||
mt.onChildStart(child)
|
||||
continue
|
||||
}
|
||||
}
|
||||
mt.removeChild(child)
|
||||
e.children = slices.Delete(e.children, taskIndex, taskIndex+1)
|
||||
e.cases = slices.Delete(e.cases, chosen, chosen+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
261
pkg/task/job.go
261
pkg/task/job.go
@@ -2,13 +2,9 @@ package task
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log/slog"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@@ -32,15 +28,12 @@ func GetNextTaskID() uint32 {
|
||||
// Job include tasks
|
||||
type Job struct {
|
||||
Task
|
||||
cases []reflect.SelectCase
|
||||
addSub chan ITask
|
||||
children []ITask
|
||||
lazyRun sync.Once
|
||||
eventLoopLock sync.Mutex
|
||||
childrenDisposed chan struct{}
|
||||
children sync.Map
|
||||
descendantsDisposeListeners []func(ITask)
|
||||
descendantsStartListeners []func(ITask)
|
||||
blocked ITask
|
||||
eventLoop EventLoop
|
||||
Size atomic.Int32
|
||||
}
|
||||
|
||||
func (*Job) GetTaskType() TaskType {
|
||||
@@ -55,19 +48,18 @@ func (mt *Job) Blocked() ITask {
|
||||
return mt.blocked
|
||||
}
|
||||
|
||||
func (mt *Job) waitChildrenDispose() {
|
||||
blocked := mt.blocked
|
||||
defer func() {
|
||||
// 忽略由于在任务关闭过程中可能存在竞态条件,当父任务关闭时子任务可能已经被释放。
|
||||
if err := recover(); err != nil {
|
||||
mt.Debug("waitChildrenDispose panic", "err", err)
|
||||
}
|
||||
mt.addSub <- nil
|
||||
<-mt.childrenDisposed
|
||||
}()
|
||||
if blocked != nil {
|
||||
blocked.Stop(mt.StopReason())
|
||||
}
|
||||
func (mt *Job) EventLoopRunning() bool {
|
||||
return mt.eventLoop.running.Load()
|
||||
}
|
||||
|
||||
func (mt *Job) waitChildrenDispose(stopReason error) {
|
||||
mt.eventLoop.active(mt)
|
||||
mt.children.Range(func(key, value any) bool {
|
||||
child := value.(ITask)
|
||||
child.Stop(stopReason)
|
||||
child.WaitStopped()
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (mt *Job) OnDescendantsDispose(listener func(ITask)) {
|
||||
@@ -84,12 +76,21 @@ func (mt *Job) onDescendantsDispose(descendants ITask) {
|
||||
}
|
||||
|
||||
func (mt *Job) onChildDispose(child ITask) {
|
||||
if child.GetTaskType() != TASK_TYPE_CALL || child.GetOwnerType() != "CallBack" {
|
||||
mt.onDescendantsDispose(child)
|
||||
}
|
||||
mt.onDescendantsDispose(child)
|
||||
child.dispose()
|
||||
}
|
||||
|
||||
func (mt *Job) removeChild(child ITask) {
|
||||
value, loaded := mt.children.LoadAndDelete(child.getKey())
|
||||
if loaded {
|
||||
if value != child {
|
||||
panic("remove child")
|
||||
}
|
||||
remains := mt.Size.Add(-1)
|
||||
mt.Debug("remove child", "id", child.GetTaskID(), "remains", remains)
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *Job) OnDescendantsStart(listener func(ITask)) {
|
||||
mt.descendantsStartListeners = append(mt.descendantsStartListeners, listener)
|
||||
}
|
||||
@@ -104,166 +105,98 @@ func (mt *Job) onDescendantsStart(descendants ITask) {
|
||||
}
|
||||
|
||||
func (mt *Job) onChildStart(child ITask) {
|
||||
if child.GetTaskType() != TASK_TYPE_CALL || child.GetOwnerType() != "CallBack" {
|
||||
mt.onDescendantsStart(child)
|
||||
}
|
||||
mt.onDescendantsStart(child)
|
||||
}
|
||||
|
||||
func (mt *Job) RangeSubTask(callback func(task ITask) bool) {
|
||||
for _, task := range mt.children {
|
||||
callback(task)
|
||||
}
|
||||
mt.children.Range(func(key, value any) bool {
|
||||
callback(value.(ITask))
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (mt *Job) AddDependTask(t ITask, opt ...any) (task *Task) {
|
||||
mt.Depend(t)
|
||||
t.Using(mt)
|
||||
opt = append(opt, 1)
|
||||
return mt.AddTask(t, opt...)
|
||||
}
|
||||
|
||||
func (mt *Job) AddTask(t ITask, opt ...any) (task *Task) {
|
||||
if task = t.GetTask(); t != task.handler { // first add
|
||||
for _, o := range opt {
|
||||
switch v := o.(type) {
|
||||
case context.Context:
|
||||
task.parentCtx = v
|
||||
case Description:
|
||||
task.SetDescriptions(v)
|
||||
case RetryConfig:
|
||||
task.retry = v
|
||||
case *slog.Logger:
|
||||
task.Logger = v
|
||||
}
|
||||
}
|
||||
task.parent = mt
|
||||
task.handler = t
|
||||
switch t.(type) {
|
||||
case TaskStarter, TaskBlock, TaskGo:
|
||||
// need start now
|
||||
case IJob:
|
||||
// lazy start
|
||||
return
|
||||
func (mt *Job) initContext(task *Task, opt ...any) {
|
||||
callDepth := 2
|
||||
for _, o := range opt {
|
||||
switch v := o.(type) {
|
||||
case context.Context:
|
||||
task.parentCtx = v
|
||||
case Description:
|
||||
task.SetDescriptions(v)
|
||||
case RetryConfig:
|
||||
task.retry = v
|
||||
case *slog.Logger:
|
||||
task.Logger = v
|
||||
case int:
|
||||
callDepth += v
|
||||
}
|
||||
}
|
||||
_, file, line, ok := runtime.Caller(1)
|
||||
|
||||
_, file, line, ok := runtime.Caller(callDepth)
|
||||
if ok {
|
||||
task.StartReason = fmt.Sprintf("%s:%d", strings.TrimPrefix(file, sourceFilePathPrefix), line)
|
||||
}
|
||||
|
||||
mt.lazyRun.Do(func() {
|
||||
if mt.eventLoopLock.TryLock() {
|
||||
defer mt.eventLoopLock.Unlock()
|
||||
if mt.parent != nil && mt.Context == nil {
|
||||
mt.parent.AddTask(mt.handler) // second add, lazy start
|
||||
}
|
||||
mt.childrenDisposed = make(chan struct{})
|
||||
mt.addSub = make(chan ITask, 20)
|
||||
go mt.run()
|
||||
}
|
||||
})
|
||||
if task.Context == nil {
|
||||
if task.parentCtx == nil {
|
||||
task.parentCtx = mt.Context
|
||||
}
|
||||
task.level = mt.level + 1
|
||||
if task.ID == 0 {
|
||||
task.ID = GetNextTaskID()
|
||||
}
|
||||
task.Context, task.CancelCauseFunc = context.WithCancelCause(task.parentCtx)
|
||||
task.startup = util.NewPromise(task.Context)
|
||||
task.shutdown = util.NewPromise(context.Background())
|
||||
task.handler = t
|
||||
if task.Logger == nil {
|
||||
task.Logger = mt.Logger
|
||||
}
|
||||
task.parent = mt
|
||||
if task.parentCtx == nil {
|
||||
task.parentCtx = mt.Context
|
||||
}
|
||||
task.level = mt.level + 1
|
||||
if task.ID == 0 {
|
||||
task.ID = GetNextTaskID()
|
||||
}
|
||||
task.Context, task.CancelCauseFunc = context.WithCancelCause(task.parentCtx)
|
||||
task.startup = util.NewPromise(task.Context)
|
||||
task.shutdown = util.NewPromise(context.Background())
|
||||
if task.Logger == nil {
|
||||
task.Logger = mt.Logger
|
||||
}
|
||||
}
|
||||
|
||||
func (mt *Job) AddTask(t ITask, opt ...any) (task *Task) {
|
||||
task = t.GetTask()
|
||||
task.handler = t
|
||||
mt.initContext(task, opt...)
|
||||
if mt.IsStopped() {
|
||||
task.startup.Reject(mt.StopReason())
|
||||
return
|
||||
}
|
||||
if len(mt.addSub) > 10 {
|
||||
mt.Warn("task wait list too many", "count", len(mt.addSub), "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType(), "parent", mt.GetOwnerType())
|
||||
actual, loaded := mt.children.LoadOrStore(t.getKey(), t)
|
||||
if loaded {
|
||||
task.startup.Reject(ExistTaskError{
|
||||
Task: actual.(ITask),
|
||||
})
|
||||
return
|
||||
}
|
||||
mt.addSub <- t
|
||||
var err error
|
||||
defer func() {
|
||||
if err != nil {
|
||||
mt.children.Delete(t.getKey())
|
||||
task.startup.Reject(err)
|
||||
}
|
||||
}()
|
||||
if err = mt.eventLoop.add(mt, t); err != nil {
|
||||
return
|
||||
}
|
||||
if mt.IsStopped() {
|
||||
err = mt.StopReason()
|
||||
return
|
||||
}
|
||||
remains := mt.Size.Add(1)
|
||||
mt.Debug("child added", "id", task.ID, "remains", remains)
|
||||
return
|
||||
}
|
||||
|
||||
func (mt *Job) Call(callback func() error, args ...any) {
|
||||
mt.Post(callback, args...).WaitStarted()
|
||||
}
|
||||
|
||||
func (mt *Job) Post(callback func() error, args ...any) *Task {
|
||||
task := CreateTaskByCallBack(callback, nil)
|
||||
if len(args) > 0 {
|
||||
task.SetDescription(OwnerTypeKey, args[0])
|
||||
}
|
||||
return mt.AddTask(task)
|
||||
}
|
||||
|
||||
func (mt *Job) run() {
|
||||
mt.cases = []reflect.SelectCase{{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(mt.addSub)}}
|
||||
defer func() {
|
||||
err := recover()
|
||||
if err != nil {
|
||||
mt.Error("job panic", "err", err, "stack", string(debug.Stack()))
|
||||
if !ThrowPanic {
|
||||
mt.Stop(errors.Join(err.(error), ErrPanic))
|
||||
} else {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
stopReason := mt.StopReason()
|
||||
for _, task := range mt.children {
|
||||
task.Stop(stopReason)
|
||||
mt.onChildDispose(task)
|
||||
}
|
||||
mt.children = nil
|
||||
close(mt.childrenDisposed)
|
||||
}()
|
||||
for {
|
||||
mt.blocked = nil
|
||||
if chosen, rev, ok := reflect.Select(mt.cases); chosen == 0 {
|
||||
if rev.IsNil() {
|
||||
mt.Debug("job addSub channel closed, exiting", "taskId", mt.GetTaskID())
|
||||
return
|
||||
}
|
||||
if mt.blocked = rev.Interface().(ITask); mt.blocked.start() {
|
||||
mt.children = append(mt.children, mt.blocked)
|
||||
mt.cases = append(mt.cases, reflect.SelectCase{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(mt.blocked.GetSignal())})
|
||||
mt.onChildStart(mt.blocked)
|
||||
}
|
||||
} else {
|
||||
taskIndex := chosen - 1
|
||||
mt.blocked = mt.children[taskIndex]
|
||||
switch tt := mt.blocked.(type) {
|
||||
case IChannelTask:
|
||||
if tt.IsStopped() {
|
||||
switch ttt := tt.(type) {
|
||||
case ITickTask:
|
||||
ttt.GetTicker().Stop()
|
||||
}
|
||||
mt.onChildDispose(mt.blocked)
|
||||
mt.children = slices.Delete(mt.children, taskIndex, taskIndex+1)
|
||||
mt.cases = slices.Delete(mt.cases, chosen, chosen+1)
|
||||
} else {
|
||||
tt.Tick(rev.Interface())
|
||||
}
|
||||
default:
|
||||
if !ok {
|
||||
if mt.onChildDispose(mt.blocked); mt.blocked.checkRetry(mt.blocked.StopReason()) {
|
||||
if mt.blocked.reset(); mt.blocked.start() {
|
||||
mt.cases[chosen].Chan = reflect.ValueOf(mt.blocked.GetSignal())
|
||||
mt.onChildStart(mt.blocked)
|
||||
continue
|
||||
}
|
||||
}
|
||||
mt.children = slices.Delete(mt.children, taskIndex, taskIndex+1)
|
||||
mt.cases = slices.Delete(mt.cases, chosen, chosen+1)
|
||||
}
|
||||
}
|
||||
}
|
||||
if !mt.handler.keepalive() && len(mt.children) == 0 {
|
||||
mt.Stop(ErrAutoStop)
|
||||
}
|
||||
func (mt *Job) Call(callback func()) {
|
||||
if mt.Size.Load() <= 0 {
|
||||
callback()
|
||||
return
|
||||
}
|
||||
ctx, cancel := context.WithCancel(mt)
|
||||
_ = mt.eventLoop.add(mt, func() { callback(); cancel() })
|
||||
<-ctx.Done()
|
||||
}
|
||||
|
@@ -2,12 +2,21 @@ package task
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
. "m7s.live/v5/pkg/util"
|
||||
)
|
||||
|
||||
var ErrExist = errors.New("exist")
|
||||
|
||||
type ExistTaskError struct {
|
||||
Task ITask
|
||||
}
|
||||
|
||||
func (e ExistTaskError) Error() string {
|
||||
return fmt.Sprintf("%v exist", e.Task.getKey())
|
||||
}
|
||||
|
||||
type ManagerItem[K comparable] interface {
|
||||
ITask
|
||||
GetKey() K
|
||||
@@ -30,15 +39,25 @@ func (m *Manager[K, T]) Add(ctx T, opt ...any) *Task {
|
||||
m.Remove(ctx)
|
||||
m.Debug("remove", "key", ctx.GetKey(), "count", m.Length)
|
||||
})
|
||||
opt = append(opt, 1)
|
||||
return m.AddTask(ctx, opt...)
|
||||
}
|
||||
|
||||
func (m *Manager[K, T]) SafeHas(key K) (ok bool) {
|
||||
if m.L == nil {
|
||||
m.Call(func() {
|
||||
ok = m.Collection.Has(key)
|
||||
})
|
||||
return ok
|
||||
}
|
||||
return m.Collection.Has(key)
|
||||
}
|
||||
|
||||
// SafeGet 用于不同协程获取元素,防止并发请求
|
||||
func (m *Manager[K, T]) SafeGet(key K) (item T, ok bool) {
|
||||
if m.L == nil {
|
||||
m.Call(func() error {
|
||||
m.Call(func() {
|
||||
item, ok = m.Collection.Get(key)
|
||||
return nil
|
||||
})
|
||||
} else {
|
||||
item, ok = m.Collection.Get(key)
|
||||
@@ -49,9 +68,8 @@ func (m *Manager[K, T]) SafeGet(key K) (item T, ok bool) {
|
||||
// SafeRange 用于不同协程获取元素,防止并发请求
|
||||
func (m *Manager[K, T]) SafeRange(f func(T) bool) {
|
||||
if m.L == nil {
|
||||
m.Call(func() error {
|
||||
m.Call(func() {
|
||||
m.Collection.Range(f)
|
||||
return nil
|
||||
})
|
||||
} else {
|
||||
m.Collection.Range(f)
|
||||
@@ -61,9 +79,8 @@ func (m *Manager[K, T]) SafeRange(f func(T) bool) {
|
||||
// SafeFind 用于不同协程获取元素,防止并发请求
|
||||
func (m *Manager[K, T]) SafeFind(f func(T) bool) (item T, ok bool) {
|
||||
if m.L == nil {
|
||||
m.Call(func() error {
|
||||
m.Call(func() {
|
||||
item, ok = m.Collection.Find(f)
|
||||
return nil
|
||||
})
|
||||
} else {
|
||||
item, ok = m.Collection.Find(f)
|
||||
|
@@ -3,4 +3,4 @@
|
||||
|
||||
package task
|
||||
|
||||
var ThrowPanic = true
|
||||
var ThrowPanic = true
|
||||
|
@@ -22,15 +22,20 @@ func (o *OSSignal) Start() error {
|
||||
signalChan := make(chan os.Signal, 1)
|
||||
signal.Notify(signalChan, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT)
|
||||
o.SignalChan = signalChan
|
||||
o.OnStop(func() {
|
||||
signal.Stop(signalChan)
|
||||
close(signalChan)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *OSSignal) Tick(any) {
|
||||
println("OSSignal Tick")
|
||||
go o.root.Shutdown()
|
||||
}
|
||||
|
||||
type RootManager[K comparable, T ManagerItem[K]] struct {
|
||||
Manager[K, T]
|
||||
WorkCollection[K, T]
|
||||
}
|
||||
|
||||
func (m *RootManager[K, T]) Init() {
|
||||
|
176
pkg/task/task.go
176
pkg/task/task.go
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"log/slog"
|
||||
"maps"
|
||||
"reflect"
|
||||
@@ -21,13 +22,16 @@ const TraceLevel = slog.Level(-8)
|
||||
const OwnerTypeKey = "ownerType"
|
||||
|
||||
var (
|
||||
ErrAutoStop = errors.New("auto stop")
|
||||
ErrRetryRunOut = errors.New("retry out")
|
||||
ErrStopByUser = errors.New("stop by user")
|
||||
ErrRestart = errors.New("restart")
|
||||
ErrTaskComplete = errors.New("complete")
|
||||
ErrExit = errors.New("exit")
|
||||
ErrPanic = errors.New("panic")
|
||||
ErrAutoStop = errors.New("auto stop")
|
||||
ErrRetryRunOut = errors.New("retry out")
|
||||
ErrStopByUser = errors.New("stop by user")
|
||||
ErrRestart = errors.New("restart")
|
||||
ErrTaskComplete = errors.New("complete")
|
||||
ErrTimeout = errors.New("timeout")
|
||||
ErrExit = errors.New("exit")
|
||||
ErrPanic = errors.New("panic")
|
||||
ErrTooManyChildren = errors.New("too many children in job")
|
||||
ErrDisposed = errors.New("disposed")
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -45,7 +49,6 @@ const (
|
||||
TASK_TYPE_JOB
|
||||
TASK_TYPE_Work
|
||||
TASK_TYPE_CHANNEL
|
||||
TASK_TYPE_CALL
|
||||
)
|
||||
|
||||
type (
|
||||
@@ -71,14 +74,15 @@ type (
|
||||
SetDescription(key string, value any)
|
||||
SetDescriptions(value Description)
|
||||
SetRetry(maxRetry int, retryInterval time.Duration)
|
||||
Depend(ITask)
|
||||
Using(resource ...any)
|
||||
OnStop(any)
|
||||
OnStart(func())
|
||||
OnBeforeDispose(func())
|
||||
OnDispose(func())
|
||||
GetState() TaskState
|
||||
GetLevel() byte
|
||||
WaitStopped() error
|
||||
WaitStarted() error
|
||||
getKey() any
|
||||
}
|
||||
IJob interface {
|
||||
ITask
|
||||
@@ -88,8 +92,8 @@ type (
|
||||
OnDescendantsDispose(func(ITask))
|
||||
OnDescendantsStart(func(ITask))
|
||||
Blocked() ITask
|
||||
Call(func() error, ...any)
|
||||
Post(func() error, ...any) *Task
|
||||
EventLoopRunning() bool
|
||||
Call(func())
|
||||
}
|
||||
IChannelTask interface {
|
||||
ITask
|
||||
@@ -121,15 +125,18 @@ type (
|
||||
Logger *slog.Logger
|
||||
context.Context
|
||||
context.CancelCauseFunc
|
||||
handler ITask
|
||||
retry RetryConfig
|
||||
afterStartListeners, beforeDisposeListeners, afterDisposeListeners []func()
|
||||
description sync.Map
|
||||
startup, shutdown *util.Promise
|
||||
parent *Job
|
||||
parentCtx context.Context
|
||||
state TaskState
|
||||
level byte
|
||||
handler ITask
|
||||
retry RetryConfig
|
||||
afterStartListeners, afterDisposeListeners []func()
|
||||
closeOnStop []any
|
||||
resources []any
|
||||
stopOnce sync.Once
|
||||
description sync.Map
|
||||
startup, shutdown *util.Promise
|
||||
parent *Job
|
||||
parentCtx context.Context
|
||||
state TaskState
|
||||
level byte
|
||||
}
|
||||
)
|
||||
|
||||
@@ -183,12 +190,19 @@ func (task *Task) GetKey() uint32 {
|
||||
return task.ID
|
||||
}
|
||||
|
||||
func (task *Task) getKey() any {
|
||||
return reflect.ValueOf(task.handler).MethodByName("GetKey").Call(nil)[0].Interface()
|
||||
}
|
||||
|
||||
func (task *Task) WaitStarted() error {
|
||||
if task.startup == nil {
|
||||
return nil
|
||||
}
|
||||
return task.startup.Await()
|
||||
}
|
||||
|
||||
func (task *Task) WaitStopped() (err error) {
|
||||
err = task.startup.Await()
|
||||
err = task.WaitStarted()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -229,33 +243,50 @@ func (task *Task) Stop(err error) {
|
||||
task.Error("task stop with nil error", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType(), "parent", task.GetParent().GetOwnerType())
|
||||
panic("task stop with nil error")
|
||||
}
|
||||
if task.CancelCauseFunc != nil {
|
||||
if tt := task.handler.GetTaskType(); tt != TASK_TYPE_CALL {
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
task.Debug("task stop", "caller", fmt.Sprintf("%s:%d", strings.TrimPrefix(file, sourceFilePathPrefix), line), "reason", err, "elapsed", time.Since(task.StartTime), "taskId", task.ID, "taskType", tt, "ownerType", task.GetOwnerType())
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
task.stopOnce.Do(func() {
|
||||
if task.CancelCauseFunc != nil {
|
||||
msg := "task stop"
|
||||
if task.startup.IsRejected() {
|
||||
msg = "task start failed"
|
||||
}
|
||||
task.Debug(msg, "caller", fmt.Sprintf("%s:%d", strings.TrimPrefix(file, sourceFilePathPrefix), line), "reason", err, "elapsed", time.Since(task.StartTime), "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType())
|
||||
task.CancelCauseFunc(err)
|
||||
}
|
||||
task.CancelCauseFunc(err)
|
||||
}
|
||||
task.stop()
|
||||
})
|
||||
}
|
||||
|
||||
func (task *Task) Depend(t ITask) {
|
||||
t.OnDispose(func() {
|
||||
task.Stop(t.StopReason())
|
||||
})
|
||||
func (task *Task) stop() {
|
||||
for _, resource := range task.closeOnStop {
|
||||
switch v := resource.(type) {
|
||||
case func():
|
||||
v()
|
||||
case func() error:
|
||||
v()
|
||||
case ITask:
|
||||
v.Stop(task.StopReason())
|
||||
}
|
||||
}
|
||||
task.closeOnStop = task.closeOnStop[:0]
|
||||
}
|
||||
|
||||
func (task *Task) OnStart(listener func()) {
|
||||
task.afterStartListeners = append(task.afterStartListeners, listener)
|
||||
}
|
||||
|
||||
func (task *Task) OnBeforeDispose(listener func()) {
|
||||
task.beforeDisposeListeners = append(task.beforeDisposeListeners, listener)
|
||||
}
|
||||
|
||||
func (task *Task) OnDispose(listener func()) {
|
||||
task.afterDisposeListeners = append(task.afterDisposeListeners, listener)
|
||||
}
|
||||
|
||||
func (task *Task) Using(resource ...any) {
|
||||
task.resources = append(task.resources, resource...)
|
||||
}
|
||||
|
||||
func (task *Task) OnStop(resource any) {
|
||||
task.closeOnStop = append(task.closeOnStop, resource)
|
||||
}
|
||||
|
||||
func (task *Task) GetSignal() any {
|
||||
return task.Done()
|
||||
}
|
||||
@@ -300,9 +331,7 @@ func (task *Task) start() bool {
|
||||
}
|
||||
for {
|
||||
task.StartTime = time.Now()
|
||||
if tt := task.handler.GetTaskType(); tt != TASK_TYPE_CALL {
|
||||
task.Debug("task start", "taskId", task.ID, "taskType", tt, "ownerType", task.GetOwnerType(), "reason", task.StartReason)
|
||||
}
|
||||
task.Debug("task start", "taskId", task.ID, "taskType", task.GetTaskType(), "ownerType", task.GetOwnerType(), "reason", task.StartReason)
|
||||
task.state = TASK_STATE_STARTING
|
||||
if v, ok := task.handler.(TaskStarter); ok {
|
||||
err = v.Start()
|
||||
@@ -350,6 +379,7 @@ func (task *Task) start() bool {
|
||||
}
|
||||
|
||||
func (task *Task) reset() {
|
||||
task.stopOnce = sync.Once{}
|
||||
task.Context, task.CancelCauseFunc = context.WithCancelCause(task.parentCtx)
|
||||
task.shutdown = util.NewPromise(context.Background())
|
||||
task.startup = util.NewPromise(task.Context)
|
||||
@@ -363,6 +393,10 @@ func (task *Task) GetDescriptions() map[string]string {
|
||||
})
|
||||
}
|
||||
|
||||
func (task *Task) GetDescription(key string) (any, bool) {
|
||||
return task.description.Load(key)
|
||||
}
|
||||
|
||||
func (task *Task) SetDescription(key string, value any) {
|
||||
task.description.Store(key, value)
|
||||
}
|
||||
@@ -380,41 +414,41 @@ func (task *Task) SetDescriptions(value Description) {
|
||||
func (task *Task) dispose() {
|
||||
taskType, ownerType := task.handler.GetTaskType(), task.GetOwnerType()
|
||||
if task.state < TASK_STATE_STARTED {
|
||||
if taskType != TASK_TYPE_CALL {
|
||||
task.Debug("task dispose canceled", "taskId", task.ID, "taskType", taskType, "ownerType", ownerType, "state", task.state)
|
||||
}
|
||||
task.Debug("task dispose canceled", "taskId", task.ID, "taskType", taskType, "ownerType", ownerType, "state", task.state)
|
||||
return
|
||||
}
|
||||
reason := task.StopReason()
|
||||
task.state = TASK_STATE_DISPOSING
|
||||
if taskType != TASK_TYPE_CALL {
|
||||
yargs := []any{"reason", reason, "taskId", task.ID, "taskType", taskType, "ownerType", ownerType}
|
||||
task.Debug("task dispose", yargs...)
|
||||
defer task.Debug("task disposed", yargs...)
|
||||
}
|
||||
befores := len(task.beforeDisposeListeners)
|
||||
for i, listener := range task.beforeDisposeListeners {
|
||||
task.SetDescription("disposeProcess", fmt.Sprintf("b:%d/%d", i, befores))
|
||||
listener()
|
||||
}
|
||||
yargs := []any{"reason", reason, "taskId", task.ID, "taskType", taskType, "ownerType", ownerType}
|
||||
task.Debug("task dispose", yargs...)
|
||||
defer task.Debug("task disposed", yargs...)
|
||||
if job, ok := task.handler.(IJob); ok {
|
||||
mt := job.getJob()
|
||||
task.SetDescription("disposeProcess", "wait children")
|
||||
mt.eventLoopLock.Lock()
|
||||
if mt.addSub != nil {
|
||||
mt.waitChildrenDispose()
|
||||
mt.lazyRun = sync.Once{}
|
||||
}
|
||||
mt.eventLoopLock.Unlock()
|
||||
mt.waitChildrenDispose(reason)
|
||||
}
|
||||
task.SetDescription("disposeProcess", "self")
|
||||
if v, ok := task.handler.(TaskDisposal); ok {
|
||||
v.Dispose()
|
||||
}
|
||||
task.shutdown.Fulfill(reason)
|
||||
afters := len(task.afterDisposeListeners)
|
||||
task.SetDescription("disposeProcess", "resources")
|
||||
task.stopOnce.Do(task.stop)
|
||||
for _, resource := range task.resources {
|
||||
switch v := resource.(type) {
|
||||
case func():
|
||||
v()
|
||||
case ITask:
|
||||
v.Stop(task.StopReason())
|
||||
case util.Recyclable:
|
||||
v.Recycle()
|
||||
case io.Closer:
|
||||
v.Close()
|
||||
}
|
||||
}
|
||||
task.resources = task.resources[:0]
|
||||
for i, listener := range task.afterDisposeListeners {
|
||||
task.SetDescription("disposeProcess", fmt.Sprintf("a:%d/%d", i, afters))
|
||||
task.SetDescription("disposeProcess", fmt.Sprintf("a:%d/%d", i, len(task.afterDisposeListeners)))
|
||||
listener()
|
||||
}
|
||||
task.SetDescription("disposeProcess", "done")
|
||||
@@ -482,3 +516,25 @@ func (task *Task) Error(msg string, args ...any) {
|
||||
func (task *Task) TraceEnabled() bool {
|
||||
return task.Logger.Enabled(task.Context, TraceLevel)
|
||||
}
|
||||
|
||||
func (task *Task) RunTask(t ITask, opt ...any) (err error) {
|
||||
tt := t.GetTask()
|
||||
tt.handler = t
|
||||
mt := task.parent
|
||||
if job, ok := task.handler.(IJob); ok {
|
||||
mt = job.getJob()
|
||||
}
|
||||
mt.initContext(tt, opt...)
|
||||
if mt.IsStopped() {
|
||||
err = mt.StopReason()
|
||||
task.startup.Reject(err)
|
||||
return
|
||||
}
|
||||
task.OnStop(t)
|
||||
started := tt.start()
|
||||
<-tt.Done()
|
||||
if started {
|
||||
tt.dispose()
|
||||
}
|
||||
return tt.StopReason()
|
||||
}
|
||||
|
@@ -24,9 +24,12 @@ func Test_AddTask_AddsTaskSuccessfully(t *testing.T) {
|
||||
var task Task
|
||||
root.AddTask(&task)
|
||||
_ = task.WaitStarted()
|
||||
if len(root.children) != 1 {
|
||||
t.Errorf("expected 1 child task, got %d", len(root.children))
|
||||
}
|
||||
root.RangeSubTask(func(t ITask) bool {
|
||||
if t.GetTaskID() == task.GetTaskID() {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
type retryDemoTask struct {
|
||||
@@ -51,9 +54,9 @@ func Test_RetryTask(t *testing.T) {
|
||||
|
||||
func Test_Call_ExecutesCallback(t *testing.T) {
|
||||
called := false
|
||||
root.Call(func() error {
|
||||
root.Call(func() {
|
||||
called = true
|
||||
return nil
|
||||
return
|
||||
})
|
||||
if !called {
|
||||
t.Errorf("expected callback to be called")
|
||||
@@ -162,6 +165,24 @@ func Test_StartFail(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_Block(t *testing.T) {
|
||||
var task Task
|
||||
block := make(chan struct{})
|
||||
var job Job
|
||||
task.OnStart(func() {
|
||||
task.OnStop(func() {
|
||||
close(block)
|
||||
})
|
||||
<-block
|
||||
})
|
||||
time.AfterFunc(time.Second*2, func() {
|
||||
job.Stop(ErrTaskComplete)
|
||||
})
|
||||
root.AddTask(&job)
|
||||
job.AddTask(&task)
|
||||
job.WaitStopped()
|
||||
}
|
||||
|
||||
//
|
||||
//type DemoTask struct {
|
||||
// Task
|
||||
|
@@ -11,3 +11,57 @@ func (m *Work) keepalive() bool {
|
||||
func (*Work) GetTaskType() TaskType {
|
||||
return TASK_TYPE_Work
|
||||
}
|
||||
|
||||
type WorkCollection[K comparable, T interface {
|
||||
ITask
|
||||
GetKey() K
|
||||
}] struct {
|
||||
Work
|
||||
}
|
||||
|
||||
func (c *WorkCollection[K, T]) Find(f func(T) bool) (item T, ok bool) {
|
||||
c.RangeSubTask(func(task ITask) bool {
|
||||
if v, _ok := task.(T); _ok && f(v) {
|
||||
item = v
|
||||
ok = true
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (c *WorkCollection[K, T]) Get(key K) (item T, ok bool) {
|
||||
var value any
|
||||
value, ok = c.children.Load(key)
|
||||
if ok {
|
||||
item, ok = value.(T)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *WorkCollection[K, T]) Range(f func(T) bool) {
|
||||
c.RangeSubTask(func(task ITask) bool {
|
||||
if v, ok := task.(T); ok && !f(v) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
func (c *WorkCollection[K, T]) Has(key K) (ok bool) {
|
||||
_, ok = c.children.Load(key)
|
||||
return
|
||||
}
|
||||
|
||||
func (c *WorkCollection[K, T]) ToList() (list []T) {
|
||||
c.Range(func(t T) bool {
|
||||
list = append(list, t)
|
||||
return true
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (c *WorkCollection[K, T]) Length() int {
|
||||
return int(c.Size.Load())
|
||||
}
|
||||
|
BIN
pkg/test.h264
Normal file
BIN
pkg/test.h264
Normal file
Binary file not shown.
63
pkg/track.go
63
pkg/track.go
@@ -51,14 +51,12 @@ type (
|
||||
LastDropLevelChange time.Time
|
||||
DropFrameLevel int // 0: no drop, 1: drop P-frame, 2: drop all
|
||||
}
|
||||
|
||||
AVTrack struct {
|
||||
Track
|
||||
*RingWriter
|
||||
codec.ICodecCtx
|
||||
Allocator *util.ScalableMemoryAllocator
|
||||
SequenceFrame IAVFrame
|
||||
WrapIndex int
|
||||
Allocator *util.ScalableMemoryAllocator
|
||||
WrapIndex int
|
||||
TsTamer
|
||||
SpeedController
|
||||
DropController
|
||||
@@ -71,11 +69,13 @@ func NewAVTrack(args ...any) (t *AVTrack) {
|
||||
switch v := arg.(type) {
|
||||
case IAVFrame:
|
||||
t.FrameType = reflect.TypeOf(v)
|
||||
t.Allocator = v.GetAllocator()
|
||||
sample := v.GetSample()
|
||||
t.Allocator = sample.GetAllocator()
|
||||
t.ICodecCtx = sample.ICodecCtx
|
||||
case reflect.Type:
|
||||
t.FrameType = v
|
||||
case *slog.Logger:
|
||||
t.Logger = v
|
||||
t.Logger = v.With("frameType", t.FrameType.String())
|
||||
case *AVTrack:
|
||||
t.Logger = v.Logger.With("subtrack", t.FrameType.String())
|
||||
t.RingWriter = v.RingWriter
|
||||
@@ -118,9 +118,25 @@ func (t *AVTrack) AddBytesIn(n int) {
|
||||
}
|
||||
}
|
||||
|
||||
func (t *AVTrack) AcceptFrame(data IAVFrame) {
|
||||
func (t *AVTrack) FixTimestamp(data *Sample, scale float64) {
|
||||
t.AddBytesIn(data.Size)
|
||||
data.Timestamp = t.Tame(data.Timestamp, t.FPS, scale)
|
||||
}
|
||||
|
||||
func (t *AVTrack) NewFrame(avFrame *AVFrame) (frame IAVFrame) {
|
||||
frame = reflect.New(t.FrameType.Elem()).Interface().(IAVFrame)
|
||||
if avFrame.Sample == nil {
|
||||
avFrame.Sample = frame.GetSample()
|
||||
}
|
||||
if avFrame.BaseSample == nil {
|
||||
avFrame.BaseSample = &BaseSample{}
|
||||
}
|
||||
frame.GetSample().BaseSample = avFrame.BaseSample
|
||||
return
|
||||
}
|
||||
|
||||
func (t *AVTrack) AcceptFrame() {
|
||||
t.acceptFrameCount++
|
||||
t.Value.Wraps = append(t.Value.Wraps, data)
|
||||
}
|
||||
|
||||
func (t *AVTrack) changeDropFrameLevel(newLevel int) {
|
||||
@@ -230,23 +246,28 @@ func (t *AVTrack) AddPausedTime(d time.Duration) {
|
||||
t.pausedTime += d
|
||||
}
|
||||
|
||||
func (s *SpeedController) speedControl(speed float64, ts time.Duration) {
|
||||
if speed != s.speed || s.beginTime.IsZero() {
|
||||
s.speed = speed
|
||||
s.beginTime = time.Now()
|
||||
s.beginTimestamp = ts
|
||||
s.pausedTime = 0
|
||||
func (t *AVTrack) speedControl(speed float64, ts time.Duration) {
|
||||
if speed != t.speed || t.beginTime.IsZero() {
|
||||
t.speed = speed
|
||||
t.beginTime = time.Now()
|
||||
t.beginTimestamp = ts
|
||||
t.pausedTime = 0
|
||||
} else {
|
||||
elapsed := time.Since(s.beginTime) - s.pausedTime
|
||||
elapsed := time.Since(t.beginTime) - t.pausedTime
|
||||
if speed == 0 {
|
||||
s.Delta = ts - elapsed
|
||||
t.Delta = ts - elapsed
|
||||
if t.Logger.Enabled(t.ready, task.TraceLevel) {
|
||||
t.Trace("speed 0", "ts", ts, "elapsed", elapsed, "delta", t.Delta)
|
||||
}
|
||||
return
|
||||
}
|
||||
should := time.Duration(float64(ts-s.beginTimestamp) / speed)
|
||||
s.Delta = should - elapsed
|
||||
// fmt.Println(speed, elapsed, should, s.Delta)
|
||||
if s.Delta > threshold {
|
||||
time.Sleep(min(s.Delta, time.Millisecond*500))
|
||||
should := time.Duration(float64(ts-t.beginTimestamp) / speed)
|
||||
t.Delta = should - elapsed
|
||||
if t.Delta > threshold {
|
||||
if t.Logger.Enabled(t.ready, task.TraceLevel) {
|
||||
t.Trace("speed control", "speed", speed, "elapsed", elapsed, "should", should, "delta", t.Delta)
|
||||
}
|
||||
time.Sleep(min(t.Delta, time.Millisecond*500))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
63
pkg/util/buddy_disable.go
Normal file
63
pkg/util/buddy_disable.go
Normal file
@@ -0,0 +1,63 @@
|
||||
//go:build !enable_buddy
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
var pool0, pool1, pool2 sync.Pool
|
||||
|
||||
func init() {
|
||||
pool0.New = func() any {
|
||||
ret := createMemoryAllocator(defaultBufSize)
|
||||
ret.recycle = func() {
|
||||
pool0.Put(ret)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
pool1.New = func() any {
|
||||
ret := createMemoryAllocator(1 << MinPowerOf2)
|
||||
ret.recycle = func() {
|
||||
pool1.Put(ret)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
pool2.New = func() any {
|
||||
ret := createMemoryAllocator(1 << (MinPowerOf2 + 2))
|
||||
ret.recycle = func() {
|
||||
pool2.Put(ret)
|
||||
}
|
||||
return ret
|
||||
}
|
||||
}
|
||||
|
||||
func createMemoryAllocator(size int) *MemoryAllocator {
|
||||
memory := make([]byte, size)
|
||||
ret := &MemoryAllocator{
|
||||
allocator: NewAllocator(size),
|
||||
Size: size,
|
||||
memory: memory,
|
||||
start: int64(uintptr(unsafe.Pointer(&memory[0]))),
|
||||
}
|
||||
ret.allocator.Init(size)
|
||||
return ret
|
||||
}
|
||||
|
||||
func GetMemoryAllocator(size int) (ret *MemoryAllocator) {
|
||||
switch size {
|
||||
case defaultBufSize:
|
||||
ret = pool0.Get().(*MemoryAllocator)
|
||||
ret.allocator.Init(size)
|
||||
case 1 << MinPowerOf2:
|
||||
ret = pool1.Get().(*MemoryAllocator)
|
||||
ret.allocator.Init(size)
|
||||
case 1 << (MinPowerOf2 + 2):
|
||||
ret = pool2.Get().(*MemoryAllocator)
|
||||
ret.allocator.Init(size)
|
||||
default:
|
||||
ret = createMemoryAllocator(size)
|
||||
}
|
||||
return
|
||||
}
|
44
pkg/util/buddy_enable.go
Normal file
44
pkg/util/buddy_enable.go
Normal file
@@ -0,0 +1,44 @@
|
||||
//go:build enable_buddy
|
||||
|
||||
package util
|
||||
|
||||
import "unsafe"
|
||||
|
||||
func createMemoryAllocator(size int, buddy *Buddy, offset int) *MemoryAllocator {
|
||||
ret := &MemoryAllocator{
|
||||
allocator: NewAllocator(size),
|
||||
Size: size,
|
||||
memory: buddy.memoryPool[offset : offset+size],
|
||||
start: buddy.poolStart + int64(offset),
|
||||
recycle: func() {
|
||||
buddy.Free(offset >> MinPowerOf2)
|
||||
},
|
||||
}
|
||||
ret.allocator.Init(size)
|
||||
return ret
|
||||
}
|
||||
|
||||
func GetMemoryAllocator(size int) (ret *MemoryAllocator) {
|
||||
if size < BuddySize {
|
||||
requiredSize := size >> MinPowerOf2
|
||||
// 循环尝试从池中获取可用的 buddy
|
||||
for {
|
||||
buddy := GetBuddy()
|
||||
defer PutBuddy(buddy)
|
||||
offset, err := buddy.Alloc(requiredSize)
|
||||
if err == nil {
|
||||
// 分配成功,使用这个 buddy
|
||||
return createMemoryAllocator(size, buddy, offset<<MinPowerOf2)
|
||||
}
|
||||
}
|
||||
}
|
||||
// 池中的 buddy 都无法分配或大小不够,使用系统内存
|
||||
memory := make([]byte, size)
|
||||
start := int64(uintptr(unsafe.Pointer(&memory[0])))
|
||||
return &MemoryAllocator{
|
||||
allocator: NewAllocator(size),
|
||||
Size: size,
|
||||
memory: memory,
|
||||
start: start,
|
||||
}
|
||||
}
|
@@ -4,7 +4,6 @@ import (
|
||||
"io"
|
||||
"net"
|
||||
"net/textproto"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -15,8 +14,8 @@ type BufReader struct {
|
||||
buf MemoryReader
|
||||
totalRead int
|
||||
BufLen int
|
||||
Mouth chan []byte
|
||||
feedData func() error
|
||||
Dump *os.File
|
||||
}
|
||||
|
||||
func NewBufReaderWithBufLen(reader io.Reader, bufLen int) (r *BufReader) {
|
||||
@@ -62,8 +61,10 @@ func NewBufReaderBuffersChan(feedChan chan net.Buffers) (r *BufReader) {
|
||||
return
|
||||
}
|
||||
|
||||
func NewBufReaderChan(feedChan chan []byte) (r *BufReader) {
|
||||
func NewBufReaderChan(bufferSize int) (r *BufReader) {
|
||||
feedChan := make(chan []byte, bufferSize)
|
||||
r = &BufReader{
|
||||
Mouth: feedChan,
|
||||
feedData: func() error {
|
||||
data, ok := <-feedChan
|
||||
if !ok {
|
||||
@@ -81,6 +82,15 @@ func NewBufReaderChan(feedChan chan []byte) (r *BufReader) {
|
||||
return
|
||||
}
|
||||
|
||||
func (r *BufReader) Feed(data []byte) bool {
|
||||
select {
|
||||
case r.Mouth <- data:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func NewBufReader(reader io.Reader) (r *BufReader) {
|
||||
return NewBufReaderWithBufLen(reader, defaultBufSize)
|
||||
}
|
||||
@@ -90,6 +100,9 @@ func (r *BufReader) Recycle() {
|
||||
if r.Allocator != nil {
|
||||
r.Allocator.Recycle()
|
||||
}
|
||||
if r.Mouth != nil {
|
||||
close(r.Mouth)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *BufReader) Buffered() int {
|
||||
@@ -176,9 +189,6 @@ func (r *BufReader) ReadRange(n int, yield func([]byte)) (err error) {
|
||||
func (r *BufReader) Read(to []byte) (n int, err error) {
|
||||
n = len(to)
|
||||
err = r.ReadNto(n, to)
|
||||
if r.Dump != nil {
|
||||
r.Dump.Write(to)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -199,7 +209,7 @@ func (r *BufReader) ReadString(n int) (s string, err error) {
|
||||
}
|
||||
|
||||
func (r *BufReader) ReadBytes(n int) (mem Memory, err error) {
|
||||
err = r.ReadRange(n, mem.AppendOne)
|
||||
err = r.ReadRange(n, mem.PushOne)
|
||||
return
|
||||
}
|
||||
|
||||
|
@@ -24,7 +24,7 @@ func TestReadBytesTo(t *testing.T) {
|
||||
s := RandomString(100)
|
||||
t.Logf("s:%s", s)
|
||||
var m Memory
|
||||
m.AppendOne([]byte(s))
|
||||
m.PushOne([]byte(s))
|
||||
r := m.NewReader()
|
||||
seededRand := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
var total []byte
|
||||
@@ -34,7 +34,7 @@ func TestReadBytesTo(t *testing.T) {
|
||||
continue
|
||||
}
|
||||
buf := make([]byte, i)
|
||||
n := r.ReadBytesTo(buf)
|
||||
n, _ := r.Read(buf)
|
||||
t.Logf("n:%d buf:%s", n, string(buf))
|
||||
total = append(total, buf[:n]...)
|
||||
if n == 0 {
|
||||
|
@@ -101,23 +101,6 @@ func (c *Collection[K, T]) RemoveByKey(key K) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// func (c *Collection[K, T]) GetOrCreate(key K) (item T, find bool) {
|
||||
// if c.L != nil {
|
||||
// c.L.Lock()
|
||||
// defer c.L.Unlock()
|
||||
// }
|
||||
// if c.m != nil {
|
||||
// item, find = c.m[key]
|
||||
// return item, find
|
||||
// }
|
||||
// for _, item = range c.Items {
|
||||
// if item.GetKey() == key {
|
||||
// return item, true
|
||||
// }
|
||||
// }
|
||||
// item = reflect.New(reflect.TypeOf(item).Elem()).Interface().(T)
|
||||
// return
|
||||
// }
|
||||
func (c *Collection[K, T]) Has(key K) bool {
|
||||
_, ok := c.Get(key)
|
||||
return ok
|
||||
@@ -169,10 +152,6 @@ func (c *Collection[K, T]) Search(f func(T) bool) func(yield func(item T) bool)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Collection[K, T]) GetKey() K {
|
||||
return c.Items[0].GetKey()
|
||||
}
|
||||
|
||||
func (c *Collection[K, T]) Clear() {
|
||||
if c.L != nil {
|
||||
c.L.Lock()
|
||||
|
60
pkg/util/http_ws_writer.go
Normal file
60
pkg/util/http_ws_writer.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/gobwas/ws/wsutil"
|
||||
)
|
||||
|
||||
type HTTP_WS_Writer struct {
|
||||
io.Writer
|
||||
Conn net.Conn
|
||||
ContentType string
|
||||
WriteTimeout time.Duration
|
||||
IsWebSocket bool
|
||||
buffer []byte
|
||||
}
|
||||
|
||||
func (m *HTTP_WS_Writer) Write(p []byte) (n int, err error) {
|
||||
if m.IsWebSocket {
|
||||
m.buffer = append(m.buffer, p...)
|
||||
return len(p), nil
|
||||
}
|
||||
if m.Conn != nil && m.WriteTimeout > 0 {
|
||||
m.Conn.SetWriteDeadline(time.Now().Add(m.WriteTimeout))
|
||||
}
|
||||
return m.Writer.Write(p)
|
||||
}
|
||||
|
||||
func (m *HTTP_WS_Writer) Flush() (err error) {
|
||||
if m.IsWebSocket {
|
||||
if m.WriteTimeout > 0 {
|
||||
m.Conn.SetWriteDeadline(time.Now().Add(m.WriteTimeout))
|
||||
}
|
||||
err = wsutil.WriteServerBinary(m.Conn, m.buffer)
|
||||
m.buffer = m.buffer[:0]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (m *HTTP_WS_Writer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if m.Conn == nil {
|
||||
w.Header().Set("Transfer-Encoding", "chunked")
|
||||
w.Header().Set("Content-Type", m.ContentType)
|
||||
w.WriteHeader(http.StatusOK)
|
||||
if hijacker, ok := w.(http.Hijacker); ok && m.WriteTimeout > 0 {
|
||||
m.Conn, _, _ = hijacker.Hijack()
|
||||
m.Conn.SetWriteDeadline(time.Now().Add(m.WriteTimeout))
|
||||
m.Writer = m.Conn
|
||||
} else {
|
||||
m.Writer = w
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
} else {
|
||||
m.IsWebSocket = true
|
||||
m.Writer = m.Conn
|
||||
}
|
||||
}
|
@@ -16,6 +16,10 @@ type ReadWriteSeekCloser interface {
|
||||
io.Closer
|
||||
}
|
||||
|
||||
type Recyclable interface {
|
||||
Recycle()
|
||||
}
|
||||
|
||||
type Object = map[string]any
|
||||
|
||||
func Conditional[T any](cond bool, t, f T) T {
|
||||
@@ -70,3 +74,60 @@ func Exist(filename string) bool {
|
||||
_, err := os.Stat(filename)
|
||||
return err == nil || os.IsExist(err)
|
||||
}
|
||||
|
||||
type ReuseArray[T any] []T
|
||||
|
||||
func (s *ReuseArray[T]) GetNextPointer() (r *T) {
|
||||
ss := *s
|
||||
l := len(ss)
|
||||
if cap(ss) > l {
|
||||
ss = ss[:l+1]
|
||||
} else {
|
||||
var new T
|
||||
ss = append(ss, new)
|
||||
}
|
||||
*s = ss
|
||||
r = &((ss)[l])
|
||||
if resetter, ok := any(r).(Resetter); ok {
|
||||
resetter.Reset()
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func (s ReuseArray[T]) RangePoint(f func(yield *T) bool) {
|
||||
for i := range len(s) {
|
||||
if !f(&s[i]) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *ReuseArray[T]) Reset() {
|
||||
*s = (*s)[:0]
|
||||
}
|
||||
|
||||
func (s *ReuseArray[T]) Reduce() {
|
||||
ss := *s
|
||||
*s = ss[:len(ss)-1]
|
||||
}
|
||||
|
||||
func (s *ReuseArray[T]) Remove(item *T) bool {
|
||||
count := s.Count()
|
||||
for i := range count {
|
||||
if &(*s)[i] == item {
|
||||
value := *item
|
||||
*s = append((*s)[:i], (*s)[i+1:]...)
|
||||
*s = append(*s, value)[:count-1]
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *ReuseArray[T]) Count() int {
|
||||
return len(*s)
|
||||
}
|
||||
|
||||
type Resetter interface {
|
||||
Reset()
|
||||
}
|
||||
|
103
pkg/util/mem.go
103
pkg/util/mem.go
@@ -1,7 +1,110 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"slices"
|
||||
)
|
||||
|
||||
const (
|
||||
MaxBlockSize = 1 << 22
|
||||
BuddySize = MaxBlockSize << 7
|
||||
MinPowerOf2 = 10
|
||||
)
|
||||
|
||||
type Memory struct {
|
||||
Size int
|
||||
Buffers [][]byte
|
||||
}
|
||||
|
||||
func NewMemory(buf []byte) Memory {
|
||||
return Memory{
|
||||
Buffers: net.Buffers{buf},
|
||||
Size: len(buf),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) WriteTo(w io.Writer) (n int64, err error) {
|
||||
copy := net.Buffers(slices.Clone(m.Buffers))
|
||||
return copy.WriteTo(w)
|
||||
}
|
||||
|
||||
func (m *Memory) Reset() {
|
||||
m.Buffers = m.Buffers[:0]
|
||||
m.Size = 0
|
||||
}
|
||||
|
||||
func (m *Memory) UpdateBuffer(index int, buf []byte) {
|
||||
if index < 0 {
|
||||
index = len(m.Buffers) + index
|
||||
}
|
||||
m.Size = len(buf) - len(m.Buffers[index])
|
||||
m.Buffers[index] = buf
|
||||
}
|
||||
|
||||
func (m *Memory) CopyFrom(b *Memory) {
|
||||
buf := make([]byte, b.Size)
|
||||
b.CopyTo(buf)
|
||||
m.PushOne(buf)
|
||||
}
|
||||
|
||||
func (m *Memory) Equal(b *Memory) bool {
|
||||
if m.Size != b.Size || len(m.Buffers) != len(b.Buffers) {
|
||||
return false
|
||||
}
|
||||
for i, buf := range m.Buffers {
|
||||
if !slices.Equal(buf, b.Buffers[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Memory) CopyTo(buf []byte) {
|
||||
for _, b := range m.Buffers {
|
||||
l := len(b)
|
||||
copy(buf, b)
|
||||
buf = buf[l:]
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) ToBytes() []byte {
|
||||
buf := make([]byte, m.Size)
|
||||
m.CopyTo(buf)
|
||||
return buf
|
||||
}
|
||||
|
||||
func (m *Memory) PushOne(b []byte) {
|
||||
m.Buffers = append(m.Buffers, b)
|
||||
m.Size += len(b)
|
||||
}
|
||||
|
||||
func (m *Memory) Push(b ...[]byte) {
|
||||
m.Buffers = append(m.Buffers, b...)
|
||||
for _, level0 := range b {
|
||||
m.Size += len(level0)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) Append(mm Memory) *Memory {
|
||||
m.Buffers = append(m.Buffers, mm.Buffers...)
|
||||
m.Size += mm.Size
|
||||
return m
|
||||
}
|
||||
|
||||
func (m *Memory) Count() int {
|
||||
return len(m.Buffers)
|
||||
}
|
||||
|
||||
func (m *Memory) Range(yield func([]byte)) {
|
||||
for i := range m.Count() {
|
||||
yield(m.Buffers[i])
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) NewReader() MemoryReader {
|
||||
return MemoryReader{
|
||||
Memory: m,
|
||||
Length: m.Size,
|
||||
}
|
||||
}
|
||||
|
@@ -2,93 +2,23 @@ package util
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net"
|
||||
"slices"
|
||||
)
|
||||
|
||||
type Memory struct {
|
||||
Size int
|
||||
net.Buffers
|
||||
}
|
||||
|
||||
type MemoryReader struct {
|
||||
*Memory
|
||||
Length int
|
||||
offset0 int
|
||||
offset1 int
|
||||
Length, offset0, offset1 int
|
||||
}
|
||||
|
||||
func NewReadableBuffersFromBytes(b ...[]byte) *MemoryReader {
|
||||
func NewReadableBuffersFromBytes(b ...[]byte) MemoryReader {
|
||||
buf := &Memory{Buffers: b}
|
||||
for _, level0 := range b {
|
||||
buf.Size += len(level0)
|
||||
}
|
||||
return &MemoryReader{Memory: buf, Length: buf.Size}
|
||||
return MemoryReader{Memory: buf, Length: buf.Size}
|
||||
}
|
||||
|
||||
func NewMemory(buf []byte) Memory {
|
||||
return Memory{
|
||||
Buffers: net.Buffers{buf},
|
||||
Size: len(buf),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) UpdateBuffer(index int, buf []byte) {
|
||||
if index < 0 {
|
||||
index = len(m.Buffers) + index
|
||||
}
|
||||
m.Size = len(buf) - len(m.Buffers[index])
|
||||
m.Buffers[index] = buf
|
||||
}
|
||||
|
||||
func (m *Memory) CopyFrom(b *Memory) {
|
||||
buf := make([]byte, b.Size)
|
||||
b.CopyTo(buf)
|
||||
m.AppendOne(buf)
|
||||
}
|
||||
|
||||
func (m *Memory) CopyTo(buf []byte) {
|
||||
for _, b := range m.Buffers {
|
||||
l := len(b)
|
||||
copy(buf, b)
|
||||
buf = buf[l:]
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) ToBytes() []byte {
|
||||
buf := make([]byte, m.Size)
|
||||
m.CopyTo(buf)
|
||||
return buf
|
||||
}
|
||||
|
||||
func (m *Memory) AppendOne(b []byte) {
|
||||
m.Buffers = append(m.Buffers, b)
|
||||
m.Size += len(b)
|
||||
}
|
||||
|
||||
func (m *Memory) Append(b ...[]byte) {
|
||||
m.Buffers = append(m.Buffers, b...)
|
||||
for _, level0 := range b {
|
||||
m.Size += len(level0)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) Count() int {
|
||||
return len(m.Buffers)
|
||||
}
|
||||
|
||||
func (m *Memory) Range(yield func([]byte)) {
|
||||
for i := range m.Count() {
|
||||
yield(m.Buffers[i])
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Memory) NewReader() *MemoryReader {
|
||||
var reader MemoryReader
|
||||
reader.Memory = m
|
||||
reader.Length = m.Size
|
||||
return &reader
|
||||
}
|
||||
var _ io.Reader = (*MemoryReader)(nil)
|
||||
|
||||
func (r *MemoryReader) Offset() int {
|
||||
return r.Size - r.Length
|
||||
@@ -108,9 +38,9 @@ func (r *MemoryReader) MoveToEnd() {
|
||||
r.Length = 0
|
||||
}
|
||||
|
||||
func (r *MemoryReader) ReadBytesTo(buf []byte) (actual int) {
|
||||
func (r *MemoryReader) Read(buf []byte) (actual int, err error) {
|
||||
if r.Length == 0 {
|
||||
return 0
|
||||
return 0, io.EOF
|
||||
}
|
||||
n := len(buf)
|
||||
curBuf := r.GetCurrent()
|
||||
@@ -142,6 +72,7 @@ func (r *MemoryReader) ReadBytesTo(buf []byte) (actual int) {
|
||||
actual += curBufLen
|
||||
r.skipBuf()
|
||||
if r.Length == 0 && n > 0 {
|
||||
err = io.EOF
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -204,6 +135,9 @@ func (r *MemoryReader) getCurrentBufLen() int {
|
||||
return len(r.Memory.Buffers[r.offset0]) - r.offset1
|
||||
}
|
||||
func (r *MemoryReader) Skip(n int) error {
|
||||
if n <= 0 {
|
||||
return nil
|
||||
}
|
||||
if n > r.Length {
|
||||
return io.EOF
|
||||
}
|
||||
@@ -248,8 +182,8 @@ func (r *MemoryReader) ReadBytes(n int) ([]byte, error) {
|
||||
return nil, io.EOF
|
||||
}
|
||||
b := make([]byte, n)
|
||||
actual := r.ReadBytesTo(b)
|
||||
return b[:actual], nil
|
||||
actual, err := r.Read(b)
|
||||
return b[:actual], err
|
||||
}
|
||||
|
||||
func (r *MemoryReader) ReadBE(n int) (num uint32, err error) {
|
@@ -22,13 +22,13 @@ func NewPromiseWithTimeout(ctx context.Context, timeout time.Duration) *Promise
|
||||
p := &Promise{}
|
||||
p.Context, p.CancelCauseFunc = context.WithCancelCause(ctx)
|
||||
p.timer = time.AfterFunc(timeout, func() {
|
||||
p.CancelCauseFunc(ErrTimeout)
|
||||
p.CancelCauseFunc(errTimeout)
|
||||
})
|
||||
return p
|
||||
}
|
||||
|
||||
var ErrResolve = errors.New("promise resolved")
|
||||
var ErrTimeout = errors.New("promise timeout")
|
||||
var errTimeout = errors.New("promise timeout")
|
||||
|
||||
func (p *Promise) Resolve() {
|
||||
p.Fulfill(nil)
|
||||
@@ -47,6 +47,10 @@ func (p *Promise) Await() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Promise) IsRejected() bool {
|
||||
return context.Cause(p.Context) != ErrResolve
|
||||
}
|
||||
|
||||
func (p *Promise) Fulfill(err error) {
|
||||
if p.timer != nil {
|
||||
p.timer.Stop()
|
||||
|
@@ -4,12 +4,26 @@ package util
|
||||
|
||||
import (
|
||||
"io"
|
||||
"slices"
|
||||
)
|
||||
|
||||
type RecyclableMemory struct {
|
||||
Memory
|
||||
}
|
||||
|
||||
func NewRecyclableMemory(allocator *ScalableMemoryAllocator) RecyclableMemory {
|
||||
return RecyclableMemory{}
|
||||
}
|
||||
|
||||
func (r *RecyclableMemory) Clone() RecyclableMemory {
|
||||
return RecyclableMemory{
|
||||
Memory: Memory{
|
||||
Buffers: slices.Clone(r.Buffers),
|
||||
Size: r.Size,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RecyclableMemory) InitRecycleIndexes(max int) {
|
||||
}
|
||||
|
||||
|
@@ -15,8 +15,14 @@ type RecyclableMemory struct {
|
||||
recycleIndexes []int
|
||||
}
|
||||
|
||||
func NewRecyclableMemory(allocator *ScalableMemoryAllocator) RecyclableMemory {
|
||||
return RecyclableMemory{allocator: allocator}
|
||||
}
|
||||
|
||||
func (r *RecyclableMemory) InitRecycleIndexes(max int) {
|
||||
r.recycleIndexes = make([]int, 0, max)
|
||||
if r.recycleIndexes == nil {
|
||||
r.recycleIndexes = make([]int, 0, max)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RecyclableMemory) GetAllocator() *ScalableMemoryAllocator {
|
||||
@@ -28,7 +34,7 @@ func (r *RecyclableMemory) NextN(size int) (memory []byte) {
|
||||
if r.recycleIndexes != nil {
|
||||
r.recycleIndexes = append(r.recycleIndexes, r.Count())
|
||||
}
|
||||
r.AppendOne(memory)
|
||||
r.PushOne(memory)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -36,7 +42,7 @@ func (r *RecyclableMemory) AddRecycleBytes(b []byte) {
|
||||
if r.recycleIndexes != nil {
|
||||
r.recycleIndexes = append(r.recycleIndexes, r.Count())
|
||||
}
|
||||
r.AppendOne(b)
|
||||
r.PushOne(b)
|
||||
}
|
||||
|
||||
func (r *RecyclableMemory) SetAllocator(allocator *ScalableMemoryAllocator) {
|
||||
@@ -54,6 +60,7 @@ func (r *RecyclableMemory) Recycle() {
|
||||
r.allocator.Free(buf)
|
||||
}
|
||||
}
|
||||
r.Reset()
|
||||
}
|
||||
|
||||
type MemoryAllocator struct {
|
||||
@@ -61,54 +68,14 @@ type MemoryAllocator struct {
|
||||
start int64
|
||||
memory []byte
|
||||
Size int
|
||||
buddy *Buddy
|
||||
}
|
||||
|
||||
// createMemoryAllocator 创建并初始化 MemoryAllocator
|
||||
func createMemoryAllocator(size int, buddy *Buddy, offset int) *MemoryAllocator {
|
||||
ret := &MemoryAllocator{
|
||||
allocator: NewAllocator(size),
|
||||
buddy: buddy,
|
||||
Size: size,
|
||||
memory: buddy.memoryPool[offset : offset+size],
|
||||
start: buddy.poolStart + int64(offset),
|
||||
}
|
||||
ret.allocator.Init(size)
|
||||
return ret
|
||||
}
|
||||
|
||||
func GetMemoryAllocator(size int) (ret *MemoryAllocator) {
|
||||
if size < BuddySize {
|
||||
requiredSize := size >> MinPowerOf2
|
||||
// 循环尝试从池中获取可用的 buddy
|
||||
for {
|
||||
buddy := GetBuddy()
|
||||
offset, err := buddy.Alloc(requiredSize)
|
||||
PutBuddy(buddy)
|
||||
if err == nil {
|
||||
// 分配成功,使用这个 buddy
|
||||
return createMemoryAllocator(size, buddy, offset<<MinPowerOf2)
|
||||
}
|
||||
}
|
||||
}
|
||||
// 池中的 buddy 都无法分配或大小不够,使用系统内存
|
||||
memory := make([]byte, size)
|
||||
start := int64(uintptr(unsafe.Pointer(&memory[0])))
|
||||
return &MemoryAllocator{
|
||||
allocator: NewAllocator(size),
|
||||
Size: size,
|
||||
memory: memory,
|
||||
start: start,
|
||||
}
|
||||
recycle func()
|
||||
}
|
||||
|
||||
func (ma *MemoryAllocator) Recycle() {
|
||||
ma.allocator.Recycle()
|
||||
if ma.buddy != nil {
|
||||
_ = ma.buddy.Free(int((ma.buddy.poolStart - ma.start) >> MinPowerOf2))
|
||||
ma.buddy = nil
|
||||
if ma.recycle != nil {
|
||||
ma.recycle()
|
||||
}
|
||||
ma.memory = nil
|
||||
}
|
||||
|
||||
func (ma *MemoryAllocator) Find(size int) (memory []byte) {
|
||||
|
370
plugin.go
370
plugin.go
@@ -6,6 +6,7 @@ import (
|
||||
"crypto/md5"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
@@ -18,6 +19,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
|
||||
"m7s.live/v5/pkg/task"
|
||||
|
||||
"github.com/quic-go/quic-go"
|
||||
@@ -25,8 +28,8 @@ import (
|
||||
gatewayRuntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
|
||||
myip "github.com/husanpao/ip"
|
||||
"google.golang.org/grpc"
|
||||
"gopkg.in/yaml.v3"
|
||||
"gorm.io/gorm"
|
||||
|
||||
. "m7s.live/v5/pkg"
|
||||
"m7s.live/v5/pkg/config"
|
||||
"m7s.live/v5/pkg/db"
|
||||
@@ -63,9 +66,7 @@ type (
|
||||
|
||||
IPlugin interface {
|
||||
task.IJob
|
||||
OnInit() error
|
||||
OnStop()
|
||||
Pull(string, config.Pull, *config.Publish)
|
||||
Pull(string, config.Pull, *config.Publish) (*PullJob, error)
|
||||
Push(string, config.Push, *config.Subscribe)
|
||||
Transform(*Publisher, config.Transform)
|
||||
OnPublish(*Publisher)
|
||||
@@ -161,27 +162,46 @@ func (plugin *PluginMeta) Init(s *Server, userConfig map[string]any) (p *Plugin)
|
||||
return
|
||||
}
|
||||
}
|
||||
if err := s.AddTask(instance).WaitStarted(); err != nil {
|
||||
if err = s.AddTask(instance).WaitStarted(); err != nil {
|
||||
p.disable(instance.StopReason().Error())
|
||||
return
|
||||
}
|
||||
if err = p.listen(); err != nil {
|
||||
p.Stop(err)
|
||||
p.disable(err.Error())
|
||||
return
|
||||
}
|
||||
if p.Meta.ServiceDesc != nil && s.grpcServer != nil {
|
||||
s.grpcServer.RegisterService(p.Meta.ServiceDesc, p.handler)
|
||||
if p.Meta.RegisterGRPCHandler != nil {
|
||||
if err = p.Meta.RegisterGRPCHandler(p.Context, s.config.HTTP.GetGRPCMux(), s.grpcClientConn); err != nil {
|
||||
p.Stop(err)
|
||||
p.disable(fmt.Sprintf("grpc %v", err))
|
||||
return
|
||||
} else {
|
||||
p.Info("grpc handler registered")
|
||||
}
|
||||
}
|
||||
}
|
||||
if p.config.Hook != nil {
|
||||
if hook, ok := p.config.Hook[config.HookOnServerKeepAlive]; ok && hook.Interval > 0 {
|
||||
p.AddTask(&ServerKeepAliveTask{plugin: p})
|
||||
}
|
||||
}
|
||||
var handlers map[string]http.HandlerFunc
|
||||
if v, ok := instance.(IRegisterHandler); ok {
|
||||
handlers = v.RegisterHandler()
|
||||
}
|
||||
p.registerHandler(handlers)
|
||||
p.OnDispose(func() {
|
||||
s.Plugins.Remove(p)
|
||||
})
|
||||
s.Plugins.Add(p)
|
||||
return
|
||||
}
|
||||
|
||||
// InstallPlugin 安装插件
|
||||
func InstallPlugin[C iPlugin](options ...any) error {
|
||||
var meta PluginMeta
|
||||
for _, option := range options {
|
||||
if m, ok := option.(PluginMeta); ok {
|
||||
meta = m
|
||||
}
|
||||
}
|
||||
func InstallPlugin[C iPlugin](meta PluginMeta) error {
|
||||
var c *C
|
||||
meta.Type = reflect.TypeOf(c).Elem()
|
||||
if meta.Name == "" {
|
||||
@@ -196,30 +216,6 @@ func InstallPlugin[C iPlugin](options ...any) error {
|
||||
meta.Version = "dev"
|
||||
}
|
||||
}
|
||||
for _, option := range options {
|
||||
switch v := option.(type) {
|
||||
case OnExitHandler:
|
||||
meta.OnExit = v
|
||||
case DefaultYaml:
|
||||
meta.DefaultYaml = v
|
||||
case PullerFactory:
|
||||
meta.NewPuller = v
|
||||
case PusherFactory:
|
||||
meta.NewPusher = v
|
||||
case RecorderFactory:
|
||||
meta.NewRecorder = v
|
||||
case TransformerFactory:
|
||||
meta.NewTransformer = v
|
||||
case AuthPublisher:
|
||||
meta.OnAuthPub = v
|
||||
case AuthSubscriber:
|
||||
meta.OnAuthSub = v
|
||||
case *grpc.ServiceDesc:
|
||||
meta.ServiceDesc = v
|
||||
case func(context.Context, *gatewayRuntime.ServeMux, *grpc.ClientConn) error:
|
||||
meta.RegisterGRPCHandler = v
|
||||
}
|
||||
}
|
||||
plugins = append(plugins, meta)
|
||||
return nil
|
||||
}
|
||||
@@ -279,39 +275,6 @@ func (p *Plugin) disable(reason string) {
|
||||
p.Server.disabledPlugins = append(p.Server.disabledPlugins, p)
|
||||
}
|
||||
|
||||
func (p *Plugin) Start() (err error) {
|
||||
s := p.Server
|
||||
|
||||
if err = p.listen(); err != nil {
|
||||
return
|
||||
}
|
||||
if err = p.handler.OnInit(); err != nil {
|
||||
return
|
||||
}
|
||||
if p.Meta.ServiceDesc != nil && s.grpcServer != nil {
|
||||
s.grpcServer.RegisterService(p.Meta.ServiceDesc, p.handler)
|
||||
if p.Meta.RegisterGRPCHandler != nil {
|
||||
if err = p.Meta.RegisterGRPCHandler(p.Context, s.config.HTTP.GetGRPCMux(), s.grpcClientConn); err != nil {
|
||||
p.disable(fmt.Sprintf("grpc %v", err))
|
||||
return
|
||||
} else {
|
||||
p.Info("grpc handler registered")
|
||||
}
|
||||
}
|
||||
}
|
||||
if p.config.Hook != nil {
|
||||
if hook, ok := p.config.Hook[config.HookOnServerKeepAlive]; ok && hook.Interval > 0 {
|
||||
p.AddTask(&ServerKeepAliveTask{plugin: p})
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Plugin) Dispose() {
|
||||
p.handler.OnStop()
|
||||
p.Server.Plugins.Remove(p)
|
||||
}
|
||||
|
||||
func (p *Plugin) listen() (err error) {
|
||||
httpConf := &p.config.HTTP
|
||||
|
||||
@@ -371,13 +334,11 @@ func (p *Plugin) listen() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Plugin) OnInit() error {
|
||||
return nil
|
||||
type WebHookQueueTask struct {
|
||||
task.Work
|
||||
}
|
||||
|
||||
func (p *Plugin) OnStop() {
|
||||
|
||||
}
|
||||
var webHookQueueTask WebHookQueueTask
|
||||
|
||||
type WebHookTask struct {
|
||||
task.Task
|
||||
@@ -386,6 +347,7 @@ type WebHookTask struct {
|
||||
conf config.Webhook
|
||||
data any
|
||||
jsonData []byte
|
||||
alarm AlarmInfo
|
||||
}
|
||||
|
||||
func (t *WebHookTask) Start() error {
|
||||
@@ -393,10 +355,58 @@ func (t *WebHookTask) Start() error {
|
||||
return task.ErrTaskComplete
|
||||
}
|
||||
|
||||
var err error
|
||||
t.jsonData, err = json.Marshal(t.data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal webhook data: %w", err)
|
||||
// 处理AlarmInfo数据
|
||||
if t.data != nil {
|
||||
// 获取主机名和IP地址
|
||||
hostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
hostname = "unknown"
|
||||
}
|
||||
|
||||
// 获取本机IP地址
|
||||
var ipAddr string
|
||||
addrs, err := net.InterfaceAddrs()
|
||||
if err == nil {
|
||||
for _, addr := range addrs {
|
||||
if ipnet, ok := addr.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
|
||||
if ipnet.IP.To4() != nil {
|
||||
ipAddr = ipnet.IP.String()
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if ipAddr == "" {
|
||||
ipAddr = "unknown"
|
||||
}
|
||||
|
||||
// 直接使用t.data作为AlarmInfo
|
||||
alarmInfo, ok := t.data.(AlarmInfo)
|
||||
if !ok {
|
||||
return fmt.Errorf("data is not of type AlarmInfo")
|
||||
}
|
||||
|
||||
// 更新服务器信息
|
||||
if alarmInfo.ServerInfo == "" {
|
||||
alarmInfo.ServerInfo = fmt.Sprintf("%s (%s)", hostname, ipAddr)
|
||||
}
|
||||
|
||||
// 确保时间戳已设置
|
||||
if alarmInfo.CreatedAt.IsZero() {
|
||||
alarmInfo.CreatedAt = time.Now()
|
||||
}
|
||||
if alarmInfo.UpdatedAt.IsZero() {
|
||||
alarmInfo.UpdatedAt = time.Now()
|
||||
}
|
||||
|
||||
// 将AlarmInfo序列化为JSON
|
||||
jsonData, err := json.Marshal(alarmInfo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("marshal AlarmInfo to json: %w", err)
|
||||
}
|
||||
|
||||
t.jsonData = jsonData
|
||||
t.alarm = alarmInfo
|
||||
}
|
||||
|
||||
t.SetRetry(t.conf.RetryTimes, t.conf.RetryInterval)
|
||||
@@ -404,6 +414,20 @@ func (t *WebHookTask) Start() error {
|
||||
}
|
||||
|
||||
func (t *WebHookTask) Go() error {
|
||||
// 检查是否需要保存告警到数据库
|
||||
var dbID uint
|
||||
if t.conf.SaveAlarm && t.plugin.DB != nil {
|
||||
// 默认 IsSent 为 false
|
||||
t.alarm.IsSent = false
|
||||
if err := t.plugin.DB.Create(&t.alarm).Error; err != nil {
|
||||
t.plugin.Error("保存告警到数据库失败", "error", err)
|
||||
} else {
|
||||
dbID = t.alarm.ID
|
||||
t.plugin.Info(""+
|
||||
"", "id", dbID)
|
||||
}
|
||||
}
|
||||
|
||||
req, err := http.NewRequest(t.conf.Method, t.conf.URL, bytes.NewBuffer(t.jsonData))
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -420,28 +444,38 @@ func (t *WebHookTask) Go() error {
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
t.plugin.Error("webhook request failed", "error", err)
|
||||
t.plugin.Error("webhook请求失败", "error", err)
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// 如果发送成功且已保存到数据库,则更新IsSent字段为true
|
||||
if resp.StatusCode >= 200 && resp.StatusCode < 300 && t.conf.SaveAlarm && t.plugin.DB != nil && dbID > 0 {
|
||||
t.alarm.IsSent = true
|
||||
if err := t.plugin.DB.Model(&AlarmInfo{}).Where("id = ?", dbID).Update("is_sent", true).Error; err != nil {
|
||||
t.plugin.Error("更新告警发送状态失败", "error", err)
|
||||
} else {
|
||||
t.plugin.Info("告警发送状态已更新", "id", dbID, "is_sent", true)
|
||||
}
|
||||
return task.ErrTaskComplete
|
||||
}
|
||||
|
||||
if resp.StatusCode >= 200 && resp.StatusCode < 300 {
|
||||
return task.ErrTaskComplete
|
||||
}
|
||||
|
||||
err = fmt.Errorf("webhook request failed with status: %d", resp.StatusCode)
|
||||
t.plugin.Error("webhook response error", "status", resp.StatusCode)
|
||||
err = fmt.Errorf("webhook请求失败,状态码:%d", resp.StatusCode)
|
||||
t.plugin.Error("webhook响应错误", "状态码", resp.StatusCode)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *Plugin) SendWebhook(hookType config.HookType, data any) *task.Task {
|
||||
func (p *Plugin) SendWebhook(conf config.Webhook, data any) *task.Task {
|
||||
webhookTask := &WebHookTask{
|
||||
plugin: p,
|
||||
hookType: hookType,
|
||||
conf: p.config.Hook[hookType],
|
||||
data: data,
|
||||
plugin: p,
|
||||
conf: conf,
|
||||
data: data,
|
||||
}
|
||||
return p.AddTask(webhookTask)
|
||||
return webHookQueueTask.AddTask(webhookTask)
|
||||
}
|
||||
|
||||
// TODO: use alias stream
|
||||
@@ -514,7 +548,11 @@ func (p *Plugin) OnSubscribe(streamPath string, args url.Values) {
|
||||
if p.Meta.NewPuller != nil && reg.MatchString(streamPath) {
|
||||
conf.Args = config.HTTPValues(args)
|
||||
conf.URL = reg.Replace(streamPath, conf.URL)
|
||||
p.handler.Pull(streamPath, conf, nil)
|
||||
if job, err := p.handler.Pull(streamPath, conf, nil); err == nil {
|
||||
if w, ok := p.Server.Waiting.Get(streamPath); ok {
|
||||
job.Progress = &w.Progress
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -536,8 +574,19 @@ func (p *Plugin) OnSubscribe(streamPath string, args url.Values) {
|
||||
// }
|
||||
//}
|
||||
}
|
||||
|
||||
func (p *Plugin) PublishWithConfig(ctx context.Context, streamPath string, conf config.Publish) (publisher *Publisher, err error) {
|
||||
publisher = createPublisher(p, streamPath, conf)
|
||||
publisher = &Publisher{Publish: conf}
|
||||
publisher.Type = conf.PubType
|
||||
publisher.ID = task.GetNextTaskID()
|
||||
publisher.Plugin = p
|
||||
if conf.PublishTimeout > 0 {
|
||||
publisher.TimeoutTimer = time.NewTimer(conf.PublishTimeout)
|
||||
} else {
|
||||
publisher.TimeoutTimer = time.NewTimer(time.Hour * 24 * 365)
|
||||
}
|
||||
publisher.Logger = p.Logger.With("streamPath", streamPath, "pId", publisher.ID)
|
||||
publisher.Init(streamPath, &publisher.Publish)
|
||||
if p.config.EnableAuth && publisher.Type == PublishTypeServer {
|
||||
onAuthPub := p.Meta.OnAuthPub
|
||||
if onAuthPub == nil {
|
||||
@@ -555,35 +604,40 @@ func (p *Plugin) PublishWithConfig(ctx context.Context, streamPath string, conf
|
||||
}
|
||||
}
|
||||
}
|
||||
err = p.Server.Streams.AddTask(publisher, ctx).WaitStarted()
|
||||
if err == nil {
|
||||
if sender := p.getHookSender(config.HookOnPublishEnd); sender != nil {
|
||||
publisher.OnDispose(func() {
|
||||
webhookData := map[string]interface{}{
|
||||
"event": config.HookOnPublishEnd,
|
||||
"streamPath": publisher.StreamPath,
|
||||
"publishId": publisher.ID,
|
||||
"reason": publisher.StopReason().Error(),
|
||||
"timestamp": time.Now().Unix(),
|
||||
}
|
||||
sender(config.HookOnPublishEnd, webhookData)
|
||||
})
|
||||
}
|
||||
if sender := p.getHookSender(config.HookOnPublishStart); sender != nil {
|
||||
webhookData := map[string]interface{}{
|
||||
"event": config.HookOnPublishStart,
|
||||
"streamPath": publisher.StreamPath,
|
||||
"args": publisher.Args,
|
||||
"publishId": publisher.ID,
|
||||
"remoteAddr": publisher.RemoteAddr,
|
||||
"type": publisher.Type,
|
||||
"pluginName": p.Meta.Name,
|
||||
"timestamp": time.Now().Unix(),
|
||||
for {
|
||||
err = p.Server.Streams.Add(publisher, ctx).WaitStarted()
|
||||
if err == nil {
|
||||
if sender, webhook := p.getHookSender(config.HookOnPublishEnd); sender != nil {
|
||||
publisher.OnDispose(func() {
|
||||
alarmInfo := AlarmInfo{
|
||||
AlarmName: string(config.HookOnPublishEnd),
|
||||
AlarmDesc: publisher.StopReason().Error(),
|
||||
AlarmType: config.AlarmPublishOffline,
|
||||
StreamPath: publisher.StreamPath,
|
||||
}
|
||||
sender(webhook, alarmInfo)
|
||||
})
|
||||
}
|
||||
sender(config.HookOnPublishStart, webhookData)
|
||||
if sender, webhook := p.getHookSender(config.HookOnPublishStart); sender != nil {
|
||||
alarmInfo := AlarmInfo{
|
||||
AlarmName: string(config.HookOnPublishStart),
|
||||
AlarmType: config.AlarmPublishRecover,
|
||||
StreamPath: publisher.StreamPath,
|
||||
}
|
||||
sender(webhook, alarmInfo)
|
||||
}
|
||||
return
|
||||
} else if oldStream := new(task.ExistTaskError); errors.As(err, oldStream) {
|
||||
if conf.KickExist {
|
||||
publisher.takeOver(oldStream.Task.(*Publisher))
|
||||
oldStream.Task.WaitStopped()
|
||||
} else {
|
||||
return nil, ErrStreamExist
|
||||
}
|
||||
} else {
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Plugin) Publish(ctx context.Context, streamPath string) (publisher *Publisher, err error) {
|
||||
@@ -621,33 +675,24 @@ func (p *Plugin) SubscribeWithConfig(ctx context.Context, streamPath string, con
|
||||
}
|
||||
}
|
||||
if err == nil {
|
||||
if sender := p.getHookSender(config.HookOnSubscribeEnd); sender != nil {
|
||||
if sender, webhook := p.getHookSender(config.HookOnSubscribeEnd); sender != nil {
|
||||
subscriber.OnDispose(func() {
|
||||
webhookData := map[string]interface{}{
|
||||
"event": config.HookOnSubscribeEnd,
|
||||
"streamPath": subscriber.StreamPath,
|
||||
"subscriberId": subscriber.ID,
|
||||
"reason": subscriber.StopReason().Error(),
|
||||
"timestamp": time.Now().Unix(),
|
||||
alarmInfo := AlarmInfo{
|
||||
AlarmName: string(config.HookOnSubscribeEnd),
|
||||
AlarmDesc: subscriber.StopReason().Error(),
|
||||
AlarmType: config.AlarmSubscribeOffline,
|
||||
StreamPath: subscriber.StreamPath,
|
||||
}
|
||||
if subscriber.Publisher != nil {
|
||||
webhookData["publishId"] = subscriber.Publisher.ID
|
||||
}
|
||||
sender(config.HookOnSubscribeEnd, webhookData)
|
||||
sender(webhook, alarmInfo)
|
||||
})
|
||||
}
|
||||
if sender := p.getHookSender(config.HookOnSubscribeStart); sender != nil {
|
||||
webhookData := map[string]interface{}{
|
||||
"event": config.HookOnSubscribeStart,
|
||||
"streamPath": subscriber.StreamPath,
|
||||
"publishId": subscriber.Publisher.ID,
|
||||
"subscriberId": subscriber.ID,
|
||||
"remoteAddr": subscriber.RemoteAddr,
|
||||
"type": subscriber.Type,
|
||||
"args": subscriber.Args,
|
||||
"timestamp": time.Now().Unix(),
|
||||
if sender, webhook := p.getHookSender(config.HookOnSubscribeStart); sender != nil {
|
||||
alarmInfo := AlarmInfo{
|
||||
AlarmName: string(config.HookOnSubscribeStart),
|
||||
AlarmType: config.AlarmSubscribeRecover,
|
||||
StreamPath: subscriber.StreamPath,
|
||||
}
|
||||
sender(config.HookOnSubscribeStart, webhookData)
|
||||
sender(webhook, alarmInfo)
|
||||
}
|
||||
}
|
||||
return
|
||||
@@ -657,12 +702,14 @@ func (p *Plugin) Subscribe(ctx context.Context, streamPath string) (subscriber *
|
||||
return p.SubscribeWithConfig(ctx, streamPath, p.config.Subscribe)
|
||||
}
|
||||
|
||||
func (p *Plugin) Pull(streamPath string, conf config.Pull, pubConf *config.Publish) {
|
||||
func (p *Plugin) Pull(streamPath string, conf config.Pull, pubConf *config.Publish) (job *PullJob, err error) {
|
||||
puller := p.Meta.NewPuller(conf)
|
||||
if puller == nil {
|
||||
return
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
puller.GetPullJob().Init(puller, p, streamPath, conf, pubConf)
|
||||
job = puller.GetPullJob()
|
||||
job.Init(puller, p, streamPath, conf, pubConf)
|
||||
return
|
||||
}
|
||||
|
||||
func (p *Plugin) Push(streamPath string, conf config.Push, subConf *config.Subscribe) {
|
||||
@@ -673,14 +720,13 @@ func (p *Plugin) Push(streamPath string, conf config.Push, subConf *config.Subsc
|
||||
func (p *Plugin) Record(pub *Publisher, conf config.Record, subConf *config.Subscribe) *RecordJob {
|
||||
recorder := p.Meta.NewRecorder(conf)
|
||||
job := recorder.GetRecordJob().Init(recorder, p, pub.StreamPath, conf, subConf)
|
||||
job.Depend(pub)
|
||||
pub.Using(job)
|
||||
return job
|
||||
}
|
||||
|
||||
func (p *Plugin) Transform(pub *Publisher, conf config.Transform) {
|
||||
transformer := p.Meta.NewTransformer()
|
||||
job := transformer.GetTransformJob().Init(transformer, p, pub, conf)
|
||||
job.Depend(pub)
|
||||
pub.Using(transformer.GetTransformJob().Init(transformer, p, pub, conf))
|
||||
}
|
||||
|
||||
func (p *Plugin) registerHandler(handlers map[string]http.HandlerFunc) {
|
||||
@@ -760,13 +806,21 @@ func (p *Plugin) handle(pattern string, handler http.Handler) {
|
||||
p.Server.apiList = append(p.Server.apiList, pattern)
|
||||
}
|
||||
|
||||
func (p *Plugin) getHookSender(hookType config.HookType) (sender func(hookType config.HookType, data any) *task.Task) {
|
||||
func (p *Plugin) getHookSender(hookType config.HookType) (sender func(webhook config.Webhook, data any) *task.Task, conf config.Webhook) {
|
||||
if p.config.Hook != nil {
|
||||
if _, ok := p.config.Hook[hookType]; ok {
|
||||
sender = p.SendWebhook
|
||||
conf = p.config.Hook[hookType]
|
||||
} else if _, ok := p.config.Hook[config.HookDefault]; ok {
|
||||
sender = p.SendWebhook
|
||||
conf = p.config.Hook[config.HookDefault]
|
||||
} else if p.Server.config.Hook != nil {
|
||||
if _, ok := p.Server.config.Hook[hookType]; ok {
|
||||
conf = p.config.Hook[hookType]
|
||||
sender = p.Server.SendWebhook
|
||||
} else if _, ok := p.Server.config.Hook[config.HookDefault]; ok {
|
||||
sender = p.Server.SendWebhook
|
||||
conf = p.config.Hook[config.HookDefault]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -783,19 +837,25 @@ func (t *ServerKeepAliveTask) GetTickInterval() time.Duration {
|
||||
}
|
||||
|
||||
func (t *ServerKeepAliveTask) Tick(now any) {
|
||||
sender := t.plugin.getHookSender(config.HookOnServerKeepAlive)
|
||||
sender, webhook := t.plugin.getHookSender(config.HookOnServerKeepAlive)
|
||||
if sender == nil {
|
||||
return
|
||||
}
|
||||
s := t.plugin.Server
|
||||
webhookData := map[string]interface{}{
|
||||
"event": config.HookOnServerKeepAlive,
|
||||
"timestamp": time.Now().Unix(),
|
||||
"streams": s.Streams.Length,
|
||||
"subscribers": s.Subscribers.Length,
|
||||
"publisherCount": s.Streams.Length,
|
||||
"subscriberCount": s.Subscribers.Length,
|
||||
"uptime": time.Since(s.StartTime).Seconds(),
|
||||
//s := t.plugin.Server
|
||||
alarmInfo := AlarmInfo{
|
||||
AlarmName: string(config.HookOnServerKeepAlive),
|
||||
AlarmType: config.AlarmKeepAliveOnline,
|
||||
StreamPath: "",
|
||||
}
|
||||
sender(config.HookOnServerKeepAlive, webhookData)
|
||||
sender(webhook, alarmInfo)
|
||||
//webhookData := map[string]interface{}{
|
||||
// "event": config.HookOnServerKeepAlive,
|
||||
// "timestamp": time.Now().Unix(),
|
||||
// "streams": s.Streams.Length,
|
||||
// "subscribers": s.Subscribers.Length,
|
||||
// "publisherCount": s.Streams.Length,
|
||||
// "subscriberCount": s.Subscribers.Length,
|
||||
// "uptime": time.Since(s.StartTime).Seconds(),
|
||||
//}
|
||||
//sender(webhook, webhookData)
|
||||
}
|
||||
|
347
plugin/README.md
347
plugin/README.md
@@ -6,6 +6,12 @@
|
||||
- Visual Studio Code
|
||||
- Goland
|
||||
- Cursor
|
||||
- CodeBuddy
|
||||
- Trae
|
||||
- Qoder
|
||||
- Claude Code
|
||||
- Kiro
|
||||
- Windsurf
|
||||
|
||||
### Install gRPC
|
||||
```shell
|
||||
@@ -53,14 +59,16 @@ Example:
|
||||
const defaultConfig = m7s.DefaultYaml(`tcp:
|
||||
listenaddr: :5554`)
|
||||
|
||||
var _ = m7s.InstallPlugin[MyPlugin](defaultConfig)
|
||||
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
|
||||
DefaultYaml: defaultConfig,
|
||||
})
|
||||
```
|
||||
|
||||
## 3. Implement Event Callbacks (Optional)
|
||||
|
||||
### Initialization Callback
|
||||
```go
|
||||
func (config *MyPlugin) OnInit() (err error) {
|
||||
func (config *MyPlugin) Start() (err error) {
|
||||
// Initialize things
|
||||
return
|
||||
}
|
||||
@@ -121,22 +129,25 @@ func (config *MyPlugin) test1(rw http.ResponseWriter, r *http.Request) {
|
||||
Push client needs to implement IPusher interface and pass the creation method to InstallPlugin.
|
||||
```go
|
||||
type Pusher struct {
|
||||
pullCtx m7s.PullJob
|
||||
task.Task
|
||||
pushJob m7s.PushJob
|
||||
}
|
||||
|
||||
func (c *Pusher) GetPullJob() *m7s.PullJob {
|
||||
return &c.pullCtx
|
||||
func (c *Pusher) GetPushJob() *m7s.PushJob {
|
||||
return &c.pushJob
|
||||
}
|
||||
|
||||
func NewPusher(_ config.Push) m7s.IPusher {
|
||||
return &Pusher{}
|
||||
}
|
||||
var _ = m7s.InstallPlugin[MyPlugin](NewPusher)
|
||||
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
|
||||
NewPusher: NewPusher,
|
||||
})
|
||||
```
|
||||
|
||||
### Implement Pull Client
|
||||
Pull client needs to implement IPuller interface and pass the creation method to InstallPlugin.
|
||||
The following Puller inherits from m7s.HTTPFilePuller for basic file and HTTP pulling:
|
||||
The following Puller inherits from m7s.HTTPFilePuller for basic file and HTTP pulling. You need to override the Start method for specific pulling logic:
|
||||
```go
|
||||
type Puller struct {
|
||||
m7s.HTTPFilePuller
|
||||
@@ -145,7 +156,9 @@ type Puller struct {
|
||||
func NewPuller(_ config.Pull) m7s.IPuller {
|
||||
return &Puller{}
|
||||
}
|
||||
var _ = m7s.InstallPlugin[MyPlugin](NewPuller)
|
||||
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
|
||||
NewPuller: NewPuller,
|
||||
})
|
||||
```
|
||||
|
||||
## 6. Implement gRPC Service
|
||||
@@ -226,7 +239,10 @@ import (
|
||||
"m7s.live/v5/plugin/myplugin/pb"
|
||||
)
|
||||
|
||||
var _ = m7s.InstallPlugin[MyPlugin](&pb.Api_ServiceDesc, pb.RegisterApiHandler)
|
||||
var _ = m7s.InstallPlugin[MyPlugin](m7s.PluginMeta{
|
||||
ServiceDesc: &pb.Api_ServiceDesc,
|
||||
RegisterGRPCHandler: pb.RegisterApiHandler,
|
||||
})
|
||||
|
||||
type MyPlugin struct {
|
||||
pb.UnimplementedApiServer
|
||||
@@ -247,43 +263,72 @@ Accessible via GET request to `/myplugin/api/test1`
|
||||
## 7. Publishing Streams
|
||||
|
||||
```go
|
||||
publisher, err = p.Publish(streamPath, connectInfo)
|
||||
publisher, err := p.Publish(ctx, streamPath)
|
||||
```
|
||||
The last two parameters are optional.
|
||||
The `ctx` parameter is required, `streamPath` parameter is required.
|
||||
|
||||
After obtaining the `publisher`, you can publish audio/video data using `publisher.WriteAudio` and `publisher.WriteVideo`.
|
||||
### Writing Audio/Video Data
|
||||
|
||||
The old `WriteAudio` and `WriteVideo` methods have been replaced with a more structured writer pattern using generics:
|
||||
|
||||
#### **Create Writers**
|
||||
```go
|
||||
// Audio writer
|
||||
audioWriter := m7s.NewPublishAudioWriter[*AudioFrame](publisher, allocator)
|
||||
|
||||
// Video writer
|
||||
videoWriter := m7s.NewPublishVideoWriter[*VideoFrame](publisher, allocator)
|
||||
|
||||
// Combined audio/video writer
|
||||
writer := m7s.NewPublisherWriter[*AudioFrame, *VideoFrame](publisher, allocator)
|
||||
```
|
||||
|
||||
#### **Write Frames**
|
||||
```go
|
||||
// Set timestamp and write audio frame
|
||||
writer.AudioFrame.SetTS32(timestamp)
|
||||
err := writer.NextAudio()
|
||||
|
||||
// Set timestamp and write video frame
|
||||
writer.VideoFrame.SetTS32(timestamp)
|
||||
err := writer.NextVideo()
|
||||
```
|
||||
|
||||
#### **Write Custom Data**
|
||||
```go
|
||||
// For custom data frames
|
||||
err := publisher.WriteData(data IDataFrame)
|
||||
```
|
||||
|
||||
### Define Audio/Video Data
|
||||
If existing audio/video data formats don't meet your needs, you can define custom formats by implementing this interface:
|
||||
```go
|
||||
IAVFrame interface {
|
||||
GetAllocator() *util.ScalableMemoryAllocator
|
||||
SetAllocator(*util.ScalableMemoryAllocator)
|
||||
Parse(*AVTrack) error
|
||||
ConvertCtx(codec.ICodecCtx) (codec.ICodecCtx, IAVFrame, error)
|
||||
Demux(codec.ICodecCtx) (any, error)
|
||||
Mux(codec.ICodecCtx, *AVFrame)
|
||||
GetTimestamp() time.Duration
|
||||
GetCTS() time.Duration
|
||||
GetSample() *Sample
|
||||
GetSize() int
|
||||
CheckCodecChange() error
|
||||
Demux() error // demux to raw format
|
||||
Mux(*Sample) error // mux from origin format
|
||||
Recycle()
|
||||
String() string
|
||||
Dump(byte, io.Writer)
|
||||
}
|
||||
```
|
||||
> Define separate types for audio and video
|
||||
|
||||
- GetAllocator/SetAllocator: Automatically implemented when embedding RecyclableMemory
|
||||
- Parse: Identifies key frames, sequence frames, and other important information
|
||||
- ConvertCtx: Called when protocol conversion is needed
|
||||
- Demux: Called when audio/video data needs to be demuxed
|
||||
- Mux: Called when audio/video data needs to be muxed
|
||||
- Recycle: Automatically implemented when embedding RecyclableMemory
|
||||
- String: Prints audio/video data information
|
||||
The methods serve the following purposes:
|
||||
- GetSample: Gets the Sample object containing codec context and raw data
|
||||
- GetSize: Gets the size of audio/video data
|
||||
- GetTimestamp: Gets the timestamp in nanoseconds
|
||||
- GetCTS: Gets the Composition Time Stamp in nanoseconds (PTS = DTS+CTS)
|
||||
- Dump: Prints binary audio/video data
|
||||
- CheckCodecChange: Checks if the codec has changed
|
||||
- Demux: Demuxes audio/video data to raw format for use by other formats
|
||||
- Mux: Muxes from original format to custom audio/video data format
|
||||
- Recycle: Recycles resources, automatically implemented when embedding RecyclableMemory
|
||||
- String: Prints audio/video data information
|
||||
|
||||
### Memory Management
|
||||
The new pattern includes built-in memory management:
|
||||
- `util.ScalableMemoryAllocator` - For efficient memory allocation
|
||||
- Frame recycling through `Recycle()` method
|
||||
- Automatic memory pool management
|
||||
|
||||
## 8. Subscribing to Streams
|
||||
```go
|
||||
@@ -293,7 +338,245 @@ go m7s.PlayBlock(suber, handleAudio, handleVideo)
|
||||
```
|
||||
Note that handleAudio and handleVideo are callback functions you need to implement. They take an audio/video format type as input and return an error. If the error is not nil, the subscription is terminated.
|
||||
|
||||
## 9. Prometheus Integration
|
||||
## 9. Working with H26xFrame for Raw Stream Data
|
||||
|
||||
### 9.1 Understanding H26xFrame Structure
|
||||
|
||||
The `H26xFrame` struct is used for handling H.264/H.265 raw stream data:
|
||||
|
||||
```go
|
||||
type H26xFrame struct {
|
||||
pkg.Sample
|
||||
}
|
||||
```
|
||||
|
||||
Key characteristics:
|
||||
- Inherits from `pkg.Sample` - contains codec context, memory management, and timing
|
||||
- Uses `Raw.(*pkg.Nalus)` to store NALU (Network Abstraction Layer Unit) data
|
||||
- Supports both H.264 (AVC) and H.265 (HEVC) formats
|
||||
- Uses efficient memory allocators for zero-copy operations
|
||||
|
||||
### 9.2 Creating H26xFrame for Publishing
|
||||
|
||||
```go
|
||||
import (
|
||||
"m7s.live/v5"
|
||||
"m7s.live/v5/pkg/format"
|
||||
"m7s.live/v5/pkg/util"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Create publisher with H26xFrame support
|
||||
func publishRawH264Stream(streamPath string, h264Frames [][]byte) error {
|
||||
// Get publisher
|
||||
publisher, err := p.Publish(streamPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Create memory allocator
|
||||
allocator := util.NewScalableMemoryAllocator(1 << util.MinPowerOf2)
|
||||
defer allocator.Recycle()
|
||||
|
||||
// Create writer for H26xFrame
|
||||
writer := m7s.NewPublisherWriter[*format.RawAudio, *format.H26xFrame](publisher, allocator)
|
||||
|
||||
// Set up H264 codec context
|
||||
writer.VideoFrame.ICodecCtx = &format.H264{}
|
||||
|
||||
// Publish multiple frames
|
||||
// Note: This is a demonstration of multi-frame writing. In actual scenarios,
|
||||
// frames should be written gradually as they are received from the video source.
|
||||
startTime := time.Now()
|
||||
for i, frameData := range h264Frames {
|
||||
// Create H26xFrame for each frame
|
||||
frame := writer.VideoFrame
|
||||
|
||||
// Set timestamp with proper interval
|
||||
frame.Timestamp = startTime.Add(time.Duration(i) * time.Second / 30) // 30 FPS
|
||||
|
||||
// Write NALU data
|
||||
nalus := frame.GetNalus()
|
||||
// if frameData is a single NALU, otherwise need to loop
|
||||
p := nalus.GetNextPointer()
|
||||
mem := frame.NextN(len(frameData))
|
||||
copy(mem, frameData)
|
||||
p.PushOne(mem)
|
||||
// Publish frame
|
||||
if err := writer.NextVideo(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Example usage with continuous streaming
|
||||
func continuousH264Publishing(streamPath string, frameSource <-chan []byte, stopChan <-chan struct{}) error {
|
||||
// Get publisher
|
||||
publisher, err := p.Publish(streamPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer publisher.Dispose()
|
||||
|
||||
// Create memory allocator
|
||||
allocator := util.NewScalableMemoryAllocator(1 << util.MinPowerOf2)
|
||||
defer allocator.Recycle()
|
||||
|
||||
// Create writer for H26xFrame
|
||||
writer := m7s.NewPublisherWriter[*format.RawAudio, *format.H26xFrame](publisher, allocator)
|
||||
|
||||
// Set up H264 codec context
|
||||
writer.VideoFrame.ICodecCtx = &format.H264{}
|
||||
|
||||
startTime := time.Now()
|
||||
frameCount := 0
|
||||
|
||||
for {
|
||||
select {
|
||||
case frameData := <-frameSource:
|
||||
// Create H26xFrame for each frame
|
||||
frame := writer.VideoFrame
|
||||
|
||||
// Set timestamp with proper interval
|
||||
frame.Timestamp = startTime.Add(time.Duration(frameCount) * time.Second / 30) // 30 FPS
|
||||
|
||||
// Write NALU data
|
||||
nalus := frame.GetNalus()
|
||||
mem := frame.NextN(len(frameData))
|
||||
copy(mem, frameData)
|
||||
|
||||
// Publish frame
|
||||
if err := writer.NextVideo(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
frameCount++
|
||||
|
||||
case <-stopChan:
|
||||
// Stop publishing
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 9.3 Processing H26xFrame (Transform Pattern)
|
||||
|
||||
```go
|
||||
type MyTransform struct {
|
||||
m7s.DefaultTransformer
|
||||
Writer *m7s.PublishWriter[*format.RawAudio, *format.H26xFrame]
|
||||
}
|
||||
|
||||
func (t *MyTransform) Go() {
|
||||
defer t.Dispose()
|
||||
|
||||
for video := range t.Video {
|
||||
if err := t.processH26xFrame(video); err != nil {
|
||||
t.Error("process frame failed", "error", err)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *MyTransform) processH26xFrame(video *format.H26xFrame) error {
|
||||
// Copy frame metadata
|
||||
copyVideo := t.Writer.VideoFrame
|
||||
copyVideo.ICodecCtx = video.ICodecCtx
|
||||
*copyVideo.BaseSample = *video.BaseSample
|
||||
nalus := copyVideo.GetNalus()
|
||||
|
||||
// Process each NALU unit
|
||||
for nalu := range video.Raw.(*pkg.Nalus).RangePoint {
|
||||
p := nalus.GetNextPointer()
|
||||
mem := copyVideo.NextN(nalu.Size)
|
||||
nalu.CopyTo(mem)
|
||||
|
||||
// Example: Filter or modify specific NALU types
|
||||
if video.FourCC() == codec.FourCC_H264 {
|
||||
switch codec.ParseH264NALUType(mem[0]) {
|
||||
case codec.NALU_IDR_Picture, codec.NALU_Non_IDR_Picture:
|
||||
// Process video frame NALUs
|
||||
// Example: Apply transformations, filters, etc.
|
||||
case codec.NALU_SPS, codec.NALU_PPS:
|
||||
// Process parameter set NALUs
|
||||
}
|
||||
} else if video.FourCC() == codec.FourCC_H265 {
|
||||
switch codec.ParseH265NALUType(mem[0]) {
|
||||
case h265parser.NAL_UNIT_CODED_SLICE_IDR_W_RADL:
|
||||
// Process H.265 IDR frames
|
||||
}
|
||||
}
|
||||
|
||||
// Push processed NALU
|
||||
p.PushOne(mem)
|
||||
}
|
||||
|
||||
return t.Writer.NextVideo()
|
||||
}
|
||||
```
|
||||
|
||||
### 9.4 Common NALU Types for H.264/H.265
|
||||
|
||||
#### H.264 NALU Types
|
||||
```go
|
||||
const (
|
||||
NALU_Non_IDR_Picture = 1 // Non-IDR picture (P-frames)
|
||||
NALU_IDR_Picture = 5 // IDR picture (I-frames)
|
||||
NALU_SEI = 6 // Supplemental enhancement information
|
||||
NALU_SPS = 7 // Sequence parameter set
|
||||
NALU_PPS = 8 // Picture parameter set
|
||||
)
|
||||
|
||||
// Parse NALU type from first byte
|
||||
naluType := codec.ParseH264NALUType(mem[0])
|
||||
```
|
||||
|
||||
#### H.265 NALU Types
|
||||
```go
|
||||
// Parse H.265 NALU type from first byte
|
||||
naluType := codec.ParseH265NALUType(mem[0])
|
||||
```
|
||||
|
||||
### 9.5 Memory Management Best Practices
|
||||
|
||||
```go
|
||||
// Use memory allocators for efficient operations
|
||||
allocator := util.NewScalableMemoryAllocator(1 << 20) // 1MB initial size
|
||||
defer allocator.Recycle()
|
||||
|
||||
// When processing multiple frames, reuse the same allocator
|
||||
writer := m7s.NewPublisherWriter[*format.RawAudio, *format.H26xFrame](publisher, allocator)
|
||||
```
|
||||
|
||||
### 9.6 Error Handling and Validation
|
||||
|
||||
```go
|
||||
func processFrame(video *format.H26xFrame) error {
|
||||
// Check codec changes
|
||||
if err := video.CheckCodecChange(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate frame data
|
||||
if video.Raw == nil {
|
||||
return fmt.Errorf("empty frame data")
|
||||
}
|
||||
|
||||
// Process NALUs safely
|
||||
nalus, ok := video.Raw.(*pkg.Nalus)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid NALUs format")
|
||||
}
|
||||
|
||||
// Process frame...
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
## 10. Prometheus Integration
|
||||
Just implement the Collector interface, and the system will automatically collect metrics from all plugins:
|
||||
```go
|
||||
func (p *MyPlugin) Describe(ch chan<- *prometheus.Desc) {
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user