recorder: limit maximum part size (#4674) (#4760)

This prevents RAM exhaustion.
Alessandro Ros
2025-07-20 19:16:33 +02:00
committed by GitHub
parent bc95f6240b
commit 9ddcbf5c97
12 changed files with 29 additions and 4 deletions
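
In the fMP4 recorder, a part is assembled in RAM and only flushed to disk when it is closed, so a part that keeps growing (for example, if timestamps stall and the part never reaches its configured duration) can consume unbounded memory. The new recordMaxPartSize option caps that buffer. Below is a minimal standalone sketch of the guard; it is not part of the diff, and the names are condensed from the formatFMP4Part.write change shown further down.

package main

import (
	"errors"
	"fmt"
)

// part is a simplified stand-in for the recorder's formatFMP4Part: samples
// are buffered in RAM and only written to disk when the part is closed.
type part struct {
	size        uint64 // bytes buffered so far in this part
	maxPartSize uint64 // from recordMaxPartSize, e.g. 50 * 1024 * 1024
}

var errMaxPartSize = errors.New("reached maximum part size")

// write mirrors the check added by this commit: a sample that would push the
// in-memory part past the configured limit is rejected instead of buffered.
func (p *part) write(payload []byte) error {
	size := uint64(len(payload))
	if p.size+size > p.maxPartSize {
		return errMaxPartSize
	}
	p.size += size
	// ...the real code appends the sample to the matching fmp4.PartTrack here...
	return nil
}

func main() {
	p := &part{maxPartSize: 50 * 1024 * 1024}
	fmt.Println(p.write(make([]byte, 1024)))         // <nil>
	fmt.Println(p.write(make([]byte, 60*1024*1024))) // reached maximum part size
}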

View File

@@ -352,6 +352,8 @@ components:
type: string
recordPartDuration:
type: string
recordMaxPartSize:
type: string
recordSegmentDuration:
type: string
recordDeleteAfter:

View File

@@ -55,6 +55,7 @@ func TestConfFromFile(t *testing.T) {
RecordPath: "./recordings/%path/%Y-%m-%d_%H-%M-%S-%f",
RecordFormat: RecordFormatFMP4,
RecordPartDuration: Duration(1 * time.Second),
RecordMaxPartSize: 50 * 1024 * 1024,
RecordSegmentDuration: 3600000000000,
RecordDeleteAfter: 86400000000000,
OverridePublisher: true,

View File

@@ -128,6 +128,7 @@ type Path struct {
RecordPath string `json:"recordPath"`
RecordFormat RecordFormat `json:"recordFormat"`
RecordPartDuration Duration `json:"recordPartDuration"`
RecordMaxPartSize StringSize `json:"recordMaxPartSize"`
RecordSegmentDuration Duration `json:"recordSegmentDuration"`
RecordDeleteAfter Duration `json:"recordDeleteAfter"`
@@ -227,6 +228,7 @@ func (pconf *Path) setDefaults() {
pconf.RecordPath = "./recordings/%path/%Y-%m-%d_%H-%M-%S-%f"
pconf.RecordFormat = RecordFormatFMP4
pconf.RecordPartDuration = Duration(1 * time.Second)
pconf.RecordMaxPartSize = 50 * 1024 * 1024
pconf.RecordSegmentDuration = 3600 * Duration(time.Second)
pconf.RecordDeleteAfter = 24 * 3600 * Duration(time.Second)

View File

@@ -775,6 +775,7 @@ func (pa *path) startRecording() {
PathFormat: pa.conf.RecordPath,
Format: pa.conf.RecordFormat,
PartDuration: time.Duration(pa.conf.RecordPartDuration),
MaxPartSize: pa.conf.RecordMaxPartSize,
SegmentDuration: time.Duration(pa.conf.RecordSegmentDuration),
PathName: pa.name,
Stream: pa.stream,

View File

@@ -1,6 +1,7 @@
package recorder
import (
"fmt"
"io"
"os"
"path/filepath"
@@ -46,6 +47,7 @@ type formatFMP4Part struct {
startDTS time.Duration
partTracks map[*formatFMP4Track]*fmp4.PartTrack
size uint64
endDTS time.Duration
}
@@ -83,6 +85,12 @@ func (p *formatFMP4Part) close() error {
}
func (p *formatFMP4Part) write(track *formatFMP4Track, sample *sample, dts time.Duration) error {
size := uint64(len(sample.Payload))
if (p.size + size) > uint64(p.s.f.ri.maxPartSize) {
return fmt.Errorf("reached maximum part size")
}
p.size += size
partTrack, ok := p.partTracks[track]
if !ok {
partTrack = &fmp4.PartTrack{

View File

@@ -20,7 +20,7 @@ import (
)
const (
-mpegtsMaxBufferSize = 64 * 1024
+mpegtsBufferSize = 64 * 1024
)
func multiplyAndDivide(v, m, d int64) int64 {
@@ -419,7 +419,7 @@ func (f *formatMPEGTS) initialize() bool {
}
f.dw = &dynamicWriter{}
-f.bw = bufio.NewWriterSize(f.dw, mpegtsMaxBufferSize)
+f.bw = bufio.NewWriterSize(f.dw, mpegtsBufferSize)
f.mw = &mpegts.Writer{W: f.bw, Tracks: tracks}
err := f.mw.Initialize()

View File

@@ -20,6 +20,7 @@ type Recorder struct {
PathFormat string
Format conf.RecordFormat
PartDuration time.Duration
MaxPartSize conf.StringSize
SegmentDuration time.Duration
PathName string
Stream *stream.Stream
@@ -56,6 +57,7 @@ func (r *Recorder) Initialize() {
pathFormat: r.PathFormat,
format: r.Format,
partDuration: r.PartDuration,
maxPartSize: r.MaxPartSize,
segmentDuration: r.SegmentDuration,
pathName: r.PathName,
stream: r.Stream,
@@ -102,6 +104,7 @@ func (r *Recorder) run() {
pathFormat: r.PathFormat,
format: r.Format,
partDuration: r.PartDuration,
maxPartSize: r.MaxPartSize,
segmentDuration: r.SegmentDuration,
pathName: r.PathName,
stream: r.Stream,

View File

@@ -22,6 +22,7 @@ type recorderInstance struct {
pathFormat string
format conf.RecordFormat
partDuration time.Duration
maxPartSize conf.StringSize
segmentDuration time.Duration
pathName string
stream *stream.Stream

View File

@@ -165,6 +165,7 @@ func TestRecorder(t *testing.T) {
PathFormat: recordPath,
Format: f,
PartDuration: 100 * time.Millisecond,
MaxPartSize: 50 * 1024 * 1024,
SegmentDuration: 1 * time.Second,
PathName: "mypath",
Stream: strm,
@@ -360,6 +361,7 @@ func TestRecorderFMP4NegativeDTS(t *testing.T) {
PathFormat: recordPath,
Format: conf.RecordFormatFMP4,
PartDuration: 100 * time.Millisecond,
MaxPartSize: 50 * 1024 * 1024,
SegmentDuration: 1 * time.Second,
PathName: "mypath",
Stream: strm,
@@ -465,6 +467,7 @@ func TestRecorderSkipTracksPartial(t *testing.T) {
PathFormat: recordPath,
Format: fo,
PartDuration: 100 * time.Millisecond,
MaxPartSize: 50 * 1024 * 1024,
SegmentDuration: 1 * time.Second,
PathName: "mypath",
Stream: strm,
@@ -526,6 +529,7 @@ func TestRecorderSkipTracksFull(t *testing.T) {
PathFormat: recordPath,
Format: fo,
PartDuration: 100 * time.Millisecond,
MaxPartSize: 50 * 1024 * 1024,
SegmentDuration: 1 * time.Second,
PathName: "mypath",
Stream: strm,
@@ -572,6 +576,7 @@ func TestRecorderFMP4SegmentSwitch(t *testing.T) {
PathFormat: filepath.Join(dir, "%path/%Y-%m-%d_%H-%M-%S-%f"),
Format: conf.RecordFormatFMP4,
PartDuration: 100 * time.Millisecond,
MaxPartSize: 50 * 1024 * 1024,
SegmentDuration: 1 * time.Second,
PathName: "mypath",
Stream: strm,

View File

@@ -83,7 +83,7 @@ func (s *session) initialize() {
s.discardedFrames = &counterdumper.CounterDumper{
OnReport: func(val uint64) {
-s.Log(logger.Warn, "connection is too slow, discarding %d %s",
+s.Log(logger.Warn, "reader is too slow, discarding %d %s",
val,
func() string {
if val == 1 {

View File

@@ -31,7 +31,7 @@ func (w *streamReader) start() {
w.discardedFrames = &counterdumper.CounterDumper{
OnReport: func(val uint64) {
-w.parent.Log(logger.Warn, "connection is too slow, discarding %d %s",
+w.parent.Log(logger.Warn, "reader is too slow, discarding %d %s",
val,
func() string {
if val == 1 {

View File

@@ -492,6 +492,8 @@ pathDefaults:
# When a system failure occurs, the last part gets lost.
# Therefore, the part duration is equal to the RPO (recovery point objective).
recordPartDuration: 1s
# Maximum size of each part. This prevents RAM exhaustion.
recordMaxPartSize: 50M
# Minimum duration of each segment.
recordSegmentDuration: 1h
# Delete segments after this timespan.
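
The YAML default "50M" and the Go default 50 * 1024 * 1024 used in setDefaults and in the tests describe the same value. The following hypothetical helper (an illustration only, not MediaMTX's actual conf.StringSize implementation) shows how such a string could be decoded into a byte count.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseSize is a hypothetical helper illustrating how a value such as "50M"
// can be turned into bytes. The commit only shows the "M" form; the "K" and
// "G" suffixes are assumptions for the sake of the example.
func parseSize(s string) (uint64, error) {
	mult := uint64(1)
	switch {
	case strings.HasSuffix(s, "G"):
		mult, s = 1024*1024*1024, strings.TrimSuffix(s, "G")
	case strings.HasSuffix(s, "M"):
		mult, s = 1024*1024, strings.TrimSuffix(s, "M")
	case strings.HasSuffix(s, "K"):
		mult, s = 1024, strings.TrimSuffix(s, "K")
	}
	n, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		return 0, fmt.Errorf("invalid size %q: %w", s, err)
	}
	return n * mult, nil
}

func main() {
	v, _ := parseSize("50M")
	fmt.Println(v) // 52428800, i.e. 50 * 1024 * 1024, matching the Go default above
}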