Mirror of https://github.com/datarhei/core.git, synced 2025-10-30 19:06:24 +08:00
Compare commits
20 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 5cd8f3426c | |
| | 311c975911 | |
| | 4f292ec0c5 | |
| | 6751346566 | |
| | ea98205bd6 | |
| | 3a6281295c | |
| | e74149eed2 | |
| | fc42f93d26 | |
| | 8a4e5363ac | |
| | b042574e45 | |
| | 0cfe07de85 | |
| | 20fbb9b7bc | |
| | 85a7caffca | |
| | 96353aee3d | |
| | cb0bc494f9 | |
| | 9a49e371e3 | |
| | e4fc61617a | |
| | baa9a80015 | |
| | 1183de560a | |
| | 50e511e6c6 | |

(Author and date columns were not captured by the mirror.)
.github/workflows/build_bundle-rpi.yaml (vendored, 2 changes)
```diff
@@ -62,7 +62,7 @@ jobs:
           build-args: |
             CORE_IMAGE=datarhei/base:${{ env.OS_NAME }}-core-${{ env.OS_VERSION }}-${{ env.CORE_VERSION }}
             FFMPEG_IMAGE=datarhei/base:${{ env.OS_NAME }}-ffmpeg-rpi-${{ env.OS_VERSION }}-${{ env.FFMPEG_VERSION }}
-          platforms: linux/arm/v7,linux/arm64
+          platforms: linux/arm/v7,linux/arm/v6,linux/arm64
           push: true
           tags: |
             datarhei/core:rpi-${{ env.CORE_VERSION }}
```
Bundle and base-image environment files (the individual file names were not preserved in the mirror; the leading comment identifies each):

```diff
@@ -1,5 +1,5 @@
 # CORE ALPINE BASE IMAGE
 OS_NAME=alpine
-OS_VERSION=3.16
-GOLANG_IMAGE=golang:1.19.3-alpine3.16
-CORE_VERSION=16.11.0
+OS_VERSION=3.15
+GOLANG_IMAGE=golang:1.18.6-alpine3.15
+CORE_VERSION=16.10.1
```

```diff
@@ -1,3 +1,3 @@
 # CORE NVIDIA CUDA BUNDLE
-FFMPEG_VERSION=5.1.2
-CUDA_VERSION=11.7.1
+FFMPEG_VERSION=4.4.2
+CUDA_VERSION=11.4.2
```

```diff
@@ -1,2 +1,2 @@
 # CORE BUNDLE
-FFMPEG_VERSION=5.1.2
+FFMPEG_VERSION=4.4.2
```

```diff
@@ -1,2 +1,2 @@
 # CORE RASPBERRY-PI BUNDLE
-FFMPEG_VERSION=5.1.2
+FFMPEG_VERSION=4.4.2
```

```diff
@@ -1,2 +1,2 @@
 # CORE BUNDLE
-FFMPEG_VERSION=5.1.2
+FFMPEG_VERSION=4.4.2
```

```diff
@@ -1,5 +1,5 @@
 # CORE UBUNTU BASE IMAGE
 OS_NAME=ubuntu
 OS_VERSION=20.04
-GOLANG_IMAGE=golang:1.19.3-alpine3.16
-CORE_VERSION=16.11.0
+GOLANG_IMAGE=golang:1.18.6-alpine3.15
+CORE_VERSION=16.10.1
```
.gitignore (vendored, 1 change)
```diff
@@ -2,7 +2,6 @@
 .env
 /core*
 /import*
-/ffmigrate*
 /data/**
 /test/**
 .vscode
```
CHANGELOG.md (17 changes)
```diff
@@ -1,18 +1,5 @@
 # Core
 
-### Core v16.10.1 > v16.11.0
-
-- Add FFmpeg 4.4 to FFmpeg 5.1 migration tool
-- Add alternative SRT streamid
-- Mod bump FFmpeg to v5.1.2 (datarhei/core:tag bundles)
-- Fix crash with custom SSL certificates ([restreamer/#425](https://github.com/datarhei/restreamer/issues/425))
-- Fix proper version handling for config
-- Fix widget session data
-- Fix resetting process stats when process stopped
-- Fix stale FFmpeg process detection for streams with only audio
-- Fix wrong return status code ([#6](https://github.com/datarhei/core/issues/6))
-- Fix use SRT defaults for key material exchange
-
 ### Core v16.10.0 > v16.10.1
 
 - Add email address in TLS config for Let's Encrypt
@@ -33,11 +20,11 @@
 - Fix process cleanup on delete, remove empty directories from disk
 - Fix SRT blocking port on restart (upgrade datarhei/gosrt)
 - Fix RTMP communication (Blackmagic Web Presenter, thx 235 MEDIA)
-- Fix RTMP communication (Blackmagic ATEM Mini, [#385](https://github.com/datarhei/restreamer/issues/385))
+- Fix RTMP communication (Blackmagic ATEM Mini, datarhei/restreamer#385)
 - Fix injecting commit, branch, and build info
 - Fix API metadata endpoints responses
 
-#### Core v16.9.0 > v16.9.1^
+#### Core v16.9.0 > v16.9.1
 
 - Fix v1 import app
 - Fix race condition
```
Dockerfile (12 changes)
```diff
@@ -1,25 +1,23 @@
-ARG GOLANG_IMAGE=golang:1.19.3-alpine3.16
+ARG GOLANG_IMAGE=golang:1.18.4-alpine3.15
 
-ARG BUILD_IMAGE=alpine:3.16
+ARG BUILD_IMAGE=alpine:3.15
 
 FROM $GOLANG_IMAGE as builder
 
 COPY . /dist/core
 
 RUN apk add \
-	git \
-	make && \
+	git \
+	make && \
 	cd /dist/core && \
 	go version && \
 	make release_linux && \
-	make import_linux && \
-	make ffmigrate_linux
+	make import_linux
 
 FROM $BUILD_IMAGE
 
 COPY --from=builder /dist/core/core /core/bin/core
 COPY --from=builder /dist/core/import /core/bin/import
-COPY --from=builder /dist/core/ffmigrate /core/bin/ffmigrate
COPY --from=builder /dist/core/mime.types /core/mime.types
 COPY --from=builder /dist/core/run.sh /core/bin/run.sh
```

(The repeated, visually identical `git`/`make` lines come from the mirror's flattened split view; the underlying change appears to be whitespace-only.)
A second build file (not named in the mirror; from its content, the test image):

```diff
@@ -1,8 +1,8 @@
-FROM golang:1.19.3-alpine3.16
+FROM golang:1.18.3-alpine3.15
 
 RUN apk add alpine-sdk
 
 COPY . /dist/core
 
 RUN cd /dist/core && \
-	go test -coverprofile=coverage.out -covermode=atomic -v ./...
+	go test -coverprofile=coverage.out -covermode=atomic -v ./...
```
Makefile (10 changes)
```diff
@@ -75,14 +75,6 @@ import:
 import_linux:
 	cd app/import && CGO_ENABLED=0 GOOS=linux GOARCH=${OSARCH} go build -o ../../import -ldflags="-s -w"
 
-## ffmigrate: Build ffmpeg migration binary
-ffmigrate:
-	cd app/ffmigrate && CGO_ENABLED=${CGO_ENABLED} GOOS=${GOOS} GOARCH=${GOARCH} go build -o ../../ffmigrate -ldflags="-s -w"
-
-# github workflow workaround
-ffmigrate_linux:
-	cd app/ffmigrate && CGO_ENABLED=0 GOOS=linux GOARCH=${OSARCH} go build -o ../../ffmigrate -ldflags="-s -w"
-
 ## coverage: Generate code coverage analysis
 coverage:
 	go test -race -coverprofile test/cover.out ./...
@@ -104,7 +96,7 @@ release_linux:
 docker:
 	docker build -t core:$(SHORTCOMMIT) .
 
-.PHONY: help init build swagger test vet fmt vulncheck vendor commit coverage lint release import ffmigrate update
+.PHONY: help init build swagger test vet fmt vulncheck vendor commit coverage lint release import update
 
 ## help: Show all commands
 help: Makefile
```
README.md (21 changes)
```diff
@@ -652,16 +652,17 @@ A command is defined as:
 
 Currently supported placeholders are:
 
-| Placeholder | Description | Location |
-| --- | --- | --- |
-| `{diskfs}` | Will be replaced by the provided `CORE_STORAGE_DISK_DIR`. | `options`, `input.address`, `input.options`, `output.address`, `output.options` |
-| `{memfs}` | Will be replaced by the base URL of the MemFS. | `input.address`, `input.options`, `output.address`, `output.options` |
-| `{processid}` | Will be replaced by the ID of the process. | `input.id`, `input.address`, `input.options`, `output.id`, `output.address`, `output.options`, `output.cleanup.pattern` |
-| `{reference}` | Will be replaced by the reference of the process. | `input.id`, `input.address`, `input.options`, `output.id`, `output.address`, `output.options`, `output.cleanup.pattern` |
-| `{inputid}` | Will be replaced by the ID of the input. | `input.address`, `input.options` |
-| `{outputid}` | Will be replaced by the ID of the output. | `output.address`, `output.options`, `output.cleanup.pattern` |
-| `{rtmp}` | Will be replaced by the internal address of the RTMP server. Requires parameter `name` (name of the stream). | `input.address`, `output.address` |
-| `{srt}` | Will be replaced by the internal address of the SRT server. Requires parameter `name` (name of the stream) and `mode` (either `publish` or `request`). | `input.address`, `output.address` |
+| Placeholder | Description | Location |
+| --- | --- | --- |
+| `{diskfs}` or `{fs:disk}` | Will be replaced by the provided `CORE_STORAGE_DISK_DIR`. | `options`, `input.address`, `input.options`, `output.address`, `output.options` |
+| `{memfs}` or `{fs:mem}` | Will be replaced by the base address of the MemFS. | `input.address`, `input.options`, `output.address`, `output.options` |
+| `{fs:*}` | Will be replaced by the base address of the respective filesystem. | See `{memfs}` |
+| `{processid}` | Will be replaced by the ID of the process. | `input.id`, `input.address`, `input.options`, `output.id`, `output.address`, `output.options`, `output.cleanup.pattern` |
+| `{reference}` | Will be replaced by the reference of the process. | `input.id`, `input.address`, `input.options`, `output.id`, `output.address`, `output.options`, `output.cleanup.pattern` |
+| `{inputid}` | Will be replaced by the ID of the input. | `input.address`, `input.options` |
+| `{outputid}` | Will be replaced by the ID of the output. | `output.address`, `output.options`, `output.cleanup.pattern` |
+| `{rtmp}` | Will be replaced by the internal address of the RTMP server. Requires parameter `name` (name of the stream). | `input.address`, `output.address` |
+| `{srt}` | Will be replaced by the internal address of the SRT server. Requires parameter `name` (name of the stream) and `mode` (either `publish` or `request`). | `input.address`, `output.address` |
 
 Before replacing the placeholders in the process config, all references (see below) will be resolved.
```
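As a self-contained illustration of the placeholder mechanism described above (this sketch is not the repository's implementation; the `replacer` type and the base address are invented for the example), registering an `fs:*` template and expanding it in an output address looks roughly like this:

```go
package main

import (
	"fmt"
	"strings"
)

// replacer is a toy stand-in for the core's template replacer: templates are
// registered under a name and every "{name}" occurrence is expanded.
type replacer struct {
	templates map[string]string
}

func (r *replacer) RegisterTemplate(name, tmpl string) {
	r.templates[name] = tmpl
}

// Replace expands every registered "{name}" placeholder in the input string.
func (r *replacer) Replace(in string) string {
	for name, tmpl := range r.templates {
		in = strings.ReplaceAll(in, "{"+name+"}", tmpl)
	}
	return in
}

func main() {
	r := &replacer{templates: map[string]string{}}

	// Analogous to a.replacer.RegisterTemplate("fs:mem", a.memfs.Base())
	// in the app/api/api.go diff below; the base address here is made up.
	r.RegisterTemplate("fs:mem", "http://localhost:8080/memfs")

	fmt.Println(r.Replace("{fs:mem}/live/stream.m3u8"))
	// Output: http://localhost:8080/memfs/live/stream.m3u8
}
```

Parameters such as `name` for `{rtmp}` and `{srt}` are omitted here; the real replacer parses them out of the placeholder before substitution.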
app/api/api.go (360 changes)
```diff
@@ -16,11 +16,10 @@ import (
 	"github.com/datarhei/core/v16/app"
 	"github.com/datarhei/core/v16/config"
-	configstore "github.com/datarhei/core/v16/config/store"
-	configvars "github.com/datarhei/core/v16/config/vars"
 	"github.com/datarhei/core/v16/ffmpeg"
 	"github.com/datarhei/core/v16/http"
 	"github.com/datarhei/core/v16/http/cache"
+	httpfs "github.com/datarhei/core/v16/http/fs"
 	"github.com/datarhei/core/v16/http/jwt"
 	"github.com/datarhei/core/v16/http/router"
 	"github.com/datarhei/core/v16/io/fs"
```
```diff
@@ -66,6 +65,7 @@ type api struct {
 	ffmpeg     ffmpeg.FFmpeg
 	diskfs     fs.Filesystem
 	memfs      fs.Filesystem
+	s3fs       map[string]fs.Filesystem
 	rtmpserver rtmp.Server
 	srtserver  srt.Server
 	metrics    monitor.HistoryMonitor
```
```diff
@@ -98,7 +98,7 @@ type api struct {
 	config struct {
 		path   string
-		store  configstore.Store
+		store  config.Store
 		config *config.Config
 	}
```
```diff
@@ -115,6 +115,7 @@ var ErrConfigReload = fmt.Errorf("configuration reload")
 func New(configpath string, logwriter io.Writer) (API, error) {
 	a := &api{
 		state: "idle",
+		s3fs:  map[string]fs.Filesystem{},
 	}
 
 	a.config.path = configpath
```
```diff
@@ -147,7 +148,7 @@ func (a *api) Reload() error {
 	logger := log.New("Core").WithOutput(log.NewConsoleWriter(a.log.writer, log.Lwarn, true))
 
-	store, err := configstore.NewJSON(a.config.path, func() {
+	store, err := config.NewJSONStore(a.config.path, func() {
 		a.errorChan <- ErrConfigReload
 	})
 	if err != nil {
```
```diff
@@ -159,7 +160,7 @@ func (a *api) Reload() error {
 	cfg.Merge()
 
 	if len(cfg.Host.Name) == 0 && cfg.Host.Auto {
-		cfg.Host.Name = net.GetPublicIPs(5 * time.Second)
+		cfg.SetPublicIPs()
 	}
 
 	cfg.Validate(false)
```
```diff
@@ -228,7 +229,7 @@ func (a *api) Reload() error {
 	logger.Info().WithFields(logfields).Log("")
 
 	configlogger := logger.WithComponent("Config")
-	cfg.Messages(func(level string, v configvars.Variable, message string) {
+	cfg.Messages(func(level string, v config.Variable, message string) {
 		configlogger = configlogger.WithFields(log.Fields{
 			"variable": v.Name,
 			"value":    v.Value,
```
```diff
@@ -364,13 +365,19 @@ func (a *api) start() error {
 		a.sessions = sessions
 	}
 
+	store := store.NewJSONStore(store.JSONConfig{
+		Dir:    cfg.DB.Dir,
+		Logger: a.log.logger.core.WithComponent("ProcessStore"),
+	})
+
 	diskfs, err := fs.NewDiskFilesystem(fs.DiskConfig{
+		Name:   "disk",
 		Dir:    cfg.Storage.Disk.Dir,
 		Size:   cfg.Storage.Disk.Size * 1024 * 1024,
-		Logger: a.log.logger.core.WithComponent("DiskFS"),
+		Logger: a.log.logger.core.WithComponent("FS"),
 	})
 	if err != nil {
-		return err
+		return fmt.Errorf("disk filesystem: %w", err)
 	}
 
 	a.diskfs = diskfs
```
```diff
@@ -393,10 +400,11 @@ func (a *api) start() error {
 	if a.memfs == nil {
 		memfs := fs.NewMemFilesystem(fs.MemConfig{
+			Name:   "mem",
 			Base:   baseMemFS.String(),
 			Size:   cfg.Storage.Memory.Size * 1024 * 1024,
 			Purge:  cfg.Storage.Memory.Purge,
-			Logger: a.log.logger.core.WithComponent("MemFS"),
+			Logger: a.log.logger.core.WithComponent("FS"),
 		})
 
 		a.memfs = memfs
```
```diff
@@ -405,23 +413,62 @@ func (a *api) start() error {
 		a.memfs.Resize(cfg.Storage.Memory.Size * 1024 * 1024)
 	}
 
+	for _, s3 := range cfg.Storage.S3 {
+		baseS3FS := url.URL{
+			Scheme: "http",
+			Path:   s3.Mountpoint,
+		}
+
+		host, port, _ := gonet.SplitHostPort(cfg.Address)
+		if len(host) == 0 {
+			baseS3FS.Host = "localhost:" + port
+		} else {
+			baseS3FS.Host = cfg.Address
+		}
+
+		if s3.Auth.Enable {
+			baseS3FS.User = url.UserPassword(s3.Auth.Username, s3.Auth.Password)
+		}
+
+		s3fs, err := fs.NewS3Filesystem(fs.S3Config{
+			Name:            s3.Name,
+			Base:            baseS3FS.String(),
+			Endpoint:        s3.Endpoint,
+			AccessKeyID:     s3.AccessKeyID,
+			SecretAccessKey: s3.SecretAccessKey,
+			Region:          s3.Region,
+			Bucket:          s3.Bucket,
+			UseSSL:          s3.UseSSL,
+			Logger:          a.log.logger.core.WithComponent("FS"),
+		})
+		if err != nil {
+			return fmt.Errorf("s3 filesystem (%s): %w", s3.Name, err)
+		}
+
+		if _, ok := a.s3fs[s3.Name]; ok {
+			return fmt.Errorf("the name '%s' for a filesystem is already in use", s3.Name)
+		}
+
+		a.s3fs[s3.Name] = s3fs
+	}
+
 	var portrange net.Portranger
 
 	if cfg.Playout.Enable {
 		portrange, err = net.NewPortrange(cfg.Playout.MinPort, cfg.Playout.MaxPort)
 		if err != nil {
-			return err
+			return fmt.Errorf("playout port range: %w", err)
 		}
 	}
 
 	validatorIn, err := ffmpeg.NewValidator(cfg.FFmpeg.Access.Input.Allow, cfg.FFmpeg.Access.Input.Block)
 	if err != nil {
-		return err
+		return fmt.Errorf("input address validator: %w", err)
 	}
 
 	validatorOut, err := ffmpeg.NewValidator(cfg.FFmpeg.Access.Output.Allow, cfg.FFmpeg.Access.Output.Block)
 	if err != nil {
-		return err
+		return fmt.Errorf("output address validator: %w", err)
 	}
 
 	ffmpeg, err := ffmpeg.New(ffmpeg.Config{
```
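For reference, a standalone sketch (not code from the repository; function name and values are invented) of how a base address like the one assembled for each S3 filesystem above comes together with Go's `net/url`, including the `SplitHostPort` fallback from the hunk:

```go
package main

import (
	"fmt"
	"net"
	"net/url"
)

// buildBase assembles an http base address for a mounted filesystem,
// embedding basic-auth credentials when given. Illustrative only.
func buildBase(listenAddr, mountpoint, user, pass string) string {
	u := url.URL{Scheme: "http", Path: mountpoint}

	host, port, _ := net.SplitHostPort(listenAddr)
	if len(host) == 0 {
		u.Host = "localhost:" + port // listen address like ":8080"
	} else {
		u.Host = listenAddr
	}

	if user != "" {
		u.User = url.UserPassword(user, pass)
	}

	return u.String()
}

func main() {
	fmt.Println(buildBase(":8080", "/myS3", "admin", "secret"))
	// Output: http://admin:secret@localhost:8080/myS3
}
```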
```diff
@@ -435,7 +482,7 @@ func (a *api) start() error {
 		Collector: a.sessions.Collector("ffmpeg"),
 	})
 	if err != nil {
-		return err
+		return fmt.Errorf("unable to create ffmpeg: %w", err)
 	}
 
 	a.ffmpeg = ffmpeg
```
```diff
@@ -446,6 +493,13 @@ func (a *api) start() error {
 	a.replacer.RegisterTemplate("diskfs", a.diskfs.Base())
 	a.replacer.RegisterTemplate("memfs", a.memfs.Base())
 
+	a.replacer.RegisterTemplate("fs:disk", a.diskfs.Base())
+	a.replacer.RegisterTemplate("fs:mem", a.memfs.Base())
+
+	for name, s3 := range a.s3fs {
+		a.replacer.RegisterTemplate("fs:"+name, s3.Base())
+	}
+
 	host, port, _ := gonet.SplitHostPort(cfg.RTMP.Address)
 	if len(host) == 0 {
 		host = "localhost"
```
```diff
@@ -478,18 +532,20 @@ func (a *api) start() error {
 		a.replacer.RegisterTemplate("srt", template)
 	}
 
-	store := store.NewJSONStore(store.JSONConfig{
-		Filepath:  cfg.DB.Dir + "/db.json",
-		FFVersion: a.ffmpeg.Skills().FFmpeg.Version,
-		Logger:    a.log.logger.core.WithComponent("ProcessStore"),
-	})
+	filesystems := []fs.Filesystem{
+		a.diskfs,
+		a.memfs,
+	}
+
+	for _, fs := range a.s3fs {
+		filesystems = append(filesystems, fs)
+	}
 
 	restream, err := restream.New(restream.Config{
 		ID:           cfg.ID,
 		Name:         cfg.Name,
 		Store:        store,
-		DiskFS:       a.diskfs,
-		MemFS:        a.memfs,
+		Filesystems:  filesystems,
 		Replace:      a.replacer,
 		FFmpeg:       a.ffmpeg,
 		MaxProcesses: cfg.FFmpeg.MaxProcesses,
```
```diff
@@ -560,6 +616,9 @@ func (a *api) start() error {
 	metrics.Register(monitor.NewDiskCollector(a.diskfs.Base()))
 	metrics.Register(monitor.NewFilesystemCollector("diskfs", diskfs))
 	metrics.Register(monitor.NewFilesystemCollector("memfs", a.memfs))
+	for name, fs := range a.s3fs {
+		metrics.Register(monitor.NewFilesystemCollector(name, fs))
+	}
 	metrics.Register(monitor.NewRestreamCollector(a.restream))
 	metrics.Register(monitor.NewFFmpegCollector(a.ffmpeg))
 	metrics.Register(monitor.NewSessionCollector(a.sessions, []string{}))
```
```diff
@@ -634,7 +693,7 @@ func (a *api) start() error {
 	}
 
 	if cfg.Storage.Disk.Cache.Enable {
-		diskCache, err := cache.NewLRUCache(cache.LRUConfig{
+		cache, err := cache.NewLRUCache(cache.LRUConfig{
 			TTL:         time.Duration(cfg.Storage.Disk.Cache.TTL) * time.Second,
 			MaxSize:     cfg.Storage.Disk.Cache.Size * 1024 * 1024,
 			MaxFileSize: cfg.Storage.Disk.Cache.FileSize * 1024 * 1024,
```
```diff
@@ -644,96 +703,106 @@ func (a *api) start() error {
 		})
 
 		if err != nil {
-			return fmt.Errorf("unable to create disk cache: %w", err)
+			return fmt.Errorf("unable to create cache: %w", err)
 		}
 
-		a.cache = diskCache
+		a.cache = cache
 	}
 
 	var autocertManager *certmagic.Config
 
-	if cfg.TLS.Enable {
-		if cfg.TLS.Auto {
-			if len(cfg.Host.Name) == 0 {
-				return fmt.Errorf("at least one host must be provided in host.name or RS_HOST_NAME")
-			}
+	if cfg.TLS.Enable && cfg.TLS.Auto {
+		if len(cfg.Host.Name) == 0 {
+			return fmt.Errorf("at least one host must be provided in host.name or RS_HOST_NAME")
+		}
 
-			certmagic.DefaultACME.Agreed = true
-			certmagic.DefaultACME.Email = cfg.TLS.Email
-			certmagic.DefaultACME.CA = certmagic.LetsEncryptProductionCA
-			certmagic.DefaultACME.DisableHTTPChallenge = false
-			certmagic.DefaultACME.DisableTLSALPNChallenge = true
-			certmagic.DefaultACME.Logger = nil
+		certmagic.DefaultACME.Agreed = true
+		certmagic.DefaultACME.Email = cfg.TLS.Email
+		certmagic.DefaultACME.CA = certmagic.LetsEncryptProductionCA
+		certmagic.DefaultACME.DisableHTTPChallenge = false
+		certmagic.DefaultACME.DisableTLSALPNChallenge = true
+		certmagic.DefaultACME.Logger = nil
 
-			certmagic.Default.Storage = &certmagic.FileStorage{
-				Path: cfg.DB.Dir + "/cert",
-			}
-			certmagic.Default.DefaultServerName = cfg.Host.Name[0]
-			certmagic.Default.Logger = nil
+		certmagic.Default.Storage = &certmagic.FileStorage{
+			Path: cfg.DB.Dir + "/cert",
+		}
+		certmagic.Default.DefaultServerName = cfg.Host.Name[0]
+		certmagic.Default.Logger = nil
+		certmagic.Default.OnEvent = func(event string, data interface{}) {
+			message := ""
+
+			switch data := data.(type) {
+			case string:
+				message = data
+			case fmt.Stringer:
+				message = data.String()
+			}
+
+			if len(message) != 0 {
+				a.log.logger.core.WithComponent("certmagic").Info().WithField("event", event).Log(message)
+			}
+		}
 
-			magic := certmagic.NewDefault()
-			acme := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME)
+		magic := certmagic.NewDefault()
+		acme := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME)
 
-			magic.Issuers = []certmagic.Issuer{acme}
+		magic.Issuers = []certmagic.Issuer{acme}
 
-			autocertManager = magic
+		autocertManager = magic
 
-			// Start temporary http server on configured port
-			tempserver := &gohttp.Server{
-				Addr: cfg.Address,
-				Handler: acme.HTTPChallengeHandler(gohttp.HandlerFunc(func(w gohttp.ResponseWriter, r *gohttp.Request) {
-					w.WriteHeader(gohttp.StatusNotFound)
-				})),
-				ReadTimeout:    10 * time.Second,
-				WriteTimeout:   10 * time.Second,
-				MaxHeaderBytes: 1 << 20,
-			}
+		// Start temporary http server on configured port
+		tempserver := &gohttp.Server{
+			Addr: cfg.Address,
+			Handler: acme.HTTPChallengeHandler(gohttp.HandlerFunc(func(w gohttp.ResponseWriter, r *gohttp.Request) {
+				w.WriteHeader(gohttp.StatusNotFound)
+			})),
+			ReadTimeout:    10 * time.Second,
+			WriteTimeout:   10 * time.Second,
+			MaxHeaderBytes: 1 << 20,
+		}
 
-			wg := sync.WaitGroup{}
-			wg.Add(1)
+		wg := sync.WaitGroup{}
+		wg.Add(1)
 
-			go func() {
-				tempserver.ListenAndServe()
-				wg.Done()
-			}()
+		go func() {
+			tempserver.ListenAndServe()
+			wg.Done()
+		}()
 
-			var certerror bool
+		var certerror bool
 
-			// For each domain, get the certificate
-			for _, host := range cfg.Host.Name {
-				logger := a.log.logger.core.WithComponent("Let's Encrypt").WithField("host", host)
-				logger.Info().Log("Acquiring certificate ...")
+		// For each domain, get the certificate
+		for _, host := range cfg.Host.Name {
+			logger := a.log.logger.core.WithComponent("Let's Encrypt").WithField("host", host)
+			logger.Info().Log("Acquiring certificate ...")
 
-				ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Minute))
+			ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Minute))
 
-				err := autocertManager.ManageSync(ctx, []string{host})
+			err := autocertManager.ManageSync(ctx, []string{host})
 
-				cancel()
+			cancel()
 
-				if err != nil {
-					logger.Error().WithField("error", err).Log("Failed to acquire certificate")
-					certerror = true
-					break
-				}
+			if err != nil {
+				logger.Error().WithField("error", err).Log("Failed to acquire certificate")
+				certerror = true
+				break
+			}
 
-				logger.Info().Log("Successfully acquired certificate")
-			}
+			logger.Info().Log("Successfully acquired certificate")
+		}
 
-			// Shut down the temporary http server
-			tempserver.Close()
+		// Shut down the temporary http server
+		tempserver.Close()
 
-			wg.Wait()
+		wg.Wait()
 
-			if certerror {
-				a.log.logger.core.Warn().Log("Continuing with disabled TLS")
-				autocertManager = nil
-				cfg.TLS.Enable = false
-			} else {
-				cfg.TLS.CertFile = ""
-				cfg.TLS.KeyFile = ""
-			}
-		}
+		if certerror {
+			a.log.logger.core.Warn().Log("Continuing with disabled TLS")
+			autocertManager = nil
+			cfg.TLS.Enable = false
+		} else {
+			a.log.logger.core.Info().Log("Enabling TLS with cert and key files")
+			cfg.TLS.CertFile = ""
+			cfg.TLS.KeyFile = ""
+		}
 	}
```
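Reduced to its essentials, the pattern in the hunk above is: configure certmagic, run a throwaway HTTP server that only answers ACME HTTP-01 challenges, then synchronously obtain one certificate per host. A minimal sketch using only the certmagic calls visible in the diff (not the project's code; logging and shutdown coordination are trimmed):

```go
package main

import (
	"context"
	"log"
	"net/http"
	"time"

	"github.com/caddyserver/certmagic"
)

// acquireCertificates obtains (or loads cached) certificates for all hosts,
// serving HTTP-01 challenges on the given address while it runs.
func acquireCertificates(address string, hosts []string) (*certmagic.Config, error) {
	certmagic.DefaultACME.Agreed = true
	certmagic.DefaultACME.CA = certmagic.LetsEncryptProductionCA

	magic := certmagic.NewDefault()
	acme := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME)
	magic.Issuers = []certmagic.Issuer{acme}

	// Temporary server that only answers the ACME HTTP-01 challenge.
	tempserver := &http.Server{
		Addr: address,
		Handler: acme.HTTPChallengeHandler(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			w.WriteHeader(http.StatusNotFound)
		})),
	}
	go tempserver.ListenAndServe()
	defer tempserver.Close()

	for _, host := range hosts {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
		err := magic.ManageSync(ctx, []string{host})
		cancel()
		if err != nil {
			return nil, err
		}
		log.Printf("certificate for %s ready", host)
	}

	return magic, nil
}
```

The returned `*certmagic.Config` can then back a `tls.Config` via its `GetCertificate` method, exactly as the RTMPS hunk below does.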
```diff
@@ -749,15 +818,14 @@ func (a *api) start() error {
 		Collector: a.sessions.Collector("rtmp"),
 	}
 
-	if cfg.RTMP.EnableTLS {
+	if autocertManager != nil && cfg.RTMP.EnableTLS {
+		config.TLSConfig = &tls.Config{
+			GetCertificate: autocertManager.GetCertificate,
+		}
+
 		config.Logger = config.Logger.WithComponent("RTMP/S")
 
 		a.log.logger.rtmps = a.log.logger.core.WithComponent("RTMPS").WithField("address", cfg.RTMP.AddressTLS)
-		if autocertManager != nil {
-			config.TLSConfig = &tls.Config{
-				GetCertificate: autocertManager.GetCertificate,
-			}
-		}
 	}
 
 	rtmpserver, err := rtmp.New(config)
```
```diff
@@ -820,22 +888,61 @@ func (a *api) start() error {
 	a.log.logger.main = a.log.logger.core.WithComponent(logcontext).WithField("address", cfg.Address)
 
-	mainserverhandler, err := http.NewServer(http.Config{
+	httpfilesystems := []httpfs.FS{
+		{
+			Name:               a.diskfs.Name(),
+			Mountpoint:         "",
+			AllowWrite:         false,
+			EnableAuth:         false,
+			Username:           "",
+			Password:           "",
+			DefaultFile:        "index.html",
+			DefaultContentType: "text/html",
+			Gzip:               true,
+			Filesystem:         a.diskfs,
+			Cache:              a.cache,
+		},
+		{
+			Name:               a.memfs.Name(),
+			Mountpoint:         "/memfs",
+			AllowWrite:         true,
+			EnableAuth:         cfg.Storage.Memory.Auth.Enable,
+			Username:           cfg.Storage.Memory.Auth.Username,
+			Password:           cfg.Storage.Memory.Auth.Password,
+			DefaultFile:        "",
+			DefaultContentType: "application/data",
+			Gzip:               true,
+			Filesystem:         a.memfs,
+			Cache:              nil,
+		},
+	}
+
+	for _, s3 := range cfg.Storage.S3 {
+		httpfilesystems = append(httpfilesystems, httpfs.FS{
+			Name:               s3.Name,
+			Mountpoint:         s3.Mountpoint,
+			AllowWrite:         true,
+			EnableAuth:         s3.Auth.Enable,
+			Username:           s3.Auth.Username,
+			Password:           s3.Auth.Password,
+			DefaultFile:        "",
+			DefaultContentType: "application/data",
+			Gzip:               true,
+			Filesystem:         a.s3fs[s3.Name],
+			Cache:              a.cache,
+		})
+	}
+
+	serverConfig := http.Config{
 		Logger:        a.log.logger.main,
 		LogBuffer:     a.log.buffer,
 		Restream:      a.restream,
 		Metrics:       a.metrics,
 		Prometheus:    a.prom,
 		MimeTypesFile: cfg.Storage.MimeTypes,
-		DiskFS:        a.diskfs,
-		MemFS: http.MemFSConfig{
-			EnableAuth: cfg.Storage.Memory.Auth.Enable,
-			Username:   cfg.Storage.Memory.Auth.Username,
-			Password:   cfg.Storage.Memory.Auth.Password,
-			Filesystem: a.memfs,
-		},
-		IPLimiter: iplimiter,
-		Profiling: cfg.Debug.Profiling,
+		Filesystems:   httpfilesystems,
+		IPLimiter:     iplimiter,
+		Profiling:     cfg.Debug.Profiling,
 		Cors: http.CorsConfig{
 			Origins: cfg.Storage.CORS.Origins,
 		},
```
```diff
@@ -843,11 +950,12 @@ func (a *api) start() error {
 		SRT:      a.srtserver,
 		JWT:      a.httpjwt,
 		Config:   a.config.store,
 		Cache:    a.cache,
 		Sessions: a.sessions,
 		Router:   router,
 		ReadOnly: cfg.API.ReadOnly,
-	})
+	}
+
+	mainserverhandler, err := http.NewServer(serverConfig)
 
 	if err != nil {
 		return fmt.Errorf("unable to create server: %w", err)
```
```diff
@@ -882,34 +990,10 @@ func (a *api) start() error {
 	a.log.logger.sidecar = a.log.logger.core.WithComponent("HTTP").WithField("address", cfg.Address)
 
-	sidecarserverhandler, err := http.NewServer(http.Config{
-		Logger:        a.log.logger.sidecar,
-		LogBuffer:     a.log.buffer,
-		Restream:      a.restream,
-		Metrics:       a.metrics,
-		Prometheus:    a.prom,
-		MimeTypesFile: cfg.Storage.MimeTypes,
-		DiskFS:        a.diskfs,
-		MemFS: http.MemFSConfig{
-			EnableAuth: cfg.Storage.Memory.Auth.Enable,
-			Username:   cfg.Storage.Memory.Auth.Username,
-			Password:   cfg.Storage.Memory.Auth.Password,
-			Filesystem: a.memfs,
-		},
-		IPLimiter: iplimiter,
-		Profiling: cfg.Debug.Profiling,
-		Cors: http.CorsConfig{
-			Origins: cfg.Storage.CORS.Origins,
-		},
-		RTMP:     a.rtmpserver,
-		SRT:      a.srtserver,
-		JWT:      a.httpjwt,
-		Config:   a.config.store,
-		Cache:    a.cache,
-		Sessions: a.sessions,
-		Router:   router,
-		ReadOnly: cfg.API.ReadOnly,
-	})
+	serverConfig.Logger = a.log.logger.sidecar
+	serverConfig.IPLimiter = iplimiter
+
+	sidecarserverhandler, err := http.NewServer(serverConfig)
 
 	if err != nil {
 		return fmt.Errorf("unable to create sidecar HTTP server: %w", err)
```
A deleted file, not named in the mirror but matching the ffmigrate tool built from `app/ffmigrate` in the Makefile above:

```diff
@@ -1,168 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"os"
-	"regexp"
-
-	cfgstore "github.com/datarhei/core/v16/config/store"
-	cfgvars "github.com/datarhei/core/v16/config/vars"
-	"github.com/datarhei/core/v16/ffmpeg"
-	"github.com/datarhei/core/v16/io/file"
-	"github.com/datarhei/core/v16/log"
-	"github.com/datarhei/core/v16/restream/store"
-
-	"github.com/Masterminds/semver/v3"
-	_ "github.com/joho/godotenv/autoload"
-)
-
-func main() {
-	logger := log.New("Migration").WithOutput(log.NewConsoleWriter(os.Stderr, log.Linfo, true)).WithFields(log.Fields{
-		"from": "ffmpeg4",
-		"to":   "ffmpeg5",
-	})
-
-	configstore, err := cfgstore.NewJSON(os.Getenv("CORE_CONFIGFILE"), nil)
-	if err != nil {
-		logger.Error().WithError(err).Log("Loading configuration failed")
-		os.Exit(1)
-	}
-
-	if err := doMigration(logger, configstore); err != nil {
-		os.Exit(1)
-	}
-}
-
-func doMigration(logger log.Logger, configstore cfgstore.Store) error {
-	if logger == nil {
-		logger = log.New("")
-	}
-
-	cfg := configstore.Get()
-
-	// Merging the persisted config with the environment variables
-	cfg.Merge()
-
-	cfg.Validate(false)
-	if cfg.HasErrors() {
-		logger.Error().Log("The configuration contains errors")
-		messages := []string{}
-		cfg.Messages(func(level string, v cfgvars.Variable, message string) {
-			if level == "error" {
-				logger.Error().WithFields(log.Fields{
-					"variable":    v.Name,
-					"value":       v.Value,
-					"env":         v.EnvName,
-					"description": v.Description,
-				}).Log(message)
-
-				messages = append(messages, v.Name+": "+message)
-			}
-		})
-
-		return fmt.Errorf("the configuration contains errors: %v", messages)
-	}
-
-	ff, err := ffmpeg.New(ffmpeg.Config{
-		Binary: cfg.FFmpeg.Binary,
-	})
-	if err != nil {
-		logger.Error().WithError(err).Log("Loading FFmpeg binary failed")
-		return fmt.Errorf("loading FFmpeg binary failed: %w", err)
-	}
-
-	version, err := semver.NewVersion(ff.Skills().FFmpeg.Version)
-	if err != nil {
-		logger.Error().WithError(err).Log("Parsing FFmpeg version failed")
-		return fmt.Errorf("parsing FFmpeg version failed: %w", err)
-	}
-
-	// The current FFmpeg version is 4. Nothing to do.
-	if version.Major() == 4 {
-		return nil
-	}
-
-	if version.Major() != 5 {
-		err := fmt.Errorf("unknown FFmpeg version found: %d", version.Major())
-		logger.Error().WithError(err).Log("Unsupported FFmpeg version found")
-		return fmt.Errorf("unsupported FFmpeg version found: %w", err)
-	}
-
-	// Check if there's a DB file
-	dbFilepath := cfg.DB.Dir + "/db.json"
-
-	if _, err = os.Stat(dbFilepath); err != nil {
-		// There's no DB to backup
-		logger.Info().WithField("db", dbFilepath).Log("Database not found. Migration not required")
-		return nil
-	}
-
-	// Check if we already have a backup
-	backupFilepath := cfg.DB.Dir + "/db_ff4.json"
-
-	if _, err = os.Stat(backupFilepath); err == nil {
-		// Yes, we have a backup. The migration already happened
-		logger.Info().WithField("backup", backupFilepath).Log("Migration already done")
-		return nil
-	}
-
-	// Create a backup
-	if err := file.Copy(dbFilepath, backupFilepath); err != nil {
-		logger.Error().WithError(err).Log("Creating backup file failed")
-		return fmt.Errorf("creating backup file failed: %w", err)
-	}
-
-	logger.Info().WithField("backup", backupFilepath).Log("Backup created")
-
-	// Load the existing DB
-	datastore := store.NewJSONStore(store.JSONConfig{
-		Filepath: cfg.DB.Dir + "/db.json",
-	})
-
-	data, err := datastore.Load()
-	if err != nil {
-		logger.Error().WithError(err).Log("Loading database failed")
-		return fmt.Errorf("loading database failed: %w", err)
-	}
-
-	logger.Info().Log("Migrating processes ...")
-
-	// Migrate the processes to version 5
-	// Only this happens:
-	// - for RTSP inputs, replace -stimeout with -timeout
-	reRTSP := regexp.MustCompile(`^rtsps?://`)
-	for id, p := range data.Process {
-		logger.Info().WithField("processid", p.ID).Log("")
-
-		for index, input := range p.Config.Input {
-			if !reRTSP.MatchString(input.Address) {
-				continue
-			}
-
-			for i, o := range input.Options {
-				if o != "-stimeout" {
-					continue
-				}
-
-				input.Options[i] = "-timeout"
-			}
-
-			p.Config.Input[index] = input
-		}
-
-		p.Config.FFVersion = version.String()
-		data.Process[id] = p
-	}
-
-	logger.Info().Log("Migrating processes done")
-
-	// Store the modified DB
-	if err := datastore.Store(data); err != nil {
-		logger.Error().WithError(err).Log("Storing database failed")
-		return fmt.Errorf("storing database failed: %w", err)
-	}
-
-	logger.Info().Log("Completed")
-
-	return nil
-}
```
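The core of the deleted migration tool is the RTSP option rename. As a self-contained illustration derived from that loop (the function and sample values are invented for the example), the same transform on a bare options slice:

```go
package main

import (
	"fmt"
	"regexp"
)

// migrateOptions renames FFmpeg 4's -stimeout to FFmpeg 5's -timeout,
// but only for RTSP(S) inputs, mirroring the loop in the deleted tool.
func migrateOptions(address string, options []string) []string {
	reRTSP := regexp.MustCompile(`^rtsps?://`)
	if !reRTSP.MatchString(address) {
		return options
	}
	for i, o := range options {
		if o == "-stimeout" {
			options[i] = "-timeout"
		}
	}
	return options
}

func main() {
	opts := migrateOptions("rtsp://camera.local/stream", []string{"-stimeout", "5000000"})
	fmt.Println(opts) // [-timeout 5000000]
}
```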
The v1 import tool (file not named in the mirror):

```diff
@@ -4,8 +4,7 @@ import (
 	"fmt"
 	"os"
 
-	cfgstore "github.com/datarhei/core/v16/config/store"
-	cfgvars "github.com/datarhei/core/v16/config/vars"
+	"github.com/datarhei/core/v16/config"
 	"github.com/datarhei/core/v16/log"
 	"github.com/datarhei/core/v16/restream/store"
 
@@ -15,7 +14,7 @@ import (
 func main() {
 	logger := log.New("Import").WithOutput(log.NewConsoleWriter(os.Stderr, log.Linfo, true)).WithField("version", "v1")
 
-	configstore, err := cfgstore.NewJSON(os.Getenv("CORE_CONFIGFILE"), nil)
+	configstore, err := config.NewJSONStore(os.Getenv("CORE_CONFIGFILE"), nil)
 	if err != nil {
 		logger.Error().WithError(err).Log("Loading configuration failed")
 		os.Exit(1)
@@ -26,7 +25,7 @@ func main() {
 	}
 }
 
-func doImport(logger log.Logger, configstore cfgstore.Store) error {
+func doImport(logger log.Logger, configstore config.Store) error {
 	if logger == nil {
 		logger = log.New("")
 	}
@@ -42,7 +41,7 @@ func doImport(logger log.Logger, configstore cfgstore.Store) error {
 	if cfg.HasErrors() {
 		logger.Error().Log("The configuration contains errors")
 		messages := []string{}
-		cfg.Messages(func(level string, v cfgvars.Variable, message string) {
+		cfg.Messages(func(level string, v config.Variable, message string) {
 			if level == "error" {
 				logger.Error().WithFields(log.Fields{
 					"variable": v.Name,
@@ -80,7 +79,7 @@ func doImport(logger log.Logger, configstore cfgstore.Store) error {
 
 	// Load an existing DB
 	datastore := store.NewJSONStore(store.JSONConfig{
-		Filepath: cfg.DB.Dir + "/db.json",
+		Dir: cfg.DB.Dir,
 	})
 
 	data, err := datastore.Load()
```
Its test file (not named in the mirror):

```diff
@@ -3,12 +3,12 @@ package main
 import (
 	"testing"
 
-	"github.com/datarhei/core/v16/config/store"
+	"github.com/datarhei/core/v16/config"
 	"github.com/stretchr/testify/require"
 )
 
 func TestImport(t *testing.T) {
-	configstore := store.NewDummy()
+	configstore := config.NewDummyStore()
 
 	cfg := configstore.Get()
```
||||
@@ -29,8 +29,8 @@ func (v versionInfo) MinorString() string {
|
||||
// Version of the app
|
||||
var Version = versionInfo{
|
||||
Major: 16,
|
||||
Minor: 11,
|
||||
Patch: 0,
|
||||
Minor: 10,
|
||||
Patch: 1,
|
||||
}
|
||||
|
||||
// Commit is the git commit the app is build from. It should be filled in during compilation
|
||||
|
||||
config/config.go (511 changes)
```diff
@@ -3,49 +3,76 @@ package config
 
 import (
 	"context"
 	"fmt"
 	"net"
 	"os"
 	"time"
 
-	"github.com/datarhei/core/v16/config/copy"
-	"github.com/datarhei/core/v16/config/value"
-	"github.com/datarhei/core/v16/config/vars"
+	haikunator "github.com/atrox/haikunatorgo/v2"
 	"github.com/datarhei/core/v16/math/rand"
 
-	haikunator "github.com/atrox/haikunatorgo/v2"
 	"github.com/google/uuid"
 )
 
-/*
-type Config interface {
-	// Merge merges the values of the known environment variables into the configuration
-	Merge()
-
-	// Validate validates the current state of the Config for completeness and sanity. Errors are
-	// written to the log. Use resetLogs to indicate to reset the logs prior validation.
-	Validate(resetLogs bool)
-
-	// Messages calls for each log entry the provided callback. The level has the values 'error', 'warn', or 'info'.
-	// The name is the name of the configuration value, e.g. 'api.auth.enable'. The message is the log message.
-	Messages(logger func(level string, v vars.Variable, message string))
-
-	// HasErrors returns whether there are some error messages in the log.
-	HasErrors() bool
-
-	// Overrides returns a list of configuration value names that have been overriden by an environment variable.
-	Overrides() []string
-
-	Get(name string) (string, error)
-	Set(name, val string) error
-}
-*/
-
 const version int64 = 3
 
-// Make sure that the config.Config interface is satisfied
-//var _ config.Config = &Config{}
+type variable struct {
+	value       value    // The actual value
+	defVal      string   // The default value in string representation
+	name        string   // A name for this value
+	envName     string   // The environment variable that corresponds to this value
+	envAltNames []string // Alternative environment variable names
+	description string   // A desriptions for this value
+	required    bool     // Whether a non-empty value is required
+	disguise    bool     // Whether the value should be disguised if printed
+	merged      bool     // Whether this value has been replaced by its corresponding environment variable
+}
+
+type Variable struct {
+	Value       string
+	Name        string
+	EnvName     string
+	Description string
+	Merged      bool
+}
+
+type message struct {
+	message  string   // The log message
+	variable Variable // The config field this message refers to
+	level    string   // The loglevel for this message
+}
+
+type Auth0Tenant struct {
+	Domain   string   `json:"domain"`
+	Audience string   `json:"audience"`
+	ClientID string   `json:"clientid"`
+	Users    []string `json:"users"`
+}
+
+type S3Storage struct {
+	Name       string `json:"name"`
+	Mountpoint string `json:"mountpoint"`
+	Auth       struct {
+		Enable   bool   `json:"enable"`
+		Username string `json:"username"`
+		Password string `json:"password"`
+	} `json:"auth"`
+	Endpoint        string `json:"endpoint"`
+	AccessKeyID     string `json:"access_key_id"`
+	SecretAccessKey string `json:"secret_access_key"`
+	Bucket          string `json:"bucket"`
+	Region          string `json:"region"`
+	UseSSL          bool   `json:"use_ssl"`
+}
+
+type DataVersion struct {
+	Version int64 `json:"version"`
+}
 
 // Config is a wrapper for Data
 type Config struct {
-	vars vars.Variables
+	vars []*variable
+	logs []message
 
 	Data
 }
```
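The hunk above shows the branch's monolithic pattern: each config field couples a typed pointer with metadata (name, env variable, description, flags) so merging, env overrides, and logging can treat all fields uniformly. A minimal, runnable sketch of that idea (invented names; not the project's code, which supports many more value types and flags):

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// value is the behavior every registered setting must provide.
type value interface {
	Set(string) error
	String() string
}

// intValue adapts an *int to the value interface.
type intValue struct{ p *int }

func (v intValue) Set(s string) error {
	i, err := strconv.Atoi(s)
	if err == nil {
		*v.p = i
	}
	return err
}
func (v intValue) String() string { return strconv.Itoa(*v.p) }

type variable struct {
	value   value  // pointer into the config struct
	name    string // e.g. "log.max_lines"
	envName string // e.g. "CORE_LOG_MAXLINES"
}

type registry struct{ vars []*variable }

// val applies the default and records the variable, like d.val(...) above.
func (r *registry) val(v value, name, envName, defVal string) {
	v.Set(defVal)
	r.vars = append(r.vars, &variable{value: v, name: name, envName: envName})
}

// Merge overwrites registered values from their environment variables.
func (r *registry) Merge() {
	for _, v := range r.vars {
		if s, ok := os.LookupEnv(v.envName); ok {
			v.value.Set(s)
		}
	}
}

func main() {
	var maxLines int
	r := &registry{}
	r.val(intValue{&maxLines}, "log.max_lines", "CORE_LOG_MAXLINES", "1000")
	r.Merge()
	fmt.Println(maxLines) // 1000, unless CORE_LOG_MAXLINES is set
}
```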
```diff
@@ -59,16 +86,8 @@ func New() *Config {
 	return config
 }
 
-func (d *Config) Get(name string) (string, error) {
-	return d.vars.Get(name)
-}
-
-func (d *Config) Set(name, val string) error {
-	return d.vars.Set(name, val)
-}
-
 // NewConfigFrom returns a clone of a Config
-func (d *Config) Clone() *Config {
+func NewConfigFrom(d *Config) *Config {
 	data := New()
 
 	data.CreatedAt = d.CreatedAt
```
```diff
@@ -97,201 +116,289 @@ func (d *Config) Clone() *Config {
 	data.Service = d.Service
 	data.Router = d.Router
 
-	data.Log.Topics = copy.Slice(d.Log.Topics)
+	data.Log.Topics = copyStringSlice(d.Log.Topics)
 
-	data.Host.Name = copy.Slice(d.Host.Name)
+	data.Host.Name = copyStringSlice(d.Host.Name)
 
-	data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
-	data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
-	data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
-	data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)
+	data.API.Access.HTTP.Allow = copyStringSlice(d.API.Access.HTTP.Allow)
+	data.API.Access.HTTP.Block = copyStringSlice(d.API.Access.HTTP.Block)
+	data.API.Access.HTTPS.Allow = copyStringSlice(d.API.Access.HTTPS.Allow)
+	data.API.Access.HTTPS.Block = copyStringSlice(d.API.Access.HTTPS.Block)
 
-	data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)
+	data.API.Auth.Auth0.Tenants = copyTenantSlice(d.API.Auth.Auth0.Tenants)
 
-	data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
-	data.Storage.Disk.Cache.Types.Allow = copy.Slice(d.Storage.Disk.Cache.Types.Allow)
-	data.Storage.Disk.Cache.Types.Block = copy.Slice(d.Storage.Disk.Cache.Types.Block)
+	data.Storage.CORS.Origins = copyStringSlice(d.Storage.CORS.Origins)
+	data.Storage.Disk.Cache.Types.Allow = copyStringSlice(d.Storage.Disk.Cache.Types.Allow)
+	data.Storage.Disk.Cache.Types.Block = copyStringSlice(d.Storage.Disk.Cache.Types.Block)
 
-	data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
-	data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
-	data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
-	data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)
+	data.FFmpeg.Access.Input.Allow = copyStringSlice(d.FFmpeg.Access.Input.Allow)
+	data.FFmpeg.Access.Input.Block = copyStringSlice(d.FFmpeg.Access.Input.Block)
+	data.FFmpeg.Access.Output.Allow = copyStringSlice(d.FFmpeg.Access.Output.Allow)
+	data.FFmpeg.Access.Output.Block = copyStringSlice(d.FFmpeg.Access.Output.Block)
 
-	data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)
+	data.Sessions.IPIgnoreList = copyStringSlice(d.Sessions.IPIgnoreList)
 
-	data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)
+	data.SRT.Log.Topics = copyStringSlice(d.SRT.Log.Topics)
 
-	data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
-	data.Router.Routes = copy.StringMap(d.Router.Routes)
+	data.Router.BlockedPrefixes = copyStringSlice(d.Router.BlockedPrefixes)
+	data.Router.Routes = copyStringMap(d.Router.Routes)
 
-	data.vars.Transfer(&d.vars)
+	for i, v := range d.vars {
+		data.vars[i].merged = v.merged
+	}
 
 	return data
 }
 
 func (d *Config) init() {
-	d.vars.Register(value.NewInt64(&d.Version, version), "version", "", nil, "Configuration file layout version", true, false)
-	d.vars.Register(value.NewTime(&d.CreatedAt, time.Now()), "created_at", "", nil, "Configuration file creation time", false, false)
-	d.vars.Register(value.NewString(&d.ID, uuid.New().String()), "id", "CORE_ID", nil, "ID for this instance", true, false)
-	d.vars.Register(value.NewString(&d.Name, haikunator.New().Haikunate()), "name", "CORE_NAME", nil, "A human readable name for this instance", false, false)
-	d.vars.Register(value.NewAddress(&d.Address, ":8080"), "address", "CORE_ADDRESS", nil, "HTTP listening address", false, false)
-	d.vars.Register(value.NewBool(&d.CheckForUpdates, true), "update_check", "CORE_UPDATE_CHECK", nil, "Check for updates and send anonymized data", false, false)
+	d.val(newInt64Value(&d.Version, version), "version", "", nil, "Configuration file layout version", true, false)
+	d.val(newTimeValue(&d.CreatedAt, time.Now()), "created_at", "", nil, "Configuration file creation time", false, false)
+	d.val(newStringValue(&d.ID, uuid.New().String()), "id", "CORE_ID", nil, "ID for this instance", true, false)
+	d.val(newStringValue(&d.Name, haikunator.New().Haikunate()), "name", "CORE_NAME", nil, "A human readable name for this instance", false, false)
+	d.val(newAddressValue(&d.Address, ":8080"), "address", "CORE_ADDRESS", nil, "HTTP listening address", false, false)
+	d.val(newBoolValue(&d.CheckForUpdates, true), "update_check", "CORE_UPDATE_CHECK", nil, "Check for updates and send anonymized data", false, false)
 
 	// Log
-	d.vars.Register(value.NewString(&d.Log.Level, "info"), "log.level", "CORE_LOG_LEVEL", nil, "Loglevel: silent, error, warn, info, debug", false, false)
-	d.vars.Register(value.NewStringList(&d.Log.Topics, []string{}, ","), "log.topics", "CORE_LOG_TOPICS", nil, "Show only selected log topics", false, false)
-	d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)
+	d.val(newStringValue(&d.Log.Level, "info"), "log.level", "CORE_LOG_LEVEL", nil, "Loglevel: silent, error, warn, info, debug", false, false)
+	d.val(newStringListValue(&d.Log.Topics, []string{}, ","), "log.topics", "CORE_LOG_TOPICS", nil, "Show only selected log topics", false, false)
+	d.val(newIntValue(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)
 
 	// DB
-	d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
+	d.val(newMustDirValue(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
 
 	// Host
-	d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
-	d.vars.Register(value.NewBool(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false)
+	d.val(newStringListValue(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
+	d.val(newBoolValue(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false)
 
 	// API
-	d.vars.Register(value.NewBool(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only ready only access to the API", false, false)
-	d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
-	d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Block, []string{}, ","), "api.access.http.block", "CORE_API_ACCESS_HTTP_BLOCK", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
-	d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Allow, []string{}, ","), "api.access.https.allow", "CORE_API_ACCESS_HTTPS_ALLOW", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
-	d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Block, []string{}, ","), "api.access.https.block", "CORE_API_ACCESS_HTTPS_BLOCK", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
-	d.vars.Register(value.NewBool(&d.API.Auth.Enable, false), "api.auth.enable", "CORE_API_AUTH_ENABLE", nil, "Enable authentication for all clients", false, false)
-	d.vars.Register(value.NewBool(&d.API.Auth.DisableLocalhost, false), "api.auth.disable_localhost", "CORE_API_AUTH_DISABLE_LOCALHOST", nil, "Disable authentication for clients from localhost", false, false)
-	d.vars.Register(value.NewString(&d.API.Auth.Username, ""), "api.auth.username", "CORE_API_AUTH_USERNAME", []string{"RS_USERNAME"}, "Username", false, false)
-	d.vars.Register(value.NewString(&d.API.Auth.Password, ""), "api.auth.password", "CORE_API_AUTH_PASSWORD", []string{"RS_PASSWORD"}, "Password", false, true)
+	d.val(newBoolValue(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only ready only access to the API", false, false)
+	d.val(newCIDRListValue(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
+	d.val(newCIDRListValue(&d.API.Access.HTTP.Block, []string{}, ","), "api.access.http.block", "CORE_API_ACCESS_HTTP_BLOCK", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
+	d.val(newCIDRListValue(&d.API.Access.HTTPS.Allow, []string{}, ","), "api.access.https.allow", "CORE_API_ACCESS_HTTPS_ALLOW", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
+	d.val(newCIDRListValue(&d.API.Access.HTTPS.Block, []string{}, ","), "api.access.https.block", "CORE_API_ACCESS_HTTPS_BLOCK", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
+	d.val(newBoolValue(&d.API.Auth.Enable, false), "api.auth.enable", "CORE_API_AUTH_ENABLE", nil, "Enable authentication for all clients", false, false)
+	d.val(newBoolValue(&d.API.Auth.DisableLocalhost, false), "api.auth.disable_localhost", "CORE_API_AUTH_DISABLE_LOCALHOST", nil, "Disable authentication for clients from localhost", false, false)
+	d.val(newStringValue(&d.API.Auth.Username, ""), "api.auth.username", "CORE_API_AUTH_USERNAME", []string{"RS_USERNAME"}, "Username", false, false)
+	d.val(newStringValue(&d.API.Auth.Password, ""), "api.auth.password", "CORE_API_AUTH_PASSWORD", []string{"RS_PASSWORD"}, "Password", false, true)
 
 	// Auth JWT
-	d.vars.Register(value.NewString(&d.API.Auth.JWT.Secret, rand.String(32)), "api.auth.jwt.secret", "CORE_API_AUTH_JWT_SECRET", nil, "JWT secret, leave empty for generating a random value", false, true)
+	d.val(newStringValue(&d.API.Auth.JWT.Secret, rand.String(32)), "api.auth.jwt.secret", "CORE_API_AUTH_JWT_SECRET", nil, "JWT secret, leave empty for generating a random value", false, true)
 
 	// Auth Auth0
-	d.vars.Register(value.NewBool(&d.API.Auth.Auth0.Enable, false), "api.auth.auth0.enable", "CORE_API_AUTH_AUTH0_ENABLE", nil, "Enable Auth0", false, false)
-	d.vars.Register(value.NewTenantList(&d.API.Auth.Auth0.Tenants, []value.Auth0Tenant{}, ","), "api.auth.auth0.tenants", "CORE_API_AUTH_AUTH0_TENANTS", nil, "List of Auth0 tenants", false, false)
+	d.val(newBoolValue(&d.API.Auth.Auth0.Enable, false), "api.auth.auth0.enable", "CORE_API_AUTH_AUTH0_ENABLE", nil, "Enable Auth0", false, false)
+	d.val(newTenantListValue(&d.API.Auth.Auth0.Tenants, []Auth0Tenant{}, ","), "api.auth.auth0.tenants", "CORE_API_AUTH_AUTH0_TENANTS", nil, "List of Auth0 tenants", false, false)
 
 	// TLS
-	d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false)
-	d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
-	d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
-	d.vars.Register(value.NewEmail(&d.TLS.Email, "cert@datarhei.com"), "tls.email", "CORE_TLS_EMAIL", nil, "Email for Let's Encrypt registration", false, false)
-	d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
-	d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
+	d.val(newAddressValue(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false)
+	d.val(newBoolValue(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
+	d.val(newBoolValue(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
+	d.val(newEmailValue(&d.TLS.Email, "cert@datarhei.com"), "tls.email", "CORE_TLS_EMAIL", nil, "Email for Let's Encrypt registration", false, false)
+	d.val(newFileValue(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
+	d.val(newFileValue(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
 
 	// Storage
-	d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
+	d.val(newFileValue(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
 
 	// Storage (Disk)
-	d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
-	d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
-	d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
-	d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
-	d.vars.Register(value.NewInt64(&d.Storage.Disk.Cache.TTL, 300), "storage.disk.cache.ttl_seconds", "CORE_STORAGE_DISK_CACHE_TTLSECONDS", nil, "Seconds to keep files in cache", false, false)
-	d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.FileSize, 1), "storage.disk.cache.max_file_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES", nil, "Max. file size to put in cache", false, false)
-	d.vars.Register(value.NewStringList(&d.Storage.Disk.Cache.Types.Allow, []string{}, " "), "storage.disk.cache.type.allow", "CORE_STORAGE_DISK_CACHE_TYPES_ALLOW", []string{"CORE_STORAGE_DISK_CACHE_TYPES"}, "File extensions to cache, empty for all", false, false)
-	d.vars.Register(value.NewStringList(&d.Storage.Disk.Cache.Types.Block, []string{".m3u8", ".mpd"}, " "), "storage.disk.cache.type.block", "CORE_STORAGE_DISK_CACHE_TYPES_BLOCK", nil, "File extensions not to cache, empty for none", false, false)
+	d.val(newMustDirValue(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
+	d.val(newInt64Value(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
+	d.val(newBoolValue(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
+	d.val(newUint64Value(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
+	d.val(newInt64Value(&d.Storage.Disk.Cache.TTL, 300), "storage.disk.cache.ttl_seconds", "CORE_STORAGE_DISK_CACHE_TTLSECONDS", nil, "Seconds to keep files in cache", false, false)
+	d.val(newUint64Value(&d.Storage.Disk.Cache.FileSize, 1), "storage.disk.cache.max_file_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES", nil, "Max. file size to put in cache", false, false)
+	d.val(newStringListValue(&d.Storage.Disk.Cache.Types.Allow, []string{}, " "), "storage.disk.cache.type.allow", "CORE_STORAGE_DISK_CACHE_TYPES_ALLOW", []string{"CORE_STORAGE_DISK_CACHE_TYPES"}, "File extensions to cache, empty for all", false, false)
+	d.val(newStringListValue(&d.Storage.Disk.Cache.Types.Block, []string{".m3u8", ".mpd"}, " "), "storage.disk.cache.type.block", "CORE_STORAGE_DISK_CACHE_TYPES_BLOCK", nil, "File extensions not to cache, empty for none", false, false)
 
 	// Storage (Memory)
-	d.vars.Register(value.NewBool(&d.Storage.Memory.Auth.Enable, true), "storage.memory.auth.enable", "CORE_STORAGE_MEMORY_AUTH_ENABLE", nil, "Enable basic auth for PUT,POST, and DELETE on /memfs", false, false)
-	d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Username, "admin"), "storage.memory.auth.username", "CORE_STORAGE_MEMORY_AUTH_USERNAME", nil, "Username for Basic-Auth of /memfs", false, false)
-	d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Password, rand.StringAlphanumeric(18)), "storage.memory.auth.password", "CORE_STORAGE_MEMORY_AUTH_PASSWORD", nil, "Password for Basic-Auth of /memfs", false, true)
-	d.vars.Register(value.NewInt64(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. allowed megabytes for /memfs, 0 for unlimited", false, false)
-	d.vars.Register(value.NewBool(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false)
+	d.val(newBoolValue(&d.Storage.Memory.Auth.Enable, true), "storage.memory.auth.enable", "CORE_STORAGE_MEMORY_AUTH_ENABLE", nil, "Enable basic auth for PUT,POST, and DELETE on /memfs", false, false)
+	d.val(newStringValue(&d.Storage.Memory.Auth.Username, "admin"), "storage.memory.auth.username", "CORE_STORAGE_MEMORY_AUTH_USERNAME", nil, "Username for Basic-Auth of /memfs", false, false)
+	d.val(newStringValue(&d.Storage.Memory.Auth.Password, rand.StringAlphanumeric(18)), "storage.memory.auth.password", "CORE_STORAGE_MEMORY_AUTH_PASSWORD", nil, "Password for Basic-Auth of /memfs", false, true)
+	d.val(newInt64Value(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. allowed megabytes for /memfs, 0 for unlimited", false, false)
+	d.val(newBoolValue(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false)
+
+	// Storage (S3)
+	d.val(newS3StorageListValue(&d.Storage.S3, []S3Storage{}, "|"), "storage.s3", "CORE_STORAGE_S3", nil, "List of S3 storage URLS", false, false)
 
 	// Storage (CORS)
-	d.vars.Register(value.NewCORSOrigins(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false)
+	d.val(newCORSOriginsValue(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false)
 
 	// RTMP
-	d.vars.Register(value.NewBool(&d.RTMP.Enable, false), "rtmp.enable", "CORE_RTMP_ENABLE", nil, "Enable RTMP server", false, false)
-	d.vars.Register(value.NewBool(&d.RTMP.EnableTLS, false), "rtmp.enable_tls", "CORE_RTMP_ENABLE_TLS", nil, "Enable RTMPS server instead of RTMP", false, false)
-	d.vars.Register(value.NewAddress(&d.RTMP.Address, ":1935"), "rtmp.address", "CORE_RTMP_ADDRESS", nil, "RTMP server listen address", false, false)
```
|
||||
d.vars.Register(value.NewAddress(&d.RTMP.AddressTLS, ":1936"), "rtmp.address_tls", "CORE_RTMP_ADDRESS_TLS", nil, "RTMPS server listen address", false, false)
|
||||
d.vars.Register(value.NewAbsolutePath(&d.RTMP.App, "/"), "rtmp.app", "CORE_RTMP_APP", nil, "RTMP app for publishing", false, false)
|
||||
d.vars.Register(value.NewString(&d.RTMP.Token, ""), "rtmp.token", "CORE_RTMP_TOKEN", nil, "RTMP token for publishing and playing", false, true)
|
||||
d.val(newBoolValue(&d.RTMP.Enable, false), "rtmp.enable", "CORE_RTMP_ENABLE", nil, "Enable RTMP server", false, false)
|
||||
d.val(newBoolValue(&d.RTMP.EnableTLS, false), "rtmp.enable_tls", "CORE_RTMP_ENABLE_TLS", nil, "Enable RTMPS server instead of RTMP", false, false)
|
||||
d.val(newAddressValue(&d.RTMP.Address, ":1935"), "rtmp.address", "CORE_RTMP_ADDRESS", nil, "RTMP server listen address", false, false)
|
||||
d.val(newAddressValue(&d.RTMP.AddressTLS, ":1936"), "rtmp.address_tls", "CORE_RTMP_ADDRESS_TLS", nil, "RTMPS server listen address", false, false)
|
||||
d.val(newAbsolutePathValue(&d.RTMP.App, "/"), "rtmp.app", "CORE_RTMP_APP", nil, "RTMP app for publishing", false, false)
|
||||
d.val(newStringValue(&d.RTMP.Token, ""), "rtmp.token", "CORE_RTMP_TOKEN", nil, "RTMP token for publishing and playing", false, true)
|
||||
|
||||
// SRT
|
||||
d.vars.Register(value.NewBool(&d.SRT.Enable, false), "srt.enable", "CORE_SRT_ENABLE", nil, "Enable SRT server", false, false)
|
||||
d.vars.Register(value.NewAddress(&d.SRT.Address, ":6000"), "srt.address", "CORE_SRT_ADDRESS", nil, "SRT server listen address", false, false)
|
||||
d.vars.Register(value.NewString(&d.SRT.Passphrase, ""), "srt.passphrase", "CORE_SRT_PASSPHRASE", nil, "SRT encryption passphrase", false, true)
|
||||
d.vars.Register(value.NewString(&d.SRT.Token, ""), "srt.token", "CORE_SRT_TOKEN", nil, "SRT token for publishing and playing", false, true)
|
||||
d.vars.Register(value.NewBool(&d.SRT.Log.Enable, false), "srt.log.enable", "CORE_SRT_LOG_ENABLE", nil, "Enable SRT server logging", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)
|
||||
d.val(newBoolValue(&d.SRT.Enable, false), "srt.enable", "CORE_SRT_ENABLE", nil, "Enable SRT server", false, false)
|
||||
d.val(newAddressValue(&d.SRT.Address, ":6000"), "srt.address", "CORE_SRT_ADDRESS", nil, "SRT server listen address", false, false)
|
||||
d.val(newStringValue(&d.SRT.Passphrase, ""), "srt.passphrase", "CORE_SRT_PASSPHRASE", nil, "SRT encryption passphrase", false, true)
|
||||
d.val(newStringValue(&d.SRT.Token, ""), "srt.token", "CORE_SRT_TOKEN", nil, "SRT token for publishing and playing", false, true)
|
||||
d.val(newBoolValue(&d.SRT.Log.Enable, false), "srt.log.enable", "CORE_SRT_LOG_ENABLE", nil, "Enable SRT server logging", false, false)
|
||||
d.val(newStringListValue(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)
|
||||
|
||||
// FFmpeg
|
||||
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
|
||||
d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expression to match against the output addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expression to match against the output addresses", false, false)
|
||||
d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxLines, 50), "ffmpeg.log.max_lines", "CORE_FFMPEG_LOG_MAXLINES", nil, "Number of latest log lines to keep for each process", false, false)
|
||||
d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxHistory, 3), "ffmpeg.log.max_history", "CORE_FFMPEG_LOG_MAXHISTORY", nil, "Number of latest logs to keep for each process", false, false)
|
||||
d.val(newExecValue(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
|
||||
d.val(newInt64Value(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
|
||||
d.val(newStringListValue(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
|
||||
d.val(newStringListValue(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
|
||||
d.val(newStringListValue(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expression to match against the output addresses", false, false)
|
||||
d.val(newStringListValue(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expression to match against the output addresses", false, false)
|
||||
d.val(newIntValue(&d.FFmpeg.Log.MaxLines, 50), "ffmpeg.log.max_lines", "CORE_FFMPEG_LOG_MAXLINES", nil, "Number of latest log lines to keep for each process", false, false)
|
||||
d.val(newIntValue(&d.FFmpeg.Log.MaxHistory, 3), "ffmpeg.log.max_history", "CORE_FFMPEG_LOG_MAXHISTORY", nil, "Number of latest logs to keep for each process", false, false)
|
||||
|
||||
// Playout
|
||||
d.vars.Register(value.NewBool(&d.Playout.Enable, false), "playout.enable", "CORE_PLAYOUT_ENABLE", nil, "Enable playout proxy where available", false, false)
|
||||
d.vars.Register(value.NewPort(&d.Playout.MinPort, 0), "playout.min_port", "CORE_PLAYOUT_MINPORT", nil, "Min. playout server port", false, false)
|
||||
d.vars.Register(value.NewPort(&d.Playout.MaxPort, 0), "playout.max_port", "CORE_PLAYOUT_MAXPORT", nil, "Max. playout server port", false, false)
|
||||
d.val(newBoolValue(&d.Playout.Enable, false), "playout.enable", "CORE_PLAYOUT_ENABLE", nil, "Enable playout proxy where available", false, false)
|
||||
d.val(newPortValue(&d.Playout.MinPort, 0), "playout.min_port", "CORE_PLAYOUT_MINPORT", nil, "Min. playout server port", false, false)
|
||||
d.val(newPortValue(&d.Playout.MaxPort, 0), "playout.max_port", "CORE_PLAYOUT_MAXPORT", nil, "Max. playout server port", false, false)
|
||||
|
||||
// Debug
|
||||
d.vars.Register(value.NewBool(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false)
|
||||
d.val(newBoolValue(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false)
|
||||
d.val(newIntValue(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false)
|
||||
|
||||
// Metrics
|
||||
d.vars.Register(value.NewBool(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Metrics.EnablePrometheus, false), "metrics.enable_prometheus", "CORE_METRICS_ENABLE_PROMETHEUS", nil, "Enable prometheus endpoint /metrics", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Metrics.Range, 300), "metrics.range_seconds", "CORE_METRICS_RANGE_SECONDS", nil, "Seconds to keep history data", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Metrics.Interval, 2), "metrics.interval_seconds", "CORE_METRICS_INTERVAL_SECONDS", nil, "Interval for collecting metrics", false, false)
|
||||
d.val(newBoolValue(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false)
|
||||
d.val(newBoolValue(&d.Metrics.EnablePrometheus, false), "metrics.enable_prometheus", "CORE_METRICS_ENABLE_PROMETHEUS", nil, "Enable prometheus endpoint /metrics", false, false)
|
||||
d.val(newInt64Value(&d.Metrics.Range, 300), "metrics.range_seconds", "CORE_METRICS_RANGE_SECONDS", nil, "Seconds to keep history data", false, false)
|
||||
d.val(newInt64Value(&d.Metrics.Interval, 2), "metrics.interval_seconds", "CORE_METRICS_INTERVAL_SECONDS", nil, "Interval for collecting metrics", false, false)
|
||||
|
||||
// Sessions
|
||||
d.vars.Register(value.NewBool(&d.Sessions.Enable, true), "sessions.enable", "CORE_SESSIONS_ENABLE", nil, "Enable collecting HLS session stats for /memfs", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.Sessions.IPIgnoreList, []string{"127.0.0.1/32", "::1/128"}, ","), "sessions.ip_ignorelist", "CORE_SESSIONS_IP_IGNORELIST", nil, "List of IP ranges in CIDR notation to ignore", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Sessions.SessionTimeout, 30), "sessions.session_timeout_sec", "CORE_SESSIONS_SESSION_TIMEOUT_SEC", nil, "Timeout for an idle session", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Sessions.Persist, false), "sessions.persist", "CORE_SESSIONS_PERSIST", nil, "Whether to persist session history. Will be stored as sessions.json in db.dir", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Sessions.PersistInterval, 300), "sessions.persist_interval_sec", "CORE_SESSIONS_PERSIST_INTERVAL_SEC", nil, "Interval in seconds in which to persist the current session history", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Sessions.MaxBitrate, 0), "sessions.max_bitrate_mbit", "CORE_SESSIONS_MAXBITRATE_MBIT", nil, "Max. allowed outgoing bitrate in mbit/s, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Sessions.MaxSessions, 0), "sessions.max_sessions", "CORE_SESSIONS_MAXSESSIONS", nil, "Max. allowed number of simultaneous sessions, 0 for unlimited", false, false)
|
||||
d.val(newBoolValue(&d.Sessions.Enable, true), "sessions.enable", "CORE_SESSIONS_ENABLE", nil, "Enable collecting HLS session stats for /memfs", false, false)
|
||||
d.val(newCIDRListValue(&d.Sessions.IPIgnoreList, []string{"127.0.0.1/32", "::1/128"}, ","), "sessions.ip_ignorelist", "CORE_SESSIONS_IP_IGNORELIST", nil, "List of IP ranges in CIDR notation to ignore", false, false)
|
||||
d.val(newIntValue(&d.Sessions.SessionTimeout, 30), "sessions.session_timeout_sec", "CORE_SESSIONS_SESSION_TIMEOUT_SEC", nil, "Timeout for an idle session", false, false)
|
||||
d.val(newBoolValue(&d.Sessions.Persist, false), "sessions.persist", "CORE_SESSIONS_PERSIST", nil, "Whether to persist session history. Will be stored as sessions.json in db.dir", false, false)
|
||||
d.val(newIntValue(&d.Sessions.PersistInterval, 300), "sessions.persist_interval_sec", "CORE_SESSIONS_PERSIST_INTERVAL_SEC", nil, "Interval in seconds in which to persist the current session history", false, false)
|
||||
d.val(newUint64Value(&d.Sessions.MaxBitrate, 0), "sessions.max_bitrate_mbit", "CORE_SESSIONS_MAXBITRATE_MBIT", nil, "Max. allowed outgoing bitrate in mbit/s, 0 for unlimited", false, false)
|
||||
d.val(newUint64Value(&d.Sessions.MaxSessions, 0), "sessions.max_sessions", "CORE_SESSIONS_MAXSESSIONS", nil, "Max. allowed number of simultaneous sessions, 0 for unlimited", false, false)
|
||||
|
||||
// Service
|
||||
d.vars.Register(value.NewBool(&d.Service.Enable, false), "service.enable", "CORE_SERVICE_ENABLE", nil, "Enable connecting to the Restreamer Service", false, false)
|
||||
d.vars.Register(value.NewString(&d.Service.Token, ""), "service.token", "CORE_SERVICE_TOKEN", nil, "Restreamer Service account token", false, true)
|
||||
d.vars.Register(value.NewURL(&d.Service.URL, "https://service.datarhei.com"), "service.url", "CORE_SERVICE_URL", nil, "URL of the Restreamer Service", false, false)
|
||||
d.val(newBoolValue(&d.Service.Enable, false), "service.enable", "CORE_SERVICE_ENABLE", nil, "Enable connecting to the Restreamer Service", false, false)
|
||||
d.val(newStringValue(&d.Service.Token, ""), "service.token", "CORE_SERVICE_TOKEN", nil, "Restreamer Service account token", false, true)
|
||||
d.val(newURLValue(&d.Service.URL, "https://service.datarhei.com"), "service.url", "CORE_SERVICE_URL", nil, "URL of the Restreamer Service", false, false)
|
||||
|
||||
// Router
|
||||
d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
|
||||
d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
|
||||
d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
|
||||
d.val(newStringListValue(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
|
||||
d.val(newStringMapStringValue(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
|
||||
d.val(newDirValue(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
|
||||
}
|
||||
|
||||
func (d *Config) val(val value, name, envName string, envAltNames []string, description string, required, disguise bool) {
    d.vars = append(d.vars, &variable{
        value:       val,
        defVal:      val.String(),
        name:        name,
        envName:     envName,
        envAltNames: envAltNames,
        description: description,
        required:    required,
        disguise:    disguise,
    })
}

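The val helper above stores every configuration value behind a small interface. Its actual definition is not part of this diff, but from the calls made in this file (val.String() for the default, value.Set in Merge, value.Validate and value.IsEmpty in Validate) it can be sketched roughly like this (a reconstruction, not the repository's definition):

type value interface {
    String() string       // capture the default as defVal at registration time
    Set(val string) error // apply an override, e.g. from an environment variable
    Validate() error      // per-value sanity check, used by Config.Validate
    IsEmpty() bool        // used by Config.Validate to enforce required values
}
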
func (d *Config) log(level string, v *variable, format string, args ...interface{}) {
    variable := Variable{
        Value:       v.value.String(),
        Name:        v.name,
        EnvName:     v.envName,
        Description: v.description,
        Merged:      v.merged,
    }

    if v.disguise {
        variable.Value = "***"
    }

    l := message{
        message:  fmt.Sprintf(format, args...),
        variable: variable,
        level:    level,
    }

    d.logs = append(d.logs, l)
}

// Merge merges the values of the known environment variables into the configuration
func (d *Config) Merge() {
    for _, v := range d.vars {
        if len(v.envName) == 0 {
            continue
        }

        var envval string
        var ok bool

        envval, ok = os.LookupEnv(v.envName)
        if !ok {
            foundAltName := false

            for _, envName := range v.envAltNames {
                envval, ok = os.LookupEnv(envName)
                if ok {
                    foundAltName = true
                    d.log("warn", v, "deprecated name, please use %s", v.envName)
                    break
                }
            }

            if !foundAltName {
                continue
            }
        }

        err := v.value.Set(envval)
        if err != nil {
            d.log("error", v, "%s", err.Error())
        }

        v.merged = true
    }
}

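A minimal usage sketch of the override behavior (using the New() constructor seen elsewhere in this changeset; the chosen variable is just an illustration):

os.Setenv("CORE_RTMP_ENABLE", "true") // simulate an operator-supplied override
cfg := New()
cfg.Merge() // cfg.RTMP.Enable is now true; the variable is flagged as merged
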
// Validate validates the current state of the Config for completeness and sanity. Errors are
// written to the log. Use resetLogs to reset the logs prior to validation.
func (d *Config) Validate(resetLogs bool) {
    if resetLogs {
        d.vars.ResetLogs()
        d.logs = nil
    }

    if d.Version != version {
        d.vars.Log("error", "version", "unknown configuration layout version (found version %d, expecting version %d)", d.Version, version)
        d.log("error", d.findVariable("version"), "unknown configuration layout version (found version %d, expecting version %d)", d.Version, version)

        return
    }

    d.vars.Validate()
    for _, v := range d.vars {
        d.log("info", v, "%s", "")

        err := v.value.Validate()
        if err != nil {
            d.log("error", v, "%s", err.Error())
        }

        if v.required && v.value.IsEmpty() {
            d.log("error", v, "a value is required")
        }
    }

    // Individual sanity checks

    // If HTTP Auth is enabled, check that the username and password are set
    if d.API.Auth.Enable {
        if len(d.API.Auth.Username) == 0 || len(d.API.Auth.Password) == 0 {
            d.vars.Log("error", "api.auth.enable", "api.auth.username and api.auth.password must be set")
            d.log("error", d.findVariable("api.auth.enable"), "api.auth.username and api.auth.password must be set")
        }
    }

    // If Auth0 is enabled, check that domain, audience, and clientid are set
    if d.API.Auth.Auth0.Enable {
        if len(d.API.Auth.Auth0.Tenants) == 0 {
            d.vars.Log("error", "api.auth.auth0.enable", "at least one tenant must be set")
            d.log("error", d.findVariable("api.auth.auth0.enable"), "at least one tenant must be set")
        }

        for i, t := range d.API.Auth.Auth0.Tenants {
            if len(t.Domain) == 0 || len(t.Audience) == 0 || len(t.ClientID) == 0 {
                d.vars.Log("error", "api.auth.auth0.tenants", "domain, audience, and clientid must be set (tenant %d)", i)
                d.log("error", d.findVariable("api.auth.auth0.tenants"), "domain, audience, and clientid must be set (tenant %d)", i)
            }
        }
    }
@@ -299,14 +406,14 @@ func (d *Config) Validate(resetLogs bool) {
    // If TLS is enabled and Let's Encrypt is disabled, require certfile and keyfile
    if d.TLS.Enable && !d.TLS.Auto {
        if len(d.TLS.CertFile) == 0 || len(d.TLS.KeyFile) == 0 {
            d.vars.Log("error", "tls.enable", "tls.certfile and tls.keyfile must be set")
            d.log("error", d.findVariable("tls.enable"), "tls.certfile and tls.keyfile must be set")
        }
    }

    // If TLS and the Let's Encrypt certificate are enabled, we require a public hostname
    if d.TLS.Enable && d.TLS.Auto {
        if len(d.Host.Name) == 0 {
            d.vars.Log("error", "host.name", "a hostname must be set in order to get an automatic TLS certificate")
            d.log("error", d.findVariable("host.name"), "a hostname must be set in order to get an automatic TLS certificate")
        } else {
            r := &net.Resolver{
                PreferGo: true,
@@ -316,7 +423,7 @@ func (d *Config) Validate(resetLogs bool) {
            for _, host := range d.Host.Name {
                // Don't look up IP addresses
                if ip := net.ParseIP(host); ip != nil {
                    d.vars.Log("error", "host.name", "only host names are allowed if automatic TLS is enabled, but found IP address: %s", host)
                    d.log("error", d.findVariable("host.name"), "only host names are allowed if automatic TLS is enabled, but found IP address: %s", host)
                }

                // Look up the host name with a timeout
@@ -324,7 +431,7 @@ func (d *Config) Validate(resetLogs bool) {

                _, err := r.LookupHost(ctx, host)
                if err != nil {
                    d.vars.Log("error", "host.name", "the host '%s' can't be resolved and will not work with automatic TLS", host)
                    d.log("error", d.findVariable("host.name"), "the host '%s' can't be resolved and will not work with automatic TLS", host)
                }

                cancel()
@@ -335,31 +442,32 @@ func (d *Config) Validate(resetLogs bool) {
    // If TLS and the Let's Encrypt certificate are enabled, we require a non-empty email address
    if d.TLS.Enable && d.TLS.Auto {
        if len(d.TLS.Email) == 0 {
            d.vars.SetDefault("tls.email")
            v := d.findVariable("tls.email")
            v.value.Set(v.defVal)
        }
    }

    // If TLS for RTMP is enabled, TLS must be enabled
    if d.RTMP.EnableTLS {
        if !d.RTMP.Enable {
            d.vars.Log("error", "rtmp.enable", "RTMP server must be enabled if RTMPS server is enabled")
            d.log("error", d.findVariable("rtmp.enable"), "RTMP server must be enabled if RTMPS server is enabled")
        }

        if !d.TLS.Enable {
            d.vars.Log("error", "rtmp.enable_tls", "RTMPS server can only be enabled if TLS is enabled")
            d.log("error", d.findVariable("rtmp.enable_tls"), "RTMPS server can only be enabled if TLS is enabled")
        }

        if len(d.RTMP.AddressTLS) == 0 {
            d.vars.Log("error", "rtmp.address_tls", "RTMPS server address must be set")
            d.log("error", d.findVariable("rtmp.address_tls"), "RTMPS server address must be set")
        }

        if d.RTMP.Enable && d.RTMP.Address == d.RTMP.AddressTLS {
            d.vars.Log("error", "rtmp.address", "The RTMP and RTMPS server can't listen on the same address")
            d.log("error", d.findVariable("rtmp.address"), "The RTMP and RTMPS server can't listen on the same address")
        }
    }

    // If CORE_MEMFS_USERNAME and CORE_MEMFS_PASSWORD are set, automatically activate/deactivate Basic-Auth for memfs
    if d.vars.IsMerged("storage.memory.auth.username") && d.vars.IsMerged("storage.memory.auth.password") {
    if d.findVariable("storage.memory.auth.username").merged && d.findVariable("storage.memory.auth.password").merged {
        d.Storage.Memory.Auth.Enable = true

        if len(d.Storage.Memory.Auth.Username) == 0 && len(d.Storage.Memory.Auth.Password) == 0 {
@@ -370,76 +478,121 @@ func (d *Config) Validate(resetLogs bool) {
    // If Basic-Auth for memfs is enabled, check that the username and password are set
    if d.Storage.Memory.Auth.Enable {
        if len(d.Storage.Memory.Auth.Username) == 0 || len(d.Storage.Memory.Auth.Password) == 0 {
            d.vars.Log("error", "storage.memory.auth.enable", "storage.memory.auth.username and storage.memory.auth.password must be set")
            d.log("error", d.findVariable("storage.memory.auth.enable"), "storage.memory.auth.username and storage.memory.auth.password must be set")
        }
    }

    // If playout is enabled, check that the port range is sane
    if d.Playout.Enable {
        if d.Playout.MinPort >= d.Playout.MaxPort {
            d.vars.Log("error", "playout.min_port", "must be smaller than playout.max_port")
            d.log("error", d.findVariable("playout.min_port"), "must be smaller than playout.max_port")
        }
    }

    // If the cache is enabled, the TTL has to be set to a useful value
    if d.Storage.Disk.Cache.Enable && d.Storage.Disk.Cache.TTL < 0 {
        d.vars.Log("error", "storage.disk.cache.ttl_seconds", "must be equal or greater than 0")
        d.log("error", d.findVariable("storage.disk.cache.ttl_seconds"), "must be equal or greater than 0")
    }

    // If the stats are enabled, the session timeout has to be set to a useful value
    if d.Sessions.Enable && d.Sessions.SessionTimeout < 1 {
        d.vars.Log("error", "stats.session_timeout_sec", "must be equal or greater than 1")
        d.log("error", d.findVariable("stats.session_timeout_sec"), "must be equal or greater than 1")
    }

    // If the stats and their persistence are enabled, the persist interval has to be set to a useful value
    if d.Sessions.Enable && d.Sessions.PersistInterval < 0 {
        d.vars.Log("error", "stats.persist_interval_sec", "must be equal or greater than 0")
        d.log("error", d.findVariable("stats.persist_interval_sec"), "must be equal or greater than 0")
    }

    // If the service is enabled, the token and endpoint have to be defined
    if d.Service.Enable {
        if len(d.Service.Token) == 0 {
            d.vars.Log("error", "service.token", "must be non-empty")
            d.log("error", d.findVariable("service.token"), "must be non-empty")
        }

        if len(d.Service.URL) == 0 {
            d.vars.Log("error", "service.url", "must be non-empty")
            d.log("error", d.findVariable("service.url"), "must be non-empty")
        }
    }

    // If historic metrics are enabled, the time range and interval have to be valid
    if d.Metrics.Enable {
        if d.Metrics.Range <= 0 {
            d.vars.Log("error", "metrics.range", "must be greater than 0")
            d.log("error", d.findVariable("metrics.range"), "must be greater than 0")
        }

        if d.Metrics.Interval <= 0 {
            d.vars.Log("error", "metrics.interval", "must be greater than 0")
            d.log("error", d.findVariable("metrics.interval"), "must be greater than 0")
        }

        if d.Metrics.Interval > d.Metrics.Range {
            d.vars.Log("error", "metrics.interval", "must be smaller than the range")
            d.log("error", d.findVariable("metrics.interval"), "must be smaller than the range")
        }
    }
}

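The typical call sequence implied by the store code later in this diff:

cfg := New()
cfg.Merge()        // apply environment overrides first
cfg.Validate(true) // reset the logs, then run every sanity check above
if cfg.HasErrors() {
    // at least one "error" level message was logged; treat the config as unusable
}
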
// Merge merges the values of the known environment variables into the configuration
func (d *Config) Merge() {
    d.vars.Merge()
func (d *Config) findVariable(name string) *variable {
    for _, v := range d.vars {
        if v.name == name {
            return v
        }
    }

    return nil
}

// Messages calls for each log entry the provided callback. The level has the values 'error', 'warn', or 'info'.
// The name is the name of the configuration value, e.g. 'api.auth.enable'. The message is the log message.
func (d *Config) Messages(logger func(level string, v vars.Variable, message string)) {
    d.vars.Messages(logger)
func (d *Config) Messages(logger func(level string, v Variable, message string)) {
    for _, l := range d.logs {
        logger(l.level, l.variable, l.message)
    }
}

// HasErrors returns whether there are some error messages in the log.
func (d *Config) HasErrors() bool {
    return d.vars.HasErrors()
    for _, l := range d.logs {
        if l.level == "error" {
            return true
        }
    }

    return false
}

// Overrides returns a list of configuration value names that have been overridden by an environment variable.
func (d *Config) Overrides() []string {
    return d.vars.Overrides()
    overrides := []string{}

    for _, v := range d.vars {
        if v.merged {
            overrides = append(overrides, v.name)
        }
    }

    return overrides
}

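A small sketch of the callback form of Messages (the logger body is hypothetical; v.Name and the other Variable fields are filled in by log() above):

cfg.Messages(func(level string, v Variable, message string) {
    fmt.Printf("[%s] %s: %s\n", level, v.Name, message)
})
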
func copyStringSlice(src []string) []string {
    dst := make([]string, len(src))
    copy(dst, src)

    return dst
}

func copyStringMap(src map[string]string) map[string]string {
    dst := make(map[string]string)

    for k, v := range src {
        dst[k] = v
    }

    return dst
}

func copyTenantSlice(src []Auth0Tenant) []Auth0Tenant {
    dst := make([]Auth0Tenant, len(src))
    copy(dst, src)

    return dst
}

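One semantic detail worth noting: copyTenantSlice above copies only the top-level slice, while the generic copy.TenantSlice removed further down also deep-copies each tenant's Users slice. A deep variant in the non-generic style would look roughly like this (a hypothetical helper, not part of this change; assumes Users is a string slice):

func copyTenantSliceDeep(src []Auth0Tenant) []Auth0Tenant {
    dst := make([]Auth0Tenant, len(src))
    copy(dst, src)
    for i, t := range src {
        dst[i].Users = copyStringSlice(t.Users) // detach the nested slice as well
    }
    return dst
}
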
@@ -3,7 +3,7 @@ package config
import (
    "testing"

    "github.com/stretchr/testify/require"
    "github.com/stretchr/testify/assert"
)

func TestConfigCopy(t *testing.T) {
@@ -12,41 +12,44 @@ func TestConfigCopy(t *testing.T) {
    config1.Version = 42
    config1.DB.Dir = "foo"

    val1, _ := config1.Get("version")
    val2, _ := config1.Get("db.dir")
    val3, _ := config1.Get("host.name")
    val1 := config1.findVariable("version")
    val2 := config1.findVariable("db.dir")
    val3 := config1.findVariable("host.name")

    require.Equal(t, "42", val1)
    require.Equal(t, "foo", val2)
    require.Equal(t, "(empty)", val3)
    assert.Equal(t, "42", val1.value.String())
    assert.Equal(t, nil, val1.value.Validate())
    assert.Equal(t, false, val1.value.IsEmpty())

    config1.Set("host.name", "foo.com")
    val3, _ = config1.Get("host.name")
    require.Equal(t, "foo.com", val3)
    assert.Equal(t, "foo", val2.value.String())
    assert.Equal(t, "(empty)", val3.value.String())

    config2 := config1.Clone()
    val3.value.Set("foo.com")

    require.Equal(t, int64(42), config2.Version)
    require.Equal(t, "foo", config2.DB.Dir)
    require.Equal(t, []string{"foo.com"}, config2.Host.Name)
    assert.Equal(t, "foo.com", val3.value.String())

    config1.Set("version", "77")
    config2 := NewConfigFrom(config1)

    require.Equal(t, int64(77), config1.Version)
    require.Equal(t, int64(42), config2.Version)
    assert.Equal(t, int64(42), config2.Version)
    assert.Equal(t, "foo", config2.DB.Dir)
    assert.Equal(t, []string{"foo.com"}, config2.Host.Name)

    config1.Set("db.dir", "bar")
    val1.value.Set("77")

    require.Equal(t, "bar", config1.DB.Dir)
    require.Equal(t, "foo", config2.DB.Dir)
    assert.Equal(t, int64(77), config1.Version)
    assert.Equal(t, int64(42), config2.Version)

    val2.value.Set("bar")

    assert.Equal(t, "bar", config1.DB.Dir)
    assert.Equal(t, "foo", config2.DB.Dir)

    config2.DB.Dir = "baz"

    require.Equal(t, "bar", config1.DB.Dir)
    require.Equal(t, "baz", config2.DB.Dir)
    assert.Equal(t, "bar", config1.DB.Dir)
    assert.Equal(t, "baz", config2.DB.Dir)

    config1.Host.Name[0] = "bar.com"

    require.Equal(t, []string{"bar.com"}, config1.Host.Name)
    require.Equal(t, []string{"foo.com"}, config2.Host.Name)
    assert.Equal(t, []string{"bar.com"}, config1.Host.Name)
    assert.Equal(t, []string{"foo.com"}, config2.Host.Name)
}

@@ -1,30 +0,0 @@
package copy

import "github.com/datarhei/core/v16/config/value"

func StringMap(src map[string]string) map[string]string {
    dst := make(map[string]string)

    for k, v := range src {
        dst[k] = v
    }

    return dst
}

func TenantSlice(src []value.Auth0Tenant) []value.Auth0Tenant {
    dst := Slice(src)

    for i, t := range src {
        dst[i].Users = Slice(t.Users)
    }

    return dst
}

func Slice[T any](src []T) []T {
    dst := make([]T, len(src))
    copy(dst, src)

    return dst
}
149 config/data.go
@@ -1,12 +1,6 @@
package config

import (
    "time"

    "github.com/datarhei/core/v16/config/copy"
    v2 "github.com/datarhei/core/v16/config/v2"
    "github.com/datarhei/core/v16/config/value"
)
import "time"

// Data is the actual configuration data for the app
type Data struct {
@@ -51,8 +45,8 @@ type Data struct {
            Secret string `json:"secret"`
        } `json:"jwt"`
        Auth0 struct {
            Enable  bool                `json:"enable"`
            Tenants []value.Auth0Tenant `json:"tenants"`
            Enable  bool          `json:"enable"`
            Tenants []Auth0Tenant `json:"tenants"`
        } `json:"auth0"`
    } `json:"auth"`
} `json:"api"`
@@ -88,6 +82,7 @@ type Data struct {
        Size  int64 `json:"max_size_mbytes"`
        Purge bool  `json:"purge"`
    } `json:"memory"`
    S3   []S3Storage `json:"s3"`
    CORS struct {
        Origins []string `json:"origins"`
    } `json:"cors"`
@@ -165,13 +160,9 @@ type Data struct {
    } `json:"router"`
}

func UpgradeV2ToV3(d *v2.Data) (*Data, error) {
    cfg := New()
func NewV3FromV2(d *dataV2) (*Data, error) {
    data := &Data{}

    return MergeV2toV3(&cfg.Data, d)
}

func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) {
    data.CreatedAt = d.CreatedAt
    data.LoadedAt = d.LoadedAt
    data.UpdatedAt = d.UpdatedAt
@@ -195,37 +186,30 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) {
    data.Service = d.Service
    data.Router = d.Router

    data.Log.Topics = copy.Slice(d.Log.Topics)
    data.Log.Topics = copyStringSlice(d.Log.Topics)

    data.Host.Name = copy.Slice(d.Host.Name)
    data.Host.Name = copyStringSlice(d.Host.Name)

    data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
    data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
    data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
    data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)
    data.API.Access.HTTP.Allow = copyStringSlice(d.API.Access.HTTP.Allow)
    data.API.Access.HTTP.Block = copyStringSlice(d.API.Access.HTTP.Block)
    data.API.Access.HTTPS.Allow = copyStringSlice(d.API.Access.HTTPS.Allow)
    data.API.Access.HTTPS.Block = copyStringSlice(d.API.Access.HTTPS.Block)

    data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)
    data.API.Auth.Auth0.Tenants = copyTenantSlice(d.API.Auth.Auth0.Tenants)

    data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
    data.Storage.CORS.Origins = copyStringSlice(d.Storage.CORS.Origins)

    data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
    data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
    data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
    data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)
    data.FFmpeg.Access.Input.Allow = copyStringSlice(d.FFmpeg.Access.Input.Allow)
    data.FFmpeg.Access.Input.Block = copyStringSlice(d.FFmpeg.Access.Input.Block)
    data.FFmpeg.Access.Output.Allow = copyStringSlice(d.FFmpeg.Access.Output.Allow)
    data.FFmpeg.Access.Output.Block = copyStringSlice(d.FFmpeg.Access.Output.Block)

    data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)
    data.Sessions.IPIgnoreList = copyStringSlice(d.Sessions.IPIgnoreList)

    data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)
    data.SRT.Log.Topics = copyStringSlice(d.SRT.Log.Topics)

    data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
    data.Router.Routes = copy.StringMap(d.Router.Routes)

    data.Storage.MimeTypes = d.Storage.MimeTypes

    data.Storage.CORS = d.Storage.CORS
    data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)

    data.Storage.Memory = d.Storage.Memory
    data.Router.BlockedPrefixes = copyStringSlice(d.Router.BlockedPrefixes)
    data.Router.Routes = copyStringMap(d.Router.Routes)

    // Actual changes
    data.TLS.Enable = d.TLS.Enable
@@ -233,6 +217,14 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) {
    data.TLS.Auto = d.TLS.Auto
    data.TLS.CertFile = d.TLS.CertFile
    data.TLS.KeyFile = d.TLS.KeyFile
    data.TLS.Email = "cert@datarhei.com"

    data.Storage.MimeTypes = d.Storage.MimeTypes

    data.Storage.CORS = d.Storage.CORS
    data.Storage.CORS.Origins = copyStringSlice(d.Storage.CORS.Origins)

    data.Storage.Memory = d.Storage.Memory

    data.Storage.Disk.Dir = d.Storage.Disk.Dir
    data.Storage.Disk.Size = d.Storage.Disk.Size
@@ -240,87 +232,10 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) {
    data.Storage.Disk.Cache.Size = d.Storage.Disk.Cache.Size
    data.Storage.Disk.Cache.FileSize = d.Storage.Disk.Cache.FileSize
    data.Storage.Disk.Cache.TTL = d.Storage.Disk.Cache.TTL
    data.Storage.Disk.Cache.Types.Allow = copy.Slice(d.Storage.Disk.Cache.Types)
    data.Storage.Disk.Cache.Types.Allow = copyStringSlice(d.Storage.Disk.Cache.Types)
    data.Storage.Disk.Cache.Types.Block = []string{}

    data.Version = 3

    return data, nil
}

func DowngradeV3toV2(d *Data) (*v2.Data, error) {
    data := &v2.Data{}

    data.CreatedAt = d.CreatedAt
    data.LoadedAt = d.LoadedAt
    data.UpdatedAt = d.UpdatedAt

    data.ID = d.ID
    data.Name = d.Name
    data.Address = d.Address
    data.CheckForUpdates = d.CheckForUpdates

    data.Log = d.Log
    data.DB = d.DB
    data.Host = d.Host
    data.API = d.API
    data.RTMP = d.RTMP
    data.SRT = d.SRT
    data.FFmpeg = d.FFmpeg
    data.Playout = d.Playout
    data.Debug = d.Debug
    data.Metrics = d.Metrics
    data.Sessions = d.Sessions
    data.Service = d.Service
    data.Router = d.Router

    data.Log.Topics = copy.Slice(d.Log.Topics)

    data.Host.Name = copy.Slice(d.Host.Name)

    data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
    data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
    data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
    data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)

    data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)

    data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)

    data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
    data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
    data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
    data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)

    data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)

    data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)

    data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
    data.Router.Routes = copy.StringMap(d.Router.Routes)

    // Actual changes
    data.TLS.Enable = d.TLS.Enable
    data.TLS.Address = d.TLS.Address
    data.TLS.Auto = d.TLS.Auto
    data.TLS.CertFile = d.TLS.CertFile
    data.TLS.KeyFile = d.TLS.KeyFile

    data.Storage.MimeTypes = d.Storage.MimeTypes

    data.Storage.CORS = d.Storage.CORS
    data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)

    data.Storage.Memory = d.Storage.Memory

    data.Storage.Disk.Dir = d.Storage.Disk.Dir
    data.Storage.Disk.Size = d.Storage.Disk.Size
    data.Storage.Disk.Cache.Enable = d.Storage.Disk.Cache.Enable
    data.Storage.Disk.Cache.Size = d.Storage.Disk.Cache.Size
    data.Storage.Disk.Cache.FileSize = d.Storage.Disk.Cache.FileSize
    data.Storage.Disk.Cache.TTL = d.Storage.Disk.Cache.TTL
    data.Storage.Disk.Cache.Types = copy.Slice(d.Storage.Disk.Cache.Types.Allow)

    data.Version = 2

    return data, nil
}

@@ -1,12 +1,8 @@
package v1
package config

import (
    "time"
import "time"

    "github.com/datarhei/core/v16/config/value"
)

type Data struct {
type dataV1 struct {
    CreatedAt time.Time `json:"created_at"`
    LoadedAt  time.Time `json:"-"`
    UpdatedAt time.Time `json:"-"`
@@ -48,8 +44,8 @@ type Data struct {
            Secret string `json:"secret"`
        } `json:"jwt"`
        Auth0 struct {
            Enable  bool                `json:"enable"`
            Tenants []value.Auth0Tenant `json:"tenants"`
            Enable  bool          `json:"enable"`
            Tenants []Auth0Tenant `json:"tenants"`
        } `json:"auth0"`
    } `json:"auth"`
} `json:"api"`
@@ -1,4 +1,4 @@
package v2
package config

import (
    "fmt"
@@ -6,13 +6,9 @@ import (
    "strconv"
    "strings"
    "time"

    "github.com/datarhei/core/v16/config/copy"
    v1 "github.com/datarhei/core/v16/config/v1"
    "github.com/datarhei/core/v16/config/value"
)

type Data struct {
type dataV2 struct {
    CreatedAt time.Time `json:"created_at"`
    LoadedAt  time.Time `json:"-"`
    UpdatedAt time.Time `json:"-"`
@@ -54,8 +50,8 @@ type Data struct {
            Secret string `json:"secret"`
        } `json:"jwt"`
        Auth0 struct {
            Enable  bool                `json:"enable"`
            Tenants []value.Auth0Tenant `json:"tenants"`
            Enable  bool          `json:"enable"`
            Tenants []Auth0Tenant `json:"tenants"`
        } `json:"auth0"`
    } `json:"auth"`
} `json:"api"`
@@ -164,15 +160,11 @@ type Data struct {
    } `json:"router"`
}

func UpgradeV1ToV2(d *v1.Data) (*Data, error) {
    cfg := New()

    return MergeV1ToV2(&cfg.Data, d)
}

// Migrate will migrate some settings, depending on the version it finds. Migrations
// are only going upwards, i.e. from a lower version to a higher version.
func MergeV1ToV2(data *Data, d *v1.Data) (*Data, error) {
// are only going upwards, i.e. from a lower version to a higher version.
func NewV2FromV1(d *dataV1) (*dataV2, error) {
    data := &dataV2{}

    data.CreatedAt = d.CreatedAt
    data.LoadedAt = d.LoadedAt
    data.UpdatedAt = d.UpdatedAt
@@ -197,30 +189,30 @@ func MergeV1ToV2(data *Data, d *v1.Data) (*Data, error) {
    data.Service = d.Service
    data.Router = d.Router

    data.Log.Topics = copy.Slice(d.Log.Topics)
    data.Log.Topics = copyStringSlice(d.Log.Topics)

    data.Host.Name = copy.Slice(d.Host.Name)
    data.Host.Name = copyStringSlice(d.Host.Name)

    data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
    data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
    data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
    data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)
    data.API.Access.HTTP.Allow = copyStringSlice(d.API.Access.HTTP.Allow)
    data.API.Access.HTTP.Block = copyStringSlice(d.API.Access.HTTP.Block)
    data.API.Access.HTTPS.Allow = copyStringSlice(d.API.Access.HTTPS.Allow)
    data.API.Access.HTTPS.Block = copyStringSlice(d.API.Access.HTTPS.Block)

    data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)
    data.API.Auth.Auth0.Tenants = copyTenantSlice(d.API.Auth.Auth0.Tenants)

    data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
    data.Storage.CORS.Origins = copyStringSlice(d.Storage.CORS.Origins)

    data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
    data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
    data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
    data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)
    data.FFmpeg.Access.Input.Allow = copyStringSlice(d.FFmpeg.Access.Input.Allow)
    data.FFmpeg.Access.Input.Block = copyStringSlice(d.FFmpeg.Access.Input.Block)
    data.FFmpeg.Access.Output.Allow = copyStringSlice(d.FFmpeg.Access.Output.Allow)
    data.FFmpeg.Access.Output.Block = copyStringSlice(d.FFmpeg.Access.Output.Block)

    data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)
    data.Sessions.IPIgnoreList = copyStringSlice(d.Sessions.IPIgnoreList)

    data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)
    data.SRT.Log.Topics = copyStringSlice(d.SRT.Log.Topics)

    data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
    data.Router.Routes = copy.StringMap(d.Router.Routes)
    data.Router.BlockedPrefixes = copyStringSlice(d.Router.BlockedPrefixes)
    data.Router.Routes = copyStringMap(d.Router.Routes)

    // Actual changes
    data.RTMP.Enable = d.RTMP.Enable
@@ -253,67 +245,3 @@ func MergeV1ToV2(data *Data, d *v1.Data) (*Data, error) {

    return data, nil
}

func DowngradeV2toV1(d *Data) (*v1.Data, error) {
    data := &v1.Data{}

    data.CreatedAt = d.CreatedAt
    data.LoadedAt = d.LoadedAt
    data.UpdatedAt = d.UpdatedAt

    data.ID = d.ID
    data.Name = d.Name
    data.Address = d.Address
    data.CheckForUpdates = d.CheckForUpdates

    data.Log = d.Log
    data.DB = d.DB
    data.Host = d.Host
    data.API = d.API
    data.TLS = d.TLS
    data.Storage = d.Storage
    data.SRT = d.SRT
    data.FFmpeg = d.FFmpeg
    data.Playout = d.Playout
    data.Debug = d.Debug
    data.Metrics = d.Metrics
    data.Sessions = d.Sessions
    data.Service = d.Service
    data.Router = d.Router

    data.Log.Topics = copy.Slice(d.Log.Topics)

    data.Host.Name = copy.Slice(d.Host.Name)

    data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
    data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
    data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
    data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)

    data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)

    data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)

    data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
    data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
    data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
    data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)

    data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)

    data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)

    data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
    data.Router.Routes = copy.StringMap(d.Router.Routes)

    // Actual changes
    data.RTMP.Enable = d.RTMP.Enable
    data.RTMP.EnableTLS = d.RTMP.EnableTLS
    data.RTMP.Address = d.RTMP.Address
    data.RTMP.App = d.RTMP.App
    data.RTMP.Token = d.RTMP.Token

    data.Version = 1

    return data, nil
}
@@ -1,21 +1,17 @@
package store
package config

import (
    "fmt"

    "github.com/datarhei/core/v16/config"
)
import "fmt"

type dummyStore struct {
    current *config.Config
    active  *config.Config
    current *Config
    active  *Config
}

// NewDummyStore returns a store that returns the default config
func NewDummy() Store {
func NewDummyStore() Store {
    s := &dummyStore{}

    cfg := config.New()
    cfg := New()

    cfg.DB.Dir = "."
    cfg.FFmpeg.Binary = "true"
@@ -24,7 +20,7 @@ func NewDummy() Store {

    s.current = cfg

    cfg = config.New()
    cfg = New()

    cfg.DB.Dir = "."
    cfg.FFmpeg.Binary = "true"
@@ -36,34 +32,48 @@ func NewDummy() Store {

    return s
}

func (c *dummyStore) Get() *config.Config {
    return c.current.Clone()
func (c *dummyStore) Get() *Config {
    cfg := New()

    cfg.DB.Dir = "."
    cfg.FFmpeg.Binary = "true"
    cfg.Storage.Disk.Dir = "."
    cfg.Storage.MimeTypes = ""

    return cfg
}

func (c *dummyStore) Set(d *config.Config) error {
func (c *dummyStore) Set(d *Config) error {
    d.Validate(true)

    if d.HasErrors() {
        return fmt.Errorf("configuration data has errors after validation")
    }

    c.current = d.Clone()
    c.current = NewConfigFrom(d)

    return nil
}

func (c *dummyStore) GetActive() *config.Config {
    return c.active.Clone()
func (c *dummyStore) GetActive() *Config {
    cfg := New()

    cfg.DB.Dir = "."
    cfg.FFmpeg.Binary = "true"
    cfg.Storage.Disk.Dir = "."
    cfg.Storage.MimeTypes = ""

    return cfg
}

func (c *dummyStore) SetActive(d *config.Config) error {
func (c *dummyStore) SetActive(d *Config) error {
    d.Validate(true)

    if d.HasErrors() {
        return fmt.Errorf("configuration data has errors after validation")
    }

    c.active = d.Clone()
    c.active = NewConfigFrom(d)

    return nil
}
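Note a behavioral quirk of the restored dummy store: Get and GetActive rebuild a fresh default config on every call instead of cloning c.current and c.active, so a preceding Set is not observable through Get. A quick illustration (hypothetical snippet):

s := NewDummyStore()
cfg := s.Get()
cfg.Name = "changed"
s.Set(cfg)       // stored in c.current ...
_ = s.Get().Name // ... but Get rebuilds the defaults, so "changed" is gone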
71 config/ip.go (new file)
@@ -0,0 +1,71 @@
package config

import (
    "io"
    "net/http"
    "sync"
    "time"
)

// SetPublicIPs will try to figure out the public IPs (v4 and v6)
// we're running on. There's a timeout of max. 5 seconds to do it.
// If it fails, the IPs will simply not be set.
func (d *Config) SetPublicIPs() {
    var wg sync.WaitGroup

    ipv4 := ""
    ipv6 := ""

    wg.Add(2)

    go func() {
        defer wg.Done()

        ipv4 = doRequest("https://api.ipify.org")
    }()

    go func() {
        defer wg.Done()

        ipv6 = doRequest("https://api6.ipify.org")
    }()

    wg.Wait()

    if len(ipv4) != 0 {
        d.Host.Name = append(d.Host.Name, ipv4)
    }

    if len(ipv6) != 0 && ipv4 != ipv6 {
        d.Host.Name = append(d.Host.Name, ipv6)
    }
}

func doRequest(url string) string {
    client := &http.Client{
        Timeout: 5 * time.Second,
    }

    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        return ""
    }

    resp, err := client.Do(req)
    if err != nil {
        return ""
    }

    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return ""
    }

    if resp.StatusCode != 200 {
        return ""
    }

    return string(body)
}
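SetPublicIPs queries ipify over IPv4 and IPv6 in parallel and appends whatever comes back to Host.Name. A plausible wiring with the Let's Encrypt validation earlier in this diff (a sketch, not taken from this changeset):

cfg := New()
cfg.SetPublicIPs() // best effort; each lookup gives up after 5 seconds
cfg.Validate(true) // the host.name checks now see the detected public IPs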
@@ -1,4 +1,4 @@
package store
package config

import (
    gojson "encoding/json"
@@ -7,9 +7,6 @@ import (
    "path/filepath"
    "time"

    "github.com/datarhei/core/v16/config"
    v1 "github.com/datarhei/core/v16/config/v1"
    v2 "github.com/datarhei/core/v16/config/v2"
    "github.com/datarhei/core/v16/encoding/json"
    "github.com/datarhei/core/v16/io/file"
)
@@ -17,7 +14,7 @@
type jsonStore struct {
    path string

    data map[string]*config.Config
    data map[string]*Config

    reloadFn func()
}
@@ -26,14 +23,14 @@ type jsonStore struct {
// back to the path. The returned error will be nil if everything went fine.
// If the path doesn't exist, a default JSON config file will be written to that path.
// The returned ConfigStore can be used to retrieve or write the config.
func NewJSON(path string, reloadFn func()) (Store, error) {
func NewJSONStore(path string, reloadFn func()) (Store, error) {
    c := &jsonStore{
        path:     path,
        data:     make(map[string]*config.Config),
        data:     make(map[string]*Config),
        reloadFn: reloadFn,
    }

    c.data["base"] = config.New()
    c.data["base"] = New()

    if err := c.load(c.data["base"]); err != nil {
        return nil, fmt.Errorf("failed to read JSON from '%s': %w", path, err)
@@ -46,16 +43,16 @@ func NewJSON(path string, reloadFn func()) (Store, error) {
    return c, nil
}

func (c *jsonStore) Get() *config.Config {
    return c.data["base"].Clone()
func (c *jsonStore) Get() *Config {
    return NewConfigFrom(c.data["base"])
}

func (c *jsonStore) Set(d *config.Config) error {
func (c *jsonStore) Set(d *Config) error {
    if d.HasErrors() {
        return fmt.Errorf("configuration data has errors after validation")
    }

    data := d.Clone()
    data := NewConfigFrom(d)

    data.CreatedAt = time.Now()

@@ -70,26 +67,26 @@ func (c *jsonStore) Set(d *config.Config) error {
    return nil
}

func (c *jsonStore) GetActive() *config.Config {
func (c *jsonStore) GetActive() *Config {
    if x, ok := c.data["merged"]; ok {
        return x.Clone()
        return NewConfigFrom(x)
    }

    if x, ok := c.data["base"]; ok {
        return x.Clone()
        return NewConfigFrom(x)
    }

    return nil
}

func (c *jsonStore) SetActive(d *config.Config) error {
func (c *jsonStore) SetActive(d *Config) error {
    d.Validate(true)

    if d.HasErrors() {
        return fmt.Errorf("configuration data has errors after validation")
    }

    c.data["merged"] = d.Clone()
    c.data["merged"] = NewConfigFrom(d)

    return nil
}
@@ -104,7 +101,7 @@ func (c *jsonStore) Reload() error {
    return nil
}

func (c *jsonStore) load(cfg *config.Config) error {
func (c *jsonStore) load(config *Config) error {
    if len(c.path) == 0 {
        return nil
    }
@@ -118,20 +115,56 @@ func (c *jsonStore) load(cfg *config.Config) error {
        return err
    }

    data, err := migrate(jsondata)
    if err != nil {
        return err
    dataV3 := &Data{}

    version := DataVersion{}

    if err = gojson.Unmarshal(jsondata, &version); err != nil {
        return json.FormatError(jsondata, err)
    }

    cfg.Data = *data
    if version.Version == 1 {
        dataV1 := &dataV1{}

    cfg.LoadedAt = time.Now()
    cfg.UpdatedAt = cfg.LoadedAt
        if err = gojson.Unmarshal(jsondata, dataV1); err != nil {
            return json.FormatError(jsondata, err)
        }

        dataV2, err := NewV2FromV1(dataV1)
        if err != nil {
            return err
        }

        dataV3, err = NewV3FromV2(dataV2)
        if err != nil {
            return err
        }
    } else if version.Version == 2 {
        dataV2 := &dataV2{}

        if err = gojson.Unmarshal(jsondata, dataV2); err != nil {
            return json.FormatError(jsondata, err)
        }

        dataV3, err = NewV3FromV2(dataV2)
        if err != nil {
            return err
        }
    } else if version.Version == 3 {
        if err = gojson.Unmarshal(jsondata, dataV3); err != nil {
            return json.FormatError(jsondata, err)
        }
    }

    config.Data = *dataV3

    config.LoadedAt = time.Now()
    config.UpdatedAt = config.LoadedAt

    return nil
}

func (c *jsonStore) store(data *config.Config) error {
|
||||
func (c *jsonStore) store(data *Config) error {
|
||||
data.CreatedAt = time.Now()
|
||||
|
||||
if len(c.path) == 0 {
|
||||
@@ -166,55 +199,3 @@ func (c *jsonStore) store(data *config.Config) error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func migrate(jsondata []byte) (*config.Data, error) {
|
||||
data := &config.Data{}
|
||||
version := DataVersion{}
|
||||
|
||||
if err := gojson.Unmarshal(jsondata, &version); err != nil {
|
||||
return nil, json.FormatError(jsondata, err)
|
||||
}
|
||||
|
||||
if version.Version == 1 {
|
||||
dataV1 := &v1.New().Data
|
||||
|
||||
if err := gojson.Unmarshal(jsondata, dataV1); err != nil {
|
||||
return nil, json.FormatError(jsondata, err)
|
||||
}
|
||||
|
||||
dataV2, err := v2.UpgradeV1ToV2(dataV1)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dataV3, err := config.UpgradeV2ToV3(dataV2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data = dataV3
|
||||
} else if version.Version == 2 {
|
||||
dataV2 := &v2.New().Data
|
||||
|
||||
if err := gojson.Unmarshal(jsondata, dataV2); err != nil {
|
||||
return nil, json.FormatError(jsondata, err)
|
||||
}
|
||||
|
||||
dataV3, err := config.UpgradeV2ToV3(dataV2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data = dataV3
|
||||
} else if version.Version == 3 {
|
||||
dataV3 := &config.New().Data
|
||||
|
||||
if err := gojson.Unmarshal(jsondata, dataV3); err != nil {
|
||||
return nil, json.FormatError(jsondata, err)
|
||||
}
|
||||
|
||||
data = dataV3
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
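Both sides of this diff follow the same pattern: sniff the "version" field first, then upgrade step by step (v1 to v2 to v3). A condensed, self-contained sketch of that dispatch, with simplified stand-in types; the real upgrade helpers (NewV2FromV1, UpgradeV1ToV2, and so on) are reduced to stubs here:

package main

import (
    "encoding/json"
    "fmt"
)

type dataV1 struct{ Version int64 `json:"version"` }
type dataV2 struct{ Version int64 `json:"version"` }
type dataV3 struct{ Version int64 `json:"version"` }

// Stubs for the per-step upgrade helpers.
func upgradeV1ToV2(d *dataV1) *dataV2 { return &dataV2{Version: 2} }
func upgradeV2ToV3(d *dataV2) *dataV3 { return &dataV3{Version: 3} }

// migrate sniffs the "version" field and upgrades stepwise to v3.
func migrate(raw []byte) (*dataV3, error) {
    var probe struct {
        Version int64 `json:"version"`
    }
    if err := json.Unmarshal(raw, &probe); err != nil {
        return nil, err
    }

    switch probe.Version {
    case 1:
        v1 := &dataV1{}
        if err := json.Unmarshal(raw, v1); err != nil {
            return nil, err
        }
        return upgradeV2ToV3(upgradeV1ToV2(v1)), nil
    case 2:
        v2 := &dataV2{}
        if err := json.Unmarshal(raw, v2); err != nil {
            return nil, err
        }
        return upgradeV2ToV3(v2), nil
    case 3:
        v3 := &dataV3{}
        if err := json.Unmarshal(raw, v3); err != nil {
            return nil, err
        }
        return v3, nil
    }

    return nil, fmt.Errorf("unknown config version %d", probe.Version)
}

func main() {
    v3, err := migrate([]byte(`{"version": 1}`))
    fmt.Println(v3, err) // &{3} <nil>
}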
@@ -1,29 +1,23 @@
package store

import "github.com/datarhei/core/v16/config"
package config

// Store is a store for the configuration data.
type Store interface {
    // Get the current configuration.
    Get() *config.Config
    Get() *Config

    // Set a new configuration for persistence.
    Set(data *config.Config) error
    Set(data *Config) error

    // GetActive returns the configuration that has been set as
    // active before, otherwise it returns nil.
    GetActive() *config.Config
    GetActive() *Config

    // SetActive will keep the given configuration
    // as active in memory. It can be retrieved later with GetActive()
    SetActive(data *config.Config) error
    SetActive(data *Config) error

    // Reload will reload the stored configuration. It has to make sure
    // that all affected components will receive their potentially
    // changed configuration.
    Reload() error
}

type DataVersion struct {
    Version int64 `json:"version"`
}
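For illustration only, a minimal in-memory implementation of this interface (hypothetical, not part of the diff); it assumes the Config type and NewConfigFrom helper from the new side of this package and does no persistence at all:

// memStore is a hypothetical Store that keeps everything in memory.
type memStore struct {
    base   *Config
    merged *Config
}

func (m *memStore) Get() *Config           { return NewConfigFrom(m.base) }
func (m *memStore) Set(data *Config) error { m.base = NewConfigFrom(data); return nil }

func (m *memStore) GetActive() *Config {
    if m.merged != nil {
        return NewConfigFrom(m.merged)
    }
    return NewConfigFrom(m.base)
}

func (m *memStore) SetActive(data *Config) error {
    m.merged = NewConfigFrom(data)
    return nil
}

func (m *memStore) Reload() error { return nil }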
@@ -1,138 +0,0 @@
{
    "created_at": "2022-11-08T12:01:22.533279+01:00",
    "version": 1,
    "id": "c5ea4473-2f84-417c-a0c6-35746bfc9fc9",
    "name": "cool-breeze-4646",
    "address": ":8080",
    "update_check": true,
    "log": {
        "level": "info",
        "topics": [],
        "max_lines": 1000
    },
    "db": {
        "dir": "./config"
    },
    "host": {
        "name": [],
        "auto": true
    },
    "api": {
        "read_only": false,
        "access": {
            "http": {
                "allow": [],
                "block": []
            },
            "https": {
                "allow": [],
                "block": []
            }
        },
        "auth": {
            "enable": false,
            "disable_localhost": false,
            "username": "",
            "password": "",
            "jwt": {
                "secret": "L(*C[:uuHzL.]Fzpk$q=fa@PO=Z;j;56"
            },
            "auth0": {
                "enable": false,
                "tenants": []
            }
        }
    },
    "tls": {
        "address": ":8181",
        "enable": false,
        "auto": false,
        "cert_file": "",
        "key_file": ""
    },
    "storage": {
        "disk": {
            "dir": "./data",
            "max_size_mbytes": 0,
            "cache": {
                "enable": true,
                "max_size_mbytes": 0,
                "ttl_seconds": 300,
                "max_file_size_mbytes": 1,
                "types": []
            }
        },
        "memory": {
            "auth": {
                "enable": true,
                "username": "admin",
                "password": "dcFsZVGwVFkv1bE8Rl"
            },
            "max_size_mbytes": 0,
            "purge": false
        },
        "cors": {
            "origins": [
                "*"
            ]
        },
        "mimetypes_file": "./mime.types"
    },
    "ffmpeg": {
        "binary": "ffmpeg",
        "max_processes": 0,
        "access": {
            "input": {
                "allow": [],
                "block": []
            },
            "output": {
                "allow": [],
                "block": []
            }
        },
        "log": {
            "max_lines": 50,
            "max_history": 3
        }
    },
    "playout": {
        "enable": false,
        "min_port": 0,
        "max_port": 0
    },
    "debug": {
        "profiling": false,
        "force_gc": 0
    },
    "metrics": {
        "enable": false,
        "enable_prometheus": false,
        "range_sec": 300,
        "interval_sec": 2
    },
    "sessions": {
        "enable": true,
        "ip_ignorelist": [
            "127.0.0.1/32",
            "::1/128"
        ],
        "session_timeout_sec": 30,
        "persist": false,
        "persist_interval_sec": 300,
        "max_bitrate_mbit": 0,
        "max_sessions": 0
    },
    "service": {
        "enable": false,
        "token": "",
        "url": "https://service.datarhei.com"
    },
    "router": {
        "blocked_prefixes": [
            "/api"
        ],
        "routes": {},
        "ui_path": ""
    }
}
@@ -1,163 +0,0 @@
{
    "created_at": "2022-11-08T13:34:47.498911+01:00",
    "version": 3,
    "id": "c5ea4473-2f84-417c-a0c6-35746bfc9fc9",
    "name": "cool-breeze-4646",
    "address": ":8080",
    "update_check": true,
    "log": {
        "level": "info",
        "topics": [],
        "max_lines": 1000
    },
    "db": {
        "dir": "./config"
    },
    "host": {
        "name": [],
        "auto": true
    },
    "api": {
        "read_only": false,
        "access": {
            "http": {
                "allow": [],
                "block": []
            },
            "https": {
                "allow": [],
                "block": []
            }
        },
        "auth": {
            "enable": false,
            "disable_localhost": false,
            "username": "",
            "password": "",
            "jwt": {
                "secret": "L(*C[:uuHzL.]Fzpk$q=fa@PO=Z;j;56"
            },
            "auth0": {
                "enable": false,
                "tenants": []
            }
        }
    },
    "tls": {
        "address": ":8181",
        "enable": false,
        "auto": false,
        "email": "cert@datarhei.com",
        "cert_file": "",
        "key_file": ""
    },
    "storage": {
        "disk": {
            "dir": "./data",
            "max_size_mbytes": 0,
            "cache": {
                "enable": true,
                "max_size_mbytes": 0,
                "ttl_seconds": 300,
                "max_file_size_mbytes": 1,
                "types": {
                    "allow": [],
                    "block": [
                        ".m3u8",
                        ".mpd"
                    ]
                }
            }
        },
        "memory": {
            "auth": {
                "enable": true,
                "username": "admin",
                "password": "dcFsZVGwVFkv1bE8Rl"
            },
            "max_size_mbytes": 0,
            "purge": false
        },
        "cors": {
            "origins": [
                "*"
            ]
        },
        "mimetypes_file": "./mime.types"
    },
    "rtmp": {
        "enable": false,
        "enable_tls": false,
        "address": ":1935",
        "address_tls": ":1936",
        "app": "/",
        "token": ""
    },
    "srt": {
        "enable": false,
        "address": ":6000",
        "passphrase": "",
        "token": "",
        "log": {
            "enable": false,
            "topics": []
        }
    },
    "ffmpeg": {
        "binary": "ffmpeg",
        "max_processes": 0,
        "access": {
            "input": {
                "allow": [],
                "block": []
            },
            "output": {
                "allow": [],
                "block": []
            }
        },
        "log": {
            "max_lines": 50,
            "max_history": 3
        }
    },
    "playout": {
        "enable": false,
        "min_port": 0,
        "max_port": 0
    },
    "debug": {
        "profiling": false,
        "force_gc": 0
    },
    "metrics": {
        "enable": false,
        "enable_prometheus": false,
        "range_sec": 300,
        "interval_sec": 2
    },
    "sessions": {
        "enable": true,
        "ip_ignorelist": [
            "127.0.0.1/32",
            "::1/128"
        ],
        "session_timeout_sec": 30,
        "persist": false,
        "persist_interval_sec": 300,
        "max_bitrate_mbit": 0,
        "max_sessions": 0
    },
    "service": {
        "enable": false,
        "token": "",
        "url": "https://service.datarhei.com"
    },
    "router": {
        "blocked_prefixes": [
            "/api"
        ],
        "routes": {},
        "ui_path": ""
    }
}
@@ -1,140 +0,0 @@
{
    "created_at": "2022-11-08T11:54:44.224213+01:00",
    "version": 2,
    "id": "3bddc061-e534-4315-ab56-95b48c050ec9",
    "name": "super-frog-1715",
    "address": ":8080",
    "update_check": true,
    "log": {
        "level": "info",
        "topics": [],
        "max_lines": 1000
    },
    "db": {
        "dir": "./config"
    },
    "host": {
        "name": [],
        "auto": true
    },
    "api": {
        "read_only": false,
        "access": {
            "http": {
                "allow": [],
                "block": []
            },
            "https": {
                "allow": [],
                "block": []
            }
        },
        "auth": {
            "enable": false,
            "disable_localhost": false,
            "username": "",
            "password": "",
            "jwt": {
                "secret": "u4+N,UDq]jGxGbbQLQN[!jcMsa\u0026weIJW"
            },
            "auth0": {
                "enable": false,
                "tenants": []
            }
        }
    },
    "tls": {
        "address": ":8181",
        "enable": false,
        "auto": false,
        "cert_file": "",
        "key_file": ""
    },
    "storage": {
        "disk": {
            "dir": "./data",
            "max_size_mbytes": 0,
            "cache": {
                "enable": true,
                "max_size_mbytes": 0,
                "ttl_seconds": 300,
                "max_file_size_mbytes": 1,
                "types": [
                    ".ts"
                ]
            }
        },
        "memory": {
            "auth": {
                "enable": true,
                "username": "admin",
                "password": "DsAKRUg9wmOk4qpvvy"
            },
            "max_size_mbytes": 0,
            "purge": false
        },
        "cors": {
            "origins": [
                "*"
            ]
        },
        "mimetypes_file": "./mime.types"
    },
    "ffmpeg": {
        "binary": "ffmpeg",
        "max_processes": 0,
        "access": {
            "input": {
                "allow": [],
                "block": []
            },
            "output": {
                "allow": [],
                "block": []
            }
        },
        "log": {
            "max_lines": 50,
            "max_history": 3
        }
    },
    "playout": {
        "enable": false,
        "min_port": 0,
        "max_port": 0
    },
    "debug": {
        "profiling": false,
        "force_gc": 0
    },
    "metrics": {
        "enable": false,
        "enable_prometheus": false,
        "range_sec": 300,
        "interval_sec": 2
    },
    "sessions": {
        "enable": true,
        "ip_ignorelist": [
            "127.0.0.1/32",
            "::1/128"
        ],
        "session_timeout_sec": 30,
        "persist": false,
        "persist_interval_sec": 300,
        "max_bitrate_mbit": 0,
        "max_sessions": 0
    },
    "service": {
        "enable": false,
        "token": "",
        "url": "https://service.datarhei.com"
    },
    "router": {
        "blocked_prefixes": [
            "/api"
        ],
        "routes": {},
        "ui_path": ""
    }
}
@@ -1,165 +0,0 @@
{
    "created_at": "2022-11-08T11:54:44.224213+01:00",
    "version": 3,
    "id": "3bddc061-e534-4315-ab56-95b48c050ec9",
    "name": "super-frog-1715",
    "address": ":8080",
    "update_check": true,
    "log": {
        "level": "info",
        "topics": [],
        "max_lines": 1000
    },
    "db": {
        "dir": "./config"
    },
    "host": {
        "name": [],
        "auto": true
    },
    "api": {
        "read_only": false,
        "access": {
            "http": {
                "allow": [],
                "block": []
            },
            "https": {
                "allow": [],
                "block": []
            }
        },
        "auth": {
            "enable": false,
            "disable_localhost": false,
            "username": "",
            "password": "",
            "jwt": {
                "secret": "u4+N,UDq]jGxGbbQLQN[!jcMsa\u0026weIJW"
            },
            "auth0": {
                "enable": false,
                "tenants": []
            }
        }
    },
    "tls": {
        "address": ":8181",
        "enable": false,
        "auto": false,
        "cert_file": "",
        "key_file": "",
        "email": "cert@datarhei.com"
    },
    "storage": {
        "disk": {
            "dir": "./data",
            "max_size_mbytes": 0,
            "cache": {
                "enable": true,
                "max_size_mbytes": 0,
                "ttl_seconds": 300,
                "max_file_size_mbytes": 1,
                "types": {
                    "allow": [
                        ".ts"
                    ],
                    "block": [
                        ".m3u8",
                        ".mpd"
                    ]
                }
            }
        },
        "memory": {
            "auth": {
                "enable": true,
                "username": "admin",
                "password": "DsAKRUg9wmOk4qpvvy"
            },
            "max_size_mbytes": 0,
            "purge": false
        },
        "cors": {
            "origins": [
                "*"
            ]
        },
        "mimetypes_file": "./mime.types"
    },
    "rtmp": {
        "enable": false,
        "enable_tls": false,
        "address": ":1935",
        "address_tls": ":1936",
        "app": "/",
        "token": ""
    },
    "srt": {
        "enable": false,
        "address": ":6000",
        "passphrase": "",
        "token": "",
        "log": {
            "enable": false,
            "topics": []
        }
    },
    "ffmpeg": {
        "binary": "ffmpeg",
        "max_processes": 0,
        "access": {
            "input": {
                "allow": [],
                "block": []
            },
            "output": {
                "allow": [],
                "block": []
            }
        },
        "log": {
            "max_lines": 50,
            "max_history": 3
        }
    },
    "playout": {
        "enable": false,
        "min_port": 0,
        "max_port": 0
    },
    "debug": {
        "profiling": false,
        "force_gc": 0
    },
    "metrics": {
        "enable": false,
        "enable_prometheus": false,
        "range_sec": 300,
        "interval_sec": 2
    },
    "sessions": {
        "enable": true,
        "ip_ignorelist": [
            "127.0.0.1/32",
            "::1/128"
        ],
        "session_timeout_sec": 30,
        "persist": false,
        "persist_interval_sec": 300,
        "max_bitrate_mbit": 0,
        "max_sessions": 0
    },
    "service": {
        "enable": false,
        "token": "",
        "url": "https://service.datarhei.com"
    },
    "router": {
        "blocked_prefixes": [
            "/api"
        ],
        "routes": {},
        "ui_path": ""
    }
}
@@ -1,50 +0,0 @@
package store

import (
    "encoding/json"
    "os"
    "testing"
    "time"

    "github.com/datarhei/core/v16/config"

    "github.com/stretchr/testify/require"
)

func TestMigrationV1ToV3(t *testing.T) {
    jsondatav1, err := os.ReadFile("./fixtures/config_v1.json")
    require.NoError(t, err)

    jsondatav3, err := os.ReadFile("./fixtures/config_v1_v3.json")
    require.NoError(t, err)

    datav3 := config.New()
    json.Unmarshal(jsondatav3, datav3)

    data, err := migrate(jsondatav1)
    require.NoError(t, err)

    datav3.Data.CreatedAt = time.Time{}
    data.CreatedAt = time.Time{}

    require.Equal(t, datav3.Data, *data)
}

func TestMigrationV2ToV3(t *testing.T) {
    jsondatav2, err := os.ReadFile("./fixtures/config_v2.json")
    require.NoError(t, err)

    jsondatav3, err := os.ReadFile("./fixtures/config_v2_v3.json")
    require.NoError(t, err)

    datav3 := config.New()
    json.Unmarshal(jsondatav3, datav3)

    data, err := migrate(jsondatav2)
    require.NoError(t, err)

    datav3.Data.CreatedAt = time.Time{}
    data.CreatedAt = time.Time{}

    require.Equal(t, datav3.Data, *data)
}
996 config/types.go Normal file
@@ -0,0 +1,996 @@
package config

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
    "net"
    "net/mail"
    "net/url"
    "os"
    "os/exec"
    "path/filepath"
    "regexp"
    "strconv"
    "strings"
    "time"

    "github.com/datarhei/core/v16/http/cors"

    "golang.org/x/net/publicsuffix"
)

type value interface {
    // String returns a string representation of the value.
    String() string

    // Set a new value for the value. Returns an
    // error if the given string representation can't
    // be transformed to the value. Returns nil
    // if the new value has been set.
    Set(string) error

    // Validate the value. The returned error will
    // indicate what is wrong with the current value.
    // Returns nil if the value is OK.
    Validate() error

    // IsEmpty returns whether the value represents an empty
    // representation for that value.
    IsEmpty() bool
}

// string

type stringValue string

func newStringValue(p *string, val string) *stringValue {
    *p = val
    return (*stringValue)(p)
}

func (s *stringValue) Set(val string) error {
    *s = stringValue(val)
    return nil
}

func (s *stringValue) String() string {
    return string(*s)
}

func (s *stringValue) Validate() error {
    return nil
}

func (s *stringValue) IsEmpty() bool {
    return len(string(*s)) == 0
}

// address (host?:port)

type addressValue string

func newAddressValue(p *string, val string) *addressValue {
    *p = val
    return (*addressValue)(p)
}

func (s *addressValue) Set(val string) error {
    // Check if the new value is only a port number
    re := regexp.MustCompile("^[0-9]+$")
    if re.MatchString(val) {
        val = ":" + val
    }

    *s = addressValue(val)
    return nil
}

func (s *addressValue) String() string {
    return string(*s)
}

func (s *addressValue) Validate() error {
    _, port, err := net.SplitHostPort(string(*s))
    if err != nil {
        return err
    }

    re := regexp.MustCompile("^[0-9]+$")
    if !re.MatchString(port) {
        return fmt.Errorf("the port must be numerical")
    }

    return nil
}

func (s *addressValue) IsEmpty() bool {
    return s.Validate() != nil
}

// array of strings

type stringListValue struct {
    p         *[]string
    separator string
}

func newStringListValue(p *[]string, val []string, separator string) *stringListValue {
    v := &stringListValue{
        p:         p,
        separator: separator,
    }
    *p = val
    return v
}

func (s *stringListValue) Set(val string) error {
    list := []string{}

    for _, elm := range strings.Split(val, s.separator) {
        elm = strings.TrimSpace(elm)
        if len(elm) != 0 {
            list = append(list, elm)
        }
    }

    *s.p = list

    return nil
}

func (s *stringListValue) String() string {
    if s.IsEmpty() {
        return "(empty)"
    }

    return strings.Join(*s.p, s.separator)
}

func (s *stringListValue) Validate() error {
    return nil
}

func (s *stringListValue) IsEmpty() bool {
    return len(*s.p) == 0
}

// array of auth0 tenants

type tenantListValue struct {
    p         *[]Auth0Tenant
    separator string
}

func newTenantListValue(p *[]Auth0Tenant, val []Auth0Tenant, separator string) *tenantListValue {
    v := &tenantListValue{
        p:         p,
        separator: separator,
    }

    *p = val
    return v
}

func (s *tenantListValue) Set(val string) error {
    list := []Auth0Tenant{}

    for i, elm := range strings.Split(val, s.separator) {
        data, err := base64.StdEncoding.DecodeString(elm)
        if err != nil {
            return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err)
        }

        t := Auth0Tenant{}
        if err := json.Unmarshal(data, &t); err != nil {
            return fmt.Errorf("invalid JSON in tenant %d: %w", i, err)
        }

        list = append(list, t)
    }

    *s.p = list

    return nil
}

func (s *tenantListValue) String() string {
    if s.IsEmpty() {
        return "(empty)"
    }

    list := []string{}

    for _, t := range *s.p {
        list = append(list, fmt.Sprintf("%s (%d users)", t.Domain, len(t.Users)))
    }

    return strings.Join(list, ",")
}

func (s *tenantListValue) Validate() error {
    for i, t := range *s.p {
        if len(t.Domain) == 0 {
            return fmt.Errorf("the domain for tenant %d is missing", i)
        }

        if len(t.Audience) == 0 {
            return fmt.Errorf("the audience for tenant %d is missing", i)
        }
    }

    return nil
}

func (s *tenantListValue) IsEmpty() bool {
    return len(*s.p) == 0
}
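The tenant list above expects each entry to be a base64-encoded JSON document. A small sketch of producing such an entry; the struct here only mirrors the fields the validator checks (domain, audience, users), with assumed JSON tags, so treat it as illustrative rather than the package's actual Auth0Tenant definition:

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
)

// Auth0Tenant is a sketch of the tenant fields referenced above;
// field tags beyond domain/audience/users are assumptions.
type Auth0Tenant struct {
    Domain   string   `json:"domain"`
    Audience string   `json:"audience"`
    Users    []string `json:"users"`
}

func main() {
    t := Auth0Tenant{
        Domain:   "example.eu.auth0.com",
        Audience: "https://api.example.com/",
        Users:    []string{"auth0|123456"},
    }

    raw, _ := json.Marshal(t)

    // This is the value to put into the comma-separated tenant list.
    fmt.Println(base64.StdEncoding.EncodeToString(raw))
}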
// array of s3 storages
// https://access_key_id:secret_access_id@region.endpoint/bucket?name=aaa&mount=/abc&username=xxx&password=yyy

type s3StorageListValue struct {
    p         *[]S3Storage
    separator string
}

func newS3StorageListValue(p *[]S3Storage, val []S3Storage, separator string) *s3StorageListValue {
    v := &s3StorageListValue{
        p:         p,
        separator: separator,
    }

    *p = val
    return v
}

func (s *s3StorageListValue) Set(val string) error {
    list := []S3Storage{}

    for _, elm := range strings.Split(val, s.separator) {
        u, err := url.Parse(elm)
        if err != nil {
            return fmt.Errorf("invalid S3 storage URL (%s): %w", elm, err)
        }

        t := S3Storage{
            Name:        u.Query().Get("name"),
            Mountpoint:  u.Query().Get("mountpoint"),
            AccessKeyID: u.User.Username(),
        }

        hostname := u.Hostname()
        port := u.Port()

        domain, err := publicsuffix.EffectiveTLDPlusOne(hostname)
        if err != nil {
            return fmt.Errorf("invalid eTLD (%s): %w", hostname, err)
        }

        t.Endpoint = domain
        if len(port) != 0 {
            t.Endpoint += ":" + port
        }

        region := strings.TrimSuffix(hostname, domain)
        if len(region) != 0 {
            t.Region = strings.TrimSuffix(region, ".")
        }

        secret, ok := u.User.Password()
        if ok {
            t.SecretAccessKey = secret
        }

        t.Bucket = strings.TrimPrefix(u.Path, "/")

        if u.Scheme == "https" {
            t.UseSSL = true
        }

        if u.Query().Has("username") || u.Query().Has("password") {
            t.Auth.Enable = true
            t.Auth.Username = u.Query().Get("username")
            t.Auth.Password = u.Query().Get("password")
        }

        list = append(list, t)
    }

    *s.p = list

    return nil
}

func (s *s3StorageListValue) String() string {
    if s.IsEmpty() {
        return "(empty)"
    }

    list := []string{}

    for _, t := range *s.p {
        u := url.URL{}

        if t.UseSSL {
            u.Scheme = "https"
        } else {
            u.Scheme = "http"
        }

        u.User = url.UserPassword(t.AccessKeyID, "---")

        u.Host = t.Endpoint

        if len(t.Region) != 0 {
            u.Host = t.Region + "." + u.Host
        }

        if len(t.Bucket) != 0 {
            u.Path = "/" + t.Bucket
        }

        v := url.Values{}
        v.Set("name", t.Name)
        v.Set("mountpoint", t.Mountpoint)

        if t.Auth.Enable {
            if len(t.Auth.Username) != 0 {
                v.Set("username", t.Auth.Username)
            }

            if len(t.Auth.Password) != 0 {
                v.Set("password", "---")
            }
        }

        u.RawQuery = v.Encode()

        list = append(list, u.String())
    }

    return strings.Join(list, s.separator)
}

func (s *s3StorageListValue) Validate() error {
    for i, t := range *s.p {
        if len(t.Name) == 0 {
            return fmt.Errorf("the name for s3 storage %d is missing", i)
        }

        if len(t.Mountpoint) == 0 {
            return fmt.Errorf("the mountpoint for s3 storage %d is missing", i)
        }

        if t.Auth.Enable {
            if len(t.Auth.Username) == 0 && len(t.Auth.Password) == 0 {
                return fmt.Errorf("auth is enabled, but no username and password are set for s3 storage %d", i)
            }
        }
    }

    return nil
}

func (s *s3StorageListValue) IsEmpty() bool {
    return len(*s.p) == 0
}
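A quick, self-contained illustration of how the documented URL format decomposes into endpoint, region, bucket and access key using the same stdlib and publicsuffix calls as the parser above; the concrete values are made up:

package main

import (
    "fmt"
    "net/url"
    "strings"

    "golang.org/x/net/publicsuffix"
)

func main() {
    // Example of the URL format documented above (values are made up).
    raw := "https://AKIA123:s3cr3t@eu-central-1.example.com/mybucket?name=aaa&mountpoint=/abc"

    u, _ := url.Parse(raw)

    hostname := u.Hostname()
    domain, _ := publicsuffix.EffectiveTLDPlusOne(hostname)
    region := strings.TrimSuffix(strings.TrimSuffix(hostname, domain), ".")

    fmt.Println("endpoint:", domain)                          // example.com
    fmt.Println("region:  ", region)                          // eu-central-1
    fmt.Println("bucket:  ", strings.TrimPrefix(u.Path, "/")) // mybucket
    fmt.Println("key id:  ", u.User.Username())               // AKIA123
}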
// map of strings to strings

type stringMapStringValue struct {
    p *map[string]string
}

func newStringMapStringValue(p *map[string]string, val map[string]string) *stringMapStringValue {
    v := &stringMapStringValue{
        p: p,
    }

    if *p == nil {
        *p = make(map[string]string)
    }

    if val != nil {
        *p = val
    }

    return v
}

func (s *stringMapStringValue) Set(val string) error {
    mappings := make(map[string]string)

    for _, elm := range strings.Split(val, " ") {
        elm = strings.TrimSpace(elm)
        if len(elm) == 0 {
            continue
        }

        mapping := strings.SplitN(elm, ":", 2)
        if len(mapping) != 2 {
            // Guard against entries without a colon; indexing mapping[1]
            // unconditionally would panic on such input.
            return fmt.Errorf("invalid mapping: %s", elm)
        }

        mappings[mapping[0]] = mapping[1]
    }

    *s.p = mappings

    return nil
}

func (s *stringMapStringValue) String() string {
    if s.IsEmpty() {
        return "(empty)"
    }

    mappings := make([]string, len(*s.p))

    i := 0
    for k, v := range *s.p {
        mappings[i] = k + ":" + v
        i++
    }

    return strings.Join(mappings, " ")
}

func (s *stringMapStringValue) Validate() error {
    return nil
}

func (s *stringMapStringValue) IsEmpty() bool {
    return len(*s.p) == 0
}
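The map value above is fed from a single space-separated string of key:value pairs (this is how, for example, router routes can be passed through one environment variable). A tiny sketch of the expected format, assuming the parsing shown above:

package main

import (
    "fmt"
    "strings"
)

func main() {
    val := "/old:/new /foo:/bar"

    mappings := make(map[string]string)
    for _, elm := range strings.Split(val, " ") {
        if kv := strings.SplitN(elm, ":", 2); len(kv) == 2 {
            mappings[kv[0]] = kv[1]
        }
    }

    fmt.Println(mappings) // map[/foo:/bar /old:/new]
}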
// array of CIDR notation IP addresses

type cidrListValue struct {
    p         *[]string
    separator string
}

func newCIDRListValue(p *[]string, val []string, separator string) *cidrListValue {
    v := &cidrListValue{
        p:         p,
        separator: separator,
    }
    *p = val
    return v
}

func (s *cidrListValue) Set(val string) error {
    list := []string{}

    for _, elm := range strings.Split(val, s.separator) {
        elm = strings.TrimSpace(elm)
        if len(elm) != 0 {
            list = append(list, elm)
        }
    }

    *s.p = list

    return nil
}

func (s *cidrListValue) String() string {
    if s.IsEmpty() {
        return "(empty)"
    }

    return strings.Join(*s.p, s.separator)
}

func (s *cidrListValue) Validate() error {
    for _, cidr := range *s.p {
        _, _, err := net.ParseCIDR(cidr)
        if err != nil {
            return err
        }
    }

    return nil
}

func (s *cidrListValue) IsEmpty() bool {
    return len(*s.p) == 0
}
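Validate simply runs every entry through net.ParseCIDR. As a quick check, the default sessions.ip_ignorelist entries from the fixtures pass cleanly, while malformed input is rejected:

package main

import (
    "fmt"
    "net"
)

func main() {
    for _, cidr := range []string{"127.0.0.1/32", "::1/128", "not-a-cidr"} {
        _, _, err := net.ParseCIDR(cidr)
        fmt.Println(cidr, "->", err)
    }
}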
// array of origins for CORS

type corsOriginsValue struct {
    p         *[]string
    separator string
}

func newCORSOriginsValue(p *[]string, val []string, separator string) *corsOriginsValue {
    v := &corsOriginsValue{
        p:         p,
        separator: separator,
    }
    *p = val
    return v
}

func (s *corsOriginsValue) Set(val string) error {
    list := []string{}

    for _, elm := range strings.Split(val, s.separator) {
        elm = strings.TrimSpace(elm)
        if len(elm) != 0 {
            list = append(list, elm)
        }
    }

    *s.p = list

    return nil
}

func (s *corsOriginsValue) String() string {
    if s.IsEmpty() {
        return "(empty)"
    }

    return strings.Join(*s.p, s.separator)
}

func (s *corsOriginsValue) Validate() error {
    return cors.Validate(*s.p)
}

func (s *corsOriginsValue) IsEmpty() bool {
    return len(*s.p) == 0
}

// boolean

type boolValue bool

func newBoolValue(p *bool, val bool) *boolValue {
    *p = val
    return (*boolValue)(p)
}

func (b *boolValue) Set(val string) error {
    v, err := strconv.ParseBool(val)
    if err != nil {
        return err
    }
    *b = boolValue(v)
    return nil
}

func (b *boolValue) String() string {
    return strconv.FormatBool(bool(*b))
}

func (b *boolValue) Validate() error {
    return nil
}

func (b *boolValue) IsEmpty() bool {
    return !bool(*b)
}

// int

type intValue int

func newIntValue(p *int, val int) *intValue {
    *p = val
    return (*intValue)(p)
}

func (i *intValue) Set(val string) error {
    v, err := strconv.Atoi(val)
    if err != nil {
        return err
    }
    *i = intValue(v)
    return nil
}

func (i *intValue) String() string {
    return strconv.Itoa(int(*i))
}

func (i *intValue) Validate() error {
    return nil
}

func (i *intValue) IsEmpty() bool {
    return int(*i) == 0
}

// int64

type int64Value int64

func newInt64Value(p *int64, val int64) *int64Value {
    *p = val
    return (*int64Value)(p)
}

func (u *int64Value) Set(val string) error {
    v, err := strconv.ParseInt(val, 0, 64)
    if err != nil {
        return err
    }
    *u = int64Value(v)
    return nil
}

func (u *int64Value) String() string {
    return strconv.FormatInt(int64(*u), 10)
}

func (u *int64Value) Validate() error {
    return nil
}

func (u *int64Value) IsEmpty() bool {
    return int64(*u) == 0
}

// uint64

type uint64Value uint64

func newUint64Value(p *uint64, val uint64) *uint64Value {
    *p = val
    return (*uint64Value)(p)
}

func (u *uint64Value) Set(val string) error {
    v, err := strconv.ParseUint(val, 0, 64)
    if err != nil {
        return err
    }
    *u = uint64Value(v)
    return nil
}

func (u *uint64Value) String() string {
    return strconv.FormatUint(uint64(*u), 10)
}

func (u *uint64Value) Validate() error {
    return nil
}

func (u *uint64Value) IsEmpty() bool {
    return uint64(*u) == 0
}

// network port

type portValue int

func newPortValue(p *int, val int) *portValue {
    *p = val
    return (*portValue)(p)
}

func (i *portValue) Set(val string) error {
    v, err := strconv.Atoi(val)
    if err != nil {
        return err
    }
    *i = portValue(v)
    return nil
}

func (i *portValue) String() string {
    return strconv.Itoa(int(*i))
}

func (i *portValue) Validate() error {
    val := int(*i)

    if val < 0 || val >= (1<<16) {
        return fmt.Errorf("%d is not in the range of [0, %d]", val, 1<<16-1)
    }

    return nil
}

func (i *portValue) IsEmpty() bool {
    return int(*i) == 0
}

// must directory

type mustDirValue string

func newMustDirValue(p *string, val string) *mustDirValue {
    *p = val
    return (*mustDirValue)(p)
}

func (u *mustDirValue) Set(val string) error {
    *u = mustDirValue(val)
    return nil
}

func (u *mustDirValue) String() string {
    return string(*u)
}

func (u *mustDirValue) Validate() error {
    val := string(*u)

    if len(strings.TrimSpace(val)) == 0 {
        return fmt.Errorf("path name must not be empty")
    }

    finfo, err := os.Stat(val)
    if err != nil {
        return fmt.Errorf("%s does not exist", val)
    }

    if !finfo.IsDir() {
        return fmt.Errorf("%s is not a directory", val)
    }

    return nil
}

func (u *mustDirValue) IsEmpty() bool {
    return len(string(*u)) == 0
}

// directory

type dirValue string

func newDirValue(p *string, val string) *dirValue {
    *p = val
    return (*dirValue)(p)
}

func (u *dirValue) Set(val string) error {
    *u = dirValue(val)
    return nil
}

func (u *dirValue) String() string {
    return string(*u)
}

func (u *dirValue) Validate() error {
    val := string(*u)

    if len(strings.TrimSpace(val)) == 0 {
        return nil
    }

    finfo, err := os.Stat(val)
    if err != nil {
        return fmt.Errorf("%s does not exist", val)
    }

    if !finfo.IsDir() {
        return fmt.Errorf("%s is not a directory", val)
    }

    return nil
}

func (u *dirValue) IsEmpty() bool {
    return len(string(*u)) == 0
}

// executable

type execValue string

func newExecValue(p *string, val string) *execValue {
    *p = val
    return (*execValue)(p)
}

func (u *execValue) Set(val string) error {
    *u = execValue(val)
    return nil
}

func (u *execValue) String() string {
    return string(*u)
}

func (u *execValue) Validate() error {
    val := string(*u)

    _, err := exec.LookPath(val)
    if err != nil {
        return fmt.Errorf("%s not found or is not executable", val)
    }

    return nil
}

func (u *execValue) IsEmpty() bool {
    return len(string(*u)) == 0
}

// regular file

type fileValue string

func newFileValue(p *string, val string) *fileValue {
    *p = val
    return (*fileValue)(p)
}

func (u *fileValue) Set(val string) error {
    *u = fileValue(val)
    return nil
}

func (u *fileValue) String() string {
    return string(*u)
}

func (u *fileValue) Validate() error {
    val := string(*u)

    if len(val) == 0 {
        return nil
    }

    finfo, err := os.Stat(val)
    if err != nil {
        return fmt.Errorf("%s does not exist", val)
    }

    if !finfo.Mode().IsRegular() {
        return fmt.Errorf("%s is not a regular file", val)
    }

    return nil
}

func (u *fileValue) IsEmpty() bool {
    return len(string(*u)) == 0
}

// time

type timeValue time.Time

func newTimeValue(p *time.Time, val time.Time) *timeValue {
    *p = val
    return (*timeValue)(p)
}

func (u *timeValue) Set(val string) error {
    v, err := time.Parse(time.RFC3339, val)
    if err != nil {
        return err
    }
    *u = timeValue(v)
    return nil
}

func (u *timeValue) String() string {
    v := time.Time(*u)
    return v.Format(time.RFC3339)
}

func (u *timeValue) Validate() error {
    return nil
}

func (u *timeValue) IsEmpty() bool {
    v := time.Time(*u)
    return v.IsZero()
}

// url

type urlValue string

func newURLValue(p *string, val string) *urlValue {
    *p = val
    return (*urlValue)(p)
}

func (u *urlValue) Set(val string) error {
    *u = urlValue(val)
    return nil
}

func (u *urlValue) String() string {
    return string(*u)
}

func (u *urlValue) Validate() error {
    val := string(*u)

    if len(val) == 0 {
        return nil
    }

    URL, err := url.Parse(val)
    if err != nil {
        return fmt.Errorf("%s is not a valid URL", val)
    }

    if len(URL.Scheme) == 0 || len(URL.Host) == 0 {
        return fmt.Errorf("%s is not a valid URL", val)
    }

    return nil
}

func (u *urlValue) IsEmpty() bool {
    return len(string(*u)) == 0
}

// absolute path

type absolutePathValue string

func newAbsolutePathValue(p *string, val string) *absolutePathValue {
    *p = filepath.Clean(val)
    return (*absolutePathValue)(p)
}

func (s *absolutePathValue) Set(val string) error {
    *s = absolutePathValue(filepath.Clean(val))
    return nil
}

func (s *absolutePathValue) String() string {
    return string(*s)
}

func (s *absolutePathValue) Validate() error {
    path := string(*s)

    if !filepath.IsAbs(path) {
        return fmt.Errorf("%s is not an absolute path", path)
    }

    return nil
}

func (s *absolutePathValue) IsEmpty() bool {
    return len(string(*s)) == 0
}

// email address

type emailValue string

func newEmailValue(p *string, val string) *emailValue {
    *p = val
    return (*emailValue)(p)
}

func (s *emailValue) Set(val string) error {
    addr, err := mail.ParseAddress(val)
    if err != nil {
        return err
    }

    *s = emailValue(addr.Address)
    return nil
}

func (s *emailValue) String() string {
    return string(*s)
}

func (s *emailValue) Validate() error {
    if len(s.String()) == 0 {
        return nil
    }

    _, err := mail.ParseAddress(s.String())
    return err
}

func (s *emailValue) IsEmpty() bool {
    return len(string(*s)) == 0
}
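Every concrete type in this file follows the same four-method contract, which is what lets the config registry treat heterogeneous settings uniformly. As a quick illustration, a hypothetical duration value written against the same interface (not part of the diff; it only uses the time package already imported above):

// durationValue is a hypothetical value implementation for
// time.Duration settings, following the pattern above.
type durationValue time.Duration

func newDurationValue(p *time.Duration, val time.Duration) *durationValue {
    *p = val
    return (*durationValue)(p)
}

func (d *durationValue) Set(val string) error {
    v, err := time.ParseDuration(val)
    if err != nil {
        return err
    }
    *d = durationValue(v)
    return nil
}

func (d *durationValue) String() string {
    return time.Duration(*d).String()
}

func (d *durationValue) Validate() error {
    return nil
}

func (d *durationValue) IsEmpty() bool {
    return time.Duration(*d) == 0
}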
@@ -1,4 +1,4 @@
package value
package config

import (
    "testing"
@@ -9,7 +9,7 @@ import (
func TestIntValue(t *testing.T) {
    var i int

    ivar := NewInt(&i, 11)
    ivar := newIntValue(&i, 11)

    assert.Equal(t, "11", ivar.String())
    assert.Equal(t, nil, ivar.Validate())
@@ -34,16 +34,16 @@ type testdata struct {
func TestCopyStruct(t *testing.T) {
    data1 := testdata{}

    NewInt(&data1.value1, 1)
    NewInt(&data1.value2, 2)
    newIntValue(&data1.value1, 1)
    newIntValue(&data1.value2, 2)

    assert.Equal(t, int(1), data1.value1)
    assert.Equal(t, int(2), data1.value2)

    data2 := testdata{}

    val21 := NewInt(&data2.value1, 3)
    val22 := NewInt(&data2.value2, 4)
    val21 := newIntValue(&data2.value1, 3)
    val22 := newIntValue(&data2.value2, 4)

    assert.Equal(t, int(3), data2.value1)
    assert.Equal(t, int(4), data2.value2)
@@ -1,397 +0,0 @@
package v1

import (
    "context"
    "net"
    "time"

    "github.com/datarhei/core/v16/config/copy"
    "github.com/datarhei/core/v16/config/value"
    "github.com/datarhei/core/v16/config/vars"
    "github.com/datarhei/core/v16/math/rand"

    haikunator "github.com/atrox/haikunatorgo/v2"
    "github.com/google/uuid"
)

const version int64 = 1

// Make sure that the config.Config interface is satisfied
//var _ config.Config = &Config{}

// Config is a wrapper for Data
type Config struct {
    vars vars.Variables

    Data
}

// New returns a Config which is initialized with its default values
func New() *Config {
    cfg := &Config{}

    cfg.init()

    return cfg
}

func (d *Config) Get(name string) (string, error) {
    return d.vars.Get(name)
}

func (d *Config) Set(name, val string) error {
    return d.vars.Set(name, val)
}

// Clone returns a clone of a Config
func (d *Config) Clone() *Config {
    data := New()

    data.CreatedAt = d.CreatedAt
    data.LoadedAt = d.LoadedAt
    data.UpdatedAt = d.UpdatedAt

    data.Version = d.Version
    data.ID = d.ID
    data.Name = d.Name
    data.Address = d.Address
    data.CheckForUpdates = d.CheckForUpdates

    data.Log = d.Log
    data.DB = d.DB
    data.Host = d.Host
    data.API = d.API
    data.TLS = d.TLS
    data.Storage = d.Storage
    data.RTMP = d.RTMP
    data.SRT = d.SRT
    data.FFmpeg = d.FFmpeg
    data.Playout = d.Playout
    data.Debug = d.Debug
    data.Metrics = d.Metrics
    data.Sessions = d.Sessions
    data.Service = d.Service
    data.Router = d.Router

    data.Log.Topics = copy.Slice(d.Log.Topics)

    data.Host.Name = copy.Slice(d.Host.Name)

    data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
    data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
    data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
    data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)

    data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)

    data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
    data.Storage.Disk.Cache.Types = copy.Slice(d.Storage.Disk.Cache.Types)

    data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
    data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
    data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
    data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)

    data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)

    data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)

    data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
    data.Router.Routes = copy.StringMap(d.Router.Routes)

    data.vars.Transfer(&d.vars)

    return data
}

func (d *Config) init() {
    d.vars.Register(value.NewInt64(&d.Version, version), "version", "", nil, "Configuration file layout version", true, false)
    d.vars.Register(value.NewTime(&d.CreatedAt, time.Now()), "created_at", "", nil, "Configuration file creation time", false, false)
    d.vars.Register(value.NewString(&d.ID, uuid.New().String()), "id", "CORE_ID", nil, "ID for this instance", true, false)
    d.vars.Register(value.NewString(&d.Name, haikunator.New().Haikunate()), "name", "CORE_NAME", nil, "A human readable name for this instance", false, false)
    d.vars.Register(value.NewAddress(&d.Address, ":8080"), "address", "CORE_ADDRESS", nil, "HTTP listening address", false, false)
    d.vars.Register(value.NewBool(&d.CheckForUpdates, true), "update_check", "CORE_UPDATE_CHECK", nil, "Check for updates and send anonymized data", false, false)

    // Log
    d.vars.Register(value.NewString(&d.Log.Level, "info"), "log.level", "CORE_LOG_LEVEL", nil, "Loglevel: silent, error, warn, info, debug", false, false)
    d.vars.Register(value.NewStringList(&d.Log.Topics, []string{}, ","), "log.topics", "CORE_LOG_TOPICS", nil, "Show only selected log topics", false, false)
    d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)

    // DB
    d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)

    // Host
    d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
    d.vars.Register(value.NewBool(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false)

    // API
    d.vars.Register(value.NewBool(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only read only access to the API", false, false)
    d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
    d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Block, []string{}, ","), "api.access.http.block", "CORE_API_ACCESS_HTTP_BLOCK", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
    d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Allow, []string{}, ","), "api.access.https.allow", "CORE_API_ACCESS_HTTPS_ALLOW", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
    d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Block, []string{}, ","), "api.access.https.block", "CORE_API_ACCESS_HTTPS_BLOCK", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
    d.vars.Register(value.NewBool(&d.API.Auth.Enable, false), "api.auth.enable", "CORE_API_AUTH_ENABLE", nil, "Enable authentication for all clients", false, false)
    d.vars.Register(value.NewBool(&d.API.Auth.DisableLocalhost, false), "api.auth.disable_localhost", "CORE_API_AUTH_DISABLE_LOCALHOST", nil, "Disable authentication for clients from localhost", false, false)
    d.vars.Register(value.NewString(&d.API.Auth.Username, ""), "api.auth.username", "CORE_API_AUTH_USERNAME", []string{"RS_USERNAME"}, "Username", false, false)
    d.vars.Register(value.NewString(&d.API.Auth.Password, ""), "api.auth.password", "CORE_API_AUTH_PASSWORD", []string{"RS_PASSWORD"}, "Password", false, true)

    // Auth JWT
    d.vars.Register(value.NewString(&d.API.Auth.JWT.Secret, rand.String(32)), "api.auth.jwt.secret", "CORE_API_AUTH_JWT_SECRET", nil, "JWT secret, leave empty for generating a random value", false, true)

    // Auth Auth0
    d.vars.Register(value.NewBool(&d.API.Auth.Auth0.Enable, false), "api.auth.auth0.enable", "CORE_API_AUTH_AUTH0_ENABLE", nil, "Enable Auth0", false, false)
    d.vars.Register(value.NewTenantList(&d.API.Auth.Auth0.Tenants, []value.Auth0Tenant{}, ","), "api.auth.auth0.tenants", "CORE_API_AUTH_AUTH0_TENANTS", nil, "List of Auth0 tenants", false, false)

    // TLS
    d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false)
    d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
    d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
    d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
    d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)

    // Storage
    d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)

    // Storage (Disk)
    d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
    d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
    d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
    d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
    d.vars.Register(value.NewInt64(&d.Storage.Disk.Cache.TTL, 300), "storage.disk.cache.ttl_seconds", "CORE_STORAGE_DISK_CACHE_TTLSECONDS", nil, "Seconds to keep files in cache", false, false)
    d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.FileSize, 1), "storage.disk.cache.max_file_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES", nil, "Max. file size to put in cache", false, false)
    d.vars.Register(value.NewStringList(&d.Storage.Disk.Cache.Types, []string{}, " "), "storage.disk.cache.types", "CORE_STORAGE_DISK_CACHE_TYPES_ALLOW", []string{"CORE_STORAGE_DISK_CACHE_TYPES"}, "File extensions to cache, empty for all", false, false)

    // Storage (Memory)
    d.vars.Register(value.NewBool(&d.Storage.Memory.Auth.Enable, true), "storage.memory.auth.enable", "CORE_STORAGE_MEMORY_AUTH_ENABLE", nil, "Enable basic auth for PUT, POST, and DELETE on /memfs", false, false)
    d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Username, "admin"), "storage.memory.auth.username", "CORE_STORAGE_MEMORY_AUTH_USERNAME", nil, "Username for Basic-Auth of /memfs", false, false)
    d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Password, rand.StringAlphanumeric(18)), "storage.memory.auth.password", "CORE_STORAGE_MEMORY_AUTH_PASSWORD", nil, "Password for Basic-Auth of /memfs", false, true)
    d.vars.Register(value.NewInt64(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. allowed megabytes for /memfs, 0 for unlimited", false, false)
    d.vars.Register(value.NewBool(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false)

    // Storage (CORS)
    d.vars.Register(value.NewCORSOrigins(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false)

    // RTMP
    d.vars.Register(value.NewBool(&d.RTMP.Enable, false), "rtmp.enable", "CORE_RTMP_ENABLE", nil, "Enable RTMP server", false, false)
    d.vars.Register(value.NewBool(&d.RTMP.EnableTLS, false), "rtmp.enable_tls", "CORE_RTMP_ENABLE_TLS", nil, "Enable RTMPS server instead of RTMP", false, false)
    d.vars.Register(value.NewAddress(&d.RTMP.Address, ":1935"), "rtmp.address", "CORE_RTMP_ADDRESS", nil, "RTMP server listen address", false, false)
    d.vars.Register(value.NewAbsolutePath(&d.RTMP.App, "/"), "rtmp.app", "CORE_RTMP_APP", nil, "RTMP app for publishing", false, false)
    d.vars.Register(value.NewString(&d.RTMP.Token, ""), "rtmp.token", "CORE_RTMP_TOKEN", nil, "RTMP token for publishing and playing", false, true)

    // SRT
    d.vars.Register(value.NewBool(&d.SRT.Enable, false), "srt.enable", "CORE_SRT_ENABLE", nil, "Enable SRT server", false, false)
    d.vars.Register(value.NewAddress(&d.SRT.Address, ":6000"), "srt.address", "CORE_SRT_ADDRESS", nil, "SRT server listen address", false, false)
    d.vars.Register(value.NewString(&d.SRT.Passphrase, ""), "srt.passphrase", "CORE_SRT_PASSPHRASE", nil, "SRT encryption passphrase", false, true)
    d.vars.Register(value.NewString(&d.SRT.Token, ""), "srt.token", "CORE_SRT_TOKEN", nil, "SRT token for publishing and playing", false, true)
    d.vars.Register(value.NewBool(&d.SRT.Log.Enable, false), "srt.log.enable", "CORE_SRT_LOG_ENABLE", nil, "Enable SRT server logging", false, false)
    d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)

    // FFmpeg
    d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
    d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
    d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expressions to match against the input addresses", false, false)
    d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expressions to match against the input addresses", false, false)
    d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expressions to match against the output addresses", false, false)
    d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expressions to match against the output addresses", false, false)
    d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxLines, 50), "ffmpeg.log.max_lines", "CORE_FFMPEG_LOG_MAXLINES", nil, "Number of latest log lines to keep for each process", false, false)
    d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxHistory, 3), "ffmpeg.log.max_history", "CORE_FFMPEG_LOG_MAXHISTORY", nil, "Number of latest logs to keep for each process", false, false)

    // Playout
    d.vars.Register(value.NewBool(&d.Playout.Enable, false), "playout.enable", "CORE_PLAYOUT_ENABLE", nil, "Enable playout proxy where available", false, false)
    d.vars.Register(value.NewPort(&d.Playout.MinPort, 0), "playout.min_port", "CORE_PLAYOUT_MINPORT", nil, "Min. playout server port", false, false)
    d.vars.Register(value.NewPort(&d.Playout.MaxPort, 0), "playout.max_port", "CORE_PLAYOUT_MAXPORT", nil, "Max. playout server port", false, false)

    // Debug
    d.vars.Register(value.NewBool(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false)
    d.vars.Register(value.NewInt(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false)

    // Metrics
    d.vars.Register(value.NewBool(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false)
    d.vars.Register(value.NewBool(&d.Metrics.EnablePrometheus, false), "metrics.enable_prometheus", "CORE_METRICS_ENABLE_PROMETHEUS", nil, "Enable prometheus endpoint /metrics", false, false)
    d.vars.Register(value.NewInt64(&d.Metrics.Range, 300), "metrics.range_seconds", "CORE_METRICS_RANGE_SECONDS", nil, "Seconds to keep history data", false, false)
    d.vars.Register(value.NewInt64(&d.Metrics.Interval, 2), "metrics.interval_seconds", "CORE_METRICS_INTERVAL_SECONDS", nil, "Interval for collecting metrics", false, false)

    // Sessions
    d.vars.Register(value.NewBool(&d.Sessions.Enable, true), "sessions.enable", "CORE_SESSIONS_ENABLE", nil, "Enable collecting HLS session stats for /memfs", false, false)
    d.vars.Register(value.NewCIDRList(&d.Sessions.IPIgnoreList, []string{"127.0.0.1/32", "::1/128"}, ","), "sessions.ip_ignorelist", "CORE_SESSIONS_IP_IGNORELIST", nil, "List of IP ranges in CIDR notation to ignore", false, false)
    d.vars.Register(value.NewInt(&d.Sessions.SessionTimeout, 30), "sessions.session_timeout_sec", "CORE_SESSIONS_SESSION_TIMEOUT_SEC", nil, "Timeout for an idle session", false, false)
    d.vars.Register(value.NewBool(&d.Sessions.Persist, false), "sessions.persist", "CORE_SESSIONS_PERSIST", nil, "Whether to persist session history. Will be stored as sessions.json in db.dir", false, false)
    d.vars.Register(value.NewInt(&d.Sessions.PersistInterval, 300), "sessions.persist_interval_sec", "CORE_SESSIONS_PERSIST_INTERVAL_SEC", nil, "Interval in seconds in which to persist the current session history", false, false)
    d.vars.Register(value.NewUint64(&d.Sessions.MaxBitrate, 0), "sessions.max_bitrate_mbit", "CORE_SESSIONS_MAXBITRATE_MBIT", nil, "Max. allowed outgoing bitrate in mbit/s, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Sessions.MaxSessions, 0), "sessions.max_sessions", "CORE_SESSIONS_MAXSESSIONS", nil, "Max. allowed number of simultaneous sessions, 0 for unlimited", false, false)
|
||||
|
||||
// Service
|
||||
d.vars.Register(value.NewBool(&d.Service.Enable, false), "service.enable", "CORE_SERVICE_ENABLE", nil, "Enable connecting to the Restreamer Service", false, false)
|
||||
d.vars.Register(value.NewString(&d.Service.Token, ""), "service.token", "CORE_SERVICE_TOKEN", nil, "Restreamer Service account token", false, true)
|
||||
d.vars.Register(value.NewURL(&d.Service.URL, "https://service.datarhei.com"), "service.url", "CORE_SERVICE_URL", nil, "URL of the Restreamer Service", false, false)
|
||||
|
||||
// Router
|
||||
d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
|
||||
d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
|
||||
d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
|
||||
}
|
||||
|
||||
// Validate validates the current state of the Config for completeness and sanity. Errors are
|
||||
// written to the log. Use resetLogs to indicate to reset the logs prior validation.
|
||||
func (d *Config) Validate(resetLogs bool) {
|
||||
if resetLogs {
|
||||
d.vars.ResetLogs()
|
||||
}
|
||||
|
||||
if d.Version != version {
|
||||
d.vars.Log("error", "version", "unknown configuration layout version (found version %d, expecting version %d)", d.Version, version)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
d.vars.Validate()
|
||||
|
||||
// Individual sanity checks
|
||||
|
||||
// If HTTP Auth is enabled, check that the username and password are set
|
||||
if d.API.Auth.Enable {
|
||||
if len(d.API.Auth.Username) == 0 || len(d.API.Auth.Password) == 0 {
|
||||
d.vars.Log("error", "api.auth.enable", "api.auth.username and api.auth.password must be set")
|
||||
}
|
||||
}
|
||||
|
||||
// If Auth0 is enabled, check that domain, audience, and clientid are set
|
||||
if d.API.Auth.Auth0.Enable {
|
||||
if len(d.API.Auth.Auth0.Tenants) == 0 {
|
||||
d.vars.Log("error", "api.auth.auth0.enable", "at least one tenants must be set")
|
||||
}
|
||||
|
||||
for i, t := range d.API.Auth.Auth0.Tenants {
|
||||
if len(t.Domain) == 0 || len(t.Audience) == 0 || len(t.ClientID) == 0 {
|
||||
d.vars.Log("error", "api.auth.auth0.tenants", "domain, audience, and clientid must be set (tenant %d)", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If TLS is enabled and Let's Encrypt is disabled, require certfile and keyfile
|
||||
if d.TLS.Enable && !d.TLS.Auto {
|
||||
if len(d.TLS.CertFile) == 0 || len(d.TLS.KeyFile) == 0 {
|
||||
d.vars.Log("error", "tls.enable", "tls.certfile and tls.keyfile must be set")
|
||||
}
|
||||
}
|
||||
|
||||
// If TLS and Let's Encrypt certificate is enabled, we require a public hostname
|
||||
if d.TLS.Enable && d.TLS.Auto {
|
||||
if len(d.Host.Name) == 0 {
|
||||
d.vars.Log("error", "host.name", "a hostname must be set in order to get an automatic TLS certificate")
|
||||
} else {
|
||||
r := &net.Resolver{
|
||||
PreferGo: true,
|
||||
StrictErrors: true,
|
||||
}
|
||||
|
||||
for _, host := range d.Host.Name {
|
||||
// Don't lookup IP addresses
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
d.vars.Log("error", "host.name", "only host names are allowed if automatic TLS is enabled, but found IP address: %s", host)
|
||||
}
|
||||
|
||||
// Lookup host name with a timeout
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
|
||||
_, err := r.LookupHost(ctx, host)
|
||||
if err != nil {
|
||||
d.vars.Log("error", "host.name", "the host '%s' can't be resolved and will not work with automatic TLS", host)
|
||||
}
|
||||
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If TLS for RTMP is enabled, TLS must be enabled
|
||||
if d.RTMP.EnableTLS {
|
||||
if !d.RTMP.Enable {
|
||||
d.vars.Log("error", "rtmp.enable", "RTMP server must be enabled if RTMPS server is enabled")
|
||||
}
|
||||
|
||||
if !d.TLS.Enable {
|
||||
d.vars.Log("error", "rtmp.enable_tls", "RTMPS server can only be enabled if TLS is enabled")
|
||||
}
|
||||
}
|
||||
|
||||
// If CORE_MEMFS_USERNAME and CORE_MEMFS_PASSWORD are set, automatically active/deactivate Basic-Auth for memfs
|
||||
if d.vars.IsMerged("storage.memory.auth.username") && d.vars.IsMerged("storage.memory.auth.password") {
|
||||
d.Storage.Memory.Auth.Enable = true
|
||||
|
||||
if len(d.Storage.Memory.Auth.Username) == 0 && len(d.Storage.Memory.Auth.Password) == 0 {
|
||||
d.Storage.Memory.Auth.Enable = false
|
||||
}
|
||||
}
|
||||
|
||||
// If Basic-Auth for memfs is enable, check that the username and password are set
|
||||
if d.Storage.Memory.Auth.Enable {
|
||||
if len(d.Storage.Memory.Auth.Username) == 0 || len(d.Storage.Memory.Auth.Password) == 0 {
|
||||
d.vars.Log("error", "storage.memory.auth.enable", "storage.memory.auth.username and storage.memory.auth.password must be set")
|
||||
}
|
||||
}
|
||||
|
||||
// If playout is enabled, check that the port range is sane
|
||||
if d.Playout.Enable {
|
||||
if d.Playout.MinPort >= d.Playout.MaxPort {
|
||||
d.vars.Log("error", "playout.min_port", "must be bigger than playout.max_port")
|
||||
}
|
||||
}
|
||||
|
||||
// If cache is enabled, a valid TTL has to be set to a useful value
|
||||
if d.Storage.Disk.Cache.Enable && d.Storage.Disk.Cache.TTL < 0 {
|
||||
d.vars.Log("error", "storage.disk.cache.ttl_seconds", "must be equal or greater than 0")
|
||||
}
|
||||
|
||||
// If the stats are enabled, the session timeout has to be set to a useful value
|
||||
if d.Sessions.Enable && d.Sessions.SessionTimeout < 1 {
|
||||
d.vars.Log("error", "stats.session_timeout_sec", "must be equal or greater than 1")
|
||||
}
|
||||
|
||||
// If the stats and their persistence are enabled, the persist interval has to be set to a useful value
|
||||
if d.Sessions.Enable && d.Sessions.PersistInterval < 0 {
|
||||
d.vars.Log("error", "stats.persist_interval_sec", "must be at equal or greater than 0")
|
||||
}
|
||||
|
||||
// If the service is enabled, the token and enpoint have to be defined
|
||||
if d.Service.Enable {
|
||||
if len(d.Service.Token) == 0 {
|
||||
d.vars.Log("error", "service.token", "must be non-empty")
|
||||
}
|
||||
|
||||
if len(d.Service.URL) == 0 {
|
||||
d.vars.Log("error", "service.url", "must be non-empty")
|
||||
}
|
||||
}
|
||||
|
||||
// If historic metrics are enabled, the timerange and interval have to be valid
|
||||
if d.Metrics.Enable {
|
||||
if d.Metrics.Range <= 0 {
|
||||
d.vars.Log("error", "metrics.range", "must be greater 0")
|
||||
}
|
||||
|
||||
if d.Metrics.Interval <= 0 {
|
||||
d.vars.Log("error", "metrics.interval", "must be greater 0")
|
||||
}
|
||||
|
||||
if d.Metrics.Interval > d.Metrics.Range {
|
||||
d.vars.Log("error", "metrics.interval", "must be smaller than the range")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Config) Merge() {
|
||||
d.vars.Merge()
|
||||
}
|
||||
|
||||
func (d *Config) Messages(logger func(level string, v vars.Variable, message string)) {
|
||||
d.vars.Messages(logger)
|
||||
}
|
||||
|
||||
func (d *Config) HasErrors() bool {
|
||||
return d.vars.HasErrors()
|
||||
}
|
||||
|
||||
func (d *Config) Overrides() []string {
|
||||
return d.vars.Overrides()
|
||||
}
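Everything above follows one pattern: each option is registered once with a config name, an environment variable, an optional list of deprecated alternative names, a default, and two flags (required, disguise); Merge() later lets the environment override whatever was loaded from disk. A minimal, self-contained sketch of that override precedence, using only the standard library and not the package's actual API:

package main

import (
    "fmt"
    "os"
)

// lookup returns the value of the primary env variable, falling back to
// deprecated alternative names and finally to the registered default.
func lookup(envName string, altNames []string, def string) string {
    if v, ok := os.LookupEnv(envName); ok {
        return v
    }
    for _, alt := range altNames {
        if v, ok := os.LookupEnv(alt); ok {
            return v // the real Merge() logs a deprecation warning here
        }
    }
    return def
}

func main() {
    os.Setenv("CORE_API_AUTH_USERNAME", "admin")

    // "RS_USERNAME" is the deprecated alternative name registered above.
    fmt.Println(lookup("CORE_API_AUTH_USERNAME", []string{"RS_USERNAME"}, ""))
}

The primary name always wins over the alternatives, which matches the order in which Merge() in the vars package (shown further below) consults the environment.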
@@ -1,398 +0,0 @@
package v2

import (
    "context"
    "net"
    "time"

    "github.com/datarhei/core/v16/config/copy"
    "github.com/datarhei/core/v16/config/value"
    "github.com/datarhei/core/v16/config/vars"
    "github.com/datarhei/core/v16/math/rand"

    haikunator "github.com/atrox/haikunatorgo/v2"
    "github.com/google/uuid"
)

const version int64 = 2

// Make sure that the config.Config interface is satisfied
//var _ config.Config = &Config{}

// Config is a wrapper for Data
type Config struct {
    vars vars.Variables

    Data
}

// New returns a Config which is initialized with its default values
func New() *Config {
    cfg := &Config{}

    cfg.init()

    return cfg
}

func (d *Config) Get(name string) (string, error) {
    return d.vars.Get(name)
}

func (d *Config) Set(name, val string) error {
    return d.vars.Set(name, val)
}

// Clone returns a clone of a Config
func (d *Config) Clone() *Config {
    data := New()

    data.CreatedAt = d.CreatedAt
    data.LoadedAt = d.LoadedAt
    data.UpdatedAt = d.UpdatedAt

    data.Version = d.Version
    data.ID = d.ID
    data.Name = d.Name
    data.Address = d.Address
    data.CheckForUpdates = d.CheckForUpdates

    data.Log = d.Log
    data.DB = d.DB
    data.Host = d.Host
    data.API = d.API
    data.TLS = d.TLS
    data.Storage = d.Storage
    data.RTMP = d.RTMP
    data.SRT = d.SRT
    data.FFmpeg = d.FFmpeg
    data.Playout = d.Playout
    data.Debug = d.Debug
    data.Metrics = d.Metrics
    data.Sessions = d.Sessions
    data.Service = d.Service
    data.Router = d.Router

    data.Log.Topics = copy.Slice(d.Log.Topics)

    data.Host.Name = copy.Slice(d.Host.Name)

    data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
    data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
    data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
    data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)

    data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)

    data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
    data.Storage.Disk.Cache.Types = copy.Slice(d.Storage.Disk.Cache.Types)

    data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
    data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
    data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
    data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)

    data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)

    data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)

    data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
    data.Router.Routes = copy.StringMap(d.Router.Routes)

    data.vars.Transfer(&d.vars)

    return data
}

func (d *Config) init() {
    d.vars.Register(value.NewInt64(&d.Version, version), "version", "", nil, "Configuration file layout version", true, false)
    d.vars.Register(value.NewTime(&d.CreatedAt, time.Now()), "created_at", "", nil, "Configuration file creation time", false, false)
    d.vars.Register(value.NewString(&d.ID, uuid.New().String()), "id", "CORE_ID", nil, "ID for this instance", true, false)
    d.vars.Register(value.NewString(&d.Name, haikunator.New().Haikunate()), "name", "CORE_NAME", nil, "A human-readable name for this instance", false, false)
    d.vars.Register(value.NewAddress(&d.Address, ":8080"), "address", "CORE_ADDRESS", nil, "HTTP listening address", false, false)
    d.vars.Register(value.NewBool(&d.CheckForUpdates, true), "update_check", "CORE_UPDATE_CHECK", nil, "Check for updates and send anonymized data", false, false)

    // Log
    d.vars.Register(value.NewString(&d.Log.Level, "info"), "log.level", "CORE_LOG_LEVEL", nil, "Loglevel: silent, error, warn, info, debug", false, false)
    d.vars.Register(value.NewStringList(&d.Log.Topics, []string{}, ","), "log.topics", "CORE_LOG_TOPICS", nil, "Show only selected log topics", false, false)
    d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)

    // DB
    d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)

    // Host
    d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma-separated list of public host/domain names or IPs", false, false)
    d.vars.Register(value.NewBool(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false)

    // API
    d.vars.Register(value.NewBool(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only read-only access to the API", false, false)
    d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
    d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Block, []string{}, ","), "api.access.http.block", "CORE_API_ACCESS_HTTP_BLOCK", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
    d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Allow, []string{}, ","), "api.access.https.allow", "CORE_API_ACCESS_HTTPS_ALLOW", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
    d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Block, []string{}, ","), "api.access.https.block", "CORE_API_ACCESS_HTTPS_BLOCK", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
    d.vars.Register(value.NewBool(&d.API.Auth.Enable, false), "api.auth.enable", "CORE_API_AUTH_ENABLE", nil, "Enable authentication for all clients", false, false)
    d.vars.Register(value.NewBool(&d.API.Auth.DisableLocalhost, false), "api.auth.disable_localhost", "CORE_API_AUTH_DISABLE_LOCALHOST", nil, "Disable authentication for clients from localhost", false, false)
    d.vars.Register(value.NewString(&d.API.Auth.Username, ""), "api.auth.username", "CORE_API_AUTH_USERNAME", []string{"RS_USERNAME"}, "Username", false, false)
    d.vars.Register(value.NewString(&d.API.Auth.Password, ""), "api.auth.password", "CORE_API_AUTH_PASSWORD", []string{"RS_PASSWORD"}, "Password", false, true)

    // Auth JWT
    d.vars.Register(value.NewString(&d.API.Auth.JWT.Secret, rand.String(32)), "api.auth.jwt.secret", "CORE_API_AUTH_JWT_SECRET", nil, "JWT secret, leave empty to generate a random value", false, true)

    // Auth Auth0
    d.vars.Register(value.NewBool(&d.API.Auth.Auth0.Enable, false), "api.auth.auth0.enable", "CORE_API_AUTH_AUTH0_ENABLE", nil, "Enable Auth0", false, false)
    d.vars.Register(value.NewTenantList(&d.API.Auth.Auth0.Tenants, []value.Auth0Tenant{}, ","), "api.auth.auth0.tenants", "CORE_API_AUTH_AUTH0_TENANTS", nil, "List of Auth0 tenants", false, false)

    // TLS
    d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false)
    d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
    d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
    d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
    d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)

    // Storage
    d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)

    // Storage (Disk)
    d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
    d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
    d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
    d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
    d.vars.Register(value.NewInt64(&d.Storage.Disk.Cache.TTL, 300), "storage.disk.cache.ttl_seconds", "CORE_STORAGE_DISK_CACHE_TTLSECONDS", nil, "Seconds to keep files in cache", false, false)
    d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.FileSize, 1), "storage.disk.cache.max_file_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES", nil, "Max. file size to put in cache", false, false)
    d.vars.Register(value.NewStringList(&d.Storage.Disk.Cache.Types, []string{}, " "), "storage.disk.cache.types", "CORE_STORAGE_DISK_CACHE_TYPES_ALLOW", []string{"CORE_STORAGE_DISK_CACHE_TYPES"}, "File extensions to cache, empty for all", false, false)

    // Storage (Memory)
    d.vars.Register(value.NewBool(&d.Storage.Memory.Auth.Enable, true), "storage.memory.auth.enable", "CORE_STORAGE_MEMORY_AUTH_ENABLE", nil, "Enable basic auth for PUT, POST, and DELETE on /memfs", false, false)
    d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Username, "admin"), "storage.memory.auth.username", "CORE_STORAGE_MEMORY_AUTH_USERNAME", nil, "Username for Basic-Auth of /memfs", false, false)
    d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Password, rand.StringAlphanumeric(18)), "storage.memory.auth.password", "CORE_STORAGE_MEMORY_AUTH_PASSWORD", nil, "Password for Basic-Auth of /memfs", false, true)
    d.vars.Register(value.NewInt64(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. allowed megabytes for /memfs, 0 for unlimited", false, false)
    d.vars.Register(value.NewBool(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false)

    // Storage (CORS)
    d.vars.Register(value.NewCORSOrigins(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false)

    // RTMP
    d.vars.Register(value.NewBool(&d.RTMP.Enable, false), "rtmp.enable", "CORE_RTMP_ENABLE", nil, "Enable RTMP server", false, false)
    d.vars.Register(value.NewBool(&d.RTMP.EnableTLS, false), "rtmp.enable_tls", "CORE_RTMP_ENABLE_TLS", nil, "Enable RTMPS server instead of RTMP", false, false)
    d.vars.Register(value.NewAddress(&d.RTMP.Address, ":1935"), "rtmp.address", "CORE_RTMP_ADDRESS", nil, "RTMP server listen address", false, false)
    d.vars.Register(value.NewAddress(&d.RTMP.AddressTLS, ":1936"), "rtmp.address_tls", "CORE_RTMP_ADDRESS_TLS", nil, "RTMPS server listen address", false, false)
    d.vars.Register(value.NewAbsolutePath(&d.RTMP.App, "/"), "rtmp.app", "CORE_RTMP_APP", nil, "RTMP app for publishing", false, false)
    d.vars.Register(value.NewString(&d.RTMP.Token, ""), "rtmp.token", "CORE_RTMP_TOKEN", nil, "RTMP token for publishing and playing", false, true)

    // SRT
    d.vars.Register(value.NewBool(&d.SRT.Enable, false), "srt.enable", "CORE_SRT_ENABLE", nil, "Enable SRT server", false, false)
    d.vars.Register(value.NewAddress(&d.SRT.Address, ":6000"), "srt.address", "CORE_SRT_ADDRESS", nil, "SRT server listen address", false, false)
    d.vars.Register(value.NewString(&d.SRT.Passphrase, ""), "srt.passphrase", "CORE_SRT_PASSPHRASE", nil, "SRT encryption passphrase", false, true)
    d.vars.Register(value.NewString(&d.SRT.Token, ""), "srt.token", "CORE_SRT_TOKEN", nil, "SRT token for publishing and playing", false, true)
    d.vars.Register(value.NewBool(&d.SRT.Log.Enable, false), "srt.log.enable", "CORE_SRT_LOG_ENABLE", nil, "Enable SRT server logging", false, false)
    d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)

    // FFmpeg
    d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
    d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
    d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expressions to match against the input addresses", false, false)
    d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expressions to match against the input addresses", false, false)
    d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expressions to match against the output addresses", false, false)
    d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expressions to match against the output addresses", false, false)
    d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxLines, 50), "ffmpeg.log.max_lines", "CORE_FFMPEG_LOG_MAXLINES", nil, "Number of latest log lines to keep for each process", false, false)
    d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxHistory, 3), "ffmpeg.log.max_history", "CORE_FFMPEG_LOG_MAXHISTORY", nil, "Number of latest logs to keep for each process", false, false)

    // Playout
    d.vars.Register(value.NewBool(&d.Playout.Enable, false), "playout.enable", "CORE_PLAYOUT_ENABLE", nil, "Enable playout proxy where available", false, false)
    d.vars.Register(value.NewPort(&d.Playout.MinPort, 0), "playout.min_port", "CORE_PLAYOUT_MINPORT", nil, "Min. playout server port", false, false)
    d.vars.Register(value.NewPort(&d.Playout.MaxPort, 0), "playout.max_port", "CORE_PLAYOUT_MAXPORT", nil, "Max. playout server port", false, false)

    // Debug
    d.vars.Register(value.NewBool(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false)
    d.vars.Register(value.NewInt(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false)

    // Metrics
    d.vars.Register(value.NewBool(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false)
    d.vars.Register(value.NewBool(&d.Metrics.EnablePrometheus, false), "metrics.enable_prometheus", "CORE_METRICS_ENABLE_PROMETHEUS", nil, "Enable prometheus endpoint /metrics", false, false)
    d.vars.Register(value.NewInt64(&d.Metrics.Range, 300), "metrics.range_seconds", "CORE_METRICS_RANGE_SECONDS", nil, "Seconds to keep history data", false, false)
    d.vars.Register(value.NewInt64(&d.Metrics.Interval, 2), "metrics.interval_seconds", "CORE_METRICS_INTERVAL_SECONDS", nil, "Interval for collecting metrics", false, false)

    // Sessions
    d.vars.Register(value.NewBool(&d.Sessions.Enable, true), "sessions.enable", "CORE_SESSIONS_ENABLE", nil, "Enable collecting HLS session stats for /memfs", false, false)
    d.vars.Register(value.NewCIDRList(&d.Sessions.IPIgnoreList, []string{"127.0.0.1/32", "::1/128"}, ","), "sessions.ip_ignorelist", "CORE_SESSIONS_IP_IGNORELIST", nil, "List of IP ranges in CIDR notation to ignore", false, false)
    d.vars.Register(value.NewInt(&d.Sessions.SessionTimeout, 30), "sessions.session_timeout_sec", "CORE_SESSIONS_SESSION_TIMEOUT_SEC", nil, "Timeout for an idle session", false, false)
    d.vars.Register(value.NewBool(&d.Sessions.Persist, false), "sessions.persist", "CORE_SESSIONS_PERSIST", nil, "Whether to persist session history. Will be stored as sessions.json in db.dir", false, false)
    d.vars.Register(value.NewInt(&d.Sessions.PersistInterval, 300), "sessions.persist_interval_sec", "CORE_SESSIONS_PERSIST_INTERVAL_SEC", nil, "Interval in seconds in which to persist the current session history", false, false)
    d.vars.Register(value.NewUint64(&d.Sessions.MaxBitrate, 0), "sessions.max_bitrate_mbit", "CORE_SESSIONS_MAXBITRATE_MBIT", nil, "Max. allowed outgoing bitrate in mbit/s, 0 for unlimited", false, false)
    d.vars.Register(value.NewUint64(&d.Sessions.MaxSessions, 0), "sessions.max_sessions", "CORE_SESSIONS_MAXSESSIONS", nil, "Max. allowed number of simultaneous sessions, 0 for unlimited", false, false)

    // Service
    d.vars.Register(value.NewBool(&d.Service.Enable, false), "service.enable", "CORE_SERVICE_ENABLE", nil, "Enable connecting to the Restreamer Service", false, false)
    d.vars.Register(value.NewString(&d.Service.Token, ""), "service.token", "CORE_SERVICE_TOKEN", nil, "Restreamer Service account token", false, true)
    d.vars.Register(value.NewURL(&d.Service.URL, "https://service.datarhei.com"), "service.url", "CORE_SERVICE_URL", nil, "URL of the Restreamer Service", false, false)

    // Router
    d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
    d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
    d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
}

// Validate validates the current state of the Config for completeness and sanity. Errors are
// written to the log. Use resetLogs to reset the logs prior to validation.
func (d *Config) Validate(resetLogs bool) {
    if resetLogs {
        d.vars.ResetLogs()
    }

    if d.Version != version {
        d.vars.Log("error", "version", "unknown configuration layout version (found version %d, expecting version %d)", d.Version, version)

        return
    }

    d.vars.Validate()

    // Individual sanity checks

    // If HTTP Auth is enabled, check that the username and password are set
    if d.API.Auth.Enable {
        if len(d.API.Auth.Username) == 0 || len(d.API.Auth.Password) == 0 {
            d.vars.Log("error", "api.auth.enable", "api.auth.username and api.auth.password must be set")
        }
    }

    // If Auth0 is enabled, check that domain, audience, and clientid are set
    if d.API.Auth.Auth0.Enable {
        if len(d.API.Auth.Auth0.Tenants) == 0 {
            d.vars.Log("error", "api.auth.auth0.enable", "at least one tenant must be set")
        }

        for i, t := range d.API.Auth.Auth0.Tenants {
            if len(t.Domain) == 0 || len(t.Audience) == 0 || len(t.ClientID) == 0 {
                d.vars.Log("error", "api.auth.auth0.tenants", "domain, audience, and clientid must be set (tenant %d)", i)
            }
        }
    }

    // If TLS is enabled and Let's Encrypt is disabled, require certfile and keyfile
    if d.TLS.Enable && !d.TLS.Auto {
        if len(d.TLS.CertFile) == 0 || len(d.TLS.KeyFile) == 0 {
            d.vars.Log("error", "tls.enable", "tls.certfile and tls.keyfile must be set")
        }
    }

    // If TLS and the Let's Encrypt certificate are enabled, a public hostname is required
    if d.TLS.Enable && d.TLS.Auto {
        if len(d.Host.Name) == 0 {
            d.vars.Log("error", "host.name", "a hostname must be set in order to get an automatic TLS certificate")
        } else {
            r := &net.Resolver{
                PreferGo:     true,
                StrictErrors: true,
            }

            for _, host := range d.Host.Name {
                // Don't look up IP addresses
                if ip := net.ParseIP(host); ip != nil {
                    d.vars.Log("error", "host.name", "only host names are allowed if automatic TLS is enabled, but found IP address: %s", host)
                }

                // Look up the host name with a timeout
                ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)

                _, err := r.LookupHost(ctx, host)
                if err != nil {
                    d.vars.Log("error", "host.name", "the host '%s' can't be resolved and will not work with automatic TLS", host)
                }

                cancel()
            }
        }
    }

    // If TLS for RTMP is enabled, TLS must be enabled
    if d.RTMP.EnableTLS {
        if !d.RTMP.Enable {
            d.vars.Log("error", "rtmp.enable", "RTMP server must be enabled if RTMPS server is enabled")
        }

        if !d.TLS.Enable {
            d.vars.Log("error", "rtmp.enable_tls", "RTMPS server can only be enabled if TLS is enabled")
        }
    }

    // If CORE_MEMFS_USERNAME and CORE_MEMFS_PASSWORD are set, automatically activate/deactivate Basic-Auth for memfs
    if d.vars.IsMerged("storage.memory.auth.username") && d.vars.IsMerged("storage.memory.auth.password") {
        d.Storage.Memory.Auth.Enable = true

        if len(d.Storage.Memory.Auth.Username) == 0 && len(d.Storage.Memory.Auth.Password) == 0 {
            d.Storage.Memory.Auth.Enable = false
        }
    }

    // If Basic-Auth for memfs is enabled, check that the username and password are set
    if d.Storage.Memory.Auth.Enable {
        if len(d.Storage.Memory.Auth.Username) == 0 || len(d.Storage.Memory.Auth.Password) == 0 {
            d.vars.Log("error", "storage.memory.auth.enable", "storage.memory.auth.username and storage.memory.auth.password must be set")
        }
    }

    // If playout is enabled, check that the port range is sane
    if d.Playout.Enable {
        if d.Playout.MinPort >= d.Playout.MaxPort {
            d.vars.Log("error", "playout.min_port", "must be smaller than playout.max_port")
        }
    }

    // If the cache is enabled, the TTL has to be set to a useful value
    if d.Storage.Disk.Cache.Enable && d.Storage.Disk.Cache.TTL < 0 {
        d.vars.Log("error", "storage.disk.cache.ttl_seconds", "must be equal to or greater than 0")
    }

    // If the stats are enabled, the session timeout has to be set to a useful value
    if d.Sessions.Enable && d.Sessions.SessionTimeout < 1 {
        d.vars.Log("error", "stats.session_timeout_sec", "must be equal to or greater than 1")
    }

    // If the stats and their persistence are enabled, the persist interval has to be set to a useful value
    if d.Sessions.Enable && d.Sessions.PersistInterval < 0 {
        d.vars.Log("error", "stats.persist_interval_sec", "must be equal to or greater than 0")
    }

    // If the service is enabled, the token and endpoint have to be defined
    if d.Service.Enable {
        if len(d.Service.Token) == 0 {
            d.vars.Log("error", "service.token", "must be non-empty")
        }

        if len(d.Service.URL) == 0 {
            d.vars.Log("error", "service.url", "must be non-empty")
        }
    }

    // If historic metrics are enabled, the timerange and interval have to be valid
    if d.Metrics.Enable {
        if d.Metrics.Range <= 0 {
            d.vars.Log("error", "metrics.range", "must be greater than 0")
        }

        if d.Metrics.Interval <= 0 {
            d.vars.Log("error", "metrics.interval", "must be greater than 0")
        }

        if d.Metrics.Interval > d.Metrics.Range {
            d.vars.Log("error", "metrics.interval", "must be smaller than the range")
        }
    }
}

func (d *Config) Merge() {
    d.vars.Merge()
}

func (d *Config) Messages(logger func(level string, v vars.Variable, message string)) {
    d.vars.Messages(logger)
}

func (d *Config) HasErrors() bool {
    return d.vars.HasErrors()
}

func (d *Config) Overrides() []string {
    return d.vars.Overrides()
}
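Clone above copies the scalar sub-structs by plain assignment and then replaces every slice and map with a fresh copy; without those copy.Slice/copy.StringMap calls the clone would share backing arrays with the original, and a change to one config would silently leak into the other. A stdlib-only sketch of the aliasing problem the helpers are assumed to solve (copySlice is a stand-in, not the project's copy.Slice):

package main

import "fmt"

// copySlice allocates a new backing array so the clone
// can't be mutated through the original slice.
func copySlice(s []string) []string {
    out := make([]string, len(s))
    copy(out, s)
    return out
}

func main() {
    orig := []string{"/api"}

    aliased := orig           // shares the backing array with orig
    cloned := copySlice(orig) // independent copy

    aliased[0] = "/changed"

    fmt.Println(orig[0], cloned[0]) // "/changed /api"
}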
@@ -1,87 +0,0 @@
package value

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
    "strings"
)

// array of auth0 tenants

type Auth0Tenant struct {
    Domain   string   `json:"domain"`
    Audience string   `json:"audience"`
    ClientID string   `json:"clientid"`
    Users    []string `json:"users"`
}

type TenantList struct {
    p         *[]Auth0Tenant
    separator string
}

func NewTenantList(p *[]Auth0Tenant, val []Auth0Tenant, separator string) *TenantList {
    v := &TenantList{
        p:         p,
        separator: separator,
    }

    *p = val

    return v
}

func (s *TenantList) Set(val string) error {
    list := []Auth0Tenant{}

    for i, elm := range strings.Split(val, s.separator) {
        data, err := base64.StdEncoding.DecodeString(elm)
        if err != nil {
            return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err)
        }

        t := Auth0Tenant{}
        if err := json.Unmarshal(data, &t); err != nil {
            return fmt.Errorf("invalid JSON in tenant %d: %w", i, err)
        }

        list = append(list, t)
    }

    *s.p = list

    return nil
}

func (s *TenantList) String() string {
    if s.IsEmpty() {
        return "(empty)"
    }

    list := []string{}

    for _, t := range *s.p {
        list = append(list, fmt.Sprintf("%s (%d users)", t.Domain, len(t.Users)))
    }

    return strings.Join(list, ",")
}

func (s *TenantList) Validate() error {
    for i, t := range *s.p {
        if len(t.Domain) == 0 {
            return fmt.Errorf("the domain for tenant %d is missing", i)
        }

        if len(t.Audience) == 0 {
            return fmt.Errorf("the audience for tenant %d is missing", i)
        }
    }

    return nil
}

func (s *TenantList) IsEmpty() bool {
    return len(*s.p) == 0
}
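TenantList.Set defines the wire format for CORE_API_AUTH_AUTH0_TENANTS: a separator-joined list of base64-encoded JSON objects whose field names follow the struct tags above. A sketch of producing one entry (the domain, audience, and client id are placeholder values):

package main

import (
    "encoding/base64"
    "encoding/json"
    "fmt"
)

// tenant mirrors the Auth0Tenant JSON tags shown above.
type tenant struct {
    Domain   string   `json:"domain"`
    Audience string   `json:"audience"`
    ClientID string   `json:"clientid"`
    Users    []string `json:"users"`
}

func main() {
    t := tenant{
        Domain:   "example.eu.auth0.com",
        Audience: "https://api.example.com/",
        ClientID: "abc123",
        Users:    []string{"user@example.com"},
    }

    data, err := json.Marshal(t)
    if err != nil {
        panic(err)
    }

    // Multiple tenants would be joined with the registered separator (",").
    fmt.Println(base64.StdEncoding.EncodeToString(data))
}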
@@ -1,277 +0,0 @@
package value

import (
    "fmt"
    "net"
    "net/mail"
    "net/url"
    "regexp"
    "strconv"
    "strings"

    "github.com/datarhei/core/v16/http/cors"
)

// address (host?:port)

type Address string

func NewAddress(p *string, val string) *Address {
    *p = val

    return (*Address)(p)
}

func (s *Address) Set(val string) error {
    // Check if the new value is only a port number
    re := regexp.MustCompile("^[0-9]+$")
    if re.MatchString(val) {
        val = ":" + val
    }

    *s = Address(val)
    return nil
}

func (s *Address) String() string {
    return string(*s)
}

func (s *Address) Validate() error {
    _, port, err := net.SplitHostPort(string(*s))
    if err != nil {
        return err
    }

    re := regexp.MustCompile("^[0-9]+$")
    if !re.MatchString(port) {
        return fmt.Errorf("the port must be numerical")
    }

    return nil
}

func (s *Address) IsEmpty() bool {
    return s.Validate() != nil
}

// array of CIDR notation IP addresses

type CIDRList struct {
    p         *[]string
    separator string
}

func NewCIDRList(p *[]string, val []string, separator string) *CIDRList {
    v := &CIDRList{
        p:         p,
        separator: separator,
    }

    *p = val

    return v
}

func (s *CIDRList) Set(val string) error {
    list := []string{}

    for _, elm := range strings.Split(val, s.separator) {
        elm = strings.TrimSpace(elm)
        if len(elm) != 0 {
            list = append(list, elm)
        }
    }

    *s.p = list

    return nil
}

func (s *CIDRList) String() string {
    if s.IsEmpty() {
        return "(empty)"
    }

    return strings.Join(*s.p, s.separator)
}

func (s *CIDRList) Validate() error {
    for _, cidr := range *s.p {
        _, _, err := net.ParseCIDR(cidr)
        if err != nil {
            return err
        }
    }

    return nil
}

func (s *CIDRList) IsEmpty() bool {
    return len(*s.p) == 0
}

// array of origins for CORS

type CORSOrigins struct {
    p         *[]string
    separator string
}

func NewCORSOrigins(p *[]string, val []string, separator string) *CORSOrigins {
    v := &CORSOrigins{
        p:         p,
        separator: separator,
    }

    *p = val

    return v
}

func (s *CORSOrigins) Set(val string) error {
    list := []string{}

    for _, elm := range strings.Split(val, s.separator) {
        elm = strings.TrimSpace(elm)
        if len(elm) != 0 {
            list = append(list, elm)
        }
    }

    *s.p = list

    return nil
}

func (s *CORSOrigins) String() string {
    if s.IsEmpty() {
        return "(empty)"
    }

    return strings.Join(*s.p, s.separator)
}

func (s *CORSOrigins) Validate() error {
    return cors.Validate(*s.p)
}

func (s *CORSOrigins) IsEmpty() bool {
    return len(*s.p) == 0
}

// network port

type Port int

func NewPort(p *int, val int) *Port {
    *p = val

    return (*Port)(p)
}

func (i *Port) Set(val string) error {
    v, err := strconv.Atoi(val)
    if err != nil {
        return err
    }
    *i = Port(v)
    return nil
}

func (i *Port) String() string {
    return strconv.Itoa(int(*i))
}

func (i *Port) Validate() error {
    val := int(*i)

    if val < 0 || val >= (1<<16) {
        return fmt.Errorf("%d is not in the range of [0, %d]", val, 1<<16-1)
    }

    return nil
}

func (i *Port) IsEmpty() bool {
    return int(*i) == 0
}

// url

type URL string

func NewURL(p *string, val string) *URL {
    *p = val

    return (*URL)(p)
}

func (u *URL) Set(val string) error {
    *u = URL(val)
    return nil
}

func (u *URL) String() string {
    return string(*u)
}

func (u *URL) Validate() error {
    val := string(*u)

    if len(val) == 0 {
        return nil
    }

    URL, err := url.Parse(val)
    if err != nil {
        return fmt.Errorf("%s is not a valid URL", val)
    }

    if len(URL.Scheme) == 0 || len(URL.Host) == 0 {
        return fmt.Errorf("%s is not a valid URL", val)
    }

    return nil
}

func (u *URL) IsEmpty() bool {
    return len(string(*u)) == 0
}

// email address

type Email string

func NewEmail(p *string, val string) *Email {
    *p = val

    return (*Email)(p)
}

func (s *Email) Set(val string) error {
    addr, err := mail.ParseAddress(val)
    if err != nil {
        return err
    }

    *s = Email(addr.Address)
    return nil
}

func (s *Email) String() string {
    return string(*s)
}

func (s *Email) Validate() error {
    if len(s.String()) == 0 {
        return nil
    }

    _, err := mail.ParseAddress(s.String())
    return err
}

func (s *Email) IsEmpty() bool {
    return len(string(*s)) == 0
}
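Two details above are easy to miss: Address.Set prefixes a bare port number with ":", and Address.Validate only accepts values that net.SplitHostPort can parse into a numerical port. A standalone sketch of that normalization and check:

package main

import (
    "fmt"
    "net"
    "regexp"
)

var onlyDigits = regexp.MustCompile("^[0-9]+$")

// normalize mirrors Address.Set: a value consisting only of digits
// is treated as a bare port and prefixed with ":".
func normalize(val string) string {
    if onlyDigits.MatchString(val) {
        return ":" + val
    }
    return val
}

func main() {
    for _, in := range []string{"8080", "localhost:8080", "localhost"} {
        addr := normalize(in)
        _, port, err := net.SplitHostPort(addr)
        fmt.Printf("%q -> %q port=%q err=%v\n", in, addr, port, err)
    }
}

The last input fails because "localhost" has no port, which is exactly why Address.IsEmpty is defined as "does not validate".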
@@ -1,202 +0,0 @@
package value

import (
    "fmt"
    "os"
    "os/exec"
    "path/filepath"
    "strings"
)

// must directory

type MustDir string

func NewMustDir(p *string, val string) *MustDir {
    *p = val

    return (*MustDir)(p)
}

func (u *MustDir) Set(val string) error {
    *u = MustDir(val)
    return nil
}

func (u *MustDir) String() string {
    return string(*u)
}

func (u *MustDir) Validate() error {
    val := string(*u)

    if len(strings.TrimSpace(val)) == 0 {
        return fmt.Errorf("path name must not be empty")
    }

    finfo, err := os.Stat(val)
    if err != nil {
        return fmt.Errorf("%s does not exist", val)
    }

    if !finfo.IsDir() {
        return fmt.Errorf("%s is not a directory", val)
    }

    return nil
}

func (u *MustDir) IsEmpty() bool {
    return len(string(*u)) == 0
}

// directory

type Dir string

func NewDir(p *string, val string) *Dir {
    *p = val

    return (*Dir)(p)
}

func (u *Dir) Set(val string) error {
    *u = Dir(val)
    return nil
}

func (u *Dir) String() string {
    return string(*u)
}

func (u *Dir) Validate() error {
    val := string(*u)

    if len(strings.TrimSpace(val)) == 0 {
        return nil
    }

    finfo, err := os.Stat(val)
    if err != nil {
        return fmt.Errorf("%s does not exist", val)
    }

    if !finfo.IsDir() {
        return fmt.Errorf("%s is not a directory", val)
    }

    return nil
}

func (u *Dir) IsEmpty() bool {
    return len(string(*u)) == 0
}

// executable

type Exec string

func NewExec(p *string, val string) *Exec {
    *p = val

    return (*Exec)(p)
}

func (u *Exec) Set(val string) error {
    *u = Exec(val)
    return nil
}

func (u *Exec) String() string {
    return string(*u)
}

func (u *Exec) Validate() error {
    val := string(*u)

    _, err := exec.LookPath(val)
    if err != nil {
        return fmt.Errorf("%s not found or is not executable", val)
    }

    return nil
}

func (u *Exec) IsEmpty() bool {
    return len(string(*u)) == 0
}

// regular file

type File string

func NewFile(p *string, val string) *File {
    *p = val

    return (*File)(p)
}

func (u *File) Set(val string) error {
    *u = File(val)
    return nil
}

func (u *File) String() string {
    return string(*u)
}

func (u *File) Validate() error {
    val := string(*u)

    if len(val) == 0 {
        return nil
    }

    finfo, err := os.Stat(val)
    if err != nil {
        return fmt.Errorf("%s does not exist", val)
    }

    if !finfo.Mode().IsRegular() {
        return fmt.Errorf("%s is not a regular file", val)
    }

    return nil
}

func (u *File) IsEmpty() bool {
    return len(string(*u)) == 0
}

// absolute path

type AbsolutePath string

func NewAbsolutePath(p *string, val string) *AbsolutePath {
    *p = filepath.Clean(val)

    return (*AbsolutePath)(p)
}

func (s *AbsolutePath) Set(val string) error {
    *s = AbsolutePath(filepath.Clean(val))
    return nil
}

func (s *AbsolutePath) String() string {
    return string(*s)
}

func (s *AbsolutePath) Validate() error {
    path := string(*s)

    if !filepath.IsAbs(path) {
        return fmt.Errorf("%s is not an absolute path", path)
    }

    return nil
}

func (s *AbsolutePath) IsEmpty() bool {
    return len(string(*s)) == 0
}
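Dir and MustDir differ only in how they treat an empty value: Dir reads it as "unset" and passes, while MustDir rejects it; both otherwise require an existing directory. A sketch of the shared check with that one switch made explicit (validateDir is an illustrative helper, not part of the package):

package main

import (
    "fmt"
    "os"
)

// validateDir mirrors the shared check above: the path must exist and
// be a directory. allowEmpty distinguishes Dir (true) from MustDir (false).
func validateDir(path string, allowEmpty bool) error {
    if path == "" {
        if allowEmpty {
            return nil
        }
        return fmt.Errorf("path name must not be empty")
    }

    finfo, err := os.Stat(path)
    if err != nil {
        return fmt.Errorf("%s does not exist", path)
    }
    if !finfo.IsDir() {
        return fmt.Errorf("%s is not a directory", path)
    }
    return nil
}

func main() {
    fmt.Println(validateDir("", true))            // <nil>
    fmt.Println(validateDir("", false))           // path name must not be empty
    fmt.Println(validateDir(os.TempDir(), false)) // <nil>
}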
@@ -1,271 +0,0 @@
package value

import (
    "strconv"
    "strings"
)

// string

type String string

func NewString(p *string, val string) *String {
    *p = val

    return (*String)(p)
}

func (s *String) Set(val string) error {
    *s = String(val)
    return nil
}

func (s *String) String() string {
    return string(*s)
}

func (s *String) Validate() error {
    return nil
}

func (s *String) IsEmpty() bool {
    return len(string(*s)) == 0
}

// array of strings

type StringList struct {
    p         *[]string
    separator string
}

func NewStringList(p *[]string, val []string, separator string) *StringList {
    v := &StringList{
        p:         p,
        separator: separator,
    }

    *p = val

    return v
}

func (s *StringList) Set(val string) error {
    list := []string{}

    for _, elm := range strings.Split(val, s.separator) {
        elm = strings.TrimSpace(elm)
        if len(elm) != 0 {
            list = append(list, elm)
        }
    }

    *s.p = list

    return nil
}

func (s *StringList) String() string {
    if s.IsEmpty() {
        return "(empty)"
    }

    return strings.Join(*s.p, s.separator)
}

func (s *StringList) Validate() error {
    return nil
}

func (s *StringList) IsEmpty() bool {
    return len(*s.p) == 0
}

// map of strings to strings

type StringMapString struct {
    p *map[string]string
}

func NewStringMapString(p *map[string]string, val map[string]string) *StringMapString {
    v := &StringMapString{
        p: p,
    }

    if *p == nil {
        *p = make(map[string]string)
    }

    if val != nil {
        *p = val
    }

    return v
}

func (s *StringMapString) Set(val string) error {
    mappings := make(map[string]string)

    for _, elm := range strings.Split(val, " ") {
        elm = strings.TrimSpace(elm)
        if len(elm) == 0 {
            continue
        }

        mapping := strings.SplitN(elm, ":", 2)

        mappings[mapping[0]] = mapping[1]
    }

    *s.p = mappings

    return nil
}

func (s *StringMapString) String() string {
    if s.IsEmpty() {
        return "(empty)"
    }

    mappings := make([]string, len(*s.p))

    i := 0
    for k, v := range *s.p {
        mappings[i] = k + ":" + v
        i++
    }

    return strings.Join(mappings, " ")
}

func (s *StringMapString) Validate() error {
    return nil
}

func (s *StringMapString) IsEmpty() bool {
    return len(*s.p) == 0
}

// boolean

type Bool bool

func NewBool(p *bool, val bool) *Bool {
    *p = val

    return (*Bool)(p)
}

func (b *Bool) Set(val string) error {
    v, err := strconv.ParseBool(val)
    if err != nil {
        return err
    }
    *b = Bool(v)
    return nil
}

func (b *Bool) String() string {
    return strconv.FormatBool(bool(*b))
}

func (b *Bool) Validate() error {
    return nil
}

func (b *Bool) IsEmpty() bool {
    return !bool(*b)
}

// int

type Int int

func NewInt(p *int, val int) *Int {
    *p = val

    return (*Int)(p)
}

func (i *Int) Set(val string) error {
    v, err := strconv.Atoi(val)
    if err != nil {
        return err
    }
    *i = Int(v)
    return nil
}

func (i *Int) String() string {
    return strconv.Itoa(int(*i))
}

func (i *Int) Validate() error {
    return nil
}

func (i *Int) IsEmpty() bool {
    return int(*i) == 0
}

// int64

type Int64 int64

func NewInt64(p *int64, val int64) *Int64 {
    *p = val

    return (*Int64)(p)
}

func (u *Int64) Set(val string) error {
    v, err := strconv.ParseInt(val, 0, 64)
    if err != nil {
        return err
    }
    *u = Int64(v)
    return nil
}

func (u *Int64) String() string {
    return strconv.FormatInt(int64(*u), 10)
}

func (u *Int64) Validate() error {
    return nil
}

func (u *Int64) IsEmpty() bool {
    return int64(*u) == 0
}

// uint64

type Uint64 uint64

func NewUint64(p *uint64, val uint64) *Uint64 {
    *p = val

    return (*Uint64)(p)
}

func (u *Uint64) Set(val string) error {
    v, err := strconv.ParseUint(val, 0, 64)
    if err != nil {
        return err
    }
    *u = Uint64(v)
    return nil
}

func (u *Uint64) String() string {
    return strconv.FormatUint(uint64(*u), 10)
}

func (u *Uint64) Validate() error {
    return nil
}

func (u *Uint64) IsEmpty() bool {
    return uint64(*u) == 0
}
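StringMapString.Set splits the input on spaces and each entry on the first ":". Note that strings.SplitN returns a single element for an entry without a colon, so mapping[1] would panic on input like "foo"; a guarded version of the same parse (parseRoutes is an illustrative name, not the package's API):

package main

import (
    "fmt"
    "strings"
)

func parseRoutes(val string) (map[string]string, error) {
    mappings := make(map[string]string)

    for _, elm := range strings.Split(val, " ") {
        elm = strings.TrimSpace(elm)
        if len(elm) == 0 {
            continue
        }

        // SplitN with a limit of 2 keeps any further colons in the value.
        mapping := strings.SplitN(elm, ":", 2)
        if len(mapping) != 2 {
            return nil, fmt.Errorf("invalid mapping: %s", elm)
        }

        mappings[mapping[0]] = mapping[1]
    }

    return mappings, nil
}

func main() {
    m, err := parseRoutes("/old.m3u8:/new.m3u8 /a:/b")
    fmt.Println(m, err)
}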
@@ -1,36 +0,0 @@
package value

import "time"

// time

type Time time.Time

func NewTime(p *time.Time, val time.Time) *Time {
    *p = val

    return (*Time)(p)
}

func (u *Time) Set(val string) error {
    v, err := time.Parse(time.RFC3339, val)
    if err != nil {
        return err
    }
    *u = Time(v)
    return nil
}

func (u *Time) String() string {
    v := time.Time(*u)
    return v.Format(time.RFC3339)
}

func (u *Time) Validate() error {
    return nil
}

func (u *Time) IsEmpty() bool {
    v := time.Time(*u)
    return v.IsZero()
}
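Time.Set accepts exactly the RFC 3339 layout, so values must look like 2022-11-08T12:00:00Z; a two-line check:

package main

import (
    "fmt"
    "time"
)

func main() {
    // The same layout Time.Set uses; anything else returns a parse error.
    v, err := time.Parse(time.RFC3339, "2022-11-08T12:00:00Z")
    fmt.Println(v, err)

    _, err = time.Parse(time.RFC3339, "2022-11-08 12:00:00")
    fmt.Println(err) // layout mismatch: missing "T" and zone
}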
@@ -1,21 +0,0 @@
package value

type Value interface {
    // String returns a string representation of the value.
    String() string

    // Set sets a new value. Returns an error if the given
    // string representation can't be transformed into the
    // value. Returns nil if the new value has been set.
    Set(string) error

    // Validate the value. The returned error indicates what
    // is wrong with the current value. Returns nil if the
    // value is OK.
    Validate() error

    // IsEmpty returns whether the value represents an empty
    // representation for that value.
    IsEmpty() bool
}
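Any new config type only needs these four methods. A hypothetical Duration value as a sketch (not part of the package; the interface is restated locally so the snippet compiles on its own):

package main

import (
    "fmt"
    "time"
)

// Value restates the interface above so this sketch is self-contained.
type Value interface {
    String() string
    Set(string) error
    Validate() error
    IsEmpty() bool
}

// Duration is a hypothetical value type following the pattern above.
type Duration time.Duration

func NewDuration(p *time.Duration, val time.Duration) *Duration {
    *p = val
    return (*Duration)(p)
}

func (d *Duration) Set(val string) error {
    v, err := time.ParseDuration(val)
    if err != nil {
        return err
    }
    *d = Duration(v)
    return nil
}

func (d *Duration) String() string {
    return time.Duration(*d).String()
}

func (d *Duration) Validate() error {
    if time.Duration(*d) < 0 {
        return fmt.Errorf("duration must not be negative")
    }
    return nil
}

func (d *Duration) IsEmpty() bool {
    return time.Duration(*d) == 0
}

var _ Value = (*Duration)(nil) // compile-time conformance check

func main() {
    var timeout time.Duration
    v := NewDuration(&timeout, 30*time.Second)

    _ = v.Set("2m30s")
    fmt.Println(v.String(), v.Validate(), v.IsEmpty()) // 2m30s <nil> false
}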
@@ -1,216 +0,0 @@
package vars

import (
    "fmt"
    "os"

    "github.com/datarhei/core/v16/config/value"
)

type variable struct {
    value       value.Value // The actual value
    defVal      string      // The default value in string representation
    name        string      // A name for this value
    envName     string      // The environment variable that corresponds to this value
    envAltNames []string    // Alternative environment variable names
    description string      // A description of this value
    required    bool        // Whether a non-empty value is required
    disguise    bool        // Whether the value should be disguised if printed
    merged      bool        // Whether this value has been replaced by its corresponding environment variable
}

type Variable struct {
    Value       string
    Name        string
    EnvName     string
    Description string
    Merged      bool
}

type message struct {
    message  string   // The log message
    variable Variable // The config field this message refers to
    level    string   // The loglevel for this message
}

type Variables struct {
    vars []*variable
    logs []message
}

func (vs *Variables) Register(val value.Value, name, envName string, envAltNames []string, description string, required, disguise bool) {
    vs.vars = append(vs.vars, &variable{
        value:       val,
        defVal:      val.String(),
        name:        name,
        envName:     envName,
        envAltNames: envAltNames,
        description: description,
        required:    required,
        disguise:    disguise,
    })
}

func (vs *Variables) Transfer(vss *Variables) {
    for _, v := range vs.vars {
        if vss.IsMerged(v.name) {
            v.merged = true
        }
    }
}

func (vs *Variables) SetDefault(name string) {
    v := vs.findVariable(name)
    if v == nil {
        return
    }

    v.value.Set(v.defVal)
}

func (vs *Variables) Get(name string) (string, error) {
    v := vs.findVariable(name)
    if v == nil {
        return "", fmt.Errorf("variable not found")
    }

    return v.value.String(), nil
}

func (vs *Variables) Set(name, val string) error {
    v := vs.findVariable(name)
    if v == nil {
        return fmt.Errorf("variable not found")
    }

    return v.value.Set(val)
}

func (vs *Variables) Log(level, name string, format string, args ...interface{}) {
    v := vs.findVariable(name)
    if v == nil {
        return
    }

    variable := Variable{
        Value:       v.value.String(),
        Name:        v.name,
        EnvName:     v.envName,
        Description: v.description,
        Merged:      v.merged,
    }

    if v.disguise {
        variable.Value = "***"
    }

    l := message{
        message:  fmt.Sprintf(format, args...),
        variable: variable,
        level:    level,
    }

    vs.logs = append(vs.logs, l)
}

func (vs *Variables) Merge() {
    for _, v := range vs.vars {
        if len(v.envName) == 0 {
            continue
        }

        var envval string
        var ok bool

        envval, ok = os.LookupEnv(v.envName)
        if !ok {
            foundAltName := false

            for _, envName := range v.envAltNames {
                envval, ok = os.LookupEnv(envName)
                if ok {
                    foundAltName = true
                    vs.Log("warn", v.name, "deprecated name, please use %s", v.envName)
                    break
                }
            }

            if !foundAltName {
                continue
            }
        }

        err := v.value.Set(envval)
        if err != nil {
            vs.Log("error", v.name, "%s", err.Error())
        }

        v.merged = true
    }
}

func (vs *Variables) IsMerged(name string) bool {
    v := vs.findVariable(name)
    if v == nil {
        return false
    }

    return v.merged
}

func (vs *Variables) Validate() {
    for _, v := range vs.vars {
        vs.Log("info", v.name, "%s", "")

        err := v.value.Validate()
        if err != nil {
            vs.Log("error", v.name, "%s", err.Error())
        }

        if v.required && v.value.IsEmpty() {
            vs.Log("error", v.name, "a value is required")
        }
    }
}

func (vs *Variables) ResetLogs() {
|
||||
vs.logs = nil
|
||||
}
|
||||
|
||||
func (vs *Variables) Messages(logger func(level string, v Variable, message string)) {
|
||||
for _, l := range vs.logs {
|
||||
logger(l.level, l.variable, l.message)
|
||||
}
|
||||
}
|
||||
|
||||
func (vs *Variables) HasErrors() bool {
|
||||
for _, l := range vs.logs {
|
||||
if l.level == "error" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (vs *Variables) Overrides() []string {
|
||||
overrides := []string{}
|
||||
|
||||
for _, v := range vs.vars {
|
||||
if v.merged {
|
||||
overrides = append(overrides, v.name)
|
||||
}
|
||||
}
|
||||
|
||||
return overrides
|
||||
}
|
||||
|
||||
func (vs *Variables) findVariable(name string) *variable {
|
||||
for _, v := range vs.vars {
|
||||
if v.name == name {
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
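Editorial note: a sketch of how the package above is driven; the variable name "address" and the environment name "CORE_ADDRESS" are made up for illustration:

package main

import (
	"fmt"
	"os"

	"github.com/datarhei/core/v16/config/value"
	"github.com/datarhei/core/v16/config/vars"
)

func main() {
	v := vars.Variables{}

	addr := ""
	// Register with default ":8080"; required, not disguised.
	v.Register(value.NewString(&addr, ":8080"), "address", "CORE_ADDRESS", nil, "HTTP listen address", true, false)

	os.Setenv("CORE_ADDRESS", ":3000")
	v.Merge() // the environment wins over the default and marks the variable as merged

	fmt.Println(addr)          // :3000
	fmt.Println(v.Overrides()) // [address]
}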
@@ -1,40 +0,0 @@
package vars

import (
	"testing"

	"github.com/datarhei/core/v16/config/value"

	"github.com/stretchr/testify/require"
)

func TestVars(t *testing.T) {
	v1 := Variables{}

	s := ""

	v1.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false)

	require.Equal(t, "foobar", s)
	x, _ := v1.Get("string")
	require.Equal(t, "foobar", x)

	v := v1.findVariable("string")
	v.value.Set("barfoo")

	require.Equal(t, "barfoo", s)
	x, _ = v1.Get("string")
	require.Equal(t, "barfoo", x)

	v1.Set("string", "foobaz")

	require.Equal(t, "foobaz", s)
	x, _ = v1.Get("string")
	require.Equal(t, "foobaz", x)

	v1.SetDefault("string")

	require.Equal(t, "foobar", s)
	x, _ = v1.Get("string")
	require.Equal(t, "foobar", x)
}
744	docs/docs.go
File diff suppressed because it is too large
@@ -122,7 +122,7 @@ definitions:
type: boolean
tenants:
items:
$ref: '#/definitions/value.Auth0Tenant'
$ref: '#/definitions/config.Auth0Tenant'
type: array
type: object
disable_localhost:
@@ -379,6 +379,32 @@ definitions:
type: object
mimetypes_file:
type: string
s3:
properties:
access_key_id:
type: string
auth:
properties:
enable:
type: boolean
password:
type: string
username:
type: string
type: object
bucket:
type: string
enable:
type: boolean
endpoint:
type: string
region:
type: string
secret_access_key:
type: string
use_ssl:
type: boolean
type: object
type: object
tls:
properties:
@@ -388,8 +414,6 @@
type: boolean
cert_file:
type: string
email:
type: string
enable:
type: boolean
key_file:
@@ -426,6 +450,15 @@
size_bytes:
type: integer
type: object
api.FilesystemInfo:
properties:
mount:
type: string
name:
type: string
type:
type: string
type: object
api.GraphQuery:
properties:
query:
@@ -979,7 +1012,7 @@
recv_km_pkt:
description: The total number of received KM (Key Material) control packets
type: integer
recv_loss_bytes:
recv_loss__bytes:
description: Same as pktRcvLoss, but expressed in bytes, including payload
and all the headers (IP, TCP, SRT), bytes for the presently missing (either
reordered or lost) packets' payloads are estimated based on the average
@@ -1088,7 +1121,7 @@
sent_retrans_pkt:
description: The total number of retransmitted packets sent by the SRT sender
type: integer
sent_unique_bytes:
sent_unique__bytes:
description: Same as pktSentUnique, but expressed in bytes, including payload
and all the headers (IP, TCP, SRT)
type: integer
@@ -1247,7 +1280,7 @@
type: boolean
tenants:
items:
$ref: '#/definitions/value.Auth0Tenant'
$ref: '#/definitions/config.Auth0Tenant'
type: array
type: object
disable_localhost:
@@ -1504,6 +1537,32 @@
type: object
mimetypes_file:
type: string
s3:
properties:
access_key_id:
type: string
auth:
properties:
enable:
type: boolean
password:
type: string
username:
type: string
type: object
bucket:
type: string
enable:
type: boolean
endpoint:
type: string
region:
type: string
secret_access_key:
type: string
use_ssl:
type: boolean
type: object
type: object
tls:
properties:
@@ -1513,8 +1572,6 @@
type: boolean
cert_file:
type: string
email:
type: string
enable:
type: boolean
key_file:
@@ -1695,7 +1752,7 @@
uptime:
type: integer
type: object
value.Auth0Tenant:
config.Auth0Tenant:
properties:
audience:
type: string
@@ -1720,34 +1777,6 @@ info:
title: datarhei Core API
version: "3.0"
paths:
/{path}:
get:
description: Fetch a file from the filesystem. If the file is a directory, an
index.html is returned, if it exists.
operationId: diskfs-get-file
parameters:
- description: Path to file
in: path
name: path
required: true
type: string
produces:
- application/data
- application/json
responses:
"200":
description: OK
schema:
type: file
"301":
description: Moved Permanently
schema:
type: string
"404":
description: Not Found
schema:
$ref: '#/definitions/api.Error'
summary: Fetch a file from the filesystem
/api:
get:
description: API version and build infos in case auth is valid or not required.
@@ -1771,7 +1800,7 @@ paths:
- text/html
responses:
"200":
description: OK
description: ""
security:
- ApiKeyAuth: []
summary: Load GraphQL playground
@@ -1880,8 +1909,6 @@
security:
- ApiKeyAuth: []
summary: Retrieve the currently active Restreamer configuration
tags:
- v16.7.2
put:
consumes:
- application/json
@@ -1913,8 +1940,6 @@
security:
- ApiKeyAuth: []
summary: Update the current Restreamer configuration
tags:
- v16.7.2
/api/v3/config/reload:
get:
description: Reload the currently active configuration. This will trigger a
@@ -1930,14 +1955,33 @@
security:
- ApiKeyAuth: []
summary: Reload the currently active configuration
tags:
- v16.7.2
/api/v3/fs/disk:
/api/v3/fs:
get:
description: List all files on the filesystem. The listing can be ordered by
name, size, or date of last modification in ascending or descending order.
operationId: diskfs-3-list-files
description: List all registered filesystems
operationId: filesystem-3-list
produces:
- application/json
responses:
"200":
description: OK
schema:
items:
$ref: '#/definitions/api.FilesystemInfo'
type: array
security:
- ApiKeyAuth: []
summary: List all registered filesystems
/api/v3/fs/{name}:
get:
description: List all files on a filesystem. The listing can be ordered by name,
size, or date of last modification in ascending or descending order.
operationId: filesystem-3-list-files
parameters:
- description: Name of the filesystem
in: path
name: name
required: true
type: string
- description: glob pattern for file names
in: query
name: glob
@@ -1961,14 +2005,17 @@
type: array
security:
- ApiKeyAuth: []
summary: List all files on the filesystem
tags:
- v16.7.2
/api/v3/fs/disk/{path}:
summary: List all files on a filesystem
/api/v3/fs/{name}/{path}:
delete:
description: Remove a file from the filesystem
operationId: diskfs-3-delete-file
description: Remove a file from a filesystem
operationId: filesystem-3-delete-file
parameters:
- description: Name of the filesystem
in: path
name: name
required: true
type: string
- description: Path to file
in: path
name: path
@@ -1987,14 +2034,16 @@
$ref: '#/definitions/api.Error'
security:
- ApiKeyAuth: []
summary: Remove a file from the filesystem
tags:
- v16.7.2
summary: Remove a file from a filesystem
get:
description: Fetch a file from the filesystem. The contents of that file are
returned.
operationId: diskfs-3-get-file
description: Fetch a file from a filesystem
operationId: filesystem-3-get-file
parameters:
- description: Name of the filesystem
in: path
name: name
required: true
type: string
- description: Path to file
in: path
name: path
@@ -2018,15 +2067,18 @@
$ref: '#/definitions/api.Error'
security:
- ApiKeyAuth: []
summary: Fetch a file from the filesystem
tags:
- v16.7.2
summary: Fetch a file from a filesystem
put:
consumes:
- application/data
description: Writes or overwrites a file on the filesystem
operationId: diskfs-3-put-file
description: Writes or overwrites a file on a filesystem
operationId: filesystem-3-put-file
parameters:
- description: Name of the filesystem
in: path
name: name
required: true
type: string
- description: Path to file
in: path
name: path
@@ -2058,172 +2110,7 @@
$ref: '#/definitions/api.Error'
security:
- ApiKeyAuth: []
summary: Add a file to the filesystem
tags:
- v16.7.2
/api/v3/fs/mem:
get:
description: List all files on the memory filesystem. The listing can be ordered
by name, size, or date of last modification in ascending or descending order.
operationId: memfs-3-list-files
parameters:
- description: glob pattern for file names
in: query
name: glob
type: string
- description: none, name, size, lastmod
in: query
name: sort
type: string
- description: asc, desc
in: query
name: order
type: string
produces:
- application/json
responses:
"200":
description: OK
schema:
items:
$ref: '#/definitions/api.FileInfo'
type: array
security:
- ApiKeyAuth: []
summary: List all files on the memory filesystem
tags:
- v16.7.2
/api/v3/fs/mem/{path}:
delete:
description: Remove a file from the memory filesystem
operationId: memfs-3-delete-file
parameters:
- description: Path to file
in: path
name: path
required: true
type: string
produces:
- text/plain
responses:
"200":
description: OK
schema:
type: string
"404":
description: Not Found
schema:
$ref: '#/definitions/api.Error'
security:
- ApiKeyAuth: []
summary: Remove a file from the memory filesystem
tags:
- v16.7.2
get:
description: Fetch a file from the memory filesystem
operationId: memfs-3-get-file
parameters:
- description: Path to file
in: path
name: path
required: true
type: string
produces:
- application/data
- application/json
responses:
"200":
description: OK
schema:
type: file
"301":
description: Moved Permanently
schema:
type: string
"404":
description: Not Found
schema:
$ref: '#/definitions/api.Error'
security:
- ApiKeyAuth: []
summary: Fetch a file from the memory filesystem
tags:
- v16.7.2
patch:
consumes:
- application/data
description: Create a link to a file in the memory filesystem. The file linked
to has to exist.
operationId: memfs-3-patch
parameters:
- description: Path to file
in: path
name: path
required: true
type: string
- description: Path to the file to link to
in: body
name: url
required: true
schema:
type: string
produces:
- text/plain
- application/json
responses:
"201":
description: Created
schema:
type: string
"400":
description: Bad Request
schema:
$ref: '#/definitions/api.Error'
security:
- ApiKeyAuth: []
summary: Create a link to a file in the memory filesystem
tags:
- v16.7.2
put:
consumes:
- application/data
description: Writes or overwrites a file on the memory filesystem
operationId: memfs-3-put-file
parameters:
- description: Path to file
in: path
name: path
required: true
type: string
- description: File data
in: body
name: data
required: true
schema:
items:
type: integer
type: array
produces:
- text/plain
- application/json
responses:
"201":
description: Created
schema:
type: string
"204":
description: No Content
schema:
type: string
"507":
description: Insufficient Storage
schema:
$ref: '#/definitions/api.Error'
security:
- ApiKeyAuth: []
summary: Add a file to the memory filesystem
tags:
- v16.7.2
summary: Add a file to a filesystem
/api/v3/log:
get:
description: Get the last log lines of the Restreamer application
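Editorial note: one side of the filesystem hunks above replaces the separate disk (/api/v3/fs/disk/...) and in-memory (/api/v3/fs/mem/...) routes with a single /api/v3/fs/{name}/{path} route, where the filesystem is addressed by name. A hypothetical client sketch against that unified route; the host, port and disabled auth are assumptions, the status codes are the ones listed in the spec above:

package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Write a file to the in-memory filesystem by using "mem" as the
	// {name} path segment; "disk" would target the disk filesystem.
	body := bytes.NewBufferString("hello")
	req, _ := http.NewRequest(http.MethodPut, "http://localhost:8080/api/v3/fs/mem/greeting.txt", body)
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	fmt.Println(res.Status) // 201 Created on first write, 204 No Content on overwrite
}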
@@ -2245,8 +2132,6 @@
security:
- ApiKeyAuth: []
summary: Application log
tags:
- v16.7.2
/api/v3/metadata/{key}:
get:
description: Retrieve the previously stored JSON metadata under the given key.
@@ -2275,8 +2160,6 @@
security:
- ApiKeyAuth: []
summary: Retrieve JSON metadata from a key
tags:
- v16.7.2
put:
description: Add arbitrary JSON metadata under the given key. If the key exists,
all already stored metadata with this key will be overwritten. If the key
@@ -2306,8 +2189,6 @@
security:
- ApiKeyAuth: []
summary: Add JSON metadata under the given key
tags:
- v16.7.2
/api/v3/metrics:
get:
description: List all known metrics with their description and labels
@@ -2324,8 +2205,6 @@
security:
- ApiKeyAuth: []
summary: List all known metrics with their description and labels
tags:
- v16.10.0
post:
consumes:
- application/json
@@ -2352,8 +2231,6 @@
security:
- ApiKeyAuth: []
summary: Query the collected metrics
tags:
- v16.7.2
/api/v3/process:
get:
description: List all known processes. Use the query parameter to filter the
@@ -2398,8 +2275,6 @@
security:
- ApiKeyAuth: []
summary: List all known processes
tags:
- v16.7.2
post:
consumes:
- application/json
@@ -2426,8 +2301,6 @@
security:
- ApiKeyAuth: []
summary: Add a new process
tags:
- v16.7.2
/api/v3/process/{id}:
delete:
description: Delete a process by its ID
@@ -2452,8 +2325,6 @@
security:
- ApiKeyAuth: []
summary: Delete a process by its ID
tags:
- v16.7.2
get:
description: List a process by its ID. Use the filter parameter to specify
the level of detail of the output.
@@ -2483,12 +2354,10 @@
security:
- ApiKeyAuth: []
summary: List a process by its ID
tags:
- v16.7.2
put:
consumes:
- application/json
description: Replace an existing process.
description: Replace an existing process. This is a shortcut for DELETE+POST.
operationId: process-3-update
parameters:
- description: Process ID
@@ -2520,8 +2389,6 @@
security:
- ApiKeyAuth: []
summary: Replace an existing process
tags:
- v16.7.2
/api/v3/process/{id}/command:
put:
consumes:
@@ -2558,8 +2425,6 @@
security:
- ApiKeyAuth: []
summary: Issue a command to a process
tags:
- v16.7.2
/api/v3/process/{id}/config:
get:
description: Get the configuration of a process. This is the configuration as
@@ -2589,8 +2454,6 @@
security:
- ApiKeyAuth: []
summary: Get the configuration of a process
tags:
- v16.7.2
/api/v3/process/{id}/metadata/{key}:
get:
description: Retrieve the previously stored JSON metadata under the given key.
@@ -2624,8 +2487,6 @@
security:
- ApiKeyAuth: []
summary: Retrieve JSON metadata stored with a process under a key
tags:
- v16.7.2
put:
description: Add arbitrary JSON metadata under the given key. If the key exists,
all already stored metadata with this key will be overwritten. If the key
@@ -2665,8 +2526,6 @@
security:
- ApiKeyAuth: []
summary: Add JSON metadata with a process under the given key
tags:
- v16.7.2
/api/v3/process/{id}/playout/{inputid}/errorframe/{name}:
post:
consumes:
@@ -2716,8 +2575,6 @@
security:
- ApiKeyAuth: []
summary: Upload an error frame
tags:
- v16.7.2
/api/v3/process/{id}/playout/{inputid}/errorframe/encode:
get:
description: Immediately encode the errorframe (if available and looping)
@@ -2752,8 +2609,6 @@
security:
- ApiKeyAuth: []
summary: Encode the errorframe
tags:
- v16.7.2
/api/v3/process/{id}/playout/{inputid}/keyframe/{name}:
get:
description: Get the last keyframe of an input of a process. The extension of
@@ -2795,8 +2650,6 @@
security:
- ApiKeyAuth: []
summary: Get the last keyframe
tags:
- v16.7.2
/api/v3/process/{id}/playout/{inputid}/reopen:
get:
description: Close the current input stream such that it will be automatically
@@ -2831,8 +2684,6 @@
security:
- ApiKeyAuth: []
summary: Close the current input stream
tags:
- v16.7.2
/api/v3/process/{id}/playout/{inputid}/status:
get:
description: Get the current playout status of an input of a process
@@ -2866,8 +2717,6 @@
security:
- ApiKeyAuth: []
summary: Get the current playout status
tags:
- v16.7.2
/api/v3/process/{id}/playout/{inputid}/stream:
put:
consumes:
@@ -2911,12 +2760,10 @@
security:
- ApiKeyAuth: []
summary: Switch to a new stream
tags:
- v16.7.2
/api/v3/process/{id}/probe:
get:
description: Probe an existing process to get detailed stream information
on the inputs.
on the inputs
operationId: process-3-probe
parameters:
- description: Process ID
@@ -2934,11 +2781,9 @@
security:
- ApiKeyAuth: []
summary: Probe a process
tags:
- v16.7.2
/api/v3/process/{id}/report:
get:
description: Get the logs and the log history of a process.
description: Get the logs and the log history of a process
operationId: process-3-get-report
parameters:
- description: Process ID
@@ -2964,11 +2809,9 @@
security:
- ApiKeyAuth: []
summary: Get the logs of a process
tags:
- v16.7.2
/api/v3/process/{id}/state:
get:
description: Get the state and progress data of a process.
description: Get the state and progress data of a process
operationId: process-3-get-state
parameters:
- description: Process ID
@@ -2994,11 +2837,9 @@
security:
- ApiKeyAuth: []
summary: Get the state of a process
tags:
- v16.7.2
/api/v3/rtmp:
get:
description: List all currently publishing RTMP streams.
description: List all currently publishing RTMP streams
operationId: rtmp-3-list-channels
produces:
- application/json
@@ -3012,11 +2853,9 @@
security:
- ApiKeyAuth: []
summary: List all publishing RTMP streams
tags:
- v16.7.2
/api/v3/session:
get:
description: Get a summary of all active and past sessions of the given collector.
description: Get a summary of all active and past sessions of the given collector
operationId: session-3-summary
parameters:
- description: Comma separated list of collectors
@@ -3033,12 +2872,10 @@
security:
- ApiKeyAuth: []
summary: Get a summary of all active and past sessions
tags:
- v16.7.2
/api/v3/session/active:
get:
description: Get a minimal summary of all active sessions (i.e. number of sessions,
bandwidth).
bandwidth)
operationId: session-3-current
parameters:
- description: Comma separated list of collectors
@@ -3055,11 +2892,9 @@
security:
- ApiKeyAuth: []
summary: Get a minimal summary of all active sessions
tags:
- v16.7.2
/api/v3/skills:
get:
description: List all detected FFmpeg capabilities.
description: List all detected FFmpeg capabilities
operationId: skills-3
produces:
- application/json
@@ -3071,11 +2906,9 @@
security:
- ApiKeyAuth: []
summary: FFmpeg capabilities
tags:
- v16.7.2
/api/v3/skills/reload:
get:
description: Refresh the available FFmpeg capabilities.
description: Refresh the available FFmpeg capabilities
operationId: skills-3-reload
produces:
- application/json
@@ -3087,8 +2920,6 @@
security:
- ApiKeyAuth: []
summary: Refresh FFmpeg capabilities
tags:
- v16.7.2
/api/v3/srt:
get:
description: List all currently publishing SRT streams. This endpoint is EXPERIMENTAL
@@ -3106,8 +2937,6 @@
security:
- ApiKeyAuth: []
summary: List all publishing SRT streams
tags:
- v16.9.0
/api/v3/widget/process/{id}:
get:
description: Fetch minimal statistics about a process, which is not protected
@@ -3131,96 +2960,6 @@
schema:
$ref: '#/definitions/api.Error'
summary: Fetch minimal statistics about a process
tags:
- v16.7.2
/memfs/{path}:
delete:
description: Remove a file from the memory filesystem
operationId: memfs-delete-file
parameters:
- description: Path to file
in: path
name: path
required: true
type: string
produces:
- text/plain
responses:
"200":
description: OK
schema:
type: string
"404":
description: Not Found
schema:
$ref: '#/definitions/api.Error'
security:
- BasicAuth: []
summary: Remove a file from the memory filesystem
get:
description: Fetch a file from the memory filesystem
operationId: memfs-get-file
parameters:
- description: Path to file
in: path
name: path
required: true
type: string
produces:
- application/data
- application/json
responses:
"200":
description: OK
schema:
type: file
"301":
description: Moved Permanently
schema:
type: string
"404":
description: Not Found
schema:
$ref: '#/definitions/api.Error'
summary: Fetch a file from the memory filesystem
put:
consumes:
- application/data
description: Writes or overwrites a file on the memory filesystem
operationId: memfs-put-file
parameters:
- description: Path to file
in: path
name: path
required: true
type: string
- description: File data
in: body
name: data
required: true
schema:
items:
type: integer
type: array
produces:
- text/plain
- application/json
responses:
"201":
description: Created
schema:
type: string
"204":
description: No Content
schema:
type: string
"507":
description: Insufficient Storage
schema:
$ref: '#/definitions/api.Error'
security:
- BasicAuth: []
summary: Add a file to the memory filesystem
/metrics:
get:
description: Prometheus metrics
@@ -379,12 +379,13 @@ func (p *parser) Parse(line string) uint64 {
	}

	// Calculate if any of the processed frames stalled.
	// If one number of frames in an output is the same as before, then pFrames becomes 0.
	pFrames := p.stats.main.diff.frame
	// If one number of frames in an output is the same as
	// before, then pFrames becomes 0.
	var pFrames uint64 = 0

	pFrames = p.stats.main.diff.frame

	if isFFmpegProgress {
		// Only consider the outputs
		pFrames = 1
		for i := range p.stats.output {
			pFrames *= p.stats.output[i].diff.frame
		}
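Editorial note: the multiplication in the hunk above is the stall check itself: a product of per-output frame deltas is zero exactly when at least one output produced no new frames since the last progress line. A toy sketch with made-up numbers:

package main

import "fmt"

func main() {
	outputs := []uint64{25, 25, 0} // frames produced per output since the last tick
	var pFrames uint64 = 1
	for _, d := range outputs {
		pFrames *= d // any zero delta forces the product to zero
	}
	fmt.Println(pFrames == 0) // true: the third output stalled
}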
@@ -4,6 +4,9 @@ import (
	"github.com/gobwas/glob"
)

// Match returns whether the name matches the glob pattern, also considering
// one or several optional separators. An error is only returned if the pattern
// is invalid.
func Match(pattern, name string, separators ...rune) (bool, error) {
	g, err := glob.Compile(pattern, separators...)
	if err != nil {
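Editorial note: a sketch of what the separators argument changes in the wrapped gobwas/glob call; with a separator given, '*' no longer crosses it:

package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// With '/' declared as a separator, '*' matches any run of
	// non-separator characters only.
	g, _ := glob.Compile("/a/*", '/')
	fmt.Println(g.Match("/a/b"))   // true
	fmt.Println(g.Match("/a/b/c")) // false: '*' stops at the separator
}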
68	go.mod
@@ -3,30 +3,31 @@ module github.com/datarhei/core/v16
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/99designs/gqlgen v0.17.20
|
||||
github.com/Masterminds/semver/v3 v3.1.1
|
||||
github.com/99designs/gqlgen v0.17.16
|
||||
github.com/atrox/haikunatorgo/v2 v2.0.1
|
||||
github.com/caddyserver/certmagic v0.17.2
|
||||
github.com/datarhei/gosrt v0.3.1
|
||||
github.com/caddyserver/certmagic v0.16.2
|
||||
github.com/datarhei/gosrt v0.2.1-0.20220817080252-d44df04a3845
|
||||
github.com/datarhei/joy4 v0.0.0-20220914170649-23c70d207759
|
||||
github.com/go-playground/validator/v10 v10.11.1
|
||||
github.com/go-playground/validator/v10 v10.11.0
|
||||
github.com/gobwas/glob v0.2.3
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/invopop/jsonschema v0.4.0
|
||||
github.com/joho/godotenv v1.4.0
|
||||
github.com/labstack/echo/v4 v4.9.1
|
||||
github.com/labstack/echo/v4 v4.9.0
|
||||
github.com/lithammer/shortuuid/v4 v4.0.0
|
||||
github.com/mattn/go-isatty v0.0.16
|
||||
github.com/minio/minio-go/v7 v7.0.39
|
||||
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
|
||||
github.com/prometheus/client_golang v1.13.1
|
||||
github.com/shirou/gopsutil/v3 v3.22.10
|
||||
github.com/stretchr/testify v1.8.1
|
||||
github.com/swaggo/echo-swagger v1.3.5
|
||||
github.com/swaggo/swag v1.8.7
|
||||
github.com/vektah/gqlparser/v2 v2.5.1
|
||||
github.com/prometheus/client_golang v1.13.0
|
||||
github.com/shirou/gopsutil/v3 v3.22.8
|
||||
github.com/stretchr/testify v1.8.0
|
||||
github.com/swaggo/echo-swagger v1.3.4
|
||||
github.com/swaggo/swag v1.8.5
|
||||
github.com/vektah/gqlparser/v2 v2.5.0
|
||||
github.com/xeipuuv/gojsonschema v1.2.0
|
||||
golang.org/x/mod v0.6.0
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4
|
||||
golang.org/x/net v0.0.0-20220907135653-1e95f45603a7
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -37,6 +38,7 @@ require (
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
@@ -50,42 +52,50 @@ require (
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/iancoleman/orderedmap v0.2.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.1.2 // indirect
|
||||
github.com/labstack/gommon v0.4.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.15.9 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.1.0 // indirect
|
||||
github.com/labstack/gommon v0.3.1 // indirect
|
||||
github.com/leodido/go-urn v1.2.1 // indirect
|
||||
github.com/libdns/libdns v0.2.1 // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect
|
||||
github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/mholt/acmez v1.0.4 // indirect
|
||||
github.com/miekg/dns v1.1.50 // indirect
|
||||
github.com/miekg/dns v1.1.46 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/minio/sha256-simd v1.0.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/rs/xid v1.4.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.10 // indirect
|
||||
github.com/tklauser/numcpus v0.5.0 // indirect
|
||||
github.com/urfave/cli/v2 v2.8.1 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/valyala/fasttemplate v1.2.2 // indirect
|
||||
github.com/valyala/fasttemplate v1.2.1 // indirect
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/multierr v1.8.0 // indirect
|
||||
go.uber.org/zap v1.23.0 // indirect
|
||||
golang.org/x/crypto v0.1.0 // indirect
|
||||
golang.org/x/net v0.1.0 // indirect
|
||||
golang.org/x/sys v0.1.0 // indirect
|
||||
golang.org/x/text v0.4.0 // indirect
|
||||
golang.org/x/time v0.1.0 // indirect
|
||||
golang.org/x/tools v0.2.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.21.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
|
||||
golang.org/x/sys v0.0.0-20220907062415-87db552b00fd // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 // indirect
|
||||
golang.org/x/tools v0.1.12 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
gopkg.in/ini.v1 v1.66.6 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
138
go.sum
138
go.sum
@@ -31,15 +31,13 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/99designs/gqlgen v0.17.20 h1:O7WzccIhKB1dm+7g6dhQcULINftfiLSBg2l/mwbpJMw=
|
||||
github.com/99designs/gqlgen v0.17.20/go.mod h1:Mja2HI23kWT1VRH09hvWshFgOzKswpO20o4ScpJIES4=
|
||||
github.com/99designs/gqlgen v0.17.16 h1:tTIw/cQ/uvf3iXIb2I6YSkdaDkmHmH2W2eZkVe0IVLA=
|
||||
github.com/99designs/gqlgen v0.17.16/go.mod h1:dnJdUkgfh8iw8CEx2hhTdgTQO/GvVWKLcm/kult5gwI=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
|
||||
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
|
||||
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
|
||||
github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
@@ -65,8 +63,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/caddyserver/certmagic v0.17.2 h1:o30seC1T/dBqBCNNGNHWwj2i5/I/FMjBbTAhjADP3nE=
|
||||
github.com/caddyserver/certmagic v0.17.2/go.mod h1:ouWUuC490GOLJzkyN35eXfV8bSbwMwSf4bdhkIxtdQE=
|
||||
github.com/caddyserver/certmagic v0.16.2 h1:k2n3LkkUG3aMUK/kckMuF9/0VFo+0FtMX3drPYESbmQ=
|
||||
github.com/caddyserver/certmagic v0.16.2/go.mod h1:PgLIr/dSJa+WA7t7z6Je5xuS/e5A/GFCPHRuZ1QP+MQ=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
@@ -80,8 +78,8 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/datarhei/gosrt v0.3.1 h1:9A75hIvnY74IUFyeguqYXh1lsGF8Qt8fjxJS2Ewr12Q=
|
||||
github.com/datarhei/gosrt v0.3.1/go.mod h1:M2nl2WPrawncUc1FtUBK6gZX4tpZRC7FqL8NjOdBZV0=
|
||||
github.com/datarhei/gosrt v0.2.1-0.20220817080252-d44df04a3845 h1:nlVb4EVMwdVUwH6e10WZrx4lW0n2utnlE+4ILMPyD5o=
|
||||
github.com/datarhei/gosrt v0.2.1-0.20220817080252-d44df04a3845/go.mod h1:wyoTu+DG45XRuCgEq/y+R8nhZCrJbOyQKn+SwNrNVZ8=
|
||||
github.com/datarhei/joy4 v0.0.0-20220914170649-23c70d207759 h1:h8NyekuQSDvLIsZVTV172m5/RVArXkEM/cnHaUzszQU=
|
||||
github.com/datarhei/joy4 v0.0.0-20220914170649-23c70d207759/go.mod h1:Jcw/6jZDQQmPx8A7INEkXmuEF7E9jjBbSTfVSLwmiQw=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -89,6 +87,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
|
||||
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
|
||||
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
@@ -126,8 +126,8 @@ github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb
|
||||
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
|
||||
github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho=
|
||||
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
|
||||
github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
|
||||
github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
|
||||
github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2BOGlCyvTqsp/xIw=
|
||||
github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
@@ -176,8 +176,8 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
@@ -214,6 +214,7 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
@@ -222,8 +223,12 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/cpuid/v2 v2.1.2 h1:XhdX4fqAJUA0yj+kUwMavO0hHrSPAecYdYf1ZmxHvak=
|
||||
github.com/klauspost/cpuid/v2 v2.1.2/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
||||
github.com/klauspost/compress v1.15.9 h1:wKRjX6JRtDdrE9qwa4b/Cip7ACOshUI4smpCQanqjSY=
|
||||
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0=
|
||||
github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
@@ -235,12 +240,11 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/labstack/echo/v4 v4.7.2/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks=
|
||||
github.com/labstack/echo/v4 v4.9.0 h1:wPOF1CE6gvt/kmbMR4dGzWvHMPT+sAEUJOwOTtvITVY=
|
||||
github.com/labstack/echo/v4 v4.9.0/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks=
|
||||
github.com/labstack/echo/v4 v4.9.1 h1:GliPYSpzGKlyOhqIbG8nmHBo3i1saKWFOgh41AN3b+Y=
|
||||
github.com/labstack/echo/v4 v4.9.1/go.mod h1:Pop5HLc+xoc4qhTZ1ip6C0RtP7Z+4VzRLWZZFKqbbjo=
|
||||
github.com/labstack/gommon v0.3.1 h1:OomWaJXm7xR6L1HmEtGyQf26TEn7V6X88mktX9kee9o=
|
||||
github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
|
||||
github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8=
|
||||
github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM=
|
||||
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/libdns/libdns v0.2.1 h1:Wu59T7wSHRgtA0cfxC+n1c/e+O3upJGWytknkmFEDis=
|
||||
@@ -249,8 +253,8 @@ github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw
|
||||
github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y=
|
||||
github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc=
|
||||
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
|
||||
github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY=
|
||||
github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE=
|
||||
github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 h1:aczX6NMOtt6L4YT0fQvKkDK6LZEtdOso9sUH89V1+P0=
|
||||
github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281/go.mod h1:lc+czkgO/8F7puNki5jk8QyujbfK1LOT7Wl0ON2hxyk=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
@@ -258,25 +262,33 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0
|
||||
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
|
||||
github.com/matryer/moq v0.2.7/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk=
|
||||
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mholt/acmez v1.0.4 h1:N3cE4Pek+dSolbsofIkAYz6H1d3pE+2G0os7QHslf80=
|
||||
github.com/mholt/acmez v1.0.4/go.mod h1:qFGLZ4u+ehWINeJZjzPlsnjJBCPAADWTcIqE/7DAYQY=
|
||||
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
|
||||
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
|
||||
github.com/miekg/dns v1.1.46 h1:uzwpxRtSVxtcIZmz/4Uz6/Rn7G11DvsaslXoy5LxQio=
|
||||
github.com/miekg/dns v1.1.46/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.39 h1:upnbu1jCGOqEvrGSpRauSN9ZG7RCHK7VHxXS8Vmg2zk=
|
||||
github.com/minio/minio-go/v7 v7.0.39/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
|
||||
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
|
||||
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
|
||||
github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
@@ -304,14 +316,13 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.1 h1:3gMjIY2+/hzmqhtUC/aQNYldJA6DtH3CgQvwS+02K1c=
|
||||
github.com/prometheus/client_golang v1.13.1/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU=
|
||||
github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
@@ -329,23 +340,26 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
|
||||
github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shirou/gopsutil/v3 v3.22.10 h1:4KMHdfBRYXGF9skjDWiL4RA2N+E8dRdodU/bOZpPoVg=
|
||||
github.com/shirou/gopsutil/v3 v3.22.10/go.mod h1:QNza6r4YQoydyCfo6rH0blGfKahgibh4dQmV5xdFkQk=
github.com/shirou/gopsutil/v3 v3.22.8 h1:a4s3hXogo5mE2PfdfJIonDbstO/P+9JszdfhAHSzD9Y=
github.com/shirou/gopsutil/v3 v3.22.8/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -353,16 +367,16 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/swaggo/echo-swagger v1.3.5 h1:kCx1wvX5AKhjI6Ykt48l3PTsfL9UD40ZROOx/tYzWyY=
github.com/swaggo/echo-swagger v1.3.5/go.mod h1:3IMHd2Z8KftdWFEEjGmv6QpWj370LwMCOfovuh7vF34=
github.com/swaggo/echo-swagger v1.3.4 h1:8B+yVqjVm7cMy4QBLRUuRaOzrTVAqZahcrgrOSdpC5I=
github.com/swaggo/echo-swagger v1.3.4/go.mod h1:vh8QAdbHtTXwTSaWzc1Nby7zMYJd/g0FwQyArmrFHA8=
github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a h1:kAe4YSu0O0UFn1DowNo2MY5p6xzqtJ/wQ7LZynSvGaY=
github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w=
github.com/swaggo/swag v1.8.1/go.mod h1:ugemnJsPZm/kRwFUnzBlbHRd0JY9zE1M4F+uy2pAaPQ=
github.com/swaggo/swag v1.8.7 h1:2K9ivTD3teEO+2fXV6zrZKDqk5IuU2aJtBDo8U7omWU=
github.com/swaggo/swag v1.8.7/go.mod h1:ezQVUUhly8dludpVk+/PuwJWvLLanB13ygV5Pr9enSk=
github.com/swaggo/swag v1.8.5 h1:7NgtfXsXE+jrcOwRyiftGKW7Ppydj7tZiVenuRf1fE4=
github.com/swaggo/swag v1.8.5/go.mod h1:jMLeXOOmYyjk8PvHTsXBdrubsNd9gUJTTCzL5iBnseg=
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
@@ -373,11 +387,10 @@ github.com/urfave/cli/v2 v2.8.1 h1:CGuYNZF9IKZY/rfBe3lJpccSoIY1ytfvmgQT90cNOl4=
github.com/urfave/cli/v2 v2.8.1/go.mod h1:Z41J9TPoffeoqP0Iza0YbAhGvymRdZAd2uPmZ5JxRdY=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4=
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo=
github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
github.com/vektah/gqlparser/v2 v2.5.1 h1:ZGu+bquAY23jsxDRcYpWjttRZrUz07LbiY77gUOHcr4=
github.com/vektah/gqlparser/v2 v2.5.1/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs=
github.com/vektah/gqlparser/v2 v2.5.0 h1:GwEwy7AJsqPWrey0bHnn+3JLaHLZVT66wY/+O+Tf9SU=
github.com/vektah/gqlparser/v2 v2.5.0/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -393,7 +406,6 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -401,17 +413,14 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY=
go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -422,8 +431,9 @@ golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -456,9 +466,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -499,9 +508,8 @@ golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.0.0-20220907135653-1e95f45603a7 h1:1WGATo9HAhkWMbfyuVU0tEFP88OIkUvwaHFveQPvzCQ=
golang.org/x/net v0.0.0-20220907135653-1e95f45603a7/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -520,7 +528,6 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -575,28 +582,26 @@ golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220907062415-87db552b00fd h1:AZeIEzg+8RCELJYq8w+ODLVxFgLMMigSwO/ffKPEd9U=
golang.org/x/sys v0.0.0-20220907062415-87db552b00fd/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ=
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -642,9 +647,8 @@ golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -735,6 +739,8 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.66.6 h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI=
gopkg.in/ini.v1 v1.66.6/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -4,16 +4,8 @@ import (
    "time"

    "github.com/datarhei/core/v16/config"
    v1config "github.com/datarhei/core/v16/config/v1"
    v2config "github.com/datarhei/core/v16/config/v2"
)

// ConfigVersion is used to only unmarshal the version field in order
// find out which SetConfig should be used.
type ConfigVersion struct {
    Version int64 `json:"version"`
}

// ConfigData embeds config.Data
type ConfigData struct {
    config.Data
@@ -30,68 +22,11 @@ type Config struct {
    Overrides []string `json:"overrides"`
}

type SetConfigV1 struct {
    v1config.Data
}

// NewSetConfigV1 creates a new SetConfigV1 based on the current
// config with downgrading.
func NewSetConfigV1(cfg *config.Config) SetConfigV1 {
    v2data, _ := config.DowngradeV3toV2(&cfg.Data)
    v1data, _ := v2config.DowngradeV2toV1(v2data)

    data := SetConfigV1{
        Data: *v1data,
    }

    return data
}

// MergeTo merges the v1 config into the current config.
func (s *SetConfigV1) MergeTo(cfg *config.Config) {
    v2data, _ := config.DowngradeV3toV2(&cfg.Data)

    v2config.MergeV1ToV2(v2data, &s.Data)
    config.MergeV2toV3(&cfg.Data, v2data)
}

type SetConfigV2 struct {
    v2config.Data
}

// NewSetConfigV2 creates a new SetConfigV2 based on the current
// config with downgrading.
func NewSetConfigV2(cfg *config.Config) SetConfigV2 {
    v2data, _ := config.DowngradeV3toV2(&cfg.Data)

    data := SetConfigV2{
        Data: *v2data,
    }

    return data
}

// MergeTo merges the v2 config into the current config.
func (s *SetConfigV2) MergeTo(cfg *config.Config) {
    config.MergeV2toV3(&cfg.Data, &s.Data)
}

// SetConfig embeds config.Data. It is used to send a new config to the server.
type SetConfig struct {
    config.Data
}

// NewSetConfig converts a config.Config into a SetConfig in order to prepopulate
// a SetConfig with the current values. The uploaded config can have missing fields that
// will be filled with the current values after unmarshalling the JSON.
func NewSetConfig(cfg *config.Config) SetConfig {
    data := SetConfig{
        cfg.Data,
    }

    return data
}

// MergeTo merges a sent config into a config.Config
func (rscfg *SetConfig) MergeTo(cfg *config.Config) {
    cfg.ID = rscfg.ID
@@ -116,7 +51,18 @@ func (rscfg *SetConfig) MergeTo(cfg *config.Config) {
    cfg.Router = rscfg.Router
}

// Unmarshal converts a config.Config to a Config.
// NewSetConfig converts a config.Config into a RestreamerSetConfig in order to prepopulate
// a RestreamerSetConfig with the current values. The uploaded config can have missing fields that
// will be filled with the current values after unmarshalling the JSON.
func NewSetConfig(cfg *config.Config) SetConfig {
    data := SetConfig{
        cfg.Data,
    }

    return data
}

// Unmarshal converts a config.Config to a RestreamerConfig.
func (c *Config) Unmarshal(cfg *config.Config) {
    if cfg == nil {
        return
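For context, the SetConfigV1/SetConfigV2 types in the hunk above implement a two-pass decode: ConfigVersion is unmarshalled first to pick the right struct, and the body is then unmarshalled into a value pre-filled with the current config, so omitted fields keep their values. A minimal, self-contained sketch of that pattern (hypothetical names, not code from this repository):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type configVersion struct {
        Version int64 `json:"version"`
    }

    type configV3 struct {
        Version int64    `json:"version"`
        Name    []string `json:"name"`
    }

    func decode(body []byte) (configV3, error) {
        // First pass: read only the version field.
        v := configVersion{}
        if err := json.Unmarshal(body, &v); err != nil {
            return configV3{}, err
        }

        if v.Version != 3 {
            return configV3{}, fmt.Errorf("unsupported config version %d", v.Version)
        }

        // Second pass: start from current values so a partial body only
        // overrides the fields it actually contains.
        cfg := configV3{Version: 3, Name: []string{"localhost"}}
        err := json.Unmarshal(body, &cfg)

        return cfg, err
    }

    func main() {
        cfg, err := decode([]byte(`{"version": 3, "name": ["foobar.com"]}`))
        fmt.Println(cfg, err)
    }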
@@ -6,3 +6,10 @@ type FileInfo struct {
    Size    int64 `json:"size_bytes" jsonschema:"minimum=0"`
    LastMod int64 `json:"last_modified" jsonschema:"minimum=0"`
}

// FilesystemInfo represents information about a filesystem
type FilesystemInfo struct {
    Name  string `json:"name"`
    Type  string `json:"type"`
    Mount string `json:"mount"`
}
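For reference, an entry of the new filesystem listing marshals according to the json tags above. A small sketch with illustrative values:

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/datarhei/core/v16/http/api"
    )

    func main() {
        // Values are made up; the json tags come from the struct above.
        info := api.FilesystemInfo{Name: "disk", Type: "disk", Mount: "/data"}

        out, _ := json.Marshal(info)
        fmt.Println(string(out)) // {"name":"disk","type":"disk","mount":"/data"}
    }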
@@ -33,9 +33,9 @@ type SRTStatistics struct {

    ByteSent       uint64 `json:"sent_bytes"`         // Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
    ByteRecv       uint64 `json:"recv_bytes"`         // Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
    ByteSentUnique uint64 `json:"sent_unique_bytes"`  // Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
    ByteSentUnique uint64 `json:"sent_unique__bytes"` // Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
    ByteRecvUnique uint64 `json:"recv_unique_bytes"`  // Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
    ByteRcvLoss    uint64 `json:"recv_loss_bytes"`    // Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size
    ByteRcvLoss    uint64 `json:"recv_loss__bytes"`   // Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size
    ByteRetrans    uint64 `json:"sent_retrans_bytes"` // Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
    ByteSndDrop    uint64 `json:"send_drop_bytes"`    // Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
    ByteRcvDrop    uint64 `json:"recv_drop_bytes"`    // Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
@@ -68,54 +68,34 @@ type SRTStatistics struct {
func (s *SRTStatistics) Unmarshal(ss *gosrt.Statistics) {
    s.MsTimeStamp = ss.MsTimeStamp

    s.PktSent = ss.Accumulated.PktSent
    s.PktRecv = ss.Accumulated.PktRecv
    s.PktSentUnique = ss.Accumulated.PktSentUnique
    s.PktRecvUnique = ss.Accumulated.PktRecvUnique
    s.PktSndLoss = ss.Accumulated.PktSendLoss
    s.PktRcvLoss = ss.Accumulated.PktRecvLoss
    s.PktRetrans = ss.Accumulated.PktRetrans
    s.PktRcvRetrans = ss.Accumulated.PktRecvRetrans
    s.PktSentACK = ss.Accumulated.PktSentACK
    s.PktRecvACK = ss.Accumulated.PktRecvACK
    s.PktSentNAK = ss.Accumulated.PktSentNAK
    s.PktRecvNAK = ss.Accumulated.PktRecvNAK
    s.PktSentKM = ss.Accumulated.PktSentKM
    s.PktRecvKM = ss.Accumulated.PktRecvKM
    s.UsSndDuration = ss.Accumulated.UsSndDuration
    s.PktSndDrop = ss.Accumulated.PktSendDrop
    s.PktRcvDrop = ss.Accumulated.PktRecvDrop
    s.PktRcvUndecrypt = ss.Accumulated.PktRecvUndecrypt
    s.PktSent = ss.PktSent
    s.PktRecv = ss.PktRecv
    s.PktSentUnique = ss.PktSentUnique
    s.PktRecvUnique = ss.PktRecvUnique
    s.PktSndLoss = ss.PktSndLoss
    s.PktRcvLoss = ss.PktRcvLoss
    s.PktRetrans = ss.PktRetrans
    s.PktRcvRetrans = ss.PktRcvRetrans
    s.PktSentACK = ss.PktSentACK
    s.PktRecvACK = ss.PktRecvACK
    s.PktSentNAK = ss.PktSentNAK
    s.PktRecvNAK = ss.PktRecvNAK
    s.PktSentKM = ss.PktSentKM
    s.PktRecvKM = ss.PktRecvKM
    s.UsSndDuration = ss.UsSndDuration
    s.PktSndDrop = ss.PktSndDrop
    s.PktRcvDrop = ss.PktRcvDrop
    s.PktRcvUndecrypt = ss.PktRcvUndecrypt

    s.ByteSent = ss.Accumulated.ByteSent
    s.ByteRecv = ss.Accumulated.ByteRecv
    s.ByteSentUnique = ss.Accumulated.ByteSentUnique
    s.ByteRecvUnique = ss.Accumulated.ByteRecvUnique
    s.ByteRcvLoss = ss.Accumulated.ByteRecvLoss
    s.ByteRetrans = ss.Accumulated.ByteRetrans
    s.ByteSndDrop = ss.Accumulated.ByteSendDrop
    s.ByteRcvDrop = ss.Accumulated.ByteRecvDrop
    s.ByteRcvUndecrypt = ss.Accumulated.ByteRecvUndecrypt

    s.UsPktSndPeriod = ss.Instantaneous.UsPktSendPeriod
    s.PktFlowWindow = ss.Instantaneous.PktFlowWindow
    s.PktFlightSize = ss.Instantaneous.PktFlightSize
    s.MsRTT = ss.Instantaneous.MsRTT
    s.MbpsBandwidth = ss.Instantaneous.MbpsLinkCapacity
    s.ByteAvailSndBuf = ss.Instantaneous.ByteAvailSendBuf
    s.ByteAvailRcvBuf = ss.Instantaneous.ByteAvailRecvBuf
    s.MbpsMaxBW = ss.Instantaneous.MbpsMaxBW
    s.ByteMSS = ss.Instantaneous.ByteMSS
    s.PktSndBuf = ss.Instantaneous.PktSendBuf
    s.ByteSndBuf = ss.Instantaneous.ByteSendBuf
    s.MsSndBuf = ss.Instantaneous.MsSendBuf
    s.MsSndTsbPdDelay = ss.Instantaneous.MsSendTsbPdDelay
    s.PktRcvBuf = ss.Instantaneous.PktRecvBuf
    s.ByteRcvBuf = ss.Instantaneous.ByteRecvBuf
    s.MsRcvBuf = ss.Instantaneous.MsRecvBuf
    s.MsRcvTsbPdDelay = ss.Instantaneous.MsRecvTsbPdDelay
    s.PktReorderTolerance = ss.Instantaneous.PktReorderTolerance
    s.PktRcvAvgBelatedTime = ss.Instantaneous.PktRecvAvgBelatedTime
    s.ByteSent = ss.ByteSent
    s.ByteRecv = ss.ByteRecv
    s.ByteSentUnique = ss.ByteSentUnique
    s.ByteRecvUnique = ss.ByteRecvUnique
    s.ByteRcvLoss = ss.ByteRcvLoss
    s.ByteRetrans = ss.ByteRetrans
    s.ByteSndDrop = ss.ByteSndDrop
    s.ByteRcvDrop = ss.ByteRcvDrop
    s.ByteRcvUndecrypt = ss.ByteRcvUndecrypt
}

type SRTLog struct {
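In the hunk above the statistics move between a flat gosrt layout and the grouped Accumulated/Instantaneous layout; derived metrics work the same either way. A small hedged example of a loss ratio computed from the accumulated counters (plain helper, not repository code):

    // lossPercent returns the share of sent packets that were reported
    // lost, in percent. Inputs correspond to PktSent and PktSndLoss above.
    func lossPercent(pktSent, pktSndLoss uint64) float64 {
        if pktSent == 0 {
            return 0
        }

        return float64(pktSndLoss) / float64(pktSent) * 100
    }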
25
http/fs/fs.go
Normal file
@@ -0,0 +1,25 @@
package fs

import (
    "github.com/datarhei/core/v16/http/cache"
    "github.com/datarhei/core/v16/io/fs"
)

type FS struct {
    Name       string
    Mountpoint string

    AllowWrite bool

    EnableAuth bool
    Username   string
    Password   string

    DefaultFile        string
    DefaultContentType string
    Gzip               bool

    Filesystem fs.Filesystem

    Cache cache.Cacher
}
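The FS struct added above bundles everything a mount needs. A sketch of how such a descriptor could be filled for a read-only mount serving index.html by default (the helper function and all values are assumptions for illustration):

    package fs

    import (
        "github.com/datarhei/core/v16/http/cache"
        iofs "github.com/datarhei/core/v16/io/fs"
    )

    // diskMount is a hypothetical constructor for a read-only mount.
    func diskMount(filesystem iofs.Filesystem, c cache.Cacher) FS {
        return FS{
            Name:               "disk",
            Mountpoint:         "/",
            AllowWrite:         false,
            DefaultFile:        "index.html",
            DefaultContentType: "text/html",
            Gzip:               true,
            Filesystem:         filesystem,
            Cache:              c,
        }
    }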
@@ -1,13 +1,11 @@
package api

import (
    "io"
    "net/http"

    cfgstore "github.com/datarhei/core/v16/config/store"
    cfgvars "github.com/datarhei/core/v16/config/vars"
    "github.com/datarhei/core/v16/encoding/json"
    "github.com/datarhei/core/v16/config"
    "github.com/datarhei/core/v16/http/api"
    "github.com/datarhei/core/v16/http/handler/util"

    "github.com/labstack/echo/v4"
)
@@ -15,11 +13,11 @@ import (
// The ConfigHandler type provides handler functions for reading and manipulating
// the current config.
type ConfigHandler struct {
    store cfgstore.Store
    store config.Store
}

// NewConfig return a new Config type. You have to provide a valid config store.
func NewConfig(store cfgstore.Store) *ConfigHandler {
func NewConfig(store config.Store) *ConfigHandler {
    return &ConfigHandler{
        store: store,
    }
@@ -28,7 +26,6 @@ func NewConfig(store cfgstore.Store) *ConfigHandler {
// Get returns the currently active Restreamer configuration
// @Summary Retrieve the currently active Restreamer configuration
// @Description Retrieve the currently active Restreamer configuration
// @Tags v16.7.2
// @ID config-3-get
// @Produce json
// @Success 200 {object} api.Config
@@ -46,7 +43,6 @@ func (p *ConfigHandler) Get(c echo.Context) error {
// Set will set the given configuration as new active configuration
// @Summary Update the current Restreamer configuration
// @Description Update the current Restreamer configuration by providing a complete or partial configuration. Fields that are not provided will not be changed.
// @Tags v16.7.2
// @ID config-3-set
// @Accept json
// @Produce json
@@ -57,73 +53,25 @@ func (p *ConfigHandler) Get(c echo.Context) error {
// @Security ApiKeyAuth
// @Router /api/v3/config [put]
func (p *ConfigHandler) Set(c echo.Context) error {
    version := api.ConfigVersion{}
    cfg := p.store.Get()

    req := c.Request()
    // Set the current config as default config value. This will
    // allow to set a partial config without destroying the other
    // values.
    setConfig := api.NewSetConfig(cfg)

    body, err := io.ReadAll(req.Body)
    if err != nil {
    if err := util.ShouldBindJSON(c, &setConfig); err != nil {
        return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err)
    }

    if err := json.Unmarshal(body, &version); err != nil {
        return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", json.FormatError(body, err))
    }

    cfg := p.store.Get()

    // For each version, set the current config as default config value. This will
    // allow to set a partial config without destroying the other values.
    if version.Version == 1 {
        // Downgrade to v1 in order to have a populated v1 config
        v1SetConfig := api.NewSetConfigV1(cfg)

        if err := json.Unmarshal(body, &v1SetConfig); err != nil {
            return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", json.FormatError(body, err))
        }

        if err := c.Validate(v1SetConfig); err != nil {
            return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err)
        }

        // Merge it into the current config
        v1SetConfig.MergeTo(cfg)
    } else if version.Version == 2 {
        // Downgrade to v2 in order to have a populated v2 config
        v2SetConfig := api.NewSetConfigV2(cfg)

        if err := json.Unmarshal(body, &v2SetConfig); err != nil {
            return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", json.FormatError(body, err))
        }

        if err := c.Validate(v2SetConfig); err != nil {
            return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err)
        }

        // Merge it into the current config
        v2SetConfig.MergeTo(cfg)
    } else if version.Version == 3 {
        v3SetConfig := api.NewSetConfig(cfg)

        if err := json.Unmarshal(body, &v3SetConfig); err != nil {
            return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", json.FormatError(body, err))
        }

        if err := c.Validate(v3SetConfig); err != nil {
            return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err)
        }

        // Merge it into the current config
        v3SetConfig.MergeTo(cfg)
    } else {
        return api.Err(http.StatusBadRequest, "Invalid config version", "version %d", version.Version)
    }
    // Merge it into the current config
    setConfig.MergeTo(cfg)

    // Now we make a copy from the config and merge it with the environment
    // variables. If this configuration is valid, we will store the un-merged
    // one to disk.

    mergedConfig := cfg.Clone()
    mergedConfig := config.NewConfigFrom(cfg)
    mergedConfig.Merge()

    // Validate the new merged config
@@ -131,7 +79,7 @@ func (p *ConfigHandler) Set(c echo.Context) error {
    if mergedConfig.HasErrors() {
        errors := make(map[string][]string)

        mergedConfig.Messages(func(level string, v cfgvars.Variable, message string) {
        mergedConfig.Messages(func(level string, v config.Variable, message string) {
            if level != "error" {
                return
            }
@@ -158,7 +106,6 @@ func (p *ConfigHandler) Set(c echo.Context) error {
// Reload will reload the currently active configuration
// @Summary Reload the currently active configuration
// @Description Reload the currently active configuration. This will trigger a restart of the Restreamer.
// @Tags v16.7.2
// @ID config-3-reload
// @Produce plain
// @Success 200 {string} string "OK"
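With the handler above, a partial update only needs to carry the fields that change; in the version-dispatch branch the version field selects the v1/v2/v3 interpretation. A hedged client sketch (host, port, and the exact JSON shape are assumptions, not documented API):

    package main

    import (
        "bytes"
        "fmt"
        "net/http"
    )

    func main() {
        // A partial body: every omitted field keeps its current value.
        body := []byte(`{"version": 3, "host": {"name": ["foobar.com"]}}`)

        req, err := http.NewRequest(http.MethodPut, "http://localhost:8080/api/v3/config", bytes.NewReader(body))
        if err != nil {
            panic(err)
        }
        req.Header.Set("Content-Type", "application/json")

        res, err := http.DefaultClient.Do(req)
        if err != nil {
            panic(err)
        }
        defer res.Body.Close()

        fmt.Println(res.Status)
    }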
@@ -7,28 +7,25 @@ import (
    "testing"

    "github.com/datarhei/core/v16/config"
    "github.com/datarhei/core/v16/config/store"
    v1 "github.com/datarhei/core/v16/config/v1"
    "github.com/datarhei/core/v16/http/mock"
    "github.com/labstack/echo/v4"
    "github.com/stretchr/testify/require"
)

func getDummyConfigRouter() (*echo.Echo, store.Store) {
func getDummyConfigRouter() *echo.Echo {
    router := mock.DummyEcho()

    config := store.NewDummy()
    config := config.NewDummyStore()

    handler := NewConfig(config)

    router.Add("GET", "/", handler.Get)
    router.Add("PUT", "/", handler.Set)

    return router, config
    return router
}

func TestConfigGet(t *testing.T) {
    router, _ := getDummyConfigRouter()
    router := getDummyConfigRouter()

    mock.Request(t, http.StatusOK, router, "GET", "/", nil)

@@ -36,7 +33,7 @@ func TestConfigGet(t *testing.T) {
}

func TestConfigSetConflict(t *testing.T) {
    router, _ := getDummyConfigRouter()
    router := getDummyConfigRouter()

    var data bytes.Buffer

@@ -47,86 +44,18 @@ func TestConfigSetConflict(t *testing.T) {
}

func TestConfigSet(t *testing.T) {
    router, store := getDummyConfigRouter()

    storedcfg := store.Get()

    require.Equal(t, []string{}, storedcfg.Host.Name)
    router := getDummyConfigRouter()

    var data bytes.Buffer
    encoder := json.NewEncoder(&data)

    // Setting a new v3 config
    cfg := config.New()
    cfg.FFmpeg.Binary = "true"
    cfg.DB.Dir = "."
    cfg.Storage.Disk.Dir = "."
    cfg.Storage.MimeTypes = ""
    cfg.Storage.Disk.Cache.Types.Allow = []string{".aaa"}
    cfg.Storage.Disk.Cache.Types.Block = []string{".zzz"}
    cfg.Host.Name = []string{"foobar.com"}

    encoder := json.NewEncoder(&data)
    encoder.Encode(cfg)

    mock.Request(t, http.StatusOK, router, "PUT", "/", &data)

    storedcfg = store.Get()

    require.Equal(t, []string{"foobar.com"}, storedcfg.Host.Name)
    require.Equal(t, []string{".aaa"}, cfg.Storage.Disk.Cache.Types.Allow)
    require.Equal(t, []string{".zzz"}, cfg.Storage.Disk.Cache.Types.Block)
    require.Equal(t, "cert@datarhei.com", cfg.TLS.Email)

    // Setting a complete v1 config
    cfgv1 := v1.New()
    cfgv1.FFmpeg.Binary = "true"
    cfgv1.DB.Dir = "."
    cfgv1.Storage.Disk.Dir = "."
    cfgv1.Storage.MimeTypes = ""
    cfgv1.Storage.Disk.Cache.Types = []string{".bbb"}
    cfgv1.Host.Name = []string{"foobar.com"}

    data.Reset()

    encoder.Encode(cfgv1)

    mock.Request(t, http.StatusOK, router, "PUT", "/", &data)

    storedcfg = store.Get()

    require.Equal(t, []string{"foobar.com"}, storedcfg.Host.Name)
    require.Equal(t, []string{".bbb"}, storedcfg.Storage.Disk.Cache.Types.Allow)
    require.Equal(t, []string{".zzz"}, storedcfg.Storage.Disk.Cache.Types.Block)
    require.Equal(t, "cert@datarhei.com", cfg.TLS.Email)

    // Setting a partial v1 config
    type customconfig struct {
        Version int `json:"version"`
        Storage struct {
            Disk struct {
                Cache struct {
                    Types []string `json:"types"`
                } `json:"cache"`
            } `json:"disk"`
        } `json:"storage"`
    }

    customcfg := customconfig{
        Version: 1,
    }

    customcfg.Storage.Disk.Cache.Types = []string{".ccc"}

    data.Reset()

    encoder.Encode(customcfg)

    mock.Request(t, http.StatusOK, router, "PUT", "/", &data)

    storedcfg = store.Get()

    require.Equal(t, []string{"foobar.com"}, storedcfg.Host.Name)
    require.Equal(t, []string{".ccc"}, storedcfg.Storage.Disk.Cache.Types.Allow)
    require.Equal(t, []string{".zzz"}, storedcfg.Storage.Disk.Cache.Types.Block)
    require.Equal(t, "cert@datarhei.com", cfg.TLS.Email)
}
@@ -1,215 +0,0 @@
package api

import (
    "net/http"
    "path/filepath"
    "sort"

    "github.com/datarhei/core/v16/http/api"
    "github.com/datarhei/core/v16/http/cache"
    "github.com/datarhei/core/v16/http/handler"
    "github.com/datarhei/core/v16/http/handler/util"
    "github.com/datarhei/core/v16/io/fs"

    "github.com/labstack/echo/v4"
)

// The DiskFSHandler type provides handlers for manipulating a filesystem
type DiskFSHandler struct {
    cache      cache.Cacher
    filesystem fs.Filesystem
    handler    *handler.DiskFSHandler
}

// NewDiskFS return a new DiskFS type. You have to provide a filesystem to act on and optionally
// a Cacher where files will be purged from if the Cacher is related to the filesystem.
func NewDiskFS(fs fs.Filesystem, cache cache.Cacher) *DiskFSHandler {
    return &DiskFSHandler{
        cache:      cache,
        filesystem: fs,
        handler:    handler.NewDiskFS(fs, cache),
    }
}

// GetFile returns the file at the given path
// @Summary Fetch a file from the filesystem
// @Description Fetch a file from the filesystem. The contents of that file are returned.
// @Tags v16.7.2
// @ID diskfs-3-get-file
// @Produce application/data
// @Produce json
// @Param path path string true "Path to file"
// @Success 200 {file} byte
// @Success 301 {string} string
// @Failure 404 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/disk/{path} [get]
func (h *DiskFSHandler) GetFile(c echo.Context) error {
    path := util.PathWildcardParam(c)

    mimeType := c.Response().Header().Get(echo.HeaderContentType)
    c.Response().Header().Del(echo.HeaderContentType)

    file := h.filesystem.Open(path)
    if file == nil {
        return api.Err(http.StatusNotFound, "File not found", path)
    }

    stat, _ := file.Stat()

    if stat.IsDir() {
        return api.Err(http.StatusNotFound, "File not found", path)
    }

    defer file.Close()

    c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))

    if path, ok := stat.IsLink(); ok {
        path = filepath.Clean("/" + path)

        if path[0] == '/' {
            path = path[1:]
        }

        return c.Redirect(http.StatusMovedPermanently, path)
    }

    c.Response().Header().Set(echo.HeaderContentType, mimeType)

    if c.Request().Method == "HEAD" {
        return c.Blob(http.StatusOK, "application/data", nil)
    }

    return c.Stream(http.StatusOK, "application/data", file)
}

// PutFile adds or overwrites a file at the given path
// @Summary Add a file to the filesystem
// @Description Writes or overwrites a file on the filesystem
// @Tags v16.7.2
// @ID diskfs-3-put-file
// @Accept application/data
// @Produce text/plain
// @Produce json
// @Param path path string true "Path to file"
// @Param data body []byte true "File data"
// @Success 201 {string} string
// @Success 204 {string} string
// @Failure 507 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/disk/{path} [put]
func (h *DiskFSHandler) PutFile(c echo.Context) error {
    path := util.PathWildcardParam(c)

    c.Response().Header().Del(echo.HeaderContentType)

    req := c.Request()

    _, created, err := h.filesystem.Store(path, req.Body)
    if err != nil {
        return api.Err(http.StatusBadRequest, "%s", err)
    }

    if h.cache != nil {
        h.cache.Delete(path)
    }

    c.Response().Header().Set("Content-Location", req.URL.RequestURI())

    if created {
        return c.String(http.StatusCreated, path)
    }

    return c.NoContent(http.StatusNoContent)
}

// DeleteFile removes a file from the filesystem
// @Summary Remove a file from the filesystem
// @Description Remove a file from the filesystem
// @Tags v16.7.2
// @ID diskfs-3-delete-file
// @Produce text/plain
// @Param path path string true "Path to file"
// @Success 200 {string} string
// @Failure 404 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/disk/{path} [delete]
func (h *DiskFSHandler) DeleteFile(c echo.Context) error {
    path := util.PathWildcardParam(c)

    c.Response().Header().Del(echo.HeaderContentType)

    size := h.filesystem.Delete(path)

    if size < 0 {
        return api.Err(http.StatusNotFound, "File not found", path)
    }

    if h.cache != nil {
        h.cache.Delete(path)
    }

    return c.String(http.StatusOK, "OK")
}

// ListFiles lists all files on the filesystem
// @Summary List all files on the filesystem
// @Description List all files on the filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.
// @Tags v16.7.2
// @ID diskfs-3-list-files
// @Produce json
// @Param glob query string false "glob pattern for file names"
// @Param sort query string false "none, name, size, lastmod"
// @Param order query string false "asc, desc"
// @Success 200 {array} api.FileInfo
// @Security ApiKeyAuth
// @Router /api/v3/fs/disk [get]
func (h *DiskFSHandler) ListFiles(c echo.Context) error {
    pattern := util.DefaultQuery(c, "glob", "")
    sortby := util.DefaultQuery(c, "sort", "none")
    order := util.DefaultQuery(c, "order", "asc")

    files := h.filesystem.List(pattern)

    var sortFunc func(i, j int) bool

    switch sortby {
    case "name":
        if order == "desc" {
            sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() }
        } else {
            sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() }
        }
    case "size":
        if order == "desc" {
            sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() }
        } else {
            sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() }
        }
    default:
        if order == "asc" {
            sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) }
        } else {
            sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) }
        }
    }

    sort.Slice(files, sortFunc)

    fileinfos := []api.FileInfo{}

    for _, f := range files {
        if f.IsDir() {
            continue
        }

        fileinfos = append(fileinfos, api.FileInfo{
            Name:    f.Name(),
            Size:    f.Size(),
            LastMod: f.ModTime().Unix(),
        })
    }

    return c.JSON(http.StatusOK, fileinfos)
}
146
http/handler/api/filesystems.go
Normal file
@@ -0,0 +1,146 @@
package api

import (
    "net/http"

    "github.com/datarhei/core/v16/http/api"
    "github.com/datarhei/core/v16/http/handler"
    "github.com/datarhei/core/v16/http/handler/util"

    "github.com/labstack/echo/v4"
)

type FSConfig struct {
    Type       string
    Mountpoint string
    Handler    *handler.FSHandler
}

// The FSHandler type provides handlers for manipulating a filesystem
type FSHandler struct {
    filesystems map[string]FSConfig
}

// NewFS returns a new FSHandler type. You have to provide a filesystem to act on.
func NewFS(filesystems map[string]FSConfig) *FSHandler {
    return &FSHandler{
        filesystems: filesystems,
    }
}

// GetFileAPI returns the file at the given path
// @Summary Fetch a file from a filesystem
// @Description Fetch a file from a filesystem
// @ID filesystem-3-get-file
// @Produce application/data
// @Produce json
// @Param name path string true "Name of the filesystem"
// @Param path path string true "Path to file"
// @Success 200 {file} byte
// @Success 301 {string} string
// @Failure 404 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/{name}/{path} [get]
func (h *FSHandler) GetFile(c echo.Context) error {
    name := util.PathParam(c, "name")

    config, ok := h.filesystems[name]
    if !ok {
        return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name)
    }

    return config.Handler.GetFile(c)
}

// PutFileAPI adds or overwrites a file at the given path
// @Summary Add a file to a filesystem
// @Description Writes or overwrites a file on a filesystem
// @ID filesystem-3-put-file
// @Accept application/data
// @Produce text/plain
// @Produce json
// @Param name path string true "Name of the filesystem"
// @Param path path string true "Path to file"
// @Param data body []byte true "File data"
// @Success 201 {string} string
// @Success 204 {string} string
// @Failure 507 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/{name}/{path} [put]
func (h *FSHandler) PutFile(c echo.Context) error {
    name := util.PathParam(c, "name")

    config, ok := h.filesystems[name]
    if !ok {
        return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name)
    }

    return config.Handler.PutFile(c)
}

// DeleteFileAPI removes a file from a filesystem
// @Summary Remove a file from a filesystem
// @Description Remove a file from a filesystem
// @ID filesystem-3-delete-file
// @Produce text/plain
// @Param name path string true "Name of the filesystem"
// @Param path path string true "Path to file"
// @Success 200 {string} string
// @Failure 404 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/{name}/{path} [delete]
func (h *FSHandler) DeleteFile(c echo.Context) error {
    name := util.PathParam(c, "name")

    config, ok := h.filesystems[name]
    if !ok {
        return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name)
    }

    return config.Handler.DeleteFile(c)
}

// ListFiles lists all files on a filesystem
// @Summary List all files on a filesystem
// @Description List all files on a filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.
// @ID filesystem-3-list-files
// @Produce json
// @Param name path string true "Name of the filesystem"
// @Param glob query string false "glob pattern for file names"
// @Param sort query string false "none, name, size, lastmod"
// @Param order query string false "asc, desc"
// @Success 200 {array} api.FileInfo
// @Security ApiKeyAuth
// @Router /api/v3/fs/{name} [get]
func (h *FSHandler) ListFiles(c echo.Context) error {
    name := util.PathParam(c, "name")

    config, ok := h.filesystems[name]
    if !ok {
        return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name)
    }

    return config.Handler.ListFiles(c)
}

// List lists all registered filesystems
// @Summary List all registered filesystems
// @Description List all registered filesystems
// @ID filesystem-3-list
// @Produce json
// @Success 200 {array} api.FilesystemInfo
// @Security ApiKeyAuth
// @Router /api/v3/fs [get]
func (h *FSHandler) List(c echo.Context) error {
    fss := []api.FilesystemInfo{}

    for name, config := range h.filesystems {
        fss = append(fss, api.FilesystemInfo{
            Name:  name,
            Type:  config.Type,
            Mount: config.Mountpoint,
        })
    }

    return c.JSON(http.StatusOK, fss)
}
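The generic FSHandler above replaces the dedicated disk/mem handlers removed elsewhere in this compare. A sketch of how the handler map could be wired into echo routes (the route syntax and handler values are assumptions for illustration, not the repository's actual wiring):

    package main

    import (
        "github.com/datarhei/core/v16/http/handler"
        handlerapi "github.com/datarhei/core/v16/http/handler/api"

        "github.com/labstack/echo/v4"
    )

    // registerFS mounts the generic filesystem API under /api/v3/fs.
    func registerFS(e *echo.Echo, disk, mem *handler.FSHandler) {
        filesystems := map[string]handlerapi.FSConfig{
            "disk": {Type: "disk", Mountpoint: "/data", Handler: disk},
            "mem":  {Type: "mem", Mountpoint: "/memfs", Handler: mem},
        }

        h := handlerapi.NewFS(filesystems)

        e.GET("/api/v3/fs", h.List)
        e.GET("/api/v3/fs/:name", h.ListFiles)
        e.GET("/api/v3/fs/:name/*", h.GetFile)
        e.PUT("/api/v3/fs/:name/*", h.PutFile)
        e.DELETE("/api/v3/fs/:name/*", h.DeleteFile)
    }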
@@ -31,7 +31,6 @@ func NewLog(buffer log.BufferWriter) *LogHandler {
// Log returns the last log lines of the Restreamer application
// @Summary Application log
// @Description Get the last log lines of the Restreamer application
// @Tags v16.7.2
// @ID log-3
// @Param format query string false "Format of the list of log events (*console, raw)"
// @Produce json
@@ -1,177 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
|
||||
"github.com/datarhei/core/v16/http/api"
|
||||
"github.com/datarhei/core/v16/http/handler"
|
||||
"github.com/datarhei/core/v16/http/handler/util"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
)
|
||||
|
||||
// The MemFSHandler type provides handlers for manipulating a filesystem
|
||||
type MemFSHandler struct {
|
||||
filesystem fs.Filesystem
|
||||
handler *handler.MemFSHandler
|
||||
}
|
||||
|
||||
// NewMemFS return a new MemFS type. You have to provide a filesystem to act on.
|
||||
func NewMemFS(fs fs.Filesystem) *MemFSHandler {
|
||||
return &MemFSHandler{
|
||||
filesystem: fs,
|
||||
handler: handler.NewMemFS(fs),
|
||||
}
|
||||
}
|
||||
|
||||
// GetFileAPI returns the file at the given path
|
||||
// @Summary Fetch a file from the memory filesystem
|
||||
// @Description Fetch a file from the memory filesystem
|
||||
// @Tags v16.7.2
|
||||
// @ID memfs-3-get-file
|
||||
// @Produce application/data
|
||||
// @Produce json
|
||||
// @Param path path string true "Path to file"
|
||||
// @Success 200 {file} byte
|
||||
// @Success 301 {string} string
|
||||
// @Failure 404 {object} api.Error
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/mem/{path} [get]
|
||||
func (h *MemFSHandler) GetFile(c echo.Context) error {
|
||||
return h.handler.GetFile(c)
|
||||
}
|
||||
|
||||
// PutFileAPI adds or overwrites a file at the given path
|
||||
// @Summary Add a file to the memory filesystem
|
||||
// @Description Writes or overwrites a file on the memory filesystem
|
||||
// @Tags v16.7.2
|
||||
// @ID memfs-3-put-file
|
||||
// @Accept application/data
|
||||
// @Produce text/plain
|
||||
// @Produce json
// @Param path path string true "Path to file"
// @Param data body []byte true "File data"
// @Success 201 {string} string
// @Success 204 {string} string
// @Failure 507 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/mem/{path} [put]
func (h *MemFSHandler) PutFile(c echo.Context) error {
	return h.handler.PutFile(c)
}

// DeleteFileAPI removes a file from the filesystem
// @Summary Remove a file from the memory filesystem
// @Description Remove a file from the memory filesystem
// @Tags v16.7.2
// @ID memfs-3-delete-file
// @Produce text/plain
// @Param path path string true "Path to file"
// @Success 200 {string} string
// @Failure 404 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/mem/{path} [delete]
func (h *MemFSHandler) DeleteFile(c echo.Context) error {
	return h.handler.DeleteFile(c)
}

// PatchFile creates a symbolic link to a file in the filesystem
// @Summary Create a link to a file in the memory filesystem
// @Description Create a link to a file in the memory filesystem. The file linked to has to exist.
// @Tags v16.7.2
// @ID memfs-3-patch
// @Accept application/data
// @Produce text/plain
// @Produce json
// @Param path path string true "Path to file"
// @Param url body string true "Path to the file to link to"
// @Success 201 {string} string
// @Failure 400 {object} api.Error
// @Security ApiKeyAuth
// @Router /api/v3/fs/mem/{path} [patch]
func (h *MemFSHandler) PatchFile(c echo.Context) error {
	path := util.PathWildcardParam(c)

	c.Response().Header().Del(echo.HeaderContentType)

	req := c.Request()

	body, err := io.ReadAll(req.Body)
	if err != nil {
		return api.Err(http.StatusBadRequest, "Failed reading request body", "%s", err)
	}

	u, err := url.Parse(string(body))
	if err != nil {
		return api.Err(http.StatusBadRequest, "Body doesn't contain a valid path", "%s", err)
	}

	if err := h.filesystem.Symlink(u.Path, path); err != nil {
		return api.Err(http.StatusBadRequest, "Failed to create symlink", "%s", err)
	}

	c.Response().Header().Set("Content-Location", req.URL.RequestURI())

	return c.String(http.StatusCreated, "")
}

// ListFiles lists all files on the filesystem
// @Summary List all files on the memory filesystem
// @Description List all files on the memory filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.
// @Tags v16.7.2
// @ID memfs-3-list-files
// @Produce json
// @Param glob query string false "glob pattern for file names"
// @Param sort query string false "none, name, size, lastmod"
// @Param order query string false "asc, desc"
// @Success 200 {array} api.FileInfo
// @Security ApiKeyAuth
// @Router /api/v3/fs/mem [get]
func (h *MemFSHandler) ListFiles(c echo.Context) error {
	pattern := util.DefaultQuery(c, "glob", "")
	sortby := util.DefaultQuery(c, "sort", "none")
	order := util.DefaultQuery(c, "order", "asc")

	files := h.filesystem.List(pattern)

	var sortFunc func(i, j int) bool

	switch sortby {
	case "name":
		if order == "desc" {
			sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() }
		} else {
			sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() }
		}
	case "size":
		if order == "desc" {
			sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() }
		} else {
			sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() }
		}
	default:
		if order == "asc" {
			sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) }
		} else {
			sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) }
		}
	}

	sort.Slice(files, sortFunc)

	var fileinfos []api.FileInfo = make([]api.FileInfo, len(files))

	for i, f := range files {
		fileinfos[i] = api.FileInfo{
			Name:    f.Name(),
			Size:    f.Size(),
			LastMod: f.ModTime().Unix(),
		}
	}

	return c.JSON(http.StatusOK, fileinfos)
}
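ListFiles above picks a comparator once, then hands it to a single sort.Slice call. A minimal, self-contained sketch of that pattern (the fileStub type and values are invented for illustration):

package main

import (
	"fmt"
	"sort"
)

type fileStub struct {
	name string
	size int64
}

// pickLess mirrors the handler's switch: select the less-function up
// front instead of branching inside the comparator on every call.
func pickLess(files []fileStub, sortby, order string) func(i, j int) bool {
	desc := order == "desc"
	switch sortby {
	case "name":
		if desc {
			return func(i, j int) bool { return files[i].name > files[j].name }
		}
		return func(i, j int) bool { return files[i].name < files[j].name }
	default: // sort by size
		if desc {
			return func(i, j int) bool { return files[i].size > files[j].size }
		}
		return func(i, j int) bool { return files[i].size < files[j].size }
	}
}

func main() {
	files := []fileStub{{"b.ts", 10}, {"a.ts", 20}}
	sort.Slice(files, pickLess(files, "name", "asc"))
	fmt.Println(files) // [{a.ts 20} {b.ts 10}]
}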
@@ -32,7 +32,6 @@ func NewMetrics(config MetricsConfig) *MetricsHandler {
// Describe the known metrics
// @Summary List all known metrics with their description and labels
// @Description List all known metrics with their description and labels
// @Tags v16.10.0
// @ID metrics-3-describe
// @Produce json
// @Success 200 {array} api.MetricsDescription
@@ -61,7 +60,6 @@ func (r *MetricsHandler) Describe(c echo.Context) error {
// Query the collected metrics
// @Summary Query the collected metrics
// @Description Query the collected metrics
// @Tags v16.7.2
// @ID metrics-3-metrics
// @Accept json
// @Produce json
@@ -31,7 +31,6 @@ func NewPlayout(restream restream.Restreamer) *PlayoutHandler {
// Status return the current playout status
// @Summary Get the current playout status
// @Description Get the current playout status of an input of a process
// @Tags v16.7.2
// @ID process-3-playout-status
// @Produce json
// @Param id path string true "Process ID"
@@ -85,7 +84,6 @@ func (h *PlayoutHandler) Status(c echo.Context) error {
// Keyframe returns the last keyframe
// @Summary Get the last keyframe
// @Description Get the last keyframe of an input of a process. The extension of the name determines the return type.
// @Tags v16.7.2
// @ID process-3-playout-keyframe
// @Produce image/jpeg
// @Produce image/png
@@ -135,7 +133,6 @@ func (h *PlayoutHandler) Keyframe(c echo.Context) error {
// EncodeErrorframe encodes the errorframe
// @Summary Encode the errorframe
// @Description Immediately encode the errorframe (if available and looping)
// @Tags v16.7.2
// @ID process-3-playout-errorframencode
// @Produce text/plain
// @Produce json
@@ -176,7 +173,6 @@ func (h *PlayoutHandler) EncodeErrorframe(c echo.Context) error {
// SetErrorframe sets an errorframe
// @Summary Upload an error frame
// @Description Upload an error frame which will be encoded immediately
// @Tags v16.7.2
// @ID process-3-playout-errorframe
// @Produce text/plain
// @Produce json
@@ -225,7 +221,6 @@ func (h *PlayoutHandler) SetErrorframe(c echo.Context) error {
// ReopenInput closes the current input stream
// @Summary Close the current input stream
// @Description Close the current input stream such that it will be automatically re-opened
// @Tags v16.7.2
// @ID process-3-playout-reopen-input
// @Produce plain
// @Param id path string true "Process ID"
@@ -265,7 +260,6 @@ func (h *PlayoutHandler) ReopenInput(c echo.Context) error {
// SetStream replaces the current stream
// @Summary Switch to a new stream
// @Description Replace the current stream with the one from the given URL. The switch will only happen if the stream parameters match.
// @Tags v16.7.2
// @ID process-3-playout-stream
// @Produce text/plain
// @Produce json
@@ -27,7 +27,6 @@ func NewRestream(restream restream.Restreamer) *RestreamHandler {
// Add adds a new process
// @Summary Add a new process
// @Description Add a new FFmpeg process
// @Tags v16.7.2
// @ID process-3-add
// @Accept json
// @Produce json
@@ -69,7 +68,6 @@ func (h *RestreamHandler) Add(c echo.Context) error {
// GetAll returns all known processes
// @Summary List all known processes
// @Description List all known processes. Use the query parameter to filter the listed processes.
// @Tags v16.7.2
// @ID process-3-get-all
// @Produce json
// @Param filter query string false "Comma separated list of fields (config, state, report, metadata) that will be part of the output. If empty, all fields will be part of the output."
@@ -120,7 +118,6 @@ func (h *RestreamHandler) GetAll(c echo.Context) error {
// Get returns the process with the given ID
// @Summary List a process by its ID
// @Description List a process by its ID. Use the filter parameter to specify the level of detail of the output.
// @Tags v16.7.2
// @ID process-3-get
// @Produce json
// @Param id path string true "Process ID"
@@ -144,7 +141,6 @@ func (h *RestreamHandler) Get(c echo.Context) error {
// Delete deletes the process with the given ID
// @Summary Delete a process by its ID
// @Description Delete a process by its ID
// @Tags v16.7.2
// @ID process-3-delete
// @Produce json
// @Param id path string true "Process ID"
@@ -168,8 +164,7 @@ func (h *RestreamHandler) Delete(c echo.Context) error {

// Update replaces an existing process
// @Summary Replace an existing process
// @Description Replace an existing process.
// @Tags v16.7.2
// @Description Replace an existing process. This is a shortcut for DELETE+POST.
// @ID process-3-update
// @Accept json
// @Produce json
@@ -211,7 +206,6 @@ func (h *RestreamHandler) Update(c echo.Context) error {
// Command issues a command to a process
// @Summary Issue a command to a process
// @Description Issue a command to a process: start, stop, reload, restart
// @Tags v16.7.2
// @ID process-3-command
// @Accept json
// @Produce json
@@ -254,7 +248,6 @@ func (h *RestreamHandler) Command(c echo.Context) error {
// GetConfig returns the configuration of a process
// @Summary Get the configuration of a process
// @Description Get the configuration of a process. This is the configuration as provided by Add or Update.
// @Tags v16.7.2
// @ID process-3-get-config
// @Produce json
// @Param id path string true "Process ID"
@@ -279,8 +272,7 @@ func (h *RestreamHandler) GetConfig(c echo.Context) error {

// GetState returns the current state of a process
// @Summary Get the state of a process
// @Description Get the state and progress data of a process.
// @Tags v16.7.2
// @Description Get the state and progress data of a process
// @ID process-3-get-state
// @Produce json
// @Param id path string true "Process ID"
@@ -305,8 +297,7 @@ func (h *RestreamHandler) GetState(c echo.Context) error {

// GetReport return the current log and the log history of a process
// @Summary Get the logs of a process
// @Description Get the logs and the log history of a process.
// @Tags v16.7.2
// @Description Get the logs and the log history of a process
// @ID process-3-get-report
// @Produce json
// @Param id path string true "Process ID"
@@ -331,8 +322,7 @@ func (h *RestreamHandler) GetReport(c echo.Context) error {

// Probe probes a process
// @Summary Probe a process
// @Description Probe an existing process to get a detailed stream information on the inputs.
// @Tags v16.7.2
// @Description Probe an existing process to get a detailed stream information on the inputs
// @ID process-3-probe
// @Produce json
// @Param id path string true "Process ID"
@@ -352,8 +342,7 @@ func (h *RestreamHandler) Probe(c echo.Context) error {

// Skills returns the detected FFmpeg capabilities
// @Summary FFmpeg capabilities
// @Description List all detected FFmpeg capabilities.
// @Tags v16.7.2
// @Description List all detected FFmpeg capabilities
// @ID skills-3
// @Produce json
// @Success 200 {object} api.Skills
@@ -370,8 +359,7 @@ func (h *RestreamHandler) Skills(c echo.Context) error {

// ReloadSkills will refresh the FFmpeg capabilities
// @Summary Refresh FFmpeg capabilities
// @Description Refresh the available FFmpeg capabilities.
// @Tags v16.7.2
// @Description Refresh the available FFmpeg capabilities
// @ID skills-3-reload
// @Produce json
// @Success 200 {object} api.Skills
@@ -390,7 +378,6 @@ func (h *RestreamHandler) ReloadSkills(c echo.Context) error {
// GetProcessMetadata returns the metadata stored with a process
// @Summary Retrieve JSON metadata stored with a process under a key
// @Description Retrieve the previously stored JSON metadata under the given key. If the key is empty, all metadata will be returned.
// @Tags v16.7.2
// @ID process-3-get-process-metadata
// @Produce json
// @Param id path string true "Process ID"
@@ -415,7 +402,6 @@ func (h *RestreamHandler) GetProcessMetadata(c echo.Context) error {
// SetProcessMetadata stores metadata with a process
// @Summary Add JSON metadata with a process under the given key
// @Description Add arbitrary JSON metadata under the given key. If the key exists, all already stored metadata with this key will be overwritten. If the key doesn't exist, it will be created.
// @Tags v16.7.2
// @ID process-3-set-process-metadata
// @Produce json
// @Param id path string true "Process ID"
@@ -450,7 +436,6 @@ func (h *RestreamHandler) SetProcessMetadata(c echo.Context) error {
// GetMetadata returns the metadata stored with the Restreamer
// @Summary Retrieve JSON metadata from a key
// @Description Retrieve the previously stored JSON metadata under the given key. If the key is empty, all metadata will be returned.
// @Tags v16.7.2
// @ID metadata-3-get
// @Produce json
// @Param key path string true "Key for data store"
@@ -473,7 +458,6 @@ func (h *RestreamHandler) GetMetadata(c echo.Context) error {
// SetMetadata stores metadata with the Restreamer
// @Summary Add JSON metadata under the given key
// @Description Add arbitrary JSON metadata under the given key. If the key exists, all already stored metadata with this key will be overwritten. If the key doesn't exist, it will be created.
// @Tags v16.7.2
// @ID metadata-3-set
// @Produce json
// @Param key path string true "Key for data store"
@@ -23,8 +23,7 @@ func NewRTMP(rtmp rtmp.Server) *RTMPHandler {

// ListChannels lists all currently publishing RTMP streams
// @Summary List all publishing RTMP streams
// @Description List all currently publishing RTMP streams.
// @Tags v16.7.2
// @Description List all currently publishing RTMP streams
// @ID rtmp-3-list-channels
// @Produce json
// @Success 200 {array} api.RTMPChannel
@@ -25,8 +25,7 @@ func NewSession(registry session.RegistryReader) *SessionHandler {

// Summary returns a summary of all active and past sessions
// @Summary Get a summary of all active and past sessions
// @Description Get a summary of all active and past sessions of the given collector.
// @Tags v16.7.2
// @Description Get a summary of all active and past sessions of the given collector
// @ID session-3-summary
// @Produce json
// @Security ApiKeyAuth
@@ -50,8 +49,7 @@ func (s *SessionHandler) Summary(c echo.Context) error {

// Active returns a list of active sessions
// @Summary Get a minimal summary of all active sessions
// @Description Get a minimal summary of all active sessions (i.e. number of sessions, bandwidth).
// @Tags v16.7.2
// @Description Get a minimal summary of all active sessions (i.e. number of sessions, bandwidth)
// @ID session-3-current
// @Produce json
// @Security ApiKeyAuth
@@ -24,7 +24,6 @@ func NewSRT(srt srt.Server) *SRTHandler {

// ListChannels lists all currently publishing SRT streams
// @Summary List all publishing SRT streams
// @Description List all currently publishing SRT streams. This endpoint is EXPERIMENTAL and may change in future.
// @Tags v16.9.0
// @ID srt-3-list-channels
// @Produce json
// @Success 200 {array} api.SRTChannels
@@ -2,7 +2,6 @@ package api

import (
	"net/http"
	"strings"

	"github.com/datarhei/core/v16/http/api"
	"github.com/datarhei/core/v16/http/handler/util"
@@ -34,7 +33,6 @@ func NewWidget(config WidgetConfig) *WidgetHandler {
// Get returns minimal public statistics about a process
// @Summary Fetch minimal statistics about a process
// @Description Fetch minimal statistics about a process, which is not protected by any auth.
// @Tags v16.7.2
// @ID widget-3-get
// @Produce json
// @Param id path string true "ID of a process"
@@ -75,19 +73,13 @@ func (w *WidgetHandler) Get(c echo.Context) error {
	summary := collector.Summary()

	for _, session := range summary.Active {
		if !strings.HasPrefix(session.Reference, process.Reference) {
			continue
		if session.Reference == process.Reference {
			data.CurrentSessions++
		}

		data.CurrentSessions++
	}

	for reference, s := range summary.Summary.References {
		if !strings.HasPrefix(reference, process.Reference) {
			continue
		}

		data.TotalSessions += s.TotalSessions
	if s, ok := summary.Summary.References[process.Reference]; ok {
		data.TotalSessions = s.TotalSessions
	}

	return c.JSON(http.StatusOK, data)
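The hunk above narrows the widget's session counting from prefix matching to exact reference matching, which is the "Fix widget session data" item from the changelog. A minimal, self-contained sketch of the before/after behavior (the session type and data are invented for illustration):

package main

import (
	"fmt"
	"strings"
)

type session struct{ Reference string }

// countByPrefix is the old behavior: any session whose reference merely
// starts with the process reference is counted, so "abc-2" also matches "abc".
func countByPrefix(active []session, ref string) int {
	n := 0
	for _, s := range active {
		if strings.HasPrefix(s.Reference, ref) {
			n++
		}
	}
	return n
}

// countExact is the new behavior: only sessions with the exact reference count.
func countExact(active []session, ref string) int {
	n := 0
	for _, s := range active {
		if s.Reference == ref {
			n++
		}
	}
	return n
}

func main() {
	active := []session{{"abc"}, {"abc-2"}, {"xyz"}}
	fmt.Println(countByPrefix(active, "abc"), countExact(active, "abc")) // 2 1
}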
@@ -1,88 +0,0 @@
package handler

import (
	"net/http"
	"path/filepath"

	"github.com/datarhei/core/v16/http/api"
	"github.com/datarhei/core/v16/http/cache"
	"github.com/datarhei/core/v16/http/handler/util"
	"github.com/datarhei/core/v16/io/fs"

	"github.com/labstack/echo/v4"
)

// The DiskFSHandler type provides handlers for manipulating a filesystem
type DiskFSHandler struct {
	cache      cache.Cacher
	filesystem fs.Filesystem
}

// NewDiskFS return a new DiskFS type. You have to provide a filesystem to act on and optionally
// a Cacher where files will be purged from if the Cacher is related to the filesystem.
func NewDiskFS(fs fs.Filesystem, cache cache.Cacher) *DiskFSHandler {
	return &DiskFSHandler{
		cache:      cache,
		filesystem: fs,
	}
}

// GetFile returns the file at the given path
// @Summary Fetch a file from the filesystem
// @Description Fetch a file from the filesystem. If the file is a directory, an index.html is returned, if it exists.
// @ID diskfs-get-file
// @Produce application/data
// @Produce json
// @Param path path string true "Path to file"
// @Success 200 {file} byte
// @Success 301 {string} string
// @Failure 404 {object} api.Error
// @Router /{path} [get]
func (h *DiskFSHandler) GetFile(c echo.Context) error {
	path := util.PathWildcardParam(c)

	mimeType := c.Response().Header().Get(echo.HeaderContentType)
	c.Response().Header().Del(echo.HeaderContentType)

	file := h.filesystem.Open(path)
	if file == nil {
		return api.Err(http.StatusNotFound, "File not found", path)
	}

	stat, _ := file.Stat()

	if stat.IsDir() {
		path = filepath.Join(path, "index.html")

		file.Close()

		file = h.filesystem.Open(path)
		if file == nil {
			return api.Err(http.StatusNotFound, "File not found", path)
		}

		stat, _ = file.Stat()
	}

	defer file.Close()

	c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))

	if path, ok := stat.IsLink(); ok {
		path = filepath.Clean("/" + path)

		if path[0] == '/' {
			path = path[1:]
		}

		return c.Redirect(http.StatusMovedPermanently, path)
	}

	c.Response().Header().Set(echo.HeaderContentType, mimeType)

	if c.Request().Method == "HEAD" {
		return c.Blob(http.StatusOK, "application/data", nil)
	}

	return c.Stream(http.StatusOK, "application/data", file)
}
164  http/handler/filesystem.go  Normal file
@@ -0,0 +1,164 @@
package handler

import (
	"net/http"
	"path/filepath"
	"sort"

	"github.com/datarhei/core/v16/http/api"
	"github.com/datarhei/core/v16/http/fs"
	"github.com/datarhei/core/v16/http/handler/util"

	"github.com/labstack/echo/v4"
)

// The FSHandler type provides handlers for manipulating a filesystem
type FSHandler struct {
	fs fs.FS
}

// NewFS return a new FSHandler type. You have to provide a filesystem to act on.
func NewFS(fs fs.FS) *FSHandler {
	return &FSHandler{
		fs: fs,
	}
}

func (h *FSHandler) GetFile(c echo.Context) error {
	path := util.PathWildcardParam(c)

	mimeType := c.Response().Header().Get(echo.HeaderContentType)
	c.Response().Header().Del(echo.HeaderContentType)

	file := h.fs.Filesystem.Open(path)
	if file == nil {
		return api.Err(http.StatusNotFound, "File not found", path)
	}

	stat, _ := file.Stat()

	if len(h.fs.DefaultFile) != 0 {
		if stat.IsDir() {
			path = filepath.Join(path, h.fs.DefaultFile)

			file.Close()

			file = h.fs.Filesystem.Open(path)
			if file == nil {
				return api.Err(http.StatusNotFound, "File not found", path)
			}

			stat, _ = file.Stat()
		}
	}

	defer file.Close()

	c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))

	if path, ok := stat.IsLink(); ok {
		path = filepath.Clean("/" + path)

		if path[0] == '/' {
			path = path[1:]
		}

		return c.Redirect(http.StatusMovedPermanently, path)
	}

	c.Response().Header().Set(echo.HeaderContentType, mimeType)

	if c.Request().Method == "HEAD" {
		return c.Blob(http.StatusOK, "application/data", nil)
	}

	return c.Stream(http.StatusOK, "application/data", file)
}

func (h *FSHandler) PutFile(c echo.Context) error {
	path := util.PathWildcardParam(c)

	c.Response().Header().Del(echo.HeaderContentType)

	req := c.Request()

	_, created, err := h.fs.Filesystem.Store(path, req.Body)
	if err != nil {
		return api.Err(http.StatusBadRequest, "%s", err)
	}

	if h.fs.Cache != nil {
		h.fs.Cache.Delete(path)
	}

	c.Response().Header().Set("Content-Location", req.URL.RequestURI())

	if created {
		return c.String(http.StatusCreated, "")
	}

	return c.NoContent(http.StatusNoContent)
}

func (h *FSHandler) DeleteFile(c echo.Context) error {
	path := util.PathWildcardParam(c)

	c.Response().Header().Del(echo.HeaderContentType)

	size := h.fs.Filesystem.Delete(path)

	if size < 0 {
		return api.Err(http.StatusNotFound, "File not found", path)
	}

	if h.fs.Cache != nil {
		h.fs.Cache.Delete(path)
	}

	return c.String(http.StatusOK, "Deleted: "+path)
}

func (h *FSHandler) ListFiles(c echo.Context) error {
	pattern := util.DefaultQuery(c, "glob", "")
	sortby := util.DefaultQuery(c, "sort", "none")
	order := util.DefaultQuery(c, "order", "asc")

	files := h.fs.Filesystem.List(pattern)

	var sortFunc func(i, j int) bool

	switch sortby {
	case "name":
		if order == "desc" {
			sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() }
		} else {
			sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() }
		}
	case "size":
		if order == "desc" {
			sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() }
		} else {
			sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() }
		}
	default:
		if order == "asc" {
			sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) }
		} else {
			sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) }
		}
	}

	sort.Slice(files, sortFunc)

	var fileinfos []api.FileInfo = make([]api.FileInfo, len(files))

	for i, f := range files {
		fileinfos[i] = api.FileInfo{
			Name:    f.Name(),
			Size:    f.Size(),
			LastMod: f.ModTime().Unix(),
		}
	}

	return c.JSON(http.StatusOK, fileinfos)
}
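A sketch of how the new generic handler might be wired up by hand. The http/fs.FS field names used here (Name, Mountpoint, DefaultFile, Filesystem, Cache) are inferred from their uses in this diff, not confirmed by a struct definition, so treat them as assumptions:

package main

import (
	httpfs "github.com/datarhei/core/v16/http/fs"
	"github.com/datarhei/core/v16/http/handler"
	iofs "github.com/datarhei/core/v16/io/fs"

	"github.com/labstack/echo/v4"
)

func main() {
	// In-memory backing store; MemConfig fields as shown in io/fs/mem.go below.
	mem := iofs.NewMemFilesystem(iofs.MemConfig{Name: "mem", Base: "/"})

	// Field names assumed from h.fs.Filesystem, h.fs.DefaultFile, h.fs.Cache
	// and from fs.Name / fs.Mountpoint in http/server.go below.
	h := handler.NewFS(httpfs.FS{
		Name:        "mem",
		Mountpoint:  "/memfs",
		DefaultFile: "index.html", // served when a directory is requested
		Filesystem:  mem,
	})

	e := echo.New()
	e.GET("/memfs/*", h.GetFile)
	e.HEAD("/memfs/*", h.GetFile)
	e.PUT("/memfs/*", h.PutFile)
	e.DELETE("/memfs/*", h.DeleteFile)
	e.Logger.Fatal(e.Start(":8080"))
}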
@@ -1,130 +0,0 @@
package handler

import (
	"net/http"
	"path/filepath"

	"github.com/datarhei/core/v16/http/api"
	"github.com/datarhei/core/v16/http/handler/util"
	"github.com/datarhei/core/v16/io/fs"

	"github.com/labstack/echo/v4"
)

// The MemFSHandler type provides handlers for manipulating a filesystem
type MemFSHandler struct {
	filesystem fs.Filesystem
}

// NewMemFS return a new MemFS type. You have to provide a filesystem to act on.
func NewMemFS(fs fs.Filesystem) *MemFSHandler {
	return &MemFSHandler{
		filesystem: fs,
	}
}

// GetFile returns the file at the given path
// @Summary Fetch a file from the memory filesystem
// @Description Fetch a file from the memory filesystem
// @ID memfs-get-file
// @Produce application/data
// @Produce json
// @Param path path string true "Path to file"
// @Success 200 {file} byte
// @Success 301 {string} string
// @Failure 404 {object} api.Error
// @Router /memfs/{path} [get]
func (h *MemFSHandler) GetFile(c echo.Context) error {
	path := util.PathWildcardParam(c)

	mimeType := c.Response().Header().Get(echo.HeaderContentType)
	c.Response().Header().Del(echo.HeaderContentType)

	file := h.filesystem.Open(path)
	if file == nil {
		return api.Err(http.StatusNotFound, "File not found", path)
	}

	defer file.Close()

	stat, _ := file.Stat()

	c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))

	if path, ok := stat.IsLink(); ok {
		path = filepath.Clean("/" + path)

		if path[0] == '/' {
			path = path[1:]
		}

		return c.Redirect(http.StatusMovedPermanently, path)
	}

	c.Response().Header().Set(echo.HeaderContentType, mimeType)

	if c.Request().Method == "HEAD" {
		return c.Blob(http.StatusOK, "application/data", nil)
	}

	return c.Stream(http.StatusOK, "application/data", file)
}

// PutFile adds or overwrites a file at the given path
// @Summary Add a file to the memory filesystem
// @Description Writes or overwrites a file on the memory filesystem
// @ID memfs-put-file
// @Accept application/data
// @Produce text/plain
// @Produce json
// @Param path path string true "Path to file"
// @Param data body []byte true "File data"
// @Success 201 {string} string
// @Success 204 {string} string
// @Failure 507 {object} api.Error
// @Security BasicAuth
// @Router /memfs/{path} [put]
func (h *MemFSHandler) PutFile(c echo.Context) error {
	path := util.PathWildcardParam(c)

	c.Response().Header().Del(echo.HeaderContentType)

	req := c.Request()

	_, created, err := h.filesystem.Store(path, req.Body)
	if err != nil {
		return api.Err(http.StatusBadRequest, "%s", err)
	}

	c.Response().Header().Set("Content-Location", req.URL.RequestURI())

	if created {
		return c.String(http.StatusCreated, "")
	}

	return c.NoContent(http.StatusNoContent)
}

// DeleteFile removes a file from the filesystem
// @Summary Remove a file from the memory filesystem
// @Description Remove a file from the memory filesystem
// @ID memfs-delete-file
// @Produce text/plain
// @Param path path string true "Path to file"
// @Success 200 {string} string
// @Failure 404 {object} api.Error
// @Security BasicAuth
// @Router /memfs/{path} [delete]
func (h *MemFSHandler) DeleteFile(c echo.Context) error {
	path := util.PathWildcardParam(c)

	c.Response().Header().Del(echo.HeaderContentType)

	size := h.filesystem.Delete(path)

	if size < 0 {
		return api.Err(http.StatusNotFound, "File not found", path)
	}

	return c.String(http.StatusOK, "Deleted: "+path)
}
25  http/middleware/cache/cache.go  vendored
@@ -57,18 +57,31 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {

		if req.Method != "GET" {
			res.Header().Set("X-Cache", "SKIP ONLYGET")
			return next(c)

			if err := next(c); err != nil {
				c.Error(err)
			}

			return nil
		}

		res.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.0f", config.Cache.TTL().Seconds()))

		key := strings.TrimPrefix(req.URL.Path, config.Prefix)

		if !config.Cache.IsExtensionCacheable(path.Ext(req.URL.Path)) {
			res.Header().Set("X-Cache", "SKIP EXT")
			return next(c)

			if err := next(c); err != nil {
				c.Error(err)
			}

			return nil
		}

		if obj, expireIn, _ := config.Cache.Get(key); obj == nil {
			// cache miss

			writer := res.Writer

			w := &cacheWriter{
@@ -92,7 +105,6 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {

			if res.Status != 200 {
				res.Header().Set("X-Cache", "SKIP NOTOK")
				res.Writer.WriteHeader(res.Status)
				return nil
			}

@@ -100,7 +112,6 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {

			if !config.Cache.IsSizeCacheable(size) {
				res.Header().Set("X-Cache", "SKIP TOOBIG")
				res.Writer.WriteHeader(res.Status)
				return nil
			}

@@ -112,13 +123,11 @@ func NewWithConfig(config Config) echo.MiddlewareFunc {

			if err := config.Cache.Put(key, o, size); err != nil {
				res.Header().Set("X-Cache", "SKIP TOOBIG")
				res.Writer.WriteHeader(res.Status)
				return nil
			}

			res.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.0f", expireIn.Seconds()))
			res.Header().Set("X-Cache", "MISS")
			res.Writer.WriteHeader(res.Status)
		} else {
			// cache hit
			o := obj.(*cacheObject)
@@ -181,5 +190,7 @@ func (w *cacheWriter) WriteHeader(code int) {
}

func (w *cacheWriter) Write(body []byte) (int, error) {
	return w.body.Write(body)
	n, err := w.body.Write(body)

	return n, err
}
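Wiring this middleware into an echo server follows the pattern exercised by cache_test.go below; the LRUConfig values here are the ones the test uses, and the route handler is a placeholder:

package main

import (
	"net/http"
	"time"

	"github.com/datarhei/core/v16/http/cache"
	mwcache "github.com/datarhei/core/v16/http/middleware/cache"

	"github.com/labstack/echo/v4"
)

func main() {
	// Same LRU settings as in cache_test.go: responses over 16 bytes are
	// skipped with "SKIP TOOBIG", .ts files are never cached.
	c, err := cache.NewLRUCache(cache.LRUConfig{
		TTL:             300 * time.Second,
		MaxSize:         0,
		MaxFileSize:     16,
		AllowExtensions: []string{".js"},
		BlockExtensions: []string{".ts"},
	})
	if err != nil {
		panic(err)
	}

	e := echo.New()
	e.Use(mwcache.NewWithConfig(mwcache.Config{Cache: c}))
	e.GET("/*", func(ctx echo.Context) error {
		return ctx.String(http.StatusOK, "hello")
	})
	e.Logger.Fatal(e.Start(":8080"))
}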
100  http/middleware/cache/cache_test.go  vendored
@@ -1,100 +0,0 @@
package cache

import (
	"net/http"
	"net/http/httptest"
	"testing"
	"time"

	"github.com/datarhei/core/v16/http/cache"

	"github.com/labstack/echo/v4"
	"github.com/stretchr/testify/require"
)

func TestCache(t *testing.T) {
	c, err := cache.NewLRUCache(cache.LRUConfig{
		TTL:             300 * time.Second,
		MaxSize:         0,
		MaxFileSize:     16,
		AllowExtensions: []string{".js"},
		BlockExtensions: []string{".ts"},
		Logger:          nil,
	})

	require.NoError(t, err)

	e := echo.New()
	req := httptest.NewRequest(http.MethodGet, "/found.js", nil)
	rec := httptest.NewRecorder()
	ctx := e.NewContext(req, rec)

	handler := NewWithConfig(Config{
		Cache: c,
	})(func(c echo.Context) error {
		if c.Request().URL.Path == "/found.js" {
			c.Response().Write([]byte("test"))
		} else if c.Request().URL.Path == "/toobig.js" {
			c.Response().Write([]byte("testtesttesttesttest"))
		} else if c.Request().URL.Path == "/blocked.ts" {
			c.Response().Write([]byte("blocked"))
		}

		c.Response().WriteHeader(http.StatusNotFound)
		return nil
	})

	handler(ctx)

	require.Equal(t, "test", rec.Body.String())
	require.Equal(t, 200, rec.Result().StatusCode)
	require.Equal(t, "MISS", rec.Result().Header.Get("x-cache"))

	rec = httptest.NewRecorder()
	ctx = e.NewContext(req, rec)

	handler(ctx)

	require.Equal(t, "test", rec.Body.String())
	require.Equal(t, 200, rec.Result().StatusCode)
	require.Equal(t, "HIT", rec.Result().Header.Get("x-cache")[:3])

	req = httptest.NewRequest(http.MethodGet, "/notfound.js", nil)
	rec = httptest.NewRecorder()
	ctx = e.NewContext(req, rec)

	handler(ctx)

	require.Equal(t, 404, rec.Result().StatusCode)
	require.Equal(t, "SKIP NOTOK", rec.Result().Header.Get("x-cache"))

	req = httptest.NewRequest(http.MethodGet, "/toobig.js", nil)
	rec = httptest.NewRecorder()
	ctx = e.NewContext(req, rec)

	handler(ctx)

	require.Equal(t, "testtesttesttesttest", rec.Body.String())
	require.Equal(t, 200, rec.Result().StatusCode)
	require.Equal(t, "SKIP TOOBIG", rec.Result().Header.Get("x-cache"))

	req = httptest.NewRequest(http.MethodGet, "/blocked.ts", nil)
	rec = httptest.NewRecorder()
	ctx = e.NewContext(req, rec)

	handler(ctx)

	require.Equal(t, "blocked", rec.Body.String())
	require.Equal(t, 200, rec.Result().StatusCode)
	require.Equal(t, "SKIP EXT", rec.Result().Header.Get("x-cache"))

	req = httptest.NewRequest(http.MethodPost, "/found.js", nil)
	rec = httptest.NewRecorder()
	ctx = e.NewContext(req, rec)

	handler(ctx)

	require.Equal(t, "test", rec.Body.String())
	require.Equal(t, 200, rec.Result().StatusCode)
	require.Equal(t, "SKIP ONLYGET", rec.Result().Header.Get("x-cache"))
}
243  http/server.go
@@ -29,19 +29,19 @@
package http

import (
	"fmt"
	"net/http"
	"strings"

	cfgstore "github.com/datarhei/core/v16/config/store"
	"github.com/datarhei/core/v16/http/cache"
	"github.com/datarhei/core/v16/config"
	"github.com/datarhei/core/v16/http/errorhandler"
	"github.com/datarhei/core/v16/http/fs"
	"github.com/datarhei/core/v16/http/graph/resolver"
	"github.com/datarhei/core/v16/http/handler"
	api "github.com/datarhei/core/v16/http/handler/api"
	"github.com/datarhei/core/v16/http/jwt"
	"github.com/datarhei/core/v16/http/router"
	"github.com/datarhei/core/v16/http/validator"
	"github.com/datarhei/core/v16/io/fs"
	"github.com/datarhei/core/v16/log"
	"github.com/datarhei/core/v16/monitor"
	"github.com/datarhei/core/v16/net"
@@ -79,28 +79,19 @@ type Config struct {
	Metrics       monitor.HistoryReader
	Prometheus    prometheus.Reader
	MimeTypesFile string
	DiskFS        fs.Filesystem
	MemFS         MemFSConfig
	Filesystems   []fs.FS
	IPLimiter     net.IPLimiter
	Profiling     bool
	Cors          CorsConfig
	RTMP          rtmp.Server
	SRT           srt.Server
	JWT           jwt.JWT
	Config        cfgstore.Store
	Cache         cache.Cacher
	Config        config.Store
	Sessions      session.RegistryReader
	Router        router.Router
	ReadOnly      bool
}

type MemFSConfig struct {
	EnableAuth bool
	Username   string
	Password   string
	Filesystem fs.Filesystem
}

type CorsConfig struct {
	Origins []string
}
@@ -114,8 +105,6 @@ type server struct {

	handler struct {
		about      *api.AboutHandler
		memfs      *handler.MemFSHandler
		diskfs     *handler.DiskFSHandler
		prometheus *handler.PrometheusHandler
		profiling  *handler.ProfilingHandler
		ping       *handler.PingHandler
@@ -127,8 +116,6 @@ type server struct {
		log      *api.LogHandler
		restream *api.RestreamHandler
		playout  *api.PlayoutHandler
		memfs    *api.MemFSHandler
		diskfs   *api.DiskFSHandler
		rtmp     *api.RTMPHandler
		srt      *api.SRTHandler
		config   *api.ConfigHandler
@@ -148,18 +135,12 @@ type server struct {
		hlsrewrite echo.MiddlewareFunc
	}

	memfs struct {
		enableAuth bool
		username   string
		password   string
	}

	diskfs fs.Filesystem

	gzip struct {
		mimetypes []string
	}

	filesystems map[string]*filesystem

	router        *echo.Echo
	mimeTypesFile string
	profiling     bool
@@ -167,32 +148,62 @@ type server struct {
	readOnly bool
}

type filesystem struct {
	fs.FS

	handler *handler.FSHandler
}

func NewServer(config Config) (Server, error) {
	s := &server{
		logger:        config.Logger,
		mimeTypesFile: config.MimeTypesFile,
		profiling:     config.Profiling,
		diskfs:        config.DiskFS,
		readOnly:      config.ReadOnly,
	}

	s.v3handler.diskfs = api.NewDiskFS(
		config.DiskFS,
		config.Cache,
	)
	s.filesystems = map[string]*filesystem{}

	s.handler.diskfs = handler.NewDiskFS(
		config.DiskFS,
		config.Cache,
	)
	corsPrefixes := map[string][]string{
		"/api": {"*"},
	}

	s.middleware.hlsrewrite = mwhlsrewrite.NewHLSRewriteWithConfig(mwhlsrewrite.HLSRewriteConfig{
		PathPrefix: config.DiskFS.Base(),
	})
	for _, fs := range config.Filesystems {
		if _, ok := s.filesystems[fs.Name]; ok {
			return nil, fmt.Errorf("the filesystem name '%s' is already in use", fs.Name)
		}

		s.memfs.enableAuth = config.MemFS.EnableAuth
		s.memfs.username = config.MemFS.Username
		s.memfs.password = config.MemFS.Password
		if !strings.HasPrefix(fs.Mountpoint, "/") {
			fs.Mountpoint = "/" + fs.Mountpoint
		}

		if !strings.HasSuffix(fs.Mountpoint, "/") {
			fs.Mountpoint = strings.TrimSuffix(fs.Mountpoint, "/")
		}

		if _, ok := corsPrefixes[fs.Mountpoint]; ok {
			return nil, fmt.Errorf("the mount point '%s' is already in use (%s)", fs.Mountpoint, fs.Name)
		}

		corsPrefixes[fs.Mountpoint] = config.Cors.Origins

		filesystem := &filesystem{
			FS:      fs,
			handler: handler.NewFS(fs),
		}

		s.filesystems[filesystem.Name] = filesystem

		if fs.Filesystem.Type() == "disk" {
			s.middleware.hlsrewrite = mwhlsrewrite.NewHLSRewriteWithConfig(mwhlsrewrite.HLSRewriteConfig{
				PathPrefix: fs.Filesystem.Base(),
			})
		}
	}

	if _, ok := corsPrefixes["/"]; !ok {
		return nil, fmt.Errorf("one filesystem must be mounted at /")
	}

	if config.Logger == nil {
		s.logger = log.New("HTTP")
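The loop above normalizes each mount point and rejects name and mount-point collisions; note that the !strings.HasSuffix guard in the hunk looks inverted, since it only trims the trailing slash when there is none to trim. A standalone sketch of the presumably intended bookkeeping (data invented for illustration):

package main

import (
	"fmt"
	"strings"
)

// normalizeMountpoint ensures a leading slash and strips a trailing one,
// so "memfs/" and "/memfs" map to the same key; "/" is left as-is.
func normalizeMountpoint(mp string) string {
	if !strings.HasPrefix(mp, "/") {
		mp = "/" + mp
	}
	if mp != "/" {
		mp = strings.TrimSuffix(mp, "/")
	}
	return mp
}

func main() {
	seen := map[string]string{} // mount point -> filesystem name
	for _, fs := range []struct{ Name, Mountpoint string }{
		{"disk", "/"}, {"mem", "memfs/"}, {"dup", "/memfs"},
	} {
		mp := normalizeMountpoint(fs.Mountpoint)
		if name, ok := seen[mp]; ok {
			fmt.Printf("the mount point %q is already in use (%s)\n", mp, name)
			continue
		}
		seen[mp] = fs.Name
	}
	if _, ok := seen["/"]; !ok {
		fmt.Println("one filesystem must be mounted at /")
	}
}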
@@ -224,16 +235,6 @@ func NewServer(config Config) (Server, error) {
		)
	}

	if config.MemFS.Filesystem != nil {
		s.v3handler.memfs = api.NewMemFS(
			config.MemFS.Filesystem,
		)

		s.handler.memfs = handler.NewMemFS(
			config.MemFS.Filesystem,
		)
	}

	if config.Prometheus != nil {
		s.handler.prometheus = handler.NewPrometheus(
			config.Prometheus.HTTPHandler(),
@@ -292,12 +293,6 @@ func NewServer(config Config) (Server, error) {
		Logger: s.logger,
	})

	if config.Cache != nil {
		s.middleware.cache = mwcache.NewWithConfig(mwcache.Config{
			Cache: config.Cache,
		})
	}

	s.v3handler.widget = api.NewWidget(api.WidgetConfig{
		Restream: config.Restream,
		Registry: config.Sessions,
@@ -308,11 +303,7 @@ func NewServer(config Config) (Server, error) {
	})

	if middleware, err := mwcors.NewWithConfig(mwcors.Config{
		Prefixes: map[string][]string{
			"/":      config.Cors.Origins,
			"/api":   {"*"},
			"/memfs": config.Cors.Origins,
		},
		Prefixes: corsPrefixes,
	}); err != nil {
		return nil, err
	} else {
@@ -437,65 +428,58 @@ func (s *server) setRoutes() {
	doc.Use(gzipMiddleware)
	doc.GET("", echoSwagger.WrapHandler)

	// Serve static data
	fs := s.router.Group("/*")
	fs.Use(mwmime.NewWithConfig(mwmime.Config{
		MimeTypesFile:      s.mimeTypesFile,
		DefaultContentType: "text/html",
	}))
	fs.Use(mwgzip.NewWithConfig(mwgzip.Config{
		Level:     mwgzip.BestSpeed,
		MinLength: 1000,
		Skipper:   mwgzip.ContentTypeSkipper(s.gzip.mimetypes),
	}))
	if s.middleware.cache != nil {
		fs.Use(s.middleware.cache)
	}
	fs.Use(s.middleware.hlsrewrite)
	if s.middleware.session != nil {
		fs.Use(s.middleware.session)
	}
	// Mount filesystems
	for _, filesystem := range s.filesystems {
		// Define a local variable because later in the loop we have a closure
		filesystem := filesystem

	fs.GET("", s.handler.diskfs.GetFile)
	fs.HEAD("", s.handler.diskfs.GetFile)

	// Memory FS
	if s.handler.memfs != nil {
		memfs := s.router.Group("/memfs/*")
		memfs.Use(mwmime.NewWithConfig(mwmime.Config{
			MimeTypesFile:      s.mimeTypesFile,
			DefaultContentType: "application/data",
		}))
		memfs.Use(mwgzip.NewWithConfig(mwgzip.Config{
			Level:     mwgzip.BestSpeed,
			MinLength: 1000,
			Skipper:   mwgzip.ContentTypeSkipper(s.gzip.mimetypes),
		}))
		if s.middleware.session != nil {
			memfs.Use(s.middleware.session)
		mountpoint := filesystem.Mountpoint + "/*"
		if filesystem.Mountpoint == "/" {
			mountpoint = "/*"
		}

		memfs.HEAD("", s.handler.memfs.GetFile)
		memfs.GET("", s.handler.memfs.GetFile)
		fs := s.router.Group(mountpoint)
		fs.Use(mwmime.NewWithConfig(mwmime.Config{
			MimeTypesFile:      s.mimeTypesFile,
			DefaultContentType: filesystem.DefaultContentType,
		}))

		var authmw echo.MiddlewareFunc
		if filesystem.Gzip {
			fs.Use(mwgzip.NewWithConfig(mwgzip.Config{
				Skipper:   mwgzip.ContentTypeSkipper(s.gzip.mimetypes),
				Level:     mwgzip.BestSpeed,
				MinLength: 1000,
			}))
		}

		if s.memfs.enableAuth {
			authmw = middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) {
				if username == s.memfs.username && password == s.memfs.password {
					return true, nil
				}

				return false, nil
			if filesystem.Cache != nil {
				mwcache := mwcache.NewWithConfig(mwcache.Config{
					Cache: filesystem.Cache,
				})
				fs.Use(mwcache)
			}

			memfs.POST("", s.handler.memfs.PutFile, authmw)
			memfs.PUT("", s.handler.memfs.PutFile, authmw)
			memfs.DELETE("", s.handler.memfs.DeleteFile, authmw)
		} else {
			memfs.POST("", s.handler.memfs.PutFile)
			memfs.PUT("", s.handler.memfs.PutFile)
			memfs.DELETE("", s.handler.memfs.DeleteFile)
		fs.GET("", filesystem.handler.GetFile)
		fs.HEAD("", filesystem.handler.GetFile)

		if filesystem.AllowWrite {
			if filesystem.EnableAuth {
				authmw := middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) {
					if username == filesystem.Username && password == filesystem.Password {
						return true, nil
					}

					return false, nil
				})

				fs.POST("", filesystem.handler.PutFile, authmw)
				fs.PUT("", filesystem.handler.PutFile, authmw)
				fs.DELETE("", filesystem.handler.DeleteFile, authmw)
			} else {
				fs.POST("", filesystem.handler.PutFile)
				fs.PUT("", filesystem.handler.PutFile)
				fs.DELETE("", filesystem.handler.DeleteFile)
			}
		}
	}
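The per-filesystem write protection above attaches echo's BasicAuth middleware only to the mutating routes. A minimal standalone sketch of that shape, with placeholder credentials (the constant-time comparison is a hardening detail not present in the original):

package main

import (
	"crypto/subtle"
	"net/http"

	"github.com/labstack/echo/v4"
	"github.com/labstack/echo/v4/middleware"
)

func main() {
	e := echo.New()

	authmw := middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) {
		userOK := subtle.ConstantTimeCompare([]byte(username), []byte("admin")) == 1
		passOK := subtle.ConstantTimeCompare([]byte(password), []byte("secret")) == 1
		return userOK && passOK, nil
	})

	// Reads stay open; writes require credentials, mirroring setRoutes.
	g := e.Group("/files/*")
	g.GET("", func(c echo.Context) error { return c.String(http.StatusOK, "read") })
	g.PUT("", func(c echo.Context) error { return c.String(http.StatusCreated, "") }, authmw)
	g.DELETE("", func(c echo.Context) error { return c.String(http.StatusOK, "") }, authmw)

	e.Logger.Fatal(e.Start(":8080"))
}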
@@ -593,32 +577,33 @@ func (s *server) setRoutesV3(v3 *echo.Group) {
		}
	}

	// v3 Memory FS
	if s.v3handler.memfs != nil {
		v3.GET("/fs/mem", s.v3handler.memfs.ListFiles)
		v3.GET("/fs/mem/*", s.v3handler.memfs.GetFile)

		if !s.readOnly {
			v3.DELETE("/fs/mem/*", s.v3handler.memfs.DeleteFile)
			v3.PUT("/fs/mem/*", s.v3handler.memfs.PutFile)
			v3.PATCH("/fs/mem/*", s.v3handler.memfs.PatchFile)
	// v3 Filesystems
	fshandlers := map[string]api.FSConfig{}
	for _, fs := range s.filesystems {
		fshandlers[fs.Name] = api.FSConfig{
			Type:       fs.Filesystem.Type(),
			Mountpoint: fs.Mountpoint,
			Handler:    fs.handler,
		}
	}

	// v3 Disk FS
	v3.GET("/fs/disk", s.v3handler.diskfs.ListFiles)
	v3.GET("/fs/disk/*", s.v3handler.diskfs.GetFile, mwmime.NewWithConfig(mwmime.Config{
	handler := api.NewFS(fshandlers)

	v3.GET("/fs", handler.List)

	v3.GET("/fs/:name", handler.ListFiles)
	v3.GET("/fs/:name/*", handler.GetFile, mwmime.NewWithConfig(mwmime.Config{
		MimeTypesFile:      s.mimeTypesFile,
		DefaultContentType: "application/data",
	}))
	v3.HEAD("/fs/disk/*", s.v3handler.diskfs.GetFile, mwmime.NewWithConfig(mwmime.Config{
	v3.HEAD("/fs/:name/*", handler.GetFile, mwmime.NewWithConfig(mwmime.Config{
		MimeTypesFile:      s.mimeTypesFile,
		DefaultContentType: "application/data",
	}))

	if !s.readOnly {
		v3.PUT("/fs/:name/*", handler.PutFile)
		v3.PUT("/fs/disk/*", s.v3handler.diskfs.PutFile)
		v3.DELETE("/fs/disk/*", s.v3handler.diskfs.DeleteFile)
		v3.DELETE("/fs/:name/*", handler.DeleteFile)
	}

	// v3 RTMP
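From a client's perspective, the dedicated /api/v3/fs/mem and /api/v3/fs/disk routes are folded into one name-addressed API. A hypothetical client sketch (address assumed, authentication omitted; status codes follow the PutFile handler above):

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	base := "http://localhost:8080/api/v3" // assumed address; auth omitted

	// Upload a file to the filesystem registered under the name "mem".
	req, err := http.NewRequest(http.MethodPut, base+"/fs/mem/hello.txt", strings.NewReader("hi"))
	if err != nil {
		panic(err)
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	res.Body.Close()
	fmt.Println(res.Status) // 201 on first write, 204 on overwrite

	// List all registered filesystems.
	res, err = http.Get(base + "/fs")
	if err != nil {
		panic(err)
	}
	res.Body.Close()
	fmt.Println(res.Status)
}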
@@ -17,18 +17,6 @@ func Rename(src, dst string) error {
	}

	// If renaming the file fails, copy the data
	Copy(src, dst)

	if err := os.Remove(src); err != nil {
		os.Remove(dst)
		return fmt.Errorf("failed to remove source file: %w", err)
	}

	return nil
}

// Copy copies a file from src to dst.
func Copy(src, dst string) error {
	source, err := os.Open(src)
	if err != nil {
		return fmt.Errorf("failed to open source file: %w", err)
@@ -49,5 +37,10 @@ func Copy(src, dst string) error {

	source.Close()

	if err := os.Remove(src); err != nil {
		os.Remove(dst)
		return fmt.Errorf("failed to remove source file: %w", err)
	}

	return nil
}
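The two hunks above move the source-file removal from Rename into Copy. The overall pattern is rename-with-copy-fallback: os.Rename fails across filesystem boundaries, so the data is copied and the source removed afterwards. A self-contained sketch of that pattern:

package main

import (
	"fmt"
	"io"
	"os"
)

// move tries a cheap rename first and falls back to copy-then-delete,
// cleaning up the destination if any step fails.
func move(src, dst string) error {
	if err := os.Rename(src, dst); err == nil {
		return nil
	}

	source, err := os.Open(src)
	if err != nil {
		return fmt.Errorf("failed to open source file: %w", err)
	}

	target, err := os.Create(dst)
	if err != nil {
		source.Close()
		return fmt.Errorf("failed to create target file: %w", err)
	}

	_, err = io.Copy(target, source)
	source.Close()
	target.Close()
	if err != nil {
		os.Remove(dst)
		return fmt.Errorf("failed to copy data: %w", err)
	}

	if err := os.Remove(src); err != nil {
		os.Remove(dst)
		return fmt.Errorf("failed to remove source file: %w", err)
	}

	return nil
}

func main() {
	if err := move("/tmp/a.txt", "/tmp/b.txt"); err != nil {
		fmt.Println(err)
	}
}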
@@ -15,6 +15,9 @@ import (
// DiskConfig is the config required to create a new disk
// filesystem.
type DiskConfig struct {
	// Name is the name of the filesystem
	Name string

	// Dir is the path to the directory to observe
	Dir string

@@ -109,7 +112,8 @@ func (f *diskFile) Read(p []byte) (int, error) {

// diskFilesystem implements the Filesystem interface
type diskFilesystem struct {
	dir string
	name string
	dir  string

	// Max. size of the filesystem in bytes as
	// given by the config
@@ -127,14 +131,20 @@ type diskFilesystem struct {
// that implements the Filesystem interface
func NewDiskFilesystem(config DiskConfig) (Filesystem, error) {
	fs := &diskFilesystem{
		name:    config.Name,
		maxSize: config.Size,
		logger:  config.Logger,
	}

	if fs.logger == nil {
		fs.logger = log.New("DiskFS")
		fs.logger = log.New("")
	}

	fs.logger = fs.logger.WithFields(log.Fields{
		"name": fs.name,
		"type": "disk",
	})

	if err := fs.Rebase(config.Dir); err != nil {
		return nil, err
	}
@@ -142,6 +152,10 @@ func NewDiskFilesystem(config DiskConfig) (Filesystem, error) {
	return fs, nil
}

func (fs *diskFilesystem) Name() string {
	return fs.name
}

func (fs *diskFilesystem) Base() string {
	return fs.dir
}
@@ -172,6 +186,10 @@ func (fs *diskFilesystem) Rebase(base string) error {
	return nil
}

func (fs *diskFilesystem) Type() string {
	return "diskfs"
}

func (fs *diskFilesystem) Size() (int64, int64) {
	// This is to cache the size for some time in order not to
	// stress the underlying filesystem too much.
@@ -20,10 +20,15 @@ func (d *dummyFile) Close() error { return nil }
func (d *dummyFile) Name() string            { return "" }
func (d *dummyFile) Stat() (FileInfo, error) { return &dummyFileInfo{}, nil }

type dummyFilesystem struct{}
type dummyFilesystem struct {
	name string
	typ  string
}

func (d *dummyFilesystem) Name() string         { return d.name }
func (d *dummyFilesystem) Base() string         { return "/" }
func (d *dummyFilesystem) Rebase(string) error  { return nil }
func (d *dummyFilesystem) Type() string         { return d.typ }
func (d *dummyFilesystem) Size() (int64, int64) { return 0, -1 }
func (d *dummyFilesystem) Resize(int64)         {}
func (d *dummyFilesystem) Files() int64         { return 0 }
@@ -35,6 +40,9 @@ func (d *dummyFilesystem) DeleteAll() int64 { return
func (d *dummyFilesystem) List(string) []FileInfo { return []FileInfo{} }

// NewDummyFilesystem return a dummy filesystem
func NewDummyFilesystem() Filesystem {
	return &dummyFilesystem{}
func NewDummyFilesystem(name, typ string) Filesystem {
	return &dummyFilesystem{
		name: name,
		typ:  typ,
	}
}
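Usage of the extended constructor, which now carries the name and type reported through the new Filesystem interface methods (the import path assumes the dummy lives in io/fs alongside the other implementations):

package main

import (
	"fmt"

	"github.com/datarhei/core/v16/io/fs"
)

func main() {
	dummy := fs.NewDummyFilesystem("scratch", "mem")
	fmt.Println(dummy.Name(), dummy.Type()) // scratch mem
}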
@@ -38,12 +38,18 @@ type File interface {

// Filesystem is an interface that provides access to a filesystem.
type Filesystem interface {
	// Name returns the name of this filesystem
	Name() string

	// Base returns the base path of this filesystem
	Base() string

	// Rebase sets a new base path for this filesystem
	Rebase(string) error

	// Type returns the type of this filesystem
	Type() string

	// Size returns the consumed size and capacity of the filesystem in bytes. The
	// capacity is negative if the filesystem can consume as much space as it can.
	Size() (int64, int64)
@@ -67,7 +73,7 @@ type Filesystem interface {
	Store(path string, r io.Reader) (int64, bool, error)

	// Delete removes a file at the given path from the filesystem. Returns the size of
	// the remove file in bytes. The size is negative if the file doesn't exist.
	// the removed file in bytes. The size is negative if the file doesn't exist.
	Delete(path string) int64

	// DeleteAll removes all files from the filesystem. Returns the size of the
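A consumer that depends only on this interface works unchanged against the disk, memory, and S3 implementations in this changeset; a small sketch using the memory filesystem (output format is illustrative):

package main

import (
	"fmt"
	"strings"

	"github.com/datarhei/core/v16/io/fs"
)

// storeAndReport writes a file and reports usage via the interface only.
func storeAndReport(filesystem fs.Filesystem, path, data string) error {
	size, created, err := filesystem.Store(path, strings.NewReader(data))
	if err != nil {
		return err
	}
	used, capacity := filesystem.Size() // capacity is negative if unbounded
	fmt.Printf("%s(%s): wrote %d bytes (created=%v), %d/%d bytes used\n",
		filesystem.Name(), filesystem.Type(), size, created, used, capacity)
	return nil
}

func main() {
	mem := fs.NewMemFilesystem(fs.MemConfig{Name: "mem", Base: "/"})
	if err := storeAndReport(mem, "/hello.txt", "hello"); err != nil {
		fmt.Println(err)
	}
}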
18  io/fs/mem.go
@@ -15,6 +15,9 @@ import (
// MemConfig is the config that is required for creating
// a new memory filesystem.
type MemConfig struct {
	// Name is the name of the filesystem
	Name string

	// Base is the base path to be reported for this filesystem
	Base string

@@ -107,6 +110,7 @@ func (f *memFile) Close() error {
}

type memFilesystem struct {
	name string
	base string

	// Mapping of path to file
@@ -136,6 +140,7 @@ type memFilesystem struct {
// the Filesystem interface.
func NewMemFilesystem(config MemConfig) Filesystem {
	fs := &memFilesystem{
		name:    config.Name,
		base:    config.Base,
		maxSize: config.Size,
		purge:   config.Purge,
@@ -143,9 +148,11 @@ func NewMemFilesystem(config MemConfig) Filesystem {
	}

	if fs.logger == nil {
		fs.logger = log.New("MemFS")
		fs.logger = log.New("")
	}

	fs.logger = fs.logger.WithField("type", "mem")

	fs.files = make(map[string]*memFile)

	fs.dataPool = sync.Pool{
@@ -155,6 +162,7 @@ func NewMemFilesystem(config MemConfig) Filesystem {
	}

	fs.logger.WithFields(log.Fields{
		"name":       fs.name,
		"size_bytes": fs.maxSize,
		"purge":      fs.purge,
	}).Debug().Log("Created")
@@ -162,6 +170,10 @@ func NewMemFilesystem(config MemConfig) Filesystem {
	return fs
}

func (fs *memFilesystem) Name() string {
	return fs.name
}

func (fs *memFilesystem) Base() string {
	return fs.base
}
@@ -172,6 +184,10 @@ func (fs *memFilesystem) Rebase(base string) error {
	return nil
}

func (fs *memFilesystem) Type() string {
	return "memfs"
}

func (fs *memFilesystem) Size() (int64, int64) {
	fs.filesLock.RLock()
	defer fs.filesLock.RUnlock()
389  io/fs/s3.go  Normal file
@@ -0,0 +1,389 @@
package fs

import (
	"context"
	"fmt"
	"io"
	"time"

	"github.com/datarhei/core/v16/glob"
	"github.com/datarhei/core/v16/log"
	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

type S3Config struct {
	// Name is the name of the filesystem
	Name            string
	Base            string
	Endpoint        string
	AccessKeyID     string
	SecretAccessKey string
	Region          string
	Bucket          string
	UseSSL          bool

	Logger log.Logger
}

type s3fs struct {
	name string
	base string

	endpoint        string
	accessKeyID     string
	secretAccessKey string
	region          string
	bucket          string
	useSSL          bool

	client *minio.Client

	logger log.Logger
}

func NewS3Filesystem(config S3Config) (Filesystem, error) {
	fs := &s3fs{
		name:            config.Name,
		base:            config.Base,
		endpoint:        config.Endpoint,
		accessKeyID:     config.AccessKeyID,
		secretAccessKey: config.SecretAccessKey,
		region:          config.Region,
		bucket:          config.Bucket,
		useSSL:          config.UseSSL,
		logger:          config.Logger,
	}

	if fs.logger == nil {
		fs.logger = log.New("")
	}

	client, err := minio.New(fs.endpoint, &minio.Options{
		Creds:  credentials.NewStaticV4(fs.accessKeyID, fs.secretAccessKey, ""),
		Region: fs.region,
		Secure: fs.useSSL,
	})

	if err != nil {
		return nil, fmt.Errorf("can't connect to s3 endpoint %s: %w", fs.endpoint, err)
	}

	fs.logger = fs.logger.WithFields(log.Fields{
		"name":     fs.name,
		"type":     "s3",
		"bucket":   fs.bucket,
		"region":   fs.region,
		"endpoint": fs.endpoint,
	})

	fs.logger.Debug().Log("Connected")

	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Second))
	defer cancel()

	exists, err := client.BucketExists(ctx, fs.bucket)
	if err != nil {
		fs.logger.WithError(err).Log("Can't access bucket")
		return nil, fmt.Errorf("can't access bucket %s: %w", fs.bucket, err)
	}

	if exists {
		fs.logger.Debug().Log("Bucket already exists")
	} else {
		fs.logger.Debug().Log("Bucket doesn't exist")
		err = client.MakeBucket(ctx, fs.bucket, minio.MakeBucketOptions{Region: fs.region})
		if err != nil {
			fs.logger.WithError(err).Log("Can't create bucket")
			return nil, fmt.Errorf("can't create bucket %s: %w", fs.bucket, err)
		} else {
			fs.logger.Debug().Log("Bucket created")
		}
	}

	fs.client = client

	return fs, nil
}

func (fs *s3fs) Name() string {
	return fs.name
}

func (fs *s3fs) Base() string {
	return fs.base
}

func (fs *s3fs) Rebase(base string) error {
	fs.base = base

	return nil
}

func (fs *s3fs) Type() string {
	return "s3fs"
}

func (fs *s3fs) Size() (int64, int64) {
	size := int64(0)

	files := fs.List("")

	for _, file := range files {
		size += file.Size()
	}

	return size, -1
}

func (fs *s3fs) Resize(size int64) {}

func (fs *s3fs) Files() int64 {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
		Recursive: true,
	})

	nfiles := int64(0)

	for object := range ch {
		if object.Err != nil {
			fs.logger.WithError(object.Err).Log("Listing object failed")
		}
		nfiles++
	}

	return nfiles
}

func (fs *s3fs) Symlink(oldname, newname string) error {
	return fmt.Errorf("not implemented")
}

func (fs *s3fs) Open(path string) File {
	//ctx, cancel := context.WithCancel(context.Background())
	//defer cancel()
	ctx := context.Background()

	object, err := fs.client.GetObject(ctx, fs.bucket, path, minio.GetObjectOptions{})
	if err != nil {
		fs.logger.Debug().WithField("key", path).Log("Not found")
		return nil
	}

	stat, err := object.Stat()
	if err != nil {
		fs.logger.Debug().WithField("key", path).Log("Stat failed")
		return nil
	}

	file := &s3File{
		data:         object,
		name:         stat.Key,
		size:         stat.Size,
		lastModified: stat.LastModified,
	}

	fs.logger.Debug().WithField("key", stat.Key).Log("Opened")

	return file
}

func (fs *s3fs) Store(path string, r io.Reader) (int64, bool, error) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	overwrite := false

	_, err := fs.client.StatObject(ctx, fs.bucket, path, minio.StatObjectOptions{})
	if err == nil {
		overwrite = true
	}

	info, err := fs.client.PutObject(ctx, fs.bucket, path, r, -1, minio.PutObjectOptions{
		UserMetadata:            map[string]string{},
		UserTags:                map[string]string{},
		Progress:                nil,
		ContentType:             "",
		ContentEncoding:         "",
		ContentDisposition:      "",
		ContentLanguage:         "",
		CacheControl:            "",
		Mode:                    "",
		RetainUntilDate:         time.Time{},
		ServerSideEncryption:    nil,
		NumThreads:              0,
		StorageClass:            "",
		WebsiteRedirectLocation: "",
		PartSize:                0,
		LegalHold:               "",
		SendContentMd5:          false,
		DisableContentSha256:    false,
		DisableMultipart:        false,
		Internal:                minio.AdvancedPutOptions{},
	})
	if err != nil {
		fs.logger.WithError(err).WithField("key", path).Log("Failed to store file")
		return -1, false, err
	}

	fs.logger.Debug().WithFields(log.Fields{
		"key":       path,
		"overwrite": overwrite,
	}).Log("Stored")

	return info.Size, overwrite, nil
}

func (fs *s3fs) Delete(path string) int64 {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	stat, err := fs.client.StatObject(ctx, fs.bucket, path, minio.StatObjectOptions{})
	if err != nil {
		fs.logger.Debug().WithField("key", path).Log("Not found")
		return -1
	}

	err = fs.client.RemoveObject(ctx, fs.bucket, path, minio.RemoveObjectOptions{
		GovernanceBypass: true,
	})
	if err != nil {
		fs.logger.WithError(err).WithField("key", stat.Key).Log("Failed to delete file")
		return -1
	}

	fs.logger.Debug().WithField("key", stat.Key).Log("Deleted")

	return stat.Size
}

func (fs *s3fs) DeleteAll() int64 {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	totalSize := int64(0)

	objectsCh := make(chan minio.ObjectInfo)

	// Send object names that are needed to be removed to objectsCh
	go func() {
		defer close(objectsCh)

		for object := range fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
			Recursive: true,
		}) {
			if object.Err != nil {
				fs.logger.WithError(object.Err).Log("Listing object failed")
				continue
			}
			totalSize += object.Size
			objectsCh <- object
		}
	}()

	for err := range fs.client.RemoveObjects(context.Background(), fs.bucket, objectsCh, minio.RemoveObjectsOptions{
		GovernanceBypass: true,
	}) {
		fs.logger.WithError(err.Err).WithField("key", err.ObjectName).Log("Deleting object failed")
	}

	fs.logger.Debug().Log("Deleted all files")

	return totalSize
}

func (fs *s3fs) List(pattern string) []FileInfo {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
		WithVersions: false,
		WithMetadata: false,
		Prefix:       "",
		Recursive:    true,
		MaxKeys:      0,
		StartAfter:   "",
		UseV1:        false,
	})

	files := []FileInfo{}

	for object := range ch {
		if object.Err != nil {
			fs.logger.WithError(object.Err).Log("Listing object failed")
|
||||
continue
|
||||
}
|
||||
|
||||
if len(pattern) != 0 {
|
||||
if ok, _ := glob.Match(pattern, object.Key, '/'); !ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
f := &s3FileInfo{
|
||||
name: object.Key,
|
||||
size: object.Size,
|
||||
lastModified: object.LastModified,
|
||||
}
|
||||
|
||||
files = append(files, f)
|
||||
}
|
||||
|
||||
return files
|
||||
}
|
||||
|
||||
type s3FileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
lastModified time.Time
|
||||
}
|
||||
|
||||
func (f *s3FileInfo) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f *s3FileInfo) Size() int64 {
|
||||
return f.size
|
||||
}
|
||||
|
||||
func (f *s3FileInfo) ModTime() time.Time {
|
||||
return f.lastModified
|
||||
}
|
||||
|
||||
func (f *s3FileInfo) IsLink() (string, bool) {
|
||||
return "", false
|
||||
}
|
||||
|
||||
func (f *s3FileInfo) IsDir() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
type s3File struct {
|
||||
data io.ReadCloser
|
||||
name string
|
||||
size int64
|
||||
lastModified time.Time
|
||||
}
|
||||
|
||||
func (f *s3File) Read(p []byte) (int, error) {
|
||||
return f.data.Read(p)
|
||||
}
|
||||
|
||||
func (f *s3File) Close() error {
|
||||
return f.data.Close()
|
||||
}
|
||||
|
||||
func (f *s3File) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
func (f *s3File) Stat() (FileInfo, error) {
|
||||
return &s3FileInfo{
|
||||
name: f.name,
|
||||
size: f.size,
|
||||
lastModified: f.lastModified,
|
||||
}, nil
|
||||
}
|
||||
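To see the interface in action, here is a minimal usage sketch. The constructor name and its config fields are assumptions for illustration (the real constructor sits earlier in this file); the Store/Open/List/Delete signatures are the ones implemented above.

```go
// Hypothetical wiring; only the method calls below are taken from the
// implementation above. NewS3Filesystem and S3Config are assumed names.
fs, err := NewS3Filesystem(S3Config{
	Endpoint: "s3.example.com",
	Bucket:   "core-media",
	Region:   "us-east-1",
	UseSSL:   true,
})
if err != nil {
	panic(err)
}

// Store returns the written size and whether an existing key was overwritten.
size, overwrite, err := fs.Store("/live/stream.m3u8", strings.NewReader("#EXTM3U"))
if err == nil {
	fmt.Println(size, overwrite)
}

// Open returns nil if the key doesn't exist.
if f := fs.Open("/live/stream.m3u8"); f != nil {
	io.Copy(os.Stdout, f)
	f.Close()
}

// List filters keys with a glob pattern; Delete returns the freed size.
for _, info := range fs.List("/live/*.m3u8") {
	fmt.Println(info.Name(), info.Size(), info.ModTime())
}
fs.Delete("/live/stream.m3u8")
```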
70
net/ip.go
@@ -4,11 +4,7 @@ package net

import (
	"fmt"
	"io"
	"net"
	"net/http"
	"sync"
	"time"
)

var (
@@ -62,69 +58,3 @@ func ipVersion(ipAddress string) int {

	return 0
}

// GetPublicIPs will try to figure out the public IPs (v4 and v6)
// we're running on. If it fails, an empty list will be returned.
func GetPublicIPs(timeout time.Duration) []string {
	var wg sync.WaitGroup

	ipv4 := ""
	ipv6 := ""

	wg.Add(2)

	go func() {
		defer wg.Done()

		ipv4 = doRequest("https://api.ipify.org", timeout)
	}()

	go func() {
		defer wg.Done()

		ipv6 = doRequest("https://api6.ipify.org", timeout)
	}()

	wg.Wait()

	ips := []string{}

	if len(ipv4) != 0 {
		ips = append(ips, ipv4)
	}

	if len(ipv6) != 0 && ipv4 != ipv6 {
		ips = append(ips, ipv6)
	}

	return ips
}

func doRequest(url string, timeout time.Duration) string {
	client := &http.Client{
		Timeout: timeout,
	}

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return ""
	}

	resp, err := client.Do(req)
	if err != nil {
		return ""
	}

	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return ""
	}

	if resp.StatusCode != 200 {
		return ""
	}

	return string(body)
}
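A call into the helper removed here looked roughly like the following; the import path follows the repository layout, and the timeout is an arbitrary choice.

```go
package main

import (
	"fmt"
	"time"

	// Assumption: the package path as used inside this repository.
	"github.com/datarhei/core/v16/net"
)

func main() {
	// Best-effort lookup of the public IPv4/IPv6 addresses via ipify;
	// returns an empty slice if both requests fail or time out.
	ips := net.GetPublicIPs(5 * time.Second)
	fmt.Println(ips)
}
```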
@@ -104,8 +104,6 @@ func (l *limiter) Stop() {

	l.proc.Stop()
	l.proc = nil

	l.reset()
}

func (l *limiter) ticker(ctx context.Context) {
@@ -37,7 +37,6 @@ func (io ConfigIO) Clone() ConfigIO {
type Config struct {
	ID        string     `json:"id"`
	Reference string     `json:"reference"`
	FFVersion string     `json:"ffversion"`
	Input     []ConfigIO `json:"input"`
	Output    []ConfigIO `json:"output"`
	Options   []string   `json:"options"`
@@ -54,7 +53,6 @@ func (config *Config) Clone() *Config {
	clone := &Config{
		ID:             config.ID,
		Reference:      config.Reference,
		FFVersion:      config.FFVersion,
		Reconnect:      config.Reconnect,
		ReconnectDelay: config.ReconnectDelay,
		Autostart:      config.Autostart,
@@ -86,6 +84,7 @@ func (config *Config) ResolvePlaceholders(r replace.Replacer) {
	for i, option := range config.Options {
		// Replace any known placeholders
		option = r.Replace(option, "diskfs", "")
		option = r.Replace(option, "fs:disk", "")

		config.Options[i] = option
	}
@@ -100,6 +99,7 @@ func (config *Config) ResolvePlaceholders(r replace.Replacer) {
		input.Address = r.Replace(input.Address, "reference", config.Reference)
		input.Address = r.Replace(input.Address, "diskfs", "")
		input.Address = r.Replace(input.Address, "memfs", "")
		input.Address = r.Replace(input.Address, "fs:*", "")
		input.Address = r.Replace(input.Address, "rtmp", "")
		input.Address = r.Replace(input.Address, "srt", "")

@@ -110,6 +110,7 @@ func (config *Config) ResolvePlaceholders(r replace.Replacer) {
			option = r.Replace(option, "reference", config.Reference)
			option = r.Replace(option, "diskfs", "")
			option = r.Replace(option, "memfs", "")
			option = r.Replace(option, "fs:*", "")

			input.Options[j] = option
		}
@@ -126,6 +127,7 @@ func (config *Config) ResolvePlaceholders(r replace.Replacer) {
		output.Address = r.Replace(output.Address, "reference", config.Reference)
		output.Address = r.Replace(output.Address, "diskfs", "")
		output.Address = r.Replace(output.Address, "memfs", "")
		output.Address = r.Replace(output.Address, "fs:*", "")
		output.Address = r.Replace(output.Address, "rtmp", "")
		output.Address = r.Replace(output.Address, "srt", "")

@@ -136,6 +138,7 @@ func (config *Config) ResolvePlaceholders(r replace.Replacer) {
			option = r.Replace(option, "reference", config.Reference)
			option = r.Replace(option, "diskfs", "")
			option = r.Replace(option, "memfs", "")
			option = r.Replace(option, "fs:*", "")

			output.Options[j] = option
		}
@@ -62,6 +62,11 @@ func New(config Config) Filesystem {
		rfs.logger = log.New("")
	}

	rfs.logger = rfs.logger.WithFields(log.Fields{
		"name": config.FS.Name(),
		"type": config.FS.Type(),
	})

	rfs.cleanupPatterns = make(map[string][]Pattern)

	// already drain the stop
@@ -4,6 +4,8 @@ import (
	"net/url"
	"regexp"
	"strings"

	"github.com/datarhei/core/v16/glob"
)

type Replacer interface {
@@ -24,7 +26,8 @@ type Replacer interface {
	// the value of the corresponding key in the parameters.
	// If the value is an empty string, the registered templates will be searched for that
	// placeholder. If no template is found, the placeholder will be replaced by the empty string.
	// A placeholder name may consist of the letters a-z.
	// A placeholder name may consist of the letters a-z and ':'. The placeholder may contain
	// a glob pattern to find the appropriate template.
	Replace(str, placeholder, value string) string
}

@@ -39,8 +42,8 @@ type replacer struct {
func New() Replacer {
	r := &replacer{
		templates:  make(map[string]func() string),
		re:         regexp.MustCompile(`{([a-z]+)(?:\^(.))?(?:,(.*?))?}`),
		templateRe: regexp.MustCompile(`{([a-z]+)}`),
		re:         regexp.MustCompile(`{([a-z:]+)(?:\^(.))?(?:,(.*?))?}`),
		templateRe: regexp.MustCompile(`{([a-z:]+)}`),
	}

	return r
@@ -57,7 +60,8 @@ func (r *replacer) RegisterTemplateFunc(placeholder string, template func() stri
func (r *replacer) Replace(str, placeholder, value string) string {
	str = r.re.ReplaceAllStringFunc(str, func(match string) string {
		matches := r.re.FindStringSubmatch(match)
		if matches[1] != placeholder {

		if ok, _ := glob.Match(placeholder, matches[1], ':'); !ok {
			return match
		}

@@ -66,7 +70,7 @@ func (r *replacer) Replace(str, placeholder, value string) string {

		// Check for a registered template
		if len(v) == 0 {
			tmplFunc, ok := r.templates[placeholder]
			tmplFunc, ok := r.templates[matches[1]]
			if ok {
				v = tmplFunc()
			}

@@ -34,15 +34,29 @@ func TestReplace(t *testing.T) {

func TestReplaceTemplate(t *testing.T) {
	r := New()
	r.RegisterTemplate("foobar", "Hello {who}! {what}?")
	r.RegisterTemplate("foo:bar", "Hello {who}! {what}?")

	replaced := r.Replace("{foobar,who=World}", "foobar", "")
	replaced := r.Replace("{foo:bar,who=World}", "foo:bar", "")
	require.Equal(t, "Hello World! {what}?", replaced)

	replaced = r.Replace("{foobar,who=World,what=E%3dmc^2}", "foobar", "")
	replaced = r.Replace("{foo:bar,who=World,what=E%3dmc^2}", "foo:bar", "")
	require.Equal(t, "Hello World! E=mc^2?", replaced)

	replaced = r.Replace("{foobar^:,who=World,what=E%3dmc:2}", "foobar", "")
	replaced = r.Replace("{foo:bar^:,who=World,what=E%3dmc:2}", "foo:bar", "")
	require.Equal(t, "Hello World! E=mc\\\\:2?", replaced)
}

func TestReplaceTemplateFunc(t *testing.T) {
	r := New()
	r.RegisterTemplateFunc("foo:bar", func() string { return "Hello {who}! {what}?" })

	replaced := r.Replace("{foo:bar,who=World}", "foo:bar", "")
	require.Equal(t, "Hello World! {what}?", replaced)

	replaced = r.Replace("{foo:bar,who=World,what=E%3dmc^2}", "foo:bar", "")
	require.Equal(t, "Hello World! E=mc^2?", replaced)

	replaced = r.Replace("{foo:bar^:,who=World,what=E%3dmc:2}", "foo:bar", "")
	require.Equal(t, "Hello World! E=mc\\\\:2?", replaced)
}

@@ -62,3 +76,12 @@ func TestReplaceCompileTemplate(t *testing.T) {
		require.Equal(t, e[2], replaced, e[0])
	}
}

func TestReplaceGlob(t *testing.T) {
	r := New()
	r.RegisterTemplate("foo:bar", "Hello foobar")
	r.RegisterTemplate("foo:baz", "Hello foobaz")

	replaced := r.Replace("{foo:baz}, {foo:bar}", "foo:*", "")
	require.Equal(t, "Hello foobaz, Hello foobar", replaced)
}
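What the widened `[a-z:]` character class and the glob lookup buy in practice: one Replace call can now resolve a whole family of `name:group` placeholders. A short sketch using the package's public API; the template names and values below are illustrative only, and the behaviour mirrors TestReplaceGlob above.

```go
r := New()
r.RegisterTemplate("fs:disk", "/data")
r.RegisterTemplate("fs:mem", "/memfs")

// "fs:*" matches both registered templates; glob.Match treats ':' as the
// separator, so the wildcard only spans the part after "fs:".
out := r.Replace("{fs:disk}/live.m3u8 {fs:mem}/live.ts", "fs:*", "")
fmt.Println(out) // "/data/live.m3u8 /memfs/live.ts"
```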
@@ -24,8 +24,6 @@ import (
	rfs "github.com/datarhei/core/v16/restream/fs"
	"github.com/datarhei/core/v16/restream/replace"
	"github.com/datarhei/core/v16/restream/store"

	"github.com/Masterminds/semver/v3"
)

// The Restreamer interface
@@ -61,8 +59,7 @@ type Config struct {
	ID           string
	Name         string
	Store        store.Store
	DiskFS       fs.Filesystem
	MemFS        fs.Filesystem
	Filesystems  []fs.Filesystem
	Replace      replace.Replacer
	FFmpeg       ffmpeg.FFmpeg
	MaxProcesses int64
@@ -93,8 +90,8 @@ type restream struct {
	maxProc int64
	nProc   int64
	fs      struct {
		diskfs       rfs.Filesystem
		memfs        rfs.Filesystem
		list         []rfs.Filesystem
		diskfs       []rfs.Filesystem
		stopObserver context.CancelFunc
	}
	replace replace.Replacer
@@ -127,26 +124,18 @@ func New(config Config) (Restreamer, error) {
		r.store = store.NewDummyStore(store.DummyConfig{})
	}

	if config.DiskFS != nil {
		r.fs.diskfs = rfs.New(rfs.Config{
			FS:     config.DiskFS,
			Logger: r.logger.WithComponent("Cleanup").WithField("type", "diskfs"),
	for _, fs := range config.Filesystems {
		fs := rfs.New(rfs.Config{
			FS:     fs,
			Logger: r.logger.WithComponent("Cleanup"),
		})
	} else {
		r.fs.diskfs = rfs.New(rfs.Config{
			FS: fs.NewDummyFilesystem(),
		})
	}

	if config.MemFS != nil {
		r.fs.memfs = rfs.New(rfs.Config{
			FS:     config.MemFS,
			Logger: r.logger.WithComponent("Cleanup").WithField("type", "memfs"),
		})
	} else {
		r.fs.memfs = rfs.New(rfs.Config{
			FS: fs.NewDummyFilesystem(),
		})
		r.fs.list = append(r.fs.list, fs)

		// Add the diskfs filesystems also to a separate array. We need it later for input and output validation
		if fs.Type() == "diskfs" {
			r.fs.diskfs = append(r.fs.diskfs, fs)
		}
	}

	if r.replace == nil {
@@ -185,12 +174,16 @@ func (r *restream) Start() {
		r.setCleanup(id, t.config)
	}

	r.fs.diskfs.Start()
	r.fs.memfs.Start()

	ctx, cancel := context.WithCancel(context.Background())
	r.fs.stopObserver = cancel
	go r.observe(ctx, 10*time.Second)

	for _, fs := range r.fs.list {
		fs.Start()

		if fs.Type() == "diskfs" {
			go r.observe(ctx, fs, 10*time.Second)
		}
	}

	r.stopOnce = sync.Once{}
	})
@@ -214,14 +207,16 @@ func (r *restream) Stop() {

	r.fs.stopObserver()

	r.fs.diskfs.Stop()
	r.fs.memfs.Stop()
	// Stop the cleanup jobs
	for _, fs := range r.fs.list {
		fs.Stop()
	}

	r.startOnce = sync.Once{}
	})
}

func (r *restream) observe(ctx context.Context, interval time.Duration) {
func (r *restream) observe(ctx context.Context, fs fs.Filesystem, interval time.Duration) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

@@ -230,14 +225,14 @@ func (r *restream) observe(ctx context.Context, interval time.Duration) {
		case <-ctx.Done():
			return
		case <-ticker.C:
			size, limit := r.fs.diskfs.Size()
			size, limit := fs.Size()
			isFull := false
			if limit > 0 && size >= limit {
				isFull = true
			}

			if isFull {
				// Stop all tasks that write to disk
				// Stop all tasks that write to this filesystem
				r.lock.Lock()
				for id, t := range r.tasks {
					if !t.valid {
@@ -252,7 +247,7 @@ func (r *restream) observe(ctx context.Context, interval time.Duration) {
						continue
					}

					r.logger.Warn().Log("Shutting down because disk is full")
					r.logger.Warn().Log("Shutting down because filesystem is full")
					r.stopProcess(id)
				}
				r.lock.Unlock()
@@ -269,18 +264,7 @@ func (r *restream) load() error {

	tasks := make(map[string]*task)

	skills := r.ffmpeg.Skills()
	ffversion := skills.FFmpeg.Version
	if v, err := semver.NewVersion(ffversion); err == nil {
		// Remove the patch level for the constraint
		ffversion = fmt.Sprintf("%d.%d.0", v.Major(), v.Minor())
	}

	for id, process := range data.Process {
		if len(process.Config.FFVersion) == 0 {
			process.Config.FFVersion = "^" + ffversion
		}

		t := &task{
			id:        id,
			reference: process.Reference,
@@ -308,23 +292,6 @@ func (r *restream) load() error {
	// replaced, we can resolve references and validate the
	// inputs and outputs.
	for _, t := range tasks {
		// Just warn if the ffmpeg version constraint doesn't match the available ffmpeg version
		if c, err := semver.NewConstraint(t.config.FFVersion); err == nil {
			if v, err := semver.NewVersion(skills.FFmpeg.Version); err == nil {
				if !c.Check(v) {
					r.logger.Warn().WithFields(log.Fields{
						"id":         t.id,
						"constraint": t.config.FFVersion,
						"version":    skills.FFmpeg.Version,
					}).WithError(fmt.Errorf("available FFmpeg version doesn't fit constraint; you have to update this process to adjust the constraint")).Log("")
				}
			} else {
				r.logger.Warn().WithField("id", t.id).WithError(err).Log("")
			}
		} else {
			r.logger.Warn().WithField("id", t.id).WithError(err).Log("")
		}

		err := r.resolveAddresses(tasks, t.config)
		if err != nil {
			r.logger.Warn().WithField("id", t.id).WithError(err).Log("Ignoring")
@@ -437,12 +404,6 @@ func (r *restream) createTask(config *app.Config) (*task, error) {
		return nil, fmt.Errorf("an empty ID is not allowed")
	}

	config.FFVersion = "^" + r.ffmpeg.Skills().FFmpeg.Version
	if v, err := semver.NewVersion(config.FFVersion); err == nil {
		// Remove the patch level for the constraint
		config.FFVersion = fmt.Sprintf("^%d.%d.0", v.Major(), v.Minor())
	}

	process := &app.Process{
		ID:        config.ID,
		Reference: config.Reference,
@@ -502,34 +463,50 @@ func (r *restream) createTask(config *app.Config) (*task, error) {
}

func (r *restream) setCleanup(id string, config *app.Config) {
	rePrefix := regexp.MustCompile(`^([a-z]+):`)

	for _, output := range config.Output {
		for _, c := range output.Cleanup {
			if strings.HasPrefix(c.Pattern, "memfs:") {
				r.fs.memfs.SetCleanup(id, []rfs.Pattern{
					{
						Pattern:       strings.TrimPrefix(c.Pattern, "memfs:"),
						MaxFiles:      c.MaxFiles,
						MaxFileAge:    time.Duration(c.MaxFileAge) * time.Second,
						PurgeOnDelete: c.PurgeOnDelete,
					},
				})
			} else if strings.HasPrefix(c.Pattern, "diskfs:") {
				r.fs.diskfs.SetCleanup(id, []rfs.Pattern{
					{
						Pattern:       strings.TrimPrefix(c.Pattern, "diskfs:"),
						MaxFiles:      c.MaxFiles,
						MaxFileAge:    time.Duration(c.MaxFileAge) * time.Second,
						PurgeOnDelete: c.PurgeOnDelete,
					},
			matches := rePrefix.FindStringSubmatch(c.Pattern)
			if matches == nil {
				continue
			}

			name := matches[1]

			// Support legacy names
			if name == "diskfs" {
				name = "disk"
			} else if name == "memfs" {
				name = "mem"
			}

			for _, fs := range r.fs.list {
				if fs.Name() != name {
					continue
				}

				pattern := rfs.Pattern{
					Pattern:       rePrefix.ReplaceAllString(c.Pattern, ""),
					MaxFiles:      c.MaxFiles,
					MaxFileAge:    time.Duration(c.MaxFileAge) * time.Second,
					PurgeOnDelete: c.PurgeOnDelete,
				}

				fs.SetCleanup(id, []rfs.Pattern{
					pattern,
				})

				break
			}
		}
	}
}

func (r *restream) unsetCleanup(id string) {
	r.fs.diskfs.UnsetCleanup(id)
	r.fs.memfs.UnsetCleanup(id)
	for _, fs := range r.fs.list {
		fs.UnsetCleanup(id)
	}
}

func (r *restream) setPlayoutPorts(t *task) error {
@@ -618,9 +595,23 @@ func (r *restream) validateConfig(config *app.Config) (bool, error) {
			return false, fmt.Errorf("the address for input '#%s:%s' must not be empty", config.ID, io.ID)
		}

		io.Address, err = r.validateInputAddress(io.Address, r.fs.diskfs.Base())
		if err != nil {
			return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err)
		if len(r.fs.diskfs) != 0 {
			maxFails := 0
			for _, fs := range r.fs.diskfs {
				io.Address, err = r.validateInputAddress(io.Address, fs.Base())
				if err != nil {
					maxFails++
				}
			}

			if maxFails == len(r.fs.diskfs) {
				return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err)
			}
		} else {
			io.Address, err = r.validateInputAddress(io.Address, "/")
			if err != nil {
				return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err)
			}
		}
	}

@@ -650,15 +641,33 @@ func (r *restream) validateConfig(config *app.Config) (bool, error) {
			return false, fmt.Errorf("the address for output '#%s:%s' must not be empty", config.ID, io.ID)
		}

		isFile := false
		if len(r.fs.diskfs) != 0 {
			maxFails := 0
			for _, fs := range r.fs.diskfs {
				isFile := false
				io.Address, isFile, err = r.validateOutputAddress(io.Address, fs.Base())
				if err != nil {
					maxFails++
				}

		io.Address, isFile, err = r.validateOutputAddress(io.Address, r.fs.diskfs.Base())
		if err != nil {
			return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err)
		}
				if isFile {
					hasFiles = true
				}
			}

		if isFile {
			hasFiles = true
			if maxFails == len(r.fs.diskfs) {
				return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err)
			}
		} else {
			isFile := false
			io.Address, isFile, err = r.validateOutputAddress(io.Address, "/")
			if err != nil {
				return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err)
			}

			if isFile {
				hasFiles = true
			}
		}
	}
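The prefix handling in setCleanup above is easy to check in isolation: the regular expression peels the filesystem name off a cleanup pattern, and the legacy `diskfs:`/`memfs:` prefixes are folded onto the `disk`/`mem` filesystem names. A self-contained sketch:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	rePrefix := regexp.MustCompile(`^([a-z]+):`)

	// A cleanup pattern as it may appear in an existing process config.
	p := "diskfs:/recordings/segment_*.ts"

	matches := rePrefix.FindStringSubmatch(p)
	if matches == nil {
		return // no filesystem prefix, pattern is skipped
	}

	name := matches[1] // "diskfs"
	if name == "diskfs" {
		name = "disk" // legacy name for the filesystem registered as "disk"
	} else if name == "memfs" {
		name = "mem"
	}

	pattern := rePrefix.ReplaceAllString(p, "")
	fmt.Println(name, pattern) // disk /recordings/segment_*.ts
}
```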
@@ -1,3 +0,0 @@
{
    "version": 3
}
@@ -4,7 +4,6 @@ import (
	gojson "encoding/json"
	"fmt"
	"os"
	"path"
	"sync"

	"github.com/datarhei/core/v16/encoding/json"
@@ -13,15 +12,14 @@ import (
)

type JSONConfig struct {
	Filepath  string
	FFVersion string
	Logger    log.Logger
	Dir    string
	Logger log.Logger
}

type jsonStore struct {
	filepath  string
	ffversion string
	logger    log.Logger
	filename string
	dir      string
	logger   log.Logger

	// Mutex to serialize access to the backend
	lock sync.RWMutex
@@ -31,13 +29,13 @@ var version uint64 = 4

func NewJSONStore(config JSONConfig) Store {
	s := &jsonStore{
		filepath:  config.Filepath,
		ffversion: config.FFVersion,
		logger:    config.Logger,
		filename: "db.json",
		dir:      config.Dir,
		logger:   config.Logger,
	}

	if s.logger == nil {
		s.logger = log.New("")
		s.logger = log.New("JSONStore")
	}

	return s
@@ -47,7 +45,7 @@ func (s *jsonStore) Load() (StoreData, error) {
	s.lock.Lock()
	defer s.lock.Unlock()

	data, err := s.load(s.filepath, version)
	data, err := s.load(version)
	if err != nil {
		return NewStoreData(), err
	}
@@ -65,7 +63,7 @@ func (s *jsonStore) Store(data StoreData) error {
	s.lock.RLock()
	defer s.lock.RUnlock()

	err := s.store(s.filepath, data)
	err := s.store(data)
	if err != nil {
		return fmt.Errorf("failed to store data: %w", err)
	}
@@ -73,16 +71,13 @@ func (s *jsonStore) Store(data StoreData) error {
	return nil
}

func (s *jsonStore) store(filepath string, data StoreData) error {
func (s *jsonStore) store(data StoreData) error {
	jsondata, err := gojson.MarshalIndent(&data, "", "    ")
	if err != nil {
		return err
	}

	dir := path.Dir(filepath)
	name := path.Base(filepath)

	tmpfile, err := os.CreateTemp(dir, name)
	tmpfile, err := os.CreateTemp(s.dir, s.filename)
	if err != nil {
		return err
	}
@@ -97,11 +92,13 @@ func (s *jsonStore) store(filepath string, data StoreData) error {
		return err
	}

	if err := file.Rename(tmpfile.Name(), filepath); err != nil {
	filename := s.dir + "/" + s.filename

	if err := file.Rename(tmpfile.Name(), filename); err != nil {
		return err
	}

	s.logger.WithField("file", filepath).Debug().Log("Stored data")
	s.logger.WithField("file", filename).Debug().Log("Stored data")

	return nil
}
@@ -110,10 +107,12 @@ type storeVersion struct {
	Version uint64 `json:"version"`
}

func (s *jsonStore) load(filepath string, version uint64) (StoreData, error) {
func (s *jsonStore) load(version uint64) (StoreData, error) {
	r := NewStoreData()

	_, err := os.Stat(filepath)
	filename := s.dir + "/" + s.filename

	_, err := os.Stat(filename)
	if err != nil {
		if os.IsNotExist(err) {
			return r, nil
@@ -122,7 +121,7 @@ func (s *jsonStore) load(filepath string, version uint64) (StoreData, error) {
		return r, err
	}

	jsondata, err := os.ReadFile(filepath)
	jsondata, err := os.ReadFile(filename)
	if err != nil {
		return r, err
	}
@@ -141,7 +140,7 @@ func (s *jsonStore) load(filepath string, version uint64) (StoreData, error) {
		return r, json.FormatError(jsondata, err)
	}

	s.logger.WithField("file", filepath).Debug().Log("Read data")
	s.logger.WithField("file", filename).Debug().Log("Read data")

	return r, nil
}
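The store method above writes the JSON through a temporary file and renames it over db.json, so a crash mid-write never leaves a truncated database behind. A minimal sketch of the same pattern using only the standard library (the repository routes the rename through its own file helper):

```go
package main

import (
	"os"
	"path/filepath"
)

// writeAtomic writes data to dir/name via a temporary file and an
// atomic rename, mirroring the temp-file pattern used by the JSON store.
func writeAtomic(dir, name string, data []byte) error {
	tmp, err := os.CreateTemp(dir, name)
	if err != nil {
		return err
	}

	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		os.Remove(tmp.Name())
		return err
	}

	if err := tmp.Close(); err != nil {
		os.Remove(tmp.Name())
		return err
	}

	return os.Rename(tmp.Name(), filepath.Join(dir, name))
}

func main() {
	_ = writeAtomic(".", "db.json", []byte(`{"version": 4}`))
}
```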
@@ -1,9 +1,10 @@
package store

import (
	"os"
	"testing"

	"github.com/datarhei/core/v16/log"

	"github.com/stretchr/testify/require"
)

@@ -14,71 +15,34 @@ func TestNew(t *testing.T) {
}

func TestLoad(t *testing.T) {
	store := NewJSONStore(JSONConfig{
		Filepath: "./fixtures/v4_empty.json",
	})
	store := &jsonStore{
		filename: "v4_empty.json",
		dir:      "./fixtures",
		logger:   log.New(""),
	}

	_, err := store.Load()
	require.Equal(t, nil, err)
}

func TestLoadFailed(t *testing.T) {
	store := NewJSONStore(JSONConfig{
		Filepath: "./fixtures/v4_invalid.json",
	})
	store := &jsonStore{
		filename: "v4_invalid.json",
		dir:      "./fixtures",
		logger:   log.New(""),
	}

	_, err := store.Load()
	require.NotEqual(t, nil, err)
}

func TestIsEmpty(t *testing.T) {
	store := NewJSONStore(JSONConfig{
		Filepath: "./fixtures/v4_empty.json",
	})
	store := &jsonStore{
		filename: "v4_empty.json",
		dir:      "./fixtures",
		logger:   log.New(""),
	}

	data, err := store.Load()
	require.NoError(t, err)
	require.Equal(t, true, data.IsEmpty())
}

func TestNotExists(t *testing.T) {
	store := NewJSONStore(JSONConfig{
		Filepath: "./fixtures/v4_notexist.json",
	})

	data, err := store.Load()
	require.NoError(t, err)
	require.Equal(t, true, data.IsEmpty())
}

func TestStore(t *testing.T) {
	os.Remove("./fixtures/v4_store.json")

	store := NewJSONStore(JSONConfig{
		Filepath: "./fixtures/v4_store.json",
	})

	data, err := store.Load()
	require.NoError(t, err)
	require.Equal(t, true, data.IsEmpty())

	data.Metadata.System["somedata"] = "foobar"

	store.Store(data)

	data2, err := store.Load()
	require.NoError(t, err)
	require.Equal(t, data, data2)

	os.Remove("./fixtures/v4_store.json")
}

func TestInvalidVersion(t *testing.T) {
	store := NewJSONStore(JSONConfig{
		Filepath: "./fixtures/v3_empty.json",
	})

	data, err := store.Load()
	require.Error(t, err)
	data, _ := store.Load()
	require.Equal(t, true, data.IsEmpty())
}
9
run.sh
@@ -8,15 +8,6 @@ if [ $? -ne 0 ]; then
    exit 1
fi

# Run the FFmpeg migration program. In case a FFmpeg 5 binary is present, it will create a
# backup of the current DB and modify the FFmpeg parameters such that they are compatible
# with FFmpeg 5.

./bin/ffmigrate
if [ $? -ne 0 ]; then
    exit 1
fi

# Now run the core with the possibly converted configuration.

./bin/core
78
srt/srt.go
@@ -5,7 +5,6 @@ import (
	"context"
	"fmt"
	"net"
	"regexp"
	"strings"
	"sync"
	"time"
@@ -53,17 +52,15 @@ func (c *client) ticker(ctx context.Context) {
	ticker := time.NewTicker(1 * time.Second)
	defer ticker.Stop()

	stats := &srt.Statistics{}

	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			c.conn.Stats(stats)
			stats := c.conn.Stats()

			rxbytes := stats.Accumulated.ByteRecv
			txbytes := stats.Accumulated.ByteSent
			rxbytes := stats.ByteRecv
			txbytes := stats.ByteSent

			c.collector.Ingress(c.id, int64(rxbytes-c.rxbytes))
			c.collector.Egress(c.id, int64(txbytes-c.txbytes))
@@ -228,6 +225,8 @@ func New(config Config) (Server, error) {

	srtconfig := srt.DefaultConfig()

	srtconfig.KMPreAnnounce = 200
	srtconfig.KMRefreshRate = 10000
	srtconfig.Passphrase = config.Passphrase
	srtconfig.Logger = s.srtlogger

@@ -285,11 +284,8 @@ func (s *server) Channels() Channels {
		socketId := ch.publisher.conn.SocketId()
		st.Publisher[id] = socketId

		stats := &srt.Statistics{}
		ch.publisher.conn.Stats(stats)

		st.Connections[socketId] = Connection{
			Stats: *stats,
			Stats: ch.publisher.conn.Stats(),
			Log:   map[string][]Log{},
		}

@@ -297,11 +293,8 @@ func (s *server) Channels() Channels {
		socketId := c.conn.SocketId()
		st.Subscriber[id] = append(st.Subscriber[id], socketId)

		stats := &srt.Statistics{}
		c.conn.Stats(stats)

		st.Connections[socketId] = Connection{
			Stats: *stats,
			Stats: c.conn.Stats(),
			Log:   map[string][]Log{},
		}
	}
@@ -372,59 +365,6 @@ type streamInfo struct {
func parseStreamId(streamid string) (streamInfo, error) {
	si := streamInfo{}

	if strings.HasPrefix(streamid, "#!:") {
		return parseOldStreamId(streamid)
	}

	re := regexp.MustCompile(`,(token|mode):(.+)`)

	results := map[string]string{}

	idEnd := -1
	value := streamid
	key := ""

	for {
		matches := re.FindStringSubmatchIndex(value)
		if matches == nil {
			break
		}

		if idEnd < 0 {
			idEnd = matches[2] - 1
		}

		if len(key) != 0 {
			results[key] = value[:matches[2]-1]
		}

		key = value[matches[2]:matches[3]]
		value = value[matches[4]:matches[5]]

		results[key] = value
	}

	if idEnd < 0 {
		idEnd = len(streamid)
	}

	si.resource = streamid[:idEnd]
	if token, ok := results["token"]; ok {
		si.token = token
	}

	if mode, ok := results["mode"]; ok {
		si.mode = mode
	} else {
		si.mode = "request"
	}

	return si, nil
}

func parseOldStreamId(streamid string) (streamInfo, error) {
	si := streamInfo{}

	if !strings.HasPrefix(streamid, "#!:") {
		return si, fmt.Errorf("unknown streamid format")
	}
@@ -433,7 +373,7 @@ func parseOldStreamId(streamid string) (streamInfo, error) {

	kvs := strings.Split(streamid, ",")

	splitFn := func(s, sep string) (string, string, error) {
	split := func(s, sep string) (string, string, error) {
		splitted := strings.SplitN(s, sep, 2)

		if len(splitted) != 2 {
@@ -444,7 +384,7 @@ func parseOldStreamId(streamid string) (streamInfo, error) {
	}

	for _, kv := range kvs {
		key, value, err := splitFn(kv, "=")
		key, value, err := split(kv, "=")
		if err != nil {
			continue
		}

@@ -8,25 +8,7 @@ import (

func TestParseStreamId(t *testing.T) {
	streamids := map[string]streamInfo{
		"bla":                                 {resource: "bla", mode: "request"},
		"bla,mode:publish":                    {resource: "bla", mode: "publish"},
		"123456789":                           {resource: "123456789", mode: "request"},
		"bla,token:foobar":                    {resource: "bla", token: "foobar", mode: "request"},
		"bla,token:foo,bar":                   {resource: "bla", token: "foo,bar", mode: "request"},
		"123456789,mode:publish,token:foobar": {resource: "123456789", token: "foobar", mode: "publish"},
		"mode:publish":                        {resource: "mode:publish", mode: "request"},
	}

	for streamid, wantsi := range streamids {
		si, err := parseStreamId(streamid)

		require.NoError(t, err)
		require.Equal(t, wantsi, si)
	}
}

func TestParseOldStreamId(t *testing.T) {
	streamids := map[string]streamInfo{
		"bla":          {},
		"#!:":          {},
		"#!:key=value": {},
		"#!:m=publish": {mode: "publish"},
@@ -37,7 +19,7 @@ func TestParseOldStreamId(t *testing.T) {
	}

	for streamid, wantsi := range streamids {
		si, _ := parseOldStreamId(streamid)
		si, _ := parseStreamId(streamid)

		require.Equal(t, wantsi, si)
	}
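For orientation, how the two streamid flavours handled above relate in the version that carries both parsers. The srt:// URL shape is an assumption about how the streamid is usually transported; the parse results follow the tests. A sketch inside the srt package:

```go
// New-style streamid: "<resource>[,token:<token>][,mode:<mode>]",
// e.g. srt://host:6000?streamid=mystream,mode:publish,token:foobar
si, err := parseStreamId("mystream,mode:publish,token:foobar")
if err == nil {
	fmt.Println(si.resource, si.mode, si.token) // mystream publish foobar
}

// Legacy-style streamids, prefixed with "#!:" and using key=value pairs,
// are routed to parseOldStreamId.
si, _ = parseStreamId("#!:m=publish")
fmt.Println(si.mode) // publish
```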
130
vendor/github.com/99designs/gqlgen/CHANGELOG.md
generated
vendored
@@ -5,138 +5,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

<a name="unreleased"></a>
## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.19...HEAD)
## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.14...HEAD)

<!-- end of if -->
<!-- end of CommitGroups -->
<a name="v0.17.19"></a>
## [v0.17.19](https://github.com/99designs/gqlgen/compare/v0.17.18...v0.17.19) - 2022-09-15
- <a href="https://github.com/99designs/gqlgen/commit/588c6ac137b8ed7aea1bc7c009ea23cb9dec5caa"><tt>588c6ac1</tt></a> release v0.17.19

- <a href="https://github.com/99designs/gqlgen/commit/c671317056298db8073498c8db02120b6f737032"><tt>c6713170</tt></a> v0.17.18 postrelease bump

<!-- end of Commits -->
<!-- end of Else -->

<!-- end of If NoteGroups -->
<a name="v0.17.18"></a>
## [v0.17.18](https://github.com/99designs/gqlgen/compare/v0.17.17...v0.17.18) - 2022-09-15
- <a href="https://github.com/99designs/gqlgen/commit/1d41c808a93446fca8ff867e957ef552e56f6ae3"><tt>1d41c808</tt></a> release v0.17.18

- <a href="https://github.com/99designs/gqlgen/commit/4dbe2e475f15ce77a498c841ea6c9149ef5ceaba"><tt>4dbe2e47</tt></a> update graphiql to 2.0.7 (<a href="https://github.com/99designs/gqlgen/pull/2375">#2375</a>)

<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/b7cc094a49e3d348cfc457aa76f1640c86cdcae9"><tt>b7cc094a</tt></a> testfix: make apollo federated tracer test more consistent (<a href="https://github.com/99designs/gqlgen/pull/2374">#2374</a>)</summary>

* Update tracing_test.go

* add missing imports

</details></dd></dl>

- <a href="https://github.com/99designs/gqlgen/commit/d096fb9b08531b0dc389a786b6f44add045ea75e"><tt>d096fb9b</tt></a> Update directives (<a href="https://github.com/99designs/gqlgen/pull/2371">#2371</a>)

- <a href="https://github.com/99designs/gqlgen/commit/1acfea2fbdf3564df16f8023f4e736e90a05b909"><tt>1acfea2f</tt></a> Add v0.17.17 changelog

- <a href="https://github.com/99designs/gqlgen/commit/c273adc8ad45e15940bbb6fe211603670d9f3220"><tt>c273adc8</tt></a> v0.17.17 postrelease bump

<!-- end of Commits -->
<!-- end of Else -->

<!-- end of If NoteGroups -->
<a name="v0.17.17"></a>
## [v0.17.17](https://github.com/99designs/gqlgen/compare/v0.17.16...v0.17.17) - 2022-09-13
- <a href="https://github.com/99designs/gqlgen/commit/d50bc5aca10c5a5dd6a1680b2288c35a61327ade"><tt>d50bc5ac</tt></a> release v0.17.17

<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/462025b400e9b792a5afbe320cde4cc952f6b547"><tt>462025b4</tt></a> nil check error before type assertion follow-up from <a href="https://github.com/99designs/gqlgen/pull/2341">#2341</a> (<a href="https://github.com/99designs/gqlgen/pull/2368">#2368</a>)</summary>

* Improve errcode.Set safety

</details></dd></dl>

<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/59493aff86020d170e58900654d334f5ebc2ceee"><tt>59493aff</tt></a> fix: apollo federation tracer was race prone (<a href="https://github.com/99designs/gqlgen/pull/2366">#2366</a>)</summary>

The tracer was using a global state across different goroutines
Added req headers to operation context to allow it to be fetched in InterceptOperation

</details></dd></dl>

- <a href="https://github.com/99designs/gqlgen/commit/fc0185567f2dfc37b38f11283efb9cc1db69e96d"><tt>fc018556</tt></a> Update gqlparser to v2.5.1 (<a href="https://github.com/99designs/gqlgen/pull/2363">#2363</a>)

- <a href="https://github.com/99designs/gqlgen/commit/56574a146bd16a13c9055128ec3c80e96a7c4b29"><tt>56574a14</tt></a> feat: make Playground HTML content compatible with UTF-8 charset (<a href="https://github.com/99designs/gqlgen/pull/2355">#2355</a>)

<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/182b039d34cb730f432c486ebe763f246937dea4"><tt>182b039d</tt></a> Add `subscriptions.md` recipe to docs (<a href="https://github.com/99designs/gqlgen/pull/2346">#2346</a>)</summary>

* Add `subscriptions.md` recipe to docs

* Fix wrong request type

</details></dd></dl>

- <a href="https://github.com/99designs/gqlgen/commit/b66fff16de0b16edc317398a5574fcff2cb39e66"><tt>b66fff16</tt></a> Add omit_getters config option (<a href="https://github.com/99designs/gqlgen/pull/2348">#2348</a>)

- <a href="https://github.com/99designs/gqlgen/commit/2ba8040f20e32d06dc6d5bfacaadc5619a6e66ee"><tt>2ba8040f</tt></a> Update changelog for v0.17.16

- <a href="https://github.com/99designs/gqlgen/commit/8bef8c8061222071e6c814e45bbc33fcabcb3980"><tt>8bef8c80</tt></a> v0.17.16 postrelease bump

<!-- end of Commits -->
<!-- end of Else -->

<!-- end of If NoteGroups -->
<a name="v0.17.16"></a>
## [v0.17.16](https://github.com/99designs/gqlgen/compare/v0.17.15...v0.17.16) - 2022-08-26
- <a href="https://github.com/99designs/gqlgen/commit/9593ceadd6e07c6fd0f0b0e0c55b9f1bf8ade762"><tt>9593cead</tt></a> release v0.17.16

- <a href="https://github.com/99designs/gqlgen/commit/2390af2db920dc632fe47bc778a24c30495b9efd"><tt>2390af2d</tt></a> Update gqlparser to v2.5.0 (<a href="https://github.com/99designs/gqlgen/pull/2341">#2341</a>)

- <a href="https://github.com/99designs/gqlgen/commit/2a87fe0645fd271e4e71d2b7bde34ecf31bf844c"><tt>2a87fe06</tt></a> feat: update Graphiql to version 2 (<a href="https://github.com/99designs/gqlgen/pull/2340">#2340</a>)

<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/32e2ccd30e82fc566ca022a65dcc4a67c4b6125a"><tt>32e2ccd3</tt></a> Update yaml to v3 (<a href="https://github.com/99designs/gqlgen/pull/2339">#2339</a>)</summary>

* update yaml to v3

* add missing go entry for yaml on _example

* add missing sum file

</details></dd></dl>

- <a href="https://github.com/99designs/gqlgen/commit/7949117a524be7f8882a61e2d4ade1bedf105107"><tt>7949117a</tt></a> v0.17.15 postrelease bump

<!-- end of Commits -->
<!-- end of Else -->

<!-- end of If NoteGroups -->
<a name="v0.17.15"></a>
## [v0.17.15](https://github.com/99designs/gqlgen/compare/v0.17.14...v0.17.15) - 2022-08-23
- <a href="https://github.com/99designs/gqlgen/commit/23cc749256b4e2edc4b11ce9e84c643a7bb3194f"><tt>23cc7492</tt></a> release v0.17.15

- <a href="https://github.com/99designs/gqlgen/commit/577a570cdb6b1b9185f24940690a14cdced37a36"><tt>577a570c</tt></a> Markdown formatting fixes (<a href="https://github.com/99designs/gqlgen/pull/2335">#2335</a>)

<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/2b584011fc64a55cbda67f46637a280bf94d9cc1"><tt>2b584011</tt></a> Fix Interface Slice Getter Generation (<a href="https://github.com/99designs/gqlgen/pull/2332">#2332</a>)</summary>

* Make modelgen test fail if generated doesn't build
Added returning list of interface to modelgen test schema

* Implement slice copying when returning interface slices

* Re-generate to satisfy the linter

</details></dd></dl>

<dl><dd><details><summary><a href="https://github.com/99designs/gqlgen/commit/aee57b4c521e527ebc0538b8edfbe610973abf21"><tt>aee57b4c</tt></a> Correct boolean logic (<a href="https://github.com/99designs/gqlgen/pull/2330">#2330</a>)</summary>

Correcting boolean logic issue

</details></dd></dl>

- <a href="https://github.com/99designs/gqlgen/commit/da0610e11accf3afd34903f03bfc0abd045d07ed"><tt>da0610e1</tt></a> Update changelog for v0.17.14

- <a href="https://github.com/99designs/gqlgen/commit/ddcb524e3321d849505f6937307ef3dcbd3acace"><tt>ddcb524e</tt></a> v0.17.14 postrelease bump

<!-- end of Commits -->
<!-- end of Else -->

<!-- end of If NoteGroups -->
<a name="v0.17.14"></a>
## [v0.17.14](https://github.com/99designs/gqlgen/compare/v0.17.13...v0.17.14) - 2022-08-18
- <a href="https://github.com/99designs/gqlgen/commit/581bf6eb063a0d6a3cec3b6bc7a16ca10e310a97"><tt>581bf6eb</tt></a> release v0.17.14
10
vendor/github.com/99designs/gqlgen/README.md
generated
vendored
@@ -142,16 +142,6 @@ first model in this list is used as the default type and it will always be used

There isn't any way around this, gqlgen has no way to know what you want in a given context.

### Why do my interfaces have getters? Can I disable these?
These were added in v0.17.14 to allow accessing common interface fields without casting to a concrete type.
However, certain fields, like Relay-style Connections, cannot be implemented with simple getters.

If you'd prefer to not have getters generated in your interfaces, you can add the following in your `gqlgen.yml`:
```yaml
# gqlgen.yml
omit_getters: true
```

## Other Resources

- [Christopher Biscardi @ Gophercon UK 2018](https://youtu.be/FdURVezcdcw)
1
vendor/github.com/99designs/gqlgen/codegen/config/config.go
generated
vendored
@@ -26,7 +26,6 @@ type Config struct {
	StructTag                     string                     `yaml:"struct_tag,omitempty"`
	Directives                    map[string]DirectiveConfig `yaml:"directives,omitempty"`
	OmitSliceElementPointers      bool                       `yaml:"omit_slice_element_pointers,omitempty"`
	OmitGetters                   bool                       `yaml:"omit_getters,omitempty"`
	StructFieldsAlwaysPointers    bool                       `yaml:"struct_fields_always_pointers,omitempty"`
	ResolversAlwaysReturnPointers bool                       `yaml:"resolvers_always_return_pointers,omitempty"`
	SkipValidation                bool                       `yaml:"skip_validation,omitempty"`
2
vendor/github.com/99designs/gqlgen/graphql/context_operation.go
generated
vendored
@@ -3,7 +3,6 @@ package graphql

import (
	"context"
	"errors"
	"net/http"

	"github.com/vektah/gqlparser/v2/ast"
)
@@ -16,7 +15,6 @@ type OperationContext struct {
	Variables            map[string]interface{}
	OperationName        string
	Doc                  *ast.QueryDocument
	Headers              http.Header

	Operation            *ast.OperationDefinition
	DisableIntrospection bool
12
vendor/github.com/99designs/gqlgen/graphql/errcode/codes.go
generated
vendored
@@ -23,22 +23,14 @@ var codeType = map[string]ErrorKind{
	ParseFailed: KindProtocol,
}

// RegisterErrorType should be called by extensions that want to customize the http status codes for
// errors they return
// RegisterErrorType should be called by extensions that want to customize the http status codes for errors they return
func RegisterErrorType(code string, kind ErrorKind) {
	codeType[code] = kind
}

// Set the error code on a given graphql error extension
func Set(err error, value string) {
	if err == nil {
		return
	}
	gqlErr, ok := err.(*gqlerror.Error)
	if !ok {
		return
	}

	gqlErr, _ := err.(*gqlerror.Error)
	if gqlErr.Extensions == nil {
		gqlErr.Extensions = map[string]interface{}{}
	}
5
vendor/github.com/99designs/gqlgen/graphql/executable_schema.go
generated
vendored
@@ -118,11 +118,6 @@ func getOrCreateAndAppendField(c *[]CollectedField, name string, alias string, o
				return &(*c)[i]
			}
		}
		for _, ifc := range cf.ObjectDefinition.Interfaces {
			if ifc == objectDefinition.Name {
				return &(*c)[i]
			}
		}
	}
}
Some files were not shown because too many files have changed in this diff.