mirror of
https://github.com/datarhei/core.git
synced 2025-10-26 17:30:31 +08:00
Merge branch 'dev'
This commit is contained in:
34
.github/workflows/go-tests.yml
vendored
34
.github/workflows/go-tests.yml
vendored
@@ -3,20 +3,20 @@ name: tests
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '1.18'
|
||||
- name: Run coverage
|
||||
run: go test -coverprofile=coverage.out -covermode=atomic -v ./...
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v2
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: coverage.out
|
||||
flags: unit-linux
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 2
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.19"
|
||||
- name: Run coverage
|
||||
run: go test -coverprofile=coverage.out -covermode=atomic -v ./...
|
||||
- name: Upload coverage to Codecov
|
||||
uses: codecov/codecov-action@v2
|
||||
with:
|
||||
token: ${{ secrets.CODECOV_TOKEN }}
|
||||
files: coverage.out
|
||||
flags: unit-linux
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# CORE ALPINE BASE IMAGE
|
||||
OS_NAME=alpine
|
||||
OS_VERSION=3.16
|
||||
GOLANG_IMAGE=golang:1.19.3-alpine3.16
|
||||
CORE_VERSION=16.11.0
|
||||
GOLANG_IMAGE=golang:1.20-alpine3.16
|
||||
CORE_VERSION=16.12.0
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
# CORE UBUNTU BASE IMAGE
|
||||
OS_NAME=ubuntu
|
||||
OS_VERSION=20.04
|
||||
GOLANG_IMAGE=golang:1.19.3-alpine3.16
|
||||
CORE_VERSION=16.11.0
|
||||
GOLANG_IMAGE=golang:1.20-alpine3.16
|
||||
CORE_VERSION=16.12.0
|
||||
|
||||
13
CHANGELOG.md
13
CHANGELOG.md
@@ -1,5 +1,18 @@
|
||||
# Core
|
||||
|
||||
### Core v16.11.0 > v16.12.0
|
||||
|
||||
- Add S3 storage support
|
||||
- Add support for variables in placeholde parameter
|
||||
- Add support for RTMP token as stream key as last element in path
|
||||
- Add support for soft memory limit with debug.memory_limit_mbytes in config
|
||||
- Add support for partial process config updates
|
||||
- Add support for alternative syntax for auth0 tenants as environment variable
|
||||
- Fix config timestamps created_at and loaded_at
|
||||
- Fix /config/reload return type
|
||||
- Fix modifying DTS in RTMP packets ([restreamer/#487](https://github.com/datarhei/restreamer/issues/487), [restreamer/#367](https://github.com/datarhei/restreamer/issues/367))
|
||||
- Fix default internal SRT latency to 20ms
|
||||
|
||||
### Core v16.10.1 > v16.11.0
|
||||
|
||||
- Add FFmpeg 4.4 to FFmpeg 5.1 migration tool
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
ARG GOLANG_IMAGE=golang:1.19.3-alpine3.16
|
||||
ARG GOLANG_IMAGE=golang:1.20-alpine3.16
|
||||
|
||||
ARG BUILD_IMAGE=alpine:3.16
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
FROM golang:1.19.3-alpine3.16
|
||||
FROM golang:1.20-alpine3.16
|
||||
|
||||
RUN apk add alpine-sdk
|
||||
|
||||
|
||||
62
README.md
62
README.md
@@ -16,47 +16,47 @@ The datarhei Core is a process management solution for FFmpeg that offers a rang
|
||||
|
||||
The objectives of development are:
|
||||
|
||||
* Unhindered use of FFmpeg processes
|
||||
* Portability of FFmpeg, including management across development and production environments
|
||||
* Scalability of FFmpeg-based applications through the ability to offload processes to additional instances
|
||||
* Streamlining of media product development by focusing on features and design.
|
||||
- Unhindered use of FFmpeg processes
|
||||
- Portability of FFmpeg, including management across development and production environments
|
||||
- Scalability of FFmpeg-based applications through the ability to offload processes to additional instances
|
||||
- Streamlining of media product development by focusing on features and design.
|
||||
|
||||
## What issues have been resolved thus far?
|
||||
|
||||
### Process management
|
||||
|
||||
* Run multiple processes via API
|
||||
* Unrestricted FFmpeg commands in process configuration.
|
||||
* Error detection and recovery (e.g., FFmpeg stalls, dumps)
|
||||
* Referencing for process chaining (pipelines)
|
||||
* Placeholders for storage, RTMP, and SRT usage (automatic credentials management and URL resolution)
|
||||
* Logs (access to current stdout/stderr)
|
||||
* Log history (configurable log history, e.g., for error analysis)
|
||||
* Resource limitation (max. CPU and MEMORY usage per process)
|
||||
* Statistics (like FFmpeg progress per input and output, CPU and MEMORY, state, uptime)
|
||||
* Input verification (like FFprobe)
|
||||
* Metadata (option to store additional information like a title)
|
||||
- Run multiple processes via API
|
||||
- Unrestricted FFmpeg commands in process configuration.
|
||||
- Error detection and recovery (e.g., FFmpeg stalls, dumps)
|
||||
- Referencing for process chaining (pipelines)
|
||||
- Placeholders for storage, RTMP, and SRT usage (automatic credentials management and URL resolution)
|
||||
- Logs (access to current stdout/stderr)
|
||||
- Log history (configurable log history, e.g., for error analysis)
|
||||
- Resource limitation (max. CPU and MEMORY usage per process)
|
||||
- Statistics (like FFmpeg progress per input and output, CPU and MEMORY, state, uptime)
|
||||
- Input verification (like FFprobe)
|
||||
- Metadata (option to store additional information like a title)
|
||||
|
||||
### Media delivery
|
||||
|
||||
* Configurable file systems (in-memory, disk-mount, S3)
|
||||
* HTTP/S, RTMP/S, and SRT services, including Let's Encrypt
|
||||
* Bandwidth and session limiting for HLS/MPEG DASH sessions (protects restreams from congestion)
|
||||
* Viewer session API and logging
|
||||
- Configurable file systems (in-memory, disk-mount, S3)
|
||||
- HTTP/S, RTMP/S, and SRT services, including Let's Encrypt
|
||||
- Bandwidth and session limiting for HLS/MPEG DASH sessions (protects restreams from congestion)
|
||||
- Viewer session API and logging
|
||||
|
||||
### Misc
|
||||
|
||||
* HTTP REST and GraphQL API
|
||||
* Swagger documentation
|
||||
* Metrics incl. Prometheus support (also detects POSIX and cgroups resources)
|
||||
* Docker images for fast setup of development environments up to the integration of cloud resources
|
||||
- HTTP REST and GraphQL API
|
||||
- Swagger documentation
|
||||
- Metrics incl. Prometheus support (also detects POSIX and cgroups resources)
|
||||
- Docker images for fast setup of development environments up to the integration of cloud resources
|
||||
|
||||
## Docker images
|
||||
|
||||
- datarhei/core:latest (AMD64, ARM64, ARMv7)
|
||||
- datarhei/core:cuda-latest (Nvidia CUDA 11.7.1, AMD64)
|
||||
- datarhei/core:rpi-latest (Raspberry Pi / OMX/V4L2-M2M, AMD64/ARMv7)
|
||||
- datarhei/core:vaapi-latest (Intel VAAPI, AMD64)
|
||||
- datarhei/core:latest (AMD64, ARM64, ARMv7)
|
||||
- datarhei/core:cuda-latest (Nvidia CUDA 11.7.1, AMD64)
|
||||
- datarhei/core:rpi-latest (Raspberry Pi / OMX/V4L2-M2M, AMD64/ARMv7)
|
||||
- datarhei/core:vaapi-latest (Intel VAAPI, AMD64)
|
||||
|
||||
## Quick start
|
||||
|
||||
@@ -82,10 +82,10 @@ docker run --name core -d \
|
||||
|
||||
Documentation is available on [docs.datarhei.com/core](https://docs.datarhei.com/core).
|
||||
|
||||
- [Quick start](https://docs.datarhei.com/core/guides/beginner)
|
||||
- [Installation](https://docs.datarhei.com/core/installation)
|
||||
- [Configuration](https://docs.datarhei.com/core/configuration)
|
||||
- [Coding](https://docs.datarhei.com/core/development/coding)
|
||||
- [Quick start](https://docs.datarhei.com/core/guides/beginner)
|
||||
- [Installation](https://docs.datarhei.com/core/installation)
|
||||
- [Configuration](https://docs.datarhei.com/core/configuration)
|
||||
- [Coding](https://docs.datarhei.com/core/development/coding)
|
||||
|
||||
## License
|
||||
|
||||
|
||||
363
app/api/api.go
363
app/api/api.go
@@ -6,6 +6,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
golog "log"
|
||||
"math"
|
||||
gonet "net"
|
||||
gohttp "net/http"
|
||||
"net/url"
|
||||
@@ -21,6 +22,7 @@ import (
|
||||
"github.com/datarhei/core/v16/ffmpeg"
|
||||
"github.com/datarhei/core/v16/http"
|
||||
"github.com/datarhei/core/v16/http/cache"
|
||||
httpfs "github.com/datarhei/core/v16/http/fs"
|
||||
"github.com/datarhei/core/v16/http/jwt"
|
||||
"github.com/datarhei/core/v16/http/router"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
@@ -30,8 +32,9 @@ import (
|
||||
"github.com/datarhei/core/v16/net"
|
||||
"github.com/datarhei/core/v16/prometheus"
|
||||
"github.com/datarhei/core/v16/restream"
|
||||
restreamapp "github.com/datarhei/core/v16/restream/app"
|
||||
"github.com/datarhei/core/v16/restream/replace"
|
||||
"github.com/datarhei/core/v16/restream/store"
|
||||
restreamstore "github.com/datarhei/core/v16/restream/store"
|
||||
"github.com/datarhei/core/v16/rtmp"
|
||||
"github.com/datarhei/core/v16/service"
|
||||
"github.com/datarhei/core/v16/session"
|
||||
@@ -39,6 +42,7 @@ import (
|
||||
"github.com/datarhei/core/v16/update"
|
||||
|
||||
"github.com/caddyserver/certmagic"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// The API interface is the implementation for the restreamer API.
|
||||
@@ -66,6 +70,7 @@ type api struct {
|
||||
ffmpeg ffmpeg.FFmpeg
|
||||
diskfs fs.Filesystem
|
||||
memfs fs.Filesystem
|
||||
s3fs map[string]fs.Filesystem
|
||||
rtmpserver rtmp.Server
|
||||
srtserver srt.Server
|
||||
metrics monitor.HistoryMonitor
|
||||
@@ -115,6 +120,7 @@ var ErrConfigReload = fmt.Errorf("configuration reload")
|
||||
func New(configpath string, logwriter io.Writer) (API, error) {
|
||||
a := &api{
|
||||
state: "idle",
|
||||
s3fs: map[string]fs.Filesystem{},
|
||||
}
|
||||
|
||||
a.config.path = configpath
|
||||
@@ -147,7 +153,8 @@ func (a *api) Reload() error {
|
||||
|
||||
logger := log.New("Core").WithOutput(log.NewConsoleWriter(a.log.writer, log.Lwarn, true))
|
||||
|
||||
store, err := configstore.NewJSON(a.config.path, func() {
|
||||
rootfs, _ := fs.NewDiskFilesystem(fs.DiskConfig{})
|
||||
store, err := configstore.NewJSON(rootfs, a.config.path, func() {
|
||||
a.errorChan <- ErrConfigReload
|
||||
})
|
||||
if err != nil {
|
||||
@@ -227,6 +234,8 @@ func (a *api) Reload() error {
|
||||
|
||||
logger.Info().WithFields(logfields).Log("")
|
||||
|
||||
logger.Info().WithField("path", a.config.path).Log("Read config file")
|
||||
|
||||
configlogger := logger.WithComponent("Config")
|
||||
cfg.Messages(func(level string, v configvars.Variable, message string) {
|
||||
configlogger = configlogger.WithFields(log.Fields{
|
||||
@@ -253,6 +262,8 @@ func (a *api) Reload() error {
|
||||
return fmt.Errorf("not all variables are set or valid")
|
||||
}
|
||||
|
||||
cfg.LoadedAt = time.Now()
|
||||
|
||||
store.SetActive(cfg)
|
||||
|
||||
a.config.store = store
|
||||
@@ -285,7 +296,13 @@ func (a *api) start() error {
|
||||
}
|
||||
|
||||
if cfg.Sessions.Persist {
|
||||
sessionConfig.PersistDir = filepath.Join(cfg.DB.Dir, "sessions")
|
||||
fs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{
|
||||
Root: filepath.Join(cfg.DB.Dir, "sessions"),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create filesystem for persisting sessions: %w", err)
|
||||
}
|
||||
sessionConfig.PersistFS = fs
|
||||
}
|
||||
|
||||
sessions, err := session.New(sessionConfig)
|
||||
@@ -364,13 +381,18 @@ func (a *api) start() error {
|
||||
a.sessions = sessions
|
||||
}
|
||||
|
||||
diskfs, err := fs.NewDiskFilesystem(fs.DiskConfig{
|
||||
Dir: cfg.Storage.Disk.Dir,
|
||||
Size: cfg.Storage.Disk.Size * 1024 * 1024,
|
||||
diskfs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{
|
||||
Root: cfg.Storage.Disk.Dir,
|
||||
Logger: a.log.logger.core.WithComponent("DiskFS"),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("disk filesystem: %w", err)
|
||||
}
|
||||
|
||||
if diskfsRoot, err := filepath.Abs(cfg.Storage.Disk.Dir); err != nil {
|
||||
return err
|
||||
} else {
|
||||
diskfs.SetMetadata("base", diskfsRoot)
|
||||
}
|
||||
|
||||
a.diskfs = diskfs
|
||||
@@ -392,17 +414,60 @@ func (a *api) start() error {
|
||||
}
|
||||
|
||||
if a.memfs == nil {
|
||||
memfs := fs.NewMemFilesystem(fs.MemConfig{
|
||||
Base: baseMemFS.String(),
|
||||
Size: cfg.Storage.Memory.Size * 1024 * 1024,
|
||||
Purge: cfg.Storage.Memory.Purge,
|
||||
memfs, _ := fs.NewMemFilesystem(fs.MemConfig{
|
||||
Logger: a.log.logger.core.WithComponent("MemFS"),
|
||||
})
|
||||
|
||||
a.memfs = memfs
|
||||
memfs.SetMetadata("base", baseMemFS.String())
|
||||
|
||||
sizedfs, _ := fs.NewSizedFilesystem(memfs, cfg.Storage.Memory.Size*1024*1024, cfg.Storage.Memory.Purge)
|
||||
|
||||
a.memfs = sizedfs
|
||||
} else {
|
||||
a.memfs.Rebase(baseMemFS.String())
|
||||
a.memfs.Resize(cfg.Storage.Memory.Size * 1024 * 1024)
|
||||
a.memfs.SetMetadata("base", baseMemFS.String())
|
||||
if sizedfs, ok := a.memfs.(fs.SizedFilesystem); ok {
|
||||
sizedfs.Resize(cfg.Storage.Memory.Size * 1024 * 1024)
|
||||
}
|
||||
}
|
||||
|
||||
for _, s3 := range cfg.Storage.S3 {
|
||||
if _, ok := a.s3fs[s3.Name]; ok {
|
||||
return fmt.Errorf("the name '%s' for a s3 filesystem is already in use", s3.Name)
|
||||
}
|
||||
|
||||
baseS3FS := url.URL{
|
||||
Scheme: "http",
|
||||
Path: s3.Mountpoint,
|
||||
}
|
||||
|
||||
host, port, _ := gonet.SplitHostPort(cfg.Address)
|
||||
if len(host) == 0 {
|
||||
baseS3FS.Host = "localhost:" + port
|
||||
} else {
|
||||
baseS3FS.Host = cfg.Address
|
||||
}
|
||||
|
||||
if s3.Auth.Enable {
|
||||
baseS3FS.User = url.UserPassword(s3.Auth.Username, s3.Auth.Password)
|
||||
}
|
||||
|
||||
s3fs, err := fs.NewS3Filesystem(fs.S3Config{
|
||||
Name: s3.Name,
|
||||
Endpoint: s3.Endpoint,
|
||||
AccessKeyID: s3.AccessKeyID,
|
||||
SecretAccessKey: s3.SecretAccessKey,
|
||||
Region: s3.Region,
|
||||
Bucket: s3.Bucket,
|
||||
UseSSL: s3.UseSSL,
|
||||
Logger: a.log.logger.core.WithComponent("FS"),
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("s3 filesystem (%s): %w", s3.Name, err)
|
||||
}
|
||||
|
||||
s3fs.SetMetadata("base", baseS3FS.String())
|
||||
|
||||
a.s3fs[s3.Name] = s3fs
|
||||
}
|
||||
|
||||
var portrange net.Portranger
|
||||
@@ -410,18 +475,18 @@ func (a *api) start() error {
|
||||
if cfg.Playout.Enable {
|
||||
portrange, err = net.NewPortrange(cfg.Playout.MinPort, cfg.Playout.MaxPort)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("playout port range: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
validatorIn, err := ffmpeg.NewValidator(cfg.FFmpeg.Access.Input.Allow, cfg.FFmpeg.Access.Input.Block)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("input address validator: %w", err)
|
||||
}
|
||||
|
||||
validatorOut, err := ffmpeg.NewValidator(cfg.FFmpeg.Access.Output.Allow, cfg.FFmpeg.Access.Output.Block)
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("output address validator: %w", err)
|
||||
}
|
||||
|
||||
ffmpeg, err := ffmpeg.New(ffmpeg.Config{
|
||||
@@ -435,7 +500,7 @@ func (a *api) start() error {
|
||||
Collector: a.sessions.Collector("ffmpeg"),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
return fmt.Errorf("unable to create ffmpeg: %w", err)
|
||||
}
|
||||
|
||||
a.ffmpeg = ffmpeg
|
||||
@@ -443,53 +508,103 @@ func (a *api) start() error {
|
||||
a.replacer = replace.New()
|
||||
|
||||
{
|
||||
a.replacer.RegisterTemplate("diskfs", a.diskfs.Base())
|
||||
a.replacer.RegisterTemplate("memfs", a.memfs.Base())
|
||||
a.replacer.RegisterTemplateFunc("diskfs", func(config *restreamapp.Config, section string) string {
|
||||
return a.diskfs.Metadata("base")
|
||||
}, nil)
|
||||
|
||||
host, port, _ := gonet.SplitHostPort(cfg.RTMP.Address)
|
||||
if len(host) == 0 {
|
||||
host = "localhost"
|
||||
a.replacer.RegisterTemplateFunc("fs:disk", func(config *restreamapp.Config, section string) string {
|
||||
return a.diskfs.Metadata("base")
|
||||
}, nil)
|
||||
|
||||
a.replacer.RegisterTemplateFunc("memfs", func(config *restreamapp.Config, section string) string {
|
||||
return a.memfs.Metadata("base")
|
||||
}, nil)
|
||||
|
||||
a.replacer.RegisterTemplateFunc("fs:mem", func(config *restreamapp.Config, section string) string {
|
||||
return a.memfs.Metadata("base")
|
||||
}, nil)
|
||||
|
||||
for name, s3 := range a.s3fs {
|
||||
a.replacer.RegisterTemplate("fs:"+name, s3.Metadata("base"), nil)
|
||||
}
|
||||
|
||||
template := "rtmp://" + host + ":" + port
|
||||
if cfg.RTMP.App != "/" {
|
||||
template += cfg.RTMP.App
|
||||
}
|
||||
template += "/{name}"
|
||||
a.replacer.RegisterTemplateFunc("rtmp", func(config *restreamapp.Config, section string) string {
|
||||
host, port, _ := gonet.SplitHostPort(cfg.RTMP.Address)
|
||||
if len(host) == 0 {
|
||||
host = "localhost"
|
||||
}
|
||||
|
||||
if len(cfg.RTMP.Token) != 0 {
|
||||
template += "?token=" + cfg.RTMP.Token
|
||||
}
|
||||
template := "rtmp://" + host + ":" + port
|
||||
if cfg.RTMP.App != "/" {
|
||||
template += cfg.RTMP.App
|
||||
}
|
||||
template += "/{name}"
|
||||
|
||||
a.replacer.RegisterTemplate("rtmp", template)
|
||||
if len(cfg.RTMP.Token) != 0 {
|
||||
template += "?token=" + cfg.RTMP.Token
|
||||
}
|
||||
|
||||
host, port, _ = gonet.SplitHostPort(cfg.SRT.Address)
|
||||
if len(host) == 0 {
|
||||
host = "localhost"
|
||||
}
|
||||
return template
|
||||
}, nil)
|
||||
|
||||
template = "srt://" + host + ":" + port + "?mode=caller&transtype=live&streamid=#!:m={mode},r={name}"
|
||||
if len(cfg.SRT.Token) != 0 {
|
||||
template += ",token=" + cfg.SRT.Token
|
||||
}
|
||||
if len(cfg.SRT.Passphrase) != 0 {
|
||||
template += "&passphrase=" + cfg.SRT.Passphrase
|
||||
}
|
||||
a.replacer.RegisterTemplate("srt", template)
|
||||
a.replacer.RegisterTemplateFunc("srt", func(config *restreamapp.Config, section string) string {
|
||||
host, port, _ = gonet.SplitHostPort(cfg.SRT.Address)
|
||||
if len(host) == 0 {
|
||||
host = "localhost"
|
||||
}
|
||||
|
||||
template := "srt://" + host + ":" + port + "?mode=caller&transtype=live&latency={latency}&streamid={name}"
|
||||
if section == "output" {
|
||||
template += ",mode:publish"
|
||||
} else {
|
||||
template += ",mode:request"
|
||||
}
|
||||
if len(cfg.SRT.Token) != 0 {
|
||||
template += ",token:" + cfg.SRT.Token
|
||||
}
|
||||
if len(cfg.SRT.Passphrase) != 0 {
|
||||
template += "&passphrase=" + cfg.SRT.Passphrase
|
||||
}
|
||||
|
||||
return template
|
||||
}, map[string]string{
|
||||
"latency": "20000", // 20 milliseconds, FFmpeg requires microseconds
|
||||
})
|
||||
}
|
||||
|
||||
store := store.NewJSONStore(store.JSONConfig{
|
||||
Filepath: cfg.DB.Dir + "/db.json",
|
||||
FFVersion: a.ffmpeg.Skills().FFmpeg.Version,
|
||||
Logger: a.log.logger.core.WithComponent("ProcessStore"),
|
||||
})
|
||||
filesystems := []fs.Filesystem{
|
||||
a.diskfs,
|
||||
a.memfs,
|
||||
}
|
||||
|
||||
for _, fs := range a.s3fs {
|
||||
filesystems = append(filesystems, fs)
|
||||
}
|
||||
|
||||
var store restreamstore.Store = nil
|
||||
|
||||
{
|
||||
fs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{
|
||||
Root: cfg.DB.Dir,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
store, err = restreamstore.NewJSON(restreamstore.JSONConfig{
|
||||
Filesystem: fs,
|
||||
Filepath: "/db.json",
|
||||
Logger: a.log.logger.core.WithComponent("ProcessStore"),
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
restream, err := restream.New(restream.Config{
|
||||
ID: cfg.ID,
|
||||
Name: cfg.Name,
|
||||
Store: store,
|
||||
DiskFS: a.diskfs,
|
||||
MemFS: a.memfs,
|
||||
Filesystems: filesystems,
|
||||
Replace: a.replacer,
|
||||
FFmpeg: a.ffmpeg,
|
||||
MaxProcesses: cfg.FFmpeg.MaxProcesses,
|
||||
@@ -557,9 +672,12 @@ func (a *api) start() error {
|
||||
metrics.Register(monitor.NewCPUCollector())
|
||||
metrics.Register(monitor.NewMemCollector())
|
||||
metrics.Register(monitor.NewNetCollector())
|
||||
metrics.Register(monitor.NewDiskCollector(a.diskfs.Base()))
|
||||
metrics.Register(monitor.NewFilesystemCollector("diskfs", diskfs))
|
||||
metrics.Register(monitor.NewDiskCollector(a.diskfs.Metadata("base")))
|
||||
metrics.Register(monitor.NewFilesystemCollector("diskfs", a.diskfs))
|
||||
metrics.Register(monitor.NewFilesystemCollector("memfs", a.memfs))
|
||||
for name, fs := range a.s3fs {
|
||||
metrics.Register(monitor.NewFilesystemCollector(name, fs))
|
||||
}
|
||||
metrics.Register(monitor.NewRestreamCollector(a.restream))
|
||||
metrics.Register(monitor.NewFFmpegCollector(a.ffmpeg))
|
||||
metrics.Register(monitor.NewSessionCollector(a.sessions, []string{}))
|
||||
@@ -634,7 +752,7 @@ func (a *api) start() error {
|
||||
}
|
||||
|
||||
if cfg.Storage.Disk.Cache.Enable {
|
||||
diskCache, err := cache.NewLRUCache(cache.LRUConfig{
|
||||
cache, err := cache.NewLRUCache(cache.LRUConfig{
|
||||
TTL: time.Duration(cfg.Storage.Disk.Cache.TTL) * time.Second,
|
||||
MaxSize: cfg.Storage.Disk.Cache.Size * 1024 * 1024,
|
||||
MaxFileSize: cfg.Storage.Disk.Cache.FileSize * 1024 * 1024,
|
||||
@@ -644,10 +762,10 @@ func (a *api) start() error {
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create disk cache: %w", err)
|
||||
return fmt.Errorf("unable to create cache: %w", err)
|
||||
}
|
||||
|
||||
a.cache = diskCache
|
||||
a.cache = cache
|
||||
}
|
||||
|
||||
var autocertManager *certmagic.Config
|
||||
@@ -655,26 +773,28 @@ func (a *api) start() error {
|
||||
if cfg.TLS.Enable {
|
||||
if cfg.TLS.Auto {
|
||||
if len(cfg.Host.Name) == 0 {
|
||||
return fmt.Errorf("at least one host must be provided in host.name or RS_HOST_NAME")
|
||||
return fmt.Errorf("at least one host must be provided in host.name or CORE_HOST_NAME")
|
||||
}
|
||||
|
||||
certmagic.Default.Storage = &certmagic.FileStorage{
|
||||
Path: cfg.DB.Dir + "/cert",
|
||||
}
|
||||
certmagic.Default.DefaultServerName = cfg.Host.Name[0]
|
||||
certmagic.Default.Logger = zap.NewNop()
|
||||
|
||||
certmagic.DefaultACME.Agreed = true
|
||||
certmagic.DefaultACME.Email = cfg.TLS.Email
|
||||
certmagic.DefaultACME.CA = certmagic.LetsEncryptProductionCA
|
||||
certmagic.DefaultACME.DisableHTTPChallenge = false
|
||||
certmagic.DefaultACME.DisableTLSALPNChallenge = true
|
||||
certmagic.DefaultACME.Logger = nil
|
||||
|
||||
certmagic.Default.Storage = &certmagic.FileStorage{
|
||||
Path: cfg.DB.Dir + "/cert",
|
||||
}
|
||||
certmagic.Default.DefaultServerName = cfg.Host.Name[0]
|
||||
certmagic.Default.Logger = nil
|
||||
certmagic.DefaultACME.Logger = zap.NewNop()
|
||||
|
||||
magic := certmagic.NewDefault()
|
||||
acme := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME)
|
||||
acme.Logger = zap.NewNop()
|
||||
|
||||
magic.Issuers = []certmagic.Issuer{acme}
|
||||
magic.Logger = zap.NewNop()
|
||||
|
||||
autocertManager = magic
|
||||
|
||||
@@ -713,6 +833,19 @@ func (a *api) start() error {
|
||||
if err != nil {
|
||||
logger.Error().WithField("error", err).Log("Failed to acquire certificate")
|
||||
certerror = true
|
||||
/*
|
||||
problems, err := letsdebug.Check(host, letsdebug.HTTP01)
|
||||
if err != nil {
|
||||
logger.Error().WithField("error", err).Log("Failed to debug certificate acquisition")
|
||||
}
|
||||
|
||||
for _, p := range problems {
|
||||
logger.Error().WithFields(log.Fields{
|
||||
"name": p.Name,
|
||||
"detail": p.Detail,
|
||||
}).Log(p.Explanation)
|
||||
}
|
||||
*/
|
||||
break
|
||||
}
|
||||
|
||||
@@ -820,22 +953,61 @@ func (a *api) start() error {
|
||||
|
||||
a.log.logger.main = a.log.logger.core.WithComponent(logcontext).WithField("address", cfg.Address)
|
||||
|
||||
mainserverhandler, err := http.NewServer(http.Config{
|
||||
httpfilesystems := []httpfs.FS{
|
||||
{
|
||||
Name: a.diskfs.Name(),
|
||||
Mountpoint: "",
|
||||
AllowWrite: false,
|
||||
EnableAuth: false,
|
||||
Username: "",
|
||||
Password: "",
|
||||
DefaultFile: "index.html",
|
||||
DefaultContentType: "text/html",
|
||||
Gzip: true,
|
||||
Filesystem: a.diskfs,
|
||||
Cache: a.cache,
|
||||
},
|
||||
{
|
||||
Name: a.memfs.Name(),
|
||||
Mountpoint: "/memfs",
|
||||
AllowWrite: true,
|
||||
EnableAuth: cfg.Storage.Memory.Auth.Enable,
|
||||
Username: cfg.Storage.Memory.Auth.Username,
|
||||
Password: cfg.Storage.Memory.Auth.Password,
|
||||
DefaultFile: "",
|
||||
DefaultContentType: "application/data",
|
||||
Gzip: true,
|
||||
Filesystem: a.memfs,
|
||||
Cache: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, s3 := range cfg.Storage.S3 {
|
||||
httpfilesystems = append(httpfilesystems, httpfs.FS{
|
||||
Name: s3.Name,
|
||||
Mountpoint: s3.Mountpoint,
|
||||
AllowWrite: true,
|
||||
EnableAuth: s3.Auth.Enable,
|
||||
Username: s3.Auth.Username,
|
||||
Password: s3.Auth.Password,
|
||||
DefaultFile: "",
|
||||
DefaultContentType: "application/data",
|
||||
Gzip: true,
|
||||
Filesystem: a.s3fs[s3.Name],
|
||||
Cache: a.cache,
|
||||
})
|
||||
}
|
||||
|
||||
serverConfig := http.Config{
|
||||
Logger: a.log.logger.main,
|
||||
LogBuffer: a.log.buffer,
|
||||
Restream: a.restream,
|
||||
Metrics: a.metrics,
|
||||
Prometheus: a.prom,
|
||||
MimeTypesFile: cfg.Storage.MimeTypes,
|
||||
DiskFS: a.diskfs,
|
||||
MemFS: http.MemFSConfig{
|
||||
EnableAuth: cfg.Storage.Memory.Auth.Enable,
|
||||
Username: cfg.Storage.Memory.Auth.Username,
|
||||
Password: cfg.Storage.Memory.Auth.Password,
|
||||
Filesystem: a.memfs,
|
||||
},
|
||||
IPLimiter: iplimiter,
|
||||
Profiling: cfg.Debug.Profiling,
|
||||
Filesystems: httpfilesystems,
|
||||
IPLimiter: iplimiter,
|
||||
Profiling: cfg.Debug.Profiling,
|
||||
Cors: http.CorsConfig{
|
||||
Origins: cfg.Storage.CORS.Origins,
|
||||
},
|
||||
@@ -843,11 +1015,12 @@ func (a *api) start() error {
|
||||
SRT: a.srtserver,
|
||||
JWT: a.httpjwt,
|
||||
Config: a.config.store,
|
||||
Cache: a.cache,
|
||||
Sessions: a.sessions,
|
||||
Router: router,
|
||||
ReadOnly: cfg.API.ReadOnly,
|
||||
})
|
||||
}
|
||||
|
||||
mainserverhandler, err := http.NewServer(serverConfig)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create server: %w", err)
|
||||
@@ -882,34 +1055,10 @@ func (a *api) start() error {
|
||||
|
||||
a.log.logger.sidecar = a.log.logger.core.WithComponent("HTTP").WithField("address", cfg.Address)
|
||||
|
||||
sidecarserverhandler, err := http.NewServer(http.Config{
|
||||
Logger: a.log.logger.sidecar,
|
||||
LogBuffer: a.log.buffer,
|
||||
Restream: a.restream,
|
||||
Metrics: a.metrics,
|
||||
Prometheus: a.prom,
|
||||
MimeTypesFile: cfg.Storage.MimeTypes,
|
||||
DiskFS: a.diskfs,
|
||||
MemFS: http.MemFSConfig{
|
||||
EnableAuth: cfg.Storage.Memory.Auth.Enable,
|
||||
Username: cfg.Storage.Memory.Auth.Username,
|
||||
Password: cfg.Storage.Memory.Auth.Password,
|
||||
Filesystem: a.memfs,
|
||||
},
|
||||
IPLimiter: iplimiter,
|
||||
Profiling: cfg.Debug.Profiling,
|
||||
Cors: http.CorsConfig{
|
||||
Origins: cfg.Storage.CORS.Origins,
|
||||
},
|
||||
RTMP: a.rtmpserver,
|
||||
SRT: a.srtserver,
|
||||
JWT: a.httpjwt,
|
||||
Config: a.config.store,
|
||||
Cache: a.cache,
|
||||
Sessions: a.sessions,
|
||||
Router: router,
|
||||
ReadOnly: cfg.API.ReadOnly,
|
||||
})
|
||||
serverConfig.Logger = a.log.logger.sidecar
|
||||
serverConfig.IPLimiter = iplimiter
|
||||
|
||||
sidecarserverhandler, err := http.NewServer(serverConfig)
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create sidecar HTTP server: %w", err)
|
||||
@@ -1101,6 +1250,12 @@ func (a *api) start() error {
|
||||
}(ctx)
|
||||
}
|
||||
|
||||
if cfg.Debug.MemoryLimit > 0 {
|
||||
debug.SetMemoryLimit(cfg.Debug.MemoryLimit * 1024 * 1024)
|
||||
} else {
|
||||
debug.SetMemoryLimit(math.MaxInt64)
|
||||
}
|
||||
|
||||
// Start the restream processes
|
||||
restream.Start()
|
||||
|
||||
@@ -1267,7 +1422,7 @@ func (a *api) Destroy() {
|
||||
|
||||
// Free the MemFS
|
||||
if a.memfs != nil {
|
||||
a.memfs.DeleteAll()
|
||||
a.memfs.RemoveAll()
|
||||
a.memfs = nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
cfgvars "github.com/datarhei/core/v16/config/vars"
|
||||
"github.com/datarhei/core/v16/ffmpeg"
|
||||
"github.com/datarhei/core/v16/io/file"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/datarhei/core/v16/log"
|
||||
"github.com/datarhei/core/v16/restream/store"
|
||||
|
||||
@@ -22,7 +23,11 @@ func main() {
|
||||
"to": "ffmpeg5",
|
||||
})
|
||||
|
||||
configstore, err := cfgstore.NewJSON(os.Getenv("CORE_CONFIGFILE"), nil)
|
||||
configfile := cfgstore.Location(os.Getenv("CORE_CONFIGFILE"))
|
||||
|
||||
diskfs, _ := fs.NewDiskFilesystem(fs.DiskConfig{})
|
||||
|
||||
configstore, err := cfgstore.NewJSON(diskfs, configfile, nil)
|
||||
if err != nil {
|
||||
logger.Error().WithError(err).Log("Loading configuration failed")
|
||||
os.Exit(1)
|
||||
@@ -115,9 +120,12 @@ func doMigration(logger log.Logger, configstore cfgstore.Store) error {
|
||||
logger.Info().WithField("backup", backupFilepath).Log("Backup created")
|
||||
|
||||
// Load the existing DB
|
||||
datastore := store.NewJSONStore(store.JSONConfig{
|
||||
datastore, err := store.NewJSON(store.JSONConfig{
|
||||
Filepath: cfg.DB.Dir + "/db.json",
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
data, err := datastore.Load()
|
||||
if err != nil {
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/datarhei/core/v16/encoding/json"
|
||||
"github.com/datarhei/core/v16/ffmpeg"
|
||||
"github.com/datarhei/core/v16/ffmpeg/skills"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/datarhei/core/v16/restream"
|
||||
"github.com/datarhei/core/v16/restream/app"
|
||||
"github.com/datarhei/core/v16/restream/store"
|
||||
@@ -495,14 +496,14 @@ type importConfigAudio struct {
|
||||
sampling string
|
||||
}
|
||||
|
||||
func importV1(path string, cfg importConfig) (store.StoreData, error) {
|
||||
func importV1(fs fs.Filesystem, path string, cfg importConfig) (store.StoreData, error) {
|
||||
if len(cfg.id) == 0 {
|
||||
cfg.id = uuid.New().String()
|
||||
}
|
||||
|
||||
r := store.NewStoreData()
|
||||
|
||||
jsondata, err := os.ReadFile(path)
|
||||
jsondata, err := fs.ReadFile(path)
|
||||
if err != nil {
|
||||
return r, fmt.Errorf("failed to read data from %s: %w", path, err)
|
||||
}
|
||||
@@ -1417,9 +1418,19 @@ func probeInput(binary string, config app.Config) app.Probe {
|
||||
return app.Probe{}
|
||||
}
|
||||
|
||||
dummyfs, _ := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
store, err := store.NewJSON(store.JSONConfig{
|
||||
Filesystem: dummyfs,
|
||||
Filepath: "/",
|
||||
Logger: nil,
|
||||
})
|
||||
if err != nil {
|
||||
return app.Probe{}
|
||||
}
|
||||
|
||||
rs, err := restream.New(restream.Config{
|
||||
FFmpeg: ffmpeg,
|
||||
Store: store.NewDummyStore(store.DummyConfig{}),
|
||||
Store: store,
|
||||
})
|
||||
if err != nil {
|
||||
return app.Probe{}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/datarhei/core/v16/encoding/json"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/datarhei/core/v16/restream/store"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -36,8 +37,13 @@ import (
|
||||
var id string = "4186b095-7f0a-4e94-8c3d-f17459ab252f"
|
||||
|
||||
func testV1Import(t *testing.T, v1Fixture, v4Fixture string, config importConfig) {
|
||||
diskfs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{
|
||||
Root: ".",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Import v1 database
|
||||
v4, err := importV1(v1Fixture, config)
|
||||
v4, err := importV1(diskfs, v1Fixture, config)
|
||||
require.Equal(t, nil, err)
|
||||
|
||||
// Reset variants
|
||||
@@ -50,7 +56,7 @@ func testV1Import(t *testing.T, v1Fixture, v4Fixture string, config importConfig
|
||||
require.Equal(t, nil, err)
|
||||
|
||||
// Read the wanted result
|
||||
wantdatav4, err := os.ReadFile(v4Fixture)
|
||||
wantdatav4, err := diskfs.ReadFile(v4Fixture)
|
||||
require.Equal(t, nil, err)
|
||||
|
||||
var wantv4 store.StoreData
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
|
||||
cfgstore "github.com/datarhei/core/v16/config/store"
|
||||
cfgvars "github.com/datarhei/core/v16/config/vars"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/datarhei/core/v16/log"
|
||||
"github.com/datarhei/core/v16/restream/store"
|
||||
|
||||
@@ -15,18 +16,26 @@ import (
|
||||
func main() {
|
||||
logger := log.New("Import").WithOutput(log.NewConsoleWriter(os.Stderr, log.Linfo, true)).WithField("version", "v1")
|
||||
|
||||
configstore, err := cfgstore.NewJSON(os.Getenv("CORE_CONFIGFILE"), nil)
|
||||
configfile := cfgstore.Location(os.Getenv("CORE_CONFIGFILE"))
|
||||
|
||||
diskfs, err := fs.NewDiskFilesystem(fs.DiskConfig{})
|
||||
if err != nil {
|
||||
logger.Error().WithError(err).Log("Access disk filesystem failed")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
configstore, err := cfgstore.NewJSON(diskfs, configfile, nil)
|
||||
if err != nil {
|
||||
logger.Error().WithError(err).Log("Loading configuration failed")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err := doImport(logger, configstore); err != nil {
|
||||
if err := doImport(logger, diskfs, configstore); err != nil {
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func doImport(logger log.Logger, configstore cfgstore.Store) error {
|
||||
func doImport(logger log.Logger, fs fs.Filesystem, configstore cfgstore.Store) error {
|
||||
if logger == nil {
|
||||
logger = log.New("")
|
||||
}
|
||||
@@ -65,23 +74,27 @@ func doImport(logger log.Logger, configstore cfgstore.Store) error {
|
||||
|
||||
logger = logger.WithField("database", v1filename)
|
||||
|
||||
if _, err := os.Stat(v1filename); err != nil {
|
||||
if _, err := fs.Stat(v1filename); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
logger.Info().Log("Database doesn't exist and nothing will be imported")
|
||||
return nil
|
||||
}
|
||||
|
||||
logger.Error().WithError(err).Log("Checking for v1 database")
|
||||
|
||||
return fmt.Errorf("checking for v1 database: %w", err)
|
||||
}
|
||||
|
||||
logger.Info().Log("Found database")
|
||||
|
||||
// Load an existing DB
|
||||
datastore := store.NewJSONStore(store.JSONConfig{
|
||||
Filepath: cfg.DB.Dir + "/db.json",
|
||||
datastore, err := store.NewJSON(store.JSONConfig{
|
||||
Filesystem: fs,
|
||||
Filepath: cfg.DB.Dir + "/db.json",
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error().WithError(err).Log("Creating datastore for new database failed")
|
||||
return fmt.Errorf("creating datastore for new database failed: %w", err)
|
||||
}
|
||||
|
||||
data, err := datastore.Load()
|
||||
if err != nil {
|
||||
@@ -103,7 +116,7 @@ func doImport(logger log.Logger, configstore cfgstore.Store) error {
|
||||
importConfig.binary = cfg.FFmpeg.Binary
|
||||
|
||||
// Rewrite the old database to the new database
|
||||
r, err := importV1(v1filename, importConfig)
|
||||
r, err := importV1(fs, v1filename, importConfig)
|
||||
if err != nil {
|
||||
logger.Error().WithError(err).Log("Importing database failed")
|
||||
return fmt.Errorf("importing database failed: %w", err)
|
||||
|
||||
@@ -1,20 +1,30 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/datarhei/core/v16/config/store"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestImport(t *testing.T) {
|
||||
configstore := store.NewDummy()
|
||||
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
require.NoError(t, err)
|
||||
|
||||
memfs.WriteFileReader("/mime.types", strings.NewReader("foobar"))
|
||||
memfs.WriteFileReader("/bin/ffmpeg", strings.NewReader("foobar"))
|
||||
|
||||
configstore, err := store.NewJSON(memfs, "/config.json", nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := configstore.Get()
|
||||
|
||||
err := configstore.Set(cfg)
|
||||
err = configstore.Set(cfg)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = doImport(nil, configstore)
|
||||
err = doImport(nil, memfs, configstore)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ func (v versionInfo) MinorString() string {
|
||||
// Version of the app
|
||||
var Version = versionInfo{
|
||||
Major: 16,
|
||||
Minor: 11,
|
||||
Minor: 12,
|
||||
Patch: 0,
|
||||
}
|
||||
|
||||
|
||||
@@ -6,11 +6,13 @@ import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
haikunator "github.com/atrox/haikunatorgo/v2"
|
||||
"github.com/datarhei/core/v16/config/copy"
|
||||
"github.com/datarhei/core/v16/config/value"
|
||||
"github.com/datarhei/core/v16/config/vars"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/datarhei/core/v16/math/rand"
|
||||
|
||||
haikunator "github.com/atrox/haikunatorgo/v2"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
@@ -45,14 +47,21 @@ const version int64 = 3
|
||||
|
||||
// Config is a wrapper for Data
|
||||
type Config struct {
|
||||
fs fs.Filesystem
|
||||
vars vars.Variables
|
||||
|
||||
Data
|
||||
}
|
||||
|
||||
// New returns a Config which is initialized with its default values
|
||||
func New() *Config {
|
||||
config := &Config{}
|
||||
func New(f fs.Filesystem) *Config {
|
||||
config := &Config{
|
||||
fs: f,
|
||||
}
|
||||
|
||||
if config.fs == nil {
|
||||
config.fs, _ = fs.NewMemFilesystem(fs.MemConfig{})
|
||||
}
|
||||
|
||||
config.init()
|
||||
|
||||
@@ -69,7 +78,7 @@ func (d *Config) Set(name, val string) error {
|
||||
|
||||
// NewConfigFrom returns a clone of a Config
|
||||
func (d *Config) Clone() *Config {
|
||||
data := New()
|
||||
data := New(d.fs)
|
||||
|
||||
data.CreatedAt = d.CreatedAt
|
||||
data.LoadedAt = d.LoadedAt
|
||||
@@ -111,6 +120,7 @@ func (d *Config) Clone() *Config {
|
||||
data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
|
||||
data.Storage.Disk.Cache.Types.Allow = copy.Slice(d.Storage.Disk.Cache.Types.Allow)
|
||||
data.Storage.Disk.Cache.Types.Block = copy.Slice(d.Storage.Disk.Cache.Types.Block)
|
||||
data.Storage.S3 = copy.Slice(d.Storage.S3)
|
||||
|
||||
data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
|
||||
data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
|
||||
@@ -143,7 +153,7 @@ func (d *Config) init() {
|
||||
d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)
|
||||
|
||||
// DB
|
||||
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
|
||||
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config", d.fs), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
|
||||
|
||||
// Host
|
||||
d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
|
||||
@@ -172,14 +182,14 @@ func (d *Config) init() {
|
||||
d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
|
||||
d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
|
||||
d.vars.Register(value.NewEmail(&d.TLS.Email, "cert@datarhei.com"), "tls.email", "CORE_TLS_EMAIL", nil, "Email for Let's Encrypt registration", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.CertFile, "", d.fs), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
|
||||
|
||||
// Storage
|
||||
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
|
||||
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
|
||||
|
||||
// Storage (Disk)
|
||||
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
|
||||
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
|
||||
@@ -195,6 +205,9 @@ func (d *Config) init() {
|
||||
d.vars.Register(value.NewInt64(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. allowed megabytes for /memfs, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false)
|
||||
|
||||
// Storage (S3)
|
||||
d.vars.Register(value.NewS3StorageListValue(&d.Storage.S3, []value.S3Storage{}, "|"), "storage.s3", "CORE_STORAGE_S3", nil, "List of S3 storage URLS", false, false)
|
||||
|
||||
// Storage (CORS)
|
||||
d.vars.Register(value.NewCORSOrigins(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false)
|
||||
|
||||
@@ -215,7 +228,7 @@ func (d *Config) init() {
|
||||
d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)
|
||||
|
||||
// FFmpeg
|
||||
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
|
||||
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
|
||||
d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
|
||||
@@ -232,6 +245,7 @@ func (d *Config) init() {
|
||||
// Debug
|
||||
d.vars.Register(value.NewBool(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Debug.MemoryLimit, 0), "debug.memory_limit_mbytes", "CORE_DEBUG_MEMORY_LIMIT_MBYTES", nil, "Impose a soft memory limit for the core, in megabytes", false, false)
|
||||
|
||||
// Metrics
|
||||
d.vars.Register(value.NewBool(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false)
|
||||
@@ -256,7 +270,7 @@ func (d *Config) init() {
|
||||
// Router
|
||||
d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
|
||||
d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
|
||||
d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
|
||||
d.vars.Register(value.NewDir(&d.Router.UIPath, "", d.fs), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
|
||||
}
|
||||
|
||||
// Validate validates the current state of the Config for completeness and sanity. Errors are
|
||||
@@ -374,6 +388,21 @@ func (d *Config) Validate(resetLogs bool) {
|
||||
}
|
||||
}
|
||||
|
||||
if len(d.Storage.S3) != 0 {
|
||||
names := map[string]struct{}{
|
||||
"disk": {},
|
||||
"mem": {},
|
||||
}
|
||||
|
||||
for _, s3 := range d.Storage.S3 {
|
||||
if _, ok := names[s3.Name]; ok {
|
||||
d.vars.Log("error", "storage.s3", "the name %s is already in use or reserved", s3.Name)
|
||||
}
|
||||
|
||||
names[s3.Name] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
// If playout is enabled, check that the port range is sane
|
||||
if d.Playout.Enable {
|
||||
if d.Playout.MinPort >= d.Playout.MaxPort {
|
||||
|
||||
@@ -1,13 +1,18 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/datarhei/core/v16/config/vars"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestConfigCopy(t *testing.T) {
|
||||
config1 := New()
|
||||
fs, _ := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
config1 := New(fs)
|
||||
|
||||
config1.Version = 42
|
||||
config1.DB.Dir = "foo"
|
||||
@@ -50,3 +55,30 @@ func TestConfigCopy(t *testing.T) {
|
||||
require.Equal(t, []string{"bar.com"}, config1.Host.Name)
|
||||
require.Equal(t, []string{"foo.com"}, config2.Host.Name)
|
||||
}
|
||||
|
||||
func TestValidateDefault(t *testing.T) {
|
||||
fs, err := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
require.NoError(t, err)
|
||||
|
||||
size, fresh, err := fs.WriteFileReader("./mime.types", strings.NewReader("xxxxx"))
|
||||
require.Equal(t, int64(5), size)
|
||||
require.Equal(t, true, fresh)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = fs.WriteFileReader("/bin/ffmpeg", strings.NewReader("xxxxx"))
|
||||
require.NoError(t, err)
|
||||
|
||||
cfg := New(fs)
|
||||
|
||||
cfg.Validate(true)
|
||||
|
||||
errors := []string{}
|
||||
cfg.Messages(func(level string, v vars.Variable, message string) {
|
||||
if level == "error" {
|
||||
errors = append(errors, message)
|
||||
}
|
||||
})
|
||||
|
||||
require.Equal(t, 0, len(cfg.Overrides()))
|
||||
require.Equal(t, false, cfg.HasErrors(), errors)
|
||||
}
|
||||
|
||||
@@ -6,14 +6,15 @@ import (
|
||||
"github.com/datarhei/core/v16/config/copy"
|
||||
v2 "github.com/datarhei/core/v16/config/v2"
|
||||
"github.com/datarhei/core/v16/config/value"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
)
|
||||
|
||||
// Data is the actual configuration data for the app
|
||||
type Data struct {
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
LoadedAt time.Time `json:"-"`
|
||||
UpdatedAt time.Time `json:"-"`
|
||||
Version int64 `json:"version" jsonschema:"minimum=3,maximum=3"`
|
||||
CreatedAt time.Time `json:"created_at"` // When this config has been persisted
|
||||
LoadedAt time.Time `json:"-"` // When this config has been actually used
|
||||
UpdatedAt time.Time `json:"-"` // Irrelevant
|
||||
Version int64 `json:"version" jsonschema:"minimum=3,maximum=3" format:"int64"`
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Address string `json:"address"`
|
||||
@@ -21,7 +22,7 @@ type Data struct {
|
||||
Log struct {
|
||||
Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"`
|
||||
Topics []string `json:"topics"`
|
||||
MaxLines int `json:"max_lines"`
|
||||
MaxLines int `json:"max_lines" format:"int"`
|
||||
} `json:"log"`
|
||||
DB struct {
|
||||
Dir string `json:"dir"`
|
||||
@@ -67,12 +68,12 @@ type Data struct {
|
||||
Storage struct {
|
||||
Disk struct {
|
||||
Dir string `json:"dir"`
|
||||
Size int64 `json:"max_size_mbytes"`
|
||||
Size int64 `json:"max_size_mbytes" format:"int64"`
|
||||
Cache struct {
|
||||
Enable bool `json:"enable"`
|
||||
Size uint64 `json:"max_size_mbytes"`
|
||||
TTL int64 `json:"ttl_seconds"`
|
||||
FileSize uint64 `json:"max_file_size_mbytes"`
|
||||
Size uint64 `json:"max_size_mbytes" format:"uint64"`
|
||||
TTL int64 `json:"ttl_seconds" format:"int64"`
|
||||
FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"`
|
||||
Types struct {
|
||||
Allow []string `json:"allow"`
|
||||
Block []string `json:"block"`
|
||||
@@ -85,9 +86,10 @@ type Data struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
} `json:"auth"`
|
||||
Size int64 `json:"max_size_mbytes"`
|
||||
Size int64 `json:"max_size_mbytes" format:"int64"`
|
||||
Purge bool `json:"purge"`
|
||||
} `json:"memory"`
|
||||
S3 []value.S3Storage `json:"s3"`
|
||||
CORS struct {
|
||||
Origins []string `json:"origins"`
|
||||
} `json:"cors"`
|
||||
@@ -113,7 +115,7 @@ type Data struct {
|
||||
} `json:"srt"`
|
||||
FFmpeg struct {
|
||||
Binary string `json:"binary"`
|
||||
MaxProcesses int64 `json:"max_processes"`
|
||||
MaxProcesses int64 `json:"max_processes" format:"int64"`
|
||||
Access struct {
|
||||
Input struct {
|
||||
Allow []string `json:"allow"`
|
||||
@@ -125,33 +127,34 @@ type Data struct {
|
||||
} `json:"output"`
|
||||
} `json:"access"`
|
||||
Log struct {
|
||||
MaxLines int `json:"max_lines"`
|
||||
MaxHistory int `json:"max_history"`
|
||||
MaxLines int `json:"max_lines" format:"int"`
|
||||
MaxHistory int `json:"max_history" format:"int"`
|
||||
} `json:"log"`
|
||||
} `json:"ffmpeg"`
|
||||
Playout struct {
|
||||
Enable bool `json:"enable"`
|
||||
MinPort int `json:"min_port"`
|
||||
MaxPort int `json:"max_port"`
|
||||
MinPort int `json:"min_port" format:"int"`
|
||||
MaxPort int `json:"max_port" format:"int"`
|
||||
} `json:"playout"`
|
||||
Debug struct {
|
||||
Profiling bool `json:"profiling"`
|
||||
ForceGC int `json:"force_gc"`
|
||||
Profiling bool `json:"profiling"`
|
||||
ForceGC int `json:"force_gc" format:"int"`
|
||||
MemoryLimit int64 `json:"memory_limit_mbytes" format:"int64"`
|
||||
} `json:"debug"`
|
||||
Metrics struct {
|
||||
Enable bool `json:"enable"`
|
||||
EnablePrometheus bool `json:"enable_prometheus"`
|
||||
Range int64 `json:"range_sec"` // seconds
|
||||
Interval int64 `json:"interval_sec"` // seconds
|
||||
Range int64 `json:"range_sec" format:"int64"` // seconds
|
||||
Interval int64 `json:"interval_sec" format:"int64"` // seconds
|
||||
} `json:"metrics"`
|
||||
Sessions struct {
|
||||
Enable bool `json:"enable"`
|
||||
IPIgnoreList []string `json:"ip_ignorelist"`
|
||||
SessionTimeout int `json:"session_timeout_sec"`
|
||||
SessionTimeout int `json:"session_timeout_sec" format:"int"`
|
||||
Persist bool `json:"persist"`
|
||||
PersistInterval int `json:"persist_interval_sec"`
|
||||
MaxBitrate uint64 `json:"max_bitrate_mbit"`
|
||||
MaxSessions uint64 `json:"max_sessions"`
|
||||
PersistInterval int `json:"persist_interval_sec" format:"int"`
|
||||
MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"`
|
||||
MaxSessions uint64 `json:"max_sessions" format:"uint64"`
|
||||
} `json:"sessions"`
|
||||
Service struct {
|
||||
Enable bool `json:"enable"`
|
||||
@@ -165,8 +168,8 @@ type Data struct {
|
||||
} `json:"router"`
|
||||
}
|
||||
|
||||
func UpgradeV2ToV3(d *v2.Data) (*Data, error) {
|
||||
cfg := New()
|
||||
func UpgradeV2ToV3(d *v2.Data, fs fs.Filesystem) (*Data, error) {
|
||||
cfg := New(fs)
|
||||
|
||||
return MergeV2toV3(&cfg.Data, d)
|
||||
}
|
||||
@@ -189,7 +192,6 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) {
|
||||
data.SRT = d.SRT
|
||||
data.FFmpeg = d.FFmpeg
|
||||
data.Playout = d.Playout
|
||||
data.Debug = d.Debug
|
||||
data.Metrics = d.Metrics
|
||||
data.Sessions = d.Sessions
|
||||
data.Service = d.Service
|
||||
@@ -228,6 +230,10 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) {
|
||||
data.Storage.Memory = d.Storage.Memory
|
||||
|
||||
// Actual changes
|
||||
data.Debug.Profiling = d.Debug.Profiling
|
||||
data.Debug.ForceGC = d.Debug.ForceGC
|
||||
data.Debug.MemoryLimit = 0
|
||||
|
||||
data.TLS.Enable = d.TLS.Enable
|
||||
data.TLS.Address = d.TLS.Address
|
||||
data.TLS.Auto = d.TLS.Auto
|
||||
@@ -242,6 +248,8 @@ func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) {
|
||||
data.Storage.Disk.Cache.TTL = d.Storage.Disk.Cache.TTL
|
||||
data.Storage.Disk.Cache.Types.Allow = copy.Slice(d.Storage.Disk.Cache.Types)
|
||||
|
||||
data.Storage.S3 = []value.S3Storage{}
|
||||
|
||||
data.Version = 3
|
||||
|
||||
return data, nil
|
||||
@@ -267,7 +275,6 @@ func DowngradeV3toV2(d *Data) (*v2.Data, error) {
|
||||
data.SRT = d.SRT
|
||||
data.FFmpeg = d.FFmpeg
|
||||
data.Playout = d.Playout
|
||||
data.Debug = d.Debug
|
||||
data.Metrics = d.Metrics
|
||||
data.Sessions = d.Sessions
|
||||
data.Service = d.Service
|
||||
@@ -299,6 +306,9 @@ func DowngradeV3toV2(d *Data) (*v2.Data, error) {
|
||||
data.Router.Routes = copy.StringMap(d.Router.Routes)
|
||||
|
||||
// Actual changes
|
||||
data.Debug.Profiling = d.Debug.Profiling
|
||||
data.Debug.ForceGC = d.Debug.ForceGC
|
||||
|
||||
data.TLS.Enable = d.TLS.Enable
|
||||
data.TLS.Address = d.TLS.Address
|
||||
data.TLS.Auto = d.TLS.Auto
|
||||
|
||||
36
config/data_test.go
Normal file
36
config/data_test.go
Normal file
@@ -0,0 +1,36 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
v2 "github.com/datarhei/core/v16/config/v2"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestUpgrade(t *testing.T) {
|
||||
fs, _ := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
|
||||
v2cfg := v2.New(fs)
|
||||
v2cfg.Storage.Disk.Cache.Types = []string{".foo", ".bar"}
|
||||
|
||||
v3cfg, err := UpgradeV2ToV3(&v2cfg.Data, fs)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(3), v3cfg.Version)
|
||||
require.ElementsMatch(t, []string{".foo", ".bar"}, v3cfg.Storage.Disk.Cache.Types.Allow)
|
||||
require.ElementsMatch(t, []string{".m3u8", ".mpd"}, v3cfg.Storage.Disk.Cache.Types.Block)
|
||||
}
|
||||
|
||||
func TestDowngrade(t *testing.T) {
|
||||
fs, _ := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
|
||||
v3cfg := New(fs)
|
||||
v3cfg.Storage.Disk.Cache.Types.Allow = []string{".foo", ".bar"}
|
||||
|
||||
v2cfg, err := DowngradeV3toV2(&v3cfg.Data)
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(2), v2cfg.Version)
|
||||
require.ElementsMatch(t, []string{".foo", ".bar"}, v2cfg.Storage.Disk.Cache.Types)
|
||||
}
|
||||
@@ -1,73 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/datarhei/core/v16/config"
|
||||
)
|
||||
|
||||
type dummyStore struct {
|
||||
current *config.Config
|
||||
active *config.Config
|
||||
}
|
||||
|
||||
// NewDummyStore returns a store that returns the default config
|
||||
func NewDummy() Store {
|
||||
s := &dummyStore{}
|
||||
|
||||
cfg := config.New()
|
||||
|
||||
cfg.DB.Dir = "."
|
||||
cfg.FFmpeg.Binary = "true"
|
||||
cfg.Storage.Disk.Dir = "."
|
||||
cfg.Storage.MimeTypes = ""
|
||||
|
||||
s.current = cfg
|
||||
|
||||
cfg = config.New()
|
||||
|
||||
cfg.DB.Dir = "."
|
||||
cfg.FFmpeg.Binary = "true"
|
||||
cfg.Storage.Disk.Dir = "."
|
||||
cfg.Storage.MimeTypes = ""
|
||||
|
||||
s.active = cfg
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *dummyStore) Get() *config.Config {
|
||||
return c.current.Clone()
|
||||
}
|
||||
|
||||
func (c *dummyStore) Set(d *config.Config) error {
|
||||
d.Validate(true)
|
||||
|
||||
if d.HasErrors() {
|
||||
return fmt.Errorf("configuration data has errors after validation")
|
||||
}
|
||||
|
||||
c.current = d.Clone()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dummyStore) GetActive() *config.Config {
|
||||
return c.active.Clone()
|
||||
}
|
||||
|
||||
func (c *dummyStore) SetActive(d *config.Config) error {
|
||||
d.Validate(true)
|
||||
|
||||
if d.HasErrors() {
|
||||
return fmt.Errorf("configuration data has errors after validation")
|
||||
}
|
||||
|
||||
c.active = d.Clone()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dummyStore) Reload() error {
|
||||
return nil
|
||||
}
|
||||
@@ -5,16 +5,16 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/core/v16/config"
|
||||
v1 "github.com/datarhei/core/v16/config/v1"
|
||||
v2 "github.com/datarhei/core/v16/config/v2"
|
||||
"github.com/datarhei/core/v16/encoding/json"
|
||||
"github.com/datarhei/core/v16/io/file"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
)
|
||||
|
||||
type jsonStore struct {
|
||||
fs fs.Filesystem
|
||||
path string
|
||||
|
||||
data map[string]*config.Config
|
||||
@@ -22,18 +22,32 @@ type jsonStore struct {
|
||||
reloadFn func()
|
||||
}
|
||||
|
||||
// NewJSONStore will read a JSON config file from the given path. After successfully reading it in, it will be written
|
||||
// back to the path. The returned error will be nil if everything went fine.
|
||||
// If the path doesn't exist, a default JSON config file will be written to that path.
|
||||
// The returned ConfigStore can be used to retrieve or write the config.
|
||||
func NewJSON(path string, reloadFn func()) (Store, error) {
|
||||
// NewJSONStore will read the JSON config file from the given path. After successfully reading it in, it will be written
|
||||
// back to the path. The returned error will be nil if everything went fine. If the path doesn't exist, a default JSON
|
||||
// config file will be written to that path. The returned ConfigStore can be used to retrieve or write the config.
|
||||
func NewJSON(f fs.Filesystem, path string, reloadFn func()) (Store, error) {
|
||||
c := &jsonStore{
|
||||
path: path,
|
||||
fs: f,
|
||||
data: make(map[string]*config.Config),
|
||||
reloadFn: reloadFn,
|
||||
}
|
||||
|
||||
c.data["base"] = config.New()
|
||||
path, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to determine absolute path of '%s': %w", path, err)
|
||||
}
|
||||
|
||||
c.path = path
|
||||
|
||||
if len(c.path) == 0 {
|
||||
c.path = "/config.json"
|
||||
}
|
||||
|
||||
if c.fs == nil {
|
||||
return nil, fmt.Errorf("no valid filesystem provided")
|
||||
}
|
||||
|
||||
c.data["base"] = config.New(f)
|
||||
|
||||
if err := c.load(c.data["base"]); err != nil {
|
||||
return nil, fmt.Errorf("failed to read JSON from '%s': %w", path, err)
|
||||
@@ -57,14 +71,10 @@ func (c *jsonStore) Set(d *config.Config) error {
|
||||
|
||||
data := d.Clone()
|
||||
|
||||
data.CreatedAt = time.Now()
|
||||
|
||||
if err := c.store(data); err != nil {
|
||||
return fmt.Errorf("failed to write JSON to '%s': %w", c.path, err)
|
||||
}
|
||||
|
||||
data.UpdatedAt = time.Now()
|
||||
|
||||
c.data["base"] = data
|
||||
|
||||
return nil
|
||||
@@ -89,7 +99,9 @@ func (c *jsonStore) SetActive(d *config.Config) error {
|
||||
return fmt.Errorf("configuration data has errors after validation")
|
||||
}
|
||||
|
||||
c.data["merged"] = d.Clone()
|
||||
data := d.Clone()
|
||||
|
||||
c.data["merged"] = data
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -109,15 +121,19 @@ func (c *jsonStore) load(cfg *config.Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
if _, err := os.Stat(c.path); os.IsNotExist(err) {
|
||||
if _, err := c.fs.Stat(c.path); os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
jsondata, err := os.ReadFile(c.path)
|
||||
jsondata, err := c.fs.ReadFile(c.path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(jsondata) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
data, err := migrate(jsondata)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -125,15 +141,12 @@ func (c *jsonStore) load(cfg *config.Config) error {
|
||||
|
||||
cfg.Data = *data
|
||||
|
||||
cfg.LoadedAt = time.Now()
|
||||
cfg.UpdatedAt = cfg.LoadedAt
|
||||
cfg.UpdatedAt = cfg.CreatedAt
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *jsonStore) store(data *config.Config) error {
|
||||
data.CreatedAt = time.Now()
|
||||
|
||||
if len(c.path) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -143,28 +156,9 @@ func (c *jsonStore) store(data *config.Config) error {
|
||||
return err
|
||||
}
|
||||
|
||||
dir, filename := filepath.Split(c.path)
|
||||
_, _, err = c.fs.WriteFileSafe(c.path, jsondata)
|
||||
|
||||
tmpfile, err := os.CreateTemp(dir, filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
if _, err := tmpfile.Write(jsondata); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tmpfile.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := file.Rename(tmpfile.Name(), c.path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
|
||||
func migrate(jsondata []byte) (*config.Data, error) {
|
||||
@@ -176,38 +170,38 @@ func migrate(jsondata []byte) (*config.Data, error) {
|
||||
}
|
||||
|
||||
if version.Version == 1 {
|
||||
dataV1 := &v1.New().Data
|
||||
dataV1 := &v1.New(nil).Data
|
||||
|
||||
if err := gojson.Unmarshal(jsondata, dataV1); err != nil {
|
||||
return nil, json.FormatError(jsondata, err)
|
||||
}
|
||||
|
||||
dataV2, err := v2.UpgradeV1ToV2(dataV1)
|
||||
dataV2, err := v2.UpgradeV1ToV2(dataV1, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dataV3, err := config.UpgradeV2ToV3(dataV2)
|
||||
dataV3, err := config.UpgradeV2ToV3(dataV2, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data = dataV3
|
||||
} else if version.Version == 2 {
|
||||
dataV2 := &v2.New().Data
|
||||
dataV2 := &v2.New(nil).Data
|
||||
|
||||
if err := gojson.Unmarshal(jsondata, dataV2); err != nil {
|
||||
return nil, json.FormatError(jsondata, err)
|
||||
}
|
||||
|
||||
dataV3, err := config.UpgradeV2ToV3(dataV2)
|
||||
dataV3, err := config.UpgradeV2ToV3(dataV2, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
data = dataV3
|
||||
} else if version.Version == 3 {
|
||||
dataV3 := &config.New().Data
|
||||
dataV3 := &config.New(nil).Data
|
||||
|
||||
if err := gojson.Unmarshal(jsondata, dataV3); err != nil {
|
||||
return nil, json.FormatError(jsondata, err)
|
||||
|
||||
@@ -18,7 +18,7 @@ func TestMigrationV1ToV3(t *testing.T) {
|
||||
jsondatav3, err := os.ReadFile("./fixtures/config_v1_v3.json")
|
||||
require.NoError(t, err)
|
||||
|
||||
datav3 := config.New()
|
||||
datav3 := config.New(nil)
|
||||
json.Unmarshal(jsondatav3, datav3)
|
||||
|
||||
data, err := migrate(jsondatav1)
|
||||
@@ -37,7 +37,7 @@ func TestMigrationV2ToV3(t *testing.T) {
|
||||
jsondatav3, err := os.ReadFile("./fixtures/config_v2_v3.json")
|
||||
require.NoError(t, err)
|
||||
|
||||
datav3 := config.New()
|
||||
datav3 := config.New(nil)
|
||||
json.Unmarshal(jsondatav3, datav3)
|
||||
|
||||
data, err := migrate(jsondatav2)
|
||||
|
||||
53
config/store/location.go
Normal file
53
config/store/location.go
Normal file
@@ -0,0 +1,53 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
)
|
||||
|
||||
// Location returns the path to the config file. If no path is provided,
|
||||
// different standard location will be probed:
|
||||
// - os.UserConfigDir() + /datarhei-core/config.js
|
||||
// - os.UserHomeDir() + /.config/datarhei-core/config.js
|
||||
// - ./config/config.js
|
||||
// If the config doesn't exist in none of these locations, it will be assumed
|
||||
// at ./config/config.js
|
||||
func Location(filepath string) string {
|
||||
configfile := filepath
|
||||
if len(configfile) != 0 {
|
||||
return configfile
|
||||
}
|
||||
|
||||
locations := []string{}
|
||||
|
||||
if dir, err := os.UserConfigDir(); err == nil {
|
||||
locations = append(locations, dir+"/datarhei-core/config.js")
|
||||
}
|
||||
|
||||
if dir, err := os.UserHomeDir(); err == nil {
|
||||
locations = append(locations, dir+"/.config/datarhei-core/config.js")
|
||||
}
|
||||
|
||||
locations = append(locations, "./config/config.js")
|
||||
|
||||
for _, path := range locations {
|
||||
info, err := os.Stat(path)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
configfile = path
|
||||
}
|
||||
|
||||
if len(configfile) == 0 {
|
||||
configfile = "./config/config.js"
|
||||
}
|
||||
|
||||
os.MkdirAll(path.Dir(configfile), 0740)
|
||||
|
||||
return configfile
|
||||
}
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/datarhei/core/v16/config/copy"
|
||||
"github.com/datarhei/core/v16/config/value"
|
||||
"github.com/datarhei/core/v16/config/vars"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/datarhei/core/v16/math/rand"
|
||||
|
||||
haikunator "github.com/atrox/haikunatorgo/v2"
|
||||
@@ -21,14 +22,21 @@ const version int64 = 1
|
||||
|
||||
// Config is a wrapper for Data
|
||||
type Config struct {
|
||||
fs fs.Filesystem
|
||||
vars vars.Variables
|
||||
|
||||
Data
|
||||
}
|
||||
|
||||
// New returns a Config which is initialized with its default values
|
||||
func New() *Config {
|
||||
cfg := &Config{}
|
||||
func New(f fs.Filesystem) *Config {
|
||||
cfg := &Config{
|
||||
fs: f,
|
||||
}
|
||||
|
||||
if cfg.fs == nil {
|
||||
cfg.fs, _ = fs.NewMemFilesystem(fs.MemConfig{})
|
||||
}
|
||||
|
||||
cfg.init()
|
||||
|
||||
@@ -45,7 +53,7 @@ func (d *Config) Set(name, val string) error {
|
||||
|
||||
// NewConfigFrom returns a clone of a Config
|
||||
func (d *Config) Clone() *Config {
|
||||
data := New()
|
||||
data := New(d.fs)
|
||||
|
||||
data.CreatedAt = d.CreatedAt
|
||||
data.LoadedAt = d.LoadedAt
|
||||
@@ -118,7 +126,7 @@ func (d *Config) init() {
|
||||
d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)
|
||||
|
||||
// DB
|
||||
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
|
||||
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config", d.fs), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
|
||||
|
||||
// Host
|
||||
d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
|
||||
@@ -146,14 +154,14 @@ func (d *Config) init() {
|
||||
d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false)
|
||||
d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
|
||||
d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.CertFile, "", d.fs), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
|
||||
|
||||
// Storage
|
||||
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
|
||||
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
|
||||
|
||||
// Storage (Disk)
|
||||
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
|
||||
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
|
||||
@@ -187,7 +195,7 @@ func (d *Config) init() {
|
||||
d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)
|
||||
|
||||
// FFmpeg
|
||||
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
|
||||
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
|
||||
d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
|
||||
@@ -228,7 +236,7 @@ func (d *Config) init() {
|
||||
// Router
|
||||
d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
|
||||
d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
|
||||
d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
|
||||
d.vars.Register(value.NewDir(&d.Router.UIPath, "", d.fs), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
|
||||
}
|
||||
|
||||
// Validate validates the current state of the Config for completeness and sanity. Errors are
|
||||
|
||||
@@ -10,7 +10,7 @@ type Data struct {
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
LoadedAt time.Time `json:"-"`
|
||||
UpdatedAt time.Time `json:"-"`
|
||||
Version int64 `json:"version" jsonschema:"minimum=1,maximum=1"`
|
||||
Version int64 `json:"version" jsonschema:"minimum=1,maximum=1" format:"int64"`
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Address string `json:"address"`
|
||||
@@ -18,7 +18,7 @@ type Data struct {
|
||||
Log struct {
|
||||
Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"`
|
||||
Topics []string `json:"topics"`
|
||||
MaxLines int `json:"max_lines"`
|
||||
MaxLines int `json:"max_lines" format:"int"`
|
||||
} `json:"log"`
|
||||
DB struct {
|
||||
Dir string `json:"dir"`
|
||||
@@ -63,12 +63,12 @@ type Data struct {
|
||||
Storage struct {
|
||||
Disk struct {
|
||||
Dir string `json:"dir"`
|
||||
Size int64 `json:"max_size_mbytes"`
|
||||
Size int64 `json:"max_size_mbytes" format:"int64"`
|
||||
Cache struct {
|
||||
Enable bool `json:"enable"`
|
||||
Size uint64 `json:"max_size_mbytes"`
|
||||
TTL int64 `json:"ttl_seconds"`
|
||||
FileSize uint64 `json:"max_file_size_mbytes"`
|
||||
Size uint64 `json:"max_size_mbytes" format:"uint64"`
|
||||
TTL int64 `json:"ttl_seconds" format:"int64"`
|
||||
FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"`
|
||||
Types []string `json:"types"`
|
||||
} `json:"cache"`
|
||||
} `json:"disk"`
|
||||
@@ -78,7 +78,7 @@ type Data struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
} `json:"auth"`
|
||||
Size int64 `json:"max_size_mbytes"`
|
||||
Size int64 `json:"max_size_mbytes" format:"int64"`
|
||||
Purge bool `json:"purge"`
|
||||
} `json:"memory"`
|
||||
CORS struct {
|
||||
@@ -105,7 +105,7 @@ type Data struct {
|
||||
} `json:"srt"`
|
||||
FFmpeg struct {
|
||||
Binary string `json:"binary"`
|
||||
MaxProcesses int64 `json:"max_processes"`
|
||||
MaxProcesses int64 `json:"max_processes" format:"int64"`
|
||||
Access struct {
|
||||
Input struct {
|
||||
Allow []string `json:"allow"`
|
||||
@@ -117,33 +117,33 @@ type Data struct {
|
||||
} `json:"output"`
|
||||
} `json:"access"`
|
||||
Log struct {
|
||||
MaxLines int `json:"max_lines"`
|
||||
MaxHistory int `json:"max_history"`
|
||||
MaxLines int `json:"max_lines" format:"int"`
|
||||
MaxHistory int `json:"max_history" format:"int"`
|
||||
} `json:"log"`
|
||||
} `json:"ffmpeg"`
|
||||
Playout struct {
|
||||
Enable bool `json:"enable"`
|
||||
MinPort int `json:"min_port"`
|
||||
MaxPort int `json:"max_port"`
|
||||
MinPort int `json:"min_port" format:"int"`
|
||||
MaxPort int `json:"max_port" format:"int"`
|
||||
} `json:"playout"`
|
||||
Debug struct {
|
||||
Profiling bool `json:"profiling"`
|
||||
ForceGC int `json:"force_gc"`
|
||||
ForceGC int `json:"force_gc" format:"int"`
|
||||
} `json:"debug"`
|
||||
Metrics struct {
|
||||
Enable bool `json:"enable"`
|
||||
EnablePrometheus bool `json:"enable_prometheus"`
|
||||
Range int64 `json:"range_sec"` // seconds
|
||||
Interval int64 `json:"interval_sec"` // seconds
|
||||
Range int64 `json:"range_sec" format:"int64"` // seconds
|
||||
Interval int64 `json:"interval_sec" format:"int64"` // seconds
|
||||
} `json:"metrics"`
|
||||
Sessions struct {
|
||||
Enable bool `json:"enable"`
|
||||
IPIgnoreList []string `json:"ip_ignorelist"`
|
||||
SessionTimeout int `json:"session_timeout_sec"`
|
||||
SessionTimeout int `json:"session_timeout_sec" format:"int"`
|
||||
Persist bool `json:"persist"`
|
||||
PersistInterval int `json:"persist_interval_sec"`
|
||||
MaxBitrate uint64 `json:"max_bitrate_mbit"`
|
||||
MaxSessions uint64 `json:"max_sessions"`
|
||||
PersistInterval int `json:"persist_interval_sec" format:"int"`
|
||||
MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"`
|
||||
MaxSessions uint64 `json:"max_sessions" format:"uint64"`
|
||||
} `json:"sessions"`
|
||||
Service struct {
|
||||
Enable bool `json:"enable"`
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"github.com/datarhei/core/v16/config/copy"
|
||||
"github.com/datarhei/core/v16/config/value"
|
||||
"github.com/datarhei/core/v16/config/vars"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/datarhei/core/v16/math/rand"
|
||||
|
||||
haikunator "github.com/atrox/haikunatorgo/v2"
|
||||
@@ -21,14 +22,21 @@ const version int64 = 2
|
||||
|
||||
// Config is a wrapper for Data
|
||||
type Config struct {
|
||||
fs fs.Filesystem
|
||||
vars vars.Variables
|
||||
|
||||
Data
|
||||
}
|
||||
|
||||
// New returns a Config which is initialized with its default values
|
||||
func New() *Config {
|
||||
cfg := &Config{}
|
||||
func New(f fs.Filesystem) *Config {
|
||||
cfg := &Config{
|
||||
fs: f,
|
||||
}
|
||||
|
||||
if cfg.fs == nil {
|
||||
cfg.fs, _ = fs.NewMemFilesystem(fs.MemConfig{})
|
||||
}
|
||||
|
||||
cfg.init()
|
||||
|
||||
@@ -45,7 +53,7 @@ func (d *Config) Set(name, val string) error {
|
||||
|
||||
// NewConfigFrom returns a clone of a Config
|
||||
func (d *Config) Clone() *Config {
|
||||
data := New()
|
||||
data := New(d.fs)
|
||||
|
||||
data.CreatedAt = d.CreatedAt
|
||||
data.LoadedAt = d.LoadedAt
|
||||
@@ -118,7 +126,7 @@ func (d *Config) init() {
|
||||
d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)
|
||||
|
||||
// DB
|
||||
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
|
||||
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config", d.fs), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
|
||||
|
||||
// Host
|
||||
d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
|
||||
@@ -146,14 +154,14 @@ func (d *Config) init() {
|
||||
d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false)
|
||||
d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
|
||||
d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.CertFile, "", d.fs), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
|
||||
|
||||
// Storage
|
||||
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
|
||||
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
|
||||
|
||||
// Storage (Disk)
|
||||
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
|
||||
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
|
||||
@@ -188,7 +196,7 @@ func (d *Config) init() {
|
||||
d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)
|
||||
|
||||
// FFmpeg
|
||||
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
|
||||
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
|
||||
d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
|
||||
@@ -229,7 +237,7 @@ func (d *Config) init() {
|
||||
// Router
|
||||
d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
|
||||
d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
|
||||
d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
|
||||
d.vars.Register(value.NewDir(&d.Router.UIPath, "", d.fs), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
|
||||
}
|
||||
|
||||
// Validate validates the current state of the Config for completeness and sanity. Errors are
|
||||
|
||||
@@ -10,13 +10,14 @@ import (
|
||||
"github.com/datarhei/core/v16/config/copy"
|
||||
v1 "github.com/datarhei/core/v16/config/v1"
|
||||
"github.com/datarhei/core/v16/config/value"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
)
|
||||
|
||||
type Data struct {
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
LoadedAt time.Time `json:"-"`
|
||||
UpdatedAt time.Time `json:"-"`
|
||||
Version int64 `json:"version" jsonschema:"minimum=2,maximum=2"`
|
||||
Version int64 `json:"version" jsonschema:"minimum=2,maximum=2" format:"int64"`
|
||||
ID string `json:"id"`
|
||||
Name string `json:"name"`
|
||||
Address string `json:"address"`
|
||||
@@ -24,7 +25,7 @@ type Data struct {
|
||||
Log struct {
|
||||
Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"`
|
||||
Topics []string `json:"topics"`
|
||||
MaxLines int `json:"max_lines"`
|
||||
MaxLines int `json:"max_lines" format:"int"`
|
||||
} `json:"log"`
|
||||
DB struct {
|
||||
Dir string `json:"dir"`
|
||||
@@ -69,12 +70,12 @@ type Data struct {
|
||||
Storage struct {
|
||||
Disk struct {
|
||||
Dir string `json:"dir"`
|
||||
Size int64 `json:"max_size_mbytes"`
|
||||
Size int64 `json:"max_size_mbytes" format:"int64"`
|
||||
Cache struct {
|
||||
Enable bool `json:"enable"`
|
||||
Size uint64 `json:"max_size_mbytes"`
|
||||
TTL int64 `json:"ttl_seconds"`
|
||||
FileSize uint64 `json:"max_file_size_mbytes"`
|
||||
Size uint64 `json:"max_size_mbytes" format:"uint64"`
|
||||
TTL int64 `json:"ttl_seconds" format:"int64"`
|
||||
FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"`
|
||||
Types []string `json:"types"`
|
||||
} `json:"cache"`
|
||||
} `json:"disk"`
|
||||
@@ -84,7 +85,7 @@ type Data struct {
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
} `json:"auth"`
|
||||
Size int64 `json:"max_size_mbytes"`
|
||||
Size int64 `json:"max_size_mbytes" format:"int64"`
|
||||
Purge bool `json:"purge"`
|
||||
} `json:"memory"`
|
||||
CORS struct {
|
||||
@@ -112,7 +113,7 @@ type Data struct {
|
||||
} `json:"srt"`
|
||||
FFmpeg struct {
|
||||
Binary string `json:"binary"`
|
||||
MaxProcesses int64 `json:"max_processes"`
|
||||
MaxProcesses int64 `json:"max_processes" format:"int64"`
|
||||
Access struct {
|
||||
Input struct {
|
||||
Allow []string `json:"allow"`
|
||||
@@ -124,33 +125,33 @@ type Data struct {
|
||||
} `json:"output"`
|
||||
} `json:"access"`
|
||||
Log struct {
|
||||
MaxLines int `json:"max_lines"`
|
||||
MaxHistory int `json:"max_history"`
|
||||
MaxLines int `json:"max_lines" format:"int"`
|
||||
MaxHistory int `json:"max_history" format:"int"`
|
||||
} `json:"log"`
|
||||
} `json:"ffmpeg"`
|
||||
Playout struct {
|
||||
Enable bool `json:"enable"`
|
||||
MinPort int `json:"min_port"`
|
||||
MaxPort int `json:"max_port"`
|
||||
MinPort int `json:"min_port" format:"int"`
|
||||
MaxPort int `json:"max_port" format:"int"`
|
||||
} `json:"playout"`
|
||||
Debug struct {
|
||||
Profiling bool `json:"profiling"`
|
||||
ForceGC int `json:"force_gc"`
|
||||
ForceGC int `json:"force_gc" format:"int"`
|
||||
} `json:"debug"`
|
||||
Metrics struct {
|
||||
Enable bool `json:"enable"`
|
||||
EnablePrometheus bool `json:"enable_prometheus"`
|
||||
Range int64 `json:"range_sec"` // seconds
|
||||
Interval int64 `json:"interval_sec"` // seconds
|
||||
Range int64 `json:"range_sec" format:"int64"` // seconds
|
||||
Interval int64 `json:"interval_sec" format:"int64"` // seconds
|
||||
} `json:"metrics"`
|
||||
Sessions struct {
|
||||
Enable bool `json:"enable"`
|
||||
IPIgnoreList []string `json:"ip_ignorelist"`
|
||||
SessionTimeout int `json:"session_timeout_sec"`
|
||||
SessionTimeout int `json:"session_timeout_sec" format:"int"`
|
||||
Persist bool `json:"persist"`
|
||||
PersistInterval int `json:"persist_interval_sec"`
|
||||
MaxBitrate uint64 `json:"max_bitrate_mbit"`
|
||||
MaxSessions uint64 `json:"max_sessions"`
|
||||
PersistInterval int `json:"persist_interval_sec" format:"int"`
|
||||
MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"`
|
||||
MaxSessions uint64 `json:"max_sessions" format:"uint64"`
|
||||
} `json:"sessions"`
|
||||
Service struct {
|
||||
Enable bool `json:"enable"`
|
||||
@@ -164,8 +165,8 @@ type Data struct {
|
||||
} `json:"router"`
|
||||
}
|
||||
|
||||
func UpgradeV1ToV2(d *v1.Data) (*Data, error) {
|
||||
cfg := New()
|
||||
func UpgradeV1ToV2(d *v1.Data, fs fs.Filesystem) (*Data, error) {
|
||||
cfg := New(fs)
|
||||
|
||||
return MergeV1ToV2(&cfg.Data, d)
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
@@ -16,6 +17,28 @@ type Auth0Tenant struct {
|
||||
Users []string `json:"users"`
|
||||
}
|
||||
|
||||
func (a *Auth0Tenant) String() string {
|
||||
u := url.URL{
|
||||
Scheme: "auth0",
|
||||
Host: a.Domain,
|
||||
}
|
||||
|
||||
if len(a.ClientID) != 0 {
|
||||
u.User = url.User(a.ClientID)
|
||||
}
|
||||
|
||||
q := url.Values{}
|
||||
q.Set("aud", a.Audience)
|
||||
|
||||
for _, user := range a.Users {
|
||||
q.Add("user", user)
|
||||
}
|
||||
|
||||
u.RawQuery = q.Encode()
|
||||
|
||||
return u.String()
|
||||
}
|
||||
|
||||
type TenantList struct {
|
||||
p *[]Auth0Tenant
|
||||
separator string
|
||||
@@ -32,18 +55,34 @@ func NewTenantList(p *[]Auth0Tenant, val []Auth0Tenant, separator string) *Tenan
|
||||
return v
|
||||
}
|
||||
|
||||
// Set allows to set a tenant list in two formats:
|
||||
// - a separator separated list of bas64 encoded Auth0Tenant JSON objects
|
||||
// - a separator separated list of Auth0Tenant in URL representation: auth0://[clientid]@[domain]?aud=[audience]&user=...&user=...
|
||||
func (s *TenantList) Set(val string) error {
|
||||
list := []Auth0Tenant{}
|
||||
|
||||
for i, elm := range strings.Split(val, s.separator) {
|
||||
data, err := base64.StdEncoding.DecodeString(elm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err)
|
||||
}
|
||||
|
||||
t := Auth0Tenant{}
|
||||
if err := json.Unmarshal(data, &t); err != nil {
|
||||
return fmt.Errorf("invalid JSON in tenant %d: %w", i, err)
|
||||
|
||||
if strings.HasPrefix(elm, "auth0://") {
|
||||
data, err := url.Parse(elm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid url encoding of tenant %d: %w", i, err)
|
||||
}
|
||||
|
||||
t.Domain = data.Host
|
||||
t.ClientID = data.User.Username()
|
||||
t.Audience = data.Query().Get("aud")
|
||||
t.Users = data.Query()["user"]
|
||||
} else {
|
||||
data, err := base64.StdEncoding.DecodeString(elm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(data, &t); err != nil {
|
||||
return fmt.Errorf("invalid JSON in tenant %d: %w", i, err)
|
||||
}
|
||||
}
|
||||
|
||||
list = append(list, t)
|
||||
@@ -62,10 +101,10 @@ func (s *TenantList) String() string {
|
||||
list := []string{}
|
||||
|
||||
for _, t := range *s.p {
|
||||
list = append(list, fmt.Sprintf("%s (%d users)", t.Domain, len(t.Users)))
|
||||
list = append(list, t.String())
|
||||
}
|
||||
|
||||
return strings.Join(list, ",")
|
||||
return strings.Join(list, s.separator)
|
||||
}
|
||||
|
||||
func (s *TenantList) Validate() error {
|
||||
|
||||
43
config/value/auth0_test.go
Normal file
43
config/value/auth0_test.go
Normal file
@@ -0,0 +1,43 @@
|
||||
package value
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAuth0Value(t *testing.T) {
|
||||
tenants := []Auth0Tenant{}
|
||||
|
||||
v := NewTenantList(&tenants, nil, " ")
|
||||
require.Equal(t, "(empty)", v.String())
|
||||
|
||||
v.Set("auth0://clientid@domain?aud=audience&user=user1&user=user2 auth0://domain2?aud=audience2&user=user3")
|
||||
require.Equal(t, []Auth0Tenant{
|
||||
{
|
||||
Domain: "domain",
|
||||
ClientID: "clientid",
|
||||
Audience: "audience",
|
||||
Users: []string{"user1", "user2"},
|
||||
},
|
||||
{
|
||||
Domain: "domain2",
|
||||
Audience: "audience2",
|
||||
Users: []string{"user3"},
|
||||
},
|
||||
}, tenants)
|
||||
require.Equal(t, "auth0://clientid@domain?aud=audience&user=user1&user=user2 auth0://domain2?aud=audience2&user=user3", v.String())
|
||||
require.NoError(t, v.Validate())
|
||||
|
||||
v.Set("eyJkb21haW4iOiJkYXRhcmhlaS5ldS5hdXRoMC5jb20iLCJhdWRpZW5jZSI6Imh0dHBzOi8vZGF0YXJoZWkuY29tL2NvcmUiLCJ1c2VycyI6WyJhdXRoMHx4eHgiXX0=")
|
||||
require.Equal(t, []Auth0Tenant{
|
||||
{
|
||||
Domain: "datarhei.eu.auth0.com",
|
||||
ClientID: "",
|
||||
Audience: "https://datarhei.com/core",
|
||||
Users: []string{"auth0|xxx"},
|
||||
},
|
||||
}, tenants)
|
||||
require.Equal(t, "auth0://datarhei.eu.auth0.com?aud=https%3A%2F%2Fdatarhei.com%2Fcore&user=auth0%7Cxxx", v.String())
|
||||
require.NoError(t, v.Validate())
|
||||
}
|
||||
127
config/value/network_test.go
Normal file
127
config/value/network_test.go
Normal file
@@ -0,0 +1,127 @@
|
||||
package value
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAddressValue(t *testing.T) {
|
||||
var x string
|
||||
|
||||
val := NewAddress(&x, ":8080")
|
||||
|
||||
require.Equal(t, ":8080", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
x = "foobaz:9090"
|
||||
|
||||
require.Equal(t, "foobaz:9090", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("fooboz:7070")
|
||||
|
||||
require.Equal(t, "fooboz:7070", x)
|
||||
}
|
||||
|
||||
func TestCIDRListValue(t *testing.T) {
|
||||
var x []string
|
||||
|
||||
val := NewCIDRList(&x, []string{}, " ")
|
||||
|
||||
require.Equal(t, "(empty)", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, true, val.IsEmpty())
|
||||
|
||||
x = []string{"127.0.0.1/32", "127.0.0.2/32"}
|
||||
|
||||
require.Equal(t, "127.0.0.1/32 127.0.0.2/32", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("129.0.0.1/32 129.0.0.2/32")
|
||||
|
||||
require.Equal(t, []string{"129.0.0.1/32", "129.0.0.2/32"}, x)
|
||||
}
|
||||
|
||||
func TestCORSOriginaValue(t *testing.T) {
|
||||
var x []string
|
||||
|
||||
val := NewCORSOrigins(&x, []string{}, " ")
|
||||
|
||||
require.Equal(t, "(empty)", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, true, val.IsEmpty())
|
||||
|
||||
x = []string{"*"}
|
||||
|
||||
require.Equal(t, "*", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("http://localhost")
|
||||
|
||||
require.Equal(t, []string{"http://localhost"}, x)
|
||||
}
|
||||
|
||||
func TestPortValue(t *testing.T) {
|
||||
var x int
|
||||
|
||||
val := NewPort(&x, 11)
|
||||
|
||||
require.Equal(t, "11", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
x = 42
|
||||
|
||||
require.Equal(t, "42", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("77")
|
||||
|
||||
require.Equal(t, int(77), x)
|
||||
}
|
||||
|
||||
func TestURLValue(t *testing.T) {
|
||||
var x string
|
||||
|
||||
val := NewURL(&x, "http://localhost/foobar")
|
||||
|
||||
require.Equal(t, "http://localhost/foobar", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
x = "http://localhost:8080/foobar"
|
||||
|
||||
require.Equal(t, "http://localhost:8080/foobar", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("http://localhost:8080/fooboz/foobaz")
|
||||
|
||||
require.Equal(t, "http://localhost:8080/fooboz/foobaz", x)
|
||||
}
|
||||
|
||||
func TestEmailValue(t *testing.T) {
|
||||
var x string
|
||||
|
||||
val := NewEmail(&x, "foobar@example.com")
|
||||
|
||||
require.Equal(t, "foobar@example.com", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
x = "foobar+baz@example.com"
|
||||
|
||||
require.Equal(t, "foobar+baz@example.com", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("foobar@sub.example.com")
|
||||
|
||||
require.Equal(t, "foobar@sub.example.com", x)
|
||||
}
|
||||
@@ -2,39 +2,51 @@ package value
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
)
|
||||
|
||||
// must directory
|
||||
|
||||
type MustDir string
|
||||
type MustDir struct {
|
||||
p *string
|
||||
fs fs.Filesystem
|
||||
}
|
||||
|
||||
func NewMustDir(p *string, val string, fs fs.Filesystem) *MustDir {
|
||||
v := &MustDir{
|
||||
p: p,
|
||||
fs: fs,
|
||||
}
|
||||
|
||||
func NewMustDir(p *string, val string) *MustDir {
|
||||
*p = val
|
||||
|
||||
return (*MustDir)(p)
|
||||
return v
|
||||
}
|
||||
|
||||
func (u *MustDir) Set(val string) error {
|
||||
*u = MustDir(val)
|
||||
*u.p = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *MustDir) String() string {
|
||||
return string(*u)
|
||||
return *u.p
|
||||
}
|
||||
|
||||
func (u *MustDir) Validate() error {
|
||||
val := string(*u)
|
||||
val := *u.p
|
||||
|
||||
if len(strings.TrimSpace(val)) == 0 {
|
||||
return fmt.Errorf("path name must not be empty")
|
||||
}
|
||||
|
||||
finfo, err := os.Stat(val)
|
||||
if err := u.fs.MkdirAll(val, 0750); err != nil {
|
||||
return fmt.Errorf("%s can't be created (%w)", val, err)
|
||||
}
|
||||
|
||||
finfo, err := u.fs.Stat(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s does not exist", val)
|
||||
}
|
||||
@@ -47,36 +59,44 @@ func (u *MustDir) Validate() error {
|
||||
}
|
||||
|
||||
func (u *MustDir) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
return len(*u.p) == 0
|
||||
}
|
||||
|
||||
// directory
|
||||
|
||||
type Dir string
|
||||
type Dir struct {
|
||||
p *string
|
||||
fs fs.Filesystem
|
||||
}
|
||||
|
||||
func NewDir(p *string, val string, fs fs.Filesystem) *Dir {
|
||||
v := &Dir{
|
||||
p: p,
|
||||
fs: fs,
|
||||
}
|
||||
|
||||
func NewDir(p *string, val string) *Dir {
|
||||
*p = val
|
||||
|
||||
return (*Dir)(p)
|
||||
return v
|
||||
}
|
||||
|
||||
func (u *Dir) Set(val string) error {
|
||||
*u = Dir(val)
|
||||
*u.p = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Dir) String() string {
|
||||
return string(*u)
|
||||
return *u.p
|
||||
}
|
||||
|
||||
func (u *Dir) Validate() error {
|
||||
val := string(*u)
|
||||
val := *u.p
|
||||
|
||||
if len(strings.TrimSpace(val)) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
finfo, err := os.Stat(val)
|
||||
finfo, err := u.fs.Stat(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s does not exist", val)
|
||||
}
|
||||
@@ -89,32 +109,40 @@ func (u *Dir) Validate() error {
|
||||
}
|
||||
|
||||
func (u *Dir) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
return len(*u.p) == 0
|
||||
}
|
||||
|
||||
// executable
|
||||
|
||||
type Exec string
|
||||
type Exec struct {
|
||||
p *string
|
||||
fs fs.Filesystem
|
||||
}
|
||||
|
||||
func NewExec(p *string, val string, fs fs.Filesystem) *Exec {
|
||||
v := &Exec{
|
||||
p: p,
|
||||
fs: fs,
|
||||
}
|
||||
|
||||
func NewExec(p *string, val string) *Exec {
|
||||
*p = val
|
||||
|
||||
return (*Exec)(p)
|
||||
return v
|
||||
}
|
||||
|
||||
func (u *Exec) Set(val string) error {
|
||||
*u = Exec(val)
|
||||
*u.p = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Exec) String() string {
|
||||
return string(*u)
|
||||
return *u.p
|
||||
}
|
||||
|
||||
func (u *Exec) Validate() error {
|
||||
val := string(*u)
|
||||
val := *u.p
|
||||
|
||||
_, err := exec.LookPath(val)
|
||||
_, err := u.fs.LookPath(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s not found or is not executable", val)
|
||||
}
|
||||
@@ -123,36 +151,44 @@ func (u *Exec) Validate() error {
|
||||
}
|
||||
|
||||
func (u *Exec) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
return len(*u.p) == 0
|
||||
}
|
||||
|
||||
// regular file
|
||||
|
||||
type File string
|
||||
type File struct {
|
||||
p *string
|
||||
fs fs.Filesystem
|
||||
}
|
||||
|
||||
func NewFile(p *string, val string, fs fs.Filesystem) *File {
|
||||
v := &File{
|
||||
p: p,
|
||||
fs: fs,
|
||||
}
|
||||
|
||||
func NewFile(p *string, val string) *File {
|
||||
*p = val
|
||||
|
||||
return (*File)(p)
|
||||
return v
|
||||
}
|
||||
|
||||
func (u *File) Set(val string) error {
|
||||
*u = File(val)
|
||||
*u.p = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *File) String() string {
|
||||
return string(*u)
|
||||
return *u.p
|
||||
}
|
||||
|
||||
func (u *File) Validate() error {
|
||||
val := string(*u)
|
||||
val := *u.p
|
||||
|
||||
if len(val) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
finfo, err := os.Stat(val)
|
||||
finfo, err := u.fs.Stat(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s does not exist", val)
|
||||
}
|
||||
@@ -165,7 +201,7 @@ func (u *File) Validate() error {
|
||||
}
|
||||
|
||||
func (u *File) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
return len(*u.p) == 0
|
||||
}
|
||||
|
||||
// absolute path
|
||||
|
||||
142
config/value/os_test.go
Normal file
142
config/value/os_test.go
Normal file
@@ -0,0 +1,142 @@
|
||||
package value
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestMustDirValue(t *testing.T) {
|
||||
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = memfs.Stat("/foobar")
|
||||
require.Error(t, err)
|
||||
|
||||
var x string
|
||||
|
||||
val := NewMustDir(&x, "./foobar", memfs)
|
||||
|
||||
require.Equal(t, "./foobar", val.String())
|
||||
require.NoError(t, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
info, err := memfs.Stat("/foobar")
|
||||
require.NoError(t, err)
|
||||
require.True(t, info.IsDir())
|
||||
|
||||
x = "/bar/foo"
|
||||
|
||||
require.Equal(t, "/bar/foo", val.String())
|
||||
|
||||
_, err = memfs.Stat("/bar/foo")
|
||||
require.Error(t, err)
|
||||
|
||||
require.NoError(t, val.Validate())
|
||||
|
||||
info, err = memfs.Stat("/bar/foo")
|
||||
require.NoError(t, err)
|
||||
require.True(t, info.IsDir())
|
||||
|
||||
memfs.WriteFile("/foo/bar", []byte("hello"))
|
||||
|
||||
val.Set("/foo/bar")
|
||||
|
||||
require.Error(t, val.Validate())
|
||||
}
|
||||
|
||||
func TestDirValue(t *testing.T) {
|
||||
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
require.NoError(t, err)
|
||||
|
||||
var x string
|
||||
|
||||
val := NewDir(&x, "/foobar", memfs)
|
||||
|
||||
require.Equal(t, "/foobar", val.String())
|
||||
require.Error(t, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
err = memfs.MkdirAll("/foobar", 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, val.Validate())
|
||||
|
||||
_, _, err = memfs.WriteFile("/foo/bar", []byte("hello"))
|
||||
require.NoError(t, err)
|
||||
|
||||
val.Set("/foo/bar")
|
||||
|
||||
require.Error(t, val.Validate())
|
||||
}
|
||||
|
||||
func TestFileValue(t *testing.T) {
|
||||
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
require.NoError(t, err)
|
||||
|
||||
var x string
|
||||
|
||||
val := NewFile(&x, "/foobar", memfs)
|
||||
|
||||
require.Equal(t, "/foobar", val.String())
|
||||
require.Error(t, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
_, _, err = memfs.WriteFile("/foobar", []byte("hello"))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, val.Validate())
|
||||
|
||||
err = memfs.MkdirAll("/foo/bar", 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
val.Set("/foo/bar")
|
||||
|
||||
require.Error(t, val.Validate())
|
||||
}
|
||||
|
||||
func TestExecValue(t *testing.T) {
|
||||
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
require.NoError(t, err)
|
||||
|
||||
var x string
|
||||
|
||||
val := NewExec(&x, "/foobar", memfs)
|
||||
|
||||
require.Equal(t, "/foobar", val.String())
|
||||
require.Error(t, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
_, _, err = memfs.WriteFile("/foobar", []byte("hello"))
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, val.Validate())
|
||||
|
||||
err = memfs.MkdirAll("/foo/bar", 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
val.Set("/foo/bar")
|
||||
|
||||
require.Error(t, val.Validate())
|
||||
}
|
||||
|
||||
func TestAbsolutePathValue(t *testing.T) {
|
||||
var x string
|
||||
|
||||
val := NewAbsolutePath(&x, "foobar")
|
||||
|
||||
require.Equal(t, "foobar", val.String())
|
||||
require.Error(t, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
x = "/foobaz"
|
||||
|
||||
require.Equal(t, "/foobaz", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("/fooboz")
|
||||
|
||||
require.Equal(t, "/fooboz", x)
|
||||
}
|
||||
@@ -1,6 +1,7 @@
|
||||
package value
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
@@ -127,11 +128,20 @@ func (s *StringMapString) String() string {
|
||||
return "(empty)"
|
||||
}
|
||||
|
||||
sms := *s.p
|
||||
|
||||
keys := []string{}
|
||||
for k := range sms {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
mappings := make([]string, len(*s.p))
|
||||
|
||||
i := 0
|
||||
for k, v := range *s.p {
|
||||
mappings[i] = k + ":" + v
|
||||
for _, k := range keys {
|
||||
mappings[i] = k + ":" + sms[k]
|
||||
i++
|
||||
}
|
||||
|
||||
|
||||
147
config/value/primitives_test.go
Normal file
147
config/value/primitives_test.go
Normal file
@@ -0,0 +1,147 @@
|
||||
package value
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestStringValue(t *testing.T) {
|
||||
var x string
|
||||
|
||||
val := NewString(&x, "foobar")
|
||||
|
||||
require.Equal(t, "foobar", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
x = "foobaz"
|
||||
|
||||
require.Equal(t, "foobaz", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("fooboz")
|
||||
|
||||
require.Equal(t, "fooboz", x)
|
||||
}
|
||||
|
||||
func TestStringListValue(t *testing.T) {
|
||||
var x []string
|
||||
|
||||
val := NewStringList(&x, []string{"foobar"}, " ")
|
||||
|
||||
require.Equal(t, "foobar", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
x = []string{"foobar", "foobaz"}
|
||||
|
||||
require.Equal(t, "foobar foobaz", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("fooboz foobar")
|
||||
|
||||
require.Equal(t, []string{"fooboz", "foobar"}, x)
|
||||
}
|
||||
|
||||
func TestStringMapStringValue(t *testing.T) {
|
||||
var x map[string]string
|
||||
|
||||
val := NewStringMapString(&x, map[string]string{"a": "foobar"})
|
||||
|
||||
require.Equal(t, "a:foobar", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
x = map[string]string{"a": "foobar", "b": "foobaz"}
|
||||
|
||||
require.Equal(t, "a:foobar b:foobaz", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("x:fooboz y:foobar")
|
||||
|
||||
require.Equal(t, map[string]string{"x": "fooboz", "y": "foobar"}, x)
|
||||
}
|
||||
|
||||
func TestBoolValue(t *testing.T) {
|
||||
var x bool
|
||||
|
||||
val := NewBool(&x, false)
|
||||
|
||||
require.Equal(t, "false", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, true, val.IsEmpty())
|
||||
|
||||
x = true
|
||||
|
||||
require.Equal(t, "true", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("false")
|
||||
|
||||
require.Equal(t, false, x)
|
||||
}
|
||||
|
||||
func TestIntValue(t *testing.T) {
|
||||
var x int
|
||||
|
||||
val := NewInt(&x, 11)
|
||||
|
||||
require.Equal(t, "11", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
x = 42
|
||||
|
||||
require.Equal(t, "42", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("77")
|
||||
|
||||
require.Equal(t, int(77), x)
|
||||
}
|
||||
|
||||
func TestInt64Value(t *testing.T) {
|
||||
var x int64
|
||||
|
||||
val := NewInt64(&x, 11)
|
||||
|
||||
require.Equal(t, "11", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
x = 42
|
||||
|
||||
require.Equal(t, "42", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("77")
|
||||
|
||||
require.Equal(t, int64(77), x)
|
||||
}
|
||||
|
||||
func TestUint64Value(t *testing.T) {
|
||||
var x uint64
|
||||
|
||||
val := NewUint64(&x, 11)
|
||||
|
||||
require.Equal(t, "11", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
x = 42
|
||||
|
||||
require.Equal(t, "42", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("77")
|
||||
|
||||
require.Equal(t, uint64(77), x)
|
||||
}
|
||||
179
config/value/s3.go
Normal file
179
config/value/s3.go
Normal file
@@ -0,0 +1,179 @@
|
||||
package value
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/publicsuffix"
|
||||
)
|
||||
|
||||
// array of s3 storages
|
||||
// https://access_key_id:secret_access_id@region.endpoint/bucket?name=aaa&mount=/abc&username=xxx&password=yyy
|
||||
|
||||
type S3Storage struct {
|
||||
Name string `json:"name"`
|
||||
Mountpoint string `json:"mountpoint"`
|
||||
Auth struct {
|
||||
Enable bool `json:"enable"`
|
||||
Username string `json:"username"`
|
||||
Password string `json:"password"`
|
||||
} `json:"auth"`
|
||||
Endpoint string `json:"endpoint"`
|
||||
AccessKeyID string `json:"access_key_id"`
|
||||
SecretAccessKey string `json:"secret_access_key"`
|
||||
Bucket string `json:"bucket"`
|
||||
Region string `json:"region"`
|
||||
UseSSL bool `json:"use_ssl"`
|
||||
}
|
||||
|
||||
func (t *S3Storage) String() string {
|
||||
u := url.URL{}
|
||||
|
||||
if t.UseSSL {
|
||||
u.Scheme = "https"
|
||||
} else {
|
||||
u.Scheme = "http"
|
||||
}
|
||||
|
||||
u.User = url.UserPassword(t.AccessKeyID, "---")
|
||||
|
||||
u.Host = t.Endpoint
|
||||
|
||||
if len(t.Region) != 0 {
|
||||
u.Host = t.Region + "." + u.Host
|
||||
}
|
||||
|
||||
if len(t.Bucket) != 0 {
|
||||
u.Path = "/" + t.Bucket
|
||||
}
|
||||
|
||||
v := url.Values{}
|
||||
v.Set("name", t.Name)
|
||||
v.Set("mountpoint", t.Mountpoint)
|
||||
|
||||
if t.Auth.Enable {
|
||||
if len(t.Auth.Username) != 0 {
|
||||
v.Set("username", t.Auth.Username)
|
||||
}
|
||||
|
||||
if len(t.Auth.Password) != 0 {
|
||||
v.Set("password", "---")
|
||||
}
|
||||
}
|
||||
|
||||
u.RawQuery = v.Encode()
|
||||
|
||||
return u.String()
|
||||
}
|
||||
|
||||
type s3StorageListValue struct {
|
||||
p *[]S3Storage
|
||||
separator string
|
||||
}
|
||||
|
||||
func NewS3StorageListValue(p *[]S3Storage, val []S3Storage, separator string) *s3StorageListValue {
|
||||
v := &s3StorageListValue{
|
||||
p: p,
|
||||
separator: separator,
|
||||
}
|
||||
|
||||
*p = val
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *s3StorageListValue) Set(val string) error {
|
||||
list := []S3Storage{}
|
||||
|
||||
for _, elm := range strings.Split(val, s.separator) {
|
||||
u, err := url.Parse(elm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid S3 storage URL (%s): %w", elm, err)
|
||||
}
|
||||
|
||||
t := S3Storage{
|
||||
Name: u.Query().Get("name"),
|
||||
Mountpoint: u.Query().Get("mountpoint"),
|
||||
AccessKeyID: u.User.Username(),
|
||||
}
|
||||
|
||||
hostname := u.Hostname()
|
||||
port := u.Port()
|
||||
|
||||
domain, err := publicsuffix.EffectiveTLDPlusOne(hostname)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid eTLD (%s): %w", hostname, err)
|
||||
}
|
||||
|
||||
t.Endpoint = domain
|
||||
if len(port) != 0 {
|
||||
t.Endpoint += ":" + port
|
||||
}
|
||||
|
||||
region := strings.TrimSuffix(hostname, domain)
|
||||
if len(region) != 0 {
|
||||
t.Region = strings.TrimSuffix(region, ".")
|
||||
}
|
||||
|
||||
secret, ok := u.User.Password()
|
||||
if ok {
|
||||
t.SecretAccessKey = secret
|
||||
}
|
||||
|
||||
t.Bucket = strings.TrimPrefix(u.Path, "/")
|
||||
|
||||
if u.Scheme == "https" {
|
||||
t.UseSSL = true
|
||||
}
|
||||
|
||||
if u.Query().Has("username") || u.Query().Has("password") {
|
||||
t.Auth.Enable = true
|
||||
t.Auth.Username = u.Query().Get("username")
|
||||
t.Auth.Username = u.Query().Get("password")
|
||||
}
|
||||
|
||||
list = append(list, t)
|
||||
}
|
||||
|
||||
*s.p = list
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *s3StorageListValue) String() string {
|
||||
if s.IsEmpty() {
|
||||
return "(empty)"
|
||||
}
|
||||
|
||||
list := []string{}
|
||||
|
||||
for _, t := range *s.p {
|
||||
list = append(list, t.String())
|
||||
}
|
||||
|
||||
return strings.Join(list, s.separator)
|
||||
}
|
||||
|
||||
func (s *s3StorageListValue) Validate() error {
|
||||
for i, t := range *s.p {
|
||||
if len(t.Name) == 0 {
|
||||
return fmt.Errorf("the name for s3 storage %d is missing", i)
|
||||
}
|
||||
|
||||
if len(t.Mountpoint) == 0 {
|
||||
return fmt.Errorf("the mountpoint for s3 storage %d is missing", i)
|
||||
}
|
||||
|
||||
if t.Auth.Enable {
|
||||
if len(t.Auth.Username) == 0 && len(t.Auth.Password) == 0 {
|
||||
return fmt.Errorf("auth is enabled, but no username and password are set for s3 storage %d", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *s3StorageListValue) IsEmpty() bool {
|
||||
return len(*s.p) == 0
|
||||
}
|
||||
30
config/value/time_test.go
Normal file
30
config/value/time_test.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package value
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestTimeValue(t *testing.T) {
|
||||
var x time.Time
|
||||
|
||||
tm := time.Unix(1257894000, 0).UTC()
|
||||
|
||||
val := NewTime(&x, tm)
|
||||
|
||||
require.Equal(t, "2009-11-10T23:00:00Z", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
x = time.Unix(1257894001, 0).UTC()
|
||||
|
||||
require.Equal(t, "2009-11-10T23:00:01Z", val.String())
|
||||
require.Equal(t, nil, val.Validate())
|
||||
require.Equal(t, false, val.IsEmpty())
|
||||
|
||||
val.Set("2009-11-11T23:00:00Z")
|
||||
|
||||
require.Equal(t, time.Time(time.Date(2009, time.November, 11, 23, 0, 0, 0, time.UTC)), x)
|
||||
}
|
||||
@@ -3,29 +3,9 @@ package value
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestIntValue(t *testing.T) {
|
||||
var i int
|
||||
|
||||
ivar := NewInt(&i, 11)
|
||||
|
||||
assert.Equal(t, "11", ivar.String())
|
||||
assert.Equal(t, nil, ivar.Validate())
|
||||
assert.Equal(t, false, ivar.IsEmpty())
|
||||
|
||||
i = 42
|
||||
|
||||
assert.Equal(t, "42", ivar.String())
|
||||
assert.Equal(t, nil, ivar.Validate())
|
||||
assert.Equal(t, false, ivar.IsEmpty())
|
||||
|
||||
ivar.Set("77")
|
||||
|
||||
assert.Equal(t, int(77), i)
|
||||
}
|
||||
|
||||
type testdata struct {
|
||||
value1 int
|
||||
value2 int
|
||||
@@ -37,22 +17,22 @@ func TestCopyStruct(t *testing.T) {
|
||||
NewInt(&data1.value1, 1)
|
||||
NewInt(&data1.value2, 2)
|
||||
|
||||
assert.Equal(t, int(1), data1.value1)
|
||||
assert.Equal(t, int(2), data1.value2)
|
||||
require.Equal(t, int(1), data1.value1)
|
||||
require.Equal(t, int(2), data1.value2)
|
||||
|
||||
data2 := testdata{}
|
||||
|
||||
val21 := NewInt(&data2.value1, 3)
|
||||
val22 := NewInt(&data2.value2, 4)
|
||||
|
||||
assert.Equal(t, int(3), data2.value1)
|
||||
assert.Equal(t, int(4), data2.value2)
|
||||
require.Equal(t, int(3), data2.value1)
|
||||
require.Equal(t, int(4), data2.value2)
|
||||
|
||||
data2 = data1
|
||||
|
||||
assert.Equal(t, int(1), data2.value1)
|
||||
assert.Equal(t, int(2), data2.value2)
|
||||
require.Equal(t, int(1), data2.value1)
|
||||
require.Equal(t, int(2), data2.value2)
|
||||
|
||||
assert.Equal(t, "1", val21.String())
|
||||
assert.Equal(t, "2", val22.String())
|
||||
require.Equal(t, "1", val21.String())
|
||||
require.Equal(t, "2", val22.String())
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package vars
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/datarhei/core/v16/config/value"
|
||||
@@ -38,3 +39,210 @@ func TestVars(t *testing.T) {
|
||||
x, _ = v1.Get("string")
|
||||
require.Equal(t, "foobar", x)
|
||||
}
|
||||
|
||||
func TestSetDefault(t *testing.T) {
|
||||
v := Variables{}
|
||||
s := ""
|
||||
|
||||
v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false)
|
||||
|
||||
require.Equal(t, "foobar", s)
|
||||
|
||||
v.Set("string", "foobaz")
|
||||
|
||||
require.Equal(t, "foobaz", s)
|
||||
|
||||
v.SetDefault("strong")
|
||||
|
||||
require.Equal(t, "foobaz", s)
|
||||
|
||||
v.SetDefault("string")
|
||||
|
||||
require.Equal(t, "foobar", s)
|
||||
}
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
v := Variables{}
|
||||
|
||||
s := ""
|
||||
|
||||
v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false)
|
||||
|
||||
value, err := v.Get("string")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "foobar", value)
|
||||
|
||||
value, err = v.Get("strong")
|
||||
require.Error(t, err)
|
||||
require.Equal(t, "", value)
|
||||
}
|
||||
|
||||
func TestSet(t *testing.T) {
|
||||
v := Variables{}
|
||||
|
||||
s := ""
|
||||
|
||||
v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false)
|
||||
|
||||
err := v.Set("string", "foobaz")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "foobaz", s)
|
||||
|
||||
err = v.Set("strong", "fooboz")
|
||||
require.Error(t, err)
|
||||
require.Equal(t, "foobaz", s)
|
||||
}
|
||||
|
||||
func TestLog(t *testing.T) {
|
||||
v := Variables{}
|
||||
|
||||
s := ""
|
||||
|
||||
v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false)
|
||||
|
||||
v.Log("info", "string", "hello %s", "world")
|
||||
require.Equal(t, 1, len(v.logs))
|
||||
|
||||
v.Log("info", "strong", "hello %s", "world")
|
||||
require.Equal(t, 1, len(v.logs))
|
||||
|
||||
require.Equal(t, "hello world", v.logs[0].message)
|
||||
require.Equal(t, "info", v.logs[0].level)
|
||||
require.Equal(t, Variable{
|
||||
Value: "foobar",
|
||||
Name: "string",
|
||||
EnvName: "",
|
||||
Description: "a string",
|
||||
Merged: false,
|
||||
}, v.logs[0].variable)
|
||||
|
||||
v.ResetLogs()
|
||||
|
||||
require.Equal(t, 0, len(v.logs))
|
||||
}
|
||||
|
||||
func TestMerge(t *testing.T) {
|
||||
v := Variables{}
|
||||
|
||||
s := ""
|
||||
os.Setenv("CORE_TEST_STRING", "foobaz")
|
||||
|
||||
v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false)
|
||||
|
||||
require.Equal(t, s, "foobar")
|
||||
|
||||
v.Merge()
|
||||
|
||||
require.Equal(t, s, "foobaz")
|
||||
require.Equal(t, true, v.IsMerged("string"))
|
||||
require.Equal(t, 0, len(v.logs))
|
||||
|
||||
os.Unsetenv("CORE_TEST_STRING")
|
||||
}
|
||||
|
||||
func TestMergeAlt(t *testing.T) {
|
||||
v := Variables{}
|
||||
|
||||
s := ""
|
||||
os.Setenv("CORE_TEST_STRING", "foobaz")
|
||||
|
||||
v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRUNG", []string{"CORE_TEST_STRING"}, "a string", false, false)
|
||||
|
||||
require.Equal(t, s, "foobar")
|
||||
|
||||
v.Merge()
|
||||
|
||||
require.Equal(t, s, "foobaz")
|
||||
require.Equal(t, true, v.IsMerged("string"))
|
||||
require.Equal(t, 1, len(v.logs))
|
||||
|
||||
require.Contains(t, v.logs[0].message, "CORE_TEST_STRUNG")
|
||||
require.Equal(t, "warn", v.logs[0].level)
|
||||
|
||||
os.Unsetenv("CORE_TEST_STRING")
|
||||
}
|
||||
|
||||
func TestNoMerge(t *testing.T) {
|
||||
v := Variables{}
|
||||
|
||||
s := ""
|
||||
os.Setenv("CORE_TEST_STRONG", "foobaz")
|
||||
|
||||
v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false)
|
||||
|
||||
require.Equal(t, s, "foobar")
|
||||
|
||||
v.Merge()
|
||||
|
||||
require.Equal(t, s, "foobar")
|
||||
require.Equal(t, false, v.IsMerged("string"))
|
||||
|
||||
os.Unsetenv("CORE_TEST_STRONG")
|
||||
}
|
||||
|
||||
func TestValidate(t *testing.T) {
|
||||
v := Variables{}
|
||||
|
||||
s1 := ""
|
||||
s2 := ""
|
||||
|
||||
v.Register(value.NewString(&s1, ""), "string", "", nil, "a string", false, false)
|
||||
v.Register(value.NewString(&s2, ""), "string", "", nil, "a string", true, false)
|
||||
|
||||
require.Equal(t, s1, "")
|
||||
require.Equal(t, s2, "")
|
||||
|
||||
require.Equal(t, false, v.HasErrors())
|
||||
|
||||
v.Validate()
|
||||
|
||||
require.Equal(t, true, v.HasErrors())
|
||||
|
||||
ninfo := 0
|
||||
nerror := 0
|
||||
v.Messages(func(level string, v Variable, message string) {
|
||||
if level == "info" {
|
||||
ninfo++
|
||||
} else if level == "error" {
|
||||
nerror++
|
||||
}
|
||||
})
|
||||
|
||||
require.Equal(t, 2, ninfo)
|
||||
require.Equal(t, 1, nerror)
|
||||
}
|
||||
|
||||
func TestOverrides(t *testing.T) {
|
||||
v := Variables{}
|
||||
|
||||
s := ""
|
||||
os.Setenv("CORE_TEST_STRING", "foobaz")
|
||||
|
||||
v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false)
|
||||
v.Merge()
|
||||
|
||||
overrides := v.Overrides()
|
||||
|
||||
require.ElementsMatch(t, []string{"string"}, overrides)
|
||||
}
|
||||
|
||||
func TestDisquise(t *testing.T) {
|
||||
v := Variables{}
|
||||
|
||||
s := ""
|
||||
|
||||
v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, true)
|
||||
|
||||
v.Log("info", "string", "hello %s", "world")
|
||||
require.Equal(t, 1, len(v.logs))
|
||||
|
||||
require.Equal(t, "hello world", v.logs[0].message)
|
||||
require.Equal(t, "info", v.logs[0].level)
|
||||
require.Equal(t, Variable{
|
||||
Value: "***",
|
||||
Name: "string",
|
||||
EnvName: "",
|
||||
Description: "a string",
|
||||
Merged: false,
|
||||
}, v.logs[0].variable)
|
||||
}
|
||||
|
||||
1108
docs/docs.go
1108
docs/docs.go
File diff suppressed because it is too large
Load Diff
1108
docs/swagger.json
1108
docs/swagger.json
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -4,6 +4,9 @@ import (
|
||||
"github.com/gobwas/glob"
|
||||
)
|
||||
|
||||
// Match returns whether the name matches the glob pattern, also considering
|
||||
// one or several optionnal separator. An error is only returned if the pattern
|
||||
// is invalid.
|
||||
func Match(pattern, name string, separators ...rune) (bool, error) {
|
||||
g, err := glob.Compile(pattern, separators...)
|
||||
if err != nil {
|
||||
|
||||
58
go.mod
58
go.mod
@@ -11,22 +11,25 @@ require (
|
||||
github.com/datarhei/joy4 v0.0.0-20220914170649-23c70d207759
|
||||
github.com/go-playground/validator/v10 v10.11.1
|
||||
github.com/gobwas/glob v0.2.3
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2
|
||||
github.com/golang-jwt/jwt/v4 v4.4.3
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/invopop/jsonschema v0.4.0
|
||||
github.com/joho/godotenv v1.4.0
|
||||
github.com/labstack/echo/v4 v4.9.1
|
||||
github.com/lithammer/shortuuid/v4 v4.0.0
|
||||
github.com/mattn/go-isatty v0.0.16
|
||||
github.com/mattn/go-isatty v0.0.17
|
||||
github.com/minio/minio-go/v7 v7.0.47
|
||||
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
|
||||
github.com/prometheus/client_golang v1.13.1
|
||||
github.com/shirou/gopsutil/v3 v3.22.10
|
||||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/shirou/gopsutil/v3 v3.22.11
|
||||
github.com/stretchr/testify v1.8.1
|
||||
github.com/swaggo/echo-swagger v1.3.5
|
||||
github.com/swaggo/swag v1.8.7
|
||||
github.com/vektah/gqlparser/v2 v2.5.1
|
||||
github.com/xeipuuv/gojsonschema v1.2.0
|
||||
golang.org/x/mod v0.6.0
|
||||
go.uber.org/zap v1.24.0
|
||||
golang.org/x/mod v0.7.0
|
||||
golang.org/x/net v0.7.0
|
||||
)
|
||||
|
||||
require (
|
||||
@@ -34,13 +37,14 @@ require (
|
||||
github.com/agnivade/levenshtein v1.1.1 // indirect
|
||||
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.19.5 // indirect
|
||||
github.com/go-openapi/jsonreference v0.20.0 // indirect
|
||||
github.com/go-openapi/spec v0.20.7 // indirect
|
||||
github.com/go-openapi/spec v0.20.8 // indirect
|
||||
github.com/go-openapi/swag v0.22.3 // indirect
|
||||
github.com/go-playground/locales v0.14.0 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.0 // indirect
|
||||
@@ -50,7 +54,9 @@ require (
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/iancoleman/orderedmap v0.2.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.1.2 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.15.15 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.3 // indirect
|
||||
github.com/labstack/gommon v0.4.0 // indirect
|
||||
github.com/leodido/go-urn v1.2.1 // indirect
|
||||
github.com/libdns/libdns v0.2.1 // indirect
|
||||
@@ -60,16 +66,24 @@ require (
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mholt/acmez v1.0.4 // indirect
|
||||
github.com/miekg/dns v1.1.50 // indirect
|
||||
github.com/minio/md5-simd v1.1.2 // indirect
|
||||
github.com/minio/sha256-simd v1.0.0 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.37.0 // indirect
|
||||
github.com/prometheus/procfs v0.8.0 // indirect
|
||||
github.com/prometheus/common v0.39.0 // indirect
|
||||
github.com/prometheus/procfs v0.9.0 // indirect
|
||||
github.com/rogpeppe/go-internal v1.8.1 // indirect
|
||||
github.com/rs/xid v1.4.0 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sirupsen/logrus v1.9.0 // indirect
|
||||
github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.10 // indirect
|
||||
github.com/tklauser/numcpus v0.5.0 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.11 // indirect
|
||||
github.com/tklauser/numcpus v0.6.0 // indirect
|
||||
github.com/urfave/cli/v2 v2.8.1 // indirect
|
||||
github.com/valyala/bytebufferpool v1.0.0 // indirect
|
||||
github.com/valyala/fasttemplate v1.2.2 // indirect
|
||||
@@ -78,14 +92,14 @@ require (
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/multierr v1.8.0 // indirect
|
||||
go.uber.org/zap v1.23.0 // indirect
|
||||
golang.org/x/crypto v0.1.0 // indirect
|
||||
golang.org/x/net v0.1.0 // indirect
|
||||
golang.org/x/sys v0.1.0 // indirect
|
||||
golang.org/x/text v0.4.0 // indirect
|
||||
golang.org/x/time v0.1.0 // indirect
|
||||
golang.org/x/tools v0.2.0 // indirect
|
||||
go.uber.org/goleak v1.1.12 // indirect
|
||||
go.uber.org/multierr v1.9.0 // indirect
|
||||
golang.org/x/crypto v0.5.0 // indirect
|
||||
golang.org/x/sys v0.5.0 // indirect
|
||||
golang.org/x/text v0.7.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.4.0 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
516
go.sum
516
go.sum
@@ -1,41 +1,7 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/99designs/gqlgen v0.17.20 h1:O7WzccIhKB1dm+7g6dhQcULINftfiLSBg2l/mwbpJMw=
|
||||
github.com/99designs/gqlgen v0.17.20/go.mod h1:Mja2HI23kWT1VRH09hvWshFgOzKswpO20o4ScpJIES4=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
|
||||
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
|
||||
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
|
||||
@@ -46,11 +12,6 @@ github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaW
|
||||
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
|
||||
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
|
||||
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
|
||||
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
|
||||
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
|
||||
@@ -61,21 +22,12 @@ github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLj
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c h1:8XZeJrs4+ZYhJeJ2aZxADI2tGADS15AzIF8MQ8XAhT4=
|
||||
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c/go.mod h1:x1vxHcL/9AVzuk5HOloOEPrtJY0MaalYr78afXZ+pWI=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/caddyserver/certmagic v0.17.2 h1:o30seC1T/dBqBCNNGNHWwj2i5/I/FMjBbTAhjADP3nE=
|
||||
github.com/caddyserver/certmagic v0.17.2/go.mod h1:ouWUuC490GOLJzkyN35eXfV8bSbwMwSf4bdhkIxtdQE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
@@ -89,22 +41,9 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g=
|
||||
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
@@ -114,8 +53,8 @@ github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/a
|
||||
github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
|
||||
github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
|
||||
github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I=
|
||||
github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI=
|
||||
github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
|
||||
github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU=
|
||||
github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
|
||||
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
|
||||
@@ -128,105 +67,48 @@ github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/j
|
||||
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
|
||||
github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
|
||||
github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
|
||||
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
|
||||
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU=
|
||||
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
|
||||
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
||||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA=
|
||||
github.com/iancoleman/orderedmap v0.2.0 h1:sq1N/TFpYH++aViPcaKjys3bDClUEU7s5B+z6jq8pNA=
|
||||
github.com/iancoleman/orderedmap v0.2.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/invopop/jsonschema v0.4.0 h1:Yuy/unfgCnfV5Wl7H0HgFufp/rlurqPOOuacqyByrws=
|
||||
github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0=
|
||||
github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg=
|
||||
github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
||||
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
|
||||
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/cpuid/v2 v2.1.2 h1:XhdX4fqAJUA0yj+kUwMavO0hHrSPAecYdYf1ZmxHvak=
|
||||
github.com/klauspost/cpuid/v2 v2.1.2/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
|
||||
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
|
||||
github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
|
||||
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
|
||||
@@ -261,25 +143,29 @@ github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
|
||||
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mholt/acmez v1.0.4 h1:N3cE4Pek+dSolbsofIkAYz6H1d3pE+2G0os7QHslf80=
|
||||
github.com/mholt/acmez v1.0.4/go.mod h1:qFGLZ4u+ehWINeJZjzPlsnjJBCPAADWTcIqE/7DAYQY=
|
||||
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
|
||||
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
|
||||
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
|
||||
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
|
||||
github.com/minio/minio-go/v7 v7.0.47 h1:sLiuCKGSIcn/MI6lREmTzX91DX/oRau4ia0j6e6eOSs=
|
||||
github.com/minio/minio-go/v7 v7.0.47/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw=
|
||||
github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g=
|
||||
github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM=
|
||||
github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U=
|
||||
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
|
||||
@@ -287,66 +173,47 @@ github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6
|
||||
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
|
||||
github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI=
|
||||
github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig=
|
||||
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
|
||||
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 h1:Y7qCvg282QmlyrVQuL2fgGwebuw7zvfnRym09r+dUGc=
|
||||
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3/go.mod h1:0ZE5gcyWKS151WBDIpmLshHY0l+3edpuKnBUWVVbWKk=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_golang v1.13.1 h1:3gMjIY2+/hzmqhtUC/aQNYldJA6DtH3CgQvwS+02K1c=
|
||||
github.com/prometheus/client_golang v1.13.1/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo=
|
||||
github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
|
||||
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
|
||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
|
||||
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
|
||||
github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY=
|
||||
github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
|
||||
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
|
||||
github.com/shirou/gopsutil/v3 v3.22.10 h1:4KMHdfBRYXGF9skjDWiL4RA2N+E8dRdodU/bOZpPoVg=
|
||||
github.com/shirou/gopsutil/v3 v3.22.10/go.mod h1:QNza6r4YQoydyCfo6rH0blGfKahgibh4dQmV5xdFkQk=
|
||||
github.com/shirou/gopsutil/v3 v3.22.11 h1:kxsPKS+Eeo+VnEQ2XCaGJepeP6KY53QoRTETx3+1ndM=
|
||||
github.com/shirou/gopsutil/v3 v3.22.11/go.mod h1:xl0EeL4vXJ+hQMAGN8B9VFpxukEMA0XdevQOe5MZ1oY=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
@@ -363,11 +230,10 @@ github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a/go.mod h1:lKJPbtWzJ9J
|
||||
github.com/swaggo/swag v1.8.1/go.mod h1:ugemnJsPZm/kRwFUnzBlbHRd0JY9zE1M4F+uy2pAaPQ=
|
||||
github.com/swaggo/swag v1.8.7 h1:2K9ivTD3teEO+2fXV6zrZKDqk5IuU2aJtBDo8U7omWU=
|
||||
github.com/swaggo/swag v1.8.7/go.mod h1:ezQVUUhly8dludpVk+/PuwJWvLLanB13ygV5Pr9enSk=
|
||||
github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw=
|
||||
github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk=
|
||||
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
|
||||
github.com/tklauser/numcpus v0.5.0 h1:ooe7gN0fg6myJ0EKoTAf5hebTZrH52px3New/D9iJ+A=
|
||||
github.com/tklauser/numcpus v0.5.0/go.mod h1:OGzpTxpcIMNGYQdit2BYL1pvk/dSOaJWjKoflh+RQjo=
|
||||
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
|
||||
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
|
||||
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
|
||||
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
|
||||
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
|
||||
github.com/urfave/cli/v2 v2.8.1 h1:CGuYNZF9IKZY/rfBe3lJpccSoIY1ytfvmgQT90cNOl4=
|
||||
github.com/urfave/cli/v2 v2.8.1/go.mod h1:Z41J9TPoffeoqP0Iza0YbAhGvymRdZAd2uPmZ5JxRdY=
|
||||
@@ -387,180 +253,69 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
|
||||
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
|
||||
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
|
||||
go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
|
||||
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
|
||||
go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
|
||||
go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
|
||||
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
|
||||
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
|
||||
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
|
||||
go.uber.org/zap v1.23.0 h1:OjGQ5KQDEUawVHxNwQgPpiypGHOxo2mNZsOqTak4fFY=
|
||||
go.uber.org/zap v1.23.0/go.mod h1:D+nX8jyLsMHMYrln8A0rJjFt/T/9/bGgIhAqxv5URuY=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
|
||||
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
|
||||
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
|
||||
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
|
||||
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
|
||||
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
|
||||
golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
|
||||
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
|
||||
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -569,165 +324,49 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
|
||||
golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
|
||||
golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
|
||||
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
|
||||
golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4=
|
||||
golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
|
||||
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@@ -735,13 +374,12 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
|
||||
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
@@ -749,13 +387,3 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
|
||||
type AVstreamIO struct {
|
||||
State string `json:"state" enums:"running,idle" jsonschema:"enum=running,enum=idle"`
|
||||
Packet uint64 `json:"packet"`
|
||||
Packet uint64 `json:"packet" format:"uint64"`
|
||||
Time uint64 `json:"time"`
|
||||
Size uint64 `json:"size_kb"`
|
||||
}
|
||||
@@ -25,11 +25,11 @@ func (i *AVstreamIO) Unmarshal(io *app.AVstreamIO) {
|
||||
type AVstream struct {
|
||||
Input AVstreamIO `json:"input"`
|
||||
Output AVstreamIO `json:"output"`
|
||||
Aqueue uint64 `json:"aqueue"`
|
||||
Queue uint64 `json:"queue"`
|
||||
Dup uint64 `json:"dup"`
|
||||
Drop uint64 `json:"drop"`
|
||||
Enc uint64 `json:"enc"`
|
||||
Aqueue uint64 `json:"aqueue" format:"uint64"`
|
||||
Queue uint64 `json:"queue" format:"uint64"`
|
||||
Dup uint64 `json:"dup" format:"uint64"`
|
||||
Drop uint64 `json:"drop" format:"uint64"`
|
||||
Enc uint64 `json:"enc" format:"uint64"`
|
||||
Looping bool `json:"looping"`
|
||||
Duplicating bool `json:"duplicating"`
|
||||
GOP string `json:"gop"`
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
|
||||
// Error represents an error response of the API
|
||||
type Error struct {
|
||||
Code int `json:"code" jsonschema:"required"`
|
||||
Code int `json:"code" jsonschema:"required" format:"int"`
|
||||
Message string `json:"message" jsonschema:""`
|
||||
Details []string `json:"details" jsonschema:""`
|
||||
}
|
||||
|
||||
@@ -3,6 +3,13 @@ package api
|
||||
// FileInfo represents informatiion about a file on a filesystem
|
||||
type FileInfo struct {
|
||||
Name string `json:"name" jsonschema:"minLength=1"`
|
||||
Size int64 `json:"size_bytes" jsonschema:"minimum=0"`
|
||||
LastMod int64 `json:"last_modified" jsonschema:"minimum=0"`
|
||||
Size int64 `json:"size_bytes" jsonschema:"minimum=0" format:"int64"`
|
||||
LastMod int64 `json:"last_modified" jsonschema:"minimum=0" format:"int64"`
|
||||
}
|
||||
|
||||
// FilesystemInfo represents information about a filesystem
|
||||
type FilesystemInfo struct {
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
Mount string `json:"mount"`
|
||||
}
|
||||
|
||||
@@ -19,8 +19,8 @@ type MetricsQueryMetric struct {
|
||||
}
|
||||
|
||||
type MetricsQuery struct {
|
||||
Timerange int64 `json:"timerange_sec"`
|
||||
Interval int64 `json:"interval_sec"`
|
||||
Timerange int64 `json:"timerange_sec" format:"int64"`
|
||||
Interval int64 `json:"interval_sec" format:"int64"`
|
||||
Metrics []MetricsQueryMetric `json:"metrics"`
|
||||
}
|
||||
|
||||
@@ -51,8 +51,8 @@ func (v MetricsResponseValue) MarshalJSON() ([]byte, error) {
|
||||
}
|
||||
|
||||
type MetricsResponse struct {
|
||||
Timerange int64 `json:"timerange_sec"`
|
||||
Interval int64 `json:"interval_sec"`
|
||||
Timerange int64 `json:"timerange_sec" format:"int64"`
|
||||
Interval int64 `json:"interval_sec" format:"int64"`
|
||||
Metrics []MetricsResponseMetric `json:"metrics"`
|
||||
}
|
||||
|
||||
|
||||
@@ -4,9 +4,9 @@ import "github.com/datarhei/core/v16/playout"
|
||||
|
||||
type PlayoutStatusIO struct {
|
||||
State string `json:"state" enums:"running,idle" jsonschema:"enum=running,enum=idle"`
|
||||
Packet uint64 `json:"packet"`
|
||||
Time uint64 `json:"time"`
|
||||
Size uint64 `json:"size_kb"`
|
||||
Packet uint64 `json:"packet" format:"uint64"`
|
||||
Time uint64 `json:"time" format:"uint64"`
|
||||
Size uint64 `json:"size_kb" format:"uint64"`
|
||||
}
|
||||
|
||||
func (i *PlayoutStatusIO) Unmarshal(io playout.StatusIO) {
|
||||
@@ -33,12 +33,12 @@ func (s *PlayoutStatusSwap) Unmarshal(swap playout.StatusSwap) {
|
||||
type PlayoutStatus struct {
|
||||
ID string `json:"id"`
|
||||
Address string `json:"url"`
|
||||
Stream uint64 `json:"stream"`
|
||||
Queue uint64 `json:"queue"`
|
||||
AQueue uint64 `json:"aqueue"`
|
||||
Dup uint64 `json:"dup"`
|
||||
Drop uint64 `json:"drop"`
|
||||
Enc uint64 `json:"enc"`
|
||||
Stream uint64 `json:"stream" format:"uint64"`
|
||||
Queue uint64 `json:"queue" format:"uint64"`
|
||||
AQueue uint64 `json:"aqueue" format:"uint64"`
|
||||
Dup uint64 `json:"dup" format:"uint64"`
|
||||
Drop uint64 `json:"drop" format:"uint64"`
|
||||
Enc uint64 `json:"enc" format:"uint64"`
|
||||
Looping bool `json:"looping"`
|
||||
Duplicating bool `json:"duplicating"`
|
||||
GOP string `json:"gop"`
|
||||
|
||||
@@ -11,8 +11,8 @@ type ProbeIO struct {
|
||||
// common
|
||||
Address string `json:"url"`
|
||||
Format string `json:"format"`
|
||||
Index uint64 `json:"index"`
|
||||
Stream uint64 `json:"stream"`
|
||||
Index uint64 `json:"index" format:"uint64"`
|
||||
Stream uint64 `json:"stream" format:"uint64"`
|
||||
Language string `json:"language"`
|
||||
Type string `json:"type"`
|
||||
Codec string `json:"codec"`
|
||||
@@ -23,13 +23,13 @@ type ProbeIO struct {
|
||||
// video
|
||||
FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"`
|
||||
Pixfmt string `json:"pix_fmt"`
|
||||
Width uint64 `json:"width"`
|
||||
Height uint64 `json:"height"`
|
||||
Width uint64 `json:"width" format:"uint64"`
|
||||
Height uint64 `json:"height" format:"uint64"`
|
||||
|
||||
// audio
|
||||
Sampling uint64 `json:"sampling_hz"`
|
||||
Sampling uint64 `json:"sampling_hz" format:"uint64"`
|
||||
Layout string `json:"layout"`
|
||||
Channels uint64 `json:"channels"`
|
||||
Channels uint64 `json:"channels" format:"uint64"`
|
||||
}
|
||||
|
||||
func (i *ProbeIO) Unmarshal(io *app.ProbeIO) {
|
||||
|
||||
@@ -13,7 +13,7 @@ type Process struct {
|
||||
ID string `json:"id" jsonschema:"minLength=1"`
|
||||
Type string `json:"type" jsonschema:"enum=ffmpeg"`
|
||||
Reference string `json:"reference"`
|
||||
CreatedAt int64 `json:"created_at" jsonschema:"minimum=0"`
|
||||
CreatedAt int64 `json:"created_at" jsonschema:"minimum=0" format:"int64"`
|
||||
Config *ProcessConfig `json:"config,omitempty"`
|
||||
State *ProcessState `json:"state,omitempty"`
|
||||
Report *ProcessReport `json:"report,omitempty"`
|
||||
@@ -30,15 +30,15 @@ type ProcessConfigIO struct {
|
||||
|
||||
type ProcessConfigIOCleanup struct {
|
||||
Pattern string `json:"pattern" validate:"required"`
|
||||
MaxFiles uint `json:"max_files"`
|
||||
MaxFileAge uint `json:"max_file_age_seconds"`
|
||||
MaxFiles uint `json:"max_files" format:"uint"`
|
||||
MaxFileAge uint `json:"max_file_age_seconds" format:"uint"`
|
||||
PurgeOnDelete bool `json:"purge_on_delete"`
|
||||
}
|
||||
|
||||
type ProcessConfigLimits struct {
|
||||
CPU float64 `json:"cpu_usage" jsonschema:"minimum=0,maximum=100"`
|
||||
Memory uint64 `json:"memory_mbytes" jsonschema:"minimum=0"`
|
||||
WaitFor uint64 `json:"waitfor_seconds" jsonschema:"minimum=0"`
|
||||
Memory uint64 `json:"memory_mbytes" jsonschema:"minimum=0" format:"uint64"`
|
||||
WaitFor uint64 `json:"waitfor_seconds" jsonschema:"minimum=0" format:"uint64"`
|
||||
}
|
||||
|
||||
// ProcessConfig represents the configuration of an ffmpeg process
|
||||
@@ -50,9 +50,9 @@ type ProcessConfig struct {
|
||||
Output []ProcessConfigIO `json:"output" validate:"required"`
|
||||
Options []string `json:"options"`
|
||||
Reconnect bool `json:"reconnect"`
|
||||
ReconnectDelay uint64 `json:"reconnect_delay_seconds"`
|
||||
ReconnectDelay uint64 `json:"reconnect_delay_seconds" format:"uint64"`
|
||||
Autostart bool `json:"autostart"`
|
||||
StaleTimeout uint64 `json:"stale_timeout_seconds"`
|
||||
StaleTimeout uint64 `json:"stale_timeout_seconds" format:"uint64"`
|
||||
Limits ProcessConfigLimits `json:"limits"`
|
||||
}
|
||||
|
||||
@@ -188,7 +188,7 @@ func (cfg *ProcessConfig) Unmarshal(c *app.Config) {
|
||||
|
||||
// ProcessReportHistoryEntry represents the logs of a run of a restream process
|
||||
type ProcessReportHistoryEntry struct {
|
||||
CreatedAt int64 `json:"created_at"`
|
||||
CreatedAt int64 `json:"created_at" format:"int64"`
|
||||
Prelude []string `json:"prelude"`
|
||||
Log [][2]string `json:"log"`
|
||||
}
|
||||
@@ -235,11 +235,11 @@ func (report *ProcessReport) Unmarshal(l *app.Log) {
|
||||
type ProcessState struct {
|
||||
Order string `json:"order" jsonschema:"enum=start,enum=stop"`
|
||||
State string `json:"exec" jsonschema:"enum=finished,enum=starting,enum=running,enum=finishing,enum=killed,enum=failed"`
|
||||
Runtime int64 `json:"runtime_seconds" jsonschema:"minimum=0"`
|
||||
Reconnect int64 `json:"reconnect_seconds"`
|
||||
Runtime int64 `json:"runtime_seconds" jsonschema:"minimum=0" format:"int64"`
|
||||
Reconnect int64 `json:"reconnect_seconds" format:"int64"`
|
||||
LastLog string `json:"last_logline"`
|
||||
Progress *Progress `json:"progress"`
|
||||
Memory uint64 `json:"memory_bytes"`
|
||||
Memory uint64 `json:"memory_bytes" format:"uint64"`
|
||||
CPU json.Number `json:"cpu_usage" swaggertype:"number" jsonschema:"type=number"`
|
||||
Command []string `json:"command"`
|
||||
}
|
||||
|
||||
@@ -13,29 +13,29 @@ type ProgressIO struct {
|
||||
Address string `json:"address" jsonschema:"minLength=1"`
|
||||
|
||||
// General
|
||||
Index uint64 `json:"index"`
|
||||
Stream uint64 `json:"stream"`
|
||||
Index uint64 `json:"index" format:"uint64"`
|
||||
Stream uint64 `json:"stream" format:"uint64"`
|
||||
Format string `json:"format"`
|
||||
Type string `json:"type"`
|
||||
Codec string `json:"codec"`
|
||||
Coder string `json:"coder"`
|
||||
Frame uint64 `json:"frame"`
|
||||
Frame uint64 `json:"frame" format:"uint64"`
|
||||
FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"`
|
||||
Packet uint64 `json:"packet"`
|
||||
Packet uint64 `json:"packet" format:"uint64"`
|
||||
PPS json.Number `json:"pps" swaggertype:"number" jsonschema:"type=number"`
|
||||
Size uint64 `json:"size_kb"` // kbytes
|
||||
Size uint64 `json:"size_kb" format:"uint64"` // kbytes
|
||||
Bitrate json.Number `json:"bitrate_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s
|
||||
|
||||
// Video
|
||||
Pixfmt string `json:"pix_fmt,omitempty"`
|
||||
Quantizer json.Number `json:"q,omitempty" swaggertype:"number" jsonschema:"type=number"`
|
||||
Width uint64 `json:"width,omitempty"`
|
||||
Height uint64 `json:"height,omitempty"`
|
||||
Width uint64 `json:"width,omitempty" format:"uint64"`
|
||||
Height uint64 `json:"height,omitempty" format:"uint64"`
|
||||
|
||||
// Audio
|
||||
Sampling uint64 `json:"sampling_hz,omitempty"`
|
||||
Sampling uint64 `json:"sampling_hz,omitempty" format:"uint64"`
|
||||
Layout string `json:"layout,omitempty"`
|
||||
Channels uint64 `json:"channels,omitempty"`
|
||||
Channels uint64 `json:"channels,omitempty" format:"uint64"`
|
||||
|
||||
// avstream
|
||||
AVstream *AVstream `json:"avstream"`
|
||||
@@ -79,16 +79,16 @@ func (i *ProgressIO) Unmarshal(io *app.ProgressIO) {
|
||||
type Progress struct {
|
||||
Input []ProgressIO `json:"inputs"`
|
||||
Output []ProgressIO `json:"outputs"`
|
||||
Frame uint64 `json:"frame"`
|
||||
Packet uint64 `json:"packet"`
|
||||
Frame uint64 `json:"frame" format:"uint64"`
|
||||
Packet uint64 `json:"packet" format:"uint64"`
|
||||
FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"`
|
||||
Quantizer json.Number `json:"q" swaggertype:"number" jsonschema:"type=number"`
|
||||
Size uint64 `json:"size_kb"` // kbytes
|
||||
Size uint64 `json:"size_kb" format:"uint64"` // kbytes
|
||||
Time json.Number `json:"time" swaggertype:"number" jsonschema:"type=number"`
|
||||
Bitrate json.Number `json:"bitrate_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s
|
||||
Speed json.Number `json:"speed" swaggertype:"number" jsonschema:"type=number"`
|
||||
Drop uint64 `json:"drop"`
|
||||
Dup uint64 `json:"dup"`
|
||||
Drop uint64 `json:"drop" format:"uint64"`
|
||||
Dup uint64 `json:"dup" format:"uint64"`
|
||||
}
|
||||
|
||||
// Unmarshal converts a restreamer Progress to a Progress in API representation
|
||||
|
||||
@@ -8,9 +8,9 @@ import (
|
||||
|
||||
// SessionStats are the accumulated numbers for the session summary
|
||||
type SessionStats struct {
|
||||
TotalSessions uint64 `json:"sessions"`
|
||||
TotalRxBytes uint64 `json:"traffic_rx_mb"`
|
||||
TotalTxBytes uint64 `json:"traffic_tx_mb"`
|
||||
TotalSessions uint64 `json:"sessions" format:"uint64"`
|
||||
TotalRxBytes uint64 `json:"traffic_rx_mb" format:"uint64"`
|
||||
TotalTxBytes uint64 `json:"traffic_tx_mb" format:"uint64"`
|
||||
}
|
||||
|
||||
// SessionPeers is for the grouping by peers in the summary
|
||||
@@ -24,12 +24,12 @@ type SessionPeers struct {
|
||||
type Session struct {
|
||||
ID string `json:"id"`
|
||||
Reference string `json:"reference"`
|
||||
CreatedAt int64 `json:"created_at"`
|
||||
CreatedAt int64 `json:"created_at" format:"int64"`
|
||||
Location string `json:"local"`
|
||||
Peer string `json:"remote"`
|
||||
Extra string `json:"extra"`
|
||||
RxBytes uint64 `json:"bytes_rx"`
|
||||
TxBytes uint64 `json:"bytes_tx"`
|
||||
RxBytes uint64 `json:"bytes_rx" format:"uint64"`
|
||||
TxBytes uint64 `json:"bytes_tx" format:"uint64"`
|
||||
RxBitrate json.Number `json:"bandwidth_rx_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s
|
||||
TxBitrate json.Number `json:"bandwidth_tx_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s
|
||||
}
|
||||
@@ -50,10 +50,10 @@ func (s *Session) Unmarshal(sess session.Session) {
|
||||
// SessionSummaryActive represents the currently active sessions
|
||||
type SessionSummaryActive struct {
|
||||
SessionList []Session `json:"list"`
|
||||
Sessions uint64 `json:"sessions"`
|
||||
Sessions uint64 `json:"sessions" format:"uint64"`
|
||||
RxBitrate json.Number `json:"bandwidth_rx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s
|
||||
TxBitrate json.Number `json:"bandwidth_tx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s
|
||||
MaxSessions uint64 `json:"max_sessions"`
|
||||
MaxSessions uint64 `json:"max_sessions" format:"uint64"`
|
||||
MaxRxBitrate json.Number `json:"max_bandwidth_rx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s
|
||||
MaxTxBitrate json.Number `json:"max_bandwidth_tx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s
|
||||
}
|
||||
|
||||
@@ -8,60 +8,60 @@ import (
|
||||
|
||||
// SRTStatistics represents the statistics of a SRT connection
|
||||
type SRTStatistics struct {
|
||||
MsTimeStamp uint64 `json:"timestamp_ms"` // The time elapsed, in milliseconds, since the SRT socket has been created
|
||||
MsTimeStamp uint64 `json:"timestamp_ms" format:"uint64"` // The time elapsed, in milliseconds, since the SRT socket has been created
|
||||
|
||||
// Accumulated
|
||||
|
||||
PktSent uint64 `json:"sent_pkt"` // The total number of sent DATA packets, including retransmitted packets
|
||||
PktRecv uint64 `json:"recv_pkt"` // The total number of received DATA packets, including retransmitted packets
|
||||
PktSentUnique uint64 `json:"sent_unique_pkt"` // The total number of unique DATA packets sent by the SRT sender
|
||||
PktRecvUnique uint64 `json:"recv_unique_pkt"` // The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver.
|
||||
PktSndLoss uint64 `json:"send_loss_pkt"` // The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side.
|
||||
PktRcvLoss uint64 `json:"recv_loss_pkt"` // The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side
|
||||
PktRetrans uint64 `json:"sent_retrans_pkt"` // The total number of retransmitted packets sent by the SRT sender
|
||||
PktRcvRetrans uint64 `json:"recv_retran_pkts"` // The total number of retransmitted packets registered at the receiver side
|
||||
PktSentACK uint64 `json:"sent_ack_pkt"` // The total number of sent ACK (Acknowledgement) control packets
|
||||
PktRecvACK uint64 `json:"recv_ack_pkt"` // The total number of received ACK (Acknowledgement) control packets
|
||||
PktSentNAK uint64 `json:"sent_nak_pkt"` // The total number of sent NAK (Negative Acknowledgement) control packets
|
||||
PktRecvNAK uint64 `json:"recv_nak_pkt"` // The total number of received NAK (Negative Acknowledgement) control packets
|
||||
PktSentKM uint64 `json:"send_km_pkt"` // The total number of sent KM (Key Material) control packets
|
||||
PktRecvKM uint64 `json:"recv_km_pkt"` // The total number of received KM (Key Material) control packets
|
||||
UsSndDuration uint64 `json:"send_duration_us"` // The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged
|
||||
PktSndDrop uint64 `json:"send_drop_pkt"` // The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time
|
||||
PktRcvDrop uint64 `json:"recv_drop_pkt"` // The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets
|
||||
PktRcvUndecrypt uint64 `json:"recv_undecrypt_pkt"` // The total number of packets that failed to be decrypted at the receiver side
|
||||
PktSent uint64 `json:"sent_pkt" format:"uint64"` // The total number of sent DATA packets, including retransmitted packets
|
||||
PktRecv uint64 `json:"recv_pkt" format:"uint64"` // The total number of received DATA packets, including retransmitted packets
|
||||
PktSentUnique uint64 `json:"sent_unique_pkt" format:"uint64"` // The total number of unique DATA packets sent by the SRT sender
|
||||
PktRecvUnique uint64 `json:"recv_unique_pkt" format:"uint64"` // The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver.
|
||||
PktSndLoss uint64 `json:"send_loss_pkt" format:"uint64"` // The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side.
|
||||
PktRcvLoss uint64 `json:"recv_loss_pkt" format:"uint64"` // The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side
|
||||
PktRetrans uint64 `json:"sent_retrans_pkt" format:"uint64"` // The total number of retransmitted packets sent by the SRT sender
|
||||
PktRcvRetrans uint64 `json:"recv_retran_pkts" format:"uint64"` // The total number of retransmitted packets registered at the receiver side
|
||||
PktSentACK uint64 `json:"sent_ack_pkt" format:"uint64"` // The total number of sent ACK (Acknowledgement) control packets
|
||||
PktRecvACK uint64 `json:"recv_ack_pkt" format:"uint64"` // The total number of received ACK (Acknowledgement) control packets
|
||||
PktSentNAK uint64 `json:"sent_nak_pkt" format:"uint64"` // The total number of sent NAK (Negative Acknowledgement) control packets
|
||||
PktRecvNAK uint64 `json:"recv_nak_pkt" format:"uint64"` // The total number of received NAK (Negative Acknowledgement) control packets
|
||||
PktSentKM uint64 `json:"send_km_pkt" format:"uint64"` // The total number of sent KM (Key Material) control packets
|
||||
PktRecvKM uint64 `json:"recv_km_pkt" format:"uint64"` // The total number of received KM (Key Material) control packets
|
||||
UsSndDuration uint64 `json:"send_duration_us" format:"uint64"` // The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged
|
||||
PktSndDrop uint64 `json:"send_drop_pkt" format:"uint64"` // The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time
|
||||
PktRcvDrop uint64 `json:"recv_drop_pkt" format:"uint64"` // The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets
|
||||
PktRcvUndecrypt uint64 `json:"recv_undecrypt_pkt" format:"uint64"` // The total number of packets that failed to be decrypted at the receiver side
|
||||
|
||||
ByteSent uint64 `json:"sent_bytes"` // Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteRecv uint64 `json:"recv_bytes"` // Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteSentUnique uint64 `json:"sent_unique_bytes"` // Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteRecvUnique uint64 `json:"recv_unique_bytes"` // Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteRcvLoss uint64 `json:"recv_loss_bytes"` // Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size
|
||||
ByteRetrans uint64 `json:"sent_retrans_bytes"` // Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteSndDrop uint64 `json:"send_drop_bytes"` // Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteRcvDrop uint64 `json:"recv_drop_bytes"` // Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteRcvUndecrypt uint64 `json:"recv_undecrypt_bytes"` // Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteSent uint64 `json:"sent_bytes" format:"uint64"` // Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteRecv uint64 `json:"recv_bytes" format:"uint64"` // Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteSentUnique uint64 `json:"sent_unique_bytes" format:"uint64"` // Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteRecvUnique uint64 `json:"recv_unique_bytes" format:"uint64"` // Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteRcvLoss uint64 `json:"recv_loss_bytes" format:"uint64"` // Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size
|
||||
ByteRetrans uint64 `json:"sent_retrans_bytes" format:"uint64"` // Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteSndDrop uint64 `json:"send_drop_bytes" format:"uint64"` // Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteRcvDrop uint64 `json:"recv_drop_bytes" format:"uint64"` // Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
ByteRcvUndecrypt uint64 `json:"recv_undecrypt_bytes" format:"uint64"` // Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)
|
||||
|
||||
// Instantaneous
|
||||
|
||||
UsPktSndPeriod float64 `json:"pkt_send_period_us"` // Current minimum time interval between which consecutive packets are sent, in microseconds
|
||||
PktFlowWindow uint64 `json:"flow_window_pkt"` // The maximum number of packets that can be "in flight"
|
||||
PktFlightSize uint64 `json:"flight_size_pkt"` // The number of packets in flight
|
||||
MsRTT float64 `json:"rtt_ms"` // Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds
|
||||
MbpsBandwidth float64 `json:"bandwidth_mbit"` // Estimated bandwidth of the network link, in Mbps
|
||||
ByteAvailSndBuf uint64 `json:"avail_send_buf_bytes"` // The available space in the sender's buffer, in bytes
|
||||
ByteAvailRcvBuf uint64 `json:"avail_recv_buf_bytes"` // The available space in the receiver's buffer, in bytes
|
||||
MbpsMaxBW float64 `json:"max_bandwidth_mbit"` // Transmission bandwidth limit, in Mbps
|
||||
ByteMSS uint64 `json:"mss_bytes"` // Maximum Segment Size (MSS), in bytes
|
||||
PktSndBuf uint64 `json:"send_buf_pkt"` // The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged
|
||||
ByteSndBuf uint64 `json:"send_buf_bytes"` // Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT)
|
||||
MsSndBuf uint64 `json:"send_buf_ms"` // The timespan (msec) of packets in the sender's buffer (unacknowledged packets)
|
||||
MsSndTsbPdDelay uint64 `json:"send_tsbpd_delay_ms"` // Timestamp-based Packet Delivery Delay value of the peer
|
||||
PktRcvBuf uint64 `json:"recv_buf_pkt"` // The number of acknowledged packets in receiver's buffer
|
||||
ByteRcvBuf uint64 `json:"recv_buf_bytes"` // Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT)
|
||||
MsRcvBuf uint64 `json:"recv_buf_ms"` // The timespan (msec) of acknowledged packets in the receiver's buffer
|
||||
MsRcvTsbPdDelay uint64 `json:"recv_tsbpd_delay_ms"` // Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY
|
||||
PktReorderTolerance uint64 `json:"reorder_tolerance_pkt"` // Instant value of the packet reorder tolerance
|
||||
PktRcvAvgBelatedTime uint64 `json:"pkt_recv_avg_belated_time_ms"` // Accumulated difference between the current time and the time-to-play of a packet that is received late
|
||||
UsPktSndPeriod float64 `json:"pkt_send_period_us"` // Current minimum time interval between which consecutive packets are sent, in microseconds
|
||||
PktFlowWindow uint64 `json:"flow_window_pkt" format:"uint64"` // The maximum number of packets that can be "in flight"
|
||||
PktFlightSize uint64 `json:"flight_size_pkt" format:"uint64"` // The number of packets in flight
|
||||
MsRTT float64 `json:"rtt_ms"` // Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds
|
||||
MbpsBandwidth float64 `json:"bandwidth_mbit"` // Estimated bandwidth of the network link, in Mbps
|
||||
ByteAvailSndBuf uint64 `json:"avail_send_buf_bytes" format:"uint64"` // The available space in the sender's buffer, in bytes
|
||||
ByteAvailRcvBuf uint64 `json:"avail_recv_buf_bytes" format:"uint64"` // The available space in the receiver's buffer, in bytes
|
||||
MbpsMaxBW float64 `json:"max_bandwidth_mbit"` // Transmission bandwidth limit, in Mbps
|
||||
ByteMSS uint64 `json:"mss_bytes" format:"uint64"` // Maximum Segment Size (MSS), in bytes
|
||||
PktSndBuf uint64 `json:"send_buf_pkt" format:"uint64"` // The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged
|
||||
ByteSndBuf uint64 `json:"send_buf_bytes" format:"uint64"` // Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT)
|
||||
MsSndBuf uint64 `json:"send_buf_ms" format:"uint64"` // The timespan (msec) of packets in the sender's buffer (unacknowledged packets)
|
||||
MsSndTsbPdDelay uint64 `json:"send_tsbpd_delay_ms" format:"uint64"` // Timestamp-based Packet Delivery Delay value of the peer
|
||||
PktRcvBuf uint64 `json:"recv_buf_pkt" format:"uint64"` // The number of acknowledged packets in receiver's buffer
|
||||
ByteRcvBuf uint64 `json:"recv_buf_bytes" format:"uint64"` // Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT)
|
||||
MsRcvBuf uint64 `json:"recv_buf_ms" format:"uint64"` // The timespan (msec) of acknowledged packets in the receiver's buffer
|
||||
MsRcvTsbPdDelay uint64 `json:"recv_tsbpd_delay_ms" format:"uint64"` // Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY
|
||||
PktReorderTolerance uint64 `json:"reorder_tolerance_pkt" format:"uint64"` // Instant value of the packet reorder tolerance
|
||||
PktRcvAvgBelatedTime uint64 `json:"pkt_recv_avg_belated_time_ms" format:"uint64"` // Accumulated difference between the current time and the time-to-play of a packet that is received late
|
||||
}
|
||||
|
||||
// Unmarshal converts the SRT statistics into API representation
|
||||
@@ -119,7 +119,7 @@ func (s *SRTStatistics) Unmarshal(ss *gosrt.Statistics) {
|
||||
}
|
||||
|
||||
type SRTLog struct {
|
||||
Timestamp int64 `json:"ts"`
|
||||
Timestamp int64 `json:"ts" format:"int64"`
|
||||
Message []string `json:"msg"`
|
||||
}
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package api
|
||||
|
||||
type WidgetProcess struct {
|
||||
CurrentSessions uint64 `json:"current_sessions"`
|
||||
TotalSessions uint64 `json:"total_sessions"`
|
||||
CurrentSessions uint64 `json:"current_sessions" format:"uint64"`
|
||||
TotalSessions uint64 `json:"total_sessions" format:"uint64"`
|
||||
Uptime int64 `json:"uptime"`
|
||||
}
|
||||
|
||||
25
http/fs/fs.go
Normal file
25
http/fs/fs.go
Normal file
@@ -0,0 +1,25 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"github.com/datarhei/core/v16/http/cache"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
)
|
||||
|
||||
type FS struct {
|
||||
Name string
|
||||
Mountpoint string
|
||||
|
||||
AllowWrite bool
|
||||
|
||||
EnableAuth bool
|
||||
Username string
|
||||
Password string
|
||||
|
||||
DefaultFile string
|
||||
DefaultContentType string
|
||||
Gzip bool
|
||||
|
||||
Filesystem fs.Filesystem
|
||||
|
||||
Cache cache.Cacher
|
||||
}
|
||||
@@ -3,6 +3,7 @@ package api
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
cfgstore "github.com/datarhei/core/v16/config/store"
|
||||
cfgvars "github.com/datarhei/core/v16/config/vars"
|
||||
@@ -71,6 +72,10 @@ func (p *ConfigHandler) Set(c echo.Context) error {
|
||||
}
|
||||
|
||||
cfg := p.store.Get()
|
||||
cfgActive := p.store.GetActive()
|
||||
|
||||
// Copy the timestamp of when this config has been used
|
||||
cfg.LoadedAt = cfgActive.LoadedAt
|
||||
|
||||
// For each version, set the current config as default config value. This will
|
||||
// allow to set a partial config without destroying the other values.
|
||||
@@ -119,6 +124,9 @@ func (p *ConfigHandler) Set(c echo.Context) error {
|
||||
return api.Err(http.StatusBadRequest, "Invalid config version", "version %d", version.Version)
|
||||
}
|
||||
|
||||
cfg.CreatedAt = time.Now()
|
||||
cfg.UpdatedAt = cfg.CreatedAt
|
||||
|
||||
// Now we make a copy from the config and merge it with the environment
|
||||
// variables. If this configuration is valid, we will store the un-merged
|
||||
// one to disk.
|
||||
@@ -157,15 +165,15 @@ func (p *ConfigHandler) Set(c echo.Context) error {
|
||||
|
||||
// Reload will reload the currently active configuration
|
||||
// @Summary Reload the currently active configuration
|
||||
// @Description Reload the currently active configuration. This will trigger a restart of the Restreamer.
|
||||
// @Description Reload the currently active configuration. This will trigger a restart of the Core.
|
||||
// @Tags v16.7.2
|
||||
// @ID config-3-reload
|
||||
// @Produce plain
|
||||
// @Success 200 {string} string "OK"
|
||||
// @Produce json
|
||||
// @Success 200 {string} string
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/config/reload [get]
|
||||
func (p *ConfigHandler) Reload(c echo.Context) error {
|
||||
p.store.Reload()
|
||||
|
||||
return c.String(http.StatusOK, "OK")
|
||||
return c.JSON(http.StatusOK, "OK")
|
||||
}
|
||||
|
||||
@@ -4,20 +4,32 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/datarhei/core/v16/config"
|
||||
"github.com/datarhei/core/v16/config/store"
|
||||
v1 "github.com/datarhei/core/v16/config/v1"
|
||||
"github.com/datarhei/core/v16/http/mock"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/labstack/echo/v4"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func getDummyConfigRouter() (*echo.Echo, store.Store) {
|
||||
func getDummyConfigRouter(t *testing.T) (*echo.Echo, store.Store) {
|
||||
router := mock.DummyEcho()
|
||||
|
||||
config := store.NewDummy()
|
||||
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = memfs.WriteFileReader("./mime.types", strings.NewReader("xxxxx"))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = memfs.WriteFileReader("/bin/ffmpeg", strings.NewReader("xxxxx"))
|
||||
require.NoError(t, err)
|
||||
|
||||
config, err := store.NewJSON(memfs, "/config.json", nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
handler := NewConfig(config)
|
||||
|
||||
@@ -28,7 +40,7 @@ func getDummyConfigRouter() (*echo.Echo, store.Store) {
|
||||
}
|
||||
|
||||
func TestConfigGet(t *testing.T) {
|
||||
router, _ := getDummyConfigRouter()
|
||||
router, _ := getDummyConfigRouter(t)
|
||||
|
||||
mock.Request(t, http.StatusOK, router, "GET", "/", nil)
|
||||
|
||||
@@ -36,18 +48,21 @@ func TestConfigGet(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestConfigSetConflict(t *testing.T) {
|
||||
router, _ := getDummyConfigRouter()
|
||||
router, _ := getDummyConfigRouter(t)
|
||||
|
||||
cfg := config.New(nil)
|
||||
cfg.Storage.MimeTypes = "/path/to/mime.types"
|
||||
|
||||
var data bytes.Buffer
|
||||
|
||||
encoder := json.NewEncoder(&data)
|
||||
encoder.Encode(config.New())
|
||||
encoder.Encode(cfg)
|
||||
|
||||
mock.Request(t, http.StatusConflict, router, "PUT", "/", &data)
|
||||
}
|
||||
|
||||
func TestConfigSet(t *testing.T) {
|
||||
router, store := getDummyConfigRouter()
|
||||
router, store := getDummyConfigRouter(t)
|
||||
|
||||
storedcfg := store.Get()
|
||||
|
||||
@@ -57,11 +72,9 @@ func TestConfigSet(t *testing.T) {
|
||||
encoder := json.NewEncoder(&data)
|
||||
|
||||
// Setting a new v3 config
|
||||
cfg := config.New()
|
||||
cfg.FFmpeg.Binary = "true"
|
||||
cfg := config.New(nil)
|
||||
cfg.DB.Dir = "."
|
||||
cfg.Storage.Disk.Dir = "."
|
||||
cfg.Storage.MimeTypes = ""
|
||||
cfg.Storage.Disk.Cache.Types.Allow = []string{".aaa"}
|
||||
cfg.Storage.Disk.Cache.Types.Block = []string{".zzz"}
|
||||
cfg.Host.Name = []string{"foobar.com"}
|
||||
@@ -78,11 +91,9 @@ func TestConfigSet(t *testing.T) {
|
||||
require.Equal(t, "cert@datarhei.com", cfg.TLS.Email)
|
||||
|
||||
// Setting a complete v1 config
|
||||
cfgv1 := v1.New()
|
||||
cfgv1.FFmpeg.Binary = "true"
|
||||
cfgv1 := v1.New(nil)
|
||||
cfgv1.DB.Dir = "."
|
||||
cfgv1.Storage.Disk.Dir = "."
|
||||
cfgv1.Storage.MimeTypes = ""
|
||||
cfgv1.Storage.Disk.Cache.Types = []string{".bbb"}
|
||||
cfgv1.Host.Name = []string{"foobar.com"}
|
||||
|
||||
|
||||
@@ -1,215 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
||||
"github.com/datarhei/core/v16/http/api"
|
||||
"github.com/datarhei/core/v16/http/cache"
|
||||
"github.com/datarhei/core/v16/http/handler"
|
||||
"github.com/datarhei/core/v16/http/handler/util"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
)
|
||||
|
||||
// The DiskFSHandler type provides handlers for manipulating a filesystem
|
||||
type DiskFSHandler struct {
|
||||
cache cache.Cacher
|
||||
filesystem fs.Filesystem
|
||||
handler *handler.DiskFSHandler
|
||||
}
|
||||
|
||||
// NewDiskFS return a new DiskFS type. You have to provide a filesystem to act on and optionally
|
||||
// a Cacher where files will be purged from if the Cacher is related to the filesystem.
|
||||
func NewDiskFS(fs fs.Filesystem, cache cache.Cacher) *DiskFSHandler {
|
||||
return &DiskFSHandler{
|
||||
cache: cache,
|
||||
filesystem: fs,
|
||||
handler: handler.NewDiskFS(fs, cache),
|
||||
}
|
||||
}
|
||||
|
||||
// GetFile returns the file at the given path
|
||||
// @Summary Fetch a file from the filesystem
|
||||
// @Description Fetch a file from the filesystem. The contents of that file are returned.
|
||||
// @Tags v16.7.2
|
||||
// @ID diskfs-3-get-file
|
||||
// @Produce application/data
|
||||
// @Produce json
|
||||
// @Param path path string true "Path to file"
|
||||
// @Success 200 {file} byte
|
||||
// @Success 301 {string} string
|
||||
// @Failure 404 {object} api.Error
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/disk/{path} [get]
|
||||
func (h *DiskFSHandler) GetFile(c echo.Context) error {
|
||||
path := util.PathWildcardParam(c)
|
||||
|
||||
mimeType := c.Response().Header().Get(echo.HeaderContentType)
|
||||
c.Response().Header().Del(echo.HeaderContentType)
|
||||
|
||||
file := h.filesystem.Open(path)
|
||||
if file == nil {
|
||||
return api.Err(http.StatusNotFound, "File not found", path)
|
||||
}
|
||||
|
||||
stat, _ := file.Stat()
|
||||
|
||||
if stat.IsDir() {
|
||||
return api.Err(http.StatusNotFound, "File not found", path)
|
||||
}
|
||||
|
||||
defer file.Close()
|
||||
|
||||
c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))
|
||||
|
||||
if path, ok := stat.IsLink(); ok {
|
||||
path = filepath.Clean("/" + path)
|
||||
|
||||
if path[0] == '/' {
|
||||
path = path[1:]
|
||||
}
|
||||
|
||||
return c.Redirect(http.StatusMovedPermanently, path)
|
||||
}
|
||||
|
||||
c.Response().Header().Set(echo.HeaderContentType, mimeType)
|
||||
|
||||
if c.Request().Method == "HEAD" {
|
||||
return c.Blob(http.StatusOK, "application/data", nil)
|
||||
}
|
||||
|
||||
return c.Stream(http.StatusOK, "application/data", file)
|
||||
}
|
||||
|
||||
// PutFile adds or overwrites a file at the given path
|
||||
// @Summary Add a file to the filesystem
|
||||
// @Description Writes or overwrites a file on the filesystem
|
||||
// @Tags v16.7.2
|
||||
// @ID diskfs-3-put-file
|
||||
// @Accept application/data
|
||||
// @Produce text/plain
|
||||
// @Produce json
|
||||
// @Param path path string true "Path to file"
|
||||
// @Param data body []byte true "File data"
|
||||
// @Success 201 {string} string
|
||||
// @Success 204 {string} string
|
||||
// @Failure 507 {object} api.Error
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/disk/{path} [put]
|
||||
func (h *DiskFSHandler) PutFile(c echo.Context) error {
|
||||
path := util.PathWildcardParam(c)
|
||||
|
||||
c.Response().Header().Del(echo.HeaderContentType)
|
||||
|
||||
req := c.Request()
|
||||
|
||||
_, created, err := h.filesystem.Store(path, req.Body)
|
||||
if err != nil {
|
||||
return api.Err(http.StatusBadRequest, "%s", err)
|
||||
}
|
||||
|
||||
if h.cache != nil {
|
||||
h.cache.Delete(path)
|
||||
}
|
||||
|
||||
c.Response().Header().Set("Content-Location", req.URL.RequestURI())
|
||||
|
||||
if created {
|
||||
return c.String(http.StatusCreated, path)
|
||||
}
|
||||
|
||||
return c.NoContent(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// DeleteFile removes a file from the filesystem
|
||||
// @Summary Remove a file from the filesystem
|
||||
// @Description Remove a file from the filesystem
|
||||
// @Tags v16.7.2
|
||||
// @ID diskfs-3-delete-file
|
||||
// @Produce text/plain
|
||||
// @Param path path string true "Path to file"
|
||||
// @Success 200 {string} string
|
||||
// @Failure 404 {object} api.Error
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/disk/{path} [delete]
|
||||
func (h *DiskFSHandler) DeleteFile(c echo.Context) error {
|
||||
path := util.PathWildcardParam(c)
|
||||
|
||||
c.Response().Header().Del(echo.HeaderContentType)
|
||||
|
||||
size := h.filesystem.Delete(path)
|
||||
|
||||
if size < 0 {
|
||||
return api.Err(http.StatusNotFound, "File not found", path)
|
||||
}
|
||||
|
||||
if h.cache != nil {
|
||||
h.cache.Delete(path)
|
||||
}
|
||||
|
||||
return c.String(http.StatusOK, "OK")
|
||||
}
|
||||
|
||||
// ListFiles lists all files on the filesystem
|
||||
// @Summary List all files on the filesystem
|
||||
// @Description List all files on the filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.
|
||||
// @Tags v16.7.2
|
||||
// @ID diskfs-3-list-files
|
||||
// @Produce json
|
||||
// @Param glob query string false "glob pattern for file names"
|
||||
// @Param sort query string false "none, name, size, lastmod"
|
||||
// @Param order query string false "asc, desc"
|
||||
// @Success 200 {array} api.FileInfo
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/disk [get]
|
||||
func (h *DiskFSHandler) ListFiles(c echo.Context) error {
|
||||
pattern := util.DefaultQuery(c, "glob", "")
|
||||
sortby := util.DefaultQuery(c, "sort", "none")
|
||||
order := util.DefaultQuery(c, "order", "asc")
|
||||
|
||||
files := h.filesystem.List(pattern)
|
||||
|
||||
var sortFunc func(i, j int) bool
|
||||
|
||||
switch sortby {
|
||||
case "name":
|
||||
if order == "desc" {
|
||||
sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() }
|
||||
} else {
|
||||
sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() }
|
||||
}
|
||||
case "size":
|
||||
if order == "desc" {
|
||||
sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() }
|
||||
} else {
|
||||
sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() }
|
||||
}
|
||||
default:
|
||||
if order == "asc" {
|
||||
sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) }
|
||||
} else {
|
||||
sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) }
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(files, sortFunc)
|
||||
|
||||
fileinfos := []api.FileInfo{}
|
||||
|
||||
for _, f := range files {
|
||||
if f.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
fileinfos = append(fileinfos, api.FileInfo{
|
||||
Name: f.Name(),
|
||||
Size: f.Size(),
|
||||
LastMod: f.ModTime().Unix(),
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(http.StatusOK, fileinfos)
|
||||
}
|
||||
146
http/handler/api/filesystems.go
Normal file
146
http/handler/api/filesystems.go
Normal file
@@ -0,0 +1,146 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/datarhei/core/v16/http/api"
|
||||
"github.com/datarhei/core/v16/http/handler"
|
||||
"github.com/datarhei/core/v16/http/handler/util"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
)
|
||||
|
||||
type FSConfig struct {
|
||||
Type string
|
||||
Mountpoint string
|
||||
Handler *handler.FSHandler
|
||||
}
|
||||
|
||||
// The FSHandler type provides handlers for manipulating a filesystem
|
||||
type FSHandler struct {
|
||||
filesystems map[string]FSConfig
|
||||
}
|
||||
|
||||
// NewFS return a new FSHanlder type. You have to provide a filesystem to act on.
|
||||
func NewFS(filesystems map[string]FSConfig) *FSHandler {
|
||||
return &FSHandler{
|
||||
filesystems: filesystems,
|
||||
}
|
||||
}
|
||||
|
||||
// GetFileAPI returns the file at the given path
|
||||
// @Summary Fetch a file from a filesystem
|
||||
// @Description Fetch a file from a filesystem
|
||||
// @ID filesystem-3-get-file
|
||||
// @Produce application/data
|
||||
// @Produce json
|
||||
// @Param name path string true "Name of the filesystem"
|
||||
// @Param path path string true "Path to file"
|
||||
// @Success 200 {file} byte
|
||||
// @Success 301 {string} string
|
||||
// @Failure 404 {object} api.Error
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/{name}/{path} [get]
|
||||
func (h *FSHandler) GetFile(c echo.Context) error {
|
||||
name := util.PathParam(c, "name")
|
||||
|
||||
config, ok := h.filesystems[name]
|
||||
if !ok {
|
||||
return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name)
|
||||
}
|
||||
|
||||
return config.Handler.GetFile(c)
|
||||
}
|
||||
|
||||
// PutFileAPI adds or overwrites a file at the given path
|
||||
// @Summary Add a file to a filesystem
|
||||
// @Description Writes or overwrites a file on a filesystem
|
||||
// @ID filesystem-3-put-file
|
||||
// @Accept application/data
|
||||
// @Produce text/plain
|
||||
// @Produce json
|
||||
// @Param name path string true "Name of the filesystem"
|
||||
// @Param path path string true "Path to file"
|
||||
// @Param data body []byte true "File data"
|
||||
// @Success 201 {string} string
|
||||
// @Success 204 {string} string
|
||||
// @Failure 507 {object} api.Error
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/{name}/{path} [put]
|
||||
func (h *FSHandler) PutFile(c echo.Context) error {
|
||||
name := util.PathParam(c, "name")
|
||||
|
||||
config, ok := h.filesystems[name]
|
||||
if !ok {
|
||||
return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name)
|
||||
}
|
||||
|
||||
return config.Handler.PutFile(c)
|
||||
}
|
||||
|
||||
// DeleteFileAPI removes a file from a filesystem
|
||||
// @Summary Remove a file from a filesystem
|
||||
// @Description Remove a file from a filesystem
|
||||
// @ID filesystem-3-delete-file
|
||||
// @Produce text/plain
|
||||
// @Param name path string true "Name of the filesystem"
|
||||
// @Param path path string true "Path to file"
|
||||
// @Success 200 {string} string
|
||||
// @Failure 404 {object} api.Error
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/{name}/{path} [delete]
|
||||
func (h *FSHandler) DeleteFile(c echo.Context) error {
|
||||
name := util.PathParam(c, "name")
|
||||
|
||||
config, ok := h.filesystems[name]
|
||||
if !ok {
|
||||
return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name)
|
||||
}
|
||||
|
||||
return config.Handler.DeleteFile(c)
|
||||
}
|
||||
|
||||
// ListFiles lists all files on a filesystem
|
||||
// @Summary List all files on a filesystem
|
||||
// @Description List all files on a filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.
|
||||
// @ID filesystem-3-list-files
|
||||
// @Produce json
|
||||
// @Param name path string true "Name of the filesystem"
|
||||
// @Param glob query string false "glob pattern for file names"
|
||||
// @Param sort query string false "none, name, size, lastmod"
|
||||
// @Param order query string false "asc, desc"
|
||||
// @Success 200 {array} api.FileInfo
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/{name} [get]
|
||||
func (h *FSHandler) ListFiles(c echo.Context) error {
|
||||
name := util.PathParam(c, "name")
|
||||
|
||||
config, ok := h.filesystems[name]
|
||||
if !ok {
|
||||
return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name)
|
||||
}
|
||||
|
||||
return config.Handler.ListFiles(c)
|
||||
}
|
||||
|
||||
// List lists all registered filesystems
|
||||
// @Summary List all registered filesystems
|
||||
// @Description Listall registered filesystems
|
||||
// @ID filesystem-3-list
|
||||
// @Produce json
|
||||
// @Success 200 {array} api.FilesystemInfo
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs [get]
|
||||
func (h *FSHandler) List(c echo.Context) error {
|
||||
fss := []api.FilesystemInfo{}
|
||||
|
||||
for name, config := range h.filesystems {
|
||||
fss = append(fss, api.FilesystemInfo{
|
||||
Name: name,
|
||||
Type: config.Type,
|
||||
Mount: config.Mountpoint,
|
||||
})
|
||||
}
|
||||
|
||||
return c.JSON(http.StatusOK, fss)
|
||||
}
|
||||
@@ -1,177 +0,0 @@
|
||||
package api
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
|
||||
"github.com/datarhei/core/v16/http/api"
|
||||
"github.com/datarhei/core/v16/http/handler"
|
||||
"github.com/datarhei/core/v16/http/handler/util"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
)
|
||||
|
||||
// The MemFSHandler type provides handlers for manipulating a filesystem
|
||||
type MemFSHandler struct {
|
||||
filesystem fs.Filesystem
|
||||
handler *handler.MemFSHandler
|
||||
}
|
||||
|
||||
// NewMemFS return a new MemFS type. You have to provide a filesystem to act on.
|
||||
func NewMemFS(fs fs.Filesystem) *MemFSHandler {
|
||||
return &MemFSHandler{
|
||||
filesystem: fs,
|
||||
handler: handler.NewMemFS(fs),
|
||||
}
|
||||
}
|
||||
|
||||
// GetFileAPI returns the file at the given path
|
||||
// @Summary Fetch a file from the memory filesystem
|
||||
// @Description Fetch a file from the memory filesystem
|
||||
// @Tags v16.7.2
|
||||
// @ID memfs-3-get-file
|
||||
// @Produce application/data
|
||||
// @Produce json
|
||||
// @Param path path string true "Path to file"
|
||||
// @Success 200 {file} byte
|
||||
// @Success 301 {string} string
|
||||
// @Failure 404 {object} api.Error
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/mem/{path} [get]
|
||||
func (h *MemFSHandler) GetFile(c echo.Context) error {
|
||||
return h.handler.GetFile(c)
|
||||
}
|
||||
|
||||
// PutFileAPI adds or overwrites a file at the given path
|
||||
// @Summary Add a file to the memory filesystem
|
||||
// @Description Writes or overwrites a file on the memory filesystem
|
||||
// @Tags v16.7.2
|
||||
// @ID memfs-3-put-file
|
||||
// @Accept application/data
|
||||
// @Produce text/plain
|
||||
// @Produce json
|
||||
// @Param path path string true "Path to file"
|
||||
// @Param data body []byte true "File data"
|
||||
// @Success 201 {string} string
|
||||
// @Success 204 {string} string
|
||||
// @Failure 507 {object} api.Error
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/mem/{path} [put]
|
||||
func (h *MemFSHandler) PutFile(c echo.Context) error {
|
||||
return h.handler.PutFile(c)
|
||||
}
|
||||
|
||||
// DeleteFileAPI removes a file from the filesystem
|
||||
// @Summary Remove a file from the memory filesystem
|
||||
// @Description Remove a file from the memory filesystem
|
||||
// @Tags v16.7.2
|
||||
// @ID memfs-3-delete-file
|
||||
// @Produce text/plain
|
||||
// @Param path path string true "Path to file"
|
||||
// @Success 200 {string} string
|
||||
// @Failure 404 {object} api.Error
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/mem/{path} [delete]
|
||||
func (h *MemFSHandler) DeleteFile(c echo.Context) error {
|
||||
return h.handler.DeleteFile(c)
|
||||
}
|
||||
|
||||
// PatchFile creates a symbolic link to a file in the filesystem
|
||||
// @Summary Create a link to a file in the memory filesystem
|
||||
// @Description Create a link to a file in the memory filesystem. The file linked to has to exist.
|
||||
// @Tags v16.7.2
|
||||
// @ID memfs-3-patch
|
||||
// @Accept application/data
|
||||
// @Produce text/plain
|
||||
// @Produce json
|
||||
// @Param path path string true "Path to file"
|
||||
// @Param url body string true "Path to the file to link to"
|
||||
// @Success 201 {string} string
|
||||
// @Failure 400 {object} api.Error
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/mem/{path} [patch]
|
||||
func (h *MemFSHandler) PatchFile(c echo.Context) error {
|
||||
path := util.PathWildcardParam(c)
|
||||
|
||||
c.Response().Header().Del(echo.HeaderContentType)
|
||||
|
||||
req := c.Request()
|
||||
|
||||
body, err := io.ReadAll(req.Body)
|
||||
if err != nil {
|
||||
return api.Err(http.StatusBadRequest, "Failed reading request body", "%s", err)
|
||||
}
|
||||
|
||||
u, err := url.Parse(string(body))
|
||||
if err != nil {
|
||||
return api.Err(http.StatusBadRequest, "Body doesn't contain a valid path", "%s", err)
|
||||
}
|
||||
|
||||
if err := h.filesystem.Symlink(u.Path, path); err != nil {
|
||||
return api.Err(http.StatusBadRequest, "Failed to create symlink", "%s", err)
|
||||
}
|
||||
|
||||
c.Response().Header().Set("Content-Location", req.URL.RequestURI())
|
||||
|
||||
return c.String(http.StatusCreated, "")
|
||||
}
|
||||
|
||||
// ListFiles lists all files on the filesystem
|
||||
// @Summary List all files on the memory filesystem
|
||||
// @Description List all files on the memory filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.
|
||||
// @Tags v16.7.2
|
||||
// @ID memfs-3-list-files
|
||||
// @Produce json
|
||||
// @Param glob query string false "glob pattern for file names"
|
||||
// @Param sort query string false "none, name, size, lastmod"
|
||||
// @Param order query string false "asc, desc"
|
||||
// @Success 200 {array} api.FileInfo
|
||||
// @Security ApiKeyAuth
|
||||
// @Router /api/v3/fs/mem [get]
|
||||
func (h *MemFSHandler) ListFiles(c echo.Context) error {
|
||||
pattern := util.DefaultQuery(c, "glob", "")
|
||||
sortby := util.DefaultQuery(c, "sort", "none")
|
||||
order := util.DefaultQuery(c, "order", "asc")
|
||||
|
||||
files := h.filesystem.List(pattern)
|
||||
|
||||
var sortFunc func(i, j int) bool
|
||||
|
||||
switch sortby {
|
||||
case "name":
|
||||
if order == "desc" {
|
||||
sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() }
|
||||
} else {
|
||||
sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() }
|
||||
}
|
||||
case "size":
|
||||
if order == "desc" {
|
||||
sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() }
|
||||
} else {
|
||||
sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() }
|
||||
}
|
||||
default:
|
||||
if order == "asc" {
|
||||
sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) }
|
||||
} else {
|
||||
sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) }
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(files, sortFunc)
|
||||
|
||||
var fileinfos []api.FileInfo = make([]api.FileInfo, len(files))
|
||||
|
||||
for i, f := range files {
|
||||
fileinfos[i] = api.FileInfo{
|
||||
Name: f.Name(),
|
||||
Size: f.Size(),
|
||||
LastMod: f.ModTime().Unix(),
|
||||
}
|
||||
}
|
||||
|
||||
return c.JSON(http.StatusOK, fileinfos)
|
||||
}
|
||||
@@ -51,7 +51,7 @@ func (h *RestreamHandler) Add(c echo.Context) error {
|
||||
return api.Err(http.StatusBadRequest, "Unsupported process type", "Supported process types are: ffmpeg")
|
||||
}
|
||||
|
||||
if len(process.Input) == 0 && len(process.Output) == 0 {
|
||||
if len(process.Input) == 0 || len(process.Output) == 0 {
|
||||
return api.Err(http.StatusBadRequest, "At least one input and one output need to be defined")
|
||||
}
|
||||
|
||||
@@ -189,6 +189,14 @@ func (h *RestreamHandler) Update(c echo.Context) error {
|
||||
Autostart: true,
|
||||
}
|
||||
|
||||
current, err := h.restream.GetProcess(id)
|
||||
if err != nil {
|
||||
return api.Err(http.StatusNotFound, "Process not found", "%s", id)
|
||||
}
|
||||
|
||||
// Prefill the config with the current values
|
||||
process.Unmarshal(current.Config)
|
||||
|
||||
if err := util.ShouldBindJSON(c, &process); err != nil {
|
||||
return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err)
|
||||
}
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/datarhei/core/v16/http/api"
|
||||
"github.com/datarhei/core/v16/http/cache"
|
||||
"github.com/datarhei/core/v16/http/handler/util"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
)
|
||||
|
||||
// The DiskFSHandler type provides handlers for manipulating a filesystem
|
||||
type DiskFSHandler struct {
|
||||
cache cache.Cacher
|
||||
filesystem fs.Filesystem
|
||||
}
|
||||
|
||||
// NewDiskFS return a new DiskFS type. You have to provide a filesystem to act on and optionally
|
||||
// a Cacher where files will be purged from if the Cacher is related to the filesystem.
|
||||
func NewDiskFS(fs fs.Filesystem, cache cache.Cacher) *DiskFSHandler {
|
||||
return &DiskFSHandler{
|
||||
cache: cache,
|
||||
filesystem: fs,
|
||||
}
|
||||
}
|
||||
|
||||
// GetFile returns the file at the given path
|
||||
// @Summary Fetch a file from the filesystem
|
||||
// @Description Fetch a file from the filesystem. If the file is a directory, a index.html is returned, if it exists.
|
||||
// @ID diskfs-get-file
|
||||
// @Produce application/data
|
||||
// @Produce json
|
||||
// @Param path path string true "Path to file"
|
||||
// @Success 200 {file} byte
|
||||
// @Success 301 {string} string
|
||||
// @Failure 404 {object} api.Error
|
||||
// @Router /{path} [get]
|
||||
func (h *DiskFSHandler) GetFile(c echo.Context) error {
|
||||
path := util.PathWildcardParam(c)
|
||||
|
||||
mimeType := c.Response().Header().Get(echo.HeaderContentType)
|
||||
c.Response().Header().Del(echo.HeaderContentType)
|
||||
|
||||
file := h.filesystem.Open(path)
|
||||
if file == nil {
|
||||
return api.Err(http.StatusNotFound, "File not found", path)
|
||||
}
|
||||
|
||||
stat, _ := file.Stat()
|
||||
|
||||
if stat.IsDir() {
|
||||
path = filepath.Join(path, "index.html")
|
||||
|
||||
file.Close()
|
||||
|
||||
file = h.filesystem.Open(path)
|
||||
if file == nil {
|
||||
return api.Err(http.StatusNotFound, "File not found", path)
|
||||
}
|
||||
|
||||
stat, _ = file.Stat()
|
||||
}
|
||||
|
||||
defer file.Close()
|
||||
|
||||
c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))
|
||||
|
||||
if path, ok := stat.IsLink(); ok {
|
||||
path = filepath.Clean("/" + path)
|
||||
|
||||
if path[0] == '/' {
|
||||
path = path[1:]
|
||||
}
|
||||
|
||||
return c.Redirect(http.StatusMovedPermanently, path)
|
||||
}
|
||||
|
||||
c.Response().Header().Set(echo.HeaderContentType, mimeType)
|
||||
|
||||
if c.Request().Method == "HEAD" {
|
||||
return c.Blob(http.StatusOK, "application/data", nil)
|
||||
}
|
||||
|
||||
return c.Stream(http.StatusOK, "application/data", file)
|
||||
}
|
||||
164
http/handler/filesystem.go
Normal file
164
http/handler/filesystem.go
Normal file
@@ -0,0 +1,164 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
||||
"github.com/datarhei/core/v16/http/api"
|
||||
"github.com/datarhei/core/v16/http/fs"
|
||||
"github.com/datarhei/core/v16/http/handler/util"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
)
|
||||
|
||||
// The FSHandler type provides handlers for manipulating a filesystem
|
||||
type FSHandler struct {
|
||||
fs fs.FS
|
||||
}
|
||||
|
||||
// NewFS return a new FSHandler type. You have to provide a filesystem to act on.
|
||||
func NewFS(fs fs.FS) *FSHandler {
|
||||
return &FSHandler{
|
||||
fs: fs,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *FSHandler) GetFile(c echo.Context) error {
|
||||
path := util.PathWildcardParam(c)
|
||||
|
||||
mimeType := c.Response().Header().Get(echo.HeaderContentType)
|
||||
c.Response().Header().Del(echo.HeaderContentType)
|
||||
|
||||
file := h.fs.Filesystem.Open(path)
|
||||
if file == nil {
|
||||
return api.Err(http.StatusNotFound, "File not found", path)
|
||||
}
|
||||
|
||||
stat, _ := file.Stat()
|
||||
|
||||
if len(h.fs.DefaultFile) != 0 {
|
||||
if stat.IsDir() {
|
||||
path = filepath.Join(path, h.fs.DefaultFile)
|
||||
|
||||
file.Close()
|
||||
|
||||
file = h.fs.Filesystem.Open(path)
|
||||
if file == nil {
|
||||
return api.Err(http.StatusNotFound, "File not found", path)
|
||||
}
|
||||
|
||||
stat, _ = file.Stat()
|
||||
}
|
||||
}
|
||||
|
||||
defer file.Close()
|
||||
|
||||
c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))
|
||||
|
||||
if path, ok := stat.IsLink(); ok {
|
||||
path = filepath.Clean("/" + path)
|
||||
|
||||
if path[0] == '/' {
|
||||
path = path[1:]
|
||||
}
|
||||
|
||||
return c.Redirect(http.StatusMovedPermanently, path)
|
||||
}
|
||||
|
||||
c.Response().Header().Set(echo.HeaderContentType, mimeType)
|
||||
|
||||
if c.Request().Method == "HEAD" {
|
||||
return c.Blob(http.StatusOK, "application/data", nil)
|
||||
}
|
||||
|
||||
return c.Stream(http.StatusOK, "application/data", file)
|
||||
}
|
||||
|
||||
func (h *FSHandler) PutFile(c echo.Context) error {
|
||||
path := util.PathWildcardParam(c)
|
||||
|
||||
c.Response().Header().Del(echo.HeaderContentType)
|
||||
|
||||
req := c.Request()
|
||||
|
||||
_, created, err := h.fs.Filesystem.WriteFileReader(path, req.Body)
|
||||
if err != nil {
|
||||
return api.Err(http.StatusBadRequest, "Bad request", "%s", err)
|
||||
}
|
||||
|
||||
if h.fs.Cache != nil {
|
||||
h.fs.Cache.Delete(path)
|
||||
}
|
||||
|
||||
c.Response().Header().Set("Content-Location", req.URL.RequestURI())
|
||||
|
||||
if created {
|
||||
return c.String(http.StatusCreated, "")
|
||||
}
|
||||
|
||||
return c.NoContent(http.StatusNoContent)
|
||||
}
|
||||
|
||||
func (h *FSHandler) DeleteFile(c echo.Context) error {
|
||||
path := util.PathWildcardParam(c)
|
||||
|
||||
c.Response().Header().Del(echo.HeaderContentType)
|
||||
|
||||
size := h.fs.Filesystem.Remove(path)
|
||||
|
||||
if size < 0 {
|
||||
return api.Err(http.StatusNotFound, "File not found", path)
|
||||
}
|
||||
|
||||
if h.fs.Cache != nil {
|
||||
h.fs.Cache.Delete(path)
|
||||
}
|
||||
|
||||
return c.String(http.StatusOK, "Deleted: "+path)
|
||||
}
|
||||
|
||||
func (h *FSHandler) ListFiles(c echo.Context) error {
|
||||
pattern := util.DefaultQuery(c, "glob", "")
|
||||
sortby := util.DefaultQuery(c, "sort", "none")
|
||||
order := util.DefaultQuery(c, "order", "asc")
|
||||
|
||||
files := h.fs.Filesystem.List("/", pattern)
|
||||
|
||||
var sortFunc func(i, j int) bool
|
||||
|
||||
switch sortby {
|
||||
case "name":
|
||||
if order == "desc" {
|
||||
sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() }
|
||||
} else {
|
||||
sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() }
|
||||
}
|
||||
case "size":
|
||||
if order == "desc" {
|
||||
sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() }
|
||||
} else {
|
||||
sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() }
|
||||
}
|
||||
default:
|
||||
if order == "asc" {
|
||||
sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) }
|
||||
} else {
|
||||
sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) }
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(files, sortFunc)
|
||||
|
||||
var fileinfos []api.FileInfo = make([]api.FileInfo, len(files))
|
||||
|
||||
for i, f := range files {
|
||||
fileinfos[i] = api.FileInfo{
|
||||
Name: f.Name(),
|
||||
Size: f.Size(),
|
||||
LastMod: f.ModTime().Unix(),
|
||||
}
|
||||
}
|
||||
|
||||
return c.JSON(http.StatusOK, fileinfos)
|
||||
}
|
||||
@@ -1,130 +0,0 @@
|
||||
package handler
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/datarhei/core/v16/http/api"
|
||||
"github.com/datarhei/core/v16/http/handler/util"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
|
||||
"github.com/labstack/echo/v4"
|
||||
)
|
||||
|
||||
// The MemFSHandler type provides handlers for manipulating a filesystem
|
||||
type MemFSHandler struct {
|
||||
filesystem fs.Filesystem
|
||||
}
|
||||
|
||||
// NewMemFS return a new MemFS type. You have to provide a filesystem to act on.
|
||||
func NewMemFS(fs fs.Filesystem) *MemFSHandler {
|
||||
return &MemFSHandler{
|
||||
filesystem: fs,
|
||||
}
|
||||
}
|
||||
|
||||
// GetFile returns the file at the given path
|
||||
// @Summary Fetch a file from the memory filesystem
|
||||
// @Description Fetch a file from the memory filesystem
|
||||
// @ID memfs-get-file
|
||||
// @Produce application/data
|
||||
// @Produce json
|
||||
// @Param path path string true "Path to file"
|
||||
// @Success 200 {file} byte
|
||||
// @Success 301 {string} string
|
||||
// @Failure 404 {object} api.Error
|
||||
// @Router /memfs/{path} [get]
|
||||
func (h *MemFSHandler) GetFile(c echo.Context) error {
|
||||
path := util.PathWildcardParam(c)
|
||||
|
||||
mimeType := c.Response().Header().Get(echo.HeaderContentType)
|
||||
c.Response().Header().Del(echo.HeaderContentType)
|
||||
|
||||
file := h.filesystem.Open(path)
|
||||
if file == nil {
|
||||
return api.Err(http.StatusNotFound, "File not found", path)
|
||||
}
|
||||
|
||||
defer file.Close()
|
||||
|
||||
stat, _ := file.Stat()
|
||||
|
||||
c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))
|
||||
|
||||
if path, ok := stat.IsLink(); ok {
|
||||
path = filepath.Clean("/" + path)
|
||||
|
||||
if path[0] == '/' {
|
||||
path = path[1:]
|
||||
}
|
||||
|
||||
return c.Redirect(http.StatusMovedPermanently, path)
|
||||
}
|
||||
|
||||
c.Response().Header().Set(echo.HeaderContentType, mimeType)
|
||||
|
||||
if c.Request().Method == "HEAD" {
|
||||
return c.Blob(http.StatusOK, "application/data", nil)
|
||||
}
|
||||
|
||||
return c.Stream(http.StatusOK, "application/data", file)
|
||||
}
|
||||
|
||||
// PutFile adds or overwrites a file at the given path
|
||||
// @Summary Add a file to the memory filesystem
|
||||
// @Description Writes or overwrites a file on the memory filesystem
|
||||
// @ID memfs-put-file
|
||||
// @Accept application/data
|
||||
// @Produce text/plain
|
||||
// @Produce json
|
||||
// @Param path path string true "Path to file"
|
||||
// @Param data body []byte true "File data"
|
||||
// @Success 201 {string} string
|
||||
// @Success 204 {string} string
|
||||
// @Failure 507 {object} api.Error
|
||||
// @Security BasicAuth
|
||||
// @Router /memfs/{path} [put]
|
||||
func (h *MemFSHandler) PutFile(c echo.Context) error {
|
||||
path := util.PathWildcardParam(c)
|
||||
|
||||
c.Response().Header().Del(echo.HeaderContentType)
|
||||
|
||||
req := c.Request()
|
||||
|
||||
_, created, err := h.filesystem.Store(path, req.Body)
|
||||
if err != nil {
|
||||
return api.Err(http.StatusBadRequest, "%s", err)
|
||||
}
|
||||
|
||||
c.Response().Header().Set("Content-Location", req.URL.RequestURI())
|
||||
|
||||
if created {
|
||||
return c.String(http.StatusCreated, "")
|
||||
}
|
||||
|
||||
return c.NoContent(http.StatusNoContent)
|
||||
}
|
||||
|
||||
// DeleteFile removes a file from the filesystem
|
||||
// @Summary Remove a file from the memory filesystem
|
||||
// @Description Remove a file from the memory filesystem
|
||||
// @ID memfs-delete-file
|
||||
// @Produce text/plain
|
||||
// @Param path path string true "Path to file"
|
||||
// @Success 200 {string} string
|
||||
// @Failure 404 {object} api.Error
|
||||
// @Security BasicAuth
|
||||
// @Router /memfs/{path} [delete]
|
||||
func (h *MemFSHandler) DeleteFile(c echo.Context) error {
|
||||
path := util.PathWildcardParam(c)
|
||||
|
||||
c.Response().Header().Del(echo.HeaderContentType)
|
||||
|
||||
size := h.filesystem.Delete(path)
|
||||
|
||||
if size < 0 {
|
||||
return api.Err(http.StatusNotFound, "File not found", path)
|
||||
}
|
||||
|
||||
return c.String(http.StatusOK, "Deleted: "+path)
|
||||
}
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
"github.com/datarhei/core/v16/http/errorhandler"
|
||||
"github.com/datarhei/core/v16/http/validator"
|
||||
"github.com/datarhei/core/v16/internal/testhelper"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/datarhei/core/v16/restream"
|
||||
"github.com/datarhei/core/v16/restream/store"
|
||||
|
||||
@@ -32,7 +33,17 @@ func DummyRestreamer(pathPrefix string) (restream.Restreamer, error) {
|
||||
return nil, fmt.Errorf("failed to build helper program: %w", err)
|
||||
}
|
||||
|
||||
store := store.NewDummyStore(store.DummyConfig{})
|
||||
memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create memory filesystem: %w", err)
|
||||
}
|
||||
|
||||
store, err := store.NewJSON(store.JSONConfig{
|
||||
Filesystem: memfs,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ffmpeg, err := ffmpeg.New(ffmpeg.Config{
|
||||
Binary: binary,
|
||||
|
||||
246
http/server.go
246
http/server.go
@@ -29,19 +29,20 @@
|
||||
package http
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
cfgstore "github.com/datarhei/core/v16/config/store"
|
||||
"github.com/datarhei/core/v16/http/cache"
|
||||
"github.com/datarhei/core/v16/http/errorhandler"
|
||||
"github.com/datarhei/core/v16/http/fs"
|
||||
"github.com/datarhei/core/v16/http/graph/resolver"
|
||||
"github.com/datarhei/core/v16/http/handler"
|
||||
api "github.com/datarhei/core/v16/http/handler/api"
|
||||
"github.com/datarhei/core/v16/http/jwt"
|
||||
"github.com/datarhei/core/v16/http/router"
|
||||
"github.com/datarhei/core/v16/http/validator"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/datarhei/core/v16/log"
|
||||
"github.com/datarhei/core/v16/monitor"
|
||||
"github.com/datarhei/core/v16/net"
|
||||
@@ -79,8 +80,7 @@ type Config struct {
|
||||
Metrics monitor.HistoryReader
|
||||
Prometheus prometheus.Reader
|
||||
MimeTypesFile string
|
||||
DiskFS fs.Filesystem
|
||||
MemFS MemFSConfig
|
||||
Filesystems []fs.FS
|
||||
IPLimiter net.IPLimiter
|
||||
Profiling bool
|
||||
Cors CorsConfig
|
||||
@@ -94,13 +94,6 @@ type Config struct {
|
||||
ReadOnly bool
|
||||
}
|
||||
|
||||
type MemFSConfig struct {
|
||||
EnableAuth bool
|
||||
Username string
|
||||
Password string
|
||||
Filesystem fs.Filesystem
|
||||
}
|
||||
|
||||
type CorsConfig struct {
|
||||
Origins []string
|
||||
}
|
||||
@@ -114,8 +107,6 @@ type server struct {
|
||||
|
||||
handler struct {
|
||||
about *api.AboutHandler
|
||||
memfs *handler.MemFSHandler
|
||||
diskfs *handler.DiskFSHandler
|
||||
prometheus *handler.PrometheusHandler
|
||||
profiling *handler.ProfilingHandler
|
||||
ping *handler.PingHandler
|
||||
@@ -127,8 +118,6 @@ type server struct {
|
||||
log *api.LogHandler
|
||||
restream *api.RestreamHandler
|
||||
playout *api.PlayoutHandler
|
||||
memfs *api.MemFSHandler
|
||||
diskfs *api.DiskFSHandler
|
||||
rtmp *api.RTMPHandler
|
||||
srt *api.SRTHandler
|
||||
config *api.ConfigHandler
|
||||
@@ -148,18 +137,12 @@ type server struct {
|
||||
hlsrewrite echo.MiddlewareFunc
|
||||
}
|
||||
|
||||
memfs struct {
|
||||
enableAuth bool
|
||||
username string
|
||||
password string
|
||||
}
|
||||
|
||||
diskfs fs.Filesystem
|
||||
|
||||
gzip struct {
|
||||
mimetypes []string
|
||||
}
|
||||
|
||||
filesystems map[string]*filesystem
|
||||
|
||||
router *echo.Echo
|
||||
mimeTypesFile string
|
||||
profiling bool
|
||||
@@ -167,32 +150,63 @@ type server struct {
|
||||
readOnly bool
|
||||
}
|
||||
|
||||
type filesystem struct {
|
||||
fs.FS
|
||||
|
||||
handler *handler.FSHandler
|
||||
middleware echo.MiddlewareFunc
|
||||
}
|
||||
|
||||
func NewServer(config Config) (Server, error) {
|
||||
s := &server{
|
||||
logger: config.Logger,
|
||||
mimeTypesFile: config.MimeTypesFile,
|
||||
profiling: config.Profiling,
|
||||
diskfs: config.DiskFS,
|
||||
readOnly: config.ReadOnly,
|
||||
}
|
||||
|
||||
s.v3handler.diskfs = api.NewDiskFS(
|
||||
config.DiskFS,
|
||||
config.Cache,
|
||||
)
|
||||
s.filesystems = map[string]*filesystem{}
|
||||
|
||||
s.handler.diskfs = handler.NewDiskFS(
|
||||
config.DiskFS,
|
||||
config.Cache,
|
||||
)
|
||||
corsPrefixes := map[string][]string{
|
||||
"/api": {"*"},
|
||||
}
|
||||
|
||||
s.middleware.hlsrewrite = mwhlsrewrite.NewHLSRewriteWithConfig(mwhlsrewrite.HLSRewriteConfig{
|
||||
PathPrefix: config.DiskFS.Base(),
|
||||
})
|
||||
for _, fs := range config.Filesystems {
|
||||
if _, ok := s.filesystems[fs.Name]; ok {
|
||||
return nil, fmt.Errorf("the filesystem name '%s' is already in use", fs.Name)
|
||||
}
|
||||
|
||||
s.memfs.enableAuth = config.MemFS.EnableAuth
|
||||
s.memfs.username = config.MemFS.Username
|
||||
s.memfs.password = config.MemFS.Password
|
||||
if !strings.HasPrefix(fs.Mountpoint, "/") {
|
||||
fs.Mountpoint = "/" + fs.Mountpoint
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(fs.Mountpoint, "/") {
|
||||
fs.Mountpoint = strings.TrimSuffix(fs.Mountpoint, "/")
|
||||
}
|
||||
|
||||
if _, ok := corsPrefixes[fs.Mountpoint]; ok {
|
||||
return nil, fmt.Errorf("the mount point '%s' is already in use (%s)", fs.Mountpoint, fs.Name)
|
||||
}
|
||||
|
||||
corsPrefixes[fs.Mountpoint] = config.Cors.Origins
|
||||
|
||||
filesystem := &filesystem{
|
||||
FS: fs,
|
||||
handler: handler.NewFS(fs),
|
||||
}
|
||||
|
||||
if fs.Filesystem.Type() == "disk" {
|
||||
filesystem.middleware = mwhlsrewrite.NewHLSRewriteWithConfig(mwhlsrewrite.HLSRewriteConfig{
|
||||
PathPrefix: fs.Filesystem.Metadata("base"),
|
||||
})
|
||||
}
|
||||
|
||||
s.filesystems[filesystem.Name] = filesystem
|
||||
}
|
||||
|
||||
if _, ok := corsPrefixes["/"]; !ok {
|
||||
return nil, fmt.Errorf("one filesystem must be mounted at /")
|
||||
}
|
||||
|
||||
if config.Logger == nil {
|
||||
s.logger = log.New("HTTP")
|
||||
@@ -224,16 +238,6 @@ func NewServer(config Config) (Server, error) {
|
||||
)
|
||||
}
|
||||
|
||||
if config.MemFS.Filesystem != nil {
|
||||
s.v3handler.memfs = api.NewMemFS(
|
||||
config.MemFS.Filesystem,
|
||||
)
|
||||
|
||||
s.handler.memfs = handler.NewMemFS(
|
||||
config.MemFS.Filesystem,
|
||||
)
|
||||
}
|
||||
|
||||
if config.Prometheus != nil {
|
||||
s.handler.prometheus = handler.NewPrometheus(
|
||||
config.Prometheus.HTTPHandler(),
|
||||
@@ -292,12 +296,6 @@ func NewServer(config Config) (Server, error) {
|
||||
Logger: s.logger,
|
||||
})
|
||||
|
||||
if config.Cache != nil {
|
||||
s.middleware.cache = mwcache.NewWithConfig(mwcache.Config{
|
||||
Cache: config.Cache,
|
||||
})
|
||||
}
|
||||
|
||||
s.v3handler.widget = api.NewWidget(api.WidgetConfig{
|
||||
Restream: config.Restream,
|
||||
Registry: config.Sessions,
|
||||
@@ -308,11 +306,7 @@ func NewServer(config Config) (Server, error) {
|
||||
})
|
||||
|
||||
if middleware, err := mwcors.NewWithConfig(mwcors.Config{
|
||||
Prefixes: map[string][]string{
|
||||
"/": config.Cors.Origins,
|
||||
"/api": {"*"},
|
||||
"/memfs": config.Cors.Origins,
|
||||
},
|
||||
Prefixes: corsPrefixes,
|
||||
}); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
@@ -437,65 +431,66 @@ func (s *server) setRoutes() {
|
||||
doc.Use(gzipMiddleware)
|
||||
doc.GET("", echoSwagger.WrapHandler)
|
||||
|
||||
// Serve static data
|
||||
fs := s.router.Group("/*")
|
||||
fs.Use(mwmime.NewWithConfig(mwmime.Config{
|
||||
MimeTypesFile: s.mimeTypesFile,
|
||||
DefaultContentType: "text/html",
|
||||
}))
|
||||
fs.Use(mwgzip.NewWithConfig(mwgzip.Config{
|
||||
Level: mwgzip.BestSpeed,
|
||||
MinLength: 1000,
|
||||
Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes),
|
||||
}))
|
||||
if s.middleware.cache != nil {
|
||||
fs.Use(s.middleware.cache)
|
||||
}
|
||||
fs.Use(s.middleware.hlsrewrite)
|
||||
if s.middleware.session != nil {
|
||||
fs.Use(s.middleware.session)
|
||||
}
|
||||
// Mount filesystems
|
||||
for _, filesystem := range s.filesystems {
|
||||
// Define a local variable because later in the loop we have a closure
|
||||
filesystem := filesystem
|
||||
|
||||
fs.GET("", s.handler.diskfs.GetFile)
|
||||
fs.HEAD("", s.handler.diskfs.GetFile)
|
||||
|
||||
// Memory FS
|
||||
if s.handler.memfs != nil {
|
||||
memfs := s.router.Group("/memfs/*")
|
||||
memfs.Use(mwmime.NewWithConfig(mwmime.Config{
|
||||
MimeTypesFile: s.mimeTypesFile,
|
||||
DefaultContentType: "application/data",
|
||||
}))
|
||||
memfs.Use(mwgzip.NewWithConfig(mwgzip.Config{
|
||||
Level: mwgzip.BestSpeed,
|
||||
MinLength: 1000,
|
||||
Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes),
|
||||
}))
|
||||
if s.middleware.session != nil {
|
||||
memfs.Use(s.middleware.session)
|
||||
mountpoint := filesystem.Mountpoint + "/*"
|
||||
if filesystem.Mountpoint == "/" {
|
||||
mountpoint = "/*"
|
||||
}
|
||||
|
||||
memfs.HEAD("", s.handler.memfs.GetFile)
|
||||
memfs.GET("", s.handler.memfs.GetFile)
|
||||
fs := s.router.Group(mountpoint)
|
||||
fs.Use(mwmime.NewWithConfig(mwmime.Config{
|
||||
MimeTypesFile: s.mimeTypesFile,
|
||||
DefaultContentType: filesystem.DefaultContentType,
|
||||
}))
|
||||
|
||||
var authmw echo.MiddlewareFunc
|
||||
if filesystem.Gzip {
|
||||
fs.Use(mwgzip.NewWithConfig(mwgzip.Config{
|
||||
Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes),
|
||||
Level: mwgzip.BestSpeed,
|
||||
MinLength: 1000,
|
||||
}))
|
||||
}
|
||||
|
||||
if s.memfs.enableAuth {
|
||||
authmw = middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) {
|
||||
if username == s.memfs.username && password == s.memfs.password {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
if filesystem.Cache != nil {
|
||||
mwcache := mwcache.NewWithConfig(mwcache.Config{
|
||||
Cache: filesystem.Cache,
|
||||
})
|
||||
fs.Use(mwcache)
|
||||
}
|
||||
|
||||
memfs.POST("", s.handler.memfs.PutFile, authmw)
|
||||
memfs.PUT("", s.handler.memfs.PutFile, authmw)
|
||||
memfs.DELETE("", s.handler.memfs.DeleteFile, authmw)
|
||||
} else {
|
||||
memfs.POST("", s.handler.memfs.PutFile)
|
||||
memfs.PUT("", s.handler.memfs.PutFile)
|
||||
memfs.DELETE("", s.handler.memfs.DeleteFile)
|
||||
if filesystem.middleware != nil {
|
||||
fs.Use(filesystem.middleware)
|
||||
}
|
||||
|
||||
if s.middleware.session != nil {
|
||||
fs.Use(s.middleware.session)
|
||||
}
|
||||
|
||||
fs.GET("", filesystem.handler.GetFile)
|
||||
fs.HEAD("", filesystem.handler.GetFile)
|
||||
|
||||
if filesystem.AllowWrite {
|
||||
if filesystem.EnableAuth {
|
||||
authmw := middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) {
|
||||
if username == filesystem.Username && password == filesystem.Password {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
})
|
||||
|
||||
fs.POST("", filesystem.handler.PutFile, authmw)
|
||||
fs.PUT("", filesystem.handler.PutFile, authmw)
|
||||
fs.DELETE("", filesystem.handler.DeleteFile, authmw)
|
||||
} else {
|
||||
fs.POST("", filesystem.handler.PutFile)
|
||||
fs.PUT("", filesystem.handler.PutFile)
|
||||
fs.DELETE("", filesystem.handler.DeleteFile)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -593,32 +588,33 @@ func (s *server) setRoutesV3(v3 *echo.Group) {
|
||||
}
|
||||
}
|
||||
|
||||
// v3 Memory FS
|
||||
if s.v3handler.memfs != nil {
|
||||
v3.GET("/fs/mem", s.v3handler.memfs.ListFiles)
|
||||
v3.GET("/fs/mem/*", s.v3handler.memfs.GetFile)
|
||||
|
||||
if !s.readOnly {
|
||||
v3.DELETE("/fs/mem/*", s.v3handler.memfs.DeleteFile)
|
||||
v3.PUT("/fs/mem/*", s.v3handler.memfs.PutFile)
|
||||
v3.PATCH("/fs/mem/*", s.v3handler.memfs.PatchFile)
|
||||
// v3 Filesystems
|
||||
fshandlers := map[string]api.FSConfig{}
|
||||
for _, fs := range s.filesystems {
|
||||
fshandlers[fs.Name] = api.FSConfig{
|
||||
Type: fs.Filesystem.Type(),
|
||||
Mountpoint: fs.Mountpoint,
|
||||
Handler: fs.handler,
|
||||
}
|
||||
}
|
||||
|
||||
// v3 Disk FS
|
||||
v3.GET("/fs/disk", s.v3handler.diskfs.ListFiles)
|
||||
v3.GET("/fs/disk/*", s.v3handler.diskfs.GetFile, mwmime.NewWithConfig(mwmime.Config{
|
||||
handler := api.NewFS(fshandlers)
|
||||
|
||||
v3.GET("/fs", handler.List)
|
||||
|
||||
v3.GET("/fs/:name", handler.ListFiles)
|
||||
v3.GET("/fs/:name/*", handler.GetFile, mwmime.NewWithConfig(mwmime.Config{
|
||||
MimeTypesFile: s.mimeTypesFile,
|
||||
DefaultContentType: "application/data",
|
||||
}))
|
||||
v3.HEAD("/fs/disk/*", s.v3handler.diskfs.GetFile, mwmime.NewWithConfig(mwmime.Config{
|
||||
v3.HEAD("/fs/:name/*", handler.GetFile, mwmime.NewWithConfig(mwmime.Config{
|
||||
MimeTypesFile: s.mimeTypesFile,
|
||||
DefaultContentType: "application/data",
|
||||
}))
|
||||
|
||||
if !s.readOnly {
|
||||
v3.PUT("/fs/disk/*", s.v3handler.diskfs.PutFile)
|
||||
v3.DELETE("/fs/disk/*", s.v3handler.diskfs.DeleteFile)
|
||||
v3.PUT("/fs/:name/*", handler.PutFile)
|
||||
v3.DELETE("/fs/:name/*", handler.DeleteFile)
|
||||
}
|
||||
|
||||
// v3 RTMP
|
||||
|
||||
446
io/fs/disk.go
446
io/fs/disk.go
@@ -1,25 +1,30 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/core/v16/glob"
|
||||
"github.com/datarhei/core/v16/log"
|
||||
)
|
||||
|
||||
// DiskConfig is the config required to create a new disk
|
||||
// filesystem.
|
||||
// DiskConfig is the config required to create a new disk filesystem.
|
||||
type DiskConfig struct {
|
||||
// Dir is the path to the directory to observe
|
||||
Dir string
|
||||
// For logging, optional
|
||||
Logger log.Logger
|
||||
}
|
||||
|
||||
// Size of the filesystem in bytes
|
||||
Size int64
|
||||
// RootedDiskConfig is the config required to create a new rooted disk filesystem.
|
||||
type RootedDiskConfig struct {
|
||||
// Root is the path this filesystem is rooted to
|
||||
Root string
|
||||
|
||||
// For logging, optional
|
||||
Logger log.Logger
|
||||
@@ -27,8 +32,9 @@ type DiskConfig struct {
|
||||
|
||||
// diskFileInfo implements the FileInfo interface
|
||||
type diskFileInfo struct {
|
||||
dir string
|
||||
root string
|
||||
name string
|
||||
mode os.FileMode
|
||||
finfo os.FileInfo
|
||||
}
|
||||
|
||||
@@ -37,31 +43,37 @@ func (fi *diskFileInfo) Name() string {
|
||||
}
|
||||
|
||||
func (fi *diskFileInfo) Size() int64 {
|
||||
if fi.finfo.IsDir() {
|
||||
return 0
|
||||
}
|
||||
|
||||
return fi.finfo.Size()
|
||||
}
|
||||
|
||||
func (fi *diskFileInfo) Mode() fs.FileMode {
|
||||
return fi.mode
|
||||
}
|
||||
|
||||
func (fi *diskFileInfo) ModTime() time.Time {
|
||||
return fi.finfo.ModTime()
|
||||
}
|
||||
|
||||
func (fi *diskFileInfo) IsLink() (string, bool) {
|
||||
mode := fi.finfo.Mode()
|
||||
if mode&os.ModeSymlink == 0 {
|
||||
if fi.mode&os.ModeSymlink == 0 {
|
||||
return fi.name, false
|
||||
}
|
||||
|
||||
path, err := os.Readlink(filepath.Join(fi.dir, fi.name))
|
||||
path, err := os.Readlink(filepath.Join(fi.root, fi.name))
|
||||
if err != nil {
|
||||
return fi.name, false
|
||||
}
|
||||
|
||||
path = filepath.Join(fi.dir, path)
|
||||
|
||||
if !strings.HasPrefix(path, fi.dir) {
|
||||
if !strings.HasPrefix(path, fi.root) {
|
||||
return fi.name, false
|
||||
}
|
||||
|
||||
name := strings.TrimPrefix(path, fi.dir)
|
||||
name := strings.TrimPrefix(path, fi.root)
|
||||
|
||||
if name[0] != os.PathSeparator {
|
||||
name = string(os.PathSeparator) + name
|
||||
}
|
||||
@@ -75,8 +87,9 @@ func (fi *diskFileInfo) IsDir() bool {
|
||||
|
||||
// diskFile implements the File interface
|
||||
type diskFile struct {
|
||||
dir string
|
||||
root string
|
||||
name string
|
||||
mode os.FileMode
|
||||
file *os.File
|
||||
}
|
||||
|
||||
@@ -91,8 +104,9 @@ func (f *diskFile) Stat() (FileInfo, error) {
|
||||
}
|
||||
|
||||
dif := &diskFileInfo{
|
||||
dir: f.dir,
|
||||
root: f.root,
|
||||
name: f.name,
|
||||
mode: f.mode,
|
||||
finfo: finfo,
|
||||
}
|
||||
|
||||
@@ -109,11 +123,11 @@ func (f *diskFile) Read(p []byte) (int, error) {
|
||||
|
||||
// diskFilesystem implements the Filesystem interface
|
||||
type diskFilesystem struct {
|
||||
dir string
|
||||
metadata map[string]string
|
||||
lock sync.RWMutex
|
||||
|
||||
// Max. size of the filesystem in bytes as
|
||||
// given by the config
|
||||
maxSize int64
|
||||
root string
|
||||
cwd string
|
||||
|
||||
// Current size of the filesystem in bytes
|
||||
currentSize int64
|
||||
@@ -123,53 +137,102 @@ type diskFilesystem struct {
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
// NewDiskFilesystem returns a new filesystem that is backed by a disk
|
||||
// that implements the Filesystem interface
|
||||
// NewDiskFilesystem returns a new filesystem that is backed by the disk filesystem.
|
||||
// The root is / and the working directory is whatever is returned by os.Getwd(). The value
|
||||
// of Root in the config will be ignored.
|
||||
func NewDiskFilesystem(config DiskConfig) (Filesystem, error) {
|
||||
fs := &diskFilesystem{
|
||||
maxSize: config.Size,
|
||||
logger: config.Logger,
|
||||
metadata: make(map[string]string),
|
||||
root: "/",
|
||||
cwd: "/",
|
||||
logger: config.Logger,
|
||||
}
|
||||
|
||||
cwd, err := os.Getwd()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fs.cwd = cwd
|
||||
|
||||
if len(fs.cwd) == 0 {
|
||||
fs.cwd = "/"
|
||||
}
|
||||
|
||||
fs.cwd = filepath.Clean(fs.cwd)
|
||||
if !filepath.IsAbs(fs.cwd) {
|
||||
return nil, fmt.Errorf("the current working directory must be an absolute path")
|
||||
}
|
||||
|
||||
if fs.logger == nil {
|
||||
fs.logger = log.New("DiskFS")
|
||||
}
|
||||
|
||||
if err := fs.Rebase(config.Dir); err != nil {
|
||||
return nil, err
|
||||
fs.logger = log.New("")
|
||||
}
|
||||
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) Base() string {
|
||||
return fs.dir
|
||||
// NewRootedDiskFilesystem returns a filesystem that is backed by the disk filesystem. The
|
||||
// root of the filesystem is defined by DiskConfig.Root. The working directory is "/". Root
|
||||
// must be directory. If it doesn't exist, it will be created
|
||||
func NewRootedDiskFilesystem(config RootedDiskConfig) (Filesystem, error) {
|
||||
fs := &diskFilesystem{
|
||||
metadata: make(map[string]string),
|
||||
root: config.Root,
|
||||
cwd: "/",
|
||||
logger: config.Logger,
|
||||
}
|
||||
|
||||
if len(fs.root) == 0 {
|
||||
fs.root = "/"
|
||||
}
|
||||
|
||||
if root, err := filepath.Abs(fs.root); err != nil {
|
||||
return nil, err
|
||||
} else {
|
||||
fs.root = root
|
||||
}
|
||||
|
||||
err := os.MkdirAll(fs.root, 0700)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info, err := os.Stat(fs.root)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !info.IsDir() {
|
||||
return nil, fmt.Errorf("root is not a directory")
|
||||
}
|
||||
|
||||
if fs.logger == nil {
|
||||
fs.logger = log.New("")
|
||||
}
|
||||
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) Rebase(base string) error {
|
||||
if len(base) == 0 {
|
||||
return fmt.Errorf("invalid base path provided")
|
||||
}
|
||||
func (fs *diskFilesystem) Name() string {
|
||||
return "disk"
|
||||
}
|
||||
|
||||
dir, err := filepath.Abs(base)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
func (fs *diskFilesystem) Type() string {
|
||||
return "disk"
|
||||
}
|
||||
|
||||
base = dir
|
||||
func (fs *diskFilesystem) Metadata(key string) string {
|
||||
fs.lock.RLock()
|
||||
defer fs.lock.RUnlock()
|
||||
|
||||
finfo, err := os.Stat(base)
|
||||
if err != nil {
|
||||
return fmt.Errorf("the provided base path '%s' doesn't exist", fs.dir)
|
||||
}
|
||||
return fs.metadata[key]
|
||||
}
|
||||
|
||||
if !finfo.IsDir() {
|
||||
return fmt.Errorf("the provided base path '%s' must be a directory", fs.dir)
|
||||
}
|
||||
func (fs *diskFilesystem) SetMetadata(key, data string) {
|
||||
fs.lock.Lock()
|
||||
defer fs.lock.Unlock()
|
||||
|
||||
fs.dir = base
|
||||
|
||||
return nil
|
||||
fs.metadata[key] = data
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) Size() (int64, int64) {
|
||||
@@ -178,7 +241,11 @@ func (fs *diskFilesystem) Size() (int64, int64) {
|
||||
if time.Since(fs.lastSizeCheck) >= 10*time.Second {
|
||||
var size int64 = 0
|
||||
|
||||
fs.walk(func(path string, info os.FileInfo) {
|
||||
fs.walk(fs.root, func(path string, info os.FileInfo) {
|
||||
if info.IsDir() {
|
||||
return
|
||||
}
|
||||
|
||||
size += info.Size()
|
||||
})
|
||||
|
||||
@@ -187,17 +254,21 @@ func (fs *diskFilesystem) Size() (int64, int64) {
|
||||
fs.lastSizeCheck = time.Now()
|
||||
}
|
||||
|
||||
return fs.currentSize, fs.maxSize
|
||||
return fs.currentSize, -1
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) Resize(size int64) {
|
||||
fs.maxSize = size
|
||||
func (fs *diskFilesystem) Purge(size int64) int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) Files() int64 {
|
||||
var nfiles int64 = 0
|
||||
|
||||
fs.walk(func(path string, info os.FileInfo) {
|
||||
fs.walk(fs.root, func(path string, info os.FileInfo) {
|
||||
if info.IsDir() {
|
||||
return
|
||||
}
|
||||
|
||||
nfiles++
|
||||
})
|
||||
|
||||
@@ -205,38 +276,58 @@ func (fs *diskFilesystem) Files() int64 {
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) Symlink(oldname, newname string) error {
|
||||
oldname = filepath.Join(fs.dir, filepath.Clean("/"+oldname))
|
||||
oldname = fs.cleanPath(oldname)
|
||||
newname = fs.cleanPath(newname)
|
||||
|
||||
if !filepath.IsAbs(newname) {
|
||||
return nil
|
||||
info, err := os.Lstat(oldname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newname = filepath.Join(fs.dir, filepath.Clean("/"+newname))
|
||||
if info.Mode()&os.ModeSymlink != 0 {
|
||||
return fmt.Errorf("%s can't link to another link (%s)", newname, oldname)
|
||||
}
|
||||
|
||||
err := os.Symlink(oldname, newname)
|
||||
if info.IsDir() {
|
||||
return fmt.Errorf("can't symlink directories")
|
||||
}
|
||||
|
||||
return err
|
||||
return os.Symlink(oldname, newname)
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) Open(path string) File {
|
||||
path = filepath.Join(fs.dir, filepath.Clean("/"+path))
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
df := &diskFile{
|
||||
root: fs.root,
|
||||
name: strings.TrimPrefix(path, fs.root),
|
||||
}
|
||||
|
||||
info, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
df.mode = info.Mode()
|
||||
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
df := &diskFile{
|
||||
dir: fs.dir,
|
||||
name: path,
|
||||
file: f,
|
||||
}
|
||||
df.file = f
|
||||
|
||||
return df
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) Store(path string, r io.Reader) (int64, bool, error) {
|
||||
path = filepath.Join(fs.dir, filepath.Clean("/"+path))
|
||||
func (fs *diskFilesystem) ReadFile(path string) ([]byte, error) {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
return os.ReadFile(path)
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool, error) {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
replace := true
|
||||
|
||||
@@ -258,16 +349,155 @@ func (fs *diskFilesystem) Store(path string, r io.Reader) (int64, bool, error) {
|
||||
replace = false
|
||||
}
|
||||
|
||||
defer f.Close()
|
||||
|
||||
size, err := f.ReadFrom(r)
|
||||
if err != nil {
|
||||
return -1, false, fmt.Errorf("reading data failed: %w", err)
|
||||
}
|
||||
|
||||
fs.lastSizeCheck = time.Time{}
|
||||
|
||||
return size, !replace, nil
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) Delete(path string) int64 {
|
||||
path = filepath.Join(fs.dir, filepath.Clean("/"+path))
|
||||
func (fs *diskFilesystem) WriteFile(path string, data []byte) (int64, bool, error) {
|
||||
return fs.WriteFileReader(path, bytes.NewBuffer(data))
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) {
|
||||
path = fs.cleanPath(path)
|
||||
dir, filename := filepath.Split(path)
|
||||
|
||||
tmpfile, err := os.CreateTemp(dir, filename)
|
||||
if err != nil {
|
||||
return -1, false, err
|
||||
}
|
||||
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
size, err := tmpfile.Write(data)
|
||||
if err != nil {
|
||||
return -1, false, err
|
||||
}
|
||||
|
||||
if err := tmpfile.Close(); err != nil {
|
||||
return -1, false, err
|
||||
}
|
||||
|
||||
replace := false
|
||||
if _, err := fs.Stat(path); err == nil {
|
||||
replace = true
|
||||
}
|
||||
|
||||
if err := fs.rename(tmpfile.Name(), path); err != nil {
|
||||
return -1, false, err
|
||||
}
|
||||
|
||||
fs.lastSizeCheck = time.Time{}
|
||||
|
||||
return int64(size), !replace, nil
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) Rename(src, dst string) error {
|
||||
src = fs.cleanPath(src)
|
||||
dst = fs.cleanPath(dst)
|
||||
|
||||
return fs.rename(src, dst)
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) rename(src, dst string) error {
|
||||
if src == dst {
|
||||
return nil
|
||||
}
|
||||
|
||||
// First try to rename the file
|
||||
if err := os.Rename(src, dst); err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// If renaming the file fails, copy the data
|
||||
if err := fs.copy(src, dst); err != nil {
|
||||
os.Remove(dst)
|
||||
return fmt.Errorf("failed to copy files: %w", err)
|
||||
}
|
||||
|
||||
if err := os.Remove(src); err != nil {
|
||||
os.Remove(dst)
|
||||
return fmt.Errorf("failed to remove source file: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) Copy(src, dst string) error {
|
||||
src = fs.cleanPath(src)
|
||||
dst = fs.cleanPath(dst)
|
||||
|
||||
return fs.copy(src, dst)
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) copy(src, dst string) error {
|
||||
source, err := os.Open(src)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to open source file: %w", err)
|
||||
}
|
||||
|
||||
destination, err := os.Create(dst)
|
||||
if err != nil {
|
||||
source.Close()
|
||||
return fmt.Errorf("failed to create destination file: %w", err)
|
||||
}
|
||||
defer destination.Close()
|
||||
|
||||
if _, err := io.Copy(destination, source); err != nil {
|
||||
source.Close()
|
||||
os.Remove(dst)
|
||||
return fmt.Errorf("failed to copy data from source to destination: %w", err)
|
||||
}
|
||||
|
||||
source.Close()
|
||||
|
||||
fs.lastSizeCheck = time.Time{}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) MkdirAll(path string, perm os.FileMode) error {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
return os.MkdirAll(path, perm)
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) Stat(path string) (FileInfo, error) {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
dif := &diskFileInfo{
|
||||
root: fs.root,
|
||||
name: strings.TrimPrefix(path, fs.root),
|
||||
}
|
||||
|
||||
info, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dif.mode = info.Mode()
|
||||
|
||||
if info.Mode()&os.ModeSymlink != 0 {
|
||||
info, err = os.Stat(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
dif.finfo = info
|
||||
|
||||
return dif, nil
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) Remove(path string) int64 {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
finfo, err := os.Stat(path)
|
||||
if err != nil {
|
||||
@@ -280,28 +510,31 @@ func (fs *diskFilesystem) Delete(path string) int64 {
|
||||
return -1
|
||||
}
|
||||
|
||||
fs.lastSizeCheck = time.Time{}
|
||||
|
||||
return size
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) DeleteAll() int64 {
|
||||
func (fs *diskFilesystem) RemoveAll() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) List(pattern string) []FileInfo {
|
||||
func (fs *diskFilesystem) List(path, pattern string) []FileInfo {
|
||||
path = fs.cleanPath(path)
|
||||
files := []FileInfo{}
|
||||
|
||||
fs.walk(func(path string, info os.FileInfo) {
|
||||
if path == fs.dir {
|
||||
fs.walk(path, func(path string, info os.FileInfo) {
|
||||
if path == fs.root {
|
||||
return
|
||||
}
|
||||
|
||||
name := strings.TrimPrefix(path, fs.dir)
|
||||
name := strings.TrimPrefix(path, fs.root)
|
||||
if name[0] != os.PathSeparator {
|
||||
name = string(os.PathSeparator) + name
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
name += "/"
|
||||
return
|
||||
}
|
||||
|
||||
if len(pattern) != 0 {
|
||||
@@ -311,7 +544,7 @@ func (fs *diskFilesystem) List(pattern string) []FileInfo {
|
||||
}
|
||||
|
||||
files = append(files, &diskFileInfo{
|
||||
dir: fs.dir,
|
||||
root: fs.root,
|
||||
name: name,
|
||||
finfo: info,
|
||||
})
|
||||
@@ -320,8 +553,53 @@ func (fs *diskFilesystem) List(pattern string) []FileInfo {
|
||||
return files
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) walk(walkfn func(path string, info os.FileInfo)) {
|
||||
filepath.Walk(fs.dir, func(path string, info os.FileInfo, err error) error {
|
||||
func (fs *diskFilesystem) LookPath(file string) (string, error) {
|
||||
if strings.Contains(file, "/") {
|
||||
file = fs.cleanPath(file)
|
||||
err := fs.findExecutable(file)
|
||||
if err == nil {
|
||||
return file, nil
|
||||
}
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
path := os.Getenv("PATH")
|
||||
for _, dir := range filepath.SplitList(path) {
|
||||
if dir == "" {
|
||||
// Unix shell semantics: path element "" means "."
|
||||
dir = "."
|
||||
}
|
||||
path := filepath.Join(dir, file)
|
||||
path = fs.cleanPath(path)
|
||||
if err := fs.findExecutable(path); err == nil {
|
||||
if !filepath.IsAbs(path) {
|
||||
return path, os.ErrNotExist
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) findExecutable(file string) error {
|
||||
d, err := fs.Stat(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m := d.Mode()
|
||||
if m.IsDir() {
|
||||
return fmt.Errorf("is a directory")
|
||||
}
|
||||
|
||||
if m&0111 != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
return os.ErrPermission
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) walk(path string, walkfn func(path string, info os.FileInfo)) {
|
||||
filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
@@ -341,3 +619,11 @@ func (fs *diskFilesystem) walk(walkfn func(path string, info os.FileInfo)) {
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func (fs *diskFilesystem) cleanPath(path string) string {
|
||||
if !filepath.IsAbs(path) {
|
||||
path = filepath.Join(fs.cwd, path)
|
||||
}
|
||||
|
||||
return filepath.Join(fs.root, filepath.Clean(path))
|
||||
}
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
type dummyFileInfo struct{}
|
||||
|
||||
func (d *dummyFileInfo) Name() string { return "" }
|
||||
func (d *dummyFileInfo) Size() int64 { return 0 }
|
||||
func (d *dummyFileInfo) ModTime() time.Time { return time.Date(2000, 1, 1, 0, 0, 0, 0, nil) }
|
||||
func (d *dummyFileInfo) IsLink() (string, bool) { return "", false }
|
||||
func (d *dummyFileInfo) IsDir() bool { return false }
|
||||
|
||||
type dummyFile struct{}
|
||||
|
||||
func (d *dummyFile) Read(p []byte) (int, error) { return 0, io.EOF }
|
||||
func (d *dummyFile) Close() error { return nil }
|
||||
func (d *dummyFile) Name() string { return "" }
|
||||
func (d *dummyFile) Stat() (FileInfo, error) { return &dummyFileInfo{}, nil }
|
||||
|
||||
type dummyFilesystem struct{}
|
||||
|
||||
func (d *dummyFilesystem) Base() string { return "/" }
|
||||
func (d *dummyFilesystem) Rebase(string) error { return nil }
|
||||
func (d *dummyFilesystem) Size() (int64, int64) { return 0, -1 }
|
||||
func (d *dummyFilesystem) Resize(int64) {}
|
||||
func (d *dummyFilesystem) Files() int64 { return 0 }
|
||||
func (d *dummyFilesystem) Symlink(string, string) error { return nil }
|
||||
func (d *dummyFilesystem) Open(string) File { return &dummyFile{} }
|
||||
func (d *dummyFilesystem) Store(string, io.Reader) (int64, bool, error) { return 0, true, nil }
|
||||
func (d *dummyFilesystem) Delete(string) int64 { return 0 }
|
||||
func (d *dummyFilesystem) DeleteAll() int64 { return 0 }
|
||||
func (d *dummyFilesystem) List(string) []FileInfo { return []FileInfo{} }
|
||||
|
||||
// NewDummyFilesystem return a dummy filesystem
|
||||
func NewDummyFilesystem() Filesystem {
|
||||
return &dummyFilesystem{}
|
||||
}
|
||||
118
io/fs/fs.go
118
io/fs/fs.go
@@ -3,24 +3,29 @@ package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FileInfo describes a file and is returned by Stat.
|
||||
type FileInfo interface {
|
||||
// Name returns the full name of the file
|
||||
// Name returns the full name of the file.
|
||||
Name() string
|
||||
|
||||
// Size reports the size of the file in bytes
|
||||
// Size reports the size of the file in bytes.
|
||||
Size() int64
|
||||
|
||||
// ModTime returns the time of last modification
|
||||
// Mode returns the file mode.
|
||||
Mode() fs.FileMode
|
||||
|
||||
// ModTime returns the time of last modification.
|
||||
ModTime() time.Time
|
||||
|
||||
// IsLink returns the path this file is linking to and true. Otherwise an empty string and false.
|
||||
IsLink() (string, bool)
|
||||
|
||||
// IsDir returns whether the file represents a directory
|
||||
// IsDir returns whether the file represents a directory.
|
||||
IsDir() bool
|
||||
}
|
||||
|
||||
@@ -28,52 +33,101 @@ type FileInfo interface {
|
||||
type File interface {
|
||||
io.ReadCloser
|
||||
|
||||
// Name returns the Name of the file
|
||||
// Name returns the Name of the file.
|
||||
Name() string
|
||||
|
||||
// Stat returns the FileInfo to this file. In case of an error
|
||||
// FileInfo is nil and the error is non-nil.
|
||||
// Stat returns the FileInfo to this file. In case of an error FileInfo is nil
|
||||
// and the error is non-nil. If the file is a symlink, the info reports the name and mode
|
||||
// of the link itself, but the modification time and size of the linked file.
|
||||
Stat() (FileInfo, error)
|
||||
}
|
||||
|
||||
// Filesystem is an interface that provides access to a filesystem.
|
||||
type Filesystem interface {
|
||||
// Base returns the base path of this filesystem
|
||||
Base() string
|
||||
|
||||
// Rebase sets a new base path for this filesystem
|
||||
Rebase(string) error
|
||||
|
||||
type ReadFilesystem interface {
|
||||
// Size returns the consumed size and capacity of the filesystem in bytes. The
|
||||
// capacity is negative if the filesystem can consume as much space as it can.
|
||||
// capacity is zero or negative if the filesystem can consume as much space as it wants.
|
||||
Size() (int64, int64)
|
||||
|
||||
// Resize resizes the filesystem to the new size. Files may need to be deleted.
|
||||
Resize(size int64)
|
||||
|
||||
// Files returns the current number of files in the filesystem.
|
||||
Files() int64
|
||||
|
||||
// Open returns the file stored at the given path. It returns nil if the
|
||||
// file doesn't exist. If the file is a symlink, the name is the name of
|
||||
// the link, but it will read the contents of the linked file.
|
||||
Open(path string) File
|
||||
|
||||
// ReadFile reads the content of the file at the given path into the writer. Returns
|
||||
// the number of bytes read or an error.
|
||||
ReadFile(path string) ([]byte, error)
|
||||
|
||||
// Stat returns info about the file at path. If the file doesn't exist, an error
|
||||
// will be returned. If the file is a symlink, the info reports the name and mode
|
||||
// of the link itself, but the modification time and size are of the linked file.
|
||||
Stat(path string) (FileInfo, error)
|
||||
|
||||
// List lists all files that are currently on the filesystem.
|
||||
List(path, pattern string) []FileInfo
|
||||
|
||||
// LookPath searches for an executable named file in the directories named by the PATH environment
|
||||
// variable. If file contains a slash, it is tried directly and the PATH is not consulted. Otherwise,
|
||||
// on success, the result is an absolute path. On non-disk filesystems. Only the mere existence
|
||||
// of that file is verfied.
|
||||
LookPath(file string) (string, error)
|
||||
}
|
||||
|
||||
type WriteFilesystem interface {
|
||||
// Symlink creates newname as a symbolic link to oldname.
|
||||
Symlink(oldname, newname string) error
|
||||
|
||||
// Open returns the file stored at the given path. It returns nil if the
|
||||
// file doesn't exist.
|
||||
Open(path string) File
|
||||
|
||||
// Store adds a file to the filesystem. Returns the size of the data that has been
|
||||
// WriteFileReader adds a file to the filesystem. Returns the size of the data that has been
|
||||
// stored in bytes and whether the file is new. The size is negative if there was
|
||||
// an error adding the file and error is not nil.
|
||||
Store(path string, r io.Reader) (int64, bool, error)
|
||||
WriteFileReader(path string, r io.Reader) (int64, bool, error)
|
||||
|
||||
// Delete removes a file at the given path from the filesystem. Returns the size of
|
||||
// WriteFile adds a file to the filesystem. Returns the size of the data that has been
|
||||
// stored in bytes and whether the file is new. The size is negative if there was
|
||||
// an error adding the file and error is not nil.
|
||||
WriteFile(path string, data []byte) (int64, bool, error)
|
||||
|
||||
// WriteFileSafe adds a file to the filesystem by first writing it to a tempfile and then
|
||||
// renaming it to the actual path. Returns the size of the data that has been
|
||||
// stored in bytes and whether the file is new. The size is negative if there was
|
||||
// an error adding the file and error is not nil.
|
||||
WriteFileSafe(path string, data []byte) (int64, bool, error)
|
||||
|
||||
// MkdirAll creates a directory named path, along with any necessary parents, and returns nil,
|
||||
// or else returns an error. The permission bits perm (before umask) are used for all directories
|
||||
// that MkdirAll creates. If path is already a directory, MkdirAll does nothing and returns nil.
|
||||
MkdirAll(path string, perm os.FileMode) error
|
||||
|
||||
// Rename renames the file from src to dst. If src and dst can't be renamed
|
||||
// regularly, the data is copied from src to dst. dst will be overwritten
|
||||
// if it already exists. src will be removed after all data has been copied
|
||||
// successfully. Both files exist during copying.
|
||||
Rename(src, dst string) error
|
||||
|
||||
// Copy copies a file from src to dst.
|
||||
Copy(src, dst string) error
|
||||
|
||||
// Remove removes a file at the given path from the filesystem. Returns the size of
|
||||
// the remove file in bytes. The size is negative if the file doesn't exist.
|
||||
Delete(path string) int64
|
||||
Remove(path string) int64
|
||||
|
||||
// DeleteAll removes all files from the filesystem. Returns the size of the
|
||||
// RemoveAll removes all files from the filesystem. Returns the size of the
|
||||
// removed files in bytes.
|
||||
DeleteAll() int64
|
||||
|
||||
// List lists all files that are currently on the filesystem.
|
||||
List(pattern string) []FileInfo
|
||||
RemoveAll() int64
|
||||
}
|
||||
|
||||
// Filesystem is an interface that provides access to a filesystem.
|
||||
type Filesystem interface {
|
||||
ReadFilesystem
|
||||
WriteFilesystem
|
||||
|
||||
// Name returns the name of the filesystem.
|
||||
Name() string
|
||||
|
||||
// Type returns the type of the filesystem, e.g. disk, mem, s3
|
||||
Type() string
|
||||
|
||||
Metadata(key string) string
|
||||
SetMetadata(key string, data string)
|
||||
}
|
||||
|
||||
742
io/fs/fs_test.go
Normal file
742
io/fs/fs_test.go
Normal file
@@ -0,0 +1,742 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// ErrNoMinio is returned by startMinio when no minio binary can be found in PATH.
var ErrNoMinio = errors.New("minio binary not found")
|
||||
|
||||
func startMinio(t *testing.T, path string) (*exec.Cmd, error) {
|
||||
err := os.MkdirAll(path, 0700)
|
||||
require.NoError(t, err)
|
||||
|
||||
minio, err := exec.LookPath("minio")
|
||||
if err != nil {
|
||||
return nil, ErrNoMinio
|
||||
}
|
||||
|
||||
proc := exec.Command(minio, "server", path, "--address", "127.0.0.1:9000")
|
||||
proc.Stderr = os.Stderr
|
||||
proc.Stdout = os.Stdout
|
||||
err = proc.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
time.Sleep(5 * time.Second)
|
||||
|
||||
return proc, nil
|
||||
}
|
||||
|
||||
func stopMinio(t *testing.T, proc *exec.Cmd) {
|
||||
err := proc.Process.Signal(os.Interrupt)
|
||||
require.NoError(t, err)
|
||||
|
||||
proc.Wait()
|
||||
}
|
||||
|
||||
func TestFilesystem(t *testing.T) {
|
||||
miniopath, err := filepath.Abs("./minio")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = os.RemoveAll(miniopath)
|
||||
require.NoError(t, err)
|
||||
|
||||
minio, err := startMinio(t, miniopath)
|
||||
if err != nil {
|
||||
if err != ErrNoMinio {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
os.RemoveAll("./testing/")
|
||||
|
||||
filesystems := map[string]func(string) (Filesystem, error){
|
||||
"memfs": func(name string) (Filesystem, error) {
|
||||
return NewMemFilesystem(MemConfig{})
|
||||
},
|
||||
"diskfs": func(name string) (Filesystem, error) {
|
||||
return NewRootedDiskFilesystem(RootedDiskConfig{
|
||||
Root: "./testing/" + name,
|
||||
})
|
||||
},
|
||||
"s3fs": func(name string) (Filesystem, error) {
|
||||
return NewS3Filesystem(S3Config{
|
||||
Name: name,
|
||||
Endpoint: "127.0.0.1:9000",
|
||||
AccessKeyID: "minioadmin",
|
||||
SecretAccessKey: "minioadmin",
|
||||
Region: "",
|
||||
Bucket: strings.ToLower(name),
|
||||
UseSSL: false,
|
||||
Logger: nil,
|
||||
})
|
||||
},
|
||||
}
|
||||
|
||||
tests := map[string]func(*testing.T, Filesystem){
|
||||
"new": testNew,
|
||||
"metadata": testMetadata,
|
||||
"writeFile": testWriteFile,
|
||||
"writeFileSafe": testWriteFileSafe,
|
||||
"writeFileReader": testWriteFileReader,
|
||||
"delete": testDelete,
|
||||
"files": testFiles,
|
||||
"replace": testReplace,
|
||||
"list": testList,
|
||||
"listGlob": testListGlob,
|
||||
"deleteAll": testDeleteAll,
|
||||
"data": testData,
|
||||
"statDir": testStatDir,
|
||||
"mkdirAll": testMkdirAll,
|
||||
"rename": testRename,
|
||||
"renameOverwrite": testRenameOverwrite,
|
||||
"copy": testCopy,
|
||||
"symlink": testSymlink,
|
||||
"stat": testStat,
|
||||
"copyOverwrite": testCopyOverwrite,
|
||||
"symlinkErrors": testSymlinkErrors,
|
||||
"symlinkOpenStat": testSymlinkOpenStat,
|
||||
"open": testOpen,
|
||||
}
|
||||
|
||||
for fsname, fs := range filesystems {
|
||||
for name, test := range tests {
|
||||
t.Run(fsname+"-"+name, func(t *testing.T) {
|
||||
if fsname == "s3fs" && minio == nil {
|
||||
t.Skip("minio server not available")
|
||||
}
|
||||
filesystem, err := fs(name)
|
||||
require.NoError(t, err)
|
||||
test(t, filesystem)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
os.RemoveAll("./testing/")
|
||||
|
||||
if minio != nil {
|
||||
stopMinio(t, minio)
|
||||
}
|
||||
|
||||
os.RemoveAll(miniopath)
|
||||
}
|
||||
|
||||
func testNew(t *testing.T, fs Filesystem) {
|
||||
cur, max := fs.Size()
|
||||
|
||||
require.Equal(t, int64(0), cur, "current size")
|
||||
require.Equal(t, int64(-1), max, "max size")
|
||||
|
||||
cur = fs.Files()
|
||||
|
||||
require.Equal(t, int64(0), cur, "number of files")
|
||||
}
|
||||
|
||||
func testMetadata(t *testing.T, fs Filesystem) {
|
||||
fs.SetMetadata("foo", "bar")
|
||||
require.Equal(t, "bar", fs.Metadata("foo"))
|
||||
}
|
||||
|
||||
func testWriteFile(t *testing.T, fs Filesystem) {
|
||||
size, created, err := fs.WriteFile("/foobar", []byte("xxxxx"))
|
||||
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, int64(5), size)
|
||||
require.Equal(t, true, created)
|
||||
|
||||
cur, max := fs.Size()
|
||||
|
||||
require.Equal(t, int64(5), cur)
|
||||
require.Equal(t, int64(-1), max)
|
||||
|
||||
cur = fs.Files()
|
||||
|
||||
require.Equal(t, int64(1), cur)
|
||||
}
|
||||
|
||||
func testWriteFileSafe(t *testing.T, fs Filesystem) {
|
||||
size, created, err := fs.WriteFileSafe("/foobar", []byte("xxxxx"))
|
||||
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, int64(5), size)
|
||||
require.Equal(t, true, created)
|
||||
|
||||
cur, max := fs.Size()
|
||||
|
||||
require.Equal(t, int64(5), cur)
|
||||
require.Equal(t, int64(-1), max)
|
||||
|
||||
cur = fs.Files()
|
||||
|
||||
require.Equal(t, int64(1), cur)
|
||||
}
|
||||
|
||||
func testWriteFileReader(t *testing.T, fs Filesystem) {
|
||||
data := strings.NewReader("xxxxx")
|
||||
|
||||
size, created, err := fs.WriteFileReader("/foobar", data)
|
||||
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, int64(5), size)
|
||||
require.Equal(t, true, created)
|
||||
|
||||
cur, max := fs.Size()
|
||||
|
||||
require.Equal(t, int64(5), cur)
|
||||
require.Equal(t, int64(-1), max)
|
||||
|
||||
cur = fs.Files()
|
||||
|
||||
require.Equal(t, int64(1), cur)
|
||||
}
|
||||
|
||||
func testOpen(t *testing.T, fs Filesystem) {
|
||||
file := fs.Open("/foobar")
|
||||
require.Nil(t, file)
|
||||
|
||||
_, _, err := fs.WriteFileReader("/foobar", strings.NewReader("xxxxx"))
|
||||
require.NoError(t, err)
|
||||
|
||||
file = fs.Open("/foobar")
|
||||
require.NotNil(t, file)
|
||||
require.Equal(t, "/foobar", file.Name())
|
||||
|
||||
stat, err := file.Stat()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/foobar", stat.Name())
|
||||
require.Equal(t, int64(5), stat.Size())
|
||||
require.Equal(t, false, stat.IsDir())
|
||||
}
|
||||
|
||||
func testDelete(t *testing.T, fs Filesystem) {
|
||||
size := fs.Remove("/foobar")
|
||||
|
||||
require.Equal(t, int64(-1), size)
|
||||
|
||||
data := strings.NewReader("xxxxx")
|
||||
|
||||
fs.WriteFileReader("/foobar", data)
|
||||
|
||||
size = fs.Remove("/foobar")
|
||||
|
||||
require.Equal(t, int64(5), size)
|
||||
|
||||
cur, max := fs.Size()
|
||||
|
||||
require.Equal(t, int64(0), cur)
|
||||
require.Equal(t, int64(-1), max)
|
||||
|
||||
cur = fs.Files()
|
||||
|
||||
require.Equal(t, int64(0), cur)
|
||||
}
|
||||
|
||||
func testFiles(t *testing.T, fs Filesystem) {
|
||||
require.Equal(t, int64(0), fs.Files())
|
||||
|
||||
fs.WriteFileReader("/foobar.txt", strings.NewReader("bar"))
|
||||
|
||||
require.Equal(t, int64(1), fs.Files())
|
||||
|
||||
fs.MkdirAll("/path/to/foo", 0777)
|
||||
|
||||
require.Equal(t, int64(1), fs.Files())
|
||||
|
||||
fs.Remove("/foobar.txt")
|
||||
|
||||
require.Equal(t, int64(0), fs.Files())
|
||||
}
|
||||
|
||||
func testReplace(t *testing.T, fs Filesystem) {
|
||||
data := strings.NewReader("xxxxx")
|
||||
|
||||
size, created, err := fs.WriteFileReader("/foobar", data)
|
||||
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, int64(5), size)
|
||||
require.Equal(t, true, created)
|
||||
|
||||
cur, max := fs.Size()
|
||||
|
||||
require.Equal(t, int64(5), cur)
|
||||
require.Equal(t, int64(-1), max)
|
||||
|
||||
cur = fs.Files()
|
||||
|
||||
require.Equal(t, int64(1), cur)
|
||||
|
||||
data = strings.NewReader("yyy")
|
||||
|
||||
size, created, err = fs.WriteFileReader("/foobar", data)
|
||||
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, int64(3), size)
|
||||
require.Equal(t, false, created)
|
||||
|
||||
cur, max = fs.Size()
|
||||
|
||||
require.Equal(t, int64(3), cur)
|
||||
require.Equal(t, int64(-1), max)
|
||||
|
||||
cur = fs.Files()
|
||||
|
||||
require.Equal(t, int64(1), cur)
|
||||
}
|
||||
|
||||
func testList(t *testing.T, fs Filesystem) {
|
||||
fs.WriteFileReader("/foobar1", strings.NewReader("a"))
|
||||
fs.WriteFileReader("/foobar2", strings.NewReader("bb"))
|
||||
fs.WriteFileReader("/foobar3", strings.NewReader("ccc"))
|
||||
fs.WriteFileReader("/foobar4", strings.NewReader("dddd"))
|
||||
fs.WriteFileReader("/path/foobar3", strings.NewReader("ccc"))
|
||||
fs.WriteFileReader("/path/to/foobar4", strings.NewReader("dddd"))
|
||||
|
||||
cur, max := fs.Size()
|
||||
|
||||
require.Equal(t, int64(17), cur)
|
||||
require.Equal(t, int64(-1), max)
|
||||
|
||||
cur = fs.Files()
|
||||
|
||||
require.Equal(t, int64(6), cur)
|
||||
|
||||
getNames := func(files []FileInfo) []string {
|
||||
names := []string{}
|
||||
for _, f := range files {
|
||||
names = append(names, f.Name())
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
files := fs.List("/", "")
|
||||
|
||||
require.Equal(t, 6, len(files))
|
||||
require.ElementsMatch(t, []string{"/foobar1", "/foobar2", "/foobar3", "/foobar4", "/path/foobar3", "/path/to/foobar4"}, getNames(files))
|
||||
|
||||
files = fs.List("/path", "")
|
||||
|
||||
require.Equal(t, 2, len(files))
|
||||
require.ElementsMatch(t, []string{"/path/foobar3", "/path/to/foobar4"}, getNames(files))
|
||||
}
|
||||
|
||||
func testListGlob(t *testing.T, fs Filesystem) {
|
||||
fs.WriteFileReader("/foobar1", strings.NewReader("a"))
|
||||
fs.WriteFileReader("/path/foobar2", strings.NewReader("a"))
|
||||
fs.WriteFileReader("/path/to/foobar3", strings.NewReader("a"))
|
||||
fs.WriteFileReader("/foobar4", strings.NewReader("a"))
|
||||
|
||||
cur := fs.Files()
|
||||
|
||||
require.Equal(t, int64(4), cur)
|
||||
|
||||
getNames := func(files []FileInfo) []string {
|
||||
names := []string{}
|
||||
for _, f := range files {
|
||||
names = append(names, f.Name())
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
files := getNames(fs.List("/", "/foo*"))
|
||||
require.Equal(t, 2, len(files))
|
||||
require.ElementsMatch(t, []string{"/foobar1", "/foobar4"}, files)
|
||||
|
||||
files = getNames(fs.List("/", "/*bar?"))
|
||||
require.Equal(t, 2, len(files))
|
||||
require.ElementsMatch(t, []string{"/foobar1", "/foobar4"}, files)
|
||||
|
||||
files = getNames(fs.List("/", "/path/*"))
|
||||
require.Equal(t, 1, len(files))
|
||||
require.ElementsMatch(t, []string{"/path/foobar2"}, files)
|
||||
|
||||
files = getNames(fs.List("/", "/path/**"))
|
||||
require.Equal(t, 2, len(files))
|
||||
require.ElementsMatch(t, []string{"/path/foobar2", "/path/to/foobar3"}, files)
|
||||
|
||||
files = getNames(fs.List("/path", "/**"))
|
||||
require.Equal(t, 2, len(files))
|
||||
require.ElementsMatch(t, []string{"/path/foobar2", "/path/to/foobar3"}, files)
|
||||
}
|
||||
|
||||
func testDeleteAll(t *testing.T, fs Filesystem) {
|
||||
if _, ok := fs.(*diskFilesystem); ok {
|
||||
return
|
||||
}
|
||||
|
||||
fs.WriteFileReader("/foobar1", strings.NewReader("abc"))
|
||||
fs.WriteFileReader("/path/foobar2", strings.NewReader("abc"))
|
||||
fs.WriteFileReader("/path/to/foobar3", strings.NewReader("abc"))
|
||||
fs.WriteFileReader("/foobar4", strings.NewReader("abc"))
|
||||
|
||||
cur := fs.Files()
|
||||
|
||||
require.Equal(t, int64(4), cur)
|
||||
|
||||
size := fs.RemoveAll()
|
||||
require.Equal(t, int64(12), size)
|
||||
|
||||
cur = fs.Files()
|
||||
|
||||
require.Equal(t, int64(0), cur)
|
||||
}
|
||||
|
||||
func testData(t *testing.T, fs Filesystem) {
|
||||
file := fs.Open("/foobar")
|
||||
require.Nil(t, file)
|
||||
|
||||
_, err := fs.ReadFile("/foobar")
|
||||
require.Error(t, err)
|
||||
|
||||
data := "gduwotoxqb"
|
||||
|
||||
data1 := strings.NewReader(data)
|
||||
|
||||
_, _, err = fs.WriteFileReader("/foobar", data1)
|
||||
require.NoError(t, err)
|
||||
|
||||
file = fs.Open("/foobar")
|
||||
require.NotNil(t, file)
|
||||
|
||||
data2 := make([]byte, len(data)+1)
|
||||
n, err := file.Read(data2)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
require.Equal(t, len(data), n)
|
||||
require.Equal(t, []byte(data), data2[:n])
|
||||
|
||||
data3, err := fs.ReadFile("/foobar")
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte(data), data3)
|
||||
}
|
||||
|
||||
func testStatDir(t *testing.T, fs Filesystem) {
|
||||
info, err := fs.Stat("/")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, info)
|
||||
require.Equal(t, true, info.IsDir())
|
||||
|
||||
data := strings.NewReader("gduwotoxqb")
|
||||
fs.WriteFileReader("/these/are/some/directories/foobar", data)
|
||||
|
||||
info, err = fs.Stat("/foobar")
|
||||
require.Error(t, err)
|
||||
require.Nil(t, info)
|
||||
|
||||
info, err = fs.Stat("/these/are/some/directories/foobar")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/these/are/some/directories/foobar", info.Name())
|
||||
require.Equal(t, int64(10), info.Size())
|
||||
require.Equal(t, false, info.IsDir())
|
||||
|
||||
info, err = fs.Stat("/these")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/these", info.Name())
|
||||
require.Equal(t, int64(0), info.Size())
|
||||
require.Equal(t, true, info.IsDir())
|
||||
|
||||
info, err = fs.Stat("/these/are/")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/these/are", info.Name())
|
||||
require.Equal(t, int64(0), info.Size())
|
||||
require.Equal(t, true, info.IsDir())
|
||||
|
||||
info, err = fs.Stat("/these/are/some")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/these/are/some", info.Name())
|
||||
require.Equal(t, int64(0), info.Size())
|
||||
require.Equal(t, true, info.IsDir())
|
||||
|
||||
info, err = fs.Stat("/these/are/some/directories")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/these/are/some/directories", info.Name())
|
||||
require.Equal(t, int64(0), info.Size())
|
||||
require.Equal(t, true, info.IsDir())
|
||||
}
|
||||
|
||||
func testMkdirAll(t *testing.T, fs Filesystem) {
|
||||
info, err := fs.Stat("/foo/bar/dir")
|
||||
require.Error(t, err)
|
||||
require.Nil(t, info)
|
||||
|
||||
err = fs.MkdirAll("/foo/bar/dir", 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fs.MkdirAll("/foo/bar", 0755)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err = fs.Stat("/foo/bar/dir")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, info)
|
||||
require.Equal(t, int64(0), info.Size())
|
||||
require.Equal(t, true, info.IsDir())
|
||||
|
||||
info, err = fs.Stat("/")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, info)
|
||||
require.Equal(t, int64(0), info.Size())
|
||||
require.Equal(t, true, info.IsDir())
|
||||
|
||||
info, err = fs.Stat("/foo")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, info)
|
||||
require.Equal(t, int64(0), info.Size())
|
||||
require.Equal(t, true, info.IsDir())
|
||||
|
||||
info, err = fs.Stat("/foo/bar")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, info)
|
||||
require.Equal(t, int64(0), info.Size())
|
||||
require.Equal(t, true, info.IsDir())
|
||||
|
||||
_, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb"))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fs.MkdirAll("/foobar", 0755)
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func testRename(t *testing.T, fs Filesystem) {
|
||||
err := fs.Rename("/foobar", "/foobaz")
|
||||
require.Error(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobar")
|
||||
require.Error(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobaz")
|
||||
require.Error(t, err)
|
||||
|
||||
_, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb"))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobar")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fs.Rename("/foobar", "/foobaz")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobar")
|
||||
require.Error(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobaz")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testRenameOverwrite(t *testing.T, fs Filesystem) {
|
||||
_, err := fs.Stat("/foobar")
|
||||
require.Error(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobaz")
|
||||
require.Error(t, err)
|
||||
|
||||
_, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar"))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz"))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobar")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobaz")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fs.Rename("/foobar", "/foobaz")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobar")
|
||||
require.Error(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobaz")
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := fs.ReadFile("/foobaz")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "foobar", string(data))
|
||||
}
|
||||
|
||||
func testSymlink(t *testing.T, fs Filesystem) {
|
||||
if _, ok := fs.(*s3Filesystem); ok {
|
||||
return
|
||||
}
|
||||
|
||||
err := fs.Symlink("/foobar", "/foobaz")
|
||||
require.Error(t, err)
|
||||
|
||||
_, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar"))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fs.Symlink("/foobar", "/foobaz")
|
||||
require.NoError(t, err)
|
||||
|
||||
file := fs.Open("/foobaz")
|
||||
require.NotNil(t, file)
|
||||
require.Equal(t, "/foobaz", file.Name())
|
||||
|
||||
data := make([]byte, 10)
|
||||
n, err := file.Read(data)
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 6, n)
|
||||
require.Equal(t, "foobar", string(data[:n]))
|
||||
|
||||
stat, err := fs.Stat("/foobaz")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/foobaz", stat.Name())
|
||||
require.Equal(t, int64(6), stat.Size())
|
||||
require.NotEqual(t, 0, int(stat.Mode()&os.ModeSymlink))
|
||||
|
||||
link, ok := stat.IsLink()
|
||||
require.Equal(t, "/foobar", link)
|
||||
require.Equal(t, true, ok)
|
||||
|
||||
data, err = fs.ReadFile("/foobaz")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "foobar", string(data))
|
||||
}
|
||||
|
||||
func testSymlinkOpenStat(t *testing.T, fs Filesystem) {
|
||||
if _, ok := fs.(*s3Filesystem); ok {
|
||||
return
|
||||
}
|
||||
|
||||
_, _, err := fs.WriteFileReader("/foobar", strings.NewReader("foobar"))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fs.Symlink("/foobar", "/foobaz")
|
||||
require.NoError(t, err)
|
||||
|
||||
file := fs.Open("/foobaz")
|
||||
require.NotNil(t, file)
|
||||
require.Equal(t, "/foobaz", file.Name())
|
||||
|
||||
fstat, err := file.Stat()
|
||||
require.NoError(t, err)
|
||||
|
||||
stat, err := fs.Stat("/foobaz")
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, "/foobaz", fstat.Name())
|
||||
require.Equal(t, fstat.Name(), stat.Name())
|
||||
|
||||
require.Equal(t, int64(6), fstat.Size())
|
||||
require.Equal(t, fstat.Size(), stat.Size())
|
||||
|
||||
require.NotEqual(t, 0, int(fstat.Mode()&os.ModeSymlink))
|
||||
require.Equal(t, fstat.Mode(), stat.Mode())
|
||||
}
|
||||
|
||||
func testStat(t *testing.T, fs Filesystem) {
|
||||
_, _, err := fs.WriteFileReader("/foobar", strings.NewReader("foobar"))
|
||||
require.NoError(t, err)
|
||||
|
||||
file := fs.Open("/foobar")
|
||||
require.NotNil(t, file)
|
||||
|
||||
stat1, err := fs.Stat("/foobar")
|
||||
require.NoError(t, err)
|
||||
|
||||
stat2, err := file.Stat()
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, stat1, stat2)
|
||||
}
|
||||
|
||||
func testCopy(t *testing.T, fs Filesystem) {
|
||||
err := fs.Rename("/foobar", "/foobaz")
|
||||
require.Error(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobar")
|
||||
require.Error(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobaz")
|
||||
require.Error(t, err)
|
||||
|
||||
_, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb"))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobar")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fs.Copy("/foobar", "/foobaz")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobar")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobaz")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testCopyOverwrite(t *testing.T, fs Filesystem) {
|
||||
_, err := fs.Stat("/foobar")
|
||||
require.Error(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobaz")
|
||||
require.Error(t, err)
|
||||
|
||||
_, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar"))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz"))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobar")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobaz")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fs.Copy("/foobar", "/foobaz")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobar")
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = fs.Stat("/foobaz")
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := fs.ReadFile("/foobaz")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "foobar", string(data))
|
||||
}
|
||||
|
||||
func testSymlinkErrors(t *testing.T, fs Filesystem) {
|
||||
if _, ok := fs.(*s3Filesystem); ok {
|
||||
return
|
||||
}
|
||||
|
||||
err := fs.Symlink("/foobar", "/foobaz")
|
||||
require.Error(t, err)
|
||||
|
||||
_, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar"))
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz"))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fs.Symlink("/foobar", "/foobaz")
|
||||
require.Error(t, err)
|
||||
|
||||
err = fs.Symlink("/foobar", "/bazfoo")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = fs.Symlink("/bazfoo", "/barfoo")
|
||||
require.Error(t, err)
|
||||
}
|
||||
581
io/fs/mem.go
581
io/fs/mem.go
@@ -4,7 +4,11 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@@ -15,25 +19,15 @@ import (
|
||||
// MemConfig is the config that is required for creating
|
||||
// a new memory filesystem.
|
||||
type MemConfig struct {
|
||||
// Base is the base path to be reported for this filesystem
|
||||
Base string
|
||||
|
||||
// Size is the capacity of the filesystem in bytes
|
||||
Size int64
|
||||
|
||||
// Set true to automatically delete the oldest files until there's
|
||||
// enough space to store a new file
|
||||
Purge bool
|
||||
|
||||
// For logging, optional
|
||||
Logger log.Logger
|
||||
Logger log.Logger // For logging, optional
|
||||
}
|
||||
|
||||
type memFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
lastMod time.Time
|
||||
linkTo string
|
||||
name string // Full name of the file (including path)
|
||||
size int64 // The size of the file in bytes
|
||||
dir bool // Whether this file represents a directory
|
||||
lastMod time.Time // The time of the last modification of the file
|
||||
linkTo string // Where the file links to, empty if it's not a link
|
||||
}
|
||||
|
||||
func (f *memFileInfo) Name() string {
|
||||
@@ -44,6 +38,20 @@ func (f *memFileInfo) Size() int64 {
|
||||
return f.size
|
||||
}
|
||||
|
||||
func (f *memFileInfo) Mode() fs.FileMode {
|
||||
mode := fs.FileMode(fs.ModePerm)
|
||||
|
||||
if f.dir {
|
||||
mode |= fs.ModeDir
|
||||
}
|
||||
|
||||
if len(f.linkTo) != 0 {
|
||||
mode |= fs.ModeSymlink
|
||||
}
|
||||
|
||||
return mode
|
||||
}
|
||||
|
||||
func (f *memFileInfo) ModTime() time.Time {
|
||||
return f.lastMod
|
||||
}
|
||||
@@ -53,24 +61,12 @@ func (f *memFileInfo) IsLink() (string, bool) {
|
||||
}
|
||||
|
||||
func (f *memFileInfo) IsDir() bool {
|
||||
return false
|
||||
return f.dir
|
||||
}
|
||||
|
||||
type memFile struct {
|
||||
// Name of the file
|
||||
name string
|
||||
|
||||
// Size of the file in bytes
|
||||
size int64
|
||||
|
||||
// Last modification of the file as a UNIX timestamp
|
||||
lastMod time.Time
|
||||
|
||||
// Contents of the file
|
||||
data *bytes.Buffer
|
||||
|
||||
// Link to another file
|
||||
linkTo string
|
||||
memFileInfo
|
||||
data *bytes.Buffer // Contents of the file
|
||||
}
|
||||
|
||||
func (f *memFile) Name() string {
|
||||
@@ -81,6 +77,7 @@ func (f *memFile) Stat() (FileInfo, error) {
|
||||
info := &memFileInfo{
|
||||
name: f.name,
|
||||
size: f.size,
|
||||
dir: f.dir,
|
||||
lastMod: f.lastMod,
|
||||
linkTo: f.linkTo,
|
||||
}
|
||||
@@ -107,7 +104,8 @@ func (f *memFile) Close() error {
|
||||
}
|
||||
|
||||
type memFilesystem struct {
|
||||
base string
|
||||
metadata map[string]string
|
||||
metaLock sync.RWMutex
|
||||
|
||||
// Mapping of path to file
|
||||
files map[string]*memFile
|
||||
@@ -118,34 +116,27 @@ type memFilesystem struct {
|
||||
// Pool for the storage of the contents of files
|
||||
dataPool sync.Pool
|
||||
|
||||
// Max. size of the filesystem in bytes as
|
||||
// given by the config
|
||||
maxSize int64
|
||||
|
||||
// Current size of the filesystem in bytes
|
||||
currentSize int64
|
||||
|
||||
// Purge setting from the config
|
||||
purge bool
|
||||
|
||||
// Logger from the config
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
// NewMemFilesystem creates a new filesystem in memory that implements
|
||||
// the Filesystem interface.
|
||||
func NewMemFilesystem(config MemConfig) Filesystem {
|
||||
func NewMemFilesystem(config MemConfig) (Filesystem, error) {
|
||||
fs := &memFilesystem{
|
||||
base: config.Base,
|
||||
maxSize: config.Size,
|
||||
purge: config.Purge,
|
||||
logger: config.Logger,
|
||||
metadata: make(map[string]string),
|
||||
logger: config.Logger,
|
||||
}
|
||||
|
||||
if fs.logger == nil {
|
||||
fs.logger = log.New("MemFS")
|
||||
fs.logger = log.New("")
|
||||
}
|
||||
|
||||
fs.logger = fs.logger.WithField("type", "mem")
|
||||
|
||||
fs.files = make(map[string]*memFile)
|
||||
|
||||
fs.dataPool = sync.Pool{
|
||||
@@ -154,61 +145,105 @@ func NewMemFilesystem(config MemConfig) Filesystem {
|
||||
},
|
||||
}
|
||||
|
||||
fs.logger.WithFields(log.Fields{
|
||||
"size_bytes": fs.maxSize,
|
||||
"purge": fs.purge,
|
||||
}).Debug().Log("Created")
|
||||
fs.logger.Debug().Log("Created")
|
||||
|
||||
return fs
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Base() string {
|
||||
return fs.base
|
||||
func NewMemFilesystemFromDir(dir string, config MemConfig) (Filesystem, error) {
|
||||
mem, err := NewMemFilesystem(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
mode := info.Mode()
|
||||
if !mode.IsRegular() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if mode&os.ModeSymlink != 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
defer file.Close()
|
||||
|
||||
_, _, err = mem.WriteFileReader(path, file)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't copy %s", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return mem, nil
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Rebase(base string) error {
|
||||
fs.base = base
|
||||
func (fs *memFilesystem) Name() string {
|
||||
return "mem"
|
||||
}
|
||||
|
||||
return nil
|
||||
func (fs *memFilesystem) Type() string {
|
||||
return "mem"
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Metadata(key string) string {
|
||||
fs.metaLock.RLock()
|
||||
defer fs.metaLock.RUnlock()
|
||||
|
||||
return fs.metadata[key]
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) SetMetadata(key, data string) {
|
||||
fs.metaLock.Lock()
|
||||
defer fs.metaLock.Unlock()
|
||||
|
||||
fs.metadata[key] = data
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Size() (int64, int64) {
|
||||
fs.filesLock.RLock()
|
||||
defer fs.filesLock.RUnlock()
|
||||
|
||||
return fs.currentSize, fs.maxSize
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Resize(size int64) {
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
|
||||
diffSize := fs.maxSize - size
|
||||
|
||||
if diffSize == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if diffSize > 0 {
|
||||
fs.free(diffSize)
|
||||
}
|
||||
|
||||
fs.logger.WithFields(log.Fields{
|
||||
"from_bytes": fs.maxSize,
|
||||
"to_bytes": size,
|
||||
}).Debug().Log("Resizing")
|
||||
|
||||
fs.maxSize = size
|
||||
return fs.currentSize, -1
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Files() int64 {
|
||||
fs.filesLock.RLock()
|
||||
defer fs.filesLock.RUnlock()
|
||||
|
||||
return int64(len(fs.files))
|
||||
nfiles := int64(0)
|
||||
|
||||
for _, f := range fs.files {
|
||||
if f.dir {
|
||||
continue
|
||||
}
|
||||
|
||||
nfiles++
|
||||
}
|
||||
|
||||
return nfiles
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Open(path string) File {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
fs.filesLock.RLock()
|
||||
file, ok := fs.files[path]
|
||||
fs.filesLock.RUnlock()
|
||||
@@ -218,29 +253,68 @@ func (fs *memFilesystem) Open(path string) File {
|
||||
}
|
||||
|
||||
newFile := &memFile{
|
||||
name: file.name,
|
||||
size: file.size,
|
||||
lastMod: file.lastMod,
|
||||
linkTo: file.linkTo,
|
||||
memFileInfo: memFileInfo{
|
||||
name: file.name,
|
||||
size: file.size,
|
||||
lastMod: file.lastMod,
|
||||
linkTo: file.linkTo,
|
||||
},
|
||||
}
|
||||
|
||||
if len(file.linkTo) != 0 {
|
||||
file, ok = fs.files[file.linkTo]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if file.data != nil {
|
||||
newFile.lastMod = file.lastMod
|
||||
newFile.data = bytes.NewBuffer(file.data.Bytes())
|
||||
newFile.size = int64(newFile.data.Len())
|
||||
}
|
||||
|
||||
return newFile
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) ReadFile(path string) ([]byte, error) {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
fs.filesLock.RLock()
|
||||
file, ok := fs.files[path]
|
||||
fs.filesLock.RUnlock()
|
||||
|
||||
if !ok {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
if len(file.linkTo) != 0 {
|
||||
file, ok = fs.files[file.linkTo]
|
||||
if !ok {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
}
|
||||
|
||||
if file.data != nil {
|
||||
return file.data.Bytes(), nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Symlink(oldname, newname string) error {
|
||||
oldname = fs.cleanPath(oldname)
|
||||
newname = fs.cleanPath(newname)
|
||||
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
|
||||
if _, ok := fs.files[newname]; ok {
|
||||
return fmt.Errorf("%s already exist", newname)
|
||||
if _, ok := fs.files[oldname]; !ok {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
|
||||
if oldname[0] != '/' {
|
||||
oldname = "/" + oldname
|
||||
if _, ok := fs.files[newname]; ok {
|
||||
return os.ErrExist
|
||||
}
|
||||
|
||||
if file, ok := fs.files[oldname]; ok {
|
||||
@@ -250,11 +324,14 @@ func (fs *memFilesystem) Symlink(oldname, newname string) error {
|
||||
}
|
||||
|
||||
newFile := &memFile{
|
||||
name: newname,
|
||||
size: 0,
|
||||
lastMod: time.Now(),
|
||||
data: nil,
|
||||
linkTo: oldname,
|
||||
memFileInfo: memFileInfo{
|
||||
name: newname,
|
||||
dir: false,
|
||||
size: 0,
|
||||
lastMod: time.Now(),
|
||||
linkTo: oldname,
|
||||
},
|
||||
data: nil,
|
||||
}
|
||||
|
||||
fs.files[newname] = newFile
|
||||
@@ -262,18 +339,21 @@ func (fs *memFilesystem) Symlink(oldname, newname string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Store(path string, r io.Reader) (int64, bool, error) {
|
||||
func (fs *memFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool, error) {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
newFile := &memFile{
|
||||
name: path,
|
||||
size: 0,
|
||||
lastMod: time.Now(),
|
||||
data: nil,
|
||||
memFileInfo: memFileInfo{
|
||||
name: path,
|
||||
dir: false,
|
||||
size: 0,
|
||||
lastMod: time.Now(),
|
||||
},
|
||||
data: fs.dataPool.Get().(*bytes.Buffer),
|
||||
}
|
||||
|
||||
data := fs.dataPool.Get().(*bytes.Buffer)
|
||||
data.Reset()
|
||||
|
||||
size, err := data.ReadFrom(r)
|
||||
newFile.data.Reset()
|
||||
size, err := newFile.data.ReadFrom(r)
|
||||
if err != nil {
|
||||
fs.logger.WithFields(log.Fields{
|
||||
"path": path,
|
||||
@@ -281,55 +361,26 @@ func (fs *memFilesystem) Store(path string, r io.Reader) (int64, bool, error) {
|
||||
"error": err,
|
||||
}).Warn().Log("Incomplete file")
|
||||
}
|
||||
newFile.size = size
|
||||
newFile.data = data
|
||||
|
||||
// reject if the new file is larger than the available space
|
||||
if fs.maxSize > 0 && newFile.size > fs.maxSize {
|
||||
fs.dataPool.Put(data)
|
||||
return -1, false, fmt.Errorf("File is too big")
|
||||
}
|
||||
newFile.size = size
|
||||
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
|
||||
// calculate the new size of the filesystem
|
||||
newSize := fs.currentSize + newFile.size
|
||||
|
||||
file, replace := fs.files[path]
|
||||
if replace {
|
||||
newSize -= file.size
|
||||
delete(fs.files, path)
|
||||
|
||||
fs.currentSize -= file.size
|
||||
|
||||
fs.dataPool.Put(file.data)
|
||||
file.data = nil
|
||||
}
|
||||
|
||||
if fs.maxSize > 0 {
|
||||
if newSize > fs.maxSize {
|
||||
if !fs.purge {
|
||||
fs.dataPool.Put(data)
|
||||
return -1, false, fmt.Errorf("not enough space on device")
|
||||
}
|
||||
|
||||
if replace {
|
||||
delete(fs.files, path)
|
||||
fs.currentSize -= file.size
|
||||
|
||||
fs.dataPool.Put(file.data)
|
||||
file.data = nil
|
||||
}
|
||||
|
||||
newSize -= fs.free(fs.currentSize + newFile.size - fs.maxSize)
|
||||
}
|
||||
} else {
|
||||
if replace {
|
||||
delete(fs.files, path)
|
||||
|
||||
fs.dataPool.Put(file.data)
|
||||
file.data = nil
|
||||
}
|
||||
}
|
||||
|
||||
fs.currentSize = newSize
|
||||
fs.files[path] = newFile
|
||||
|
||||
fs.currentSize += newFile.size
|
||||
|
||||
logger := fs.logger.WithFields(log.Fields{
|
||||
"path": newFile.name,
|
||||
"filesize_bytes": newFile.size,
|
||||
@@ -345,7 +396,18 @@ func (fs *memFilesystem) Store(path string, r io.Reader) (int64, bool, error) {
|
||||
return newFile.size, !replace, nil
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) free(size int64) int64 {
|
||||
func (fs *memFilesystem) WriteFile(path string, data []byte) (int64, bool, error) {
|
||||
return fs.WriteFileReader(path, bytes.NewBuffer(data))
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) {
|
||||
return fs.WriteFileReader(path, bytes.NewBuffer(data))
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Purge(size int64) int64 {
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
|
||||
files := []*memFile{}
|
||||
|
||||
for _, f := range fs.files {
|
||||
@@ -383,7 +445,190 @@ func (fs *memFilesystem) free(size int64) int64 {
|
||||
return freed
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Delete(path string) int64 {
|
||||
func (fs *memFilesystem) MkdirAll(path string, perm os.FileMode) error {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
|
||||
info, err := fs.stat(path)
|
||||
if err == nil {
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
return os.ErrExist
|
||||
}
|
||||
|
||||
f := &memFile{
|
||||
memFileInfo: memFileInfo{
|
||||
name: path,
|
||||
size: 0,
|
||||
dir: true,
|
||||
lastMod: time.Now(),
|
||||
},
|
||||
data: nil,
|
||||
}
|
||||
|
||||
fs.files[path] = f
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Rename(src, dst string) error {
|
||||
src = filepath.Join("/", filepath.Clean(src))
|
||||
dst = filepath.Join("/", filepath.Clean(dst))
|
||||
|
||||
if src == dst {
|
||||
return nil
|
||||
}
|
||||
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
|
||||
srcFile, ok := fs.files[src]
|
||||
if !ok {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
|
||||
dstFile, ok := fs.files[dst]
|
||||
if ok {
|
||||
fs.currentSize -= dstFile.size
|
||||
|
||||
fs.dataPool.Put(dstFile.data)
|
||||
dstFile.data = nil
|
||||
}
|
||||
|
||||
fs.files[dst] = srcFile
|
||||
delete(fs.files, src)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Copy(src, dst string) error {
|
||||
src = filepath.Join("/", filepath.Clean(src))
|
||||
dst = filepath.Join("/", filepath.Clean(dst))
|
||||
|
||||
if src == dst {
|
||||
return nil
|
||||
}
|
||||
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
|
||||
srcFile, ok := fs.files[src]
|
||||
if !ok {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
|
||||
if srcFile.dir {
|
||||
return os.ErrNotExist
|
||||
}
|
||||
|
||||
if fs.isDir(dst) {
|
||||
return os.ErrInvalid
|
||||
}
|
||||
|
||||
dstFile, ok := fs.files[dst]
|
||||
if ok {
|
||||
fs.currentSize -= dstFile.size
|
||||
} else {
|
||||
dstFile = &memFile{
|
||||
memFileInfo: memFileInfo{
|
||||
name: dst,
|
||||
dir: false,
|
||||
size: srcFile.size,
|
||||
lastMod: time.Now(),
|
||||
},
|
||||
data: fs.dataPool.Get().(*bytes.Buffer),
|
||||
}
|
||||
}
|
||||
|
||||
dstFile.data.Reset()
|
||||
dstFile.data.Write(srcFile.data.Bytes())
|
||||
|
||||
fs.currentSize += dstFile.size
|
||||
|
||||
fs.files[dst] = dstFile
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Stat(path string) (FileInfo, error) {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
fs.filesLock.RLock()
|
||||
defer fs.filesLock.RUnlock()
|
||||
|
||||
return fs.stat(path)
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) stat(path string) (FileInfo, error) {
|
||||
file, ok := fs.files[path]
|
||||
if ok {
|
||||
f := &memFileInfo{
|
||||
name: file.name,
|
||||
size: file.size,
|
||||
dir: file.dir,
|
||||
lastMod: file.lastMod,
|
||||
linkTo: file.linkTo,
|
||||
}
|
||||
|
||||
if len(f.linkTo) != 0 {
|
||||
file, ok := fs.files[f.linkTo]
|
||||
if !ok {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
f.lastMod = file.lastMod
|
||||
f.size = file.size
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// Check for directories
|
||||
if !fs.isDir(path) {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
f := &memFileInfo{
|
||||
name: path,
|
||||
size: 0,
|
||||
dir: true,
|
||||
lastMod: time.Now(),
|
||||
linkTo: "",
|
||||
}
|
||||
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) isDir(path string) bool {
|
||||
file, ok := fs.files[path]
|
||||
if ok {
|
||||
return file.dir
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(path, "/") {
|
||||
path = path + "/"
|
||||
}
|
||||
|
||||
if path == "/" {
|
||||
return true
|
||||
}
|
||||
|
||||
for k := range fs.files {
|
||||
if strings.HasPrefix(k, path) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) Remove(path string) int64 {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
|
||||
@@ -407,7 +652,7 @@ func (fs *memFilesystem) Delete(path string) int64 {
|
||||
return file.size
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) DeleteAll() int64 {
|
||||
func (fs *memFilesystem) RemoveAll() int64 {
|
||||
fs.filesLock.Lock()
|
||||
defer fs.filesLock.Unlock()
|
||||
|
||||
@@ -419,19 +664,28 @@ func (fs *memFilesystem) DeleteAll() int64 {
|
||||
return size
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) List(pattern string) []FileInfo {
|
||||
func (fs *memFilesystem) List(path, pattern string) []FileInfo {
|
||||
path = fs.cleanPath(path)
|
||||
files := []FileInfo{}
|
||||
|
||||
fs.filesLock.RLock()
|
||||
defer fs.filesLock.RUnlock()
|
||||
|
||||
for _, file := range fs.files {
|
||||
if !strings.HasPrefix(file.name, path) {
|
||||
continue
|
||||
}
|
||||
|
||||
if len(pattern) != 0 {
|
||||
if ok, _ := glob.Match(pattern, file.name, '/'); !ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if file.dir {
|
||||
continue
|
||||
}
|
||||
|
||||
files = append(files, &memFileInfo{
|
||||
name: file.name,
|
||||
size: file.size,
|
||||
@@ -442,3 +696,44 @@ func (fs *memFilesystem) List(pattern string) []FileInfo {
|
||||
|
||||
return files
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) LookPath(file string) (string, error) {
|
||||
if strings.Contains(file, "/") {
|
||||
file = fs.cleanPath(file)
|
||||
info, err := fs.Stat(file)
|
||||
if err == nil {
|
||||
if !info.Mode().IsRegular() {
|
||||
return file, os.ErrNotExist
|
||||
}
|
||||
return file, nil
|
||||
}
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
path := os.Getenv("PATH")
|
||||
for _, dir := range filepath.SplitList(path) {
|
||||
if dir == "" {
|
||||
// Unix shell semantics: path element "" means "."
|
||||
dir = "."
|
||||
}
|
||||
path := filepath.Join(dir, file)
|
||||
path = fs.cleanPath(path)
|
||||
if info, err := fs.Stat(path); err == nil {
|
||||
if !filepath.IsAbs(path) {
|
||||
return path, os.ErrNotExist
|
||||
}
|
||||
if !info.Mode().IsRegular() {
|
||||
return path, os.ErrNotExist
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
return "", os.ErrNotExist
|
||||
}
|
||||
|
||||
func (fs *memFilesystem) cleanPath(path string) string {
|
||||
if !filepath.IsAbs(path) {
|
||||
path = filepath.Join("/", path)
|
||||
}
|
||||
|
||||
return filepath.Join("/", filepath.Clean(path))
|
||||
}
|
||||
|
||||
@@ -1,406 +1,30 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 10,
|
||||
Purge: false,
|
||||
})
|
||||
func TestMemFromDir(t *testing.T) {
|
||||
mem, err := NewMemFilesystemFromDir(".", MemConfig{})
|
||||
require.NoError(t, err)
|
||||
|
||||
cur, max := mem.Size()
|
||||
names := []string{}
|
||||
for _, f := range mem.List("/", "/*.go") {
|
||||
names = append(names, f.Name())
|
||||
}
|
||||
|
||||
assert.Equal(t, int64(0), cur)
|
||||
assert.Equal(t, int64(10), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(0), cur)
|
||||
}
|
||||
|
||||
func TestSimplePutNoPurge(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 10,
|
||||
Purge: false,
|
||||
})
|
||||
|
||||
data := strings.NewReader("xxxxx")
|
||||
|
||||
size, created, err := mem.Store("/foobar", data)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, int64(5), size)
|
||||
assert.Equal(t, true, created)
|
||||
|
||||
cur, max := mem.Size()
|
||||
|
||||
assert.Equal(t, int64(5), cur)
|
||||
assert.Equal(t, int64(10), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(1), cur)
|
||||
}
|
||||
|
||||
func TestSimpleDelete(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 10,
|
||||
Purge: false,
|
||||
})
|
||||
|
||||
size := mem.Delete("/foobar")
|
||||
|
||||
assert.Equal(t, int64(-1), size)
|
||||
|
||||
data := strings.NewReader("xxxxx")
|
||||
|
||||
mem.Store("/foobar", data)
|
||||
|
||||
size = mem.Delete("/foobar")
|
||||
|
||||
assert.Equal(t, int64(5), size)
|
||||
|
||||
cur, max := mem.Size()
|
||||
|
||||
assert.Equal(t, int64(0), cur)
|
||||
assert.Equal(t, int64(10), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(0), cur)
|
||||
}
|
||||
|
||||
func TestReplaceNoPurge(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 10,
|
||||
Purge: false,
|
||||
})
|
||||
|
||||
data := strings.NewReader("xxxxx")
|
||||
|
||||
size, created, err := mem.Store("/foobar", data)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, int64(5), size)
|
||||
assert.Equal(t, true, created)
|
||||
|
||||
cur, max := mem.Size()
|
||||
|
||||
assert.Equal(t, int64(5), cur)
|
||||
assert.Equal(t, int64(10), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(1), cur)
|
||||
|
||||
data = strings.NewReader("yyy")
|
||||
|
||||
size, created, err = mem.Store("/foobar", data)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, int64(3), size)
|
||||
assert.Equal(t, false, created)
|
||||
|
||||
cur, max = mem.Size()
|
||||
|
||||
assert.Equal(t, int64(3), cur)
|
||||
assert.Equal(t, int64(10), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(1), cur)
|
||||
}
|
||||
|
||||
func TestReplacePurge(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 10,
|
||||
Purge: true,
|
||||
})
|
||||
|
||||
data1 := strings.NewReader("xxx")
|
||||
data2 := strings.NewReader("yyy")
|
||||
data3 := strings.NewReader("zzz")
|
||||
|
||||
mem.Store("/foobar1", data1)
|
||||
mem.Store("/foobar2", data2)
|
||||
mem.Store("/foobar3", data3)
|
||||
|
||||
cur, max := mem.Size()
|
||||
|
||||
assert.Equal(t, int64(9), cur)
|
||||
assert.Equal(t, int64(10), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(3), cur)
|
||||
|
||||
data4 := strings.NewReader("zzzzz")
|
||||
|
||||
size, _, _ := mem.Store("/foobar1", data4)
|
||||
|
||||
assert.Equal(t, int64(5), size)
|
||||
|
||||
cur, max = mem.Size()
|
||||
|
||||
assert.Equal(t, int64(8), cur)
|
||||
assert.Equal(t, int64(10), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(2), cur)
|
||||
}
|
||||
|
||||
func TestReplaceUnlimited(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 0,
|
||||
Purge: false,
|
||||
})
|
||||
|
||||
data := strings.NewReader("xxxxx")
|
||||
|
||||
size, created, err := mem.Store("/foobar", data)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, int64(5), size)
|
||||
assert.Equal(t, true, created)
|
||||
|
||||
cur, max := mem.Size()
|
||||
|
||||
assert.Equal(t, int64(5), cur)
|
||||
assert.Equal(t, int64(0), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(1), cur)
|
||||
|
||||
data = strings.NewReader("yyy")
|
||||
|
||||
size, created, err = mem.Store("/foobar", data)
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, int64(3), size)
|
||||
assert.Equal(t, false, created)
|
||||
|
||||
cur, max = mem.Size()
|
||||
|
||||
assert.Equal(t, int64(3), cur)
|
||||
assert.Equal(t, int64(0), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(1), cur)
|
||||
}
|
||||
|
||||
func TestTooBigNoPurge(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 10,
|
||||
Purge: false,
|
||||
})
|
||||
|
||||
data := strings.NewReader("xxxxxyyyyyz")
|
||||
|
||||
size, _, _ := mem.Store("/foobar", data)
|
||||
|
||||
assert.Equal(t, int64(-1), size)
|
||||
}
|
||||
|
||||
func TestTooBigPurge(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 10,
|
||||
Purge: true,
|
||||
})
|
||||
|
||||
data1 := strings.NewReader("xxxxx")
|
||||
data2 := strings.NewReader("yyyyy")
|
||||
|
||||
mem.Store("/foobar1", data1)
|
||||
mem.Store("/foobar2", data2)
|
||||
|
||||
data := strings.NewReader("xxxxxyyyyyz")
|
||||
|
||||
size, _, _ := mem.Store("/foobar", data)
|
||||
|
||||
assert.Equal(t, int64(-1), size)
|
||||
}
|
||||
|
||||
func TestFullSpaceNoPurge(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 10,
|
||||
Purge: false,
|
||||
})
|
||||
|
||||
data1 := strings.NewReader("xxxxx")
|
||||
data2 := strings.NewReader("yyyyy")
|
||||
|
||||
mem.Store("/foobar1", data1)
|
||||
mem.Store("/foobar2", data2)
|
||||
|
||||
cur, max := mem.Size()
|
||||
|
||||
assert.Equal(t, int64(10), cur)
|
||||
assert.Equal(t, int64(10), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(2), cur)
|
||||
|
||||
data3 := strings.NewReader("zzzzz")
|
||||
|
||||
size, _, _ := mem.Store("/foobar3", data3)
|
||||
|
||||
assert.Equal(t, int64(-1), size)
|
||||
}
|
||||
|
||||
func TestFullSpacePurge(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 10,
|
||||
Purge: true,
|
||||
})
|
||||
|
||||
data1 := strings.NewReader("xxxxx")
|
||||
data2 := strings.NewReader("yyyyy")
|
||||
|
||||
mem.Store("/foobar1", data1)
|
||||
mem.Store("/foobar2", data2)
|
||||
|
||||
cur, max := mem.Size()
|
||||
|
||||
assert.Equal(t, int64(10), cur)
|
||||
assert.Equal(t, int64(10), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(2), cur)
|
||||
|
||||
data3 := strings.NewReader("zzzzz")
|
||||
|
||||
size, _, _ := mem.Store("/foobar3", data3)
|
||||
|
||||
assert.Equal(t, int64(5), size)
|
||||
|
||||
cur, max = mem.Size()
|
||||
|
||||
assert.Equal(t, int64(10), cur)
|
||||
assert.Equal(t, int64(10), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(2), cur)
|
||||
}
|
||||
|
||||
func TestFullSpacePurgeMulti(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 10,
|
||||
Purge: true,
|
||||
})
|
||||
|
||||
data1 := strings.NewReader("xxx")
|
||||
data2 := strings.NewReader("yyy")
|
||||
data3 := strings.NewReader("zzz")
|
||||
|
||||
mem.Store("/foobar1", data1)
|
||||
mem.Store("/foobar2", data2)
|
||||
mem.Store("/foobar3", data3)
|
||||
|
||||
cur, max := mem.Size()
|
||||
|
||||
assert.Equal(t, int64(9), cur)
|
||||
assert.Equal(t, int64(10), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(3), cur)
|
||||
|
||||
data4 := strings.NewReader("zzzzz")
|
||||
|
||||
size, _, _ := mem.Store("/foobar4", data4)
|
||||
|
||||
assert.Equal(t, int64(5), size)
|
||||
|
||||
cur, max = mem.Size()
|
||||
|
||||
assert.Equal(t, int64(8), cur)
|
||||
assert.Equal(t, int64(10), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(2), cur)
|
||||
}
|
||||
|
||||
func TestPurgeOrder(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 10,
|
||||
Purge: true,
|
||||
})
|
||||
|
||||
data1 := strings.NewReader("xxxxx")
|
||||
data2 := strings.NewReader("yyyyy")
|
||||
data3 := strings.NewReader("zzzzz")
|
||||
|
||||
mem.Store("/foobar1", data1)
|
||||
time.Sleep(1 * time.Second)
|
||||
mem.Store("/foobar2", data2)
|
||||
time.Sleep(1 * time.Second)
|
||||
mem.Store("/foobar3", data3)
|
||||
|
||||
file := mem.Open("/foobar1")
|
||||
|
||||
assert.Nil(t, file)
|
||||
}
|
||||
|
||||
func TestList(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 10,
|
||||
Purge: false,
|
||||
})
|
||||
|
||||
data1 := strings.NewReader("a")
|
||||
data2 := strings.NewReader("bb")
|
||||
data3 := strings.NewReader("ccc")
|
||||
data4 := strings.NewReader("dddd")
|
||||
|
||||
mem.Store("/foobar1", data1)
|
||||
mem.Store("/foobar2", data2)
|
||||
mem.Store("/foobar3", data3)
|
||||
mem.Store("/foobar4", data4)
|
||||
|
||||
cur, max := mem.Size()
|
||||
|
||||
assert.Equal(t, int64(10), cur)
|
||||
assert.Equal(t, int64(10), max)
|
||||
|
||||
cur = mem.Files()
|
||||
|
||||
assert.Equal(t, int64(4), cur)
|
||||
|
||||
files := mem.List("")
|
||||
|
||||
assert.Equal(t, 4, len(files))
|
||||
}
|
||||
|
||||
func TestData(t *testing.T) {
|
||||
mem := NewMemFilesystem(MemConfig{
|
||||
Size: 10,
|
||||
Purge: false,
|
||||
})
|
||||
|
||||
data := "gduwotoxqb"
|
||||
|
||||
data1 := strings.NewReader(data)
|
||||
|
||||
mem.Store("/foobar", data1)
|
||||
|
||||
file := mem.Open("/foobar")
|
||||
|
||||
data2 := make([]byte, len(data)+1)
|
||||
n, _ := file.Read(data2)
|
||||
|
||||
assert.Equal(t, len(data), n)
|
||||
assert.Equal(t, []byte(data), data2[:n])
|
||||
require.ElementsMatch(t, []string{
|
||||
"/disk.go",
|
||||
"/fs_test.go",
|
||||
"/fs.go",
|
||||
"/mem_test.go",
|
||||
"/mem.go",
|
||||
"/readonly_test.go",
|
||||
"/readonly.go",
|
||||
"/s3.go",
|
||||
"/sized_test.go",
|
||||
"/sized.go",
|
||||
}, names)
|
||||
}
|
||||
|
||||
54
io/fs/readonly.go
Normal file
54
io/fs/readonly.go
Normal file
@@ -0,0 +1,54 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
type readOnlyFilesystem struct {
|
||||
Filesystem
|
||||
}
|
||||
|
||||
func NewReadOnlyFilesystem(fs Filesystem) (Filesystem, error) {
|
||||
r := &readOnlyFilesystem{
|
||||
Filesystem: fs,
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (r *readOnlyFilesystem) Symlink(oldname, newname string) error {
|
||||
return os.ErrPermission
|
||||
}
|
||||
|
||||
func (r *readOnlyFilesystem) WriteFileReader(path string, rd io.Reader) (int64, bool, error) {
|
||||
return -1, false, os.ErrPermission
|
||||
}
|
||||
|
||||
func (r *readOnlyFilesystem) WriteFile(path string, data []byte) (int64, bool, error) {
|
||||
return -1, false, os.ErrPermission
|
||||
}
|
||||
|
||||
func (r *readOnlyFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) {
|
||||
return -1, false, os.ErrPermission
|
||||
}
|
||||
|
||||
func (r *readOnlyFilesystem) MkdirAll(path string, perm os.FileMode) error {
|
||||
return os.ErrPermission
|
||||
}
|
||||
|
||||
func (r *readOnlyFilesystem) Remove(path string) int64 {
|
||||
return -1
|
||||
}
|
||||
|
||||
func (r *readOnlyFilesystem) RemoveAll() int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *readOnlyFilesystem) Purge(size int64) int64 {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *readOnlyFilesystem) Resize(size int64) error {
|
||||
return os.ErrPermission
|
||||
}
|
||||
50
io/fs/readonly_test.go
Normal file
50
io/fs/readonly_test.go
Normal file
@@ -0,0 +1,50 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestReadOnly(t *testing.T) {
|
||||
mem, err := NewMemFilesystemFromDir(".", MemConfig{})
|
||||
require.NoError(t, err)
|
||||
|
||||
ro, err := NewReadOnlyFilesystem(mem)
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ro.Symlink("/readonly.go", "/foobar.go")
|
||||
require.Error(t, err)
|
||||
|
||||
_, _, err = ro.WriteFile("/readonly.go", []byte("foobar"))
|
||||
require.Error(t, err)
|
||||
|
||||
_, _, err = ro.WriteFileReader("/readonly.go", strings.NewReader("foobar"))
|
||||
require.Error(t, err)
|
||||
|
||||
_, _, err = ro.WriteFileSafe("/readonly.go", []byte("foobar"))
|
||||
require.Error(t, err)
|
||||
|
||||
err = ro.MkdirAll("/foobar/baz", 0700)
|
||||
require.Error(t, err)
|
||||
|
||||
res := ro.Remove("/readonly.go")
|
||||
require.Equal(t, int64(-1), res)
|
||||
|
||||
res = ro.RemoveAll()
|
||||
require.Equal(t, int64(0), res)
|
||||
|
||||
rop, ok := ro.(PurgeFilesystem)
|
||||
require.True(t, ok, "must implement PurgeFilesystem")
|
||||
|
||||
size, _ := ro.Size()
|
||||
res = rop.Purge(size)
|
||||
require.Equal(t, int64(0), res)
|
||||
|
||||
ros, ok := ro.(SizedFilesystem)
|
||||
require.True(t, ok, "must implement SizedFilesystem")
|
||||
|
||||
err = ros.Resize(100)
|
||||
require.Error(t, err)
|
||||
}
|
||||
649
io/fs/s3.go
Normal file
649
io/fs/s3.go
Normal file
@@ -0,0 +1,649 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/core/v16/glob"
|
||||
"github.com/datarhei/core/v16/log"
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
)
|
||||
|
||||
type S3Config struct {
|
||||
// Namee is the name of the filesystem
|
||||
Name string
|
||||
Endpoint string
|
||||
AccessKeyID string
|
||||
SecretAccessKey string
|
||||
Region string
|
||||
Bucket string
|
||||
UseSSL bool
|
||||
|
||||
Logger log.Logger
|
||||
}
|
||||
|
||||
type s3Filesystem struct {
|
||||
metadata map[string]string
|
||||
metaLock sync.RWMutex
|
||||
|
||||
name string
|
||||
|
||||
endpoint string
|
||||
accessKeyID string
|
||||
secretAccessKey string
|
||||
region string
|
||||
bucket string
|
||||
useSSL bool
|
||||
|
||||
client *minio.Client
|
||||
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
var fakeDirEntry = "..."
|
||||
|
||||
func NewS3Filesystem(config S3Config) (Filesystem, error) {
|
||||
fs := &s3Filesystem{
|
||||
metadata: make(map[string]string),
|
||||
name: config.Name,
|
||||
endpoint: config.Endpoint,
|
||||
accessKeyID: config.AccessKeyID,
|
||||
secretAccessKey: config.SecretAccessKey,
|
||||
region: config.Region,
|
||||
bucket: config.Bucket,
|
||||
useSSL: config.UseSSL,
|
||||
logger: config.Logger,
|
||||
}
|
||||
|
||||
if fs.logger == nil {
|
||||
fs.logger = log.New("")
|
||||
}
|
||||
|
||||
client, err := minio.New(fs.endpoint, &minio.Options{
|
||||
Creds: credentials.NewStaticV4(fs.accessKeyID, fs.secretAccessKey, ""),
|
||||
Region: fs.region,
|
||||
Secure: fs.useSSL,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't connect to s3 endpoint %s: %w", fs.endpoint, err)
|
||||
}
|
||||
|
||||
fs.logger = fs.logger.WithFields(log.Fields{
|
||||
"name": fs.name,
|
||||
"type": "s3",
|
||||
"bucket": fs.bucket,
|
||||
"region": fs.region,
|
||||
"endpoint": fs.endpoint,
|
||||
})
|
||||
|
||||
fs.logger.Debug().Log("Connected")
|
||||
|
||||
ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Second))
|
||||
defer cancel()
|
||||
|
||||
exists, err := client.BucketExists(ctx, fs.bucket)
|
||||
if err != nil {
|
||||
fs.logger.WithError(err).Log("Can't access bucket")
|
||||
return nil, fmt.Errorf("can't access bucket %s: %w", fs.bucket, err)
|
||||
}
|
||||
|
||||
if exists {
|
||||
fs.logger.Debug().Log("Bucket already exists")
|
||||
} else {
|
||||
fs.logger.Debug().Log("Bucket doesn't exists")
|
||||
err = client.MakeBucket(ctx, fs.bucket, minio.MakeBucketOptions{Region: fs.region})
|
||||
if err != nil {
|
||||
fs.logger.WithError(err).Log("Can't create bucket")
|
||||
return nil, fmt.Errorf("can't create bucket %s: %w", fs.bucket, err)
|
||||
} else {
|
||||
fs.logger.Debug().Log("Bucket created")
|
||||
}
|
||||
}
|
||||
|
||||
fs.client = client
|
||||
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) Name() string {
|
||||
return fs.name
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) Type() string {
|
||||
return "s3"
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) Metadata(key string) string {
|
||||
fs.metaLock.RLock()
|
||||
defer fs.metaLock.RUnlock()
|
||||
|
||||
return fs.metadata[key]
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) SetMetadata(key, data string) {
|
||||
fs.metaLock.Lock()
|
||||
defer fs.metaLock.Unlock()
|
||||
|
||||
fs.metadata[key] = data
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) Size() (int64, int64) {
|
||||
size := int64(0)
|
||||
|
||||
files := fs.List("/", "")
|
||||
|
||||
for _, file := range files {
|
||||
size += file.Size()
|
||||
}
|
||||
|
||||
return size, -1
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) Files() int64 {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
|
||||
WithVersions: false,
|
||||
WithMetadata: false,
|
||||
Prefix: "",
|
||||
Recursive: true,
|
||||
MaxKeys: 0,
|
||||
StartAfter: "",
|
||||
UseV1: false,
|
||||
})
|
||||
|
||||
nfiles := int64(0)
|
||||
|
||||
for object := range ch {
|
||||
if object.Err != nil {
|
||||
fs.logger.WithError(object.Err).Log("Listing object failed")
|
||||
}
|
||||
|
||||
if strings.HasSuffix("/"+object.Key, "/"+fakeDirEntry) {
|
||||
// Skip fake entries (see MkdirAll)
|
||||
continue
|
||||
}
|
||||
|
||||
nfiles++
|
||||
}
|
||||
|
||||
return nfiles
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) Symlink(oldname, newname string) error {
|
||||
return fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) Stat(path string) (FileInfo, error) {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
if len(path) == 0 {
|
||||
return &s3FileInfo{
|
||||
name: "/",
|
||||
size: 0,
|
||||
dir: true,
|
||||
lastModified: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
object, err := fs.client.GetObject(ctx, fs.bucket, path, minio.GetObjectOptions{})
|
||||
if err != nil {
|
||||
if fs.isDir(path) {
|
||||
return &s3FileInfo{
|
||||
name: "/" + path,
|
||||
size: 0,
|
||||
dir: true,
|
||||
lastModified: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
fs.logger.Debug().WithField("key", path).WithError(err).Log("Not found")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer object.Close()
|
||||
|
||||
stat, err := object.Stat()
|
||||
if err != nil {
|
||||
if fs.isDir(path) {
|
||||
return &s3FileInfo{
|
||||
name: "/" + path,
|
||||
size: 0,
|
||||
dir: true,
|
||||
lastModified: time.Now(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
fs.logger.Debug().WithField("key", path).WithError(err).Log("Stat failed")
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &s3FileInfo{
|
||||
name: "/" + stat.Key,
|
||||
size: stat.Size,
|
||||
lastModified: stat.LastModified,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) Open(path string) File {
|
||||
path = fs.cleanPath(path)
|
||||
ctx := context.Background()
|
||||
|
||||
object, err := fs.client.GetObject(ctx, fs.bucket, path, minio.GetObjectOptions{})
|
||||
if err != nil {
|
||||
fs.logger.Debug().WithField("key", path).Log("Not found")
|
||||
return nil
|
||||
}
|
||||
|
||||
stat, err := object.Stat()
|
||||
if err != nil {
|
||||
fs.logger.Debug().WithField("key", path).Log("Stat failed")
|
||||
return nil
|
||||
}
|
||||
|
||||
file := &s3File{
|
||||
data: object,
|
||||
name: "/" + stat.Key,
|
||||
size: stat.Size,
|
||||
lastModified: stat.LastModified,
|
||||
}
|
||||
|
||||
fs.logger.Debug().WithField("key", stat.Key).Log("Opened")
|
||||
|
||||
return file
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) ReadFile(path string) ([]byte, error) {
|
||||
path = fs.cleanPath(path)
|
||||
file := fs.Open(path)
|
||||
if file == nil {
|
||||
return nil, os.ErrNotExist
|
||||
}
|
||||
|
||||
defer file.Close()
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
_, err := buf.ReadFrom(file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) write(path string, r io.Reader) (int64, bool, error) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
overwrite := false
|
||||
|
||||
_, err := fs.client.StatObject(ctx, fs.bucket, path, minio.StatObjectOptions{})
|
||||
if err == nil {
|
||||
overwrite = true
|
||||
}
|
||||
|
||||
info, err := fs.client.PutObject(ctx, fs.bucket, path, r, -1, minio.PutObjectOptions{
|
||||
UserMetadata: map[string]string{},
|
||||
UserTags: map[string]string{},
|
||||
Progress: nil,
|
||||
ContentType: "",
|
||||
ContentEncoding: "",
|
||||
ContentDisposition: "",
|
||||
ContentLanguage: "",
|
||||
CacheControl: "",
|
||||
Mode: "",
|
||||
RetainUntilDate: time.Time{},
|
||||
ServerSideEncryption: nil,
|
||||
NumThreads: 0,
|
||||
StorageClass: "",
|
||||
WebsiteRedirectLocation: "",
|
||||
PartSize: 0,
|
||||
LegalHold: "",
|
||||
SendContentMd5: false,
|
||||
DisableContentSha256: false,
|
||||
DisableMultipart: false,
|
||||
Internal: minio.AdvancedPutOptions{},
|
||||
})
|
||||
if err != nil {
|
||||
fs.logger.WithError(err).WithField("key", path).Log("Failed to store file")
|
||||
return -1, false, err
|
||||
}
|
||||
|
||||
fs.logger.Debug().WithFields(log.Fields{
|
||||
"key": path,
|
||||
"overwrite": overwrite,
|
||||
}).Log("Stored")
|
||||
|
||||
return info.Size, !overwrite, nil
|
||||
}
|
||||
|
||||
// WriteFileReader stores the data from r under the cleaned path. It
// returns the stored size in bytes and whether the object was newly
// created.
func (fs *s3Filesystem) WriteFileReader(path string, r io.Reader) (int64, bool, error) {
	path = fs.cleanPath(path)
	return fs.write(path, r)
}
|
||||
|
||||
func (fs *s3Filesystem) WriteFile(path string, data []byte) (int64, bool, error) {
|
||||
return fs.WriteFileReader(path, bytes.NewBuffer(data))
|
||||
}
|
||||
|
||||
// WriteFileSafe stores data under path. It behaves exactly like
// WriteFile — presumably no separate atomic write path is needed
// because an S3 PUT replaces the object in a single step; confirm.
func (fs *s3Filesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) {
	return fs.WriteFileReader(path, bytes.NewBuffer(data))
}
|
||||
|
||||
func (fs *s3Filesystem) Rename(src, dst string) error {
|
||||
src = fs.cleanPath(src)
|
||||
dst = fs.cleanPath(dst)
|
||||
|
||||
err := fs.Copy(src, dst)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
res := fs.Remove(src)
|
||||
if res == -1 {
|
||||
return fmt.Errorf("failed to remove source file: %s", src)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) Copy(src, dst string) error {
|
||||
src = fs.cleanPath(src)
|
||||
dst = fs.cleanPath(dst)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
_, err := fs.client.CopyObject(ctx, minio.CopyDestOptions{
|
||||
Bucket: fs.bucket,
|
||||
Object: dst,
|
||||
}, minio.CopySrcOptions{
|
||||
Bucket: fs.bucket,
|
||||
Object: src,
|
||||
})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) MkdirAll(path string, perm os.FileMode) error {
|
||||
if path == "/" {
|
||||
return nil
|
||||
}
|
||||
|
||||
info, err := fs.Stat(path)
|
||||
if err == nil {
|
||||
if !info.IsDir() {
|
||||
return os.ErrExist
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
path = filepath.Join(path, fakeDirEntry)
|
||||
|
||||
_, _, err = fs.write(path, strings.NewReader(""))
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create directory")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) Remove(path string) int64 {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
stat, err := fs.client.StatObject(ctx, fs.bucket, path, minio.StatObjectOptions{})
|
||||
if err != nil {
|
||||
fs.logger.Debug().WithField("key", path).Log("Not found")
|
||||
return -1
|
||||
}
|
||||
|
||||
err = fs.client.RemoveObject(ctx, fs.bucket, path, minio.RemoveObjectOptions{
|
||||
GovernanceBypass: true,
|
||||
})
|
||||
if err != nil {
|
||||
fs.logger.WithError(err).WithField("key", stat.Key).Log("Failed to delete file")
|
||||
return -1
|
||||
}
|
||||
|
||||
fs.logger.Debug().WithField("key", stat.Key).Log("Deleted")
|
||||
|
||||
return stat.Size
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) RemoveAll() int64 {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
totalSize := int64(0)
|
||||
|
||||
objectsCh := make(chan minio.ObjectInfo)
|
||||
|
||||
// Send object names that are needed to be removed to objectsCh
|
||||
go func() {
|
||||
defer close(objectsCh)
|
||||
|
||||
for object := range fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
|
||||
Recursive: true,
|
||||
}) {
|
||||
if object.Err != nil {
|
||||
fs.logger.WithError(object.Err).Log("Listing object failed")
|
||||
continue
|
||||
}
|
||||
totalSize += object.Size
|
||||
objectsCh <- object
|
||||
}
|
||||
}()
|
||||
|
||||
for err := range fs.client.RemoveObjects(context.Background(), fs.bucket, objectsCh, minio.RemoveObjectsOptions{
|
||||
GovernanceBypass: true,
|
||||
}) {
|
||||
fs.logger.WithError(err.Err).WithField("key", err.ObjectName).Log("Deleting object failed")
|
||||
}
|
||||
|
||||
fs.logger.Debug().Log("Deleted all files")
|
||||
|
||||
return totalSize
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) List(path, pattern string) []FileInfo {
|
||||
path = fs.cleanPath(path)
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
|
||||
WithVersions: false,
|
||||
WithMetadata: false,
|
||||
Prefix: path,
|
||||
Recursive: true,
|
||||
MaxKeys: 0,
|
||||
StartAfter: "",
|
||||
UseV1: false,
|
||||
})
|
||||
|
||||
files := []FileInfo{}
|
||||
|
||||
for object := range ch {
|
||||
if object.Err != nil {
|
||||
fs.logger.WithError(object.Err).Log("Listing object failed")
|
||||
continue
|
||||
}
|
||||
|
||||
key := "/" + object.Key
|
||||
if strings.HasSuffix(key, "/"+fakeDirEntry) {
|
||||
// filter out fake directory entries (see MkdirAll)
|
||||
continue
|
||||
}
|
||||
|
||||
if len(pattern) != 0 {
|
||||
if ok, _ := glob.Match(pattern, key, '/'); !ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
f := &s3FileInfo{
|
||||
name: key,
|
||||
size: object.Size,
|
||||
lastModified: object.LastModified,
|
||||
}
|
||||
|
||||
files = append(files, f)
|
||||
}
|
||||
|
||||
return files
|
||||
}
|
||||
|
||||
// LookPath searches for an executable-like file, mirroring the
// semantics of exec.LookPath on top of this filesystem. A name
// containing a slash is checked directly; otherwise each directory in
// the PATH environment variable is probed.
func (fs *s3Filesystem) LookPath(file string) (string, error) {
	if strings.Contains(file, "/") {
		file = fs.cleanPath(file)
		info, err := fs.Stat(file)
		if err == nil {
			if !info.Mode().IsRegular() {
				return file, os.ErrNotExist
			}
			return file, nil
		}
		return "", os.ErrNotExist
	}
	path := os.Getenv("PATH")
	for _, dir := range filepath.SplitList(path) {
		if dir == "" {
			// Unix shell semantics: path element "" means "."
			dir = "."
		}
		path := filepath.Join(dir, file)
		path = fs.cleanPath(path)
		// NOTE(review): cleanPath strips the leading "/", so
		// filepath.IsAbs(path) is always false here and every PATH hit
		// is returned with os.ErrNotExist — confirm this mirrors
		// exec.LookPath's relative-path rejection intentionally.
		if info, err := fs.Stat(path); err == nil {
			if !filepath.IsAbs(path) {
				return path, os.ErrNotExist
			}
			if !info.Mode().IsRegular() {
				return path, os.ErrNotExist
			}
			return path, nil
		}
	}
	return "", os.ErrNotExist
}
|
||||
|
||||
func (fs *s3Filesystem) isDir(path string) bool {
|
||||
if !strings.HasSuffix(path, "/") {
|
||||
path = path + "/"
|
||||
}
|
||||
|
||||
if path == "/" {
|
||||
return true
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
|
||||
WithVersions: false,
|
||||
WithMetadata: false,
|
||||
Prefix: path,
|
||||
Recursive: true,
|
||||
MaxKeys: 1,
|
||||
StartAfter: "",
|
||||
UseV1: false,
|
||||
})
|
||||
|
||||
files := uint64(0)
|
||||
|
||||
for object := range ch {
|
||||
if object.Err != nil {
|
||||
fs.logger.WithError(object.Err).Log("Listing object failed")
|
||||
continue
|
||||
}
|
||||
|
||||
files++
|
||||
}
|
||||
|
||||
return files > 0
|
||||
}
|
||||
|
||||
func (fs *s3Filesystem) cleanPath(path string) string {
|
||||
if !filepath.IsAbs(path) {
|
||||
path = filepath.Join("/", path)
|
||||
}
|
||||
|
||||
path = strings.TrimSuffix(path, "/"+fakeDirEntry)
|
||||
|
||||
return filepath.Join("/", filepath.Clean(path))[1:]
|
||||
}
|
||||
|
||||
// s3FileInfo implements the FileInfo interface for objects stored in S3.
type s3FileInfo struct {
	// name is the object key with a leading "/".
	name string
	// size is the object size in bytes.
	size int64
	// dir marks the entry as a virtual directory.
	dir bool
	// lastModified is the object's last modification time.
	lastModified time.Time
}
|
||||
|
||||
// Name returns the name (object key) of the file.
func (f *s3FileInfo) Name() string {
	return f.name
}
|
||||
|
||||
// Size returns the size of the file in bytes.
func (f *s3FileInfo) Size() int64 {
	return f.size
}
|
||||
|
||||
func (f *s3FileInfo) Mode() os.FileMode {
|
||||
return fs.FileMode(fs.ModePerm)
|
||||
}
|
||||
|
||||
// ModTime returns the time of the last modification of the object.
func (f *s3FileInfo) ModTime() time.Time {
	return f.lastModified
}
|
||||
|
||||
// IsLink always reports false: S3 objects have no notion of symlinks.
func (f *s3FileInfo) IsLink() (string, bool) {
	return "", false
}
|
||||
|
||||
// IsDir reports whether the entry represents a virtual directory.
func (f *s3FileInfo) IsDir() bool {
	return f.dir
}
|
||||
|
||||
// s3File represents an opened S3 object. It implements the File
// interface by wrapping the object's data stream.
type s3File struct {
	// data is the underlying object stream; it must be closed by the caller.
	data io.ReadCloser
	// name is the object key with a leading "/".
	name string
	// size is the object size in bytes.
	size int64
	// lastModified is the object's last modification time.
	lastModified time.Time
}
|
||||
|
||||
// Read reads from the underlying object stream into p.
func (f *s3File) Read(p []byte) (int, error) {
	return f.data.Read(p)
}
|
||||
|
||||
// Close closes the underlying object stream.
func (f *s3File) Close() error {
	return f.data.Close()
}
|
||||
|
||||
// Name returns the name (object key) of the file.
func (f *s3File) Name() string {
	return f.name
}
|
||||
|
||||
// Stat returns the file information of the opened object. The dir flag
// is left at its zero value (false) because an opened s3File always
// represents a regular object.
func (f *s3File) Stat() (FileInfo, error) {
	return &s3FileInfo{
		name: f.name,
		size: f.size,
		lastModified: f.lastModified,
	}, nil
}
|
||||
168
io/fs/sized.go
Normal file
168
io/fs/sized.go
Normal file
@@ -0,0 +1,168 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// SizedFilesystem is a Filesystem with a maximum total size that can be
// changed at runtime.
type SizedFilesystem interface {
	Filesystem

	// Resize resizes the filesystem to the new size. Files may need to be deleted.
	Resize(size int64) error
}
|
||||
|
||||
// PurgeFilesystem is implemented by filesystems that can free up space
// on demand by deleting files.
type PurgeFilesystem interface {
	// Purge will free up at least size number of bytes and returns the actual
	// freed space in bytes.
	Purge(size int64) int64
}
|
||||
|
||||
// sizedFilesystem wraps a Filesystem and enforces a maximum total size
// on writes. Optionally, old files are purged automatically to make
// room for new ones.
type sizedFilesystem struct {
	Filesystem

	// Size is the capacity of the filesystem in bytes
	maxSize int64

	// Set true to automatically delete the oldest files until there's
	// enough space to store a new file
	purge bool
}
|
||||
|
||||
// Compile-time check that sizedFilesystem satisfies PurgeFilesystem.
var _ PurgeFilesystem = &sizedFilesystem{}
|
||||
|
||||
func NewSizedFilesystem(fs Filesystem, maxSize int64, purge bool) (SizedFilesystem, error) {
|
||||
r := &sizedFilesystem{
|
||||
Filesystem: fs,
|
||||
maxSize: maxSize,
|
||||
purge: purge,
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
|
||||
func (r *sizedFilesystem) Size() (int64, int64) {
|
||||
currentSize, _ := r.Filesystem.Size()
|
||||
|
||||
return currentSize, r.maxSize
|
||||
}
|
||||
|
||||
func (r *sizedFilesystem) Resize(size int64) error {
|
||||
currentSize, _ := r.Size()
|
||||
if size >= currentSize {
|
||||
// If the new size is the same or larger than the current size,
|
||||
// nothing to do.
|
||||
r.maxSize = size
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the new size is less than the current size, purge some files.
|
||||
r.Purge(currentSize - size)
|
||||
|
||||
r.maxSize = size
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *sizedFilesystem) WriteFileReader(path string, rd io.Reader) (int64, bool, error) {
|
||||
currentSize, maxSize := r.Size()
|
||||
if maxSize <= 0 {
|
||||
return r.Filesystem.WriteFileReader(path, rd)
|
||||
}
|
||||
|
||||
data := bytes.Buffer{}
|
||||
size, err := data.ReadFrom(rd)
|
||||
if err != nil {
|
||||
return -1, false, err
|
||||
}
|
||||
|
||||
// reject if the new file is larger than the available space
|
||||
if size > maxSize {
|
||||
return -1, false, fmt.Errorf("File is too big")
|
||||
}
|
||||
|
||||
// Calculate the new size of the filesystem
|
||||
newSize := currentSize + size
|
||||
|
||||
// If the the new size is larger than the allowed size, we have to free
|
||||
// some space.
|
||||
if newSize > maxSize {
|
||||
if !r.purge {
|
||||
return -1, false, fmt.Errorf("not enough space on device")
|
||||
}
|
||||
|
||||
if r.Purge(size) < size {
|
||||
return -1, false, fmt.Errorf("not enough space on device")
|
||||
}
|
||||
}
|
||||
|
||||
return r.Filesystem.WriteFileReader(path, &data)
|
||||
}
|
||||
|
||||
func (r *sizedFilesystem) WriteFile(path string, data []byte) (int64, bool, error) {
|
||||
return r.WriteFileReader(path, bytes.NewBuffer(data))
|
||||
}
|
||||
|
||||
func (r *sizedFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) {
|
||||
currentSize, maxSize := r.Size()
|
||||
if maxSize <= 0 {
|
||||
return r.Filesystem.WriteFile(path, data)
|
||||
}
|
||||
|
||||
size := int64(len(data))
|
||||
|
||||
// reject if the new file is larger than the available space
|
||||
if size > maxSize {
|
||||
return -1, false, fmt.Errorf("File is too big")
|
||||
}
|
||||
|
||||
// Calculate the new size of the filesystem
|
||||
newSize := currentSize + size
|
||||
|
||||
// If the the new size is larger than the allowed size, we have to free
|
||||
// some space.
|
||||
if newSize > maxSize {
|
||||
if !r.purge {
|
||||
return -1, false, fmt.Errorf("not enough space on device")
|
||||
}
|
||||
|
||||
if r.Purge(size) < size {
|
||||
return -1, false, fmt.Errorf("not enough space on device")
|
||||
}
|
||||
}
|
||||
|
||||
return r.Filesystem.WriteFileSafe(path, data)
|
||||
}
|
||||
|
||||
func (r *sizedFilesystem) Purge(size int64) int64 {
|
||||
if purger, ok := r.Filesystem.(PurgeFilesystem); ok {
|
||||
return purger.Purge(size)
|
||||
}
|
||||
|
||||
return 0
|
||||
/*
|
||||
files := r.Filesystem.List("/", "")
|
||||
|
||||
sort.Slice(files, func(i, j int) bool {
|
||||
return files[i].ModTime().Before(files[j].ModTime())
|
||||
})
|
||||
|
||||
var freed int64 = 0
|
||||
|
||||
for _, f := range files {
|
||||
r.Filesystem.Remove(f.Name())
|
||||
size -= f.Size()
|
||||
freed += f.Size()
|
||||
r.currentSize -= f.Size()
|
||||
|
||||
if size <= 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
files = nil
|
||||
|
||||
return freed
|
||||
*/
|
||||
}
|
||||
350
io/fs/sized_test.go
Normal file
350
io/fs/sized_test.go
Normal file
@@ -0,0 +1,350 @@
|
||||
package fs
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// newMemFS returns a fresh in-memory filesystem for use in tests.
func newMemFS() Filesystem {
	mem, _ := NewMemFilesystem(MemConfig{})

	return mem
}
|
||||
|
||||
// TestNewSized verifies that a freshly created sized filesystem is
// empty and reports the configured maximum size.
func TestNewSized(t *testing.T) {
	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)

	cur, max := fs.Size()

	require.Equal(t, int64(0), cur)
	require.Equal(t, int64(10), max)

	cur = fs.Files()

	require.Equal(t, int64(0), cur)
}
|
||||
|
||||
// TestSizedResize verifies that growing an empty filesystem changes the
// reported maximum size without touching the used size.
func TestSizedResize(t *testing.T) {
	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)

	cur, max := fs.Size()

	require.Equal(t, int64(0), cur)
	require.Equal(t, int64(10), max)

	err := fs.Resize(20)
	require.NoError(t, err)

	cur, max = fs.Size()

	require.Equal(t, int64(0), cur)
	require.Equal(t, int64(20), max)
}
|
||||
|
||||
// TestSizedResizePurge verifies that shrinking below the used size
// deletes files — note this happens even though the filesystem was
// created with purge=false; the purge flag only applies to writes.
func TestSizedResizePurge(t *testing.T) {
	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)

	cur, max := fs.Size()

	require.Equal(t, int64(0), cur)
	require.Equal(t, int64(10), max)

	fs.WriteFileReader("/foobar", strings.NewReader("xxxxxxxxxx"))

	cur, max = fs.Size()

	require.Equal(t, int64(10), cur)
	require.Equal(t, int64(10), max)

	err := fs.Resize(5)
	require.NoError(t, err)

	cur, max = fs.Size()

	require.Equal(t, int64(0), cur)
	require.Equal(t, int64(5), max)
}
|
||||
|
||||
// TestSizedWrite verifies that a write within the limit succeeds and
// that all three write variants fail once a write would exceed the
// limit with purging disabled.
func TestSizedWrite(t *testing.T) {
	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)

	cur, max := fs.Size()

	require.Equal(t, int64(0), cur)
	require.Equal(t, int64(10), max)

	size, created, err := fs.WriteFileReader("/foobar", strings.NewReader("xxxxx"))
	require.NoError(t, err)
	require.Equal(t, int64(5), size)
	require.Equal(t, true, created)

	cur, max = fs.Size()

	require.Equal(t, int64(5), cur)
	require.Equal(t, int64(10), max)

	_, _, err = fs.WriteFile("/foobaz", []byte("xxxxxx"))
	require.Error(t, err)

	_, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("xxxxxx"))
	require.Error(t, err)

	_, _, err = fs.WriteFileSafe("/foobaz", []byte("xxxxxx"))
	require.Error(t, err)
}
|
||||
|
||||
// TestSizedReplaceNoPurge verifies that overwriting an existing file
// with smaller content updates the used size and keeps the file count.
func TestSizedReplaceNoPurge(t *testing.T) {
	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)

	data := strings.NewReader("xxxxx")

	size, created, err := fs.WriteFileReader("/foobar", data)

	require.Nil(t, err)
	require.Equal(t, int64(5), size)
	require.Equal(t, true, created)

	cur, max := fs.Size()

	require.Equal(t, int64(5), cur)
	require.Equal(t, int64(10), max)

	cur = fs.Files()

	require.Equal(t, int64(1), cur)

	data = strings.NewReader("yyy")

	size, created, err = fs.WriteFileReader("/foobar", data)

	require.Nil(t, err)
	require.Equal(t, int64(3), size)
	require.Equal(t, false, created)

	cur, max = fs.Size()

	require.Equal(t, int64(3), cur)
	require.Equal(t, int64(10), max)

	cur = fs.Files()

	require.Equal(t, int64(1), cur)
}
|
||||
|
||||
// TestSizedReplacePurge verifies that overwriting a file with larger
// content triggers purging of other files when space runs out.
func TestSizedReplacePurge(t *testing.T) {
	fs, _ := NewSizedFilesystem(newMemFS(), 10, true)

	data1 := strings.NewReader("xxx")
	data2 := strings.NewReader("yyy")
	data3 := strings.NewReader("zzz")

	fs.WriteFileReader("/foobar1", data1)
	fs.WriteFileReader("/foobar2", data2)
	fs.WriteFileReader("/foobar3", data3)

	cur, max := fs.Size()

	require.Equal(t, int64(9), cur)
	require.Equal(t, int64(10), max)

	cur = fs.Files()

	require.Equal(t, int64(3), cur)

	data4 := strings.NewReader("zzzzz")

	size, _, _ := fs.WriteFileReader("/foobar1", data4)

	require.Equal(t, int64(5), size)

	cur, max = fs.Size()

	require.Equal(t, int64(8), cur)
	require.Equal(t, int64(10), max)

	cur = fs.Files()

	require.Equal(t, int64(2), cur)
}
|
||||
|
||||
// TestSizedReplaceUnlimited verifies that a negative maximum size means
// "no limit": writes and overwrites pass straight through.
func TestSizedReplaceUnlimited(t *testing.T) {
	fs, _ := NewSizedFilesystem(newMemFS(), -1, false)

	data := strings.NewReader("xxxxx")

	size, created, err := fs.WriteFileReader("/foobar", data)

	require.Nil(t, err)
	require.Equal(t, int64(5), size)
	require.Equal(t, true, created)

	cur, max := fs.Size()

	require.Equal(t, int64(5), cur)
	require.Equal(t, int64(-1), max)

	cur = fs.Files()

	require.Equal(t, int64(1), cur)

	data = strings.NewReader("yyy")

	size, created, err = fs.WriteFileReader("/foobar", data)

	require.Nil(t, err)
	require.Equal(t, int64(3), size)
	require.Equal(t, false, created)

	cur, max = fs.Size()

	require.Equal(t, int64(3), cur)
	require.Equal(t, int64(-1), max)

	cur = fs.Files()

	require.Equal(t, int64(1), cur)
}
|
||||
|
||||
// TestSizedTooBigNoPurge verifies that a file larger than the whole
// capacity is rejected.
func TestSizedTooBigNoPurge(t *testing.T) {
	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)

	data := strings.NewReader("xxxxxyyyyyz")

	size, _, err := fs.WriteFileReader("/foobar", data)
	require.Error(t, err)
	require.Equal(t, int64(-1), size)
}
|
||||
|
||||
// TestSizedTooBigPurge verifies that a file larger than the whole
// capacity is rejected even with purging enabled, and that no existing
// files are deleted in the process.
func TestSizedTooBigPurge(t *testing.T) {
	fs, _ := NewSizedFilesystem(newMemFS(), 10, true)

	data1 := strings.NewReader("xxxxx")
	data2 := strings.NewReader("yyyyy")

	fs.WriteFileReader("/foobar1", data1)
	fs.WriteFileReader("/foobar2", data2)

	data := strings.NewReader("xxxxxyyyyyz")

	size, _, err := fs.WriteFileReader("/foobar", data)
	require.Error(t, err)
	require.Equal(t, int64(-1), size)

	require.Equal(t, int64(2), fs.Files())
}
|
||||
|
||||
// TestSizedFullSpaceNoPurge verifies that a write to a full filesystem
// fails when purging is disabled.
func TestSizedFullSpaceNoPurge(t *testing.T) {
	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)

	data1 := strings.NewReader("xxxxx")
	data2 := strings.NewReader("yyyyy")

	fs.WriteFileReader("/foobar1", data1)
	fs.WriteFileReader("/foobar2", data2)

	cur, max := fs.Size()

	require.Equal(t, int64(10), cur)
	require.Equal(t, int64(10), max)

	cur = fs.Files()

	require.Equal(t, int64(2), cur)

	data3 := strings.NewReader("zzzzz")

	size, _, err := fs.WriteFileReader("/foobar3", data3)
	require.Error(t, err)
	require.Equal(t, int64(-1), size)
}
|
||||
|
||||
// TestSizedFullSpacePurge verifies that a write to a full filesystem
// succeeds with purging enabled: one old file is evicted to make room.
func TestSizedFullSpacePurge(t *testing.T) {
	fs, _ := NewSizedFilesystem(newMemFS(), 10, true)

	data1 := strings.NewReader("xxxxx")
	data2 := strings.NewReader("yyyyy")

	fs.WriteFileReader("/foobar1", data1)
	fs.WriteFileReader("/foobar2", data2)

	cur, max := fs.Size()

	require.Equal(t, int64(10), cur)
	require.Equal(t, int64(10), max)

	cur = fs.Files()

	require.Equal(t, int64(2), cur)

	data3 := strings.NewReader("zzzzz")

	size, _, _ := fs.WriteFileReader("/foobar3", data3)

	require.Equal(t, int64(5), size)

	cur, max = fs.Size()

	require.Equal(t, int64(10), cur)
	require.Equal(t, int64(10), max)

	cur = fs.Files()

	require.Equal(t, int64(2), cur)
}
|
||||
|
||||
// TestSizedFullSpacePurgeMulti verifies that multiple small files are
// evicted when a single new file needs more space than any one of them
// frees.
func TestSizedFullSpacePurgeMulti(t *testing.T) {
	fs, _ := NewSizedFilesystem(newMemFS(), 10, true)

	data1 := strings.NewReader("xxx")
	data2 := strings.NewReader("yyy")
	data3 := strings.NewReader("zzz")

	fs.WriteFileReader("/foobar1", data1)
	fs.WriteFileReader("/foobar2", data2)
	fs.WriteFileReader("/foobar3", data3)

	cur, max := fs.Size()

	require.Equal(t, int64(9), cur)
	require.Equal(t, int64(10), max)

	cur = fs.Files()

	require.Equal(t, int64(3), cur)

	data4 := strings.NewReader("zzzzz")

	size, _, _ := fs.WriteFileReader("/foobar4", data4)

	require.Equal(t, int64(5), size)

	cur, max = fs.Size()

	require.Equal(t, int64(8), cur)
	require.Equal(t, int64(10), max)

	cur = fs.Files()

	require.Equal(t, int64(2), cur)
}
|
||||
|
||||
// TestSizedPurgeOrder verifies that the oldest file is purged first.
// The sleeps give the three files distinct modification times so the
// eviction order is deterministic.
func TestSizedPurgeOrder(t *testing.T) {
	fs, _ := NewSizedFilesystem(newMemFS(), 10, true)

	data1 := strings.NewReader("xxxxx")
	data2 := strings.NewReader("yyyyy")
	data3 := strings.NewReader("zzzzz")

	fs.WriteFileReader("/foobar1", data1)
	time.Sleep(1 * time.Second)
	fs.WriteFileReader("/foobar2", data2)
	time.Sleep(1 * time.Second)
	fs.WriteFileReader("/foobar3", data3)

	// The oldest file must have been evicted to fit the third one.
	file := fs.Open("/foobar1")

	require.Nil(t, file)
}
|
||||
@@ -103,7 +103,6 @@ type Logger interface {
|
||||
type logger struct {
|
||||
output Writer
|
||||
component string
|
||||
topics map[string]struct{}
|
||||
}
|
||||
|
||||
// New returns an implementation of the Logger interface.
|
||||
@@ -121,14 +120,6 @@ func (l *logger) clone() *logger {
|
||||
component: l.component,
|
||||
}
|
||||
|
||||
if len(l.topics) != 0 {
|
||||
clone.topics = make(map[string]struct{})
|
||||
|
||||
for topic := range l.topics {
|
||||
clone.topics[topic] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return clone
|
||||
}
|
||||
|
||||
|
||||
@@ -5,15 +5,15 @@ import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLoglevelNames(t *testing.T) {
|
||||
assert.Equal(t, "DEBUG", Ldebug.String())
|
||||
assert.Equal(t, "ERROR", Lerror.String())
|
||||
assert.Equal(t, "WARN", Lwarn.String())
|
||||
assert.Equal(t, "INFO", Linfo.String())
|
||||
assert.Equal(t, `SILENT`, Lsilent.String())
|
||||
require.Equal(t, "DEBUG", Ldebug.String())
|
||||
require.Equal(t, "ERROR", Lerror.String())
|
||||
require.Equal(t, "WARN", Lwarn.String())
|
||||
require.Equal(t, "INFO", Linfo.String())
|
||||
require.Equal(t, `SILENT`, Lsilent.String())
|
||||
}
|
||||
|
||||
func TestLogColorToNotTTY(t *testing.T) {
|
||||
@@ -23,7 +23,7 @@ func TestLogColorToNotTTY(t *testing.T) {
|
||||
w := NewConsoleWriter(writer, Linfo, true).(*syncWriter)
|
||||
formatter := w.writer.(*consoleWriter).formatter.(*consoleFormatter)
|
||||
|
||||
assert.NotEqual(t, true, formatter.color, "Color should not be used on a buffer logger")
|
||||
require.NotEqual(t, true, formatter.color, "Color should not be used on a buffer logger")
|
||||
}
|
||||
|
||||
func TestLogContext(t *testing.T) {
|
||||
@@ -53,7 +53,7 @@ func TestLogContext(t *testing.T) {
|
||||
lenWithoutCtx := buffer.Len()
|
||||
buffer.Reset()
|
||||
|
||||
assert.Greater(t, lenWithCtx, lenWithoutCtx, "Log line length without context is not shorter than with context")
|
||||
require.Greater(t, lenWithCtx, lenWithoutCtx, "Log line length without context is not shorter than with context")
|
||||
}
|
||||
|
||||
func TestLogClone(t *testing.T) {
|
||||
@@ -65,7 +65,7 @@ func TestLogClone(t *testing.T) {
|
||||
logger.Info().Log("info")
|
||||
writer.Flush()
|
||||
|
||||
assert.Contains(t, buffer.String(), `component="test"`)
|
||||
require.Contains(t, buffer.String(), `component="test"`)
|
||||
|
||||
buffer.Reset()
|
||||
|
||||
@@ -74,7 +74,7 @@ func TestLogClone(t *testing.T) {
|
||||
logger2.Info().Log("info")
|
||||
writer.Flush()
|
||||
|
||||
assert.Contains(t, buffer.String(), `component="tset"`)
|
||||
require.Contains(t, buffer.String(), `component="tset"`)
|
||||
}
|
||||
|
||||
func TestLogSilent(t *testing.T) {
|
||||
@@ -85,22 +85,22 @@ func TestLogSilent(t *testing.T) {
|
||||
|
||||
logger.Debug().Log("debug")
|
||||
writer.Flush()
|
||||
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Info().Log("info")
|
||||
writer.Flush()
|
||||
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Warn().Log("warn")
|
||||
writer.Flush()
|
||||
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Error().Log("error")
|
||||
writer.Flush()
|
||||
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
buffer.Reset()
|
||||
}
|
||||
|
||||
@@ -112,22 +112,22 @@ func TestLogDebug(t *testing.T) {
|
||||
|
||||
logger.Debug().Log("debug")
|
||||
writer.Flush()
|
||||
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Info().Log("info")
|
||||
writer.Flush()
|
||||
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Warn().Log("warn")
|
||||
writer.Flush()
|
||||
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Error().Log("error")
|
||||
writer.Flush()
|
||||
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
buffer.Reset()
|
||||
}
|
||||
|
||||
@@ -139,22 +139,22 @@ func TestLogInfo(t *testing.T) {
|
||||
|
||||
logger.Debug().Log("debug")
|
||||
writer.Flush()
|
||||
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Info().Log("info")
|
||||
writer.Flush()
|
||||
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Warn().Log("warn")
|
||||
writer.Flush()
|
||||
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Error().Log("error")
|
||||
writer.Flush()
|
||||
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
buffer.Reset()
|
||||
}
|
||||
|
||||
@@ -166,22 +166,22 @@ func TestLogWarn(t *testing.T) {
|
||||
|
||||
logger.Debug().Log("debug")
|
||||
writer.Flush()
|
||||
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Info().Log("info")
|
||||
writer.Flush()
|
||||
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Warn().Log("warn")
|
||||
writer.Flush()
|
||||
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Error().Log("error")
|
||||
writer.Flush()
|
||||
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
buffer.Reset()
|
||||
}
|
||||
|
||||
@@ -193,21 +193,43 @@ func TestLogError(t *testing.T) {
|
||||
|
||||
logger.Debug().Log("debug")
|
||||
writer.Flush()
|
||||
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Info().Log("info")
|
||||
writer.Flush()
|
||||
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Warn().Log("warn")
|
||||
writer.Flush()
|
||||
assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
|
||||
buffer.Reset()
|
||||
|
||||
logger.Error().Log("error")
|
||||
writer.Flush()
|
||||
assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
|
||||
buffer.Reset()
|
||||
}
|
||||
|
||||
func TestLogWithField(t *testing.T) {
|
||||
bufwriter := NewBufferWriter(Linfo, 10)
|
||||
|
||||
logger := New("test").WithOutput(bufwriter)
|
||||
logger = logger.WithField("foo", "bar")
|
||||
logger.Info().Log("hello")
|
||||
|
||||
events := bufwriter.Events()
|
||||
|
||||
require.Equal(t, 1, len(events))
|
||||
require.Empty(t, events[0].err)
|
||||
require.Equal(t, "bar", events[0].Data["foo"])
|
||||
|
||||
logger = logger.WithField("func", func() bool { return true })
|
||||
logger.Info().Log("hello")
|
||||
|
||||
events = bufwriter.Events()
|
||||
require.Equal(t, 2, len(events))
|
||||
require.NotEmpty(t, events[1].err)
|
||||
require.Equal(t, "bar", events[0].Data["foo"])
|
||||
}
|
||||
|
||||
181
log/writer_test.go
Normal file
181
log/writer_test.go
Normal file
@@ -0,0 +1,181 @@
|
||||
package log
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestJSONWriter verifies the exact JSON serialization of a single log
// event, including the duplicated fields inside "Data".
func TestJSONWriter(t *testing.T) {
	buffer := bytes.Buffer{}

	writer := NewJSONWriter(&buffer, Linfo)
	writer.Write(&Event{
		logger: &logger{},
		Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
		Level: Linfo,
		Component: "test",
		Caller: "me",
		Message: "hello world",
		err: "",
		Data: map[string]interface{}{"foo": "bar"},
	})

	require.Equal(t, `{"Time":"2009-11-10T23:00:00Z","Level":"INFO","Component":"test","Caller":"me","Message":"hello world","Data":{"caller":"me","component":"test","foo":"bar","message":"hello world","ts":"2009-11-10T23:00:00Z"}}`, buffer.String())
}
|
||||
|
||||
func TestConsoleWriter(t *testing.T) {
|
||||
buffer := bytes.Buffer{}
|
||||
|
||||
writer := NewConsoleWriter(&buffer, Linfo, false)
|
||||
writer.Write(&Event{
|
||||
logger: &logger{},
|
||||
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
|
||||
Level: Linfo,
|
||||
Component: "test",
|
||||
Caller: "me",
|
||||
Message: "hello world",
|
||||
err: "",
|
||||
Data: map[string]interface{}{"foo": "bar"},
|
||||
})
|
||||
|
||||
require.Equal(t, `ts=2009-11-10T23:00:00Z level=INFO component="test" msg="hello world" foo="bar"`+"\n", buffer.String())
|
||||
}
|
||||
|
||||
func TestTopicWriter(t *testing.T) {
|
||||
bufwriter := NewBufferWriter(Linfo, 10)
|
||||
writer1 := NewTopicWriter(bufwriter, []string{})
|
||||
writer2 := NewTopicWriter(bufwriter, []string{"foobar"})
|
||||
|
||||
writer1.Write(&Event{
|
||||
logger: &logger{},
|
||||
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
|
||||
Level: Linfo,
|
||||
Component: "test",
|
||||
Caller: "me",
|
||||
Message: "hello world",
|
||||
err: "",
|
||||
Data: map[string]interface{}{"foo": "bar"},
|
||||
})
|
||||
|
||||
writer2.Write(&Event{
|
||||
logger: &logger{},
|
||||
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
|
||||
Level: Linfo,
|
||||
Component: "test",
|
||||
Caller: "me",
|
||||
Message: "hello world",
|
||||
err: "",
|
||||
Data: map[string]interface{}{"foo": "bar"},
|
||||
})
|
||||
|
||||
require.Equal(t, 1, len(bufwriter.Events()))
|
||||
|
||||
writer1.Write(&Event{
|
||||
logger: &logger{},
|
||||
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
|
||||
Level: Linfo,
|
||||
Component: "foobar",
|
||||
Caller: "me",
|
||||
Message: "hello world",
|
||||
err: "",
|
||||
Data: map[string]interface{}{"foo": "bar"},
|
||||
})
|
||||
|
||||
writer2.Write(&Event{
|
||||
logger: &logger{},
|
||||
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
|
||||
Level: Linfo,
|
||||
Component: "foobar",
|
||||
Caller: "me",
|
||||
Message: "hello world",
|
||||
err: "",
|
||||
Data: map[string]interface{}{"foo": "bar"},
|
||||
})
|
||||
|
||||
require.Equal(t, 3, len(bufwriter.Events()))
|
||||
}
|
||||
|
||||
func TestMultiwriter(t *testing.T) {
|
||||
bufwriter1 := NewBufferWriter(Linfo, 10)
|
||||
bufwriter2 := NewBufferWriter(Linfo, 10)
|
||||
|
||||
writer := NewMultiWriter(bufwriter1, bufwriter2)
|
||||
|
||||
writer.Write(&Event{
|
||||
logger: &logger{},
|
||||
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
|
||||
Level: Linfo,
|
||||
Component: "foobar",
|
||||
Caller: "me",
|
||||
Message: "hello world",
|
||||
err: "",
|
||||
Data: map[string]interface{}{"foo": "bar"},
|
||||
})
|
||||
|
||||
require.Equal(t, 1, len(bufwriter1.Events()))
|
||||
require.Equal(t, 1, len(bufwriter2.Events()))
|
||||
}
|
||||
|
||||
func TestLevelRewriter(t *testing.T) {
|
||||
bufwriter := NewBufferWriter(Linfo, 10)
|
||||
|
||||
rule := LevelRewriteRule{
|
||||
Level: Lwarn,
|
||||
Component: "foobar",
|
||||
Match: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
}
|
||||
|
||||
writer := NewLevelRewriter(bufwriter, []LevelRewriteRule{rule})
|
||||
writer.Write(&Event{
|
||||
logger: &logger{},
|
||||
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
|
||||
Level: Linfo,
|
||||
Component: "foobar",
|
||||
Caller: "me",
|
||||
Message: "hello world",
|
||||
err: "",
|
||||
Data: map[string]interface{}{"foo": "bar"},
|
||||
})
|
||||
|
||||
events := bufwriter.Events()
|
||||
|
||||
require.Equal(t, 1, len(events))
|
||||
require.Equal(t, Lwarn, events[0].Level)
|
||||
|
||||
writer.Write(&Event{
|
||||
logger: &logger{},
|
||||
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
|
||||
Level: Linfo,
|
||||
Component: "foobar",
|
||||
Caller: "me",
|
||||
Message: "hello world",
|
||||
err: "",
|
||||
Data: map[string]interface{}{"bar": "foo"},
|
||||
})
|
||||
|
||||
events = bufwriter.Events()
|
||||
|
||||
require.Equal(t, 2, len(events))
|
||||
require.Equal(t, Linfo, events[1].Level)
|
||||
|
||||
writer.Write(&Event{
|
||||
logger: &logger{},
|
||||
Time: time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
|
||||
Level: Linfo,
|
||||
Component: "test",
|
||||
Caller: "me",
|
||||
Message: "hello world",
|
||||
err: "",
|
||||
Data: map[string]interface{}{"foo": "bar"},
|
||||
})
|
||||
|
||||
events = bufwriter.Events()
|
||||
|
||||
require.Equal(t, 3, len(events))
|
||||
require.Equal(t, Linfo, events[2].Level)
|
||||
}
|
||||
5
main.go
5
main.go
@@ -5,6 +5,7 @@ import (
|
||||
"os/signal"
|
||||
|
||||
"github.com/datarhei/core/v16/app/api"
|
||||
"github.com/datarhei/core/v16/config/store"
|
||||
"github.com/datarhei/core/v16/log"
|
||||
|
||||
_ "github.com/joho/godotenv/autoload"
|
||||
@@ -13,7 +14,9 @@ import (
|
||||
func main() {
|
||||
logger := log.New("Core").WithOutput(log.NewConsoleWriter(os.Stderr, log.Lwarn, true))
|
||||
|
||||
app, err := api.New(os.Getenv("CORE_CONFIGFILE"), os.Stderr)
|
||||
configfile := store.Location(os.Getenv("CORE_CONFIGFILE"))
|
||||
|
||||
app, err := api.New(configfile, os.Stderr)
|
||||
if err != nil {
|
||||
logger.Error().WithError(err).Log("Failed to create new API")
|
||||
os.Exit(1)
|
||||
|
||||
@@ -12,7 +12,7 @@ type Pattern interface {
|
||||
Name() string
|
||||
|
||||
// Match returns whether a map of labels with its label values
|
||||
// match this pattern.
|
||||
// match this pattern. All labels have to be present and need to match.
|
||||
Match(labels map[string]string) bool
|
||||
|
||||
// IsValid returns whether the pattern is valid.
|
||||
@@ -26,7 +26,7 @@ type pattern struct {
|
||||
}
|
||||
|
||||
// NewPattern creates a new pattern with the given prefix and group name. There
|
||||
// has to be an even number of parameter, which is ("label", "labelvalue", "label",
|
||||
// has to be an even number of labels, which is ("label", "labelvalue", "label",
|
||||
// "labelvalue" ...). The label value will be interpreted as regular expression.
|
||||
func NewPattern(name string, labels ...string) Pattern {
|
||||
p := &pattern{
|
||||
@@ -38,7 +38,6 @@ func NewPattern(name string, labels ...string) Pattern {
|
||||
for i := 0; i < len(labels); i += 2 {
|
||||
exp, err := regexp.Compile(labels[i+1])
|
||||
if err != nil {
|
||||
fmt.Printf("error: %s\n", err)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -84,19 +83,35 @@ func (p *pattern) IsValid() bool {
|
||||
return p.valid
|
||||
}
|
||||
|
||||
// Metrics is a collection of values
|
||||
type Metrics interface {
|
||||
// Value returns the first value that matches the name and the labels. The labels
|
||||
// are used to create a pattern and therefore must obey to the rules of NewPattern.
|
||||
Value(name string, labels ...string) Value
|
||||
|
||||
// Values returns all values that matches the name and the labels. The labels
|
||||
// are used to create a pattern and therefore must obey to the rules of NewPattern.
|
||||
Values(name string, labels ...string) []Value
|
||||
|
||||
// Labels return a list of all values for a label.
|
||||
Labels(name string, label string) []string
|
||||
|
||||
// All returns all values currently stored in the collection.
|
||||
All() []Value
|
||||
|
||||
// Add adds a value to the collection.
|
||||
Add(v Value)
|
||||
|
||||
// String return a string representation of all collected values.
|
||||
String() string
|
||||
}
|
||||
|
||||
// metrics is an implementation of the Metrics interface.
|
||||
type metrics struct {
|
||||
values []Value
|
||||
}
|
||||
|
||||
// NewMetrics returns a new metrics instance.
|
||||
func NewMetrics() *metrics {
|
||||
return &metrics{}
|
||||
}
|
||||
@@ -231,8 +246,15 @@ func (v *value) Hash() string {
|
||||
func (v *value) String() string {
|
||||
s := fmt.Sprintf("%s: %f {", v.name, v.value)
|
||||
|
||||
for k, v := range v.labels {
|
||||
s += k + "=" + v + " "
|
||||
keys := []string{}
|
||||
for k := range v.labels {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
sort.Strings(keys)
|
||||
|
||||
for _, k := range keys {
|
||||
s += k + "=" + v.labels[k] + " "
|
||||
}
|
||||
|
||||
s += "}"
|
||||
|
||||
@@ -2,25 +2,154 @@ package metric
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestValue(t *testing.T) {
|
||||
d := NewDesc("group", "", []string{"name"})
|
||||
v := NewValue(d, 42, "foobar")
|
||||
func TestPattern(t *testing.T) {
|
||||
p := NewPattern("bla", "label1", "value1", "label2")
|
||||
require.Equal(t, false, p.IsValid())
|
||||
|
||||
if v.L("name") != "foobar" {
|
||||
t.Fatalf("label name doesn't have the expected value")
|
||||
}
|
||||
p = NewPattern("bla", "label1", "value1", "label2", "valu(e2")
|
||||
require.Equal(t, false, p.IsValid())
|
||||
|
||||
p = NewPattern("bla")
|
||||
require.Equal(t, true, p.IsValid())
|
||||
require.Equal(t, "bla", p.Name())
|
||||
|
||||
p = NewPattern("bla", "label1", "value1", "label2", "value2")
|
||||
require.Equal(t, true, p.IsValid())
|
||||
}
|
||||
|
||||
func TestPatternMatch(t *testing.T) {
|
||||
p := NewPattern("bla", "label1", "value1", "label2")
|
||||
require.Equal(t, false, p.IsValid())
|
||||
require.Equal(t, false, p.Match(map[string]string{"label1": "value1"}))
|
||||
|
||||
p0 := NewPattern("bla")
|
||||
require.Equal(t, true, p0.IsValid())
|
||||
require.Equal(t, true, p0.Match(map[string]string{}))
|
||||
require.Equal(t, true, p0.Match(map[string]string{"labelX": "foobar"}))
|
||||
|
||||
p = NewPattern("bla", "label1", "value.", "label2", "val?ue2")
|
||||
require.Equal(t, true, p.IsValid())
|
||||
require.Equal(t, false, p.Match(map[string]string{}))
|
||||
require.Equal(t, false, p.Match(map[string]string{"label1": "value1"}))
|
||||
require.Equal(t, true, p.Match(map[string]string{"label1": "value1", "label2": "value2"}))
|
||||
require.Equal(t, true, p.Match(map[string]string{"label1": "value5", "label2": "vaue2"}))
|
||||
}
|
||||
|
||||
func TestValue(t *testing.T) {
|
||||
d := NewDesc("group", "", []string{"label1", "label2"})
|
||||
v := NewValue(d, 42, "foobar")
|
||||
require.Nil(t, v)
|
||||
|
||||
v = NewValue(d, 42, "foobar", "foobaz")
|
||||
require.NotNil(t, v)
|
||||
require.Equal(t, float64(42), v.Val())
|
||||
|
||||
require.Equal(t, "", v.L("labelX"))
|
||||
require.Equal(t, "foobar", v.L("label1"))
|
||||
require.Equal(t, "foobaz", v.L("label2"))
|
||||
require.Equal(t, "group", v.Name())
|
||||
require.Equal(t, "group:label1=foobar label2=foobaz ", v.Hash())
|
||||
require.Equal(t, "group: 42.000000 {label1=foobar label2=foobaz }", v.String())
|
||||
|
||||
require.Equal(t, map[string]string{"label1": "foobar", "label2": "foobaz"}, v.Labels())
|
||||
}
|
||||
|
||||
func TestValuePattern(t *testing.T) {
|
||||
d := NewDesc("group", "", []string{"label1", "label2"})
|
||||
v := NewValue(d, 42, "foobar", "foobaz")
|
||||
|
||||
p1 := NewPattern("group")
|
||||
p2 := NewPattern("group", "label1", "foobar")
|
||||
p3 := NewPattern("group", "label2", "foobaz")
|
||||
p4 := NewPattern("group", "label2", "foobaz", "label1", "foobar")
|
||||
|
||||
if v.Match([]Pattern{p1}) == false {
|
||||
t.Fatalf("pattern p1 should have matched")
|
||||
}
|
||||
require.Equal(t, true, v.Match(nil))
|
||||
require.Equal(t, true, v.Match([]Pattern{p1}))
|
||||
require.Equal(t, true, v.Match([]Pattern{p2}))
|
||||
require.Equal(t, true, v.Match([]Pattern{p3}))
|
||||
require.Equal(t, true, v.Match([]Pattern{p4}))
|
||||
require.Equal(t, true, v.Match([]Pattern{p1, p2, p3, p4}))
|
||||
|
||||
p2 := NewPattern("group", "name", "foobar")
|
||||
p5 := NewPattern("group", "label1", "foobaz")
|
||||
|
||||
if v.Match([]Pattern{p2}) == false {
|
||||
t.Fatalf("pattern p2 should have matched")
|
||||
}
|
||||
require.Equal(t, false, v.Match([]Pattern{p5}))
|
||||
|
||||
require.Equal(t, true, v.Match([]Pattern{p4, p5}))
|
||||
require.Equal(t, true, v.Match([]Pattern{p5, p4}))
|
||||
}
|
||||
|
||||
func TestDescription(t *testing.T) {
|
||||
d := NewDesc("name", "blabla", []string{"label"})
|
||||
|
||||
require.Equal(t, "name", d.Name())
|
||||
require.Equal(t, "blabla", d.Description())
|
||||
require.ElementsMatch(t, []string{"label"}, d.Labels())
|
||||
require.Equal(t, "name: blabla (label)", d.String())
|
||||
}
|
||||
|
||||
func TestMetri(t *testing.T) {
|
||||
m := NewMetrics()
|
||||
|
||||
require.Equal(t, "", m.String())
|
||||
require.Equal(t, 0, len(m.All()))
|
||||
|
||||
d := NewDesc("group", "", []string{"label1", "label2"})
|
||||
v1 := NewValue(d, 42, "foobar", "foobaz")
|
||||
require.NotNil(t, v1)
|
||||
|
||||
m.Add(v1)
|
||||
|
||||
require.Equal(t, v1.String(), m.String())
|
||||
require.Equal(t, 1, len(m.All()))
|
||||
|
||||
l := m.Labels("group", "label2")
|
||||
|
||||
require.ElementsMatch(t, []string{"foobaz"}, l)
|
||||
|
||||
v2 := NewValue(d, 77, "barfoo", "bazfoo")
|
||||
|
||||
m.Add(v2)
|
||||
|
||||
require.Equal(t, v1.String()+v2.String(), m.String())
|
||||
require.Equal(t, 2, len(m.All()))
|
||||
|
||||
l = m.Labels("group", "label2")
|
||||
|
||||
require.ElementsMatch(t, []string{"foobaz", "bazfoo"}, l)
|
||||
|
||||
v := m.Value("bla", "label1", "foo*")
|
||||
|
||||
require.Equal(t, nullValue, v)
|
||||
|
||||
v = m.Value("group")
|
||||
|
||||
require.NotEqual(t, nullValue, v)
|
||||
|
||||
v = m.Value("group", "label1", "foo*")
|
||||
|
||||
require.NotEqual(t, nullValue, v)
|
||||
|
||||
v = m.Value("group", "label2", "baz")
|
||||
|
||||
require.NotEqual(t, nullValue, v)
|
||||
|
||||
vs := m.Values("group")
|
||||
|
||||
require.Equal(t, 2, len(vs))
|
||||
|
||||
vs = m.Values("group", "label1", "foo*")
|
||||
|
||||
require.Equal(t, 2, len(vs))
|
||||
|
||||
vs = m.Values("group", "label2", "*baz*")
|
||||
|
||||
require.NotEqual(t, 2, len(vs))
|
||||
|
||||
vs = m.Values("group", "label1")
|
||||
|
||||
require.Equal(t, 0, len(vs))
|
||||
}
|
||||
|
||||
@@ -3,18 +3,27 @@ package net
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestAnonymizeIPString(t *testing.T) {
|
||||
_, err := AnonymizeIPString("127.987.475.21")
|
||||
require.Error(t, err)
|
||||
|
||||
_, err = AnonymizeIPString("bbd1:xxxx")
|
||||
require.Error(t, err)
|
||||
|
||||
_, err = AnonymizeIPString("hello-world")
|
||||
require.Error(t, err)
|
||||
|
||||
ipv4 := "192.168.1.42"
|
||||
ipv6 := "bbd1:e95a:adbb:b29a:e38b:577f:6f9a:1fa7"
|
||||
|
||||
anonymizedIPv4, err := AnonymizeIPString(ipv4)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "192.168.1.0", anonymizedIPv4)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "192.168.1.0", anonymizedIPv4)
|
||||
|
||||
anonymizedIPv6, err := AnonymizeIPString(ipv6)
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, "bbd1:e95a:adbb:b29a::", anonymizedIPv6)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "bbd1:e95a:adbb:b29a::", anonymizedIPv6)
|
||||
}
|
||||
|
||||
@@ -3,57 +3,63 @@ package net
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestIPLimiterNew(t *testing.T) {
|
||||
var err error
|
||||
|
||||
_, err = NewIPLimiter([]string{}, []string{})
|
||||
assert.Nil(t, err)
|
||||
require.Nil(t, err)
|
||||
|
||||
_, err = NewIPLimiter([]string{"::1/128", "127.0.0.1/32", ""}, []string{})
|
||||
assert.Nil(t, err)
|
||||
require.Nil(t, err)
|
||||
|
||||
_, err = NewIPLimiter([]string{}, []string{"::1/128", "127.0.0.1/32", ""})
|
||||
assert.Nil(t, err)
|
||||
require.Nil(t, err)
|
||||
}
|
||||
|
||||
func TestIPLimiterError(t *testing.T) {
|
||||
var err error
|
||||
|
||||
_, err = NewIPLimiter([]string{}, []string{})
|
||||
assert.Nil(t, err)
|
||||
require.Nil(t, err)
|
||||
|
||||
_, err = NewIPLimiter([]string{"::1"}, []string{})
|
||||
assert.NotNil(t, err, "Should not accept invalid IP")
|
||||
require.NotNil(t, err, "Should not accept invalid IP")
|
||||
|
||||
_, err = NewIPLimiter([]string{}, []string{"::1"})
|
||||
assert.NotNil(t, err, "Should not accept invalid IP")
|
||||
require.NotNil(t, err, "Should not accept invalid IP")
|
||||
}
|
||||
|
||||
func TestIPLimiterInvalidIPs(t *testing.T) {
|
||||
limiter, _ := NewIPLimiter([]string{}, []string{})
|
||||
|
||||
assert.False(t, limiter.IsAllowed(""), "Invalid IP shouldn't be allowed")
|
||||
require.False(t, limiter.IsAllowed(""), "Invalid IP shouldn't be allowed")
|
||||
}
|
||||
|
||||
func TestIPLimiterNoIPs(t *testing.T) {
|
||||
limiter, _ := NewIPLimiter([]string{}, []string{})
|
||||
|
||||
assert.True(t, limiter.IsAllowed("127.0.0.1"), "IP should be allowed")
|
||||
require.True(t, limiter.IsAllowed("127.0.0.1"), "IP should be allowed")
|
||||
}
|
||||
|
||||
func TestIPLimiterAllowlist(t *testing.T) {
|
||||
limiter, _ := NewIPLimiter([]string{}, []string{"::1/128"})
|
||||
|
||||
assert.False(t, limiter.IsAllowed("127.0.0.1"), "Unallowed IP shouldn't be allowed")
|
||||
assert.True(t, limiter.IsAllowed("::1"), "Allowed IP should be allowed")
|
||||
require.False(t, limiter.IsAllowed("127.0.0.1"), "Unallowed IP shouldn't be allowed")
|
||||
require.True(t, limiter.IsAllowed("::1"), "Allowed IP should be allowed")
|
||||
}
|
||||
|
||||
func TestIPLimiterBlocklist(t *testing.T) {
|
||||
limiter, _ := NewIPLimiter([]string{"::1/128"}, []string{})
|
||||
|
||||
assert.True(t, limiter.IsAllowed("127.0.0.1"), "Allowed IP should be allowed")
|
||||
assert.False(t, limiter.IsAllowed("::1"), "Unallowed IP shouldn't be allowed")
|
||||
require.True(t, limiter.IsAllowed("127.0.0.1"), "Allowed IP should be allowed")
|
||||
require.False(t, limiter.IsAllowed("::1"), "Unallowed IP shouldn't be allowed")
|
||||
}
|
||||
|
||||
func TestNullIPLimiter(t *testing.T) {
|
||||
limiter := NewNullIPLimiter()
|
||||
|
||||
require.True(t, limiter.IsAllowed("foobar"))
|
||||
}
|
||||
|
||||
@@ -3,19 +3,30 @@ package net
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNewPortrange(t *testing.T) {
|
||||
_, err := NewPortrange(1000, 1999)
|
||||
|
||||
assert.Nil(t, err, "Valid port range not accepted: %s", err)
|
||||
require.Nil(t, err, "Valid port range not accepted: %s", err)
|
||||
}
|
||||
|
||||
func TestInvalidPortrange(t *testing.T) {
|
||||
_, err := NewPortrange(1999, 1000)
|
||||
|
||||
assert.NotNil(t, err, "Invalid port range accepted")
|
||||
require.NotNil(t, err, "Invalid port range accepted")
|
||||
}
|
||||
|
||||
func TestOutOfRangePortrange(t *testing.T) {
|
||||
p, err := NewPortrange(-1, 70000)
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
portrange := p.(*portrange)
|
||||
|
||||
require.Equal(t, 1, portrange.min)
|
||||
require.Equal(t, 65535, len(portrange.ports))
|
||||
}
|
||||
|
||||
func TestGetPort(t *testing.T) {
|
||||
@@ -23,26 +34,26 @@ func TestGetPort(t *testing.T) {
|
||||
|
||||
port, err := portrange.Get()
|
||||
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 1000, port)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1000, port)
|
||||
}
|
||||
|
||||
func TestGetPutPort(t *testing.T) {
|
||||
portrange, _ := NewPortrange(1000, 1999)
|
||||
|
||||
port, err := portrange.Get()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 1000, port)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1000, port)
|
||||
|
||||
port, err = portrange.Get()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 1001, port)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1001, port)
|
||||
|
||||
portrange.Put(1000)
|
||||
|
||||
port, err = portrange.Get()
|
||||
assert.Nil(t, err)
|
||||
assert.Equal(t, 1000, port)
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, 1000, port)
|
||||
}
|
||||
|
||||
func TestPortUnavailable(t *testing.T) {
|
||||
@@ -50,12 +61,12 @@ func TestPortUnavailable(t *testing.T) {
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
port, _ := portrange.Get()
|
||||
assert.Equal(t, 1000+i, port, "at index %d", i)
|
||||
require.Equal(t, 1000+i, port, "at index %d", i)
|
||||
}
|
||||
|
||||
port, err := portrange.Get()
|
||||
assert.NotNil(t, err)
|
||||
assert.Less(t, port, 0)
|
||||
require.NotNil(t, err)
|
||||
require.Less(t, port, 0)
|
||||
}
|
||||
|
||||
func TestPutPort(t *testing.T) {
|
||||
@@ -73,16 +84,27 @@ func TestClampRange(t *testing.T) {
|
||||
|
||||
port, _ := portrange.Get()
|
||||
|
||||
assert.Equal(t, 65000, port)
|
||||
require.Equal(t, 65000, port)
|
||||
|
||||
portrange.Put(65000)
|
||||
|
||||
for i := 65000; i <= 65535; i++ {
|
||||
port, _ := portrange.Get()
|
||||
assert.Equal(t, i, port, "at index %d", i)
|
||||
require.Equal(t, i, port, "at index %d", i)
|
||||
}
|
||||
|
||||
port, _ = portrange.Get()
|
||||
|
||||
assert.Less(t, port, 0)
|
||||
require.Less(t, port, 0)
|
||||
}
|
||||
|
||||
func TestDummyPortranger(t *testing.T) {
|
||||
portrange := NewDummyPortrange()
|
||||
|
||||
port, err := portrange.Get()
|
||||
|
||||
require.Error(t, err)
|
||||
require.Equal(t, 0, port)
|
||||
|
||||
portrange.Put(42)
|
||||
}
|
||||
|
||||
@@ -7,9 +7,20 @@ import (
|
||||
)
|
||||
|
||||
func TestLookup(t *testing.T) {
|
||||
_, err := Lookup("https://www.google.com")
|
||||
ip, err := Lookup("/localhost:8080/foobar")
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "", ip)
|
||||
|
||||
ip, err = Lookup("http://")
|
||||
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "", ip)
|
||||
|
||||
ip, err = Lookup("https://www.google.com")
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, ip)
|
||||
}
|
||||
|
||||
func TestLocalhost(t *testing.T) {
|
||||
@@ -18,3 +29,22 @@ func TestLocalhost(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Subset(t, []string{"127.0.0.1", "::1"}, []string{ip})
|
||||
}
|
||||
|
||||
func TestValidate(t *testing.T) {
|
||||
err := Validate("http://localhost/foobar")
|
||||
require.NoError(t, err)
|
||||
|
||||
err = Validate("foobar")
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestScheme(t *testing.T) {
|
||||
r := HasScheme("http://localhost/foobar")
|
||||
require.True(t, r)
|
||||
|
||||
r = HasScheme("iueriherfd://localhost/foobar")
|
||||
require.True(t, r)
|
||||
|
||||
r = HasScheme("//localhost/foobar")
|
||||
require.False(t, r)
|
||||
}
|
||||
|
||||
@@ -192,6 +192,7 @@ type process struct {
|
||||
onStart func()
|
||||
onExit func()
|
||||
onStateChange func(from, to string)
|
||||
lock sync.Mutex
|
||||
}
|
||||
limits Limiter
|
||||
}
|
||||
@@ -588,6 +589,7 @@ func (p *process) stop(wait bool) error {
|
||||
if wait {
|
||||
wg.Add(1)
|
||||
|
||||
p.callbacks.lock.Lock()
|
||||
if p.callbacks.onExit == nil {
|
||||
p.callbacks.onExit = func() {
|
||||
wg.Done()
|
||||
@@ -601,6 +603,7 @@ func (p *process) stop(wait bool) error {
|
||||
p.callbacks.onExit = cb
|
||||
}
|
||||
}
|
||||
p.callbacks.lock.Unlock()
|
||||
}
|
||||
|
||||
var err error
|
||||
@@ -829,10 +832,12 @@ func (p *process) waiter() {
|
||||
// Reset the parser stats
|
||||
p.parser.ResetStats()
|
||||
|
||||
// Call the onStop callback
|
||||
// Call the onExit callback
|
||||
p.callbacks.lock.Lock()
|
||||
if p.callbacks.onExit != nil {
|
||||
go p.callbacks.onExit()
|
||||
}
|
||||
p.callbacks.lock.Unlock()
|
||||
|
||||
p.order.lock.Lock()
|
||||
defer p.order.lock.Unlock()
|
||||
|
||||
@@ -98,7 +98,7 @@ func (p *process) cpuTimes() (*cpuTimesStat, error) {
|
||||
}
|
||||
|
||||
s := &cpuTimesStat{
|
||||
total: times.Total(),
|
||||
total: cpuTotal(times),
|
||||
system: times.System,
|
||||
user: times.User,
|
||||
}
|
||||
|
||||
@@ -285,7 +285,7 @@ func (u *util) cpuTimes() (*cpuTimesStat, error) {
|
||||
}
|
||||
|
||||
s := &cpuTimesStat{
|
||||
total: times[0].Total(),
|
||||
total: cpuTotal(×[0]),
|
||||
system: times[0].System,
|
||||
user: times[0].User,
|
||||
idle: times[0].Idle,
|
||||
@@ -496,3 +496,8 @@ func (u *util) readFile(path string) ([]string, error) {
|
||||
|
||||
return lines, nil
|
||||
}
|
||||
|
||||
func cpuTotal(c *cpu.TimesStat) float64 {
|
||||
return c.User + c.System + c.Idle + c.Nice + c.Iowait + c.Irq +
|
||||
c.Softirq + c.Steal + c.Guest + c.GuestNice
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package app
|
||||
|
||||
import (
|
||||
"github.com/datarhei/core/v16/process"
|
||||
"github.com/datarhei/core/v16/restream/replace"
|
||||
)
|
||||
|
||||
type ConfigIOCleanup struct {
|
||||
@@ -80,79 +79,6 @@ func (config *Config) Clone() *Config {
|
||||
return clone
|
||||
}
|
||||
|
||||
// ReplacePlaceholders replaces all placeholders in the config. The config
|
||||
// will be modified in place.
|
||||
func (config *Config) ResolvePlaceholders(r replace.Replacer) {
|
||||
for i, option := range config.Options {
|
||||
// Replace any known placeholders
|
||||
option = r.Replace(option, "diskfs", "")
|
||||
|
||||
config.Options[i] = option
|
||||
}
|
||||
|
||||
// Resolving the given inputs
|
||||
for i, input := range config.Input {
|
||||
// Replace any known placeholders
|
||||
input.ID = r.Replace(input.ID, "processid", config.ID)
|
||||
input.ID = r.Replace(input.ID, "reference", config.Reference)
|
||||
input.Address = r.Replace(input.Address, "inputid", input.ID)
|
||||
input.Address = r.Replace(input.Address, "processid", config.ID)
|
||||
input.Address = r.Replace(input.Address, "reference", config.Reference)
|
||||
input.Address = r.Replace(input.Address, "diskfs", "")
|
||||
input.Address = r.Replace(input.Address, "memfs", "")
|
||||
input.Address = r.Replace(input.Address, "rtmp", "")
|
||||
input.Address = r.Replace(input.Address, "srt", "")
|
||||
|
||||
for j, option := range input.Options {
|
||||
// Replace any known placeholders
|
||||
option = r.Replace(option, "inputid", input.ID)
|
||||
option = r.Replace(option, "processid", config.ID)
|
||||
option = r.Replace(option, "reference", config.Reference)
|
||||
option = r.Replace(option, "diskfs", "")
|
||||
option = r.Replace(option, "memfs", "")
|
||||
|
||||
input.Options[j] = option
|
||||
}
|
||||
|
||||
config.Input[i] = input
|
||||
}
|
||||
|
||||
// Resolving the given outputs
|
||||
for i, output := range config.Output {
|
||||
// Replace any known placeholders
|
||||
output.ID = r.Replace(output.ID, "processid", config.ID)
|
||||
output.Address = r.Replace(output.Address, "outputid", output.ID)
|
||||
output.Address = r.Replace(output.Address, "processid", config.ID)
|
||||
output.Address = r.Replace(output.Address, "reference", config.Reference)
|
||||
output.Address = r.Replace(output.Address, "diskfs", "")
|
||||
output.Address = r.Replace(output.Address, "memfs", "")
|
||||
output.Address = r.Replace(output.Address, "rtmp", "")
|
||||
output.Address = r.Replace(output.Address, "srt", "")
|
||||
|
||||
for j, option := range output.Options {
|
||||
// Replace any known placeholders
|
||||
option = r.Replace(option, "outputid", output.ID)
|
||||
option = r.Replace(option, "processid", config.ID)
|
||||
option = r.Replace(option, "reference", config.Reference)
|
||||
option = r.Replace(option, "diskfs", "")
|
||||
option = r.Replace(option, "memfs", "")
|
||||
|
||||
output.Options[j] = option
|
||||
}
|
||||
|
||||
for j, cleanup := range output.Cleanup {
|
||||
// Replace any known placeholders
|
||||
cleanup.Pattern = r.Replace(cleanup.Pattern, "outputid", output.ID)
|
||||
cleanup.Pattern = r.Replace(cleanup.Pattern, "processid", config.ID)
|
||||
cleanup.Pattern = r.Replace(cleanup.Pattern, "reference", config.Reference)
|
||||
|
||||
output.Cleanup[j] = cleanup
|
||||
}
|
||||
|
||||
config.Output[i] = output
|
||||
}
|
||||
}
|
||||
|
||||
// CreateCommand created the FFmpeg command from this config.
|
||||
func (config *Config) CreateCommand() []string {
|
||||
var command []string
|
||||
|
||||
@@ -62,6 +62,11 @@ func New(config Config) Filesystem {
|
||||
rfs.logger = log.New("")
|
||||
}
|
||||
|
||||
rfs.logger = rfs.logger.WithFields(log.Fields{
|
||||
"name": config.FS.Name(),
|
||||
"type": config.FS.Type(),
|
||||
})
|
||||
|
||||
rfs.cleanupPatterns = make(map[string][]Pattern)
|
||||
|
||||
// already drain the stop
|
||||
@@ -130,7 +135,7 @@ func (rfs *filesystem) cleanup() {
|
||||
|
||||
for _, patterns := range rfs.cleanupPatterns {
|
||||
for _, pattern := range patterns {
|
||||
filesAndDirs := rfs.Filesystem.List(pattern.Pattern)
|
||||
filesAndDirs := rfs.Filesystem.List("/", pattern.Pattern)
|
||||
|
||||
files := []fs.FileInfo{}
|
||||
for _, f := range filesAndDirs {
|
||||
@@ -146,7 +151,7 @@ func (rfs *filesystem) cleanup() {
|
||||
if pattern.MaxFiles > 0 && uint(len(files)) > pattern.MaxFiles {
|
||||
for i := uint(0); i < uint(len(files))-pattern.MaxFiles; i++ {
|
||||
rfs.logger.Debug().WithField("path", files[i].Name()).Log("Remove file because MaxFiles is exceeded")
|
||||
rfs.Filesystem.Delete(files[i].Name())
|
||||
rfs.Filesystem.Remove(files[i].Name())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -156,7 +161,7 @@ func (rfs *filesystem) cleanup() {
|
||||
for _, f := range files {
|
||||
if f.ModTime().Before(bestBefore) {
|
||||
rfs.logger.Debug().WithField("path", f.Name()).Log("Remove file because MaxFileAge is exceeded")
|
||||
rfs.Filesystem.Delete(f.Name())
|
||||
rfs.Filesystem.Remove(f.Name())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -170,11 +175,11 @@ func (rfs *filesystem) purge(patterns []Pattern) (nfiles uint64) {
|
||||
continue
|
||||
}
|
||||
|
||||
files := rfs.Filesystem.List(pattern.Pattern)
|
||||
files := rfs.Filesystem.List("/", pattern.Pattern)
|
||||
sort.Slice(files, func(i, j int) bool { return len(files[i].Name()) > len(files[j].Name()) })
|
||||
for _, f := range files {
|
||||
rfs.logger.Debug().WithField("path", f.Name()).Log("Purging file")
|
||||
rfs.Filesystem.Delete(f.Name())
|
||||
rfs.Filesystem.Remove(f.Name())
|
||||
nfiles++
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,11 +10,7 @@ import (
|
||||
)
|
||||
|
||||
func TestMaxFiles(t *testing.T) {
|
||||
memfs := fs.NewMemFilesystem(fs.MemConfig{
|
||||
Base: "/",
|
||||
Size: 1024,
|
||||
Purge: false,
|
||||
})
|
||||
memfs, _ := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
|
||||
cleanfs := New(Config{
|
||||
FS: memfs,
|
||||
@@ -30,15 +26,15 @@ func TestMaxFiles(t *testing.T) {
|
||||
},
|
||||
})
|
||||
|
||||
cleanfs.Store("/chunk_0.ts", strings.NewReader("chunk_0"))
|
||||
cleanfs.Store("/chunk_1.ts", strings.NewReader("chunk_1"))
|
||||
cleanfs.Store("/chunk_2.ts", strings.NewReader("chunk_2"))
|
||||
cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0"))
|
||||
cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1"))
|
||||
cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2"))
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
return cleanfs.Files() == 3
|
||||
}, 3*time.Second, time.Second)
|
||||
|
||||
cleanfs.Store("/chunk_3.ts", strings.NewReader("chunk_3"))
|
||||
cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3"))
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
if cleanfs.Files() != 3 {
|
||||
@@ -47,7 +43,7 @@ func TestMaxFiles(t *testing.T) {
|
||||
|
||||
names := []string{}
|
||||
|
||||
for _, f := range cleanfs.List("/*.ts") {
|
||||
for _, f := range cleanfs.List("/", "/*.ts") {
|
||||
names = append(names, f.Name())
|
||||
}
|
||||
|
||||
@@ -60,11 +56,7 @@ func TestMaxFiles(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestMaxAge(t *testing.T) {
|
||||
memfs := fs.NewMemFilesystem(fs.MemConfig{
|
||||
Base: "/",
|
||||
Size: 1024,
|
||||
Purge: false,
|
||||
})
|
||||
memfs, _ := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
|
||||
cleanfs := New(Config{
|
||||
FS: memfs,
|
||||
@@ -80,15 +72,15 @@ func TestMaxAge(t *testing.T) {
|
||||
},
|
||||
})
|
||||
|
||||
cleanfs.Store("/chunk_0.ts", strings.NewReader("chunk_0"))
|
||||
cleanfs.Store("/chunk_1.ts", strings.NewReader("chunk_1"))
|
||||
cleanfs.Store("/chunk_2.ts", strings.NewReader("chunk_2"))
|
||||
cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0"))
|
||||
cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1"))
|
||||
cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2"))
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
return cleanfs.Files() == 0
|
||||
}, 5*time.Second, time.Second)
|
||||
|
||||
cleanfs.Store("/chunk_3.ts", strings.NewReader("chunk_3"))
|
||||
cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3"))
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
if cleanfs.Files() != 1 {
|
||||
@@ -97,7 +89,7 @@ func TestMaxAge(t *testing.T) {
|
||||
|
||||
names := []string{}
|
||||
|
||||
for _, f := range cleanfs.List("/*.ts") {
|
||||
for _, f := range cleanfs.List("/", "/*.ts") {
|
||||
names = append(names, f.Name())
|
||||
}
|
||||
|
||||
@@ -110,11 +102,7 @@ func TestMaxAge(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestUnsetCleanup(t *testing.T) {
|
||||
memfs := fs.NewMemFilesystem(fs.MemConfig{
|
||||
Base: "/",
|
||||
Size: 1024,
|
||||
Purge: false,
|
||||
})
|
||||
memfs, _ := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
|
||||
cleanfs := New(Config{
|
||||
FS: memfs,
|
||||
@@ -130,15 +118,15 @@ func TestUnsetCleanup(t *testing.T) {
|
||||
},
|
||||
})
|
||||
|
||||
cleanfs.Store("/chunk_0.ts", strings.NewReader("chunk_0"))
|
||||
cleanfs.Store("/chunk_1.ts", strings.NewReader("chunk_1"))
|
||||
cleanfs.Store("/chunk_2.ts", strings.NewReader("chunk_2"))
|
||||
cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0"))
|
||||
cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1"))
|
||||
cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2"))
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
return cleanfs.Files() == 3
|
||||
}, 3*time.Second, time.Second)
|
||||
|
||||
cleanfs.Store("/chunk_3.ts", strings.NewReader("chunk_3"))
|
||||
cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3"))
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
if cleanfs.Files() != 3 {
|
||||
@@ -147,7 +135,7 @@ func TestUnsetCleanup(t *testing.T) {
|
||||
|
||||
names := []string{}
|
||||
|
||||
for _, f := range cleanfs.List("/*.ts") {
|
||||
for _, f := range cleanfs.List("/", "/*.ts") {
|
||||
names = append(names, f.Name())
|
||||
}
|
||||
|
||||
@@ -158,7 +146,7 @@ func TestUnsetCleanup(t *testing.T) {
|
||||
|
||||
cleanfs.UnsetCleanup("foobar")
|
||||
|
||||
cleanfs.Store("/chunk_4.ts", strings.NewReader("chunk_4"))
|
||||
cleanfs.WriteFileReader("/chunk_4.ts", strings.NewReader("chunk_4"))
|
||||
|
||||
require.Eventually(t, func() bool {
|
||||
if cleanfs.Files() != 4 {
|
||||
@@ -167,7 +155,7 @@ func TestUnsetCleanup(t *testing.T) {
|
||||
|
||||
names := []string{}
|
||||
|
||||
for _, f := range cleanfs.List("/*.ts") {
|
||||
for _, f := range cleanfs.List("/", "/*.ts") {
|
||||
names = append(names, f.Name())
|
||||
}
|
||||
|
||||
|
||||
@@ -4,17 +4,23 @@ import (
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/datarhei/core/v16/glob"
|
||||
"github.com/datarhei/core/v16/restream/app"
|
||||
)
|
||||
|
||||
type TemplateFn func(config *app.Config, section string) string
|
||||
|
||||
type Replacer interface {
|
||||
// RegisterTemplate registers a template for a specific placeholder. Template
|
||||
// may contain placeholders as well of the form {name}. They will be replaced
|
||||
// by the parameters of the placeholder (see Replace).
|
||||
RegisterTemplate(placeholder, template string)
|
||||
// by the parameters of the placeholder (see Replace). If a parameter is not of
|
||||
// a template is not present, default values can be provided.
|
||||
RegisterTemplate(placeholder, template string, defaults map[string]string)
|
||||
|
||||
// RegisterTemplateFunc does the same as RegisterTemplate, but the template
|
||||
// is returned by the template function.
|
||||
RegisterTemplateFunc(placeholder string, template func() string)
|
||||
RegisterTemplateFunc(placeholder string, template TemplateFn, defaults map[string]string)
|
||||
|
||||
// Replace replaces all occurences of placeholder in str with value. The placeholder is of the
|
||||
// form {placeholder}. It is possible to escape a characters in value with \\ by appending a ^
|
||||
@@ -24,12 +30,18 @@ type Replacer interface {
|
||||
// the value of the corresponding key in the parameters.
|
||||
// If the value is an empty string, the registered templates will be searched for that
|
||||
// placeholder. If no template is found, the placeholder will be replaced by the empty string.
|
||||
// A placeholder name may consist on of the letters a-z.
|
||||
Replace(str, placeholder, value string) string
|
||||
// A placeholder name may consist on of the letters a-z and ':'. The placeholder may contain
|
||||
// a glob pattern to find the appropriate template.
|
||||
Replace(str, placeholder, value string, vars map[string]string, config *app.Config, section string) string
|
||||
}
|
||||
|
||||
type template struct {
|
||||
fn TemplateFn
|
||||
defaults map[string]string
|
||||
}
|
||||
|
||||
type replacer struct {
|
||||
templates map[string]func() string
|
||||
templates map[string]template
|
||||
|
||||
re *regexp.Regexp
|
||||
templateRe *regexp.Regexp
|
||||
@@ -38,41 +50,51 @@ type replacer struct {
|
||||
// New returns a Replacer
|
||||
func New() Replacer {
|
||||
r := &replacer{
|
||||
templates: make(map[string]func() string),
|
||||
re: regexp.MustCompile(`{([a-z]+)(?:\^(.))?(?:,(.*?))?}`),
|
||||
templateRe: regexp.MustCompile(`{([a-z]+)}`),
|
||||
templates: make(map[string]template),
|
||||
re: regexp.MustCompile(`{([a-z:]+)(?:\^(.))?(?:,(.*?))?}`),
|
||||
templateRe: regexp.MustCompile(`{([a-z:]+)}`),
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *replacer) RegisterTemplate(placeholder, template string) {
|
||||
r.templates[placeholder] = func() string { return template }
|
||||
func (r *replacer) RegisterTemplate(placeholder, tmpl string, defaults map[string]string) {
|
||||
r.RegisterTemplateFunc(placeholder, func(*app.Config, string) string { return tmpl }, defaults)
|
||||
}
|
||||
|
||||
func (r *replacer) RegisterTemplateFunc(placeholder string, template func() string) {
|
||||
r.templates[placeholder] = template
|
||||
func (r *replacer) RegisterTemplateFunc(placeholder string, templateFn TemplateFn, defaults map[string]string) {
|
||||
r.templates[placeholder] = template{
|
||||
fn: templateFn,
|
||||
defaults: defaults,
|
||||
}
|
||||
}
|
||||
|
||||
func (r *replacer) Replace(str, placeholder, value string) string {
|
||||
func (r *replacer) Replace(str, placeholder, value string, vars map[string]string, config *app.Config, section string) string {
|
||||
str = r.re.ReplaceAllStringFunc(str, func(match string) string {
|
||||
matches := r.re.FindStringSubmatch(match)
|
||||
if matches[1] != placeholder {
|
||||
|
||||
if ok, _ := glob.Match(placeholder, matches[1], ':'); !ok {
|
||||
return match
|
||||
}
|
||||
|
||||
placeholder := matches[1]
|
||||
|
||||
// We need a copy from the value
|
||||
v := value
|
||||
var tmpl template = template{
|
||||
fn: func(*app.Config, string) string { return v },
|
||||
}
|
||||
|
||||
// Check for a registered template
|
||||
if len(v) == 0 {
|
||||
tmplFunc, ok := r.templates[placeholder]
|
||||
t, ok := r.templates[placeholder]
|
||||
if ok {
|
||||
v = tmplFunc()
|
||||
tmpl = t
|
||||
}
|
||||
}
|
||||
|
||||
v = r.compileTemplate(v, matches[3])
|
||||
v = tmpl.fn(config, section)
|
||||
v = r.compileTemplate(v, matches[3], vars, tmpl.defaults)
|
||||
|
||||
if len(matches[2]) != 0 {
|
||||
// If there's a character to escape, we also have to escape the
|
||||
@@ -97,13 +119,18 @@ func (r *replacer) Replace(str, placeholder, value string) string {
|
||||
// placeholder name and will be replaced with the value. The resulting string is "Hello World!".
|
||||
// If a placeholder name is not present in the params string, it will not be replaced. The key
|
||||
// and values can be escaped as in net/url.QueryEscape.
|
||||
func (r *replacer) compileTemplate(str, params string) string {
|
||||
if len(params) == 0 {
|
||||
func (r *replacer) compileTemplate(str, params string, vars map[string]string, defaults map[string]string) string {
|
||||
if len(params) == 0 && len(defaults) == 0 {
|
||||
return str
|
||||
}
|
||||
|
||||
p := make(map[string]string)
|
||||
|
||||
// Copy the defaults
|
||||
for key, value := range defaults {
|
||||
p[key] = value
|
||||
}
|
||||
|
||||
// taken from net/url.ParseQuery
|
||||
for params != "" {
|
||||
var key string
|
||||
@@ -111,15 +138,22 @@ func (r *replacer) compileTemplate(str, params string) string {
|
||||
if key == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
key, value, _ := strings.Cut(key, "=")
|
||||
key, err := url.QueryUnescape(key)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
value, err = url.QueryUnescape(value)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
for name, v := range vars {
|
||||
value = strings.ReplaceAll(value, "$"+name, v)
|
||||
}
|
||||
|
||||
p[key] = value
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ package replace
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/datarhei/core/v16/restream/app"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
@@ -24,28 +25,56 @@ func TestReplace(t *testing.T) {
|
||||
r := New()
|
||||
|
||||
for _, e := range samples {
|
||||
replaced := r.Replace(e[0], "foobar", foobar)
|
||||
replaced := r.Replace(e[0], "foobar", foobar, nil, nil, "")
|
||||
require.Equal(t, e[1], replaced, e[0])
|
||||
}
|
||||
|
||||
replaced := r.Replace("{foobar}", "foobar", "")
|
||||
replaced := r.Replace("{foobar}", "foobar", "", nil, nil, "")
|
||||
require.Equal(t, "", replaced)
|
||||
}
|
||||
|
||||
func TestReplaceTemplate(t *testing.T) {
|
||||
r := New()
|
||||
r.RegisterTemplate("foobar", "Hello {who}! {what}?")
|
||||
r.RegisterTemplate("foo:bar", "Hello {who}! {what}?", nil)
|
||||
|
||||
replaced := r.Replace("{foobar,who=World}", "foobar", "")
|
||||
replaced := r.Replace("{foo:bar,who=World}", "foo:bar", "", nil, nil, "")
|
||||
require.Equal(t, "Hello World! {what}?", replaced)
|
||||
|
||||
replaced = r.Replace("{foobar,who=World,what=E%3dmc^2}", "foobar", "")
|
||||
replaced = r.Replace("{foo:bar,who=World,what=E%3dmc^2}", "foo:bar", "", nil, nil, "")
|
||||
require.Equal(t, "Hello World! E=mc^2?", replaced)
|
||||
|
||||
replaced = r.Replace("{foobar^:,who=World,what=E%3dmc:2}", "foobar", "")
|
||||
replaced = r.Replace("{foo:bar^:,who=World,what=E%3dmc:2}", "foo:bar", "", nil, nil, "")
|
||||
require.Equal(t, "Hello World! E=mc\\\\:2?", replaced)
|
||||
}
|
||||
|
||||
func TestReplaceTemplateFunc(t *testing.T) {
|
||||
r := New()
|
||||
r.RegisterTemplateFunc("foo:bar", func(config *app.Config, kind string) string { return "Hello {who}! {what}?" }, nil)
|
||||
|
||||
replaced := r.Replace("{foo:bar,who=World}", "foo:bar", "", nil, nil, "")
|
||||
require.Equal(t, "Hello World! {what}?", replaced)
|
||||
|
||||
replaced = r.Replace("{foo:bar,who=World,what=E%3dmc^2}", "foo:bar", "", nil, nil, "")
|
||||
require.Equal(t, "Hello World! E=mc^2?", replaced)
|
||||
|
||||
replaced = r.Replace("{foo:bar^:,who=World,what=E%3dmc:2}", "foo:bar", "", nil, nil, "")
|
||||
require.Equal(t, "Hello World! E=mc\\\\:2?", replaced)
|
||||
}
|
||||
|
||||
func TestReplaceTemplateDefaults(t *testing.T) {
|
||||
r := New()
|
||||
r.RegisterTemplate("foobar", "Hello {who}! {what}?", map[string]string{
|
||||
"who": "someone",
|
||||
"what": "something",
|
||||
})
|
||||
|
||||
replaced := r.Replace("{foobar}", "foobar", "", nil, nil, "")
|
||||
require.Equal(t, "Hello someone! something?", replaced)
|
||||
|
||||
replaced = r.Replace("{foobar,who=World}", "foobar", "", nil, nil, "")
|
||||
require.Equal(t, "Hello World! something?", replaced)
|
||||
}
|
||||
|
||||
func TestReplaceCompileTemplate(t *testing.T) {
|
||||
samples := [][3]string{
|
||||
{"Hello {who}!", "who=World", "Hello World!"},
|
||||
@@ -58,7 +87,58 @@ func TestReplaceCompileTemplate(t *testing.T) {
|
||||
r := New().(*replacer)
|
||||
|
||||
for _, e := range samples {
|
||||
replaced := r.compileTemplate(e[0], e[1])
|
||||
replaced := r.compileTemplate(e[0], e[1], nil, nil)
|
||||
require.Equal(t, e[2], replaced, e[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplaceCompileTemplateDefaults(t *testing.T) {
|
||||
samples := [][3]string{
|
||||
{"Hello {who}!", "", "Hello someone!"},
|
||||
{"Hello {who}!", "who=World", "Hello World!"},
|
||||
{"Hello {who}! {what}?", "who=World", "Hello World! something?"},
|
||||
{"Hello {who}! {what}?", "who=World,what=Yeah", "Hello World! Yeah?"},
|
||||
{"Hello {who}! {what}?", "who=World,what=", "Hello World! ?"},
|
||||
}
|
||||
|
||||
r := New().(*replacer)
|
||||
|
||||
for _, e := range samples {
|
||||
replaced := r.compileTemplate(e[0], e[1], nil, map[string]string{
|
||||
"who": "someone",
|
||||
"what": "something",
|
||||
})
|
||||
require.Equal(t, e[2], replaced, e[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplaceCompileTemplateWithVars(t *testing.T) {
|
||||
samples := [][3]string{
|
||||
{"Hello {who}!", "who=$processid", "Hello 123456789!"},
|
||||
{"Hello {who}! {what}?", "who=$location", "Hello World! {what}?"},
|
||||
{"Hello {who}! {what}?", "who=$location,what=Yeah", "Hello World! Yeah?"},
|
||||
{"Hello {who}! {what}?", "who=$location,what=$processid", "Hello World! 123456789?"},
|
||||
{"Hello {who}!", "who=$processidxxx", "Hello 123456789xxx!"},
|
||||
}
|
||||
|
||||
vars := map[string]string{
|
||||
"processid": "123456789",
|
||||
"location": "World",
|
||||
}
|
||||
|
||||
r := New().(*replacer)
|
||||
|
||||
for _, e := range samples {
|
||||
replaced := r.compileTemplate(e[0], e[1], vars, nil)
|
||||
require.Equal(t, e[2], replaced, e[0])
|
||||
}
|
||||
}
|
||||
|
||||
func TestReplaceGlob(t *testing.T) {
|
||||
r := New()
|
||||
r.RegisterTemplate("foo:bar", "Hello foobar", nil)
|
||||
r.RegisterTemplate("foo:baz", "Hello foobaz", nil)
|
||||
|
||||
replaced := r.Replace("{foo:baz}, {foo:bar}", "foo:*", "", nil, nil, "")
|
||||
require.Equal(t, "Hello foobaz, Hello foobar", replaced)
|
||||
}
|
||||
|
||||
@@ -30,30 +30,31 @@ import (
|
||||
|
||||
// The Restreamer interface
|
||||
type Restreamer interface {
|
||||
ID() string // ID of this instance
|
||||
Name() string // Arbitrary name of this instance
|
||||
CreatedAt() time.Time // Time of when this instance has been created
|
||||
Start() // Start all processes that have a "start" order
|
||||
Stop() // Stop all running process but keep their "start" order
|
||||
AddProcess(config *app.Config) error // Add a new process
|
||||
GetProcessIDs(idpattern, refpattern string) []string // Get a list of process IDs based on patterns for ID and reference
|
||||
DeleteProcess(id string) error // Delete a process
|
||||
UpdateProcess(id string, config *app.Config) error // Update a process
|
||||
StartProcess(id string) error // Start a process
|
||||
StopProcess(id string) error // Stop a process
|
||||
RestartProcess(id string) error // Restart a process
|
||||
ReloadProcess(id string) error // Reload a process
|
||||
GetProcess(id string) (*app.Process, error) // Get a process
|
||||
GetProcessState(id string) (*app.State, error) // Get the state of a process
|
||||
GetProcessLog(id string) (*app.Log, error) // Get the logs of a process
|
||||
GetPlayout(id, inputid string) (string, error) // Get the URL of the playout API for a process
|
||||
Probe(id string) app.Probe // Probe a process
|
||||
Skills() skills.Skills // Get the ffmpeg skills
|
||||
ReloadSkills() error // Reload the ffmpeg skills
|
||||
SetProcessMetadata(id, key string, data interface{}) error // Set metatdata to a process
|
||||
GetProcessMetadata(id, key string) (interface{}, error) // Get previously set metadata from a process
|
||||
SetMetadata(key string, data interface{}) error // Set general metadata
|
||||
GetMetadata(key string) (interface{}, error) // Get previously set general metadata
|
||||
ID() string // ID of this instance
|
||||
Name() string // Arbitrary name of this instance
|
||||
CreatedAt() time.Time // Time of when this instance has been created
|
||||
Start() // Start all processes that have a "start" order
|
||||
Stop() // Stop all running process but keep their "start" order
|
||||
AddProcess(config *app.Config) error // Add a new process
|
||||
GetProcessIDs(idpattern, refpattern string) []string // Get a list of process IDs based on patterns for ID and reference
|
||||
DeleteProcess(id string) error // Delete a process
|
||||
UpdateProcess(id string, config *app.Config) error // Update a process
|
||||
StartProcess(id string) error // Start a process
|
||||
StopProcess(id string) error // Stop a process
|
||||
RestartProcess(id string) error // Restart a process
|
||||
ReloadProcess(id string) error // Reload a process
|
||||
GetProcess(id string) (*app.Process, error) // Get a process
|
||||
GetProcessState(id string) (*app.State, error) // Get the state of a process
|
||||
GetProcessLog(id string) (*app.Log, error) // Get the logs of a process
|
||||
GetPlayout(id, inputid string) (string, error) // Get the URL of the playout API for a process
|
||||
Probe(id string) app.Probe // Probe a process
|
||||
ProbeWithTimeout(id string, timeout time.Duration) app.Probe // Probe a process with specific timeout
|
||||
Skills() skills.Skills // Get the ffmpeg skills
|
||||
ReloadSkills() error // Reload the ffmpeg skills
|
||||
SetProcessMetadata(id, key string, data interface{}) error // Set metatdata to a process
|
||||
GetProcessMetadata(id, key string) (interface{}, error) // Get previously set metadata from a process
|
||||
SetMetadata(key string, data interface{}) error // Set general metadata
|
||||
GetMetadata(key string) (interface{}, error) // Get previously set general metadata
|
||||
}
|
||||
|
||||
// Config is the required configuration for a new restreamer instance.
|
||||
@@ -61,8 +62,7 @@ type Config struct {
|
||||
ID string
|
||||
Name string
|
||||
Store store.Store
|
||||
DiskFS fs.Filesystem
|
||||
MemFS fs.Filesystem
|
||||
Filesystems []fs.Filesystem
|
||||
Replace replace.Replacer
|
||||
FFmpeg ffmpeg.FFmpeg
|
||||
MaxProcesses int64
|
||||
@@ -93,8 +93,8 @@ type restream struct {
|
||||
maxProc int64
|
||||
nProc int64
|
||||
fs struct {
|
||||
diskfs rfs.Filesystem
|
||||
memfs rfs.Filesystem
|
||||
list []rfs.Filesystem
|
||||
diskfs []rfs.Filesystem
|
||||
stopObserver context.CancelFunc
|
||||
}
|
||||
replace replace.Replacer
|
||||
@@ -124,29 +124,28 @@ func New(config Config) (Restreamer, error) {
|
||||
}
|
||||
|
||||
if r.store == nil {
|
||||
r.store = store.NewDummyStore(store.DummyConfig{})
|
||||
dummyfs, _ := fs.NewMemFilesystem(fs.MemConfig{})
|
||||
s, err := store.NewJSON(store.JSONConfig{
|
||||
Filesystem: dummyfs,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r.store = s
|
||||
}
|
||||
|
||||
if config.DiskFS != nil {
|
||||
r.fs.diskfs = rfs.New(rfs.Config{
|
||||
FS: config.DiskFS,
|
||||
Logger: r.logger.WithComponent("Cleanup").WithField("type", "diskfs"),
|
||||
for _, fs := range config.Filesystems {
|
||||
fs := rfs.New(rfs.Config{
|
||||
FS: fs,
|
||||
Logger: r.logger.WithComponent("Cleanup"),
|
||||
})
|
||||
} else {
|
||||
r.fs.diskfs = rfs.New(rfs.Config{
|
||||
FS: fs.NewDummyFilesystem(),
|
||||
})
|
||||
}
|
||||
|
||||
if config.MemFS != nil {
|
||||
r.fs.memfs = rfs.New(rfs.Config{
|
||||
FS: config.MemFS,
|
||||
Logger: r.logger.WithComponent("Cleanup").WithField("type", "memfs"),
|
||||
})
|
||||
} else {
|
||||
r.fs.memfs = rfs.New(rfs.Config{
|
||||
FS: fs.NewDummyFilesystem(),
|
||||
})
|
||||
r.fs.list = append(r.fs.list, fs)
|
||||
|
||||
// Add the diskfs filesystems also to a separate array. We need it later for input and output validation
|
||||
if fs.Type() == "disk" {
|
||||
r.fs.diskfs = append(r.fs.diskfs, fs)
|
||||
}
|
||||
}
|
||||
|
||||
if r.replace == nil {
|
||||
@@ -185,12 +184,16 @@ func (r *restream) Start() {
|
||||
r.setCleanup(id, t.config)
|
||||
}
|
||||
|
||||
r.fs.diskfs.Start()
|
||||
r.fs.memfs.Start()
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
r.fs.stopObserver = cancel
|
||||
go r.observe(ctx, 10*time.Second)
|
||||
|
||||
for _, fs := range r.fs.list {
|
||||
fs.Start()
|
||||
|
||||
if fs.Type() == "disk" {
|
||||
go r.observe(ctx, fs, 10*time.Second)
|
||||
}
|
||||
}
|
||||
|
||||
r.stopOnce = sync.Once{}
|
||||
})
|
||||
@@ -214,14 +217,16 @@ func (r *restream) Stop() {
|
||||
|
||||
r.fs.stopObserver()
|
||||
|
||||
r.fs.diskfs.Stop()
|
||||
r.fs.memfs.Stop()
|
||||
// Stop the cleanup jobs
|
||||
for _, fs := range r.fs.list {
|
||||
fs.Stop()
|
||||
}
|
||||
|
||||
r.startOnce = sync.Once{}
|
||||
})
|
||||
}
|
||||
|
||||
func (r *restream) observe(ctx context.Context, interval time.Duration) {
|
||||
func (r *restream) observe(ctx context.Context, fs fs.Filesystem, interval time.Duration) {
|
||||
ticker := time.NewTicker(interval)
|
||||
defer ticker.Stop()
|
||||
|
||||
@@ -230,14 +235,14 @@ func (r *restream) observe(ctx context.Context, interval time.Duration) {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-ticker.C:
|
||||
size, limit := r.fs.diskfs.Size()
|
||||
size, limit := fs.Size()
|
||||
isFull := false
|
||||
if limit > 0 && size >= limit {
|
||||
isFull = true
|
||||
}
|
||||
|
||||
if isFull {
|
||||
// Stop all tasks that write to disk
|
||||
// Stop all tasks that write to this filesystem
|
||||
r.lock.Lock()
|
||||
for id, t := range r.tasks {
|
||||
if !t.valid {
|
||||
@@ -252,7 +257,7 @@ func (r *restream) observe(ctx context.Context, interval time.Duration) {
|
||||
continue
|
||||
}
|
||||
|
||||
r.logger.Warn().Log("Shutting down because disk is full")
|
||||
r.logger.Warn().Log("Shutting down because filesystem is full")
|
||||
r.stopProcess(id)
|
||||
}
|
||||
r.lock.Unlock()
|
||||
@@ -290,7 +295,7 @@ func (r *restream) load() error {
|
||||
}
|
||||
|
||||
// Replace all placeholders in the config
|
||||
t.config.ResolvePlaceholders(r.replace)
|
||||
resolvePlaceholders(t.config, r.replace)
|
||||
|
||||
tasks[id] = t
|
||||
}
|
||||
@@ -463,7 +468,7 @@ func (r *restream) createTask(config *app.Config) (*task, error) {
|
||||
logger: r.logger.WithField("id", process.ID),
|
||||
}
|
||||
|
||||
t.config.ResolvePlaceholders(r.replace)
|
||||
resolvePlaceholders(t.config, r.replace)
|
||||
|
||||
err := r.resolveAddresses(r.tasks, t.config)
|
||||
if err != nil {
|
||||
@@ -502,34 +507,50 @@ func (r *restream) createTask(config *app.Config) (*task, error) {
|
||||
}
|
||||
|
||||
func (r *restream) setCleanup(id string, config *app.Config) {
|
||||
rePrefix := regexp.MustCompile(`^([a-z]+):`)
|
||||
|
||||
for _, output := range config.Output {
|
||||
for _, c := range output.Cleanup {
|
||||
if strings.HasPrefix(c.Pattern, "memfs:") {
|
||||
r.fs.memfs.SetCleanup(id, []rfs.Pattern{
|
||||
{
|
||||
Pattern: strings.TrimPrefix(c.Pattern, "memfs:"),
|
||||
MaxFiles: c.MaxFiles,
|
||||
MaxFileAge: time.Duration(c.MaxFileAge) * time.Second,
|
||||
PurgeOnDelete: c.PurgeOnDelete,
|
||||
},
|
||||
})
|
||||
} else if strings.HasPrefix(c.Pattern, "diskfs:") {
|
||||
r.fs.diskfs.SetCleanup(id, []rfs.Pattern{
|
||||
{
|
||||
Pattern: strings.TrimPrefix(c.Pattern, "diskfs:"),
|
||||
MaxFiles: c.MaxFiles,
|
||||
MaxFileAge: time.Duration(c.MaxFileAge) * time.Second,
|
||||
PurgeOnDelete: c.PurgeOnDelete,
|
||||
},
|
||||
matches := rePrefix.FindStringSubmatch(c.Pattern)
|
||||
if matches == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
name := matches[1]
|
||||
|
||||
// Support legacy names
|
||||
if name == "diskfs" {
|
||||
name = "disk"
|
||||
} else if name == "memfs" {
|
||||
name = "mem"
|
||||
}
|
||||
|
||||
for _, fs := range r.fs.list {
|
||||
if fs.Name() != name {
|
||||
continue
|
||||
}
|
||||
|
||||
pattern := rfs.Pattern{
|
||||
Pattern: rePrefix.ReplaceAllString(c.Pattern, ""),
|
||||
MaxFiles: c.MaxFiles,
|
||||
MaxFileAge: time.Duration(c.MaxFileAge) * time.Second,
|
||||
PurgeOnDelete: c.PurgeOnDelete,
|
||||
}
|
||||
|
||||
fs.SetCleanup(id, []rfs.Pattern{
|
||||
pattern,
|
||||
})
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *restream) unsetCleanup(id string) {
|
||||
r.fs.diskfs.UnsetCleanup(id)
|
||||
r.fs.memfs.UnsetCleanup(id)
|
||||
for _, fs := range r.fs.list {
|
||||
fs.UnsetCleanup(id)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *restream) setPlayoutPorts(t *task) error {
|
||||
@@ -618,9 +639,23 @@ func (r *restream) validateConfig(config *app.Config) (bool, error) {
|
||||
return false, fmt.Errorf("the address for input '#%s:%s' must not be empty", config.ID, io.ID)
|
||||
}
|
||||
|
||||
io.Address, err = r.validateInputAddress(io.Address, r.fs.diskfs.Base())
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err)
|
||||
if len(r.fs.diskfs) != 0 {
|
||||
maxFails := 0
|
||||
for _, fs := range r.fs.diskfs {
|
||||
io.Address, err = r.validateInputAddress(io.Address, fs.Metadata("base"))
|
||||
if err != nil {
|
||||
maxFails++
|
||||
}
|
||||
}
|
||||
|
||||
if maxFails == len(r.fs.diskfs) {
|
||||
return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err)
|
||||
}
|
||||
} else {
|
||||
io.Address, err = r.validateInputAddress(io.Address, "/")
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -650,15 +685,33 @@ func (r *restream) validateConfig(config *app.Config) (bool, error) {
|
||||
return false, fmt.Errorf("the address for output '#%s:%s' must not be empty", config.ID, io.ID)
|
||||
}
|
||||
|
||||
isFile := false
|
||||
if len(r.fs.diskfs) != 0 {
|
||||
maxFails := 0
|
||||
for _, fs := range r.fs.diskfs {
|
||||
isFile := false
|
||||
io.Address, isFile, err = r.validateOutputAddress(io.Address, fs.Metadata("base"))
|
||||
if err != nil {
|
||||
maxFails++
|
||||
}
|
||||
|
||||
io.Address, isFile, err = r.validateOutputAddress(io.Address, r.fs.diskfs.Base())
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err)
|
||||
}
|
||||
if isFile {
|
||||
hasFiles = true
|
||||
}
|
||||
}
|
||||
|
||||
if isFile {
|
||||
hasFiles = true
|
||||
if maxFails == len(r.fs.diskfs) {
|
||||
return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err)
|
||||
}
|
||||
} else {
|
||||
isFile := false
|
||||
io.Address, isFile, err = r.validateOutputAddress(io.Address, "/")
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err)
|
||||
}
|
||||
|
||||
if isFile {
|
||||
hasFiles = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1089,7 +1142,7 @@ func (r *restream) reloadProcess(id string) error {
|
||||
|
||||
t.config = t.process.Config.Clone()
|
||||
|
||||
t.config.ResolvePlaceholders(r.replace)
|
||||
resolvePlaceholders(t.config, r.replace)
|
||||
|
||||
err := r.resolveAddresses(r.tasks, t.config)
|
||||
if err != nil {
|
||||
@@ -1251,6 +1304,10 @@ func (r *restream) GetProcessLog(id string) (*app.Log, error) {
|
||||
}
|
||||
|
||||
func (r *restream) Probe(id string) app.Probe {
|
||||
return r.ProbeWithTimeout(id, 20*time.Second)
|
||||
}
|
||||
|
||||
func (r *restream) ProbeWithTimeout(id string, timeout time.Duration) app.Probe {
|
||||
r.lock.RLock()
|
||||
|
||||
appprobe := app.Probe{}
|
||||
@@ -1288,7 +1345,7 @@ func (r *restream) Probe(id string) app.Probe {
|
||||
ffmpeg, err := r.ffmpeg.New(ffmpeg.ProcessConfig{
|
||||
Reconnect: false,
|
||||
ReconnectDelay: 0,
|
||||
StaleTimeout: 20 * time.Second,
|
||||
StaleTimeout: timeout,
|
||||
Command: command,
|
||||
Parser: prober,
|
||||
Logger: task.logger,
|
||||
@@ -1437,3 +1494,97 @@ func (r *restream) GetMetadata(key string) (interface{}, error) {
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// resolvePlaceholders replaces all placeholders in the config. The config
|
||||
// will be modified in place.
|
||||
func resolvePlaceholders(config *app.Config, r replace.Replacer) {
|
||||
vars := map[string]string{
|
||||
"processid": config.ID,
|
||||
"reference": config.Reference,
|
||||
}
|
||||
|
||||
for i, option := range config.Options {
|
||||
// Replace any known placeholders
|
||||
option = r.Replace(option, "diskfs", "", vars, config, "global")
|
||||
option = r.Replace(option, "fs:*", "", vars, config, "global")
|
||||
|
||||
config.Options[i] = option
|
||||
}
|
||||
|
||||
// Resolving the given inputs
|
||||
for i, input := range config.Input {
|
||||
// Replace any known placeholders
|
||||
input.ID = r.Replace(input.ID, "processid", config.ID, nil, nil, "input")
|
||||
input.ID = r.Replace(input.ID, "reference", config.Reference, nil, nil, "input")
|
||||
|
||||
vars["inputid"] = input.ID
|
||||
|
||||
input.Address = r.Replace(input.Address, "inputid", input.ID, nil, nil, "input")
|
||||
input.Address = r.Replace(input.Address, "processid", config.ID, nil, nil, "input")
|
||||
input.Address = r.Replace(input.Address, "reference", config.Reference, nil, nil, "input")
|
||||
input.Address = r.Replace(input.Address, "diskfs", "", vars, config, "input")
|
||||
input.Address = r.Replace(input.Address, "memfs", "", vars, config, "input")
|
||||
input.Address = r.Replace(input.Address, "fs:*", "", vars, config, "input")
|
||||
input.Address = r.Replace(input.Address, "rtmp", "", vars, config, "input")
|
||||
input.Address = r.Replace(input.Address, "srt", "", vars, config, "input")
|
||||
|
||||
for j, option := range input.Options {
|
||||
// Replace any known placeholders
|
||||
option = r.Replace(option, "inputid", input.ID, nil, nil, "input")
|
||||
option = r.Replace(option, "processid", config.ID, nil, nil, "input")
|
||||
option = r.Replace(option, "reference", config.Reference, nil, nil, "input")
|
||||
option = r.Replace(option, "diskfs", "", vars, config, "input")
|
||||
option = r.Replace(option, "memfs", "", vars, config, "input")
|
||||
option = r.Replace(option, "fs:*", "", vars, config, "input")
|
||||
|
||||
input.Options[j] = option
|
||||
}
|
||||
|
||||
delete(vars, "inputid")
|
||||
|
||||
config.Input[i] = input
|
||||
}
|
||||
|
||||
// Resolving the given outputs
|
||||
for i, output := range config.Output {
|
||||
// Replace any known placeholders
|
||||
output.ID = r.Replace(output.ID, "processid", config.ID, nil, nil, "output")
|
||||
output.ID = r.Replace(output.ID, "reference", config.Reference, nil, nil, "output")
|
||||
|
||||
vars["outputid"] = output.ID
|
||||
|
||||
output.Address = r.Replace(output.Address, "outputid", output.ID, nil, nil, "output")
|
||||
output.Address = r.Replace(output.Address, "processid", config.ID, nil, nil, "output")
|
||||
output.Address = r.Replace(output.Address, "reference", config.Reference, nil, nil, "output")
|
||||
output.Address = r.Replace(output.Address, "diskfs", "", vars, config, "output")
|
||||
output.Address = r.Replace(output.Address, "memfs", "", vars, config, "output")
|
||||
output.Address = r.Replace(output.Address, "fs:*", "", vars, config, "output")
|
||||
output.Address = r.Replace(output.Address, "rtmp", "", vars, config, "output")
|
||||
output.Address = r.Replace(output.Address, "srt", "", vars, config, "output")
|
||||
|
||||
for j, option := range output.Options {
|
||||
// Replace any known placeholders
|
||||
option = r.Replace(option, "outputid", output.ID, nil, nil, "output")
|
||||
option = r.Replace(option, "processid", config.ID, nil, nil, "output")
|
||||
option = r.Replace(option, "reference", config.Reference, nil, nil, "output")
|
||||
option = r.Replace(option, "diskfs", "", vars, config, "output")
|
||||
option = r.Replace(option, "memfs", "", vars, config, "output")
|
||||
option = r.Replace(option, "fs:*", "", vars, config, "output")
|
||||
|
||||
output.Options[j] = option
|
||||
}
|
||||
|
||||
for j, cleanup := range output.Cleanup {
|
||||
// Replace any known placeholders
|
||||
cleanup.Pattern = r.Replace(cleanup.Pattern, "outputid", output.ID, nil, nil, "output")
|
||||
cleanup.Pattern = r.Replace(cleanup.Pattern, "processid", config.ID, nil, nil, "output")
|
||||
cleanup.Pattern = r.Replace(cleanup.Pattern, "reference", config.Reference, nil, nil, "output")
|
||||
|
||||
output.Cleanup[j] = cleanup
|
||||
}
|
||||
|
||||
delete(vars, "outputid")
|
||||
|
||||
config.Output[i] = output
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,11 +9,12 @@ import (
|
||||
"github.com/datarhei/core/v16/internal/testhelper"
|
||||
"github.com/datarhei/core/v16/net"
|
||||
"github.com/datarhei/core/v16/restream/app"
|
||||
"github.com/datarhei/core/v16/restream/replace"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmpeg.Validator) (Restreamer, error) {
|
||||
func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmpeg.Validator, replacer replace.Replacer) (Restreamer, error) {
|
||||
binary, err := testhelper.BuildBinary("ffmpeg", "../internal/testhelper")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build helper program: %w", err)
|
||||
@@ -30,7 +31,8 @@ func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmp
|
||||
}
|
||||
|
||||
rs, err := New(Config{
|
||||
FFmpeg: ffmpeg,
|
||||
FFmpeg: ffmpeg,
|
||||
Replace: replacer,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -77,7 +79,7 @@ func getDummyProcess() *app.Config {
|
||||
}
|
||||
|
||||
func TestAddProcess(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil)
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
@@ -97,7 +99,7 @@ func TestAddProcess(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAutostartProcess(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil)
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
@@ -112,7 +114,7 @@ func TestAutostartProcess(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAddInvalidProcess(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil)
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Invalid process ID
|
||||
@@ -180,7 +182,7 @@ func TestAddInvalidProcess(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRemoveProcess(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil)
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
@@ -195,24 +197,98 @@ func TestRemoveProcess(t *testing.T) {
|
||||
require.NotEqual(t, nil, err, "Unset process found (%s)", process.ID)
|
||||
}
|
||||
|
||||
func TestGetProcess(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil)
|
||||
func TestUpdateProcess(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
process1 := getDummyProcess()
|
||||
require.NotNil(t, process1)
|
||||
process1.ID = "process1"
|
||||
|
||||
rs.AddProcess(process)
|
||||
process2 := getDummyProcess()
|
||||
require.NotNil(t, process2)
|
||||
process2.ID = "process2"
|
||||
|
||||
_, err = rs.GetProcess(process.ID)
|
||||
require.Equal(t, nil, err, "Process not found (%s)", process.ID)
|
||||
err = rs.AddProcess(process1)
|
||||
require.Equal(t, nil, err)
|
||||
|
||||
err = rs.AddProcess(process2)
|
||||
require.Equal(t, nil, err)
|
||||
|
||||
process3 := getDummyProcess()
|
||||
require.NotNil(t, process3)
|
||||
process3.ID = "process2"
|
||||
|
||||
err = rs.UpdateProcess("process1", process3)
|
||||
require.Error(t, err)
|
||||
|
||||
process3.ID = "process3"
|
||||
err = rs.UpdateProcess("process1", process3)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = rs.GetProcess(process1.ID)
|
||||
require.Error(t, err)
|
||||
|
||||
_, err = rs.GetProcess(process3.ID)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestGetProcess(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process1 := getDummyProcess()
|
||||
process1.ID = "foo_aaa_1"
|
||||
process1.Reference = "foo_aaa_1"
|
||||
process2 := getDummyProcess()
|
||||
process2.ID = "bar_bbb_2"
|
||||
process2.Reference = "bar_bbb_2"
|
||||
process3 := getDummyProcess()
|
||||
process3.ID = "foo_ccc_3"
|
||||
process3.Reference = "foo_ccc_3"
|
||||
process4 := getDummyProcess()
|
||||
process4.ID = "bar_ddd_4"
|
||||
process4.Reference = "bar_ddd_4"
|
||||
|
||||
rs.AddProcess(process1)
|
||||
rs.AddProcess(process2)
|
||||
rs.AddProcess(process3)
|
||||
rs.AddProcess(process4)
|
||||
|
||||
_, err = rs.GetProcess(process1.ID)
|
||||
require.Equal(t, nil, err)
|
||||
|
||||
list := rs.GetProcessIDs("", "")
|
||||
require.Len(t, list, 1, "expected 1 process")
|
||||
require.Equal(t, process.ID, list[0], "expected same process ID")
|
||||
require.Len(t, list, 4)
|
||||
require.ElementsMatch(t, []string{"foo_aaa_1", "bar_bbb_2", "foo_ccc_3", "bar_ddd_4"}, list)
|
||||
|
||||
list = rs.GetProcessIDs("foo_*", "")
|
||||
require.Len(t, list, 2)
|
||||
require.ElementsMatch(t, []string{"foo_aaa_1", "foo_ccc_3"}, list)
|
||||
|
||||
list = rs.GetProcessIDs("bar_*", "")
|
||||
require.Len(t, list, 2)
|
||||
require.ElementsMatch(t, []string{"bar_bbb_2", "bar_ddd_4"}, list)
|
||||
|
||||
list = rs.GetProcessIDs("*_bbb_*", "")
|
||||
require.Len(t, list, 1)
|
||||
require.ElementsMatch(t, []string{"bar_bbb_2"}, list)
|
||||
|
||||
list = rs.GetProcessIDs("", "foo_*")
|
||||
require.Len(t, list, 2)
|
||||
require.ElementsMatch(t, []string{"foo_aaa_1", "foo_ccc_3"}, list)
|
||||
|
||||
list = rs.GetProcessIDs("", "bar_*")
|
||||
require.Len(t, list, 2)
|
||||
require.ElementsMatch(t, []string{"bar_bbb_2", "bar_ddd_4"}, list)
|
||||
|
||||
list = rs.GetProcessIDs("", "*_bbb_*")
|
||||
require.Len(t, list, 1)
|
||||
require.ElementsMatch(t, []string{"bar_bbb_2"}, list)
|
||||
}
|
||||
|
||||
func TestStartProcess(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil)
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
@@ -238,7 +314,7 @@ func TestStartProcess(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStopProcess(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil)
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
@@ -263,7 +339,7 @@ func TestStopProcess(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestRestartProcess(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil)
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
@@ -288,7 +364,7 @@ func TestRestartProcess(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestReloadProcess(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil)
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
@@ -318,8 +394,21 @@ func TestReloadProcess(t *testing.T) {
|
||||
rs.StopProcess(process.ID)
|
||||
}
|
||||
|
||||
func TestProcessData(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil)
|
||||
func TestProbeProcess(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
|
||||
rs.AddProcess(process)
|
||||
|
||||
probe := rs.ProbeWithTimeout(process.ID, 5*time.Second)
|
||||
|
||||
require.Equal(t, 3, len(probe.Streams))
|
||||
}
|
||||
|
||||
func TestProcessMetadata(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
@@ -340,7 +429,7 @@ func TestProcessData(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestLog(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil)
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
@@ -373,7 +462,7 @@ func TestLog(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPlayoutNoRange(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil)
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
@@ -396,7 +485,7 @@ func TestPlayoutRange(t *testing.T) {
|
||||
portrange, err := net.NewPortrange(3000, 3001)
|
||||
require.NoError(t, err)
|
||||
|
||||
rs, err := getDummyRestreamer(portrange, nil, nil)
|
||||
rs, err := getDummyRestreamer(portrange, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
@@ -417,7 +506,7 @@ func TestPlayoutRange(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAddressReference(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil)
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process1 := getDummyProcess()
|
||||
@@ -449,7 +538,7 @@ func TestAddressReference(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestConfigValidation(t *testing.T) {
|
||||
rsi, err := getDummyRestreamer(nil, nil, nil)
|
||||
rsi, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
rs := rsi.(*restream)
|
||||
@@ -496,7 +585,7 @@ func TestConfigValidationFFmpeg(t *testing.T) {
|
||||
valOut, err := ffmpeg.NewValidator([]string{"^https?://", "^rtmp://"}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
rsi, err := getDummyRestreamer(nil, valIn, valOut)
|
||||
rsi, err := getDummyRestreamer(nil, valIn, valOut, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
rs := rsi.(*restream)
|
||||
@@ -522,7 +611,7 @@ func TestConfigValidationFFmpeg(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestOutputAddressValidation(t *testing.T) {
|
||||
rsi, err := getDummyRestreamer(nil, nil, nil)
|
||||
rsi, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
rs := rsi.(*restream)
|
||||
@@ -561,3 +650,196 @@ func TestOutputAddressValidation(t *testing.T) {
|
||||
require.Equal(t, r.path, path)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMetadata(t *testing.T) {
|
||||
rs, err := getDummyRestreamer(nil, nil, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := getDummyProcess()
|
||||
|
||||
data, _ := rs.GetMetadata("foobar")
|
||||
require.Equal(t, nil, data, "nothing should be stored under the key")
|
||||
|
||||
rs.SetMetadata("foobar", process)
|
||||
|
||||
data, _ = rs.GetMetadata("foobar")
|
||||
require.NotEqual(t, nil, data, "there should be something stored under the key")
|
||||
|
||||
p := data.(*app.Config)
|
||||
|
||||
require.Equal(t, process.ID, p.ID, "failed to retrieve stored data")
|
||||
}
|
||||
|
||||
func TestReplacer(t *testing.T) {
|
||||
replacer := replace.New()
|
||||
|
||||
replacer.RegisterTemplateFunc("diskfs", func(config *app.Config, section string) string {
|
||||
return "/mnt/diskfs"
|
||||
}, nil)
|
||||
|
||||
replacer.RegisterTemplateFunc("fs:disk", func(config *app.Config, section string) string {
|
||||
return "/mnt/diskfs"
|
||||
}, nil)
|
||||
|
||||
replacer.RegisterTemplateFunc("memfs", func(config *app.Config, section string) string {
|
||||
return "http://localhost/mnt/memfs"
|
||||
}, nil)
|
||||
|
||||
replacer.RegisterTemplateFunc("fs:mem", func(config *app.Config, section string) string {
|
||||
return "http://localhost/mnt/memfs"
|
||||
}, nil)
|
||||
|
||||
replacer.RegisterTemplateFunc("rtmp", func(config *app.Config, section string) string {
|
||||
return "rtmp://localhost/app/{name}?token=foobar"
|
||||
}, nil)
|
||||
|
||||
replacer.RegisterTemplateFunc("srt", func(config *app.Config, section string) string {
|
||||
template := "srt://localhost:6000?mode=caller&transtype=live&latency={latency}&streamid={name}"
|
||||
if section == "output" {
|
||||
template += ",mode:publish"
|
||||
} else {
|
||||
template += ",mode:request"
|
||||
}
|
||||
template += ",token:abcfoobar&passphrase=secret"
|
||||
|
||||
return template
|
||||
}, map[string]string{
|
||||
"latency": "20000", // 20 milliseconds, FFmpeg requires microseconds
|
||||
})
|
||||
|
||||
rsi, err := getDummyRestreamer(nil, nil, nil, replacer)
|
||||
require.NoError(t, err)
|
||||
|
||||
process := &app.Config{
|
||||
ID: "314159265359",
|
||||
Reference: "refref",
|
||||
Input: []app.ConfigIO{
|
||||
{
|
||||
ID: "in_{processid}_{reference}",
|
||||
Address: "input:{inputid}_process:{processid}_reference:{reference}_diskfs:{diskfs}/disk.txt_memfs:{memfs}/mem.txt_fsdisk:{fs:disk}/fsdisk.txt_fsmem:{fs:mem}/fsmem.txt_rtmp:{rtmp,name=pmtr}_srt:{srt,name=trs}_rtmp:{rtmp,name=$inputid}",
|
||||
Options: []string{
|
||||
"-f",
|
||||
"lavfi",
|
||||
"-re",
|
||||
"input:{inputid}",
|
||||
"process:{processid}",
|
||||
"reference:{reference}",
|
||||
"diskfs:{diskfs}/disk.txt",
|
||||
"memfs:{memfs}/mem.txt",
|
||||
"fsdisk:{fs:disk}/fsdisk.txt",
|
||||
"fsmem:{fs:mem}/$inputid.txt",
|
||||
},
|
||||
},
|
||||
},
|
||||
Output: []app.ConfigIO{
|
||||
{
|
||||
ID: "out_{processid}_{reference}",
|
||||
Address: "output:{outputid}_process:{processid}_reference:{reference}_diskfs:{diskfs}/disk.txt_memfs:{memfs}/mem.txt_fsdisk:{fs:disk}/fsdisk.txt_fsmem:{fs:mem}/fsmem.txt_rtmp:{rtmp,name=$processid}_srt:{srt,name=$reference,latency=42}_rtmp:{rtmp,name=$outputid}",
|
||||
Options: []string{
|
||||
"-codec",
|
||||
"copy",
|
||||
"-f",
|
||||
"null",
|
||||
"output:{outputid}",
|
||||
"process:{processid}",
|
||||
"reference:{reference}",
|
||||
"diskfs:{diskfs}/disk.txt",
|
||||
"memfs:{memfs}/mem.txt",
|
||||
"fsdisk:{fs:disk}/fsdisk.txt",
|
||||
"fsmem:{fs:mem}/$outputid.txt",
|
||||
},
|
||||
Cleanup: []app.ConfigIOCleanup{
|
||||
{
|
||||
Pattern: "pattern_{outputid}_{processid}_{reference}_{rtmp,name=$outputid}",
|
||||
MaxFiles: 0,
|
||||
MaxFileAge: 0,
|
||||
PurgeOnDelete: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Options: []string{
|
||||
"-loglevel",
|
||||
"info",
|
||||
"{diskfs}/foobar_on_disk.txt",
|
||||
"{memfs}/foobar_in_mem.txt",
|
||||
"{fs:disk}/foobar_on_disk_aswell.txt",
|
||||
"{fs:mem}/foobar_in_mem_aswell.txt",
|
||||
},
|
||||
Reconnect: true,
|
||||
ReconnectDelay: 10,
|
||||
Autostart: false,
|
||||
StaleTimeout: 0,
|
||||
}
|
||||
|
||||
err = rsi.AddProcess(process)
|
||||
require.NoError(t, err)
|
||||
|
||||
rs := rsi.(*restream)
|
||||
|
||||
process = &app.Config{
|
||||
ID: "314159265359",
|
||||
Reference: "refref",
|
||||
FFVersion: "^4.0.2",
|
||||
Input: []app.ConfigIO{
|
||||
{
|
||||
ID: "in_314159265359_refref",
|
||||
Address: "input:in_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/pmtr?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=20000&streamid=trs,mode:request,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/in_314159265359_refref?token=foobar",
|
||||
Options: []string{
|
||||
"-f",
|
||||
"lavfi",
|
||||
"-re",
|
||||
"input:in_314159265359_refref",
|
||||
"process:314159265359",
|
||||
"reference:refref",
|
||||
"diskfs:/mnt/diskfs/disk.txt",
|
||||
"memfs:http://localhost/mnt/memfs/mem.txt",
|
||||
"fsdisk:/mnt/diskfs/fsdisk.txt",
|
||||
"fsmem:http://localhost/mnt/memfs/$inputid.txt",
|
||||
},
|
||||
Cleanup: []app.ConfigIOCleanup{},
|
||||
},
|
||||
},
|
||||
Output: []app.ConfigIO{
|
||||
{
|
||||
ID: "out_314159265359_refref",
|
||||
Address: "output:out_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/314159265359?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=42&streamid=refref,mode:publish,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/out_314159265359_refref?token=foobar",
|
||||
Options: []string{
|
||||
"-codec",
|
||||
"copy",
|
||||
"-f",
|
||||
"null",
|
||||
"output:out_314159265359_refref",
|
||||
"process:314159265359",
|
||||
"reference:refref",
|
||||
"diskfs:/mnt/diskfs/disk.txt",
|
||||
"memfs:http://localhost/mnt/memfs/mem.txt",
|
||||
"fsdisk:/mnt/diskfs/fsdisk.txt",
|
||||
"fsmem:http://localhost/mnt/memfs/$outputid.txt",
|
||||
},
|
||||
Cleanup: []app.ConfigIOCleanup{
|
||||
{
|
||||
Pattern: "pattern_out_314159265359_refref_314159265359_refref_{rtmp,name=$outputid}",
|
||||
MaxFiles: 0,
|
||||
MaxFileAge: 0,
|
||||
PurgeOnDelete: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Options: []string{
|
||||
"-loglevel",
|
||||
"info",
|
||||
"/mnt/diskfs/foobar_on_disk.txt",
|
||||
"{memfs}/foobar_in_mem.txt",
|
||||
"/mnt/diskfs/foobar_on_disk_aswell.txt",
|
||||
"http://localhost/mnt/memfs/foobar_in_mem_aswell.txt",
|
||||
},
|
||||
Reconnect: true,
|
||||
ReconnectDelay: 10,
|
||||
Autostart: false,
|
||||
StaleTimeout: 0,
|
||||
}
|
||||
|
||||
require.Equal(t, process, rs.tasks["314159265359"].config)
|
||||
}
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"github.com/datarhei/core/v16/log"
|
||||
)
|
||||
|
||||
type DummyConfig struct {
|
||||
Logger log.Logger
|
||||
}
|
||||
|
||||
type dummyStore struct {
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
func NewDummyStore(config DummyConfig) Store {
|
||||
s := &dummyStore{
|
||||
logger: config.Logger,
|
||||
}
|
||||
|
||||
if s.logger == nil {
|
||||
s.logger = log.New("")
|
||||
}
|
||||
|
||||
return s
|
||||
}
|
||||
|
||||
func (sb *dummyStore) Store(data StoreData) error {
|
||||
sb.logger.Debug().Log("Data stored")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sb *dummyStore) Load() (StoreData, error) {
|
||||
sb.logger.Debug().Log("Data loaded")
|
||||
|
||||
return NewStoreData(), nil
|
||||
}
|
||||
@@ -4,24 +4,23 @@ import (
|
||||
gojson "encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
|
||||
"github.com/datarhei/core/v16/encoding/json"
|
||||
"github.com/datarhei/core/v16/io/file"
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/datarhei/core/v16/log"
|
||||
)
|
||||
|
||||
type JSONConfig struct {
|
||||
Filepath string
|
||||
FFVersion string
|
||||
Logger log.Logger
|
||||
Filesystem fs.Filesystem
|
||||
Filepath string // Full path to the database file
|
||||
Logger log.Logger
|
||||
}
|
||||
|
||||
type jsonStore struct {
|
||||
filepath string
|
||||
ffversion string
|
||||
logger log.Logger
|
||||
fs fs.Filesystem
|
||||
filepath string
|
||||
logger log.Logger
|
||||
|
||||
// Mutex to serialize access to the backend
|
||||
lock sync.RWMutex
|
||||
@@ -29,18 +28,26 @@ type jsonStore struct {
|
||||
|
||||
var version uint64 = 4
|
||||
|
||||
func NewJSONStore(config JSONConfig) Store {
|
||||
func NewJSON(config JSONConfig) (Store, error) {
|
||||
s := &jsonStore{
|
||||
filepath: config.Filepath,
|
||||
ffversion: config.FFVersion,
|
||||
logger: config.Logger,
|
||||
fs: config.Filesystem,
|
||||
filepath: config.Filepath,
|
||||
logger: config.Logger,
|
||||
}
|
||||
|
||||
if len(s.filepath) == 0 {
|
||||
s.filepath = "/db.json"
|
||||
}
|
||||
|
||||
if s.fs == nil {
|
||||
return nil, fmt.Errorf("no valid filesystem provided")
|
||||
}
|
||||
|
||||
if s.logger == nil {
|
||||
s.logger = log.New("")
|
||||
}
|
||||
|
||||
return s
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *jsonStore) Load() (StoreData, error) {
|
||||
@@ -79,28 +86,11 @@ func (s *jsonStore) store(filepath string, data StoreData) error {
|
||||
return err
|
||||
}
|
||||
|
||||
dir := path.Dir(filepath)
|
||||
name := path.Base(filepath)
|
||||
|
||||
tmpfile, err := os.CreateTemp(dir, name)
|
||||
_, _, err = s.fs.WriteFileSafe(filepath, jsondata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defer os.Remove(tmpfile.Name())
|
||||
|
||||
if _, err := tmpfile.Write(jsondata); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := tmpfile.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := file.Rename(tmpfile.Name(), filepath); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.logger.WithField("file", filepath).Debug().Log("Stored data")
|
||||
|
||||
return nil
|
||||
@@ -113,7 +103,7 @@ type storeVersion struct {
|
||||
func (s *jsonStore) load(filepath string, version uint64) (StoreData, error) {
|
||||
r := NewStoreData()
|
||||
|
||||
_, err := os.Stat(filepath)
|
||||
_, err := s.fs.Stat(filepath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return r, nil
|
||||
@@ -122,7 +112,7 @@ func (s *jsonStore) load(filepath string, version uint64) (StoreData, error) {
|
||||
return r, err
|
||||
}
|
||||
|
||||
jsondata, err := os.ReadFile(filepath)
|
||||
jsondata, err := s.fs.ReadFile(filepath)
|
||||
if err != nil {
|
||||
return r, err
|
||||
}
|
||||
|
||||
@@ -1,40 +1,61 @@
|
||||
package store
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/datarhei/core/v16/io/fs"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
store := NewJSONStore(JSONConfig{})
|
||||
func getFS(t *testing.T) fs.Filesystem {
|
||||
fs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{
|
||||
Root: ".",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := fs.Stat("./fixtures/v4_empty.json")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "/fixtures/v4_empty.json", info.Name())
|
||||
|
||||
return fs
|
||||
}
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
store, err := NewJSON(JSONConfig{
|
||||
Filesystem: getFS(t),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NotEmpty(t, store)
|
||||
}
|
||||
|
||||
func TestLoad(t *testing.T) {
|
||||
store := NewJSONStore(JSONConfig{
|
||||
Filepath: "./fixtures/v4_empty.json",
|
||||
store, err := NewJSON(JSONConfig{
|
||||
Filesystem: getFS(t),
|
||||
Filepath: "./fixtures/v4_empty.json",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err := store.Load()
|
||||
require.Equal(t, nil, err)
|
||||
_, err = store.Load()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestLoadFailed(t *testing.T) {
|
||||
store := NewJSONStore(JSONConfig{
|
||||
Filepath: "./fixtures/v4_invalid.json",
|
||||
store, err := NewJSON(JSONConfig{
|
||||
Filesystem: getFS(t),
|
||||
Filepath: "./fixtures/v4_invalid.json",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err := store.Load()
|
||||
require.NotEqual(t, nil, err)
|
||||
_, err = store.Load()
|
||||
require.Error(t, err)
|
||||
}
|
||||
|
||||
func TestIsEmpty(t *testing.T) {
|
||||
store := NewJSONStore(JSONConfig{
|
||||
Filepath: "./fixtures/v4_empty.json",
|
||||
store, err := NewJSON(JSONConfig{
|
||||
Filesystem: getFS(t),
|
||||
Filepath: "./fixtures/v4_empty.json",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := store.Load()
|
||||
require.NoError(t, err)
|
||||
@@ -42,9 +63,11 @@ func TestIsEmpty(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNotExists(t *testing.T) {
|
||||
store := NewJSONStore(JSONConfig{
|
||||
Filepath: "./fixtures/v4_notexist.json",
|
||||
store, err := NewJSON(JSONConfig{
|
||||
Filesystem: getFS(t),
|
||||
Filepath: "./fixtures/v4_notexist.json",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := store.Load()
|
||||
require.NoError(t, err)
|
||||
@@ -52,11 +75,14 @@ func TestNotExists(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestStore(t *testing.T) {
|
||||
os.Remove("./fixtures/v4_store.json")
|
||||
fs := getFS(t)
|
||||
fs.Remove("./fixtures/v4_store.json")
|
||||
|
||||
store := NewJSONStore(JSONConfig{
|
||||
Filepath: "./fixtures/v4_store.json",
|
||||
store, err := NewJSON(JSONConfig{
|
||||
Filesystem: fs,
|
||||
Filepath: "./fixtures/v4_store.json",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := store.Load()
|
||||
require.NoError(t, err)
|
||||
@@ -70,13 +96,15 @@ func TestStore(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, data, data2)
|
||||
|
||||
os.Remove("./fixtures/v4_store.json")
|
||||
fs.Remove("./fixtures/v4_store.json")
|
||||
}
|
||||
|
||||
func TestInvalidVersion(t *testing.T) {
|
||||
store := NewJSONStore(JSONConfig{
|
||||
Filepath: "./fixtures/v3_empty.json",
|
||||
store, err := NewJSON(JSONConfig{
|
||||
Filesystem: getFS(t),
|
||||
Filepath: "./fixtures/v3_empty.json",
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
data, err := store.Load()
|
||||
require.Error(t, err)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user