diff --git a/.github/workflows/build_bundle-rpi.yaml b/.github/workflows/build_bundle-rpi.yaml index ea000f4e..420b7eac 100644 --- a/.github/workflows/build_bundle-rpi.yaml +++ b/.github/workflows/build_bundle-rpi.yaml @@ -62,7 +62,7 @@ jobs: build-args: | CORE_IMAGE=datarhei/base:${{ env.OS_NAME }}-core-${{ env.OS_VERSION }}-${{ env.CORE_VERSION }} FFMPEG_IMAGE=datarhei/base:${{ env.OS_NAME }}-ffmpeg-rpi-${{ env.OS_VERSION }}-${{ env.FFMPEG_VERSION }} - platforms: linux/arm/v7,linux/arm/v6,linux/arm64 + platforms: linux/arm/v7,linux/arm64 push: true tags: | datarhei/core:rpi-${{ env.CORE_VERSION }} diff --git a/.github/workflows/go-tests.yml b/.github/workflows/go-tests.yml index 95aad096..d0b86012 100644 --- a/.github/workflows/go-tests.yml +++ b/.github/workflows/go-tests.yml @@ -3,20 +3,20 @@ name: tests on: [push, pull_request] jobs: - build: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v2 - with: - fetch-depth: 2 - - uses: actions/setup-go@v2 - with: - go-version: '1.18' - - name: Run coverage - run: go test -coverprofile=coverage.out -covermode=atomic -v ./... - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v2 - with: - token: ${{ secrets.CODECOV_TOKEN }} - files: coverage.out - flags: unit-linux + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 2 + - uses: actions/setup-go@v2 + with: + go-version: "1.19" + - name: Run coverage + run: go test -coverprofile=coverage.out -covermode=atomic -v ./... + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v2 + with: + token: ${{ secrets.CODECOV_TOKEN }} + files: coverage.out + flags: unit-linux diff --git a/.github_build/Build.alpine.env b/.github_build/Build.alpine.env index a0bd0a5c..ced95107 100644 --- a/.github_build/Build.alpine.env +++ b/.github_build/Build.alpine.env @@ -1,5 +1,5 @@ # CORE ALPINE BASE IMAGE OS_NAME=alpine -OS_VERSION=3.15 -GOLANG_IMAGE=golang:1.18.6-alpine3.15 -CORE_VERSION=16.10.1 +OS_VERSION=3.16 +GOLANG_IMAGE=golang:1.20-alpine3.16 +CORE_VERSION=16.12.0 diff --git a/.github_build/Build.bundle.cuda.env b/.github_build/Build.bundle.cuda.env index 911bf09b..808b4583 100644 --- a/.github_build/Build.bundle.cuda.env +++ b/.github_build/Build.bundle.cuda.env @@ -1,3 +1,3 @@ # CORE NVIDIA CUDA BUNDLE -FFMPEG_VERSION=4.4.2 -CUDA_VERSION=11.4.2 +FFMPEG_VERSION=5.1.2 +CUDA_VERSION=11.7.1 diff --git a/.github_build/Build.bundle.env b/.github_build/Build.bundle.env index 88b752ce..060a458e 100644 --- a/.github_build/Build.bundle.env +++ b/.github_build/Build.bundle.env @@ -1,2 +1,2 @@ # CORE BUNDLE -FFMPEG_VERSION=4.4.2 +FFMPEG_VERSION=5.1.2 diff --git a/.github_build/Build.bundle.rpi.env b/.github_build/Build.bundle.rpi.env index 83fc5dbf..781096cd 100644 --- a/.github_build/Build.bundle.rpi.env +++ b/.github_build/Build.bundle.rpi.env @@ -1,2 +1,2 @@ # CORE RASPBERRY-PI BUNDLE -FFMPEG_VERSION=4.4.2 +FFMPEG_VERSION=5.1.2 diff --git a/.github_build/Build.bundle.vaapi.env b/.github_build/Build.bundle.vaapi.env index 88b752ce..060a458e 100644 --- a/.github_build/Build.bundle.vaapi.env +++ b/.github_build/Build.bundle.vaapi.env @@ -1,2 +1,2 @@ # CORE BUNDLE -FFMPEG_VERSION=4.4.2 +FFMPEG_VERSION=5.1.2 diff --git a/.github_build/Build.ubuntu.env b/.github_build/Build.ubuntu.env index d2b205a9..4e02a698 100644 --- a/.github_build/Build.ubuntu.env +++ b/.github_build/Build.ubuntu.env @@ -1,5 +1,5 @@ # CORE UBUNTU BASE IMAGE OS_NAME=ubuntu OS_VERSION=20.04 -GOLANG_IMAGE=golang:1.18.6-alpine3.15 -CORE_VERSION=16.10.1 
+GOLANG_IMAGE=golang:1.20-alpine3.16 +CORE_VERSION=16.12.0 diff --git a/.gitignore b/.gitignore index be58ceb7..e39532a4 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ .env /core* /import* +/ffmigrate* /data/** /test/** .vscode diff --git a/CHANGELOG.md b/CHANGELOG.md index 50869919..bdd254d2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,38 @@ # Core +### Core v16.12.0 > v16.?.? + +- Fix better naming for storage endpoint documentation +- Fix freeing up S3 mounts +- Fix URL validation if the path contains FFmpeg-specific placeholders +- Fix purging default file from HTTP cache + +### Core v16.11.0 > v16.12.0 + +- Add S3 storage support +- Add support for variables in placeholder parameter +- Add support for RTMP token as stream key as last element in path +- Add support for soft memory limit with debug.memory_limit_mbytes in config +- Add support for partial process config updates +- Add support for alternative syntax for auth0 tenants as environment variable +- Fix config timestamps created_at and loaded_at +- Fix /config/reload return type +- Fix modifying DTS in RTMP packets ([restreamer/#487](https://github.com/datarhei/restreamer/issues/487), [restreamer/#367](https://github.com/datarhei/restreamer/issues/367)) +- Fix default internal SRT latency to 20ms + +### Core v16.10.1 > v16.11.0 + +- Add FFmpeg 4.4 to FFmpeg 5.1 migration tool +- Add alternative SRT streamid +- Mod bump FFmpeg to v5.1.2 (datarhei/core:tag bundles) +- Fix crash with custom SSL certificates ([restreamer/#425](https://github.com/datarhei/restreamer/issues/425)) +- Fix proper version handling for config +- Fix widget session data +- Fix resetting process stats when process stopped +- Fix stale FFmpeg process detection for streams with only audio +- Fix wrong return status code ([#6](https://github.com/datarhei/core/issues/6)) +- Fix use SRT defaults for key material exchange + ### Core v16.10.0 > v16.10.1 - Add email address in TLS config for Let's Encrypt @@ -20,11 +53,11 @@ - Fix process cleanup on delete, remove empty directories from disk - Fix SRT blocking port on restart (upgrade datarhei/gosrt) - Fix RTMP communication (Blackmagic Web Presenter, thx 235 MEDIA) -- Fix RTMP communication (Blackmagic ATEM Mini, datarhei/restreamer#385) +- Fix RTMP communication (Blackmagic ATEM Mini, [#385](https://github.com/datarhei/restreamer/issues/385)) - Fix injecting commit, branch, and build info - Fix API metadata endpoints responses -#### Core v16.9.0 > v16.9.1 - Fix v1 import app - Fix race condition diff --git a/Dockerfile b/Dockerfile index 50378cb0..da7039c7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,23 +1,25 @@ -ARG GOLANG_IMAGE=golang:1.18.4-alpine3.15 +ARG GOLANG_IMAGE=golang:1.20-alpine3.16 -ARG BUILD_IMAGE=alpine:3.15 +ARG BUILD_IMAGE=alpine:3.16 FROM $GOLANG_IMAGE as builder COPY .
/dist/core RUN apk add \ - git \ - make && \ + git \ + make && \ cd /dist/core && \ go version && \ make release_linux && \ - make import_linux + make import_linux && \ + make ffmigrate_linux FROM $BUILD_IMAGE COPY --from=builder /dist/core/core /core/bin/core COPY --from=builder /dist/core/import /core/bin/import +COPY --from=builder /dist/core/ffmigrate /core/bin/ffmigrate COPY --from=builder /dist/core/mime.types /core/mime.types COPY --from=builder /dist/core/run.sh /core/bin/run.sh diff --git a/Dockerfile.test b/Dockerfile.test index 39260823..521784b5 100644 --- a/Dockerfile.test +++ b/Dockerfile.test @@ -1,8 +1,8 @@ -FROM golang:1.18.3-alpine3.15 +FROM golang:1.20-alpine3.16 RUN apk add alpine-sdk COPY . /dist/core RUN cd /dist/core && \ - go test -coverprofile=coverage.out -covermode=atomic -v ./... \ No newline at end of file + go test -coverprofile=coverage.out -covermode=atomic -v ./... diff --git a/Makefile b/Makefile index 6d1fadad..e20cce00 100644 --- a/Makefile +++ b/Makefile @@ -75,6 +75,14 @@ import: import_linux: cd app/import && CGO_ENABLED=0 GOOS=linux GOARCH=${OSARCH} go build -o ../../import -ldflags="-s -w" +## ffmigrate: Build ffmpeg migration binary +ffmigrate: + cd app/ffmigrate && CGO_ENABLED=${CGO_ENABLED} GOOS=${GOOS} GOARCH=${GOARCH} go build -o ../../ffmigrate -ldflags="-s -w" + +# github workflow workaround +ffmigrate_linux: + cd app/ffmigrate && CGO_ENABLED=0 GOOS=linux GOARCH=${OSARCH} go build -o ../../ffmigrate -ldflags="-s -w" + ## coverage: Generate code coverage analysis coverage: go test -race -coverprofile test/cover.out ./... @@ -96,7 +104,7 @@ release_linux: docker: docker build -t core:$(SHORTCOMMIT) . -.PHONY: help init build swagger test vet fmt vulncheck vendor commit coverage lint release import update +.PHONY: help init build swagger test vet fmt vulncheck vendor commit coverage lint release import ffmigrate update ## help: Show all commands help: Makefile diff --git a/README.md b/README.md index 0c1baf20..bd92149e 100644 --- a/README.md +++ b/README.md @@ -1,31 +1,62 @@ # Core -The cloud-native audio/video processing API. + -[](https://www.apache.org/licenses/LICENSE-2.0) +[](https://www.apache.org/licenses/LICENSE-2.0) [](https://github.com/datarhei/core/actions/workflows/codeql-analysis.yml) [](https://github.com/datarhei/core/actions/workflows/go-tests.yml) [](https://codecov.io/gh/datarhei/core) [](https://goreportcard.com/report/github.com/datarhei/core) [](https://pkg.go.dev/github.com/datarhei/core) +[](https://docs.datarhei.com/core/guides/beginner) -datarhei Core is management for FFmpeg processes without development effort. It is a central interface for mapping AV processes, is responsible for design and management, and provides all necessary interfaces to access the video content. The included control for FFmpeg can keep all used functions reliable and executable without the need for software developers to take care of it. In addition, process and resource limitation for all FFmpeg processes protects the host system from application overload. The overall system gives access to current process values (CPU, RAM) and complete control of system resources and loads with statistical access to process data and current and historical logs. +The datarhei Core is a process management solution for FFmpeg that offers a range of interfaces for media content, including HTTP, RTMP, SRT, and storage options.
It is optimized for use in virtual environments such as Docker. It has been implemented in various contexts, from small-scale applications like Restreamer to large-scale, multi-instance frameworks spanning multiple locations, such as dedicated servers, cloud instances, and single-board computers. The datarhei Core stands out from traditional media servers by emphasizing FFmpeg and its capabilities rather than focusing on media conversion. -## Features +## Objectives of development -- Unrestricted FFmpeg process management -- Optimized for long-running tasks -- In-Memory- and Disk-Filesystem for media assets -- HTTP/S, RTMP/S and SRT services -- Let's Encrypt for HTTPS and RTMPS -- HLS/DASH Session tracking with bandwidth and current viewer limiters -- Multiple resource limiters and monitoring -- FFmpeg progress data -- Metrics incl. Prometheus support -- Logging and debugging for FFmpeg processes with history -- Multiple auth. by JWT and Auth0 -- 100% JSON REST API (Swagger documented) -- GraphQL for metrics, process, and progress data +The objectives of development are: + +- Unhindered use of FFmpeg processes +- Portability of FFmpeg, including management across development and production environments +- Scalability of FFmpeg-based applications through the ability to offload processes to additional instances +- Streamlining of media product development by focusing on features and design + +## What issues have been resolved thus far? + +### Process management + +- Run multiple processes via API +- Unrestricted FFmpeg commands in process configuration +- Error detection and recovery (e.g., FFmpeg stalls, dumps) +- Referencing for process chaining (pipelines) +- Placeholders for storage, RTMP, and SRT usage (automatic credentials management and URL resolution) +- Logs (access to current stdout/stderr) +- Log history (configurable log history, e.g., for error analysis) +- Resource limitation (max. CPU and MEMORY usage per process) +- Statistics (like FFmpeg progress per input and output, CPU and MEMORY, state, uptime) +- Input verification (like FFprobe) +- Metadata (option to store additional information like a title) + +### Media delivery + +- Configurable file systems (in-memory, disk-mount, S3) +- HTTP/S, RTMP/S, and SRT services, including Let's Encrypt +- Bandwidth and session limiting for HLS/MPEG DASH sessions (protects restreams from congestion) +- Viewer session API and logging + +### Misc + +- HTTP REST and GraphQL API +- Swagger documentation +- Metrics incl. Prometheus support (also detects POSIX and cgroups resources) +- Docker images for the fast setup of development environments, up to the integration of cloud resources + +## Docker images + +- datarhei/core:latest (AMD64, ARM64, ARMv7) +- datarhei/core:cuda-latest (Nvidia CUDA 11.7.1, AMD64) +- datarhei/core:rpi-latest (Raspberry Pi / OMX/V4L2-M2M, AMD64/ARMv7) +- datarhei/core:vaapi-latest (Intel VAAPI, AMD64) ## Quick start @@ -47,993 +78,14 @@ docker run --name core -d \ 3.
Log in with Swagger Authorize > Basic authorization > Username: admin, Password: secret -## Docker images - -Native (linux/amd64,linux/arm64,linux/arm/v7) - -- datarhei/base:core-alpine-latest -- datarhei/base:core-ubuntu-latest - -Bundle with FFmpeg (linux/amd64,linux/arm64,linux/arm/v7) - -- datarhei/core:latest - -Bundle with FFmpeg for Raspberry Pi (linux/arm/v7) - -- datarhei/core:rpi-latest - -Bundle with FFmpeg for Nvidia Cuda (linux/amd64) - -- datarhei/core:cuda-latest - -Bundle with FFmpeg for Intel VAAPI (linux/amd64) - -- datarhei/core:vaapi-latest - ## Documentation -## Environment variables +Documentation is available on [docs.datarhei.com/core](https://docs.datarhei.com/core). -The environment variables can be set in the file `.env`, e.g. - -``` -CORE_API_AUTH_USERNAME=admin -CORE_API_AUTH_PASSWORD=datarhei -... -``` - -You can also provide them on the command line, whichever you prefer. If the same environment variable is set -in the `.env` file and on the command line, the one set on the command line will overrule the one from the `.env` file. - -The currently known environment variables (but not all will be respected) are: - -| Name | Default | Description | -| --- | --- | --- | -| CORE_CONFIGFILE | (not set) | Path to a config file. The following environment variables will override the respective values in the config file. | -| CORE_ADDRESS | `:8080` | HTTP listening address. | -| CORE_LOG_LEVEL | `info` | silent, error, warn, info, debug. | -| CORE_LOG_TOPICS | (not set) | List of topics to log (comma separated). | -| CORE_LOG_MAXLINES | `1000` | Number of latest log lines to keep in memory. | -| CORE_DB_DIR | `.` | Directory for holding the operational data. This directory must exist. | -| CORE_HOST_NAME | (not set) | Set to the domain name of the host this instance is running on. | -| CORE_HOST_AUTO | `true` | Enable detection of public IP addresses. | -| CORE_API_READ_ONLY | `false` | Allow only read-only access to the API. | -| CORE_API_ACCESS_HTTP_ALLOW | (not set) | Comma separated list of IP ranges in CIDR notation (HTTP traffic), e.g. `127.0.0.1/32,::1/128`. | -| CORE_API_ACCESS_HTTP_BLOCK | (not set) | Comma separated list of IP ranges in CIDR notation (HTTP traffic), e.g. `127.0.0.1/32,::1/128`. | -| CORE_API_ACCESS_HTTPS_ALLOW | (not set) | Comma separated list of IP ranges in CIDR notation (HTTPS traffic), e.g. `127.0.0.1/32,::1/128`. | -| CORE_API_ACCESS_HTTPS_BLOCK | (not set) | Comma separated list of IP ranges in CIDR notation (HTTPS traffic), e.g. `127.0.0.1/32,::1/128`. | -| CORE_API_AUTH_ENABLE | `true` | Set to `false` to disable auth for all clients. | -| CORE_API_AUTH_DISABLE_LOCALHOST | `false` | Set to `true` to disable auth for clients from localhost. | -| CORE_API_AUTH_USERNAME | (required) | Username for auth. | -| CORE_API_AUTH_PASSWORD | (required) | Password for auth. | -| CORE_API_AUTH_JWT_SECRET | (not set) | A secret for en- and decrypting the JWT. If not set, a secret will be generated. | -| CORE_API_AUTH_AUTH0_ENABLE | `false` | Enable Auth0. | -| CORE_API_AUTH_AUTH0_TENANTS | (not set) | List of base64 encoded Auth0 tenant JSON objects (comma-separated).
The tenant JSON object is defined as `{"domain":string,"audience":string,"users":array of strings}` | -| CORE_TLS_ADDRESS | `:8181` | Port to listen on for HTTPS requests. | -| CORE_TLS_ENABLE | `false` | Set to `true` to enable TLS support. | -| CORE_TLS_AUTO | `false` | Set to `true` to enable automatic retrieval of a Let's Encrypt certificate. Requires `CORE_TLS_ENABLE` to be `true` and `CORE_HOST_NAME` to be set with `CORE_HOST_AUTO` to `false`. | -| CORE_TLS_CERTFILE | (not set) | TLS certificate file in PEM format. | -| CORE_TLS_KEYFILE | (not set) | TLS key file in PEM format. | -| CORE_STORAGE_DISK_DIR | `.` | A directory that will be exposed by HTTP on /. This directory must exist. | -| CORE_STORAGE_DISK_MAXSIZEMBYTES | `0` | Max. allowed megabytes for `CORE_STORAGE_DISK_DIR`. | -| CORE_STORAGE_DISK_CACHE_ENABLE | `true` | Enable cache for files from `CORE_STORAGE_DISK_DIR`. | -| CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES | `0` | Max. allowed cache size, 0 for unlimited. | -| CORE_STORAGE_DISK_CACHE_TTLSECONDS | `300` | Seconds to keep files in cache. | -| CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES | `1` | Max. file size to put in cache. | -| CORE_STORAGE_DISK_CACHE_TYPES_ALLOW | (not set) | List of file extensions to cache (space-separated, e.g. ".html .js"), empty for all. | -| CORE_STORAGE_DISK_CACHE_TYPES_BLOCK | (not set) | List of file extensions not to cache (space-separated, e.g. ".m3u8 .mpd"), empty for none. | -| CORE_STORAGE_MEMORY_AUTH_ENABLE | `true` | Enable basic auth for PUT, POST, and DELETE on /memfs. | -| CORE_STORAGE_MEMORY_AUTH_USERNAME | (not set) | Username for Basic-Auth of `/memfs`. Required if auth is enabled. | -| CORE_STORAGE_MEMORY_AUTH_PASSWORD | (not set) | Password for Basic-Auth of `/memfs`. Required if auth is enabled. | -| CORE_STORAGE_MEMORY_MAXSIZEMBYTES | `0` | Max. allowed megabytes for `/memfs`. Any value <= 0 means unlimited. | -| CORE_STORAGE_MEMORY_PURGE | `false` | Set to `true` to remove the oldest entries if the `/memfs` is full. | -| CORE_STORAGE_CORS_ORIGINS | `*` | List of allowed CORS origins (comma separated). Will be used for `/` and `/memfs`. | -| CORE_STORAGE_MIMETYPES_FILE | `mime.types` | Path to file with MIME type definitions. | -| CORE_RTMP_ENABLE | `false` | Enable RTMP server. | -| CORE_RTMP_ENABLE_TLS | `false` | Enable RTMP over TLS (RTMPS). Requires `CORE_TLS_ENABLE` to be `true`. | -| CORE_RTMP_ADDRESS | `:1935` | RTMP server listen address. | -| CORE_RTMP_ADDRESS_TLS | `:1936` | RTMPS server listen address. | -| CORE_RTMP_APP | `/` | RTMP app for publishing. | -| CORE_RTMP_TOKEN | (not set) | RTMP token for publishing and playing. The token is the value of the URL query parameter `token`. | -| CORE_SRT_ENABLE | `false` | Enable SRT server. | -| CORE_SRT_ADDRESS | `:6000` | SRT server listen address. | -| CORE_SRT_PASSPHRASE | (not set) | SRT passphrase. | -| CORE_SRT_TOKEN | (not set) | SRT token for publishing and playing. The token is the value of the URL query parameter `token`. | -| CORE_SRT_LOG_ENABLE | `false` | Enable SRT server logging. | -| CORE_SRT_LOG_TOPICS | (not set) | List of topics to log from the SRT server. See https://github.com/datarhei/gosrt#logging. | -| CORE_FFMPEG_BINARY | `ffmpeg` | Path to FFmpeg binary. | -| CORE_FFMPEG_MAXPROCESSES | `0` | Max. allowed simultaneously running FFmpeg instances. Any value <= 0 means unlimited. | -| CORE_FFMPEG_ACCESS_INPUT_ALLOW | (not set) | List of patterns for allowed input URIs (space-separated), leave empty to allow any.
| -| CORE_FFMPEG_ACCESS_INPUT_BLOCK | (not set) | List of patterns for blocked input URIs (space-separated), leave empty to block none. | -| CORE_FFMPEG_ACCESS_OUTPUT_ALLOW | (not set) | List of patterns for allowed output URIs (space-separated), leave empty to allow any. | -| CORE_FFMPEG_ACCESS_OUTPUT_BLOCK | (not set) | List of patterns for blocked output URIs (space-separated), leave empty to block none. | -| CORE_FFMPEG_LOG_MAXLINES | `50` | Number of latest log lines to keep for each process. | -| CORE_FFMPEG_LOG_MAXHISTORY | `3` | Number of latest logs to keep for each process. | -| CORE_PLAYOUT_ENABLE | `false` | Enable playout API where available. | -| CORE_PLAYOUT_MINPORT | `0` | Min. port a playout server per input can run on. | -| CORE_PLAYOUT_MAXPORT | `0` | Max. port a playout server per input can run on. | -| CORE_DEBUG_PROFILING | `false` | Set to `true` to enable profiling endpoint on `/profiling`. | -| CORE_DEBUG_FORCEGC | `0` | Number of seconds between forcing GC to return memory to the OS. Use in conjunction with `GODEBUG=madvdontneed=1`. Any value <= 0 means not to force GC. | -| CORE_METRICS_ENABLE | `false` | Enable collecting historic metrics data. | -| CORE_METRICS_ENABLE_PROMETHEUS | `false` | Enable Prometheus endpoint /metrics. | -| CORE_METRICS_RANGE_SECONDS | `300` | Seconds to keep historic metric data. | -| CORE_METRICS_INTERVAL_SECONDS | `2` | Interval for collecting metrics. | -| CORE_SESSIONS_ENABLE | `false` | Enable HLS statistics for `/memfs`. | -| CORE_SESSIONS_IP_IGNORELIST | (not set) | Comma separated list of IP ranges in CIDR notation, e.g. `127.0.0.1/32,::1/128`. | -| CORE_SESSIONS_SESSION_TIMEOUT_SEC | `30` | Timeout of a session in seconds. | -| CORE_SESSIONS_PERSIST | `false` | Whether to persist the session history. Will be stored in `CORE_DB_DIR`. | -| CORE_SESSIONS_MAXBITRATE_MBIT | `0` | Max. allowed outgoing bitrate in mbit/s, 0 for unlimited. | -| CORE_SESSIONS_MAXSESSIONS | `0` | Max. allowed number of simultaneous sessions, 0 for unlimited. | -| CORE_ROUTER_BLOCKED_PREFIXES | `/api` | List of path prefixes that can't be routed. | -| CORE_ROUTER_ROUTES | (not set) | List of route mappings of the form [from]:[to], e.g. `/foo:/bar`. Leave empty for no routings. | -| CORE_ROUTER_UI_PATH | (not set) | Path to directory with files for a UI. It will be mounted to `/ui` and uses `index.html` as default index page. | - -## Config - -The minimum config file has to look like this: - -``` -{ - "version": 1 -} -``` - -All other values will be filled with default values and persisted on disk.
The entire default config file: - -``` -{ - "version": 3, - "id": "[will be generated if not given]", - "name": "[will be generated if not given]", - "address": ":8080", - "log": { - "level": "info", - "topics": [], - "max_lines": 1000 - }, - "db": { - "dir": "./config" - }, - "host": { - "name": [], - "auto": true - }, - "api": { - "read_only": false, - "access": { - "http": { - "allow": [], - "block": [] - }, - "https": { - "allow": [], - "block": [] - } - }, - "auth": { - "enable": true, - "disable_localhost": false, - "username": "", - "password": "", - "jwt": { - "secret": "" - }, - "auth0": { - "enable": false, - "tenants": [] - } - } - }, - "tls": { - "address": ":8181", - "enable": false, - "auto": false, - "cert_file": "", - "key_file": "" - }, - "storage": { - "disk": { - "dir": "./data", - "max_size_mbytes": 0, - "cache": { - "enable": true, - "max_size_mbytes": 0, - "ttl_seconds": 300, - "max_file_size_mbytes": 1, - "types": { - "allow": [], - "block": [] - } - } - }, - "memory": { - "auth": { - "enable": true, - "username": "admin", - "password": "vxbx0ViqfA75P1KCyw" - }, - "max_size_mbytes": 0, - "purge": false - }, - "cors": { - "origins": [ - "*" - ] - }, - "mimetypes_file": "mime.types" - }, - "rtmp": { - "enable": false, - "enable_tls": false, - "address": ":1935", - "address_tls": ":1936", - "app": "/", - "token": "" - }, - "srt": { - "enable": false, - "address": ":6000", - "passphrase": "", - "token": "", - "log": { - "enable": false, - "topics": [] - } - }, - "ffmpeg": { - "binary": "ffmpeg", - "max_processes": 0, - "access": { - "input": { - "allow": [], - "block": [] - }, - "output": { - "allow": [], - "block": [] - } - }, - "log": { - "max_lines": 50, - "max_history": 3 - } - }, - "playout": { - "enable": false, - "min_port": 0, - "max_port": 0 - }, - "debug": { - "profiling": false, - "force_gc": 0 - }, - "stats": { - "enable": true, - "ip_ignorelist": [ - "127.0.0.1/32", - "::1/128" - ], - "session_timeout_sec": 30, - "persist": false, - "persist_interval_sec": 300, - "max_bitrate_mbit": 0, - "max_sessions": 0 - }, - "service": { - "enable": false, - "token": "", - "url": "https://service.datarhei.com" - }, - "router": { - "blocked_prefixes": [ - "/api" - ], - "routes": {} - } -} -``` - -If you don't provide a path to a config file, the default config will be used, and nothing will be persisted to the disk. Default values can be overruled by environment variables. - -## TLS / HTTPS - -Enable TLS / HTTPS support by setting `CORE_TLS_ENABLE=true` and providing the certificate file and key file in PEM format by setting the environment variables `CORE_TLS_CERTFILE` and `CORE_TLS_KEYFILE` accordingly. If a certificate authority signs the certificate, the certificate file should be the concatenation of the server's certificate, any intermediates, and the CA's certificate. - -If TLS with given certificates is enabled, an HTTP server listening on `CORE_ADDRESS` (address) will be additionally started. This server provides access to the same memory filesystem as the HTTPS server (including limits and authorization), but its access is restricted to localhost only. - -### Let's Encrypt - -If you want to use automatic certificates from Let's Encrypt, set the environment variable `CORE_TLS_AUTO` to `true`. To work, the -environment variable `CORE_TLS_ENABLE` has to be `true`, and `CORE_HOST_NAME` has to be set to the hostname under which this host will be reachable. Otherwise, the ACME challenge will not work.
The environment variables `CORE_TLS_CERTFILE` and `CORE_TLS_KEYFILE` will be ignored. - -If automatic TLS is enabled, the HTTP server (CORE_ADDRESS, resp. address) must listen on port 80. It is required to automatically acquire the certificate (serving the `HTTP-01` challenge). As a further requirement, `CORE_HOST_NAME` (host.name) must be set because it is used as the canonical name for the certificate. - -The obtained certificates will be stored in `CORE_DB_DIR/cert` to be available after a restart. - -### Self-Signed certificates - -To create a self-signed certificate and key file pair, run this command and provide a reasonable value for the Common Name (CN). The CN is the fully qualified name of the host the instance is running on (e.g., `localhost`). You can also use an IP address or a wildcard name, e.g., `*.example.com`. - -RSA SSL certificate - -```sh -openssl req -newkey rsa:2048 -nodes -keyout key.pem -x509 -days 365 -out cert.pem -subj '/CN=localhost' -``` - -ECDSA SSL certificate - -```sh -openssl ecparam -name secp521r1 -genkey -out key.pem -openssl req -new -x509 -key key.pem -out cert.pem -days 365 -subj '/CN=localhost' -``` - -Call `openssl ecparam -list_curves` to see all available supported curves listed. - -## Access Control - -To control who has access to the API, a list of allowed IPs can be defined. This list is provided at startup with the environment variables `CORE_API_ACCESS_HTTP_BLOCK` and `CORE_API_ACCESS_HTTP_ALLOW`. This is a comma-separated list of IPs in CIDR notation, -e.g. `127.0.0.1/32,::1/128`. If the list is empty, then all IPs are allowed. If the list contains any invalid IP range, the server -will refuse to start. This can be separately defined for the HTTP and HTTPS server if you have TLS enabled with the environment variables `CORE_API_ACCESS_HTTPS_BLOCK` and `CORE_API_ACCESS_HTTPS_ALLOW`. - -## Input/Output Control - -To control where FFmpeg can read from and where FFmpeg can write to, you can define patterns that match the -input addresses or the output addresses. These patterns are regular expressions that can be provided at startup with the -environment variables `CORE_FFMPEG_ACCESS_INPUT_ALLOW` and `CORE_FFMPEG_ACCESS_OUTPUT_ALLOW` (and their `_BLOCK` counterparts). The expressions need to be space-separated, e.g. -`https?:// rtsp:// rtmp://`. If one of the lists is empty, then no restriction on the input resp. output will be applied. - -Independently of the value of `CORE_FFMPEG_ACCESS_OUTPUT_ALLOW` there's a check that verifies that output can only be written to the specified `CORE_STORAGE_DISK_DIR` and works as follows: If the address has a protocol specifier other than `file:`, then no further checks will be applied. If the protocol is `file:` or no protocol specifier is given, the address is assumed to be a path that is checked against the path given in `CORE_STORAGE_DISK_DIR`. - -It will be rejected if the address is outside the `CORE_STORAGE_DISK_DIR` directory. Otherwise, the protocol `file:` will be prepended. If you give some expressions for `CORE_FFMPEG_ACCESS_OUTPUT_ALLOW`, you should also allow `file:`. - -Special cases are the output addresses `-` (which will be rewritten to `pipe:`), and `/dev/null` (which will be allowed even though it's outside of `CORE_STORAGE_DISK_DIR`).
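To make the output check above concrete, here is a minimal Go sketch of the described behavior. It is not the repository's actual implementation (that lives in the FFmpeg validator code); the function name is made up for illustration, and only `scheme://` forms are treated as protocol specifiers in this sketch:

```go
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// checkOutputAddress sketches the rules described above: "-" becomes "pipe:",
// "/dev/null" is always allowed, any protocol other than "file:" passes
// through unchecked, and plain paths must resolve inside diskDir.
func checkOutputAddress(address, diskDir string) (string, error) {
	if address == "-" {
		return "pipe:", nil
	}
	if address == "/dev/null" {
		return "file:/dev/null", nil
	}
	if i := strings.Index(address, "://"); i > 0 && !strings.HasPrefix(address, "file:") {
		// A protocol specifier other than "file:" disables further checks.
		return address, nil
	}
	path := strings.TrimPrefix(address, "file:")
	abs, err := filepath.Abs(path)
	if err != nil {
		return "", err
	}
	root, err := filepath.Abs(diskDir)
	if err != nil {
		return "", err
	}
	if !strings.HasPrefix(abs, root+string(filepath.Separator)) {
		return "", fmt.Errorf("%q is outside of %q", address, diskDir)
	}
	// The path is inside the storage directory, so prepend "file:".
	return "file:" + abs, nil
}

func main() {
	fmt.Println(checkOutputAddress("./data/out.mp4", "./data")) // allowed, rewritten to file:...
	fmt.Println(checkOutputAddress("../escape.mp4", "./data"))  // rejected
}
```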
- -If you set a value for `CORE_STORAGE_DISK_MAXSIZEMBYTES` which is larger than `0`, it will be interpreted as the max. allowed megabytes for the `CORE_STORAGE_DISK_DIR`. As soon as the limit is reached, all processes that have outputs writing to `CORE_STORAGE_DISK_DIR` will be stopped. You are responsible for cleaning up the directory and restarting these processes. - -## RTMP - -The datarhei Core includes a simple RTMP server for publishing and playing streams. Set the environment variable `CORE_RTMP_ENABLE` to `true` to enable the RTMP server. It is listening on `CORE_RTMP_ADDRESS`. Use `CORE_RTMP_APP` to limit the app a stream can be published on, e.g. `/live` to require URLs to start with `/live`. To prevent anybody from publishing streams, set `CORE_RTMP_TOKEN` to a secret only known to the publishers and subscribers. The token has to be put in the query of the stream URL, e.g. `/live/stream?token=...`. - -To additionally enable the RTMPS server, set the config variable `rtmp.enable_tls` or environment variable `CORE_RTMP_ENABLE_TLS` to `true`. This requires `tls.enable` or `CORE_TLS_ENABLE` to be set to `true`. Use `rtmp.address_tls` or `CORE_RTMP_ADDRESS_TLS` to set the listen address for the RTMPS server. - -| Method | Path | Description | -| --- | --- | --- | -| GET | /api/v3/rtmp | List all currently published streams. | - -## SRT - -The datarhei Core includes a simple SRT server for publishing and playing streams. Set the environment variable `CORE_SRT_ENABLE` to `true` to enable the SRT server. It is listening on `CORE_SRT_ADDRESS`. - -The `streamid` is formatted according to Appendix B of the [SRT specs](https://datatracker.ietf.org/doc/html/draft-sharabayko-srt#appendix-B). The following keys are supported: - -| Key | Description | -| --- | --- | -| `m` | The connection mode, either `publish` for publishing a stream or `request` for subscribing to a published stream. | -| `r` | Name of the resource. | -| `token` | A token to prevent anybody from publishing or subscribing to a stream. This is set with `CORE_SRT_TOKEN`. | - -An example publishing streamid: `#!:m=publish,r=12345,token=foobar`. - -With your SRT client, connect to the SRT server always in `caller` mode, e.g. `srt://127.0.0.1:6000?mode=caller&streamid=#!:m=publish,r=12345,token=foobar&passphrase=foobarfoobar&transmode=live`. - -Via the API you can gather statistics of the currently connected SRT clients. - -| Method | Path | Description | -| --- | --- | --- | -| GET | /api/v3/srt | List all currently published streams. | - -## Playout - -FFmpeg processes with an `avstream:` (or `playout:`) input stream can expose an HTTP API to control the playout of that stream. With -`CORE_PLAYOUT_ENABLE` you enable exposing this API. The API is only exposed to `localhost` and is transparently connected to the datarhei Core API. You have to provide a port range (`CORE_PLAYOUT_MINPORT` and `CORE_PLAYOUT_MAXPORT`) where datarhei/core can use ports to assign to the playout API.
- -| Method | Path | Description | -| --- | --- | --- | -| GET | /api/v3/process/:id/playout/:inputid/status | Retrieve the current status as JSON. | -| GET | /api/v3/process/:id/playout/:inputid/keyframe/\*name | Retrieve the last delivered key frame from the input stream as JPEG (if `name` has the ending `.jpg`) or PNG (if `name` has the ending `.png`). | -| GET | /api/v3/process/:id/playout/:inputid/errorframe/encode | Immediately encode the error frame to a GOP. Will only have an effect if the last key frame is currently in a loop. | -| PUT/POST | /api/v3/process/:id/playout/:inputid/errorframe/\*name | Upload any image or video media that can be decoded and will be used to replace the key frame loop. If the key frame is currently in a loop, it will be replaced immediately. Otherwise, it will be used the next time the key frame is in a loop. The body of the request is the media file. | -| PUT | /api/v3/process/:id/playout/:inputid/stream | Replace the current stream. The body of the request is the URL of the new stream. | - -## MIME Types - -The file with the MIME types has one MIME type per line followed by a list of file extensions (including the "."). - -``` -text/plain .txt -text/html .htm .html -... -``` - -## Memory Filesystem - -A very simple in-memory filesystem is available. The uploaded data is stored in a map, where the path used to upload the file -is used as the key. Use the `POST` or `PUT` method with the proper path for uploading a file. The body of the request contains the contents of the file. No particular encoding or `Content-Type` is required. The file can then be downloaded from the same path. - -| Method | Path | Description | -| --- | --- | --- | -| POST | /memfs/\*path | Upload a file to the memory filesystem. The filename is `path` which can contain slashes. If there's already a file with the same `path`, it will be overwritten. | -| PUT | /memfs/\*path | Same as POST. | -| GET | /memfs/\*path | Download the file stored under `path`. The MIME types are applied based on the extension in the `path`. | -| DELETE | /memfs/\*path | Delete the file stored under `path`. | -| POST | /api/v3/memfs/\*path | Upload a file to the memory filesystem. | -| PUT | /api/v3/memfs/\*path | Same as POST. | -| GET | /api/v3/memfs/\*path | Download the file stored under `path`. | -| PATCH | /api/v3/memfs/\*path | Create a link to a file. The body contains the path to that file. | -| DELETE | /api/v3/memfs/\*path | Delete the file stored under `path`. | -| GET | /api/v3/memfs | List all files that are currently stored in the in-memory filesystem.
| -Use these endpoints to, e.g., store HLS chunks and .m3u8 files (in contrast to an actual disk or a ramdisk): - -``` -ffmpeg -f lavfi -re -i testsrc2=size=640x480:rate=25 -c:v libx264 -preset:v ultrafast -r 25 -g 50 -f hls -start_number 0 -hls_time 2 -hls_list_size 6 -hls_flags delete_segments+temp_file+append_list -method PUT -hls_segment_filename http://localhost:8080/memfs/foobar_%04d.ts -y http://localhost:8080/memfs/foobar.m3u8 -``` - -Then you can play it with, e.g., `ffplay http://localhost:8080/memfs/foobar.m3u8`. - -Use the environment variables `CORE_STORAGE_MEMORY_AUTH_USERNAME` and `CORE_STORAGE_MEMORY_AUTH_PASSWORD` to protect the `/memfs` with Basic-Auth. Basic-Auth will only be enabled -if both environment variables are set to non-empty values. The `GET /memfs/:path` will not be protected with Basic-Auth. - -Use the environment variable `CORE_STORAGE_MEMORY_MAXSIZEMBYTES` to limit the amount of data that is allowed to be stored. The value is interpreted as megabytes. Use a value equal to or smaller than `0` not to impose any limits. A `507 Insufficient Storage` will be returned if you hit the limit. - -Listing all currently stored files is done by calling `/api/v3/memfs` with the credentials set by the environment variables `CORE_API_AUTH_USERNAME` and `CORE_API_AUTH_PASSWORD`. -It also accepts the query parameters `sort` (`name`, `size`, or `lastmod`) and `order` (`asc` or `desc`). If a valid value for `sort` is given, the results are sorted in ascending order. - -## Routes - -All contents in `CORE_STORAGE_DISK_DIR` are served from `/`. If you want to redirect some paths to an existing file, you can add static routes in `router.routes` by providing a direct mapping, e.g. - -``` -router: { - routes: { - "/foo.txt": "/bar.txt", - } -} -``` - -The paths have to start with a `/`. Alternatively, you can serve whole directories from a root other than `CORE_STORAGE_DISK_DIR`. Use a `/*` at the end of a path as key and a path on the filesystem as the target, e.g. - -``` -router: { - routes: { - "/ui/*": "/path/to/ui", - } -} -``` - -If you use a relative path as target, then it will be added to the current working directory. - -## API - -Check the detailed API description on `/api/swagger/index.html`. - -### Login / Auth - -With auth enabled, you have to retrieve a JWT/OAuth token before you can access the `/api/v3/` API calls. - -| Method | Path | Description | -| --- | --- | --- | -| POST | /api/login | Retrieve a token to access the API. | -| GET | /api/v3/refresh_token | Retrieve a fresh token with a new expiry date. | - -For the login you have to send - -``` -{ - "username": "...", - "password": "..." -} -``` - -The `username` and the `password` are set by the environment variables `CORE_API_AUTH_USERNAME` and `CORE_API_AUTH_PASSWORD`. - -On successful login, the response looks like this: - -``` -{ - "expire": "2019-01-18T19:55:55+01:00", - "token": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE1NDc4Mzc3NTUsImlkIjpudWxsLCJvcmlnX2lhdCI6MTU0NzgzNDE1NX0.ZcrpD4oRBqG3wUrfnh1DOVpXdUT7dvUnvetKFEVRKKc" -} -``` - -Use the `token` in all subsequent calls to the `/api/v3/` endpoints, e.g.
- -``` -http http://localhost:8080/api/v3/process "Authorization: Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE1NDc4Mzc3NTUsImlkIjpudWxsLCJvcmlnX2lhdCI6MTU0NzgzNDE1NX0.ZcrpD4oRBqG3wUrfnh1DOVpXdUT7dvUnvetKFEVRKKc" -``` - -### Config - -| Method | Path | Description | -| --- | --- | --- | -| GET | /api/v3/config | Retrieve the current config without the override values from the environment variables. | -| GET | /api/v3/config/active | Retrieve the current config with the override values from the environment variables taken into account. | -| PUT | /api/v3/config | Store a new config. Only some values are respected, and the new config will only be used after a restart. | -| GET | /api/v3/config/reload | Reload the config. The config will be re-read and validated from the store. It will cause a restart of all services. | - -When retrieving the config via the API, critical values (such as passwords) will be disguised unless requested otherwise. - -### Process - -With the process API call, you can manage different FFmpeg processes. A process is defined as: - -``` -{ - "id": "SomeId", - "reference": "SomeId", - "type": "ffmpeg", - "input": [ - { - "id": "inputid", - "address": "rtsp://1.2.3.4/stream.sdp", - "options": [ - ... list of input options ... - ] - }, - ... list of inputs ... - ], - "output": [ - { - "id": "outputid", - "address": "rtmp://rtmp.youtube.com/live2/...", - "options": [ - ... list of output options ... - ], - "cleanup": [{ - "pattern": "(memfs|diskfs):...", - "max_files": number, - "max_file_age_seconds": number, - "purge_on_delete": (true|false) - }] - }, - ... list of outputs ... - ], - "options": [ - ... list of global options ... - ], - "reconnect": (true|false), - "reconnect_delay_seconds": 10, - "autostart": (true|false), - "stale_timeout_seconds": 30 -} -``` - -The input, output, and global options are interpreted as command-line options for FFmpeg. - -#### Process Cleanup - -With the optional array of cleanup rules for each output, it is possible to define rules for removing files from the -memory filesystem or disk. Each rule consists of a glob pattern and a max. allowed number of files matching that pattern or -a permitted maximum age for the files matching that pattern. The pattern starts with either `memfs:` or `diskfs:` depending on -which filesystem this rule is designated to. Then a [glob pattern](https://pkg.go.dev/path/filepath#Match) follows to -identify the files. If `max_files` is set to a number > 0, then the oldest files from the matching files will be deleted if -the list of matching files is longer than that number. If `max_file_age_seconds` is set to a number > 0, then all files -that are older than this number of seconds from the matching files will be deleted. If `purge_on_delete` is set to `true`, -then all matching files will be deleted when the process is deleted. - -The API calls are - -| Method | Path | Description | -| --- | --- | --- | -| POST | /api/v3/process | Adds a process.
Overwriting an existing ID will result in an error. | -| GET | /api/v3/process | Retrieve a list of all known processes. Use the query parameter `ids` to list (comma separated) the IDs of the processes you want to be part of the response. If the list is empty, all processes will be listed. Use the query parameter `filter` to list (comma separated) the wanted details per process (`config`, `state`, `log`). If the list is empty, all details will be included. | -| GET | /api/v3/process/:id | Retrieve the details of a process including the config, state, and logs. Use the query parameter `filter` to list (comma separated) the wanted details per process (`config`, `state`, `log`). If the list is empty, all details will be included. | -| PUT | /api/v3/process/:id | Replaces the process with a new config. | -| GET | /api/v3/process/:id/config | Retrieve the config of a process as it was provided. | -| GET | /api/v3/process/:id/state | Retrieve the current state of a process. This includes the progress data if the process is running. | -| GET | /api/v3/process/:id/report | Retrieve the report and logs of a process. | -| GET | /api/v3/process/:id/debug | Retrieve an anonymized version of the details of a process. | -| DELETE | /api/v3/process/:id | Remove a specific process. Only possible if the process is not running. | -| PUT | /api/v3/process/:id/command | Send a command to a process. | -| GET | /api/v3/process/:id/data | Get all arbitrary JSON data that is stored with this process. | -| GET | /api/v3/process/:id/data/:key | Get arbitrary JSON data that is stored under the key `key`. | -| PUT | /api/v3/process/:id/data/:key | Store arbitrary JSON data under the key `key`. If the data is `null`, the key will be removed. | - -### Commands - -A command is defined as: - -``` -{ - "command": ("start"|"stop"|"restart"|"reload") -} -``` - -| Command | Description | -| --- | --- | -| `start` | Start the process. If the process is already started, this won't have any effect. | -| `stop` | Stop the process. If the process is already stopped, this won't have any effect. | -| `restart` | Restart the process. If the process is not running, this won't have any effect. | -| `reload` | Reload the process. If the process was running, the reloaded process will start automatically. | - -### Placeholder - -Currently supported placeholders are: - -| Placeholder | Description | Location | -| --- | --- | --- | -| `{diskfs}` | Will be replaced by the provided `CORE_STORAGE_DISK_DIR`. | `options`, `input.address`, `input.options`, `output.address`, `output.options` | -| `{memfs}` | Will be replaced by the base URL of the MemFS. | `input.address`, `input.options`, `output.address`, `output.options` | -| `{processid}` | Will be replaced by the ID of the process. | `input.id`, `input.address`, `input.options`, `output.id`, `output.address`, `output.options`, `output.cleanup.pattern` | -| `{reference}` | Will be replaced by the reference of the process. | `input.id`, `input.address`, `input.options`, `output.id`, `output.address`, `output.options`, `output.cleanup.pattern` | -| `{inputid}` | Will be replaced by the ID of the input.
| `input.address`, `input.options` | -| `{outputid}` | Will be replaced by the ID of the output. | `output.address`, `output.options`, `output.cleanup.pattern` | -| `{rtmp}` | Will be replaced by the internal address of the RTMP server. Requires parameter `name` (name of the stream). | `input.address`, `output.address` | -| `{srt}` | Will be replaced by the internal address of the SRT server. Requires parameter `name` (name of the stream) and `mode` (either `publish` or `request`). | `input.address`, `output.address` | - -Before replacing the placeholders in the process config, all references (see below) will be resolved. - -If the value that gets filled in on the place of the placeholder needs escaping, you can define the character to be escaped in the placeholder by adding it to the placeholder name and prefixing it with a `^`. -E.g., to escape all `:` in the value (`http://example.com:8080`) for the `{memfs}` placeholder, write `{memfs^:}`. It will then be replaced by `http\://example.com\:8080`. The escape character is always `\`. In -case there are `\` in the value, they will also get escaped. If the placeholder doesn't imply escaping, the value will be used as-is. - -Add parameters to a placeholder by appending a comma separated list of key/values, e.g. `{placeholder,key1=value1,key2=value2}`. This can be combined with escaping. - -### References - -The input address of a process may contain a reference to the output of another process. It has the form `#[processid]:output=[id]`. - -A reference starts with a `#` followed by the process ID it refers to, followed by a `:`. Then comes `output`, followed by a `=` -and the ID of the output. - -## FFmpeg - -### Statistics - -This repository contains a patch for the FFmpeg program to provide detailed progress information. With this patch, FFmpeg will output -the progress information in a JSON string that contains the data for each input and output stream individually. The JSON output is disabled -by default. It can be enabled with the global `-jsonstats` switch on the command line. Use the `-stats` switch -on the command line for the standard progress output. - -The Docker image that you can build with the provided Dockerfile includes the patched version of FFmpeg for your convenience.
- -Example output with `-stats`: - -``` -frame= 143 fps= 25 q=-1.0 Lsize= 941kB time=00:00:05.68 bitrate=1357.0kbits/s speed=0.995x -``` - -Example output with `-jsonstats`: - -``` -JSONProgress:{"inputs":[{"id":0, "stream":0, "type":"video", "codec":"rawvideo", "coder":"rawvideo", "pix_fmt":"rgb24", "frame":188, "fps":24.95, "width":1280, "height":720, "size_kb":507600, "bitrate_kbps":552960.0},{"id":1, "stream":0, "type":"audio", "codec":"pcm_u8", "coder":"pcm_u8", "frame":314, "sampling_hz":44100, "layout":"stereo", "size_kb":628, "bitrate_kbps":705.6}], "outputs":[{"id":0, "stream":0, "type":"video", "codec":"h264", "coder":"libx264", "pix_fmt":"yuv420p", "frame":188, "fps":24.95, "q":-1.0, "width":1280, "height":720, "size_kb":1247, "bitrate_kbps":1365.6},{"id":0, "stream":1, "type":"audio", "codec":"aac", "coder":"aac", "frame":315, "sampling_hz":44100, "layout":"stereo", "size_kb":2, "bitrate_kbps":2.1}], "frame":188, "fps":24.95, "q":-1.0, "size_kb":1249, "bitrate_kbps":1367.7, "time":"0h0m7.48s", "speed":0.993, "dup":0, "drop":0} -``` - -The same output but nicely formatted: - -```json -{ - "bitrate_kbps": 1367.7, - "drop": 0, - "dup": 0, - "fps": 24.95, - "frame": 188, - "inputs": [ - { - "bitrate_kbps": 552960.0, - "codec": "rawvideo", - "coder": "rawvideo", - "fps": 24.95, - "frame": 188, - "height": 720, - "id": 0, - "pix_fmt": "rgb24", - "size_kb": 507600, - "stream": 0, - "type": "video", - "width": 1280 - }, - { - "bitrate_kbps": 705.6, - "codec": "pcm_u8", - "coder": "pcm_u8", - "frame": 314, - "id": 1, - "layout": "stereo", - "sampling_hz": 44100, - "size_kb": 628, - "stream": 0, - "type": "audio" - } - ], - "outputs": [ - { - "bitrate_kbps": 1365.6, - "codec": "h264", - "coder": "libx264", - "fps": 24.95, - "frame": 188, - "height": 720, - "id": 0, - "pix_fmt": "yuv420p", - "q": -1.0, - "size_kb": 1247, - "stream": 0, - "type": "video", - "width": 1280 - }, - { - "bitrate_kbps": 2.1, - "codec": "aac", - "coder": "aac", - "frame": 315, - "id": 0, - "layout": "stereo", - "sampling_hz": 44100, - "size_kb": 2, - "stream": 1, - "type": "audio" - } - ], - "q": -1.0, - "size_kb": 1249, - "speed": 0.993, - "time": "0h0m7.48s" -} -``` - -### Resilient Streaming - -Prepend the input source with `avstream:`, e.g. `... -i avstream:rtsp://1.2.3.4/stream.sdp ...`. It will reconnect to the stream if it breaks and repeat the last known intraframe until new data from the input stream is available. - -## Example - -Start `core` with the proper environment variables. Create a `.env` file or provide them on the command line. For this example, please use the following command line: - -``` -env CORE_API_AUTH_USERNAME=admin CORE_API_AUTH_PASSWORD=datarhei CORE_LOG_LEVEL=debug CORE_STORAGE_DISK_DIR=./data ./core -``` - -Also, make sure that the directory `./data` exists. Otherwise, the state will not be stored and will be lost after a restart of -datarhei/core, and the FFmpeg process will not be able to write the files. - -In this example, we will add a fake video and audio source. The video will be encoded with H264, and the audio will be encoded with AAC. The output will be an m3u8 stream. - -To talk to the API, we use the program [httpie](https://httpie.org/). - -First, we create a JSON file with the process definition (e.g.
`testsrc.json`): - -```json -{ - "id": "testsrc", - "type": "ffmpeg", - "options": ["-loglevel", "info", "-err_detect", "ignore_err"], - "input": [ - { - "address": "testsrc=size=1280x720:rate=25", - "id": "video", - "options": ["-f", "lavfi", "-re"] - }, - { - "address": "anullsrc=r=44100:cl=stereo", - "id": "audio", - "options": ["-f", "lavfi"] - } - ], - "output": [ - { - "address": "http://127.0.0.1:8080/memfs/{processid}_{outputid}.m3u8", - "id": "hls", - "options": [ - "-codec:v", - "libx264", - "-preset:v", - "ultrafast", - "-r", - "25", - "-g", - "50", - "-pix_fmt", - "yuv420p", - "-b:v", - "1024k", - "-codec:a", - "aac", - "-b:a", - "64k", - "-hls_time", - "2", - "-hls_list_size", - "10", - "-hls_flags", - "delete_segments+temp_file+append_list", - "-hls_segment_filename", - "http://127.0.0.1:8080/memfs/{processid}_{outputid}_%04d.ts" - ] - } - ], - "reconnect": true, - "reconnect_delay_seconds": 10, - "stale_timeout_seconds": 10 -} -``` - -and POST it to the API: - -``` -http POST http://localhost:8080/api/v3/process < testsrc.json -``` - -Then check if it is there (as provided) - -``` -http http://localhost:8080/api/v3/process/testsrc -``` - -For the advanced, create another JSON file (e.g. `dump.json`): - -```json -{ - "id": "dump", - "type": "ffmpeg", - "options": ["-loglevel", "info", "-err_detect", "ignore_err"], - "input": [ - { - "address": "#testsrc:output=hls", - "id": "video", - "options": [] - } - ], - "output": [ - { - "address": "{diskfs}/{processid}.mp4", - "id": "hls", - "options": ["-codec", "copy", "-y"] - } - ], - "reconnect": true, - "reconnect_delay_seconds": 10, - "stale_timeout_seconds": 10 -} -``` - -and POST it to the API: - -``` -http POST http://localhost:8080/api/v3/process < dump.json -``` - -Then check if it is there (as provided) - -``` -http http://localhost:8080/api/v3/process/dump -``` - -Let's start the `testsrc` process - -``` -http PUT http://localhost:8080/api/v3/process/testsrc/command command=start -``` - -Now we can observe the progress of the process - -``` -http http://localhost:8080/api/v3/process/testsrc -``` - -or the log of the process - -``` -http http://localhost:8080/api/v3/process/testsrc/log -``` - -If you want to change the video bitrate, edit the `testsrc.json` file accordingly and replace the process: - -``` -http PUT http://localhost:8080/api/v3/process/testsrc < testsrc.json -``` - -It will stop the process, replace the config, and restart it. - -Now open, e.g., VLC, and load the stream `http://localhost:8080/memfs/testsrc_hls.m3u8`. - -This is enough; let's stop it - -``` -http PUT http://localhost:8080/api/v3/process/testsrc/command command=stop -``` - -and check its progress again - -``` -http http://localhost:8080/api/v3/process/testsrc -``` - -Delete the process - -``` -http DELETE http://localhost:8080/api/v3/process/testsrc -``` - -## Metrics - -Metrics for the processes and other aspects are provided for a Prometheus scraper on `/metrics`. - -Currently available metrics are: - -| Metric | Type | Dimensions | Description | -| --- | --- | --- | --- | -| ffmpeg_process | gauge | `core`, `process`, `name` | General stats per process. | -| ffmpeg_process_io | gauge | `core`, `process`, `type`, `id`, `index`, `media`, `name` | Stats per input and output of a process. | -| mem_limit_bytes | gauge | `core` | Total available memory in bytes. | -| mem_free_bytes | gauge | `core` | Free memory in bytes.
| -| net_rx_bytes | gauge | `core`, `interface` | Number of received bytes by interface. | -| net_tx_bytes | gauge | `core`, `interface` | Number of sent bytes by interface. | -| cpus_system_time_secs | gauge | `core`, `cpu` | System time per CPU in seconds. | -| cpus_user_time_secs | gauge | `core`, `cpu` | User time per CPU in seconds. | -| cpus_idle_time_secs | gauge | `core`, `cpu` | Idle time per CPU in seconds. | -| session_total | counter | `core`, `collector` | Total number of sessions by collector. | -| session_active | gauge | `core`, `collector` | Current number of active sessions by collector. | -| session_rx_bytes | counter | `core`, `collector` | Total received bytes by collector. | -| session_tx_bytes | counter | `core`, `collector` | Total sent bytes by collector. | - -## Profiling - -Profiling information is available under `/profiling`. Set the environment variable `CORE_DEBUG_PROFILING=true` to make this endpoint -available. If authentication is enabled, you have to provide the token in the header. - -## Development - -### Requirement - -- Go v1.18+ ([Download here](https://golang.org/dl/)) - -### Build - -Clone the repository and build the binary - -``` -git clone git@github.com:datarhei/core.git -cd core -make -``` - -After the build process, the binary is available as `core`. - -For more build options, run `make help`. - -### Cross Compile - -If you want to run the binary on a different operating system and/or architecture, you can create the appropriate binary by simply setting some -environment variables, e.g. - -``` -env GOOS=linux GOARCH=arm go build -o core-linux-arm -env GOOS=linux GOARCH=arm64 go build -o core-linux-arm64 -env GOOS=freebsd GOARCH=amd64 go build -o core-freebsd-amd64 -env GOOS=windows GOARCH=amd64 go build -o core-windows-amd64 -env GOOS=darwin GOARCH=amd64 go build -o core-macos-amd64 -... -``` - -### Docker - -Build the Docker image and run it to try out the API - -``` -docker build -t core . -docker run -it --rm -v ${PWD}/data:/core/data -p 8080:8080 core -``` - -### API Documentation - -The documentation of the API is available on `/api/swagger/index.html`. - -To generate the API documentation from the code, use [swag](https://github.com/swaggo/swag). - -``` -go install github.com/swaggo/swag/cmd/swag@latest (unless already installed) -make swagger -``` - -### Code style - -The source code is formatted with `go fmt`, or simply run `make fmt`. Static analysis of the source code is done with `staticcheck` -(see [staticcheck](https://staticcheck.io/docs/)), or simply run `make lint`. - -Before committing changes, you should run `make commit` to ensure that the source code is in shape.
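As a closing example, here is the login flow from the API section above done from Go instead of httpie. This is a minimal sketch, not part of the repository; the endpoint paths and the response field follow the documentation above, and the credentials are the ones used in the example:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Credentials as set via CORE_API_AUTH_USERNAME / CORE_API_AUTH_PASSWORD.
	body, _ := json.Marshal(map[string]string{
		"username": "admin",
		"password": "datarhei",
	})

	// POST /api/login returns a JWT in the "token" field.
	res, err := http.Post("http://localhost:8080/api/login", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()

	var login struct {
		Token string `json:"token"`
	}
	if err := json.NewDecoder(res.Body).Decode(&login); err != nil {
		panic(err)
	}

	// Use the token as a Bearer token for all subsequent /api/v3 calls.
	req, _ := http.NewRequest("GET", "http://localhost:8080/api/v3/process", nil)
	req.Header.Set("Authorization", "Bearer "+login.Token)

	res2, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer res2.Body.Close()

	processes, _ := io.ReadAll(res2.Body)
	fmt.Println(string(processes))
}
```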
+- [Quick start](https://docs.datarhei.com/core/guides/beginner) +- [Installation](https://docs.datarhei.com/core/installation) +- [Configuration](https://docs.datarhei.com/core/configuration) +- [Coding](https://docs.datarhei.com/core/development/coding) ## License diff --git a/app/api/api.go b/app/api/api.go index 4820090f..7bad1ba2 100644 --- a/app/api/api.go +++ b/app/api/api.go @@ -6,6 +6,7 @@ import ( "fmt" "io" golog "log" + "math" gonet "net" gohttp "net/http" "net/url" @@ -17,9 +18,12 @@ import ( "github.com/datarhei/core/v16/app" "github.com/datarhei/core/v16/cluster" "github.com/datarhei/core/v16/config" + configstore "github.com/datarhei/core/v16/config/store" + configvars "github.com/datarhei/core/v16/config/vars" "github.com/datarhei/core/v16/ffmpeg" "github.com/datarhei/core/v16/http" "github.com/datarhei/core/v16/http/cache" + httpfs "github.com/datarhei/core/v16/http/fs" "github.com/datarhei/core/v16/http/jwt" "github.com/datarhei/core/v16/http/router" "github.com/datarhei/core/v16/io/fs" @@ -29,8 +33,9 @@ import ( "github.com/datarhei/core/v16/net" "github.com/datarhei/core/v16/prometheus" "github.com/datarhei/core/v16/restream" + restreamapp "github.com/datarhei/core/v16/restream/app" "github.com/datarhei/core/v16/restream/replace" - "github.com/datarhei/core/v16/restream/store" + restreamstore "github.com/datarhei/core/v16/restream/store" "github.com/datarhei/core/v16/rtmp" "github.com/datarhei/core/v16/service" "github.com/datarhei/core/v16/session" @@ -38,6 +43,7 @@ import ( "github.com/datarhei/core/v16/update" "github.com/caddyserver/certmagic" + "go.uber.org/zap" ) // The API interface is the implementation for the restreamer API. @@ -65,6 +71,7 @@ type api struct { ffmpeg ffmpeg.FFmpeg diskfs fs.Filesystem memfs fs.Filesystem + s3fs map[string]fs.Filesystem rtmpserver rtmp.Server srtserver srt.Server metrics monitor.HistoryMonitor @@ -99,7 +106,7 @@ type api struct { config struct { path string - store config.Store + store configstore.Store config *config.Config } @@ -116,6 +123,7 @@ var ErrConfigReload = fmt.Errorf("configuration reload") func New(configpath string, logwriter io.Writer) (API, error) { a := &api{ state: "idle", + s3fs: map[string]fs.Filesystem{}, } a.config.path = configpath @@ -148,7 +156,8 @@ func (a *api) Reload() error { logger := log.New("Core").WithOutput(log.NewConsoleWriter(a.log.writer, log.Lwarn, true)) - store, err := config.NewJSONStore(a.config.path, func() { + rootfs, _ := fs.NewDiskFilesystem(fs.DiskConfig{}) + store, err := configstore.NewJSON(rootfs, a.config.path, func() { a.errorChan <- ErrConfigReload }) if err != nil { @@ -160,7 +169,7 @@ func (a *api) Reload() error { cfg.Merge() if len(cfg.Host.Name) == 0 && cfg.Host.Auto { - cfg.SetPublicIPs() + cfg.Host.Name = net.GetPublicIPs(5 * time.Second) } cfg.Validate(false) @@ -228,8 +237,10 @@ func (a *api) Reload() error { logger.Info().WithFields(logfields).Log("") + logger.Info().WithField("path", a.config.path).Log("Read config file") + configlogger := logger.WithComponent("Config") - cfg.Messages(func(level string, v config.Variable, message string) { + cfg.Messages(func(level string, v configvars.Variable, message string) { configlogger = configlogger.WithFields(log.Fields{ "variable": v.Name, "value": v.Value, @@ -254,6 +265,8 @@ func (a *api) Reload() error { return fmt.Errorf("not all variables are set or valid") } + cfg.LoadedAt = time.Now() + store.SetActive(cfg) a.config.store = store @@ -286,7 +299,13 @@ func (a *api) start() error { } if cfg.Sessions.Persist { - 
sessionConfig.PersistDir = filepath.Join(cfg.DB.Dir, "sessions") + fs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{ + Root: filepath.Join(cfg.DB.Dir, "sessions"), + }) + if err != nil { + return fmt.Errorf("unable to create filesystem for persisting sessions: %w", err) + } + sessionConfig.PersistFS = fs } sessions, err := session.New(sessionConfig) @@ -367,18 +386,18 @@ func (a *api) start() error { a.sessions = sessions } - store := store.NewJSONStore(store.JSONConfig{ - Dir: cfg.DB.Dir, - Logger: a.log.logger.core.WithComponent("ProcessStore"), - }) - - diskfs, err := fs.NewDiskFilesystem(fs.DiskConfig{ - Dir: cfg.Storage.Disk.Dir, - Size: cfg.Storage.Disk.Size * 1024 * 1024, + diskfs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{ + Root: cfg.Storage.Disk.Dir, Logger: a.log.logger.core.WithComponent("DiskFS"), }) if err != nil { + return fmt.Errorf("disk filesystem: %w", err) + } + + if diskfsRoot, err := filepath.Abs(cfg.Storage.Disk.Dir); err != nil { return err + } else { + diskfs.SetMetadata("base", diskfsRoot) } a.diskfs = diskfs @@ -400,17 +419,60 @@ func (a *api) start() error { } if a.memfs == nil { - memfs := fs.NewMemFilesystem(fs.MemConfig{ - Base: baseMemFS.String(), - Size: cfg.Storage.Memory.Size * 1024 * 1024, - Purge: cfg.Storage.Memory.Purge, + memfs, _ := fs.NewMemFilesystem(fs.MemConfig{ Logger: a.log.logger.core.WithComponent("MemFS"), }) - a.memfs = memfs + memfs.SetMetadata("base", baseMemFS.String()) + + sizedfs, _ := fs.NewSizedFilesystem(memfs, cfg.Storage.Memory.Size*1024*1024, cfg.Storage.Memory.Purge) + + a.memfs = sizedfs } else { - a.memfs.Rebase(baseMemFS.String()) - a.memfs.Resize(cfg.Storage.Memory.Size * 1024 * 1024) + a.memfs.SetMetadata("base", baseMemFS.String()) + if sizedfs, ok := a.memfs.(fs.SizedFilesystem); ok { + sizedfs.Resize(cfg.Storage.Memory.Size * 1024 * 1024) + } + } + + for _, s3 := range cfg.Storage.S3 { + if _, ok := a.s3fs[s3.Name]; ok { + return fmt.Errorf("the name '%s' for a s3 filesystem is already in use", s3.Name) + } + + baseS3FS := url.URL{ + Scheme: "http", + Path: s3.Mountpoint, + } + + host, port, _ := gonet.SplitHostPort(cfg.Address) + if len(host) == 0 { + baseS3FS.Host = "localhost:" + port + } else { + baseS3FS.Host = cfg.Address + } + + if s3.Auth.Enable { + baseS3FS.User = url.UserPassword(s3.Auth.Username, s3.Auth.Password) + } + + s3fs, err := fs.NewS3Filesystem(fs.S3Config{ + Name: s3.Name, + Endpoint: s3.Endpoint, + AccessKeyID: s3.AccessKeyID, + SecretAccessKey: s3.SecretAccessKey, + Region: s3.Region, + Bucket: s3.Bucket, + UseSSL: s3.UseSSL, + Logger: a.log.logger.core.WithComponent("FS"), + }) + if err != nil { + return fmt.Errorf("s3 filesystem (%s): %w", s3.Name, err) + } + + s3fs.SetMetadata("base", baseS3FS.String()) + + a.s3fs[s3.Name] = s3fs } var portrange net.Portranger @@ -418,18 +480,18 @@ func (a *api) start() error { if cfg.Playout.Enable { portrange, err = net.NewPortrange(cfg.Playout.MinPort, cfg.Playout.MaxPort) if err != nil { - return err + return fmt.Errorf("playout port range: %w", err) } } validatorIn, err := ffmpeg.NewValidator(cfg.FFmpeg.Access.Input.Allow, cfg.FFmpeg.Access.Input.Block) if err != nil { - return err + return fmt.Errorf("input address validator: %w", err) } validatorOut, err := ffmpeg.NewValidator(cfg.FFmpeg.Access.Output.Allow, cfg.FFmpeg.Access.Output.Block) if err != nil { - return err + return fmt.Errorf("output address validator: %w", err) } ffmpeg, err := ffmpeg.New(ffmpeg.Config{ @@ -443,7 +505,7 @@ func (a *api) start() error { Collector: 
a.sessions.Collector("ffmpeg"), }) if err != nil { - return err + return fmt.Errorf("unable to create ffmpeg: %w", err) } a.ffmpeg = ffmpeg @@ -451,47 +513,103 @@ func (a *api) start() error { a.replacer = replace.New() { - a.replacer.RegisterTemplate("diskfs", a.diskfs.Base()) - a.replacer.RegisterTemplate("memfs", a.memfs.Base()) + a.replacer.RegisterTemplateFunc("diskfs", func(config *restreamapp.Config, section string) string { + return a.diskfs.Metadata("base") + }, nil) - host, port, _ := gonet.SplitHostPort(cfg.RTMP.Address) - if len(host) == 0 { - host = "localhost" + a.replacer.RegisterTemplateFunc("fs:disk", func(config *restreamapp.Config, section string) string { + return a.diskfs.Metadata("base") + }, nil) + + a.replacer.RegisterTemplateFunc("memfs", func(config *restreamapp.Config, section string) string { + return a.memfs.Metadata("base") + }, nil) + + a.replacer.RegisterTemplateFunc("fs:mem", func(config *restreamapp.Config, section string) string { + return a.memfs.Metadata("base") + }, nil) + + for name, s3 := range a.s3fs { + a.replacer.RegisterTemplate("fs:"+name, s3.Metadata("base"), nil) } - template := "rtmp://" + host + ":" + port - if cfg.RTMP.App != "/" { - template += cfg.RTMP.App - } - template += "/{name}" + a.replacer.RegisterTemplateFunc("rtmp", func(config *restreamapp.Config, section string) string { + host, port, _ := gonet.SplitHostPort(cfg.RTMP.Address) + if len(host) == 0 { + host = "localhost" + } - if len(cfg.RTMP.Token) != 0 { - template += "?token=" + cfg.RTMP.Token - } + template := "rtmp://" + host + ":" + port + if cfg.RTMP.App != "/" { + template += cfg.RTMP.App + } + template += "/{name}" - a.replacer.RegisterTemplate("rtmp", template) + if len(cfg.RTMP.Token) != 0 { + template += "?token=" + cfg.RTMP.Token + } - host, port, _ = gonet.SplitHostPort(cfg.SRT.Address) - if len(host) == 0 { - host = "localhost" - } + return template + }, nil) - template = "srt://" + host + ":" + port + "?mode=caller&transtype=live&streamid=#!:m={mode},r={name}" - if len(cfg.SRT.Token) != 0 { - template += ",token=" + cfg.SRT.Token + a.replacer.RegisterTemplateFunc("srt", func(config *restreamapp.Config, section string) string { + host, port, _ = gonet.SplitHostPort(cfg.SRT.Address) + if len(host) == 0 { + host = "localhost" + } + + template := "srt://" + host + ":" + port + "?mode=caller&transtype=live&latency={latency}&streamid={name}" + if section == "output" { + template += ",mode:publish" + } else { + template += ",mode:request" + } + if len(cfg.SRT.Token) != 0 { + template += ",token:" + cfg.SRT.Token + } + if len(cfg.SRT.Passphrase) != 0 { + template += "&passphrase=" + cfg.SRT.Passphrase + } + + return template + }, map[string]string{ + "latency": "20000", // 20 milliseconds, FFmpeg requires microseconds + }) + } + + filesystems := []fs.Filesystem{ + a.diskfs, + a.memfs, + } + + for _, fs := range a.s3fs { + filesystems = append(filesystems, fs) + } + + var store restreamstore.Store = nil + + { + fs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{ + Root: cfg.DB.Dir, + }) + if err != nil { + return err } - if len(cfg.SRT.Passphrase) != 0 { - template += "&passphrase=" + cfg.SRT.Passphrase + store, err = restreamstore.NewJSON(restreamstore.JSONConfig{ + Filesystem: fs, + Filepath: "/db.json", + Logger: a.log.logger.core.WithComponent("ProcessStore"), + }) + if err != nil { + return err } - a.replacer.RegisterTemplate("srt", template) } restream, err := restream.New(restream.Config{ ID: cfg.ID, Name: cfg.Name, Store: store, - DiskFS: a.diskfs, - MemFS: 
a.memfs, + Filesystems: filesystems, Replace: a.replacer, FFmpeg: a.ffmpeg, MaxProcesses: cfg.FFmpeg.MaxProcesses, @@ -569,9 +687,12 @@ func (a *api) start() error { metrics.Register(monitor.NewCPUCollector()) metrics.Register(monitor.NewMemCollector()) metrics.Register(monitor.NewNetCollector()) - metrics.Register(monitor.NewDiskCollector(a.diskfs.Base())) - metrics.Register(monitor.NewFilesystemCollector("diskfs", diskfs)) + metrics.Register(monitor.NewDiskCollector(a.diskfs.Metadata("base"))) + metrics.Register(monitor.NewFilesystemCollector("diskfs", a.diskfs)) metrics.Register(monitor.NewFilesystemCollector("memfs", a.memfs)) + for name, fs := range a.s3fs { + metrics.Register(monitor.NewFilesystemCollector(name, fs)) + } metrics.Register(monitor.NewRestreamCollector(a.restream)) metrics.Register(monitor.NewFFmpegCollector(a.ffmpeg)) metrics.Register(monitor.NewSessionCollector(a.sessions, []string{})) @@ -646,7 +767,7 @@ func (a *api) start() error { } if cfg.Storage.Disk.Cache.Enable { - diskCache, err := cache.NewLRUCache(cache.LRUConfig{ + cache, err := cache.NewLRUCache(cache.LRUConfig{ TTL: time.Duration(cfg.Storage.Disk.Cache.TTL) * time.Second, MaxSize: cfg.Storage.Disk.Cache.Size * 1024 * 1024, MaxFileSize: cfg.Storage.Disk.Cache.FileSize * 1024 * 1024, @@ -656,106 +777,111 @@ func (a *api) start() error { }) if err != nil { - return fmt.Errorf("unable to create disk cache: %w", err) + return fmt.Errorf("unable to create cache: %w", err) } - a.cache = diskCache + a.cache = cache } var autocertManager *certmagic.Config - if cfg.TLS.Enable && cfg.TLS.Auto { - if len(cfg.Host.Name) == 0 { - return fmt.Errorf("at least one host must be provided in host.name or RS_HOST_NAME") - } - - certmagic.DefaultACME.Agreed = true - certmagic.DefaultACME.Email = cfg.TLS.Email - certmagic.DefaultACME.CA = certmagic.LetsEncryptProductionCA - certmagic.DefaultACME.DisableHTTPChallenge = false - certmagic.DefaultACME.DisableTLSALPNChallenge = true - certmagic.DefaultACME.Logger = nil - - certmagic.Default.Storage = &certmagic.FileStorage{ - Path: cfg.DB.Dir + "/cert", - } - certmagic.Default.DefaultServerName = cfg.Host.Name[0] - certmagic.Default.Logger = nil - certmagic.Default.OnEvent = func(event string, data interface{}) { - message := "" - - switch data := data.(type) { - case string: - message = data - case fmt.Stringer: - message = data.String() + if cfg.TLS.Enable { + if cfg.TLS.Auto { + if len(cfg.Host.Name) == 0 { + return fmt.Errorf("at least one host must be provided in host.name or CORE_HOST_NAME") } - if len(message) != 0 { - a.log.logger.core.WithComponent("certmagic").Info().WithField("event", event).Log(message) + certmagic.Default.Storage = &certmagic.FileStorage{ + Path: cfg.DB.Dir + "/cert", } - } + certmagic.Default.DefaultServerName = cfg.Host.Name[0] + certmagic.Default.Logger = zap.NewNop() - magic := certmagic.NewDefault() - acme := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME) + certmagic.DefaultACME.Agreed = true + certmagic.DefaultACME.Email = cfg.TLS.Email + certmagic.DefaultACME.CA = certmagic.LetsEncryptProductionCA + certmagic.DefaultACME.DisableHTTPChallenge = false + certmagic.DefaultACME.DisableTLSALPNChallenge = true + certmagic.DefaultACME.Logger = zap.NewNop() - magic.Issuers = []certmagic.Issuer{acme} + magic := certmagic.NewDefault() + acme := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME) + acme.Logger = zap.NewNop() - autocertManager = magic + magic.Issuers = []certmagic.Issuer{acme} + magic.Logger = zap.NewNop() - // Start temporary http 
server on configured port - tempserver := &gohttp.Server{ - Addr: cfg.Address, - Handler: acme.HTTPChallengeHandler(gohttp.HandlerFunc(func(w gohttp.ResponseWriter, r *gohttp.Request) { - w.WriteHeader(gohttp.StatusNotFound) - })), - ReadTimeout: 10 * time.Second, - WriteTimeout: 10 * time.Second, - MaxHeaderBytes: 1 << 20, - } + autocertManager = magic - wg := sync.WaitGroup{} - wg.Add(1) - - go func() { - tempserver.ListenAndServe() - wg.Done() - }() - - var certerror bool - - // For each domain, get the certificate - for _, host := range cfg.Host.Name { - logger := a.log.logger.core.WithComponent("Let's Encrypt").WithField("host", host) - logger.Info().Log("Acquiring certificate ...") - - ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Minute)) - - err := autocertManager.ManageSync(ctx, []string{host}) - - cancel() - - if err != nil { - logger.Error().WithField("error", err).Log("Failed to acquire certificate") - certerror = true - break + // Start temporary http server on configured port + tempserver := &gohttp.Server{ + Addr: cfg.Address, + Handler: acme.HTTPChallengeHandler(gohttp.HandlerFunc(func(w gohttp.ResponseWriter, r *gohttp.Request) { + w.WriteHeader(gohttp.StatusNotFound) + })), + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + MaxHeaderBytes: 1 << 20, } - logger.Info().Log("Successfully acquired certificate") - } + wg := sync.WaitGroup{} + wg.Add(1) - // Shut down the temporary http server - tempserver.Close() + go func() { + tempserver.ListenAndServe() + wg.Done() + }() - wg.Wait() + var certerror bool - if certerror { - a.log.logger.core.Warn().Log("Continuing with disabled TLS") - autocertManager = nil - cfg.TLS.Enable = false + // For each domain, get the certificate + for _, host := range cfg.Host.Name { + logger := a.log.logger.core.WithComponent("Let's Encrypt").WithField("host", host) + logger.Info().Log("Acquiring certificate ...") + + ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(5*time.Minute)) + + err := autocertManager.ManageSync(ctx, []string{host}) + + cancel() + + if err != nil { + logger.Error().WithField("error", err).Log("Failed to acquire certificate") + certerror = true + /* + problems, err := letsdebug.Check(host, letsdebug.HTTP01) + if err != nil { + logger.Error().WithField("error", err).Log("Failed to debug certificate acquisition") + } + + for _, p := range problems { + logger.Error().WithFields(log.Fields{ + "name": p.Name, + "detail": p.Detail, + }).Log(p.Explanation) + } + */ + break + } + + logger.Info().Log("Successfully acquired certificate") + } + + // Shut down the temporary http server + tempserver.Close() + + wg.Wait() + + if certerror { + a.log.logger.core.Warn().Log("Continuing with disabled TLS") + autocertManager = nil + cfg.TLS.Enable = false + } else { + cfg.TLS.CertFile = "" + cfg.TLS.KeyFile = "" + } } else { - cfg.TLS.CertFile = "" - cfg.TLS.KeyFile = "" + a.log.logger.core.Info().Log("Enabling TLS with cert and key files") } } @@ -772,14 +898,15 @@ func (a *api) start() error { Cluster: a.cluster, } - if autocertManager != nil && cfg.RTMP.EnableTLS { - config.TLSConfig = &tls.Config{ - GetCertificate: autocertManager.GetCertificate, - } - + if cfg.RTMP.EnableTLS { config.Logger = config.Logger.WithComponent("RTMP/S") a.log.logger.rtmps = a.log.logger.core.WithComponent("RTMPS").WithField("address", cfg.RTMP.AddressTLS) + if autocertManager != nil { + config.TLSConfig = &tls.Config{ + GetCertificate: autocertManager.GetCertificate, + } + } } rtmpserver, 
err := rtmp.New(config) @@ -843,22 +970,61 @@ func (a *api) start() error { a.log.logger.main = a.log.logger.core.WithComponent(logcontext).WithField("address", cfg.Address) - mainserverhandler, err := http.NewServer(http.Config{ + httpfilesystems := []httpfs.FS{ + { + Name: a.diskfs.Name(), + Mountpoint: "", + AllowWrite: false, + EnableAuth: false, + Username: "", + Password: "", + DefaultFile: "index.html", + DefaultContentType: "text/html", + Gzip: true, + Filesystem: a.diskfs, + Cache: a.cache, + }, + { + Name: a.memfs.Name(), + Mountpoint: "/memfs", + AllowWrite: true, + EnableAuth: cfg.Storage.Memory.Auth.Enable, + Username: cfg.Storage.Memory.Auth.Username, + Password: cfg.Storage.Memory.Auth.Password, + DefaultFile: "", + DefaultContentType: "application/data", + Gzip: true, + Filesystem: a.memfs, + Cache: nil, + }, + } + + for _, s3 := range cfg.Storage.S3 { + httpfilesystems = append(httpfilesystems, httpfs.FS{ + Name: s3.Name, + Mountpoint: s3.Mountpoint, + AllowWrite: true, + EnableAuth: s3.Auth.Enable, + Username: s3.Auth.Username, + Password: s3.Auth.Password, + DefaultFile: "", + DefaultContentType: "application/data", + Gzip: true, + Filesystem: a.s3fs[s3.Name], + Cache: a.cache, + }) + } + + serverConfig := http.Config{ Logger: a.log.logger.main, LogBuffer: a.log.buffer, Restream: a.restream, Metrics: a.metrics, Prometheus: a.prom, MimeTypesFile: cfg.Storage.MimeTypes, - DiskFS: a.diskfs, - MemFS: http.MemFSConfig{ - EnableAuth: cfg.Storage.Memory.Auth.Enable, - Username: cfg.Storage.Memory.Auth.Username, - Password: cfg.Storage.Memory.Auth.Password, - Filesystem: a.memfs, - }, - IPLimiter: iplimiter, - Profiling: cfg.Debug.Profiling, + Filesystems: httpfilesystems, + IPLimiter: iplimiter, + Profiling: cfg.Debug.Profiling, Cors: http.CorsConfig{ Origins: cfg.Storage.CORS.Origins, }, @@ -866,12 +1032,13 @@ func (a *api) start() error { SRT: a.srtserver, JWT: a.httpjwt, Config: a.config.store, - Cache: a.cache, Sessions: a.sessions, Router: router, ReadOnly: cfg.API.ReadOnly, Cluster: a.cluster, - }) + } + + mainserverhandler, err := http.NewServer(serverConfig) if err != nil { return fmt.Errorf("unable to create server: %w", err) @@ -906,35 +1073,10 @@ func (a *api) start() error { a.log.logger.sidecar = a.log.logger.core.WithComponent("HTTP").WithField("address", cfg.Address) - sidecarserverhandler, err := http.NewServer(http.Config{ - Logger: a.log.logger.sidecar, - LogBuffer: a.log.buffer, - Restream: a.restream, - Metrics: a.metrics, - Prometheus: a.prom, - MimeTypesFile: cfg.Storage.MimeTypes, - DiskFS: a.diskfs, - MemFS: http.MemFSConfig{ - EnableAuth: cfg.Storage.Memory.Auth.Enable, - Username: cfg.Storage.Memory.Auth.Username, - Password: cfg.Storage.Memory.Auth.Password, - Filesystem: a.memfs, - }, - IPLimiter: iplimiter, - Profiling: cfg.Debug.Profiling, - Cors: http.CorsConfig{ - Origins: cfg.Storage.CORS.Origins, - }, - RTMP: a.rtmpserver, - SRT: a.srtserver, - JWT: a.httpjwt, - Config: a.config.store, - Cache: a.cache, - Sessions: a.sessions, - Router: router, - ReadOnly: cfg.API.ReadOnly, - Cluster: a.cluster, - }) + serverConfig.Logger = a.log.logger.sidecar + serverConfig.IPLimiter = iplimiter + + sidecarserverhandler, err := http.NewServer(serverConfig) if err != nil { return fmt.Errorf("unable to create sidecar HTTP server: %w", err) @@ -1126,6 +1268,12 @@ func (a *api) start() error { }(ctx) } + if cfg.Debug.MemoryLimit > 0 { + debug.SetMemoryLimit(cfg.Debug.MemoryLimit * 1024 * 1024) + } else { + debug.SetMemoryLimit(math.MaxInt64) + } + // Start the 
restream processes restream.Start() @@ -1216,6 +1364,9 @@ func (a *api) stop() { a.cache = nil } + // Free the S3 mounts + a.s3fs = map[string]fs.Filesystem{} + // Stop the SRT server if a.srtserver != nil { a.log.logger.srt.Info().Log("Stopping ...") @@ -1296,7 +1447,7 @@ func (a *api) Destroy() { // Free the MemFS if a.memfs != nil { - a.memfs.DeleteAll() + a.memfs.RemoveAll() a.memfs = nil } } diff --git a/app/ffmigrate/main.go b/app/ffmigrate/main.go new file mode 100644 index 00000000..5f3b4996 --- /dev/null +++ b/app/ffmigrate/main.go @@ -0,0 +1,176 @@ +package main + +import ( + "fmt" + "os" + "regexp" + + cfgstore "github.com/datarhei/core/v16/config/store" + cfgvars "github.com/datarhei/core/v16/config/vars" + "github.com/datarhei/core/v16/ffmpeg" + "github.com/datarhei/core/v16/io/file" + "github.com/datarhei/core/v16/io/fs" + "github.com/datarhei/core/v16/log" + "github.com/datarhei/core/v16/restream/store" + + "github.com/Masterminds/semver/v3" + _ "github.com/joho/godotenv/autoload" +) + +func main() { + logger := log.New("Migration").WithOutput(log.NewConsoleWriter(os.Stderr, log.Linfo, true)).WithFields(log.Fields{ + "from": "ffmpeg4", + "to": "ffmpeg5", + }) + + configfile := cfgstore.Location(os.Getenv("CORE_CONFIGFILE")) + + diskfs, _ := fs.NewDiskFilesystem(fs.DiskConfig{}) + + configstore, err := cfgstore.NewJSON(diskfs, configfile, nil) + if err != nil { + logger.Error().WithError(err).Log("Loading configuration failed") + os.Exit(1) + } + + if err := doMigration(logger, configstore); err != nil { + os.Exit(1) + } +} + +func doMigration(logger log.Logger, configstore cfgstore.Store) error { + if logger == nil { + logger = log.New("") + } + + cfg := configstore.Get() + + // Merging the persisted config with the environment variables + cfg.Merge() + + cfg.Validate(false) + if cfg.HasErrors() { + logger.Error().Log("The configuration contains errors") + messages := []string{} + cfg.Messages(func(level string, v cfgvars.Variable, message string) { + if level == "error" { + logger.Error().WithFields(log.Fields{ + "variable": v.Name, + "value": v.Value, + "env": v.EnvName, + "description": v.Description, + }).Log(message) + + messages = append(messages, v.Name+": "+message) + } + }) + + return fmt.Errorf("the configuration contains errors: %v", messages) + } + + ff, err := ffmpeg.New(ffmpeg.Config{ + Binary: cfg.FFmpeg.Binary, + }) + if err != nil { + logger.Error().WithError(err).Log("Loading FFmpeg binary failed") + return fmt.Errorf("loading FFmpeg binary failed: %w", err) + } + + version, err := semver.NewVersion(ff.Skills().FFmpeg.Version) + if err != nil { + logger.Error().WithError(err).Log("Parsing FFmpeg version failed") + return fmt.Errorf("parsing FFmpeg version failed: %w", err) + } + + // The current FFmpeg version is 4. Nothing to do. + if version.Major() == 4 { + return nil + } + + if version.Major() != 5 { + err := fmt.Errorf("unknown FFmpeg version found: %d", version.Major()) + logger.Error().WithError(err).Log("Unsupported FFmpeg version found") + return fmt.Errorf("unsupported FFmpeg version found: %w", err) + } + + // Check if there's a DB file + dbFilepath := cfg.DB.Dir + "/db.json" + + if _, err = os.Stat(dbFilepath); err != nil { + // There's no DB to backup + logger.Info().WithField("db", dbFilepath).Log("Database not found. Migration not required") + return nil + } + + // Check if we already have a backup + backupFilepath := cfg.DB.Dir + "/db_ff4.json" + + if _, err = os.Stat(backupFilepath); err == nil { + // Yes, we have a backup. 
The migration already happened + logger.Info().WithField("backup", backupFilepath).Log("Migration already done") + return nil + } + + // Create a backup + if err := file.Copy(dbFilepath, backupFilepath); err != nil { + logger.Error().WithError(err).Log("Creating backup file failed") + return fmt.Errorf("creating backup file failed: %w", err) + } + + logger.Info().WithField("backup", backupFilepath).Log("Backup created") + + // Load the existing DB + datastore, err := store.NewJSON(store.JSONConfig{ + Filepath: cfg.DB.Dir + "/db.json", + }) + if err != nil { + return err + } + + data, err := datastore.Load() + if err != nil { + logger.Error().WithError(err).Log("Loading database failed") + return fmt.Errorf("loading database failed: %w", err) + } + + logger.Info().Log("Migrating processes ...") + + // Migrate the processes to version 5 + // Only this happens: + // - for RTSP inputs, replace -stimeout with -timeout + + reRTSP := regexp.MustCompile(`^rtsps?://`) + for id, p := range data.Process { + logger.Info().WithField("processid", p.ID).Log("") + + for index, input := range p.Config.Input { + if !reRTSP.MatchString(input.Address) { + continue + } + + for i, o := range input.Options { + if o != "-stimeout" { + continue + } + + input.Options[i] = "-timeout" + } + + p.Config.Input[index] = input + } + p.Config.FFVersion = version.String() + data.Process[id] = p + } + + logger.Info().Log("Migrating processes done") + + // Store the modified DB + if err := datastore.Store(data); err != nil { + logger.Error().WithError(err).Log("Storing database failed") + return fmt.Errorf("storing database failed: %w", err) + } + + logger.Info().Log("Completed") + + return nil +} diff --git a/app/import/import.go b/app/import/import.go index b453c4a9..5899c350 100644 --- a/app/import/import.go +++ b/app/import/import.go @@ -17,6 +17,7 @@ import ( "github.com/datarhei/core/v16/encoding/json" "github.com/datarhei/core/v16/ffmpeg" "github.com/datarhei/core/v16/ffmpeg/skills" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/restream" "github.com/datarhei/core/v16/restream/app" "github.com/datarhei/core/v16/restream/store" @@ -495,14 +496,14 @@ type importConfigAudio struct { sampling string } -func importV1(path string, cfg importConfig) (store.StoreData, error) { +func importV1(fs fs.Filesystem, path string, cfg importConfig) (store.StoreData, error) { if len(cfg.id) == 0 { cfg.id = uuid.New().String() } r := store.NewStoreData() - jsondata, err := os.ReadFile(path) + jsondata, err := fs.ReadFile(path) if err != nil { return r, fmt.Errorf("failed to read data from %s: %w", path, err) } @@ -1417,9 +1418,19 @@ func probeInput(binary string, config app.Config) app.Probe { return app.Probe{} } + dummyfs, _ := fs.NewMemFilesystem(fs.MemConfig{}) + store, err := store.NewJSON(store.JSONConfig{ + Filesystem: dummyfs, + Filepath: "/", + Logger: nil, + }) + if err != nil { + return app.Probe{} + } + rs, err := restream.New(restream.Config{ FFmpeg: ffmpeg, - Store: store.NewDummyStore(store.DummyConfig{}), + Store: store, }) if err != nil { return app.Probe{} diff --git a/app/import/import_test.go b/app/import/import_test.go index e0e8f3d6..8322c0eb 100644 --- a/app/import/import_test.go +++ b/app/import/import_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/datarhei/core/v16/encoding/json" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/restream/store" "github.com/stretchr/testify/require" @@ -36,8 +37,13 @@ import ( var id string = "4186b095-7f0a-4e94-8c3d-f17459ab252f" 
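Aside: the reason `importV1` now takes an `fs.Filesystem` instead of calling `os.ReadFile` directly is testability, as the test below shows. Here is a rough, self-contained sketch of that pattern, using only constructors that appear in this patch (`NewMemFilesystem`, `WriteFileReader`, `ReadFile`); `readConfig` is a hypothetical stand-in for `importV1`, and the fixture content is made up:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/datarhei/core/v16/io/fs"
)

// readConfig stands in for importV1: it only sees the injected filesystem,
// so tests can hand it an in-memory fixture instead of a file on disk.
func readConfig(filesystem fs.Filesystem, path string) (string, error) {
	data, err := filesystem.ReadFile(path)
	if err != nil {
		return "", fmt.Errorf("failed to read data from %s: %w", path, err)
	}

	return string(data), nil
}

func main() {
	// In production this would be a disk-backed filesystem; for a test,
	// an in-memory filesystem with a written fixture is enough.
	memfs, _ := fs.NewMemFilesystem(fs.MemConfig{})
	memfs.WriteFileReader("/v1.json", strings.NewReader(`{"version": 1}`))

	cfg, err := readConfig(memfs, "/v1.json")
	if err != nil {
		panic(err)
	}

	fmt.Println(cfg)
}
```

In production the same function receives a disk-backed filesystem, e.g. the rooted one created in `testV1Import` below.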
func testV1Import(t *testing.T, v1Fixture, v4Fixture string, config importConfig) { + diskfs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{ + Root: ".", + }) + require.NoError(t, err) + // Import v1 database - v4, err := importV1(v1Fixture, config) + v4, err := importV1(diskfs, v1Fixture, config) require.Equal(t, nil, err) // Reset variants @@ -50,7 +56,7 @@ func testV1Import(t *testing.T, v1Fixture, v4Fixture string, config importConfig require.Equal(t, nil, err) // Read the wanted result - wantdatav4, err := os.ReadFile(v4Fixture) + wantdatav4, err := diskfs.ReadFile(v4Fixture) require.Equal(t, nil, err) var wantv4 store.StoreData diff --git a/app/import/main.go b/app/import/main.go index b5b89b24..2d641caf 100644 --- a/app/import/main.go +++ b/app/import/main.go @@ -4,7 +4,9 @@ import ( "fmt" "os" - "github.com/datarhei/core/v16/config" + cfgstore "github.com/datarhei/core/v16/config/store" + cfgvars "github.com/datarhei/core/v16/config/vars" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/restream/store" @@ -14,18 +16,26 @@ import ( func main() { logger := log.New("Import").WithOutput(log.NewConsoleWriter(os.Stderr, log.Linfo, true)).WithField("version", "v1") - configstore, err := config.NewJSONStore(os.Getenv("CORE_CONFIGFILE"), nil) + configfile := cfgstore.Location(os.Getenv("CORE_CONFIGFILE")) + + diskfs, err := fs.NewDiskFilesystem(fs.DiskConfig{}) + if err != nil { + logger.Error().WithError(err).Log("Access disk filesystem failed") + os.Exit(1) + } + + configstore, err := cfgstore.NewJSON(diskfs, configfile, nil) if err != nil { logger.Error().WithError(err).Log("Loading configuration failed") os.Exit(1) } - if err := doImport(logger, configstore); err != nil { + if err := doImport(logger, diskfs, configstore); err != nil { os.Exit(1) } } -func doImport(logger log.Logger, configstore config.Store) error { +func doImport(logger log.Logger, fs fs.Filesystem, configstore cfgstore.Store) error { if logger == nil { logger = log.New("") } @@ -41,7 +51,7 @@ func doImport(logger log.Logger, configstore config.Store) error { if cfg.HasErrors() { logger.Error().Log("The configuration contains errors") messages := []string{} - cfg.Messages(func(level string, v config.Variable, message string) { + cfg.Messages(func(level string, v cfgvars.Variable, message string) { if level == "error" { logger.Error().WithFields(log.Fields{ "variable": v.Name, @@ -64,23 +74,27 @@ func doImport(logger log.Logger, configstore config.Store) error { logger = logger.WithField("database", v1filename) - if _, err := os.Stat(v1filename); err != nil { + if _, err := fs.Stat(v1filename); err != nil { if os.IsNotExist(err) { logger.Info().Log("Database doesn't exist and nothing will be imported") return nil } logger.Error().WithError(err).Log("Checking for v1 database") - return fmt.Errorf("checking for v1 database: %w", err) } logger.Info().Log("Found database") // Load an existing DB - datastore := store.NewJSONStore(store.JSONConfig{ - Dir: cfg.DB.Dir, + datastore, err := store.NewJSON(store.JSONConfig{ + Filesystem: fs, + Filepath: cfg.DB.Dir + "/db.json", }) + if err != nil { + logger.Error().WithError(err).Log("Creating datastore for new database failed") + return fmt.Errorf("creating datastore for new database failed: %w", err) + } data, err := datastore.Load() if err != nil { @@ -102,7 +116,7 @@ func doImport(logger log.Logger, configstore config.Store) error { importConfig.binary = cfg.FFmpeg.Binary // Rewrite the old database to the new 
database - r, err := importV1(v1filename, importConfig) + r, err := importV1(fs, v1filename, importConfig) if err != nil { logger.Error().WithError(err).Log("Importing database failed") return fmt.Errorf("importing database failed: %w", err) diff --git a/app/import/main_test.go b/app/import/main_test.go index b85bfafe..305110f9 100644 --- a/app/import/main_test.go +++ b/app/import/main_test.go @@ -1,20 +1,30 @@ package main import ( + "strings" "testing" - "github.com/datarhei/core/v16/config" + "github.com/datarhei/core/v16/config/store" + "github.com/datarhei/core/v16/io/fs" + "github.com/stretchr/testify/require" ) func TestImport(t *testing.T) { - configstore := config.NewDummyStore() + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + memfs.WriteFileReader("/mime.types", strings.NewReader("foobar")) + memfs.WriteFileReader("/bin/ffmpeg", strings.NewReader("foobar")) + + configstore, err := store.NewJSON(memfs, "/config.json", nil) + require.NoError(t, err) cfg := configstore.Get() - err := configstore.Set(cfg) + err = configstore.Set(cfg) require.NoError(t, err) - err = doImport(nil, configstore) + err = doImport(nil, memfs, configstore) require.NoError(t, err) } diff --git a/app/version.go b/app/version.go index 41660539..ec718dfd 100644 --- a/app/version.go +++ b/app/version.go @@ -29,8 +29,8 @@ func (v versionInfo) MinorString() string { // Version of the app var Version = versionInfo{ Major: 16, - Minor: 10, - Patch: 1, + Minor: 12, + Patch: 0, } // Commit is the git commit the app is build from. It should be filled in during compilation diff --git a/cluster/node.go b/cluster/node.go index 98b3e451..58235b99 100644 --- a/cluster/node.go +++ b/cluster/node.go @@ -242,7 +242,7 @@ func (n *node) files() { } for _, file := range files { - filesChan <- "memfs:" + file.Name + filesChan <- "mem:" + file.Name } }(filesChan) @@ -255,7 +255,7 @@ func (n *node) files() { } for _, file := range files { - filesChan <- "diskfs:" + file.Name + filesChan <- "disk:" + file.Name } }(filesChan) @@ -316,9 +316,9 @@ func (n *node) getURL(path string) (string, error) { u := "" - if prefix == "memfs:" { + if prefix == "mem:" { u = n.address + "/" + filepath.Join("memfs", path) - } else if prefix == "diskfs:" { + } else if prefix == "disk:" { u = n.address + path } else if prefix == "rtmp:" { u = n.rtmpAddress + path @@ -347,9 +347,9 @@ func (n *node) getFile(path string) (io.ReadCloser, error) { prefix := n.prefix.FindString(path) path = n.prefix.ReplaceAllString(path, "") - if prefix == "memfs:" { + if prefix == "mem:" { return n.peer.MemFSGetFile(path) - } else if prefix == "diskfs:" { + } else if prefix == "disk:" { return n.peer.DiskFSGetFile(path) } diff --git a/config/config.go b/config/config.go index 9086d876..33d9492b 100644 --- a/config/config.go +++ b/config/config.go @@ -3,76 +3,82 @@ package config import ( "context" - "fmt" "net" - "os" "time" + "github.com/datarhei/core/v16/config/copy" + "github.com/datarhei/core/v16/config/value" + "github.com/datarhei/core/v16/config/vars" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/math/rand" haikunator "github.com/atrox/haikunatorgo/v2" "github.com/google/uuid" ) +/* +type Config interface { + // Merge merges the values of the known environment variables into the configuration + Merge() + + // Validate validates the current state of the Config for completeness and sanity. Errors are + // written to the log. Use resetLogs to indicate to reset the logs prior validation. 
+ Validate(resetLogs bool) + + // Messages calls for each log entry the provided callback. The level has the values 'error', 'warn', or 'info'. + // The name is the name of the configuration value, e.g. 'api.auth.enable'. The message is the log message. + Messages(logger func(level string, v vars.Variable, message string)) + + // HasErrors returns whether there are some error messages in the log. + HasErrors() bool + + // Overrides returns a list of configuration value names that have been overriden by an environment variable. + Overrides() []string + + Get(name string) (string, error) + Set(name, val string) error +} +*/ + const version int64 = 3 -type variable struct { - value value // The actual value - defVal string // The default value in string representation - name string // A name for this value - envName string // The environment variable that corresponds to this value - envAltNames []string // Alternative environment variable names - description string // A desriptions for this value - required bool // Whether a non-empty value is required - disguise bool // Whether the value should be disguised if printed - merged bool // Whether this value has been replaced by its corresponding environment variable -} - -type Variable struct { - Value string - Name string - EnvName string - Description string - Merged bool -} - -type message struct { - message string // The log message - variable Variable // The config field this message refers to - level string // The loglevel for this message -} - -type Auth0Tenant struct { - Domain string `json:"domain"` - Audience string `json:"audience"` - ClientID string `json:"clientid"` - Users []string `json:"users"` -} - -type DataVersion struct { - Version int64 `json:"version"` -} +// Make sure that the config.Config interface is satisfied +//var _ config.Config = &Config{} // Config is a wrapper for Data type Config struct { - vars []*variable - logs []message + fs fs.Filesystem + vars vars.Variables Data } // New returns a Config which is initialized with its default values -func New() *Config { - config := &Config{} +func New(f fs.Filesystem) *Config { + config := &Config{ + fs: f, + } + + if config.fs == nil { + config.fs, _ = fs.NewMemFilesystem(fs.MemConfig{}) + } config.init() return config } +func (d *Config) Get(name string) (string, error) { + return d.vars.Get(name) +} + +func (d *Config) Set(name, val string) error { + return d.vars.Set(name, val) +} + // NewConfigFrom returns a clone of a Config -func NewConfigFrom(d *Config) *Config { - data := New() +func (d *Config) Clone() *Config { + data := New(d.fs) data.CreatedAt = d.CreatedAt data.LoadedAt = d.LoadedAt @@ -100,286 +106,206 @@ func NewConfigFrom(d *Config) *Config { data.Service = d.Service data.Router = d.Router - data.Log.Topics = copyStringSlice(d.Log.Topics) + data.Log.Topics = copy.Slice(d.Log.Topics) - data.Host.Name = copyStringSlice(d.Host.Name) + data.Host.Name = copy.Slice(d.Host.Name) - data.API.Access.HTTP.Allow = copyStringSlice(d.API.Access.HTTP.Allow) - data.API.Access.HTTP.Block = copyStringSlice(d.API.Access.HTTP.Block) - data.API.Access.HTTPS.Allow = copyStringSlice(d.API.Access.HTTPS.Allow) - data.API.Access.HTTPS.Block = copyStringSlice(d.API.Access.HTTPS.Block) + data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow) + data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block) + data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow) + data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block) - data.API.Auth.Auth0.Tenants 
= copyTenantSlice(d.API.Auth.Auth0.Tenants) + data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants) - data.Storage.CORS.Origins = copyStringSlice(d.Storage.CORS.Origins) - data.Storage.Disk.Cache.Types.Allow = copyStringSlice(d.Storage.Disk.Cache.Types.Allow) - data.Storage.Disk.Cache.Types.Block = copyStringSlice(d.Storage.Disk.Cache.Types.Block) + data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins) + data.Storage.Disk.Cache.Types.Allow = copy.Slice(d.Storage.Disk.Cache.Types.Allow) + data.Storage.Disk.Cache.Types.Block = copy.Slice(d.Storage.Disk.Cache.Types.Block) + data.Storage.S3 = copy.Slice(d.Storage.S3) - data.FFmpeg.Access.Input.Allow = copyStringSlice(d.FFmpeg.Access.Input.Allow) - data.FFmpeg.Access.Input.Block = copyStringSlice(d.FFmpeg.Access.Input.Block) - data.FFmpeg.Access.Output.Allow = copyStringSlice(d.FFmpeg.Access.Output.Allow) - data.FFmpeg.Access.Output.Block = copyStringSlice(d.FFmpeg.Access.Output.Block) + data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow) + data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block) + data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow) + data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block) - data.Sessions.IPIgnoreList = copyStringSlice(d.Sessions.IPIgnoreList) + data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList) - data.SRT.Log.Topics = copyStringSlice(d.SRT.Log.Topics) + data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics) - data.Router.BlockedPrefixes = copyStringSlice(d.Router.BlockedPrefixes) - data.Router.Routes = copyStringMap(d.Router.Routes) + data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes) + data.Router.Routes = copy.StringMap(d.Router.Routes) - for i, v := range d.vars { - data.vars[i].merged = v.merged - } + data.vars.Transfer(&d.vars) return data } func (d *Config) init() { - d.val(newInt64Value(&d.Version, version), "version", "", nil, "Configuration file layout version", true, false) - d.val(newTimeValue(&d.CreatedAt, time.Now()), "created_at", "", nil, "Configuration file creation time", false, false) - d.val(newStringValue(&d.ID, uuid.New().String()), "id", "CORE_ID", nil, "ID for this instance", true, false) - d.val(newStringValue(&d.Name, haikunator.New().Haikunate()), "name", "CORE_NAME", nil, "A human readable name for this instance", false, false) - d.val(newAddressValue(&d.Address, ":8080"), "address", "CORE_ADDRESS", nil, "HTTP listening address", false, false) - d.val(newBoolValue(&d.CheckForUpdates, true), "update_check", "CORE_UPDATE_CHECK", nil, "Check for updates and send anonymized data", false, false) + d.vars.Register(value.NewInt64(&d.Version, version), "version", "", nil, "Configuration file layout version", true, false) + d.vars.Register(value.NewTime(&d.CreatedAt, time.Now()), "created_at", "", nil, "Configuration file creation time", false, false) + d.vars.Register(value.NewString(&d.ID, uuid.New().String()), "id", "CORE_ID", nil, "ID for this instance", true, false) + d.vars.Register(value.NewString(&d.Name, haikunator.New().Haikunate()), "name", "CORE_NAME", nil, "A human readable name for this instance", false, false) + d.vars.Register(value.NewAddress(&d.Address, ":8080"), "address", "CORE_ADDRESS", nil, "HTTP listening address", false, false) + d.vars.Register(value.NewBool(&d.CheckForUpdates, true), "update_check", "CORE_UPDATE_CHECK", nil, "Check for updates and send anonymized data", false, false) // Log - d.val(newStringValue(&d.Log.Level, "info"), 
"log.level", "CORE_LOG_LEVEL", nil, "Loglevel: silent, error, warn, info, debug", false, false) - d.val(newStringListValue(&d.Log.Topics, []string{}, ","), "log.topics", "CORE_LOG_TOPICS", nil, "Show only selected log topics", false, false) - d.val(newIntValue(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false) + d.vars.Register(value.NewString(&d.Log.Level, "info"), "log.level", "CORE_LOG_LEVEL", nil, "Loglevel: silent, error, warn, info, debug", false, false) + d.vars.Register(value.NewStringList(&d.Log.Topics, []string{}, ","), "log.topics", "CORE_LOG_TOPICS", nil, "Show only selected log topics", false, false) + d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAX_LINES", []string{"CORE_LOG_MAXLINES"}, "Number of latest log lines to keep in memory", false, false) // DB - d.val(newMustDirValue(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false) + d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config", d.fs), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false) // Host - d.val(newStringListValue(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false) - d.val(newBoolValue(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false) + d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false) + d.vars.Register(value.NewBool(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false) // API - d.val(newBoolValue(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only ready only access to the API", false, false) - d.val(newCIDRListValue(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false) - d.val(newCIDRListValue(&d.API.Access.HTTP.Block, []string{}, ","), "api.access.http.block", "CORE_API_ACCESS_HTTP_BLOCK", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false) - d.val(newCIDRListValue(&d.API.Access.HTTPS.Allow, []string{}, ","), "api.access.https.allow", "CORE_API_ACCESS_HTTPS_ALLOW", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false) - d.val(newCIDRListValue(&d.API.Access.HTTPS.Block, []string{}, ","), "api.access.https.block", "CORE_API_ACCESS_HTTPS_BLOCK", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false) - d.val(newBoolValue(&d.API.Auth.Enable, false), "api.auth.enable", "CORE_API_AUTH_ENABLE", nil, "Enable authentication for all clients", false, false) - d.val(newBoolValue(&d.API.Auth.DisableLocalhost, false), "api.auth.disable_localhost", "CORE_API_AUTH_DISABLE_LOCALHOST", nil, "Disable authentication for clients from localhost", false, false) - d.val(newStringValue(&d.API.Auth.Username, ""), "api.auth.username", "CORE_API_AUTH_USERNAME", []string{"RS_USERNAME"}, "Username", false, false) - d.val(newStringValue(&d.API.Auth.Password, ""), "api.auth.password", "CORE_API_AUTH_PASSWORD", []string{"RS_PASSWORD"}, "Password", false, true) + d.vars.Register(value.NewBool(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only ready only access to the API", false, false) + 
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false) + d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Block, []string{}, ","), "api.access.http.block", "CORE_API_ACCESS_HTTP_BLOCK", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false) + d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Allow, []string{}, ","), "api.access.https.allow", "CORE_API_ACCESS_HTTPS_ALLOW", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false) + d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Block, []string{}, ","), "api.access.https.block", "CORE_API_ACCESS_HTTPS_BLOCK", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false) + d.vars.Register(value.NewBool(&d.API.Auth.Enable, false), "api.auth.enable", "CORE_API_AUTH_ENABLE", nil, "Enable authentication for all clients", false, false) + d.vars.Register(value.NewBool(&d.API.Auth.DisableLocalhost, false), "api.auth.disable_localhost", "CORE_API_AUTH_DISABLE_LOCALHOST", nil, "Disable authentication for clients from localhost", false, false) + d.vars.Register(value.NewString(&d.API.Auth.Username, ""), "api.auth.username", "CORE_API_AUTH_USERNAME", []string{"RS_USERNAME"}, "Username", false, false) + d.vars.Register(value.NewString(&d.API.Auth.Password, ""), "api.auth.password", "CORE_API_AUTH_PASSWORD", []string{"RS_PASSWORD"}, "Password", false, true) // Auth JWT - d.val(newStringValue(&d.API.Auth.JWT.Secret, rand.String(32)), "api.auth.jwt.secret", "CORE_API_AUTH_JWT_SECRET", nil, "JWT secret, leave empty for generating a random value", false, true) + d.vars.Register(value.NewString(&d.API.Auth.JWT.Secret, rand.String(32)), "api.auth.jwt.secret", "CORE_API_AUTH_JWT_SECRET", nil, "JWT secret, leave empty for generating a random value", false, true) // Auth Auth0 - d.val(newBoolValue(&d.API.Auth.Auth0.Enable, false), "api.auth.auth0.enable", "CORE_API_AUTH_AUTH0_ENABLE", nil, "Enable Auth0", false, false) - d.val(newTenantListValue(&d.API.Auth.Auth0.Tenants, []Auth0Tenant{}, ","), "api.auth.auth0.tenants", "CORE_API_AUTH_AUTH0_TENANTS", nil, "List of Auth0 tenants", false, false) + d.vars.Register(value.NewBool(&d.API.Auth.Auth0.Enable, false), "api.auth.auth0.enable", "CORE_API_AUTH_AUTH0_ENABLE", nil, "Enable Auth0", false, false) + d.vars.Register(value.NewTenantList(&d.API.Auth.Auth0.Tenants, []value.Auth0Tenant{}, ","), "api.auth.auth0.tenants", "CORE_API_AUTH_AUTH0_TENANTS", nil, "List of Auth0 tenants", false, false) // TLS - d.val(newAddressValue(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false) - d.val(newBoolValue(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false) - d.val(newBoolValue(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false) - d.val(newEmailValue(&d.TLS.Email, "cert@datarhei.com"), "tls.email", "CORE_TLS_EMAIL", nil, "Email for Let's Encrypt registration", false, false) - d.val(newFileValue(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false) - d.val(newFileValue(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false) + d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false) + 
d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false) + d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false) + d.vars.Register(value.NewEmail(&d.TLS.Email, "cert@datarhei.com"), "tls.email", "CORE_TLS_EMAIL", nil, "Email for Let's Encrypt registration", false, false) + d.vars.Register(value.NewFile(&d.TLS.CertFile, "", d.fs), "tls.cert_file", "CORE_TLS_CERT_FILE", []string{"CORE_TLS_CERTFILE"}, "Path to certificate file in PEM format", false, false) + d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEY_FILE", []string{"CORE_TLS_KEYFILE"}, "Path to key file in PEM format", false, false) // Storage - d.val(newFileValue(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false) + d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false) // Storage (Disk) - d.val(newMustDirValue(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false) - d.val(newInt64Value(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false) - d.val(newBoolValue(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false) - d.val(newUint64Value(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false) - d.val(newInt64Value(&d.Storage.Disk.Cache.TTL, 300), "storage.disk.cache.ttl_seconds", "CORE_STORAGE_DISK_CACHE_TTLSECONDS", nil, "Seconds to keep files in cache", false, false) - d.val(newUint64Value(&d.Storage.Disk.Cache.FileSize, 1), "storage.disk.cache.max_file_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES", nil, "Max. file size to put in cache", false, false) - d.val(newStringListValue(&d.Storage.Disk.Cache.Types.Allow, []string{}, " "), "storage.disk.cache.type.allow", "CORE_STORAGE_DISK_CACHE_TYPES_ALLOW", []string{"CORE_STORAGE_DISK_CACHE_TYPES"}, "File extensions to cache, empty for all", false, false) - d.val(newStringListValue(&d.Storage.Disk.Cache.Types.Block, []string{".m3u8", ".mpd"}, " "), "storage.disk.cache.type.block", "CORE_STORAGE_DISK_CACHE_TYPES_BLOCK", nil, "File extensions not to cache, empty for none", false, false) + d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false) + d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAX_SIZE_MBYTES", []string{"CORE_STORAGE_DISK_MAXSIZEMBYTES"}, "Max. 
allowed megabytes for storage.disk.dir, 0 for unlimited", false, false) + d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false) + d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAX_SIZE_MBYTES", []string{"CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES"}, "Max. allowed cache size, 0 for unlimited", false, false) + d.vars.Register(value.NewInt64(&d.Storage.Disk.Cache.TTL, 300), "storage.disk.cache.ttl_seconds", "CORE_STORAGE_DISK_CACHE_TTL_SECONDS", []string{"CORE_STORAGE_DISK_CACHE_TTLSECONDS"}, "Seconds to keep files in cache", false, false) + d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.FileSize, 1), "storage.disk.cache.max_file_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAX_FILE_SIZE_MBYTES", []string{"CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES"}, "Max. file size to put in cache", false, false) + d.vars.Register(value.NewStringList(&d.Storage.Disk.Cache.Types.Allow, []string{}, " "), "storage.disk.cache.type.allow", "CORE_STORAGE_DISK_CACHE_TYPES_ALLOW", []string{"CORE_STORAGE_DISK_CACHE_TYPES"}, "File extensions to cache, empty for all", false, false) + d.vars.Register(value.NewStringList(&d.Storage.Disk.Cache.Types.Block, []string{".m3u8", ".mpd"}, " "), "storage.disk.cache.type.block", "CORE_STORAGE_DISK_CACHE_TYPES_BLOCK", nil, "File extensions not to cache, empty for none", false, false) // Storage (Memory) - d.val(newBoolValue(&d.Storage.Memory.Auth.Enable, true), "storage.memory.auth.enable", "CORE_STORAGE_MEMORY_AUTH_ENABLE", nil, "Enable basic auth for PUT,POST, and DELETE on /memfs", false, false) - d.val(newStringValue(&d.Storage.Memory.Auth.Username, "admin"), "storage.memory.auth.username", "CORE_STORAGE_MEMORY_AUTH_USERNAME", nil, "Username for Basic-Auth of /memfs", false, false) - d.val(newStringValue(&d.Storage.Memory.Auth.Password, rand.StringAlphanumeric(18)), "storage.memory.auth.password", "CORE_STORAGE_MEMORY_AUTH_PASSWORD", nil, "Password for Basic-Auth of /memfs", false, true) - d.val(newInt64Value(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. allowed megabytes for /memfs, 0 for unlimited", false, false) - d.val(newBoolValue(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false) + d.vars.Register(value.NewBool(&d.Storage.Memory.Auth.Enable, true), "storage.memory.auth.enable", "CORE_STORAGE_MEMORY_AUTH_ENABLE", nil, "Enable basic auth for PUT,POST, and DELETE on /memfs", false, false) + d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Username, "admin"), "storage.memory.auth.username", "CORE_STORAGE_MEMORY_AUTH_USERNAME", nil, "Username for Basic-Auth of /memfs", false, false) + d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Password, rand.StringAlphanumeric(18)), "storage.memory.auth.password", "CORE_STORAGE_MEMORY_AUTH_PASSWORD", nil, "Password for Basic-Auth of /memfs", false, true) + d.vars.Register(value.NewInt64(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAX_SIZE_MBYTES", []string{"CORE_STORAGE_MEMORY_MAXSIZEMBYTES"}, "Max. 
allowed megabytes for /memfs, 0 for unlimited", false, false) + d.vars.Register(value.NewBool(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false) + + // Storage (S3) + d.vars.Register(value.NewS3StorageListValue(&d.Storage.S3, []value.S3Storage{}, "|"), "storage.s3", "CORE_STORAGE_S3", nil, "List of S3 storage URLS", false, false) // Storage (CORS) - d.val(newCORSOriginsValue(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false) + d.vars.Register(value.NewCORSOrigins(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false) // RTMP - d.val(newBoolValue(&d.RTMP.Enable, false), "rtmp.enable", "CORE_RTMP_ENABLE", nil, "Enable RTMP server", false, false) - d.val(newBoolValue(&d.RTMP.EnableTLS, false), "rtmp.enable_tls", "CORE_RTMP_ENABLE_TLS", nil, "Enable RTMPS server instead of RTMP", false, false) - d.val(newAddressValue(&d.RTMP.Address, ":1935"), "rtmp.address", "CORE_RTMP_ADDRESS", nil, "RTMP server listen address", false, false) - d.val(newAddressValue(&d.RTMP.AddressTLS, ":1936"), "rtmp.address_tls", "CORE_RTMP_ADDRESS_TLS", nil, "RTMPS server listen address", false, false) - d.val(newAbsolutePathValue(&d.RTMP.App, "/"), "rtmp.app", "CORE_RTMP_APP", nil, "RTMP app for publishing", false, false) - d.val(newStringValue(&d.RTMP.Token, ""), "rtmp.token", "CORE_RTMP_TOKEN", nil, "RTMP token for publishing and playing", false, true) + d.vars.Register(value.NewBool(&d.RTMP.Enable, false), "rtmp.enable", "CORE_RTMP_ENABLE", nil, "Enable RTMP server", false, false) + d.vars.Register(value.NewBool(&d.RTMP.EnableTLS, false), "rtmp.enable_tls", "CORE_RTMP_ENABLE_TLS", nil, "Enable RTMPS server instead of RTMP", false, false) + d.vars.Register(value.NewAddress(&d.RTMP.Address, ":1935"), "rtmp.address", "CORE_RTMP_ADDRESS", nil, "RTMP server listen address", false, false) + d.vars.Register(value.NewAddress(&d.RTMP.AddressTLS, ":1936"), "rtmp.address_tls", "CORE_RTMP_ADDRESS_TLS", nil, "RTMPS server listen address", false, false) + d.vars.Register(value.NewAbsolutePath(&d.RTMP.App, "/"), "rtmp.app", "CORE_RTMP_APP", nil, "RTMP app for publishing", false, false) + d.vars.Register(value.NewString(&d.RTMP.Token, ""), "rtmp.token", "CORE_RTMP_TOKEN", nil, "RTMP token for publishing and playing", false, true) // SRT - d.val(newBoolValue(&d.SRT.Enable, false), "srt.enable", "CORE_SRT_ENABLE", nil, "Enable SRT server", false, false) - d.val(newAddressValue(&d.SRT.Address, ":6000"), "srt.address", "CORE_SRT_ADDRESS", nil, "SRT server listen address", false, false) - d.val(newStringValue(&d.SRT.Passphrase, ""), "srt.passphrase", "CORE_SRT_PASSPHRASE", nil, "SRT encryption passphrase", false, true) - d.val(newStringValue(&d.SRT.Token, ""), "srt.token", "CORE_SRT_TOKEN", nil, "SRT token for publishing and playing", false, true) - d.val(newBoolValue(&d.SRT.Log.Enable, false), "srt.log.enable", "CORE_SRT_LOG_ENABLE", nil, "Enable SRT server logging", false, false) - d.val(newStringListValue(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false) + d.vars.Register(value.NewBool(&d.SRT.Enable, false), "srt.enable", "CORE_SRT_ENABLE", nil, "Enable SRT server", false, false) + d.vars.Register(value.NewAddress(&d.SRT.Address, 
":6000"), "srt.address", "CORE_SRT_ADDRESS", nil, "SRT server listen address", false, false) + d.vars.Register(value.NewString(&d.SRT.Passphrase, ""), "srt.passphrase", "CORE_SRT_PASSPHRASE", nil, "SRT encryption passphrase", false, true) + d.vars.Register(value.NewString(&d.SRT.Token, ""), "srt.token", "CORE_SRT_TOKEN", nil, "SRT token for publishing and playing", false, true) + d.vars.Register(value.NewBool(&d.SRT.Log.Enable, false), "srt.log.enable", "CORE_SRT_LOG_ENABLE", nil, "Enable SRT server logging", false, false) + d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false) // FFmpeg - d.val(newExecValue(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false) - d.val(newInt64Value(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false) - d.val(newStringListValue(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false) - d.val(newStringListValue(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false) - d.val(newStringListValue(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expression to match against the output addresses", false, false) - d.val(newStringListValue(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expression to match against the output addresses", false, false) - d.val(newIntValue(&d.FFmpeg.Log.MaxLines, 50), "ffmpeg.log.max_lines", "CORE_FFMPEG_LOG_MAXLINES", nil, "Number of latest log lines to keep for each process", false, false) - d.val(newIntValue(&d.FFmpeg.Log.MaxHistory, 3), "ffmpeg.log.max_history", "CORE_FFMPEG_LOG_MAXHISTORY", nil, "Number of latest logs to keep for each process", false, false) + d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false) + d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. 
allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
+    d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expressions to match against the input addresses", false, false)
+    d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expressions to match against the input addresses", false, false)
+    d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expressions to match against the output addresses", false, false)
+    d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expressions to match against the output addresses", false, false)
+    d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxLines, 50), "ffmpeg.log.max_lines", "CORE_FFMPEG_LOG_MAX_LINES", []string{"CORE_FFMPEG_LOG_MAXLINES"}, "Number of latest log lines to keep for each process", false, false)
+    d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxHistory, 3), "ffmpeg.log.max_history", "CORE_FFMPEG_LOG_MAX_HISTORY", []string{"CORE_FFMPEG_LOG_MAXHISTORY"}, "Number of latest logs to keep for each process", false, false)

     // Playout
-    d.val(newBoolValue(&d.Playout.Enable, false), "playout.enable", "CORE_PLAYOUT_ENABLE", nil, "Enable playout proxy where available", false, false)
-    d.val(newPortValue(&d.Playout.MinPort, 0), "playout.min_port", "CORE_PLAYOUT_MINPORT", nil, "Min. playout server port", false, false)
-    d.val(newPortValue(&d.Playout.MaxPort, 0), "playout.max_port", "CORE_PLAYOUT_MAXPORT", nil, "Max. playout server port", false, false)
+    d.vars.Register(value.NewBool(&d.Playout.Enable, false), "playout.enable", "CORE_PLAYOUT_ENABLE", nil, "Enable playout proxy where available", false, false)
+    d.vars.Register(value.NewPort(&d.Playout.MinPort, 0), "playout.min_port", "CORE_PLAYOUT_MIN_PORT", []string{"CORE_PLAYOUT_MINPORT"}, "Min. playout server port", false, false)
+    d.vars.Register(value.NewPort(&d.Playout.MaxPort, 0), "playout.max_port", "CORE_PLAYOUT_MAX_PORT", []string{"CORE_PLAYOUT_MAXPORT"}, "Max.
playout server port", false, false) // Debug - d.val(newBoolValue(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false) - d.val(newIntValue(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false) + d.vars.Register(value.NewBool(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false) + d.vars.Register(value.NewInt(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCE_GC", []string{"CORE_DEBUG_FORCEGC"}, "Number of seconds between forcing GC to return memory to the OS", false, false) + d.vars.Register(value.NewInt64(&d.Debug.MemoryLimit, 0), "debug.memory_limit_mbytes", "CORE_DEBUG_MEMORY_LIMIT_MBYTES", nil, "Impose a soft memory limit for the core, in megabytes", false, false) // Metrics - d.val(newBoolValue(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false) - d.val(newBoolValue(&d.Metrics.EnablePrometheus, false), "metrics.enable_prometheus", "CORE_METRICS_ENABLE_PROMETHEUS", nil, "Enable prometheus endpoint /metrics", false, false) - d.val(newInt64Value(&d.Metrics.Range, 300), "metrics.range_seconds", "CORE_METRICS_RANGE_SECONDS", nil, "Seconds to keep history data", false, false) - d.val(newInt64Value(&d.Metrics.Interval, 2), "metrics.interval_seconds", "CORE_METRICS_INTERVAL_SECONDS", nil, "Interval for collecting metrics", false, false) + d.vars.Register(value.NewBool(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false) + d.vars.Register(value.NewBool(&d.Metrics.EnablePrometheus, false), "metrics.enable_prometheus", "CORE_METRICS_ENABLE_PROMETHEUS", nil, "Enable prometheus endpoint /metrics", false, false) + d.vars.Register(value.NewInt64(&d.Metrics.Range, 300), "metrics.range_seconds", "CORE_METRICS_RANGE_SECONDS", nil, "Seconds to keep history data", false, false) + d.vars.Register(value.NewInt64(&d.Metrics.Interval, 2), "metrics.interval_seconds", "CORE_METRICS_INTERVAL_SECONDS", nil, "Interval for collecting metrics", false, false) // Sessions - d.val(newBoolValue(&d.Sessions.Enable, true), "sessions.enable", "CORE_SESSIONS_ENABLE", nil, "Enable collecting HLS session stats for /memfs", false, false) - d.val(newCIDRListValue(&d.Sessions.IPIgnoreList, []string{"127.0.0.1/32", "::1/128"}, ","), "sessions.ip_ignorelist", "CORE_SESSIONS_IP_IGNORELIST", nil, "List of IP ranges in CIDR notation to ignore", false, false) - d.val(newIntValue(&d.Sessions.SessionTimeout, 30), "sessions.session_timeout_sec", "CORE_SESSIONS_SESSION_TIMEOUT_SEC", nil, "Timeout for an idle session", false, false) - d.val(newBoolValue(&d.Sessions.Persist, false), "sessions.persist", "CORE_SESSIONS_PERSIST", nil, "Whether to persist session history. Will be stored as sessions.json in db.dir", false, false) - d.val(newIntValue(&d.Sessions.PersistInterval, 300), "sessions.persist_interval_sec", "CORE_SESSIONS_PERSIST_INTERVAL_SEC", nil, "Interval in seconds in which to persist the current session history", false, false) - d.val(newUint64Value(&d.Sessions.MaxBitrate, 0), "sessions.max_bitrate_mbit", "CORE_SESSIONS_MAXBITRATE_MBIT", nil, "Max. 
allowed outgoing bitrate in mbit/s, 0 for unlimited", false, false) - d.val(newUint64Value(&d.Sessions.MaxSessions, 0), "sessions.max_sessions", "CORE_SESSIONS_MAXSESSIONS", nil, "Max. allowed number of simultaneous sessions, 0 for unlimited", false, false) + d.vars.Register(value.NewBool(&d.Sessions.Enable, true), "sessions.enable", "CORE_SESSIONS_ENABLE", nil, "Enable collecting HLS session stats for /memfs", false, false) + d.vars.Register(value.NewCIDRList(&d.Sessions.IPIgnoreList, []string{"127.0.0.1/32", "::1/128"}, ","), "sessions.ip_ignorelist", "CORE_SESSIONS_IP_IGNORELIST", nil, "List of IP ranges in CIDR notation to ignore", false, false) + d.vars.Register(value.NewInt(&d.Sessions.SessionTimeout, 30), "sessions.session_timeout_sec", "CORE_SESSIONS_SESSION_TIMEOUT_SEC", nil, "Timeout for an idle session", false, false) + d.vars.Register(value.NewBool(&d.Sessions.Persist, false), "sessions.persist", "CORE_SESSIONS_PERSIST", nil, "Whether to persist session history. Will be stored as sessions.json in db.dir", false, false) + d.vars.Register(value.NewInt(&d.Sessions.PersistInterval, 300), "sessions.persist_interval_sec", "CORE_SESSIONS_PERSIST_INTERVAL_SEC", nil, "Interval in seconds in which to persist the current session history", false, false) + d.vars.Register(value.NewUint64(&d.Sessions.MaxBitrate, 0), "sessions.max_bitrate_mbit", "CORE_SESSIONS_MAXBITRATE_MBIT", nil, "Max. allowed outgoing bitrate in mbit/s, 0 for unlimited", false, false) + d.vars.Register(value.NewUint64(&d.Sessions.MaxSessions, 0), "sessions.max_sessions", "CORE_SESSIONS_MAX_SESSIONS", []string{"CORE_SESSIONS_MAXSESSIONS"}, "Max. allowed number of simultaneous sessions, 0 for unlimited", false, false) // Service - d.val(newBoolValue(&d.Service.Enable, false), "service.enable", "CORE_SERVICE_ENABLE", nil, "Enable connecting to the Restreamer Service", false, false) - d.val(newStringValue(&d.Service.Token, ""), "service.token", "CORE_SERVICE_TOKEN", nil, "Restreamer Service account token", false, true) - d.val(newURLValue(&d.Service.URL, "https://service.datarhei.com"), "service.url", "CORE_SERVICE_URL", nil, "URL of the Restreamer Service", false, false) + d.vars.Register(value.NewBool(&d.Service.Enable, false), "service.enable", "CORE_SERVICE_ENABLE", nil, "Enable connecting to the Restreamer Service", false, false) + d.vars.Register(value.NewString(&d.Service.Token, ""), "service.token", "CORE_SERVICE_TOKEN", nil, "Restreamer Service account token", false, true) + d.vars.Register(value.NewURL(&d.Service.URL, "https://service.datarhei.com"), "service.url", "CORE_SERVICE_URL", nil, "URL of the Restreamer Service", false, false) // Router - d.val(newStringListValue(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false) - d.val(newStringMapStringValue(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false) - d.val(newDirValue(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false) -} - -func (d *Config) val(val value, name, envName string, envAltNames []string, description string, required, disguise bool) { - d.vars = append(d.vars, &variable{ - value: val, - defVal: val.String(), - name: name, - envName: envName, - envAltNames: envAltNames, - description: description, - required: required, - disguise: disguise, - }) -} - -func (d *Config) log(level string, v 
*variable, format string, args ...interface{}) {
-    variable := Variable{
-        Value:       v.value.String(),
-        Name:        v.name,
-        EnvName:     v.envName,
-        Description: v.description,
-        Merged:      v.merged,
-    }
-
-    if v.disguise {
-        variable.Value = "***"
-    }
-
-    l := message{
-        message:  fmt.Sprintf(format, args...),
-        variable: variable,
-        level:    level,
-    }
-
-    d.logs = append(d.logs, l)
-}
-
-// Merge merges the values of the known environment variables into the configuration
-func (d *Config) Merge() {
-    for _, v := range d.vars {
-        if len(v.envName) == 0 {
-            continue
-        }
-
-        var envval string
-        var ok bool
-
-        envval, ok = os.LookupEnv(v.envName)
-        if !ok {
-            foundAltName := false
-
-            for _, envName := range v.envAltNames {
-                envval, ok = os.LookupEnv(envName)
-                if ok {
-                    foundAltName = true
-                    d.log("warn", v, "deprecated name, please use %s", v.envName)
-                    break
-                }
-            }
-
-            if !foundAltName {
-                continue
-            }
-        }
-
-        err := v.value.Set(envval)
-        if err != nil {
-            d.log("error", v, "%s", err.Error())
-        }
-
-        v.merged = true
-    }
+    d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
+    d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
+    d.vars.Register(value.NewDir(&d.Router.UIPath, "", d.fs), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
 }

 // Validate validates the current state of the Config for completeness and sanity. Errors are
 // written to the log. Use resetLogs to indicate to reset the logs prior to validation.
 func (d *Config) Validate(resetLogs bool) {
     if resetLogs {
-        d.logs = nil
+        d.vars.ResetLogs()
     }

     if d.Version != version {
-        d.log("error", d.findVariable("version"), "unknown configuration layout version (found version %d, expecting version %d)", d.Version, version)
+        d.vars.Log("error", "version", "unknown configuration layout version (found version %d, expecting version %d)", d.Version, version)
         return
     }

-    for _, v := range d.vars {
-        d.log("info", v, "%s", "")
-
-        err := v.value.Validate()
-        if err != nil {
-            d.log("error", v, "%s", err.Error())
-        }
-
-        if v.required && v.value.IsEmpty() {
-            d.log("error", v, "a value is required")
-        }
-    }
+    d.vars.Validate()

     // Individual sanity checks

     // If HTTP Auth is enabled, check that the username and password are set
     if d.API.Auth.Enable {
         if len(d.API.Auth.Username) == 0 || len(d.API.Auth.Password) == 0 {
-            d.log("error", d.findVariable("api.auth.enable"), "api.auth.username and api.auth.password must be set")
+            d.vars.Log("error", "api.auth.enable", "api.auth.username and api.auth.password must be set")
         }
     }

     // If Auth0 is enabled, check that domain, audience, and clientid are set
     if d.API.Auth.Auth0.Enable {
         if len(d.API.Auth.Auth0.Tenants) == 0 {
-            d.log("error", d.findVariable("api.auth.auth0.enable"), "at least one tenants must be set")
+            d.vars.Log("error", "api.auth.auth0.enable", "at least one tenant must be set")
         }

         for i, t := range d.API.Auth.Auth0.Tenants {
             if len(t.Domain) == 0 || len(t.Audience) == 0 || len(t.ClientID) == 0 {
-                d.log("error", d.findVariable("api.auth.auth0.tenants"), "domain, audience, and clientid must be set (tenant %d)", i)
+                d.vars.Log("error", "api.auth.auth0.tenants", "domain, audience, and clientid must be set (tenant %d)", i)
             }
         }
     }
@@ -387,14 +313,14 @@ func (d *Config) Validate(resetLogs bool)
{ // If TLS is enabled and Let's Encrypt is disabled, require certfile and keyfile if d.TLS.Enable && !d.TLS.Auto { if len(d.TLS.CertFile) == 0 || len(d.TLS.KeyFile) == 0 { - d.log("error", d.findVariable("tls.enable"), "tls.certfile and tls.keyfile must be set") + d.vars.Log("error", "tls.enable", "tls.certfile and tls.keyfile must be set") } } // If TLS and Let's Encrypt certificate is enabled, we require a public hostname if d.TLS.Enable && d.TLS.Auto { if len(d.Host.Name) == 0 { - d.log("error", d.findVariable("host.name"), "a hostname must be set in order to get an automatic TLS certificate") + d.vars.Log("error", "host.name", "a hostname must be set in order to get an automatic TLS certificate") } else { r := &net.Resolver{ PreferGo: true, @@ -404,7 +330,7 @@ func (d *Config) Validate(resetLogs bool) { for _, host := range d.Host.Name { // Don't lookup IP addresses if ip := net.ParseIP(host); ip != nil { - d.log("error", d.findVariable("host.name"), "only host names are allowed if automatic TLS is enabled, but found IP address: %s", host) + d.vars.Log("error", "host.name", "only host names are allowed if automatic TLS is enabled, but found IP address: %s", host) } // Lookup host name with a timeout @@ -412,7 +338,7 @@ func (d *Config) Validate(resetLogs bool) { _, err := r.LookupHost(ctx, host) if err != nil { - d.log("error", d.findVariable("host.name"), "the host '%s' can't be resolved and will not work with automatic TLS", host) + d.vars.Log("error", "host.name", "the host '%s' can't be resolved and will not work with automatic TLS", host) } cancel() @@ -423,32 +349,31 @@ func (d *Config) Validate(resetLogs bool) { // If TLS and Let's Encrypt certificate is enabled, we require a non-empty email address if d.TLS.Enable && d.TLS.Auto { if len(d.TLS.Email) == 0 { - v := d.findVariable("tls.email") - v.value.Set(v.defVal) + d.vars.SetDefault("tls.email") } } // If TLS for RTMP is enabled, TLS must be enabled if d.RTMP.EnableTLS { if !d.RTMP.Enable { - d.log("error", d.findVariable("rtmp.enable"), "RTMP server must be enabled if RTMPS server is enabled") + d.vars.Log("error", "rtmp.enable", "RTMP server must be enabled if RTMPS server is enabled") } if !d.TLS.Enable { - d.log("error", d.findVariable("rtmp.enable_tls"), "RTMPS server can only be enabled if TLS is enabled") + d.vars.Log("error", "rtmp.enable_tls", "RTMPS server can only be enabled if TLS is enabled") } if len(d.RTMP.AddressTLS) == 0 { - d.log("error", d.findVariable("rtmp.address_tls"), "RTMPS server address must be set") + d.vars.Log("error", "rtmp.address_tls", "RTMPS server address must be set") } if d.RTMP.Enable && d.RTMP.Address == d.RTMP.AddressTLS { - d.log("error", d.findVariable("rtmp.address"), "The RTMP and RTMPS server can't listen on the same address") + d.vars.Log("error", "rtmp.address", "The RTMP and RTMPS server can't listen on the same address") } } // If CORE_MEMFS_USERNAME and CORE_MEMFS_PASSWORD are set, automatically active/deactivate Basic-Auth for memfs - if d.findVariable("storage.memory.auth.username").merged && d.findVariable("storage.memory.auth.password").merged { + if d.vars.IsMerged("storage.memory.auth.username") && d.vars.IsMerged("storage.memory.auth.password") { d.Storage.Memory.Auth.Enable = true if len(d.Storage.Memory.Auth.Username) == 0 && len(d.Storage.Memory.Auth.Password) == 0 { @@ -459,121 +384,91 @@ func (d *Config) Validate(resetLogs bool) { // If Basic-Auth for memfs is enable, check that the username and password are set if d.Storage.Memory.Auth.Enable { if 
len(d.Storage.Memory.Auth.Username) == 0 || len(d.Storage.Memory.Auth.Password) == 0 {
-            d.log("error", d.findVariable("storage.memory.auth.enable"), "storage.memory.auth.username and storage.memory.auth.password must be set")
+            d.vars.Log("error", "storage.memory.auth.enable", "storage.memory.auth.username and storage.memory.auth.password must be set")
+        }
+    }
+
+    if len(d.Storage.S3) != 0 {
+        names := map[string]struct{}{
+            "disk": {},
+            "mem":  {},
+        }
+
+        for _, s3 := range d.Storage.S3 {
+            if _, ok := names[s3.Name]; ok {
+                d.vars.Log("error", "storage.s3", "the name %s is already in use or reserved", s3.Name)
+            }
+
+            names[s3.Name] = struct{}{}
         }
     }

     // If playout is enabled, check that the port range is sane
     if d.Playout.Enable {
         if d.Playout.MinPort >= d.Playout.MaxPort {
-            d.log("error", d.findVariable("playout.min_port"), "must be bigger than playout.max_port")
+            d.vars.Log("error", "playout.min_port", "must be smaller than playout.max_port")
         }
     }

     // If cache is enabled, a valid TTL has to be set to a useful value
     if d.Storage.Disk.Cache.Enable && d.Storage.Disk.Cache.TTL < 0 {
-        d.log("error", d.findVariable("storage.disk.cache.ttl_seconds"), "must be equal or greater than 0")
+        d.vars.Log("error", "storage.disk.cache.ttl_seconds", "must be equal to or greater than 0")
     }

     // If the stats are enabled, the session timeout has to be set to a useful value
     if d.Sessions.Enable && d.Sessions.SessionTimeout < 1 {
-        d.log("error", d.findVariable("stats.session_timeout_sec"), "must be equal or greater than 1")
+        d.vars.Log("error", "sessions.session_timeout_sec", "must be equal to or greater than 1")
     }

     // If the stats and their persistence are enabled, the persist interval has to be set to a useful value
     if d.Sessions.Enable && d.Sessions.PersistInterval < 0 {
-        d.log("error", d.findVariable("stats.persist_interval_sec"), "must be at equal or greater than 0")
+        d.vars.Log("error", "sessions.persist_interval_sec", "must be equal to or greater than 0")
     }

     // If the service is enabled, the token and endpoint have to be defined
     if d.Service.Enable {
         if len(d.Service.Token) == 0 {
-            d.log("error", d.findVariable("service.token"), "must be non-empty")
+            d.vars.Log("error", "service.token", "must be non-empty")
         }

         if len(d.Service.URL) == 0 {
-            d.log("error", d.findVariable("service.url"), "must be non-empty")
+            d.vars.Log("error", "service.url", "must be non-empty")
         }
     }

     // If historic metrics are enabled, the timerange and interval have to be valid
     if d.Metrics.Enable {
         if d.Metrics.Range <= 0 {
-            d.log("error", d.findVariable("metrics.range"), "must be greater 0")
+            d.vars.Log("error", "metrics.range_seconds", "must be greater than 0")
         }

         if d.Metrics.Interval <= 0 {
-            d.log("error", d.findVariable("metrics.interval"), "must be greater 0")
+            d.vars.Log("error", "metrics.interval_seconds", "must be greater than 0")
         }

         if d.Metrics.Interval > d.Metrics.Range {
-            d.log("error", d.findVariable("metrics.interval"), "must be smaller than the range")
+            d.vars.Log("error", "metrics.interval_seconds", "must be smaller than the range")
         }
     }
 }

-func (d *Config) findVariable(name string) *variable {
-    for _, v := range d.vars {
-        if v.name == name {
-            return v
-        }
-    }
-
-    return nil
+// Merge merges the values of the known environment variables into the configuration
+func (d *Config) Merge() {
+    d.vars.Merge()
 }

 // Messages calls for each log entry the provided callback. The level has the values 'error', 'warn', or 'info'.
 // The name is the name of the configuration value, e.g. 'api.auth.enable'. The message is the log message.
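
Taken together, the refactor above collapses the old per-Config bookkeeping (val, log, findVariable, the hand-rolled Merge loop) into the vars registry. A minimal consumer sketch follows, using only names visible in this diff (config.New, Merge, Validate, Messages, HasErrors, Overrides); that vars.Variable exposes a Name field like the removed Variable struct did is an assumption:

```go
package main

import (
	"log"

	"github.com/datarhei/core/v16/config"
	"github.com/datarhei/core/v16/config/vars"
	"github.com/datarhei/core/v16/io/fs"
)

func main() {
	// In-memory filesystem as used by the tests in this diff.
	memfs, err := fs.NewMemFilesystem(fs.MemConfig{})
	if err != nil {
		log.Fatal(err)
	}

	cfg := config.New(memfs)

	cfg.Merge()        // overlay CORE_* environment variables (delegates to d.vars.Merge)
	cfg.Validate(true) // reset logs, run per-variable checks, then the sanity checks

	// Messages replays the validation log entry by entry.
	cfg.Messages(func(level string, v vars.Variable, message string) {
		if level == "error" {
			log.Printf("config %s: %s", v.Name, message) // v.Name is assumed to be exported
		}
	})

	if cfg.HasErrors() {
		log.Fatalf("invalid configuration (%d values overridden by env)", len(cfg.Overrides()))
	}
}
```

The TestValidateDefault test added further below in config_test.go exercises exactly this Validate/Messages/HasErrors sequence.
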
-func (d *Config) Messages(logger func(level string, v Variable, message string)) { - for _, l := range d.logs { - logger(l.level, l.variable, l.message) - } +func (d *Config) Messages(logger func(level string, v vars.Variable, message string)) { + d.vars.Messages(logger) } // HasErrors returns whether there are some error messages in the log. func (d *Config) HasErrors() bool { - for _, l := range d.logs { - if l.level == "error" { - return true - } - } - - return false + return d.vars.HasErrors() } // Overrides returns a list of configuration value names that have been overriden by an environment variable. func (d *Config) Overrides() []string { - overrides := []string{} - - for _, v := range d.vars { - if v.merged { - overrides = append(overrides, v.name) - } - } - - return overrides -} - -func copyStringSlice(src []string) []string { - dst := make([]string, len(src)) - copy(dst, src) - - return dst -} - -func copyStringMap(src map[string]string) map[string]string { - dst := make(map[string]string) - - for k, v := range src { - dst[k] = v - } - - return dst -} - -func copyTenantSlice(src []Auth0Tenant) []Auth0Tenant { - dst := make([]Auth0Tenant, len(src)) - copy(dst, src) - - return dst + return d.vars.Overrides() } diff --git a/config/config_test.go b/config/config_test.go index 38f54aa5..132857fe 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -1,55 +1,84 @@ package config import ( + "strings" "testing" - "github.com/stretchr/testify/assert" + "github.com/datarhei/core/v16/config/vars" + "github.com/datarhei/core/v16/io/fs" + + "github.com/stretchr/testify/require" ) func TestConfigCopy(t *testing.T) { - config1 := New() + fs, _ := fs.NewMemFilesystem(fs.MemConfig{}) + config1 := New(fs) config1.Version = 42 config1.DB.Dir = "foo" - val1 := config1.findVariable("version") - val2 := config1.findVariable("db.dir") - val3 := config1.findVariable("host.name") + val1, _ := config1.Get("version") + val2, _ := config1.Get("db.dir") + val3, _ := config1.Get("host.name") - assert.Equal(t, "42", val1.value.String()) - assert.Equal(t, nil, val1.value.Validate()) - assert.Equal(t, false, val1.value.IsEmpty()) + require.Equal(t, "42", val1) + require.Equal(t, "foo", val2) + require.Equal(t, "(empty)", val3) - assert.Equal(t, "foo", val2.value.String()) - assert.Equal(t, "(empty)", val3.value.String()) + config1.Set("host.name", "foo.com") + val3, _ = config1.Get("host.name") + require.Equal(t, "foo.com", val3) - val3.value.Set("foo.com") + config2 := config1.Clone() - assert.Equal(t, "foo.com", val3.value.String()) + require.Equal(t, int64(42), config2.Version) + require.Equal(t, "foo", config2.DB.Dir) + require.Equal(t, []string{"foo.com"}, config2.Host.Name) - config2 := NewConfigFrom(config1) + config1.Set("version", "77") - assert.Equal(t, int64(42), config2.Version) - assert.Equal(t, "foo", config2.DB.Dir) - assert.Equal(t, []string{"foo.com"}, config2.Host.Name) + require.Equal(t, int64(77), config1.Version) + require.Equal(t, int64(42), config2.Version) - val1.value.Set("77") + config1.Set("db.dir", "bar") - assert.Equal(t, int64(77), config1.Version) - assert.Equal(t, int64(42), config2.Version) - - val2.value.Set("bar") - - assert.Equal(t, "bar", config1.DB.Dir) - assert.Equal(t, "foo", config2.DB.Dir) + require.Equal(t, "bar", config1.DB.Dir) + require.Equal(t, "foo", config2.DB.Dir) config2.DB.Dir = "baz" - assert.Equal(t, "bar", config1.DB.Dir) - assert.Equal(t, "baz", config2.DB.Dir) + require.Equal(t, "bar", config1.DB.Dir) + require.Equal(t, "baz", config2.DB.Dir) 
config1.Host.Name[0] = "bar.com" - assert.Equal(t, []string{"bar.com"}, config1.Host.Name) - assert.Equal(t, []string{"foo.com"}, config2.Host.Name) + require.Equal(t, []string{"bar.com"}, config1.Host.Name) + require.Equal(t, []string{"foo.com"}, config2.Host.Name) +} + +func TestValidateDefault(t *testing.T) { + fs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + size, fresh, err := fs.WriteFileReader("./mime.types", strings.NewReader("xxxxx")) + require.Equal(t, int64(5), size) + require.Equal(t, true, fresh) + require.NoError(t, err) + + _, _, err = fs.WriteFileReader("/bin/ffmpeg", strings.NewReader("xxxxx")) + require.NoError(t, err) + + cfg := New(fs) + + cfg.Validate(true) + + errors := []string{} + cfg.Messages(func(level string, v vars.Variable, message string) { + if level == "error" { + errors = append(errors, message) + } + }) + + require.Equal(t, 0, len(cfg.Overrides())) + require.Equal(t, false, cfg.HasErrors(), errors) } diff --git a/config/copy/copy.go b/config/copy/copy.go new file mode 100644 index 00000000..2541bf48 --- /dev/null +++ b/config/copy/copy.go @@ -0,0 +1,30 @@ +package copy + +import "github.com/datarhei/core/v16/config/value" + +func StringMap(src map[string]string) map[string]string { + dst := make(map[string]string) + + for k, v := range src { + dst[k] = v + } + + return dst +} + +func TenantSlice(src []value.Auth0Tenant) []value.Auth0Tenant { + dst := Slice(src) + + for i, t := range src { + dst[i].Users = Slice(t.Users) + } + + return dst +} + +func Slice[T any](src []T) []T { + dst := make([]T, len(src)) + copy(dst, src) + + return dst +} diff --git a/config/data.go b/config/data.go index d273368d..35507888 100644 --- a/config/data.go +++ b/config/data.go @@ -1,13 +1,20 @@ package config -import "time" +import ( + "time" + + "github.com/datarhei/core/v16/config/copy" + v2 "github.com/datarhei/core/v16/config/v2" + "github.com/datarhei/core/v16/config/value" + "github.com/datarhei/core/v16/io/fs" +) // Data is the actual configuration data for the app type Data struct { - CreatedAt time.Time `json:"created_at"` - LoadedAt time.Time `json:"-"` - UpdatedAt time.Time `json:"-"` - Version int64 `json:"version" jsonschema:"minimum=3,maximum=3"` + CreatedAt time.Time `json:"created_at"` // When this config has been persisted + LoadedAt time.Time `json:"-"` // When this config has been actually used + UpdatedAt time.Time `json:"-"` // Irrelevant + Version int64 `json:"version" jsonschema:"minimum=3,maximum=3" format:"int64"` ID string `json:"id"` Name string `json:"name"` Address string `json:"address"` @@ -15,7 +22,7 @@ type Data struct { Log struct { Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"` Topics []string `json:"topics"` - MaxLines int `json:"max_lines"` + MaxLines int `json:"max_lines" format:"int"` } `json:"log"` DB struct { Dir string `json:"dir"` @@ -45,8 +52,8 @@ type Data struct { Secret string `json:"secret"` } `json:"jwt"` Auth0 struct { - Enable bool `json:"enable"` - Tenants []Auth0Tenant `json:"tenants"` + Enable bool `json:"enable"` + Tenants []value.Auth0Tenant `json:"tenants"` } `json:"auth0"` } `json:"auth"` } `json:"api"` @@ -61,12 +68,12 @@ type Data struct { Storage struct { Disk struct { Dir string `json:"dir"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Cache struct { Enable bool `json:"enable"` - Size uint64 `json:"max_size_mbytes"` - TTL int64 `json:"ttl_seconds"` - FileSize 
uint64 `json:"max_file_size_mbytes"` + Size uint64 `json:"max_size_mbytes" format:"uint64"` + TTL int64 `json:"ttl_seconds" format:"int64"` + FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"` Types struct { Allow []string `json:"allow"` Block []string `json:"block"` @@ -79,9 +86,10 @@ type Data struct { Username string `json:"username"` Password string `json:"password"` } `json:"auth"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Purge bool `json:"purge"` } `json:"memory"` + S3 []value.S3Storage `json:"s3"` CORS struct { Origins []string `json:"origins"` } `json:"cors"` @@ -107,7 +115,7 @@ type Data struct { } `json:"srt"` FFmpeg struct { Binary string `json:"binary"` - MaxProcesses int64 `json:"max_processes"` + MaxProcesses int64 `json:"max_processes" format:"int64"` Access struct { Input struct { Allow []string `json:"allow"` @@ -119,33 +127,34 @@ type Data struct { } `json:"output"` } `json:"access"` Log struct { - MaxLines int `json:"max_lines"` - MaxHistory int `json:"max_history"` + MaxLines int `json:"max_lines" format:"int"` + MaxHistory int `json:"max_history" format:"int"` } `json:"log"` } `json:"ffmpeg"` Playout struct { Enable bool `json:"enable"` - MinPort int `json:"min_port"` - MaxPort int `json:"max_port"` + MinPort int `json:"min_port" format:"int"` + MaxPort int `json:"max_port" format:"int"` } `json:"playout"` Debug struct { - Profiling bool `json:"profiling"` - ForceGC int `json:"force_gc"` + Profiling bool `json:"profiling"` + ForceGC int `json:"force_gc" format:"int"` + MemoryLimit int64 `json:"memory_limit_mbytes" format:"int64"` } `json:"debug"` Metrics struct { Enable bool `json:"enable"` EnablePrometheus bool `json:"enable_prometheus"` - Range int64 `json:"range_sec"` // seconds - Interval int64 `json:"interval_sec"` // seconds + Range int64 `json:"range_sec" format:"int64"` // seconds + Interval int64 `json:"interval_sec" format:"int64"` // seconds } `json:"metrics"` Sessions struct { Enable bool `json:"enable"` IPIgnoreList []string `json:"ip_ignorelist"` - SessionTimeout int `json:"session_timeout_sec"` + SessionTimeout int `json:"session_timeout_sec" format:"int"` Persist bool `json:"persist"` - PersistInterval int `json:"persist_interval_sec"` - MaxBitrate uint64 `json:"max_bitrate_mbit"` - MaxSessions uint64 `json:"max_sessions"` + PersistInterval int `json:"persist_interval_sec" format:"int"` + MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"` + MaxSessions uint64 `json:"max_sessions" format:"uint64"` } `json:"sessions"` Service struct { Enable bool `json:"enable"` @@ -159,8 +168,95 @@ type Data struct { } `json:"router"` } -func NewV3FromV2(d *dataV2) (*Data, error) { - data := &Data{} +func UpgradeV2ToV3(d *v2.Data, fs fs.Filesystem) (*Data, error) { + cfg := New(fs) + + return MergeV2toV3(&cfg.Data, d) +} + +func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) { + data.CreatedAt = d.CreatedAt + data.LoadedAt = d.LoadedAt + data.UpdatedAt = d.UpdatedAt + + data.ID = d.ID + data.Name = d.Name + data.Address = d.Address + data.CheckForUpdates = d.CheckForUpdates + + data.Log = d.Log + data.DB = d.DB + data.Host = d.Host + data.API = d.API + data.RTMP = d.RTMP + data.SRT = d.SRT + data.FFmpeg = d.FFmpeg + data.Playout = d.Playout + data.Metrics = d.Metrics + data.Sessions = d.Sessions + data.Service = d.Service + data.Router = d.Router + + data.Log.Topics = copy.Slice(d.Log.Topics) + + data.Host.Name = copy.Slice(d.Host.Name) + + data.API.Access.HTTP.Allow = 
copy.Slice(d.API.Access.HTTP.Allow) + data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block) + data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow) + data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block) + + data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants) + + data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins) + + data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow) + data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block) + data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow) + data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block) + + data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList) + + data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics) + + data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes) + data.Router.Routes = copy.StringMap(d.Router.Routes) + + data.Storage.MimeTypes = d.Storage.MimeTypes + + data.Storage.CORS = d.Storage.CORS + data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins) + + data.Storage.Memory = d.Storage.Memory + + // Actual changes + data.Debug.Profiling = d.Debug.Profiling + data.Debug.ForceGC = d.Debug.ForceGC + data.Debug.MemoryLimit = 0 + + data.TLS.Enable = d.TLS.Enable + data.TLS.Address = d.TLS.Address + data.TLS.Auto = d.TLS.Auto + data.TLS.CertFile = d.TLS.CertFile + data.TLS.KeyFile = d.TLS.KeyFile + + data.Storage.Disk.Dir = d.Storage.Disk.Dir + data.Storage.Disk.Size = d.Storage.Disk.Size + data.Storage.Disk.Cache.Enable = d.Storage.Disk.Cache.Enable + data.Storage.Disk.Cache.Size = d.Storage.Disk.Cache.Size + data.Storage.Disk.Cache.FileSize = d.Storage.Disk.Cache.FileSize + data.Storage.Disk.Cache.TTL = d.Storage.Disk.Cache.TTL + data.Storage.Disk.Cache.Types.Allow = copy.Slice(d.Storage.Disk.Cache.Types) + + data.Storage.S3 = []value.S3Storage{} + + data.Version = 3 + + return data, nil +} + +func DowngradeV3toV2(d *Data) (*v2.Data, error) { + data := &v2.Data{} data.CreatedAt = d.CreatedAt data.LoadedAt = d.LoadedAt @@ -179,49 +275,50 @@ func NewV3FromV2(d *dataV2) (*Data, error) { data.SRT = d.SRT data.FFmpeg = d.FFmpeg data.Playout = d.Playout - data.Debug = d.Debug data.Metrics = d.Metrics data.Sessions = d.Sessions data.Service = d.Service data.Router = d.Router - data.Log.Topics = copyStringSlice(d.Log.Topics) + data.Log.Topics = copy.Slice(d.Log.Topics) - data.Host.Name = copyStringSlice(d.Host.Name) + data.Host.Name = copy.Slice(d.Host.Name) - data.API.Access.HTTP.Allow = copyStringSlice(d.API.Access.HTTP.Allow) - data.API.Access.HTTP.Block = copyStringSlice(d.API.Access.HTTP.Block) - data.API.Access.HTTPS.Allow = copyStringSlice(d.API.Access.HTTPS.Allow) - data.API.Access.HTTPS.Block = copyStringSlice(d.API.Access.HTTPS.Block) + data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow) + data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block) + data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow) + data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block) - data.API.Auth.Auth0.Tenants = copyTenantSlice(d.API.Auth.Auth0.Tenants) + data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants) - data.Storage.CORS.Origins = copyStringSlice(d.Storage.CORS.Origins) + data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins) - data.FFmpeg.Access.Input.Allow = copyStringSlice(d.FFmpeg.Access.Input.Allow) - data.FFmpeg.Access.Input.Block = copyStringSlice(d.FFmpeg.Access.Input.Block) - 
data.FFmpeg.Access.Output.Allow = copyStringSlice(d.FFmpeg.Access.Output.Allow) - data.FFmpeg.Access.Output.Block = copyStringSlice(d.FFmpeg.Access.Output.Block) + data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow) + data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block) + data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow) + data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block) - data.Sessions.IPIgnoreList = copyStringSlice(d.Sessions.IPIgnoreList) + data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList) - data.SRT.Log.Topics = copyStringSlice(d.SRT.Log.Topics) + data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics) - data.Router.BlockedPrefixes = copyStringSlice(d.Router.BlockedPrefixes) - data.Router.Routes = copyStringMap(d.Router.Routes) + data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes) + data.Router.Routes = copy.StringMap(d.Router.Routes) // Actual changes + data.Debug.Profiling = d.Debug.Profiling + data.Debug.ForceGC = d.Debug.ForceGC + data.TLS.Enable = d.TLS.Enable data.TLS.Address = d.TLS.Address data.TLS.Auto = d.TLS.Auto data.TLS.CertFile = d.TLS.CertFile data.TLS.KeyFile = d.TLS.KeyFile - data.TLS.Email = "cert@datarhei.com" data.Storage.MimeTypes = d.Storage.MimeTypes data.Storage.CORS = d.Storage.CORS - data.Storage.CORS.Origins = copyStringSlice(d.Storage.CORS.Origins) + data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins) data.Storage.Memory = d.Storage.Memory @@ -231,10 +328,9 @@ func NewV3FromV2(d *dataV2) (*Data, error) { data.Storage.Disk.Cache.Size = d.Storage.Disk.Cache.Size data.Storage.Disk.Cache.FileSize = d.Storage.Disk.Cache.FileSize data.Storage.Disk.Cache.TTL = d.Storage.Disk.Cache.TTL - data.Storage.Disk.Cache.Types.Allow = copyStringSlice(d.Storage.Disk.Cache.Types) - data.Storage.Disk.Cache.Types.Block = []string{} + data.Storage.Disk.Cache.Types = copy.Slice(d.Storage.Disk.Cache.Types.Allow) - data.Version = 3 + data.Version = 2 return data, nil } diff --git a/config/data_test.go b/config/data_test.go new file mode 100644 index 00000000..956cf25d --- /dev/null +++ b/config/data_test.go @@ -0,0 +1,36 @@ +package config + +import ( + "testing" + + v2 "github.com/datarhei/core/v16/config/v2" + "github.com/datarhei/core/v16/io/fs" + "github.com/stretchr/testify/require" +) + +func TestUpgrade(t *testing.T) { + fs, _ := fs.NewMemFilesystem(fs.MemConfig{}) + + v2cfg := v2.New(fs) + v2cfg.Storage.Disk.Cache.Types = []string{".foo", ".bar"} + + v3cfg, err := UpgradeV2ToV3(&v2cfg.Data, fs) + + require.NoError(t, err) + require.Equal(t, int64(3), v3cfg.Version) + require.ElementsMatch(t, []string{".foo", ".bar"}, v3cfg.Storage.Disk.Cache.Types.Allow) + require.ElementsMatch(t, []string{".m3u8", ".mpd"}, v3cfg.Storage.Disk.Cache.Types.Block) +} + +func TestDowngrade(t *testing.T) { + fs, _ := fs.NewMemFilesystem(fs.MemConfig{}) + + v3cfg := New(fs) + v3cfg.Storage.Disk.Cache.Types.Allow = []string{".foo", ".bar"} + + v2cfg, err := DowngradeV3toV2(&v3cfg.Data) + + require.NoError(t, err) + require.Equal(t, int64(2), v2cfg.Version) + require.ElementsMatch(t, []string{".foo", ".bar"}, v2cfg.Storage.Disk.Cache.Types) +} diff --git a/config/dummy.go b/config/dummy.go deleted file mode 100644 index 87daf282..00000000 --- a/config/dummy.go +++ /dev/null @@ -1,83 +0,0 @@ -package config - -import "fmt" - -type dummyStore struct { - current *Config - active *Config -} - -// NewDummyStore returns a store that returns the default config -func NewDummyStore() 
Store { - s := &dummyStore{} - - cfg := New() - - cfg.DB.Dir = "." - cfg.FFmpeg.Binary = "true" - cfg.Storage.Disk.Dir = "." - cfg.Storage.MimeTypes = "" - - s.current = cfg - - cfg = New() - - cfg.DB.Dir = "." - cfg.FFmpeg.Binary = "true" - cfg.Storage.Disk.Dir = "." - cfg.Storage.MimeTypes = "" - - s.active = cfg - - return s -} - -func (c *dummyStore) Get() *Config { - cfg := New() - - cfg.DB.Dir = "." - cfg.FFmpeg.Binary = "true" - cfg.Storage.Disk.Dir = "." - cfg.Storage.MimeTypes = "" - - return cfg -} - -func (c *dummyStore) Set(d *Config) error { - d.Validate(true) - - if d.HasErrors() { - return fmt.Errorf("configuration data has errors after validation") - } - - c.current = NewConfigFrom(d) - - return nil -} - -func (c *dummyStore) GetActive() *Config { - cfg := New() - - cfg.DB.Dir = "." - cfg.FFmpeg.Binary = "true" - cfg.Storage.Disk.Dir = "." - cfg.Storage.MimeTypes = "" - - return cfg -} - -func (c *dummyStore) SetActive(d *Config) error { - d.Validate(true) - - if d.HasErrors() { - return fmt.Errorf("configuration data has errors after validation") - } - - c.active = NewConfigFrom(d) - - return nil -} - -func (c *dummyStore) Reload() error { - return nil -} diff --git a/config/ip.go b/config/ip.go deleted file mode 100644 index 1ac57d51..00000000 --- a/config/ip.go +++ /dev/null @@ -1,71 +0,0 @@ -package config - -import ( - "io" - "net/http" - "sync" - "time" -) - -// SetPublicIPs will try to figure out the public IPs (v4 and v6) -// we're running on. There's a timeout of max. 5 seconds to do it. -// If it fails, the IPs will simply not be set. -func (d *Config) SetPublicIPs() { - var wg sync.WaitGroup - - ipv4 := "" - ipv6 := "" - - wg.Add(2) - - go func() { - defer wg.Done() - - ipv4 = doRequest("https://api.ipify.org") - }() - - go func() { - defer wg.Done() - - ipv6 = doRequest("https://api6.ipify.org") - }() - - wg.Wait() - - if len(ipv4) != 0 { - d.Host.Name = append(d.Host.Name, ipv4) - } - - if len(ipv6) != 0 && ipv4 != ipv6 { - d.Host.Name = append(d.Host.Name, ipv6) - } -} - -func doRequest(url string) string { - client := &http.Client{ - Timeout: 5 * time.Second, - } - - req, err := http.NewRequest("GET", url, nil) - if err != nil { - return "" - } - - resp, err := client.Do(req) - if err != nil { - return "" - } - - defer resp.Body.Close() - - body, err := io.ReadAll(resp.Body) - if err != nil { - return "" - } - - if resp.StatusCode != 200 { - return "" - } - - return string(body) -} diff --git a/config/json.go b/config/json.go deleted file mode 100644 index 3b185d0e..00000000 --- a/config/json.go +++ /dev/null @@ -1,201 +0,0 @@ -package config - -import ( - gojson "encoding/json" - "fmt" - "os" - "path/filepath" - "time" - - "github.com/datarhei/core/v16/encoding/json" - "github.com/datarhei/core/v16/io/file" -) - -type jsonStore struct { - path string - - data map[string]*Config - - reloadFn func() -} - -// NewJSONStore will read a JSON config file from the given path. After successfully reading it in, it will be written -// back to the path. The returned error will be nil if everything went fine. -// If the path doesn't exist, a default JSON config file will be written to that path. -// The returned ConfigStore can be used to retrieve or write the config. 
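
Before the old store code below is removed for good, it is worth noting how the new UpgradeV2ToV3/DowngradeV3toV2 pair from config/data.go behaves: the upgrade splits the flat v2 cache type list into an allow/block pair and seeds the new v3 fields, while the downgrade keeps only the allow list. A round-trip sketch modeled on the new data_test.go; the ".ts" value is illustrative:

```go
package config_test

import (
	"fmt"

	"github.com/datarhei/core/v16/config"
	v2 "github.com/datarhei/core/v16/config/v2"
	"github.com/datarhei/core/v16/io/fs"
)

func ExampleUpgradeV2ToV3() {
	memfs, _ := fs.NewMemFilesystem(fs.MemConfig{})

	v2cfg := v2.New(memfs)
	v2cfg.Storage.Disk.Cache.Types = []string{".ts"}

	// Upgrading moves the flat v2 list into Types.Allow and applies the
	// new v3 defaults (block list, empty S3 list, memory limit of 0).
	v3data, _ := config.UpgradeV2ToV3(&v2cfg.Data, memfs)
	fmt.Println(v3data.Storage.Disk.Cache.Types.Allow) // [.ts]
	fmt.Println(v3data.Storage.Disk.Cache.Types.Block) // [.m3u8 .mpd]

	// Downgrading is lossy: only the allow list survives as the flat v2 field.
	v2data, _ := config.DowngradeV3toV2(v3data)
	fmt.Println(v2data.Storage.Disk.Cache.Types) // [.ts]
}
```
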
-func NewJSONStore(path string, reloadFn func()) (Store, error) { - c := &jsonStore{ - path: path, - data: make(map[string]*Config), - reloadFn: reloadFn, - } - - c.data["base"] = New() - - if err := c.load(c.data["base"]); err != nil { - return nil, fmt.Errorf("failed to read JSON from '%s': %w", path, err) - } - - if err := c.store(c.data["base"]); err != nil { - return nil, fmt.Errorf("failed to write JSON to '%s': %w", path, err) - } - - return c, nil -} - -func (c *jsonStore) Get() *Config { - return NewConfigFrom(c.data["base"]) -} - -func (c *jsonStore) Set(d *Config) error { - if d.HasErrors() { - return fmt.Errorf("configuration data has errors after validation") - } - - data := NewConfigFrom(d) - - data.CreatedAt = time.Now() - - if err := c.store(data); err != nil { - return fmt.Errorf("failed to write JSON to '%s': %w", c.path, err) - } - - data.UpdatedAt = time.Now() - - c.data["base"] = data - - return nil -} - -func (c *jsonStore) GetActive() *Config { - if x, ok := c.data["merged"]; ok { - return NewConfigFrom(x) - } - - if x, ok := c.data["base"]; ok { - return NewConfigFrom(x) - } - - return nil -} - -func (c *jsonStore) SetActive(d *Config) error { - d.Validate(true) - - if d.HasErrors() { - return fmt.Errorf("configuration data has errors after validation") - } - - c.data["merged"] = NewConfigFrom(d) - - return nil -} - -func (c *jsonStore) Reload() error { - if c.reloadFn == nil { - return nil - } - - c.reloadFn() - - return nil -} - -func (c *jsonStore) load(config *Config) error { - if len(c.path) == 0 { - return nil - } - - if _, err := os.Stat(c.path); os.IsNotExist(err) { - return nil - } - - jsondata, err := os.ReadFile(c.path) - if err != nil { - return err - } - - dataV3 := &Data{} - - version := DataVersion{} - - if err = gojson.Unmarshal(jsondata, &version); err != nil { - return json.FormatError(jsondata, err) - } - - if version.Version == 1 { - dataV1 := &dataV1{} - - if err = gojson.Unmarshal(jsondata, dataV1); err != nil { - return json.FormatError(jsondata, err) - } - - dataV2, err := NewV2FromV1(dataV1) - if err != nil { - return err - } - - dataV3, err = NewV3FromV2(dataV2) - if err != nil { - return err - } - } else if version.Version == 2 { - dataV2 := &dataV2{} - - if err = gojson.Unmarshal(jsondata, dataV2); err != nil { - return json.FormatError(jsondata, err) - } - - dataV3, err = NewV3FromV2(dataV2) - if err != nil { - return err - } - } else if version.Version == 3 { - if err = gojson.Unmarshal(jsondata, dataV3); err != nil { - return json.FormatError(jsondata, err) - } - } - - config.Data = *dataV3 - - config.LoadedAt = time.Now() - config.UpdatedAt = config.LoadedAt - - return nil -} - -func (c *jsonStore) store(data *Config) error { - data.CreatedAt = time.Now() - - if len(c.path) == 0 { - return nil - } - - jsondata, err := gojson.MarshalIndent(data, "", " ") - if err != nil { - return err - } - - dir, filename := filepath.Split(c.path) - - tmpfile, err := os.CreateTemp(dir, filename) - if err != nil { - return err - } - - defer os.Remove(tmpfile.Name()) - - if _, err := tmpfile.Write(jsondata); err != nil { - return err - } - - if err := tmpfile.Close(); err != nil { - return err - } - - if err := file.Rename(tmpfile.Name(), c.path); err != nil { - return err - } - - return nil -} diff --git a/config/store/fixtures/config_v1.json b/config/store/fixtures/config_v1.json new file mode 100644 index 00000000..b85a216c --- /dev/null +++ b/config/store/fixtures/config_v1.json @@ -0,0 +1,138 @@ +{ + "created_at": 
"2022-11-08T12:01:22.533279+01:00", + "version": 1, + "id": "c5ea4473-2f84-417c-a0c6-35746bfc9fc9", + "name": "cool-breeze-4646", + "address": ":8080", + "update_check": true, + "log": { + "level": "info", + "topics": [], + "max_lines": 1000 + }, + "db": { + "dir": "./config" + }, + "host": { + "name": [], + "auto": true + }, + "api": { + "read_only": false, + "access": { + "http": { + "allow": [], + "block": [] + }, + "https": { + "allow": [], + "block": [] + } + }, + "auth": { + "enable": false, + "disable_localhost": false, + "username": "", + "password": "", + "jwt": { + "secret": "L(*C[:uuHzL.]Fzpk$q=fa@PO=Z;j;56" + }, + "auth0": { + "enable": false, + "tenants": [] + } + } + }, + "tls": { + "address": ":8181", + "enable": false, + "auto": false, + "cert_file": "", + "key_file": "" + }, + "storage": { + "disk": { + "dir": "./data", + "max_size_mbytes": 0, + "cache": { + "enable": true, + "max_size_mbytes": 0, + "ttl_seconds": 300, + "max_file_size_mbytes": 1, + "types": [] + } + }, + "memory": { + "auth": { + "enable": true, + "username": "admin", + "password": "dcFsZVGwVFkv1bE8Rl" + }, + "max_size_mbytes": 0, + "purge": false + }, + "cors": { + "origins": [ + "*" + ] + }, + "mimetypes_file": "./mime.types" + }, + "ffmpeg": { + "binary": "ffmpeg", + "max_processes": 0, + "access": { + "input": { + "allow": [], + "block": [] + }, + "output": { + "allow": [], + "block": [] + } + }, + "log": { + "max_lines": 50, + "max_history": 3 + } + }, + "playout": { + "enable": false, + "min_port": 0, + "max_port": 0 + }, + "debug": { + "profiling": false, + "force_gc": 0 + }, + "metrics": { + "enable": false, + "enable_prometheus": false, + "range_sec": 300, + "interval_sec": 2 + }, + "sessions": { + "enable": true, + "ip_ignorelist": [ + "127.0.0.1/32", + "::1/128" + ], + "session_timeout_sec": 30, + "persist": false, + "persist_interval_sec": 300, + "max_bitrate_mbit": 0, + "max_sessions": 0 + }, + "service": { + "enable": false, + "token": "", + "url": "https://service.datarhei.com" + }, + "router": { + "blocked_prefixes": [ + "/api" + ], + "routes": {}, + "ui_path": "" + } +} \ No newline at end of file diff --git a/config/store/fixtures/config_v1_v3.json b/config/store/fixtures/config_v1_v3.json new file mode 100644 index 00000000..398673d8 --- /dev/null +++ b/config/store/fixtures/config_v1_v3.json @@ -0,0 +1,163 @@ +{ + "created_at": "2022-11-08T13:34:47.498911+01:00", + "version": 3, + "id": "c5ea4473-2f84-417c-a0c6-35746bfc9fc9", + "name": "cool-breeze-4646", + "address": ":8080", + "update_check": true, + "log": { + "level": "info", + "topics": [], + "max_lines": 1000 + }, + "db": { + "dir": "./config" + }, + "host": { + "name": [], + "auto": true + }, + "api": { + "read_only": false, + "access": { + "http": { + "allow": [], + "block": [] + }, + "https": { + "allow": [], + "block": [] + } + }, + "auth": { + "enable": false, + "disable_localhost": false, + "username": "", + "password": "", + "jwt": { + "secret": "L(*C[:uuHzL.]Fzpk$q=fa@PO=Z;j;56" + }, + "auth0": { + "enable": false, + "tenants": [] + } + } + }, + "tls": { + "address": ":8181", + "enable": false, + "auto": false, + "email": "cert@datarhei.com", + "cert_file": "", + "key_file": "" + }, + "storage": { + "disk": { + "dir": "./data", + "max_size_mbytes": 0, + "cache": { + "enable": true, + "max_size_mbytes": 0, + "ttl_seconds": 300, + "max_file_size_mbytes": 1, + "types": { + "allow": [], + "block": [ + ".m3u8", + ".mpd" + ] + } + } + }, + "memory": { + "auth": { + "enable": true, + "username": "admin", + "password": 
"dcFsZVGwVFkv1bE8Rl" + }, + "max_size_mbytes": 0, + "purge": false + }, + "cors": { + "origins": [ + "*" + ] + }, + "mimetypes_file": "./mime.types" + }, + "rtmp": { + "enable": false, + "enable_tls": false, + "address": ":1935", + "address_tls": ":1936", + "app": "/", + "token": "" + }, + "srt": { + "enable": false, + "address": ":6000", + "passphrase": "", + "token": "", + "log": { + "enable": false, + "topics": [] + } + }, + "ffmpeg": { + "binary": "ffmpeg", + "max_processes": 0, + "access": { + "input": { + "allow": [], + "block": [] + }, + "output": { + "allow": [], + "block": [] + } + }, + "log": { + "max_lines": 50, + "max_history": 3 + } + }, + "playout": { + "enable": false, + "min_port": 0, + "max_port": 0 + }, + "debug": { + "profiling": false, + "force_gc": 0 + }, + "metrics": { + "enable": false, + "enable_prometheus": false, + "range_sec": 300, + "interval_sec": 2 + }, + "sessions": { + "enable": true, + "ip_ignorelist": [ + "127.0.0.1/32", + "::1/128" + ], + "session_timeout_sec": 30, + "persist": false, + "persist_interval_sec": 300, + "max_bitrate_mbit": 0, + "max_sessions": 0 + }, + "service": { + "enable": false, + "token": "", + "url": "https://service.datarhei.com" + }, + "router": { + "blocked_prefixes": [ + "/api" + ], + "routes": {}, + "ui_path": "" + } +} \ No newline at end of file diff --git a/config/store/fixtures/config_v2.json b/config/store/fixtures/config_v2.json new file mode 100644 index 00000000..c6536b37 --- /dev/null +++ b/config/store/fixtures/config_v2.json @@ -0,0 +1,140 @@ +{ + "created_at": "2022-11-08T11:54:44.224213+01:00", + "version": 2, + "id": "3bddc061-e534-4315-ab56-95b48c050ec9", + "name": "super-frog-1715", + "address": ":8080", + "update_check": true, + "log": { + "level": "info", + "topics": [], + "max_lines": 1000 + }, + "db": { + "dir": "./config" + }, + "host": { + "name": [], + "auto": true + }, + "api": { + "read_only": false, + "access": { + "http": { + "allow": [], + "block": [] + }, + "https": { + "allow": [], + "block": [] + } + }, + "auth": { + "enable": false, + "disable_localhost": false, + "username": "", + "password": "", + "jwt": { + "secret": "u4+N,UDq]jGxGbbQLQN[!jcMsa\u0026weIJW" + }, + "auth0": { + "enable": false, + "tenants": [] + } + } + }, + "tls": { + "address": ":8181", + "enable": false, + "auto": false, + "cert_file": "", + "key_file": "" + }, + "storage": { + "disk": { + "dir": "./data", + "max_size_mbytes": 0, + "cache": { + "enable": true, + "max_size_mbytes": 0, + "ttl_seconds": 300, + "max_file_size_mbytes": 1, + "types": [ + ".ts" + ] + } + }, + "memory": { + "auth": { + "enable": true, + "username": "admin", + "password": "DsAKRUg9wmOk4qpvvy" + }, + "max_size_mbytes": 0, + "purge": false + }, + "cors": { + "origins": [ + "*" + ] + }, + "mimetypes_file": "./mime.types" + }, + "ffmpeg": { + "binary": "ffmpeg", + "max_processes": 0, + "access": { + "input": { + "allow": [], + "block": [] + }, + "output": { + "allow": [], + "block": [] + } + }, + "log": { + "max_lines": 50, + "max_history": 3 + } + }, + "playout": { + "enable": false, + "min_port": 0, + "max_port": 0 + }, + "debug": { + "profiling": false, + "force_gc": 0 + }, + "metrics": { + "enable": false, + "enable_prometheus": false, + "range_sec": 300, + "interval_sec": 2 + }, + "sessions": { + "enable": true, + "ip_ignorelist": [ + "127.0.0.1/32", + "::1/128" + ], + "session_timeout_sec": 30, + "persist": false, + "persist_interval_sec": 300, + "max_bitrate_mbit": 0, + "max_sessions": 0 + }, + "service": { + "enable": false, + "token": "", + "url": 
"https://service.datarhei.com" + }, + "router": { + "blocked_prefixes": [ + "/api" + ], + "routes": {}, + "ui_path": "" + } +} \ No newline at end of file diff --git a/config/store/fixtures/config_v2_v3.json b/config/store/fixtures/config_v2_v3.json new file mode 100644 index 00000000..0661f638 --- /dev/null +++ b/config/store/fixtures/config_v2_v3.json @@ -0,0 +1,165 @@ +{ + "created_at": "2022-11-08T11:54:44.224213+01:00", + "version": 3, + "id": "3bddc061-e534-4315-ab56-95b48c050ec9", + "name": "super-frog-1715", + "address": ":8080", + "update_check": true, + "log": { + "level": "info", + "topics": [], + "max_lines": 1000 + }, + "db": { + "dir": "./config" + }, + "host": { + "name": [], + "auto": true + }, + "api": { + "read_only": false, + "access": { + "http": { + "allow": [], + "block": [] + }, + "https": { + "allow": [], + "block": [] + } + }, + "auth": { + "enable": false, + "disable_localhost": false, + "username": "", + "password": "", + "jwt": { + "secret": "u4+N,UDq]jGxGbbQLQN[!jcMsa\u0026weIJW" + }, + "auth0": { + "enable": false, + "tenants": [] + } + } + }, + "tls": { + "address": ":8181", + "enable": false, + "auto": false, + "cert_file": "", + "key_file": "", + "email": "cert@datarhei.com" + }, + "storage": { + "disk": { + "dir": "./data", + "max_size_mbytes": 0, + "cache": { + "enable": true, + "max_size_mbytes": 0, + "ttl_seconds": 300, + "max_file_size_mbytes": 1, + "types": { + "allow": [ + ".ts" + ], + "block": [ + ".m3u8", + ".mpd" + ] + } + } + }, + "memory": { + "auth": { + "enable": true, + "username": "admin", + "password": "DsAKRUg9wmOk4qpvvy" + }, + "max_size_mbytes": 0, + "purge": false + }, + "cors": { + "origins": [ + "*" + ] + }, + "mimetypes_file": "./mime.types" + }, + "rtmp": { + "enable": false, + "enable_tls": false, + "address": ":1935", + "address_tls": ":1936", + "app": "/", + "token": "" + }, + "srt": { + "enable": false, + "address": ":6000", + "passphrase": "", + "token": "", + "log": { + "enable": false, + "topics": [] + } + }, + "ffmpeg": { + "binary": "ffmpeg", + "max_processes": 0, + "access": { + "input": { + "allow": [], + "block": [] + }, + "output": { + "allow": [], + "block": [] + } + }, + "log": { + "max_lines": 50, + "max_history": 3 + } + }, + "playout": { + "enable": false, + "min_port": 0, + "max_port": 0 + }, + "debug": { + "profiling": false, + "force_gc": 0 + }, + "metrics": { + "enable": false, + "enable_prometheus": false, + "range_sec": 300, + "interval_sec": 2 + }, + "sessions": { + "enable": true, + "ip_ignorelist": [ + "127.0.0.1/32", + "::1/128" + ], + "session_timeout_sec": 30, + "persist": false, + "persist_interval_sec": 300, + "max_bitrate_mbit": 0, + "max_sessions": 0 + }, + "service": { + "enable": false, + "token": "", + "url": "https://service.datarhei.com" + }, + "router": { + "blocked_prefixes": [ + "/api" + ], + "routes": {}, + "ui_path": "" + } +} \ No newline at end of file diff --git a/config/store/json.go b/config/store/json.go new file mode 100644 index 00000000..976d18a0 --- /dev/null +++ b/config/store/json.go @@ -0,0 +1,214 @@ +package store + +import ( + gojson "encoding/json" + "fmt" + "os" + "path/filepath" + + "github.com/datarhei/core/v16/config" + v1 "github.com/datarhei/core/v16/config/v1" + v2 "github.com/datarhei/core/v16/config/v2" + "github.com/datarhei/core/v16/encoding/json" + "github.com/datarhei/core/v16/io/fs" +) + +type jsonStore struct { + fs fs.Filesystem + path string + + data map[string]*config.Config + + reloadFn func() +} + +// NewJSONStore will read the JSON config file from the 
given path. After successfully reading it in, it will be written +// back to the path. The returned error will be nil if everything went fine. If the path doesn't exist, a default JSON +// config file will be written to that path. The returned Store can be used to retrieve or write the config. +func NewJSON(f fs.Filesystem, path string, reloadFn func()) (Store, error) { + c := &jsonStore{ + fs: f, + data: make(map[string]*config.Config), + reloadFn: reloadFn, + } + + path, err := filepath.Abs(path) + if err != nil { + return nil, fmt.Errorf("failed to determine absolute path of '%s': %w", path, err) + } + + c.path = path + + if len(c.path) == 0 { + c.path = "/config.json" + } + + if c.fs == nil { + return nil, fmt.Errorf("no valid filesystem provided") + } + + c.data["base"] = config.New(f) + + if err := c.load(c.data["base"]); err != nil { + return nil, fmt.Errorf("failed to read JSON from '%s': %w", path, err) + } + + if err := c.store(c.data["base"]); err != nil { + return nil, fmt.Errorf("failed to write JSON to '%s': %w", path, err) + } + + return c, nil +} + +func (c *jsonStore) Get() *config.Config { + return c.data["base"].Clone() +} + +func (c *jsonStore) Set(d *config.Config) error { + if d.HasErrors() { + return fmt.Errorf("configuration data has errors after validation") + } + + data := d.Clone() + + if err := c.store(data); err != nil { + return fmt.Errorf("failed to write JSON to '%s': %w", c.path, err) + } + + c.data["base"] = data + + return nil +} + +func (c *jsonStore) GetActive() *config.Config { + if x, ok := c.data["merged"]; ok { + return x.Clone() + } + + if x, ok := c.data["base"]; ok { + return x.Clone() + } + + return nil +} + +func (c *jsonStore) SetActive(d *config.Config) error { + d.Validate(true) + + if d.HasErrors() { + return fmt.Errorf("configuration data has errors after validation") + } + + data := d.Clone() + + c.data["merged"] = data + + return nil +} + +func (c *jsonStore) Reload() error { + if c.reloadFn == nil { + return nil + } + + c.reloadFn() + + return nil +} + +func (c *jsonStore) load(cfg *config.Config) error { + if len(c.path) == 0 { + return nil + } + + if _, err := c.fs.Stat(c.path); os.IsNotExist(err) { + return nil + } + + jsondata, err := c.fs.ReadFile(c.path) + if err != nil { + return err + } + + if len(jsondata) == 0 { + return nil + } + + data, err := migrate(jsondata) + if err != nil { + return err + } + + cfg.Data = *data + + cfg.UpdatedAt = cfg.CreatedAt + + return nil +} + +func (c *jsonStore) store(data *config.Config) error { + if len(c.path) == 0 { + return nil + } + + jsondata, err := gojson.MarshalIndent(data, "", " ") + if err != nil { + return err + } + + _, _, err = c.fs.WriteFileSafe(c.path, jsondata) + + return err +} + +func migrate(jsondata []byte) (*config.Data, error) { + data := &config.Data{} + version := DataVersion{} + + if err := gojson.Unmarshal(jsondata, &version); err != nil { + return nil, json.FormatError(jsondata, err) + } + + if version.Version == 1 { + dataV1 := &v1.New(nil).Data + + if err := gojson.Unmarshal(jsondata, dataV1); err != nil { + return nil, json.FormatError(jsondata, err) + } + + dataV2, err := v2.UpgradeV1ToV2(dataV1, nil) + if err != nil { + return nil, err + } + + dataV3, err := config.UpgradeV2ToV3(dataV2, nil) + if err != nil { + return nil, err + } + + data = dataV3 + } else if version.Version == 2 { + dataV2 := &v2.New(nil).Data + + if err := gojson.Unmarshal(jsondata, dataV2); err != nil { + return nil, json.FormatError(jsondata, err) + } + + dataV3, err :=
config.UpgradeV2ToV3(dataV2, nil) + if err != nil { + return nil, err + } + + data = dataV3 + } else if version.Version == 3 { + dataV3 := &config.New(nil).Data + + if err := gojson.Unmarshal(jsondata, dataV3); err != nil { + return nil, json.FormatError(jsondata, err) + } + + data = dataV3 + } + + return data, nil +} diff --git a/config/store/json_test.go b/config/store/json_test.go new file mode 100644 index 00000000..f549fd3e --- /dev/null +++ b/config/store/json_test.go @@ -0,0 +1,50 @@ +package store + +import ( + "encoding/json" + "os" + "testing" + "time" + + "github.com/datarhei/core/v16/config" + + "github.com/stretchr/testify/require" +) + +func TestMigrationV1ToV3(t *testing.T) { + jsondatav1, err := os.ReadFile("./fixtures/config_v1.json") + require.NoError(t, err) + + jsondatav3, err := os.ReadFile("./fixtures/config_v1_v3.json") + require.NoError(t, err) + + datav3 := config.New(nil) + require.NoError(t, json.Unmarshal(jsondatav3, datav3)) + + data, err := migrate(jsondatav1) + require.NoError(t, err) + + datav3.Data.CreatedAt = time.Time{} + data.CreatedAt = time.Time{} + + require.Equal(t, datav3.Data, *data) +} + +func TestMigrationV2ToV3(t *testing.T) { + jsondatav2, err := os.ReadFile("./fixtures/config_v2.json") + require.NoError(t, err) + + jsondatav3, err := os.ReadFile("./fixtures/config_v2_v3.json") + require.NoError(t, err) + + datav3 := config.New(nil) + require.NoError(t, json.Unmarshal(jsondatav3, datav3)) + + data, err := migrate(jsondatav2) + require.NoError(t, err) + + datav3.Data.CreatedAt = time.Time{} + data.CreatedAt = time.Time{} + + require.Equal(t, datav3.Data, *data) +} diff --git a/config/store/location.go b/config/store/location.go new file mode 100644 index 00000000..e073a0c8 --- /dev/null +++ b/config/store/location.go @@ -0,0 +1,53 @@ +package store + +import ( + "os" + "path" +) + +// Location returns the path to the config file. If no path is provided, +// different standard locations will be probed: +// - os.UserConfigDir() + /datarhei-core/config.js +// - os.UserHomeDir() + /.config/datarhei-core/config.js +// - ./config/config.js +// If the config doesn't exist in any of these locations, it is assumed +// to be at ./config/config.js +func Location(filepath string) string { + configfile := filepath + if len(configfile) != 0 { + return configfile + } + + locations := []string{} + + if dir, err := os.UserConfigDir(); err == nil { + locations = append(locations, dir+"/datarhei-core/config.js") + } + + if dir, err := os.UserHomeDir(); err == nil { + locations = append(locations, dir+"/.config/datarhei-core/config.js") + } + + locations = append(locations, "./config/config.js") + + for _, path := range locations { + info, err := os.Stat(path) + if err != nil { + continue + } + + if info.IsDir() { + continue + } + + configfile = path + } + + if len(configfile) == 0 { + configfile = "./config/config.js" + } + + os.MkdirAll(path.Dir(configfile), 0740) + + return configfile +} diff --git a/config/store.go b/config/store/store.go similarity index 69% rename from config/store.go rename to config/store/store.go index 6aa2895b..a4c46150 100644 --- a/config/store.go +++ b/config/store/store.go @@ -1,23 +1,29 @@ -package config +package store + +import "github.com/datarhei/core/v16/config" // Store is a store for the configuration data. type Store interface { // Get the current configuration. - Get() *Config + Get() *config.Config // Set a new configuration for persistence.
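// Implementations may refuse invalid data; the JSON store in this package, for example, returns an error from Set if the configuration still reports HasErrors().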
- Set(data *Config) error + Set(data *config.Config) error // GetActive returns the configuration that has been set as // active before, otherwise it returns nil. - GetActive() *Config + GetActive() *config.Config // SetActive will keep the given configuration // as active in memory. It can be retrieved later with GetActive() - SetActive(data *Config) error + SetActive(data *config.Config) error // Reload will reload the stored configuration. It has to make sure // that all affected components will receive their potentially // changed configuration. Reload() error } + +type DataVersion struct { + Version int64 `json:"version"` +} diff --git a/config/types.go b/config/types.go deleted file mode 100644 index f5a27e82..00000000 --- a/config/types.go +++ /dev/null @@ -1,844 +0,0 @@ -package config - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "net" - "net/mail" - "net/url" - "os" - "os/exec" - "path/filepath" - "regexp" - "strconv" - "strings" - "time" - - "github.com/datarhei/core/v16/http/cors" -) - -type value interface { - // String returns a string representation of the value. - String() string - - // Set a new value for the value. Returns an - // error if the given string representation can't - // be transformed to the value. Returns nil - // if the new value has been set. - Set(string) error - - // Validate the value. The returned error will - // indicate what is wrong with the current value. - // Returns nil if the value is OK. - Validate() error - - // IsEmpty returns whether the value represents an empty - // representation for that value. - IsEmpty() bool -} - -// string - -type stringValue string - -func newStringValue(p *string, val string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = stringValue(val) - return nil -} - -func (s *stringValue) String() string { - return string(*s) -} - -func (s *stringValue) Validate() error { - return nil -} - -func (s *stringValue) IsEmpty() bool { - return len(string(*s)) == 0 -} - -// address (host?:port) - -type addressValue string - -func newAddressValue(p *string, val string) *addressValue { - *p = val - return (*addressValue)(p) -} - -func (s *addressValue) Set(val string) error { - // Check if the new value is only a port number - re := regexp.MustCompile("^[0-9]+$") - if re.MatchString(val) { - val = ":" + val - } - - *s = addressValue(val) - return nil -} - -func (s *addressValue) String() string { - return string(*s) -} - -func (s *addressValue) Validate() error { - _, port, err := net.SplitHostPort(string(*s)) - if err != nil { - return err - } - - re := regexp.MustCompile("^[0-9]+$") - if !re.MatchString(port) { - return fmt.Errorf("the port must be numerical") - } - - return nil -} - -func (s *addressValue) IsEmpty() bool { - return s.Validate() != nil -} - -// array of strings - -type stringListValue struct { - p *[]string - separator string -} - -func newStringListValue(p *[]string, val []string, separator string) *stringListValue { - v := &stringListValue{ - p: p, - separator: separator, - } - *p = val - return v -} - -func (s *stringListValue) Set(val string) error { - list := []string{} - - for _, elm := range strings.Split(val, s.separator) { - elm = strings.TrimSpace(elm) - if len(elm) != 0 { - list = append(list, elm) - } - } - - *s.p = list - - return nil -} - -func (s *stringListValue) String() string { - if s.IsEmpty() { - return "(empty)" - } - - return strings.Join(*s.p, s.separator) -} - -func (s *stringListValue) Validate() error { - return
nil -} - -func (s *stringListValue) IsEmpty() bool { - return len(*s.p) == 0 -} - -// array of auth0 tenants - -type tenantListValue struct { - p *[]Auth0Tenant - separator string -} - -func newTenantListValue(p *[]Auth0Tenant, val []Auth0Tenant, separator string) *tenantListValue { - v := &tenantListValue{ - p: p, - separator: separator, - } - - *p = val - return v -} - -func (s *tenantListValue) Set(val string) error { - list := []Auth0Tenant{} - - for i, elm := range strings.Split(val, s.separator) { - data, err := base64.StdEncoding.DecodeString(elm) - if err != nil { - return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err) - } - - t := Auth0Tenant{} - if err := json.Unmarshal(data, &t); err != nil { - return fmt.Errorf("invalid JSON in tenant %d: %w", i, err) - } - - list = append(list, t) - } - - *s.p = list - - return nil -} - -func (s *tenantListValue) String() string { - if s.IsEmpty() { - return "(empty)" - } - - list := []string{} - - for _, t := range *s.p { - list = append(list, fmt.Sprintf("%s (%d users)", t.Domain, len(t.Users))) - } - - return strings.Join(list, ",") -} - -func (s *tenantListValue) Validate() error { - for i, t := range *s.p { - if len(t.Domain) == 0 { - return fmt.Errorf("the domain for tenant %d is missing", i) - } - - if len(t.Audience) == 0 { - return fmt.Errorf("the audience for tenant %d is missing", i) - } - } - - return nil -} - -func (s *tenantListValue) IsEmpty() bool { - return len(*s.p) == 0 -} - -// map of strings to strings - -type stringMapStringValue struct { - p *map[string]string -} - -func newStringMapStringValue(p *map[string]string, val map[string]string) *stringMapStringValue { - v := &stringMapStringValue{ - p: p, - } - - if *p == nil { - *p = make(map[string]string) - } - - if val != nil { - *p = val - } - - return v -} - -func (s *stringMapStringValue) Set(val string) error { - mappings := make(map[string]string) - - for _, elm := range strings.Split(val, " ") { - elm = strings.TrimSpace(elm) - if len(elm) == 0 { - continue - } - - mapping := strings.SplitN(elm, ":", 2) - - mappings[mapping[0]] = mapping[1] - } - - *s.p = mappings - - return nil -} - -func (s *stringMapStringValue) String() string { - if s.IsEmpty() { - return "(empty)" - } - - mappings := make([]string, len(*s.p)) - - i := 0 - for k, v := range *s.p { - mappings[i] = k + ":" + v - i++ - } - - return strings.Join(mappings, " ") -} - -func (s *stringMapStringValue) Validate() error { - return nil -} - -func (s *stringMapStringValue) IsEmpty() bool { - return len(*s.p) == 0 -} - -// array of CIDR notation IP adresses - -type cidrListValue struct { - p *[]string - separator string -} - -func newCIDRListValue(p *[]string, val []string, separator string) *cidrListValue { - v := &cidrListValue{ - p: p, - separator: separator, - } - *p = val - return v -} - -func (s *cidrListValue) Set(val string) error { - list := []string{} - - for _, elm := range strings.Split(val, s.separator) { - elm = strings.TrimSpace(elm) - if len(elm) != 0 { - list = append(list, elm) - } - } - - *s.p = list - - return nil -} - -func (s *cidrListValue) String() string { - if s.IsEmpty() { - return "(empty)" - } - - return strings.Join(*s.p, s.separator) -} - -func (s *cidrListValue) Validate() error { - for _, cidr := range *s.p { - _, _, err := net.ParseCIDR(cidr) - if err != nil { - return err - } - } - - return nil -} - -func (s *cidrListValue) IsEmpty() bool { - return len(*s.p) == 0 -} - -// array of origins for CORS - -type corsOriginsValue struct { - p *[]string - separator 
string -} - -func newCORSOriginsValue(p *[]string, val []string, separator string) *corsOriginsValue { - v := &corsOriginsValue{ - p: p, - separator: separator, - } - *p = val - return v -} - -func (s *corsOriginsValue) Set(val string) error { - list := []string{} - - for _, elm := range strings.Split(val, s.separator) { - elm = strings.TrimSpace(elm) - if len(elm) != 0 { - list = append(list, elm) - } - } - - *s.p = list - - return nil -} - -func (s *corsOriginsValue) String() string { - if s.IsEmpty() { - return "(empty)" - } - - return strings.Join(*s.p, s.separator) -} - -func (s *corsOriginsValue) Validate() error { - return cors.Validate(*s.p) -} - -func (s *corsOriginsValue) IsEmpty() bool { - return len(*s.p) == 0 -} - -// boolean - -type boolValue bool - -func newBoolValue(p *bool, val bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(val string) error { - v, err := strconv.ParseBool(val) - if err != nil { - return err - } - *b = boolValue(v) - return nil -} - -func (b *boolValue) String() string { - return strconv.FormatBool(bool(*b)) -} - -func (b *boolValue) Validate() error { - return nil -} - -func (b *boolValue) IsEmpty() bool { - return !bool(*b) -} - -// int - -type intValue int - -func newIntValue(p *int, val int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(val string) error { - v, err := strconv.Atoi(val) - if err != nil { - return err - } - *i = intValue(v) - return nil -} - -func (i *intValue) String() string { - return strconv.Itoa(int(*i)) -} - -func (i *intValue) Validate() error { - return nil -} - -func (i *intValue) IsEmpty() bool { - return int(*i) == 0 -} - -// int64 - -type int64Value int64 - -func newInt64Value(p *int64, val int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (u *int64Value) Set(val string) error { - v, err := strconv.ParseInt(val, 0, 64) - if err != nil { - return err - } - *u = int64Value(v) - return nil -} - -func (u *int64Value) String() string { - return strconv.FormatInt(int64(*u), 10) -} - -func (u *int64Value) Validate() error { - return nil -} - -func (u *int64Value) IsEmpty() bool { - return int64(*u) == 0 -} - -// uint64 - -type uint64Value uint64 - -func newUint64Value(p *uint64, val uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (u *uint64Value) Set(val string) error { - v, err := strconv.ParseUint(val, 0, 64) - if err != nil { - return err - } - *u = uint64Value(v) - return nil -} - -func (u *uint64Value) String() string { - return strconv.FormatUint(uint64(*u), 10) -} - -func (u *uint64Value) Validate() error { - return nil -} - -func (u *uint64Value) IsEmpty() bool { - return uint64(*u) == 0 -} - -// network port - -type portValue int - -func newPortValue(p *int, val int) *portValue { - *p = val - return (*portValue)(p) -} - -func (i *portValue) Set(val string) error { - v, err := strconv.Atoi(val) - if err != nil { - return err - } - *i = portValue(v) - return nil -} - -func (i *portValue) String() string { - return strconv.Itoa(int(*i)) -} - -func (i *portValue) Validate() error { - val := int(*i) - - if val < 0 || val >= (1<<16) { - return fmt.Errorf("%d is not in the range of [0, %d]", val, 1<<16-1) - } - - return nil -} - -func (i *portValue) IsEmpty() bool { - return int(*i) == 0 -} - -// must directory - -type mustDirValue string - -func newMustDirValue(p *string, val string) *mustDirValue { - *p = val - return (*mustDirValue)(p) -} - -func (u *mustDirValue) Set(val string) error { - *u = mustDirValue(val) - 
return nil -} - -func (u *mustDirValue) String() string { - return string(*u) -} - -func (u *mustDirValue) Validate() error { - val := string(*u) - - if len(strings.TrimSpace(val)) == 0 { - return fmt.Errorf("path name must not be empty") - } - - finfo, err := os.Stat(val) - if err != nil { - return fmt.Errorf("%s does not exist", val) - } - - if !finfo.IsDir() { - return fmt.Errorf("%s is not a directory", val) - } - - return nil -} - -func (u *mustDirValue) IsEmpty() bool { - return len(string(*u)) == 0 -} - -// directory - -type dirValue string - -func newDirValue(p *string, val string) *dirValue { - *p = val - return (*dirValue)(p) -} - -func (u *dirValue) Set(val string) error { - *u = dirValue(val) - return nil -} - -func (u *dirValue) String() string { - return string(*u) -} - -func (u *dirValue) Validate() error { - val := string(*u) - - if len(strings.TrimSpace(val)) == 0 { - return nil - } - - finfo, err := os.Stat(val) - if err != nil { - return fmt.Errorf("%s does not exist", val) - } - - if !finfo.IsDir() { - return fmt.Errorf("%s is not a directory", val) - } - - return nil -} - -func (u *dirValue) IsEmpty() bool { - return len(string(*u)) == 0 -} - -// executable - -type execValue string - -func newExecValue(p *string, val string) *execValue { - *p = val - return (*execValue)(p) -} - -func (u *execValue) Set(val string) error { - *u = execValue(val) - return nil -} - -func (u *execValue) String() string { - return string(*u) -} - -func (u *execValue) Validate() error { - val := string(*u) - - _, err := exec.LookPath(val) - if err != nil { - return fmt.Errorf("%s not found or is not executable", val) - } - - return nil -} - -func (u *execValue) IsEmpty() bool { - return len(string(*u)) == 0 -} - -// regular file - -type fileValue string - -func newFileValue(p *string, val string) *fileValue { - *p = val - return (*fileValue)(p) -} - -func (u *fileValue) Set(val string) error { - *u = fileValue(val) - return nil -} - -func (u *fileValue) String() string { - return string(*u) -} - -func (u *fileValue) Validate() error { - val := string(*u) - - if len(val) == 0 { - return nil - } - - finfo, err := os.Stat(val) - if err != nil { - return fmt.Errorf("%s does not exist", val) - } - - if !finfo.Mode().IsRegular() { - return fmt.Errorf("%s is not a regular file", val) - } - - return nil -} - -func (u *fileValue) IsEmpty() bool { - return len(string(*u)) == 0 -} - -// time - -type timeValue time.Time - -func newTimeValue(p *time.Time, val time.Time) *timeValue { - *p = val - return (*timeValue)(p) -} - -func (u *timeValue) Set(val string) error { - v, err := time.Parse(time.RFC3339, val) - if err != nil { - return err - } - *u = timeValue(v) - return nil -} - -func (u *timeValue) String() string { - v := time.Time(*u) - return v.Format(time.RFC3339) -} - -func (u *timeValue) Validate() error { - return nil -} - -func (u *timeValue) IsEmpty() bool { - v := time.Time(*u) - return v.IsZero() -} - -// url - -type urlValue string - -func newURLValue(p *string, val string) *urlValue { - *p = val - return (*urlValue)(p) -} - -func (u *urlValue) Set(val string) error { - *u = urlValue(val) - return nil -} - -func (u *urlValue) String() string { - return string(*u) -} - -func (u *urlValue) Validate() error { - val := string(*u) - - if len(val) == 0 { - return nil - } - - URL, err := url.Parse(val) - if err != nil { - return fmt.Errorf("%s is not a valid URL", val) - } - - if len(URL.Scheme) == 0 || len(URL.Host) == 0 { - return fmt.Errorf("%s is not a valid URL", val) - } - - return nil -} - 
-func (u *urlValue) IsEmpty() bool { - return len(string(*u)) == 0 -} - -// absolute path - -type absolutePathValue string - -func newAbsolutePathValue(p *string, val string) *absolutePathValue { - *p = filepath.Clean(val) - return (*absolutePathValue)(p) -} - -func (s *absolutePathValue) Set(val string) error { - *s = absolutePathValue(filepath.Clean(val)) - return nil -} - -func (s *absolutePathValue) String() string { - return string(*s) -} - -func (s *absolutePathValue) Validate() error { - path := string(*s) - - if !filepath.IsAbs(path) { - return fmt.Errorf("%s is not an absolute path", path) - } - - return nil -} - -func (s *absolutePathValue) IsEmpty() bool { - return len(string(*s)) == 0 -} - -// email address - -type emailValue string - -func newEmailValue(p *string, val string) *emailValue { - *p = val - return (*emailValue)(p) -} - -func (s *emailValue) Set(val string) error { - addr, err := mail.ParseAddress(val) - if err != nil { - return err - } - - *s = emailValue(addr.Address) - return nil -} - -func (s *emailValue) String() string { - return string(*s) -} - -func (s *emailValue) Validate() error { - if len(s.String()) == 0 { - return nil - } - - _, err := mail.ParseAddress(s.String()) - return err -} - -func (s *emailValue) IsEmpty() bool { - return len(string(*s)) == 0 -} diff --git a/config/types_test.go b/config/types_test.go deleted file mode 100644 index f38c3160..00000000 --- a/config/types_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package config - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestIntValue(t *testing.T) { - var i int - - ivar := newIntValue(&i, 11) - - assert.Equal(t, "11", ivar.String()) - assert.Equal(t, nil, ivar.Validate()) - assert.Equal(t, false, ivar.IsEmpty()) - - i = 42 - - assert.Equal(t, "42", ivar.String()) - assert.Equal(t, nil, ivar.Validate()) - assert.Equal(t, false, ivar.IsEmpty()) - - ivar.Set("77") - - assert.Equal(t, int(77), i) -} - -type testdata struct { - value1 int - value2 int -} - -func TestCopyStruct(t *testing.T) { - data1 := testdata{} - - newIntValue(&data1.value1, 1) - newIntValue(&data1.value2, 2) - - assert.Equal(t, int(1), data1.value1) - assert.Equal(t, int(2), data1.value2) - - data2 := testdata{} - - val21 := newIntValue(&data2.value1, 3) - val22 := newIntValue(&data2.value2, 4) - - assert.Equal(t, int(3), data2.value1) - assert.Equal(t, int(4), data2.value2) - - data2 = data1 - - assert.Equal(t, int(1), data2.value1) - assert.Equal(t, int(2), data2.value2) - - assert.Equal(t, "1", val21.String()) - assert.Equal(t, "2", val22.String()) -} diff --git a/config/v1/config.go b/config/v1/config.go new file mode 100644 index 00000000..022edfe9 --- /dev/null +++ b/config/v1/config.go @@ -0,0 +1,405 @@ +package v1 + +import ( + "context" + "net" + "time" + + "github.com/datarhei/core/v16/config/copy" + "github.com/datarhei/core/v16/config/value" + "github.com/datarhei/core/v16/config/vars" + "github.com/datarhei/core/v16/io/fs" + "github.com/datarhei/core/v16/math/rand" + + haikunator "github.com/atrox/haikunatorgo/v2" + "github.com/google/uuid" +) + +const version int64 = 1 + +// Make sure that the config.Config interface is satisfied +//var _ config.Config = &Config{} + +// Config is a wrapper for Data +type Config struct { + fs fs.Filesystem + vars vars.Variables + + Data +} + +// New returns a Config which is initialized with its default values +func New(f fs.Filesystem) *Config { + cfg := &Config{ + fs: f, + } + + if cfg.fs == nil { + cfg.fs, _ = fs.NewMemFilesystem(fs.MemConfig{}) + } + + 
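+ // A nil filesystem is tolerated: the in-memory fallback above lets callers + // construct a throwaway Config without touching disk, as migrate() in + // config/store/json.go does with v1.New(nil). +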
cfg.init() + + return cfg +} + +func (d *Config) Get(name string) (string, error) { + return d.vars.Get(name) +} + +func (d *Config) Set(name, val string) error { + return d.vars.Set(name, val) +} + +// Clone returns a copy of the Config +func (d *Config) Clone() *Config { + data := New(d.fs) + + data.CreatedAt = d.CreatedAt + data.LoadedAt = d.LoadedAt + data.UpdatedAt = d.UpdatedAt + + data.Version = d.Version + data.ID = d.ID + data.Name = d.Name + data.Address = d.Address + data.CheckForUpdates = d.CheckForUpdates + + data.Log = d.Log + data.DB = d.DB + data.Host = d.Host + data.API = d.API + data.TLS = d.TLS + data.Storage = d.Storage + data.RTMP = d.RTMP + data.SRT = d.SRT + data.FFmpeg = d.FFmpeg + data.Playout = d.Playout + data.Debug = d.Debug + data.Metrics = d.Metrics + data.Sessions = d.Sessions + data.Service = d.Service + data.Router = d.Router + + data.Log.Topics = copy.Slice(d.Log.Topics) + + data.Host.Name = copy.Slice(d.Host.Name) + + data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow) + data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block) + data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow) + data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block) + + data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants) + + data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins) + data.Storage.Disk.Cache.Types = copy.Slice(d.Storage.Disk.Cache.Types) + + data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow) + data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block) + data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow) + data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block) + + data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList) + + data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics) + + data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes) + data.Router.Routes = copy.StringMap(d.Router.Routes) + + data.vars.Transfer(&d.vars) + + return data +} + +func (d *Config) init() { + d.vars.Register(value.NewInt64(&d.Version, version), "version", "", nil, "Configuration file layout version", true, false) + d.vars.Register(value.NewTime(&d.CreatedAt, time.Now()), "created_at", "", nil, "Configuration file creation time", false, false) + d.vars.Register(value.NewString(&d.ID, uuid.New().String()), "id", "CORE_ID", nil, "ID for this instance", true, false) + d.vars.Register(value.NewString(&d.Name, haikunator.New().Haikunate()), "name", "CORE_NAME", nil, "A human readable name for this instance", false, false) + d.vars.Register(value.NewAddress(&d.Address, ":8080"), "address", "CORE_ADDRESS", nil, "HTTP listening address", false, false) + d.vars.Register(value.NewBool(&d.CheckForUpdates, true), "update_check", "CORE_UPDATE_CHECK", nil, "Check for updates and send anonymized data", false, false) + + // Log + d.vars.Register(value.NewString(&d.Log.Level, "info"), "log.level", "CORE_LOG_LEVEL", nil, "Loglevel: silent, error, warn, info, debug", false, false) + d.vars.Register(value.NewStringList(&d.Log.Topics, []string{}, ","), "log.topics", "CORE_LOG_TOPICS", nil, "Show only selected log topics", false, false) + d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false) + + // DB + d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config", d.fs), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data",
false, false) + + // Host + d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false) + d.vars.Register(value.NewBool(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false) + + // API + d.vars.Register(value.NewBool(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only read-only access to the API", false, false) + d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false) + d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Block, []string{}, ","), "api.access.http.block", "CORE_API_ACCESS_HTTP_BLOCK", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false) + d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Allow, []string{}, ","), "api.access.https.allow", "CORE_API_ACCESS_HTTPS_ALLOW", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false) + d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Block, []string{}, ","), "api.access.https.block", "CORE_API_ACCESS_HTTPS_BLOCK", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false) + d.vars.Register(value.NewBool(&d.API.Auth.Enable, false), "api.auth.enable", "CORE_API_AUTH_ENABLE", nil, "Enable authentication for all clients", false, false) + d.vars.Register(value.NewBool(&d.API.Auth.DisableLocalhost, false), "api.auth.disable_localhost", "CORE_API_AUTH_DISABLE_LOCALHOST", nil, "Disable authentication for clients from localhost", false, false) + d.vars.Register(value.NewString(&d.API.Auth.Username, ""), "api.auth.username", "CORE_API_AUTH_USERNAME", []string{"RS_USERNAME"}, "Username", false, false) + d.vars.Register(value.NewString(&d.API.Auth.Password, ""), "api.auth.password", "CORE_API_AUTH_PASSWORD", []string{"RS_PASSWORD"}, "Password", false, true) + + // Auth JWT + d.vars.Register(value.NewString(&d.API.Auth.JWT.Secret, rand.String(32)), "api.auth.jwt.secret", "CORE_API_AUTH_JWT_SECRET", nil, "JWT secret, leave empty for generating a random value", false, true) + + // Auth Auth0 + d.vars.Register(value.NewBool(&d.API.Auth.Auth0.Enable, false), "api.auth.auth0.enable", "CORE_API_AUTH_AUTH0_ENABLE", nil, "Enable Auth0", false, false) + d.vars.Register(value.NewTenantList(&d.API.Auth.Auth0.Tenants, []value.Auth0Tenant{}, ","), "api.auth.auth0.tenants", "CORE_API_AUTH_AUTH0_TENANTS", nil, "List of Auth0 tenants", false, false) + + // TLS + d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false) + d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false) + d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false) + d.vars.Register(value.NewFile(&d.TLS.CertFile, "", d.fs), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false) + d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false) + + // Storage + d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
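+ + // For illustration, given the registrations above: an environment such as + // + // CORE_DB_DIR=/var/lib/core CORE_API_AUTH_USERNAME=admin + // + // overrides db.dir and api.auth.username at startup (the invocation itself + // is hypothetical), and legacy aliases such as RS_USERNAME are accepted as + // fallbacks.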
+ + // Storage (Disk) + d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false) + d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false) + d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false) + d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false) + d.vars.Register(value.NewInt64(&d.Storage.Disk.Cache.TTL, 300), "storage.disk.cache.ttl_seconds", "CORE_STORAGE_DISK_CACHE_TTLSECONDS", nil, "Seconds to keep files in cache", false, false) + d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.FileSize, 1), "storage.disk.cache.max_file_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES", nil, "Max. file size to put in cache", false, false) + d.vars.Register(value.NewStringList(&d.Storage.Disk.Cache.Types, []string{}, " "), "storage.disk.cache.types", "CORE_STORAGE_DISK_CACHE_TYPES_ALLOW", []string{"CORE_STORAGE_DISK_CACHE_TYPES"}, "File extensions to cache, empty for all", false, false) + + // Storage (Memory) + d.vars.Register(value.NewBool(&d.Storage.Memory.Auth.Enable, true), "storage.memory.auth.enable", "CORE_STORAGE_MEMORY_AUTH_ENABLE", nil, "Enable basic auth for PUT,POST, and DELETE on /memfs", false, false) + d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Username, "admin"), "storage.memory.auth.username", "CORE_STORAGE_MEMORY_AUTH_USERNAME", nil, "Username for Basic-Auth of /memfs", false, false) + d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Password, rand.StringAlphanumeric(18)), "storage.memory.auth.password", "CORE_STORAGE_MEMORY_AUTH_PASSWORD", nil, "Password for Basic-Auth of /memfs", false, true) + d.vars.Register(value.NewInt64(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. 
allowed megabytes for /memfs, 0 for unlimited", false, false) + d.vars.Register(value.NewBool(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false) + + // Storage (CORS) + d.vars.Register(value.NewCORSOrigins(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false) + + // RTMP + d.vars.Register(value.NewBool(&d.RTMP.Enable, false), "rtmp.enable", "CORE_RTMP_ENABLE", nil, "Enable RTMP server", false, false) + d.vars.Register(value.NewBool(&d.RTMP.EnableTLS, false), "rtmp.enable_tls", "CORE_RTMP_ENABLE_TLS", nil, "Enable RTMPS server instead of RTMP", false, false) + d.vars.Register(value.NewAddress(&d.RTMP.Address, ":1935"), "rtmp.address", "CORE_RTMP_ADDRESS", nil, "RTMP server listen address", false, false) + d.vars.Register(value.NewAbsolutePath(&d.RTMP.App, "/"), "rtmp.app", "CORE_RTMP_APP", nil, "RTMP app for publishing", false, false) + d.vars.Register(value.NewString(&d.RTMP.Token, ""), "rtmp.token", "CORE_RTMP_TOKEN", nil, "RTMP token for publishing and playing", false, true) + + // SRT + d.vars.Register(value.NewBool(&d.SRT.Enable, false), "srt.enable", "CORE_SRT_ENABLE", nil, "Enable SRT server", false, false) + d.vars.Register(value.NewAddress(&d.SRT.Address, ":6000"), "srt.address", "CORE_SRT_ADDRESS", nil, "SRT server listen address", false, false) + d.vars.Register(value.NewString(&d.SRT.Passphrase, ""), "srt.passphrase", "CORE_SRT_PASSPHRASE", nil, "SRT encryption passphrase", false, true) + d.vars.Register(value.NewString(&d.SRT.Token, ""), "srt.token", "CORE_SRT_TOKEN", nil, "SRT token for publishing and playing", false, true) + d.vars.Register(value.NewBool(&d.SRT.Log.Enable, false), "srt.log.enable", "CORE_SRT_LOG_ENABLE", nil, "Enable SRT server logging", false, false) + d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false) + + // FFmpeg + d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false) + d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. 
allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false) + d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expressions to match against the input addresses", false, false) + d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expressions to match against the input addresses", false, false) + d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expressions to match against the output addresses", false, false) + d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expressions to match against the output addresses", false, false) + d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxLines, 50), "ffmpeg.log.max_lines", "CORE_FFMPEG_LOG_MAXLINES", nil, "Number of latest log lines to keep for each process", false, false) + d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxHistory, 3), "ffmpeg.log.max_history", "CORE_FFMPEG_LOG_MAXHISTORY", nil, "Number of latest logs to keep for each process", false, false) + + // Playout + d.vars.Register(value.NewBool(&d.Playout.Enable, false), "playout.enable", "CORE_PLAYOUT_ENABLE", nil, "Enable playout proxy where available", false, false) + d.vars.Register(value.NewPort(&d.Playout.MinPort, 0), "playout.min_port", "CORE_PLAYOUT_MINPORT", nil, "Min. playout server port", false, false) + d.vars.Register(value.NewPort(&d.Playout.MaxPort, 0), "playout.max_port", "CORE_PLAYOUT_MAXPORT", nil, "Max.
playout server port", false, false) + + // Debug + d.vars.Register(value.NewBool(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false) + d.vars.Register(value.NewInt(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false) + + // Metrics + d.vars.Register(value.NewBool(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false) + d.vars.Register(value.NewBool(&d.Metrics.EnablePrometheus, false), "metrics.enable_prometheus", "CORE_METRICS_ENABLE_PROMETHEUS", nil, "Enable prometheus endpoint /metrics", false, false) + d.vars.Register(value.NewInt64(&d.Metrics.Range, 300), "metrics.range_seconds", "CORE_METRICS_RANGE_SECONDS", nil, "Seconds to keep history data", false, false) + d.vars.Register(value.NewInt64(&d.Metrics.Interval, 2), "metrics.interval_seconds", "CORE_METRICS_INTERVAL_SECONDS", nil, "Interval for collecting metrics", false, false) + + // Sessions + d.vars.Register(value.NewBool(&d.Sessions.Enable, true), "sessions.enable", "CORE_SESSIONS_ENABLE", nil, "Enable collecting HLS session stats for /memfs", false, false) + d.vars.Register(value.NewCIDRList(&d.Sessions.IPIgnoreList, []string{"127.0.0.1/32", "::1/128"}, ","), "sessions.ip_ignorelist", "CORE_SESSIONS_IP_IGNORELIST", nil, "List of IP ranges in CIDR notation to ignore", false, false) + d.vars.Register(value.NewInt(&d.Sessions.SessionTimeout, 30), "sessions.session_timeout_sec", "CORE_SESSIONS_SESSION_TIMEOUT_SEC", nil, "Timeout for an idle session", false, false) + d.vars.Register(value.NewBool(&d.Sessions.Persist, false), "sessions.persist", "CORE_SESSIONS_PERSIST", nil, "Whether to persist session history. Will be stored as sessions.json in db.dir", false, false) + d.vars.Register(value.NewInt(&d.Sessions.PersistInterval, 300), "sessions.persist_interval_sec", "CORE_SESSIONS_PERSIST_INTERVAL_SEC", nil, "Interval in seconds in which to persist the current session history", false, false) + d.vars.Register(value.NewUint64(&d.Sessions.MaxBitrate, 0), "sessions.max_bitrate_mbit", "CORE_SESSIONS_MAXBITRATE_MBIT", nil, "Max. allowed outgoing bitrate in mbit/s, 0 for unlimited", false, false) + d.vars.Register(value.NewUint64(&d.Sessions.MaxSessions, 0), "sessions.max_sessions", "CORE_SESSIONS_MAXSESSIONS", nil, "Max. 
allowed number of simultaneous sessions, 0 for unlimited", false, false) + + // Service + d.vars.Register(value.NewBool(&d.Service.Enable, false), "service.enable", "CORE_SERVICE_ENABLE", nil, "Enable connecting to the Restreamer Service", false, false) + d.vars.Register(value.NewString(&d.Service.Token, ""), "service.token", "CORE_SERVICE_TOKEN", nil, "Restreamer Service account token", false, true) + d.vars.Register(value.NewURL(&d.Service.URL, "https://service.datarhei.com"), "service.url", "CORE_SERVICE_URL", nil, "URL of the Restreamer Service", false, false) + + // Router + d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false) + d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false) + d.vars.Register(value.NewDir(&d.Router.UIPath, "", d.fs), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false) +} + +// Validate validates the current state of the Config for completeness and sanity. Errors are +// written to the log. Set resetLogs to reset the logs prior to validation. +func (d *Config) Validate(resetLogs bool) { + if resetLogs { + d.vars.ResetLogs() + } + + if d.Version != version { + d.vars.Log("error", "version", "unknown configuration layout version (found version %d, expecting version %d)", d.Version, version) + + return + } + + d.vars.Validate() + + // Individual sanity checks + + // If HTTP Auth is enabled, check that the username and password are set + if d.API.Auth.Enable { + if len(d.API.Auth.Username) == 0 || len(d.API.Auth.Password) == 0 { + d.vars.Log("error", "api.auth.enable", "api.auth.username and api.auth.password must be set") + } + } + + // If Auth0 is enabled, check that domain, audience, and clientid are set + if d.API.Auth.Auth0.Enable { + if len(d.API.Auth.Auth0.Tenants) == 0 { + d.vars.Log("error", "api.auth.auth0.enable", "at least one tenant must be set") + } + + for i, t := range d.API.Auth.Auth0.Tenants { + if len(t.Domain) == 0 || len(t.Audience) == 0 || len(t.ClientID) == 0 { + d.vars.Log("error", "api.auth.auth0.tenants", "domain, audience, and clientid must be set (tenant %d)", i) + } + } + } + + // If TLS is enabled and Let's Encrypt is disabled, require certfile and keyfile + if d.TLS.Enable && !d.TLS.Auto { + if len(d.TLS.CertFile) == 0 || len(d.TLS.KeyFile) == 0 { + d.vars.Log("error", "tls.enable", "tls.certfile and tls.keyfile must be set") + } + } + + // If TLS and Let's Encrypt certificates are enabled, we require a public hostname + if d.TLS.Enable && d.TLS.Auto { + if len(d.Host.Name) == 0 { + d.vars.Log("error", "host.name", "a hostname must be set in order to get an automatic TLS certificate") + } else { + r := &net.Resolver{ + PreferGo: true, + StrictErrors: true, + } + + for _, host := range d.Host.Name { + // Don't lookup IP addresses + if ip := net.ParseIP(host); ip != nil { + d.vars.Log("error", "host.name", "only host names are allowed if automatic TLS is enabled, but found IP address: %s", host) + } + + // Lookup host name with a timeout + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + + _, err := r.LookupHost(ctx, host) + if err != nil { + d.vars.Log("error", "host.name", "the host '%s' can't be resolved and will not work with automatic TLS", host) + } + + cancel() + } + } + } + + // If TLS
for RTMP is enabled, TLS must be enabled + if d.RTMP.EnableTLS { + if !d.RTMP.Enable { + d.vars.Log("error", "rtmp.enable", "RTMP server must be enabled if RTMPS server is enabled") + } + + if !d.TLS.Enable { + d.vars.Log("error", "rtmp.enable_tls", "RTMPS server can only be enabled if TLS is enabled") + } + } + + // If CORE_MEMFS_USERNAME and CORE_MEMFS_PASSWORD are set, automatically activate/deactivate Basic-Auth for memfs + if d.vars.IsMerged("storage.memory.auth.username") && d.vars.IsMerged("storage.memory.auth.password") { + d.Storage.Memory.Auth.Enable = true + + if len(d.Storage.Memory.Auth.Username) == 0 && len(d.Storage.Memory.Auth.Password) == 0 { + d.Storage.Memory.Auth.Enable = false + } + } + + // If Basic-Auth for memfs is enabled, check that the username and password are set + if d.Storage.Memory.Auth.Enable { + if len(d.Storage.Memory.Auth.Username) == 0 || len(d.Storage.Memory.Auth.Password) == 0 { + d.vars.Log("error", "storage.memory.auth.enable", "storage.memory.auth.username and storage.memory.auth.password must be set") + } + } + + // If playout is enabled, check that the port range is sane + if d.Playout.Enable { + if d.Playout.MinPort >= d.Playout.MaxPort { + d.vars.Log("error", "playout.min_port", "must be smaller than playout.max_port") + } + } + + // If cache is enabled, a valid TTL has to be set to a useful value + if d.Storage.Disk.Cache.Enable && d.Storage.Disk.Cache.TTL < 0 { + d.vars.Log("error", "storage.disk.cache.ttl_seconds", "must be equal to or greater than 0") + } + + // If the stats are enabled, the session timeout has to be set to a useful value + if d.Sessions.Enable && d.Sessions.SessionTimeout < 1 { + d.vars.Log("error", "sessions.session_timeout_sec", "must be equal to or greater than 1") + } + + // If the stats and their persistence are enabled, the persist interval has to be set to a useful value + if d.Sessions.Enable && d.Sessions.PersistInterval < 0 { + d.vars.Log("error", "sessions.persist_interval_sec", "must be equal to or greater than 0") + } + + // If the service is enabled, the token and endpoint have to be defined + if d.Service.Enable { + if len(d.Service.Token) == 0 { + d.vars.Log("error", "service.token", "must be non-empty") + } + + if len(d.Service.URL) == 0 { + d.vars.Log("error", "service.url", "must be non-empty") + } + } + + // If historic metrics are enabled, the timerange and interval have to be valid + if d.Metrics.Enable { + if d.Metrics.Range <= 0 { + d.vars.Log("error", "metrics.range", "must be greater than 0") + } + + if d.Metrics.Interval <= 0 { + d.vars.Log("error", "metrics.interval", "must be greater than 0") + } + + if d.Metrics.Interval > d.Metrics.Range { + d.vars.Log("error", "metrics.interval", "must be smaller than the range") + } + } +} + +func (d *Config) Merge() { + d.vars.Merge() +} + +func (d *Config) Messages(logger func(level string, v vars.Variable, message string)) { + d.vars.Messages(logger) +} + +func (d *Config) HasErrors() bool { + return d.vars.HasErrors() +} + +func (d *Config) Overrides() []string { + return d.vars.Overrides() +} diff --git a/config/data_v1.go b/config/v1/data.go similarity index 74% rename from config/data_v1.go rename to config/v1/data.go index bfd77a64..2826f02d 100644 --- a/config/data_v1.go +++ b/config/v1/data.go @@ -1,12 +1,16 @@ -package config +package v1 -import "time" +import ( + "time" -type dataV1 struct { + "github.com/datarhei/core/v16/config/value" +) + +type Data struct { CreatedAt time.Time `json:"created_at"` LoadedAt time.Time `json:"-"` UpdatedAt time.Time `json:"-"` - Version
int64 `json:"version" jsonschema:"minimum=1,maximum=1"` + Version int64 `json:"version" jsonschema:"minimum=1,maximum=1" format:"int64"` ID string `json:"id"` Name string `json:"name"` Address string `json:"address"` @@ -14,7 +18,7 @@ type dataV1 struct { Log struct { Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"` Topics []string `json:"topics"` - MaxLines int `json:"max_lines"` + MaxLines int `json:"max_lines" format:"int"` } `json:"log"` DB struct { Dir string `json:"dir"` @@ -44,8 +48,8 @@ type dataV1 struct { Secret string `json:"secret"` } `json:"jwt"` Auth0 struct { - Enable bool `json:"enable"` - Tenants []Auth0Tenant `json:"tenants"` + Enable bool `json:"enable"` + Tenants []value.Auth0Tenant `json:"tenants"` } `json:"auth0"` } `json:"auth"` } `json:"api"` @@ -59,12 +63,12 @@ type dataV1 struct { Storage struct { Disk struct { Dir string `json:"dir"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Cache struct { Enable bool `json:"enable"` - Size uint64 `json:"max_size_mbytes"` - TTL int64 `json:"ttl_seconds"` - FileSize uint64 `json:"max_file_size_mbytes"` + Size uint64 `json:"max_size_mbytes" format:"uint64"` + TTL int64 `json:"ttl_seconds" format:"int64"` + FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"` Types []string `json:"types"` } `json:"cache"` } `json:"disk"` @@ -74,7 +78,7 @@ type dataV1 struct { Username string `json:"username"` Password string `json:"password"` } `json:"auth"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Purge bool `json:"purge"` } `json:"memory"` CORS struct { @@ -101,7 +105,7 @@ type dataV1 struct { } `json:"srt"` FFmpeg struct { Binary string `json:"binary"` - MaxProcesses int64 `json:"max_processes"` + MaxProcesses int64 `json:"max_processes" format:"int64"` Access struct { Input struct { Allow []string `json:"allow"` @@ -113,33 +117,33 @@ type dataV1 struct { } `json:"output"` } `json:"access"` Log struct { - MaxLines int `json:"max_lines"` - MaxHistory int `json:"max_history"` + MaxLines int `json:"max_lines" format:"int"` + MaxHistory int `json:"max_history" format:"int"` } `json:"log"` } `json:"ffmpeg"` Playout struct { Enable bool `json:"enable"` - MinPort int `json:"min_port"` - MaxPort int `json:"max_port"` + MinPort int `json:"min_port" format:"int"` + MaxPort int `json:"max_port" format:"int"` } `json:"playout"` Debug struct { Profiling bool `json:"profiling"` - ForceGC int `json:"force_gc"` + ForceGC int `json:"force_gc" format:"int"` } `json:"debug"` Metrics struct { Enable bool `json:"enable"` EnablePrometheus bool `json:"enable_prometheus"` - Range int64 `json:"range_sec"` // seconds - Interval int64 `json:"interval_sec"` // seconds + Range int64 `json:"range_sec" format:"int64"` // seconds + Interval int64 `json:"interval_sec" format:"int64"` // seconds } `json:"metrics"` Sessions struct { Enable bool `json:"enable"` IPIgnoreList []string `json:"ip_ignorelist"` - SessionTimeout int `json:"session_timeout_sec"` + SessionTimeout int `json:"session_timeout_sec" format:"int"` Persist bool `json:"persist"` - PersistInterval int `json:"persist_interval_sec"` - MaxBitrate uint64 `json:"max_bitrate_mbit"` - MaxSessions uint64 `json:"max_sessions"` + PersistInterval int `json:"persist_interval_sec" format:"int"` + MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"` + MaxSessions uint64 `json:"max_sessions" format:"uint64"` } `json:"sessions"` Service 
struct { Enable bool `json:"enable"` diff --git a/config/v2/config.go b/config/v2/config.go new file mode 100644 index 00000000..e1bfb0cb --- /dev/null +++ b/config/v2/config.go @@ -0,0 +1,406 @@ +package v2 + +import ( + "context" + "net" + "time" + + "github.com/datarhei/core/v16/config/copy" + "github.com/datarhei/core/v16/config/value" + "github.com/datarhei/core/v16/config/vars" + "github.com/datarhei/core/v16/io/fs" + "github.com/datarhei/core/v16/math/rand" + + haikunator "github.com/atrox/haikunatorgo/v2" + "github.com/google/uuid" +) + +const version int64 = 2 + +// Make sure that the config.Config interface is satisfied +//var _ config.Config = &Config{} + +// Config is a wrapper for Data +type Config struct { + fs fs.Filesystem + vars vars.Variables + + Data +} + +// New returns a Config which is initialized with its default values +func New(f fs.Filesystem) *Config { + cfg := &Config{ + fs: f, + } + + if cfg.fs == nil { + cfg.fs, _ = fs.NewMemFilesystem(fs.MemConfig{}) + } + + cfg.init() + + return cfg +} + +func (d *Config) Get(name string) (string, error) { + return d.vars.Get(name) +} + +func (d *Config) Set(name, val string) error { + return d.vars.Set(name, val) +} + +// Clone returns a copy of the Config +func (d *Config) Clone() *Config { + data := New(d.fs) + + data.CreatedAt = d.CreatedAt + data.LoadedAt = d.LoadedAt + data.UpdatedAt = d.UpdatedAt + + data.Version = d.Version + data.ID = d.ID + data.Name = d.Name + data.Address = d.Address + data.CheckForUpdates = d.CheckForUpdates + + data.Log = d.Log + data.DB = d.DB + data.Host = d.Host + data.API = d.API + data.TLS = d.TLS + data.Storage = d.Storage + data.RTMP = d.RTMP + data.SRT = d.SRT + data.FFmpeg = d.FFmpeg + data.Playout = d.Playout + data.Debug = d.Debug + data.Metrics = d.Metrics + data.Sessions = d.Sessions + data.Service = d.Service + data.Router = d.Router + + data.Log.Topics = copy.Slice(d.Log.Topics) + + data.Host.Name = copy.Slice(d.Host.Name) + + data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow) + data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block) + data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow) + data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block) + + data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants) + + data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins) + data.Storage.Disk.Cache.Types = copy.Slice(d.Storage.Disk.Cache.Types) + + data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow) + data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block) + data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow) + data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block) + + data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList) + + data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics) + + data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes) + data.Router.Routes = copy.StringMap(d.Router.Routes) + + data.vars.Transfer(&d.vars) + + return data +} + +func (d *Config) init() { + d.vars.Register(value.NewInt64(&d.Version, version), "version", "", nil, "Configuration file layout version", true, false) + d.vars.Register(value.NewTime(&d.CreatedAt, time.Now()), "created_at", "", nil, "Configuration file creation time", false, false) + d.vars.Register(value.NewString(&d.ID, uuid.New().String()), "id", "CORE_ID", nil, "ID for this instance", true, false) + d.vars.Register(value.NewString(&d.Name, haikunator.New().Haikunate()),
"name", "CORE_NAME", nil, "A human readable name for this instance", false, false) + d.vars.Register(value.NewAddress(&d.Address, ":8080"), "address", "CORE_ADDRESS", nil, "HTTP listening address", false, false) + d.vars.Register(value.NewBool(&d.CheckForUpdates, true), "update_check", "CORE_UPDATE_CHECK", nil, "Check for updates and send anonymized data", false, false) + + // Log + d.vars.Register(value.NewString(&d.Log.Level, "info"), "log.level", "CORE_LOG_LEVEL", nil, "Loglevel: silent, error, warn, info, debug", false, false) + d.vars.Register(value.NewStringList(&d.Log.Topics, []string{}, ","), "log.topics", "CORE_LOG_TOPICS", nil, "Show only selected log topics", false, false) + d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false) + + // DB + d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config", d.fs), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false) + + // Host + d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false) + d.vars.Register(value.NewBool(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false) + + // API + d.vars.Register(value.NewBool(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only ready only access to the API", false, false) + d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false) + d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Block, []string{}, ","), "api.access.http.block", "CORE_API_ACCESS_HTTP_BLOCK", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false) + d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Allow, []string{}, ","), "api.access.https.allow", "CORE_API_ACCESS_HTTPS_ALLOW", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false) + d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Block, []string{}, ","), "api.access.https.block", "CORE_API_ACCESS_HTTPS_BLOCK", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false) + d.vars.Register(value.NewBool(&d.API.Auth.Enable, false), "api.auth.enable", "CORE_API_AUTH_ENABLE", nil, "Enable authentication for all clients", false, false) + d.vars.Register(value.NewBool(&d.API.Auth.DisableLocalhost, false), "api.auth.disable_localhost", "CORE_API_AUTH_DISABLE_LOCALHOST", nil, "Disable authentication for clients from localhost", false, false) + d.vars.Register(value.NewString(&d.API.Auth.Username, ""), "api.auth.username", "CORE_API_AUTH_USERNAME", []string{"RS_USERNAME"}, "Username", false, false) + d.vars.Register(value.NewString(&d.API.Auth.Password, ""), "api.auth.password", "CORE_API_AUTH_PASSWORD", []string{"RS_PASSWORD"}, "Password", false, true) + + // Auth JWT + d.vars.Register(value.NewString(&d.API.Auth.JWT.Secret, rand.String(32)), "api.auth.jwt.secret", "CORE_API_AUTH_JWT_SECRET", nil, "JWT secret, leave empty for generating a random value", false, true) + + // Auth Auth0 + d.vars.Register(value.NewBool(&d.API.Auth.Auth0.Enable, false), "api.auth.auth0.enable", "CORE_API_AUTH_AUTH0_ENABLE", nil, "Enable Auth0", false, false) + d.vars.Register(value.NewTenantList(&d.API.Auth.Auth0.Tenants, []value.Auth0Tenant{}, ","), "api.auth.auth0.tenants", 
"CORE_API_AUTH_AUTH0_TENANTS", nil, "List of Auth0 tenants", false, false) + + // TLS + d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false) + d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false) + d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false) + d.vars.Register(value.NewFile(&d.TLS.CertFile, "", d.fs), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false) + d.vars.Register(value.NewFile(&d.TLS.KeyFile, "", d.fs), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false) + + // Storage + d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types", d.fs), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false) + + // Storage (Disk) + d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data", d.fs), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false) + d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false) + d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false) + d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false) + d.vars.Register(value.NewInt64(&d.Storage.Disk.Cache.TTL, 300), "storage.disk.cache.ttl_seconds", "CORE_STORAGE_DISK_CACHE_TTLSECONDS", nil, "Seconds to keep files in cache", false, false) + d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.FileSize, 1), "storage.disk.cache.max_file_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES", nil, "Max. file size to put in cache", false, false) + d.vars.Register(value.NewStringList(&d.Storage.Disk.Cache.Types, []string{}, " "), "storage.disk.cache.types", "CORE_STORAGE_DISK_CACHE_TYPES_ALLOW", []string{"CORE_STORAGE_DISK_CACHE_TYPES"}, "File extensions to cache, empty for all", false, false) + + // Storage (Memory) + d.vars.Register(value.NewBool(&d.Storage.Memory.Auth.Enable, true), "storage.memory.auth.enable", "CORE_STORAGE_MEMORY_AUTH_ENABLE", nil, "Enable basic auth for PUT,POST, and DELETE on /memfs", false, false) + d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Username, "admin"), "storage.memory.auth.username", "CORE_STORAGE_MEMORY_AUTH_USERNAME", nil, "Username for Basic-Auth of /memfs", false, false) + d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Password, rand.StringAlphanumeric(18)), "storage.memory.auth.password", "CORE_STORAGE_MEMORY_AUTH_PASSWORD", nil, "Password for Basic-Auth of /memfs", false, true) + d.vars.Register(value.NewInt64(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. 
allowed megabytes for /memfs, 0 for unlimited", false, false) + d.vars.Register(value.NewBool(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false) + + // Storage (CORS) + d.vars.Register(value.NewCORSOrigins(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false) + + // RTMP + d.vars.Register(value.NewBool(&d.RTMP.Enable, false), "rtmp.enable", "CORE_RTMP_ENABLE", nil, "Enable RTMP server", false, false) + d.vars.Register(value.NewBool(&d.RTMP.EnableTLS, false), "rtmp.enable_tls", "CORE_RTMP_ENABLE_TLS", nil, "Enable RTMPS server instead of RTMP", false, false) + d.vars.Register(value.NewAddress(&d.RTMP.Address, ":1935"), "rtmp.address", "CORE_RTMP_ADDRESS", nil, "RTMP server listen address", false, false) + d.vars.Register(value.NewAddress(&d.RTMP.AddressTLS, ":1936"), "rtmp.address_tls", "CORE_RTMP_ADDRESS_TLS", nil, "RTMPS server listen address", false, false) + d.vars.Register(value.NewAbsolutePath(&d.RTMP.App, "/"), "rtmp.app", "CORE_RTMP_APP", nil, "RTMP app for publishing", false, false) + d.vars.Register(value.NewString(&d.RTMP.Token, ""), "rtmp.token", "CORE_RTMP_TOKEN", nil, "RTMP token for publishing and playing", false, true) + + // SRT + d.vars.Register(value.NewBool(&d.SRT.Enable, false), "srt.enable", "CORE_SRT_ENABLE", nil, "Enable SRT server", false, false) + d.vars.Register(value.NewAddress(&d.SRT.Address, ":6000"), "srt.address", "CORE_SRT_ADDRESS", nil, "SRT server listen address", false, false) + d.vars.Register(value.NewString(&d.SRT.Passphrase, ""), "srt.passphrase", "CORE_SRT_PASSPHRASE", nil, "SRT encryption passphrase", false, true) + d.vars.Register(value.NewString(&d.SRT.Token, ""), "srt.token", "CORE_SRT_TOKEN", nil, "SRT token for publishing and playing", false, true) + d.vars.Register(value.NewBool(&d.SRT.Log.Enable, false), "srt.log.enable", "CORE_SRT_LOG_ENABLE", nil, "Enable SRT server logging", false, false) + d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false) + + // FFmpeg + d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg", d.fs), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false) + d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. 
allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false) + d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expressions to match against the input addresses", false, false) + d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expressions to match against the input addresses", false, false) + d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expressions to match against the output addresses", false, false) + d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expressions to match against the output addresses", false, false) + d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxLines, 50), "ffmpeg.log.max_lines", "CORE_FFMPEG_LOG_MAXLINES", nil, "Number of latest log lines to keep for each process", false, false) + d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxHistory, 3), "ffmpeg.log.max_history", "CORE_FFMPEG_LOG_MAXHISTORY", nil, "Number of latest logs to keep for each process", false, false) + + // Playout + d.vars.Register(value.NewBool(&d.Playout.Enable, false), "playout.enable", "CORE_PLAYOUT_ENABLE", nil, "Enable playout proxy where available", false, false) + d.vars.Register(value.NewPort(&d.Playout.MinPort, 0), "playout.min_port", "CORE_PLAYOUT_MINPORT", nil, "Min. playout server port", false, false) + d.vars.Register(value.NewPort(&d.Playout.MaxPort, 0), "playout.max_port", "CORE_PLAYOUT_MAXPORT", nil, "Max. 
playout server port", false, false) + + // Debug + d.vars.Register(value.NewBool(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false) + d.vars.Register(value.NewInt(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false) + + // Metrics + d.vars.Register(value.NewBool(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false) + d.vars.Register(value.NewBool(&d.Metrics.EnablePrometheus, false), "metrics.enable_prometheus", "CORE_METRICS_ENABLE_PROMETHEUS", nil, "Enable prometheus endpoint /metrics", false, false) + d.vars.Register(value.NewInt64(&d.Metrics.Range, 300), "metrics.range_seconds", "CORE_METRICS_RANGE_SECONDS", nil, "Seconds to keep history data", false, false) + d.vars.Register(value.NewInt64(&d.Metrics.Interval, 2), "metrics.interval_seconds", "CORE_METRICS_INTERVAL_SECONDS", nil, "Interval for collecting metrics", false, false) + + // Sessions + d.vars.Register(value.NewBool(&d.Sessions.Enable, true), "sessions.enable", "CORE_SESSIONS_ENABLE", nil, "Enable collecting HLS session stats for /memfs", false, false) + d.vars.Register(value.NewCIDRList(&d.Sessions.IPIgnoreList, []string{"127.0.0.1/32", "::1/128"}, ","), "sessions.ip_ignorelist", "CORE_SESSIONS_IP_IGNORELIST", nil, "List of IP ranges in CIDR notation to ignore", false, false) + d.vars.Register(value.NewInt(&d.Sessions.SessionTimeout, 30), "sessions.session_timeout_sec", "CORE_SESSIONS_SESSION_TIMEOUT_SEC", nil, "Timeout for an idle session", false, false) + d.vars.Register(value.NewBool(&d.Sessions.Persist, false), "sessions.persist", "CORE_SESSIONS_PERSIST", nil, "Whether to persist session history. Will be stored as sessions.json in db.dir", false, false) + d.vars.Register(value.NewInt(&d.Sessions.PersistInterval, 300), "sessions.persist_interval_sec", "CORE_SESSIONS_PERSIST_INTERVAL_SEC", nil, "Interval in seconds in which to persist the current session history", false, false) + d.vars.Register(value.NewUint64(&d.Sessions.MaxBitrate, 0), "sessions.max_bitrate_mbit", "CORE_SESSIONS_MAXBITRATE_MBIT", nil, "Max. allowed outgoing bitrate in mbit/s, 0 for unlimited", false, false) + d.vars.Register(value.NewUint64(&d.Sessions.MaxSessions, 0), "sessions.max_sessions", "CORE_SESSIONS_MAXSESSIONS", nil, "Max. 
allowed number of simultaneous sessions, 0 for unlimited", false, false) + + // Service + d.vars.Register(value.NewBool(&d.Service.Enable, false), "service.enable", "CORE_SERVICE_ENABLE", nil, "Enable connecting to the Restreamer Service", false, false) + d.vars.Register(value.NewString(&d.Service.Token, ""), "service.token", "CORE_SERVICE_TOKEN", nil, "Restreamer Service account token", false, true) + d.vars.Register(value.NewURL(&d.Service.URL, "https://service.datarhei.com"), "service.url", "CORE_SERVICE_URL", nil, "URL of the Restreamer Service", false, false) + + // Router + d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false) + d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false) + d.vars.Register(value.NewDir(&d.Router.UIPath, "", d.fs), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false) +} + +// Validate validates the current state of the Config for completeness and sanity. Errors are +// written to the log. Use resetLogs to reset the logs prior to validation. +func (d *Config) Validate(resetLogs bool) { + if resetLogs { + d.vars.ResetLogs() + } + + if d.Version != version { + d.vars.Log("error", "version", "unknown configuration layout version (found version %d, expecting version %d)", d.Version, version) + + return + } + + d.vars.Validate() + + // Individual sanity checks + + // If HTTP Auth is enabled, check that the username and password are set + if d.API.Auth.Enable { + if len(d.API.Auth.Username) == 0 || len(d.API.Auth.Password) == 0 { + d.vars.Log("error", "api.auth.enable", "api.auth.username and api.auth.password must be set") + } + } + + // If Auth0 is enabled, check that domain, audience, and clientid are set + if d.API.Auth.Auth0.Enable { + if len(d.API.Auth.Auth0.Tenants) == 0 { + d.vars.Log("error", "api.auth.auth0.enable", "at least one tenant must be set") + } + + for i, t := range d.API.Auth.Auth0.Tenants { + if len(t.Domain) == 0 || len(t.Audience) == 0 || len(t.ClientID) == 0 { + d.vars.Log("error", "api.auth.auth0.tenants", "domain, audience, and clientid must be set (tenant %d)", i) + } + } + } + + // If TLS is enabled and Let's Encrypt is disabled, require certfile and keyfile + if d.TLS.Enable && !d.TLS.Auto { + if len(d.TLS.CertFile) == 0 || len(d.TLS.KeyFile) == 0 { + d.vars.Log("error", "tls.enable", "tls.certfile and tls.keyfile must be set") + } + } + + // If TLS and the Let's Encrypt certificate are enabled, we require a public hostname + if d.TLS.Enable && d.TLS.Auto { + if len(d.Host.Name) == 0 { + d.vars.Log("error", "host.name", "a hostname must be set in order to get an automatic TLS certificate") + } else { + r := &net.Resolver{ + PreferGo: true, + StrictErrors: true, + } + + for _, host := range d.Host.Name { + // Don't look up IP addresses + if ip := net.ParseIP(host); ip != nil { + d.vars.Log("error", "host.name", "only host names are allowed if automatic TLS is enabled, but found IP address: %s", host) + } + + // Look up the host name with a timeout + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + + _, err := r.LookupHost(ctx, host) + if err != nil { + d.vars.Log("error", "host.name", "the host '%s' can't be resolved and will not work with automatic TLS", host) + } + + cancel() + } + } + } + + // If TLS 
for RTMP is enabled, TLS must be enabled + if d.RTMP.EnableTLS { + if !d.RTMP.Enable { + d.vars.Log("error", "rtmp.enable", "RTMP server must be enabled if RTMPS server is enabled") + } + + if !d.TLS.Enable { + d.vars.Log("error", "rtmp.enable_tls", "RTMPS server can only be enabled if TLS is enabled") + } + } + + // If CORE_MEMFS_USERNAME and CORE_MEMFS_PASSWORD are set, automatically activate/deactivate Basic-Auth for memfs + if d.vars.IsMerged("storage.memory.auth.username") && d.vars.IsMerged("storage.memory.auth.password") { + d.Storage.Memory.Auth.Enable = true + + if len(d.Storage.Memory.Auth.Username) == 0 && len(d.Storage.Memory.Auth.Password) == 0 { + d.Storage.Memory.Auth.Enable = false + } + } + + // If Basic-Auth for memfs is enabled, check that the username and password are set + if d.Storage.Memory.Auth.Enable { + if len(d.Storage.Memory.Auth.Username) == 0 || len(d.Storage.Memory.Auth.Password) == 0 { + d.vars.Log("error", "storage.memory.auth.enable", "storage.memory.auth.username and storage.memory.auth.password must be set") + } + } + + // If playout is enabled, check that the port range is sane + if d.Playout.Enable { + if d.Playout.MinPort >= d.Playout.MaxPort { + d.vars.Log("error", "playout.min_port", "must be smaller than playout.max_port") + } + } + + // If cache is enabled, a valid TTL has to be set to a useful value + if d.Storage.Disk.Cache.Enable && d.Storage.Disk.Cache.TTL < 0 { + d.vars.Log("error", "storage.disk.cache.ttl_seconds", "must be equal to or greater than 0") + } + + // If the session stats are enabled, the session timeout has to be set to a useful value + if d.Sessions.Enable && d.Sessions.SessionTimeout < 1 { + d.vars.Log("error", "sessions.session_timeout_sec", "must be equal to or greater than 1") + } + + // If the session stats and their persistence are enabled, the persist interval has to be set to a useful value + if d.Sessions.Enable && d.Sessions.PersistInterval < 0 { + d.vars.Log("error", "sessions.persist_interval_sec", "must be equal to or greater than 0") + } + + // If the service is enabled, the token and endpoint have to be defined + if d.Service.Enable { + if len(d.Service.Token) == 0 { + d.vars.Log("error", "service.token", "must be non-empty") + } + + if len(d.Service.URL) == 0 { + d.vars.Log("error", "service.url", "must be non-empty") + } + } + + // If historic metrics are enabled, the timerange and interval have to be valid + if d.Metrics.Enable { + if d.Metrics.Range <= 0 { + d.vars.Log("error", "metrics.range_seconds", "must be greater than 0") + } + + if d.Metrics.Interval <= 0 { + d.vars.Log("error", "metrics.interval_seconds", "must be greater than 0") + } + + if d.Metrics.Interval > d.Metrics.Range { + d.vars.Log("error", "metrics.interval_seconds", "must be smaller than the range") + } + } +} + +func (d *Config) Merge() { + d.vars.Merge() +} + +func (d *Config) Messages(logger func(level string, v vars.Variable, message string)) { + d.vars.Messages(logger) +} + +func (d *Config) HasErrors() bool { + return d.vars.HasErrors() +} + +func (d *Config) Overrides() []string { + return d.vars.Overrides() +} diff --git a/config/data_v2.go b/config/v2/data.go similarity index 53% rename from config/data_v2.go rename to config/v2/data.go index 0249429d..1c226376 100644 --- a/config/data_v2.go +++ b/config/v2/data.go @@ -1,4 +1,4 @@ -package config +package v2 import ( "fmt" @@ -6,13 +6,18 @@ import ( "strconv" "strings" "time" + + "github.com/datarhei/core/v16/config/copy" + v1 "github.com/datarhei/core/v16/config/v1" + "github.com/datarhei/core/v16/config/value" + 
"github.com/datarhei/core/v16/io/fs" ) -type dataV2 struct { +type Data struct { CreatedAt time.Time `json:"created_at"` LoadedAt time.Time `json:"-"` UpdatedAt time.Time `json:"-"` - Version int64 `json:"version" jsonschema:"minimum=2,maximum=2"` + Version int64 `json:"version" jsonschema:"minimum=2,maximum=2" format:"int64"` ID string `json:"id"` Name string `json:"name"` Address string `json:"address"` @@ -20,7 +25,7 @@ type dataV2 struct { Log struct { Level string `json:"level" enums:"debug,info,warn,error,silent" jsonschema:"enum=debug,enum=info,enum=warn,enum=error,enum=silent"` Topics []string `json:"topics"` - MaxLines int `json:"max_lines"` + MaxLines int `json:"max_lines" format:"int"` } `json:"log"` DB struct { Dir string `json:"dir"` @@ -50,8 +55,8 @@ type dataV2 struct { Secret string `json:"secret"` } `json:"jwt"` Auth0 struct { - Enable bool `json:"enable"` - Tenants []Auth0Tenant `json:"tenants"` + Enable bool `json:"enable"` + Tenants []value.Auth0Tenant `json:"tenants"` } `json:"auth0"` } `json:"auth"` } `json:"api"` @@ -65,12 +70,12 @@ type dataV2 struct { Storage struct { Disk struct { Dir string `json:"dir"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Cache struct { Enable bool `json:"enable"` - Size uint64 `json:"max_size_mbytes"` - TTL int64 `json:"ttl_seconds"` - FileSize uint64 `json:"max_file_size_mbytes"` + Size uint64 `json:"max_size_mbytes" format:"uint64"` + TTL int64 `json:"ttl_seconds" format:"int64"` + FileSize uint64 `json:"max_file_size_mbytes" format:"uint64"` Types []string `json:"types"` } `json:"cache"` } `json:"disk"` @@ -80,7 +85,7 @@ type dataV2 struct { Username string `json:"username"` Password string `json:"password"` } `json:"auth"` - Size int64 `json:"max_size_mbytes"` + Size int64 `json:"max_size_mbytes" format:"int64"` Purge bool `json:"purge"` } `json:"memory"` CORS struct { @@ -108,7 +113,7 @@ type dataV2 struct { } `json:"srt"` FFmpeg struct { Binary string `json:"binary"` - MaxProcesses int64 `json:"max_processes"` + MaxProcesses int64 `json:"max_processes" format:"int64"` Access struct { Input struct { Allow []string `json:"allow"` @@ -120,33 +125,33 @@ type dataV2 struct { } `json:"output"` } `json:"access"` Log struct { - MaxLines int `json:"max_lines"` - MaxHistory int `json:"max_history"` + MaxLines int `json:"max_lines" format:"int"` + MaxHistory int `json:"max_history" format:"int"` } `json:"log"` } `json:"ffmpeg"` Playout struct { Enable bool `json:"enable"` - MinPort int `json:"min_port"` - MaxPort int `json:"max_port"` + MinPort int `json:"min_port" format:"int"` + MaxPort int `json:"max_port" format:"int"` } `json:"playout"` Debug struct { Profiling bool `json:"profiling"` - ForceGC int `json:"force_gc"` + ForceGC int `json:"force_gc" format:"int"` } `json:"debug"` Metrics struct { Enable bool `json:"enable"` EnablePrometheus bool `json:"enable_prometheus"` - Range int64 `json:"range_sec"` // seconds - Interval int64 `json:"interval_sec"` // seconds + Range int64 `json:"range_sec" format:"int64"` // seconds + Interval int64 `json:"interval_sec" format:"int64"` // seconds } `json:"metrics"` Sessions struct { Enable bool `json:"enable"` IPIgnoreList []string `json:"ip_ignorelist"` - SessionTimeout int `json:"session_timeout_sec"` + SessionTimeout int `json:"session_timeout_sec" format:"int"` Persist bool `json:"persist"` - PersistInterval int `json:"persist_interval_sec"` - MaxBitrate uint64 `json:"max_bitrate_mbit"` - MaxSessions uint64 `json:"max_sessions"` + PersistInterval int 
`json:"persist_interval_sec" format:"int"` + MaxBitrate uint64 `json:"max_bitrate_mbit" format:"uint64"` + MaxSessions uint64 `json:"max_sessions" format:"uint64"` } `json:"sessions"` Service struct { Enable bool `json:"enable"` @@ -160,11 +165,15 @@ type dataV2 struct { } `json:"router"` } -// Migrate will migrate some settings, depending on the version it finds. Migrations -// are only going upwards,i.e. from a lower version to a higher version. -func NewV2FromV1(d *dataV1) (*dataV2, error) { - data := &dataV2{} +func UpgradeV1ToV2(d *v1.Data, fs fs.Filesystem) (*Data, error) { + cfg := New(fs) + return MergeV1ToV2(&cfg.Data, d) +} + +// Migrate will migrate some settings, depending on the version it finds. Migrations +// are only going upwards, i.e. from a lower version to a higher version. +func MergeV1ToV2(data *Data, d *v1.Data) (*Data, error) { data.CreatedAt = d.CreatedAt data.LoadedAt = d.LoadedAt data.UpdatedAt = d.UpdatedAt @@ -189,30 +198,30 @@ func NewV2FromV1(d *dataV1) (*dataV2, error) { data.Service = d.Service data.Router = d.Router - data.Log.Topics = copyStringSlice(d.Log.Topics) + data.Log.Topics = copy.Slice(d.Log.Topics) - data.Host.Name = copyStringSlice(d.Host.Name) + data.Host.Name = copy.Slice(d.Host.Name) - data.API.Access.HTTP.Allow = copyStringSlice(d.API.Access.HTTP.Allow) - data.API.Access.HTTP.Block = copyStringSlice(d.API.Access.HTTP.Block) - data.API.Access.HTTPS.Allow = copyStringSlice(d.API.Access.HTTPS.Allow) - data.API.Access.HTTPS.Block = copyStringSlice(d.API.Access.HTTPS.Block) + data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow) + data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block) + data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow) + data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block) - data.API.Auth.Auth0.Tenants = copyTenantSlice(d.API.Auth.Auth0.Tenants) + data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants) - data.Storage.CORS.Origins = copyStringSlice(d.Storage.CORS.Origins) + data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins) - data.FFmpeg.Access.Input.Allow = copyStringSlice(d.FFmpeg.Access.Input.Allow) - data.FFmpeg.Access.Input.Block = copyStringSlice(d.FFmpeg.Access.Input.Block) - data.FFmpeg.Access.Output.Allow = copyStringSlice(d.FFmpeg.Access.Output.Allow) - data.FFmpeg.Access.Output.Block = copyStringSlice(d.FFmpeg.Access.Output.Block) + data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow) + data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block) + data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow) + data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block) - data.Sessions.IPIgnoreList = copyStringSlice(d.Sessions.IPIgnoreList) + data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList) - data.SRT.Log.Topics = copyStringSlice(d.SRT.Log.Topics) + data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics) - data.Router.BlockedPrefixes = copyStringSlice(d.Router.BlockedPrefixes) - data.Router.Routes = copyStringMap(d.Router.Routes) + data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes) + data.Router.Routes = copy.StringMap(d.Router.Routes) // Actual changes data.RTMP.Enable = d.RTMP.Enable @@ -245,3 +254,67 @@ func NewV2FromV1(d *dataV1) (*dataV2, error) { return data, nil } + +func DowngradeV2toV1(d *Data) (*v1.Data, error) { + data := &v1.Data{} + + data.CreatedAt = d.CreatedAt + data.LoadedAt = d.LoadedAt + data.UpdatedAt = d.UpdatedAt + + data.ID = d.ID 
+ data.Name = d.Name + data.Address = d.Address + data.CheckForUpdates = d.CheckForUpdates + + data.Log = d.Log + data.DB = d.DB + data.Host = d.Host + data.API = d.API + data.TLS = d.TLS + data.Storage = d.Storage + data.SRT = d.SRT + data.FFmpeg = d.FFmpeg + data.Playout = d.Playout + data.Debug = d.Debug + data.Metrics = d.Metrics + data.Sessions = d.Sessions + data.Service = d.Service + data.Router = d.Router + + data.Log.Topics = copy.Slice(d.Log.Topics) + + data.Host.Name = copy.Slice(d.Host.Name) + + data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow) + data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block) + data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow) + data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block) + + data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants) + + data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins) + + data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow) + data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block) + data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow) + data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block) + + data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList) + + data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics) + + data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes) + data.Router.Routes = copy.StringMap(d.Router.Routes) + + // Actual changes + data.RTMP.Enable = d.RTMP.Enable + data.RTMP.EnableTLS = d.RTMP.EnableTLS + data.RTMP.Address = d.RTMP.Address + data.RTMP.App = d.RTMP.App + data.RTMP.Token = d.RTMP.Token + + data.Version = 1 + + return data, nil +} diff --git a/config/value/auth0.go b/config/value/auth0.go new file mode 100644 index 00000000..a912134d --- /dev/null +++ b/config/value/auth0.go @@ -0,0 +1,126 @@ +package value + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "net/url" + "strings" +) + +// array of auth0 tenants + +type Auth0Tenant struct { + Domain string `json:"domain"` + Audience string `json:"audience"` + ClientID string `json:"clientid"` + Users []string `json:"users"` +} + +func (a *Auth0Tenant) String() string { + u := url.URL{ + Scheme: "auth0", + Host: a.Domain, + } + + if len(a.ClientID) != 0 { + u.User = url.User(a.ClientID) + } + + q := url.Values{} + q.Set("aud", a.Audience) + + for _, user := range a.Users { + q.Add("user", user) + } + + u.RawQuery = q.Encode() + + return u.String() +} + +type TenantList struct { + p *[]Auth0Tenant + separator string +} + +func NewTenantList(p *[]Auth0Tenant, val []Auth0Tenant, separator string) *TenantList { + v := &TenantList{ + p: p, + separator: separator, + } + + *p = val + + return v +} + +// Set allows setting a tenant list in two formats: +// - a separator-separated list of base64-encoded Auth0Tenant JSON objects +// - a separator-separated list of Auth0Tenant in URL representation: auth0://[clientid]@[domain]?aud=[audience]&user=...&user=... 
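+// For illustration only (hypothetical values): a tenant in URL representation could look like +// auth0://Z3NKabc@example.eu.auth0.com?aud=https%3A%2F%2Fapi.example.com&user=auth0%7C123, +// which is equivalent to the base64 encoding of the JSON object +// {"domain":"example.eu.auth0.com","audience":"https://api.example.com","clientid":"Z3NKabc","users":["auth0|123"]}.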
+func (s *TenantList) Set(val string) error { + list := []Auth0Tenant{} + + for i, elm := range strings.Split(val, s.separator) { + t := Auth0Tenant{} + + if strings.HasPrefix(elm, "auth0://") { + data, err := url.Parse(elm) + if err != nil { + return fmt.Errorf("invalid url encoding of tenant %d: %w", i, err) + } + + t.Domain = data.Host + t.ClientID = data.User.Username() + t.Audience = data.Query().Get("aud") + t.Users = data.Query()["user"] + } else { + data, err := base64.StdEncoding.DecodeString(elm) + if err != nil { + return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err) + } + + if err := json.Unmarshal(data, &t); err != nil { + return fmt.Errorf("invalid JSON in tenant %d: %w", i, err) + } + } + + list = append(list, t) + } + + *s.p = list + + return nil +} + +func (s *TenantList) String() string { + if s.IsEmpty() { + return "(empty)" + } + + list := []string{} + + for _, t := range *s.p { + list = append(list, t.String()) + } + + return strings.Join(list, s.separator) +} + +func (s *TenantList) Validate() error { + for i, t := range *s.p { + if len(t.Domain) == 0 { + return fmt.Errorf("the domain for tenant %d is missing", i) + } + + if len(t.Audience) == 0 { + return fmt.Errorf("the audience for tenant %d is missing", i) + } + } + + return nil +} + +func (s *TenantList) IsEmpty() bool { + return len(*s.p) == 0 +} diff --git a/config/value/auth0_test.go b/config/value/auth0_test.go new file mode 100644 index 00000000..edc4eff8 --- /dev/null +++ b/config/value/auth0_test.go @@ -0,0 +1,43 @@ +package value + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAuth0Value(t *testing.T) { + tenants := []Auth0Tenant{} + + v := NewTenantList(&tenants, nil, " ") + require.Equal(t, "(empty)", v.String()) + + v.Set("auth0://clientid@domain?aud=audience&user=user1&user=user2 auth0://domain2?aud=audience2&user=user3") + require.Equal(t, []Auth0Tenant{ + { + Domain: "domain", + ClientID: "clientid", + Audience: "audience", + Users: []string{"user1", "user2"}, + }, + { + Domain: "domain2", + Audience: "audience2", + Users: []string{"user3"}, + }, + }, tenants) + require.Equal(t, "auth0://clientid@domain?aud=audience&user=user1&user=user2 auth0://domain2?aud=audience2&user=user3", v.String()) + require.NoError(t, v.Validate()) + + v.Set("eyJkb21haW4iOiJkYXRhcmhlaS5ldS5hdXRoMC5jb20iLCJhdWRpZW5jZSI6Imh0dHBzOi8vZGF0YXJoZWkuY29tL2NvcmUiLCJ1c2VycyI6WyJhdXRoMHx4eHgiXX0=") + require.Equal(t, []Auth0Tenant{ + { + Domain: "datarhei.eu.auth0.com", + ClientID: "", + Audience: "https://datarhei.com/core", + Users: []string{"auth0|xxx"}, + }, + }, tenants) + require.Equal(t, "auth0://datarhei.eu.auth0.com?aud=https%3A%2F%2Fdatarhei.com%2Fcore&user=auth0%7Cxxx", v.String()) + require.NoError(t, v.Validate()) +} diff --git a/config/value/network.go b/config/value/network.go new file mode 100644 index 00000000..62a9257d --- /dev/null +++ b/config/value/network.go @@ -0,0 +1,277 @@ +package value + +import ( + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "strconv" + "strings" + + "github.com/datarhei/core/v16/http/cors" +) + +// address (host?:port) + +type Address string + +func NewAddress(p *string, val string) *Address { + *p = val + + return (*Address)(p) +} + +func (s *Address) Set(val string) error { + // Check if the new value is only a port number + re := regexp.MustCompile("^[0-9]+$") + if re.MatchString(val) { + val = ":" + val + } + + *s = Address(val) + return nil +} + +func (s *Address) String() string { + return string(*s) +} + +func (s *Address) 
Validate() error { + _, port, err := net.SplitHostPort(string(*s)) + if err != nil { + return err + } + + re := regexp.MustCompile("^[0-9]+$") + if !re.MatchString(port) { + return fmt.Errorf("the port must be numerical") + } + + return nil +} + +func (s *Address) IsEmpty() bool { + return s.Validate() != nil +} + +// array of CIDR notation IP addresses + +type CIDRList struct { + p *[]string + separator string +} + +func NewCIDRList(p *[]string, val []string, separator string) *CIDRList { + v := &CIDRList{ + p: p, + separator: separator, + } + + *p = val + + return v +} + +func (s *CIDRList) Set(val string) error { + list := []string{} + + for _, elm := range strings.Split(val, s.separator) { + elm = strings.TrimSpace(elm) + if len(elm) != 0 { + list = append(list, elm) + } + } + + *s.p = list + + return nil +} + +func (s *CIDRList) String() string { + if s.IsEmpty() { + return "(empty)" + } + + return strings.Join(*s.p, s.separator) +} + +func (s *CIDRList) Validate() error { + for _, cidr := range *s.p { + _, _, err := net.ParseCIDR(cidr) + if err != nil { + return err + } + } + + return nil +} + +func (s *CIDRList) IsEmpty() bool { + return len(*s.p) == 0 +} + +// array of origins for CORS + +type CORSOrigins struct { + p *[]string + separator string +} + +func NewCORSOrigins(p *[]string, val []string, separator string) *CORSOrigins { + v := &CORSOrigins{ + p: p, + separator: separator, + } + + *p = val + + return v +} + +func (s *CORSOrigins) Set(val string) error { + list := []string{} + + for _, elm := range strings.Split(val, s.separator) { + elm = strings.TrimSpace(elm) + if len(elm) != 0 { + list = append(list, elm) + } + } + + *s.p = list + + return nil +} + +func (s *CORSOrigins) String() string { + if s.IsEmpty() { + return "(empty)" + } + + return strings.Join(*s.p, s.separator) +} + +func (s *CORSOrigins) Validate() error { + return cors.Validate(*s.p) +} + +func (s *CORSOrigins) IsEmpty() bool { + return len(*s.p) == 0 +} + +// network port + +type Port int + +func NewPort(p *int, val int) *Port { + *p = val + + return (*Port)(p) +} + +func (i *Port) Set(val string) error { + v, err := strconv.Atoi(val) + if err != nil { + return err + } + *i = Port(v) + return nil +} + +func (i *Port) String() string { + return strconv.Itoa(int(*i)) +} + +func (i *Port) Validate() error { + val := int(*i) + + if val < 0 || val >= (1<<16) { + return fmt.Errorf("%d is not in the range of [0, %d]", val, 1<<16-1) + } + + return nil +} + +func (i *Port) IsEmpty() bool { + return int(*i) == 0 +} + +// url + +type URL string + +func NewURL(p *string, val string) *URL { + *p = val + + return (*URL)(p) +} + +func (u *URL) Set(val string) error { + *u = URL(val) + return nil +} + +func (u *URL) String() string { + return string(*u) +} + +func (u *URL) Validate() error { + val := string(*u) + + if len(val) == 0 { + return nil + } + + URL, err := url.Parse(val) + if err != nil { + return fmt.Errorf("%s is not a valid URL", val) + } + + if len(URL.Scheme) == 0 || len(URL.Host) == 0 { + return fmt.Errorf("%s is not a valid URL", val) + } + + return nil +} + +func (u *URL) IsEmpty() bool { + return len(string(*u)) == 0 +} + +// email address + +type Email string + +func NewEmail(p *string, val string) *Email { + *p = val + + return (*Email)(p) +} + +func (s *Email) Set(val string) error { + addr, err := mail.ParseAddress(val) + if err != nil { + return err + } + + *s = Email(addr.Address) + return nil +} + +func (s *Email) String() string { + return string(*s) +} + +func (s *Email) Validate() error { + if 
len(s.String()) == 0 { + return nil + } + + _, err := mail.ParseAddress(s.String()) + return err +} + +func (s *Email) IsEmpty() bool { + return len(string(*s)) == 0 +} diff --git a/config/value/network_test.go b/config/value/network_test.go new file mode 100644 index 00000000..add7190a --- /dev/null +++ b/config/value/network_test.go @@ -0,0 +1,127 @@ +package value + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestAddressValue(t *testing.T) { + var x string + + val := NewAddress(&x, ":8080") + + require.Equal(t, ":8080", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "foobaz:9090" + + require.Equal(t, "foobaz:9090", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("fooboz:7070") + + require.Equal(t, "fooboz:7070", x) +} + +func TestCIDRListValue(t *testing.T) { + var x []string + + val := NewCIDRList(&x, []string{}, " ") + + require.Equal(t, "(empty)", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, true, val.IsEmpty()) + + x = []string{"127.0.0.1/32", "127.0.0.2/32"} + + require.Equal(t, "127.0.0.1/32 127.0.0.2/32", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("129.0.0.1/32 129.0.0.2/32") + + require.Equal(t, []string{"129.0.0.1/32", "129.0.0.2/32"}, x) +} + +func TestCORSOriginsValue(t *testing.T) { + var x []string + + val := NewCORSOrigins(&x, []string{}, " ") + + require.Equal(t, "(empty)", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, true, val.IsEmpty()) + + x = []string{"*"} + + require.Equal(t, "*", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("http://localhost") + + require.Equal(t, []string{"http://localhost"}, x) +} + +func TestPortValue(t *testing.T) { + var x int + + val := NewPort(&x, 11) + + require.Equal(t, "11", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = 42 + + require.Equal(t, "42", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("77") + + require.Equal(t, int(77), x) +} + +func TestURLValue(t *testing.T) { + var x string + + val := NewURL(&x, "http://localhost/foobar") + + require.Equal(t, "http://localhost/foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "http://localhost:8080/foobar" + + require.Equal(t, "http://localhost:8080/foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("http://localhost:8080/fooboz/foobaz") + + require.Equal(t, "http://localhost:8080/fooboz/foobaz", x) +} + +func TestEmailValue(t *testing.T) { + var x string + + val := NewEmail(&x, "foobar@example.com") + + require.Equal(t, "foobar@example.com", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "foobar+baz@example.com" + + require.Equal(t, "foobar+baz@example.com", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("foobar@sub.example.com") + + require.Equal(t, "foobar@sub.example.com", x) +} diff --git a/config/value/os.go b/config/value/os.go new file mode 100644 index 00000000..6f57c1b3 --- /dev/null +++ b/config/value/os.go @@ -0,0 +1,238 @@ +package value + +import ( + "fmt" + "path/filepath" + "strings" + + 
"github.com/datarhei/core/v16/io/fs" +) + +// must directory + +type MustDir struct { + p *string + fs fs.Filesystem +} + +func NewMustDir(p *string, val string, fs fs.Filesystem) *MustDir { + v := &MustDir{ + p: p, + fs: fs, + } + + *p = val + + return v +} + +func (u *MustDir) Set(val string) error { + *u.p = val + return nil +} + +func (u *MustDir) String() string { + return *u.p +} + +func (u *MustDir) Validate() error { + val := *u.p + + if len(strings.TrimSpace(val)) == 0 { + return fmt.Errorf("path name must not be empty") + } + + if err := u.fs.MkdirAll(val, 0750); err != nil { + return fmt.Errorf("%s can't be created (%w)", val, err) + } + + finfo, err := u.fs.Stat(val) + if err != nil { + return fmt.Errorf("%s does not exist", val) + } + + if !finfo.IsDir() { + return fmt.Errorf("%s is not a directory", val) + } + + return nil +} + +func (u *MustDir) IsEmpty() bool { + return len(*u.p) == 0 +} + +// directory + +type Dir struct { + p *string + fs fs.Filesystem +} + +func NewDir(p *string, val string, fs fs.Filesystem) *Dir { + v := &Dir{ + p: p, + fs: fs, + } + + *p = val + + return v +} + +func (u *Dir) Set(val string) error { + *u.p = val + return nil +} + +func (u *Dir) String() string { + return *u.p +} + +func (u *Dir) Validate() error { + val := *u.p + + if len(strings.TrimSpace(val)) == 0 { + return nil + } + + finfo, err := u.fs.Stat(val) + if err != nil { + return fmt.Errorf("%s does not exist", val) + } + + if !finfo.IsDir() { + return fmt.Errorf("%s is not a directory", val) + } + + return nil +} + +func (u *Dir) IsEmpty() bool { + return len(*u.p) == 0 +} + +// executable + +type Exec struct { + p *string + fs fs.Filesystem +} + +func NewExec(p *string, val string, fs fs.Filesystem) *Exec { + v := &Exec{ + p: p, + fs: fs, + } + + *p = val + + return v +} + +func (u *Exec) Set(val string) error { + *u.p = val + return nil +} + +func (u *Exec) String() string { + return *u.p +} + +func (u *Exec) Validate() error { + val := *u.p + + _, err := u.fs.LookPath(val) + if err != nil { + return fmt.Errorf("%s not found or is not executable", val) + } + + return nil +} + +func (u *Exec) IsEmpty() bool { + return len(*u.p) == 0 +} + +// regular file + +type File struct { + p *string + fs fs.Filesystem +} + +func NewFile(p *string, val string, fs fs.Filesystem) *File { + v := &File{ + p: p, + fs: fs, + } + + *p = val + + return v +} + +func (u *File) Set(val string) error { + *u.p = val + return nil +} + +func (u *File) String() string { + return *u.p +} + +func (u *File) Validate() error { + val := *u.p + + if len(val) == 0 { + return nil + } + + finfo, err := u.fs.Stat(val) + if err != nil { + return fmt.Errorf("%s does not exist", val) + } + + if !finfo.Mode().IsRegular() { + return fmt.Errorf("%s is not a regular file", val) + } + + return nil +} + +func (u *File) IsEmpty() bool { + return len(*u.p) == 0 +} + +// absolute path + +type AbsolutePath string + +func NewAbsolutePath(p *string, val string) *AbsolutePath { + *p = filepath.Clean(val) + + return (*AbsolutePath)(p) +} + +func (s *AbsolutePath) Set(val string) error { + *s = AbsolutePath(filepath.Clean(val)) + return nil +} + +func (s *AbsolutePath) String() string { + return string(*s) +} + +func (s *AbsolutePath) Validate() error { + path := string(*s) + + if !filepath.IsAbs(path) { + return fmt.Errorf("%s is not an absolute path", path) + } + + return nil +} + +func (s *AbsolutePath) IsEmpty() bool { + return len(string(*s)) == 0 +} diff --git a/config/value/os_test.go b/config/value/os_test.go new file mode 100644 
index 00000000..1706ba94 --- /dev/null +++ b/config/value/os_test.go @@ -0,0 +1,142 @@ +package value + +import ( + "testing" + + "github.com/datarhei/core/v16/io/fs" + "github.com/stretchr/testify/require" +) + +func TestMustDirValue(t *testing.T) { + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + _, err = memfs.Stat("/foobar") + require.Error(t, err) + + var x string + + val := NewMustDir(&x, "./foobar", memfs) + + require.Equal(t, "./foobar", val.String()) + require.NoError(t, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + info, err := memfs.Stat("/foobar") + require.NoError(t, err) + require.True(t, info.IsDir()) + + x = "/bar/foo" + + require.Equal(t, "/bar/foo", val.String()) + + _, err = memfs.Stat("/bar/foo") + require.Error(t, err) + + require.NoError(t, val.Validate()) + + info, err = memfs.Stat("/bar/foo") + require.NoError(t, err) + require.True(t, info.IsDir()) + + memfs.WriteFile("/foo/bar", []byte("hello")) + + val.Set("/foo/bar") + + require.Error(t, val.Validate()) +} + +func TestDirValue(t *testing.T) { + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + var x string + + val := NewDir(&x, "/foobar", memfs) + + require.Equal(t, "/foobar", val.String()) + require.Error(t, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + err = memfs.MkdirAll("/foobar", 0755) + require.NoError(t, err) + + require.NoError(t, val.Validate()) + + _, _, err = memfs.WriteFile("/foo/bar", []byte("hello")) + require.NoError(t, err) + + val.Set("/foo/bar") + + require.Error(t, val.Validate()) +} + +func TestFileValue(t *testing.T) { + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + var x string + + val := NewFile(&x, "/foobar", memfs) + + require.Equal(t, "/foobar", val.String()) + require.Error(t, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + _, _, err = memfs.WriteFile("/foobar", []byte("hello")) + require.NoError(t, err) + + require.NoError(t, val.Validate()) + + err = memfs.MkdirAll("/foo/bar", 0755) + require.NoError(t, err) + + val.Set("/foo/bar") + + require.Error(t, val.Validate()) +} + +func TestExecValue(t *testing.T) { + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + var x string + + val := NewExec(&x, "/foobar", memfs) + + require.Equal(t, "/foobar", val.String()) + require.Error(t, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + _, _, err = memfs.WriteFile("/foobar", []byte("hello")) + require.NoError(t, err) + + require.NoError(t, val.Validate()) + + err = memfs.MkdirAll("/foo/bar", 0755) + require.NoError(t, err) + + val.Set("/foo/bar") + + require.Error(t, val.Validate()) +} + +func TestAbsolutePathValue(t *testing.T) { + var x string + + val := NewAbsolutePath(&x, "foobar") + + require.Equal(t, "foobar", val.String()) + require.Error(t, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "/foobaz" + + require.Equal(t, "/foobaz", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("/fooboz") + + require.Equal(t, "/fooboz", x) +} diff --git a/config/value/primitives.go b/config/value/primitives.go new file mode 100644 index 00000000..1dd52a94 --- /dev/null +++ b/config/value/primitives.go @@ -0,0 +1,281 @@ +package value + +import ( + "sort" + "strconv" + "strings" +) + +// string + +type String string + +func NewString(p *string, val string) *String { + *p = val + + return (*String)(p) +} + +func (s *String) Set(val string) error { + *s = 
String(val) + return nil +} + +func (s *String) String() string { + return string(*s) +} + +func (s *String) Validate() error { + return nil +} + +func (s *String) IsEmpty() bool { + return len(string(*s)) == 0 +} + +// array of strings + +type StringList struct { + p *[]string + separator string +} + +func NewStringList(p *[]string, val []string, separator string) *StringList { + v := &StringList{ + p: p, + separator: separator, + } + + *p = val + + return v +} + +func (s *StringList) Set(val string) error { + list := []string{} + + for _, elm := range strings.Split(val, s.separator) { + elm = strings.TrimSpace(elm) + if len(elm) != 0 { + list = append(list, elm) + } + } + + *s.p = list + + return nil +} + +func (s *StringList) String() string { + if s.IsEmpty() { + return "(empty)" + } + + return strings.Join(*s.p, s.separator) +} + +func (s *StringList) Validate() error { + return nil +} + +func (s *StringList) IsEmpty() bool { + return len(*s.p) == 0 +} + +// map of strings to strings + +type StringMapString struct { + p *map[string]string +} + +func NewStringMapString(p *map[string]string, val map[string]string) *StringMapString { + v := &StringMapString{ + p: p, + } + + if *p == nil { + *p = make(map[string]string) + } + + if val != nil { + *p = val + } + + return v +} + +func (s *StringMapString) Set(val string) error { + mappings := make(map[string]string) + + for _, elm := range strings.Split(val, " ") { + elm = strings.TrimSpace(elm) + if len(elm) == 0 { + continue + } + + mapping := strings.SplitN(elm, ":", 2) + + // Skip entries without a ":" separator; indexing mapping[1] would panic otherwise + if len(mapping) != 2 { + continue + } + + mappings[mapping[0]] = mapping[1] + } + + *s.p = mappings + + return nil +} + +func (s *StringMapString) String() string { + if s.IsEmpty() { + return "(empty)" + } + + sms := *s.p + + keys := []string{} + for k := range sms { + keys = append(keys, k) + } + + sort.Strings(keys) + + mappings := make([]string, len(*s.p)) + + i := 0 + for _, k := range keys { + mappings[i] = k + ":" + sms[k] + i++ + } + + return strings.Join(mappings, " ") +} + +func (s *StringMapString) Validate() error { + return nil +} + +func (s *StringMapString) IsEmpty() bool { + return len(*s.p) == 0 +} + +// boolean + +type Bool bool + +func NewBool(p *bool, val bool) *Bool { + *p = val + + return (*Bool)(p) +} + +func (b *Bool) Set(val string) error { + v, err := strconv.ParseBool(val) + if err != nil { + return err + } + *b = Bool(v) + return nil +} + +func (b *Bool) String() string { + return strconv.FormatBool(bool(*b)) +} + +func (b *Bool) Validate() error { + return nil +} + +func (b *Bool) IsEmpty() bool { + return !bool(*b) +} + +// int + +type Int int + +func NewInt(p *int, val int) *Int { + *p = val + + return (*Int)(p) +} + +func (i *Int) Set(val string) error { + v, err := strconv.Atoi(val) + if err != nil { + return err + } + *i = Int(v) + return nil +} + +func (i *Int) String() string { + return strconv.Itoa(int(*i)) +} + +func (i *Int) Validate() error { + return nil +} + +func (i *Int) IsEmpty() bool { + return int(*i) == 0 +} + +// int64 + +type Int64 int64 + +func NewInt64(p *int64, val int64) *Int64 { + *p = val + + return (*Int64)(p) +} + +func (u *Int64) Set(val string) error { + v, err := strconv.ParseInt(val, 0, 64) + if err != nil { + return err + } + *u = Int64(v) + return nil +} + +func (u *Int64) String() string { + return strconv.FormatInt(int64(*u), 10) +} + +func (u *Int64) Validate() error { + return nil +} + +func (u *Int64) IsEmpty() bool { + return int64(*u) == 0 +} + +// uint64 + +type Uint64 uint64 + +func NewUint64(p *uint64, val uint64) *Uint64 { + *p = val + 
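+ // As with the other scalar wrappers in this file, the returned value is a plain pointer conversion of the caller's variable, so Set, String, and IsEmpty operate on that variable directly.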
+ return (*Uint64)(p) +} + +func (u *Uint64) Set(val string) error { + v, err := strconv.ParseUint(val, 0, 64) + if err != nil { + return err + } + *u = Uint64(v) + return nil +} + +func (u *Uint64) String() string { + return strconv.FormatUint(uint64(*u), 10) +} + +func (u *Uint64) Validate() error { + return nil +} + +func (u *Uint64) IsEmpty() bool { + return uint64(*u) == 0 +} diff --git a/config/value/primitives_test.go b/config/value/primitives_test.go new file mode 100644 index 00000000..4b815b90 --- /dev/null +++ b/config/value/primitives_test.go @@ -0,0 +1,147 @@ +package value + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestStringValue(t *testing.T) { + var x string + + val := NewString(&x, "foobar") + + require.Equal(t, "foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = "foobaz" + + require.Equal(t, "foobaz", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("fooboz") + + require.Equal(t, "fooboz", x) +} + +func TestStringListValue(t *testing.T) { + var x []string + + val := NewStringList(&x, []string{"foobar"}, " ") + + require.Equal(t, "foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = []string{"foobar", "foobaz"} + + require.Equal(t, "foobar foobaz", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("fooboz foobar") + + require.Equal(t, []string{"fooboz", "foobar"}, x) +} + +func TestStringMapStringValue(t *testing.T) { + var x map[string]string + + val := NewStringMapString(&x, map[string]string{"a": "foobar"}) + + require.Equal(t, "a:foobar", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = map[string]string{"a": "foobar", "b": "foobaz"} + + require.Equal(t, "a:foobar b:foobaz", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("x:fooboz y:foobar") + + require.Equal(t, map[string]string{"x": "fooboz", "y": "foobar"}, x) +} + +func TestBoolValue(t *testing.T) { + var x bool + + val := NewBool(&x, false) + + require.Equal(t, "false", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, true, val.IsEmpty()) + + x = true + + require.Equal(t, "true", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("false") + + require.Equal(t, false, x) +} + +func TestIntValue(t *testing.T) { + var x int + + val := NewInt(&x, 11) + + require.Equal(t, "11", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = 42 + + require.Equal(t, "42", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("77") + + require.Equal(t, int(77), x) +} + +func TestInt64Value(t *testing.T) { + var x int64 + + val := NewInt64(&x, 11) + + require.Equal(t, "11", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = 42 + + require.Equal(t, "42", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("77") + + require.Equal(t, int64(77), x) +} + +func TestUint64Value(t *testing.T) { + var x uint64 + + val := NewUint64(&x, 11) + + require.Equal(t, "11", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = 42 + + 
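// The wrapper aliases x, so assigning to x directly is immediately visible through val. +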
require.Equal(t, "42", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("77") + + require.Equal(t, uint64(77), x) +} diff --git a/config/value/s3.go b/config/value/s3.go new file mode 100644 index 00000000..a85a0838 --- /dev/null +++ b/config/value/s3.go @@ -0,0 +1,179 @@ +package value + +import ( + "fmt" + "net/url" + "strings" + + "golang.org/x/net/publicsuffix" +) + +// array of s3 storages +// https://access_key_id:secret_access_id@region.endpoint/bucket?name=aaa&mount=/abc&username=xxx&password=yyy + +type S3Storage struct { + Name string `json:"name"` + Mountpoint string `json:"mountpoint"` + Auth struct { + Enable bool `json:"enable"` + Username string `json:"username"` + Password string `json:"password"` + } `json:"auth"` + Endpoint string `json:"endpoint"` + AccessKeyID string `json:"access_key_id"` + SecretAccessKey string `json:"secret_access_key"` + Bucket string `json:"bucket"` + Region string `json:"region"` + UseSSL bool `json:"use_ssl"` +} + +func (t *S3Storage) String() string { + u := url.URL{} + + if t.UseSSL { + u.Scheme = "https" + } else { + u.Scheme = "http" + } + + u.User = url.UserPassword(t.AccessKeyID, "---") + + u.Host = t.Endpoint + + if len(t.Region) != 0 { + u.Host = t.Region + "." + u.Host + } + + if len(t.Bucket) != 0 { + u.Path = "/" + t.Bucket + } + + v := url.Values{} + v.Set("name", t.Name) + v.Set("mountpoint", t.Mountpoint) + + if t.Auth.Enable { + if len(t.Auth.Username) != 0 { + v.Set("username", t.Auth.Username) + } + + if len(t.Auth.Password) != 0 { + v.Set("password", "---") + } + } + + u.RawQuery = v.Encode() + + return u.String() +} + +type s3StorageListValue struct { + p *[]S3Storage + separator string +} + +func NewS3StorageListValue(p *[]S3Storage, val []S3Storage, separator string) *s3StorageListValue { + v := &s3StorageListValue{ + p: p, + separator: separator, + } + + *p = val + return v +} + +func (s *s3StorageListValue) Set(val string) error { + list := []S3Storage{} + + for _, elm := range strings.Split(val, s.separator) { + u, err := url.Parse(elm) + if err != nil { + return fmt.Errorf("invalid S3 storage URL (%s): %w", elm, err) + } + + t := S3Storage{ + Name: u.Query().Get("name"), + Mountpoint: u.Query().Get("mountpoint"), + AccessKeyID: u.User.Username(), + } + + hostname := u.Hostname() + port := u.Port() + + domain, err := publicsuffix.EffectiveTLDPlusOne(hostname) + if err != nil { + return fmt.Errorf("invalid eTLD (%s): %w", hostname, err) + } + + t.Endpoint = domain + if len(port) != 0 { + t.Endpoint += ":" + port + } + + region := strings.TrimSuffix(hostname, domain) + if len(region) != 0 { + t.Region = strings.TrimSuffix(region, ".") + } + + secret, ok := u.User.Password() + if ok { + t.SecretAccessKey = secret + } + + t.Bucket = strings.TrimPrefix(u.Path, "/") + + if u.Scheme == "https" { + t.UseSSL = true + } + + if u.Query().Has("username") || u.Query().Has("password") { + t.Auth.Enable = true + t.Auth.Username = u.Query().Get("username") + t.Auth.Username = u.Query().Get("password") + } + + list = append(list, t) + } + + *s.p = list + + return nil +} + +func (s *s3StorageListValue) String() string { + if s.IsEmpty() { + return "(empty)" + } + + list := []string{} + + for _, t := range *s.p { + list = append(list, t.String()) + } + + return strings.Join(list, s.separator) +} + +func (s *s3StorageListValue) Validate() error { + for i, t := range *s.p { + if len(t.Name) == 0 { + return fmt.Errorf("the name for s3 storage %d is missing", i) + } + + if 
len(t.Mountpoint) == 0 { + return fmt.Errorf("the mountpoint for s3 storage %d is missing", i) + } + + if t.Auth.Enable { + if len(t.Auth.Username) == 0 && len(t.Auth.Password) == 0 { + return fmt.Errorf("auth is enabled, but no username and password are set for s3 storage %d", i) + } + } + } + + return nil +} + +func (s *s3StorageListValue) IsEmpty() bool { + return len(*s.p) == 0 +} diff --git a/config/value/time.go b/config/value/time.go new file mode 100644 index 00000000..7fe3fa71 --- /dev/null +++ b/config/value/time.go @@ -0,0 +1,36 @@ +package value + +import "time" + +// time + +type Time time.Time + +func NewTime(p *time.Time, val time.Time) *Time { + *p = val + + return (*Time)(p) +} + +func (u *Time) Set(val string) error { + v, err := time.Parse(time.RFC3339, val) + if err != nil { + return err + } + *u = Time(v) + return nil +} + +func (u *Time) String() string { + v := time.Time(*u) + return v.Format(time.RFC3339) +} + +func (u *Time) Validate() error { + return nil +} + +func (u *Time) IsEmpty() bool { + v := time.Time(*u) + return v.IsZero() +} diff --git a/config/value/time_test.go b/config/value/time_test.go new file mode 100644 index 00000000..3259d7d2 --- /dev/null +++ b/config/value/time_test.go @@ -0,0 +1,30 @@ +package value + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestTimeValue(t *testing.T) { + var x time.Time + + tm := time.Unix(1257894000, 0).UTC() + + val := NewTime(&x, tm) + + require.Equal(t, "2009-11-10T23:00:00Z", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + x = time.Unix(1257894001, 0).UTC() + + require.Equal(t, "2009-11-10T23:00:01Z", val.String()) + require.Equal(t, nil, val.Validate()) + require.Equal(t, false, val.IsEmpty()) + + val.Set("2009-11-11T23:00:00Z") + + require.Equal(t, time.Time(time.Date(2009, time.November, 11, 23, 0, 0, 0, time.UTC)), x) +} diff --git a/config/value/value.go b/config/value/value.go new file mode 100644 index 00000000..ded8051d --- /dev/null +++ b/config/value/value.go @@ -0,0 +1,21 @@ +package value + +type Value interface { + // String returns a string representation of the value. + String() string + + // Set a new value for the value. Returns an + // error if the given string representation can't + // be transformed to the value. Returns nil + // if the new value has been set. + Set(string) error + + // Validate the value. The returned error will + // indicate what is wrong with the current value. + // Returns nil if the value is OK. + Validate() error + + // IsEmpty returns whether the value represents an empty + // representation for that value. 
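+ // For the concrete types in this package this means, e.g., an empty string, an empty list or map, false, 0, or the zero time.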
diff --git a/config/value/value_test.go b/config/value/value_test.go
new file mode 100644
index 00000000..3f36b17f
--- /dev/null
+++ b/config/value/value_test.go
@@ -0,0 +1,38 @@
+package value
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+type testdata struct {
+	value1 int
+	value2 int
+}
+
+func TestCopyStruct(t *testing.T) {
+	data1 := testdata{}
+
+	NewInt(&data1.value1, 1)
+	NewInt(&data1.value2, 2)
+
+	require.Equal(t, int(1), data1.value1)
+	require.Equal(t, int(2), data1.value2)
+
+	data2 := testdata{}
+
+	val21 := NewInt(&data2.value1, 3)
+	val22 := NewInt(&data2.value2, 4)
+
+	require.Equal(t, int(3), data2.value1)
+	require.Equal(t, int(4), data2.value2)
+
+	data2 = data1
+
+	require.Equal(t, int(1), data2.value1)
+	require.Equal(t, int(2), data2.value2)
+
+	require.Equal(t, "1", val21.String())
+	require.Equal(t, "2", val22.String())
+}
diff --git a/config/vars/vars.go b/config/vars/vars.go
new file mode 100644
index 00000000..35df8941
--- /dev/null
+++ b/config/vars/vars.go
@@ -0,0 +1,216 @@
+package vars
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/datarhei/core/v16/config/value"
+)
+
+type variable struct {
+	value       value.Value // The actual value
+	defVal      string      // The default value in string representation
+	name        string      // A name for this value
+	envName     string      // The environment variable that corresponds to this value
+	envAltNames []string    // Alternative environment variable names
+	description string      // A description for this value
+	required    bool        // Whether a non-empty value is required
+	disguise    bool        // Whether the value should be disguised if printed
+	merged      bool        // Whether this value has been replaced by its corresponding environment variable
+}
+
+type Variable struct {
+	Value       string
+	Name        string
+	EnvName     string
+	Description string
+	Merged      bool
+}
+
+type message struct {
+	message  string   // The log message
+	variable Variable // The config field this message refers to
+	level    string   // The log level for this message
+}
+
+type Variables struct {
+	vars []*variable
+	logs []message
+}
+
+func (vs *Variables) Register(val value.Value, name, envName string, envAltNames []string, description string, required, disguise bool) {
+	vs.vars = append(vs.vars, &variable{
+		value:       val,
+		defVal:      val.String(),
+		name:        name,
+		envName:     envName,
+		envAltNames: envAltNames,
+		description: description,
+		required:    required,
+		disguise:    disguise,
+	})
+}
+
+func (vs *Variables) Transfer(vss *Variables) {
+	for _, v := range vs.vars {
+		if vss.IsMerged(v.name) {
+			v.merged = true
+		}
+	}
+}
+
+func (vs *Variables) SetDefault(name string) {
+	v := vs.findVariable(name)
+	if v == nil {
+		return
+	}
+
+	v.value.Set(v.defVal)
+}
+
+func (vs *Variables) Get(name string) (string, error) {
+	v := vs.findVariable(name)
+	if v == nil {
+		return "", fmt.Errorf("variable not found")
+	}
+
+	return v.value.String(), nil
+}
+
+func (vs *Variables) Set(name, val string) error {
+	v := vs.findVariable(name)
+	if v == nil {
+		return fmt.Errorf("variable not found")
+	}
+
+	return v.value.Set(val)
+}
+
+func (vs *Variables) Log(level, name string, format string, args ...interface{}) {
+	v := vs.findVariable(name)
+	if v == nil {
+		return
+	}
+
+	variable := Variable{
+		Value:       v.value.String(),
+		Name:        v.name,
+		EnvName:     v.envName,
+		Description: v.description,
+		Merged:      v.merged,
+	}
+
+	if v.disguise {
+		variable.Value = "***"
+	}
+
+	l := message{
+		message:  fmt.Sprintf(format, args...),
+		variable: variable,
+		level:    level,
+	}
+
+	vs.logs = 
append(vs.logs, l) +} + +func (vs *Variables) Merge() { + for _, v := range vs.vars { + if len(v.envName) == 0 { + continue + } + + var envval string + var ok bool + + envval, ok = os.LookupEnv(v.envName) + if !ok { + foundAltName := false + + for _, envName := range v.envAltNames { + envval, ok = os.LookupEnv(envName) + if ok { + foundAltName = true + vs.Log("warn", v.name, "deprecated name, please use %s", v.envName) + break + } + } + + if !foundAltName { + continue + } + } + + err := v.value.Set(envval) + if err != nil { + vs.Log("error", v.name, "%s", err.Error()) + } + + v.merged = true + } +} + +func (vs *Variables) IsMerged(name string) bool { + v := vs.findVariable(name) + if v == nil { + return false + } + + return v.merged +} + +func (vs *Variables) Validate() { + for _, v := range vs.vars { + vs.Log("info", v.name, "%s", "") + + err := v.value.Validate() + if err != nil { + vs.Log("error", v.name, "%s", err.Error()) + } + + if v.required && v.value.IsEmpty() { + vs.Log("error", v.name, "a value is required") + } + } +} + +func (vs *Variables) ResetLogs() { + vs.logs = nil +} + +func (vs *Variables) Messages(logger func(level string, v Variable, message string)) { + for _, l := range vs.logs { + logger(l.level, l.variable, l.message) + } +} + +func (vs *Variables) HasErrors() bool { + for _, l := range vs.logs { + if l.level == "error" { + return true + } + } + + return false +} + +func (vs *Variables) Overrides() []string { + overrides := []string{} + + for _, v := range vs.vars { + if v.merged { + overrides = append(overrides, v.name) + } + } + + return overrides +} + +func (vs *Variables) findVariable(name string) *variable { + for _, v := range vs.vars { + if v.name == name { + return v + } + } + + return nil +} diff --git a/config/vars/vars_test.go b/config/vars/vars_test.go new file mode 100644 index 00000000..c41dd77a --- /dev/null +++ b/config/vars/vars_test.go @@ -0,0 +1,248 @@ +package vars + +import ( + "os" + "testing" + + "github.com/datarhei/core/v16/config/value" + + "github.com/stretchr/testify/require" +) + +func TestVars(t *testing.T) { + v1 := Variables{} + + s := "" + + v1.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false) + + require.Equal(t, "foobar", s) + x, _ := v1.Get("string") + require.Equal(t, "foobar", x) + + v := v1.findVariable("string") + v.value.Set("barfoo") + + require.Equal(t, "barfoo", s) + x, _ = v1.Get("string") + require.Equal(t, "barfoo", x) + + v1.Set("string", "foobaz") + + require.Equal(t, "foobaz", s) + x, _ = v1.Get("string") + require.Equal(t, "foobaz", x) + + v1.SetDefault("string") + + require.Equal(t, "foobar", s) + x, _ = v1.Get("string") + require.Equal(t, "foobar", x) +} + +func TestSetDefault(t *testing.T) { + v := Variables{} + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false) + + require.Equal(t, "foobar", s) + + v.Set("string", "foobaz") + + require.Equal(t, "foobaz", s) + + v.SetDefault("strong") + + require.Equal(t, "foobaz", s) + + v.SetDefault("string") + + require.Equal(t, "foobar", s) +} + +func TestGet(t *testing.T) { + v := Variables{} + + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false) + + value, err := v.Get("string") + require.NoError(t, err) + require.Equal(t, "foobar", value) + + value, err = v.Get("strong") + require.Error(t, err) + require.Equal(t, "", value) +} + +func TestSet(t *testing.T) { + v := Variables{} + + s := "" + + v.Register(value.NewString(&s, "foobar"), 
"string", "", nil, "a string", false, false) + + err := v.Set("string", "foobaz") + require.NoError(t, err) + require.Equal(t, "foobaz", s) + + err = v.Set("strong", "fooboz") + require.Error(t, err) + require.Equal(t, "foobaz", s) +} + +func TestLog(t *testing.T) { + v := Variables{} + + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false) + + v.Log("info", "string", "hello %s", "world") + require.Equal(t, 1, len(v.logs)) + + v.Log("info", "strong", "hello %s", "world") + require.Equal(t, 1, len(v.logs)) + + require.Equal(t, "hello world", v.logs[0].message) + require.Equal(t, "info", v.logs[0].level) + require.Equal(t, Variable{ + Value: "foobar", + Name: "string", + EnvName: "", + Description: "a string", + Merged: false, + }, v.logs[0].variable) + + v.ResetLogs() + + require.Equal(t, 0, len(v.logs)) +} + +func TestMerge(t *testing.T) { + v := Variables{} + + s := "" + os.Setenv("CORE_TEST_STRING", "foobaz") + + v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false) + + require.Equal(t, s, "foobar") + + v.Merge() + + require.Equal(t, s, "foobaz") + require.Equal(t, true, v.IsMerged("string")) + require.Equal(t, 0, len(v.logs)) + + os.Unsetenv("CORE_TEST_STRING") +} + +func TestMergeAlt(t *testing.T) { + v := Variables{} + + s := "" + os.Setenv("CORE_TEST_STRING", "foobaz") + + v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRUNG", []string{"CORE_TEST_STRING"}, "a string", false, false) + + require.Equal(t, s, "foobar") + + v.Merge() + + require.Equal(t, s, "foobaz") + require.Equal(t, true, v.IsMerged("string")) + require.Equal(t, 1, len(v.logs)) + + require.Contains(t, v.logs[0].message, "CORE_TEST_STRUNG") + require.Equal(t, "warn", v.logs[0].level) + + os.Unsetenv("CORE_TEST_STRING") +} + +func TestNoMerge(t *testing.T) { + v := Variables{} + + s := "" + os.Setenv("CORE_TEST_STRONG", "foobaz") + + v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false) + + require.Equal(t, s, "foobar") + + v.Merge() + + require.Equal(t, s, "foobar") + require.Equal(t, false, v.IsMerged("string")) + + os.Unsetenv("CORE_TEST_STRONG") +} + +func TestValidate(t *testing.T) { + v := Variables{} + + s1 := "" + s2 := "" + + v.Register(value.NewString(&s1, ""), "string", "", nil, "a string", false, false) + v.Register(value.NewString(&s2, ""), "string", "", nil, "a string", true, false) + + require.Equal(t, s1, "") + require.Equal(t, s2, "") + + require.Equal(t, false, v.HasErrors()) + + v.Validate() + + require.Equal(t, true, v.HasErrors()) + + ninfo := 0 + nerror := 0 + v.Messages(func(level string, v Variable, message string) { + if level == "info" { + ninfo++ + } else if level == "error" { + nerror++ + } + }) + + require.Equal(t, 2, ninfo) + require.Equal(t, 1, nerror) +} + +func TestOverrides(t *testing.T) { + v := Variables{} + + s := "" + os.Setenv("CORE_TEST_STRING", "foobaz") + + v.Register(value.NewString(&s, "foobar"), "string", "CORE_TEST_STRING", nil, "a string", false, false) + v.Merge() + + overrides := v.Overrides() + + require.ElementsMatch(t, []string{"string"}, overrides) +} + +func TestDisquise(t *testing.T) { + v := Variables{} + + s := "" + + v.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, true) + + v.Log("info", "string", "hello %s", "world") + require.Equal(t, 1, len(v.logs)) + + require.Equal(t, "hello world", v.logs[0].message) + require.Equal(t, "info", v.logs[0].level) + require.Equal(t, 
diff --git a/docs/docs.go b/docs/docs.go
index d89e181e..f1b04dd5 100644
--- a/docs/docs.go
+++ b/docs/docs.go
@@ -1,5 +1,4 @@
-// Package docs GENERATED BY SWAG; DO NOT EDIT
-// This file was generated by swaggo/swag
+// Code generated by swaggo/swag. DO NOT EDIT
 package docs
 
 import "github.com/swaggo/swag"
@@ -467,13 +466,16 @@ const docTemplate = `{
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "v16.7.2"
+                ],
                 "summary": "Retrieve the currently active Restreamer configuration",
                 "operationId": "config-3-get",
                 "responses": {
                     "200": {
                         "description": "OK",
                         "schema": {
-                            "$ref": "#/definitions/api.Config"
+                            "$ref": "#/definitions/github_com_datarhei_core_v16_http_api.Config"
                         }
                     }
                 }
@@ -491,6 +493,9 @@ const docTemplate = `{
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "v16.7.2"
+                ],
                 "summary": "Update the current Restreamer configuration",
                 "operationId": "config-3-set",
                 "parameters": [
@@ -533,9 +538,12 @@ const docTemplate = `{
                         "ApiKeyAuth": []
                     }
                 ],
-                "description": "Reload the currently active configuration. This will trigger a restart of the Restreamer.",
+                "description": "Reload the currently active configuration. This will trigger a restart of the Core.",
                 "produces": [
-                    "text/plain"
+                    "application/json"
+                ],
+                "tags": [
+                    "v16.7.2"
                 ],
                 "summary": "Reload the currently active configuration",
                 "operationId": "config-3-reload",
@@ -549,20 +557,59 @@ const docTemplate = `{
                 }
             }
         },
-        "/api/v3/fs/disk": {
+        "/api/v3/fs": {
             "get": {
                 "security": [
                     {
                         "ApiKeyAuth": []
                     }
                 ],
-                "description": "List all files on the filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.",
+                "description": "List all registered filesystems",
                 "produces": [
                     "application/json"
                 ],
-                "summary": "List all files on the filesystem",
-                "operationId": "diskfs-3-list-files",
+                "tags": [
+                    "v16.12.0"
+                ],
+                "summary": "List all registered filesystems",
+                "operationId": "filesystem-3-list",
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "array",
+                            "items": {
+                                "$ref": "#/definitions/api.FilesystemInfo"
+                            }
+                        }
+                    }
+                }
+            }
+        },
+        "/api/v3/fs/{storage}": {
+            "get": {
+                "security": [
+                    {
+                        "ApiKeyAuth": []
+                    }
+                ],
+                "description": "List all files on a filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.",
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "v16.7.2"
+                ],
+                "summary": "List all files on a filesystem",
+                "operationId": "filesystem-3-list-files",
                 "parameters": [
+                    {
+                        "type": "string",
+                        "description": "Name of the filesystem",
+                        "name": "storage",
+                        "in": "path",
+                        "required": true
+                    },
                     {
                         "type": "string",
                         "description": "glob pattern for file names",
                         "name": "glob",
                         "in": "query"
                     },
@@ -595,25 +642,35 @@ const docTemplate = `{
                 }
             }
         },
-        "/api/v3/fs/disk/{path}": {
+        "/api/v3/fs/{storage}/{filepath}": {
            "get": {
                 "security": [
                     {
                         "ApiKeyAuth": []
                     }
                 ],
-                "description": "Fetch a file from the filesystem. 
The contents of that file are returned.", + "description": "Fetch a file from a filesystem", "produces": [ "application/data", "application/json" ], - "summary": "Fetch a file from the filesystem", - "operationId": "diskfs-3-get-file", + "tags": [ + "v16.7.2" + ], + "summary": "Fetch a file from a filesystem", + "operationId": "filesystem-3-get-file", "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "storage", + "in": "path", + "required": true + }, { "type": "string", "description": "Path to file", - "name": "path", + "name": "filepath", "in": "path", "required": true } @@ -645,7 +702,7 @@ const docTemplate = `{ "ApiKeyAuth": [] } ], - "description": "Writes or overwrites a file on the filesystem", + "description": "Writes or overwrites a file on a filesystem", "consumes": [ "application/data" ], @@ -653,13 +710,23 @@ const docTemplate = `{ "text/plain", "application/json" ], - "summary": "Add a file to the filesystem", - "operationId": "diskfs-3-put-file", + "tags": [ + "v16.7.2" + ], + "summary": "Add a file to a filesystem", + "operationId": "filesystem-3-put-file", "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "storage", + "in": "path", + "required": true + }, { "type": "string", "description": "Path to file", - "name": "path", + "name": "filepath", "in": "path", "required": true }, @@ -703,17 +770,27 @@ const docTemplate = `{ "ApiKeyAuth": [] } ], - "description": "Remove a file from the filesystem", + "description": "Remove a file from a filesystem", "produces": [ "text/plain" ], - "summary": "Remove a file from the filesystem", - "operationId": "diskfs-3-delete-file", + "tags": [ + "v16.7.2" + ], + "summary": "Remove a file from a filesystem", + "operationId": "filesystem-3-delete-file", "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "storage", + "in": "path", + "required": true + }, { "type": "string", "description": "Path to file", - "name": "path", + "name": "filepath", "in": "path", "required": true } @@ -734,240 +811,6 @@ const docTemplate = `{ } } }, - "/api/v3/fs/mem": { - "get": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "List all files on the memory filesystem. 
The listing can be ordered by name, size, or date of last modification in ascending or descending order.", - "produces": [ - "application/json" - ], - "summary": "List all files on the memory filesystem", - "operationId": "memfs-3-list-files", - "parameters": [ - { - "type": "string", - "description": "glob pattern for file names", - "name": "glob", - "in": "query" - }, - { - "type": "string", - "description": "none, name, size, lastmod", - "name": "sort", - "in": "query" - }, - { - "type": "string", - "description": "asc, desc", - "name": "order", - "in": "query" - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/api.FileInfo" - } - } - } - } - } - }, - "/api/v3/fs/mem/{path}": { - "get": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Fetch a file from the memory filesystem", - "produces": [ - "application/data", - "application/json" - ], - "summary": "Fetch a file from the memory filesystem", - "operationId": "memfs-3-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "put": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Writes or overwrites a file on the memory filesystem", - "consumes": [ - "application/data" - ], - "produces": [ - "text/plain", - "application/json" - ], - "summary": "Add a file to the memory filesystem", - "operationId": "memfs-3-put-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - }, - { - "description": "File data", - "name": "data", - "in": "body", - "required": true, - "schema": { - "type": "array", - "items": { - "type": "integer" - } - } - } - ], - "responses": { - "201": { - "description": "Created", - "schema": { - "type": "string" - } - }, - "204": { - "description": "No Content", - "schema": { - "type": "string" - } - }, - "507": { - "description": "Insufficient Storage", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "delete": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Remove a file from the memory filesystem", - "produces": [ - "text/plain" - ], - "summary": "Remove a file from the memory filesystem", - "operationId": "memfs-3-delete-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "patch": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Create a link to a file in the memory filesystem. 
The file linked to has to exist.", - "consumes": [ - "application/data" - ], - "produces": [ - "text/plain", - "application/json" - ], - "summary": "Create a link to a file in the memory filesystem", - "operationId": "memfs-3-patch", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - }, - { - "description": "Path to the file to link to", - "name": "url", - "in": "body", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "201": { - "description": "Created", - "schema": { - "type": "string" - } - }, - "400": { - "description": "Bad Request", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } - }, "/api/v3/log": { "get": { "security": [ @@ -979,6 +822,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Application log", "operationId": "log-3", "parameters": [ @@ -1013,6 +859,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Retrieve JSON metadata from a key", "operationId": "metadata-3-get", "parameters": [ @@ -1053,6 +902,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Add JSON metadata under the given key", "operationId": "metadata-3-set", "parameters": [ @@ -1096,6 +948,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.10.0" + ], "summary": "List all known metrics with their description and labels", "operationId": "metrics-3-describe", "responses": { @@ -1123,6 +978,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Query the collected metrics", "operationId": "metrics-3-metrics", "parameters": [ @@ -1163,6 +1021,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "List all known processes", "operationId": "process-3-get-all", "parameters": [ @@ -1222,6 +1083,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Add a new process", "operationId": "process-3-add", "parameters": [ @@ -1262,6 +1126,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "List a process by its ID", "operationId": "process-3-get", "parameters": [ @@ -1300,13 +1167,16 @@ const docTemplate = `{ "ApiKeyAuth": [] } ], - "description": "Replace an existing process", + "description": "Replace an existing process.", "consumes": [ "application/json" ], "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Replace an existing process", "operationId": "process-3-update", "parameters": [ @@ -1358,6 +1228,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Delete a process by its ID", "operationId": "process-3-delete", "parameters": [ @@ -1399,6 +1272,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Issue a command to a process", "operationId": "process-3-command", "parameters": [ @@ -1452,6 +1328,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Get the configuration of a process", "operationId": "process-3-get-config", "parameters": [ @@ -1496,6 +1375,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Retrieve JSON metadata stored with a process under a key", 
"operationId": "process-3-get-process-metadata", "parameters": [ @@ -1543,6 +1425,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Add JSON metadata with a process under the given key", "operationId": "process-3-set-process-metadata", "parameters": [ @@ -1600,6 +1485,9 @@ const docTemplate = `{ "text/plain", "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Encode the errorframe", "operationId": "process-3-playout-errorframencode", "parameters": [ @@ -1655,6 +1543,9 @@ const docTemplate = `{ "text/plain", "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Upload an error frame", "operationId": "process-3-playout-errorframe", "parameters": [ @@ -1727,6 +1618,9 @@ const docTemplate = `{ "image/png", "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Get the last keyframe", "operationId": "process-3-playout-keyframe", "parameters": [ @@ -1785,6 +1679,9 @@ const docTemplate = `{ "produces": [ "text/plain" ], + "tags": [ + "v16.7.2" + ], "summary": "Close the current input stream", "operationId": "process-3-playout-reopen-input", "parameters": [ @@ -1836,6 +1733,9 @@ const docTemplate = `{ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Get the current playout status", "operationId": "process-3-playout-status", "parameters": [ @@ -1891,6 +1791,9 @@ const docTemplate = `{ "text/plain", "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Switch to a new stream", "operationId": "process-3-playout-stream", "parameters": [ @@ -1947,10 +1850,13 @@ const docTemplate = `{ "ApiKeyAuth": [] } ], - "description": "Probe an existing process to get a detailed stream information on the inputs", + "description": "Probe an existing process to get a detailed stream information on the inputs.", "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Probe a process", "operationId": "process-3-probe", "parameters": [ @@ -1979,10 +1885,13 @@ const docTemplate = `{ "ApiKeyAuth": [] } ], - "description": "Get the logs and the log history of a process", + "description": "Get the logs and the log history of a process.", "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Get the logs of a process", "operationId": "process-3-get-report", "parameters": [ @@ -2023,10 +1932,13 @@ const docTemplate = `{ "ApiKeyAuth": [] } ], - "description": "Get the state and progress data of a process", + "description": "Get the state and progress data of a process.", "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Get the state of a process", "operationId": "process-3-get-state", "parameters": [ @@ -2067,10 +1979,13 @@ const docTemplate = `{ "ApiKeyAuth": [] } ], - "description": "List all currently publishing RTMP streams", + "description": "List all currently publishing RTMP streams.", "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "List all publishing RTMP streams", "operationId": "rtmp-3-list-channels", "responses": { @@ -2093,10 +2008,13 @@ const docTemplate = `{ "ApiKeyAuth": [] } ], - "description": "Get a summary of all active and past sessions of the given collector", + "description": "Get a summary of all active and past sessions of the given collector.", "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Get a summary of all active and past sessions", "operationId": "session-3-summary", "parameters": [ @@ -2124,10 +2042,13 @@ const docTemplate = `{ "ApiKeyAuth": [] } 
],
-            "description": "Get a minimal summary of all active sessions (i.e. number of sessions, bandwidth)",
+            "description": "Get a minimal summary of all active sessions (i.e. number of sessions, bandwidth).",
             "produces": [
                 "application/json"
             ],
+            "tags": [
+                "v16.7.2"
+            ],
             "summary": "Get a minimal summary of all active sessions",
             "operationId": "session-3-current",
             "parameters": [
@@ -2155,10 +2076,13 @@
                     "ApiKeyAuth": []
                 }
             ],
-            "description": "List all detected FFmpeg capabilities",
+            "description": "List all detected FFmpeg capabilities.",
             "produces": [
                 "application/json"
            ],
+            "tags": [
+                "v16.7.2"
+            ],
             "summary": "FFmpeg capabilities",
             "operationId": "skills-3",
             "responses": {
@@ -2178,10 +2102,13 @@
                     "ApiKeyAuth": []
                 }
             ],
-            "description": "Refresh the available FFmpeg capabilities",
+            "description": "Refresh the available FFmpeg capabilities.",
             "produces": [
                 "application/json"
             ],
+            "tags": [
+                "v16.7.2"
+            ],
             "summary": "Refresh FFmpeg capabilities",
             "operationId": "skills-3-reload",
             "responses": {
@@ -2205,6 +2132,9 @@
             "produces": [
                 "application/json"
             ],
+            "tags": [
+                "v16.9.0"
+            ],
             "summary": "List all publishing SRT streams",
             "operationId": "srt-3-list-channels",
             "responses": {
@@ -2229,6 +2159,9 @@
             "produces": [
                 "application/json"
             ],
+            "tags": [
+                "v16.7.2"
+            ],
             "summary": "Fetch minimal statistics about a process",
             "operationId": "widget-3-get",
             "parameters": [
@@ -2256,140 +2189,6 @@
                 }
             }
         },
-        "/memfs/{path}": {
-            "get": {
-                "description": "Fetch a file from the memory filesystem",
-                "produces": [
-                    "application/data",
-                    "application/json"
-                ],
-                "summary": "Fetch a file from the memory filesystem",
-                "operationId": "memfs-get-file",
-                "parameters": [
-                    {
-                        "type": "string",
-                        "description": "Path to file",
-                        "name": "path",
-                        "in": "path",
-                        "required": true
-                    }
-                ],
-                "responses": {
-                    "200": {
-                        "description": "OK",
-                        "schema": {
-                            "type": "file"
-                        }
-                    },
-                    "301": {
-                        "description": "Moved Permanently",
-                        "schema": {
-                            "type": "string"
-                        }
-                    },
-                    "404": {
-                        "description": "Not Found",
-                        "schema": {
-                            "$ref": "#/definitions/api.Error"
-                        }
-                    }
-                }
-            },
-            "put": {
-                "security": [
-                    {
-                        "BasicAuth": []
-                    }
-                ],
-                "description": "Writes or overwrites a file on the memory filesystem",
-                "consumes": [
-                    "application/data"
-                ],
-                "produces": [
-                    "text/plain",
-                    "application/json"
-                ],
-                "summary": "Add a file to the memory filesystem",
-                "operationId": "memfs-put-file",
-                "parameters": [
-                    {
-                        "type": "string",
-                        "description": "Path to file",
-                        "name": "path",
-                        "in": "path",
-                        "required": true
-                    },
-                    {
-                        "description": "File data",
-                        "name": "data",
-                        "in": "body",
-                        "required": true,
-                        "schema": {
-                            "type": "array",
-                            "items": {
-                                "type": "integer"
-                            }
-                        }
-                    }
-                ],
-                "responses": {
-                    "201": {
-                        "description": "Created",
-                        "schema": {
-                            "type": "string"
-                        }
-                    },
-                    "204": {
-                        "description": "No Content",
-                        "schema": {
-                            "type": "string"
-                        }
-                    },
-                    "507": {
-                        "description": "Insufficient Storage",
-                        "schema": {
-                            "$ref": "#/definitions/api.Error"
-                        }
-                    }
-                }
-            },
-            "delete": {
-                "security": [
-                    {
-                        "BasicAuth": []
-                    }
-                ],
-                "description": "Remove a file from the memory filesystem",
-                "produces": [
-                    "text/plain"
-                ],
-                "summary": "Remove a file from the memory filesystem",
-                "operationId": "memfs-delete-file",
-                "parameters": [
-                    {
-                        "type": "string",
-                        "description": "Path to file",
-                        "name": "path",
-                        "in": "path",
-                        "required": true
-                    }
-                ],
-                "responses": {
-                    "200": {
- 
"description": "OK", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } - }, "/metrics": { "get": { "description": "Prometheus metrics", @@ -2449,46 +2248,6 @@ const docTemplate = `{ } } } - }, - "/{path}": { - "get": { - "description": "Fetch a file from the filesystem. If the file is a directory, a index.html is returned, if it exists.", - "produces": [ - "application/data", - "application/json" - ], - "summary": "Fetch a file from the filesystem", - "operationId": "diskfs-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } } }, "definitions": { @@ -2496,19 +2255,23 @@ const docTemplate = `{ "type": "object", "properties": { "aqueue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "duplicating": { "type": "boolean" }, "enc": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "gop": { "type": "string" @@ -2523,7 +2286,8 @@ const docTemplate = `{ "$ref": "#/definitions/api.AVstreamIO" }, "queue": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -2531,7 +2295,8 @@ const docTemplate = `{ "type": "object", "properties": { "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { "type": "integer" @@ -2634,29 +2399,6 @@ const docTemplate = `{ } } }, - "api.Config": { - "type": "object", - "properties": { - "config": { - "$ref": "#/definitions/api.ConfigData" - }, - "created_at": { - "type": "string" - }, - "loaded_at": { - "type": "string" - }, - "overrides": { - "type": "array", - "items": { - "type": "string" - } - }, - "updated_at": { - "type": "string" - } - } - }, "api.ConfigData": { "type": "object", "properties": { @@ -2717,7 +2459,7 @@ const docTemplate = `{ "tenants": { "type": "array", "items": { - "$ref": "#/definitions/config.Auth0Tenant" + "$ref": "#/definitions/value.Auth0Tenant" } } } @@ -2750,6 +2492,7 @@ const docTemplate = `{ } }, "created_at": { + "description": "When this config has been persisted", "type": "string" }, "db": { @@ -2764,7 +2507,12 @@ const docTemplate = `{ "type": "object", "properties": { "force_gc": { - "type": "integer" + "type": "integer", + "format": "int" + }, + "memory_limit_mbytes": { + "type": "integer", + "format": "int64" }, "profiling": { "type": "boolean" @@ -2820,15 +2568,18 @@ const docTemplate = `{ "type": "object", "properties": { "max_history": { - "type": "integer" + "type": "integer", + "format": "int" }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" } } }, "max_processes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2863,7 +2614,8 @@ const docTemplate = `{ ] }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" }, "topics": { "type": "array", @@ -2884,11 +2636,13 @@ const docTemplate = `{ }, "interval_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" }, "range_sec": { "description": "seconds", - "type": 
"integer" + "type": "integer", + "format": "int64" } } }, @@ -2902,10 +2656,12 @@ const docTemplate = `{ "type": "boolean" }, "max_port": { - "type": "integer" + "type": "integer", + "format": "int" }, "min_port": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -2979,19 +2735,23 @@ const docTemplate = `{ } }, "max_bitrate_mbit": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "persist": { "type": "boolean" }, "persist_interval_sec": { - "type": "integer" + "type": "integer", + "format": "int" }, "session_timeout_sec": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -3050,13 +2810,16 @@ const docTemplate = `{ "type": "boolean" }, "max_file_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "types": { "type": "object", @@ -3081,7 +2844,8 @@ const docTemplate = `{ "type": "string" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3103,7 +2867,8 @@ const docTemplate = `{ } }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "purge": { "type": "boolean" @@ -3112,6 +2877,12 @@ const docTemplate = `{ }, "mimetypes_file": { "type": "string" + }, + "s3": { + "type": "array", + "items": { + "$ref": "#/definitions/value.S3Storage" + } } } }, @@ -3127,6 +2898,9 @@ const docTemplate = `{ "cert_file": { "type": "string" }, + "email": { + "type": "string" + }, "enable": { "type": "boolean" }, @@ -3139,7 +2913,8 @@ const docTemplate = `{ "type": "boolean" }, "version": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3156,7 +2931,8 @@ const docTemplate = `{ "type": "object", "properties": { "code": { - "type": "integer" + "type": "integer", + "format": "int" }, "details": { "type": "array", @@ -3173,13 +2949,29 @@ const docTemplate = `{ "type": "object", "properties": { "last_modified": { - "type": "integer" + "type": "integer", + "format": "int64" }, "name": { "type": "string" }, "size_bytes": { - "type": "integer" + "type": "integer", + "format": "int64" + } + } + }, + "api.FilesystemInfo": { + "type": "object", + "properties": { + "mount": { + "type": "string" + }, + "name": { + "type": "string" + }, + "type": { + "type": "string" } } }, @@ -3261,7 +3053,8 @@ const docTemplate = `{ "type": "object", "properties": { "interval_sec": { - "type": "integer" + "type": "integer", + "format": "int64" }, "metrics": { "type": "array", @@ -3270,7 +3063,8 @@ const docTemplate = `{ } }, "timerange_sec": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3292,7 +3086,8 @@ const docTemplate = `{ "type": "object", "properties": { "interval_sec": { - "type": "integer" + "type": "integer", + "format": "int64" }, "metrics": { "type": "array", @@ -3301,7 +3096,8 @@ const docTemplate = `{ } }, "timerange_sec": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3340,20 +3136,24 @@ const docTemplate = `{ "type": "object", "properties": { "aqueue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "debug": {}, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "duplicating": { "type": "boolean" }, "enc": { - "type": 
"integer" + "type": "integer", + "format": "uint64" }, "gop": { "type": "string" @@ -3371,10 +3171,12 @@ const docTemplate = `{ "$ref": "#/definitions/api.PlayoutStatusIO" }, "queue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "swap": { "$ref": "#/definitions/api.PlayoutStatusSwap" @@ -3388,10 +3190,12 @@ const docTemplate = `{ "type": "object", "properties": { "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "state": { "type": "string", @@ -3401,7 +3205,8 @@ const docTemplate = `{ ] }, "time": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3446,7 +3251,8 @@ const docTemplate = `{ "type": "number" }, "channels": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "codec": { "type": "string" @@ -3465,10 +3271,12 @@ const docTemplate = `{ "type": "number" }, "height": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "index": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "language": { "type": "string" @@ -3481,10 +3289,12 @@ const docTemplate = `{ }, "sampling_hz": { "description": "audio", - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string" @@ -3494,7 +3304,8 @@ const docTemplate = `{ "type": "string" }, "width": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3505,7 +3316,8 @@ const docTemplate = `{ "$ref": "#/definitions/api.ProcessConfig" }, "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "id": { "type": "string" @@ -3563,13 +3375,15 @@ const docTemplate = `{ "type": "boolean" }, "reconnect_delay_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "reference": { "type": "string" }, "stale_timeout_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string", @@ -3613,10 +3427,12 @@ const docTemplate = `{ ], "properties": { "max_file_age_seconds": { - "type": "integer" + "type": "integer", + "format": "uint" }, "max_files": { - "type": "integer" + "type": "integer", + "format": "uint" }, "pattern": { "type": "string" @@ -3633,10 +3449,12 @@ const docTemplate = `{ "type": "number" }, "memory_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "waitfor_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3644,7 +3462,8 @@ const docTemplate = `{ "type": "object", "properties": { "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "history": { "type": "array", @@ -3673,7 +3492,8 @@ const docTemplate = `{ "type": "object", "properties": { "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "log": { "type": "array", @@ -3711,7 +3531,8 @@ const docTemplate = `{ "type": "string" }, "memory_bytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "order": { "type": "string" @@ -3720,10 +3541,12 @@ const docTemplate = `{ "$ref": "#/definitions/api.Progress" }, "reconnect_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "runtime_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3735,16 +3558,19 @@ const docTemplate = `{ "type": "number" }, "drop": { - "type": "integer" + "type": "integer", + 
"format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "fps": { "type": "number" }, "frame": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "inputs": { "type": "array", @@ -3759,14 +3585,16 @@ const docTemplate = `{ } }, "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "q": { "type": "number" }, "size_kb": { "description": "kbytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "speed": { "type": "number" @@ -3784,14 +3612,19 @@ const docTemplate = `{ }, "avstream": { "description": "avstream", - "$ref": "#/definitions/api.AVstream" + "allOf": [ + { + "$ref": "#/definitions/api.AVstream" + } + ] }, "bitrate_kbit": { "description": "kbit/s", "type": "number" }, "channels": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "codec": { "type": "string" @@ -3799,6 +3632,11 @@ const docTemplate = `{ "coder": { "type": "string" }, + "extradata_size_bytes": { + "description": "bytes", + "type": "integer", + "format": "uint64" + }, "format": { "type": "string" }, @@ -3806,23 +3644,31 @@ const docTemplate = `{ "type": "number" }, "frame": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "height": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "id": { "type": "string" }, "index": { "description": "General", - "type": "integer" + "type": "integer", + "format": "uint64" + }, + "keyframe": { + "type": "integer", + "format": "uint64" }, "layout": { "type": "string" }, "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "pix_fmt": { "description": "Video", @@ -3836,20 +3682,24 @@ const docTemplate = `{ }, "sampling_hz": { "description": "Audio", - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { "description": "kbytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string" }, "width": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3920,7 +3770,8 @@ const docTemplate = `{ } }, "ts": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3929,11 +3780,13 @@ const docTemplate = `{ "properties": { "avail_recv_buf_bytes": { "description": "The available space in the receiver's buffer, in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "avail_send_buf_bytes": { "description": "The available space in the sender's buffer, in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "bandwidth_mbit": { "description": "Estimated bandwidth of the network link, in Mbps", @@ -3941,11 +3794,13 @@ const docTemplate = `{ }, "flight_size_pkt": { "description": "The number of packets in flight", - "type": "integer" + "type": "integer", + "format": "uint64" }, "flow_window_pkt": { "description": "The maximum number of packets that can be \"in flight\"", - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_bandwidth_mbit": { "description": "Transmission bandwidth limit, in Mbps", @@ -3953,11 +3808,13 @@ const docTemplate = `{ }, "mss_bytes": { "description": "Maximum Segment Size (MSS), in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "pkt_recv_avg_belated_time_ms": { "description": "Accumulated difference between the current time and the time-to-play of a packet that is received late", - "type": "integer" + "type": "integer", + "format": "uint64" }, "pkt_send_period_us": 
{ "description": "Current minimum time interval between which consecutive packets are sent, in microseconds", @@ -3965,79 +3822,98 @@ const docTemplate = `{ }, "recv_ack_pkt": { "description": "The total number of received ACK (Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_bytes": { "description": "Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_ms": { "description": "The timespan (msec) of acknowledged packets in the receiver's buffer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_pkt": { "description": "The number of acknowledged packets in receiver's buffer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_bytes": { "description": "Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_drop_bytes": { "description": "Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_drop_pkt": { "description": "The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_km_pkt": { "description": "The total number of received KM (Key Material) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, - "recv_loss__bytes": { + "recv_loss_bytes": { "description": "Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_loss_pkt": { "description": "The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_nak_pkt": { "description": "The total number of received NAK (Negative Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_pkt": { "description": "The total number of received DATA packets, including retransmitted packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_retran_pkts": { "description": "The total number of retransmitted packets registered at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_tsbpd_delay_ms": { "description": "Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_undecrypt_bytes": { "description": "Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_undecrypt_pkt": { "description": "The total number of packets that failed to be decrypted at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_unique_bytes": { "description": "Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, 
"recv_unique_pkt": { "description": "The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "reorder_tolerance_pkt": { "description": "Instant value of the packet reorder tolerance", - "type": "integer" + "type": "integer", + "format": "uint64" }, "rtt_ms": { "description": "Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds", @@ -4045,75 +3921,93 @@ const docTemplate = `{ }, "send_buf_bytes": { "description": "Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_buf_ms": { "description": "The timespan (msec) of packets in the sender's buffer (unacknowledged packets)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_buf_pkt": { "description": "The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_drop_bytes": { "description": "Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_drop_pkt": { "description": "The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_duration_us": { "description": "The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_km_pkt": { "description": "The total number of sent KM (Key Material) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_loss_pkt": { "description": "The total number of data packets considered or reported as lost at the sender side. 
Does not correspond to the packets detected as lost at the receiver side.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_tsbpd_delay_ms": { "description": "Timestamp-based Packet Delivery Delay value of the peer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_ack_pkt": { "description": "The total number of sent ACK (Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_bytes": { "description": "Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_nak_pkt": { "description": "The total number of sent NAK (Negative Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_pkt": { "description": "The total number of sent DATA packets, including retransmitted packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_retrans_bytes": { "description": "Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_retrans_pkt": { "description": "The total number of retransmitted packets sent by the SRT sender", - "type": "integer" + "type": "integer", + "format": "uint64" }, - "sent_unique__bytes": { + "sent_unique_bytes": { "description": "Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_unique_pkt": { "description": "The total number of unique DATA packets sent by the SRT sender", - "type": "integer" + "type": "integer", + "format": "uint64" }, "timestamp_ms": { "description": "The time elapsed, in milliseconds, since the SRT socket has been created", - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4129,13 +4023,16 @@ const docTemplate = `{ "type": "number" }, "bytes_rx": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "bytes_tx": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "extra": { "type": "string" @@ -4164,13 +4061,16 @@ const docTemplate = `{ } }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4178,13 +4078,16 @@ const docTemplate = `{ "type": "object", "properties": { "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4225,10 +4128,12 @@ const docTemplate = `{ "type": "number" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4254,13 +4159,16 @@ const docTemplate = `{ } }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4339,7 +4247,7 @@ const docTemplate = `{ "tenants": { "type": "array", "items": { - 
"$ref": "#/definitions/config.Auth0Tenant" + "$ref": "#/definitions/value.Auth0Tenant" } } } @@ -4372,6 +4280,7 @@ const docTemplate = `{ } }, "created_at": { + "description": "When this config has been persisted", "type": "string" }, "db": { @@ -4386,7 +4295,12 @@ const docTemplate = `{ "type": "object", "properties": { "force_gc": { - "type": "integer" + "type": "integer", + "format": "int" + }, + "memory_limit_mbytes": { + "type": "integer", + "format": "int64" }, "profiling": { "type": "boolean" @@ -4442,15 +4356,18 @@ const docTemplate = `{ "type": "object", "properties": { "max_history": { - "type": "integer" + "type": "integer", + "format": "int" }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" } } }, "max_processes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4485,7 +4402,8 @@ const docTemplate = `{ ] }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" }, "topics": { "type": "array", @@ -4506,11 +4424,13 @@ const docTemplate = `{ }, "interval_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" }, "range_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4524,10 +4444,12 @@ const docTemplate = `{ "type": "boolean" }, "max_port": { - "type": "integer" + "type": "integer", + "format": "int" }, "min_port": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -4601,19 +4523,23 @@ const docTemplate = `{ } }, "max_bitrate_mbit": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "persist": { "type": "boolean" }, "persist_interval_sec": { - "type": "integer" + "type": "integer", + "format": "int" }, "session_timeout_sec": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -4672,13 +4598,16 @@ const docTemplate = `{ "type": "boolean" }, "max_file_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "types": { "type": "object", @@ -4703,7 +4632,8 @@ const docTemplate = `{ "type": "string" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4725,7 +4655,8 @@ const docTemplate = `{ } }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "purge": { "type": "boolean" @@ -4734,6 +4665,12 @@ const docTemplate = `{ }, "mimetypes_file": { "type": "string" + }, + "s3": { + "type": "array", + "items": { + "$ref": "#/definitions/value.S3Storage" + } } } }, @@ -4749,6 +4686,9 @@ const docTemplate = `{ "cert_file": { "type": "string" }, + "email": { + "type": "string" + }, "enable": { "type": "boolean" }, @@ -4761,7 +4701,8 @@ const docTemplate = `{ "type": "boolean" }, "version": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -5018,17 +4959,42 @@ const docTemplate = `{ "type": "object", "properties": { "current_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "total_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "uptime": { "type": "integer" } } }, - "config.Auth0Tenant": { + "github_com_datarhei_core_v16_http_api.Config": { + "type": "object", + "properties": { + "config": { + "$ref": "#/definitions/api.ConfigData" + }, + "created_at": { 
+ "type": "string" + }, + "loaded_at": { + "type": "string" + }, + "overrides": { + "type": "array", + "items": { + "type": "string" + } + }, + "updated_at": { + "type": "string" + } + } + }, + "value.Auth0Tenant": { "type": "object", "properties": { "audience": { @@ -5047,6 +5013,49 @@ const docTemplate = `{ } } } + }, + "value.S3Storage": { + "type": "object", + "properties": { + "access_key_id": { + "type": "string" + }, + "auth": { + "type": "object", + "properties": { + "enable": { + "type": "boolean" + }, + "password": { + "type": "string" + }, + "username": { + "type": "string" + } + } + }, + "bucket": { + "type": "string" + }, + "endpoint": { + "type": "string" + }, + "mountpoint": { + "type": "string" + }, + "name": { + "type": "string" + }, + "region": { + "type": "string" + }, + "secret_access_key": { + "type": "string" + }, + "use_ssl": { + "type": "boolean" + } + } } }, "securityDefinitions": { diff --git a/docs/swagger.json b/docs/swagger.json index 0270e463..7ade08b9 100644 --- a/docs/swagger.json +++ b/docs/swagger.json @@ -459,13 +459,16 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Retrieve the currently active Restreamer configuration", "operationId": "config-3-get", "responses": { "200": { "description": "OK", "schema": { - "$ref": "#/definitions/api.Config" + "$ref": "#/definitions/github_com_datarhei_core_v16_http_api.Config" } } } @@ -483,6 +486,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Update the current Restreamer configuration", "operationId": "config-3-set", "parameters": [ @@ -525,9 +531,12 @@ "ApiKeyAuth": [] } ], - "description": "Reload the currently active configuration. This will trigger a restart of the Restreamer.", + "description": "Reload the currently active configuration. This will trigger a restart of the Core.", "produces": [ - "text/plain" + "application/json" + ], + "tags": [ + "v16.7.2" ], "summary": "Reload the currently active configuration", "operationId": "config-3-reload", @@ -541,20 +550,59 @@ } } }, - "/api/v3/fs/disk": { + "/api/v3/fs": { "get": { "security": [ { "ApiKeyAuth": [] } ], - "description": "List all files on the filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.", + "description": "Listall registered filesystems", "produces": [ "application/json" ], - "summary": "List all files on the filesystem", - "operationId": "diskfs-3-list-files", + "tags": [ + "v16.12.0" + ], + "summary": "List all registered filesystems", + "operationId": "filesystem-3-list", + "responses": { + "200": { + "description": "OK", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/api.FilesystemInfo" + } + } + } + } + } + }, + "/api/v3/fs/{storage}": { + "get": { + "security": [ + { + "ApiKeyAuth": [] + } + ], + "description": "List all files on a filesystem. 
The listing can be ordered by name, size, or date of last modification in ascending or descending order.", + "produces": [ + "application/json" + ], + "tags": [ + "v16.7.2" + ], + "summary": "List all files on a filesystem", + "operationId": "filesystem-3-list-files", "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "storage", + "in": "path", + "required": true + }, { "type": "string", "description": "glob pattern for file names", @@ -587,25 +635,35 @@ } } }, - "/api/v3/fs/disk/{path}": { + "/api/v3/fs/{storage}/{filepath}": { "get": { "security": [ { "ApiKeyAuth": [] } ], - "description": "Fetch a file from the filesystem. The contents of that file are returned.", + "description": "Fetch a file from a filesystem", "produces": [ "application/data", "application/json" ], - "summary": "Fetch a file from the filesystem", - "operationId": "diskfs-3-get-file", + "tags": [ + "v16.7.2" + ], + "summary": "Fetch a file from a filesystem", + "operationId": "filesystem-3-get-file", "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "storage", + "in": "path", + "required": true + }, { "type": "string", "description": "Path to file", - "name": "path", + "name": "filepath", "in": "path", "required": true } @@ -637,7 +695,7 @@ "ApiKeyAuth": [] } ], - "description": "Writes or overwrites a file on the filesystem", + "description": "Writes or overwrites a file on a filesystem", "consumes": [ "application/data" ], @@ -645,13 +703,23 @@ "text/plain", "application/json" ], - "summary": "Add a file to the filesystem", - "operationId": "diskfs-3-put-file", + "tags": [ + "v16.7.2" + ], + "summary": "Add a file to a filesystem", + "operationId": "filesystem-3-put-file", "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "storage", + "in": "path", + "required": true + }, { "type": "string", "description": "Path to file", - "name": "path", + "name": "filepath", "in": "path", "required": true }, @@ -695,17 +763,27 @@ "ApiKeyAuth": [] } ], - "description": "Remove a file from the filesystem", + "description": "Remove a file from a filesystem", "produces": [ "text/plain" ], - "summary": "Remove a file from the filesystem", - "operationId": "diskfs-3-delete-file", + "tags": [ + "v16.7.2" + ], + "summary": "Remove a file from a filesystem", + "operationId": "filesystem-3-delete-file", "parameters": [ + { + "type": "string", + "description": "Name of the filesystem", + "name": "storage", + "in": "path", + "required": true + }, { "type": "string", "description": "Path to file", - "name": "path", + "name": "filepath", "in": "path", "required": true } @@ -726,240 +804,6 @@ } } }, - "/api/v3/fs/mem": { - "get": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "List all files on the memory filesystem. 
The listing can be ordered by name, size, or date of last modification in ascending or descending order.", - "produces": [ - "application/json" - ], - "summary": "List all files on the memory filesystem", - "operationId": "memfs-3-list-files", - "parameters": [ - { - "type": "string", - "description": "glob pattern for file names", - "name": "glob", - "in": "query" - }, - { - "type": "string", - "description": "none, name, size, lastmod", - "name": "sort", - "in": "query" - }, - { - "type": "string", - "description": "asc, desc", - "name": "order", - "in": "query" - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "array", - "items": { - "$ref": "#/definitions/api.FileInfo" - } - } - } - } - } - }, - "/api/v3/fs/mem/{path}": { - "get": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Fetch a file from the memory filesystem", - "produces": [ - "application/data", - "application/json" - ], - "summary": "Fetch a file from the memory filesystem", - "operationId": "memfs-3-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "put": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Writes or overwrites a file on the memory filesystem", - "consumes": [ - "application/data" - ], - "produces": [ - "text/plain", - "application/json" - ], - "summary": "Add a file to the memory filesystem", - "operationId": "memfs-3-put-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - }, - { - "description": "File data", - "name": "data", - "in": "body", - "required": true, - "schema": { - "type": "array", - "items": { - "type": "integer" - } - } - } - ], - "responses": { - "201": { - "description": "Created", - "schema": { - "type": "string" - } - }, - "204": { - "description": "No Content", - "schema": { - "type": "string" - } - }, - "507": { - "description": "Insufficient Storage", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "delete": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Remove a file from the memory filesystem", - "produces": [ - "text/plain" - ], - "summary": "Remove a file from the memory filesystem", - "operationId": "memfs-3-delete-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "patch": { - "security": [ - { - "ApiKeyAuth": [] - } - ], - "description": "Create a link to a file in the memory filesystem. 
The file linked to has to exist.", - "consumes": [ - "application/data" - ], - "produces": [ - "text/plain", - "application/json" - ], - "summary": "Create a link to a file in the memory filesystem", - "operationId": "memfs-3-patch", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - }, - { - "description": "Path to the file to link to", - "name": "url", - "in": "body", - "required": true, - "schema": { - "type": "string" - } - } - ], - "responses": { - "201": { - "description": "Created", - "schema": { - "type": "string" - } - }, - "400": { - "description": "Bad Request", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } - }, "/api/v3/log": { "get": { "security": [ @@ -971,6 +815,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Application log", "operationId": "log-3", "parameters": [ @@ -1005,6 +852,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Retrieve JSON metadata from a key", "operationId": "metadata-3-get", "parameters": [ @@ -1045,6 +895,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Add JSON metadata under the given key", "operationId": "metadata-3-set", "parameters": [ @@ -1088,6 +941,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.10.0" + ], "summary": "List all known metrics with their description and labels", "operationId": "metrics-3-describe", "responses": { @@ -1115,6 +971,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Query the collected metrics", "operationId": "metrics-3-metrics", "parameters": [ @@ -1155,6 +1014,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "List all known processes", "operationId": "process-3-get-all", "parameters": [ @@ -1214,6 +1076,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Add a new process", "operationId": "process-3-add", "parameters": [ @@ -1254,6 +1119,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "List a process by its ID", "operationId": "process-3-get", "parameters": [ @@ -1292,13 +1160,16 @@ "ApiKeyAuth": [] } ], - "description": "Replace an existing process", + "description": "Replace an existing process.", "consumes": [ "application/json" ], "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Replace an existing process", "operationId": "process-3-update", "parameters": [ @@ -1350,6 +1221,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Delete a process by its ID", "operationId": "process-3-delete", "parameters": [ @@ -1391,6 +1265,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Issue a command to a process", "operationId": "process-3-command", "parameters": [ @@ -1444,6 +1321,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Get the configuration of a process", "operationId": "process-3-get-config", "parameters": [ @@ -1488,6 +1368,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Retrieve JSON metadata stored with a process under a key", "operationId": "process-3-get-process-metadata", "parameters": [ @@ -1535,6 +1418,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Add JSON metadata with a process under the given key", "operationId": "process-3-set-process-metadata", "parameters": [ @@ -1592,6 +1478,9 
@@ "text/plain", "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Encode the errorframe", "operationId": "process-3-playout-errorframencode", "parameters": [ @@ -1647,6 +1536,9 @@ "text/plain", "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Upload an error frame", "operationId": "process-3-playout-errorframe", "parameters": [ @@ -1719,6 +1611,9 @@ "image/png", "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Get the last keyframe", "operationId": "process-3-playout-keyframe", "parameters": [ @@ -1777,6 +1672,9 @@ "produces": [ "text/plain" ], + "tags": [ + "v16.7.2" + ], "summary": "Close the current input stream", "operationId": "process-3-playout-reopen-input", "parameters": [ @@ -1828,6 +1726,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Get the current playout status", "operationId": "process-3-playout-status", "parameters": [ @@ -1883,6 +1784,9 @@ "text/plain", "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Switch to a new stream", "operationId": "process-3-playout-stream", "parameters": [ @@ -1939,10 +1843,13 @@ "ApiKeyAuth": [] } ], - "description": "Probe an existing process to get a detailed stream information on the inputs", + "description": "Probe an existing process to get a detailed stream information on the inputs.", "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Probe a process", "operationId": "process-3-probe", "parameters": [ @@ -1971,10 +1878,13 @@ "ApiKeyAuth": [] } ], - "description": "Get the logs and the log history of a process", + "description": "Get the logs and the log history of a process.", "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Get the logs of a process", "operationId": "process-3-get-report", "parameters": [ @@ -2015,10 +1925,13 @@ "ApiKeyAuth": [] } ], - "description": "Get the state and progress data of a process", + "description": "Get the state and progress data of a process.", "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Get the state of a process", "operationId": "process-3-get-state", "parameters": [ @@ -2059,10 +1972,13 @@ "ApiKeyAuth": [] } ], - "description": "List all currently publishing RTMP streams", + "description": "List all currently publishing RTMP streams.", "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "List all publishing RTMP streams", "operationId": "rtmp-3-list-channels", "responses": { @@ -2085,10 +2001,13 @@ "ApiKeyAuth": [] } ], - "description": "Get a summary of all active and past sessions of the given collector", + "description": "Get a summary of all active and past sessions of the given collector.", "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Get a summary of all active and past sessions", "operationId": "session-3-summary", "parameters": [ @@ -2116,10 +2035,13 @@ "ApiKeyAuth": [] } ], - "description": "Get a minimal summary of all active sessions (i.e. number of sessions, bandwidth)", + "description": "Get a minimal summary of all active sessions (i.e. 
number of sessions, bandwidth).", "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Get a minimal summary of all active sessions", "operationId": "session-3-current", "parameters": [ @@ -2147,10 +2069,13 @@ "ApiKeyAuth": [] } ], - "description": "List all detected FFmpeg capabilities", + "description": "List all detected FFmpeg capabilities.", "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "FFmpeg capabilities", "operationId": "skills-3", "responses": { @@ -2170,10 +2095,13 @@ "ApiKeyAuth": [] } ], - "description": "Refresh the available FFmpeg capabilities", + "description": "Refresh the available FFmpeg capabilities.", "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Refresh FFmpeg capabilities", "operationId": "skills-3-reload", "responses": { @@ -2197,6 +2125,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.9.0" + ], "summary": "List all publishing SRT treams", "operationId": "srt-3-list-channels", "responses": { @@ -2221,6 +2152,9 @@ "produces": [ "application/json" ], + "tags": [ + "v16.7.2" + ], "summary": "Fetch minimal statistics about a process", "operationId": "widget-3-get", "parameters": [ @@ -2248,140 +2182,6 @@ } } }, - "/memfs/{path}": { - "get": { - "description": "Fetch a file from the memory filesystem", - "produces": [ - "application/data", - "application/json" - ], - "summary": "Fetch a file from the memory filesystem", - "operationId": "memfs-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "put": { - "security": [ - { - "BasicAuth": [] - } - ], - "description": "Writes or overwrites a file on the memory filesystem", - "consumes": [ - "application/data" - ], - "produces": [ - "text/plain", - "application/json" - ], - "summary": "Add a file to the memory filesystem", - "operationId": "memfs-put-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - }, - { - "description": "File data", - "name": "data", - "in": "body", - "required": true, - "schema": { - "type": "array", - "items": { - "type": "integer" - } - } - } - ], - "responses": { - "201": { - "description": "Created", - "schema": { - "type": "string" - } - }, - "204": { - "description": "No Content", - "schema": { - "type": "string" - } - }, - "507": { - "description": "Insufficient Storage", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - }, - "delete": { - "security": [ - { - "BasicAuth": [] - } - ], - "description": "Remove a file from the memory filesystem", - "produces": [ - "text/plain" - ], - "summary": "Remove a file from the memory filesystem", - "operationId": "memfs-delete-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } - }, "/metrics": { "get": { "description": "Prometheus metrics", @@ -2441,46 +2241,6 @@ } } } - }, - "/{path}": { - 
"get": { - "description": "Fetch a file from the filesystem. If the file is a directory, a index.html is returned, if it exists.", - "produces": [ - "application/data", - "application/json" - ], - "summary": "Fetch a file from the filesystem", - "operationId": "diskfs-get-file", - "parameters": [ - { - "type": "string", - "description": "Path to file", - "name": "path", - "in": "path", - "required": true - } - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "type": "file" - } - }, - "301": { - "description": "Moved Permanently", - "schema": { - "type": "string" - } - }, - "404": { - "description": "Not Found", - "schema": { - "$ref": "#/definitions/api.Error" - } - } - } - } } }, "definitions": { @@ -2488,19 +2248,23 @@ "type": "object", "properties": { "aqueue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "duplicating": { "type": "boolean" }, "enc": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "gop": { "type": "string" @@ -2515,7 +2279,8 @@ "$ref": "#/definitions/api.AVstreamIO" }, "queue": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -2523,7 +2288,8 @@ "type": "object", "properties": { "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { "type": "integer" @@ -2626,29 +2392,6 @@ } } }, - "api.Config": { - "type": "object", - "properties": { - "config": { - "$ref": "#/definitions/api.ConfigData" - }, - "created_at": { - "type": "string" - }, - "loaded_at": { - "type": "string" - }, - "overrides": { - "type": "array", - "items": { - "type": "string" - } - }, - "updated_at": { - "type": "string" - } - } - }, "api.ConfigData": { "type": "object", "properties": { @@ -2709,7 +2452,7 @@ "tenants": { "type": "array", "items": { - "$ref": "#/definitions/config.Auth0Tenant" + "$ref": "#/definitions/value.Auth0Tenant" } } } @@ -2742,6 +2485,7 @@ } }, "created_at": { + "description": "When this config has been persisted", "type": "string" }, "db": { @@ -2756,7 +2500,12 @@ "type": "object", "properties": { "force_gc": { - "type": "integer" + "type": "integer", + "format": "int" + }, + "memory_limit_mbytes": { + "type": "integer", + "format": "int64" }, "profiling": { "type": "boolean" @@ -2812,15 +2561,18 @@ "type": "object", "properties": { "max_history": { - "type": "integer" + "type": "integer", + "format": "int" }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" } } }, "max_processes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2855,7 +2607,8 @@ ] }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" }, "topics": { "type": "array", @@ -2876,11 +2629,13 @@ }, "interval_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" }, "range_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -2894,10 +2649,12 @@ "type": "boolean" }, "max_port": { - "type": "integer" + "type": "integer", + "format": "int" }, "min_port": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -2971,19 +2728,23 @@ } }, "max_bitrate_mbit": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "persist": { "type": "boolean" }, "persist_interval_sec": { - "type": "integer" 
+ "type": "integer", + "format": "int" }, "session_timeout_sec": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -3042,13 +2803,16 @@ "type": "boolean" }, "max_file_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "types": { "type": "object", @@ -3073,7 +2837,8 @@ "type": "string" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3095,7 +2860,8 @@ } }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "purge": { "type": "boolean" @@ -3104,6 +2870,12 @@ }, "mimetypes_file": { "type": "string" + }, + "s3": { + "type": "array", + "items": { + "$ref": "#/definitions/value.S3Storage" + } } } }, @@ -3119,6 +2891,9 @@ "cert_file": { "type": "string" }, + "email": { + "type": "string" + }, "enable": { "type": "boolean" }, @@ -3131,7 +2906,8 @@ "type": "boolean" }, "version": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3148,7 +2924,8 @@ "type": "object", "properties": { "code": { - "type": "integer" + "type": "integer", + "format": "int" }, "details": { "type": "array", @@ -3165,13 +2942,29 @@ "type": "object", "properties": { "last_modified": { - "type": "integer" + "type": "integer", + "format": "int64" }, "name": { "type": "string" }, "size_bytes": { - "type": "integer" + "type": "integer", + "format": "int64" + } + } + }, + "api.FilesystemInfo": { + "type": "object", + "properties": { + "mount": { + "type": "string" + }, + "name": { + "type": "string" + }, + "type": { + "type": "string" } } }, @@ -3253,7 +3046,8 @@ "type": "object", "properties": { "interval_sec": { - "type": "integer" + "type": "integer", + "format": "int64" }, "metrics": { "type": "array", @@ -3262,7 +3056,8 @@ } }, "timerange_sec": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3284,7 +3079,8 @@ "type": "object", "properties": { "interval_sec": { - "type": "integer" + "type": "integer", + "format": "int64" }, "metrics": { "type": "array", @@ -3293,7 +3089,8 @@ } }, "timerange_sec": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3332,20 +3129,24 @@ "type": "object", "properties": { "aqueue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "debug": {}, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "duplicating": { "type": "boolean" }, "enc": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "gop": { "type": "string" @@ -3363,10 +3164,12 @@ "$ref": "#/definitions/api.PlayoutStatusIO" }, "queue": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "swap": { "$ref": "#/definitions/api.PlayoutStatusSwap" @@ -3380,10 +3183,12 @@ "type": "object", "properties": { "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "state": { "type": "string", @@ -3393,7 +3198,8 @@ ] }, "time": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3438,7 +3244,8 @@ "type": "number" }, "channels": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "codec": { "type": "string" @@ -3457,10 +3264,12 @@ "type": 
"number" }, "height": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "index": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "language": { "type": "string" @@ -3473,10 +3282,12 @@ }, "sampling_hz": { "description": "audio", - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string" @@ -3486,7 +3297,8 @@ "type": "string" }, "width": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3497,7 +3309,8 @@ "$ref": "#/definitions/api.ProcessConfig" }, "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "id": { "type": "string" @@ -3555,13 +3368,15 @@ "type": "boolean" }, "reconnect_delay_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "reference": { "type": "string" }, "stale_timeout_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string", @@ -3605,10 +3420,12 @@ ], "properties": { "max_file_age_seconds": { - "type": "integer" + "type": "integer", + "format": "uint" }, "max_files": { - "type": "integer" + "type": "integer", + "format": "uint" }, "pattern": { "type": "string" @@ -3625,10 +3442,12 @@ "type": "number" }, "memory_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "waitfor_seconds": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3636,7 +3455,8 @@ "type": "object", "properties": { "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "history": { "type": "array", @@ -3665,7 +3485,8 @@ "type": "object", "properties": { "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "log": { "type": "array", @@ -3703,7 +3524,8 @@ "type": "string" }, "memory_bytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "order": { "type": "string" @@ -3712,10 +3534,12 @@ "$ref": "#/definitions/api.Progress" }, "reconnect_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "runtime_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3727,16 +3551,19 @@ "type": "number" }, "drop": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "dup": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "fps": { "type": "number" }, "frame": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "inputs": { "type": "array", @@ -3751,14 +3578,16 @@ } }, "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "q": { "type": "number" }, "size_kb": { "description": "kbytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "speed": { "type": "number" @@ -3776,14 +3605,19 @@ }, "avstream": { "description": "avstream", - "$ref": "#/definitions/api.AVstream" + "allOf": [ + { + "$ref": "#/definitions/api.AVstream" + } + ] }, "bitrate_kbit": { "description": "kbit/s", "type": "number" }, "channels": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "codec": { "type": "string" @@ -3791,6 +3625,11 @@ "coder": { "type": "string" }, + "extradata_size_bytes": { + "description": "bytes", + "type": "integer", + "format": "uint64" + }, "format": { "type": "string" }, @@ -3798,23 +3637,31 @@ "type": "number" }, "frame": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "height": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "id": { "type": "string" }, 
"index": { "description": "General", - "type": "integer" + "type": "integer", + "format": "uint64" + }, + "keyframe": { + "type": "integer", + "format": "uint64" }, "layout": { "type": "string" }, "packet": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "pix_fmt": { "description": "Video", @@ -3828,20 +3675,24 @@ }, "sampling_hz": { "description": "Audio", - "type": "integer" + "type": "integer", + "format": "uint64" }, "size_kb": { "description": "kbytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "stream": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "type": { "type": "string" }, "width": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -3912,7 +3763,8 @@ } }, "ts": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -3921,11 +3773,13 @@ "properties": { "avail_recv_buf_bytes": { "description": "The available space in the receiver's buffer, in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "avail_send_buf_bytes": { "description": "The available space in the sender's buffer, in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "bandwidth_mbit": { "description": "Estimated bandwidth of the network link, in Mbps", @@ -3933,11 +3787,13 @@ }, "flight_size_pkt": { "description": "The number of packets in flight", - "type": "integer" + "type": "integer", + "format": "uint64" }, "flow_window_pkt": { "description": "The maximum number of packets that can be \"in flight\"", - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_bandwidth_mbit": { "description": "Transmission bandwidth limit, in Mbps", @@ -3945,11 +3801,13 @@ }, "mss_bytes": { "description": "Maximum Segment Size (MSS), in bytes", - "type": "integer" + "type": "integer", + "format": "uint64" }, "pkt_recv_avg_belated_time_ms": { "description": "Accumulated difference between the current time and the time-to-play of a packet that is received late", - "type": "integer" + "type": "integer", + "format": "uint64" }, "pkt_send_period_us": { "description": "Current minimum time interval between which consecutive packets are sent, in microseconds", @@ -3957,79 +3815,98 @@ }, "recv_ack_pkt": { "description": "The total number of received ACK (Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_bytes": { "description": "Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_ms": { "description": "The timespan (msec) of acknowledged packets in the receiver's buffer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_buf_pkt": { "description": "The number of acknowledged packets in receiver's buffer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_bytes": { "description": "Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_drop_bytes": { "description": "Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_drop_pkt": { "description": "The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets", - "type": "integer" + "type": "integer", + "format": 
"uint64" }, "recv_km_pkt": { "description": "The total number of received KM (Key Material) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, - "recv_loss__bytes": { + "recv_loss_bytes": { "description": "Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_loss_pkt": { "description": "The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_nak_pkt": { "description": "The total number of received NAK (Negative Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_pkt": { "description": "The total number of received DATA packets, including retransmitted packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_retran_pkts": { "description": "The total number of retransmitted packets registered at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_tsbpd_delay_ms": { "description": "Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_undecrypt_bytes": { "description": "Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_undecrypt_pkt": { "description": "The total number of packets that failed to be decrypted at the receiver side", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_unique_bytes": { "description": "Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "recv_unique_pkt": { "description": "The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "reorder_tolerance_pkt": { "description": "Instant value of the packet reorder tolerance", - "type": "integer" + "type": "integer", + "format": "uint64" }, "rtt_ms": { "description": "Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds", @@ -4037,75 +3914,93 @@ }, "send_buf_bytes": { "description": "Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_buf_ms": { "description": "The timespan (msec) of packets in the sender's buffer (unacknowledged packets)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_buf_pkt": { "description": "The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_drop_bytes": { "description": "Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": 
"integer", + "format": "uint64" }, "send_drop_pkt": { "description": "The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_duration_us": { "description": "The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_km_pkt": { "description": "The total number of sent KM (Key Material) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_loss_pkt": { "description": "The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side.", - "type": "integer" + "type": "integer", + "format": "uint64" }, "send_tsbpd_delay_ms": { "description": "Timestamp-based Packet Delivery Delay value of the peer", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_ack_pkt": { "description": "The total number of sent ACK (Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_bytes": { "description": "Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_nak_pkt": { "description": "The total number of sent NAK (Negative Acknowledgement) control packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_pkt": { "description": "The total number of sent DATA packets, including retransmitted packets", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_retrans_bytes": { "description": "Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_retrans_pkt": { "description": "The total number of retransmitted packets sent by the SRT sender", - "type": "integer" + "type": "integer", + "format": "uint64" }, - "sent_unique__bytes": { + "sent_unique_bytes": { "description": "Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT)", - "type": "integer" + "type": "integer", + "format": "uint64" }, "sent_unique_pkt": { "description": "The total number of unique DATA packets sent by the SRT sender", - "type": "integer" + "type": "integer", + "format": "uint64" }, "timestamp_ms": { "description": "The time elapsed, in milliseconds, since the SRT socket has been created", - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4121,13 +4016,16 @@ "type": "number" }, "bytes_rx": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "bytes_tx": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "created_at": { - "type": "integer" + "type": "integer", + "format": "int64" }, "extra": { "type": "string" @@ -4156,13 +4054,16 @@ } }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4170,13 +4071,16 @@ "type": "object", "properties": { "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, 
"traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4217,10 +4121,12 @@ "type": "number" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4246,13 +4152,16 @@ } }, "sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_rx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "traffic_tx_mb": { - "type": "integer" + "type": "integer", + "format": "uint64" } } }, @@ -4331,7 +4240,7 @@ "tenants": { "type": "array", "items": { - "$ref": "#/definitions/config.Auth0Tenant" + "$ref": "#/definitions/value.Auth0Tenant" } } } @@ -4364,6 +4273,7 @@ } }, "created_at": { + "description": "When this config has been persisted", "type": "string" }, "db": { @@ -4378,7 +4288,12 @@ "type": "object", "properties": { "force_gc": { - "type": "integer" + "type": "integer", + "format": "int" + }, + "memory_limit_mbytes": { + "type": "integer", + "format": "int64" }, "profiling": { "type": "boolean" @@ -4434,15 +4349,18 @@ "type": "object", "properties": { "max_history": { - "type": "integer" + "type": "integer", + "format": "int" }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" } } }, "max_processes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4477,7 +4395,8 @@ ] }, "max_lines": { - "type": "integer" + "type": "integer", + "format": "int" }, "topics": { "type": "array", @@ -4498,11 +4417,13 @@ }, "interval_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" }, "range_sec": { "description": "seconds", - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4516,10 +4437,12 @@ "type": "boolean" }, "max_port": { - "type": "integer" + "type": "integer", + "format": "int" }, "min_port": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -4593,19 +4516,23 @@ } }, "max_bitrate_mbit": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "persist": { "type": "boolean" }, "persist_interval_sec": { - "type": "integer" + "type": "integer", + "format": "int" }, "session_timeout_sec": { - "type": "integer" + "type": "integer", + "format": "int" } } }, @@ -4664,13 +4591,16 @@ "type": "boolean" }, "max_file_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "ttl_seconds": { - "type": "integer" + "type": "integer", + "format": "int64" }, "types": { "type": "object", @@ -4695,7 +4625,8 @@ "type": "string" }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -4717,7 +4648,8 @@ } }, "max_size_mbytes": { - "type": "integer" + "type": "integer", + "format": "int64" }, "purge": { "type": "boolean" @@ -4726,6 +4658,12 @@ }, "mimetypes_file": { "type": "string" + }, + "s3": { + "type": "array", + "items": { + "$ref": "#/definitions/value.S3Storage" + } } } }, @@ -4741,6 +4679,9 @@ "cert_file": { "type": "string" }, + "email": { + "type": "string" + }, "enable": { "type": "boolean" }, @@ -4753,7 +4694,8 @@ "type": "boolean" }, "version": { - "type": "integer" + "type": "integer", + "format": "int64" } } }, @@ -5010,17 +4952,42 @@ "type": "object", "properties": { "current_sessions": { - "type": "integer" + "type": "integer", + "format": 
"uint64" }, "total_sessions": { - "type": "integer" + "type": "integer", + "format": "uint64" }, "uptime": { "type": "integer" } } }, - "config.Auth0Tenant": { + "github_com_datarhei_core_v16_http_api.Config": { + "type": "object", + "properties": { + "config": { + "$ref": "#/definitions/api.ConfigData" + }, + "created_at": { + "type": "string" + }, + "loaded_at": { + "type": "string" + }, + "overrides": { + "type": "array", + "items": { + "type": "string" + } + }, + "updated_at": { + "type": "string" + } + } + }, + "value.Auth0Tenant": { "type": "object", "properties": { "audience": { @@ -5039,6 +5006,49 @@ } } } + }, + "value.S3Storage": { + "type": "object", + "properties": { + "access_key_id": { + "type": "string" + }, + "auth": { + "type": "object", + "properties": { + "enable": { + "type": "boolean" + }, + "password": { + "type": "string" + }, + "username": { + "type": "string" + } + } + }, + "bucket": { + "type": "string" + }, + "endpoint": { + "type": "string" + }, + "mountpoint": { + "type": "string" + }, + "name": { + "type": "string" + }, + "region": { + "type": "string" + }, + "secret_access_key": { + "type": "string" + }, + "use_ssl": { + "type": "boolean" + } + } } }, "securityDefinitions": { diff --git a/docs/swagger.yaml b/docs/swagger.yaml index 2c0d3181..93d5695b 100644 --- a/docs/swagger.yaml +++ b/docs/swagger.yaml @@ -3,14 +3,18 @@ definitions: api.AVstream: properties: aqueue: + format: uint64 type: integer drop: + format: uint64 type: integer dup: + format: uint64 type: integer duplicating: type: boolean enc: + format: uint64 type: integer gop: type: string @@ -21,11 +25,13 @@ definitions: output: $ref: '#/definitions/api.AVstreamIO' queue: + format: uint64 type: integer type: object api.AVstreamIO: properties: packet: + format: uint64 type: integer size_kb: type: integer @@ -94,21 +100,6 @@ definitions: required: - command type: object - api.Config: - properties: - config: - $ref: '#/definitions/api.ConfigData' - created_at: - type: string - loaded_at: - type: string - overrides: - items: - type: string - type: array - updated_at: - type: string - type: object api.ConfigData: properties: address: @@ -148,7 +139,7 @@ definitions: type: boolean tenants: items: - $ref: '#/definitions/config.Auth0Tenant' + $ref: '#/definitions/value.Auth0Tenant' type: array type: object disable_localhost: @@ -169,6 +160,7 @@ definitions: type: boolean type: object created_at: + description: When this config has been persisted type: string db: properties: @@ -178,6 +170,10 @@ definitions: debug: properties: force_gc: + format: int + type: integer + memory_limit_mbytes: + format: int64 type: integer profiling: type: boolean @@ -214,11 +210,14 @@ definitions: log: properties: max_history: + format: int type: integer max_lines: + format: int type: integer type: object max_processes: + format: int64 type: integer type: object host: @@ -243,6 +242,7 @@ definitions: - silent type: string max_lines: + format: int type: integer topics: items: @@ -257,9 +257,11 @@ definitions: type: boolean interval_sec: description: seconds + format: int64 type: integer range_sec: description: seconds + format: int64 type: integer type: object name: @@ -269,8 +271,10 @@ definitions: enable: type: boolean max_port: + format: int type: integer min_port: + format: int type: integer type: object router: @@ -319,14 +323,18 @@ definitions: type: string type: array max_bitrate_mbit: + format: uint64 type: integer max_sessions: + format: uint64 type: integer persist: type: boolean persist_interval_sec: + format: int 
type: integer session_timeout_sec: + format: int type: integer type: object srt: @@ -365,10 +373,13 @@ definitions: enable: type: boolean max_file_size_mbytes: + format: uint64 type: integer max_size_mbytes: + format: uint64 type: integer ttl_seconds: + format: int64 type: integer types: properties: @@ -385,6 +396,7 @@ definitions: dir: type: string max_size_mbytes: + format: int64 type: integer type: object memory: @@ -399,12 +411,17 @@ definitions: type: string type: object max_size_mbytes: + format: int64 type: integer purge: type: boolean type: object mimetypes_file: type: string + s3: + items: + $ref: '#/definitions/value.S3Storage' + type: array type: object tls: properties: @@ -414,6 +431,8 @@ definitions: type: boolean cert_file: type: string + email: + type: string enable: type: boolean key_file: @@ -422,6 +441,7 @@ definitions: update_check: type: boolean version: + format: int64 type: integer type: object api.ConfigError: @@ -433,6 +453,7 @@ definitions: api.Error: properties: code: + format: int type: integer details: items: @@ -444,12 +465,23 @@ definitions: api.FileInfo: properties: last_modified: + format: int64 type: integer name: type: string size_bytes: + format: int64 type: integer type: object + api.FilesystemInfo: + properties: + mount: + type: string + name: + type: string + type: + type: string + type: object api.GraphQuery: properties: query: @@ -502,12 +534,14 @@ definitions: api.MetricsQuery: properties: interval_sec: + format: int64 type: integer metrics: items: $ref: '#/definitions/api.MetricsQueryMetric' type: array timerange_sec: + format: int64 type: integer type: object api.MetricsQueryMetric: @@ -522,12 +556,14 @@ definitions: api.MetricsResponse: properties: interval_sec: + format: int64 type: integer metrics: items: $ref: '#/definitions/api.MetricsResponseMetric' type: array timerange_sec: + format: int64 type: integer type: object api.MetricsResponseMetric: @@ -553,15 +589,19 @@ definitions: api.PlayoutStatus: properties: aqueue: + format: uint64 type: integer debug: {} drop: + format: uint64 type: integer dup: + format: uint64 type: integer duplicating: type: boolean enc: + format: uint64 type: integer gop: type: string @@ -574,8 +614,10 @@ definitions: output: $ref: '#/definitions/api.PlayoutStatusIO' queue: + format: uint64 type: integer stream: + format: uint64 type: integer swap: $ref: '#/definitions/api.PlayoutStatusSwap' @@ -585,8 +627,10 @@ definitions: api.PlayoutStatusIO: properties: packet: + format: uint64 type: integer size_kb: + format: uint64 type: integer state: enum: @@ -594,6 +638,7 @@ definitions: - idle type: string time: + format: uint64 type: integer type: object api.PlayoutStatusSwap: @@ -623,6 +668,7 @@ definitions: bitrate_kbps: type: number channels: + format: uint64 type: integer codec: type: string @@ -636,8 +682,10 @@ definitions: description: video type: number height: + format: uint64 type: integer index: + format: uint64 type: integer language: type: string @@ -647,8 +695,10 @@ definitions: type: string sampling_hz: description: audio + format: uint64 type: integer stream: + format: uint64 type: integer type: type: string @@ -656,6 +706,7 @@ definitions: description: common type: string width: + format: uint64 type: integer type: object api.Process: @@ -663,6 +714,7 @@ definitions: config: $ref: '#/definitions/api.ProcessConfig' created_at: + format: int64 type: integer id: type: string @@ -699,10 +751,12 @@ definitions: reconnect: type: boolean reconnect_delay_seconds: + format: uint64 type: integer reference: type: 
string stale_timeout_seconds: + format: uint64 type: integer type: enum: @@ -733,8 +787,10 @@ definitions: api.ProcessConfigIOCleanup: properties: max_file_age_seconds: + format: uint type: integer max_files: + format: uint type: integer pattern: type: string @@ -748,13 +804,16 @@ definitions: cpu_usage: type: number memory_mbytes: + format: uint64 type: integer waitfor_seconds: + format: uint64 type: integer type: object api.ProcessReport: properties: created_at: + format: int64 type: integer history: items: @@ -774,6 +833,7 @@ definitions: api.ProcessReportHistoryEntry: properties: created_at: + format: int64 type: integer log: items: @@ -799,14 +859,17 @@ definitions: last_logline: type: string memory_bytes: + format: uint64 type: integer order: type: string progress: $ref: '#/definitions/api.Progress' reconnect_seconds: + format: int64 type: integer runtime_seconds: + format: int64 type: integer type: object api.Progress: @@ -815,12 +878,15 @@ definitions: description: kbit/s type: number drop: + format: uint64 type: integer dup: + format: uint64 type: integer fps: type: number frame: + format: uint64 type: integer inputs: items: @@ -831,11 +897,13 @@ definitions: $ref: '#/definitions/api.ProgressIO' type: array packet: + format: uint64 type: integer q: type: number size_kb: description: kbytes + format: uint64 type: integer speed: type: number @@ -847,33 +915,46 @@ definitions: address: type: string avstream: - $ref: '#/definitions/api.AVstream' + allOf: + - $ref: '#/definitions/api.AVstream' description: avstream bitrate_kbit: description: kbit/s type: number channels: + format: uint64 type: integer codec: type: string coder: type: string + extradata_size_bytes: + description: bytes + format: uint64 + type: integer format: type: string fps: type: number frame: + format: uint64 type: integer height: + format: uint64 type: integer id: type: string index: description: General + format: uint64 + type: integer + keyframe: + format: uint64 type: integer layout: type: string packet: + format: uint64 type: integer pix_fmt: description: Video @@ -884,15 +965,19 @@ definitions: type: number sampling_hz: description: Audio + format: uint64 type: integer size_kb: description: kbytes + format: uint64 type: integer stream: + format: uint64 type: integer type: type: string width: + format: uint64 type: integer type: object api.RTMPChannel: @@ -939,34 +1024,41 @@ definitions: type: string type: array ts: + format: int64 type: integer type: object api.SRTStatistics: properties: avail_recv_buf_bytes: description: The available space in the receiver's buffer, in bytes + format: uint64 type: integer avail_send_buf_bytes: description: The available space in the sender's buffer, in bytes + format: uint64 type: integer bandwidth_mbit: description: Estimated bandwidth of the network link, in Mbps type: number flight_size_pkt: description: The number of packets in flight + format: uint64 type: integer flow_window_pkt: description: The maximum number of packets that can be "in flight" + format: uint64 type: integer max_bandwidth_mbit: description: Transmission bandwidth limit, in Mbps type: number mss_bytes: description: Maximum Segment Size (MSS), in bytes + format: uint64 type: integer pkt_recv_avg_belated_time_ms: description: Accumulated difference between the current time and the time-to-play of a packet that is received late + format: uint64 type: integer pkt_send_period_us: description: Current minimum time interval between which consecutive packets @@ -974,79 +1066,98 @@ definitions: type: number 
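Besides the blanket format additions, the api.SRTStatistics hunks fix two misspelled JSON keys: recv_loss__bytes becomes recv_loss_bytes and sent_unique__bytes becomes sent_unique_bytes (in the swagger.json hunks above, mirrored in the YAML below). Clients decoding these statistics need matching struct tags; a minimal illustrative subset, not the full api.SRTStatistics:

    package example

    // SRTStats decodes a client-side subset of the SRT statistics
    // using the corrected JSON keys from this diff.
    type SRTStats struct {
        RecvLossBytes   uint64  `json:"recv_loss_bytes"`   // was "recv_loss__bytes"
        SentUniqueBytes uint64  `json:"sent_unique_bytes"` // was "sent_unique__bytes"
        RttMs           float64 `json:"rtt_ms"`
    }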
recv_ack_pkt: description: The total number of received ACK (Acknowledgement) control packets + format: uint64 type: integer recv_buf_bytes: description: Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT) + format: uint64 type: integer recv_buf_ms: description: The timespan (msec) of acknowledged packets in the receiver's buffer + format: uint64 type: integer recv_buf_pkt: description: The number of acknowledged packets in receiver's buffer + format: uint64 type: integer recv_bytes: description: Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer recv_drop_bytes: description: Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer recv_drop_pkt: description: The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets + format: uint64 type: integer recv_km_pkt: description: The total number of received KM (Key Material) control packets + format: uint64 type: integer - recv_loss__bytes: + recv_loss_bytes: description: Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size + format: uint64 type: integer recv_loss_pkt: description: The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side + format: uint64 type: integer recv_nak_pkt: description: The total number of received NAK (Negative Acknowledgement) control packets + format: uint64 type: integer recv_pkt: description: The total number of received DATA packets, including retransmitted packets + format: uint64 type: integer recv_retran_pkts: description: The total number of retransmitted packets registered at the receiver side + format: uint64 type: integer recv_tsbpd_delay_ms: description: Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY + format: uint64 type: integer recv_undecrypt_bytes: description: Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer recv_undecrypt_pkt: description: The total number of packets that failed to be decrypted at the receiver side + format: uint64 type: integer recv_unique_bytes: description: Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer recv_unique_pkt: description: The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver. 
+ format: uint64 type: integer reorder_tolerance_pkt: description: Instant value of the packet reorder tolerance + format: uint64 type: integer rtt_ms: description: Smoothed round-trip time (SRTT), an exponentially-weighted moving @@ -1055,71 +1166,89 @@ definitions: send_buf_bytes: description: Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT) + format: uint64 type: integer send_buf_ms: description: The timespan (msec) of packets in the sender's buffer (unacknowledged packets) + format: uint64 type: integer send_buf_pkt: description: The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged + format: uint64 type: integer send_drop_bytes: description: Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer send_drop_pkt: description: The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time + format: uint64 type: integer send_duration_us: description: The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged + format: uint64 type: integer send_km_pkt: description: The total number of sent KM (Key Material) control packets + format: uint64 type: integer send_loss_pkt: description: The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side. + format: uint64 type: integer send_tsbpd_delay_ms: description: Timestamp-based Packet Delivery Delay value of the peer + format: uint64 type: integer sent_ack_pkt: description: The total number of sent ACK (Acknowledgement) control packets + format: uint64 type: integer sent_bytes: description: Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer sent_nak_pkt: description: The total number of sent NAK (Negative Acknowledgement) control packets + format: uint64 type: integer sent_pkt: description: The total number of sent DATA packets, including retransmitted packets + format: uint64 type: integer sent_retrans_bytes: description: Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer sent_retrans_pkt: description: The total number of retransmitted packets sent by the SRT sender + format: uint64 type: integer - sent_unique__bytes: + sent_unique_bytes: description: Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + format: uint64 type: integer sent_unique_pkt: description: The total number of unique DATA packets sent by the SRT sender + format: uint64 type: integer timestamp_ms: description: The time elapsed, in milliseconds, since the SRT socket has been created + format: uint64 type: integer type: object api.Session: @@ -1131,10 +1260,13 @@ definitions: description: kbit/s type: number bytes_rx: + format: uint64 type: integer bytes_tx: + format: uint64 type: integer created_at: + format: int64 type: integer extra: type: string @@ -1154,19 +1286,25 @@ definitions: $ref: '#/definitions/api.SessionStats' type: object sessions: + format: uint64 type: integer traffic_rx_mb: + format: uint64 type: integer traffic_tx_mb: + format: uint64 type: integer type: object api.SessionStats: properties: sessions: + format: 
uint64 type: integer traffic_rx_mb: + format: uint64 type: integer traffic_tx_mb: + format: uint64 type: integer type: object api.SessionSummary: @@ -1195,8 +1333,10 @@ definitions: description: mbit/s type: number max_sessions: + format: uint64 type: integer sessions: + format: uint64 type: integer type: object api.SessionSummarySummary: @@ -1214,10 +1354,13 @@ definitions: $ref: '#/definitions/api.SessionPeers' type: object sessions: + format: uint64 type: integer traffic_rx_mb: + format: uint64 type: integer traffic_tx_mb: + format: uint64 type: integer type: object api.SessionsActive: @@ -1269,7 +1412,7 @@ definitions: type: boolean tenants: items: - $ref: '#/definitions/config.Auth0Tenant' + $ref: '#/definitions/value.Auth0Tenant' type: array type: object disable_localhost: @@ -1290,6 +1433,7 @@ definitions: type: boolean type: object created_at: + description: When this config has been persisted type: string db: properties: @@ -1299,6 +1443,10 @@ definitions: debug: properties: force_gc: + format: int + type: integer + memory_limit_mbytes: + format: int64 type: integer profiling: type: boolean @@ -1335,11 +1483,14 @@ definitions: log: properties: max_history: + format: int type: integer max_lines: + format: int type: integer type: object max_processes: + format: int64 type: integer type: object host: @@ -1364,6 +1515,7 @@ definitions: - silent type: string max_lines: + format: int type: integer topics: items: @@ -1378,9 +1530,11 @@ definitions: type: boolean interval_sec: description: seconds + format: int64 type: integer range_sec: description: seconds + format: int64 type: integer type: object name: @@ -1390,8 +1544,10 @@ definitions: enable: type: boolean max_port: + format: int type: integer min_port: + format: int type: integer type: object router: @@ -1440,14 +1596,18 @@ definitions: type: string type: array max_bitrate_mbit: + format: uint64 type: integer max_sessions: + format: uint64 type: integer persist: type: boolean persist_interval_sec: + format: int type: integer session_timeout_sec: + format: int type: integer type: object srt: @@ -1486,10 +1646,13 @@ definitions: enable: type: boolean max_file_size_mbytes: + format: uint64 type: integer max_size_mbytes: + format: uint64 type: integer ttl_seconds: + format: int64 type: integer types: properties: @@ -1506,6 +1669,7 @@ definitions: dir: type: string max_size_mbytes: + format: int64 type: integer type: object memory: @@ -1520,12 +1684,17 @@ definitions: type: string type: object max_size_mbytes: + format: int64 type: integer purge: type: boolean type: object mimetypes_file: type: string + s3: + items: + $ref: '#/definitions/value.S3Storage' + type: array type: object tls: properties: @@ -1535,6 +1704,8 @@ definitions: type: boolean cert_file: type: string + email: + type: string enable: type: boolean key_file: @@ -1543,6 +1714,7 @@ definitions: update_check: type: boolean version: + format: int64 type: integer type: object api.Skills: @@ -1709,13 +1881,30 @@ definitions: api.WidgetProcess: properties: current_sessions: + format: uint64 type: integer total_sessions: + format: uint64 type: integer uptime: type: integer type: object - config.Auth0Tenant: + github_com_datarhei_core_v16_http_api.Config: + properties: + config: + $ref: '#/definitions/api.ConfigData' + created_at: + type: string + loaded_at: + type: string + overrides: + items: + type: string + type: array + updated_at: + type: string + type: object + value.Auth0Tenant: properties: audience: type: string @@ -1728,6 +1917,34 @@ definitions: type: string type: 
array
     type: object
+  value.S3Storage:
+    properties:
+      access_key_id:
+        type: string
+      auth:
+        properties:
+          enable:
+            type: boolean
+          password:
+            type: string
+          username:
+            type: string
+        type: object
+      bucket:
+        type: string
+      endpoint:
+        type: string
+      mountpoint:
+        type: string
+      name:
+        type: string
+      region:
+        type: string
+      secret_access_key:
+        type: string
+      use_ssl:
+        type: boolean
+    type: object
 info:
   contact:
     email: hello@datarhei.com
@@ -1740,34 +1957,6 @@ info:
   title: datarhei Core API
   version: "3.0"
 paths:
-  /{path}:
-    get:
-      description: Fetch a file from the filesystem. If the file is a directory, a
-        index.html is returned, if it exists.
-      operationId: diskfs-get-file
-      parameters:
-      - description: Path to file
-        in: path
-        name: path
-        required: true
-        type: string
-      produces:
-      - application/data
-      - application/json
-      responses:
-        "200":
-          description: OK
-          schema:
-            type: file
-        "301":
-          description: Moved Permanently
-          schema:
-            type: string
-        "404":
-          description: Not Found
-          schema:
-            $ref: '#/definitions/api.Error'
-      summary: Fetch a file from the filesystem
   /api:
     get:
       description: API version and build infos in case auth is valid or not required.
@@ -2053,10 +2242,12 @@ paths:
         "200":
           description: OK
           schema:
-            $ref: '#/definitions/api.Config'
+            $ref: '#/definitions/github_com_datarhei_core_v16_http_api.Config'
       security:
       - ApiKeyAuth: []
       summary: Retrieve the currently active Restreamer configuration
+      tags:
+      - v16.7.2
     put:
       consumes:
       - application/json
@@ -2088,13 +2279,15 @@ paths:
       security:
       - ApiKeyAuth: []
       summary: Update the current Restreamer configuration
+      tags:
+      - v16.7.2
   /api/v3/config/reload:
     get:
       description: Reload the currently active configuration. This will trigger a
-        restart of the Restreamer.
+        restart of the Core.
       operationId: config-3-reload
       produces:
-      - text/plain
+      - application/json
       responses:
         "200":
           description: OK
@@ -2103,12 +2296,37 @@ paths:
       security:
       - ApiKeyAuth: []
       summary: Reload the currently active configuration
-  /api/v3/fs/disk:
+      tags:
+      - v16.7.2
+  /api/v3/fs:
     get:
-      description: List all files on the filesystem. The listing can be ordered by
-        name, size, or date of last modification in ascending or descending order.
-      operationId: diskfs-3-list-files
+      description: List all registered filesystems
+      operationId: filesystem-3-list
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            items:
+              $ref: '#/definitions/api.FilesystemInfo'
+            type: array
+      security:
+      - ApiKeyAuth: []
+      summary: List all registered filesystems
+      tags:
+      - v16.12.0
+  /api/v3/fs/{storage}:
+    get:
+      description: List all files on a filesystem. The listing can be ordered by name,
+        size, or date of last modification in ascending or descending order.
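The S3 mounts described by the value.S3Storage schema above are backed by the minio-go dependency this diff adds to go.mod further down. A hedged sketch of how such a config block could be turned into a client; the function and its wiring are illustrative, not the repository's actual code:

package main

import (
	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

// newS3Client maps value.S3Storage-shaped settings onto a minio-go v7 client.
func newS3Client(endpoint, accessKeyID, secretAccessKey string, useSSL bool) (*minio.Client, error) {
	return minio.New(endpoint, &minio.Options{
		Creds:  credentials.NewStaticV4(accessKeyID, secretAccessKey, ""),
		Secure: useSSL,
	})
}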
+ operationId: filesystem-3-list-files parameters: + - description: Name of the filesystem + in: path + name: storage + required: true + type: string - description: glob pattern for file names in: query name: glob @@ -2132,15 +2350,22 @@ paths: type: array security: - ApiKeyAuth: [] - summary: List all files on the filesystem - /api/v3/fs/disk/{path}: + summary: List all files on a filesystem + tags: + - v16.7.2 + /api/v3/fs/{storage}/{filepath}: delete: - description: Remove a file from the filesystem - operationId: diskfs-3-delete-file + description: Remove a file from a filesystem + operationId: filesystem-3-delete-file parameters: + - description: Name of the filesystem + in: path + name: storage + required: true + type: string - description: Path to file in: path - name: path + name: filepath required: true type: string produces: @@ -2156,15 +2381,21 @@ paths: $ref: '#/definitions/api.Error' security: - ApiKeyAuth: [] - summary: Remove a file from the filesystem + summary: Remove a file from a filesystem + tags: + - v16.7.2 get: - description: Fetch a file from the filesystem. The contents of that file are - returned. - operationId: diskfs-3-get-file + description: Fetch a file from a filesystem + operationId: filesystem-3-get-file parameters: + - description: Name of the filesystem + in: path + name: storage + required: true + type: string - description: Path to file in: path - name: path + name: filepath required: true type: string produces: @@ -2185,16 +2416,23 @@ paths: $ref: '#/definitions/api.Error' security: - ApiKeyAuth: [] - summary: Fetch a file from the filesystem + summary: Fetch a file from a filesystem + tags: + - v16.7.2 put: consumes: - application/data - description: Writes or overwrites a file on the filesystem - operationId: diskfs-3-put-file + description: Writes or overwrites a file on a filesystem + operationId: filesystem-3-put-file parameters: + - description: Name of the filesystem + in: path + name: storage + required: true + type: string - description: Path to file in: path - name: path + name: filepath required: true type: string - description: File data @@ -2223,160 +2461,9 @@ paths: $ref: '#/definitions/api.Error' security: - ApiKeyAuth: [] - summary: Add a file to the filesystem - /api/v3/fs/mem: - get: - description: List all files on the memory filesystem. The listing can be ordered - by name, size, or date of last modification in ascending or descending order. 
- operationId: memfs-3-list-files - parameters: - - description: glob pattern for file names - in: query - name: glob - type: string - - description: none, name, size, lastmod - in: query - name: sort - type: string - - description: asc, desc - in: query - name: order - type: string - produces: - - application/json - responses: - "200": - description: OK - schema: - items: - $ref: '#/definitions/api.FileInfo' - type: array - security: - - ApiKeyAuth: [] - summary: List all files on the memory filesystem - /api/v3/fs/mem/{path}: - delete: - description: Remove a file from the memory filesystem - operationId: memfs-3-delete-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - produces: - - text/plain - responses: - "200": - description: OK - schema: - type: string - "404": - description: Not Found - schema: - $ref: '#/definitions/api.Error' - security: - - ApiKeyAuth: [] - summary: Remove a file from the memory filesystem - get: - description: Fetch a file from the memory filesystem - operationId: memfs-3-get-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - produces: - - application/data - - application/json - responses: - "200": - description: OK - schema: - type: file - "301": - description: Moved Permanently - schema: - type: string - "404": - description: Not Found - schema: - $ref: '#/definitions/api.Error' - security: - - ApiKeyAuth: [] - summary: Fetch a file from the memory filesystem - patch: - consumes: - - application/data - description: Create a link to a file in the memory filesystem. The file linked - to has to exist. - operationId: memfs-3-patch - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - - description: Path to the file to link to - in: body - name: url - required: true - schema: - type: string - produces: - - text/plain - - application/json - responses: - "201": - description: Created - schema: - type: string - "400": - description: Bad Request - schema: - $ref: '#/definitions/api.Error' - security: - - ApiKeyAuth: [] - summary: Create a link to a file in the memory filesystem - put: - consumes: - - application/data - description: Writes or overwrites a file on the memory filesystem - operationId: memfs-3-put-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - - description: File data - in: body - name: data - required: true - schema: - items: - type: integer - type: array - produces: - - text/plain - - application/json - responses: - "201": - description: Created - schema: - type: string - "204": - description: No Content - schema: - type: string - "507": - description: Insufficient Storage - schema: - $ref: '#/definitions/api.Error' - security: - - ApiKeyAuth: [] - summary: Add a file to the memory filesystem + summary: Add a file to a filesystem + tags: + - v16.7.2 /api/v3/log: get: description: Get the last log lines of the Restreamer application @@ -2398,6 +2485,8 @@ paths: security: - ApiKeyAuth: [] summary: Application log + tags: + - v16.7.2 /api/v3/metadata/{key}: get: description: Retrieve the previously stored JSON metadata under the given key. @@ -2426,6 +2515,8 @@ paths: security: - ApiKeyAuth: [] summary: Retrieve JSON metadata from a key + tags: + - v16.7.2 put: description: Add arbitrary JSON metadata under the given key. If the key exists, all already stored metadata with this key will be overwritten. 
If the key @@ -2455,6 +2546,8 @@ paths: security: - ApiKeyAuth: [] summary: Add JSON metadata under the given key + tags: + - v16.7.2 /api/v3/metrics: get: description: List all known metrics with their description and labels @@ -2471,6 +2564,8 @@ paths: security: - ApiKeyAuth: [] summary: List all known metrics with their description and labels + tags: + - v16.10.0 post: consumes: - application/json @@ -2497,6 +2592,8 @@ paths: security: - ApiKeyAuth: [] summary: Query the collected metrics + tags: + - v16.7.2 /api/v3/process: get: description: List all known processes. Use the query parameter to filter the @@ -2541,6 +2638,8 @@ paths: security: - ApiKeyAuth: [] summary: List all known processes + tags: + - v16.7.2 post: consumes: - application/json @@ -2567,6 +2666,8 @@ paths: security: - ApiKeyAuth: [] summary: Add a new process + tags: + - v16.7.2 /api/v3/process/{id}: delete: description: Delete a process by its ID @@ -2591,6 +2692,8 @@ paths: security: - ApiKeyAuth: [] summary: Delete a process by its ID + tags: + - v16.7.2 get: description: List a process by its ID. Use the filter parameter to specifiy the level of detail of the output. @@ -2620,10 +2723,12 @@ paths: security: - ApiKeyAuth: [] summary: List a process by its ID + tags: + - v16.7.2 put: consumes: - application/json - description: Replace an existing process + description: Replace an existing process. operationId: process-3-update parameters: - description: Process ID @@ -2655,6 +2760,8 @@ paths: security: - ApiKeyAuth: [] summary: Replace an existing process + tags: + - v16.7.2 /api/v3/process/{id}/command: put: consumes: @@ -2691,6 +2798,8 @@ paths: security: - ApiKeyAuth: [] summary: Issue a command to a process + tags: + - v16.7.2 /api/v3/process/{id}/config: get: description: Get the configuration of a process. This is the configuration as @@ -2720,6 +2829,8 @@ paths: security: - ApiKeyAuth: [] summary: Get the configuration of a process + tags: + - v16.7.2 /api/v3/process/{id}/metadata/{key}: get: description: Retrieve the previously stored JSON metadata under the given key. @@ -2753,6 +2864,8 @@ paths: security: - ApiKeyAuth: [] summary: Retrieve JSON metadata stored with a process under a key + tags: + - v16.7.2 put: description: Add arbitrary JSON metadata under the given key. If the key exists, all already stored metadata with this key will be overwritten. If the key @@ -2792,6 +2905,8 @@ paths: security: - ApiKeyAuth: [] summary: Add JSON metadata with a process under the given key + tags: + - v16.7.2 /api/v3/process/{id}/playout/{inputid}/errorframe/{name}: post: consumes: @@ -2841,6 +2956,8 @@ paths: security: - ApiKeyAuth: [] summary: Upload an error frame + tags: + - v16.7.2 /api/v3/process/{id}/playout/{inputid}/errorframe/encode: get: description: Immediately encode the errorframe (if available and looping) @@ -2875,6 +2992,8 @@ paths: security: - ApiKeyAuth: [] summary: Encode the errorframe + tags: + - v16.7.2 /api/v3/process/{id}/playout/{inputid}/keyframe/{name}: get: description: Get the last keyframe of an input of a process. 
The extension of @@ -2916,6 +3035,8 @@ paths: security: - ApiKeyAuth: [] summary: Get the last keyframe + tags: + - v16.7.2 /api/v3/process/{id}/playout/{inputid}/reopen: get: description: Close the current input stream such that it will be automatically @@ -2950,6 +3071,8 @@ paths: security: - ApiKeyAuth: [] summary: Close the current input stream + tags: + - v16.7.2 /api/v3/process/{id}/playout/{inputid}/status: get: description: Get the current playout status of an input of a process @@ -2983,6 +3106,8 @@ paths: security: - ApiKeyAuth: [] summary: Get the current playout status + tags: + - v16.7.2 /api/v3/process/{id}/playout/{inputid}/stream: put: consumes: @@ -3026,10 +3151,12 @@ paths: security: - ApiKeyAuth: [] summary: Switch to a new stream + tags: + - v16.7.2 /api/v3/process/{id}/probe: get: description: Probe an existing process to get a detailed stream information - on the inputs + on the inputs. operationId: process-3-probe parameters: - description: Process ID @@ -3047,9 +3174,11 @@ paths: security: - ApiKeyAuth: [] summary: Probe a process + tags: + - v16.7.2 /api/v3/process/{id}/report: get: - description: Get the logs and the log history of a process + description: Get the logs and the log history of a process. operationId: process-3-get-report parameters: - description: Process ID @@ -3075,9 +3204,11 @@ paths: security: - ApiKeyAuth: [] summary: Get the logs of a process + tags: + - v16.7.2 /api/v3/process/{id}/state: get: - description: Get the state and progress data of a process + description: Get the state and progress data of a process. operationId: process-3-get-state parameters: - description: Process ID @@ -3103,9 +3234,11 @@ paths: security: - ApiKeyAuth: [] summary: Get the state of a process + tags: + - v16.7.2 /api/v3/rtmp: get: - description: List all currently publishing RTMP streams + description: List all currently publishing RTMP streams. operationId: rtmp-3-list-channels produces: - application/json @@ -3119,9 +3252,11 @@ paths: security: - ApiKeyAuth: [] summary: List all publishing RTMP streams + tags: + - v16.7.2 /api/v3/session: get: - description: Get a summary of all active and past sessions of the given collector + description: Get a summary of all active and past sessions of the given collector. operationId: session-3-summary parameters: - description: Comma separated list of collectors @@ -3138,10 +3273,12 @@ paths: security: - ApiKeyAuth: [] summary: Get a summary of all active and past sessions + tags: + - v16.7.2 /api/v3/session/active: get: description: Get a minimal summary of all active sessions (i.e. number of sessions, - bandwidth) + bandwidth). operationId: session-3-current parameters: - description: Comma separated list of collectors @@ -3158,9 +3295,11 @@ paths: security: - ApiKeyAuth: [] summary: Get a minimal summary of all active sessions + tags: + - v16.7.2 /api/v3/skills: get: - description: List all detected FFmpeg capabilities + description: List all detected FFmpeg capabilities. operationId: skills-3 produces: - application/json @@ -3172,9 +3311,11 @@ paths: security: - ApiKeyAuth: [] summary: FFmpeg capabilities + tags: + - v16.7.2 /api/v3/skills/reload: get: - description: Refresh the available FFmpeg capabilities + description: Refresh the available FFmpeg capabilities. operationId: skills-3-reload produces: - application/json @@ -3186,6 +3327,8 @@ paths: security: - ApiKeyAuth: [] summary: Refresh FFmpeg capabilities + tags: + - v16.7.2 /api/v3/srt: get: description: List all currently publishing SRT streams. 
This endpoint is EXPERIMENTAL @@ -3205,6 +3348,8 @@ paths: security: - ApiKeyAuth: [] summary: List all publishing SRT treams + tags: + - v16.9.0 /api/v3/widget/process/{id}: get: description: Fetch minimal statistics about a process, which is not protected @@ -3228,94 +3373,8 @@ paths: schema: $ref: '#/definitions/api.Error' summary: Fetch minimal statistics about a process - /memfs/{path}: - delete: - description: Remove a file from the memory filesystem - operationId: memfs-delete-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - produces: - - text/plain - responses: - "200": - description: OK - schema: - type: string - "404": - description: Not Found - schema: - $ref: '#/definitions/api.Error' - security: - - BasicAuth: [] - summary: Remove a file from the memory filesystem - get: - description: Fetch a file from the memory filesystem - operationId: memfs-get-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - produces: - - application/data - - application/json - responses: - "200": - description: OK - schema: - type: file - "301": - description: Moved Permanently - schema: - type: string - "404": - description: Not Found - schema: - $ref: '#/definitions/api.Error' - summary: Fetch a file from the memory filesystem - put: - consumes: - - application/data - description: Writes or overwrites a file on the memory filesystem - operationId: memfs-put-file - parameters: - - description: Path to file - in: path - name: path - required: true - type: string - - description: File data - in: body - name: data - required: true - schema: - items: - type: integer - type: array - produces: - - text/plain - - application/json - responses: - "201": - description: Created - schema: - type: string - "204": - description: No Content - schema: - type: string - "507": - description: Insufficient Storage - schema: - $ref: '#/definitions/api.Error' - security: - - BasicAuth: [] - summary: Add a file to the memory filesystem + tags: + - v16.7.2 /metrics: get: description: Prometheus metrics diff --git a/ffmpeg/parse/parser.go b/ffmpeg/parse/parser.go index 5f54d68a..edf0ca03 100644 --- a/ffmpeg/parse/parser.go +++ b/ffmpeg/parse/parser.go @@ -356,7 +356,7 @@ func (p *parser) Parse(line string) uint64 { if p.collector.IsCollectableIP(p.process.input[i].IP) { p.collector.Activate("") - p.collector.Ingress("", int64(p.stats.input[i].diff.size)*1024) + p.collector.Ingress("", int64(p.stats.input[i].diff.size)) } } } @@ -373,19 +373,18 @@ func (p *parser) Parse(line string) uint64 { if p.collector.IsCollectableIP(p.process.output[i].IP) { p.collector.Activate("") - p.collector.Egress("", int64(p.stats.output[i].diff.size)*1024) + p.collector.Egress("", int64(p.stats.output[i].diff.size)) } } } // Calculate if any of the processed frames staled. - // If one number of frames in an output is the same as - // before, then pFrames becomes 0. - var pFrames uint64 = 0 - - pFrames = p.stats.main.diff.frame + // If one number of frames in an output is the same as before, then pFrames becomes 0. 
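+	// Illustrative walk-through (an assumption, not code from this change):
+	// with FFmpeg progress available, pFrames starts at 1 and is multiplied
+	// by each output's frame delta, so two outputs advancing by 3 and 0
+	// frames yield pFrames = 1 * 3 * 0 = 0 and the process counts as stale
+	// even though one output still moves.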
+ pFrames := p.stats.main.diff.frame if isFFmpegProgress { + // Only consider the outputs + pFrames = 1 for i := range p.stats.output { pFrames *= p.stats.output[i].diff.frame } @@ -411,7 +410,7 @@ func (p *parser) parseDefaultProgress(line string) error { if matches = p.re.size.FindStringSubmatch(line); matches != nil { if x, err := strconv.ParseUint(matches[1], 10, 64); err == nil { - p.progress.ffmpeg.Size = x + p.progress.ffmpeg.Size = x * 1024 } } @@ -486,6 +485,26 @@ func (p *parser) parseFFmpegProgress(line string) error { return fmt.Errorf("output length mismatch (have: %d, want: %d)", len(progress.Output), len(p.process.output)) } + if progress.Size == 0 { + progress.Size = progress.SizeKB * 1024 + } + + for i, io := range progress.Input { + if io.Size == 0 { + io.Size = io.SizeKB * 1024 + } + + progress.Input[i].Size = io.Size + } + + for i, io := range progress.Output { + if io.Size == 0 { + io.Size = io.SizeKB * 1024 + } + + progress.Output[i].Size = io.Size + } + p.progress.ffmpeg = progress return nil diff --git a/ffmpeg/parse/stats.go b/ffmpeg/parse/stats.go index df7e714b..d36ecb82 100644 --- a/ffmpeg/parse/stats.go +++ b/ffmpeg/parse/stats.go @@ -1,11 +1,11 @@ package parse type statsData struct { - frame uint64 - packet uint64 - size uint64 // kbytes - dup uint64 - drop uint64 + frame uint64 // counter + packet uint64 // counter + size uint64 // bytes + dup uint64 // counter + drop uint64 // counter } type stats struct { diff --git a/ffmpeg/parse/types.go b/ffmpeg/parse/types.go index bf031fb0..aa0c33e3 100644 --- a/ffmpeg/parse/types.go +++ b/ffmpeg/parse/types.go @@ -44,9 +44,9 @@ func (d *Duration) UnmarshalJSON(b []byte) error { type ffmpegAVstreamIO struct { State string `json:"state"` - Packet uint64 `json:"packet"` + Packet uint64 `json:"packet"` // counter Time uint64 `json:"time"` - Size uint64 `json:"size_kb"` + Size uint64 `json:"size_kb"` // kbytes } func (avio *ffmpegAVstreamIO) export() app.AVstreamIO { @@ -54,7 +54,7 @@ func (avio *ffmpegAVstreamIO) export() app.AVstreamIO { State: avio.State, Packet: avio.Packet, Time: avio.Time, - Size: avio.Size, + Size: avio.Size * 1024, } } @@ -91,14 +91,17 @@ func (av *ffmpegAVstream) export() *app.AVstream { type ffmpegProgressIO struct { // common - Index uint64 `json:"index"` - Stream uint64 `json:"stream"` - Size uint64 `json:"size_kb"` // kbytes - Bitrate float64 `json:"-"` // kbit/s - Frame uint64 `json:"frame"` - Packet uint64 `json:"packet"` - FPS float64 `json:"-"` - PPS float64 `json:"-"` + Index uint64 `json:"index"` + Stream uint64 `json:"stream"` + SizeKB uint64 `json:"size_kb"` // kbytes + Size uint64 `json:"size_bytes"` // bytes + Bitrate float64 `json:"-"` // bit/s + Frame uint64 `json:"frame"` // counter + Keyframe uint64 `json:"keyframe"` // counter + Packet uint64 `json:"packet"` // counter + Extradata uint64 `json:"extradata_size_bytes"` // bytes + FPS float64 `json:"-"` // rate, frames per second + PPS float64 `json:"-"` // rate, packets per second // video Quantizer float64 `json:"q"` @@ -108,28 +111,36 @@ func (io *ffmpegProgressIO) exportTo(progress *app.ProgressIO) { progress.Index = io.Index progress.Stream = io.Stream progress.Frame = io.Frame + progress.Keyframe = io.Keyframe progress.Packet = io.Packet progress.FPS = io.FPS progress.PPS = io.PPS progress.Quantizer = io.Quantizer - progress.Size = io.Size * 1024 - progress.Bitrate = io.Bitrate * 1024 + progress.Bitrate = io.Bitrate + progress.Extradata = io.Extradata + + if io.Size == 0 { + progress.Size = io.SizeKB * 1024 + } else { + 
progress.Size = io.Size
+	}
 }
 
 type ffmpegProgress struct {
 	Input  []ffmpegProgressIO `json:"inputs"`
 	Output []ffmpegProgressIO `json:"outputs"`
 
-	Frame     uint64   `json:"frame"`
-	Packet    uint64   `json:"packet"`
-	FPS       float64  `json:"-"`
-	PPS       float64  `json:"-"`
+	Frame     uint64   `json:"frame"`  // counter
+	Packet    uint64   `json:"packet"` // counter
+	FPS       float64  `json:"-"`      // rate, frames per second
+	PPS       float64  `json:"-"`      // rate, packets per second
 	Quantizer float64  `json:"q"`
-	Size      uint64   `json:"size_kb"` // kbytes
-	Bitrate   float64  `json:"-"`       // kbit/s
+	SizeKB    uint64   `json:"size_kb"`    // kbytes
+	Size      uint64   `json:"size_bytes"` // bytes
+	Bitrate   float64  `json:"-"`          // bit/s
 	Time      Duration `json:"time"`
 	Speed     float64  `json:"speed"`
-	Drop      uint64   `json:"drop"`
-	Dup       uint64   `json:"dup"`
+	Drop      uint64   `json:"drop"` // counter
+	Dup       uint64   `json:"dup"`  // counter
 }
 
 func (p *ffmpegProgress) exportTo(progress *app.Progress) {
@@ -138,13 +149,18 @@ func (p *ffmpegProgress) exportTo(progress *app.Progress) {
 	progress.FPS = p.FPS
 	progress.PPS = p.PPS
 	progress.Quantizer = p.Quantizer
-	progress.Size = p.Size * 1024
 	progress.Time = p.Time.Seconds()
-	progress.Bitrate = p.Bitrate * 1024
+	progress.Bitrate = p.Bitrate
 	progress.Speed = p.Speed
 	progress.Drop = p.Drop
 	progress.Dup = p.Dup
 
+	if p.Size == 0 {
+		progress.Size = p.SizeKB * 1024
+	} else {
+		progress.Size = p.Size
+	}
+
 	for i := range p.Input {
 		if len(progress.Input) <= i {
 			break
diff --git a/glob/glob.go b/glob/glob.go
index 690daf61..89b57f00 100644
--- a/glob/glob.go
+++ b/glob/glob.go
@@ -4,6 +4,9 @@ import (
 	"github.com/gobwas/glob"
 )
 
+// Match returns whether the name matches the glob pattern, also considering
+// one or several optional separators. An error is only returned if the pattern
+// is invalid.
 func Match(pattern, name string, separators ...rune) (bool, error) {
 	g, err := glob.Compile(pattern, separators...)
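 	// Illustrative semantics, assumed from gobwas/glob rather than stated in
 	// this change: with '/' passed as a separator, "data/*" matches "data/a"
 	// but not "data/a/b", while "data/**" crosses separators as well.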
if err != nil { diff --git a/go.mod b/go.mod index f88af6ba..454d2310 100644 --- a/go.mod +++ b/go.mod @@ -3,30 +3,33 @@ module github.com/datarhei/core/v16 go 1.18 require ( - github.com/99designs/gqlgen v0.17.16 + github.com/99designs/gqlgen v0.17.20 github.com/Masterminds/semver/v3 v3.1.1 github.com/atrox/haikunatorgo/v2 v2.0.1 - github.com/caddyserver/certmagic v0.16.2 - github.com/datarhei/gosrt v0.2.1-0.20220817080252-d44df04a3845 + github.com/caddyserver/certmagic v0.17.2 + github.com/datarhei/gosrt v0.3.1 github.com/datarhei/joy4 v0.0.0-20220914170649-23c70d207759 - github.com/go-playground/validator/v10 v10.11.0 + github.com/go-playground/validator/v10 v10.11.1 github.com/gobwas/glob v0.2.3 - github.com/golang-jwt/jwt/v4 v4.4.2 + github.com/golang-jwt/jwt/v4 v4.4.3 github.com/google/uuid v1.3.0 github.com/invopop/jsonschema v0.4.0 github.com/joho/godotenv v1.4.0 - github.com/labstack/echo/v4 v4.9.0 + github.com/labstack/echo/v4 v4.9.1 github.com/lithammer/shortuuid/v4 v4.0.0 - github.com/mattn/go-isatty v0.0.16 + github.com/mattn/go-isatty v0.0.17 + github.com/minio/minio-go/v7 v7.0.47 github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 - github.com/prometheus/client_golang v1.13.0 - github.com/shirou/gopsutil/v3 v3.22.8 - github.com/stretchr/testify v1.8.0 - github.com/swaggo/echo-swagger v1.3.4 - github.com/swaggo/swag v1.8.5 - github.com/vektah/gqlparser/v2 v2.5.0 + github.com/prometheus/client_golang v1.14.0 + github.com/shirou/gopsutil/v3 v3.23.3 + github.com/stretchr/testify v1.8.2 + github.com/swaggo/echo-swagger v1.3.5 + github.com/swaggo/swag v1.8.7 + github.com/vektah/gqlparser/v2 v2.5.1 github.com/xeipuuv/gojsonschema v1.2.0 - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 + go.uber.org/zap v1.24.0 + golang.org/x/mod v0.7.0 + golang.org/x/net v0.7.0 ) require ( @@ -34,13 +37,14 @@ require ( github.com/agnivade/levenshtein v1.1.1 // indirect github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/go-ole/go-ole v1.2.6 // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/spec v0.20.7 // indirect + github.com/go-openapi/spec v0.20.8 // indirect github.com/go-openapi/swag v0.22.3 // indirect github.com/go-playground/locales v0.14.0 // indirect github.com/go-playground/universal-translator v0.18.0 // indirect @@ -50,42 +54,53 @@ require ( github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/iancoleman/orderedmap v0.2.0 // indirect github.com/josharian/intern v1.0.0 // indirect - github.com/klauspost/cpuid/v2 v2.0.11 // indirect - github.com/labstack/gommon v0.3.1 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.15.15 // indirect + github.com/klauspost/cpuid/v2 v2.2.3 // indirect + github.com/labstack/gommon v0.4.0 // indirect github.com/leodido/go-urn v1.2.1 // indirect github.com/libdns/libdns v0.2.1 // indirect - github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 // indirect + github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - 
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mholt/acmez v1.0.4 // indirect - github.com/miekg/dns v1.1.46 // indirect + github.com/miekg/dns v1.1.50 // indirect + github.com/minio/md5-simd v1.1.2 // indirect + github.com/minio/sha256-simd v1.0.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c // indirect - github.com/prometheus/client_model v0.2.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.39.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect + github.com/rogpeppe/go-internal v1.8.1 // indirect + github.com/rs/xid v1.4.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/shoenig/go-m1cpu v0.1.4 // indirect + github.com/sirupsen/logrus v1.9.0 // indirect github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a // indirect - github.com/tklauser/go-sysconf v0.3.10 // indirect - github.com/tklauser/numcpus v0.5.0 // indirect + github.com/tklauser/go-sysconf v0.3.11 // indirect + github.com/tklauser/numcpus v0.6.0 // indirect github.com/urfave/cli/v2 v2.8.1 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect - github.com/valyala/fasttemplate v1.2.1 // indirect + github.com/valyala/fasttemplate v1.2.2 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect github.com/yusufpapurcu/wmi v1.2.2 // indirect - go.uber.org/atomic v1.7.0 // indirect - go.uber.org/multierr v1.6.0 // indirect - go.uber.org/zap v1.21.0 // indirect - golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect - golang.org/x/net v0.0.0-20220907135653-1e95f45603a7 // indirect - golang.org/x/sys v0.0.0-20220907062415-87db552b00fd // indirect - golang.org/x/text v0.3.7 // indirect - golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 // indirect - golang.org/x/tools v0.1.12 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/goleak v1.1.12 // indirect + go.uber.org/multierr v1.9.0 // indirect + golang.org/x/crypto v0.5.0 // indirect + golang.org/x/sys v0.6.0 // indirect + golang.org/x/text v0.7.0 // indirect + golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.4.0 // indirect google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 040a6afb..dd05bdd5 100644 --- a/go.sum +++ b/go.sum @@ -1,41 +1,7 @@ -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod 
h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/99designs/gqlgen v0.17.16 h1:tTIw/cQ/uvf3iXIb2I6YSkdaDkmHmH2W2eZkVe0IVLA= -github.com/99designs/gqlgen v0.17.16/go.mod h1:dnJdUkgfh8iw8CEx2hhTdgTQO/GvVWKLcm/kult5gwI= +github.com/99designs/gqlgen v0.17.20 h1:O7WzccIhKB1dm+7g6dhQcULINftfiLSBg2l/mwbpJMw= +github.com/99designs/gqlgen v0.17.20/go.mod h1:Mja2HI23kWT1VRH09hvWshFgOzKswpO20o4ScpJIES4= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc= github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE= github.com/Masterminds/semver/v3 v3.1.1 
h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= @@ -46,11 +12,6 @@ github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaW github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8= github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q= @@ -61,27 +22,18 @@ github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLj github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c h1:8XZeJrs4+ZYhJeJ2aZxADI2tGADS15AzIF8MQ8XAhT4= github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c/go.mod h1:x1vxHcL/9AVzuk5HOloOEPrtJY0MaalYr78afXZ+pWI= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/caddyserver/certmagic v0.16.2 h1:k2n3LkkUG3aMUK/kckMuF9/0VFo+0FtMX3drPYESbmQ= -github.com/caddyserver/certmagic v0.16.2/go.mod h1:PgLIr/dSJa+WA7t7z6Je5xuS/e5A/GFCPHRuZ1QP+MQ= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/caddyserver/certmagic v0.17.2 h1:o30seC1T/dBqBCNNGNHWwj2i5/I/FMjBbTAhjADP3nE= +github.com/caddyserver/certmagic v0.17.2/go.mod h1:ouWUuC490GOLJzkyN35eXfV8bSbwMwSf4bdhkIxtdQE= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 
v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/datarhei/gosrt v0.2.1-0.20220817080252-d44df04a3845 h1:nlVb4EVMwdVUwH6e10WZrx4lW0n2utnlE+4ILMPyD5o= -github.com/datarhei/gosrt v0.2.1-0.20220817080252-d44df04a3845/go.mod h1:wyoTu+DG45XRuCgEq/y+R8nhZCrJbOyQKn+SwNrNVZ8= +github.com/datarhei/gosrt v0.3.1 h1:9A75hIvnY74IUFyeguqYXh1lsGF8Qt8fjxJS2Ewr12Q= +github.com/datarhei/gosrt v0.3.1/go.mod h1:M2nl2WPrawncUc1FtUBK6gZX4tpZRC7FqL8NjOdBZV0= github.com/datarhei/joy4 v0.0.0-20220914170649-23c70d207759 h1:h8NyekuQSDvLIsZVTV172m5/RVArXkEM/cnHaUzszQU= github.com/datarhei/joy4 v0.0.0-20220914170649-23c70d207759/go.mod h1:Jcw/6jZDQQmPx8A7INEkXmuEF7E9jjBbSTfVSLwmiQw= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -89,22 +41,9 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer 
v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= @@ -114,8 +53,8 @@ github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/a github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= -github.com/go-openapi/spec v0.20.7 h1:1Rlu/ZrOCCob0n+JKKJAWhNWMPW8bOZRg8FJaY+0SKI= -github.com/go-openapi/spec v0.20.7/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= +github.com/go-openapi/spec v0.20.8 h1:ubHmXNY3FCIOinT8RNrrPfGc9t7I1qhPtdOGoG2AxRU= +github.com/go-openapi/spec v0.20.8/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= @@ -126,107 +65,50 @@ github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= -github.com/go-playground/validator/v10 v10.11.0 h1:0W+xRM511GY47Yy3bZUbJVitCNg2BOGlCyvTqsp/xIw= -github.com/go-playground/validator/v10 v10.11.0/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= +github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY= github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= -github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod 
h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU= +github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 
v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= github.com/iancoleman/orderedmap v0.2.0 h1:sq1N/TFpYH++aViPcaKjys3bDClUEU7s5B+z6jq8pNA= github.com/iancoleman/orderedmap v0.2.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/invopop/jsonschema v0.4.0 h1:Yuy/unfgCnfV5Wl7H0HgFufp/rlurqPOOuacqyByrws= github.com/invopop/jsonschema v0.4.0/go.mod h1:O9uiLokuu0+MGFlyiaqtWxwqJm41/+8Nj0lD7A36YH0= github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg= github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod 
h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/cpuid/v2 v2.0.11 h1:i2lw1Pm7Yi/4O6XCSyJWqEHI2MDw2FzUK6o/D21xn2A= -github.com/klauspost/cpuid/v2 v2.0.11/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= +github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= +github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU= +github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= @@ -235,11 +117,12 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/labstack/echo/v4 v4.7.2/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks= -github.com/labstack/echo/v4 v4.9.0 h1:wPOF1CE6gvt/kmbMR4dGzWvHMPT+sAEUJOwOTtvITVY= github.com/labstack/echo/v4 v4.9.0/go.mod h1:xkCDAdFCIf8jsFQ5NnbK7oqaF/yU1A1X20Ltm0OvSks= -github.com/labstack/gommon v0.3.1 h1:OomWaJXm7xR6L1HmEtGyQf26TEn7V6X88mktX9kee9o= +github.com/labstack/echo/v4 v4.9.1 h1:GliPYSpzGKlyOhqIbG8nmHBo3i1saKWFOgh41AN3b+Y= +github.com/labstack/echo/v4 v4.9.1/go.mod h1:Pop5HLc+xoc4qhTZ1ip6C0RtP7Z+4VzRLWZZFKqbbjo= github.com/labstack/gommon v0.3.1/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= +github.com/labstack/gommon v0.4.0 h1:y7cvthEAEbU0yHOf4axH8ZG2NH8knB9iNSoTO8dyIk8= +github.com/labstack/gommon v0.4.0/go.mod h1:uW6kP17uPlLJsD3ijUYn3/M5bAxtlZhMI6m3MFxTMTM= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= github.com/libdns/libdns v0.2.1 h1:Wu59T7wSHRgtA0cfxC+n1c/e+O3upJGWytknkmFEDis= @@ -248,8 +131,8 @@ github.com/lithammer/shortuuid/v4 v4.0.0 h1:QRbbVkfgNippHOS8PXDkti4NaWeyYfcBTHtw github.com/lithammer/shortuuid/v4 v4.0.0/go.mod h1:Zs8puNcrvf2rV9rTH51ZLLcj7ZXqQI3lv67aw4KiB1Y= github.com/logrusorgru/aurora/v3 v3.0.0/go.mod 
h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281 h1:aczX6NMOtt6L4YT0fQvKkDK6LZEtdOso9sUH89V1+P0= -github.com/lufia/plan9stats v0.0.0-20220517141722-cf486979b281/go.mod h1:lc+czkgO/8F7puNki5jk8QyujbfK1LOT7Wl0ON2hxyk= +github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c h1:VtwQ41oftZwlMnOEbMWQtSEUgU64U4s+GHk7hZK+jtY= +github.com/lufia/plan9stats v0.0.0-20220913051719-115f729f3c8c/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= @@ -257,28 +140,32 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matryer/moq v0.2.7/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk= github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mholt/acmez v1.0.4 h1:N3cE4Pek+dSolbsofIkAYz6H1d3pE+2G0os7QHslf80= github.com/mholt/acmez v1.0.4/go.mod h1:qFGLZ4u+ehWINeJZjzPlsnjJBCPAADWTcIqE/7DAYQY= -github.com/miekg/dns v1.1.46 h1:uzwpxRtSVxtcIZmz/4Uz6/Rn7G11DvsaslXoy5LxQio= -github.com/miekg/dns v1.1.46/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= +github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= +github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= +github.com/minio/minio-go/v7 v7.0.47 h1:sLiuCKGSIcn/MI6lREmTzX91DX/oRau4ia0j6e6eOSs= +github.com/minio/minio-go/v7 v7.0.47/go.mod h1:nCrRzjoSUQh8hgKKtu3Y708OLvRLtuASMg2/nvmbarw= +github.com/minio/sha256-simd v1.0.0 h1:v1ta+49hkWZyvaKwrQB8elexRqm6Y0aMLjCNsrYxo6g= +github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/mitchellh/mapstructure v1.3.1/go.mod 
h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= @@ -286,94 +173,82 @@ github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6 github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c h1:NRoLoZvkBTKvR5gQLgA3e0hqjkY9u1wm+iOL45VN/qI= -github.com/power-devops/perfstat v0.0.0-20220216144756-c35f1ee13d7c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= +github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 h1:Y7qCvg282QmlyrVQuL2fgGwebuw7zvfnRym09r+dUGc= github.com/prep/average v0.0.0-20200506183628-d26c465f48c3/go.mod 
h1:0ZE5gcyWKS151WBDIpmLshHY0l+3edpuKnBUWVVbWKk= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.13.0 h1:b71QUfeo5M8gq2+evJdTPfZhYMAU0uKPkyPJ7TPsloU= -github.com/prometheus/client_golang v1.13.0/go.mod h1:vTeo+zgvILHsnnj/39Ou/1fPN5nJFOEMgftOUOmlvYQ= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= +github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= 
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= +github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg= +github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= +github.com/rs/xid v1.4.0 h1:qd7wPTDkN6KQx2VmMBLrpHkiyQwgFXRnkOLacUiaSNY= +github.com/rs/xid v1.4.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil/v3 v3.22.8 h1:a4s3hXogo5mE2PfdfJIonDbstO/P+9JszdfhAHSzD9Y= -github.com/shirou/gopsutil/v3 v3.22.8/go.mod h1:s648gW4IywYzUfE/KjXxUsqrqx/T2xO5VqOXxONeRfI= +github.com/shirou/gopsutil/v3 v3.23.3 h1:Syt5vVZXUDXPEXpIBt5ziWsJ4LdSAAxF4l/xZeQgSEE= +github.com/shirou/gopsutil/v3 v3.23.3/go.mod h1:lSBNN6t3+D6W5e5nXTxc8KIMMVxAcS+6IJlffjRRlMU= +github.com/shoenig/go-m1cpu v0.1.4 h1:SZPIgRM2sEF9NJy50mRHu9PKGwxyyTTJIWvCtgVbozs= +github.com/shoenig/go-m1cpu v0.1.4/go.mod h1:Wwvst4LR89UxjeFtLRMrpgRiyY4xPsejnVZym39dbAQ= +github.com/shoenig/test v0.6.3 h1:GVXWJFk9PiOjN0KoJ7VrJGH6uLPnqxR7/fe3HUPfE0c= +github.com/shoenig/test v0.6.3/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= 
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/swaggo/echo-swagger v1.3.4 h1:8B+yVqjVm7cMy4QBLRUuRaOzrTVAqZahcrgrOSdpC5I= -github.com/swaggo/echo-swagger v1.3.4/go.mod h1:vh8QAdbHtTXwTSaWzc1Nby7zMYJd/g0FwQyArmrFHA8= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/swaggo/echo-swagger v1.3.5 h1:kCx1wvX5AKhjI6Ykt48l3PTsfL9UD40ZROOx/tYzWyY= +github.com/swaggo/echo-swagger v1.3.5/go.mod h1:3IMHd2Z8KftdWFEEjGmv6QpWj370LwMCOfovuh7vF34= github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a h1:kAe4YSu0O0UFn1DowNo2MY5p6xzqtJ/wQ7LZynSvGaY= github.com/swaggo/files v0.0.0-20220728132757-551d4a08d97a/go.mod h1:lKJPbtWzJ9JhsTN1k1gZgleJWY/cqq0psdoMmaThG3w= github.com/swaggo/swag v1.8.1/go.mod h1:ugemnJsPZm/kRwFUnzBlbHRd0JY9zE1M4F+uy2pAaPQ= -github.com/swaggo/swag v1.8.5 h1:7NgtfXsXE+jrcOwRyiftGKW7Ppydj7tZiVenuRf1fE4= -github.com/swaggo/swag v1.8.5/go.mod h1:jMLeXOOmYyjk8PvHTsXBdrubsNd9gUJTTCzL5iBnseg= -github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= -github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= -github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= -github.com/tklauser/numcpus v0.5.0 h1:ooe7gN0fg6myJ0EKoTAf5hebTZrH52px3New/D9iJ+A= -github.com/tklauser/numcpus v0.5.0/go.mod h1:OGzpTxpcIMNGYQdit2BYL1pvk/dSOaJWjKoflh+RQjo= +github.com/swaggo/swag v1.8.7 h1:2K9ivTD3teEO+2fXV6zrZKDqk5IuU2aJtBDo8U7omWU= +github.com/swaggo/swag v1.8.7/go.mod h1:ezQVUUhly8dludpVk+/PuwJWvLLanB13ygV5Pr9enSk= +github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= +github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= +github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= +github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/urfave/cli/v2 v2.8.1 h1:CGuYNZF9IKZY/rfBe3lJpccSoIY1ytfvmgQT90cNOl4= github.com/urfave/cli/v2 v2.8.1/go.mod h1:Z41J9TPoffeoqP0Iza0YbAhGvymRdZAd2uPmZ5JxRdY= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.2.1 h1:TVEnxayobAdVkhQfrfes2IzOB6o+z4roRkPF52WA1u4= github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/vektah/gqlparser/v2 v2.5.0 h1:GwEwy7AJsqPWrey0bHnn+3JLaHLZVT66wY/+O+Tf9SU= -github.com/vektah/gqlparser/v2 v2.5.0/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs= +github.com/valyala/fasttemplate v1.2.2 h1:lxLXG0uE3Qnshl9QyaK6XJxMXlQZELvChBOCmQD0Loo= +github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= +github.com/vektah/gqlparser/v2 v2.5.1 h1:ZGu+bquAY23jsxDRcYpWjttRZrUz07LbiY77gUOHcr4= +github.com/vektah/gqlparser/v2 v2.5.1/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb 
h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -383,174 +258,69 @@ github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17 github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM= -golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod 
h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.0.0-20220907135653-1e95f45603a7 h1:1WGATo9HAhkWMbfyuVU0tEFP88OIkUvwaHFveQPvzCQ= -golang.org/x/net v0.0.0-20220907135653-1e95f45603a7/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod 
h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw= -golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -559,160 +329,49 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220907062415-87db552b00fd h1:AZeIEzg+8RCELJYq8w+ODLVxFgLMMigSwO/ffKPEd9U= -golang.org/x/sys v0.0.0-20220907062415-87db552b00fd/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 h1:ftMN5LMiBFjbzleLqtoBZk7KdJwhuybIU+FckUHgoyQ= -golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools 
v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools 
v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= -golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.4.0 h1:7mTAgkunk3fr4GAloyyCasadO6h9zSsQZbwvcaIciV4= +golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod 
h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -720,13 +379,12 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -734,13 +392,3 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/http/api/avstream.go b/http/api/avstream.go index 0a9c5044..279b3352 100644 --- a/http/api/avstream.go +++ b/http/api/avstream.go @@ -6,7 +6,7 @@ import ( type AVstreamIO struct { State string `json:"state" enums:"running,idle" jsonschema:"enum=running,enum=idle"` - Packet uint64 `json:"packet"` + Packet uint64 `json:"packet" format:"uint64"` Time uint64 `json:"time"` Size uint64 `json:"size_kb"` } @@ -25,11 +25,11 @@ func (i *AVstreamIO) Unmarshal(io *app.AVstreamIO) { type AVstream struct { Input AVstreamIO `json:"input"` Output AVstreamIO `json:"output"` - Aqueue uint64 `json:"aqueue"` - Queue uint64 `json:"queue"` - Dup uint64 `json:"dup"` - Drop uint64 `json:"drop"` - Enc uint64 `json:"enc"` + Aqueue uint64 `json:"aqueue" format:"uint64"` + Queue uint64 `json:"queue" format:"uint64"` + Dup uint64 `json:"dup" format:"uint64"` + Drop uint64 `json:"drop" format:"uint64"` + Enc uint64 `json:"enc" format:"uint64"` Looping bool `json:"looping"` Duplicating bool `json:"duplicating"` GOP string `json:"gop"` diff --git a/http/api/config.go b/http/api/config.go index a02e0407..47040cc2 100644 --- a/http/api/config.go +++ b/http/api/config.go @@ -4,8 +4,16 @@ import ( "time" "github.com/datarhei/core/v16/config" + v1config "github.com/datarhei/core/v16/config/v1" + v2config "github.com/datarhei/core/v16/config/v2" ) +// ConfigVersion is used to only unmarshal the version field in order +// to find out which SetConfig should be used.
+type ConfigVersion struct { + Version int64 `json:"version"` +} + // ConfigData embeds config.Data type ConfigData struct { config.Data @@ -22,11 +30,68 @@ type Config struct { Overrides []string `json:"overrides"` } +type SetConfigV1 struct { + v1config.Data +} + +// NewSetConfigV1 creates a new SetConfigV1 by downgrading the +// current config. +func NewSetConfigV1(cfg *config.Config) SetConfigV1 { + v2data, _ := config.DowngradeV3toV2(&cfg.Data) + v1data, _ := v2config.DowngradeV2toV1(v2data) + + data := SetConfigV1{ + Data: *v1data, + } + + return data +} + +// MergeTo merges the v1 config into the current config. +func (s *SetConfigV1) MergeTo(cfg *config.Config) { + v2data, _ := config.DowngradeV3toV2(&cfg.Data) + + v2config.MergeV1ToV2(v2data, &s.Data) + config.MergeV2toV3(&cfg.Data, v2data) +} + +type SetConfigV2 struct { + v2config.Data +} + +// NewSetConfigV2 creates a new SetConfigV2 by downgrading the +// current config. +func NewSetConfigV2(cfg *config.Config) SetConfigV2 { + v2data, _ := config.DowngradeV3toV2(&cfg.Data) + + data := SetConfigV2{ + Data: *v2data, + } + + return data +} + +// MergeTo merges the v2 config into the current config. +func (s *SetConfigV2) MergeTo(cfg *config.Config) { + config.MergeV2toV3(&cfg.Data, &s.Data) +} + // SetConfig embeds config.Data. It is used to send a new config to the server. type SetConfig struct { config.Data } +// NewSetConfig converts a config.Config into a SetConfig in order to prepopulate +// a SetConfig with the current values. The uploaded config can have missing fields that +// will be filled with the current values after unmarshalling the JSON. +func NewSetConfig(cfg *config.Config) SetConfig { + data := SetConfig{ + cfg.Data, + } + + return data +} + // MergeTo merges a sent config into a config.Config func (rscfg *SetConfig) MergeTo(cfg *config.Config) { cfg.ID = rscfg.ID @@ -51,18 +116,7 @@ func (rscfg *SetConfig) MergeTo(cfg *config.Config) { cfg.Router = rscfg.Router } -// NewSetConfig converts a config.Config into a RestreamerSetConfig in order to prepopulate -// a RestreamerSetConfig with the current values. The uploaded config can have missing fields that -// will be filled with the current values after unmarshalling the JSON. -func NewSetConfig(cfg *config.Config) SetConfig { - data := SetConfig{ - cfg.Data, - } - - return data -} - -// Unmarshal converts a config.Config to a RestreamerConfig. +// Unmarshal converts a config.Config to a Config.
func (c *Config) Unmarshal(cfg *config.Config) { if cfg == nil { return diff --git a/http/api/error.go b/http/api/error.go index 07477568..a87ef95a 100644 --- a/http/api/error.go +++ b/http/api/error.go @@ -8,7 +8,7 @@ import ( // Error represents an error response of the API type Error struct { - Code int `json:"code" jsonschema:"required"` + Code int `json:"code" jsonschema:"required" format:"int"` Message string `json:"message" jsonschema:""` Details []string `json:"details" jsonschema:""` } diff --git a/http/api/fs.go b/http/api/fs.go index c7d12eb4..84535bcc 100644 --- a/http/api/fs.go +++ b/http/api/fs.go @@ -3,6 +3,13 @@ package api // FileInfo represents informatiion about a file on a filesystem type FileInfo struct { Name string `json:"name" jsonschema:"minLength=1"` - Size int64 `json:"size_bytes" jsonschema:"minimum=0"` - LastMod int64 `json:"last_modified" jsonschema:"minimum=0"` + Size int64 `json:"size_bytes" jsonschema:"minimum=0" format:"int64"` + LastMod int64 `json:"last_modified" jsonschema:"minimum=0" format:"int64"` +} + +// FilesystemInfo represents information about a filesystem +type FilesystemInfo struct { + Name string `json:"name"` + Type string `json:"type"` + Mount string `json:"mount"` } diff --git a/http/api/metrics.go b/http/api/metrics.go index 49b184f9..f2476988 100644 --- a/http/api/metrics.go +++ b/http/api/metrics.go @@ -19,8 +19,8 @@ type MetricsQueryMetric struct { } type MetricsQuery struct { - Timerange int64 `json:"timerange_sec"` - Interval int64 `json:"interval_sec"` + Timerange int64 `json:"timerange_sec" format:"int64"` + Interval int64 `json:"interval_sec" format:"int64"` Metrics []MetricsQueryMetric `json:"metrics"` } @@ -51,8 +51,8 @@ func (v MetricsResponseValue) MarshalJSON() ([]byte, error) { } type MetricsResponse struct { - Timerange int64 `json:"timerange_sec"` - Interval int64 `json:"interval_sec"` + Timerange int64 `json:"timerange_sec" format:"int64"` + Interval int64 `json:"interval_sec" format:"int64"` Metrics []MetricsResponseMetric `json:"metrics"` } diff --git a/http/api/playout.go b/http/api/playout.go index 22a192d4..ae2b0b9d 100644 --- a/http/api/playout.go +++ b/http/api/playout.go @@ -4,9 +4,9 @@ import "github.com/datarhei/core/v16/playout" type PlayoutStatusIO struct { State string `json:"state" enums:"running,idle" jsonschema:"enum=running,enum=idle"` - Packet uint64 `json:"packet"` - Time uint64 `json:"time"` - Size uint64 `json:"size_kb"` + Packet uint64 `json:"packet" format:"uint64"` + Time uint64 `json:"time" format:"uint64"` + Size uint64 `json:"size_kb" format:"uint64"` } func (i *PlayoutStatusIO) Unmarshal(io playout.StatusIO) { @@ -33,12 +33,12 @@ func (s *PlayoutStatusSwap) Unmarshal(swap playout.StatusSwap) { type PlayoutStatus struct { ID string `json:"id"` Address string `json:"url"` - Stream uint64 `json:"stream"` - Queue uint64 `json:"queue"` - AQueue uint64 `json:"aqueue"` - Dup uint64 `json:"dup"` - Drop uint64 `json:"drop"` - Enc uint64 `json:"enc"` + Stream uint64 `json:"stream" format:"uint64"` + Queue uint64 `json:"queue" format:"uint64"` + AQueue uint64 `json:"aqueue" format:"uint64"` + Dup uint64 `json:"dup" format:"uint64"` + Drop uint64 `json:"drop" format:"uint64"` + Enc uint64 `json:"enc" format:"uint64"` Looping bool `json:"looping"` Duplicating bool `json:"duplicating"` GOP string `json:"gop"` diff --git a/http/api/probe.go b/http/api/probe.go index 3c538dcc..dda8b260 100644 --- a/http/api/probe.go +++ b/http/api/probe.go @@ -11,8 +11,8 @@ type ProbeIO struct { // common Address string 
`json:"url"` Format string `json:"format"` - Index uint64 `json:"index"` - Stream uint64 `json:"stream"` + Index uint64 `json:"index" format:"uint64"` + Stream uint64 `json:"stream" format:"uint64"` Language string `json:"language"` Type string `json:"type"` Codec string `json:"codec"` @@ -23,13 +23,13 @@ type ProbeIO struct { // video FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"` Pixfmt string `json:"pix_fmt"` - Width uint64 `json:"width"` - Height uint64 `json:"height"` + Width uint64 `json:"width" format:"uint64"` + Height uint64 `json:"height" format:"uint64"` // audio - Sampling uint64 `json:"sampling_hz"` + Sampling uint64 `json:"sampling_hz" format:"uint64"` Layout string `json:"layout"` - Channels uint64 `json:"channels"` + Channels uint64 `json:"channels" format:"uint64"` } func (i *ProbeIO) Unmarshal(io *app.ProbeIO) { diff --git a/http/api/process.go b/http/api/process.go index 7365e176..e217b455 100644 --- a/http/api/process.go +++ b/http/api/process.go @@ -13,7 +13,7 @@ type Process struct { ID string `json:"id" jsonschema:"minLength=1"` Type string `json:"type" jsonschema:"enum=ffmpeg"` Reference string `json:"reference"` - CreatedAt int64 `json:"created_at" jsonschema:"minimum=0"` + CreatedAt int64 `json:"created_at" jsonschema:"minimum=0" format:"int64"` Config *ProcessConfig `json:"config,omitempty"` State *ProcessState `json:"state,omitempty"` Report *ProcessReport `json:"report,omitempty"` @@ -30,15 +30,15 @@ type ProcessConfigIO struct { type ProcessConfigIOCleanup struct { Pattern string `json:"pattern" validate:"required"` - MaxFiles uint `json:"max_files"` - MaxFileAge uint `json:"max_file_age_seconds"` + MaxFiles uint `json:"max_files" format:"uint"` + MaxFileAge uint `json:"max_file_age_seconds" format:"uint"` PurgeOnDelete bool `json:"purge_on_delete"` } type ProcessConfigLimits struct { CPU float64 `json:"cpu_usage" jsonschema:"minimum=0,maximum=100"` - Memory uint64 `json:"memory_mbytes" jsonschema:"minimum=0"` - WaitFor uint64 `json:"waitfor_seconds" jsonschema:"minimum=0"` + Memory uint64 `json:"memory_mbytes" jsonschema:"minimum=0" format:"uint64"` + WaitFor uint64 `json:"waitfor_seconds" jsonschema:"minimum=0" format:"uint64"` } // ProcessConfig represents the configuration of an ffmpeg process @@ -50,9 +50,9 @@ type ProcessConfig struct { Output []ProcessConfigIO `json:"output" validate:"required"` Options []string `json:"options"` Reconnect bool `json:"reconnect"` - ReconnectDelay uint64 `json:"reconnect_delay_seconds"` + ReconnectDelay uint64 `json:"reconnect_delay_seconds" format:"uint64"` Autostart bool `json:"autostart"` - StaleTimeout uint64 `json:"stale_timeout_seconds"` + StaleTimeout uint64 `json:"stale_timeout_seconds" format:"uint64"` Limits ProcessConfigLimits `json:"limits"` } @@ -188,7 +188,7 @@ func (cfg *ProcessConfig) Unmarshal(c *app.Config) { // ProcessReportHistoryEntry represents the logs of a run of a restream process type ProcessReportHistoryEntry struct { - CreatedAt int64 `json:"created_at"` + CreatedAt int64 `json:"created_at" format:"int64"` Prelude []string `json:"prelude"` Log [][2]string `json:"log"` } @@ -235,11 +235,11 @@ func (report *ProcessReport) Unmarshal(l *app.Log) { type ProcessState struct { Order string `json:"order" jsonschema:"enum=start,enum=stop"` State string `json:"exec" jsonschema:"enum=finished,enum=starting,enum=running,enum=finishing,enum=killed,enum=failed"` - Runtime int64 `json:"runtime_seconds" jsonschema:"minimum=0"` - Reconnect int64 `json:"reconnect_seconds"` + Runtime 
int64 `json:"runtime_seconds" jsonschema:"minimum=0" format:"int64"` + Reconnect int64 `json:"reconnect_seconds" format:"int64"` LastLog string `json:"last_logline"` Progress *Progress `json:"progress"` - Memory uint64 `json:"memory_bytes"` + Memory uint64 `json:"memory_bytes" format:"uint64"` CPU json.Number `json:"cpu_usage" swaggertype:"number" jsonschema:"type=number"` Command []string `json:"command"` } diff --git a/http/api/progress.go b/http/api/progress.go index ed575fc7..1bf22c59 100644 --- a/http/api/progress.go +++ b/http/api/progress.go @@ -13,29 +13,31 @@ type ProgressIO struct { Address string `json:"address" jsonschema:"minLength=1"` // General - Index uint64 `json:"index"` - Stream uint64 `json:"stream"` - Format string `json:"format"` - Type string `json:"type"` - Codec string `json:"codec"` - Coder string `json:"coder"` - Frame uint64 `json:"frame"` - FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"` - Packet uint64 `json:"packet"` - PPS json.Number `json:"pps" swaggertype:"number" jsonschema:"type=number"` - Size uint64 `json:"size_kb"` // kbytes - Bitrate json.Number `json:"bitrate_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s + Index uint64 `json:"index" format:"uint64"` + Stream uint64 `json:"stream" format:"uint64"` + Format string `json:"format"` + Type string `json:"type"` + Codec string `json:"codec"` + Coder string `json:"coder"` + Frame uint64 `json:"frame" format:"uint64"` + Keyframe uint64 `json:"keyframe" format:"uint64"` + FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"` + Packet uint64 `json:"packet" format:"uint64"` + PPS json.Number `json:"pps" swaggertype:"number" jsonschema:"type=number"` + Size uint64 `json:"size_kb" format:"uint64"` // kbytes + Bitrate json.Number `json:"bitrate_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s + Extradata uint64 `json:"extradata_size_bytes" format:"uint64"` // bytes // Video Pixfmt string `json:"pix_fmt,omitempty"` Quantizer json.Number `json:"q,omitempty" swaggertype:"number" jsonschema:"type=number"` - Width uint64 `json:"width,omitempty"` - Height uint64 `json:"height,omitempty"` + Width uint64 `json:"width,omitempty" format:"uint64"` + Height uint64 `json:"height,omitempty" format:"uint64"` // Audio - Sampling uint64 `json:"sampling_hz,omitempty"` + Sampling uint64 `json:"sampling_hz,omitempty" format:"uint64"` Layout string `json:"layout,omitempty"` - Channels uint64 `json:"channels,omitempty"` + Channels uint64 `json:"channels,omitempty" format:"uint64"` // avstream AVstream *AVstream `json:"avstream"` @@ -56,11 +58,13 @@ func (i *ProgressIO) Unmarshal(io *app.ProgressIO) { i.Codec = io.Codec i.Coder = io.Coder i.Frame = io.Frame + i.Keyframe = io.Keyframe i.FPS = json.Number(fmt.Sprintf("%.3f", io.FPS)) i.Packet = io.Packet i.PPS = json.Number(fmt.Sprintf("%.3f", io.PPS)) i.Size = io.Size / 1024 i.Bitrate = json.Number(fmt.Sprintf("%.3f", io.Bitrate/1024)) + i.Extradata = io.Extradata i.Pixfmt = io.Pixfmt i.Quantizer = json.Number(fmt.Sprintf("%.3f", io.Quantizer)) i.Width = io.Width @@ -79,16 +83,16 @@ func (i *ProgressIO) Unmarshal(io *app.ProgressIO) { type Progress struct { Input []ProgressIO `json:"inputs"` Output []ProgressIO `json:"outputs"` - Frame uint64 `json:"frame"` - Packet uint64 `json:"packet"` + Frame uint64 `json:"frame" format:"uint64"` + Packet uint64 `json:"packet" format:"uint64"` FPS json.Number `json:"fps" swaggertype:"number" jsonschema:"type=number"` Quantizer json.Number `json:"q" swaggertype:"number" 
jsonschema:"type=number"` - Size uint64 `json:"size_kb"` // kbytes + Size uint64 `json:"size_kb" format:"uint64"` // kbytes Time json.Number `json:"time" swaggertype:"number" jsonschema:"type=number"` Bitrate json.Number `json:"bitrate_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s Speed json.Number `json:"speed" swaggertype:"number" jsonschema:"type=number"` - Drop uint64 `json:"drop"` - Dup uint64 `json:"dup"` + Drop uint64 `json:"drop" format:"uint64"` + Dup uint64 `json:"dup" format:"uint64"` } // Unmarshal converts a restreamer Progress to a Progress in API representation diff --git a/http/api/session.go b/http/api/session.go index 8078531a..c616121f 100644 --- a/http/api/session.go +++ b/http/api/session.go @@ -8,9 +8,9 @@ import ( // SessionStats are the accumulated numbers for the session summary type SessionStats struct { - TotalSessions uint64 `json:"sessions"` - TotalRxBytes uint64 `json:"traffic_rx_mb"` - TotalTxBytes uint64 `json:"traffic_tx_mb"` + TotalSessions uint64 `json:"sessions" format:"uint64"` + TotalRxBytes uint64 `json:"traffic_rx_mb" format:"uint64"` + TotalTxBytes uint64 `json:"traffic_tx_mb" format:"uint64"` } // SessionPeers is for the grouping by peers in the summary @@ -24,12 +24,12 @@ type SessionPeers struct { type Session struct { ID string `json:"id"` Reference string `json:"reference"` - CreatedAt int64 `json:"created_at"` + CreatedAt int64 `json:"created_at" format:"int64"` Location string `json:"local"` Peer string `json:"remote"` Extra string `json:"extra"` - RxBytes uint64 `json:"bytes_rx"` - TxBytes uint64 `json:"bytes_tx"` + RxBytes uint64 `json:"bytes_rx" format:"uint64"` + TxBytes uint64 `json:"bytes_tx" format:"uint64"` RxBitrate json.Number `json:"bandwidth_rx_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s TxBitrate json.Number `json:"bandwidth_tx_kbit" swaggertype:"number" jsonschema:"type=number"` // kbit/s } @@ -50,10 +50,10 @@ func (s *Session) Unmarshal(sess session.Session) { // SessionSummaryActive represents the currently active sessions type SessionSummaryActive struct { SessionList []Session `json:"list"` - Sessions uint64 `json:"sessions"` + Sessions uint64 `json:"sessions" format:"uint64"` RxBitrate json.Number `json:"bandwidth_rx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s TxBitrate json.Number `json:"bandwidth_tx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s - MaxSessions uint64 `json:"max_sessions"` + MaxSessions uint64 `json:"max_sessions" format:"uint64"` MaxRxBitrate json.Number `json:"max_bandwidth_rx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s MaxTxBitrate json.Number `json:"max_bandwidth_tx_mbit" swaggertype:"number" jsonschema:"type=number"` // mbit/s } diff --git a/http/api/srt.go b/http/api/srt.go index 149d3109..d31d03b1 100644 --- a/http/api/srt.go +++ b/http/api/srt.go @@ -6,98 +6,118 @@ import ( // SRTStatistics represents the statistics of a SRT connection type SRTStatistics struct { - MsTimeStamp uint64 `json:"timestamp_ms"` // The time elapsed, in milliseconds, since the SRT socket has been created + MsTimeStamp uint64 `json:"timestamp_ms" format:"uint64"` // The time elapsed, in milliseconds, since the SRT socket has been created // Accumulated - PktSent uint64 `json:"sent_pkt"` // The total number of sent DATA packets, including retransmitted packets - PktRecv uint64 `json:"recv_pkt"` // The total number of received DATA packets, including retransmitted packets - PktSentUnique uint64 `json:"sent_unique_pkt"` // The total number 
of unique DATA packets sent by the SRT sender - PktRecvUnique uint64 `json:"recv_unique_pkt"` // The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver. - PktSndLoss uint64 `json:"send_loss_pkt"` // The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side. - PktRcvLoss uint64 `json:"recv_loss_pkt"` // The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side - PktRetrans uint64 `json:"sent_retrans_pkt"` // The total number of retransmitted packets sent by the SRT sender - PktRcvRetrans uint64 `json:"recv_retran_pkts"` // The total number of retransmitted packets registered at the receiver side - PktSentACK uint64 `json:"sent_ack_pkt"` // The total number of sent ACK (Acknowledgement) control packets - PktRecvACK uint64 `json:"recv_ack_pkt"` // The total number of received ACK (Acknowledgement) control packets - PktSentNAK uint64 `json:"sent_nak_pkt"` // The total number of sent NAK (Negative Acknowledgement) control packets - PktRecvNAK uint64 `json:"recv_nak_pkt"` // The total number of received NAK (Negative Acknowledgement) control packets - PktSentKM uint64 `json:"send_km_pkt"` // The total number of sent KM (Key Material) control packets - PktRecvKM uint64 `json:"recv_km_pkt"` // The total number of received KM (Key Material) control packets - UsSndDuration uint64 `json:"send_duration_us"` // The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged - PktSndDrop uint64 `json:"send_drop_pkt"` // The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time - PktRcvDrop uint64 `json:"recv_drop_pkt"` // The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets - PktRcvUndecrypt uint64 `json:"recv_undecrypt_pkt"` // The total number of packets that failed to be decrypted at the receiver side + PktSent uint64 `json:"sent_pkt" format:"uint64"` // The total number of sent DATA packets, including retransmitted packets + PktRecv uint64 `json:"recv_pkt" format:"uint64"` // The total number of received DATA packets, including retransmitted packets + PktSentUnique uint64 `json:"sent_unique_pkt" format:"uint64"` // The total number of unique DATA packets sent by the SRT sender + PktRecvUnique uint64 `json:"recv_unique_pkt" format:"uint64"` // The total number of unique original, retransmitted or recovered by the packet filter DATA packets received in time, decrypted without errors and, as a result, scheduled for delivery to the upstream application by the SRT receiver. + PktSndLoss uint64 `json:"send_loss_pkt" format:"uint64"` // The total number of data packets considered or reported as lost at the sender side. Does not correspond to the packets detected as lost at the receiver side. 
+ PktRcvLoss uint64 `json:"recv_loss_pkt" format:"uint64"` // The total number of SRT DATA packets detected as presently missing (either reordered or lost) at the receiver side + PktRetrans uint64 `json:"sent_retrans_pkt" format:"uint64"` // The total number of retransmitted packets sent by the SRT sender + PktRcvRetrans uint64 `json:"recv_retran_pkts" format:"uint64"` // The total number of retransmitted packets registered at the receiver side + PktSentACK uint64 `json:"sent_ack_pkt" format:"uint64"` // The total number of sent ACK (Acknowledgement) control packets + PktRecvACK uint64 `json:"recv_ack_pkt" format:"uint64"` // The total number of received ACK (Acknowledgement) control packets + PktSentNAK uint64 `json:"sent_nak_pkt" format:"uint64"` // The total number of sent NAK (Negative Acknowledgement) control packets + PktRecvNAK uint64 `json:"recv_nak_pkt" format:"uint64"` // The total number of received NAK (Negative Acknowledgement) control packets + PktSentKM uint64 `json:"send_km_pkt" format:"uint64"` // The total number of sent KM (Key Material) control packets + PktRecvKM uint64 `json:"recv_km_pkt" format:"uint64"` // The total number of received KM (Key Material) control packets + UsSndDuration uint64 `json:"send_duration_us" format:"uint64"` // The total accumulated time in microseconds, during which the SRT sender has some data to transmit, including packets that have been sent, but not yet acknowledged + PktSndDrop uint64 `json:"send_drop_pkt" format:"uint64"` // The total number of dropped by the SRT sender DATA packets that have no chance to be delivered in time + PktRcvDrop uint64 `json:"recv_drop_pkt" format:"uint64"` // The total number of dropped by the SRT receiver and, as a result, not delivered to the upstream application DATA packets + PktRcvUndecrypt uint64 `json:"recv_undecrypt_pkt" format:"uint64"` // The total number of packets that failed to be decrypted at the receiver side - ByteSent uint64 `json:"sent_bytes"` // Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRecv uint64 `json:"recv_bytes"` // Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteSentUnique uint64 `json:"sent_unique__bytes"` // Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRecvUnique uint64 `json:"recv_unique_bytes"` // Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRcvLoss uint64 `json:"recv_loss__bytes"` // Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size - ByteRetrans uint64 `json:"sent_retrans_bytes"` // Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteSndDrop uint64 `json:"send_drop_bytes"` // Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRcvDrop uint64 `json:"recv_drop_bytes"` // Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) - ByteRcvUndecrypt uint64 `json:"recv_undecrypt_bytes"` // Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteSent uint64 `json:"sent_bytes" format:"uint64"` // Same as pktSent, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRecv 
uint64 `json:"recv_bytes" format:"uint64"` // Same as pktRecv, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteSentUnique uint64 `json:"sent_unique_bytes" format:"uint64"` // Same as pktSentUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRecvUnique uint64 `json:"recv_unique_bytes" format:"uint64"` // Same as pktRecvUnique, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRcvLoss uint64 `json:"recv_loss_bytes" format:"uint64"` // Same as pktRcvLoss, but expressed in bytes, including payload and all the headers (IP, TCP, SRT), bytes for the presently missing (either reordered or lost) packets' payloads are estimated based on the average packet size + ByteRetrans uint64 `json:"sent_retrans_bytes" format:"uint64"` // Same as pktRetrans, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteSndDrop uint64 `json:"send_drop_bytes" format:"uint64"` // Same as pktSndDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRcvDrop uint64 `json:"recv_drop_bytes" format:"uint64"` // Same as pktRcvDrop, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) + ByteRcvUndecrypt uint64 `json:"recv_undecrypt_bytes" format:"uint64"` // Same as pktRcvUndecrypt, but expressed in bytes, including payload and all the headers (IP, TCP, SRT) // Instantaneous - UsPktSndPeriod float64 `json:"pkt_send_period_us"` // Current minimum time interval between which consecutive packets are sent, in microseconds - PktFlowWindow uint64 `json:"flow_window_pkt"` // The maximum number of packets that can be "in flight" - PktFlightSize uint64 `json:"flight_size_pkt"` // The number of packets in flight - MsRTT float64 `json:"rtt_ms"` // Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds - MbpsBandwidth float64 `json:"bandwidth_mbit"` // Estimated bandwidth of the network link, in Mbps - ByteAvailSndBuf uint64 `json:"avail_send_buf_bytes"` // The available space in the sender's buffer, in bytes - ByteAvailRcvBuf uint64 `json:"avail_recv_buf_bytes"` // The available space in the receiver's buffer, in bytes - MbpsMaxBW float64 `json:"max_bandwidth_mbit"` // Transmission bandwidth limit, in Mbps - ByteMSS uint64 `json:"mss_bytes"` // Maximum Segment Size (MSS), in bytes - PktSndBuf uint64 `json:"send_buf_pkt"` // The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged - ByteSndBuf uint64 `json:"send_buf_bytes"` // Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT) - MsSndBuf uint64 `json:"send_buf_ms"` // The timespan (msec) of packets in the sender's buffer (unacknowledged packets) - MsSndTsbPdDelay uint64 `json:"send_tsbpd_delay_ms"` // Timestamp-based Packet Delivery Delay value of the peer - PktRcvBuf uint64 `json:"recv_buf_pkt"` // The number of acknowledged packets in receiver's buffer - ByteRcvBuf uint64 `json:"recv_buf_bytes"` // Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT) - MsRcvBuf uint64 `json:"recv_buf_ms"` // The timespan (msec) of acknowledged packets in the receiver's buffer - MsRcvTsbPdDelay uint64 `json:"recv_tsbpd_delay_ms"` // Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY - 
PktReorderTolerance uint64 `json:"reorder_tolerance_pkt"` // Instant value of the packet reorder tolerance - PktRcvAvgBelatedTime uint64 `json:"pkt_recv_avg_belated_time_ms"` // Accumulated difference between the current time and the time-to-play of a packet that is received late + UsPktSndPeriod float64 `json:"pkt_send_period_us"` // Current minimum time interval between which consecutive packets are sent, in microseconds + PktFlowWindow uint64 `json:"flow_window_pkt" format:"uint64"` // The maximum number of packets that can be "in flight" + PktFlightSize uint64 `json:"flight_size_pkt" format:"uint64"` // The number of packets in flight + MsRTT float64 `json:"rtt_ms"` // Smoothed round-trip time (SRTT), an exponentially-weighted moving average (EWMA) of an endpoint's RTT samples, in milliseconds + MbpsBandwidth float64 `json:"bandwidth_mbit"` // Estimated bandwidth of the network link, in Mbps + ByteAvailSndBuf uint64 `json:"avail_send_buf_bytes" format:"uint64"` // The available space in the sender's buffer, in bytes + ByteAvailRcvBuf uint64 `json:"avail_recv_buf_bytes" format:"uint64"` // The available space in the receiver's buffer, in bytes + MbpsMaxBW float64 `json:"max_bandwidth_mbit"` // Transmission bandwidth limit, in Mbps + ByteMSS uint64 `json:"mss_bytes" format:"uint64"` // Maximum Segment Size (MSS), in bytes + PktSndBuf uint64 `json:"send_buf_pkt" format:"uint64"` // The number of packets in the sender's buffer that are already scheduled for sending or even possibly sent, but not yet acknowledged + ByteSndBuf uint64 `json:"send_buf_bytes" format:"uint64"` // Instantaneous (current) value of pktSndBuf, but expressed in bytes, including payload and all headers (IP, TCP, SRT) + MsSndBuf uint64 `json:"send_buf_ms" format:"uint64"` // The timespan (msec) of packets in the sender's buffer (unacknowledged packets) + MsSndTsbPdDelay uint64 `json:"send_tsbpd_delay_ms" format:"uint64"` // Timestamp-based Packet Delivery Delay value of the peer + PktRcvBuf uint64 `json:"recv_buf_pkt" format:"uint64"` // The number of acknowledged packets in receiver's buffer + ByteRcvBuf uint64 `json:"recv_buf_bytes" format:"uint64"` // Instantaneous (current) value of pktRcvBuf, expressed in bytes, including payload and all headers (IP, TCP, SRT) + MsRcvBuf uint64 `json:"recv_buf_ms" format:"uint64"` // The timespan (msec) of acknowledged packets in the receiver's buffer + MsRcvTsbPdDelay uint64 `json:"recv_tsbpd_delay_ms" format:"uint64"` // Timestamp-based Packet Delivery Delay value set on the socket via SRTO_RCVLATENCY or SRTO_LATENCY + PktReorderTolerance uint64 `json:"reorder_tolerance_pkt" format:"uint64"` // Instant value of the packet reorder tolerance + PktRcvAvgBelatedTime uint64 `json:"pkt_recv_avg_belated_time_ms" format:"uint64"` // Accumulated difference between the current time and the time-to-play of a packet that is received late } // Unmarshal converts the SRT statistics into API representation func (s *SRTStatistics) Unmarshal(ss *gosrt.Statistics) { s.MsTimeStamp = ss.MsTimeStamp - s.PktSent = ss.PktSent - s.PktRecv = ss.PktRecv - s.PktSentUnique = ss.PktSentUnique - s.PktRecvUnique = ss.PktRecvUnique - s.PktSndLoss = ss.PktSndLoss - s.PktRcvLoss = ss.PktRcvLoss - s.PktRetrans = ss.PktRetrans - s.PktRcvRetrans = ss.PktRcvRetrans - s.PktSentACK = ss.PktSentACK - s.PktRecvACK = ss.PktRecvACK - s.PktSentNAK = ss.PktSentNAK - s.PktRecvNAK = ss.PktRecvNAK - s.PktSentKM = ss.PktSentKM - s.PktRecvKM = ss.PktRecvKM - s.UsSndDuration = ss.UsSndDuration - s.PktSndDrop = ss.PktSndDrop - 
s.PktRcvDrop = ss.PktRcvDrop - s.PktRcvUndecrypt = ss.PktRcvUndecrypt + s.PktSent = ss.Accumulated.PktSent + s.PktRecv = ss.Accumulated.PktRecv + s.PktSentUnique = ss.Accumulated.PktSentUnique + s.PktRecvUnique = ss.Accumulated.PktRecvUnique + s.PktSndLoss = ss.Accumulated.PktSendLoss + s.PktRcvLoss = ss.Accumulated.PktRecvLoss + s.PktRetrans = ss.Accumulated.PktRetrans + s.PktRcvRetrans = ss.Accumulated.PktRecvRetrans + s.PktSentACK = ss.Accumulated.PktSentACK + s.PktRecvACK = ss.Accumulated.PktRecvACK + s.PktSentNAK = ss.Accumulated.PktSentNAK + s.PktRecvNAK = ss.Accumulated.PktRecvNAK + s.PktSentKM = ss.Accumulated.PktSentKM + s.PktRecvKM = ss.Accumulated.PktRecvKM + s.UsSndDuration = ss.Accumulated.UsSndDuration + s.PktSndDrop = ss.Accumulated.PktSendDrop + s.PktRcvDrop = ss.Accumulated.PktRecvDrop + s.PktRcvUndecrypt = ss.Accumulated.PktRecvUndecrypt - s.ByteSent = ss.ByteSent - s.ByteRecv = ss.ByteRecv - s.ByteSentUnique = ss.ByteSentUnique - s.ByteRecvUnique = ss.ByteRecvUnique - s.ByteRcvLoss = ss.ByteRcvLoss - s.ByteRetrans = ss.ByteRetrans - s.ByteSndDrop = ss.ByteSndDrop - s.ByteRcvDrop = ss.ByteRcvDrop - s.ByteRcvUndecrypt = ss.ByteRcvUndecrypt + s.ByteSent = ss.Accumulated.ByteSent + s.ByteRecv = ss.Accumulated.ByteRecv + s.ByteSentUnique = ss.Accumulated.ByteSentUnique + s.ByteRecvUnique = ss.Accumulated.ByteRecvUnique + s.ByteRcvLoss = ss.Accumulated.ByteRecvLoss + s.ByteRetrans = ss.Accumulated.ByteRetrans + s.ByteSndDrop = ss.Accumulated.ByteSendDrop + s.ByteRcvDrop = ss.Accumulated.ByteRecvDrop + s.ByteRcvUndecrypt = ss.Accumulated.ByteRecvUndecrypt + + s.UsPktSndPeriod = ss.Instantaneous.UsPktSendPeriod + s.PktFlowWindow = ss.Instantaneous.PktFlowWindow + s.PktFlightSize = ss.Instantaneous.PktFlightSize + s.MsRTT = ss.Instantaneous.MsRTT + s.MbpsBandwidth = ss.Instantaneous.MbpsLinkCapacity + s.ByteAvailSndBuf = ss.Instantaneous.ByteAvailSendBuf + s.ByteAvailRcvBuf = ss.Instantaneous.ByteAvailRecvBuf + s.MbpsMaxBW = ss.Instantaneous.MbpsMaxBW + s.ByteMSS = ss.Instantaneous.ByteMSS + s.PktSndBuf = ss.Instantaneous.PktSendBuf + s.ByteSndBuf = ss.Instantaneous.ByteSendBuf + s.MsSndBuf = ss.Instantaneous.MsSendBuf + s.MsSndTsbPdDelay = ss.Instantaneous.MsSendTsbPdDelay + s.PktRcvBuf = ss.Instantaneous.PktRecvBuf + s.ByteRcvBuf = ss.Instantaneous.ByteRecvBuf + s.MsRcvBuf = ss.Instantaneous.MsRecvBuf + s.MsRcvTsbPdDelay = ss.Instantaneous.MsRecvTsbPdDelay + s.PktReorderTolerance = ss.Instantaneous.PktReorderTolerance + s.PktRcvAvgBelatedTime = ss.Instantaneous.PktRecvAvgBelatedTime } type SRTLog struct { - Timestamp int64 `json:"ts"` + Timestamp int64 `json:"ts" format:"int64"` Message []string `json:"msg"` } diff --git a/http/api/widget.go b/http/api/widget.go index d0f35e6c..5d91bda6 100644 --- a/http/api/widget.go +++ b/http/api/widget.go @@ -1,7 +1,7 @@ package api type WidgetProcess struct { - CurrentSessions uint64 `json:"current_sessions"` - TotalSessions uint64 `json:"total_sessions"` + CurrentSessions uint64 `json:"current_sessions" format:"uint64"` + TotalSessions uint64 `json:"total_sessions" format:"uint64"` Uptime int64 `json:"uptime"` } diff --git a/http/fs/cluster.go b/http/fs/cluster.go index e42d091a..309f8ca2 100644 --- a/http/fs/cluster.go +++ b/http/fs/cluster.go @@ -2,6 +2,7 @@ package fs import ( "io" + gofs "io/fs" "time" "github.com/datarhei/core/v16/cluster" @@ -15,14 +16,18 @@ type Filesystem interface { type filesystem struct { fs.Filesystem - what string + name string cluster cluster.ClusterReader } -func NewClusterFS(what string, fs fs.Filesystem, 
cluster cluster.Cluster) Filesystem { +func NewClusterFS(name string, fs fs.Filesystem, cluster cluster.Cluster) Filesystem { + if cluster == nil { + return fs + } + f := &filesystem{ Filesystem: fs, - what: what, + name: name, cluster: cluster, } @@ -36,7 +41,7 @@ func (fs *filesystem) Open(path string) fs.File { } // Check if the file is available in the cluster - data, err := fs.cluster.GetFile(fs.what + ":" + path) + data, err := fs.cluster.GetFile(fs.name + ":" + path) if err != nil { return nil } @@ -63,6 +68,10 @@ func (f *file) Stat() (fs.FileInfo, error) { return f, nil } +func (f *file) Mode() gofs.FileMode { + return gofs.FileMode(gofs.ModePerm) +} + func (f *file) Size() int64 { return 0 } diff --git a/http/fs/fs.go b/http/fs/fs.go new file mode 100644 index 00000000..500ab733 --- /dev/null +++ b/http/fs/fs.go @@ -0,0 +1,25 @@ +package fs + +import ( + "github.com/datarhei/core/v16/http/cache" + "github.com/datarhei/core/v16/io/fs" +) + +type FS struct { + Name string + Mountpoint string + + AllowWrite bool + + EnableAuth bool + Username string + Password string + + DefaultFile string + DefaultContentType string + Gzip bool + + Filesystem fs.Filesystem + + Cache cache.Cacher +} diff --git a/http/handler/api/config.go b/http/handler/api/config.go index 8d137358..d2484de2 100644 --- a/http/handler/api/config.go +++ b/http/handler/api/config.go @@ -1,11 +1,14 @@ package api import ( + "io" "net/http" + "time" - "github.com/datarhei/core/v16/config" + cfgstore "github.com/datarhei/core/v16/config/store" + cfgvars "github.com/datarhei/core/v16/config/vars" + "github.com/datarhei/core/v16/encoding/json" "github.com/datarhei/core/v16/http/api" - "github.com/datarhei/core/v16/http/handler/util" "github.com/labstack/echo/v4" ) @@ -13,11 +16,11 @@ import ( // The ConfigHandler type provides handler functions for reading and manipulating // the current config. type ConfigHandler struct { - store config.Store + store cfgstore.Store } // NewConfig return a new Config type. You have to provide a valid config store. -func NewConfig(store config.Store) *ConfigHandler { +func NewConfig(store cfgstore.Store) *ConfigHandler { return &ConfigHandler{ store: store, } @@ -26,6 +29,7 @@ func NewConfig(store config.Store) *ConfigHandler { // Get returns the currently active Restreamer configuration // @Summary Retrieve the currently active Restreamer configuration // @Description Retrieve the currently active Restreamer configuration +// @Tags v16.7.2 // @ID config-3-get // @Produce json // @Success 200 {object} api.Config @@ -43,6 +47,7 @@ func (p *ConfigHandler) Get(c echo.Context) error { // Set will set the given configuration as new active configuration // @Summary Update the current Restreamer configuration // @Description Update the current Restreamer configuration by providing a complete or partial configuration. Fields that are not provided will not be changed. +// @Tags v16.7.2 // @ID config-3-set // @Accept json // @Produce json @@ -53,25 +58,80 @@ func (p *ConfigHandler) Get(c echo.Context) error { // @Security ApiKeyAuth // @Router /api/v3/config [put] func (p *ConfigHandler) Set(c echo.Context) error { - cfg := p.store.Get() + version := api.ConfigVersion{} - // Set the current config as default config value. This will - // allow to set a partial config without destroying the other - // values. 
- setConfig := api.NewSetConfig(cfg) + req := c.Request() - if err := util.ShouldBindJSON(c, &setConfig); err != nil { + body, err := io.ReadAll(req.Body) + if err != nil { return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err) } - // Merge it into the current config - setConfig.MergeTo(cfg) + if err := json.Unmarshal(body, &version); err != nil { + return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", json.FormatError(body, err)) + } + + cfg := p.store.Get() + cfgActive := p.store.GetActive() + + // Copy the timestamp of when this config has been used + cfg.LoadedAt = cfgActive.LoadedAt + + // For each version, set the current config as the default config value. This will + // allow setting a partial config without destroying the other values. + if version.Version == 1 { + // Downgrade to v1 in order to have a populated v1 config + v1SetConfig := api.NewSetConfigV1(cfg) + + if err := json.Unmarshal(body, &v1SetConfig); err != nil { + return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", json.FormatError(body, err)) + } + + if err := c.Validate(v1SetConfig); err != nil { + return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err) + } + + // Merge it into the current config + v1SetConfig.MergeTo(cfg) + } else if version.Version == 2 { + // Downgrade to v2 in order to have a populated v2 config + v2SetConfig := api.NewSetConfigV2(cfg) + + if err := json.Unmarshal(body, &v2SetConfig); err != nil { + return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", json.FormatError(body, err)) + } + + if err := c.Validate(v2SetConfig); err != nil { + return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err) + } + + // Merge it into the current config + v2SetConfig.MergeTo(cfg) + } else if version.Version == 3 { + v3SetConfig := api.NewSetConfig(cfg) + + if err := json.Unmarshal(body, &v3SetConfig); err != nil { + return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", json.FormatError(body, err)) + } + + if err := c.Validate(v3SetConfig); err != nil { + return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err) + } + + // Merge it into the current config + v3SetConfig.MergeTo(cfg) + } else { + return api.Err(http.StatusBadRequest, "Invalid config version", "version %d", version.Version) + } + + cfg.CreatedAt = time.Now() + cfg.UpdatedAt = cfg.CreatedAt // Now we make a copy from the config and merge it with the environment // variables. If this configuration is valid, we will store the un-merged // one to disk. - mergedConfig := config.NewConfigFrom(cfg) + mergedConfig := cfg.Clone() mergedConfig.Merge() // Validate the new merged config @@ -79,7 +139,7 @@ func (p *ConfigHandler) Set(c echo.Context) error { if mergedConfig.HasErrors() { errors := make(map[string][]string) - mergedConfig.Messages(func(level string, v config.Variable, message string) { + mergedConfig.Messages(func(level string, v cfgvars.Variable, message string) { if level != "error" { return } @@ -105,14 +165,15 @@ func (p *ConfigHandler) Set(c echo.Context) error { // Reload will reload the currently active configuration // @Summary Reload the currently active configuration -// @Description Reload the currently active configuration. This will trigger a restart of the Restreamer. +// @Description Reload the currently active configuration. This will trigger a restart of the Core.
+// @Tags v16.7.2 // @ID config-3-reload -// @Produce plain -// @Success 200 {string} string "OK" +// @Produce json +// @Success 200 {string} string // @Security ApiKeyAuth // @Router /api/v3/config/reload [get] func (p *ConfigHandler) Reload(c echo.Context) error { p.store.Reload() - return c.String(http.StatusOK, "OK") + return c.JSON(http.StatusOK, "OK") } diff --git a/http/handler/api/config_test.go b/http/handler/api/config_test.go index bf6d211f..0410eaf2 100644 --- a/http/handler/api/config_test.go +++ b/http/handler/api/config_test.go @@ -4,28 +4,43 @@ import ( "bytes" "encoding/json" "net/http" + "strings" "testing" "github.com/datarhei/core/v16/config" + "github.com/datarhei/core/v16/config/store" + v1 "github.com/datarhei/core/v16/config/v1" "github.com/datarhei/core/v16/http/mock" + "github.com/datarhei/core/v16/io/fs" "github.com/labstack/echo/v4" + "github.com/stretchr/testify/require" ) -func getDummyConfigRouter() *echo.Echo { +func getDummyConfigRouter(t *testing.T) (*echo.Echo, store.Store) { router := mock.DummyEcho() - config := config.NewDummyStore() + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + require.NoError(t, err) + + _, _, err = memfs.WriteFileReader("./mime.types", strings.NewReader("xxxxx")) + require.NoError(t, err) + + _, _, err = memfs.WriteFileReader("/bin/ffmpeg", strings.NewReader("xxxxx")) + require.NoError(t, err) + + config, err := store.NewJSON(memfs, "/config.json", nil) + require.NoError(t, err) handler := NewConfig(config) router.Add("GET", "/", handler.Get) router.Add("PUT", "/", handler.Set) - return router + return router, config } func TestConfigGet(t *testing.T) { - router := getDummyConfigRouter() + router, _ := getDummyConfigRouter(t) mock.Request(t, http.StatusOK, router, "GET", "/", nil) @@ -33,29 +48,96 @@ func TestConfigGet(t *testing.T) { } func TestConfigSetConflict(t *testing.T) { - router := getDummyConfigRouter() + router, _ := getDummyConfigRouter(t) + + cfg := config.New(nil) + cfg.Storage.MimeTypes = "/path/to/mime.types" var data bytes.Buffer encoder := json.NewEncoder(&data) - encoder.Encode(config.New()) + encoder.Encode(cfg) mock.Request(t, http.StatusConflict, router, "PUT", "/", &data) } func TestConfigSet(t *testing.T) { - router := getDummyConfigRouter() + router, store := getDummyConfigRouter(t) + + storedcfg := store.Get() + + require.Equal(t, []string{}, storedcfg.Host.Name) var data bytes.Buffer + encoder := json.NewEncoder(&data) - cfg := config.New() - cfg.FFmpeg.Binary = "true" + // Setting a new v3 config + cfg := config.New(nil) cfg.DB.Dir = "." cfg.Storage.Disk.Dir = "." - cfg.Storage.MimeTypes = "" + cfg.Storage.Disk.Cache.Types.Allow = []string{".aaa"} + cfg.Storage.Disk.Cache.Types.Block = []string{".zzz"} + cfg.Host.Name = []string{"foobar.com"} - encoder := json.NewEncoder(&data) encoder.Encode(cfg) mock.Request(t, http.StatusOK, router, "PUT", "/", &data) + + storedcfg = store.Get() + + require.Equal(t, []string{"foobar.com"}, storedcfg.Host.Name) + require.Equal(t, []string{".aaa"}, cfg.Storage.Disk.Cache.Types.Allow) + require.Equal(t, []string{".zzz"}, cfg.Storage.Disk.Cache.Types.Block) + require.Equal(t, "cert@datarhei.com", cfg.TLS.Email) + + // Setting a complete v1 config + cfgv1 := v1.New(nil) + cfgv1.DB.Dir = "." + cfgv1.Storage.Disk.Dir = "." 
+ cfgv1.Storage.Disk.Cache.Types = []string{".bbb"} + cfgv1.Host.Name = []string{"foobar.com"} + + data.Reset() + + encoder.Encode(cfgv1) + + mock.Request(t, http.StatusOK, router, "PUT", "/", &data) + + storedcfg = store.Get() + + require.Equal(t, []string{"foobar.com"}, storedcfg.Host.Name) + require.Equal(t, []string{".bbb"}, storedcfg.Storage.Disk.Cache.Types.Allow) + require.Equal(t, []string{".zzz"}, storedcfg.Storage.Disk.Cache.Types.Block) + require.Equal(t, "cert@datarhei.com", cfg.TLS.Email) + + // Setting a partial v1 config + type customconfig struct { + Version int `json:"version"` + Storage struct { + Disk struct { + Cache struct { + Types []string `json:"types"` + } `json:"cache"` + } `json:"disk"` + } `json:"storage"` + } + + customcfg := customconfig{ + Version: 1, + } + + customcfg.Storage.Disk.Cache.Types = []string{".ccc"} + + data.Reset() + + encoder.Encode(customcfg) + + mock.Request(t, http.StatusOK, router, "PUT", "/", &data) + + storedcfg = store.Get() + + require.Equal(t, []string{"foobar.com"}, storedcfg.Host.Name) + require.Equal(t, []string{".ccc"}, storedcfg.Storage.Disk.Cache.Types.Allow) + require.Equal(t, []string{".zzz"}, storedcfg.Storage.Disk.Cache.Types.Block) + require.Equal(t, "cert@datarhei.com", cfg.TLS.Email) } diff --git a/http/handler/api/diskfs.go b/http/handler/api/diskfs.go deleted file mode 100644 index b4bc7fb7..00000000 --- a/http/handler/api/diskfs.go +++ /dev/null @@ -1,211 +0,0 @@ -package api - -import ( - "net/http" - "path/filepath" - "sort" - - "github.com/datarhei/core/v16/http/api" - "github.com/datarhei/core/v16/http/cache" - "github.com/datarhei/core/v16/http/handler" - "github.com/datarhei/core/v16/http/handler/util" - "github.com/datarhei/core/v16/io/fs" - - "github.com/labstack/echo/v4" -) - -// The DiskFSHandler type provides handlers for manipulating a filesystem -type DiskFSHandler struct { - cache cache.Cacher - filesystem fs.Filesystem - handler *handler.DiskFSHandler -} - -// NewDiskFS return a new DiskFS type. You have to provide a filesystem to act on and optionally -// a Cacher where files will be purged from if the Cacher is related to the filesystem. -func NewDiskFS(fs fs.Filesystem, cache cache.Cacher) *DiskFSHandler { - return &DiskFSHandler{ - cache: cache, - filesystem: fs, - handler: handler.NewDiskFS(fs, cache), - } -} - -// GetFile returns the file at the given path -// @Summary Fetch a file from the filesystem -// @Description Fetch a file from the filesystem. The contents of that file are returned. 
-// @ID diskfs-3-get-file -// @Produce application/data -// @Produce json -// @Param path path string true "Path to file" -// @Success 200 {file} byte -// @Success 301 {string} string -// @Failure 404 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/disk/{path} [get] -func (h *DiskFSHandler) GetFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - mimeType := c.Response().Header().Get(echo.HeaderContentType) - c.Response().Header().Del(echo.HeaderContentType) - - file := h.filesystem.Open(path) - if file == nil { - return api.Err(http.StatusNotFound, "File not found", path) - } - - stat, _ := file.Stat() - - if stat.IsDir() { - return api.Err(http.StatusNotFound, "File not found", path) - } - - defer file.Close() - - c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT")) - - if path, ok := stat.IsLink(); ok { - path = filepath.Clean("/" + path) - - if path[0] == '/' { - path = path[1:] - } - - return c.Redirect(http.StatusMovedPermanently, path) - } - - c.Response().Header().Set(echo.HeaderContentType, mimeType) - - if c.Request().Method == "HEAD" { - return c.Blob(http.StatusOK, "application/data", nil) - } - - return c.Stream(http.StatusOK, "application/data", file) -} - -// PutFile adds or overwrites a file at the given path -// @Summary Add a file to the filesystem -// @Description Writes or overwrites a file on the filesystem -// @ID diskfs-3-put-file -// @Accept application/data -// @Produce text/plain -// @Produce json -// @Param path path string true "Path to file" -// @Param data body []byte true "File data" -// @Success 201 {string} string -// @Success 204 {string} string -// @Failure 507 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/disk/{path} [put] -func (h *DiskFSHandler) PutFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - req := c.Request() - - _, created, err := h.filesystem.Store(path, req.Body) - if err != nil { - return api.Err(http.StatusBadRequest, "%s", err) - } - - if h.cache != nil { - h.cache.Delete(path) - } - - c.Response().Header().Set("Content-Location", req.URL.RequestURI()) - - if created { - return c.String(http.StatusCreated, path) - } - - return c.NoContent(http.StatusNoContent) -} - -// DeleteFile removes a file from the filesystem -// @Summary Remove a file from the filesystem -// @Description Remove a file from the filesystem -// @ID diskfs-3-delete-file -// @Produce text/plain -// @Param path path string true "Path to file" -// @Success 200 {string} string -// @Failure 404 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/disk/{path} [delete] -func (h *DiskFSHandler) DeleteFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - size := h.filesystem.Delete(path) - - if size < 0 { - return api.Err(http.StatusNotFound, "File not found", path) - } - - if h.cache != nil { - h.cache.Delete(path) - } - - return c.String(http.StatusOK, "OK") -} - -// ListFiles lists all files on the filesystem -// @Summary List all files on the filesystem -// @Description List all files on the filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order. 
-// @ID diskfs-3-list-files -// @Produce json -// @Param glob query string false "glob pattern for file names" -// @Param sort query string false "none, name, size, lastmod" -// @Param order query string false "asc, desc" -// @Success 200 {array} api.FileInfo -// @Security ApiKeyAuth -// @Router /api/v3/fs/disk [get] -func (h *DiskFSHandler) ListFiles(c echo.Context) error { - pattern := util.DefaultQuery(c, "glob", "") - sortby := util.DefaultQuery(c, "sort", "none") - order := util.DefaultQuery(c, "order", "asc") - - files := h.filesystem.List(pattern) - - var sortFunc func(i, j int) bool - - switch sortby { - case "name": - if order == "desc" { - sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() } - } else { - sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() } - } - case "size": - if order == "desc" { - sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() } - } else { - sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() } - } - default: - if order == "asc" { - sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) } - } else { - sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) } - } - } - - sort.Slice(files, sortFunc) - - fileinfos := []api.FileInfo{} - - for _, f := range files { - if f.IsDir() { - continue - } - - fileinfos = append(fileinfos, api.FileInfo{ - Name: f.Name(), - Size: f.Size(), - LastMod: f.ModTime().Unix(), - }) - } - - return c.JSON(http.StatusOK, fileinfos) -} diff --git a/http/handler/api/filesystems.go b/http/handler/api/filesystems.go new file mode 100644 index 00000000..4b42a56c --- /dev/null +++ b/http/handler/api/filesystems.go @@ -0,0 +1,151 @@ +package api + +import ( + "net/http" + + "github.com/datarhei/core/v16/http/api" + "github.com/datarhei/core/v16/http/handler" + "github.com/datarhei/core/v16/http/handler/util" + + "github.com/labstack/echo/v4" +) + +type FSConfig struct { + Type string + Mountpoint string + Handler *handler.FSHandler +} + +// The FSHandler type provides handlers for manipulating a filesystem +type FSHandler struct { + filesystems map[string]FSConfig +} + +// NewFS returns a new FSHandler type. You have to provide the filesystems to act on.
+func NewFS(filesystems map[string]FSConfig) *FSHandler { + return &FSHandler{ + filesystems: filesystems, + } +} + +// GetFile returns the file at the given path +// @Summary Fetch a file from a filesystem +// @Description Fetch a file from a filesystem +// @Tags v16.7.2 +// @ID filesystem-3-get-file +// @Produce application/data +// @Produce json +// @Param storage path string true "Name of the filesystem" +// @Param filepath path string true "Path to file" +// @Success 200 {file} byte +// @Success 301 {string} string +// @Failure 404 {object} api.Error +// @Security ApiKeyAuth +// @Router /api/v3/fs/{storage}/{filepath} [get] +func (h *FSHandler) GetFile(c echo.Context) error { + name := util.PathParam(c, "name") + + config, ok := h.filesystems[name] + if !ok { + return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name) + } + + return config.Handler.GetFile(c) +} + +// PutFile adds or overwrites a file at the given path +// @Summary Add a file to a filesystem +// @Description Writes or overwrites a file on a filesystem +// @Tags v16.7.2 +// @ID filesystem-3-put-file +// @Accept application/data +// @Produce text/plain +// @Produce json +// @Param storage path string true "Name of the filesystem" +// @Param filepath path string true "Path to file" +// @Param data body []byte true "File data" +// @Success 201 {string} string +// @Success 204 {string} string +// @Failure 507 {object} api.Error +// @Security ApiKeyAuth +// @Router /api/v3/fs/{storage}/{filepath} [put] +func (h *FSHandler) PutFile(c echo.Context) error { + name := util.PathParam(c, "name") + + config, ok := h.filesystems[name] + if !ok { + return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name) + } + + return config.Handler.PutFile(c) +} + +// DeleteFile removes a file from a filesystem +// @Summary Remove a file from a filesystem +// @Description Remove a file from a filesystem +// @Tags v16.7.2 +// @ID filesystem-3-delete-file +// @Produce text/plain +// @Param storage path string true "Name of the filesystem" +// @Param filepath path string true "Path to file" +// @Success 200 {string} string +// @Failure 404 {object} api.Error +// @Security ApiKeyAuth +// @Router /api/v3/fs/{storage}/{filepath} [delete] +func (h *FSHandler) DeleteFile(c echo.Context) error { + name := util.PathParam(c, "name") + + config, ok := h.filesystems[name] + if !ok { + return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name) + } + + return config.Handler.DeleteFile(c) +} + +// ListFiles lists all files on a filesystem +// @Summary List all files on a filesystem +// @Description List all files on a filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order.
+// @Tags v16.7.2
+// @ID filesystem-3-list-files
+// @Produce json
+// @Param storage path string true "Name of the filesystem"
+// @Param glob query string false "glob pattern for file names"
+// @Param sort query string false "none, name, size, lastmod"
+// @Param order query string false "asc, desc"
+// @Success 200 {array} api.FileInfo
+// @Security ApiKeyAuth
+// @Router /api/v3/fs/{storage} [get]
+func (h *FSHandler) ListFiles(c echo.Context) error {
+	name := util.PathParam(c, "name")
+
+	config, ok := h.filesystems[name]
+	if !ok {
+		return api.Err(http.StatusNotFound, "File not found", "unknown filesystem: %s", name)
+	}
+
+	return config.Handler.ListFiles(c)
+}
+
+// List lists all registered filesystems
+// @Summary List all registered filesystems
+// @Description List all registered filesystems
+// @Tags v16.12.0
+// @ID filesystem-3-list
+// @Produce json
+// @Success 200 {array} api.FilesystemInfo
+// @Security ApiKeyAuth
+// @Router /api/v3/fs [get]
+func (h *FSHandler) List(c echo.Context) error {
+	fss := []api.FilesystemInfo{}
+
+	for name, config := range h.filesystems {
+		fss = append(fss, api.FilesystemInfo{
+			Name:  name,
+			Type:  config.Type,
+			Mount: config.Mountpoint,
+		})
+	}
+
+	return c.JSON(http.StatusOK, fss)
+}
diff --git a/http/handler/api/log.go b/http/handler/api/log.go
index 22c33864..8ef4dbed 100644
--- a/http/handler/api/log.go
+++ b/http/handler/api/log.go
@@ -31,6 +31,7 @@ func NewLog(buffer log.BufferWriter) *LogHandler {
 // Log returns the last log lines of the Restreamer application
 // @Summary Application log
 // @Description Get the last log lines of the Restreamer application
+// @Tags v16.7.2
 // @ID log-3
 // @Param format query string false "Format of the list of log events (*console, raw)"
 // @Produce json
diff --git a/http/handler/api/memfs.go b/http/handler/api/memfs.go
deleted file mode 100644
index 2c6bd101..00000000
--- a/http/handler/api/memfs.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package api
-
-import (
-	"io"
-	"net/http"
-	"net/url"
-	"sort"
-
-	"github.com/datarhei/core/v16/http/api"
-	"github.com/datarhei/core/v16/http/handler"
-	"github.com/datarhei/core/v16/http/handler/util"
-	"github.com/datarhei/core/v16/io/fs"
-
-	"github.com/labstack/echo/v4"
-)
-
-// The MemFSHandler type provides handlers for manipulating a filesystem
-type MemFSHandler struct {
-	filesystem fs.Filesystem
-	handler    *handler.MemFSHandler
-}
-
-// NewMemFS return a new MemFS type. You have to provide a filesystem to act on.
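
// ---------------------------------------------------------------------------
// Editorial aside (not part of the patch): the List handler added above can
// be exercised on its own with echo's test helpers, in the same style as the
// cache middleware test later in this diff. "h" is the FSHandler from the
// previous aside.
//
//	e := echo.New()
//	req := httptest.NewRequest(http.MethodGet, "/api/v3/fs", nil)
//	rec := httptest.NewRecorder()
//
//	if err := h.List(e.NewContext(req, rec)); err == nil {
//		fmt.Println(rec.Body.String()) // JSON array of api.FilesystemInfo
//	}
//
// Since h.filesystems is a map and Go randomizes map iteration, the order of
// the returned entries is unspecified.
// ---------------------------------------------------------------------------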
-func NewMemFS(fs fs.Filesystem) *MemFSHandler { - return &MemFSHandler{ - filesystem: fs, - handler: handler.NewMemFS(fs), - } -} - -// GetFileAPI returns the file at the given path -// @Summary Fetch a file from the memory filesystem -// @Description Fetch a file from the memory filesystem -// @ID memfs-3-get-file -// @Produce application/data -// @Produce json -// @Param path path string true "Path to file" -// @Success 200 {file} byte -// @Success 301 {string} string -// @Failure 404 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem/{path} [get] -func (h *MemFSHandler) GetFile(c echo.Context) error { - return h.handler.GetFile(c) -} - -// PutFileAPI adds or overwrites a file at the given path -// @Summary Add a file to the memory filesystem -// @Description Writes or overwrites a file on the memory filesystem -// @ID memfs-3-put-file -// @Accept application/data -// @Produce text/plain -// @Produce json -// @Param path path string true "Path to file" -// @Param data body []byte true "File data" -// @Success 201 {string} string -// @Success 204 {string} string -// @Failure 507 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem/{path} [put] -func (h *MemFSHandler) PutFile(c echo.Context) error { - return h.handler.PutFile(c) -} - -// DeleteFileAPI removes a file from the filesystem -// @Summary Remove a file from the memory filesystem -// @Description Remove a file from the memory filesystem -// @ID memfs-3-delete-file -// @Produce text/plain -// @Param path path string true "Path to file" -// @Success 200 {string} string -// @Failure 404 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem/{path} [delete] -func (h *MemFSHandler) DeleteFile(c echo.Context) error { - return h.handler.DeleteFile(c) -} - -// PatchFile creates a symbolic link to a file in the filesystem -// @Summary Create a link to a file in the memory filesystem -// @Description Create a link to a file in the memory filesystem. The file linked to has to exist. -// @ID memfs-3-patch -// @Accept application/data -// @Produce text/plain -// @Produce json -// @Param path path string true "Path to file" -// @Param url body string true "Path to the file to link to" -// @Success 201 {string} string -// @Failure 400 {object} api.Error -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem/{path} [patch] -func (h *MemFSHandler) PatchFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - req := c.Request() - - body, err := io.ReadAll(req.Body) - if err != nil { - return api.Err(http.StatusBadRequest, "Failed reading request body", "%s", err) - } - - u, err := url.Parse(string(body)) - if err != nil { - return api.Err(http.StatusBadRequest, "Body doesn't contain a valid path", "%s", err) - } - - if err := h.filesystem.Symlink(u.Path, path); err != nil { - return api.Err(http.StatusBadRequest, "Failed to create symlink", "%s", err) - } - - c.Response().Header().Set("Content-Location", req.URL.RequestURI()) - - return c.String(http.StatusCreated, "") -} - -// ListFiles lists all files on the filesystem -// @Summary List all files on the memory filesystem -// @Description List all files on the memory filesystem. The listing can be ordered by name, size, or date of last modification in ascending or descending order. 
-// @ID memfs-3-list-files -// @Produce json -// @Param glob query string false "glob pattern for file names" -// @Param sort query string false "none, name, size, lastmod" -// @Param order query string false "asc, desc" -// @Success 200 {array} api.FileInfo -// @Security ApiKeyAuth -// @Router /api/v3/fs/mem [get] -func (h *MemFSHandler) ListFiles(c echo.Context) error { - pattern := util.DefaultQuery(c, "glob", "") - sortby := util.DefaultQuery(c, "sort", "none") - order := util.DefaultQuery(c, "order", "asc") - - files := h.filesystem.List(pattern) - - var sortFunc func(i, j int) bool - - switch sortby { - case "name": - if order == "desc" { - sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() } - } else { - sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() } - } - case "size": - if order == "desc" { - sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() } - } else { - sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() } - } - default: - if order == "asc" { - sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) } - } else { - sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) } - } - } - - sort.Slice(files, sortFunc) - - var fileinfos []api.FileInfo = make([]api.FileInfo, len(files)) - - for i, f := range files { - fileinfos[i] = api.FileInfo{ - Name: f.Name(), - Size: f.Size(), - LastMod: f.ModTime().Unix(), - } - } - - return c.JSON(http.StatusOK, fileinfos) -} diff --git a/http/handler/api/metrics.go b/http/handler/api/metrics.go index d1356686..06bd80f7 100644 --- a/http/handler/api/metrics.go +++ b/http/handler/api/metrics.go @@ -32,6 +32,7 @@ func NewMetrics(config MetricsConfig) *MetricsHandler { // Describe the known metrics // @Summary List all known metrics with their description and labels // @Description List all known metrics with their description and labels +// @Tags v16.10.0 // @ID metrics-3-describe // @Produce json // @Success 200 {array} api.MetricsDescription @@ -60,6 +61,7 @@ func (r *MetricsHandler) Describe(c echo.Context) error { // Query the collected metrics // @Summary Query the collected metrics // @Description Query the collected metrics +// @Tags v16.7.2 // @ID metrics-3-metrics // @Accept json // @Produce json diff --git a/http/handler/api/playout.go b/http/handler/api/playout.go index 9387172f..cc073001 100644 --- a/http/handler/api/playout.go +++ b/http/handler/api/playout.go @@ -31,6 +31,7 @@ func NewPlayout(restream restream.Restreamer) *PlayoutHandler { // Status return the current playout status // @Summary Get the current playout status // @Description Get the current playout status of an input of a process +// @Tags v16.7.2 // @ID process-3-playout-status // @Produce json // @Param id path string true "Process ID" @@ -84,6 +85,7 @@ func (h *PlayoutHandler) Status(c echo.Context) error { // Keyframe returns the last keyframe // @Summary Get the last keyframe // @Description Get the last keyframe of an input of a process. The extension of the name determines the return type. 
+// @Tags v16.7.2 // @ID process-3-playout-keyframe // @Produce image/jpeg // @Produce image/png @@ -133,6 +135,7 @@ func (h *PlayoutHandler) Keyframe(c echo.Context) error { // EncodeErrorframe encodes the errorframe // @Summary Encode the errorframe // @Description Immediately encode the errorframe (if available and looping) +// @Tags v16.7.2 // @ID process-3-playout-errorframencode // @Produce text/plain // @Produce json @@ -173,6 +176,7 @@ func (h *PlayoutHandler) EncodeErrorframe(c echo.Context) error { // SetErrorframe sets an errorframe // @Summary Upload an error frame // @Description Upload an error frame which will be encoded immediately +// @Tags v16.7.2 // @ID process-3-playout-errorframe // @Produce text/plain // @Produce json @@ -221,6 +225,7 @@ func (h *PlayoutHandler) SetErrorframe(c echo.Context) error { // ReopenInput closes the current input stream // @Summary Close the current input stream // @Description Close the current input stream such that it will be automatically re-opened +// @Tags v16.7.2 // @ID process-3-playout-reopen-input // @Produce plain // @Param id path string true "Process ID" @@ -260,6 +265,7 @@ func (h *PlayoutHandler) ReopenInput(c echo.Context) error { // SetStream replaces the current stream // @Summary Switch to a new stream // @Description Replace the current stream with the one from the given URL. The switch will only happen if the stream parameters match. +// @Tags v16.7.2 // @ID process-3-playout-stream // @Produce text/plain // @Produce json diff --git a/http/handler/api/restream.go b/http/handler/api/restream.go index 4ffa284e..c61f363a 100644 --- a/http/handler/api/restream.go +++ b/http/handler/api/restream.go @@ -27,6 +27,7 @@ func NewRestream(restream restream.Restreamer) *RestreamHandler { // Add adds a new process // @Summary Add a new process // @Description Add a new FFmpeg process +// @Tags v16.7.2 // @ID process-3-add // @Accept json // @Produce json @@ -50,7 +51,7 @@ func (h *RestreamHandler) Add(c echo.Context) error { return api.Err(http.StatusBadRequest, "Unsupported process type", "Supported process types are: ffmpeg") } - if len(process.Input) == 0 && len(process.Output) == 0 { + if len(process.Input) == 0 || len(process.Output) == 0 { return api.Err(http.StatusBadRequest, "At least one input and one output need to be defined") } @@ -68,6 +69,7 @@ func (h *RestreamHandler) Add(c echo.Context) error { // GetAll returns all known processes // @Summary List all known processes // @Description List all known processes. Use the query parameter to filter the listed processes. +// @Tags v16.7.2 // @ID process-3-get-all // @Produce json // @Param filter query string false "Comma separated list of fields (config, state, report, metadata) that will be part of the output. If empty, all fields will be part of the output." @@ -118,6 +120,7 @@ func (h *RestreamHandler) GetAll(c echo.Context) error { // Get returns the process with the given ID // @Summary List a process by its ID // @Description List a process by its ID. Use the filter parameter to specifiy the level of detail of the output. 
+// @Tags v16.7.2 // @ID process-3-get // @Produce json // @Param id path string true "Process ID" @@ -141,6 +144,7 @@ func (h *RestreamHandler) Get(c echo.Context) error { // Delete deletes the process with the given ID // @Summary Delete a process by its ID // @Description Delete a process by its ID +// @Tags v16.7.2 // @ID process-3-delete // @Produce json // @Param id path string true "Process ID" @@ -164,7 +168,8 @@ func (h *RestreamHandler) Delete(c echo.Context) error { // Update replaces an existing process // @Summary Replace an existing process -// @Description Replace an existing process +// @Description Replace an existing process. +// @Tags v16.7.2 // @ID process-3-update // @Accept json // @Produce json @@ -184,6 +189,14 @@ func (h *RestreamHandler) Update(c echo.Context) error { Autostart: true, } + current, err := h.restream.GetProcess(id) + if err != nil { + return api.Err(http.StatusNotFound, "Process not found", "%s", id) + } + + // Prefill the config with the current values + process.Unmarshal(current.Config) + if err := util.ShouldBindJSON(c, &process); err != nil { return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err) } @@ -206,6 +219,7 @@ func (h *RestreamHandler) Update(c echo.Context) error { // Command issues a command to a process // @Summary Issue a command to a process // @Description Issue a command to a process: start, stop, reload, restart +// @Tags v16.7.2 // @ID process-3-command // @Accept json // @Produce json @@ -248,6 +262,7 @@ func (h *RestreamHandler) Command(c echo.Context) error { // GetConfig returns the configuration of a process // @Summary Get the configuration of a process // @Description Get the configuration of a process. This is the configuration as provided by Add or Update. +// @Tags v16.7.2 // @ID process-3-get-config // @Produce json // @Param id path string true "Process ID" @@ -272,7 +287,8 @@ func (h *RestreamHandler) GetConfig(c echo.Context) error { // GetState returns the current state of a process // @Summary Get the state of a process -// @Description Get the state and progress data of a process +// @Description Get the state and progress data of a process. +// @Tags v16.7.2 // @ID process-3-get-state // @Produce json // @Param id path string true "Process ID" @@ -297,7 +313,8 @@ func (h *RestreamHandler) GetState(c echo.Context) error { // GetReport return the current log and the log history of a process // @Summary Get the logs of a process -// @Description Get the logs and the log history of a process +// @Description Get the logs and the log history of a process. +// @Tags v16.7.2 // @ID process-3-get-report // @Produce json // @Param id path string true "Process ID" @@ -322,7 +339,8 @@ func (h *RestreamHandler) GetReport(c echo.Context) error { // Probe probes a process // @Summary Probe a process -// @Description Probe an existing process to get a detailed stream information on the inputs +// @Description Probe an existing process to get a detailed stream information on the inputs. +// @Tags v16.7.2 // @ID process-3-probe // @Produce json // @Param id path string true "Process ID" @@ -342,7 +360,8 @@ func (h *RestreamHandler) Probe(c echo.Context) error { // Skills returns the detected FFmpeg capabilities // @Summary FFmpeg capabilities -// @Description List all detected FFmpeg capabilities +// @Description List all detected FFmpeg capabilities. 
+// @Tags v16.7.2 // @ID skills-3 // @Produce json // @Success 200 {object} api.Skills @@ -359,7 +378,8 @@ func (h *RestreamHandler) Skills(c echo.Context) error { // ReloadSkills will refresh the FFmpeg capabilities // @Summary Refresh FFmpeg capabilities -// @Description Refresh the available FFmpeg capabilities +// @Description Refresh the available FFmpeg capabilities. +// @Tags v16.7.2 // @ID skills-3-reload // @Produce json // @Success 200 {object} api.Skills @@ -378,6 +398,7 @@ func (h *RestreamHandler) ReloadSkills(c echo.Context) error { // GetProcessMetadata returns the metadata stored with a process // @Summary Retrieve JSON metadata stored with a process under a key // @Description Retrieve the previously stored JSON metadata under the given key. If the key is empty, all metadata will be returned. +// @Tags v16.7.2 // @ID process-3-get-process-metadata // @Produce json // @Param id path string true "Process ID" @@ -402,6 +423,7 @@ func (h *RestreamHandler) GetProcessMetadata(c echo.Context) error { // SetProcessMetadata stores metadata with a process // @Summary Add JSON metadata with a process under the given key // @Description Add arbitrary JSON metadata under the given key. If the key exists, all already stored metadata with this key will be overwritten. If the key doesn't exist, it will be created. +// @Tags v16.7.2 // @ID process-3-set-process-metadata // @Produce json // @Param id path string true "Process ID" @@ -436,6 +458,7 @@ func (h *RestreamHandler) SetProcessMetadata(c echo.Context) error { // GetMetadata returns the metadata stored with the Restreamer // @Summary Retrieve JSON metadata from a key // @Description Retrieve the previously stored JSON metadata under the given key. If the key is empty, all metadata will be returned. +// @Tags v16.7.2 // @ID metadata-3-get // @Produce json // @Param key path string true "Key for data store" @@ -458,6 +481,7 @@ func (h *RestreamHandler) GetMetadata(c echo.Context) error { // SetMetadata stores metadata with the Restreamer // @Summary Add JSON metadata under the given key // @Description Add arbitrary JSON metadata under the given key. If the key exists, all already stored metadata with this key will be overwritten. If the key doesn't exist, it will be created. +// @Tags v16.7.2 // @ID metadata-3-set // @Produce json // @Param key path string true "Key for data store" diff --git a/http/handler/api/rtmp.go b/http/handler/api/rtmp.go index 1adde4a9..3f2dbbb5 100644 --- a/http/handler/api/rtmp.go +++ b/http/handler/api/rtmp.go @@ -23,7 +23,8 @@ func NewRTMP(rtmp rtmp.Server) *RTMPHandler { // ListChannels lists all currently publishing RTMP streams // @Summary List all publishing RTMP streams -// @Description List all currently publishing RTMP streams +// @Description List all currently publishing RTMP streams. +// @Tags v16.7.2 // @ID rtmp-3-list-channels // @Produce json // @Success 200 {array} api.RTMPChannel diff --git a/http/handler/api/session.go b/http/handler/api/session.go index de297cd9..bc45f5cd 100644 --- a/http/handler/api/session.go +++ b/http/handler/api/session.go @@ -25,7 +25,8 @@ func NewSession(registry session.RegistryReader) *SessionHandler { // Summary returns a summary of all active and past sessions // @Summary Get a summary of all active and past sessions -// @Description Get a summary of all active and past sessions of the given collector +// @Description Get a summary of all active and past sessions of the given collector. 
+// @Tags v16.7.2 // @ID session-3-summary // @Produce json // @Security ApiKeyAuth @@ -49,7 +50,8 @@ func (s *SessionHandler) Summary(c echo.Context) error { // Active returns a list of active sessions // @Summary Get a minimal summary of all active sessions -// @Description Get a minimal summary of all active sessions (i.e. number of sessions, bandwidth) +// @Description Get a minimal summary of all active sessions (i.e. number of sessions, bandwidth). +// @Tags v16.7.2 // @ID session-3-current // @Produce json // @Security ApiKeyAuth diff --git a/http/handler/api/srt.go b/http/handler/api/srt.go index 859820f9..427fbf89 100644 --- a/http/handler/api/srt.go +++ b/http/handler/api/srt.go @@ -24,6 +24,7 @@ func NewSRT(srt srt.Server) *SRTHandler { // ListChannels lists all currently publishing SRT streams // @Summary List all publishing SRT treams // @Description List all currently publishing SRT streams. This endpoint is EXPERIMENTAL and may change in future. +// @Tags v16.9.0 // @ID srt-3-list-channels // @Produce json // @Success 200 {array} []api.SRTChannel diff --git a/http/handler/api/widget.go b/http/handler/api/widget.go index 7bd22602..bb4688f2 100644 --- a/http/handler/api/widget.go +++ b/http/handler/api/widget.go @@ -2,6 +2,7 @@ package api import ( "net/http" + "strings" "github.com/datarhei/core/v16/http/api" "github.com/datarhei/core/v16/http/handler/util" @@ -33,6 +34,7 @@ func NewWidget(config WidgetConfig) *WidgetHandler { // Get returns minimal public statistics about a process // @Summary Fetch minimal statistics about a process // @Description Fetch minimal statistics about a process, which is not protected by any auth. +// @Tags v16.7.2 // @ID widget-3-get // @Produce json // @Param id path string true "ID of a process" @@ -73,13 +75,19 @@ func (w *WidgetHandler) Get(c echo.Context) error { summary := collector.Summary() for _, session := range summary.Active { - if session.Reference == process.Reference { - data.CurrentSessions++ + if !strings.HasPrefix(session.Reference, process.Reference) { + continue } + + data.CurrentSessions++ } - if s, ok := summary.Summary.References[process.Reference]; ok { - data.TotalSessions = s.TotalSessions + for reference, s := range summary.Summary.References { + if !strings.HasPrefix(reference, process.Reference) { + continue + } + + data.TotalSessions += s.TotalSessions } return c.JSON(http.StatusOK, data) diff --git a/http/handler/diskfs.go b/http/handler/diskfs.go deleted file mode 100644 index 545cf149..00000000 --- a/http/handler/diskfs.go +++ /dev/null @@ -1,94 +0,0 @@ -package handler - -import ( - "net/http" - "path/filepath" - - "github.com/datarhei/core/v16/http/api" - "github.com/datarhei/core/v16/http/cache" - "github.com/datarhei/core/v16/http/handler/util" - "github.com/datarhei/core/v16/io/fs" - - "github.com/labstack/echo/v4" -) - -// The DiskFSHandler type provides handlers for manipulating a filesystem -type DiskFSHandler struct { - cache cache.Cacher - filesystem fs.Filesystem -} - -// NewDiskFS return a new DiskFS type. You have to provide a filesystem to act on and optionally -// a Cacher where files will be purged from if the Cacher is related to the filesystem. -func NewDiskFS(fs fs.Filesystem, cache cache.Cacher) *DiskFSHandler { - return &DiskFSHandler{ - cache: cache, - filesystem: fs, - } -} - -// GetFile returns the file at the given path -// @Summary Fetch a file from the filesystem -// @Description Fetch a file from the filesystem. If the file is a directory, a index.html is returned, if it exists. 
-// @ID diskfs-get-file
-// @Produce application/data
-// @Produce json
-// @Param path path string true "Path to file"
-// @Success 200 {file} byte
-// @Success 301 {string} string
-// @Failure 404 {object} api.Error
-// @Router /{path} [get]
-func (h *DiskFSHandler) GetFile(c echo.Context) error {
-	path := util.PathWildcardParam(c)
-
-	mimeType := c.Response().Header().Get(echo.HeaderContentType)
-	c.Response().Header().Del(echo.HeaderContentType)
-
-	file := h.filesystem.Open(path)
-	if file == nil {
-		return api.Err(http.StatusNotFound, "File not found", path)
-	}
-
-	stat, err := file.Stat()
-	if err != nil {
-		return api.Err(http.StatusNotFound, "File not found", path)
-	}
-
-	if stat.IsDir() {
-		path = filepath.Join(path, "index.html")
-
-		file.Close()
-
-		file = h.filesystem.Open(path)
-		if file == nil {
-			return api.Err(http.StatusNotFound, "File not found", path)
-		}
-
-		stat, err = file.Stat()
-		if err != nil {
-			return api.Err(http.StatusNotFound, "File not found", path)
-		}
-	}
-
-	defer file.Close()
-
-	c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT"))
-
-	if path, ok := stat.IsLink(); ok {
-		path = filepath.Clean("/" + path)
-
-		if path[0] == '/' {
-			path = path[1:]
-		}
-
-		return c.Redirect(http.StatusMovedPermanently, path)
-	}
-
-	c.Response().Header().Set(echo.HeaderContentType, mimeType)
-
-	if c.Request().Method == "HEAD" {
-		return c.Blob(http.StatusOK, "application/data", nil)
-	}
-
-	return c.Stream(http.StatusOK, "application/data", file)
-}
diff --git a/http/handler/filesystem.go b/http/handler/filesystem.go
new file mode 100644
index 00000000..9e00e37c
--- /dev/null
+++ b/http/handler/filesystem.go
@@ -0,0 +1,179 @@
+package handler
+
+import (
+	"net/http"
+	"path/filepath"
+	"sort"
+	"strings"
+
+	"github.com/datarhei/core/v16/http/api"
+	"github.com/datarhei/core/v16/http/fs"
+	"github.com/datarhei/core/v16/http/handler/util"
+
+	"github.com/labstack/echo/v4"
+)
+
+// The FSHandler type provides handlers for manipulating a filesystem
+type FSHandler struct {
+	fs fs.FS
+}
+
+// NewFS returns a new FSHandler type. You have to provide a filesystem to act on.
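
// ---------------------------------------------------------------------------
// Editorial aside (not part of the patch): the handler below relies on only a
// few fields of http/fs.FS. The struct is defined elsewhere in this change;
// this rough sketch lists just the fields exercised here, and the comments
// are editorial assumptions:
//
//	type FS struct {
//		DefaultFile string        // served instead of a directory listing
//		Cache       cache.Cacher  // optional; entries are purged on writes
//		Filesystem  fs.Filesystem // the io/fs backend doing the actual work
//		// ...name, mountpoint, auth, and gzip settings omitted
//	}
//
// GetFile falls back to DefaultFile when a directory is requested, and
// PutFile/DeleteFile invalidate both the file's cache entry and, for the
// default file, the bare directory path.
// ---------------------------------------------------------------------------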
+func NewFS(fs fs.FS) *FSHandler { + return &FSHandler{ + fs: fs, + } +} + +func (h *FSHandler) GetFile(c echo.Context) error { + path := util.PathWildcardParam(c) + + mimeType := c.Response().Header().Get(echo.HeaderContentType) + c.Response().Header().Del(echo.HeaderContentType) + + file := h.fs.Filesystem.Open(path) + if file == nil { + return api.Err(http.StatusNotFound, "File not found", path) + } + + stat, _ := file.Stat() + + if len(h.fs.DefaultFile) != 0 { + if stat.IsDir() { + path = filepath.Join(path, h.fs.DefaultFile) + + file.Close() + + file = h.fs.Filesystem.Open(path) + if file == nil { + return api.Err(http.StatusNotFound, "File not found", path) + } + + stat, _ = file.Stat() + } + } + + defer file.Close() + + c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT")) + + if path, ok := stat.IsLink(); ok { + path = filepath.Clean("/" + path) + + if path[0] == '/' { + path = path[1:] + } + + return c.Redirect(http.StatusMovedPermanently, path) + } + + c.Response().Header().Set(echo.HeaderContentType, mimeType) + + if c.Request().Method == "HEAD" { + return c.Blob(http.StatusOK, "application/data", nil) + } + + return c.Stream(http.StatusOK, "application/data", file) +} + +func (h *FSHandler) PutFile(c echo.Context) error { + path := util.PathWildcardParam(c) + + c.Response().Header().Del(echo.HeaderContentType) + + req := c.Request() + + _, created, err := h.fs.Filesystem.WriteFileReader(path, req.Body) + if err != nil { + return api.Err(http.StatusBadRequest, "Bad request", "%s", err) + } + + if h.fs.Cache != nil { + h.fs.Cache.Delete(path) + + if len(h.fs.DefaultFile) != 0 { + if strings.HasSuffix(path, "/"+h.fs.DefaultFile) { + path := strings.TrimSuffix(path, h.fs.DefaultFile) + h.fs.Cache.Delete(path) + } + } + } + + c.Response().Header().Set("Content-Location", req.URL.RequestURI()) + + if created { + return c.String(http.StatusCreated, "") + } + + return c.NoContent(http.StatusNoContent) +} + +func (h *FSHandler) DeleteFile(c echo.Context) error { + path := util.PathWildcardParam(c) + + c.Response().Header().Del(echo.HeaderContentType) + + size := h.fs.Filesystem.Remove(path) + + if h.fs.Cache != nil { + h.fs.Cache.Delete(path) + + if len(h.fs.DefaultFile) != 0 { + if strings.HasSuffix(path, "/"+h.fs.DefaultFile) { + path := strings.TrimSuffix(path, h.fs.DefaultFile) + h.fs.Cache.Delete(path) + } + } + } + + if size < 0 { + return api.Err(http.StatusNotFound, "File not found", path) + } + + return c.String(http.StatusOK, "Deleted: "+path) +} + +func (h *FSHandler) ListFiles(c echo.Context) error { + pattern := util.DefaultQuery(c, "glob", "") + sortby := util.DefaultQuery(c, "sort", "none") + order := util.DefaultQuery(c, "order", "asc") + + files := h.fs.Filesystem.List("/", pattern) + + var sortFunc func(i, j int) bool + + switch sortby { + case "name": + if order == "desc" { + sortFunc = func(i, j int) bool { return files[i].Name() > files[j].Name() } + } else { + sortFunc = func(i, j int) bool { return files[i].Name() < files[j].Name() } + } + case "size": + if order == "desc" { + sortFunc = func(i, j int) bool { return files[i].Size() > files[j].Size() } + } else { + sortFunc = func(i, j int) bool { return files[i].Size() < files[j].Size() } + } + default: + if order == "asc" { + sortFunc = func(i, j int) bool { return files[i].ModTime().Before(files[j].ModTime()) } + } else { + sortFunc = func(i, j int) bool { return files[i].ModTime().After(files[j].ModTime()) } + } + } + + sort.Slice(files, sortFunc) + + var 
fileinfos []api.FileInfo = make([]api.FileInfo, len(files)) + + for i, f := range files { + fileinfos[i] = api.FileInfo{ + Name: f.Name(), + Size: f.Size(), + LastMod: f.ModTime().Unix(), + } + } + + return c.JSON(http.StatusOK, fileinfos) +} diff --git a/http/handler/memfs.go b/http/handler/memfs.go deleted file mode 100644 index 81632506..00000000 --- a/http/handler/memfs.go +++ /dev/null @@ -1,133 +0,0 @@ -package handler - -import ( - "net/http" - "path/filepath" - - "github.com/datarhei/core/v16/http/api" - "github.com/datarhei/core/v16/http/handler/util" - "github.com/datarhei/core/v16/io/fs" - - "github.com/labstack/echo/v4" -) - -// The MemFSHandler type provides handlers for manipulating a filesystem -type MemFSHandler struct { - filesystem fs.Filesystem -} - -// NewMemFS return a new MemFS type. You have to provide a filesystem to act on. -func NewMemFS(fs fs.Filesystem) *MemFSHandler { - return &MemFSHandler{ - filesystem: fs, - } -} - -// GetFile returns the file at the given path -// @Summary Fetch a file from the memory filesystem -// @Description Fetch a file from the memory filesystem -// @ID memfs-get-file -// @Produce application/data -// @Produce json -// @Param path path string true "Path to file" -// @Success 200 {file} byte -// @Success 301 {string} string -// @Failure 404 {object} api.Error -// @Router /memfs/{path} [get] -func (h *MemFSHandler) GetFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - mimeType := c.Response().Header().Get(echo.HeaderContentType) - c.Response().Header().Del(echo.HeaderContentType) - - file := h.filesystem.Open(path) - if file == nil { - return api.Err(http.StatusNotFound, "File not found", path) - } - - defer file.Close() - - stat, err := file.Stat() - if err != nil { - return api.Err(http.StatusInternalServerError, "File.Stat() failed", "%s", err) - } - - c.Response().Header().Set("Last-Modified", stat.ModTime().UTC().Format("Mon, 02 Jan 2006 15:04:05 GMT")) - - if path, ok := stat.IsLink(); ok { - path = filepath.Clean("/" + path) - - if path[0] == '/' { - path = path[1:] - } - - return c.Redirect(http.StatusMovedPermanently, path) - } - - c.Response().Header().Set(echo.HeaderContentType, mimeType) - - if c.Request().Method == "HEAD" { - return c.Blob(http.StatusOK, "application/data", nil) - } - - return c.Stream(http.StatusOK, "application/data", file) -} - -// PutFile adds or overwrites a file at the given path -// @Summary Add a file to the memory filesystem -// @Description Writes or overwrites a file on the memory filesystem -// @ID memfs-put-file -// @Accept application/data -// @Produce text/plain -// @Produce json -// @Param path path string true "Path to file" -// @Param data body []byte true "File data" -// @Success 201 {string} string -// @Success 204 {string} string -// @Failure 507 {object} api.Error -// @Security BasicAuth -// @Router /memfs/{path} [put] -func (h *MemFSHandler) PutFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - req := c.Request() - - _, created, err := h.filesystem.Store(path, req.Body) - if err != nil { - return api.Err(http.StatusBadRequest, "%s", err) - } - - c.Response().Header().Set("Content-Location", req.URL.RequestURI()) - - if created { - return c.String(http.StatusCreated, "") - } - - return c.NoContent(http.StatusNoContent) -} - -// DeleteFile removes a file from the filesystem -// @Summary Remove a file from the memory filesystem -// @Description Remove a file from the memory filesystem -// @ID 
memfs-delete-file -// @Produce text/plain -// @Param path path string true "Path to file" -// @Success 200 {string} string -// @Failure 404 {object} api.Error -// @Security BasicAuth -// @Router /memfs/{path} [delete] -func (h *MemFSHandler) DeleteFile(c echo.Context) error { - path := util.PathWildcardParam(c) - - c.Response().Header().Del(echo.HeaderContentType) - - size := h.filesystem.Delete(path) - - if size < 0 { - return api.Err(http.StatusNotFound, "File not found", path) - } - - return c.String(http.StatusOK, "Deleted: "+path) -} diff --git a/http/middleware/cache/cache.go b/http/middleware/cache/cache.go index 25344e19..a32e35e7 100644 --- a/http/middleware/cache/cache.go +++ b/http/middleware/cache/cache.go @@ -57,31 +57,18 @@ func NewWithConfig(config Config) echo.MiddlewareFunc { if req.Method != "GET" { res.Header().Set("X-Cache", "SKIP ONLYGET") - - if err := next(c); err != nil { - c.Error(err) - } - - return nil + return next(c) } - res.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.0f", config.Cache.TTL().Seconds())) - key := strings.TrimPrefix(req.URL.Path, config.Prefix) if !config.Cache.IsExtensionCacheable(path.Ext(req.URL.Path)) { res.Header().Set("X-Cache", "SKIP EXT") - - if err := next(c); err != nil { - c.Error(err) - } - - return nil + return next(c) } if obj, expireIn, _ := config.Cache.Get(key); obj == nil { // cache miss - writer := res.Writer w := &cacheWriter{ @@ -105,6 +92,7 @@ func NewWithConfig(config Config) echo.MiddlewareFunc { if res.Status != 200 { res.Header().Set("X-Cache", "SKIP NOTOK") + res.Writer.WriteHeader(res.Status) return nil } @@ -112,6 +100,7 @@ func NewWithConfig(config Config) echo.MiddlewareFunc { if !config.Cache.IsSizeCacheable(size) { res.Header().Set("X-Cache", "SKIP TOOBIG") + res.Writer.WriteHeader(res.Status) return nil } @@ -123,11 +112,13 @@ func NewWithConfig(config Config) echo.MiddlewareFunc { if err := config.Cache.Put(key, o, size); err != nil { res.Header().Set("X-Cache", "SKIP TOOBIG") + res.Writer.WriteHeader(res.Status) return nil } res.Header().Set("Cache-Control", fmt.Sprintf("max-age=%.0f", expireIn.Seconds())) res.Header().Set("X-Cache", "MISS") + res.Writer.WriteHeader(res.Status) } else { // cache hit o := obj.(*cacheObject) @@ -190,7 +181,5 @@ func (w *cacheWriter) WriteHeader(code int) { } func (w *cacheWriter) Write(body []byte) (int, error) { - n, err := w.body.Write(body) - - return n, err + return w.body.Write(body) } diff --git a/http/middleware/cache/cache_test.go b/http/middleware/cache/cache_test.go new file mode 100644 index 00000000..7748a970 --- /dev/null +++ b/http/middleware/cache/cache_test.go @@ -0,0 +1,100 @@ +package cache + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/datarhei/core/v16/http/cache" + + "github.com/labstack/echo/v4" + "github.com/stretchr/testify/require" +) + +func TestCache(t *testing.T) { + c, err := cache.NewLRUCache(cache.LRUConfig{ + TTL: 300 * time.Second, + MaxSize: 0, + MaxFileSize: 16, + AllowExtensions: []string{".js"}, + BlockExtensions: []string{".ts"}, + Logger: nil, + }) + + require.NoError(t, err) + + e := echo.New() + req := httptest.NewRequest(http.MethodGet, "/found.js", nil) + rec := httptest.NewRecorder() + ctx := e.NewContext(req, rec) + + handler := NewWithConfig(Config{ + Cache: c, + })(func(c echo.Context) error { + if c.Request().URL.Path == "/found.js" { + c.Response().Write([]byte("test")) + } else if c.Request().URL.Path == "/toobig.js" { + c.Response().Write([]byte("testtesttesttesttest")) + } else if 
c.Request().URL.Path == "/blocked.ts" { + c.Response().Write([]byte("blocked")) + } + + c.Response().WriteHeader(http.StatusNotFound) + return nil + }) + + handler(ctx) + + require.Equal(t, "test", rec.Body.String()) + require.Equal(t, 200, rec.Result().StatusCode) + require.Equal(t, "MISS", rec.Result().Header.Get("x-cache")) + + rec = httptest.NewRecorder() + ctx = e.NewContext(req, rec) + + handler(ctx) + + require.Equal(t, "test", rec.Body.String()) + require.Equal(t, 200, rec.Result().StatusCode) + require.Equal(t, "HIT", rec.Result().Header.Get("x-cache")[:3]) + + req = httptest.NewRequest(http.MethodGet, "/notfound.js", nil) + rec = httptest.NewRecorder() + ctx = e.NewContext(req, rec) + + handler(ctx) + + require.Equal(t, 404, rec.Result().StatusCode) + require.Equal(t, "SKIP NOTOK", rec.Result().Header.Get("x-cache")) + + req = httptest.NewRequest(http.MethodGet, "/toobig.js", nil) + rec = httptest.NewRecorder() + ctx = e.NewContext(req, rec) + + handler(ctx) + + require.Equal(t, "testtesttesttesttest", rec.Body.String()) + require.Equal(t, 200, rec.Result().StatusCode) + require.Equal(t, "SKIP TOOBIG", rec.Result().Header.Get("x-cache")) + + req = httptest.NewRequest(http.MethodGet, "/blocked.ts", nil) + rec = httptest.NewRecorder() + ctx = e.NewContext(req, rec) + + handler(ctx) + + require.Equal(t, "blocked", rec.Body.String()) + require.Equal(t, 200, rec.Result().StatusCode) + require.Equal(t, "SKIP EXT", rec.Result().Header.Get("x-cache")) + + req = httptest.NewRequest(http.MethodPost, "/found.js", nil) + rec = httptest.NewRecorder() + ctx = e.NewContext(req, rec) + + handler(ctx) + + require.Equal(t, "test", rec.Body.String()) + require.Equal(t, 200, rec.Result().StatusCode) + require.Equal(t, "SKIP ONLYGET", rec.Result().Header.Get("x-cache")) +} diff --git a/http/mock/mock.go b/http/mock/mock.go index 8bdc0c55..621204a7 100644 --- a/http/mock/mock.go +++ b/http/mock/mock.go @@ -17,6 +17,7 @@ import ( "github.com/datarhei/core/v16/http/errorhandler" "github.com/datarhei/core/v16/http/validator" "github.com/datarhei/core/v16/internal/testhelper" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/restream" "github.com/datarhei/core/v16/restream/store" @@ -32,7 +33,17 @@ func DummyRestreamer(pathPrefix string) (restream.Restreamer, error) { return nil, fmt.Errorf("failed to build helper program: %w", err) } - store := store.NewDummyStore(store.DummyConfig{}) + memfs, err := fs.NewMemFilesystem(fs.MemConfig{}) + if err != nil { + return nil, fmt.Errorf("failed to create memory filesystem: %w", err) + } + + store, err := store.NewJSON(store.JSONConfig{ + Filesystem: memfs, + }) + if err != nil { + return nil, err + } ffmpeg, err := ffmpeg.New(ffmpeg.Config{ Binary: binary, diff --git a/http/server.go b/http/server.go index 34949e8a..35de6505 100644 --- a/http/server.go +++ b/http/server.go @@ -29,21 +29,21 @@ package http import ( + "fmt" "net/http" "strings" "github.com/datarhei/core/v16/cluster" - "github.com/datarhei/core/v16/config" + cfgstore "github.com/datarhei/core/v16/config/store" "github.com/datarhei/core/v16/http/cache" "github.com/datarhei/core/v16/http/errorhandler" - clusterfs "github.com/datarhei/core/v16/http/fs" + "github.com/datarhei/core/v16/http/fs" "github.com/datarhei/core/v16/http/graph/resolver" "github.com/datarhei/core/v16/http/handler" api "github.com/datarhei/core/v16/http/handler/api" "github.com/datarhei/core/v16/http/jwt" "github.com/datarhei/core/v16/http/router" "github.com/datarhei/core/v16/http/validator" - 
"github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/monitor" "github.com/datarhei/core/v16/net" @@ -81,15 +81,14 @@ type Config struct { Metrics monitor.HistoryReader Prometheus prometheus.Reader MimeTypesFile string - DiskFS fs.Filesystem - MemFS MemFSConfig + Filesystems []fs.FS IPLimiter net.IPLimitValidator Profiling bool Cors CorsConfig RTMP rtmp.Server SRT srt.Server JWT jwt.JWT - Config config.Store + Config cfgstore.Store Cache cache.Cacher Sessions session.RegistryReader Router router.Router @@ -97,13 +96,6 @@ type Config struct { Cluster cluster.Cluster } -type MemFSConfig struct { - EnableAuth bool - Username string - Password string - Filesystem fs.Filesystem -} - type CorsConfig struct { Origins []string } @@ -117,8 +109,6 @@ type server struct { handler struct { about *api.AboutHandler - memfs *handler.MemFSHandler - diskfs *handler.DiskFSHandler prometheus *handler.PrometheusHandler profiling *handler.ProfilingHandler ping *handler.PingHandler @@ -130,8 +120,6 @@ type server struct { log *api.LogHandler restream *api.RestreamHandler playout *api.PlayoutHandler - memfs *api.MemFSHandler - diskfs *api.DiskFSHandler rtmp *api.RTMPHandler srt *api.SRTHandler config *api.ConfigHandler @@ -152,18 +140,12 @@ type server struct { hlsrewrite echo.MiddlewareFunc } - memfs struct { - enableAuth bool - username string - password string - } - - diskfs fs.Filesystem - gzip struct { mimetypes []string } + filesystems map[string]*filesystem + router *echo.Echo mimeTypesFile string profiling bool @@ -171,37 +153,67 @@ type server struct { readOnly bool } +type filesystem struct { + fs.FS + + handler *handler.FSHandler + middleware echo.MiddlewareFunc +} + func NewServer(config Config) (Server, error) { s := &server{ logger: config.Logger, mimeTypesFile: config.MimeTypesFile, profiling: config.Profiling, - diskfs: config.DiskFS, readOnly: config.ReadOnly, } - s.v3handler.diskfs = api.NewDiskFS( - config.DiskFS, - config.Cache, - ) + s.filesystems = map[string]*filesystem{} - filesystem := config.DiskFS - if config.Cluster != nil { - filesystem = clusterfs.NewClusterFS("diskfs", filesystem, config.Cluster) + corsPrefixes := map[string][]string{ + "/api": {"*"}, } - s.handler.diskfs = handler.NewDiskFS( - filesystem, - config.Cache, - ) + for _, httpfs := range config.Filesystems { + if _, ok := s.filesystems[httpfs.Name]; ok { + return nil, fmt.Errorf("the filesystem name '%s' is already in use", httpfs.Name) + } - s.middleware.hlsrewrite = mwhlsrewrite.NewHLSRewriteWithConfig(mwhlsrewrite.HLSRewriteConfig{ - PathPrefix: config.DiskFS.Base(), - }) + if !strings.HasPrefix(httpfs.Mountpoint, "/") { + httpfs.Mountpoint = "/" + httpfs.Mountpoint + } - s.memfs.enableAuth = config.MemFS.EnableAuth - s.memfs.username = config.MemFS.Username - s.memfs.password = config.MemFS.Password + if !strings.HasSuffix(httpfs.Mountpoint, "/") { + httpfs.Mountpoint = strings.TrimSuffix(httpfs.Mountpoint, "/") + } + + if _, ok := corsPrefixes[httpfs.Mountpoint]; ok { + return nil, fmt.Errorf("the mount point '%s' is already in use (%s)", httpfs.Mountpoint, httpfs.Name) + } + + corsPrefixes[httpfs.Mountpoint] = config.Cors.Origins + + if httpfs.Filesystem.Type() == "disk" || httpfs.Filesystem.Type() == "mem" { + httpfs.Filesystem = fs.NewClusterFS(httpfs.Filesystem.Name(), httpfs.Filesystem, config.Cluster) + } + + filesystem := &filesystem{ + FS: httpfs, + handler: handler.NewFS(httpfs), + } + + if httpfs.Filesystem.Type() == "disk" { + filesystem.middleware = 
mwhlsrewrite.NewHLSRewriteWithConfig(mwhlsrewrite.HLSRewriteConfig{ + PathPrefix: httpfs.Filesystem.Metadata("base"), + }) + } + + s.filesystems[filesystem.Name] = filesystem + } + + if _, ok := corsPrefixes["/"]; !ok { + return nil, fmt.Errorf("one filesystem must be mounted at /") + } if config.Logger == nil { s.logger = log.New("HTTP") @@ -233,21 +245,6 @@ func NewServer(config Config) (Server, error) { ) } - if config.MemFS.Filesystem != nil { - s.v3handler.memfs = api.NewMemFS( - config.MemFS.Filesystem, - ) - - filesystem := config.MemFS.Filesystem - if config.Cluster != nil { - filesystem = clusterfs.NewClusterFS("memfs", filesystem, config.Cluster) - } - - s.handler.memfs = handler.NewMemFS( - filesystem, - ) - } - if config.Prometheus != nil { s.handler.prometheus = handler.NewPrometheus( config.Prometheus.HTTPHandler(), @@ -306,12 +303,6 @@ func NewServer(config Config) (Server, error) { Logger: s.logger, }) - if config.Cache != nil { - s.middleware.cache = mwcache.NewWithConfig(mwcache.Config{ - Cache: config.Cache, - }) - } - s.v3handler.widget = api.NewWidget(api.WidgetConfig{ Restream: config.Restream, Registry: config.Sessions, @@ -326,11 +317,7 @@ func NewServer(config Config) (Server, error) { } if middleware, err := mwcors.NewWithConfig(mwcors.Config{ - Prefixes: map[string][]string{ - "/": config.Cors.Origins, - "/api": {"*"}, - "/memfs": config.Cors.Origins, - }, + Prefixes: corsPrefixes, }); err != nil { return nil, err } else { @@ -455,65 +442,66 @@ func (s *server) setRoutes() { doc.Use(gzipMiddleware) doc.GET("", echoSwagger.WrapHandler) - // Serve static data - fs := s.router.Group("/*") - fs.Use(mwmime.NewWithConfig(mwmime.Config{ - MimeTypesFile: s.mimeTypesFile, - DefaultContentType: "text/html", - })) - fs.Use(mwgzip.NewWithConfig(mwgzip.Config{ - Level: mwgzip.BestSpeed, - MinLength: 1000, - Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes), - })) - if s.middleware.cache != nil { - fs.Use(s.middleware.cache) - } - fs.Use(s.middleware.hlsrewrite) - if s.middleware.session != nil { - fs.Use(s.middleware.session) - } + // Mount filesystems + for _, filesystem := range s.filesystems { + // Define a local variable because later in the loop we have a closure + filesystem := filesystem - fs.GET("", s.handler.diskfs.GetFile) - fs.HEAD("", s.handler.diskfs.GetFile) - - // Memory FS - if s.handler.memfs != nil { - memfs := s.router.Group("/memfs/*") - memfs.Use(mwmime.NewWithConfig(mwmime.Config{ - MimeTypesFile: s.mimeTypesFile, - DefaultContentType: "application/data", - })) - memfs.Use(mwgzip.NewWithConfig(mwgzip.Config{ - Level: mwgzip.BestSpeed, - MinLength: 1000, - Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes), - })) - if s.middleware.session != nil { - memfs.Use(s.middleware.session) + mountpoint := filesystem.Mountpoint + "/*" + if filesystem.Mountpoint == "/" { + mountpoint = "/*" } - memfs.HEAD("", s.handler.memfs.GetFile) - memfs.GET("", s.handler.memfs.GetFile) + fs := s.router.Group(mountpoint) + fs.Use(mwmime.NewWithConfig(mwmime.Config{ + MimeTypesFile: s.mimeTypesFile, + DefaultContentType: filesystem.DefaultContentType, + })) - var authmw echo.MiddlewareFunc + if filesystem.Gzip { + fs.Use(mwgzip.NewWithConfig(mwgzip.Config{ + Skipper: mwgzip.ContentTypeSkipper(s.gzip.mimetypes), + Level: mwgzip.BestSpeed, + MinLength: 1000, + })) + } - if s.memfs.enableAuth { - authmw = middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) { - if username == s.memfs.username && password == s.memfs.password { - return true, nil - } 
- - return false, nil + if filesystem.Cache != nil { + mwcache := mwcache.NewWithConfig(mwcache.Config{ + Cache: filesystem.Cache, }) + fs.Use(mwcache) + } - memfs.POST("", s.handler.memfs.PutFile, authmw) - memfs.PUT("", s.handler.memfs.PutFile, authmw) - memfs.DELETE("", s.handler.memfs.DeleteFile, authmw) - } else { - memfs.POST("", s.handler.memfs.PutFile) - memfs.PUT("", s.handler.memfs.PutFile) - memfs.DELETE("", s.handler.memfs.DeleteFile) + if filesystem.middleware != nil { + fs.Use(filesystem.middleware) + } + + if s.middleware.session != nil { + fs.Use(s.middleware.session) + } + + fs.GET("", filesystem.handler.GetFile) + fs.HEAD("", filesystem.handler.GetFile) + + if filesystem.AllowWrite { + if filesystem.EnableAuth { + authmw := middleware.BasicAuth(func(username, password string, c echo.Context) (bool, error) { + if username == filesystem.Username && password == filesystem.Password { + return true, nil + } + + return false, nil + }) + + fs.POST("", filesystem.handler.PutFile, authmw) + fs.PUT("", filesystem.handler.PutFile, authmw) + fs.DELETE("", filesystem.handler.DeleteFile, authmw) + } else { + fs.POST("", filesystem.handler.PutFile) + fs.PUT("", filesystem.handler.PutFile) + fs.DELETE("", filesystem.handler.DeleteFile) + } } } @@ -611,32 +599,33 @@ func (s *server) setRoutesV3(v3 *echo.Group) { } } - // v3 Memory FS - if s.v3handler.memfs != nil { - v3.GET("/fs/mem", s.v3handler.memfs.ListFiles) - v3.GET("/fs/mem/*", s.v3handler.memfs.GetFile) - - if !s.readOnly { - v3.DELETE("/fs/mem/*", s.v3handler.memfs.DeleteFile) - v3.PUT("/fs/mem/*", s.v3handler.memfs.PutFile) - v3.PATCH("/fs/mem/*", s.v3handler.memfs.PatchFile) + // v3 Filesystems + fshandlers := map[string]api.FSConfig{} + for _, fs := range s.filesystems { + fshandlers[fs.Name] = api.FSConfig{ + Type: fs.Filesystem.Type(), + Mountpoint: fs.Mountpoint, + Handler: fs.handler, } } - // v3 Disk FS - v3.GET("/fs/disk", s.v3handler.diskfs.ListFiles) - v3.GET("/fs/disk/*", s.v3handler.diskfs.GetFile, mwmime.NewWithConfig(mwmime.Config{ + handler := api.NewFS(fshandlers) + + v3.GET("/fs", handler.List) + + v3.GET("/fs/:name", handler.ListFiles) + v3.GET("/fs/:name/*", handler.GetFile, mwmime.NewWithConfig(mwmime.Config{ MimeTypesFile: s.mimeTypesFile, DefaultContentType: "application/data", })) - v3.HEAD("/fs/disk/*", s.v3handler.diskfs.GetFile, mwmime.NewWithConfig(mwmime.Config{ + v3.HEAD("/fs/:name/*", handler.GetFile, mwmime.NewWithConfig(mwmime.Config{ MimeTypesFile: s.mimeTypesFile, DefaultContentType: "application/data", })) if !s.readOnly { - v3.PUT("/fs/disk/*", s.v3handler.diskfs.PutFile) - v3.DELETE("/fs/disk/*", s.v3handler.diskfs.DeleteFile) + v3.PUT("/fs/:name/*", handler.PutFile) + v3.DELETE("/fs/:name/*", handler.DeleteFile) } // v3 RTMP diff --git a/io/file/file.go b/io/file/file.go index 386cce14..d3acfb76 100644 --- a/io/file/file.go +++ b/io/file/file.go @@ -17,6 +17,18 @@ func Rename(src, dst string) error { } // If renaming the file fails, copy the data + Copy(src, dst) + + if err := os.Remove(src); err != nil { + os.Remove(dst) + return fmt.Errorf("failed to remove source file: %w", err) + } + + return nil +} + +// Copy copies a file from src to dst. 
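
// ---------------------------------------------------------------------------
// Editorial aside (not part of the patch): a minimal usage sketch of the
// refactored helpers (the paths are made up):
//
//	if err := file.Rename("/data/tmp/upload.ts", "/data/upload.ts"); err != nil {
//		log.Fatal(err) // Rename falls back to Copy+Remove across mounts
//	}
//
// One caveat worth flagging: in the hunk above, Rename ignores Copy's return
// value, so a failed cross-device copy would still remove the source file.
// ---------------------------------------------------------------------------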
+func Copy(src, dst string) error { source, err := os.Open(src) if err != nil { return fmt.Errorf("failed to open source file: %w", err) @@ -37,10 +49,5 @@ func Rename(src, dst string) error { source.Close() - if err := os.Remove(src); err != nil { - os.Remove(dst) - return fmt.Errorf("failed to remove source file: %w", err) - } - return nil } diff --git a/io/fs/disk.go b/io/fs/disk.go index bf9e1843..88352c72 100644 --- a/io/fs/disk.go +++ b/io/fs/disk.go @@ -1,25 +1,30 @@ package fs import ( + "bytes" "fmt" "io" + "io/fs" "os" "path/filepath" "strings" + "sync" "time" "github.com/datarhei/core/v16/glob" "github.com/datarhei/core/v16/log" ) -// DiskConfig is the config required to create a new disk -// filesystem. +// DiskConfig is the config required to create a new disk filesystem. type DiskConfig struct { - // Dir is the path to the directory to observe - Dir string + // For logging, optional + Logger log.Logger +} - // Size of the filesystem in bytes - Size int64 +// RootedDiskConfig is the config required to create a new rooted disk filesystem. +type RootedDiskConfig struct { + // Root is the path this filesystem is rooted to + Root string // For logging, optional Logger log.Logger @@ -27,8 +32,9 @@ type DiskConfig struct { // diskFileInfo implements the FileInfo interface type diskFileInfo struct { - dir string + root string name string + mode os.FileMode finfo os.FileInfo } @@ -37,31 +43,37 @@ func (fi *diskFileInfo) Name() string { } func (fi *diskFileInfo) Size() int64 { + if fi.finfo.IsDir() { + return 0 + } + return fi.finfo.Size() } +func (fi *diskFileInfo) Mode() fs.FileMode { + return fi.mode +} + func (fi *diskFileInfo) ModTime() time.Time { return fi.finfo.ModTime() } func (fi *diskFileInfo) IsLink() (string, bool) { - mode := fi.finfo.Mode() - if mode&os.ModeSymlink == 0 { + if fi.mode&os.ModeSymlink == 0 { return fi.name, false } - path, err := os.Readlink(filepath.Join(fi.dir, fi.name)) + path, err := os.Readlink(filepath.Join(fi.root, fi.name)) if err != nil { return fi.name, false } - path = filepath.Join(fi.dir, path) - - if !strings.HasPrefix(path, fi.dir) { + if !strings.HasPrefix(path, fi.root) { return fi.name, false } - name := strings.TrimPrefix(path, fi.dir) + name := strings.TrimPrefix(path, fi.root) + if name[0] != os.PathSeparator { name = string(os.PathSeparator) + name } @@ -75,8 +87,9 @@ func (fi *diskFileInfo) IsDir() bool { // diskFile implements the File interface type diskFile struct { - dir string + root string name string + mode os.FileMode file *os.File } @@ -91,8 +104,9 @@ func (f *diskFile) Stat() (FileInfo, error) { } dif := &diskFileInfo{ - dir: f.dir, + root: f.root, name: f.name, + mode: f.mode, finfo: finfo, } @@ -109,11 +123,11 @@ func (f *diskFile) Read(p []byte) (int, error) { // diskFilesystem implements the Filesystem interface type diskFilesystem struct { - dir string + metadata map[string]string + lock sync.RWMutex - // Max. size of the filesystem in bytes as - // given by the config - maxSize int64 + root string + cwd string // Current size of the filesystem in bytes currentSize int64 @@ -123,53 +137,102 @@ type diskFilesystem struct { logger log.Logger } -// NewDiskFilesystem returns a new filesystem that is backed by a disk -// that implements the Filesystem interface +// NewDiskFilesystem returns a new filesystem that is backed by the disk filesystem. +// The root is / and the working directory is whatever is returned by os.Getwd(). The value +// of Root in the config will be ignored. 
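
// ---------------------------------------------------------------------------
// Editorial aside (not part of the patch): the difference between the two
// constructors in this file, as a sketch with made-up directories:
//
//	// Unrooted: sees the whole host filesystem, rooted at "/".
//	hostFS, _ := fs.NewDiskFilesystem(fs.DiskConfig{})
//
//	// Rooted: every path is confined below /var/lib/core/data (created if
//	// missing); "/" inside this filesystem maps to that directory.
//	dataFS, _ := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{
//		Root: "/var/lib/core/data",
//	})
//
//	dataFS.Open("/foo.ts") // actually opens /var/lib/core/data/foo.ts
//
// The confinement comes from cleanPath at the end of this file, which joins
// every cleaned path onto the configured root.
// ---------------------------------------------------------------------------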
func NewDiskFilesystem(config DiskConfig) (Filesystem, error) { fs := &diskFilesystem{ - maxSize: config.Size, - logger: config.Logger, + metadata: make(map[string]string), + root: "/", + cwd: "/", + logger: config.Logger, + } + + cwd, err := os.Getwd() + if err != nil { + return nil, err + } + + fs.cwd = cwd + + if len(fs.cwd) == 0 { + fs.cwd = "/" + } + + fs.cwd = filepath.Clean(fs.cwd) + if !filepath.IsAbs(fs.cwd) { + return nil, fmt.Errorf("the current working directory must be an absolute path") } if fs.logger == nil { - fs.logger = log.New("DiskFS") - } - - if err := fs.Rebase(config.Dir); err != nil { - return nil, err + fs.logger = log.New("") } return fs, nil } -func (fs *diskFilesystem) Base() string { - return fs.dir +// NewRootedDiskFilesystem returns a filesystem that is backed by the disk filesystem. The +// root of the filesystem is defined by DiskConfig.Root. The working directory is "/". Root +// must be directory. If it doesn't exist, it will be created +func NewRootedDiskFilesystem(config RootedDiskConfig) (Filesystem, error) { + fs := &diskFilesystem{ + metadata: make(map[string]string), + root: config.Root, + cwd: "/", + logger: config.Logger, + } + + if len(fs.root) == 0 { + fs.root = "/" + } + + if root, err := filepath.Abs(fs.root); err != nil { + return nil, err + } else { + fs.root = root + } + + err := os.MkdirAll(fs.root, 0700) + if err != nil { + return nil, err + } + + info, err := os.Stat(fs.root) + if err != nil { + return nil, err + } + + if !info.IsDir() { + return nil, fmt.Errorf("root is not a directory") + } + + if fs.logger == nil { + fs.logger = log.New("") + } + + return fs, nil } -func (fs *diskFilesystem) Rebase(base string) error { - if len(base) == 0 { - return fmt.Errorf("invalid base path provided") - } +func (fs *diskFilesystem) Name() string { + return "disk" +} - dir, err := filepath.Abs(base) - if err != nil { - return err - } +func (fs *diskFilesystem) Type() string { + return "disk" +} - base = dir +func (fs *diskFilesystem) Metadata(key string) string { + fs.lock.RLock() + defer fs.lock.RUnlock() - finfo, err := os.Stat(base) - if err != nil { - return fmt.Errorf("the provided base path '%s' doesn't exist", fs.dir) - } + return fs.metadata[key] +} - if !finfo.IsDir() { - return fmt.Errorf("the provided base path '%s' must be a directory", fs.dir) - } +func (fs *diskFilesystem) SetMetadata(key, data string) { + fs.lock.Lock() + defer fs.lock.Unlock() - fs.dir = base - - return nil + fs.metadata[key] = data } func (fs *diskFilesystem) Size() (int64, int64) { @@ -178,7 +241,11 @@ func (fs *diskFilesystem) Size() (int64, int64) { if time.Since(fs.lastSizeCheck) >= 10*time.Second { var size int64 = 0 - fs.walk(func(path string, info os.FileInfo) { + fs.walk(fs.root, func(path string, info os.FileInfo) { + if info.IsDir() { + return + } + size += info.Size() }) @@ -187,17 +254,21 @@ func (fs *diskFilesystem) Size() (int64, int64) { fs.lastSizeCheck = time.Now() } - return fs.currentSize, fs.maxSize + return fs.currentSize, -1 } -func (fs *diskFilesystem) Resize(size int64) { - fs.maxSize = size +func (fs *diskFilesystem) Purge(size int64) int64 { + return 0 } func (fs *diskFilesystem) Files() int64 { var nfiles int64 = 0 - fs.walk(func(path string, info os.FileInfo) { + fs.walk(fs.root, func(path string, info os.FileInfo) { + if info.IsDir() { + return + } + nfiles++ }) @@ -205,38 +276,58 @@ func (fs *diskFilesystem) Files() int64 { } func (fs *diskFilesystem) Symlink(oldname, newname string) error { - oldname = filepath.Join(fs.dir, 
filepath.Clean("/"+oldname)) + oldname = fs.cleanPath(oldname) + newname = fs.cleanPath(newname) - if !filepath.IsAbs(newname) { - return nil + info, err := os.Lstat(oldname) + if err != nil { + return err } - newname = filepath.Join(fs.dir, filepath.Clean("/"+newname)) + if info.Mode()&os.ModeSymlink != 0 { + return fmt.Errorf("%s can't link to another link (%s)", newname, oldname) + } - err := os.Symlink(oldname, newname) + if info.IsDir() { + return fmt.Errorf("can't symlink directories") + } - return err + return os.Symlink(oldname, newname) } func (fs *diskFilesystem) Open(path string) File { - path = filepath.Join(fs.dir, filepath.Clean("/"+path)) + path = fs.cleanPath(path) + + df := &diskFile{ + root: fs.root, + name: strings.TrimPrefix(path, fs.root), + } + + info, err := os.Lstat(path) + if err != nil { + return nil + } + + df.mode = info.Mode() f, err := os.Open(path) if err != nil { return nil } - df := &diskFile{ - dir: fs.dir, - name: path, - file: f, - } + df.file = f return df } -func (fs *diskFilesystem) Store(path string, r io.Reader) (int64, bool, error) { - path = filepath.Join(fs.dir, filepath.Clean("/"+path)) +func (fs *diskFilesystem) ReadFile(path string) ([]byte, error) { + path = fs.cleanPath(path) + + return os.ReadFile(path) +} + +func (fs *diskFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool, error) { + path = fs.cleanPath(path) replace := true @@ -258,16 +349,155 @@ func (fs *diskFilesystem) Store(path string, r io.Reader) (int64, bool, error) { replace = false } + defer f.Close() + size, err := f.ReadFrom(r) if err != nil { return -1, false, fmt.Errorf("reading data failed: %w", err) } + fs.lastSizeCheck = time.Time{} + return size, !replace, nil } -func (fs *diskFilesystem) Delete(path string) int64 { - path = filepath.Join(fs.dir, filepath.Clean("/"+path)) +func (fs *diskFilesystem) WriteFile(path string, data []byte) (int64, bool, error) { + return fs.WriteFileReader(path, bytes.NewBuffer(data)) +} + +func (fs *diskFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) { + path = fs.cleanPath(path) + dir, filename := filepath.Split(path) + + tmpfile, err := os.CreateTemp(dir, filename) + if err != nil { + return -1, false, err + } + + defer os.Remove(tmpfile.Name()) + + size, err := tmpfile.Write(data) + if err != nil { + return -1, false, err + } + + if err := tmpfile.Close(); err != nil { + return -1, false, err + } + + replace := false + if _, err := fs.Stat(path); err == nil { + replace = true + } + + if err := fs.rename(tmpfile.Name(), path); err != nil { + return -1, false, err + } + + fs.lastSizeCheck = time.Time{} + + return int64(size), !replace, nil +} + +func (fs *diskFilesystem) Rename(src, dst string) error { + src = fs.cleanPath(src) + dst = fs.cleanPath(dst) + + return fs.rename(src, dst) +} + +func (fs *diskFilesystem) rename(src, dst string) error { + if src == dst { + return nil + } + + // First try to rename the file + if err := os.Rename(src, dst); err == nil { + return nil + } + + // If renaming the file fails, copy the data + if err := fs.copy(src, dst); err != nil { + os.Remove(dst) + return fmt.Errorf("failed to copy files: %w", err) + } + + if err := os.Remove(src); err != nil { + os.Remove(dst) + return fmt.Errorf("failed to remove source file: %w", err) + } + + return nil +} + +func (fs *diskFilesystem) Copy(src, dst string) error { + src = fs.cleanPath(src) + dst = fs.cleanPath(dst) + + return fs.copy(src, dst) +} + +func (fs *diskFilesystem) copy(src, dst string) error { + source, err := 
os.Open(src) + if err != nil { + return fmt.Errorf("failed to open source file: %w", err) + } + + destination, err := os.Create(dst) + if err != nil { + source.Close() + return fmt.Errorf("failed to create destination file: %w", err) + } + defer destination.Close() + + if _, err := io.Copy(destination, source); err != nil { + source.Close() + os.Remove(dst) + return fmt.Errorf("failed to copy data from source to destination: %w", err) + } + + source.Close() + + fs.lastSizeCheck = time.Time{} + + return nil +} + +func (fs *diskFilesystem) MkdirAll(path string, perm os.FileMode) error { + path = fs.cleanPath(path) + + return os.MkdirAll(path, perm) +} + +func (fs *diskFilesystem) Stat(path string) (FileInfo, error) { + path = fs.cleanPath(path) + + dif := &diskFileInfo{ + root: fs.root, + name: strings.TrimPrefix(path, fs.root), + } + + info, err := os.Lstat(path) + if err != nil { + return nil, err + } + + dif.mode = info.Mode() + + if info.Mode()&os.ModeSymlink != 0 { + info, err = os.Stat(path) + if err != nil { + return nil, err + } + } + + dif.finfo = info + + return dif, nil +} + +func (fs *diskFilesystem) Remove(path string) int64 { + path = fs.cleanPath(path) finfo, err := os.Stat(path) if err != nil { @@ -280,28 +510,31 @@ func (fs *diskFilesystem) Delete(path string) int64 { return -1 } + fs.lastSizeCheck = time.Time{} + return size } -func (fs *diskFilesystem) DeleteAll() int64 { +func (fs *diskFilesystem) RemoveAll() int64 { return 0 } -func (fs *diskFilesystem) List(pattern string) []FileInfo { +func (fs *diskFilesystem) List(path, pattern string) []FileInfo { + path = fs.cleanPath(path) files := []FileInfo{} - fs.walk(func(path string, info os.FileInfo) { - if path == fs.dir { + fs.walk(path, func(path string, info os.FileInfo) { + if path == fs.root { return } - name := strings.TrimPrefix(path, fs.dir) + name := strings.TrimPrefix(path, fs.root) if name[0] != os.PathSeparator { name = string(os.PathSeparator) + name } if info.IsDir() { - name += "/" + return } if len(pattern) != 0 { @@ -311,7 +544,7 @@ func (fs *diskFilesystem) List(pattern string) []FileInfo { } files = append(files, &diskFileInfo{ - dir: fs.dir, + root: fs.root, name: name, finfo: info, }) @@ -320,8 +553,53 @@ func (fs *diskFilesystem) List(pattern string) []FileInfo { return files } -func (fs *diskFilesystem) walk(walkfn func(path string, info os.FileInfo)) { - filepath.Walk(fs.dir, func(path string, info os.FileInfo, err error) error { +func (fs *diskFilesystem) LookPath(file string) (string, error) { + if strings.Contains(file, "/") { + file = fs.cleanPath(file) + err := fs.findExecutable(file) + if err == nil { + return file, nil + } + return "", os.ErrNotExist + } + path := os.Getenv("PATH") + for _, dir := range filepath.SplitList(path) { + if dir == "" { + // Unix shell semantics: path element "" means "." + dir = "." 
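+			// Illustrative note, not part of the original diff: each candidate
+			// built below is passed through fs.cleanPath, which re-anchors it
+			// under the filesystem root, so only executables that are visible
+			// inside this filesystem can be returned.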
+ } + path := filepath.Join(dir, file) + path = fs.cleanPath(path) + if err := fs.findExecutable(path); err == nil { + if !filepath.IsAbs(path) { + return path, os.ErrNotExist + } + return path, nil + } + } + return "", os.ErrNotExist +} + +func (fs *diskFilesystem) findExecutable(file string) error { + d, err := fs.Stat(file) + if err != nil { + return err + } + + m := d.Mode() + if m.IsDir() { + return fmt.Errorf("is a directory") + } + + if m&0111 != 0 { + return nil + } + + return os.ErrPermission +} + +func (fs *diskFilesystem) walk(path string, walkfn func(path string, info os.FileInfo)) { + filepath.Walk(path, func(path string, info os.FileInfo, err error) error { if err != nil { return nil } @@ -341,3 +619,11 @@ func (fs *diskFilesystem) walk(walkfn func(path string, info os.FileInfo)) { return nil }) } + +func (fs *diskFilesystem) cleanPath(path string) string { + if !filepath.IsAbs(path) { + path = filepath.Join(fs.cwd, path) + } + + return filepath.Join(fs.root, filepath.Clean(path)) +} diff --git a/io/fs/dummy.go b/io/fs/dummy.go deleted file mode 100644 index 442d1586..00000000 --- a/io/fs/dummy.go +++ /dev/null @@ -1,40 +0,0 @@ -package fs - -import ( - "io" - "time" -) - -type dummyFileInfo struct{} - -func (d *dummyFileInfo) Name() string { return "" } -func (d *dummyFileInfo) Size() int64 { return 0 } -func (d *dummyFileInfo) ModTime() time.Time { return time.Date(2000, 1, 1, 0, 0, 0, 0, nil) } -func (d *dummyFileInfo) IsLink() (string, bool) { return "", false } -func (d *dummyFileInfo) IsDir() bool { return false } - -type dummyFile struct{} - -func (d *dummyFile) Read(p []byte) (int, error) { return 0, io.EOF } -func (d *dummyFile) Close() error { return nil } -func (d *dummyFile) Name() string { return "" } -func (d *dummyFile) Stat() (FileInfo, error) { return &dummyFileInfo{}, nil } - -type dummyFilesystem struct{} - -func (d *dummyFilesystem) Base() string { return "/" } -func (d *dummyFilesystem) Rebase(string) error { return nil } -func (d *dummyFilesystem) Size() (int64, int64) { return 0, -1 } -func (d *dummyFilesystem) Resize(int64) {} -func (d *dummyFilesystem) Files() int64 { return 0 } -func (d *dummyFilesystem) Symlink(string, string) error { return nil } -func (d *dummyFilesystem) Open(string) File { return &dummyFile{} } -func (d *dummyFilesystem) Store(string, io.Reader) (int64, bool, error) { return 0, true, nil } -func (d *dummyFilesystem) Delete(string) int64 { return 0 } -func (d *dummyFilesystem) DeleteAll() int64 { return 0 } -func (d *dummyFilesystem) List(string) []FileInfo { return []FileInfo{} } - -// NewDummyFilesystem return a dummy filesystem -func NewDummyFilesystem() Filesystem { - return &dummyFilesystem{} -} diff --git a/io/fs/fs.go b/io/fs/fs.go index d1923c47..9f3b8661 100644 --- a/io/fs/fs.go +++ b/io/fs/fs.go @@ -3,24 +3,29 @@ package fs import ( "io" + "io/fs" + "os" "time" ) // FileInfo describes a file and is returned by Stat. type FileInfo interface { - // Name returns the full name of the file + // Name returns the full name of the file. Name() string - // Size reports the size of the file in bytes + // Size reports the size of the file in bytes. Size() int64 - // ModTime returns the time of last modification + // Mode returns the file mode. + Mode() fs.FileMode + + // ModTime returns the time of last modification. ModTime() time.Time // IsLink returns the path this file is linking to and true. Otherwise an empty string and false. 
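 	// For example (an illustrative note, grounded in this diff's tests): after
 	// Symlink("/foobar", "/foobaz"), the FileInfo returned by Stat("/foobaz")
 	// reports IsLink() as ("/foobar", true).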
 	IsLink() (string, bool)
 
-	// IsDir returns whether the file represents a directory
+	// IsDir returns whether the file represents a directory.
 	IsDir() bool
 }
 
@@ -28,52 +33,101 @@ type FileInfo interface {
 type File interface {
 	io.ReadCloser
 
-	// Name returns the Name of the file
+	// Name returns the name of the file.
 	Name() string
 
-	// Stat returns the FileInfo to this file. In case of an error
-	// FileInfo is nil and the error is non-nil.
+	// Stat returns the FileInfo to this file. In case of an error FileInfo is nil
+	// and the error is non-nil. If the file is a symlink, the info reports the name and mode
+	// of the link itself, but the modification time and size are of the linked file.
 	Stat() (FileInfo, error)
 }
 
-// Filesystem is an interface that provides access to a filesystem.
-type Filesystem interface {
-	// Base returns the base path of this filesystem
-	Base() string
-
-	// Rebase sets a new base path for this filesystem
-	Rebase(string) error
-
+type ReadFilesystem interface {
 	// Size returns the consumed size and capacity of the filesystem in bytes. The
-	// capacity is negative if the filesystem can consume as much space as it can.
+	// capacity is zero or negative if the filesystem can consume as much space as it wants.
 	Size() (int64, int64)
 
-	// Resize resizes the filesystem to the new size. Files may need to be deleted.
-	Resize(size int64)
-
 	// Files returns the current number of files in the filesystem.
 	Files() int64
 
+	// Open returns the file stored at the given path. It returns nil if the
+	// file doesn't exist. If the file is a symlink, the name is the name of
+	// the link, but it will read the contents of the linked file.
+	Open(path string) File
+
+	// ReadFile reads the content of the file at the given path. It returns the
+	// file's data, or an error if the file doesn't exist.
+	ReadFile(path string) ([]byte, error)
+
+	// Stat returns info about the file at path. If the file doesn't exist, an error
+	// will be returned. If the file is a symlink, the info reports the name and mode
+	// of the link itself, but the modification time and size are of the linked file.
+	Stat(path string) (FileInfo, error)
+
+	// List lists all files that are currently on the filesystem.
+	List(path, pattern string) []FileInfo
+
+	// LookPath searches for an executable named file in the directories named by the PATH environment
+	// variable. If file contains a slash, it is tried directly and the PATH is not consulted. Otherwise,
+	// on success, the result is an absolute path. On non-disk filesystems, only the mere existence
+	// of that file is verified.
+	LookPath(file string) (string, error)
+}
+
+type WriteFilesystem interface {
 	// Symlink creates newname as a symbolic link to oldname.
 	Symlink(oldname, newname string) error
 
-	// Open returns the file stored at the given path. It returns nil if the
-	// file doesn't exist.
-	Open(path string) File
-
-	// Store adds a file to the filesystem. Returns the size of the data that has been
+	// WriteFileReader adds a file to the filesystem. Returns the size of the data that has been
 	// stored in bytes and whether the file is new. The size is negative if there was
 	// an error adding the file and error is not nil.
-	Store(path string, r io.Reader) (int64, bool, error)
+	WriteFileReader(path string, r io.Reader) (int64, bool, error)
 
-	// Delete removes a file at the given path from the filesystem. Returns the size of
+	// WriteFile adds a file to the filesystem.
Returns the size of the data that has been
+	// stored in bytes and whether the file is new. The size is negative if there was
+	// an error adding the file and error is not nil.
+	WriteFile(path string, data []byte) (int64, bool, error)
+
+	// WriteFileSafe adds a file to the filesystem by first writing it to a tempfile and then
+	// renaming it to the actual path. Returns the size of the data that has been
+	// stored in bytes and whether the file is new. The size is negative if there was
+	// an error adding the file and error is not nil.
+	WriteFileSafe(path string, data []byte) (int64, bool, error)
+
+	// MkdirAll creates a directory named path, along with any necessary parents, and returns nil,
+	// or else returns an error. The permission bits perm (before umask) are used for all directories
+	// that MkdirAll creates. If path is already a directory, MkdirAll does nothing and returns nil.
+	MkdirAll(path string, perm os.FileMode) error
+
+	// Rename renames the file from src to dst. If src and dst can't be renamed
+	// regularly, the data is copied from src to dst. dst will be overwritten
+	// if it already exists. src will be removed after all data has been copied
+	// successfully. Both files exist during copying.
+	Rename(src, dst string) error
+
+	// Copy copies a file from src to dst.
+	Copy(src, dst string) error
+
+	// Remove removes a file at the given path from the filesystem. Returns the size of
 	// the removed file in bytes. The size is negative if the file doesn't exist.
-	Delete(path string) int64
+	Remove(path string) int64
 
-	// DeleteAll removes all files from the filesystem. Returns the size of the
+	// RemoveAll removes all files from the filesystem. Returns the size of the
 	// removed files in bytes.
-	DeleteAll() int64
-
-	// List lists all files that are currently on the filesystem.
-	List(pattern string) []FileInfo
+	RemoveAll() int64
+}
+
+// Filesystem is an interface that provides access to a filesystem.
+type Filesystem interface {
+	ReadFilesystem
+	WriteFilesystem
+
+	// Name returns the name of the filesystem.
+	Name() string
+
+	// Type returns the type of the filesystem, e.g.
disk, mem, s3 + Type() string + + Metadata(key string) string + SetMetadata(key string, data string) } diff --git a/io/fs/fs_test.go b/io/fs/fs_test.go new file mode 100644 index 00000000..18a7aa9f --- /dev/null +++ b/io/fs/fs_test.go @@ -0,0 +1,742 @@ +package fs + +import ( + "errors" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +var ErrNoMinio = errors.New("minio binary not found") + +func startMinio(t *testing.T, path string) (*exec.Cmd, error) { + err := os.MkdirAll(path, 0700) + require.NoError(t, err) + + minio, err := exec.LookPath("minio") + if err != nil { + return nil, ErrNoMinio + } + + proc := exec.Command(minio, "server", path, "--address", "127.0.0.1:9000") + proc.Stderr = os.Stderr + proc.Stdout = os.Stdout + err = proc.Start() + require.NoError(t, err) + + time.Sleep(5 * time.Second) + + return proc, nil +} + +func stopMinio(t *testing.T, proc *exec.Cmd) { + err := proc.Process.Signal(os.Interrupt) + require.NoError(t, err) + + proc.Wait() +} + +func TestFilesystem(t *testing.T) { + miniopath, err := filepath.Abs("./minio") + require.NoError(t, err) + + err = os.RemoveAll(miniopath) + require.NoError(t, err) + + minio, err := startMinio(t, miniopath) + if err != nil { + if err != ErrNoMinio { + require.NoError(t, err) + } + } + + os.RemoveAll("./testing/") + + filesystems := map[string]func(string) (Filesystem, error){ + "memfs": func(name string) (Filesystem, error) { + return NewMemFilesystem(MemConfig{}) + }, + "diskfs": func(name string) (Filesystem, error) { + return NewRootedDiskFilesystem(RootedDiskConfig{ + Root: "./testing/" + name, + }) + }, + "s3fs": func(name string) (Filesystem, error) { + return NewS3Filesystem(S3Config{ + Name: name, + Endpoint: "127.0.0.1:9000", + AccessKeyID: "minioadmin", + SecretAccessKey: "minioadmin", + Region: "", + Bucket: strings.ToLower(name), + UseSSL: false, + Logger: nil, + }) + }, + } + + tests := map[string]func(*testing.T, Filesystem){ + "new": testNew, + "metadata": testMetadata, + "writeFile": testWriteFile, + "writeFileSafe": testWriteFileSafe, + "writeFileReader": testWriteFileReader, + "delete": testDelete, + "files": testFiles, + "replace": testReplace, + "list": testList, + "listGlob": testListGlob, + "deleteAll": testDeleteAll, + "data": testData, + "statDir": testStatDir, + "mkdirAll": testMkdirAll, + "rename": testRename, + "renameOverwrite": testRenameOverwrite, + "copy": testCopy, + "symlink": testSymlink, + "stat": testStat, + "copyOverwrite": testCopyOverwrite, + "symlinkErrors": testSymlinkErrors, + "symlinkOpenStat": testSymlinkOpenStat, + "open": testOpen, + } + + for fsname, fs := range filesystems { + for name, test := range tests { + t.Run(fsname+"-"+name, func(t *testing.T) { + if fsname == "s3fs" && minio == nil { + t.Skip("minio server not available") + } + filesystem, err := fs(name) + require.NoError(t, err) + test(t, filesystem) + }) + } + } + + os.RemoveAll("./testing/") + + if minio != nil { + stopMinio(t, minio) + } + + os.RemoveAll(miniopath) +} + +func testNew(t *testing.T, fs Filesystem) { + cur, max := fs.Size() + + require.Equal(t, int64(0), cur, "current size") + require.Equal(t, int64(-1), max, "max size") + + cur = fs.Files() + + require.Equal(t, int64(0), cur, "number of files") +} + +func testMetadata(t *testing.T, fs Filesystem) { + fs.SetMetadata("foo", "bar") + require.Equal(t, "bar", fs.Metadata("foo")) +} + +func testWriteFile(t *testing.T, fs Filesystem) { + size, created, err := fs.WriteFile("/foobar", 
[]byte("xxxxx")) + + require.Nil(t, err) + require.Equal(t, int64(5), size) + require.Equal(t, true, created) + + cur, max := fs.Size() + + require.Equal(t, int64(5), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) +} + +func testWriteFileSafe(t *testing.T, fs Filesystem) { + size, created, err := fs.WriteFileSafe("/foobar", []byte("xxxxx")) + + require.Nil(t, err) + require.Equal(t, int64(5), size) + require.Equal(t, true, created) + + cur, max := fs.Size() + + require.Equal(t, int64(5), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) +} + +func testWriteFileReader(t *testing.T, fs Filesystem) { + data := strings.NewReader("xxxxx") + + size, created, err := fs.WriteFileReader("/foobar", data) + + require.Nil(t, err) + require.Equal(t, int64(5), size) + require.Equal(t, true, created) + + cur, max := fs.Size() + + require.Equal(t, int64(5), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) +} + +func testOpen(t *testing.T, fs Filesystem) { + file := fs.Open("/foobar") + require.Nil(t, file) + + _, _, err := fs.WriteFileReader("/foobar", strings.NewReader("xxxxx")) + require.NoError(t, err) + + file = fs.Open("/foobar") + require.NotNil(t, file) + require.Equal(t, "/foobar", file.Name()) + + stat, err := file.Stat() + require.NoError(t, err) + require.Equal(t, "/foobar", stat.Name()) + require.Equal(t, int64(5), stat.Size()) + require.Equal(t, false, stat.IsDir()) +} + +func testDelete(t *testing.T, fs Filesystem) { + size := fs.Remove("/foobar") + + require.Equal(t, int64(-1), size) + + data := strings.NewReader("xxxxx") + + fs.WriteFileReader("/foobar", data) + + size = fs.Remove("/foobar") + + require.Equal(t, int64(5), size) + + cur, max := fs.Size() + + require.Equal(t, int64(0), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(0), cur) +} + +func testFiles(t *testing.T, fs Filesystem) { + require.Equal(t, int64(0), fs.Files()) + + fs.WriteFileReader("/foobar.txt", strings.NewReader("bar")) + + require.Equal(t, int64(1), fs.Files()) + + fs.MkdirAll("/path/to/foo", 0777) + + require.Equal(t, int64(1), fs.Files()) + + fs.Remove("/foobar.txt") + + require.Equal(t, int64(0), fs.Files()) +} + +func testReplace(t *testing.T, fs Filesystem) { + data := strings.NewReader("xxxxx") + + size, created, err := fs.WriteFileReader("/foobar", data) + + require.Nil(t, err) + require.Equal(t, int64(5), size) + require.Equal(t, true, created) + + cur, max := fs.Size() + + require.Equal(t, int64(5), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) + + data = strings.NewReader("yyy") + + size, created, err = fs.WriteFileReader("/foobar", data) + + require.Nil(t, err) + require.Equal(t, int64(3), size) + require.Equal(t, false, created) + + cur, max = fs.Size() + + require.Equal(t, int64(3), cur) + require.Equal(t, int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(1), cur) +} + +func testList(t *testing.T, fs Filesystem) { + fs.WriteFileReader("/foobar1", strings.NewReader("a")) + fs.WriteFileReader("/foobar2", strings.NewReader("bb")) + fs.WriteFileReader("/foobar3", strings.NewReader("ccc")) + fs.WriteFileReader("/foobar4", strings.NewReader("dddd")) + fs.WriteFileReader("/path/foobar3", strings.NewReader("ccc")) + fs.WriteFileReader("/path/to/foobar4", strings.NewReader("dddd")) + + cur, max := fs.Size() + + require.Equal(t, int64(17), cur) + require.Equal(t, 
int64(-1), max) + + cur = fs.Files() + + require.Equal(t, int64(6), cur) + + getNames := func(files []FileInfo) []string { + names := []string{} + for _, f := range files { + names = append(names, f.Name()) + } + return names + } + + files := fs.List("/", "") + + require.Equal(t, 6, len(files)) + require.ElementsMatch(t, []string{"/foobar1", "/foobar2", "/foobar3", "/foobar4", "/path/foobar3", "/path/to/foobar4"}, getNames(files)) + + files = fs.List("/path", "") + + require.Equal(t, 2, len(files)) + require.ElementsMatch(t, []string{"/path/foobar3", "/path/to/foobar4"}, getNames(files)) +} + +func testListGlob(t *testing.T, fs Filesystem) { + fs.WriteFileReader("/foobar1", strings.NewReader("a")) + fs.WriteFileReader("/path/foobar2", strings.NewReader("a")) + fs.WriteFileReader("/path/to/foobar3", strings.NewReader("a")) + fs.WriteFileReader("/foobar4", strings.NewReader("a")) + + cur := fs.Files() + + require.Equal(t, int64(4), cur) + + getNames := func(files []FileInfo) []string { + names := []string{} + for _, f := range files { + names = append(names, f.Name()) + } + return names + } + + files := getNames(fs.List("/", "/foo*")) + require.Equal(t, 2, len(files)) + require.ElementsMatch(t, []string{"/foobar1", "/foobar4"}, files) + + files = getNames(fs.List("/", "/*bar?")) + require.Equal(t, 2, len(files)) + require.ElementsMatch(t, []string{"/foobar1", "/foobar4"}, files) + + files = getNames(fs.List("/", "/path/*")) + require.Equal(t, 1, len(files)) + require.ElementsMatch(t, []string{"/path/foobar2"}, files) + + files = getNames(fs.List("/", "/path/**")) + require.Equal(t, 2, len(files)) + require.ElementsMatch(t, []string{"/path/foobar2", "/path/to/foobar3"}, files) + + files = getNames(fs.List("/path", "/**")) + require.Equal(t, 2, len(files)) + require.ElementsMatch(t, []string{"/path/foobar2", "/path/to/foobar3"}, files) +} + +func testDeleteAll(t *testing.T, fs Filesystem) { + if _, ok := fs.(*diskFilesystem); ok { + return + } + + fs.WriteFileReader("/foobar1", strings.NewReader("abc")) + fs.WriteFileReader("/path/foobar2", strings.NewReader("abc")) + fs.WriteFileReader("/path/to/foobar3", strings.NewReader("abc")) + fs.WriteFileReader("/foobar4", strings.NewReader("abc")) + + cur := fs.Files() + + require.Equal(t, int64(4), cur) + + size := fs.RemoveAll() + require.Equal(t, int64(12), size) + + cur = fs.Files() + + require.Equal(t, int64(0), cur) +} + +func testData(t *testing.T, fs Filesystem) { + file := fs.Open("/foobar") + require.Nil(t, file) + + _, err := fs.ReadFile("/foobar") + require.Error(t, err) + + data := "gduwotoxqb" + + data1 := strings.NewReader(data) + + _, _, err = fs.WriteFileReader("/foobar", data1) + require.NoError(t, err) + + file = fs.Open("/foobar") + require.NotNil(t, file) + + data2 := make([]byte, len(data)+1) + n, err := file.Read(data2) + if err != nil { + if err != io.EOF { + require.NoError(t, err) + } + } + + require.Equal(t, len(data), n) + require.Equal(t, []byte(data), data2[:n]) + + data3, err := fs.ReadFile("/foobar") + + require.NoError(t, err) + require.Equal(t, []byte(data), data3) +} + +func testStatDir(t *testing.T, fs Filesystem) { + info, err := fs.Stat("/") + require.NoError(t, err) + require.NotNil(t, info) + require.Equal(t, true, info.IsDir()) + + data := strings.NewReader("gduwotoxqb") + fs.WriteFileReader("/these/are/some/directories/foobar", data) + + info, err = fs.Stat("/foobar") + require.Error(t, err) + require.Nil(t, info) + + info, err = fs.Stat("/these/are/some/directories/foobar") + require.NoError(t, err) + 
require.Equal(t, "/these/are/some/directories/foobar", info.Name()) + require.Equal(t, int64(10), info.Size()) + require.Equal(t, false, info.IsDir()) + + info, err = fs.Stat("/these") + require.NoError(t, err) + require.Equal(t, "/these", info.Name()) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + info, err = fs.Stat("/these/are/") + require.NoError(t, err) + require.Equal(t, "/these/are", info.Name()) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + info, err = fs.Stat("/these/are/some") + require.NoError(t, err) + require.Equal(t, "/these/are/some", info.Name()) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + info, err = fs.Stat("/these/are/some/directories") + require.NoError(t, err) + require.Equal(t, "/these/are/some/directories", info.Name()) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) +} + +func testMkdirAll(t *testing.T, fs Filesystem) { + info, err := fs.Stat("/foo/bar/dir") + require.Error(t, err) + require.Nil(t, info) + + err = fs.MkdirAll("/foo/bar/dir", 0755) + require.NoError(t, err) + + err = fs.MkdirAll("/foo/bar", 0755) + require.NoError(t, err) + + info, err = fs.Stat("/foo/bar/dir") + require.NoError(t, err) + require.NotNil(t, info) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + info, err = fs.Stat("/") + require.NoError(t, err) + require.NotNil(t, info) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + info, err = fs.Stat("/foo") + require.NoError(t, err) + require.NotNil(t, info) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + info, err = fs.Stat("/foo/bar") + require.NoError(t, err) + require.NotNil(t, info) + require.Equal(t, int64(0), info.Size()) + require.Equal(t, true, info.IsDir()) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb")) + require.NoError(t, err) + + err = fs.MkdirAll("/foobar", 0755) + require.Error(t, err) +} + +func testRename(t *testing.T, fs Filesystem) { + err := fs.Rename("/foobar", "/foobaz") + require.Error(t, err) + + _, err = fs.Stat("/foobar") + require.Error(t, err) + + _, err = fs.Stat("/foobaz") + require.Error(t, err) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb")) + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.NoError(t, err) + + err = fs.Rename("/foobar", "/foobaz") + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.Error(t, err) + + _, err = fs.Stat("/foobaz") + require.NoError(t, err) +} + +func testRenameOverwrite(t *testing.T, fs Filesystem) { + _, err := fs.Stat("/foobar") + require.Error(t, err) + + _, err = fs.Stat("/foobaz") + require.Error(t, err) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + require.NoError(t, err) + + _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz")) + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.NoError(t, err) + + _, err = fs.Stat("/foobaz") + require.NoError(t, err) + + err = fs.Rename("/foobar", "/foobaz") + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.Error(t, err) + + _, err = fs.Stat("/foobaz") + require.NoError(t, err) + + data, err := fs.ReadFile("/foobaz") + require.NoError(t, err) + require.Equal(t, "foobar", string(data)) +} + +func testSymlink(t *testing.T, fs Filesystem) { + if _, ok := fs.(*s3Filesystem); ok { + return + } + + err := 
fs.Symlink("/foobar", "/foobaz") + require.Error(t, err) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + require.NoError(t, err) + + err = fs.Symlink("/foobar", "/foobaz") + require.NoError(t, err) + + file := fs.Open("/foobaz") + require.NotNil(t, file) + require.Equal(t, "/foobaz", file.Name()) + + data := make([]byte, 10) + n, err := file.Read(data) + if err != nil { + if err != io.EOF { + require.NoError(t, err) + } + } + require.NoError(t, err) + require.Equal(t, 6, n) + require.Equal(t, "foobar", string(data[:n])) + + stat, err := fs.Stat("/foobaz") + require.NoError(t, err) + require.Equal(t, "/foobaz", stat.Name()) + require.Equal(t, int64(6), stat.Size()) + require.NotEqual(t, 0, int(stat.Mode()&os.ModeSymlink)) + + link, ok := stat.IsLink() + require.Equal(t, "/foobar", link) + require.Equal(t, true, ok) + + data, err = fs.ReadFile("/foobaz") + require.NoError(t, err) + require.Equal(t, "foobar", string(data)) +} + +func testSymlinkOpenStat(t *testing.T, fs Filesystem) { + if _, ok := fs.(*s3Filesystem); ok { + return + } + + _, _, err := fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + require.NoError(t, err) + + err = fs.Symlink("/foobar", "/foobaz") + require.NoError(t, err) + + file := fs.Open("/foobaz") + require.NotNil(t, file) + require.Equal(t, "/foobaz", file.Name()) + + fstat, err := file.Stat() + require.NoError(t, err) + + stat, err := fs.Stat("/foobaz") + require.NoError(t, err) + + require.Equal(t, "/foobaz", fstat.Name()) + require.Equal(t, fstat.Name(), stat.Name()) + + require.Equal(t, int64(6), fstat.Size()) + require.Equal(t, fstat.Size(), stat.Size()) + + require.NotEqual(t, 0, int(fstat.Mode()&os.ModeSymlink)) + require.Equal(t, fstat.Mode(), stat.Mode()) +} + +func testStat(t *testing.T, fs Filesystem) { + _, _, err := fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + require.NoError(t, err) + + file := fs.Open("/foobar") + require.NotNil(t, file) + + stat1, err := fs.Stat("/foobar") + require.NoError(t, err) + + stat2, err := file.Stat() + require.NoError(t, err) + + require.Equal(t, stat1, stat2) +} + +func testCopy(t *testing.T, fs Filesystem) { + err := fs.Rename("/foobar", "/foobaz") + require.Error(t, err) + + _, err = fs.Stat("/foobar") + require.Error(t, err) + + _, err = fs.Stat("/foobaz") + require.Error(t, err) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("gduwotoxqb")) + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.NoError(t, err) + + err = fs.Copy("/foobar", "/foobaz") + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.NoError(t, err) + + _, err = fs.Stat("/foobaz") + require.NoError(t, err) +} + +func testCopyOverwrite(t *testing.T, fs Filesystem) { + _, err := fs.Stat("/foobar") + require.Error(t, err) + + _, err = fs.Stat("/foobaz") + require.Error(t, err) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + require.NoError(t, err) + + _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz")) + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.NoError(t, err) + + _, err = fs.Stat("/foobaz") + require.NoError(t, err) + + err = fs.Copy("/foobar", "/foobaz") + require.NoError(t, err) + + _, err = fs.Stat("/foobar") + require.NoError(t, err) + + _, err = fs.Stat("/foobaz") + require.NoError(t, err) + + data, err := fs.ReadFile("/foobaz") + require.NoError(t, err) + require.Equal(t, "foobar", string(data)) +} + +func testSymlinkErrors(t *testing.T, fs Filesystem) { + if _, ok := 
fs.(*s3Filesystem); ok { + return + } + + err := fs.Symlink("/foobar", "/foobaz") + require.Error(t, err) + + _, _, err = fs.WriteFileReader("/foobar", strings.NewReader("foobar")) + require.NoError(t, err) + + _, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("foobaz")) + require.NoError(t, err) + + err = fs.Symlink("/foobar", "/foobaz") + require.Error(t, err) + + err = fs.Symlink("/foobar", "/bazfoo") + require.NoError(t, err) + + err = fs.Symlink("/bazfoo", "/barfoo") + require.Error(t, err) +} diff --git a/io/fs/mem.go b/io/fs/mem.go index d682d0a3..a75eb932 100644 --- a/io/fs/mem.go +++ b/io/fs/mem.go @@ -4,7 +4,11 @@ import ( "bytes" "fmt" "io" + "io/fs" + "os" + "path/filepath" "sort" + "strings" "sync" "time" @@ -15,25 +19,15 @@ import ( // MemConfig is the config that is required for creating // a new memory filesystem. type MemConfig struct { - // Base is the base path to be reported for this filesystem - Base string - - // Size is the capacity of the filesystem in bytes - Size int64 - - // Set true to automatically delete the oldest files until there's - // enough space to store a new file - Purge bool - - // For logging, optional - Logger log.Logger + Logger log.Logger // For logging, optional } type memFileInfo struct { - name string - size int64 - lastMod time.Time - linkTo string + name string // Full name of the file (including path) + size int64 // The size of the file in bytes + dir bool // Whether this file represents a directory + lastMod time.Time // The time of the last modification of the file + linkTo string // Where the file links to, empty if it's not a link } func (f *memFileInfo) Name() string { @@ -44,6 +38,20 @@ func (f *memFileInfo) Size() int64 { return f.size } +func (f *memFileInfo) Mode() fs.FileMode { + mode := fs.FileMode(fs.ModePerm) + + if f.dir { + mode |= fs.ModeDir + } + + if len(f.linkTo) != 0 { + mode |= fs.ModeSymlink + } + + return mode +} + func (f *memFileInfo) ModTime() time.Time { return f.lastMod } @@ -53,24 +61,12 @@ func (f *memFileInfo) IsLink() (string, bool) { } func (f *memFileInfo) IsDir() bool { - return false + return f.dir } type memFile struct { - // Name of the file - name string - - // Size of the file in bytes - size int64 - - // Last modification of the file as a UNIX timestamp - lastMod time.Time - - // Contents of the file - data *bytes.Buffer - - // Link to another file - linkTo string + memFileInfo + data *bytes.Buffer // Contents of the file } func (f *memFile) Name() string { @@ -81,6 +77,7 @@ func (f *memFile) Stat() (FileInfo, error) { info := &memFileInfo{ name: f.name, size: f.size, + dir: f.dir, lastMod: f.lastMod, linkTo: f.linkTo, } @@ -107,7 +104,8 @@ func (f *memFile) Close() error { } type memFilesystem struct { - base string + metadata map[string]string + metaLock sync.RWMutex // Mapping of path to file files map[string]*memFile @@ -118,34 +116,27 @@ type memFilesystem struct { // Pool for the storage of the contents of files dataPool sync.Pool - // Max. size of the filesystem in bytes as - // given by the config - maxSize int64 - // Current size of the filesystem in bytes currentSize int64 - // Purge setting from the config - purge bool - // Logger from the config logger log.Logger } // NewMemFilesystem creates a new filesystem in memory that implements // the Filesystem interface. 
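+// A minimal usage sketch (illustrative, not part of the original diff; note
+// that the constructor now also returns an error):
+//
+//	fs, err := NewMemFilesystem(MemConfig{})
+//	if err != nil {
+//		// handle the error
+//	}
+//	_, _, err = fs.WriteFile("/hello.txt", []byte("hello"))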
-func NewMemFilesystem(config MemConfig) Filesystem { +func NewMemFilesystem(config MemConfig) (Filesystem, error) { fs := &memFilesystem{ - base: config.Base, - maxSize: config.Size, - purge: config.Purge, - logger: config.Logger, + metadata: make(map[string]string), + logger: config.Logger, } if fs.logger == nil { - fs.logger = log.New("MemFS") + fs.logger = log.New("") } + fs.logger = fs.logger.WithField("type", "mem") + fs.files = make(map[string]*memFile) fs.dataPool = sync.Pool{ @@ -154,61 +145,105 @@ func NewMemFilesystem(config MemConfig) Filesystem { }, } - fs.logger.WithFields(log.Fields{ - "size_bytes": fs.maxSize, - "purge": fs.purge, - }).Debug().Log("Created") + fs.logger.Debug().Log("Created") - return fs + return fs, nil } -func (fs *memFilesystem) Base() string { - return fs.base +func NewMemFilesystemFromDir(dir string, config MemConfig) (Filesystem, error) { + mem, err := NewMemFilesystem(config) + if err != nil { + return nil, err + } + + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + return nil + } + + if info.IsDir() { + return nil + } + + mode := info.Mode() + if !mode.IsRegular() { + return nil + } + + if mode&os.ModeSymlink != 0 { + return nil + } + + file, err := os.Open(path) + if err != nil { + return nil + } + + defer file.Close() + + _, _, err = mem.WriteFileReader(path, file) + if err != nil { + return fmt.Errorf("can't copy %s", path) + } + + return nil + }) + if err != nil { + return nil, err + } + + return mem, nil } -func (fs *memFilesystem) Rebase(base string) error { - fs.base = base +func (fs *memFilesystem) Name() string { + return "mem" +} - return nil +func (fs *memFilesystem) Type() string { + return "mem" +} + +func (fs *memFilesystem) Metadata(key string) string { + fs.metaLock.RLock() + defer fs.metaLock.RUnlock() + + return fs.metadata[key] +} + +func (fs *memFilesystem) SetMetadata(key, data string) { + fs.metaLock.Lock() + defer fs.metaLock.Unlock() + + fs.metadata[key] = data } func (fs *memFilesystem) Size() (int64, int64) { fs.filesLock.RLock() defer fs.filesLock.RUnlock() - return fs.currentSize, fs.maxSize -} - -func (fs *memFilesystem) Resize(size int64) { - fs.filesLock.Lock() - defer fs.filesLock.Unlock() - - diffSize := fs.maxSize - size - - if diffSize == 0 { - return - } - - if diffSize > 0 { - fs.free(diffSize) - } - - fs.logger.WithFields(log.Fields{ - "from_bytes": fs.maxSize, - "to_bytes": size, - }).Debug().Log("Resizing") - - fs.maxSize = size + return fs.currentSize, -1 } func (fs *memFilesystem) Files() int64 { fs.filesLock.RLock() defer fs.filesLock.RUnlock() - return int64(len(fs.files)) + nfiles := int64(0) + + for _, f := range fs.files { + if f.dir { + continue + } + + nfiles++ + } + + return nfiles } func (fs *memFilesystem) Open(path string) File { + path = fs.cleanPath(path) + fs.filesLock.RLock() file, ok := fs.files[path] fs.filesLock.RUnlock() @@ -218,29 +253,68 @@ func (fs *memFilesystem) Open(path string) File { } newFile := &memFile{ - name: file.name, - size: file.size, - lastMod: file.lastMod, - linkTo: file.linkTo, + memFileInfo: memFileInfo{ + name: file.name, + size: file.size, + lastMod: file.lastMod, + linkTo: file.linkTo, + }, + } + + if len(file.linkTo) != 0 { + file, ok = fs.files[file.linkTo] + if !ok { + return nil + } } if file.data != nil { + newFile.lastMod = file.lastMod newFile.data = bytes.NewBuffer(file.data.Bytes()) + newFile.size = int64(newFile.data.Len()) } return newFile } +func (fs *memFilesystem) ReadFile(path string) ([]byte, 
error) { + path = fs.cleanPath(path) + + fs.filesLock.RLock() + file, ok := fs.files[path] + fs.filesLock.RUnlock() + + if !ok { + return nil, os.ErrNotExist + } + + if len(file.linkTo) != 0 { + file, ok = fs.files[file.linkTo] + if !ok { + return nil, os.ErrNotExist + } + } + + if file.data != nil { + return file.data.Bytes(), nil + } + + return nil, nil +} + func (fs *memFilesystem) Symlink(oldname, newname string) error { + oldname = fs.cleanPath(oldname) + newname = fs.cleanPath(newname) + fs.filesLock.Lock() defer fs.filesLock.Unlock() - if _, ok := fs.files[newname]; ok { - return fmt.Errorf("%s already exist", newname) + if _, ok := fs.files[oldname]; !ok { + return os.ErrNotExist } - if oldname[0] != '/' { - oldname = "/" + oldname + if _, ok := fs.files[newname]; ok { + return os.ErrExist } if file, ok := fs.files[oldname]; ok { @@ -250,11 +324,14 @@ func (fs *memFilesystem) Symlink(oldname, newname string) error { } newFile := &memFile{ - name: newname, - size: 0, - lastMod: time.Now(), - data: nil, - linkTo: oldname, + memFileInfo: memFileInfo{ + name: newname, + dir: false, + size: 0, + lastMod: time.Now(), + linkTo: oldname, + }, + data: nil, } fs.files[newname] = newFile @@ -262,18 +339,21 @@ func (fs *memFilesystem) Symlink(oldname, newname string) error { return nil } -func (fs *memFilesystem) Store(path string, r io.Reader) (int64, bool, error) { +func (fs *memFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool, error) { + path = fs.cleanPath(path) + newFile := &memFile{ - name: path, - size: 0, - lastMod: time.Now(), - data: nil, + memFileInfo: memFileInfo{ + name: path, + dir: false, + size: 0, + lastMod: time.Now(), + }, + data: fs.dataPool.Get().(*bytes.Buffer), } - data := fs.dataPool.Get().(*bytes.Buffer) - data.Reset() - - size, err := data.ReadFrom(r) + newFile.data.Reset() + size, err := newFile.data.ReadFrom(r) if err != nil { fs.logger.WithFields(log.Fields{ "path": path, @@ -281,55 +361,26 @@ func (fs *memFilesystem) Store(path string, r io.Reader) (int64, bool, error) { "error": err, }).Warn().Log("Incomplete file") } - newFile.size = size - newFile.data = data - // reject if the new file is larger than the available space - if fs.maxSize > 0 && newFile.size > fs.maxSize { - fs.dataPool.Put(data) - return -1, false, fmt.Errorf("File is too big") - } + newFile.size = size fs.filesLock.Lock() defer fs.filesLock.Unlock() - // calculate the new size of the filesystem - newSize := fs.currentSize + newFile.size - file, replace := fs.files[path] if replace { - newSize -= file.size + delete(fs.files, path) + + fs.currentSize -= file.size + + fs.dataPool.Put(file.data) + file.data = nil } - if fs.maxSize > 0 { - if newSize > fs.maxSize { - if !fs.purge { - fs.dataPool.Put(data) - return -1, false, fmt.Errorf("not enough space on device") - } - - if replace { - delete(fs.files, path) - fs.currentSize -= file.size - - fs.dataPool.Put(file.data) - file.data = nil - } - - newSize -= fs.free(fs.currentSize + newFile.size - fs.maxSize) - } - } else { - if replace { - delete(fs.files, path) - - fs.dataPool.Put(file.data) - file.data = nil - } - } - - fs.currentSize = newSize fs.files[path] = newFile + fs.currentSize += newFile.size + logger := fs.logger.WithFields(log.Fields{ "path": newFile.name, "filesize_bytes": newFile.size, @@ -345,7 +396,18 @@ func (fs *memFilesystem) Store(path string, r io.Reader) (int64, bool, error) { return newFile.size, !replace, nil } -func (fs *memFilesystem) free(size int64) int64 { +func (fs *memFilesystem) WriteFile(path 
string, data []byte) (int64, bool, error) { + return fs.WriteFileReader(path, bytes.NewBuffer(data)) +} + +func (fs *memFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) { + return fs.WriteFileReader(path, bytes.NewBuffer(data)) +} + +func (fs *memFilesystem) Purge(size int64) int64 { + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + files := []*memFile{} for _, f := range fs.files { @@ -383,7 +445,190 @@ func (fs *memFilesystem) free(size int64) int64 { return freed } -func (fs *memFilesystem) Delete(path string) int64 { +func (fs *memFilesystem) MkdirAll(path string, perm os.FileMode) error { + path = fs.cleanPath(path) + + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + + info, err := fs.stat(path) + if err == nil { + if info.IsDir() { + return nil + } + + return os.ErrExist + } + + f := &memFile{ + memFileInfo: memFileInfo{ + name: path, + size: 0, + dir: true, + lastMod: time.Now(), + }, + data: nil, + } + + fs.files[path] = f + + return nil +} + +func (fs *memFilesystem) Rename(src, dst string) error { + src = filepath.Join("/", filepath.Clean(src)) + dst = filepath.Join("/", filepath.Clean(dst)) + + if src == dst { + return nil + } + + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + + srcFile, ok := fs.files[src] + if !ok { + return os.ErrNotExist + } + + dstFile, ok := fs.files[dst] + if ok { + fs.currentSize -= dstFile.size + + fs.dataPool.Put(dstFile.data) + dstFile.data = nil + } + + fs.files[dst] = srcFile + delete(fs.files, src) + + return nil +} + +func (fs *memFilesystem) Copy(src, dst string) error { + src = filepath.Join("/", filepath.Clean(src)) + dst = filepath.Join("/", filepath.Clean(dst)) + + if src == dst { + return nil + } + + fs.filesLock.Lock() + defer fs.filesLock.Unlock() + + srcFile, ok := fs.files[src] + if !ok { + return os.ErrNotExist + } + + if srcFile.dir { + return os.ErrNotExist + } + + if fs.isDir(dst) { + return os.ErrInvalid + } + + dstFile, ok := fs.files[dst] + if ok { + fs.currentSize -= dstFile.size + } else { + dstFile = &memFile{ + memFileInfo: memFileInfo{ + name: dst, + dir: false, + size: srcFile.size, + lastMod: time.Now(), + }, + data: fs.dataPool.Get().(*bytes.Buffer), + } + } + + dstFile.data.Reset() + dstFile.data.Write(srcFile.data.Bytes()) + + fs.currentSize += dstFile.size + + fs.files[dst] = dstFile + + return nil +} + +func (fs *memFilesystem) Stat(path string) (FileInfo, error) { + path = fs.cleanPath(path) + + fs.filesLock.RLock() + defer fs.filesLock.RUnlock() + + return fs.stat(path) +} + +func (fs *memFilesystem) stat(path string) (FileInfo, error) { + file, ok := fs.files[path] + if ok { + f := &memFileInfo{ + name: file.name, + size: file.size, + dir: file.dir, + lastMod: file.lastMod, + linkTo: file.linkTo, + } + + if len(f.linkTo) != 0 { + file, ok := fs.files[f.linkTo] + if !ok { + return nil, os.ErrNotExist + } + + f.lastMod = file.lastMod + f.size = file.size + } + + return f, nil + } + + // Check for directories + if !fs.isDir(path) { + return nil, os.ErrNotExist + } + + f := &memFileInfo{ + name: path, + size: 0, + dir: true, + lastMod: time.Now(), + linkTo: "", + } + + return f, nil +} + +func (fs *memFilesystem) isDir(path string) bool { + file, ok := fs.files[path] + if ok { + return file.dir + } + + if !strings.HasSuffix(path, "/") { + path = path + "/" + } + + if path == "/" { + return true + } + + for k := range fs.files { + if strings.HasPrefix(k, path) { + return true + } + } + + return false +} + +func (fs *memFilesystem) Remove(path string) int64 { + path = 
fs.cleanPath(path) + fs.filesLock.Lock() defer fs.filesLock.Unlock() @@ -407,7 +652,7 @@ func (fs *memFilesystem) Delete(path string) int64 { return file.size } -func (fs *memFilesystem) DeleteAll() int64 { +func (fs *memFilesystem) RemoveAll() int64 { fs.filesLock.Lock() defer fs.filesLock.Unlock() @@ -419,19 +664,28 @@ func (fs *memFilesystem) DeleteAll() int64 { return size } -func (fs *memFilesystem) List(pattern string) []FileInfo { +func (fs *memFilesystem) List(path, pattern string) []FileInfo { + path = fs.cleanPath(path) files := []FileInfo{} fs.filesLock.RLock() defer fs.filesLock.RUnlock() for _, file := range fs.files { + if !strings.HasPrefix(file.name, path) { + continue + } + if len(pattern) != 0 { if ok, _ := glob.Match(pattern, file.name, '/'); !ok { continue } } + if file.dir { + continue + } + files = append(files, &memFileInfo{ name: file.name, size: file.size, @@ -442,3 +696,44 @@ func (fs *memFilesystem) List(pattern string) []FileInfo { return files } + +func (fs *memFilesystem) LookPath(file string) (string, error) { + if strings.Contains(file, "/") { + file = fs.cleanPath(file) + info, err := fs.Stat(file) + if err == nil { + if !info.Mode().IsRegular() { + return file, os.ErrNotExist + } + return file, nil + } + return "", os.ErrNotExist + } + path := os.Getenv("PATH") + for _, dir := range filepath.SplitList(path) { + if dir == "" { + // Unix shell semantics: path element "" means "." + dir = "." + } + path := filepath.Join(dir, file) + path = fs.cleanPath(path) + if info, err := fs.Stat(path); err == nil { + if !filepath.IsAbs(path) { + return path, os.ErrNotExist + } + if !info.Mode().IsRegular() { + return path, os.ErrNotExist + } + return path, nil + } + } + return "", os.ErrNotExist +} + +func (fs *memFilesystem) cleanPath(path string) string { + if !filepath.IsAbs(path) { + path = filepath.Join("/", path) + } + + return filepath.Join("/", filepath.Clean(path)) +} diff --git a/io/fs/mem_test.go b/io/fs/mem_test.go index 64794c10..d28a0d92 100644 --- a/io/fs/mem_test.go +++ b/io/fs/mem_test.go @@ -1,406 +1,30 @@ package fs import ( - "strings" "testing" - "time" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -func TestNew(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) +func TestMemFromDir(t *testing.T) { + mem, err := NewMemFilesystemFromDir(".", MemConfig{}) + require.NoError(t, err) - cur, max := mem.Size() + names := []string{} + for _, f := range mem.List("/", "/*.go") { + names = append(names, f.Name()) + } - assert.Equal(t, int64(0), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(0), cur) -} - -func TestSimplePutNoPurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - data := strings.NewReader("xxxxx") - - size, created, err := mem.Store("/foobar", data) - - assert.Nil(t, err) - assert.Equal(t, int64(5), size) - assert.Equal(t, true, created) - - cur, max := mem.Size() - - assert.Equal(t, int64(5), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(1), cur) -} - -func TestSimpleDelete(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - size := mem.Delete("/foobar") - - assert.Equal(t, int64(-1), size) - - data := strings.NewReader("xxxxx") - - mem.Store("/foobar", data) - - size = mem.Delete("/foobar") - - assert.Equal(t, int64(5), size) - - cur, max := mem.Size() - - assert.Equal(t, int64(0), cur) - assert.Equal(t, int64(10), 
max) - - cur = mem.Files() - - assert.Equal(t, int64(0), cur) -} - -func TestReplaceNoPurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - data := strings.NewReader("xxxxx") - - size, created, err := mem.Store("/foobar", data) - - assert.Nil(t, err) - assert.Equal(t, int64(5), size) - assert.Equal(t, true, created) - - cur, max := mem.Size() - - assert.Equal(t, int64(5), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(1), cur) - - data = strings.NewReader("yyy") - - size, created, err = mem.Store("/foobar", data) - - assert.Nil(t, err) - assert.Equal(t, int64(3), size) - assert.Equal(t, false, created) - - cur, max = mem.Size() - - assert.Equal(t, int64(3), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(1), cur) -} - -func TestReplacePurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: true, - }) - - data1 := strings.NewReader("xxx") - data2 := strings.NewReader("yyy") - data3 := strings.NewReader("zzz") - - mem.Store("/foobar1", data1) - mem.Store("/foobar2", data2) - mem.Store("/foobar3", data3) - - cur, max := mem.Size() - - assert.Equal(t, int64(9), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(3), cur) - - data4 := strings.NewReader("zzzzz") - - size, _, _ := mem.Store("/foobar1", data4) - - assert.Equal(t, int64(5), size) - - cur, max = mem.Size() - - assert.Equal(t, int64(8), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(2), cur) -} - -func TestReplaceUnlimited(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 0, - Purge: false, - }) - - data := strings.NewReader("xxxxx") - - size, created, err := mem.Store("/foobar", data) - - assert.Nil(t, err) - assert.Equal(t, int64(5), size) - assert.Equal(t, true, created) - - cur, max := mem.Size() - - assert.Equal(t, int64(5), cur) - assert.Equal(t, int64(0), max) - - cur = mem.Files() - - assert.Equal(t, int64(1), cur) - - data = strings.NewReader("yyy") - - size, created, err = mem.Store("/foobar", data) - - assert.Nil(t, err) - assert.Equal(t, int64(3), size) - assert.Equal(t, false, created) - - cur, max = mem.Size() - - assert.Equal(t, int64(3), cur) - assert.Equal(t, int64(0), max) - - cur = mem.Files() - - assert.Equal(t, int64(1), cur) -} - -func TestTooBigNoPurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - data := strings.NewReader("xxxxxyyyyyz") - - size, _, _ := mem.Store("/foobar", data) - - assert.Equal(t, int64(-1), size) -} - -func TestTooBigPurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: true, - }) - - data1 := strings.NewReader("xxxxx") - data2 := strings.NewReader("yyyyy") - - mem.Store("/foobar1", data1) - mem.Store("/foobar2", data2) - - data := strings.NewReader("xxxxxyyyyyz") - - size, _, _ := mem.Store("/foobar", data) - - assert.Equal(t, int64(-1), size) -} - -func TestFullSpaceNoPurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - data1 := strings.NewReader("xxxxx") - data2 := strings.NewReader("yyyyy") - - mem.Store("/foobar1", data1) - mem.Store("/foobar2", data2) - - cur, max := mem.Size() - - assert.Equal(t, int64(10), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(2), cur) - - data3 := strings.NewReader("zzzzz") - - size, _, _ := mem.Store("/foobar3", data3) - - assert.Equal(t, int64(-1), size) -} - -func 
TestFullSpacePurge(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: true, - }) - - data1 := strings.NewReader("xxxxx") - data2 := strings.NewReader("yyyyy") - - mem.Store("/foobar1", data1) - mem.Store("/foobar2", data2) - - cur, max := mem.Size() - - assert.Equal(t, int64(10), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(2), cur) - - data3 := strings.NewReader("zzzzz") - - size, _, _ := mem.Store("/foobar3", data3) - - assert.Equal(t, int64(5), size) - - cur, max = mem.Size() - - assert.Equal(t, int64(10), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(2), cur) -} - -func TestFullSpacePurgeMulti(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: true, - }) - - data1 := strings.NewReader("xxx") - data2 := strings.NewReader("yyy") - data3 := strings.NewReader("zzz") - - mem.Store("/foobar1", data1) - mem.Store("/foobar2", data2) - mem.Store("/foobar3", data3) - - cur, max := mem.Size() - - assert.Equal(t, int64(9), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(3), cur) - - data4 := strings.NewReader("zzzzz") - - size, _, _ := mem.Store("/foobar4", data4) - - assert.Equal(t, int64(5), size) - - cur, max = mem.Size() - - assert.Equal(t, int64(8), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(2), cur) -} - -func TestPurgeOrder(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: true, - }) - - data1 := strings.NewReader("xxxxx") - data2 := strings.NewReader("yyyyy") - data3 := strings.NewReader("zzzzz") - - mem.Store("/foobar1", data1) - time.Sleep(1 * time.Second) - mem.Store("/foobar2", data2) - time.Sleep(1 * time.Second) - mem.Store("/foobar3", data3) - - file := mem.Open("/foobar1") - - assert.Nil(t, file) -} - -func TestList(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - data1 := strings.NewReader("a") - data2 := strings.NewReader("bb") - data3 := strings.NewReader("ccc") - data4 := strings.NewReader("dddd") - - mem.Store("/foobar1", data1) - mem.Store("/foobar2", data2) - mem.Store("/foobar3", data3) - mem.Store("/foobar4", data4) - - cur, max := mem.Size() - - assert.Equal(t, int64(10), cur) - assert.Equal(t, int64(10), max) - - cur = mem.Files() - - assert.Equal(t, int64(4), cur) - - files := mem.List("") - - assert.Equal(t, 4, len(files)) -} - -func TestData(t *testing.T) { - mem := NewMemFilesystem(MemConfig{ - Size: 10, - Purge: false, - }) - - data := "gduwotoxqb" - - data1 := strings.NewReader(data) - - mem.Store("/foobar", data1) - - file := mem.Open("/foobar") - - data2 := make([]byte, len(data)+1) - n, _ := file.Read(data2) - - assert.Equal(t, len(data), n) - assert.Equal(t, []byte(data), data2[:n]) + require.ElementsMatch(t, []string{ + "/disk.go", + "/fs_test.go", + "/fs.go", + "/mem_test.go", + "/mem.go", + "/readonly_test.go", + "/readonly.go", + "/s3.go", + "/sized_test.go", + "/sized.go", + }, names) } diff --git a/io/fs/readonly.go b/io/fs/readonly.go new file mode 100644 index 00000000..889672a4 --- /dev/null +++ b/io/fs/readonly.go @@ -0,0 +1,54 @@ +package fs + +import ( + "io" + "os" +) + +type readOnlyFilesystem struct { + Filesystem +} + +func NewReadOnlyFilesystem(fs Filesystem) (Filesystem, error) { + r := &readOnlyFilesystem{ + Filesystem: fs, + } + + return r, nil +} + +func (r *readOnlyFilesystem) Symlink(oldname, newname string) error { + return os.ErrPermission +} + +func (r 
*readOnlyFilesystem) WriteFileReader(path string, rd io.Reader) (int64, bool, error) {
+	return -1, false, os.ErrPermission
+}
+
+func (r *readOnlyFilesystem) WriteFile(path string, data []byte) (int64, bool, error) {
+	return -1, false, os.ErrPermission
+}
+
+func (r *readOnlyFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) {
+	return -1, false, os.ErrPermission
+}
+
+func (r *readOnlyFilesystem) MkdirAll(path string, perm os.FileMode) error {
+	return os.ErrPermission
+}
+
+func (r *readOnlyFilesystem) Remove(path string) int64 {
+	return -1
+}
+
+func (r *readOnlyFilesystem) RemoveAll() int64 {
+	return 0
+}
+
+func (r *readOnlyFilesystem) Purge(size int64) int64 {
+	return 0
+}
+
+func (r *readOnlyFilesystem) Resize(size int64) error {
+	return os.ErrPermission
+}
diff --git a/io/fs/readonly_test.go b/io/fs/readonly_test.go
new file mode 100644
index 00000000..13360b47
--- /dev/null
+++ b/io/fs/readonly_test.go
@@ -0,0 +1,50 @@
+package fs
+
+import (
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestReadOnly(t *testing.T) {
+	mem, err := NewMemFilesystemFromDir(".", MemConfig{})
+	require.NoError(t, err)
+
+	ro, err := NewReadOnlyFilesystem(mem)
+	require.NoError(t, err)
+
+	err = ro.Symlink("/readonly.go", "/foobar.go")
+	require.Error(t, err)
+
+	_, _, err = ro.WriteFile("/readonly.go", []byte("foobar"))
+	require.Error(t, err)
+
+	_, _, err = ro.WriteFileReader("/readonly.go", strings.NewReader("foobar"))
+	require.Error(t, err)
+
+	_, _, err = ro.WriteFileSafe("/readonly.go", []byte("foobar"))
+	require.Error(t, err)
+
+	err = ro.MkdirAll("/foobar/baz", 0700)
+	require.Error(t, err)
+
+	res := ro.Remove("/readonly.go")
+	require.Equal(t, int64(-1), res)
+
+	res = ro.RemoveAll()
+	require.Equal(t, int64(0), res)
+
+	rop, ok := ro.(PurgeFilesystem)
+	require.True(t, ok, "must implement PurgeFilesystem")
+
+	size, _ := ro.Size()
+	res = rop.Purge(size)
+	require.Equal(t, int64(0), res)
+
+	ros, ok := ro.(SizedFilesystem)
+	require.True(t, ok, "must implement SizedFilesystem")
+
+	err = ros.Resize(100)
+	require.Error(t, err)
+}
diff --git a/io/fs/s3.go b/io/fs/s3.go
new file mode 100644
index 00000000..22c66d05
--- /dev/null
+++ b/io/fs/s3.go
@@ -0,0 +1,649 @@
+package fs
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/datarhei/core/v16/glob"
+	"github.com/datarhei/core/v16/log"
+	"github.com/minio/minio-go/v7"
+	"github.com/minio/minio-go/v7/pkg/credentials"
+)
+
+type S3Config struct {
+	// Name is the name of the filesystem
+	Name            string
+	Endpoint        string
+	AccessKeyID     string
+	SecretAccessKey string
+	Region          string
+	Bucket          string
+	UseSSL          bool
+
+	Logger log.Logger
+}
+
+type s3Filesystem struct {
+	metadata map[string]string
+	metaLock sync.RWMutex
+
+	name string
+
+	endpoint        string
+	accessKeyID     string
+	secretAccessKey string
+	region          string
+	bucket          string
+	useSSL          bool
+
+	client *minio.Client
+
+	logger log.Logger
+}
+
+var fakeDirEntry = "..."
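+
+// Illustrative usage sketch, not part of the original diff; the endpoint and
+// the credentials are placeholders that mirror the minio defaults used in
+// fs_test.go:
+//
+//	fs, err := NewS3Filesystem(S3Config{
+//		Name:            "s3",
+//		Endpoint:        "127.0.0.1:9000",
+//		AccessKeyID:     "minioadmin",
+//		SecretAccessKey: "minioadmin",
+//		Bucket:          "mybucket",
+//		UseSSL:          false,
+//	})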
+
+func NewS3Filesystem(config S3Config) (Filesystem, error) {
+	fs := &s3Filesystem{
+		metadata:        make(map[string]string),
+		name:            config.Name,
+		endpoint:        config.Endpoint,
+		accessKeyID:     config.AccessKeyID,
+		secretAccessKey: config.SecretAccessKey,
+		region:          config.Region,
+		bucket:          config.Bucket,
+		useSSL:          config.UseSSL,
+		logger:          config.Logger,
+	}
+
+	if fs.logger == nil {
+		fs.logger = log.New("")
+	}
+
+	client, err := minio.New(fs.endpoint, &minio.Options{
+		Creds:  credentials.NewStaticV4(fs.accessKeyID, fs.secretAccessKey, ""),
+		Region: fs.region,
+		Secure: fs.useSSL,
+	})
+
+	if err != nil {
+		return nil, fmt.Errorf("can't connect to s3 endpoint %s: %w", fs.endpoint, err)
+	}
+
+	fs.logger = fs.logger.WithFields(log.Fields{
+		"name":     fs.name,
+		"type":     "s3",
+		"bucket":   fs.bucket,
+		"region":   fs.region,
+		"endpoint": fs.endpoint,
+	})
+
+	fs.logger.Debug().Log("Connected")
+
+	ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(30*time.Second))
+	defer cancel()
+
+	exists, err := client.BucketExists(ctx, fs.bucket)
+	if err != nil {
+		fs.logger.WithError(err).Log("Can't access bucket")
+		return nil, fmt.Errorf("can't access bucket %s: %w", fs.bucket, err)
+	}
+
+	if exists {
+		fs.logger.Debug().Log("Bucket already exists")
+	} else {
+		fs.logger.Debug().Log("Bucket doesn't exist")
+		err = client.MakeBucket(ctx, fs.bucket, minio.MakeBucketOptions{Region: fs.region})
+		if err != nil {
+			fs.logger.WithError(err).Log("Can't create bucket")
+			return nil, fmt.Errorf("can't create bucket %s: %w", fs.bucket, err)
+		} else {
+			fs.logger.Debug().Log("Bucket created")
+		}
+	}
+
+	fs.client = client
+
+	return fs, nil
+}
+
+func (fs *s3Filesystem) Name() string {
+	return fs.name
+}
+
+func (fs *s3Filesystem) Type() string {
+	return "s3"
+}
+
+func (fs *s3Filesystem) Metadata(key string) string {
+	fs.metaLock.RLock()
+	defer fs.metaLock.RUnlock()
+
+	return fs.metadata[key]
+}
+
+func (fs *s3Filesystem) SetMetadata(key, data string) {
+	fs.metaLock.Lock()
+	defer fs.metaLock.Unlock()
+
+	fs.metadata[key] = data
+}
+
+func (fs *s3Filesystem) Size() (int64, int64) {
+	size := int64(0)
+
+	files := fs.List("/", "")
+
+	for _, file := range files {
+		size += file.Size()
+	}
+
+	return size, -1
+}
+
+func (fs *s3Filesystem) Files() int64 {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
+		WithVersions: false,
+		WithMetadata: false,
+		Prefix:       "",
+		Recursive:    true,
+		MaxKeys:      0,
+		StartAfter:   "",
+		UseV1:        false,
+	})
+
+	nfiles := int64(0)
+
+	for object := range ch {
+		if object.Err != nil {
+			fs.logger.WithError(object.Err).Log("Listing object failed")
+			continue
+		}
+
+		if strings.HasSuffix("/"+object.Key, "/"+fakeDirEntry) {
+			// Skip fake entries (see MkdirAll)
+			continue
+		}
+
+		nfiles++
+	}
+
+	return nfiles
+}
+
+func (fs *s3Filesystem) Symlink(oldname, newname string) error {
+	return fmt.Errorf("not implemented")
+}
+
+func (fs *s3Filesystem) Stat(path string) (FileInfo, error) {
+	path = fs.cleanPath(path)
+
+	if len(path) == 0 {
+		return &s3FileInfo{
+			name:         "/",
+			size:         0,
+			dir:          true,
+			lastModified: time.Now(),
+		}, nil
+	}
+
+	ctx := context.Background()
+
+	object, err := fs.client.GetObject(ctx, fs.bucket, path, minio.GetObjectOptions{})
+	if err != nil {
+		if fs.isDir(path) {
+			return &s3FileInfo{
+				name:         "/" + path,
+				size:         0,
+				dir:          true,
+				lastModified: time.Now(),
+			}, nil
+		}
+
+		fs.logger.Debug().WithField("key", path).WithError(err).Log("Not found")
+		return nil, err
+	}
+
+	defer object.Close()
+
+	stat, err := object.Stat()
+	if err != nil {
+		if fs.isDir(path) {
+			return &s3FileInfo{
+				name:         "/" + path,
+				size:         0,
+				dir:          true,
+				lastModified: time.Now(),
+			}, nil
+		}
+
+		fs.logger.Debug().WithField("key", path).WithError(err).Log("Stat failed")
+		return nil, err
+	}
+
+	return &s3FileInfo{
+		name:         "/" + stat.Key,
+		size:         stat.Size,
+		lastModified: stat.LastModified,
+	}, nil
+}
+
+func (fs *s3Filesystem) Open(path string) File {
+	path = fs.cleanPath(path)
+	ctx := context.Background()
+
+	object, err := fs.client.GetObject(ctx, fs.bucket, path, minio.GetObjectOptions{})
+	if err != nil {
+		fs.logger.Debug().WithField("key", path).Log("Not found")
+		return nil
+	}
+
+	stat, err := object.Stat()
+	if err != nil {
+		fs.logger.Debug().WithField("key", path).Log("Stat failed")
+		return nil
+	}
+
+	file := &s3File{
+		data:         object,
+		name:         "/" + stat.Key,
+		size:         stat.Size,
+		lastModified: stat.LastModified,
+	}
+
+	fs.logger.Debug().WithField("key", stat.Key).Log("Opened")
+
+	return file
+}
+
+func (fs *s3Filesystem) ReadFile(path string) ([]byte, error) {
+	path = fs.cleanPath(path)
+	file := fs.Open(path)
+	if file == nil {
+		return nil, os.ErrNotExist
+	}
+
+	defer file.Close()
+
+	buf := &bytes.Buffer{}
+
+	_, err := buf.ReadFrom(file)
+	if err != nil {
+		return nil, err
+	}
+
+	return buf.Bytes(), nil
+}
+
+func (fs *s3Filesystem) write(path string, r io.Reader) (int64, bool, error) {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	overwrite := false
+
+	_, err := fs.client.StatObject(ctx, fs.bucket, path, minio.StatObjectOptions{})
+	if err == nil {
+		overwrite = true
+	}
+
+	info, err := fs.client.PutObject(ctx, fs.bucket, path, r, -1, minio.PutObjectOptions{
+		UserMetadata:            map[string]string{},
+		UserTags:                map[string]string{},
+		Progress:                nil,
+		ContentType:             "",
+		ContentEncoding:         "",
+		ContentDisposition:      "",
+		ContentLanguage:         "",
+		CacheControl:            "",
+		Mode:                    "",
+		RetainUntilDate:         time.Time{},
+		ServerSideEncryption:    nil,
+		NumThreads:              0,
+		StorageClass:            "",
+		WebsiteRedirectLocation: "",
+		PartSize:                0,
+		LegalHold:               "",
+		SendContentMd5:          false,
+		DisableContentSha256:    false,
+		DisableMultipart:        false,
+		Internal:                minio.AdvancedPutOptions{},
+	})
+	if err != nil {
+		fs.logger.WithError(err).WithField("key", path).Log("Failed to store file")
+		return -1, false, err
+	}
+
+	fs.logger.Debug().WithFields(log.Fields{
+		"key":       path,
+		"overwrite": overwrite,
+	}).Log("Stored")
+
+	return info.Size, !overwrite, nil
+}
+
+func (fs *s3Filesystem) WriteFileReader(path string, r io.Reader) (int64, bool, error) {
+	path = fs.cleanPath(path)
+	return fs.write(path, r)
+}
+
+func (fs *s3Filesystem) WriteFile(path string, data []byte) (int64, bool, error) {
+	return fs.WriteFileReader(path, bytes.NewBuffer(data))
+}
+
+func (fs *s3Filesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) {
+	return fs.WriteFileReader(path, bytes.NewBuffer(data))
+}
+
+func (fs *s3Filesystem) Rename(src, dst string) error {
+	src = fs.cleanPath(src)
+	dst = fs.cleanPath(dst)
+
+	err := fs.Copy(src, dst)
+	if err != nil {
+		return err
+	}
+
+	res := fs.Remove(src)
+	if res == -1 {
+		return fmt.Errorf("failed to remove source file: %s", src)
+	}
+
+	return nil
+}
+
+func (fs *s3Filesystem) Copy(src, dst string) error {
+	src = fs.cleanPath(src)
+	dst = fs.cleanPath(dst)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	_, err := fs.client.CopyObject(ctx, minio.CopyDestOptions{
+		Bucket: fs.bucket,
+		Object: dst,
+	}, minio.CopySrcOptions{
+		Bucket: fs.bucket,
+		Object: src,
+	})
+
+	return err
+}
+
+func (fs *s3Filesystem) MkdirAll(path string, perm os.FileMode) error {
+	if path == "/" {
+		return nil
+	}
+
+	info, err := fs.Stat(path)
+	if err == nil {
+		if !info.IsDir() {
+			return os.ErrExist
+		}
+
+		return nil
+	}
+
+	path = filepath.Join(path, fakeDirEntry)
+
+	_, _, err = fs.write(path, strings.NewReader(""))
+	if err != nil {
+		return fmt.Errorf("can't create directory: %w", err)
+	}
+
+	return nil
+}
+
+func (fs *s3Filesystem) Remove(path string) int64 {
+	path = fs.cleanPath(path)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	stat, err := fs.client.StatObject(ctx, fs.bucket, path, minio.StatObjectOptions{})
+	if err != nil {
+		fs.logger.Debug().WithField("key", path).Log("Not found")
+		return -1
+	}
+
+	err = fs.client.RemoveObject(ctx, fs.bucket, path, minio.RemoveObjectOptions{
+		GovernanceBypass: true,
+	})
+	if err != nil {
+		fs.logger.WithError(err).WithField("key", stat.Key).Log("Failed to delete file")
+		return -1
+	}
+
+	fs.logger.Debug().WithField("key", stat.Key).Log("Deleted")
+
+	return stat.Size
+}
+
+func (fs *s3Filesystem) RemoveAll() int64 {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	totalSize := int64(0)
+
+	objectsCh := make(chan minio.ObjectInfo)
+
+	// Send the names of the objects that need to be removed to objectsCh
+	go func() {
+		defer close(objectsCh)
+
+		for object := range fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
+			Recursive: true,
+		}) {
+			if object.Err != nil {
+				fs.logger.WithError(object.Err).Log("Listing object failed")
+				continue
+			}
+			totalSize += object.Size
+			objectsCh <- object
+		}
+	}()
+
+	for err := range fs.client.RemoveObjects(context.Background(), fs.bucket, objectsCh, minio.RemoveObjectsOptions{
+		GovernanceBypass: true,
+	}) {
+		fs.logger.WithError(err.Err).WithField("key", err.ObjectName).Log("Deleting object failed")
+	}
+
+	fs.logger.Debug().Log("Deleted all files")
+
+	return totalSize
+}
+
+func (fs *s3Filesystem) List(path, pattern string) []FileInfo {
+	path = fs.cleanPath(path)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
+		WithVersions: false,
+		WithMetadata: false,
+		Prefix:       path,
+		Recursive:    true,
+		MaxKeys:      0,
+		StartAfter:   "",
+		UseV1:        false,
+	})
+
+	files := []FileInfo{}
+
+	for object := range ch {
+		if object.Err != nil {
+			fs.logger.WithError(object.Err).Log("Listing object failed")
+			continue
+		}
+
+		key := "/" + object.Key
+		if strings.HasSuffix(key, "/"+fakeDirEntry) {
+			// filter out fake directory entries (see MkdirAll)
+			continue
+		}
+
+		if len(pattern) != 0 {
+			if ok, _ := glob.Match(pattern, key, '/'); !ok {
+				continue
+			}
+		}
+
+		f := &s3FileInfo{
+			name:         key,
+			size:         object.Size,
+			lastModified: object.LastModified,
+		}
+
+		files = append(files, f)
+	}
+
+	return files
+}
+
+func (fs *s3Filesystem) LookPath(file string) (string, error) {
+	if strings.Contains(file, "/") {
+		file = fs.cleanPath(file)
+		info, err := fs.Stat(file)
+		if err == nil {
+			if !info.Mode().IsRegular() {
+				return file, os.ErrNotExist
+			}
+			return file, nil
+		}
+		return "", os.ErrNotExist
+	}
+	path := os.Getenv("PATH")
+	for _, dir := range filepath.SplitList(path) {
+		if dir == "" {
+			// Unix shell semantics: path element "" means "."
+			dir = "."
+		}
+		path := filepath.Join(dir, file)
+		path = fs.cleanPath(path)
+		if info, err := fs.Stat(path); err == nil {
+			if !filepath.IsAbs(path) {
+				return path, os.ErrNotExist
+			}
+			if !info.Mode().IsRegular() {
+				return path, os.ErrNotExist
+			}
+			return path, nil
+		}
+	}
+	return "", os.ErrNotExist
+}
+
+func (fs *s3Filesystem) isDir(path string) bool {
+	if !strings.HasSuffix(path, "/") {
+		path = path + "/"
+	}
+
+	if path == "/" {
+		return true
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
+		WithVersions: false,
+		WithMetadata: false,
+		Prefix:       path,
+		Recursive:    true,
+		MaxKeys:      1,
+		StartAfter:   "",
+		UseV1:        false,
+	})
+
+	files := uint64(0)
+
+	for object := range ch {
+		if object.Err != nil {
+			fs.logger.WithError(object.Err).Log("Listing object failed")
+			continue
+		}
+
+		files++
+	}
+
+	return files > 0
+}
+
+func (fs *s3Filesystem) cleanPath(path string) string {
+	if !filepath.IsAbs(path) {
+		path = filepath.Join("/", path)
+	}
+
+	path = strings.TrimSuffix(path, "/"+fakeDirEntry)
+
+	return filepath.Join("/", filepath.Clean(path))[1:]
+}
+
+type s3FileInfo struct {
+	name         string
+	size         int64
+	dir          bool
+	lastModified time.Time
+}
+
+func (f *s3FileInfo) Name() string {
+	return f.name
+}
+
+func (f *s3FileInfo) Size() int64 {
+	return f.size
+}
+
+func (f *s3FileInfo) Mode() os.FileMode {
+	return fs.FileMode(fs.ModePerm)
+}
+
+func (f *s3FileInfo) ModTime() time.Time {
+	return f.lastModified
+}
+
+func (f *s3FileInfo) IsLink() (string, bool) {
+	return "", false
+}
+
+func (f *s3FileInfo) IsDir() bool {
+	return f.dir
+}
+
+type s3File struct {
+	data         io.ReadCloser
+	name         string
+	size         int64
+	lastModified time.Time
+}
+
+func (f *s3File) Read(p []byte) (int, error) {
+	return f.data.Read(p)
+}
+
+func (f *s3File) Close() error {
+	return f.data.Close()
+}
+
+func (f *s3File) Name() string {
+	return f.name
+}
+
+func (f *s3File) Stat() (FileInfo, error) {
+	return &s3FileInfo{
+		name:         f.name,
+		size:         f.size,
+		lastModified: f.lastModified,
+	}, nil
+}
diff --git a/io/fs/sized.go b/io/fs/sized.go
new file mode 100644
index 00000000..366ef6f5
--- /dev/null
+++ b/io/fs/sized.go
@@ -0,0 +1,168 @@
+package fs
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+)
+
+type SizedFilesystem interface {
+	Filesystem
+
+	// Resize resizes the filesystem to the new size. Files may need to be deleted.
+	Resize(size int64) error
+}
+
+type PurgeFilesystem interface {
+	// Purge will free up at least size number of bytes and returns the actual
+	// freed space in bytes.
+	Purge(size int64) int64
+}
+
+type sizedFilesystem struct {
+	Filesystem
+
+	// maxSize is the capacity of the filesystem in bytes
+	maxSize int64
+
+	// Set true to automatically delete the oldest files until there's
+	// enough space to store a new file
+	purge bool
+}
+
+var _ PurgeFilesystem = &sizedFilesystem{}
+
+func NewSizedFilesystem(fs Filesystem, maxSize int64, purge bool) (SizedFilesystem, error) {
+	r := &sizedFilesystem{
+		Filesystem: fs,
+		maxSize:    maxSize,
+		purge:      purge,
+	}
+
+	return r, nil
+}
+
+func (r *sizedFilesystem) Size() (int64, int64) {
+	currentSize, _ := r.Filesystem.Size()
+
+	return currentSize, r.maxSize
+}
+
+func (r *sizedFilesystem) Resize(size int64) error {
+	currentSize, _ := r.Size()
+	if size >= currentSize {
+		// If the new size is the same or larger than the current size,
+		// nothing to do.
+		r.maxSize = size
+		return nil
+	}
+
+	// If the new size is less than the current size, purge some files.
+	r.Purge(currentSize - size)
+
+	r.maxSize = size
+
+	return nil
+}
+
+func (r *sizedFilesystem) WriteFileReader(path string, rd io.Reader) (int64, bool, error) {
+	currentSize, maxSize := r.Size()
+	if maxSize <= 0 {
+		return r.Filesystem.WriteFileReader(path, rd)
+	}
+
+	data := bytes.Buffer{}
+	size, err := data.ReadFrom(rd)
+	if err != nil {
+		return -1, false, err
+	}
+
+	// reject if the new file is larger than the available space
+	if size > maxSize {
+		return -1, false, fmt.Errorf("file is too big")
+	}
+
+	// Calculate the new size of the filesystem
+	newSize := currentSize + size
+
+	// If the new size is larger than the allowed size, we have to free
+	// some space.
+	if newSize > maxSize {
+		if !r.purge {
+			return -1, false, fmt.Errorf("not enough space on device")
+		}
+
+		if r.Purge(size) < size {
+			return -1, false, fmt.Errorf("not enough space on device")
+		}
+	}
+
+	return r.Filesystem.WriteFileReader(path, &data)
+}
+
+func (r *sizedFilesystem) WriteFile(path string, data []byte) (int64, bool, error) {
+	return r.WriteFileReader(path, bytes.NewBuffer(data))
+}
+
+func (r *sizedFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, error) {
+	currentSize, maxSize := r.Size()
+	if maxSize <= 0 {
+		return r.Filesystem.WriteFile(path, data)
+	}
+
+	size := int64(len(data))
+
+	// reject if the new file is larger than the available space
+	if size > maxSize {
+		return -1, false, fmt.Errorf("file is too big")
+	}
+
+	// Calculate the new size of the filesystem
+	newSize := currentSize + size
+
+	// If the new size is larger than the allowed size, we have to free
+	// some space.
+	if newSize > maxSize {
+		if !r.purge {
+			return -1, false, fmt.Errorf("not enough space on device")
+		}
+
+		if r.Purge(size) < size {
+			return -1, false, fmt.Errorf("not enough space on device")
+		}
+	}
+
+	return r.Filesystem.WriteFileSafe(path, data)
+}
+
+func (r *sizedFilesystem) Purge(size int64) int64 {
+	if purger, ok := r.Filesystem.(PurgeFilesystem); ok {
+		return purger.Purge(size)
+	}
+
+	return 0
+	/*
+		files := r.Filesystem.List("/", "")
+
+		sort.Slice(files, func(i, j int) bool {
+			return files[i].ModTime().Before(files[j].ModTime())
+		})
+
+		var freed int64 = 0
+
+		for _, f := range files {
+			r.Filesystem.Remove(f.Name())
+			size -= f.Size()
+			freed += f.Size()
+			r.currentSize -= f.Size()
+
+			if size <= 0 {
+				break
+			}
+		}
+
+		files = nil
+
+		return freed
+	*/
+}
diff --git a/io/fs/sized_test.go b/io/fs/sized_test.go
new file mode 100644
index 00000000..e158c422
--- /dev/null
+++ b/io/fs/sized_test.go
@@ -0,0 +1,350 @@
+package fs
+
+import (
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func newMemFS() Filesystem {
+	mem, _ := NewMemFilesystem(MemConfig{})
+
+	return mem
+}
+
+func TestNewSized(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
+
+	cur, max := fs.Size()
+
+	require.Equal(t, int64(0), cur)
+	require.Equal(t, int64(10), max)
+
+	cur = fs.Files()
+
+	require.Equal(t, int64(0), cur)
+}
+
+func TestSizedResize(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
+
+	cur, max := fs.Size()
+
+	require.Equal(t, int64(0), cur)
+	require.Equal(t, int64(10), max)
+
+	err := fs.Resize(20)
+	require.NoError(t, err)
+
+	cur, max = fs.Size()
+
+	require.Equal(t, int64(0), cur)
+	require.Equal(t, int64(20), max)
+}
+
+func TestSizedResizePurge(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
+
+	cur, max := fs.Size()
+
+	require.Equal(t, int64(0), cur)
+	require.Equal(t, int64(10), max)
+
+	fs.WriteFileReader("/foobar", strings.NewReader("xxxxxxxxxx"))
+
+	cur, max = fs.Size()
+
+	require.Equal(t, int64(10), cur)
+	require.Equal(t, int64(10), max)
+
+	err := fs.Resize(5)
+	require.NoError(t, err)
+
+	cur, max = fs.Size()
+
+	require.Equal(t, int64(0), cur)
+	require.Equal(t, int64(5), max)
+}
+
+func TestSizedWrite(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
+
+	cur, max := fs.Size()
+
+	require.Equal(t, int64(0), cur)
+	require.Equal(t, int64(10), max)
+
+	size, created, err := fs.WriteFileReader("/foobar", strings.NewReader("xxxxx"))
+	require.NoError(t, err)
+	require.Equal(t, int64(5), size)
+	require.Equal(t, true, created)
+
+	cur, max = fs.Size()
+
+	require.Equal(t, int64(5), cur)
+	require.Equal(t, int64(10), max)
+
+	_, _, err = fs.WriteFile("/foobaz", []byte("xxxxxx"))
+	require.Error(t, err)
+
+	_, _, err = fs.WriteFileReader("/foobaz", strings.NewReader("xxxxxx"))
+	require.Error(t, err)
+
+	_, _, err = fs.WriteFileSafe("/foobaz", []byte("xxxxxx"))
+	require.Error(t, err)
+}
+
+func TestSizedReplaceNoPurge(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
+
+	data := strings.NewReader("xxxxx")
+
+	size, created, err := fs.WriteFileReader("/foobar", data)
+
+	require.Nil(t, err)
+	require.Equal(t, int64(5), size)
+	require.Equal(t, true, created)
+
+	cur, max := fs.Size()
+
+	require.Equal(t, int64(5), cur)
+	require.Equal(t, int64(10), max)
+
+	cur = fs.Files()
+
+	require.Equal(t, int64(1), cur)
+
+	data = strings.NewReader("yyy")
+
+	size, created, err = fs.WriteFileReader("/foobar", data)
+
+	require.Nil(t, err)
+	require.Equal(t, int64(3), size)
+	require.Equal(t, false, created)
+
+	cur, max = fs.Size()
+
+	require.Equal(t, int64(3), cur)
+	require.Equal(t, int64(10), max)
+
+	cur = fs.Files()
+
+	require.Equal(t, int64(1), cur)
+}
+
+func TestSizedReplacePurge(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, true)
+
+	data1 := strings.NewReader("xxx")
+	data2 := strings.NewReader("yyy")
+	data3 := strings.NewReader("zzz")
+
+	fs.WriteFileReader("/foobar1", data1)
+	fs.WriteFileReader("/foobar2", data2)
+	fs.WriteFileReader("/foobar3", data3)
+
+	cur, max := fs.Size()
+
+	require.Equal(t, int64(9), cur)
+	require.Equal(t, int64(10), max)
+
+	cur = fs.Files()
+
+	require.Equal(t, int64(3), cur)
+
+	data4 := strings.NewReader("zzzzz")
+
+	size, _, _ := fs.WriteFileReader("/foobar1", data4)
+
+	require.Equal(t, int64(5), size)
+
+	cur, max = fs.Size()
+
+	require.Equal(t, int64(8), cur)
+	require.Equal(t, int64(10), max)
+
+	cur = fs.Files()
+
+	require.Equal(t, int64(2), cur)
+}
+
+func TestSizedReplaceUnlimited(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), -1, false)
+
+	data := strings.NewReader("xxxxx")
+
+	size, created, err := fs.WriteFileReader("/foobar", data)
+
+	require.Nil(t, err)
+	require.Equal(t, int64(5), size)
+	require.Equal(t, true, created)
+
+	cur, max := fs.Size()
+
+	require.Equal(t, int64(5), cur)
+	require.Equal(t, int64(-1), max)
+
+	cur = fs.Files()
+
+	require.Equal(t, int64(1), cur)
+
+	data = strings.NewReader("yyy")
+
+	size, created, err = fs.WriteFileReader("/foobar", data)
+
+	require.Nil(t, err)
+	require.Equal(t, int64(3), size)
+	require.Equal(t, false, created)
+
+	cur, max = fs.Size()
+
+	require.Equal(t, int64(3), cur)
+	require.Equal(t, int64(-1), max)
+
+	cur = fs.Files()
+
+	require.Equal(t, int64(1), cur)
+}
+
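+// The cases below exercise the capacity limit itself. Regardless of the
+// purge setting, a single file larger than the total capacity is always
+// rejected; purging only helps when the file would fit after evicting the
+// oldest files. A minimal sketch of the expected behaviour, assuming the
+// mem backend used throughout these tests:
+//
+//	fs, _ := NewSizedFilesystem(newMemFS(), 10, true)
+//	fs.WriteFileReader("/a", strings.NewReader("xxxxx"))        // ok, 5/10 used
+//	fs.WriteFileReader("/b", strings.NewReader("yyyyy"))        // ok, 10/10 used
+//	fs.WriteFileReader("/c", strings.NewReader("zzzzz"))        // ok, oldest file /a is purged
+//	fs.WriteFileReader("/d", strings.NewReader("xxxxxxxxxxx"))  // error: 11 bytes > capacity
+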
+func TestSizedTooBigNoPurge(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
+
+	data := strings.NewReader("xxxxxyyyyyz")
+
+	size, _, err := fs.WriteFileReader("/foobar", data)
+	require.Error(t, err)
+	require.Equal(t, int64(-1), size)
+}
+
+func TestSizedTooBigPurge(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, true)
+
+	data1 := strings.NewReader("xxxxx")
+	data2 := strings.NewReader("yyyyy")
+
+	fs.WriteFileReader("/foobar1", data1)
+	fs.WriteFileReader("/foobar2", data2)
+
+	data := strings.NewReader("xxxxxyyyyyz")
+
+	size, _, err := fs.WriteFileReader("/foobar", data)
+	require.Error(t, err)
+	require.Equal(t, int64(-1), size)
+
+	require.Equal(t, int64(2), fs.Files())
+}
+
+func TestSizedFullSpaceNoPurge(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, false)
+
+	data1 := strings.NewReader("xxxxx")
+	data2 := strings.NewReader("yyyyy")
+
+	fs.WriteFileReader("/foobar1", data1)
+	fs.WriteFileReader("/foobar2", data2)
+
+	cur, max := fs.Size()
+
+	require.Equal(t, int64(10), cur)
+	require.Equal(t, int64(10), max)
+
+	cur = fs.Files()
+
+	require.Equal(t, int64(2), cur)
+
+	data3 := strings.NewReader("zzzzz")
+
+	size, _, err := fs.WriteFileReader("/foobar3", data3)
+	require.Error(t, err)
+	require.Equal(t, int64(-1), size)
+}
+
+func TestSizedFullSpacePurge(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, true)
+
+	data1 := strings.NewReader("xxxxx")
+	data2 := strings.NewReader("yyyyy")
+
+	fs.WriteFileReader("/foobar1", data1)
+	fs.WriteFileReader("/foobar2", data2)
+
+	cur, max := fs.Size()
+
+	require.Equal(t, int64(10), cur)
+	require.Equal(t, int64(10), max)
+
+	cur = fs.Files()
+
+	require.Equal(t, int64(2), cur)
+
+	data3 := strings.NewReader("zzzzz")
+
+	size, _, _ := fs.WriteFileReader("/foobar3", data3)
+
+	require.Equal(t, int64(5), size)
+
+	cur, max = fs.Size()
+
+	require.Equal(t, int64(10), cur)
+	require.Equal(t, int64(10), max)
+
+	cur = fs.Files()
+
+	require.Equal(t, int64(2), cur)
+}
+
+func TestSizedFullSpacePurgeMulti(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, true)
+
+	data1 := strings.NewReader("xxx")
+	data2 := strings.NewReader("yyy")
+	data3 := strings.NewReader("zzz")
+
+	fs.WriteFileReader("/foobar1", data1)
+	fs.WriteFileReader("/foobar2", data2)
+	fs.WriteFileReader("/foobar3", data3)
+
+	cur, max := fs.Size()
+
+	require.Equal(t, int64(9), cur)
+	require.Equal(t, int64(10), max)
+
+	cur = fs.Files()
+
+	require.Equal(t, int64(3), cur)
+
+	data4 := strings.NewReader("zzzzz")
+
+	size, _, _ := fs.WriteFileReader("/foobar4", data4)
+
+	require.Equal(t, int64(5), size)
+
+	cur, max = fs.Size()
+
+	require.Equal(t, int64(8), cur)
+	require.Equal(t, int64(10), max)
+
+	cur = fs.Files()
+
+	require.Equal(t, int64(2), cur)
+}
+
+func TestSizedPurgeOrder(t *testing.T) {
+	fs, _ := NewSizedFilesystem(newMemFS(), 10, true)
+
+	data1 := strings.NewReader("xxxxx")
+	data2 := strings.NewReader("yyyyy")
+	data3 := strings.NewReader("zzzzz")
+
+	fs.WriteFileReader("/foobar1", data1)
+	time.Sleep(1 * time.Second)
+	fs.WriteFileReader("/foobar2", data2)
+	time.Sleep(1 * time.Second)
+	fs.WriteFileReader("/foobar3", data3)
+
+	file := fs.Open("/foobar1")
+
+	require.Nil(t, file)
+}
diff --git a/log/log.go b/log/log.go
index be226028..14a78e2c 100644
--- a/log/log.go
+++ b/log/log.go
@@ -103,7 +103,6 @@ type Logger interface {
 type logger struct {
 	output    Writer
 	component string
-	topics    map[string]struct{}
 }
 
 // New returns an implementation of the Logger interface.
@@ -121,14 +120,6 @@ func (l *logger) clone() *logger {
 		component: l.component,
 	}
 
-	if len(l.topics) != 0 {
-		clone.topics = make(map[string]struct{})
-
-		for topic := range l.topics {
-			clone.topics[topic] = struct{}{}
-		}
-	}
-
 	return clone
 }
diff --git a/log/log_test.go b/log/log_test.go
index 1a04a1f0..3ed0910c 100644
--- a/log/log_test.go
+++ b/log/log_test.go
@@ -5,15 +5,15 @@ import (
 	"bytes"
 	"testing"
 
-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestLoglevelNames(t *testing.T) {
-	assert.Equal(t, "DEBUG", Ldebug.String())
-	assert.Equal(t, "ERROR", Lerror.String())
-	assert.Equal(t, "WARN", Lwarn.String())
-	assert.Equal(t, "INFO", Linfo.String())
-	assert.Equal(t, `SILENT`, Lsilent.String())
+	require.Equal(t, "DEBUG", Ldebug.String())
+	require.Equal(t, "ERROR", Lerror.String())
+	require.Equal(t, "WARN", Lwarn.String())
+	require.Equal(t, "INFO", Linfo.String())
+	require.Equal(t, `SILENT`, Lsilent.String())
 }
 
 func TestLogColorToNotTTY(t *testing.T) {
@@ -23,7 +23,7 @@ func TestLogColorToNotTTY(t *testing.T) {
 	w := NewConsoleWriter(writer, Linfo, true).(*syncWriter)
 	formatter := w.writer.(*consoleWriter).formatter.(*consoleFormatter)
 
-	assert.NotEqual(t, true, formatter.color, "Color should not be used on a buffer logger")
+	require.NotEqual(t, true, formatter.color, "Color should not be used on a buffer logger")
 }
 
 func TestLogContext(t *testing.T) {
@@ -53,7 +53,7 @@ func TestLogContext(t *testing.T) {
 	lenWithoutCtx := buffer.Len()
 	buffer.Reset()
 
-	assert.Greater(t, lenWithCtx, lenWithoutCtx, "Log line length without context is not shorter than with context")
+	require.Greater(t, lenWithCtx, lenWithoutCtx, "Log line length without context is not shorter than with context")
 }
 
 func TestLogClone(t *testing.T) {
@@ -65,7 +65,7 @@ func TestLogClone(t *testing.T) {
 	logger.Info().Log("info")
 	writer.Flush()
 
-	assert.Contains(t, buffer.String(), `component="test"`)
+	require.Contains(t, buffer.String(), `component="test"`)
 
 	buffer.Reset()
 
@@ -74,7 +74,7 @@ func TestLogClone(t *testing.T) {
 	logger2.Info().Log("info")
 	writer.Flush()
 
-	assert.Contains(t, buffer.String(), `component="tset"`)
+	require.Contains(t, buffer.String(), `component="tset"`)
 }
 
 func TestLogSilent(t *testing.T) {
@@ -85,22 +85,22 @@ func TestLogSilent(t *testing.T) {
 	logger.Debug().Log("debug")
 	writer.Flush()
-	assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
+	require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
 	buffer.Reset()
 
 	logger.Info().Log("info")
 	writer.Flush()
-	assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
+	require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
 	buffer.Reset()
 
 	logger.Warn().Log("warn")
 	writer.Flush()
-	assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
+	require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
 	buffer.Reset()
 
 	logger.Error().Log("error")
 	writer.Flush()
-	assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
+	require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
 	buffer.Reset()
 }
@@ -112,22 +112,22 @@ func TestLogDebug(t *testing.T) {
 	logger.Debug().Log("debug")
 	writer.Flush()
-	assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
+	require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
 	buffer.Reset()
 
 	logger.Info().Log("info")
 	writer.Flush()
-	assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
+	require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
 	buffer.Reset()
 
 	logger.Warn().Log("warn")
 	writer.Flush()
-	assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
+	require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
 	buffer.Reset()
 
 	logger.Error().Log("error")
 	writer.Flush()
-	assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
+	require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
 	buffer.Reset()
 }
@@ -139,22 +139,22 @@ func TestLogInfo(t *testing.T) {
 	logger.Debug().Log("debug")
 	writer.Flush()
-	assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
+	require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
 	buffer.Reset()
 
 	logger.Info().Log("info")
 	writer.Flush()
-	assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
+	require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
 	buffer.Reset()
 
 	logger.Warn().Log("warn")
 	writer.Flush()
-	assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
+	require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
 	buffer.Reset()
 
 	logger.Error().Log("error")
 	writer.Flush()
-	assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
+	require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
 	buffer.Reset()
 }
@@ -166,22 +166,22 @@ func TestLogWarn(t *testing.T) {
 	logger.Debug().Log("debug")
 	writer.Flush()
-	assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
+	require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
 	buffer.Reset()
 
 	logger.Info().Log("info")
 	writer.Flush()
-	assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
+	require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
 	buffer.Reset()
 
 	logger.Warn().Log("warn")
 	writer.Flush()
-	assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
+	require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
 	buffer.Reset()
 
 	logger.Error().Log("error")
 	writer.Flush()
-	assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
+	require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
 	buffer.Reset()
 }
@@ -193,21 +193,43 @@ func TestLogError(t *testing.T) {
 	logger.Debug().Log("debug")
 	writer.Flush()
-	assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
+	require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
 	buffer.Reset()
 
 	logger.Info().Log("info")
 	writer.Flush()
-	assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
+	require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
 	buffer.Reset()
 
 	logger.Warn().Log("warn")
 	writer.Flush()
-	assert.Equal(t, 0, buffer.Len(), "Buffer should be empty")
+	require.Equal(t, 0, buffer.Len(), "Buffer should be empty")
 	buffer.Reset()
 
 	logger.Error().Log("error")
 	writer.Flush()
-	assert.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
+	require.NotEqual(t, 0, buffer.Len(), "Buffer should not be empty")
 	buffer.Reset()
 }
+
+func TestLogWithField(t *testing.T) {
+	bufwriter := NewBufferWriter(Linfo, 10)
+
+	logger := New("test").WithOutput(bufwriter)
+	logger = logger.WithField("foo", "bar")
+	logger.Info().Log("hello")
+
+	events := bufwriter.Events()
+
+	require.Equal(t, 1, len(events))
+	require.Empty(t, events[0].err)
+	require.Equal(t, "bar", events[0].Data["foo"])
+
+	logger = logger.WithField("func", func() bool { return true })
+	logger.Info().Log("hello")
+
+	events = bufwriter.Events()
+	require.Equal(t, 2, len(events))
+	require.NotEmpty(t, events[1].err)
+	require.Equal(t, "bar", events[0].Data["foo"])
+}
diff --git a/log/writer_test.go b/log/writer_test.go
new file mode 100644
index 00000000..7951cf29
--- /dev/null
+++ b/log/writer_test.go
@@ -0,0 +1,181 @@
+package log
+
+import (
+	"bytes"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestJSONWriter(t *testing.T) {
+	buffer := bytes.Buffer{}
+
+	writer := NewJSONWriter(&buffer, Linfo)
+	writer.Write(&Event{
+		logger:    &logger{},
+		Time:      time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+		Level:     Linfo,
+		Component: "test",
+		Caller:    "me",
+		Message:   "hello world",
+		err:       "",
+		Data:      map[string]interface{}{"foo": "bar"},
+	})
+
+	require.Equal(t, `{"Time":"2009-11-10T23:00:00Z","Level":"INFO","Component":"test","Caller":"me","Message":"hello world","Data":{"caller":"me","component":"test","foo":"bar","message":"hello world","ts":"2009-11-10T23:00:00Z"}}`, buffer.String())
+}
+
+func TestConsoleWriter(t *testing.T) {
+	buffer := bytes.Buffer{}
+
+	writer := NewConsoleWriter(&buffer, Linfo, false)
+	writer.Write(&Event{
+		logger:    &logger{},
+		Time:      time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+		Level:     Linfo,
+		Component: "test",
+		Caller:    "me",
+		Message:   "hello world",
+		err:       "",
+		Data:      map[string]interface{}{"foo": "bar"},
+	})
+
+	require.Equal(t, `ts=2009-11-10T23:00:00Z level=INFO component="test" msg="hello world" foo="bar"`+"\n", buffer.String())
+}
+
+func TestTopicWriter(t *testing.T) {
+	bufwriter := NewBufferWriter(Linfo, 10)
+	writer1 := NewTopicWriter(bufwriter, []string{})
+	writer2 := NewTopicWriter(bufwriter, []string{"foobar"})
+
+	writer1.Write(&Event{
+		logger:    &logger{},
+		Time:      time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+		Level:     Linfo,
+		Component: "test",
+		Caller:    "me",
+		Message:   "hello world",
+		err:       "",
+		Data:      map[string]interface{}{"foo": "bar"},
+	})
+
+	writer2.Write(&Event{
+		logger:    &logger{},
+		Time:      time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+		Level:     Linfo,
+		Component: "test",
+		Caller:    "me",
+		Message:   "hello world",
+		err:       "",
+		Data:      map[string]interface{}{"foo": "bar"},
+	})
+
+	require.Equal(t, 1, len(bufwriter.Events()))
+
+	writer1.Write(&Event{
+		logger:    &logger{},
+		Time:      time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+		Level:     Linfo,
+		Component: "foobar",
+		Caller:    "me",
+		Message:   "hello world",
+		err:       "",
+		Data:      map[string]interface{}{"foo": "bar"},
+	})
+
+	writer2.Write(&Event{
+		logger:    &logger{},
+		Time:      time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+		Level:     Linfo,
+		Component: "foobar",
+		Caller:    "me",
+		Message:   "hello world",
+		err:       "",
+		Data:      map[string]interface{}{"foo": "bar"},
+	})
+
+	require.Equal(t, 3, len(bufwriter.Events()))
+}
+
+func TestMultiwriter(t *testing.T) {
+	bufwriter1 := NewBufferWriter(Linfo, 10)
+	bufwriter2 := NewBufferWriter(Linfo, 10)
+
+	writer := NewMultiWriter(bufwriter1, bufwriter2)
+
+	writer.Write(&Event{
+		logger:    &logger{},
+		Time:      time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+		Level:     Linfo,
+		Component: "foobar",
+		Caller:    "me",
+		Message:   "hello world",
+		err:       "",
+		Data:      map[string]interface{}{"foo": "bar"},
+	})
+
+	require.Equal(t, 1, len(bufwriter1.Events()))
+	require.Equal(t, 1, len(bufwriter2.Events()))
+}
+
+func TestLevelRewriter(t *testing.T) {
+	bufwriter := NewBufferWriter(Linfo, 10)
+
+	rule := LevelRewriteRule{
+		Level:     Lwarn,
+		Component: "foobar",
+		Match: map[string]string{
+			"foo": "bar",
+		},
+	}
+
+	writer := NewLevelRewriter(bufwriter, []LevelRewriteRule{rule})
+	writer.Write(&Event{
+		logger:    &logger{},
+		Time:      time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+		Level:     Linfo,
+		Component: "foobar",
+		Caller:    "me",
+		Message:   "hello world",
+		err:       "",
+		Data:      map[string]interface{}{"foo": "bar"},
+	})
+
+	events := bufwriter.Events()
+
+	require.Equal(t, 1, len(events))
+	require.Equal(t, Lwarn, events[0].Level)
+
+	writer.Write(&Event{
+		logger:    &logger{},
+		Time:      time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+		Level:     Linfo,
+		Component: "foobar",
+		Caller:    "me",
+		Message:   "hello world",
+		err:       "",
+		Data:      map[string]interface{}{"bar": "foo"},
+	})
+
+	events = bufwriter.Events()
+
+	require.Equal(t, 2, len(events))
+	require.Equal(t, Linfo, events[1].Level)
+
+	writer.Write(&Event{
+		logger:    &logger{},
+		Time:      time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC),
+		Level:     Linfo,
+		Component: "test",
+		Caller:    "me",
+		Message:   "hello world",
+		err:       "",
+		Data:      map[string]interface{}{"foo": "bar"},
+	})
+
+	events = bufwriter.Events()
+
+	require.Equal(t, 3, len(events))
+	require.Equal(t, Linfo, events[2].Level)
+}
diff --git a/main.go b/main.go
index 4606f67d..377af7e5 100644
--- a/main.go
+++ b/main.go
@@ -5,6 +5,7 @@ import (
 	"os/signal"
 
 	"github.com/datarhei/core/v16/app/api"
+	"github.com/datarhei/core/v16/config/store"
 	"github.com/datarhei/core/v16/log"
 
 	_ "github.com/joho/godotenv/autoload"
@@ -13,7 +14,9 @@ import (
 func main() {
 	logger := log.New("Core").WithOutput(log.NewConsoleWriter(os.Stderr, log.Lwarn, true))
 
-	app, err := api.New(os.Getenv("CORE_CONFIGFILE"), os.Stderr)
+	configfile := store.Location(os.Getenv("CORE_CONFIGFILE"))
+
+	app, err := api.New(configfile, os.Stderr)
 	if err != nil {
 		logger.Error().WithError(err).Log("Failed to create new API")
 		os.Exit(1)
diff --git a/monitor/ffmpeg.go b/monitor/ffmpeg.go
index a447901a..7d76d430 100644
--- a/monitor/ffmpeg.go
+++ b/monitor/ffmpeg.go
@@ -17,7 +17,7 @@ func NewFFmpegCollector(f ffmpeg.FFmpeg) metric.Collector {
 		ffmpeg: f,
 	}
 
-	c.processDescr = metric.NewDesc("ffmpeg_process", "State of the ffmpeg process", []string{"state"})
+	c.processDescr = metric.NewDesc("ffmpeg_process", "Accumulated state changes of all ffmpeg processes", []string{"state"})
 
 	return c
 }
diff --git a/monitor/metric/metric.go b/monitor/metric/metric.go
index a327c6d0..f2e88e42 100644
--- a/monitor/metric/metric.go
+++ b/monitor/metric/metric.go
@@ -12,7 +12,7 @@ type Pattern interface {
 	Name() string
 
 	// Match returns whether a map of labels with its label values
-	// match this pattern.
+	// match this pattern. All labels have to be present and need to match.
 	Match(labels map[string]string) bool
 
 	// IsValid returns whether the pattern is valid.
@@ -26,7 +26,7 @@ type pattern struct {
 }
 
 // NewPattern creates a new pattern with the given prefix and group name. There
-// has to be an even number of parameter, which is ("label", "labelvalue", "label",
+// has to be an even number of labels, which is ("label", "labelvalue", "label",
 // "labelvalue" ...). The label value will be interpreted as regular expression.
 func NewPattern(name string, labels ...string) Pattern {
 	p := &pattern{
@@ -38,7 +38,6 @@ func NewPattern(name string, labels ...string) Pattern {
 	for i := 0; i < len(labels); i += 2 {
 		exp, err := regexp.Compile(labels[i+1])
 		if err != nil {
-			fmt.Printf("error: %s\n", err)
 			continue
 		}
 
@@ -84,19 +83,35 @@ func (p *pattern) IsValid() bool {
 	return p.valid
 }
 
+// Metrics is a collection of values
 type Metrics interface {
+	// Value returns the first value that matches the name and the labels. The labels
+	// are used to create a pattern and therefore must obey the rules of NewPattern.
 	Value(name string, labels ...string) Value
+
+	// Values returns all values that match the name and the labels. The labels
+	// are used to create a pattern and therefore must obey the rules of NewPattern.
 	Values(name string, labels ...string) []Value
+
+	// Labels returns a list of all values for a label.
 	Labels(name string, label string) []string
+
+	// All returns all values currently stored in the collection.
 	All() []Value
+
+	// Add adds a value to the collection.
 	Add(v Value)
+
+	// String returns a string representation of all collected values.
 	String() string
 }
 
+// metrics is an implementation of the Metrics interface.
 type metrics struct {
 	values []Value
 }
 
+// NewMetrics returns a new metrics instance.
 func NewMetrics() *metrics {
 	return &metrics{}
 }
@@ -231,8 +246,15 @@ func (v *value) Hash() string {
 func (v *value) String() string {
 	s := fmt.Sprintf("%s: %f {", v.name, v.value)
 
-	for k, v := range v.labels {
-		s += k + "=" + v + " "
+	keys := []string{}
+	for k := range v.labels {
+		keys = append(keys, k)
+	}
+
+	sort.Strings(keys)
+
+	for _, k := range keys {
+		s += k + "=" + v.labels[k] + " "
 	}
 
 	s += "}"
diff --git a/monitor/metric/metric_test.go b/monitor/metric/metric_test.go
index 743739a7..615ce7cb 100644
--- a/monitor/metric/metric_test.go
+++ b/monitor/metric/metric_test.go
@@ -2,25 +2,154 @@ package metric
 
 import (
 	"testing"
+
+	"github.com/stretchr/testify/require"
 )
 
-func TestValue(t *testing.T) {
-	d := NewDesc("group", "", []string{"name"})
-	v := NewValue(d, 42, "foobar")
+func TestPattern(t *testing.T) {
+	p := NewPattern("bla", "label1", "value1", "label2")
+	require.Equal(t, false, p.IsValid())
 
-	if v.L("name") != "foobar" {
-		t.Fatalf("label name doesn't have the expected value")
-	}
+	p = NewPattern("bla", "label1", "value1", "label2", "valu(e2")
+	require.Equal(t, false, p.IsValid())
+
+	p = NewPattern("bla")
+	require.Equal(t, true, p.IsValid())
+	require.Equal(t, "bla", p.Name())
+
+	p = NewPattern("bla", "label1", "value1", "label2", "value2")
+	require.Equal(t, true, p.IsValid())
+}
+
+func TestPatternMatch(t *testing.T) {
+	p := NewPattern("bla", "label1", "value1", "label2")
+	require.Equal(t, false, p.IsValid())
+	require.Equal(t, false, p.Match(map[string]string{"label1": "value1"}))
+
+	p0 := NewPattern("bla")
+	require.Equal(t, true, p0.IsValid())
+	require.Equal(t, true, p0.Match(map[string]string{}))
+	require.Equal(t, true, p0.Match(map[string]string{"labelX": "foobar"}))
+
+	p = NewPattern("bla", "label1", "value.", "label2", "val?ue2")
+	require.Equal(t, true, p.IsValid())
+	require.Equal(t, false, p.Match(map[string]string{}))
+	require.Equal(t, false, p.Match(map[string]string{"label1": "value1"}))
+	require.Equal(t, true, p.Match(map[string]string{"label1": "value1", "label2": "value2"}))
+	require.Equal(t, true, p.Match(map[string]string{"label1": "value5", "label2": "vaue2"}))
+}
+
+func TestValue(t *testing.T) {
+	d := NewDesc("group", "", []string{"label1", "label2"})
+	v := NewValue(d, 42, "foobar")
+	require.Nil(t, v)
+
+	v = NewValue(d, 42, "foobar", "foobaz")
+	require.NotNil(t, v)
+	require.Equal(t, float64(42), v.Val())
+
+	require.Equal(t, "", v.L("labelX"))
+	require.Equal(t, "foobar", v.L("label1"))
+	require.Equal(t, "foobaz", v.L("label2"))
+	require.Equal(t, "group", v.Name())
+	require.Equal(t, "group:label1=foobar label2=foobaz ", v.Hash())
+	require.Equal(t, "group: 42.000000 {label1=foobar label2=foobaz }", v.String())
+
+	require.Equal(t, map[string]string{"label1": "foobar", "label2": "foobaz"}, v.Labels())
+}
+
+func TestValuePattern(t *testing.T) {
+	d := NewDesc("group", "", []string{"label1", "label2"})
+	v := NewValue(d, 42, "foobar", "foobaz")
 
 	p1 := NewPattern("group")
+	p2 := NewPattern("group", "label1", "foobar")
+	p3 := NewPattern("group", "label2", "foobaz")
+	p4 := NewPattern("group", "label2", "foobaz", "label1", "foobar")
 
-	if v.Match([]Pattern{p1}) == false {
-		t.Fatalf("pattern p1 should have matched")
-	}
+	require.Equal(t, true, v.Match(nil))
+	require.Equal(t, true, v.Match([]Pattern{p1}))
+	require.Equal(t, true, v.Match([]Pattern{p2}))
+	require.Equal(t, true, v.Match([]Pattern{p3}))
+	require.Equal(t, true, v.Match([]Pattern{p4}))
+	require.Equal(t, true, v.Match([]Pattern{p1, p2, p3, p4}))
 
-	p2 := NewPattern("group", "name", "foobar")
+	p5 := NewPattern("group", "label1", "foobaz")
 
-	if v.Match([]Pattern{p2}) == false {
-		t.Fatalf("pattern p2 should have matched")
-	}
+	require.Equal(t, false, v.Match([]Pattern{p5}))
+
+	require.Equal(t, true, v.Match([]Pattern{p4, p5}))
+	require.Equal(t, true, v.Match([]Pattern{p5, p4}))
+}
+
+func TestDescription(t *testing.T) {
+	d := NewDesc("name", "blabla", []string{"label"})
+
+	require.Equal(t, "name", d.Name())
+	require.Equal(t, "blabla", d.Description())
+	require.ElementsMatch(t, []string{"label"}, d.Labels())
+	require.Equal(t, "name: blabla (label)", d.String())
+}
+
+func TestMetrics(t *testing.T) {
+	m := NewMetrics()
+
+	require.Equal(t, "", m.String())
+	require.Equal(t, 0, len(m.All()))
+
+	d := NewDesc("group", "", []string{"label1", "label2"})
+	v1 := NewValue(d, 42, "foobar", "foobaz")
+	require.NotNil(t, v1)
+
+	m.Add(v1)
+
+	require.Equal(t, v1.String(), m.String())
+	require.Equal(t, 1, len(m.All()))
+
+	l := m.Labels("group", "label2")
+
+	require.ElementsMatch(t, []string{"foobaz"}, l)
+
+	v2 := NewValue(d, 77, "barfoo", "bazfoo")
+
+	m.Add(v2)
+
+	require.Equal(t, v1.String()+v2.String(), m.String())
+	require.Equal(t, 2, len(m.All()))
+
+	l = m.Labels("group", "label2")
+
+	require.ElementsMatch(t, []string{"foobaz", "bazfoo"}, l)
+
+	v := m.Value("bla", "label1", "foo*")
+
+	require.Equal(t, nullValue, v)
+
+	v = m.Value("group")
+
+	require.NotEqual(t, nullValue, v)
+
+	v = m.Value("group", "label1", "foo*")
+
+	require.NotEqual(t, nullValue, v)
+
+	v = m.Value("group", "label2", "baz")
+
+	require.NotEqual(t, nullValue, v)
+
+	vs := m.Values("group")
+
+	require.Equal(t, 2, len(vs))
+
+	vs = m.Values("group", "label1", "foo*")
+
+	require.Equal(t, 2, len(vs))
+
+	vs = m.Values("group", "label2", "*baz*")
+
+	require.NotEqual(t, 2, len(vs))
+
+	vs = m.Values("group", "label1")
+
+	require.Equal(t, 0, len(vs))
}
diff --git a/monitor/restream.go b/monitor/restream.go
index cfd069f4..c83e17ff 100644
--- a/monitor/restream.go
+++ b/monitor/restream.go
@@ -25,7 +25,7 @@ func NewRestreamCollector(r restream.Restreamer) metric.Collector {
 	c.restreamProcessDescr = metric.NewDesc("restream_process", "Current process values by name", []string{"processid", "state", "order", "name"})
 	c.restreamProcessStatesDescr = metric.NewDesc("restream_process_states", "Current process state", []string{"processid", "state"})
 	c.restreamProcessIODescr = metric.NewDesc("restream_io", "Current process IO values by name", []string{"processid", "type", "id", "address", "index", "stream", "media", "name"})
-	c.restreamStatesDescr = metric.NewDesc("restream_state", "Summarized process states", []string{"state"})
+	c.restreamStatesDescr = metric.NewDesc("restream_state", "Summarized current process states", []string{"state"})
 
 	return c
 }
diff --git a/net/ip.go b/net/ip.go
index e3cec22f..4c0b14f3 100644
--- a/net/ip.go
+++ b/net/ip.go
@@ -4,7 +4,11 @@ package net
 
 import (
 	"fmt"
+	"io"
 	"net"
+	"net/http"
+	"sync"
+	"time"
 )
 
 var (
@@ -58,3 +62,69 @@ func ipVersion(ipAddress string) int {
 
 	return 0
 }
+
+// GetPublicIPs will try to figure out the public IPs (v4 and v6)
+// we're running on. If it fails, an empty list will be returned.
+func GetPublicIPs(timeout time.Duration) []string {
+	var wg sync.WaitGroup
+
+	ipv4 := ""
+	ipv6 := ""
+
+	wg.Add(2)
+
+	go func() {
+		defer wg.Done()
+
+		ipv4 = doRequest("https://api.ipify.org", timeout)
+	}()
+
+	go func() {
+		defer wg.Done()
+
+		ipv6 = doRequest("https://api6.ipify.org", timeout)
+	}()
+
+	wg.Wait()
+
+	ips := []string{}
+
+	if len(ipv4) != 0 {
+		ips = append(ips, ipv4)
+	}
+
+	if len(ipv6) != 0 && ipv4 != ipv6 {
+		ips = append(ips, ipv6)
+	}
+
+	return ips
+}
+
+func doRequest(url string, timeout time.Duration) string {
+	client := &http.Client{
+		Timeout: timeout,
+	}
+
+	req, err := http.NewRequest("GET", url, nil)
+	if err != nil {
+		return ""
+	}
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return ""
+	}
+
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return ""
+	}
+
+	if resp.StatusCode != 200 {
+		return ""
+	}
+
+	return string(body)
+}
diff --git a/net/ip_test.go b/net/ip_test.go
index eaca6bc3..bd9bd575 100644
--- a/net/ip_test.go
+++ b/net/ip_test.go
@@ -3,18 +3,27 @@ package net
 import (
 	"testing"
 
-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestAnonymizeIPString(t *testing.T) {
+	_, err := AnonymizeIPString("127.987.475.21")
+	require.Error(t, err)
+
+	_, err = AnonymizeIPString("bbd1:xxxx")
+	require.Error(t, err)
+
+	_, err = AnonymizeIPString("hello-world")
+	require.Error(t, err)
+
 	ipv4 := "192.168.1.42"
 	ipv6 := "bbd1:e95a:adbb:b29a:e38b:577f:6f9a:1fa7"
 
 	anonymizedIPv4, err := AnonymizeIPString(ipv4)
-	assert.Nil(t, err)
-	assert.Equal(t, "192.168.1.0", anonymizedIPv4)
+	require.NoError(t, err)
+	require.Equal(t, "192.168.1.0", anonymizedIPv4)
 
 	anonymizedIPv6, err := AnonymizeIPString(ipv6)
-	assert.Nil(t, err)
-	assert.Equal(t, "bbd1:e95a:adbb:b29a::", anonymizedIPv6)
+	require.NoError(t, err)
+	require.Equal(t, "bbd1:e95a:adbb:b29a::", anonymizedIPv6)
 }
diff --git a/net/port_test.go b/net/port_test.go
index 019afcf0..dec2d5b9 100644
--- a/net/port_test.go
+++ b/net/port_test.go
@@ -3,19 +3,30 @@ package net
 import (
 	"testing"
 
-	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestNewPortrange(t *testing.T) {
 	_, err := NewPortrange(1000, 1999)
 
-	assert.Nil(t, err, "Valid port range not accepted: %s", err)
+	require.Nil(t, err, "Valid port range not accepted: %s", err)
 }
 
 func TestInvalidPortrange(t *testing.T) {
 	_, err := NewPortrange(1999, 1000)
 
-	assert.NotNil(t, err, "Invalid port range accepted")
+	require.NotNil(t, err, "Invalid port range accepted")
+}
+
+func TestOutOfRangePortrange(t *testing.T) {
+	p, err := NewPortrange(-1, 70000)
+
+	require.NoError(t, err)
+
+	portrange := p.(*portrange)
+
+	require.Equal(t, 1, portrange.min)
+	require.Equal(t, 65535, len(portrange.ports))
 }
 
 func TestGetPort(t *testing.T) {
@@ -23,26 +34,26 @@ func TestGetPort(t *testing.T) {
 
 	port, err := portrange.Get()
 
-	assert.Nil(t, err)
-	assert.Equal(t, 1000, port)
+	require.Nil(t, err)
+	require.Equal(t, 1000, port)
 }
 
 func TestGetPutPort(t *testing.T) {
 	portrange, _ := NewPortrange(1000, 1999)
 
 	port, err := portrange.Get()
 
-	assert.Nil(t, err)
-	assert.Equal(t, 1000, port)
+	require.Nil(t, err)
+	require.Equal(t, 1000, port)
 
 	port, err = portrange.Get()
 
-	assert.Nil(t, err)
-	assert.Equal(t, 1001, port)
+	require.Nil(t, err)
+	require.Equal(t, 1001, port)
 
 	portrange.Put(1000)
 
 	port, err = portrange.Get()
 
-	assert.Nil(t, err)
-	assert.Equal(t, 1000, port)
+	require.Nil(t, err)
+	require.Equal(t, 1000, port)
 }
 
 func TestPortUnavailable(t *testing.T) {
@@ -50,12 +61,12 @@ func TestPortUnavailable(t *testing.T) {
 
 	for i := 0; i < 1000; i++ {
 		port, _ := portrange.Get()
-		assert.Equal(t, 1000+i, port, "at index %d", i)
+		require.Equal(t, 1000+i, port, "at index %d", i)
 	}
 
 	port, err := portrange.Get()
 
-	assert.NotNil(t, err)
-	assert.Less(t, port, 0)
+	require.NotNil(t, err)
+	require.Less(t, port, 0)
 }
 
 func TestPutPort(t *testing.T) {
@@ -73,16 +84,27 @@ func TestClampRange(t *testing.T) {
 
 	port, _ := portrange.Get()
 
-	assert.Equal(t, 65000, port)
+	require.Equal(t, 65000, port)
 
 	portrange.Put(65000)
 
 	for i := 65000; i <= 65535; i++ {
 		port, _ := portrange.Get()
-		assert.Equal(t, i, port, "at index %d", i)
+		require.Equal(t, i, port, "at index %d", i)
 	}
 
 	port, _ = portrange.Get()
 
-	assert.Less(t, port, 0)
+	require.Less(t, port, 0)
+}
+
+func TestDummyPortranger(t *testing.T) {
+	portrange := NewDummyPortrange()
+
+	port, err := portrange.Get()
+
+	require.Error(t, err)
+	require.Equal(t, 0, port)
+
+	portrange.Put(42)
 }
diff --git a/net/url/url.go b/net/url/url.go
index d99b9ebd..1f89b240 100644
--- a/net/url/url.go
+++ b/net/url/url.go
@@ -4,25 +4,99 @@ import (
 	"net"
 	"net/url"
 	"regexp"
+	"strings"
 )
 
-var reScheme = regexp.MustCompile(`(?i)^([a-z][a-z0-9.+-:]*)://`)
+type URL struct {
+	Scheme      string
+	Opaque      string        // encoded opaque data
+	User        *url.Userinfo // username and password information
+	Host        string        // host or host:port
+	RawPath     string        // path (relative paths may omit leading slash)
+	RawQuery    string        // encoded query values, without '?'
+	RawFragment string        // fragment for references, without '#'
+}
 
-// Validate checks whether the given address is a valid URL
+func (u *URL) Hostname() string {
+	if !strings.Contains(u.Host, ":") {
+		return u.Host
+	}
+
+	hostname, _, _ := net.SplitHostPort(u.Host)
+
+	return hostname
+}
+
+func (u *URL) Port() string {
+	if !strings.Contains(u.Host, ":") {
+		return ""
+	}
+
+	_, port, _ := net.SplitHostPort(u.Host)
+
+	return port
+}
+
+var reScheme = regexp.MustCompile(`(?i)^([a-z][a-z0-9.+-:]*):/{1,3}`)
+
+// Validate checks whether the given address is a valid URL, based on the
+// relaxed version of Parse in this package.
 func Validate(address string) error {
 	_, err := Parse(address)
 
 	return err
 }
 
-// Parse parses an URL into its components. Returns a net/url.URL or
-// an error if the URL couldn't be parsed.
-func Parse(address string) (*url.URL, error) {
-	address = reScheme.ReplaceAllString(address, "//")
+// Parse parses a URL into its components. It is a more relaxed version of
+// url.Parse in that it does not check the escaping of the path, query, and
+// fragment.
+func Parse(address string) (*URL, error) {
+	address, frag, _ := strings.Cut(address, "#")
 
-	u, err := url.Parse(address)
+	u := &URL{
+		RawFragment: frag,
+	}
 
-	return u, err
+	matches := reScheme.FindStringSubmatch(address)
+	if matches != nil {
+		u.Scheme = matches[1]
+		address = strings.Replace(address, u.Scheme+":", "", 1)
+	}
+
+	address, query, _ := strings.Cut(address, "?")
+	u.RawQuery = query
+
+	if strings.HasPrefix(address, "///") {
+		u.RawPath = strings.TrimPrefix(address, "//")
+		return u, nil
+	}
+
+	if strings.HasPrefix(address, "//") {
+		host, path, _ := strings.Cut(address[2:], "/")
+		u.RawPath = "/" + path
+
+		parsedHost, err := url.Parse("//" + host)
+		if err != nil {
+			return nil, err
+		}
+
+		u.User = parsedHost.User
+		u.Host = parsedHost.Host
+
+		return u, nil
+	}
+
+	if strings.HasPrefix(address, "/") {
+		u.RawPath = address
+
+		return u, nil
+	}
+
+	scheme, address, _ := strings.Cut(address, ":")
+
+	u.Scheme = scheme
+	u.Opaque = address
+
+	return u, nil
 }
 
 // HasScheme returns whether the address has an URL scheme prefix
@@ -48,15 +122,11 @@ func Lookup(address string) (string, error) {
 		return "", err
 	}
 
-	if len(u.Host) == 0 {
+	host := u.Hostname()
+	if len(host) == 0 {
 		return "", nil
 	}
 
-	host, _, err := net.SplitHostPort(u.Host)
-	if err != nil {
-		host = u.Host
-	}
-
 	addrs, err := net.LookupHost(host)
 	if err != nil {
 		return "", err
diff --git a/net/url/url_test.go b/net/url/url_test.go
index 977a5123..dff373b8 100644
--- a/net/url/url_test.go
+++ b/net/url/url_test.go
@@ -7,9 +7,20 @@ import (
 )
 
 func TestLookup(t *testing.T) {
-	_, err := Lookup("https://www.google.com")
+	ip, err := Lookup("/localhost:8080/foobar")
 
 	require.NoError(t, err)
+	require.Equal(t, "", ip)
+
+	ip, err = Lookup("http://")
+
+	require.NoError(t, err)
+	require.Equal(t, "", ip)
+
+	ip, err = Lookup("https://www.google.com")
+
+	require.NoError(t, err)
+	require.NotEmpty(t, ip)
 }
 
 func TestLocalhost(t *testing.T) {
@@ -18,3 +29,154 @@ func TestLocalhost(t *testing.T) {
 	require.NoError(t, err)
 	require.Subset(t, []string{"127.0.0.1", "::1"}, []string{ip})
 }
+
+func TestValidate(t *testing.T) {
+	err := Validate("http://localhost/foobar")
+	require.NoError(t, err)
+
+	err = Validate("foobar")
+	require.NoError(t, err)
+
+	err = Validate("http://localhost/foobar_%25v")
+	require.NoError(t, err)
+
+	err = Validate("http://localhost/foobar_%v")
+	require.NoError(t, err)
+}
+
+func TestScheme(t *testing.T) {
+	r := HasScheme("http://localhost/foobar")
+	require.True(t, r)
+
+	r = HasScheme("iueriherfd://localhost/foobar")
+	require.True(t, r)
+
+	r = HasScheme("//localhost/foobar")
+	require.False(t, r)
+}
+
+func TestParse(t *testing.T) {
+	u, err := Parse("http://localhost/foobar")
+	require.NoError(t, err)
+	require.Equal(t, &URL{
+		Scheme:      "http",
+		Opaque:      "",
+		User:        nil,
+		Host:        "localhost",
+		RawPath:     "/foobar",
+		RawQuery:    "",
+		RawFragment: "",
+	}, u)
+
+	u, err = Parse("iueriherfd://localhost/foobar")
+	require.NoError(t, err)
+	require.Equal(t, &URL{
+		Scheme:      "iueriherfd",
+		Opaque:      "",
+		User:        nil,
+		Host:        "localhost",
+		RawPath:     "/foobar",
+		RawQuery:    "",
+		RawFragment: "",
+	}, u)
+
+	u, err = Parse("//localhost/foobar")
+	require.NoError(t, err)
+	require.Equal(t, &URL{
+		Scheme:      "",
+		Opaque:      "",
+		User:        nil,
+		Host:        "localhost",
+		RawPath:     "/foobar",
+		RawQuery:    "",
+		RawFragment: "",
+	}, u)
+
+	u, err = Parse("http://localhost/foobar_%v?foo=bar#foobar")
+	require.NoError(t, err)
+	require.Equal(t, &URL{
+		Scheme:      "http",
+		Opaque:      "",
+		User:        nil,
+		Host:        "localhost",
+		RawPath:     "/foobar_%v",
+		RawQuery:    "foo=bar",
+		RawFragment: "foobar",
+	}, u)
+
+	u, err = Parse("http:localhost/foobar_%v?foo=bar#foobar")
+	require.NoError(t, err)
+	require.Equal(t, &URL{
+		Scheme:      "http",
+		Opaque:      "localhost/foobar_%v",
+		User:        nil,
+		Host:        "",
+		RawPath:     "",
+		RawQuery:    "foo=bar",
+		RawFragment: "foobar",
+	}, u)
+
+	u, err = Parse("http:/localhost/foobar_%v?foo=bar#foobar")
+	require.NoError(t, err)
+	require.Equal(t, &URL{
+		Scheme:      "http",
+		Opaque:      "",
+		User:        nil,
+		Host:        "",
+		RawPath:     "/localhost/foobar_%v",
+		RawQuery:    "foo=bar",
+		RawFragment: "foobar",
+	}, u)
+
+	u, err = Parse("http:///localhost/foobar_%v?foo=bar#foobar")
+	require.NoError(t, err)
+	require.Equal(t, &URL{
+		Scheme:      "http",
+		Opaque:      "",
+		User:        nil,
+		Host:        "",
+		RawPath:     "/localhost/foobar_%v",
+		RawQuery:    "foo=bar",
+		RawFragment: "foobar",
+	}, u)
+
+	u, err = Parse("foo:bar://localhost/foobar_%v?foo=bar#foobar")
+	require.NoError(t, err)
+	require.Equal(t, &URL{
+		Scheme:      "foo:bar",
+		Opaque:      "",
+		User:        nil,
+		Host:        "localhost",
+		RawPath:     "/foobar_%v",
+		RawQuery:    "foo=bar",
+		RawFragment: "foobar",
+	}, u)
+
+	u, err = Parse("http://localhost:8080/foobar")
+	require.NoError(t, err)
+	require.Equal(t, &URL{
+		Scheme:      "http",
+		Opaque:      "",
+		User:        nil,
+		Host:        "localhost:8080",
+		RawPath:     "/foobar",
+		RawQuery:    "",
+		RawFragment: "",
+	}, u)
+	require.Equal(t, "localhost", u.Hostname())
+	require.Equal(t, "8080", u.Port())
+
+	u, err = Parse("https://www.google.com")
+	require.NoError(t, err)
+	require.Equal(t, &URL{
+		Scheme:      "https",
+		Opaque:      "",
+		User:        nil,
+		Host:        "www.google.com",
+		RawPath:     "/",
+		RawQuery:    "",
+		RawFragment: "",
+	}, u)
+	require.Equal(t, "www.google.com", u.Hostname())
+	require.Equal(t, "", u.Port())
+}
diff --git a/process/limits.go b/process/limits.go
index 71ea7c0b..0c43b3ba 100644
--- a/process/limits.go
+++ b/process/limits.go
@@ -104,6 +104,8 @@ func (l *limiter) Stop() {
 	l.proc.Stop()
 
 	l.proc = nil
+
+	l.reset()
 }
 
 func (l *limiter) ticker(ctx context.Context) {
diff --git a/process/process.go b/process/process.go
index 3c927e2b..4bfcb4b4 100644
--- a/process/process.go
+++ b/process/process.go
@@ -192,6 +192,7 @@ type process struct {
 		onStart       func()
 		onExit        func()
 		onStateChange func(from, to string)
+		lock          sync.Mutex
 	}
 	limits Limiter
 }
@@ -588,6 +589,7 @@ func (p *process) stop(wait bool) error {
 	if wait {
 		wg.Add(1)
 
+		p.callbacks.lock.Lock()
 		if p.callbacks.onExit == nil {
 			p.callbacks.onExit = func() {
 				wg.Done()
@@ -601,6 +603,7 @@ func (p *process) stop(wait bool) error {
 				p.callbacks.onExit = cb
 			}
 		}
+		p.callbacks.lock.Unlock()
 	}
 
 	var err error
@@ -829,10 +832,12 @@ func (p *process) waiter() {
 	// Reset the parser stats
 	p.parser.ResetStats()
 
-	// Call the onStop callback
+	// Call the onExit callback
+	p.callbacks.lock.Lock()
 	if p.callbacks.onExit != nil {
 		go p.callbacks.onExit()
 	}
+	p.callbacks.lock.Unlock()
 
 	p.order.lock.Lock()
 	defer p.order.lock.Unlock()
diff --git a/psutil/process.go b/psutil/process.go
index 9986d455..1b1ab3bd 100644
--- a/psutil/process.go
+++ b/psutil/process.go
@@ -98,8 +98,7 @@ func (p *process) cpuTimes() (*cpuTimesStat, error) {
 	}
 
 	s := &cpuTimesStat{
-		total: times.User + times.System + times.Idle + times.Nice + times.Iowait + times.Irq +
-			times.Softirq + times.Steal + times.Guest + times.GuestNice,
+		total:  cpuTotal(times),
 		system: times.System,
 		user:   times.User,
 	}
diff --git a/psutil/psutil.go b/psutil/psutil.go
index 77e3e683..16b306bd 100644
--- a/psutil/psutil.go
+++ b/psutil/psutil.go
@@ -2,6 +2,7 @@ package psutil
import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"io/fs"
@@ -284,9 +285,12 @@ func (u *util) cpuTimes() (*cpuTimesStat, error) {
 		return nil, err
 	}
 
+	if len(times) == 0 {
+		return nil, errors.New("cpu.Times() returned an empty slice")
+	}
+
 	s := &cpuTimesStat{
-		total: times[0].User + times[0].System + times[0].Idle + times[0].Nice + times[0].Iowait + times[0].Irq +
-			times[0].Softirq + times[0].Steal + times[0].Guest + times[0].GuestNice,
+		total:  cpuTotal(&times[0]),
 		system: times[0].System,
 		user:   times[0].User,
 		idle:   times[0].Idle,
@@ -497,3 +501,8 @@ func (u *util) readFile(path string) ([]string, error) {
 
 	return lines, nil
 }
+
+func cpuTotal(c *cpu.TimesStat) float64 {
+	return c.User + c.System + c.Idle + c.Nice + c.Iowait + c.Irq +
+		c.Softirq + c.Steal + c.Guest + c.GuestNice
+}
diff --git a/restream/app/avstream.go b/restream/app/avstream.go
index fcfb8ded..70cf9634 100644
--- a/restream/app/avstream.go
+++ b/restream/app/avstream.go
@@ -2,19 +2,19 @@ package app
 
 type AVstreamIO struct {
 	State  string
-	Packet uint64
+	Packet uint64 // counter
 	Time   uint64
-	Size   uint64
+	Size   uint64 // bytes
 }
 
 type AVstream struct {
 	Input       AVstreamIO
 	Output      AVstreamIO
-	Aqueue      uint64
-	Queue       uint64
-	Dup         uint64
-	Drop        uint64
-	Enc         uint64
+	Aqueue      uint64 // gauge
+	Queue       uint64 // gauge
+	Dup         uint64 // counter
+	Drop        uint64 // counter
+	Enc         uint64 // counter
 	Looping     bool
 	Duplicating bool
 	GOP         string
diff --git a/restream/app/process.go b/restream/app/process.go
index b8fb75d8..4ec6036a 100644
--- a/restream/app/process.go
+++ b/restream/app/process.go
@@ -2,7 +2,6 @@ package app
 
 import (
 	"github.com/datarhei/core/v16/process"
-	"github.com/datarhei/core/v16/restream/replace"
 )
 
 type ConfigIOCleanup struct {
@@ -37,6 +36,7 @@ func (io ConfigIO) Clone() ConfigIO {
 type Config struct {
 	ID             string     `json:"id"`
 	Reference      string     `json:"reference"`
+	FFVersion      string     `json:"ffversion"`
 	Input          []ConfigIO `json:"input"`
 	Output         []ConfigIO `json:"output"`
 	Options        []string   `json:"options"`
@@ -53,6 +53,7 @@ func (config *Config) Clone() *Config {
 	clone := &Config{
 		ID:             config.ID,
 		Reference:      config.Reference,
+		FFVersion:      config.FFVersion,
 		Reconnect:      config.Reconnect,
 		ReconnectDelay: config.ReconnectDelay,
 		Autostart:      config.Autostart,
@@ -78,79 +79,6 @@ func (config *Config) Clone() *Config {
 	return clone
 }
 
-// ReplacePlaceholders replaces all placeholders in the config. The config
-// will be modified in place.
-func (config *Config) ResolvePlaceholders(r replace.Replacer) { - for i, option := range config.Options { - // Replace any known placeholders - option = r.Replace(option, "diskfs", "") - - config.Options[i] = option - } - - // Resolving the given inputs - for i, input := range config.Input { - // Replace any known placeholders - input.ID = r.Replace(input.ID, "processid", config.ID) - input.ID = r.Replace(input.ID, "reference", config.Reference) - input.Address = r.Replace(input.Address, "inputid", input.ID) - input.Address = r.Replace(input.Address, "processid", config.ID) - input.Address = r.Replace(input.Address, "reference", config.Reference) - input.Address = r.Replace(input.Address, "diskfs", "") - input.Address = r.Replace(input.Address, "memfs", "") - input.Address = r.Replace(input.Address, "rtmp", "") - input.Address = r.Replace(input.Address, "srt", "") - - for j, option := range input.Options { - // Replace any known placeholders - option = r.Replace(option, "inputid", input.ID) - option = r.Replace(option, "processid", config.ID) - option = r.Replace(option, "reference", config.Reference) - option = r.Replace(option, "diskfs", "") - option = r.Replace(option, "memfs", "") - - input.Options[j] = option - } - - config.Input[i] = input - } - - // Resolving the given outputs - for i, output := range config.Output { - // Replace any known placeholders - output.ID = r.Replace(output.ID, "processid", config.ID) - output.Address = r.Replace(output.Address, "outputid", output.ID) - output.Address = r.Replace(output.Address, "processid", config.ID) - output.Address = r.Replace(output.Address, "reference", config.Reference) - output.Address = r.Replace(output.Address, "diskfs", "") - output.Address = r.Replace(output.Address, "memfs", "") - output.Address = r.Replace(output.Address, "rtmp", "") - output.Address = r.Replace(output.Address, "srt", "") - - for j, option := range output.Options { - // Replace any known placeholders - option = r.Replace(option, "outputid", output.ID) - option = r.Replace(option, "processid", config.ID) - option = r.Replace(option, "reference", config.Reference) - option = r.Replace(option, "diskfs", "") - option = r.Replace(option, "memfs", "") - - output.Options[j] = option - } - - for j, cleanup := range output.Cleanup { - // Replace any known placeholders - cleanup.Pattern = r.Replace(cleanup.Pattern, "outputid", output.ID) - cleanup.Pattern = r.Replace(cleanup.Pattern, "processid", config.ID) - cleanup.Pattern = r.Replace(cleanup.Pattern, "reference", config.Reference) - - output.Cleanup[j] = cleanup - } - - config.Output[i] = output - } -} - // CreateCommand created the FFmpeg command from this config. 
func (config *Config) CreateCommand() []string { var command []string diff --git a/restream/app/progress.go b/restream/app/progress.go index 7d081d39..c9f1fcd5 100644 --- a/restream/app/progress.go +++ b/restream/app/progress.go @@ -5,18 +5,20 @@ type ProgressIO struct { Address string // General - Index uint64 - Stream uint64 - Format string - Type string - Codec string - Coder string - Frame uint64 - FPS float64 - Packet uint64 - PPS float64 - Size uint64 // bytes - Bitrate float64 // bit/s + Index uint64 + Stream uint64 + Format string + Type string + Codec string + Coder string + Frame uint64 // counter + Keyframe uint64 // counter + FPS float64 // rate, frames per second + Packet uint64 // counter + PPS float64 // rate, packets per second + Size uint64 // bytes + Bitrate float64 // bit/s + Extradata uint64 // bytes // Video Pixfmt string @@ -36,15 +38,15 @@ type ProgressIO struct { type Progress struct { Input []ProgressIO Output []ProgressIO - Frame uint64 - Packet uint64 - FPS float64 - PPS float64 - Quantizer float64 - Size uint64 // bytes - Time float64 + Frame uint64 // counter + Packet uint64 // counter + FPS float64 // rate, frames per second + PPS float64 // rate, packets per second + Quantizer float64 // gauge + Size uint64 // bytes + Time float64 // seconds with fractions Bitrate float64 // bit/s - Speed float64 - Drop uint64 - Dup uint64 + Speed float64 // gauge + Drop uint64 // counter + Dup uint64 // counter } diff --git a/restream/fs/fs.go b/restream/fs/fs.go index 29216aa9..0e676da9 100644 --- a/restream/fs/fs.go +++ b/restream/fs/fs.go @@ -62,6 +62,11 @@ func New(config Config) Filesystem { rfs.logger = log.New("") } + rfs.logger = rfs.logger.WithFields(log.Fields{ + "name": config.FS.Name(), + "type": config.FS.Type(), + }) + rfs.cleanupPatterns = make(map[string][]Pattern) // already drain the stop @@ -130,7 +135,7 @@ func (rfs *filesystem) cleanup() { for _, patterns := range rfs.cleanupPatterns { for _, pattern := range patterns { - filesAndDirs := rfs.Filesystem.List(pattern.Pattern) + filesAndDirs := rfs.Filesystem.List("/", pattern.Pattern) files := []fs.FileInfo{} for _, f := range filesAndDirs { @@ -146,7 +151,7 @@ func (rfs *filesystem) cleanup() { if pattern.MaxFiles > 0 && uint(len(files)) > pattern.MaxFiles { for i := uint(0); i < uint(len(files))-pattern.MaxFiles; i++ { rfs.logger.Debug().WithField("path", files[i].Name()).Log("Remove file because MaxFiles is exceeded") - rfs.Filesystem.Delete(files[i].Name()) + rfs.Filesystem.Remove(files[i].Name()) } } @@ -156,7 +161,7 @@ func (rfs *filesystem) cleanup() { for _, f := range files { if f.ModTime().Before(bestBefore) { rfs.logger.Debug().WithField("path", f.Name()).Log("Remove file because MaxFileAge is exceeded") - rfs.Filesystem.Delete(f.Name()) + rfs.Filesystem.Remove(f.Name()) } } } @@ -170,11 +175,11 @@ func (rfs *filesystem) purge(patterns []Pattern) (nfiles uint64) { continue } - files := rfs.Filesystem.List(pattern.Pattern) + files := rfs.Filesystem.List("/", pattern.Pattern) sort.Slice(files, func(i, j int) bool { return len(files[i].Name()) > len(files[j].Name()) }) for _, f := range files { rfs.logger.Debug().WithField("path", f.Name()).Log("Purging file") - rfs.Filesystem.Delete(f.Name()) + rfs.Filesystem.Remove(f.Name()) nfiles++ } } diff --git a/restream/fs/fs_test.go b/restream/fs/fs_test.go index ace6e9fa..0162be1d 100644 --- a/restream/fs/fs_test.go +++ b/restream/fs/fs_test.go @@ -10,11 +10,7 @@ import ( ) func TestMaxFiles(t *testing.T) { - memfs := fs.NewMemFilesystem(fs.MemConfig{ - 
Base: "/", - Size: 1024, - Purge: false, - }) + memfs, _ := fs.NewMemFilesystem(fs.MemConfig{}) cleanfs := New(Config{ FS: memfs, @@ -30,15 +26,15 @@ func TestMaxFiles(t *testing.T) { }, }) - cleanfs.Store("/chunk_0.ts", strings.NewReader("chunk_0")) - cleanfs.Store("/chunk_1.ts", strings.NewReader("chunk_1")) - cleanfs.Store("/chunk_2.ts", strings.NewReader("chunk_2")) + cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0")) + cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1")) + cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2")) require.Eventually(t, func() bool { return cleanfs.Files() == 3 }, 3*time.Second, time.Second) - cleanfs.Store("/chunk_3.ts", strings.NewReader("chunk_3")) + cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3")) require.Eventually(t, func() bool { if cleanfs.Files() != 3 { @@ -47,7 +43,7 @@ func TestMaxFiles(t *testing.T) { names := []string{} - for _, f := range cleanfs.List("/*.ts") { + for _, f := range cleanfs.List("/", "/*.ts") { names = append(names, f.Name()) } @@ -60,11 +56,7 @@ func TestMaxFiles(t *testing.T) { } func TestMaxAge(t *testing.T) { - memfs := fs.NewMemFilesystem(fs.MemConfig{ - Base: "/", - Size: 1024, - Purge: false, - }) + memfs, _ := fs.NewMemFilesystem(fs.MemConfig{}) cleanfs := New(Config{ FS: memfs, @@ -80,15 +72,15 @@ func TestMaxAge(t *testing.T) { }, }) - cleanfs.Store("/chunk_0.ts", strings.NewReader("chunk_0")) - cleanfs.Store("/chunk_1.ts", strings.NewReader("chunk_1")) - cleanfs.Store("/chunk_2.ts", strings.NewReader("chunk_2")) + cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0")) + cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1")) + cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2")) require.Eventually(t, func() bool { return cleanfs.Files() == 0 }, 5*time.Second, time.Second) - cleanfs.Store("/chunk_3.ts", strings.NewReader("chunk_3")) + cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3")) require.Eventually(t, func() bool { if cleanfs.Files() != 1 { @@ -97,7 +89,7 @@ func TestMaxAge(t *testing.T) { names := []string{} - for _, f := range cleanfs.List("/*.ts") { + for _, f := range cleanfs.List("/", "/*.ts") { names = append(names, f.Name()) } @@ -110,11 +102,7 @@ func TestMaxAge(t *testing.T) { } func TestUnsetCleanup(t *testing.T) { - memfs := fs.NewMemFilesystem(fs.MemConfig{ - Base: "/", - Size: 1024, - Purge: false, - }) + memfs, _ := fs.NewMemFilesystem(fs.MemConfig{}) cleanfs := New(Config{ FS: memfs, @@ -130,15 +118,15 @@ func TestUnsetCleanup(t *testing.T) { }, }) - cleanfs.Store("/chunk_0.ts", strings.NewReader("chunk_0")) - cleanfs.Store("/chunk_1.ts", strings.NewReader("chunk_1")) - cleanfs.Store("/chunk_2.ts", strings.NewReader("chunk_2")) + cleanfs.WriteFileReader("/chunk_0.ts", strings.NewReader("chunk_0")) + cleanfs.WriteFileReader("/chunk_1.ts", strings.NewReader("chunk_1")) + cleanfs.WriteFileReader("/chunk_2.ts", strings.NewReader("chunk_2")) require.Eventually(t, func() bool { return cleanfs.Files() == 3 }, 3*time.Second, time.Second) - cleanfs.Store("/chunk_3.ts", strings.NewReader("chunk_3")) + cleanfs.WriteFileReader("/chunk_3.ts", strings.NewReader("chunk_3")) require.Eventually(t, func() bool { if cleanfs.Files() != 3 { @@ -147,7 +135,7 @@ func TestUnsetCleanup(t *testing.T) { names := []string{} - for _, f := range cleanfs.List("/*.ts") { + for _, f := range cleanfs.List("/", "/*.ts") { names = append(names, f.Name()) } @@ -158,7 +146,7 @@ func TestUnsetCleanup(t 
*testing.T) {
 	cleanfs.UnsetCleanup("foobar")
 
-	cleanfs.Store("/chunk_4.ts", strings.NewReader("chunk_4"))
+	cleanfs.WriteFileReader("/chunk_4.ts", strings.NewReader("chunk_4"))
 
 	require.Eventually(t, func() bool {
 		if cleanfs.Files() != 4 {
@@ -167,7 +155,7 @@ func TestUnsetCleanup(t *testing.T) {
 
 		names := []string{}
 
-		for _, f := range cleanfs.List("/*.ts") {
+		for _, f := range cleanfs.List("/", "/*.ts") {
 			names = append(names, f.Name())
 		}
 
diff --git a/restream/replace/replace.go b/restream/replace/replace.go
index 47885a38..e9b45adc 100644
--- a/restream/replace/replace.go
+++ b/restream/replace/replace.go
@@ -4,17 +4,23 @@ import (
 	"net/url"
 	"regexp"
 	"strings"
+
+	"github.com/datarhei/core/v16/glob"
+	"github.com/datarhei/core/v16/restream/app"
 )
 
+type TemplateFn func(config *app.Config, section string) string
+
 type Replacer interface {
 	// RegisterTemplate registers a template for a specific placeholder. Template
 	// may contain placeholders as well of the form {name}. They will be replaced
-	// by the parameters of the placeholder (see Replace).
-	RegisterTemplate(placeholder, template string)
+	// by the parameters of the placeholder (see Replace). If a parameter of
+	// a template is not present, default values can be provided.
+	RegisterTemplate(placeholder, template string, defaults map[string]string)
 
 	// RegisterTemplateFunc does the same as RegisterTemplate, but the template
 	// is returned by the template function.
-	RegisterTemplateFunc(placeholder string, template func() string)
+	RegisterTemplateFunc(placeholder string, template TemplateFn, defaults map[string]string)
 
 	// Replace replaces all occurrences of placeholder in str with value. The placeholder is of the
 	// form {placeholder}. It is possible to escape a character in value with \\ by appending a ^
@@ -24,12 +30,18 @@ type Replacer interface {
 	// the value of the corresponding key in the parameters.
 	// If the value is an empty string, the registered templates will be searched for that
 	// placeholder. If no template is found, the placeholder will be replaced by the empty string.
-	// A placeholder name may consist on of the letters a-z.
-	Replace(str, placeholder, value string) string
+	// A placeholder name may consist of the letters a-z and ':'. The placeholder may contain
+	// a glob pattern to find the appropriate template.
+ Replace(str, placeholder, value string, vars map[string]string, config *app.Config, section string) string +} + +type template struct { + fn TemplateFn + defaults map[string]string } type replacer struct { - templates map[string]func() string + templates map[string]template re *regexp.Regexp templateRe *regexp.Regexp @@ -38,41 +50,51 @@ type replacer struct { // New returns a Replacer func New() Replacer { r := &replacer{ - templates: make(map[string]func() string), - re: regexp.MustCompile(`{([a-z]+)(?:\^(.))?(?:,(.*?))?}`), - templateRe: regexp.MustCompile(`{([a-z]+)}`), + templates: make(map[string]template), + re: regexp.MustCompile(`{([a-z:]+)(?:\^(.))?(?:,(.*?))?}`), + templateRe: regexp.MustCompile(`{([a-z:]+)}`), } return r } -func (r *replacer) RegisterTemplate(placeholder, template string) { - r.templates[placeholder] = func() string { return template } +func (r *replacer) RegisterTemplate(placeholder, tmpl string, defaults map[string]string) { + r.RegisterTemplateFunc(placeholder, func(*app.Config, string) string { return tmpl }, defaults) } -func (r *replacer) RegisterTemplateFunc(placeholder string, template func() string) { - r.templates[placeholder] = template +func (r *replacer) RegisterTemplateFunc(placeholder string, templateFn TemplateFn, defaults map[string]string) { + r.templates[placeholder] = template{ + fn: templateFn, + defaults: defaults, + } } -func (r *replacer) Replace(str, placeholder, value string) string { +func (r *replacer) Replace(str, placeholder, value string, vars map[string]string, config *app.Config, section string) string { str = r.re.ReplaceAllStringFunc(str, func(match string) string { matches := r.re.FindStringSubmatch(match) - if matches[1] != placeholder { + + if ok, _ := glob.Match(placeholder, matches[1], ':'); !ok { return match } + placeholder := matches[1] + // We need a copy from the value v := value + var tmpl template = template{ + fn: func(*app.Config, string) string { return v }, + } // Check for a registered template if len(v) == 0 { - tmplFunc, ok := r.templates[placeholder] + t, ok := r.templates[placeholder] if ok { - v = tmplFunc() + tmpl = t } } - v = r.compileTemplate(v, matches[3]) + v = tmpl.fn(config, section) + v = r.compileTemplate(v, matches[3], vars, tmpl.defaults) if len(matches[2]) != 0 { // If there's a character to escape, we also have to escape the @@ -97,13 +119,18 @@ func (r *replacer) Replace(str, placeholder, value string) string { // placeholder name and will be replaced with the value. The resulting string is "Hello World!". // If a placeholder name is not present in the params string, it will not be replaced. The key // and values can be escaped as in net/url.QueryEscape. 
-func (r *replacer) compileTemplate(str, params string) string { - if len(params) == 0 { +func (r *replacer) compileTemplate(str, params string, vars map[string]string, defaults map[string]string) string { + if len(params) == 0 && len(defaults) == 0 { return str } p := make(map[string]string) + // Copy the defaults + for key, value := range defaults { + p[key] = value + } + // taken from net/url.ParseQuery for params != "" { var key string @@ -111,15 +138,22 @@ func (r *replacer) compileTemplate(str, params string) string { if key == "" { continue } + key, value, _ := strings.Cut(key, "=") key, err := url.QueryUnescape(key) if err != nil { continue } + value, err = url.QueryUnescape(value) if err != nil { continue } + + for name, v := range vars { + value = strings.ReplaceAll(value, "$"+name, v) + } + p[key] = value } diff --git a/restream/replace/replace_test.go b/restream/replace/replace_test.go index 7474775d..1d9ccfe0 100644 --- a/restream/replace/replace_test.go +++ b/restream/replace/replace_test.go @@ -3,6 +3,7 @@ package replace import ( "testing" + "github.com/datarhei/core/v16/restream/app" "github.com/stretchr/testify/require" ) @@ -24,28 +25,56 @@ func TestReplace(t *testing.T) { r := New() for _, e := range samples { - replaced := r.Replace(e[0], "foobar", foobar) + replaced := r.Replace(e[0], "foobar", foobar, nil, nil, "") require.Equal(t, e[1], replaced, e[0]) } - replaced := r.Replace("{foobar}", "foobar", "") + replaced := r.Replace("{foobar}", "foobar", "", nil, nil, "") require.Equal(t, "", replaced) } func TestReplaceTemplate(t *testing.T) { r := New() - r.RegisterTemplate("foobar", "Hello {who}! {what}?") + r.RegisterTemplate("foo:bar", "Hello {who}! {what}?", nil) - replaced := r.Replace("{foobar,who=World}", "foobar", "") + replaced := r.Replace("{foo:bar,who=World}", "foo:bar", "", nil, nil, "") require.Equal(t, "Hello World! {what}?", replaced) - replaced = r.Replace("{foobar,who=World,what=E%3dmc^2}", "foobar", "") + replaced = r.Replace("{foo:bar,who=World,what=E%3dmc^2}", "foo:bar", "", nil, nil, "") require.Equal(t, "Hello World! E=mc^2?", replaced) - replaced = r.Replace("{foobar^:,who=World,what=E%3dmc:2}", "foobar", "") + replaced = r.Replace("{foo:bar^:,who=World,what=E%3dmc:2}", "foo:bar", "", nil, nil, "") require.Equal(t, "Hello World! E=mc\\\\:2?", replaced) } +func TestReplaceTemplateFunc(t *testing.T) { + r := New() + r.RegisterTemplateFunc("foo:bar", func(config *app.Config, kind string) string { return "Hello {who}! {what}?" }, nil) + + replaced := r.Replace("{foo:bar,who=World}", "foo:bar", "", nil, nil, "") + require.Equal(t, "Hello World! {what}?", replaced) + + replaced = r.Replace("{foo:bar,who=World,what=E%3dmc^2}", "foo:bar", "", nil, nil, "") + require.Equal(t, "Hello World! E=mc^2?", replaced) + + replaced = r.Replace("{foo:bar^:,who=World,what=E%3dmc:2}", "foo:bar", "", nil, nil, "") + require.Equal(t, "Hello World! E=mc\\\\:2?", replaced) +} + +func TestReplaceTemplateDefaults(t *testing.T) { + r := New() + r.RegisterTemplate("foobar", "Hello {who}! {what}?", map[string]string{ + "who": "someone", + "what": "something", + }) + + replaced := r.Replace("{foobar}", "foobar", "", nil, nil, "") + require.Equal(t, "Hello someone! something?", replaced) + + replaced = r.Replace("{foobar,who=World}", "foobar", "", nil, nil, "") + require.Equal(t, "Hello World! 
something?", replaced) +} + func TestReplaceCompileTemplate(t *testing.T) { samples := [][3]string{ {"Hello {who}!", "who=World", "Hello World!"}, @@ -58,7 +87,58 @@ func TestReplaceCompileTemplate(t *testing.T) { r := New().(*replacer) for _, e := range samples { - replaced := r.compileTemplate(e[0], e[1]) + replaced := r.compileTemplate(e[0], e[1], nil, nil) require.Equal(t, e[2], replaced, e[0]) } } + +func TestReplaceCompileTemplateDefaults(t *testing.T) { + samples := [][3]string{ + {"Hello {who}!", "", "Hello someone!"}, + {"Hello {who}!", "who=World", "Hello World!"}, + {"Hello {who}! {what}?", "who=World", "Hello World! something?"}, + {"Hello {who}! {what}?", "who=World,what=Yeah", "Hello World! Yeah?"}, + {"Hello {who}! {what}?", "who=World,what=", "Hello World! ?"}, + } + + r := New().(*replacer) + + for _, e := range samples { + replaced := r.compileTemplate(e[0], e[1], nil, map[string]string{ + "who": "someone", + "what": "something", + }) + require.Equal(t, e[2], replaced, e[0]) + } +} + +func TestReplaceCompileTemplateWithVars(t *testing.T) { + samples := [][3]string{ + {"Hello {who}!", "who=$processid", "Hello 123456789!"}, + {"Hello {who}! {what}?", "who=$location", "Hello World! {what}?"}, + {"Hello {who}! {what}?", "who=$location,what=Yeah", "Hello World! Yeah?"}, + {"Hello {who}! {what}?", "who=$location,what=$processid", "Hello World! 123456789?"}, + {"Hello {who}!", "who=$processidxxx", "Hello 123456789xxx!"}, + } + + vars := map[string]string{ + "processid": "123456789", + "location": "World", + } + + r := New().(*replacer) + + for _, e := range samples { + replaced := r.compileTemplate(e[0], e[1], vars, nil) + require.Equal(t, e[2], replaced, e[0]) + } +} + +func TestReplaceGlob(t *testing.T) { + r := New() + r.RegisterTemplate("foo:bar", "Hello foobar", nil) + r.RegisterTemplate("foo:baz", "Hello foobaz", nil) + + replaced := r.Replace("{foo:baz}, {foo:bar}", "foo:*", "", nil, nil, "") + require.Equal(t, "Hello foobaz, Hello foobar", replaced) +} diff --git a/restream/restream.go b/restream/restream.go index abde48a0..fcf38999 100644 --- a/restream/restream.go +++ b/restream/restream.go @@ -24,34 +24,37 @@ import ( rfs "github.com/datarhei/core/v16/restream/fs" "github.com/datarhei/core/v16/restream/replace" "github.com/datarhei/core/v16/restream/store" + + "github.com/Masterminds/semver/v3" ) // The Restreamer interface type Restreamer interface { - ID() string // ID of this instance - Name() string // Arbitrary name of this instance - CreatedAt() time.Time // Time of when this instance has been created - Start() // Start all processes that have a "start" order - Stop() // Stop all running process but keep their "start" order - AddProcess(config *app.Config) error // Add a new process - GetProcessIDs(idpattern, refpattern string) []string // Get a list of process IDs based on patterns for ID and reference - DeleteProcess(id string) error // Delete a process - UpdateProcess(id string, config *app.Config) error // Update a process - StartProcess(id string) error // Start a process - StopProcess(id string) error // Stop a process - RestartProcess(id string) error // Restart a process - ReloadProcess(id string) error // Reload a process - GetProcess(id string) (*app.Process, error) // Get a process - GetProcessState(id string) (*app.State, error) // Get the state of a process - GetProcessLog(id string) (*app.Log, error) // Get the logs of a process - GetPlayout(id, inputid string) (string, error) // Get the URL of the playout API for a process - Probe(id string) 
app.Probe                   // Probe a process
-	Skills() skills.Skills                                     // Get the ffmpeg skills
-	ReloadSkills() error                                       // Reload the ffmpeg skills
-	SetProcessMetadata(id, key string, data interface{}) error // Set metatdata to a process
-	GetProcessMetadata(id, key string) (interface{}, error)    // Get previously set metadata from a process
-	SetMetadata(key string, data interface{}) error            // Set general metadata
-	GetMetadata(key string) (interface{}, error)               // Get previously set general metadata
+	ID() string           // ID of this instance
+	Name() string         // Arbitrary name of this instance
+	CreatedAt() time.Time // Time when this instance was created
+	Start()               // Start all processes that have a "start" order
+	Stop()                // Stop all running processes but keep their "start" order
+	AddProcess(config *app.Config) error                 // Add a new process
+	GetProcessIDs(idpattern, refpattern string) []string // Get a list of process IDs based on patterns for ID and reference
+	DeleteProcess(id string) error                       // Delete a process
+	UpdateProcess(id string, config *app.Config) error   // Update a process
+	StartProcess(id string) error                        // Start a process
+	StopProcess(id string) error                         // Stop a process
+	RestartProcess(id string) error                      // Restart a process
+	ReloadProcess(id string) error                       // Reload a process
+	GetProcess(id string) (*app.Process, error)          // Get a process
+	GetProcessState(id string) (*app.State, error)       // Get the state of a process
+	GetProcessLog(id string) (*app.Log, error)           // Get the logs of a process
+	GetPlayout(id, inputid string) (string, error)       // Get the URL of the playout API for a process
+	Probe(id string) app.Probe                                    // Probe a process
+	ProbeWithTimeout(id string, timeout time.Duration) app.Probe  // Probe a process with a specific timeout
+	Skills() skills.Skills                                        // Get the ffmpeg skills
+	ReloadSkills() error                                          // Reload the ffmpeg skills
+	SetProcessMetadata(id, key string, data interface{}) error    // Set metadata on a process
+	GetProcessMetadata(id, key string) (interface{}, error)       // Get previously set metadata from a process
+	SetMetadata(key string, data interface{}) error               // Set general metadata
+	GetMetadata(key string) (interface{}, error)                  // Get previously set general metadata
 }
 
 // Config is the required configuration for a new restreamer instance.
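For context on the net/url changes earlier in this patch: process addresses may contain FFmpeg placeholders such as %v, which the standard library rejects as an invalid percent-encoding, hence the hand-rolled Parse. A minimal sketch of the difference; the import path is an assumption based on this repo's module layout, and the expected values follow the tests above:

package main

import (
	"fmt"
	stdurl "net/url"

	coreurl "github.com/datarhei/core/v16/net/url" // path assumed from the repo layout
)

func main() {
	// The stdlib parser fails on the FFmpeg pattern "%v" in the path.
	_, err := stdurl.Parse("http://localhost/foobar_%v")
	fmt.Println(err != nil) // true: invalid URL escape "%v"

	// The relaxed parser keeps the path verbatim in RawPath.
	u, err := coreurl.Parse("http://localhost/foobar_%v?foo=bar#foobar")
	if err != nil {
		panic(err)
	}

	// Per the tests above: http localhost /foobar_%v foo=bar foobar
	fmt.Println(u.Scheme, u.Host, u.RawPath, u.RawQuery, u.RawFragment)
}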
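The Replacer rework above adds per-template defaults, an app.Config context, and glob matching on placeholder names. A short sketch of how registration and replacement now fit together, mirroring the cases pinned down in replace_test.go:

package main

import (
	"fmt"

	"github.com/datarhei/core/v16/restream/replace"
)

func main() {
	r := replace.New()

	// Defaults fill in template parameters that the placeholder doesn't set.
	r.RegisterTemplate("foo:bar", "Hello {who}! {what}?", map[string]string{
		"what": "something",
	})

	// "who" comes from the placeholder parameters, "what" from the defaults.
	fmt.Println(r.Replace("{foo:bar,who=World}", "foo:bar", "", nil, nil, ""))
	// -> Hello World! something?

	// Placeholder names may contain ':' and can be addressed with a glob.
	fmt.Println(r.Replace("{foo:bar,who=World}", "foo:*", "", nil, nil, ""))
	// -> Hello World! something?
}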
@@ -59,8 +62,7 @@ type Config struct { ID string Name string Store store.Store - DiskFS fs.Filesystem - MemFS fs.Filesystem + Filesystems []fs.Filesystem Replace replace.Replacer FFmpeg ffmpeg.FFmpeg MaxProcesses int64 @@ -91,8 +93,8 @@ type restream struct { maxProc int64 nProc int64 fs struct { - diskfs rfs.Filesystem - memfs rfs.Filesystem + list []rfs.Filesystem + diskfs []rfs.Filesystem stopObserver context.CancelFunc } replace replace.Replacer @@ -122,29 +124,28 @@ func New(config Config) (Restreamer, error) { } if r.store == nil { - r.store = store.NewDummyStore(store.DummyConfig{}) + dummyfs, _ := fs.NewMemFilesystem(fs.MemConfig{}) + s, err := store.NewJSON(store.JSONConfig{ + Filesystem: dummyfs, + }) + if err != nil { + return nil, err + } + r.store = s } - if config.DiskFS != nil { - r.fs.diskfs = rfs.New(rfs.Config{ - FS: config.DiskFS, - Logger: r.logger.WithComponent("Cleanup").WithField("type", "diskfs"), + for _, fs := range config.Filesystems { + fs := rfs.New(rfs.Config{ + FS: fs, + Logger: r.logger.WithComponent("Cleanup"), }) - } else { - r.fs.diskfs = rfs.New(rfs.Config{ - FS: fs.NewDummyFilesystem(), - }) - } - if config.MemFS != nil { - r.fs.memfs = rfs.New(rfs.Config{ - FS: config.MemFS, - Logger: r.logger.WithComponent("Cleanup").WithField("type", "memfs"), - }) - } else { - r.fs.memfs = rfs.New(rfs.Config{ - FS: fs.NewDummyFilesystem(), - }) + r.fs.list = append(r.fs.list, fs) + + // Add the diskfs filesystems also to a separate array. We need it later for input and output validation + if fs.Type() == "disk" { + r.fs.diskfs = append(r.fs.diskfs, fs) + } } if r.replace == nil { @@ -183,12 +184,16 @@ func (r *restream) Start() { r.setCleanup(id, t.config) } - r.fs.diskfs.Start() - r.fs.memfs.Start() - ctx, cancel := context.WithCancel(context.Background()) r.fs.stopObserver = cancel - go r.observe(ctx, 10*time.Second) + + for _, fs := range r.fs.list { + fs.Start() + + if fs.Type() == "disk" { + go r.observe(ctx, fs, 10*time.Second) + } + } r.stopOnce = sync.Once{} }) @@ -212,14 +217,16 @@ func (r *restream) Stop() { r.fs.stopObserver() - r.fs.diskfs.Stop() - r.fs.memfs.Stop() + // Stop the cleanup jobs + for _, fs := range r.fs.list { + fs.Stop() + } r.startOnce = sync.Once{} }) } -func (r *restream) observe(ctx context.Context, interval time.Duration) { +func (r *restream) observe(ctx context.Context, fs fs.Filesystem, interval time.Duration) { ticker := time.NewTicker(interval) defer ticker.Stop() @@ -228,14 +235,14 @@ func (r *restream) observe(ctx context.Context, interval time.Duration) { case <-ctx.Done(): return case <-ticker.C: - size, limit := r.fs.diskfs.Size() + size, limit := fs.Size() isFull := false if limit > 0 && size >= limit { isFull = true } if isFull { - // Stop all tasks that write to disk + // Stop all tasks that write to this filesystem r.lock.Lock() for id, t := range r.tasks { if !t.valid { @@ -250,7 +257,7 @@ func (r *restream) observe(ctx context.Context, interval time.Duration) { continue } - r.logger.Warn().Log("Shutting down because disk is full") + r.logger.Warn().Log("Shutting down because filesystem is full") r.stopProcess(id) } r.lock.Unlock() @@ -267,7 +274,18 @@ func (r *restream) load() error { tasks := make(map[string]*task) + skills := r.ffmpeg.Skills() + ffversion := skills.FFmpeg.Version + if v, err := semver.NewVersion(ffversion); err == nil { + // Remove the patch level for the constraint + ffversion = fmt.Sprintf("%d.%d.0", v.Major(), v.Minor()) + } + for id, process := range data.Process { + if 
len(process.Config.FFVersion) == 0 { + process.Config.FFVersion = "^" + ffversion + } + t := &task{ id: id, reference: process.Reference, @@ -277,7 +295,7 @@ func (r *restream) load() error { } // Replace all placeholders in the config - t.config.ResolvePlaceholders(r.replace) + resolvePlaceholders(t.config, r.replace) tasks[id] = t } @@ -295,6 +313,23 @@ func (r *restream) load() error { // replaced, we can resolve references and validate the // inputs and outputs. for _, t := range tasks { + // Just warn if the ffmpeg version constraint doesn't match the available ffmpeg version + if c, err := semver.NewConstraint(t.config.FFVersion); err == nil { + if v, err := semver.NewVersion(skills.FFmpeg.Version); err == nil { + if !c.Check(v) { + r.logger.Warn().WithFields(log.Fields{ + "id": t.id, + "constraint": t.config.FFVersion, + "version": skills.FFmpeg.Version, + }).WithError(fmt.Errorf("available FFmpeg version doesn't fit constraint; you have to update this process to adjust the constraint")).Log("") + } + } else { + r.logger.Warn().WithField("id", t.id).WithError(err).Log("") + } + } else { + r.logger.Warn().WithField("id", t.id).WithError(err).Log("") + } + err := r.resolveAddresses(tasks, t.config) if err != nil { r.logger.Warn().WithField("id", t.id).WithError(err).Log("Ignoring") @@ -407,6 +442,12 @@ func (r *restream) createTask(config *app.Config) (*task, error) { return nil, fmt.Errorf("an empty ID is not allowed") } + config.FFVersion = "^" + r.ffmpeg.Skills().FFmpeg.Version + if v, err := semver.NewVersion(config.FFVersion); err == nil { + // Remove the patch level for the constraint + config.FFVersion = fmt.Sprintf("^%d.%d.0", v.Major(), v.Minor()) + } + process := &app.Process{ ID: config.ID, Reference: config.Reference, @@ -427,7 +468,7 @@ func (r *restream) createTask(config *app.Config) (*task, error) { logger: r.logger.WithField("id", process.ID), } - t.config.ResolvePlaceholders(r.replace) + resolvePlaceholders(t.config, r.replace) err := r.resolveAddresses(r.tasks, t.config) if err != nil { @@ -466,34 +507,50 @@ func (r *restream) createTask(config *app.Config) (*task, error) { } func (r *restream) setCleanup(id string, config *app.Config) { + rePrefix := regexp.MustCompile(`^([a-z]+):`) + for _, output := range config.Output { for _, c := range output.Cleanup { - if strings.HasPrefix(c.Pattern, "memfs:") { - r.fs.memfs.SetCleanup(id, []rfs.Pattern{ - { - Pattern: strings.TrimPrefix(c.Pattern, "memfs:"), - MaxFiles: c.MaxFiles, - MaxFileAge: time.Duration(c.MaxFileAge) * time.Second, - PurgeOnDelete: c.PurgeOnDelete, - }, - }) - } else if strings.HasPrefix(c.Pattern, "diskfs:") { - r.fs.diskfs.SetCleanup(id, []rfs.Pattern{ - { - Pattern: strings.TrimPrefix(c.Pattern, "diskfs:"), - MaxFiles: c.MaxFiles, - MaxFileAge: time.Duration(c.MaxFileAge) * time.Second, - PurgeOnDelete: c.PurgeOnDelete, - }, + matches := rePrefix.FindStringSubmatch(c.Pattern) + if matches == nil { + continue + } + + name := matches[1] + + // Support legacy names + if name == "diskfs" { + name = "disk" + } else if name == "memfs" { + name = "mem" + } + + for _, fs := range r.fs.list { + if fs.Name() != name { + continue + } + + pattern := rfs.Pattern{ + Pattern: rePrefix.ReplaceAllString(c.Pattern, ""), + MaxFiles: c.MaxFiles, + MaxFileAge: time.Duration(c.MaxFileAge) * time.Second, + PurgeOnDelete: c.PurgeOnDelete, + } + + fs.SetCleanup(id, []rfs.Pattern{ + pattern, }) + + break } } } } func (r *restream) unsetCleanup(id string) { - r.fs.diskfs.UnsetCleanup(id) - r.fs.memfs.UnsetCleanup(id) + for 
_, fs := range r.fs.list { + fs.UnsetCleanup(id) + } } func (r *restream) setPlayoutPorts(t *task) error { @@ -582,9 +639,23 @@ func (r *restream) validateConfig(config *app.Config) (bool, error) { return false, fmt.Errorf("the address for input '#%s:%s' must not be empty", config.ID, io.ID) } - io.Address, err = r.validateInputAddress(io.Address, r.fs.diskfs.Base()) - if err != nil { - return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err) + if len(r.fs.diskfs) != 0 { + maxFails := 0 + for _, fs := range r.fs.diskfs { + io.Address, err = r.validateInputAddress(io.Address, fs.Metadata("base")) + if err != nil { + maxFails++ + } + } + + if maxFails == len(r.fs.diskfs) { + return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err) + } + } else { + io.Address, err = r.validateInputAddress(io.Address, "/") + if err != nil { + return false, fmt.Errorf("the address for input '#%s:%s' (%s) is invalid: %w", config.ID, io.ID, io.Address, err) + } } } @@ -614,15 +685,33 @@ func (r *restream) validateConfig(config *app.Config) (bool, error) { return false, fmt.Errorf("the address for output '#%s:%s' must not be empty", config.ID, io.ID) } - isFile := false + if len(r.fs.diskfs) != 0 { + maxFails := 0 + for _, fs := range r.fs.diskfs { + isFile := false + io.Address, isFile, err = r.validateOutputAddress(io.Address, fs.Metadata("base")) + if err != nil { + maxFails++ + } - io.Address, isFile, err = r.validateOutputAddress(io.Address, r.fs.diskfs.Base()) - if err != nil { - return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err) - } + if isFile { + hasFiles = true + } + } - if isFile { - hasFiles = true + if maxFails == len(r.fs.diskfs) { + return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err) + } + } else { + isFile := false + io.Address, isFile, err = r.validateOutputAddress(io.Address, "/") + if err != nil { + return false, fmt.Errorf("the address for output '#%s:%s' is invalid: %w", config.ID, io.ID, err) + } + + if isFile { + hasFiles = true + } } } @@ -1053,7 +1142,7 @@ func (r *restream) reloadProcess(id string) error { t.config = t.process.Config.Clone() - t.config.ResolvePlaceholders(r.replace) + resolvePlaceholders(t.config, r.replace) err := r.resolveAddresses(r.tasks, t.config) if err != nil { @@ -1215,6 +1304,10 @@ func (r *restream) GetProcessLog(id string) (*app.Log, error) { } func (r *restream) Probe(id string) app.Probe { + return r.ProbeWithTimeout(id, 20*time.Second) +} + +func (r *restream) ProbeWithTimeout(id string, timeout time.Duration) app.Probe { r.lock.RLock() appprobe := app.Probe{} @@ -1252,7 +1345,7 @@ func (r *restream) Probe(id string) app.Probe { ffmpeg, err := r.ffmpeg.New(ffmpeg.ProcessConfig{ Reconnect: false, ReconnectDelay: 0, - StaleTimeout: 20 * time.Second, + StaleTimeout: timeout, Command: command, Parser: prober, Logger: task.logger, @@ -1401,3 +1494,97 @@ func (r *restream) GetMetadata(key string) (interface{}, error) { return data, nil } + +// resolvePlaceholders replaces all placeholders in the config. The config +// will be modified in place. 
+func resolvePlaceholders(config *app.Config, r replace.Replacer) { + vars := map[string]string{ + "processid": config.ID, + "reference": config.Reference, + } + + for i, option := range config.Options { + // Replace any known placeholders + option = r.Replace(option, "diskfs", "", vars, config, "global") + option = r.Replace(option, "fs:*", "", vars, config, "global") + + config.Options[i] = option + } + + // Resolving the given inputs + for i, input := range config.Input { + // Replace any known placeholders + input.ID = r.Replace(input.ID, "processid", config.ID, nil, nil, "input") + input.ID = r.Replace(input.ID, "reference", config.Reference, nil, nil, "input") + + vars["inputid"] = input.ID + + input.Address = r.Replace(input.Address, "inputid", input.ID, nil, nil, "input") + input.Address = r.Replace(input.Address, "processid", config.ID, nil, nil, "input") + input.Address = r.Replace(input.Address, "reference", config.Reference, nil, nil, "input") + input.Address = r.Replace(input.Address, "diskfs", "", vars, config, "input") + input.Address = r.Replace(input.Address, "memfs", "", vars, config, "input") + input.Address = r.Replace(input.Address, "fs:*", "", vars, config, "input") + input.Address = r.Replace(input.Address, "rtmp", "", vars, config, "input") + input.Address = r.Replace(input.Address, "srt", "", vars, config, "input") + + for j, option := range input.Options { + // Replace any known placeholders + option = r.Replace(option, "inputid", input.ID, nil, nil, "input") + option = r.Replace(option, "processid", config.ID, nil, nil, "input") + option = r.Replace(option, "reference", config.Reference, nil, nil, "input") + option = r.Replace(option, "diskfs", "", vars, config, "input") + option = r.Replace(option, "memfs", "", vars, config, "input") + option = r.Replace(option, "fs:*", "", vars, config, "input") + + input.Options[j] = option + } + + delete(vars, "inputid") + + config.Input[i] = input + } + + // Resolving the given outputs + for i, output := range config.Output { + // Replace any known placeholders + output.ID = r.Replace(output.ID, "processid", config.ID, nil, nil, "output") + output.ID = r.Replace(output.ID, "reference", config.Reference, nil, nil, "output") + + vars["outputid"] = output.ID + + output.Address = r.Replace(output.Address, "outputid", output.ID, nil, nil, "output") + output.Address = r.Replace(output.Address, "processid", config.ID, nil, nil, "output") + output.Address = r.Replace(output.Address, "reference", config.Reference, nil, nil, "output") + output.Address = r.Replace(output.Address, "diskfs", "", vars, config, "output") + output.Address = r.Replace(output.Address, "memfs", "", vars, config, "output") + output.Address = r.Replace(output.Address, "fs:*", "", vars, config, "output") + output.Address = r.Replace(output.Address, "rtmp", "", vars, config, "output") + output.Address = r.Replace(output.Address, "srt", "", vars, config, "output") + + for j, option := range output.Options { + // Replace any known placeholders + option = r.Replace(option, "outputid", output.ID, nil, nil, "output") + option = r.Replace(option, "processid", config.ID, nil, nil, "output") + option = r.Replace(option, "reference", config.Reference, nil, nil, "output") + option = r.Replace(option, "diskfs", "", vars, config, "output") + option = r.Replace(option, "memfs", "", vars, config, "output") + option = r.Replace(option, "fs:*", "", vars, config, "output") + + output.Options[j] = option + } + + for j, cleanup := range output.Cleanup { + // Replace any known 
placeholders + cleanup.Pattern = r.Replace(cleanup.Pattern, "outputid", output.ID, nil, nil, "output") + cleanup.Pattern = r.Replace(cleanup.Pattern, "processid", config.ID, nil, nil, "output") + cleanup.Pattern = r.Replace(cleanup.Pattern, "reference", config.Reference, nil, nil, "output") + + output.Cleanup[j] = cleanup + } + + delete(vars, "outputid") + + config.Output[i] = output + } +} diff --git a/restream/restream_test.go b/restream/restream_test.go index 18c53bf5..11b08240 100644 --- a/restream/restream_test.go +++ b/restream/restream_test.go @@ -9,11 +9,12 @@ import ( "github.com/datarhei/core/v16/internal/testhelper" "github.com/datarhei/core/v16/net" "github.com/datarhei/core/v16/restream/app" + "github.com/datarhei/core/v16/restream/replace" "github.com/stretchr/testify/require" ) -func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmpeg.Validator) (Restreamer, error) { +func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmpeg.Validator, replacer replace.Replacer) (Restreamer, error) { binary, err := testhelper.BuildBinary("ffmpeg", "../internal/testhelper") if err != nil { return nil, fmt.Errorf("failed to build helper program: %w", err) @@ -30,7 +31,8 @@ func getDummyRestreamer(portrange net.Portranger, validatorIn, validatorOut ffmp } rs, err := New(Config{ - FFmpeg: ffmpeg, + FFmpeg: ffmpeg, + Replace: replacer, }) if err != nil { return nil, err @@ -77,7 +79,7 @@ func getDummyProcess() *app.Config { } func TestAddProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -97,7 +99,7 @@ func TestAddProcess(t *testing.T) { } func TestAutostartProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -112,7 +114,7 @@ func TestAutostartProcess(t *testing.T) { } func TestAddInvalidProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) // Invalid process ID @@ -180,7 +182,7 @@ func TestAddInvalidProcess(t *testing.T) { } func TestRemoveProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -195,24 +197,98 @@ func TestRemoveProcess(t *testing.T) { require.NotEqual(t, nil, err, "Unset process found (%s)", process.ID) } -func TestGetProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) +func TestUpdateProcess(t *testing.T) { + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) - process := getDummyProcess() + process1 := getDummyProcess() + require.NotNil(t, process1) + process1.ID = "process1" - rs.AddProcess(process) + process2 := getDummyProcess() + require.NotNil(t, process2) + process2.ID = "process2" - _, err = rs.GetProcess(process.ID) - require.Equal(t, nil, err, "Process not found (%s)", process.ID) + err = rs.AddProcess(process1) + require.Equal(t, nil, err) + + err = rs.AddProcess(process2) + require.Equal(t, nil, err) + + process3 := getDummyProcess() + require.NotNil(t, process3) + process3.ID = "process2" + + err = rs.UpdateProcess("process1", process3) + require.Error(t, err) + + process3.ID = "process3" + err = rs.UpdateProcess("process1", process3) + require.NoError(t, err) + + _, err = rs.GetProcess(process1.ID) + 
require.Error(t, err) + + _, err = rs.GetProcess(process3.ID) + require.NoError(t, err) +} + +func TestGetProcess(t *testing.T) { + rs, err := getDummyRestreamer(nil, nil, nil, nil) + require.NoError(t, err) + + process1 := getDummyProcess() + process1.ID = "foo_aaa_1" + process1.Reference = "foo_aaa_1" + process2 := getDummyProcess() + process2.ID = "bar_bbb_2" + process2.Reference = "bar_bbb_2" + process3 := getDummyProcess() + process3.ID = "foo_ccc_3" + process3.Reference = "foo_ccc_3" + process4 := getDummyProcess() + process4.ID = "bar_ddd_4" + process4.Reference = "bar_ddd_4" + + rs.AddProcess(process1) + rs.AddProcess(process2) + rs.AddProcess(process3) + rs.AddProcess(process4) + + _, err = rs.GetProcess(process1.ID) + require.Equal(t, nil, err) list := rs.GetProcessIDs("", "") - require.Len(t, list, 1, "expected 1 process") - require.Equal(t, process.ID, list[0], "expected same process ID") + require.Len(t, list, 4) + require.ElementsMatch(t, []string{"foo_aaa_1", "bar_bbb_2", "foo_ccc_3", "bar_ddd_4"}, list) + + list = rs.GetProcessIDs("foo_*", "") + require.Len(t, list, 2) + require.ElementsMatch(t, []string{"foo_aaa_1", "foo_ccc_3"}, list) + + list = rs.GetProcessIDs("bar_*", "") + require.Len(t, list, 2) + require.ElementsMatch(t, []string{"bar_bbb_2", "bar_ddd_4"}, list) + + list = rs.GetProcessIDs("*_bbb_*", "") + require.Len(t, list, 1) + require.ElementsMatch(t, []string{"bar_bbb_2"}, list) + + list = rs.GetProcessIDs("", "foo_*") + require.Len(t, list, 2) + require.ElementsMatch(t, []string{"foo_aaa_1", "foo_ccc_3"}, list) + + list = rs.GetProcessIDs("", "bar_*") + require.Len(t, list, 2) + require.ElementsMatch(t, []string{"bar_bbb_2", "bar_ddd_4"}, list) + + list = rs.GetProcessIDs("", "*_bbb_*") + require.Len(t, list, 1) + require.ElementsMatch(t, []string{"bar_bbb_2"}, list) } func TestStartProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -238,7 +314,7 @@ func TestStartProcess(t *testing.T) { } func TestStopProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -263,7 +339,7 @@ func TestStopProcess(t *testing.T) { } func TestRestartProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -288,7 +364,7 @@ func TestRestartProcess(t *testing.T) { } func TestReloadProcess(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -318,8 +394,21 @@ func TestReloadProcess(t *testing.T) { rs.StopProcess(process.ID) } -func TestProcessData(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) +func TestProbeProcess(t *testing.T) { + rs, err := getDummyRestreamer(nil, nil, nil, nil) + require.NoError(t, err) + + process := getDummyProcess() + + rs.AddProcess(process) + + probe := rs.ProbeWithTimeout(process.ID, 5*time.Second) + + require.Equal(t, 3, len(probe.Streams)) +} + +func TestProcessMetadata(t *testing.T) { + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -340,7 +429,7 @@ func TestProcessData(t *testing.T) { } func TestLog(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, 
nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -373,7 +462,7 @@ func TestLog(t *testing.T) { } func TestPlayoutNoRange(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -396,7 +485,7 @@ func TestPlayoutRange(t *testing.T) { portrange, err := net.NewPortrange(3000, 3001) require.NoError(t, err) - rs, err := getDummyRestreamer(portrange, nil, nil) + rs, err := getDummyRestreamer(portrange, nil, nil, nil) require.NoError(t, err) process := getDummyProcess() @@ -417,7 +506,7 @@ func TestPlayoutRange(t *testing.T) { } func TestAddressReference(t *testing.T) { - rs, err := getDummyRestreamer(nil, nil, nil) + rs, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) process1 := getDummyProcess() @@ -449,7 +538,7 @@ func TestAddressReference(t *testing.T) { } func TestConfigValidation(t *testing.T) { - rsi, err := getDummyRestreamer(nil, nil, nil) + rsi, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) rs := rsi.(*restream) @@ -496,7 +585,7 @@ func TestConfigValidationFFmpeg(t *testing.T) { valOut, err := ffmpeg.NewValidator([]string{"^https?://", "^rtmp://"}, nil) require.NoError(t, err) - rsi, err := getDummyRestreamer(nil, valIn, valOut) + rsi, err := getDummyRestreamer(nil, valIn, valOut, nil) require.NoError(t, err) rs := rsi.(*restream) @@ -522,7 +611,7 @@ func TestConfigValidationFFmpeg(t *testing.T) { } func TestOutputAddressValidation(t *testing.T) { - rsi, err := getDummyRestreamer(nil, nil, nil) + rsi, err := getDummyRestreamer(nil, nil, nil, nil) require.NoError(t, err) rs := rsi.(*restream) @@ -561,3 +650,196 @@ func TestOutputAddressValidation(t *testing.T) { require.Equal(t, r.path, path) } } + +func TestMetadata(t *testing.T) { + rs, err := getDummyRestreamer(nil, nil, nil, nil) + require.NoError(t, err) + + process := getDummyProcess() + + data, _ := rs.GetMetadata("foobar") + require.Equal(t, nil, data, "nothing should be stored under the key") + + rs.SetMetadata("foobar", process) + + data, _ = rs.GetMetadata("foobar") + require.NotEqual(t, nil, data, "there should be something stored under the key") + + p := data.(*app.Config) + + require.Equal(t, process.ID, p.ID, "failed to retrieve stored data") +} + +func TestReplacer(t *testing.T) { + replacer := replace.New() + + replacer.RegisterTemplateFunc("diskfs", func(config *app.Config, section string) string { + return "/mnt/diskfs" + }, nil) + + replacer.RegisterTemplateFunc("fs:disk", func(config *app.Config, section string) string { + return "/mnt/diskfs" + }, nil) + + replacer.RegisterTemplateFunc("memfs", func(config *app.Config, section string) string { + return "http://localhost/mnt/memfs" + }, nil) + + replacer.RegisterTemplateFunc("fs:mem", func(config *app.Config, section string) string { + return "http://localhost/mnt/memfs" + }, nil) + + replacer.RegisterTemplateFunc("rtmp", func(config *app.Config, section string) string { + return "rtmp://localhost/app/{name}?token=foobar" + }, nil) + + replacer.RegisterTemplateFunc("srt", func(config *app.Config, section string) string { + template := "srt://localhost:6000?mode=caller&transtype=live&latency={latency}&streamid={name}" + if section == "output" { + template += ",mode:publish" + } else { + template += ",mode:request" + } + template += ",token:abcfoobar&passphrase=secret" + + return template + }, map[string]string{ + "latency": "20000", // 20 milliseconds, FFmpeg requires microseconds + }) + 
+ rsi, err := getDummyRestreamer(nil, nil, nil, replacer) + require.NoError(t, err) + + process := &app.Config{ + ID: "314159265359", + Reference: "refref", + Input: []app.ConfigIO{ + { + ID: "in_{processid}_{reference}", + Address: "input:{inputid}_process:{processid}_reference:{reference}_diskfs:{diskfs}/disk.txt_memfs:{memfs}/mem.txt_fsdisk:{fs:disk}/fsdisk.txt_fsmem:{fs:mem}/fsmem.txt_rtmp:{rtmp,name=pmtr}_srt:{srt,name=trs}_rtmp:{rtmp,name=$inputid}", + Options: []string{ + "-f", + "lavfi", + "-re", + "input:{inputid}", + "process:{processid}", + "reference:{reference}", + "diskfs:{diskfs}/disk.txt", + "memfs:{memfs}/mem.txt", + "fsdisk:{fs:disk}/fsdisk.txt", + "fsmem:{fs:mem}/$inputid.txt", + }, + }, + }, + Output: []app.ConfigIO{ + { + ID: "out_{processid}_{reference}", + Address: "output:{outputid}_process:{processid}_reference:{reference}_diskfs:{diskfs}/disk.txt_memfs:{memfs}/mem.txt_fsdisk:{fs:disk}/fsdisk.txt_fsmem:{fs:mem}/fsmem.txt_rtmp:{rtmp,name=$processid}_srt:{srt,name=$reference,latency=42}_rtmp:{rtmp,name=$outputid}", + Options: []string{ + "-codec", + "copy", + "-f", + "null", + "output:{outputid}", + "process:{processid}", + "reference:{reference}", + "diskfs:{diskfs}/disk.txt", + "memfs:{memfs}/mem.txt", + "fsdisk:{fs:disk}/fsdisk.txt", + "fsmem:{fs:mem}/$outputid.txt", + }, + Cleanup: []app.ConfigIOCleanup{ + { + Pattern: "pattern_{outputid}_{processid}_{reference}_{rtmp,name=$outputid}", + MaxFiles: 0, + MaxFileAge: 0, + PurgeOnDelete: false, + }, + }, + }, + }, + Options: []string{ + "-loglevel", + "info", + "{diskfs}/foobar_on_disk.txt", + "{memfs}/foobar_in_mem.txt", + "{fs:disk}/foobar_on_disk_aswell.txt", + "{fs:mem}/foobar_in_mem_aswell.txt", + }, + Reconnect: true, + ReconnectDelay: 10, + Autostart: false, + StaleTimeout: 0, + } + + err = rsi.AddProcess(process) + require.NoError(t, err) + + rs := rsi.(*restream) + + process = &app.Config{ + ID: "314159265359", + Reference: "refref", + FFVersion: "^4.0.2", + Input: []app.ConfigIO{ + { + ID: "in_314159265359_refref", + Address: "input:in_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/pmtr?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=20000&streamid=trs,mode:request,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/in_314159265359_refref?token=foobar", + Options: []string{ + "-f", + "lavfi", + "-re", + "input:in_314159265359_refref", + "process:314159265359", + "reference:refref", + "diskfs:/mnt/diskfs/disk.txt", + "memfs:http://localhost/mnt/memfs/mem.txt", + "fsdisk:/mnt/diskfs/fsdisk.txt", + "fsmem:http://localhost/mnt/memfs/$inputid.txt", + }, + Cleanup: []app.ConfigIOCleanup{}, + }, + }, + Output: []app.ConfigIO{ + { + ID: "out_314159265359_refref", + Address: "output:out_314159265359_refref_process:314159265359_reference:refref_diskfs:/mnt/diskfs/disk.txt_memfs:http://localhost/mnt/memfs/mem.txt_fsdisk:/mnt/diskfs/fsdisk.txt_fsmem:http://localhost/mnt/memfs/fsmem.txt_rtmp:rtmp://localhost/app/314159265359?token=foobar_srt:srt://localhost:6000?mode=caller&transtype=live&latency=42&streamid=refref,mode:publish,token:abcfoobar&passphrase=secret_rtmp:rtmp://localhost/app/out_314159265359_refref?token=foobar", + Options: []string{ + "-codec", + "copy", + "-f", + "null", + "output:out_314159265359_refref", + "process:314159265359", + "reference:refref", + "diskfs:/mnt/diskfs/disk.txt", + 
"memfs:http://localhost/mnt/memfs/mem.txt", + "fsdisk:/mnt/diskfs/fsdisk.txt", + "fsmem:http://localhost/mnt/memfs/$outputid.txt", + }, + Cleanup: []app.ConfigIOCleanup{ + { + Pattern: "pattern_out_314159265359_refref_314159265359_refref_{rtmp,name=$outputid}", + MaxFiles: 0, + MaxFileAge: 0, + PurgeOnDelete: false, + }, + }, + }, + }, + Options: []string{ + "-loglevel", + "info", + "/mnt/diskfs/foobar_on_disk.txt", + "{memfs}/foobar_in_mem.txt", + "/mnt/diskfs/foobar_on_disk_aswell.txt", + "http://localhost/mnt/memfs/foobar_in_mem_aswell.txt", + }, + Reconnect: true, + ReconnectDelay: 10, + Autostart: false, + StaleTimeout: 0, + } + + require.Equal(t, process, rs.tasks["314159265359"].config) +} diff --git a/restream/store/dummy.go b/restream/store/dummy.go deleted file mode 100644 index ea978e1f..00000000 --- a/restream/store/dummy.go +++ /dev/null @@ -1,37 +0,0 @@ -package store - -import ( - "github.com/datarhei/core/v16/log" -) - -type DummyConfig struct { - Logger log.Logger -} - -type dummyStore struct { - logger log.Logger -} - -func NewDummyStore(config DummyConfig) Store { - s := &dummyStore{ - logger: config.Logger, - } - - if s.logger == nil { - s.logger = log.New("") - } - - return s -} - -func (sb *dummyStore) Store(data StoreData) error { - sb.logger.Debug().Log("Data stored") - - return nil -} - -func (sb *dummyStore) Load() (StoreData, error) { - sb.logger.Debug().Log("Data loaded") - - return NewStoreData(), nil -} diff --git a/restream/store/fixtures/v3_empty.json b/restream/store/fixtures/v3_empty.json new file mode 100644 index 00000000..af2b7bd4 --- /dev/null +++ b/restream/store/fixtures/v3_empty.json @@ -0,0 +1,3 @@ +{ + "version": 3 +} \ No newline at end of file diff --git a/restream/store/json.go b/restream/store/json.go index 41e7956a..36e5720e 100644 --- a/restream/store/json.go +++ b/restream/store/json.go @@ -7,18 +7,19 @@ import ( "sync" "github.com/datarhei/core/v16/encoding/json" - "github.com/datarhei/core/v16/io/file" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/log" ) type JSONConfig struct { - Dir string - Logger log.Logger + Filesystem fs.Filesystem + Filepath string // Full path to the database file + Logger log.Logger } type jsonStore struct { - filename string - dir string + fs fs.Filesystem + filepath string logger log.Logger // Mutex to serialize access to the backend @@ -27,25 +28,33 @@ type jsonStore struct { var version uint64 = 4 -func NewJSONStore(config JSONConfig) Store { +func NewJSON(config JSONConfig) (Store, error) { s := &jsonStore{ - filename: "db.json", - dir: config.Dir, + fs: config.Filesystem, + filepath: config.Filepath, logger: config.Logger, } - if s.logger == nil { - s.logger = log.New("JSONStore") + if len(s.filepath) == 0 { + s.filepath = "/db.json" } - return s + if s.fs == nil { + return nil, fmt.Errorf("no valid filesystem provided") + } + + if s.logger == nil { + s.logger = log.New("") + } + + return s, nil } func (s *jsonStore) Load() (StoreData, error) { s.lock.Lock() defer s.lock.Unlock() - data, err := s.load(version) + data, err := s.load(s.filepath, version) if err != nil { return NewStoreData(), err } @@ -63,7 +72,7 @@ func (s *jsonStore) Store(data StoreData) error { s.lock.RLock() defer s.lock.RUnlock() - err := s.store(data) + err := s.store(s.filepath, data) if err != nil { return fmt.Errorf("failed to store data: %w", err) } @@ -71,34 +80,18 @@ func (s *jsonStore) Store(data StoreData) error { return nil } -func (s *jsonStore) store(data StoreData) error { +func (s *jsonStore) 
store(filepath string, data StoreData) error { jsondata, err := gojson.MarshalIndent(&data, "", " ") if err != nil { return err } - tmpfile, err := os.CreateTemp(s.dir, s.filename) + _, _, err = s.fs.WriteFileSafe(filepath, jsondata) if err != nil { return err } - defer os.Remove(tmpfile.Name()) - - if _, err := tmpfile.Write(jsondata); err != nil { - return err - } - - if err := tmpfile.Close(); err != nil { - return err - } - - filename := s.dir + "/" + s.filename - - if err := file.Rename(tmpfile.Name(), filename); err != nil { - return err - } - - s.logger.WithField("file", filename).Debug().Log("Stored data") + s.logger.WithField("file", filepath).Debug().Log("Stored data") return nil } @@ -107,12 +100,10 @@ type storeVersion struct { Version uint64 `json:"version"` } -func (s *jsonStore) load(version uint64) (StoreData, error) { +func (s *jsonStore) load(filepath string, version uint64) (StoreData, error) { r := NewStoreData() - filename := s.dir + "/" + s.filename - - _, err := os.Stat(filename) + _, err := s.fs.Stat(filepath) if err != nil { if os.IsNotExist(err) { return r, nil @@ -121,7 +112,7 @@ func (s *jsonStore) load(version uint64) (StoreData, error) { return r, err } - jsondata, err := os.ReadFile(filename) + jsondata, err := s.fs.ReadFile(filepath) if err != nil { return r, err } @@ -140,7 +131,7 @@ func (s *jsonStore) load(version uint64) (StoreData, error) { return r, json.FormatError(jsondata, err) } - s.logger.WithField("file", filename).Debug().Log("Read data") + s.logger.WithField("file", filepath).Debug().Log("Read data") return r, nil } diff --git a/restream/store/json_test.go b/restream/store/json_test.go index 80200ff6..8b2c4698 100644 --- a/restream/store/json_test.go +++ b/restream/store/json_test.go @@ -3,46 +3,110 @@ package store import ( "testing" - "github.com/datarhei/core/v16/log" - + "github.com/datarhei/core/v16/io/fs" "github.com/stretchr/testify/require" ) -func TestNew(t *testing.T) { - store := NewJSONStore(JSONConfig{}) +func getFS(t *testing.T) fs.Filesystem { + fs, err := fs.NewRootedDiskFilesystem(fs.RootedDiskConfig{ + Root: ".", + }) + require.NoError(t, err) + info, err := fs.Stat("./fixtures/v4_empty.json") + require.NoError(t, err) + require.Equal(t, "/fixtures/v4_empty.json", info.Name()) + + return fs +} + +func TestNew(t *testing.T) { + store, err := NewJSON(JSONConfig{ + Filesystem: getFS(t), + }) + require.NoError(t, err) require.NotEmpty(t, store) } func TestLoad(t *testing.T) { - store := &jsonStore{ - filename: "v4_empty.json", - dir: "./fixtures", - logger: log.New(""), - } + store, err := NewJSON(JSONConfig{ + Filesystem: getFS(t), + Filepath: "./fixtures/v4_empty.json", + }) + require.NoError(t, err) - _, err := store.Load() - require.Equal(t, nil, err) + _, err = store.Load() + require.NoError(t, err) } func TestLoadFailed(t *testing.T) { - store := &jsonStore{ - filename: "v4_invalid.json", - dir: "./fixtures", - logger: log.New(""), - } + store, err := NewJSON(JSONConfig{ + Filesystem: getFS(t), + Filepath: "./fixtures/v4_invalid.json", + }) + require.NoError(t, err) - _, err := store.Load() - require.NotEqual(t, nil, err) + _, err = store.Load() + require.Error(t, err) } func TestIsEmpty(t *testing.T) { - store := &jsonStore{ - filename: "v4_empty.json", - dir: "./fixtures", - logger: log.New(""), - } + store, err := NewJSON(JSONConfig{ + Filesystem: getFS(t), + Filepath: "./fixtures/v4_empty.json", + }) + require.NoError(t, err) - data, _ := store.Load() + data, err := store.Load() + require.NoError(t, err) + 
require.Equal(t, true, data.IsEmpty()) +} + +func TestNotExists(t *testing.T) { + store, err := NewJSON(JSONConfig{ + Filesystem: getFS(t), + Filepath: "./fixtures/v4_notexist.json", + }) + require.NoError(t, err) + + data, err := store.Load() + require.NoError(t, err) + require.Equal(t, true, data.IsEmpty()) +} + +func TestStore(t *testing.T) { + fs := getFS(t) + fs.Remove("./fixtures/v4_store.json") + + store, err := NewJSON(JSONConfig{ + Filesystem: fs, + Filepath: "./fixtures/v4_store.json", + }) + require.NoError(t, err) + + data, err := store.Load() + require.NoError(t, err) + require.Equal(t, true, data.IsEmpty()) + + data.Metadata.System["somedata"] = "foobar" + + store.Store(data) + + data2, err := store.Load() + require.NoError(t, err) + require.Equal(t, data, data2) + + fs.Remove("./fixtures/v4_store.json") +} + +func TestInvalidVersion(t *testing.T) { + store, err := NewJSON(JSONConfig{ + Filesystem: getFS(t), + Filepath: "./fixtures/v3_empty.json", + }) + require.NoError(t, err) + + data, err := store.Load() + require.Error(t, err) require.Equal(t, true, data.IsEmpty()) } diff --git a/rtmp/channel.go b/rtmp/channel.go index 9ea68fe3..26206e5a 100644 --- a/rtmp/channel.go +++ b/rtmp/channel.go @@ -3,7 +3,6 @@ package rtmp import ( "context" "net" - "net/url" "sync" "time" @@ -94,11 +93,11 @@ type channel struct { isProxy bool } -func newChannel(conn connection, u *url.URL, reference string, remote net.Addr, streams []av.CodecData, isProxy bool, collector session.Collector) *channel { +func newChannel(conn connection, playPath string, reference string, remote net.Addr, streams []av.CodecData, isProxy bool, collector session.Collector) *channel { ch := &channel{ - path: u.Path, + path: playPath, reference: reference, - publisher: newClient(conn, u.Path, collector), + publisher: newClient(conn, playPath, collector), subscriber: make(map[string]*client), collector: collector, streams: streams, diff --git a/rtmp/rtmp.go b/rtmp/rtmp.go index 8366b60c..f7ff12bc 100644 --- a/rtmp/rtmp.go +++ b/rtmp/rtmp.go @@ -198,38 +198,72 @@ func (s *server) log(who, action, path, message string, client net.Addr) { }).Log(message) } -// handlePlay is called when a RTMP client wants to play a stream -func (s *server) handlePlay(conn *rtmp.Conn) { - defer conn.Close() - - remote := conn.NetConn().RemoteAddr() - - // Check the token - q := conn.URL.Query() +// getToken returns the path and the token found in the URL. If the token +// was part of the path, the token is removed from the path. The token in +// the query string takes precedence. The token in the path is assumed to +// be the last path element. +func getToken(u *url.URL) (string, string) { + q := u.Query() token := q.Get("token") - if len(s.token) != 0 && s.token != token { - s.log("PLAY", "FORBIDDEN", conn.URL.Path, "invalid token ("+token+")", remote) - return + if len(token) != 0 { + // The token was in the query. 
Return the unmodified path and the token + } + + pathElements := strings.Split(u.EscapedPath(), "/") + nPathElements := len(pathElements) + + if nPathElements == 0 { + return u.Path, "" + } + + // Return the path without the token + return strings.Join(pathElements[:nPathElements-1], "/"), pathElements[nPathElements-1] +} + +// handlePlay is called when a RTMP client wants to play a stream +func (s *server) handlePlay(conn *rtmp.Conn) { + client := conn.NetConn().RemoteAddr() + + defer conn.Close() + + playPath := conn.URL.Path + + // Check the token in the URL if one is required + if len(s.token) != 0 { + path, token := getToken(conn.URL) + + if len(token) == 0 { + s.log("PLAY", "FORBIDDEN", path, "no streamkey provided", client) + return + } + + if s.token != token { + s.log("PLAY", "FORBIDDEN", path, "invalid streamkey ("+token+")", client) + return + } + + playPath = path } // Look for the stream s.lock.RLock() - ch := s.channels[conn.URL.Path] + ch := s.channels[playPath] s.lock.RUnlock() if ch == nil { // Check in the cluster for that stream url, err := s.cluster.GetURL("rtmp:" + conn.URL.Path) if err != nil { - s.log("PLAY", "NOTFOUND", conn.URL.Path, "", remote) + s.log("PLAY", "NOTFOUND", conn.URL.Path, "", client) return } src, err := avutil.Open(url) if err != nil { s.logger.Error().WithField("address", url).WithError(err).Log("Proxying address failed") - s.log("PLAY", "NOTFOUND", conn.URL.Path, "", remote) + s.log("PLAY", "NOTFOUND", conn.URL.Path, "", client) return } @@ -239,13 +273,13 @@ func (s *server) handlePlay(conn *rtmp.Conn) { wg.Add(1) go func() { - s.log("PLAY", "PROXYSTART", url, "", remote) + s.log("PLAY", "PROXYSTART", url, "", client) wg.Done() - err := s.publish(c, conn.URL, remote, true) + err := s.publish(c, playPath, client, true) if err != nil { s.logger.Error().WithField("address", url).WithError(err).Log("Proxying address failed") } - s.log("PLAY", "PROXYSTOP", url, "", remote) + s.log("PLAY", "PROXYSTOP", url, "", client) }() // Wait for the goroutine to start @@ -276,7 +310,7 @@ func (s *server) handlePlay(conn *rtmp.Conn) { // Send the metadata to the client conn.WriteHeader(ch.streams) - s.log("PLAY", "START", conn.URL.Path, "", remote) + s.log("PLAY", "START", playPath, "", client) // Get a cursor and apply filters cursor := ch.queue.Oldest() @@ -289,7 +323,7 @@ func (s *server) handlePlay(conn *rtmp.Conn) { } // Adjust the timestamp such that the stream starts from 0 - filters = append(filters, &pktque.FixTime{StartFromZero: true, MakeIncrement: true}) + filters = append(filters, &pktque.FixTime{StartFromZero: true, MakeIncrement: false}) demuxer := &pktque.FilterDemuxer{ Filter: filters, @@ -303,57 +337,64 @@ func (s *server) handlePlay(conn *rtmp.Conn) { ch.RemoveSubscriber(id) - s.log("PLAY", "STOP", conn.URL.Path, "", remote) + s.log("PLAY", "STOP", playPath, "", client) } else { - s.log("PLAY", "NOTFOUND", conn.URL.Path, "", remote) + s.log("PLAY", "NOTFOUND", playPath, "", client) } } // handlePublish is called when a RTMP client wants to publish a stream func (s *server) handlePublish(conn *rtmp.Conn) { + client := conn.NetConn().RemoteAddr() + defer conn.Close() - remote := conn.NetConn().RemoteAddr() + playPath := conn.URL.Path if len(s.token) != 0 { - // Check the token - token := conn.URL.Query().Get("token") + path, token := getToken(conn.URL) - if s.token != token { - s.log("PUBLISH", "FORBIDDEN", conn.URL.Path, "invalid token ("+token+")", remote) + if len(token) == 0 { + s.log("PUBLISH", "FORBIDDEN", path, "no 
streamkey provided", client) return } + + if s.token != token { + s.log("PLAY", "FORBIDDEN", path, "invalid streamkey ("+token+")", client) + return + } + + playPath = path } // Check the app patch - if !strings.HasPrefix(conn.URL.Path, s.app) { - s.log("PUBLISH", "FORBIDDEN", conn.URL.Path, "invalid app", remote) + if !strings.HasPrefix(playPath, s.app) { + s.log("PUBLISH", "FORBIDDEN", playPath, "invalid app", client) return } - err := s.publish(conn, conn.URL, remote, false) + err := s.publish(conn, playPath, client, false) if err != nil { - s.logger.WithField("path", conn.URL.Path).WithError(err).Log("") + s.logger.WithField("path", playPath).WithError(err).Log("") } } -func (s *server) publish(src connection, u *url.URL, remote net.Addr, isProxy bool) error { +func (s *server) publish(src connection, playPath string, client net.Addr, isProxy bool) error { // Check the streams if it contains any valid/known streams streams, _ := src.Streams() if len(streams) == 0 { - s.log("PUBLISH", "INVALID", u.Path, "no streams available", remote) - return fmt.Errorf("no streams are available") + s.log("PUBLISH", "INVALID", playPath, "no streams available", client) } s.lock.Lock() - ch := s.channels[u.Path] + ch := s.channels[playPath] if ch == nil { - reference := strings.TrimPrefix(strings.TrimSuffix(u.Path, filepath.Ext(u.Path)), s.app+"/") + reference := strings.TrimPrefix(strings.TrimSuffix(playPath, filepath.Ext(playPath)), s.app+"/") // Create a new channel - ch = newChannel(src, u, reference, remote, streams, isProxy, s.collector) + ch = newChannel(src, playPath, reference, client, streams, isProxy, s.collector) for _, stream := range streams { typ := stream.Type() @@ -366,7 +407,7 @@ func (s *server) publish(src connection, u *url.URL, remote net.Addr, isProxy bo } } - s.channels[u.Path] = ch + s.channels[playPath] = ch } else { ch = nil } @@ -374,26 +415,26 @@ func (s *server) publish(src connection, u *url.URL, remote net.Addr, isProxy bo s.lock.Unlock() if ch == nil { - s.log("PUBLISH", "CONFLICT", u.Path, "already publishing", remote) + s.log("PUBLISH", "CONFLICT", playPath, "already publishing", client) return fmt.Errorf("already publishing") } - s.log("PUBLISH", "START", u.Path, "", remote) + s.log("PUBLISH", "START", playPath, "", client) for _, stream := range streams { - s.log("PUBLISH", "STREAM", u.Path, stream.Type().String(), remote) + s.log("PUBLISH", "STREAM", playPath, stream.Type().String(), client) } // Ingest the data, blocks until done avutil.CopyPackets(ch.queue, src) s.lock.Lock() - delete(s.channels, u.Path) + delete(s.channels, playPath) s.lock.Unlock() ch.Close() - s.log("PUBLISH", "STOP", u.Path, "", remote) + s.log("PUBLISH", "STOP", playPath, "", client) return nil } diff --git a/rtmp/rtmp_test.go b/rtmp/rtmp_test.go new file mode 100644 index 00000000..20bb5274 --- /dev/null +++ b/rtmp/rtmp_test.go @@ -0,0 +1,26 @@ +package rtmp + +import ( + "net/url" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestToken(t *testing.T) { + data := [][]string{ + {"/foo/bar", "/foo", "bar"}, + {"/foo/bar?token=abc", "/foo/bar", "abc"}, + {"/foo/bar/abc", "/foo/bar", "abc"}, + } + + for _, d := range data { + u, err := url.Parse(d[0]) + require.NoError(t, err) + + path, token := getToken(u) + + require.Equal(t, d[1], path, "url=%s", u.String()) + require.Equal(t, d[2], token, "url=%s", u.String()) + } +} diff --git a/run.sh b/run.sh index 1521c30f..9dce22ee 100755 --- a/run.sh +++ b/run.sh @@ -8,6 +8,15 @@ if [ $? 
-ne 0 ]; then exit 1 fi +# Run the FFmpeg migration program. In case an FFmpeg 5 binary is present, it will create a +# backup of the current DB and modify the FFmpeg parameters so that they are compatible +# with FFmpeg 5. + +./bin/ffmigrate +if [ $? -ne 0 ]; then + exit 1 +fi + # Now run the core with the possibly converted configuration. ./bin/core diff --git a/service/api/api.go b/service/api/api.go index 30060289..3afd02c5 100644 --- a/service/api/api.go +++ b/service/api/api.go @@ -9,6 +9,8 @@ import ( "net/http" "strings" "time" + + "github.com/datarhei/core/v16/log" ) type API interface { @@ -19,6 +21,7 @@ type Config struct { URL string Token string Client *http.Client + Logger log.Logger } type api struct { @@ -29,6 +32,8 @@ type api struct { accessTokenType string client *http.Client + + logger log.Logger } func New(config Config) (API, error) { @@ -36,6 +41,11 @@ func New(config Config) (API, error) { url: config.URL, token: config.Token, client: config.Client, + logger: config.Logger, + } + + if a.logger == nil { + a.logger = log.New("") } if !strings.HasSuffix(a.url, "/") { @@ -95,7 +105,7 @@ func (c *copyReader) Read(p []byte) (int, error) { if err == io.EOF { c.reader = c.copy - c.copy = new(bytes.Buffer) + c.copy = &bytes.Buffer{} } return i, err diff --git a/service/service.go b/service/service.go index 861927d5..c90c00b6 100644 --- a/service/service.go +++ b/service/service.go @@ -55,7 +55,7 @@ func New(config Config) (Service, error) { } if s.logger == nil { - s.logger = log.New("Service") + s.logger = log.New("") } s.logger = s.logger.WithField("url", config.URL) @@ -214,7 +214,10 @@ func (s *service) collect() (time.Duration, error) { return 15 * time.Minute, fmt.Errorf("failed to send monitor data to service: %w", err) } - s.logger.Debug().WithField("next", r.Next).Log("Sent monitor data") + s.logger.Debug().WithFields(log.Fields{ + "next": r.Next, + "data": data, + }).Log("Sent monitor data") if r.Next == 0 { r.Next = 5 * 60 @@ -230,6 +233,8 @@ func (s *service) Start() { go s.tick(ctx, time.Second) s.stopOnce = sync.Once{} + + s.logger.Info().Log("Connected") }) } @@ -237,6 +242,8 @@ func (s *service) Stop() { s.stopOnce.Do(func() { s.stopTicker() s.startOnce = sync.Once{} + + s.logger.Info().Log("Disconnected") }) } diff --git a/session/collector.go b/session/collector.go index c000a674..f8a473d9 100644 --- a/session/collector.go +++ b/session/collector.go @@ -3,13 +3,11 @@ package session import ( "context" "encoding/json" - "os" - "path/filepath" "sort" "sync" "time" - "github.com/datarhei/core/v16/io/file" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/net" @@ -244,6 +242,7 @@ type collector struct { persist struct { enable bool + fs fs.Filesystem path string interval time.Duration done context.CancelFunc @@ -275,7 +274,7 @@ const ( // NewCollector returns a new collector according to the provided configuration. If such a // collector can't be created, a NullCollector is returned. 
func NewCollector(config CollectorConfig) Collector { - collector, err := newCollector("", "", nil, config) + collector, err := newCollector("", nil, nil, config) if err != nil { return NewNullCollector() } @@ -285,7 +284,7 @@ func NewCollector(config CollectorConfig) Collector { return collector } -func newCollector(id, persistPath string, logger log.Logger, config CollectorConfig) (*collector, error) { +func newCollector(id string, persistFS fs.Filesystem, logger log.Logger, config CollectorConfig) (*collector, error) { c := &collector{ maxRxBitrate: float64(config.MaxRxBitrate), maxTxBitrate: float64(config.MaxTxBitrate), @@ -379,11 +378,12 @@ func newCollector(id, persistPath string, logger log.Logger, config CollectorCon c.history.Sessions = make(map[string]totals) - c.persist.enable = len(persistPath) != 0 - c.persist.path = persistPath + c.persist.enable = persistFS != nil + c.persist.fs = persistFS + c.persist.path = "/" + id + ".json" c.persist.interval = config.PersistInterval - c.loadHistory(c.persist.path, &c.history) + c.loadHistory(c.persist.fs, c.persist.path, &c.history) c.stopOnce.Do(func() {}) @@ -433,7 +433,7 @@ func (c *collector) Persist() { c.lock.history.RLock() defer c.lock.history.RUnlock() - c.saveHistory(c.persist.path, &c.history) + c.saveHistory(c.persist.fs, c.persist.path, &c.history) } func (c *collector) persister(ctx context.Context, interval time.Duration) { @@ -450,17 +450,20 @@ func (c *collector) persister(ctx context.Context, interval time.Duration) { } } -func (c *collector) loadHistory(path string, data *history) { - c.logger.WithComponent("SessionStore").WithField("path", path).Debug().Log("Loading history") - - if len(path) == 0 { +func (c *collector) loadHistory(fs fs.Filesystem, path string, data *history) { + if fs == nil { return } + c.logger.WithComponent("SessionStore").WithFields(log.Fields{ + "base": fs.Metadata("base"), + "path": path, + }).Debug().Log("Loading history") + c.lock.persist.Lock() defer c.lock.persist.Unlock() - jsondata, err := os.ReadFile(path) + jsondata, err := fs.ReadFile(path) if err != nil { return } @@ -470,12 +473,15 @@ func (c *collector) loadHistory(path string, data *history) { } } -func (c *collector) saveHistory(path string, data *history) { - if len(path) == 0 { +func (c *collector) saveHistory(fs fs.Filesystem, path string, data *history) { + if fs == nil { return } - c.logger.WithComponent("SessionStore").WithField("path", path).Debug().Log("Storing history") + c.logger.WithComponent("SessionStore").WithFields(log.Fields{ + "base": fs.Metadata("base"), + "path": path, + }).Debug().Log("Storing history") c.lock.persist.Lock() defer c.lock.persist.Unlock() @@ -485,27 +491,10 @@ func (c *collector) saveHistory(path string, data *history) { return } - dir := filepath.Dir(path) - filename := filepath.Base(path) - - tmpfile, err := os.CreateTemp(dir, filename) + _, _, err = fs.WriteFileSafe(path, jsondata) if err != nil { return } - - defer os.Remove(tmpfile.Name()) - - if _, err := tmpfile.Write(jsondata); err != nil { - return - } - - if err := tmpfile.Close(); err != nil { - return - } - - if err := file.Rename(tmpfile.Name(), path); err != nil { - return - } } func (c *collector) IsCollectableIP(ip string) bool { diff --git a/session/collector_test.go b/session/collector_test.go index 2e5b44f8..4e9a0d52 100644 --- a/session/collector_test.go +++ b/session/collector_test.go @@ -8,7 +8,7 @@ import ( ) func TestRegisterSession(t *testing.T) { - c, err := newCollector("", "", nil, CollectorConfig{ + c, err := 
newCollector("", nil, nil, CollectorConfig{ InactiveTimeout: time.Hour, SessionTimeout: time.Hour, }) @@ -31,7 +31,7 @@ func TestRegisterSession(t *testing.T) { } func TestInactiveSession(t *testing.T) { - c, err := newCollector("", "", nil, CollectorConfig{ + c, err := newCollector("", nil, nil, CollectorConfig{ InactiveTimeout: time.Second, SessionTimeout: time.Hour, }) @@ -52,7 +52,7 @@ func TestInactiveSession(t *testing.T) { } func TestActivateSession(t *testing.T) { - c, err := newCollector("", "", nil, CollectorConfig{ + c, err := newCollector("", nil, nil, CollectorConfig{ InactiveTimeout: time.Second, SessionTimeout: time.Second, }) @@ -73,7 +73,7 @@ func TestActivateSession(t *testing.T) { } func TestIngress(t *testing.T) { - c, err := newCollector("", "", nil, CollectorConfig{ + c, err := newCollector("", nil, nil, CollectorConfig{ InactiveTimeout: time.Second, SessionTimeout: time.Hour, }) @@ -92,7 +92,7 @@ func TestIngress(t *testing.T) { } func TestEgress(t *testing.T) { - c, err := newCollector("", "", nil, CollectorConfig{ + c, err := newCollector("", nil, nil, CollectorConfig{ InactiveTimeout: time.Second, SessionTimeout: time.Hour, }) @@ -111,7 +111,7 @@ func TestEgress(t *testing.T) { } func TestNbSessions(t *testing.T) { - c, err := newCollector("", "", nil, CollectorConfig{ + c, err := newCollector("", nil, nil, CollectorConfig{ InactiveTimeout: time.Hour, SessionTimeout: time.Hour, }) diff --git a/session/registry.go b/session/registry.go index adf40530..405f010a 100644 --- a/session/registry.go +++ b/session/registry.go @@ -2,20 +2,18 @@ package session import ( "fmt" - "os" - "path/filepath" "regexp" "sync" + "github.com/datarhei/core/v16/io/fs" "github.com/datarhei/core/v16/log" ) // Config is the configuration for creating a new registry type Config struct { - // PersistDir is a path to the directory where the session - // history will be persisted. If it is an empty value, the + // PersistFS is a filesystem in whose root the session history will be persisted. If it is nil, the // history will not be persisted. - PersistDir string + PersistFS fs.Filesystem // Logger is an instance of a logger. If it is nil, no logs // will be written. @@ -52,9 +50,9 @@ type Registry interface { } type registry struct { - collector map[string]*collector - persistDir string - logger log.Logger + collector map[string]*collector + persistFS fs.Filesystem + logger log.Logger lock sync.Mutex } @@ -63,21 +61,15 @@ type registry struct { // is non-nil if the PersistDir from the config can't be created. 
func New(conf Config) (Registry, error) { r := &registry{ - collector: make(map[string]*collector), - persistDir: conf.PersistDir, - logger: conf.Logger, + collector: make(map[string]*collector), + persistFS: conf.PersistFS, + logger: conf.Logger, } if r.logger == nil { r.logger = log.New("Session") } - if len(r.persistDir) != 0 { - if err := os.MkdirAll(r.persistDir, 0700); err != nil { - return nil, err - } - } - return r, nil } @@ -99,12 +91,7 @@ func (r *registry) Register(id string, conf CollectorConfig) (Collector, error) return nil, fmt.Errorf("a collector with the ID '%s' already exists", id) } - persistPath := "" - if len(r.persistDir) != 0 { - persistPath = filepath.Join(r.persistDir, id+".json") - } - - m, err := newCollector(id, persistPath, r.logger, conf) + m, err := newCollector(id, r.persistFS, r.logger, conf) if err != nil { return nil, err } diff --git a/srt/channel.go b/srt/channel.go index b42c06aa..801d4f90 100644 --- a/srt/channel.go +++ b/srt/channel.go @@ -49,10 +49,11 @@ func (c *client) ticker(ctx context.Context) { case <-ctx.Done(): return case <-ticker.C: - stats := c.conn.Stats() + stats := &srt.Statistics{} + c.conn.Stats(stats) - rxbytes := stats.ByteRecv - txbytes := stats.ByteSent + rxbytes := stats.Accumulated.ByteRecv + txbytes := stats.Accumulated.ByteSent c.collector.Ingress(c.id, int64(rxbytes-c.rxbytes)) c.collector.Egress(c.id, int64(txbytes-c.txbytes)) diff --git a/srt/srt.go b/srt/srt.go index 4bb475f5..c9cb6cb2 100644 --- a/srt/srt.go +++ b/srt/srt.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "net" + "regexp" "strings" "sync" "time" @@ -111,8 +112,6 @@ func New(config Config) (Server, error) { srtconfig := srt.DefaultConfig() - srtconfig.KMPreAnnounce = 200 - srtconfig.KMRefreshRate = 10000 srtconfig.Passphrase = config.Passphrase srtconfig.Logger = s.srtlogger @@ -176,8 +175,11 @@ func (s *server) Channels() []Channel { socket2channel[socketId] = len(channels) + stats := &srt.Statistics{} + ch.publisher.conn.Stats(stats) + channel.Connections[socketId] = Connection{ - Stats: ch.publisher.conn.Stats(), + Stats: *stats, Log: map[string][]Log{}, } @@ -185,8 +187,11 @@ func (s *server) Channels() []Channel { socketId := c.conn.SocketId() channel.Subscriber = append(channel.Subscriber, socketId) + stats := &srt.Statistics{} + c.conn.Stats(stats) + channel.Connections[socketId] = Connection{ - Stats: c.conn.Stats(), + Stats: *stats, Log: map[string][]Log{}, } @@ -276,6 +281,59 @@ type streamInfo struct { func parseStreamId(streamid string) (streamInfo, error) { si := streamInfo{} + if strings.HasPrefix(streamid, "#!:") { + return parseOldStreamId(streamid) + } + + re := regexp.MustCompile(`,(token|mode):(.+)`) + + results := map[string]string{} + + idEnd := -1 + value := streamid + key := "" + + for { + matches := re.FindStringSubmatchIndex(value) + if matches == nil { + break + } + + if idEnd < 0 { + idEnd = matches[2] - 1 + } + + if len(key) != 0 { + results[key] = value[:matches[2]-1] + } + + key = value[matches[2]:matches[3]] + value = value[matches[4]:matches[5]] + + results[key] = value + } + + if idEnd < 0 { + idEnd = len(streamid) + } + + si.resource = streamid[:idEnd] + if token, ok := results["token"]; ok { + si.token = token + } + + if mode, ok := results["mode"]; ok { + si.mode = mode + } else { + si.mode = "request" + } + + return si, nil +} + +func parseOldStreamId(streamid string) (streamInfo, error) { + si := streamInfo{} + if !strings.HasPrefix(streamid, "#!:") { return si, fmt.Errorf("unknown streamid format") } @@ -284,7 +342,7 @@ func 
parseStreamId(streamid string) (streamInfo, error) { kvs := strings.Split(streamid, ",") - split := func(s, sep string) (string, string, error) { + splitFn := func(s, sep string) (string, string, error) { splitted := strings.SplitN(s, sep, 2) if len(splitted) != 2 { @@ -295,7 +353,7 @@ func parseStreamId(streamid string) (streamInfo, error) { } for _, kv := range kvs { - key, value, err := split(kv, "=") + key, value, err := splitFn(kv, "=") if err != nil { continue } diff --git a/srt/srt_test.go b/srt/srt_test.go index b4b2d843..91ae7ed1 100644 --- a/srt/srt_test.go +++ b/srt/srt_test.go @@ -8,7 +8,25 @@ import ( func TestParseStreamId(t *testing.T) { streamids := map[string]streamInfo{ - "bla": {}, + "bla": {resource: "bla", mode: "request"}, + "bla,mode:publish": {resource: "bla", mode: "publish"}, + "123456789": {resource: "123456789", mode: "request"}, + "bla,token:foobar": {resource: "bla", token: "foobar", mode: "request"}, + "bla,token:foo,bar": {resource: "bla", token: "foo,bar", mode: "request"}, + "123456789,mode:publish,token:foobar": {resource: "123456789", token: "foobar", mode: "publish"}, + "mode:publish": {resource: "mode:publish", mode: "request"}, + } + + for streamid, wantsi := range streamids { + si, err := parseStreamId(streamid) + + require.NoError(t, err) + require.Equal(t, wantsi, si) + } +} + +func TestParseOldStreamId(t *testing.T) { + streamids := map[string]streamInfo{ "#!:": {}, "#!:key=value": {}, "#!:m=publish": {mode: "publish"}, @@ -19,7 +37,7 @@ func TestParseStreamId(t *testing.T) { } for streamid, wantsi := range streamids { - si, _ := parseStreamId(streamid) + si, _ := parseOldStreamId(streamid) require.Equal(t, wantsi, si) } diff --git a/vendor/github.com/99designs/gqlgen/CHANGELOG.md b/vendor/github.com/99designs/gqlgen/CHANGELOG.md index a50f8012..93c0f3f5 100644 --- a/vendor/github.com/99designs/gqlgen/CHANGELOG.md +++ b/vendor/github.com/99designs/gqlgen/CHANGELOG.md @@ -5,10 +5,138 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.14...HEAD) +## [Unreleased](https://github.com/99designs/gqlgen/compare/v0.17.19...HEAD) + +## [v0.17.19](https://github.com/99designs/gqlgen/compare/v0.17.18...v0.17.19) - 2022-09-15 +- 588c6ac1 release v0.17.19 + +- c6713170 v0.17.18 postrelease bump + + + + + + +## [v0.17.18](https://github.com/99designs/gqlgen/compare/v0.17.17...v0.17.18) - 2022-09-15 +- 1d41c808 release v0.17.18 + +- 4dbe2e47 update graphiql to 2.0.7 (#2375) + +
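A note on the stream-key handling introduced in rtmp/rtmp.go above: it can be exercised outside the server. The following is a minimal standalone sketch, not part of the patch, assuming only the standard library; `splitToken` is a hypothetical stand-in for the patch's `getToken` and mirrors the same precedence rule — a `token` query parameter wins, otherwise the last path element is treated as the stream key and stripped from the path.

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// splitToken is a hypothetical stand-in for getToken in rtmp/rtmp.go:
// a token in the query string takes precedence; otherwise the last
// path element is treated as the stream key and removed from the path.
func splitToken(u *url.URL) (string, string) {
	if token := u.Query().Get("token"); len(token) != 0 {
		// The token was in the query; return the unmodified path.
		return u.Path, token
	}

	elements := strings.Split(u.EscapedPath(), "/")
	if len(elements) == 0 {
		return u.Path, ""
	}

	// Assume the last path element is the stream key.
	return strings.Join(elements[:len(elements)-1], "/"), elements[len(elements)-1]
}

func main() {
	for _, raw := range []string{
		"/live/stream?token=mysecret", // token in the query string
		"/live/stream/mysecret",       // token as the last path element
	} {
		u, _ := url.Parse(raw)
		path, token := splitToken(u)
		fmt.Printf("%-28s -> path=%q token=%q\n", raw, path, token)
	}
}
```

Under this rule, both URLs above authenticate with the same stream key while addressing the stream at /live/stream, which is the behavior TestToken in rtmp/rtmp_test.go pins down for the patch's own implementation.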