Mirror of https://github.com/datarhei/core.git, synced 2025-09-26 20:11:29 +08:00
Fix proper version handling for uploading a new config
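The hunks below split the former monolithic config package into sub-packages (config/store, config/vars, config/value, config/copy, config/v2) and move the per-variable bookkeeping out of Config into a vars.Variables container; the JSON store constructor moves from config.NewJSONStore to configstore.NewJSON. A minimal sketch of the resulting load-and-validate flow, pieced together from the api.go and import hunks below; the file path is made up and the shape of the store beyond NewJSON and Get is an assumption:

```go
package main

import (
	"fmt"

	configstore "github.com/datarhei/core/v16/config/store"
	configvars "github.com/datarhei/core/v16/config/vars"
)

func main() {
	// configstore.NewJSON replaces the former config.NewJSONStore; the second
	// argument is an optional callback that fires when a reload is requested.
	store, err := configstore.NewJSON("./config/config.json", nil)
	if err != nil {
		panic(err)
	}

	cfg := store.Get() // assumed to return *config.Config, as in api.Reload()

	cfg.Merge()         // merge CORE_* environment variables into the values
	cfg.Validate(false) // validate without resetting previously logged messages

	// Messages now receives a configvars.Variable instead of config.Variable.
	cfg.Messages(func(level string, v configvars.Variable, message string) {
		if level == "error" {
			fmt.Printf("%s: %s\n", v.Name, message)
		}
	})

	if cfg.HasErrors() {
		fmt.Println("the configuration contains errors")
	}
}
```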
@@ -16,6 +16,8 @@ import (

    "github.com/datarhei/core/v16/app"
    "github.com/datarhei/core/v16/config"
    configstore "github.com/datarhei/core/v16/config/store"
    configvars "github.com/datarhei/core/v16/config/vars"
    "github.com/datarhei/core/v16/ffmpeg"
    "github.com/datarhei/core/v16/http"
    "github.com/datarhei/core/v16/http/cache"
@@ -96,7 +98,7 @@ type api struct {

    config struct {
        path string
        store config.Store
        store configstore.Store
        config *config.Config
    }

@@ -145,7 +147,7 @@ func (a *api) Reload() error {

    logger := log.New("Core").WithOutput(log.NewConsoleWriter(a.log.writer, log.Lwarn, true))

    store, err := config.NewJSONStore(a.config.path, func() {
    store, err := configstore.NewJSON(a.config.path, func() {
        a.errorChan <- ErrConfigReload
    })
    if err != nil {
@@ -157,7 +159,7 @@ func (a *api) Reload() error {
    cfg.Merge()

    if len(cfg.Host.Name) == 0 && cfg.Host.Auto {
        cfg.SetPublicIPs()
        cfg.Host.Name = net.GetPublicIPs(5 * time.Second)
    }

    cfg.Validate(false)
@@ -226,7 +228,7 @@ func (a *api) Reload() error {
    logger.Info().WithFields(logfields).Log("")

    configlogger := logger.WithComponent("Config")
    cfg.Messages(func(level string, v config.Variable, message string) {
    cfg.Messages(func(level string, v configvars.Variable, message string) {
        configlogger = configlogger.WithFields(log.Fields{
            "variable": v.Name,
            "value": v.Value,
@@ -4,7 +4,8 @@ import (
    "fmt"
    "os"

    "github.com/datarhei/core/v16/config"
    cfgstore "github.com/datarhei/core/v16/config/store"
    cfgvars "github.com/datarhei/core/v16/config/vars"
    "github.com/datarhei/core/v16/log"
    "github.com/datarhei/core/v16/restream/store"

@@ -14,7 +15,7 @@ import (
func main() {
    logger := log.New("Import").WithOutput(log.NewConsoleWriter(os.Stderr, log.Linfo, true)).WithField("version", "v1")

    configstore, err := config.NewJSONStore(os.Getenv("CORE_CONFIGFILE"), nil)
    configstore, err := cfgstore.NewJSON(os.Getenv("CORE_CONFIGFILE"), nil)
    if err != nil {
        logger.Error().WithError(err).Log("Loading configuration failed")
        os.Exit(1)
@@ -25,7 +26,7 @@ func main() {
    }
}

func doImport(logger log.Logger, configstore config.Store) error {
func doImport(logger log.Logger, configstore cfgstore.Store) error {
    if logger == nil {
        logger = log.New("")
    }
@@ -41,7 +42,7 @@ func doImport(logger log.Logger, configstore config.Store) error {
    if cfg.HasErrors() {
        logger.Error().Log("The configuration contains errors")
        messages := []string{}
        cfg.Messages(func(level string, v config.Variable, message string) {
        cfg.Messages(func(level string, v cfgvars.Variable, message string) {
            if level == "error" {
                logger.Error().WithFields(log.Fields{
                    "variable": v.Name,
@@ -3,12 +3,12 @@ package main
import (
    "testing"

    "github.com/datarhei/core/v16/config"
    "github.com/datarhei/core/v16/config/store"
    "github.com/stretchr/testify/require"
)

func TestImport(t *testing.T) {
    configstore := config.NewDummyStore()
    configstore := store.NewDummy()

    cfg := configstore.Get()
config/config.go (492 lines changed)
@@ -3,60 +3,49 @@ package config

import (
    "context"
    "fmt"
    "net"
    "os"
    "time"

    "github.com/datarhei/core/v16/math/rand"

    haikunator "github.com/atrox/haikunatorgo/v2"
    "github.com/datarhei/core/v16/config/copy"
    "github.com/datarhei/core/v16/config/value"
    "github.com/datarhei/core/v16/config/vars"
    "github.com/datarhei/core/v16/math/rand"
    "github.com/google/uuid"
)

/*
type Config interface {
    // Merge merges the values of the known environment variables into the configuration
    Merge()

    // Validate validates the current state of the Config for completeness and sanity. Errors are
    // written to the log. Use resetLogs to indicate to reset the logs prior validation.
    Validate(resetLogs bool)

    // Messages calls for each log entry the provided callback. The level has the values 'error', 'warn', or 'info'.
    // The name is the name of the configuration value, e.g. 'api.auth.enable'. The message is the log message.
    Messages(logger func(level string, v vars.Variable, message string))

    // HasErrors returns whether there are some error messages in the log.
    HasErrors() bool

    // Overrides returns a list of configuration value names that have been overriden by an environment variable.
    Overrides() []string

    Get(name string) (string, error)
    Set(name, val string) error
}
*/

const version int64 = 3

type variable struct {
    value value // The actual value
    defVal string // The default value in string representation
    name string // A name for this value
    envName string // The environment variable that corresponds to this value
    envAltNames []string // Alternative environment variable names
    description string // A desriptions for this value
    required bool // Whether a non-empty value is required
    disguise bool // Whether the value should be disguised if printed
    merged bool // Whether this value has been replaced by its corresponding environment variable
}

type Variable struct {
    Value string
    Name string
    EnvName string
    Description string
    Merged bool
}

type message struct {
    message string // The log message
    variable Variable // The config field this message refers to
    level string // The loglevel for this message
}

type Auth0Tenant struct {
    Domain string `json:"domain"`
    Audience string `json:"audience"`
    ClientID string `json:"clientid"`
    Users []string `json:"users"`
}

type DataVersion struct {
    Version int64 `json:"version"`
}
// Make sure that the config.Config interface is satisfied
//var _ config.Config = &Config{}

// Config is a wrapper for Data
type Config struct {
    vars []*variable
    logs []message
    vars vars.Variables

    Data
}
@@ -70,8 +59,16 @@ func New() *Config {
    return config
}

func (d *Config) Get(name string) (string, error) {
    return d.vars.Get(name)
}

func (d *Config) Set(name, val string) error {
    return d.vars.Set(name, val)
}

// NewConfigFrom returns a clone of a Config
func NewConfigFrom(d *Config) *Config {
func (d *Config) Clone() *Config {
    data := New()

    data.CreatedAt = d.CreatedAt
@@ -100,286 +97,201 @@ func NewConfigFrom(d *Config) *Config {
    data.Service = d.Service
    data.Router = d.Router

    data.Log.Topics = copyStringSlice(d.Log.Topics)
    data.Log.Topics = copy.Slice(d.Log.Topics)

    data.Host.Name = copyStringSlice(d.Host.Name)
    data.Host.Name = copy.Slice(d.Host.Name)

    data.API.Access.HTTP.Allow = copyStringSlice(d.API.Access.HTTP.Allow)
    data.API.Access.HTTP.Block = copyStringSlice(d.API.Access.HTTP.Block)
    data.API.Access.HTTPS.Allow = copyStringSlice(d.API.Access.HTTPS.Allow)
    data.API.Access.HTTPS.Block = copyStringSlice(d.API.Access.HTTPS.Block)
    data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
    data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
    data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
    data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)

    data.API.Auth.Auth0.Tenants = copyTenantSlice(d.API.Auth.Auth0.Tenants)
    data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)

    data.Storage.CORS.Origins = copyStringSlice(d.Storage.CORS.Origins)
    data.Storage.Disk.Cache.Types.Allow = copyStringSlice(d.Storage.Disk.Cache.Types.Allow)
    data.Storage.Disk.Cache.Types.Block = copyStringSlice(d.Storage.Disk.Cache.Types.Block)
    data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
    data.Storage.Disk.Cache.Types.Allow = copy.Slice(d.Storage.Disk.Cache.Types.Allow)
    data.Storage.Disk.Cache.Types.Block = copy.Slice(d.Storage.Disk.Cache.Types.Block)

    data.FFmpeg.Access.Input.Allow = copyStringSlice(d.FFmpeg.Access.Input.Allow)
    data.FFmpeg.Access.Input.Block = copyStringSlice(d.FFmpeg.Access.Input.Block)
    data.FFmpeg.Access.Output.Allow = copyStringSlice(d.FFmpeg.Access.Output.Allow)
    data.FFmpeg.Access.Output.Block = copyStringSlice(d.FFmpeg.Access.Output.Block)
    data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
    data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
    data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
    data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)

    data.Sessions.IPIgnoreList = copyStringSlice(d.Sessions.IPIgnoreList)
    data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)

    data.SRT.Log.Topics = copyStringSlice(d.SRT.Log.Topics)
    data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)

    data.Router.BlockedPrefixes = copyStringSlice(d.Router.BlockedPrefixes)
    data.Router.Routes = copyStringMap(d.Router.Routes)
    data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
    data.Router.Routes = copy.StringMap(d.Router.Routes)

    for i, v := range d.vars {
        data.vars[i].merged = v.merged
    }
    data.vars.Transfer(&d.vars)

    return data
}
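The init() body below is a mechanical migration of the variable registrations: every d.val(newBoolValue(...), name, envName, altEnvNames, description, required, disguise) call becomes d.vars.Register(value.NewBool(...), ...) with the same trailing arguments, and likewise for the other constructors (newStringValue to value.NewString, newInt64Value to value.NewInt64, newCIDRListValue to value.NewCIDRList, and so on). The registered names, environment variables, defaults and descriptions themselves are unchanged.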
func (d *Config) init() {
|
||||
d.val(newInt64Value(&d.Version, version), "version", "", nil, "Configuration file layout version", true, false)
|
||||
d.val(newTimeValue(&d.CreatedAt, time.Now()), "created_at", "", nil, "Configuration file creation time", false, false)
|
||||
d.val(newStringValue(&d.ID, uuid.New().String()), "id", "CORE_ID", nil, "ID for this instance", true, false)
|
||||
d.val(newStringValue(&d.Name, haikunator.New().Haikunate()), "name", "CORE_NAME", nil, "A human readable name for this instance", false, false)
|
||||
d.val(newAddressValue(&d.Address, ":8080"), "address", "CORE_ADDRESS", nil, "HTTP listening address", false, false)
|
||||
d.val(newBoolValue(&d.CheckForUpdates, true), "update_check", "CORE_UPDATE_CHECK", nil, "Check for updates and send anonymized data", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Version, version), "version", "", nil, "Configuration file layout version", true, false)
|
||||
d.vars.Register(value.NewTime(&d.CreatedAt, time.Now()), "created_at", "", nil, "Configuration file creation time", false, false)
|
||||
d.vars.Register(value.NewString(&d.ID, uuid.New().String()), "id", "CORE_ID", nil, "ID for this instance", true, false)
|
||||
d.vars.Register(value.NewString(&d.Name, haikunator.New().Haikunate()), "name", "CORE_NAME", nil, "A human readable name for this instance", false, false)
|
||||
d.vars.Register(value.NewAddress(&d.Address, ":8080"), "address", "CORE_ADDRESS", nil, "HTTP listening address", false, false)
|
||||
d.vars.Register(value.NewBool(&d.CheckForUpdates, true), "update_check", "CORE_UPDATE_CHECK", nil, "Check for updates and send anonymized data", false, false)
|
||||
|
||||
// Log
|
||||
d.val(newStringValue(&d.Log.Level, "info"), "log.level", "CORE_LOG_LEVEL", nil, "Loglevel: silent, error, warn, info, debug", false, false)
|
||||
d.val(newStringListValue(&d.Log.Topics, []string{}, ","), "log.topics", "CORE_LOG_TOPICS", nil, "Show only selected log topics", false, false)
|
||||
d.val(newIntValue(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)
|
||||
d.vars.Register(value.NewString(&d.Log.Level, "info"), "log.level", "CORE_LOG_LEVEL", nil, "Loglevel: silent, error, warn, info, debug", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.Log.Topics, []string{}, ","), "log.topics", "CORE_LOG_TOPICS", nil, "Show only selected log topics", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)
|
||||
|
||||
// DB
|
||||
d.val(newMustDirValue(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
|
||||
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
|
||||
|
||||
// Host
|
||||
d.val(newStringListValue(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
|
||||
d.val(newBoolValue(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false)
|
||||
|
||||
// API
|
||||
d.val(newBoolValue(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only ready only access to the API", false, false)
|
||||
d.val(newCIDRListValue(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
|
||||
d.val(newCIDRListValue(&d.API.Access.HTTP.Block, []string{}, ","), "api.access.http.block", "CORE_API_ACCESS_HTTP_BLOCK", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
|
||||
d.val(newCIDRListValue(&d.API.Access.HTTPS.Allow, []string{}, ","), "api.access.https.allow", "CORE_API_ACCESS_HTTPS_ALLOW", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
|
||||
d.val(newCIDRListValue(&d.API.Access.HTTPS.Block, []string{}, ","), "api.access.https.block", "CORE_API_ACCESS_HTTPS_BLOCK", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
|
||||
d.val(newBoolValue(&d.API.Auth.Enable, false), "api.auth.enable", "CORE_API_AUTH_ENABLE", nil, "Enable authentication for all clients", false, false)
|
||||
d.val(newBoolValue(&d.API.Auth.DisableLocalhost, false), "api.auth.disable_localhost", "CORE_API_AUTH_DISABLE_LOCALHOST", nil, "Disable authentication for clients from localhost", false, false)
|
||||
d.val(newStringValue(&d.API.Auth.Username, ""), "api.auth.username", "CORE_API_AUTH_USERNAME", []string{"RS_USERNAME"}, "Username", false, false)
|
||||
d.val(newStringValue(&d.API.Auth.Password, ""), "api.auth.password", "CORE_API_AUTH_PASSWORD", []string{"RS_PASSWORD"}, "Password", false, true)
|
||||
d.vars.Register(value.NewBool(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only ready only access to the API", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Block, []string{}, ","), "api.access.http.block", "CORE_API_ACCESS_HTTP_BLOCK", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Allow, []string{}, ","), "api.access.https.allow", "CORE_API_ACCESS_HTTPS_ALLOW", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Block, []string{}, ","), "api.access.https.block", "CORE_API_ACCESS_HTTPS_BLOCK", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
|
||||
d.vars.Register(value.NewBool(&d.API.Auth.Enable, false), "api.auth.enable", "CORE_API_AUTH_ENABLE", nil, "Enable authentication for all clients", false, false)
|
||||
d.vars.Register(value.NewBool(&d.API.Auth.DisableLocalhost, false), "api.auth.disable_localhost", "CORE_API_AUTH_DISABLE_LOCALHOST", nil, "Disable authentication for clients from localhost", false, false)
|
||||
d.vars.Register(value.NewString(&d.API.Auth.Username, ""), "api.auth.username", "CORE_API_AUTH_USERNAME", []string{"RS_USERNAME"}, "Username", false, false)
|
||||
d.vars.Register(value.NewString(&d.API.Auth.Password, ""), "api.auth.password", "CORE_API_AUTH_PASSWORD", []string{"RS_PASSWORD"}, "Password", false, true)
|
||||
|
||||
// Auth JWT
|
||||
d.val(newStringValue(&d.API.Auth.JWT.Secret, rand.String(32)), "api.auth.jwt.secret", "CORE_API_AUTH_JWT_SECRET", nil, "JWT secret, leave empty for generating a random value", false, true)
|
||||
d.vars.Register(value.NewString(&d.API.Auth.JWT.Secret, rand.String(32)), "api.auth.jwt.secret", "CORE_API_AUTH_JWT_SECRET", nil, "JWT secret, leave empty for generating a random value", false, true)
|
||||
|
||||
// Auth Auth0
|
||||
d.val(newBoolValue(&d.API.Auth.Auth0.Enable, false), "api.auth.auth0.enable", "CORE_API_AUTH_AUTH0_ENABLE", nil, "Enable Auth0", false, false)
|
||||
d.val(newTenantListValue(&d.API.Auth.Auth0.Tenants, []Auth0Tenant{}, ","), "api.auth.auth0.tenants", "CORE_API_AUTH_AUTH0_TENANTS", nil, "List of Auth0 tenants", false, false)
|
||||
d.vars.Register(value.NewBool(&d.API.Auth.Auth0.Enable, false), "api.auth.auth0.enable", "CORE_API_AUTH_AUTH0_ENABLE", nil, "Enable Auth0", false, false)
|
||||
d.vars.Register(value.NewTenantList(&d.API.Auth.Auth0.Tenants, []value.Auth0Tenant{}, ","), "api.auth.auth0.tenants", "CORE_API_AUTH_AUTH0_TENANTS", nil, "List of Auth0 tenants", false, false)
|
||||
|
||||
// TLS
|
||||
d.val(newAddressValue(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false)
|
||||
d.val(newBoolValue(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
|
||||
d.val(newBoolValue(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
|
||||
d.val(newEmailValue(&d.TLS.Email, "cert@datarhei.com"), "tls.email", "CORE_TLS_EMAIL", nil, "Email for Let's Encrypt registration", false, false)
|
||||
d.val(newFileValue(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
|
||||
d.val(newFileValue(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
|
||||
d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false)
|
||||
d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
|
||||
d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
|
||||
d.vars.Register(value.NewEmail(&d.TLS.Email, "cert@datarhei.com"), "tls.email", "CORE_TLS_EMAIL", nil, "Email for Let's Encrypt registration", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
|
||||
|
||||
// Storage
|
||||
d.val(newFileValue(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
|
||||
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
|
||||
|
||||
// Storage (Disk)
|
||||
d.val(newMustDirValue(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
|
||||
d.val(newInt64Value(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
|
||||
d.val(newBoolValue(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
|
||||
d.val(newUint64Value(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
|
||||
d.val(newInt64Value(&d.Storage.Disk.Cache.TTL, 300), "storage.disk.cache.ttl_seconds", "CORE_STORAGE_DISK_CACHE_TTLSECONDS", nil, "Seconds to keep files in cache", false, false)
|
||||
d.val(newUint64Value(&d.Storage.Disk.Cache.FileSize, 1), "storage.disk.cache.max_file_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES", nil, "Max. file size to put in cache", false, false)
|
||||
d.val(newStringListValue(&d.Storage.Disk.Cache.Types.Allow, []string{}, " "), "storage.disk.cache.type.allow", "CORE_STORAGE_DISK_CACHE_TYPES_ALLOW", []string{"CORE_STORAGE_DISK_CACHE_TYPES"}, "File extensions to cache, empty for all", false, false)
|
||||
d.val(newStringListValue(&d.Storage.Disk.Cache.Types.Block, []string{".m3u8", ".mpd"}, " "), "storage.disk.cache.type.block", "CORE_STORAGE_DISK_CACHE_TYPES_BLOCK", nil, "File extensions not to cache, empty for none", false, false)
|
||||
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Storage.Disk.Cache.TTL, 300), "storage.disk.cache.ttl_seconds", "CORE_STORAGE_DISK_CACHE_TTLSECONDS", nil, "Seconds to keep files in cache", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.FileSize, 1), "storage.disk.cache.max_file_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES", nil, "Max. file size to put in cache", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.Storage.Disk.Cache.Types.Allow, []string{}, " "), "storage.disk.cache.type.allow", "CORE_STORAGE_DISK_CACHE_TYPES_ALLOW", []string{"CORE_STORAGE_DISK_CACHE_TYPES"}, "File extensions to cache, empty for all", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.Storage.Disk.Cache.Types.Block, []string{".m3u8", ".mpd"}, " "), "storage.disk.cache.type.block", "CORE_STORAGE_DISK_CACHE_TYPES_BLOCK", nil, "File extensions not to cache, empty for none", false, false)
|
||||
|
||||
// Storage (Memory)
|
||||
d.val(newBoolValue(&d.Storage.Memory.Auth.Enable, true), "storage.memory.auth.enable", "CORE_STORAGE_MEMORY_AUTH_ENABLE", nil, "Enable basic auth for PUT,POST, and DELETE on /memfs", false, false)
|
||||
d.val(newStringValue(&d.Storage.Memory.Auth.Username, "admin"), "storage.memory.auth.username", "CORE_STORAGE_MEMORY_AUTH_USERNAME", nil, "Username for Basic-Auth of /memfs", false, false)
|
||||
d.val(newStringValue(&d.Storage.Memory.Auth.Password, rand.StringAlphanumeric(18)), "storage.memory.auth.password", "CORE_STORAGE_MEMORY_AUTH_PASSWORD", nil, "Password for Basic-Auth of /memfs", false, true)
|
||||
d.val(newInt64Value(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. allowed megabytes for /memfs, 0 for unlimited", false, false)
|
||||
d.val(newBoolValue(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Storage.Memory.Auth.Enable, true), "storage.memory.auth.enable", "CORE_STORAGE_MEMORY_AUTH_ENABLE", nil, "Enable basic auth for PUT,POST, and DELETE on /memfs", false, false)
|
||||
d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Username, "admin"), "storage.memory.auth.username", "CORE_STORAGE_MEMORY_AUTH_USERNAME", nil, "Username for Basic-Auth of /memfs", false, false)
|
||||
d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Password, rand.StringAlphanumeric(18)), "storage.memory.auth.password", "CORE_STORAGE_MEMORY_AUTH_PASSWORD", nil, "Password for Basic-Auth of /memfs", false, true)
|
||||
d.vars.Register(value.NewInt64(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. allowed megabytes for /memfs, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false)
|
||||
|
||||
// Storage (CORS)
|
||||
d.val(newCORSOriginsValue(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false)
|
||||
d.vars.Register(value.NewCORSOrigins(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false)
|
||||
|
||||
// RTMP
|
||||
d.val(newBoolValue(&d.RTMP.Enable, false), "rtmp.enable", "CORE_RTMP_ENABLE", nil, "Enable RTMP server", false, false)
|
||||
d.val(newBoolValue(&d.RTMP.EnableTLS, false), "rtmp.enable_tls", "CORE_RTMP_ENABLE_TLS", nil, "Enable RTMPS server instead of RTMP", false, false)
|
||||
d.val(newAddressValue(&d.RTMP.Address, ":1935"), "rtmp.address", "CORE_RTMP_ADDRESS", nil, "RTMP server listen address", false, false)
|
||||
d.val(newAddressValue(&d.RTMP.AddressTLS, ":1936"), "rtmp.address_tls", "CORE_RTMP_ADDRESS_TLS", nil, "RTMPS server listen address", false, false)
|
||||
d.val(newAbsolutePathValue(&d.RTMP.App, "/"), "rtmp.app", "CORE_RTMP_APP", nil, "RTMP app for publishing", false, false)
|
||||
d.val(newStringValue(&d.RTMP.Token, ""), "rtmp.token", "CORE_RTMP_TOKEN", nil, "RTMP token for publishing and playing", false, true)
|
||||
d.vars.Register(value.NewBool(&d.RTMP.Enable, false), "rtmp.enable", "CORE_RTMP_ENABLE", nil, "Enable RTMP server", false, false)
|
||||
d.vars.Register(value.NewBool(&d.RTMP.EnableTLS, false), "rtmp.enable_tls", "CORE_RTMP_ENABLE_TLS", nil, "Enable RTMPS server instead of RTMP", false, false)
|
||||
d.vars.Register(value.NewAddress(&d.RTMP.Address, ":1935"), "rtmp.address", "CORE_RTMP_ADDRESS", nil, "RTMP server listen address", false, false)
|
||||
d.vars.Register(value.NewAddress(&d.RTMP.AddressTLS, ":1936"), "rtmp.address_tls", "CORE_RTMP_ADDRESS_TLS", nil, "RTMPS server listen address", false, false)
|
||||
d.vars.Register(value.NewAbsolutePath(&d.RTMP.App, "/"), "rtmp.app", "CORE_RTMP_APP", nil, "RTMP app for publishing", false, false)
|
||||
d.vars.Register(value.NewString(&d.RTMP.Token, ""), "rtmp.token", "CORE_RTMP_TOKEN", nil, "RTMP token for publishing and playing", false, true)
|
||||
|
||||
// SRT
|
||||
d.val(newBoolValue(&d.SRT.Enable, false), "srt.enable", "CORE_SRT_ENABLE", nil, "Enable SRT server", false, false)
|
||||
d.val(newAddressValue(&d.SRT.Address, ":6000"), "srt.address", "CORE_SRT_ADDRESS", nil, "SRT server listen address", false, false)
|
||||
d.val(newStringValue(&d.SRT.Passphrase, ""), "srt.passphrase", "CORE_SRT_PASSPHRASE", nil, "SRT encryption passphrase", false, true)
|
||||
d.val(newStringValue(&d.SRT.Token, ""), "srt.token", "CORE_SRT_TOKEN", nil, "SRT token for publishing and playing", false, true)
|
||||
d.val(newBoolValue(&d.SRT.Log.Enable, false), "srt.log.enable", "CORE_SRT_LOG_ENABLE", nil, "Enable SRT server logging", false, false)
|
||||
d.val(newStringListValue(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)
|
||||
d.vars.Register(value.NewBool(&d.SRT.Enable, false), "srt.enable", "CORE_SRT_ENABLE", nil, "Enable SRT server", false, false)
|
||||
d.vars.Register(value.NewAddress(&d.SRT.Address, ":6000"), "srt.address", "CORE_SRT_ADDRESS", nil, "SRT server listen address", false, false)
|
||||
d.vars.Register(value.NewString(&d.SRT.Passphrase, ""), "srt.passphrase", "CORE_SRT_PASSPHRASE", nil, "SRT encryption passphrase", false, true)
|
||||
d.vars.Register(value.NewString(&d.SRT.Token, ""), "srt.token", "CORE_SRT_TOKEN", nil, "SRT token for publishing and playing", false, true)
|
||||
d.vars.Register(value.NewBool(&d.SRT.Log.Enable, false), "srt.log.enable", "CORE_SRT_LOG_ENABLE", nil, "Enable SRT server logging", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)
|
||||
|
||||
// FFmpeg
|
||||
d.val(newExecValue(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
|
||||
d.val(newInt64Value(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
|
||||
d.val(newStringListValue(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
|
||||
d.val(newStringListValue(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
|
||||
d.val(newStringListValue(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expression to match against the output addresses", false, false)
|
||||
d.val(newStringListValue(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expression to match against the output addresses", false, false)
|
||||
d.val(newIntValue(&d.FFmpeg.Log.MaxLines, 50), "ffmpeg.log.max_lines", "CORE_FFMPEG_LOG_MAXLINES", nil, "Number of latest log lines to keep for each process", false, false)
|
||||
d.val(newIntValue(&d.FFmpeg.Log.MaxHistory, 3), "ffmpeg.log.max_history", "CORE_FFMPEG_LOG_MAXHISTORY", nil, "Number of latest logs to keep for each process", false, false)
|
||||
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
|
||||
d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expression to match against the output addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expression to match against the output addresses", false, false)
|
||||
d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxLines, 50), "ffmpeg.log.max_lines", "CORE_FFMPEG_LOG_MAXLINES", nil, "Number of latest log lines to keep for each process", false, false)
|
||||
d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxHistory, 3), "ffmpeg.log.max_history", "CORE_FFMPEG_LOG_MAXHISTORY", nil, "Number of latest logs to keep for each process", false, false)
|
||||
|
||||
// Playout
|
||||
d.val(newBoolValue(&d.Playout.Enable, false), "playout.enable", "CORE_PLAYOUT_ENABLE", nil, "Enable playout proxy where available", false, false)
|
||||
d.val(newPortValue(&d.Playout.MinPort, 0), "playout.min_port", "CORE_PLAYOUT_MINPORT", nil, "Min. playout server port", false, false)
|
||||
d.val(newPortValue(&d.Playout.MaxPort, 0), "playout.max_port", "CORE_PLAYOUT_MAXPORT", nil, "Max. playout server port", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Playout.Enable, false), "playout.enable", "CORE_PLAYOUT_ENABLE", nil, "Enable playout proxy where available", false, false)
|
||||
d.vars.Register(value.NewPort(&d.Playout.MinPort, 0), "playout.min_port", "CORE_PLAYOUT_MINPORT", nil, "Min. playout server port", false, false)
|
||||
d.vars.Register(value.NewPort(&d.Playout.MaxPort, 0), "playout.max_port", "CORE_PLAYOUT_MAXPORT", nil, "Max. playout server port", false, false)
|
||||
|
||||
// Debug
|
||||
d.val(newBoolValue(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false)
|
||||
d.val(newIntValue(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false)
|
||||
|
||||
// Metrics
|
||||
d.val(newBoolValue(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false)
|
||||
d.val(newBoolValue(&d.Metrics.EnablePrometheus, false), "metrics.enable_prometheus", "CORE_METRICS_ENABLE_PROMETHEUS", nil, "Enable prometheus endpoint /metrics", false, false)
|
||||
d.val(newInt64Value(&d.Metrics.Range, 300), "metrics.range_seconds", "CORE_METRICS_RANGE_SECONDS", nil, "Seconds to keep history data", false, false)
|
||||
d.val(newInt64Value(&d.Metrics.Interval, 2), "metrics.interval_seconds", "CORE_METRICS_INTERVAL_SECONDS", nil, "Interval for collecting metrics", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Metrics.EnablePrometheus, false), "metrics.enable_prometheus", "CORE_METRICS_ENABLE_PROMETHEUS", nil, "Enable prometheus endpoint /metrics", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Metrics.Range, 300), "metrics.range_seconds", "CORE_METRICS_RANGE_SECONDS", nil, "Seconds to keep history data", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Metrics.Interval, 2), "metrics.interval_seconds", "CORE_METRICS_INTERVAL_SECONDS", nil, "Interval for collecting metrics", false, false)
|
||||
|
||||
// Sessions
|
||||
d.val(newBoolValue(&d.Sessions.Enable, true), "sessions.enable", "CORE_SESSIONS_ENABLE", nil, "Enable collecting HLS session stats for /memfs", false, false)
|
||||
d.val(newCIDRListValue(&d.Sessions.IPIgnoreList, []string{"127.0.0.1/32", "::1/128"}, ","), "sessions.ip_ignorelist", "CORE_SESSIONS_IP_IGNORELIST", nil, "List of IP ranges in CIDR notation to ignore", false, false)
|
||||
d.val(newIntValue(&d.Sessions.SessionTimeout, 30), "sessions.session_timeout_sec", "CORE_SESSIONS_SESSION_TIMEOUT_SEC", nil, "Timeout for an idle session", false, false)
|
||||
d.val(newBoolValue(&d.Sessions.Persist, false), "sessions.persist", "CORE_SESSIONS_PERSIST", nil, "Whether to persist session history. Will be stored as sessions.json in db.dir", false, false)
|
||||
d.val(newIntValue(&d.Sessions.PersistInterval, 300), "sessions.persist_interval_sec", "CORE_SESSIONS_PERSIST_INTERVAL_SEC", nil, "Interval in seconds in which to persist the current session history", false, false)
|
||||
d.val(newUint64Value(&d.Sessions.MaxBitrate, 0), "sessions.max_bitrate_mbit", "CORE_SESSIONS_MAXBITRATE_MBIT", nil, "Max. allowed outgoing bitrate in mbit/s, 0 for unlimited", false, false)
|
||||
d.val(newUint64Value(&d.Sessions.MaxSessions, 0), "sessions.max_sessions", "CORE_SESSIONS_MAXSESSIONS", nil, "Max. allowed number of simultaneous sessions, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Sessions.Enable, true), "sessions.enable", "CORE_SESSIONS_ENABLE", nil, "Enable collecting HLS session stats for /memfs", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.Sessions.IPIgnoreList, []string{"127.0.0.1/32", "::1/128"}, ","), "sessions.ip_ignorelist", "CORE_SESSIONS_IP_IGNORELIST", nil, "List of IP ranges in CIDR notation to ignore", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Sessions.SessionTimeout, 30), "sessions.session_timeout_sec", "CORE_SESSIONS_SESSION_TIMEOUT_SEC", nil, "Timeout for an idle session", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Sessions.Persist, false), "sessions.persist", "CORE_SESSIONS_PERSIST", nil, "Whether to persist session history. Will be stored as sessions.json in db.dir", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Sessions.PersistInterval, 300), "sessions.persist_interval_sec", "CORE_SESSIONS_PERSIST_INTERVAL_SEC", nil, "Interval in seconds in which to persist the current session history", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Sessions.MaxBitrate, 0), "sessions.max_bitrate_mbit", "CORE_SESSIONS_MAXBITRATE_MBIT", nil, "Max. allowed outgoing bitrate in mbit/s, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Sessions.MaxSessions, 0), "sessions.max_sessions", "CORE_SESSIONS_MAXSESSIONS", nil, "Max. allowed number of simultaneous sessions, 0 for unlimited", false, false)
|
||||
|
||||
// Service
|
||||
d.val(newBoolValue(&d.Service.Enable, false), "service.enable", "CORE_SERVICE_ENABLE", nil, "Enable connecting to the Restreamer Service", false, false)
|
||||
d.val(newStringValue(&d.Service.Token, ""), "service.token", "CORE_SERVICE_TOKEN", nil, "Restreamer Service account token", false, true)
|
||||
d.val(newURLValue(&d.Service.URL, "https://service.datarhei.com"), "service.url", "CORE_SERVICE_URL", nil, "URL of the Restreamer Service", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Service.Enable, false), "service.enable", "CORE_SERVICE_ENABLE", nil, "Enable connecting to the Restreamer Service", false, false)
|
||||
d.vars.Register(value.NewString(&d.Service.Token, ""), "service.token", "CORE_SERVICE_TOKEN", nil, "Restreamer Service account token", false, true)
|
||||
d.vars.Register(value.NewURL(&d.Service.URL, "https://service.datarhei.com"), "service.url", "CORE_SERVICE_URL", nil, "URL of the Restreamer Service", false, false)
|
||||
|
||||
// Router
|
||||
d.val(newStringListValue(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
|
||||
d.val(newStringMapStringValue(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
|
||||
d.val(newDirValue(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
|
||||
}
|
||||
|
||||
func (d *Config) val(val value, name, envName string, envAltNames []string, description string, required, disguise bool) {
|
||||
d.vars = append(d.vars, &variable{
|
||||
value: val,
|
||||
defVal: val.String(),
|
||||
name: name,
|
||||
envName: envName,
|
||||
envAltNames: envAltNames,
|
||||
description: description,
|
||||
required: required,
|
||||
disguise: disguise,
|
||||
})
|
||||
}
|
||||
|
||||
func (d *Config) log(level string, v *variable, format string, args ...interface{}) {
|
||||
variable := Variable{
|
||||
Value: v.value.String(),
|
||||
Name: v.name,
|
||||
EnvName: v.envName,
|
||||
Description: v.description,
|
||||
Merged: v.merged,
|
||||
}
|
||||
|
||||
if v.disguise {
|
||||
variable.Value = "***"
|
||||
}
|
||||
|
||||
l := message{
|
||||
message: fmt.Sprintf(format, args...),
|
||||
variable: variable,
|
||||
level: level,
|
||||
}
|
||||
|
||||
d.logs = append(d.logs, l)
|
||||
}
|
||||
|
||||
// Merge merges the values of the known environment variables into the configuration
|
||||
func (d *Config) Merge() {
|
||||
for _, v := range d.vars {
|
||||
if len(v.envName) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
var envval string
|
||||
var ok bool
|
||||
|
||||
envval, ok = os.LookupEnv(v.envName)
|
||||
if !ok {
|
||||
foundAltName := false
|
||||
|
||||
for _, envName := range v.envAltNames {
|
||||
envval, ok = os.LookupEnv(envName)
|
||||
if ok {
|
||||
foundAltName = true
|
||||
d.log("warn", v, "deprecated name, please use %s", v.envName)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !foundAltName {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
err := v.value.Set(envval)
|
||||
if err != nil {
|
||||
d.log("error", v, "%s", err.Error())
|
||||
}
|
||||
|
||||
v.merged = true
|
||||
}
|
||||
d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
|
||||
d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
|
||||
d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
|
||||
}
|
||||
|
||||
// Validate validates the current state of the Config for completeness and sanity. Errors are
// written to the log. Use resetLogs to indicate to reset the logs prior validation.
func (d *Config) Validate(resetLogs bool) {
    if resetLogs {
        d.logs = nil
        d.vars.ResetLogs()
    }

    if d.Version != version {
        d.log("error", d.findVariable("version"), "unknown configuration layout version (found version %d, expecting version %d)", d.Version, version)
        d.vars.Log("error", "version", "unknown configuration layout version (found version %d, expecting version %d)", d.Version, version)

        return
    }

    for _, v := range d.vars {
        d.log("info", v, "%s", "")

        err := v.value.Validate()
        if err != nil {
            d.log("error", v, "%s", err.Error())
        }

        if v.required && v.value.IsEmpty() {
            d.log("error", v, "a value is required")
        }
    }
    d.vars.Validate()

    // Individual sanity checks

    // If HTTP Auth is enabled, check that the username and password are set
    if d.API.Auth.Enable {
        if len(d.API.Auth.Username) == 0 || len(d.API.Auth.Password) == 0 {
            d.log("error", d.findVariable("api.auth.enable"), "api.auth.username and api.auth.password must be set")
            d.vars.Log("error", "api.auth.enable", "api.auth.username and api.auth.password must be set")
        }
    }

    // If Auth0 is enabled, check that domain, audience, and clientid are set
    if d.API.Auth.Auth0.Enable {
        if len(d.API.Auth.Auth0.Tenants) == 0 {
            d.log("error", d.findVariable("api.auth.auth0.enable"), "at least one tenants must be set")
            d.vars.Log("error", "api.auth.auth0.enable", "at least one tenants must be set")
        }

        for i, t := range d.API.Auth.Auth0.Tenants {
            if len(t.Domain) == 0 || len(t.Audience) == 0 || len(t.ClientID) == 0 {
                d.log("error", d.findVariable("api.auth.auth0.tenants"), "domain, audience, and clientid must be set (tenant %d)", i)
                d.vars.Log("error", "api.auth.auth0.tenants", "domain, audience, and clientid must be set (tenant %d)", i)
            }
        }
    }
@@ -387,14 +299,14 @@ func (d *Config) Validate(resetLogs bool) {
    // If TLS is enabled and Let's Encrypt is disabled, require certfile and keyfile
    if d.TLS.Enable && !d.TLS.Auto {
        if len(d.TLS.CertFile) == 0 || len(d.TLS.KeyFile) == 0 {
            d.log("error", d.findVariable("tls.enable"), "tls.certfile and tls.keyfile must be set")
            d.vars.Log("error", "tls.enable", "tls.certfile and tls.keyfile must be set")
        }
    }

    // If TLS and Let's Encrypt certificate is enabled, we require a public hostname
    if d.TLS.Enable && d.TLS.Auto {
        if len(d.Host.Name) == 0 {
            d.log("error", d.findVariable("host.name"), "a hostname must be set in order to get an automatic TLS certificate")
            d.vars.Log("error", "host.name", "a hostname must be set in order to get an automatic TLS certificate")
        } else {
            r := &net.Resolver{
                PreferGo: true,
@@ -404,7 +316,7 @@ func (d *Config) Validate(resetLogs bool) {
            for _, host := range d.Host.Name {
                // Don't lookup IP addresses
                if ip := net.ParseIP(host); ip != nil {
                    d.log("error", d.findVariable("host.name"), "only host names are allowed if automatic TLS is enabled, but found IP address: %s", host)
                    d.vars.Log("error", "host.name", "only host names are allowed if automatic TLS is enabled, but found IP address: %s", host)
                }

                // Lookup host name with a timeout
@@ -412,7 +324,7 @@ func (d *Config) Validate(resetLogs bool) {

                _, err := r.LookupHost(ctx, host)
                if err != nil {
                    d.log("error", d.findVariable("host.name"), "the host '%s' can't be resolved and will not work with automatic TLS", host)
                    d.vars.Log("error", "host.name", "the host '%s' can't be resolved and will not work with automatic TLS", host)
                }

                cancel()
@@ -423,32 +335,31 @@ func (d *Config) Validate(resetLogs bool) {
    // If TLS and Let's Encrypt certificate is enabled, we require a non-empty email address
    if d.TLS.Enable && d.TLS.Auto {
        if len(d.TLS.Email) == 0 {
            v := d.findVariable("tls.email")
            v.value.Set(v.defVal)
            d.vars.SetDefault("tls.email")
        }
    }

    // If TLS for RTMP is enabled, TLS must be enabled
    if d.RTMP.EnableTLS {
        if !d.RTMP.Enable {
            d.log("error", d.findVariable("rtmp.enable"), "RTMP server must be enabled if RTMPS server is enabled")
            d.vars.Log("error", "rtmp.enable", "RTMP server must be enabled if RTMPS server is enabled")
        }

        if !d.TLS.Enable {
            d.log("error", d.findVariable("rtmp.enable_tls"), "RTMPS server can only be enabled if TLS is enabled")
            d.vars.Log("error", "rtmp.enable_tls", "RTMPS server can only be enabled if TLS is enabled")
        }

        if len(d.RTMP.AddressTLS) == 0 {
            d.log("error", d.findVariable("rtmp.address_tls"), "RTMPS server address must be set")
            d.vars.Log("error", "rtmp.address_tls", "RTMPS server address must be set")
        }

        if d.RTMP.Enable && d.RTMP.Address == d.RTMP.AddressTLS {
            d.log("error", d.findVariable("rtmp.address"), "The RTMP and RTMPS server can't listen on the same address")
            d.vars.Log("error", "rtmp.address", "The RTMP and RTMPS server can't listen on the same address")
        }
    }

    // If CORE_MEMFS_USERNAME and CORE_MEMFS_PASSWORD are set, automatically active/deactivate Basic-Auth for memfs
    if d.findVariable("storage.memory.auth.username").merged && d.findVariable("storage.memory.auth.password").merged {
    if d.vars.IsMerged("storage.memory.auth.username") && d.vars.IsMerged("storage.memory.auth.password") {
        d.Storage.Memory.Auth.Enable = true

        if len(d.Storage.Memory.Auth.Username) == 0 && len(d.Storage.Memory.Auth.Password) == 0 {
@@ -459,121 +370,76 @@ func (d *Config) Validate(resetLogs bool) {
    // If Basic-Auth for memfs is enable, check that the username and password are set
    if d.Storage.Memory.Auth.Enable {
        if len(d.Storage.Memory.Auth.Username) == 0 || len(d.Storage.Memory.Auth.Password) == 0 {
            d.log("error", d.findVariable("storage.memory.auth.enable"), "storage.memory.auth.username and storage.memory.auth.password must be set")
            d.vars.Log("error", "storage.memory.auth.enable", "storage.memory.auth.username and storage.memory.auth.password must be set")
        }
    }

    // If playout is enabled, check that the port range is sane
    if d.Playout.Enable {
        if d.Playout.MinPort >= d.Playout.MaxPort {
            d.log("error", d.findVariable("playout.min_port"), "must be bigger than playout.max_port")
            d.vars.Log("error", "playout.min_port", "must be bigger than playout.max_port")
        }
    }

    // If cache is enabled, a valid TTL has to be set to a useful value
    if d.Storage.Disk.Cache.Enable && d.Storage.Disk.Cache.TTL < 0 {
        d.log("error", d.findVariable("storage.disk.cache.ttl_seconds"), "must be equal or greater than 0")
        d.vars.Log("error", "storage.disk.cache.ttl_seconds", "must be equal or greater than 0")
    }

    // If the stats are enabled, the session timeout has to be set to a useful value
    if d.Sessions.Enable && d.Sessions.SessionTimeout < 1 {
        d.log("error", d.findVariable("stats.session_timeout_sec"), "must be equal or greater than 1")
        d.vars.Log("error", "stats.session_timeout_sec", "must be equal or greater than 1")
    }

    // If the stats and their persistence are enabled, the persist interval has to be set to a useful value
    if d.Sessions.Enable && d.Sessions.PersistInterval < 0 {
        d.log("error", d.findVariable("stats.persist_interval_sec"), "must be at equal or greater than 0")
        d.vars.Log("error", "stats.persist_interval_sec", "must be at equal or greater than 0")
    }

    // If the service is enabled, the token and enpoint have to be defined
    if d.Service.Enable {
        if len(d.Service.Token) == 0 {
            d.log("error", d.findVariable("service.token"), "must be non-empty")
            d.vars.Log("error", "service.token", "must be non-empty")
        }

        if len(d.Service.URL) == 0 {
            d.log("error", d.findVariable("service.url"), "must be non-empty")
            d.vars.Log("error", "service.url", "must be non-empty")
        }
    }

    // If historic metrics are enabled, the timerange and interval have to be valid
    if d.Metrics.Enable {
        if d.Metrics.Range <= 0 {
            d.log("error", d.findVariable("metrics.range"), "must be greater 0")
            d.vars.Log("error", "metrics.range", "must be greater 0")
        }

        if d.Metrics.Interval <= 0 {
            d.log("error", d.findVariable("metrics.interval"), "must be greater 0")
            d.vars.Log("error", "metrics.interval", "must be greater 0")
        }

        if d.Metrics.Interval > d.Metrics.Range {
            d.log("error", d.findVariable("metrics.interval"), "must be smaller than the range")
            d.vars.Log("error", "metrics.interval", "must be smaller than the range")
        }
    }
}
func (d *Config) findVariable(name string) *variable {
    for _, v := range d.vars {
        if v.name == name {
            return v
        }
    }

    return nil
// Merge merges the values of the known environment variables into the configuration
func (d *Config) Merge() {
    d.vars.Merge()
}

// Messages calls for each log entry the provided callback. The level has the values 'error', 'warn', or 'info'.
// The name is the name of the configuration value, e.g. 'api.auth.enable'. The message is the log message.
func (d *Config) Messages(logger func(level string, v Variable, message string)) {
    for _, l := range d.logs {
        logger(l.level, l.variable, l.message)
    }
func (d *Config) Messages(logger func(level string, v vars.Variable, message string)) {
    d.vars.Messages(logger)
}

// HasErrors returns whether there are some error messages in the log.
func (d *Config) HasErrors() bool {
    for _, l := range d.logs {
        if l.level == "error" {
            return true
        }
    }

    return false
    return d.vars.HasErrors()
}

// Overrides returns a list of configuration value names that have been overriden by an environment variable.
func (d *Config) Overrides() []string {
    overrides := []string{}

    for _, v := range d.vars {
        if v.merged {
            overrides = append(overrides, v.name)
        }
    }

    return overrides
}

func copyStringSlice(src []string) []string {
    dst := make([]string, len(src))
    copy(dst, src)

    return dst
}

func copyStringMap(src map[string]string) map[string]string {
    dst := make(map[string]string)

    for k, v := range src {
        dst[k] = v
    }

    return dst
}

func copyTenantSlice(src []Auth0Tenant) []Auth0Tenant {
    dst := make([]Auth0Tenant, len(src))
    copy(dst, src)

    return dst
    return d.vars.Overrides()
}
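Taken together, the config.go changes above reduce Config to a thin wrapper around vars.Variables: lookup, mutation, merging, validation, message iteration and override reporting are all delegated. A short sketch of the resulting surface, using only the methods visible in the diff (error returns ignored for brevity):

```go
package main

import (
	"fmt"

	"github.com/datarhei/core/v16/config"
)

func main() {
	cfg := config.New()

	// Values are now addressed by their dotted names via the vars.Variables container.
	cfg.Set("host.name", "foo.com")
	name, _ := cfg.Get("host.name")
	fmt.Println(name) // "foo.com"

	fmt.Println(cfg.Overrides()) // names of values replaced by environment variables

	clone := cfg.Clone() // replaces the former NewConfigFrom(cfg)
	fmt.Println(clone.Version)
}
```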
@@ -3,7 +3,7 @@ package config
import (
    "testing"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func TestConfigCopy(t *testing.T) {
@@ -12,44 +12,41 @@ func TestConfigCopy(t *testing.T) {
    config1.Version = 42
    config1.DB.Dir = "foo"

    val1 := config1.findVariable("version")
    val2 := config1.findVariable("db.dir")
    val3 := config1.findVariable("host.name")
    val1, _ := config1.Get("version")
    val2, _ := config1.Get("db.dir")
    val3, _ := config1.Get("host.name")

    assert.Equal(t, "42", val1.value.String())
    assert.Equal(t, nil, val1.value.Validate())
    assert.Equal(t, false, val1.value.IsEmpty())
    require.Equal(t, "42", val1)
    require.Equal(t, "foo", val2)
    require.Equal(t, "(empty)", val3)

    assert.Equal(t, "foo", val2.value.String())
    assert.Equal(t, "(empty)", val3.value.String())
    config1.Set("host.name", "foo.com")
    val3, _ = config1.Get("host.name")
    require.Equal(t, "foo.com", val3)

    val3.value.Set("foo.com")
    config2 := config1.Clone()

    assert.Equal(t, "foo.com", val3.value.String())
    require.Equal(t, int64(42), config2.Version)
    require.Equal(t, "foo", config2.DB.Dir)
    require.Equal(t, []string{"foo.com"}, config2.Host.Name)

    config2 := NewConfigFrom(config1)
    config1.Set("version", "77")

    assert.Equal(t, int64(42), config2.Version)
    assert.Equal(t, "foo", config2.DB.Dir)
    assert.Equal(t, []string{"foo.com"}, config2.Host.Name)
    require.Equal(t, int64(77), config1.Version)
    require.Equal(t, int64(42), config2.Version)

    val1.value.Set("77")
    config1.Set("db.dir", "bar")

    assert.Equal(t, int64(77), config1.Version)
    assert.Equal(t, int64(42), config2.Version)

    val2.value.Set("bar")

    assert.Equal(t, "bar", config1.DB.Dir)
    assert.Equal(t, "foo", config2.DB.Dir)
    require.Equal(t, "bar", config1.DB.Dir)
    require.Equal(t, "foo", config2.DB.Dir)

    config2.DB.Dir = "baz"

    assert.Equal(t, "bar", config1.DB.Dir)
    assert.Equal(t, "baz", config2.DB.Dir)
    require.Equal(t, "bar", config1.DB.Dir)
    require.Equal(t, "baz", config2.DB.Dir)

    config1.Host.Name[0] = "bar.com"

    assert.Equal(t, []string{"bar.com"}, config1.Host.Name)
    assert.Equal(t, []string{"foo.com"}, config2.Host.Name)
    require.Equal(t, []string{"bar.com"}, config1.Host.Name)
    require.Equal(t, []string{"foo.com"}, config2.Host.Name)
}
30
config/copy/copy.go
Normal file
@@ -0,0 +1,30 @@
package copy

import "github.com/datarhei/core/v16/config/value"

func StringMap(src map[string]string) map[string]string {
	dst := make(map[string]string)

	for k, v := range src {
		dst[k] = v
	}

	return dst
}

func TenantSlice(src []value.Auth0Tenant) []value.Auth0Tenant {
	dst := Slice(src)

	for i, t := range src {
		dst[i].Users = Slice(t.Users)
	}

	return dst
}

func Slice[T any](src []T) []T {
	dst := make([]T, len(src))
	copy(dst, src)

	return dst
}
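copy.Slice is a small generic helper (Go 1.18+), so the former copyStringSlice/copyTenantSlice pair collapses into one function; TenantSlice additionally deep-copies each tenant's Users slice. A usage sketch, assuming the config/copy and config/value packages added in this commit and the Auth0Tenant fields they imply:

// Sketch: the copy helpers return independent copies, so mutating the
// original no longer leaks into a cloned configuration.
package main

import (
	"fmt"

	"github.com/datarhei/core/v16/config/copy"
	"github.com/datarhei/core/v16/config/value"
)

func main() {
	hosts := []string{"foo.com"}
	clone := copy.Slice(hosts)
	hosts[0] = "bar.com"
	fmt.Println(clone[0]) // still "foo.com"

	// Domain and Users are the Auth0Tenant fields this diff shows being used.
	tenants := []value.Auth0Tenant{{Domain: "example.eu.auth0.com", Users: []string{"admin"}}}
	tcopy := copy.TenantSlice(tenants)
	tenants[0].Users[0] = "someone-else"
	fmt.Println(tcopy[0].Users[0]) // still "admin"
}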
138
config/data.go
@@ -1,6 +1,12 @@
package config

import "time"
import (
	"time"

	"github.com/datarhei/core/v16/config/copy"
	v2 "github.com/datarhei/core/v16/config/v2"
	"github.com/datarhei/core/v16/config/value"
)

// Data is the actual configuration data for the app
type Data struct {
@@ -45,8 +51,8 @@ type Data struct {
			Secret string `json:"secret"`
		} `json:"jwt"`
		Auth0 struct {
			Enable  bool          `json:"enable"`
			Tenants []Auth0Tenant `json:"tenants"`
			Enable  bool                `json:"enable"`
			Tenants []value.Auth0Tenant `json:"tenants"`
		} `json:"auth0"`
	} `json:"auth"`
} `json:"api"`
@@ -159,8 +165,90 @@ type Data struct {
	} `json:"router"`
}

func NewV3FromV2(d *dataV2) (*Data, error) {
|
||||
data := &Data{}
|
||||
func UpgradeV2ToV3(d *v2.Data) (*Data, error) {
|
||||
cfg := New()
|
||||
|
||||
return MergeV2toV3(&cfg.Data, d)
|
||||
}
|
||||
|
||||
func MergeV2toV3(data *Data, d *v2.Data) (*Data, error) {
|
||||
data.CreatedAt = d.CreatedAt
|
||||
data.LoadedAt = d.LoadedAt
|
||||
data.UpdatedAt = d.UpdatedAt
|
||||
|
||||
data.ID = d.ID
|
||||
data.Name = d.Name
|
||||
data.Address = d.Address
|
||||
data.CheckForUpdates = d.CheckForUpdates
|
||||
|
||||
data.Log = d.Log
|
||||
data.DB = d.DB
|
||||
data.Host = d.Host
|
||||
data.API = d.API
|
||||
data.RTMP = d.RTMP
|
||||
data.SRT = d.SRT
|
||||
data.FFmpeg = d.FFmpeg
|
||||
data.Playout = d.Playout
|
||||
data.Debug = d.Debug
|
||||
data.Metrics = d.Metrics
|
||||
data.Sessions = d.Sessions
|
||||
data.Service = d.Service
|
||||
data.Router = d.Router
|
||||
|
||||
data.Log.Topics = copy.Slice(d.Log.Topics)
|
||||
|
||||
data.Host.Name = copy.Slice(d.Host.Name)
|
||||
|
||||
data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
|
||||
data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
|
||||
data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
|
||||
data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)
|
||||
|
||||
data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)
|
||||
|
||||
data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
|
||||
|
||||
data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
|
||||
data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
|
||||
data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
|
||||
data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)
|
||||
|
||||
data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)
|
||||
|
||||
data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)
|
||||
|
||||
data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
|
||||
data.Router.Routes = copy.StringMap(d.Router.Routes)
|
||||
|
||||
data.Storage.MimeTypes = d.Storage.MimeTypes
|
||||
|
||||
data.Storage.CORS = d.Storage.CORS
|
||||
data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
|
||||
|
||||
data.Storage.Memory = d.Storage.Memory
|
||||
|
||||
// Actual changes
|
||||
data.TLS.Enable = d.TLS.Enable
|
||||
data.TLS.Address = d.TLS.Address
|
||||
data.TLS.Auto = d.TLS.Auto
|
||||
data.TLS.CertFile = d.TLS.CertFile
|
||||
data.TLS.KeyFile = d.TLS.KeyFile
|
||||
|
||||
data.Storage.Disk.Dir = d.Storage.Disk.Dir
|
||||
data.Storage.Disk.Size = d.Storage.Disk.Size
|
||||
data.Storage.Disk.Cache.Enable = d.Storage.Disk.Cache.Enable
|
||||
data.Storage.Disk.Cache.Size = d.Storage.Disk.Cache.Size
|
||||
data.Storage.Disk.Cache.FileSize = d.Storage.Disk.Cache.FileSize
|
||||
data.Storage.Disk.Cache.TTL = d.Storage.Disk.Cache.TTL
|
||||
data.Storage.Disk.Cache.Types.Allow = copy.Slice(d.Storage.Disk.Cache.Types)
|
||||
|
||||
data.Version = 3
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func DowngradeV3toV2(d *Data) (*v2.Data, error) {
|
||||
data := &v2.Data{}
|
||||
|
||||
data.CreatedAt = d.CreatedAt
|
||||
data.LoadedAt = d.LoadedAt
|
||||
@@ -185,30 +273,30 @@ func NewV3FromV2(d *dataV2) (*Data, error) {
|
||||
data.Service = d.Service
|
||||
data.Router = d.Router
|
||||
|
||||
data.Log.Topics = copyStringSlice(d.Log.Topics)
|
||||
data.Log.Topics = copy.Slice(d.Log.Topics)
|
||||
|
||||
data.Host.Name = copyStringSlice(d.Host.Name)
|
||||
data.Host.Name = copy.Slice(d.Host.Name)
|
||||
|
||||
data.API.Access.HTTP.Allow = copyStringSlice(d.API.Access.HTTP.Allow)
|
||||
data.API.Access.HTTP.Block = copyStringSlice(d.API.Access.HTTP.Block)
|
||||
data.API.Access.HTTPS.Allow = copyStringSlice(d.API.Access.HTTPS.Allow)
|
||||
data.API.Access.HTTPS.Block = copyStringSlice(d.API.Access.HTTPS.Block)
|
||||
data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
|
||||
data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
|
||||
data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
|
||||
data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)
|
||||
|
||||
data.API.Auth.Auth0.Tenants = copyTenantSlice(d.API.Auth.Auth0.Tenants)
|
||||
data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)
|
||||
|
||||
data.Storage.CORS.Origins = copyStringSlice(d.Storage.CORS.Origins)
|
||||
data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
|
||||
|
||||
data.FFmpeg.Access.Input.Allow = copyStringSlice(d.FFmpeg.Access.Input.Allow)
|
||||
data.FFmpeg.Access.Input.Block = copyStringSlice(d.FFmpeg.Access.Input.Block)
|
||||
data.FFmpeg.Access.Output.Allow = copyStringSlice(d.FFmpeg.Access.Output.Allow)
|
||||
data.FFmpeg.Access.Output.Block = copyStringSlice(d.FFmpeg.Access.Output.Block)
|
||||
data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
|
||||
data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
|
||||
data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
|
||||
data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)
|
||||
|
||||
data.Sessions.IPIgnoreList = copyStringSlice(d.Sessions.IPIgnoreList)
|
||||
data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)
|
||||
|
||||
data.SRT.Log.Topics = copyStringSlice(d.SRT.Log.Topics)
|
||||
data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)
|
||||
|
||||
data.Router.BlockedPrefixes = copyStringSlice(d.Router.BlockedPrefixes)
|
||||
data.Router.Routes = copyStringMap(d.Router.Routes)
|
||||
data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
|
||||
data.Router.Routes = copy.StringMap(d.Router.Routes)
|
||||
|
||||
// Actual changes
|
||||
data.TLS.Enable = d.TLS.Enable
|
||||
@@ -216,12 +304,11 @@ func NewV3FromV2(d *dataV2) (*Data, error) {
|
||||
data.TLS.Auto = d.TLS.Auto
|
||||
data.TLS.CertFile = d.TLS.CertFile
|
||||
data.TLS.KeyFile = d.TLS.KeyFile
|
||||
data.TLS.Email = "cert@datarhei.com"
|
||||
|
||||
data.Storage.MimeTypes = d.Storage.MimeTypes
|
||||
|
||||
data.Storage.CORS = d.Storage.CORS
|
||||
data.Storage.CORS.Origins = copyStringSlice(d.Storage.CORS.Origins)
|
||||
data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
|
||||
|
||||
data.Storage.Memory = d.Storage.Memory
|
||||
|
||||
@@ -231,10 +318,9 @@ func NewV3FromV2(d *dataV2) (*Data, error) {
|
||||
data.Storage.Disk.Cache.Size = d.Storage.Disk.Cache.Size
|
||||
data.Storage.Disk.Cache.FileSize = d.Storage.Disk.Cache.FileSize
|
||||
data.Storage.Disk.Cache.TTL = d.Storage.Disk.Cache.TTL
|
||||
data.Storage.Disk.Cache.Types.Allow = copyStringSlice(d.Storage.Disk.Cache.Types)
|
||||
data.Storage.Disk.Cache.Types.Block = []string{}
|
||||
data.Storage.Disk.Cache.Types = copy.Slice(d.Storage.Disk.Cache.Types.Allow)
|
||||
|
||||
data.Version = 3
|
||||
data.Version = 2
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
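UpgradeV2ToV3 starts from config.New(), so fields that only exist in the v3 layout keep their registered defaults, while MergeV2toV3 copies everything the two layouts share and splits the disk cache types into an allow list. A sketch of upgrading a persisted v2 structure, assuming the packages as laid out in this diff; the literal values are illustrative:

// Sketch: upgrading a v2 configuration payload to the v3 layout,
// mirroring what the JSON store does further down in this commit.
package main

import (
	"fmt"

	"github.com/datarhei/core/v16/config"
	v2 "github.com/datarhei/core/v16/config/v2"
)

func main() {
	old := &v2.Data{}
	old.Version = 2
	old.Storage.Disk.Cache.Types = []string{".ts", ".m3u8"}

	data, err := config.UpgradeV2ToV3(old)
	if err != nil {
		fmt.Println("upgrade failed:", err)
		return
	}

	// v3 turns the flat cache type list into an allow list.
	fmt.Println(data.Version)                        // 3
	fmt.Println(data.Storage.Disk.Cache.Types.Allow) // [.ts .m3u8]
}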
71
config/ip.go
@@ -1,71 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SetPublicIPs will try to figure out the public IPs (v4 and v6)
|
||||
// we're running on. There's a timeout of max. 5 seconds to do it.
|
||||
// If it fails, the IPs will simply not be set.
|
||||
func (d *Config) SetPublicIPs() {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
ipv4 := ""
|
||||
ipv6 := ""
|
||||
|
||||
wg.Add(2)
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
ipv4 = doRequest("https://api.ipify.org")
|
||||
}()
|
||||
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
|
||||
ipv6 = doRequest("https://api6.ipify.org")
|
||||
}()
|
||||
|
||||
wg.Wait()
|
||||
|
||||
if len(ipv4) != 0 {
|
||||
d.Host.Name = append(d.Host.Name, ipv4)
|
||||
}
|
||||
|
||||
if len(ipv6) != 0 && ipv4 != ipv6 {
|
||||
d.Host.Name = append(d.Host.Name, ipv6)
|
||||
}
|
||||
}
|
||||
|
||||
func doRequest(url string) string {
|
||||
client := &http.Client{
|
||||
Timeout: 5 * time.Second,
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return ""
|
||||
}
|
||||
|
||||
return string(body)
|
||||
}
|
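config/ip.go is deleted here, so the Config type no longer queries its own public addresses. For reference, a standalone sketch of the behaviour the removed SetPublicIPs provided: ask the ipify endpoints for IPv4 and IPv6 concurrently under a hard timeout and keep whatever resolves. The endpoints are the ones the removed code used; everything else below is illustration only, not the replacement the core actually ships.

// Sketch of the removed public-IP detection, as a plain helper.
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"sync"
	"time"
)

func publicIPs(timeout time.Duration) []string {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	fetch := func(url string) string {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
		if err != nil {
			return ""
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return ""
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return ""
		}
		body, err := io.ReadAll(resp.Body)
		if err != nil {
			return ""
		}
		return string(body)
	}

	var wg sync.WaitGroup
	var ipv4, ipv6 string

	wg.Add(2)
	go func() { defer wg.Done(); ipv4 = fetch("https://api.ipify.org") }()
	go func() { defer wg.Done(); ipv6 = fetch("https://api6.ipify.org") }()
	wg.Wait()

	ips := []string{}
	if len(ipv4) != 0 {
		ips = append(ips, ipv4)
	}
	if len(ipv6) != 0 && ipv6 != ipv4 {
		ips = append(ips, ipv6)
	}
	return ips
}

func main() {
	fmt.Println(publicIPs(5 * time.Second))
}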
@@ -1,17 +1,21 @@
|
||||
package config
|
||||
package store
|
||||
|
||||
import "fmt"
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/datarhei/core/v16/config"
|
||||
)
|
||||
|
||||
type dummyStore struct {
|
||||
current *Config
|
||||
active *Config
|
||||
current *config.Config
|
||||
active *config.Config
|
||||
}
|
||||
|
||||
// NewDummyStore returns a store that returns the default config
|
||||
func NewDummyStore() Store {
|
||||
func NewDummy() Store {
|
||||
s := &dummyStore{}
|
||||
|
||||
cfg := New()
|
||||
cfg := config.New()
|
||||
|
||||
cfg.DB.Dir = "."
|
||||
cfg.FFmpeg.Binary = "true"
|
||||
@@ -20,7 +24,7 @@ func NewDummyStore() Store {
|
||||
|
||||
s.current = cfg
|
||||
|
||||
cfg = New()
|
||||
cfg = config.New()
|
||||
|
||||
cfg.DB.Dir = "."
|
||||
cfg.FFmpeg.Binary = "true"
|
||||
@@ -32,48 +36,34 @@ func NewDummyStore() Store {
|
||||
return s
|
||||
}
|
||||
|
||||
func (c *dummyStore) Get() *Config {
|
||||
cfg := New()
|
||||
|
||||
cfg.DB.Dir = "."
|
||||
cfg.FFmpeg.Binary = "true"
|
||||
cfg.Storage.Disk.Dir = "."
|
||||
cfg.Storage.MimeTypes = ""
|
||||
|
||||
return cfg
|
||||
func (c *dummyStore) Get() *config.Config {
|
||||
return c.current.Clone()
|
||||
}
|
||||
|
||||
func (c *dummyStore) Set(d *Config) error {
|
||||
func (c *dummyStore) Set(d *config.Config) error {
|
||||
d.Validate(true)
|
||||
|
||||
if d.HasErrors() {
|
||||
return fmt.Errorf("configuration data has errors after validation")
|
||||
}
|
||||
|
||||
c.current = NewConfigFrom(d)
|
||||
c.current = d.Clone()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *dummyStore) GetActive() *Config {
|
||||
cfg := New()
|
||||
|
||||
cfg.DB.Dir = "."
|
||||
cfg.FFmpeg.Binary = "true"
|
||||
cfg.Storage.Disk.Dir = "."
|
||||
cfg.Storage.MimeTypes = ""
|
||||
|
||||
return cfg
|
||||
func (c *dummyStore) GetActive() *config.Config {
|
||||
return c.active.Clone()
|
||||
}
|
||||
|
||||
func (c *dummyStore) SetActive(d *Config) error {
|
||||
func (c *dummyStore) SetActive(d *config.Config) error {
|
||||
d.Validate(true)
|
||||
|
||||
if d.HasErrors() {
|
||||
return fmt.Errorf("configuration data has errors after validation")
|
||||
}
|
||||
|
||||
c.active = NewConfigFrom(d)
|
||||
c.active = d.Clone()
|
||||
|
||||
return nil
|
||||
}
|
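With the move into config/store, NewDummy hands tests a Store backed by an in-memory default configuration, and Get/GetActive now return clones, so changes to a returned value only take effect once Set/SetActive is called. A test sketch, assuming the store package from this diff and an environment where the dummy's defaults (a `true` binary, the current directory) validate cleanly:

// Sketch: exercising the clone-on-read behaviour of the dummy store.
package store_test

import (
	"testing"

	"github.com/datarhei/core/v16/config/store"
	"github.com/stretchr/testify/require"
)

func TestDummyStoreRoundTrip(t *testing.T) {
	s := store.NewDummy()

	cfg := s.Get()
	cfg.Name = "my-instance"

	// The store only holds a clone, so nothing has changed yet.
	require.NotEqual(t, "my-instance", s.Get().Name)

	require.NoError(t, s.Set(cfg))
	require.Equal(t, "my-instance", s.Get().Name)
}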
@@ -1,4 +1,4 @@
|
||||
package config
|
||||
package store
|
||||
|
||||
import (
|
||||
gojson "encoding/json"
|
||||
@@ -7,6 +7,9 @@ import (
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/core/v16/config"
|
||||
v1 "github.com/datarhei/core/v16/config/v1"
|
||||
v2 "github.com/datarhei/core/v16/config/v2"
|
||||
"github.com/datarhei/core/v16/encoding/json"
|
||||
"github.com/datarhei/core/v16/io/file"
|
||||
)
|
||||
@@ -14,7 +17,7 @@ import (
|
||||
type jsonStore struct {
|
||||
path string
|
||||
|
||||
data map[string]*Config
|
||||
data map[string]*config.Config
|
||||
|
||||
reloadFn func()
|
||||
}
|
||||
@@ -23,14 +26,14 @@ type jsonStore struct {
|
||||
// back to the path. The returned error will be nil if everything went fine.
|
||||
// If the path doesn't exist, a default JSON config file will be written to that path.
|
||||
// The returned ConfigStore can be used to retrieve or write the config.
|
||||
func NewJSONStore(path string, reloadFn func()) (Store, error) {
|
||||
func NewJSON(path string, reloadFn func()) (Store, error) {
|
||||
c := &jsonStore{
|
||||
path: path,
|
||||
data: make(map[string]*Config),
|
||||
data: make(map[string]*config.Config),
|
||||
reloadFn: reloadFn,
|
||||
}
|
||||
|
||||
c.data["base"] = New()
|
||||
c.data["base"] = config.New()
|
||||
|
||||
if err := c.load(c.data["base"]); err != nil {
|
||||
return nil, fmt.Errorf("failed to read JSON from '%s': %w", path, err)
|
||||
@@ -43,16 +46,16 @@ func NewJSONStore(path string, reloadFn func()) (Store, error) {
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *jsonStore) Get() *Config {
|
||||
return NewConfigFrom(c.data["base"])
|
||||
func (c *jsonStore) Get() *config.Config {
|
||||
return c.data["base"].Clone()
|
||||
}
|
||||
|
||||
func (c *jsonStore) Set(d *Config) error {
|
||||
func (c *jsonStore) Set(d *config.Config) error {
|
||||
if d.HasErrors() {
|
||||
return fmt.Errorf("configuration data has errors after validation")
|
||||
}
|
||||
|
||||
data := NewConfigFrom(d)
|
||||
data := d.Clone()
|
||||
|
||||
data.CreatedAt = time.Now()
|
||||
|
||||
@@ -67,26 +70,26 @@ func (c *jsonStore) Set(d *Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *jsonStore) GetActive() *Config {
|
||||
func (c *jsonStore) GetActive() *config.Config {
|
||||
if x, ok := c.data["merged"]; ok {
|
||||
return NewConfigFrom(x)
|
||||
return x.Clone()
|
||||
}
|
||||
|
||||
if x, ok := c.data["base"]; ok {
|
||||
return NewConfigFrom(x)
|
||||
return x.Clone()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *jsonStore) SetActive(d *Config) error {
|
||||
func (c *jsonStore) SetActive(d *config.Config) error {
|
||||
d.Validate(true)
|
||||
|
||||
if d.HasErrors() {
|
||||
return fmt.Errorf("configuration data has errors after validation")
|
||||
}
|
||||
|
||||
c.data["merged"] = NewConfigFrom(d)
|
||||
c.data["merged"] = d.Clone()
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -101,7 +104,7 @@ func (c *jsonStore) Reload() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *jsonStore) load(config *Config) error {
|
||||
func (c *jsonStore) load(cfg *config.Config) error {
|
||||
if len(c.path) == 0 {
|
||||
return nil
|
||||
}
|
||||
@@ -115,7 +118,7 @@ func (c *jsonStore) load(config *Config) error {
|
||||
return err
|
||||
}
|
||||
|
||||
dataV3 := &Data{}
|
||||
dataV3 := &config.Data{}
|
||||
|
||||
version := DataVersion{}
|
||||
|
||||
@@ -124,29 +127,29 @@ func (c *jsonStore) load(config *Config) error {
|
||||
}
|
||||
|
||||
if version.Version == 1 {
|
||||
dataV1 := &dataV1{}
|
||||
dataV1 := &v1.Data{}
|
||||
|
||||
if err = gojson.Unmarshal(jsondata, dataV1); err != nil {
|
||||
return json.FormatError(jsondata, err)
|
||||
}
|
||||
|
||||
dataV2, err := NewV2FromV1(dataV1)
|
||||
dataV2, err := v2.UpgradeV1ToV2(dataV1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dataV3, err = NewV3FromV2(dataV2)
|
||||
dataV3, err = config.UpgradeV2ToV3(dataV2)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if version.Version == 2 {
|
||||
dataV2 := &dataV2{}
|
||||
dataV2 := &v2.Data{}
|
||||
|
||||
if err = gojson.Unmarshal(jsondata, dataV2); err != nil {
|
||||
return json.FormatError(jsondata, err)
|
||||
}
|
||||
|
||||
dataV3, err = NewV3FromV2(dataV2)
|
||||
dataV3, err = config.UpgradeV2ToV3(dataV2)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -156,15 +159,15 @@ func (c *jsonStore) load(config *Config) error {
|
||||
}
|
||||
}
|
||||
|
||||
config.Data = *dataV3
|
||||
cfg.Data = *dataV3
|
||||
|
||||
config.LoadedAt = time.Now()
|
||||
config.UpdatedAt = config.LoadedAt
|
||||
cfg.LoadedAt = time.Now()
|
||||
cfg.UpdatedAt = cfg.LoadedAt
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *jsonStore) store(data *Config) error {
|
||||
func (c *jsonStore) store(data *config.Config) error {
|
||||
data.CreatedAt = time.Now()
|
||||
|
||||
if len(c.path) == 0 {
|
@@ -1,23 +1,29 @@
package config
package store

import "github.com/datarhei/core/v16/config"

// Store is a store for the configuration data.
type Store interface {
	// Get the current configuration.
	Get() *Config
	Get() *config.Config

	// Set a new configuration for persistence.
	Set(data *Config) error
	Set(data *config.Config) error

	// GetActive returns the configuration that has been set as
	// active before, otherwise it returns nil.
	GetActive() *Config
	GetActive() *config.Config

	// SetActive will keep the given configuration
	// as active in memory. It can be retrieved later with GetActive()
	SetActive(data *Config) error
	SetActive(data *config.Config) error

	// Reload will reload the stored configuration. It has to make sure
	// that all affected components will receive their potentially
	// changed configuration.
	Reload() error
}

type DataVersion struct {
	Version int64 `json:"version"`
}
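The interface itself is unchanged apart from the package move. A sketch of the write path a caller (for example a config upload handler) might follow against any Store; the helper name is an assumption, only the Store methods and the Validate/HasErrors pattern come from this diff:

// Sketch: validate, persist, then activate a new configuration.
package main

import (
	"fmt"

	"github.com/datarhei/core/v16/config"
	"github.com/datarhei/core/v16/config/store"
)

func applyNewConfig(s store.Store, incoming *config.Config) error {
	incoming.Validate(true)
	if incoming.HasErrors() {
		return fmt.Errorf("refusing to store a configuration with errors")
	}

	// Persist it, then mark it as the configuration the running process uses.
	if err := s.Set(incoming); err != nil {
		return err
	}

	return s.SetActive(incoming)
}

func main() {
	s := store.NewDummy()
	cfg := s.Get()
	cfg.Name = "uploaded"

	if err := applyNewConfig(s, cfg); err != nil {
		fmt.Println(err)
	}
}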
844
config/types.go
@@ -1,844 +0,0 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/mail"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/core/v16/http/cors"
|
||||
)
|
||||
|
||||
type value interface {
|
||||
// String returns a string representation of the value.
|
||||
String() string
|
||||
|
||||
// Set a new value for the value. Returns an
|
||||
// error if the given string representation can't
|
||||
// be transformed to the value. Returns nil
|
||||
// if the new value has been set.
|
||||
Set(string) error
|
||||
|
||||
// Validate the value. The returned error will
|
||||
// indicate what is wrong with the current value.
|
||||
// Returns nil if the value is OK.
|
||||
Validate() error
|
||||
|
||||
// IsEmpty returns whether the value represents an empty
|
||||
// representation for that value.
|
||||
IsEmpty() bool
|
||||
}
|
||||
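This is the old, package-private value interface being deleted from config/types.go; its exported successors live in config/value, whose constructors (value.NewString, value.NewInt64, ...) appear in the v1 package later in this diff. For illustration, any type that offers these four methods can be registered as a configuration value. The duration type below is a hypothetical example in the style of the removed code, not part of the repository:

// Hypothetical value type satisfying the String/Set/Validate/IsEmpty contract.
package value

import (
	"fmt"
	"time"
)

type Duration time.Duration

func NewDuration(p *time.Duration, val time.Duration) *Duration {
	*p = val
	return (*Duration)(p)
}

func (d *Duration) Set(val string) error {
	v, err := time.ParseDuration(val)
	if err != nil {
		return err
	}
	*d = Duration(v)
	return nil
}

func (d *Duration) String() string {
	return time.Duration(*d).String()
}

func (d *Duration) Validate() error {
	if time.Duration(*d) < 0 {
		return fmt.Errorf("duration must not be negative")
	}
	return nil
}

func (d *Duration) IsEmpty() bool {
	return time.Duration(*d) == 0
}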
|
||||
// string
|
||||
|
||||
type stringValue string
|
||||
|
||||
func newStringValue(p *string, val string) *stringValue {
|
||||
*p = val
|
||||
return (*stringValue)(p)
|
||||
}
|
||||
|
||||
func (s *stringValue) Set(val string) error {
|
||||
*s = stringValue(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stringValue) String() string {
|
||||
return string(*s)
|
||||
}
|
||||
|
||||
func (s *stringValue) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stringValue) IsEmpty() bool {
|
||||
return len(string(*s)) == 0
|
||||
}
|
||||
|
||||
// address (host?:port)
|
||||
|
||||
type addressValue string
|
||||
|
||||
func newAddressValue(p *string, val string) *addressValue {
|
||||
*p = val
|
||||
return (*addressValue)(p)
|
||||
}
|
||||
|
||||
func (s *addressValue) Set(val string) error {
|
||||
// Check if the new value is only a port number
|
||||
re := regexp.MustCompile("^[0-9]+$")
|
||||
if re.MatchString(val) {
|
||||
val = ":" + val
|
||||
}
|
||||
|
||||
*s = addressValue(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *addressValue) String() string {
|
||||
return string(*s)
|
||||
}
|
||||
|
||||
func (s *addressValue) Validate() error {
|
||||
_, port, err := net.SplitHostPort(string(*s))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
re := regexp.MustCompile("^[0-9]+$")
|
||||
if !re.MatchString(port) {
|
||||
return fmt.Errorf("the port must be numerical")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *addressValue) IsEmpty() bool {
|
||||
return s.Validate() != nil
|
||||
}
|
||||
|
||||
// array of strings
|
||||
|
||||
type stringListValue struct {
|
||||
p *[]string
|
||||
separator string
|
||||
}
|
||||
|
||||
func newStringListValue(p *[]string, val []string, separator string) *stringListValue {
|
||||
v := &stringListValue{
|
||||
p: p,
|
||||
separator: separator,
|
||||
}
|
||||
*p = val
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *stringListValue) Set(val string) error {
|
||||
list := []string{}
|
||||
|
||||
for _, elm := range strings.Split(val, s.separator) {
|
||||
elm = strings.TrimSpace(elm)
|
||||
if len(elm) != 0 {
|
||||
list = append(list, elm)
|
||||
}
|
||||
}
|
||||
|
||||
*s.p = list
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stringListValue) String() string {
|
||||
if s.IsEmpty() {
|
||||
return "(empty)"
|
||||
}
|
||||
|
||||
return strings.Join(*s.p, s.separator)
|
||||
}
|
||||
|
||||
func (s *stringListValue) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stringListValue) IsEmpty() bool {
|
||||
return len(*s.p) == 0
|
||||
}
|
||||
|
||||
// array of auth0 tenants
|
||||
|
||||
type tenantListValue struct {
|
||||
p *[]Auth0Tenant
|
||||
separator string
|
||||
}
|
||||
|
||||
func newTenantListValue(p *[]Auth0Tenant, val []Auth0Tenant, separator string) *tenantListValue {
|
||||
v := &tenantListValue{
|
||||
p: p,
|
||||
separator: separator,
|
||||
}
|
||||
|
||||
*p = val
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *tenantListValue) Set(val string) error {
|
||||
list := []Auth0Tenant{}
|
||||
|
||||
for i, elm := range strings.Split(val, s.separator) {
|
||||
data, err := base64.StdEncoding.DecodeString(elm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err)
|
||||
}
|
||||
|
||||
t := Auth0Tenant{}
|
||||
if err := json.Unmarshal(data, &t); err != nil {
|
||||
return fmt.Errorf("invalid JSON in tenant %d: %w", i, err)
|
||||
}
|
||||
|
||||
list = append(list, t)
|
||||
}
|
||||
|
||||
*s.p = list
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *tenantListValue) String() string {
|
||||
if s.IsEmpty() {
|
||||
return "(empty)"
|
||||
}
|
||||
|
||||
list := []string{}
|
||||
|
||||
for _, t := range *s.p {
|
||||
list = append(list, fmt.Sprintf("%s (%d users)", t.Domain, len(t.Users)))
|
||||
}
|
||||
|
||||
return strings.Join(list, ",")
|
||||
}
|
||||
|
||||
func (s *tenantListValue) Validate() error {
|
||||
for i, t := range *s.p {
|
||||
if len(t.Domain) == 0 {
|
||||
return fmt.Errorf("the domain for tenant %d is missing", i)
|
||||
}
|
||||
|
||||
if len(t.Audience) == 0 {
|
||||
return fmt.Errorf("the audience for tenant %d is missing", i)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *tenantListValue) IsEmpty() bool {
|
||||
return len(*s.p) == 0
|
||||
}
|
||||
|
||||
// map of strings to strings
|
||||
|
||||
type stringMapStringValue struct {
|
||||
p *map[string]string
|
||||
}
|
||||
|
||||
func newStringMapStringValue(p *map[string]string, val map[string]string) *stringMapStringValue {
|
||||
v := &stringMapStringValue{
|
||||
p: p,
|
||||
}
|
||||
|
||||
if *p == nil {
|
||||
*p = make(map[string]string)
|
||||
}
|
||||
|
||||
if val != nil {
|
||||
*p = val
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *stringMapStringValue) Set(val string) error {
|
||||
mappings := make(map[string]string)
|
||||
|
||||
for _, elm := range strings.Split(val, " ") {
|
||||
elm = strings.TrimSpace(elm)
|
||||
if len(elm) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
mapping := strings.SplitN(elm, ":", 2)
|
||||
|
||||
mappings[mapping[0]] = mapping[1]
|
||||
}
|
||||
|
||||
*s.p = mappings
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stringMapStringValue) String() string {
|
||||
if s.IsEmpty() {
|
||||
return "(empty)"
|
||||
}
|
||||
|
||||
mappings := make([]string, len(*s.p))
|
||||
|
||||
i := 0
|
||||
for k, v := range *s.p {
|
||||
mappings[i] = k + ":" + v
|
||||
i++
|
||||
}
|
||||
|
||||
return strings.Join(mappings, " ")
|
||||
}
|
||||
|
||||
func (s *stringMapStringValue) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *stringMapStringValue) IsEmpty() bool {
|
||||
return len(*s.p) == 0
|
||||
}
|
||||
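The map value is parsed from a space-separated list of key:value pairs, split at the first colon, which is the format the route mappings use; note that an element without a colon would make the indexing of the split result panic in the code above. A standalone sketch of the same parsing with that edge handled; the paths are illustrative:

// Sketch: the textual form stringMapStringValue accepts.
package main

import (
	"fmt"
	"strings"
)

func parseRoutes(val string) map[string]string {
	routes := map[string]string{}
	for _, elm := range strings.Split(val, " ") {
		elm = strings.TrimSpace(elm)
		if len(elm) == 0 {
			continue
		}
		kv := strings.SplitN(elm, ":", 2)
		if len(kv) != 2 {
			continue // the original code would index out of range here
		}
		routes[kv[0]] = kv[1]
	}
	return routes
}

func main() {
	fmt.Println(parseRoutes("/ui:/path/to/ui /old.m3u8:/new.m3u8"))
	// map[/old.m3u8:/new.m3u8 /ui:/path/to/ui]
}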
|
||||
// array of CIDR notation IP adresses
|
||||
|
||||
type cidrListValue struct {
|
||||
p *[]string
|
||||
separator string
|
||||
}
|
||||
|
||||
func newCIDRListValue(p *[]string, val []string, separator string) *cidrListValue {
|
||||
v := &cidrListValue{
|
||||
p: p,
|
||||
separator: separator,
|
||||
}
|
||||
*p = val
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *cidrListValue) Set(val string) error {
|
||||
list := []string{}
|
||||
|
||||
for _, elm := range strings.Split(val, s.separator) {
|
||||
elm = strings.TrimSpace(elm)
|
||||
if len(elm) != 0 {
|
||||
list = append(list, elm)
|
||||
}
|
||||
}
|
||||
|
||||
*s.p = list
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *cidrListValue) String() string {
|
||||
if s.IsEmpty() {
|
||||
return "(empty)"
|
||||
}
|
||||
|
||||
return strings.Join(*s.p, s.separator)
|
||||
}
|
||||
|
||||
func (s *cidrListValue) Validate() error {
|
||||
for _, cidr := range *s.p {
|
||||
_, _, err := net.ParseCIDR(cidr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *cidrListValue) IsEmpty() bool {
|
||||
return len(*s.p) == 0
|
||||
}
|
||||
|
||||
// array of origins for CORS
|
||||
|
||||
type corsOriginsValue struct {
|
||||
p *[]string
|
||||
separator string
|
||||
}
|
||||
|
||||
func newCORSOriginsValue(p *[]string, val []string, separator string) *corsOriginsValue {
|
||||
v := &corsOriginsValue{
|
||||
p: p,
|
||||
separator: separator,
|
||||
}
|
||||
*p = val
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *corsOriginsValue) Set(val string) error {
|
||||
list := []string{}
|
||||
|
||||
for _, elm := range strings.Split(val, s.separator) {
|
||||
elm = strings.TrimSpace(elm)
|
||||
if len(elm) != 0 {
|
||||
list = append(list, elm)
|
||||
}
|
||||
}
|
||||
|
||||
*s.p = list
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *corsOriginsValue) String() string {
|
||||
if s.IsEmpty() {
|
||||
return "(empty)"
|
||||
}
|
||||
|
||||
return strings.Join(*s.p, s.separator)
|
||||
}
|
||||
|
||||
func (s *corsOriginsValue) Validate() error {
|
||||
return cors.Validate(*s.p)
|
||||
}
|
||||
|
||||
func (s *corsOriginsValue) IsEmpty() bool {
|
||||
return len(*s.p) == 0
|
||||
}
|
||||
|
||||
// boolean
|
||||
|
||||
type boolValue bool
|
||||
|
||||
func newBoolValue(p *bool, val bool) *boolValue {
|
||||
*p = val
|
||||
return (*boolValue)(p)
|
||||
}
|
||||
|
||||
func (b *boolValue) Set(val string) error {
|
||||
v, err := strconv.ParseBool(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*b = boolValue(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *boolValue) String() string {
|
||||
return strconv.FormatBool(bool(*b))
|
||||
}
|
||||
|
||||
func (b *boolValue) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *boolValue) IsEmpty() bool {
|
||||
return !bool(*b)
|
||||
}
|
||||
|
||||
// int
|
||||
|
||||
type intValue int
|
||||
|
||||
func newIntValue(p *int, val int) *intValue {
|
||||
*p = val
|
||||
return (*intValue)(p)
|
||||
}
|
||||
|
||||
func (i *intValue) Set(val string) error {
|
||||
v, err := strconv.Atoi(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*i = intValue(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *intValue) String() string {
|
||||
return strconv.Itoa(int(*i))
|
||||
}
|
||||
|
||||
func (i *intValue) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *intValue) IsEmpty() bool {
|
||||
return int(*i) == 0
|
||||
}
|
||||
|
||||
// int64
|
||||
|
||||
type int64Value int64
|
||||
|
||||
func newInt64Value(p *int64, val int64) *int64Value {
|
||||
*p = val
|
||||
return (*int64Value)(p)
|
||||
}
|
||||
|
||||
func (u *int64Value) Set(val string) error {
|
||||
v, err := strconv.ParseInt(val, 0, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*u = int64Value(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *int64Value) String() string {
|
||||
return strconv.FormatInt(int64(*u), 10)
|
||||
}
|
||||
|
||||
func (u *int64Value) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *int64Value) IsEmpty() bool {
|
||||
return int64(*u) == 0
|
||||
}
|
||||
|
||||
// uint64
|
||||
|
||||
type uint64Value uint64
|
||||
|
||||
func newUint64Value(p *uint64, val uint64) *uint64Value {
|
||||
*p = val
|
||||
return (*uint64Value)(p)
|
||||
}
|
||||
|
||||
func (u *uint64Value) Set(val string) error {
|
||||
v, err := strconv.ParseUint(val, 0, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*u = uint64Value(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *uint64Value) String() string {
|
||||
return strconv.FormatUint(uint64(*u), 10)
|
||||
}
|
||||
|
||||
func (u *uint64Value) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *uint64Value) IsEmpty() bool {
|
||||
return uint64(*u) == 0
|
||||
}
|
||||
|
||||
// network port
|
||||
|
||||
type portValue int
|
||||
|
||||
func newPortValue(p *int, val int) *portValue {
|
||||
*p = val
|
||||
return (*portValue)(p)
|
||||
}
|
||||
|
||||
func (i *portValue) Set(val string) error {
|
||||
v, err := strconv.Atoi(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*i = portValue(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *portValue) String() string {
|
||||
return strconv.Itoa(int(*i))
|
||||
}
|
||||
|
||||
func (i *portValue) Validate() error {
|
||||
val := int(*i)
|
||||
|
||||
if val < 0 || val >= (1<<16) {
|
||||
return fmt.Errorf("%d is not in the range of [0, %d]", val, 1<<16-1)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *portValue) IsEmpty() bool {
|
||||
return int(*i) == 0
|
||||
}
|
||||
|
||||
// must directory
|
||||
|
||||
type mustDirValue string
|
||||
|
||||
func newMustDirValue(p *string, val string) *mustDirValue {
|
||||
*p = val
|
||||
return (*mustDirValue)(p)
|
||||
}
|
||||
|
||||
func (u *mustDirValue) Set(val string) error {
|
||||
*u = mustDirValue(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *mustDirValue) String() string {
|
||||
return string(*u)
|
||||
}
|
||||
|
||||
func (u *mustDirValue) Validate() error {
|
||||
val := string(*u)
|
||||
|
||||
if len(strings.TrimSpace(val)) == 0 {
|
||||
return fmt.Errorf("path name must not be empty")
|
||||
}
|
||||
|
||||
finfo, err := os.Stat(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s does not exist", val)
|
||||
}
|
||||
|
||||
if !finfo.IsDir() {
|
||||
return fmt.Errorf("%s is not a directory", val)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *mustDirValue) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
}
|
||||
|
||||
// directory
|
||||
|
||||
type dirValue string
|
||||
|
||||
func newDirValue(p *string, val string) *dirValue {
|
||||
*p = val
|
||||
return (*dirValue)(p)
|
||||
}
|
||||
|
||||
func (u *dirValue) Set(val string) error {
|
||||
*u = dirValue(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *dirValue) String() string {
|
||||
return string(*u)
|
||||
}
|
||||
|
||||
func (u *dirValue) Validate() error {
|
||||
val := string(*u)
|
||||
|
||||
if len(strings.TrimSpace(val)) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
finfo, err := os.Stat(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s does not exist", val)
|
||||
}
|
||||
|
||||
if !finfo.IsDir() {
|
||||
return fmt.Errorf("%s is not a directory", val)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *dirValue) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
}
|
||||
|
||||
// executable
|
||||
|
||||
type execValue string
|
||||
|
||||
func newExecValue(p *string, val string) *execValue {
|
||||
*p = val
|
||||
return (*execValue)(p)
|
||||
}
|
||||
|
||||
func (u *execValue) Set(val string) error {
|
||||
*u = execValue(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *execValue) String() string {
|
||||
return string(*u)
|
||||
}
|
||||
|
||||
func (u *execValue) Validate() error {
|
||||
val := string(*u)
|
||||
|
||||
_, err := exec.LookPath(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s not found or is not executable", val)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *execValue) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
}
|
||||
|
||||
// regular file
|
||||
|
||||
type fileValue string
|
||||
|
||||
func newFileValue(p *string, val string) *fileValue {
|
||||
*p = val
|
||||
return (*fileValue)(p)
|
||||
}
|
||||
|
||||
func (u *fileValue) Set(val string) error {
|
||||
*u = fileValue(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *fileValue) String() string {
|
||||
return string(*u)
|
||||
}
|
||||
|
||||
func (u *fileValue) Validate() error {
|
||||
val := string(*u)
|
||||
|
||||
if len(val) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
finfo, err := os.Stat(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s does not exist", val)
|
||||
}
|
||||
|
||||
if !finfo.Mode().IsRegular() {
|
||||
return fmt.Errorf("%s is not a regular file", val)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *fileValue) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
}
|
||||
|
||||
// time
|
||||
|
||||
type timeValue time.Time
|
||||
|
||||
func newTimeValue(p *time.Time, val time.Time) *timeValue {
|
||||
*p = val
|
||||
return (*timeValue)(p)
|
||||
}
|
||||
|
||||
func (u *timeValue) Set(val string) error {
|
||||
v, err := time.Parse(time.RFC3339, val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*u = timeValue(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *timeValue) String() string {
|
||||
v := time.Time(*u)
|
||||
return v.Format(time.RFC3339)
|
||||
}
|
||||
|
||||
func (u *timeValue) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *timeValue) IsEmpty() bool {
|
||||
v := time.Time(*u)
|
||||
return v.IsZero()
|
||||
}
|
||||
|
||||
// url
|
||||
|
||||
type urlValue string
|
||||
|
||||
func newURLValue(p *string, val string) *urlValue {
|
||||
*p = val
|
||||
return (*urlValue)(p)
|
||||
}
|
||||
|
||||
func (u *urlValue) Set(val string) error {
|
||||
*u = urlValue(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *urlValue) String() string {
|
||||
return string(*u)
|
||||
}
|
||||
|
||||
func (u *urlValue) Validate() error {
|
||||
val := string(*u)
|
||||
|
||||
if len(val) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
URL, err := url.Parse(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s is not a valid URL", val)
|
||||
}
|
||||
|
||||
if len(URL.Scheme) == 0 || len(URL.Host) == 0 {
|
||||
return fmt.Errorf("%s is not a valid URL", val)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *urlValue) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
}
|
||||
|
||||
// absolute path
|
||||
|
||||
type absolutePathValue string
|
||||
|
||||
func newAbsolutePathValue(p *string, val string) *absolutePathValue {
|
||||
*p = filepath.Clean(val)
|
||||
return (*absolutePathValue)(p)
|
||||
}
|
||||
|
||||
func (s *absolutePathValue) Set(val string) error {
|
||||
*s = absolutePathValue(filepath.Clean(val))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *absolutePathValue) String() string {
|
||||
return string(*s)
|
||||
}
|
||||
|
||||
func (s *absolutePathValue) Validate() error {
|
||||
path := string(*s)
|
||||
|
||||
if !filepath.IsAbs(path) {
|
||||
return fmt.Errorf("%s is not an absolute path", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *absolutePathValue) IsEmpty() bool {
|
||||
return len(string(*s)) == 0
|
||||
}
|
||||
|
||||
// email address
|
||||
|
||||
type emailValue string
|
||||
|
||||
func newEmailValue(p *string, val string) *emailValue {
|
||||
*p = val
|
||||
return (*emailValue)(p)
|
||||
}
|
||||
|
||||
func (s *emailValue) Set(val string) error {
|
||||
addr, err := mail.ParseAddress(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*s = emailValue(addr.Address)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *emailValue) String() string {
|
||||
return string(*s)
|
||||
}
|
||||
|
||||
func (s *emailValue) Validate() error {
|
||||
if len(s.String()) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err := mail.ParseAddress(s.String())
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *emailValue) IsEmpty() bool {
|
||||
return len(string(*s)) == 0
|
||||
}
|
397
config/v1/config.go
Normal file
@@ -0,0 +1,397 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/core/v16/config/copy"
|
||||
"github.com/datarhei/core/v16/config/value"
|
||||
"github.com/datarhei/core/v16/config/vars"
|
||||
"github.com/datarhei/core/v16/math/rand"
|
||||
|
||||
haikunator "github.com/atrox/haikunatorgo/v2"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
const version int64 = 1
|
||||
|
||||
// Make sure that the config.Config interface is satisfied
|
||||
//var _ config.Config = &Config{}
|
||||
|
||||
// Config is a wrapper for Data
|
||||
type Config struct {
|
||||
vars vars.Variables
|
||||
|
||||
Data
|
||||
}
|
||||
|
||||
// New returns a Config which is initialized with its default values
|
||||
func New() *Config {
|
||||
cfg := &Config{}
|
||||
|
||||
cfg.init()
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
func (d *Config) Get(name string) (string, error) {
|
||||
return d.vars.Get(name)
|
||||
}
|
||||
|
||||
func (d *Config) Set(name, val string) error {
|
||||
return d.vars.Set(name, val)
|
||||
}
|
||||
|
||||
// NewConfigFrom returns a clone of a Config
|
||||
func (d *Config) Clone() *Config {
|
||||
data := New()
|
||||
|
||||
data.CreatedAt = d.CreatedAt
|
||||
data.LoadedAt = d.LoadedAt
|
||||
data.UpdatedAt = d.UpdatedAt
|
||||
|
||||
data.Version = d.Version
|
||||
data.ID = d.ID
|
||||
data.Name = d.Name
|
||||
data.Address = d.Address
|
||||
data.CheckForUpdates = d.CheckForUpdates
|
||||
|
||||
data.Log = d.Log
|
||||
data.DB = d.DB
|
||||
data.Host = d.Host
|
||||
data.API = d.API
|
||||
data.TLS = d.TLS
|
||||
data.Storage = d.Storage
|
||||
data.RTMP = d.RTMP
|
||||
data.SRT = d.SRT
|
||||
data.FFmpeg = d.FFmpeg
|
||||
data.Playout = d.Playout
|
||||
data.Debug = d.Debug
|
||||
data.Metrics = d.Metrics
|
||||
data.Sessions = d.Sessions
|
||||
data.Service = d.Service
|
||||
data.Router = d.Router
|
||||
|
||||
data.Log.Topics = copy.Slice(d.Log.Topics)
|
||||
|
||||
data.Host.Name = copy.Slice(d.Host.Name)
|
||||
|
||||
data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
|
||||
data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
|
||||
data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
|
||||
data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)
|
||||
|
||||
data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)
|
||||
|
||||
data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
|
||||
data.Storage.Disk.Cache.Types = copy.Slice(d.Storage.Disk.Cache.Types)
|
||||
|
||||
data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
|
||||
data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
|
||||
data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
|
||||
data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)
|
||||
|
||||
data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)
|
||||
|
||||
data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)
|
||||
|
||||
data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
|
||||
data.Router.Routes = copy.StringMap(d.Router.Routes)
|
||||
|
||||
data.vars.Transfer(&d.vars)
|
||||
|
||||
return data
|
||||
}
|
||||
|
||||
func (d *Config) init() {
|
||||
d.vars.Register(value.NewInt64(&d.Version, version), "version", "", nil, "Configuration file layout version", true, false)
|
||||
d.vars.Register(value.NewTime(&d.CreatedAt, time.Now()), "created_at", "", nil, "Configuration file creation time", false, false)
|
||||
d.vars.Register(value.NewString(&d.ID, uuid.New().String()), "id", "CORE_ID", nil, "ID for this instance", true, false)
|
||||
d.vars.Register(value.NewString(&d.Name, haikunator.New().Haikunate()), "name", "CORE_NAME", nil, "A human readable name for this instance", false, false)
|
||||
d.vars.Register(value.NewAddress(&d.Address, ":8080"), "address", "CORE_ADDRESS", nil, "HTTP listening address", false, false)
|
||||
d.vars.Register(value.NewBool(&d.CheckForUpdates, true), "update_check", "CORE_UPDATE_CHECK", nil, "Check for updates and send anonymized data", false, false)
|
||||
|
||||
// Log
|
||||
d.vars.Register(value.NewString(&d.Log.Level, "info"), "log.level", "CORE_LOG_LEVEL", nil, "Loglevel: silent, error, warn, info, debug", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.Log.Topics, []string{}, ","), "log.topics", "CORE_LOG_TOPICS", nil, "Show only selected log topics", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)
|
||||
|
||||
// DB
|
||||
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
|
||||
|
||||
// Host
|
||||
d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false)
|
||||
|
||||
// API
|
||||
d.vars.Register(value.NewBool(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only ready only access to the API", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Block, []string{}, ","), "api.access.http.block", "CORE_API_ACCESS_HTTP_BLOCK", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Allow, []string{}, ","), "api.access.https.allow", "CORE_API_ACCESS_HTTPS_ALLOW", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Block, []string{}, ","), "api.access.https.block", "CORE_API_ACCESS_HTTPS_BLOCK", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
|
||||
d.vars.Register(value.NewBool(&d.API.Auth.Enable, false), "api.auth.enable", "CORE_API_AUTH_ENABLE", nil, "Enable authentication for all clients", false, false)
|
||||
d.vars.Register(value.NewBool(&d.API.Auth.DisableLocalhost, false), "api.auth.disable_localhost", "CORE_API_AUTH_DISABLE_LOCALHOST", nil, "Disable authentication for clients from localhost", false, false)
|
||||
d.vars.Register(value.NewString(&d.API.Auth.Username, ""), "api.auth.username", "CORE_API_AUTH_USERNAME", []string{"RS_USERNAME"}, "Username", false, false)
|
||||
d.vars.Register(value.NewString(&d.API.Auth.Password, ""), "api.auth.password", "CORE_API_AUTH_PASSWORD", []string{"RS_PASSWORD"}, "Password", false, true)
|
||||
|
||||
// Auth JWT
|
||||
d.vars.Register(value.NewString(&d.API.Auth.JWT.Secret, rand.String(32)), "api.auth.jwt.secret", "CORE_API_AUTH_JWT_SECRET", nil, "JWT secret, leave empty for generating a random value", false, true)
|
||||
|
||||
// Auth Auth0
|
||||
d.vars.Register(value.NewBool(&d.API.Auth.Auth0.Enable, false), "api.auth.auth0.enable", "CORE_API_AUTH_AUTH0_ENABLE", nil, "Enable Auth0", false, false)
|
||||
d.vars.Register(value.NewTenantList(&d.API.Auth.Auth0.Tenants, []value.Auth0Tenant{}, ","), "api.auth.auth0.tenants", "CORE_API_AUTH_AUTH0_TENANTS", nil, "List of Auth0 tenants", false, false)
|
||||
|
||||
// TLS
|
||||
d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false)
|
||||
d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
|
||||
d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
|
||||
|
||||
// Storage
|
||||
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
|
||||
|
||||
// Storage (Disk)
|
||||
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Storage.Disk.Cache.TTL, 300), "storage.disk.cache.ttl_seconds", "CORE_STORAGE_DISK_CACHE_TTLSECONDS", nil, "Seconds to keep files in cache", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.FileSize, 1), "storage.disk.cache.max_file_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES", nil, "Max. file size to put in cache", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.Storage.Disk.Cache.Types, []string{}, " "), "storage.disk.cache.types", "CORE_STORAGE_DISK_CACHE_TYPES_ALLOW", []string{"CORE_STORAGE_DISK_CACHE_TYPES"}, "File extensions to cache, empty for all", false, false)
|
||||
|
||||
// Storage (Memory)
|
||||
d.vars.Register(value.NewBool(&d.Storage.Memory.Auth.Enable, true), "storage.memory.auth.enable", "CORE_STORAGE_MEMORY_AUTH_ENABLE", nil, "Enable basic auth for PUT,POST, and DELETE on /memfs", false, false)
|
||||
d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Username, "admin"), "storage.memory.auth.username", "CORE_STORAGE_MEMORY_AUTH_USERNAME", nil, "Username for Basic-Auth of /memfs", false, false)
|
||||
d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Password, rand.StringAlphanumeric(18)), "storage.memory.auth.password", "CORE_STORAGE_MEMORY_AUTH_PASSWORD", nil, "Password for Basic-Auth of /memfs", false, true)
|
||||
d.vars.Register(value.NewInt64(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. allowed megabytes for /memfs, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false)
|
||||
|
||||
// Storage (CORS)
|
||||
d.vars.Register(value.NewCORSOrigins(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false)
|
||||
|
||||
// RTMP
|
||||
d.vars.Register(value.NewBool(&d.RTMP.Enable, false), "rtmp.enable", "CORE_RTMP_ENABLE", nil, "Enable RTMP server", false, false)
|
||||
d.vars.Register(value.NewBool(&d.RTMP.EnableTLS, false), "rtmp.enable_tls", "CORE_RTMP_ENABLE_TLS", nil, "Enable RTMPS server instead of RTMP", false, false)
|
||||
d.vars.Register(value.NewAddress(&d.RTMP.Address, ":1935"), "rtmp.address", "CORE_RTMP_ADDRESS", nil, "RTMP server listen address", false, false)
|
||||
d.vars.Register(value.NewAbsolutePath(&d.RTMP.App, "/"), "rtmp.app", "CORE_RTMP_APP", nil, "RTMP app for publishing", false, false)
|
||||
d.vars.Register(value.NewString(&d.RTMP.Token, ""), "rtmp.token", "CORE_RTMP_TOKEN", nil, "RTMP token for publishing and playing", false, true)
|
||||
|
||||
// SRT
|
||||
d.vars.Register(value.NewBool(&d.SRT.Enable, false), "srt.enable", "CORE_SRT_ENABLE", nil, "Enable SRT server", false, false)
|
||||
d.vars.Register(value.NewAddress(&d.SRT.Address, ":6000"), "srt.address", "CORE_SRT_ADDRESS", nil, "SRT server listen address", false, false)
|
||||
d.vars.Register(value.NewString(&d.SRT.Passphrase, ""), "srt.passphrase", "CORE_SRT_PASSPHRASE", nil, "SRT encryption passphrase", false, true)
|
||||
d.vars.Register(value.NewString(&d.SRT.Token, ""), "srt.token", "CORE_SRT_TOKEN", nil, "SRT token for publishing and playing", false, true)
|
||||
d.vars.Register(value.NewBool(&d.SRT.Log.Enable, false), "srt.log.enable", "CORE_SRT_LOG_ENABLE", nil, "Enable SRT server logging", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)
|
||||
|
||||
// FFmpeg
|
||||
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
|
||||
d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expression to match against the output addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expression to match against the output addresses", false, false)
|
||||
d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxLines, 50), "ffmpeg.log.max_lines", "CORE_FFMPEG_LOG_MAXLINES", nil, "Number of latest log lines to keep for each process", false, false)
|
||||
d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxHistory, 3), "ffmpeg.log.max_history", "CORE_FFMPEG_LOG_MAXHISTORY", nil, "Number of latest logs to keep for each process", false, false)
|
||||
|
||||
// Playout
|
||||
d.vars.Register(value.NewBool(&d.Playout.Enable, false), "playout.enable", "CORE_PLAYOUT_ENABLE", nil, "Enable playout proxy where available", false, false)
|
||||
d.vars.Register(value.NewPort(&d.Playout.MinPort, 0), "playout.min_port", "CORE_PLAYOUT_MINPORT", nil, "Min. playout server port", false, false)
|
||||
d.vars.Register(value.NewPort(&d.Playout.MaxPort, 0), "playout.max_port", "CORE_PLAYOUT_MAXPORT", nil, "Max. playout server port", false, false)
|
||||
|
||||
// Debug
|
||||
d.vars.Register(value.NewBool(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false)
|
||||
|
||||
// Metrics
|
||||
d.vars.Register(value.NewBool(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Metrics.EnablePrometheus, false), "metrics.enable_prometheus", "CORE_METRICS_ENABLE_PROMETHEUS", nil, "Enable prometheus endpoint /metrics", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Metrics.Range, 300), "metrics.range_seconds", "CORE_METRICS_RANGE_SECONDS", nil, "Seconds to keep history data", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Metrics.Interval, 2), "metrics.interval_seconds", "CORE_METRICS_INTERVAL_SECONDS", nil, "Interval for collecting metrics", false, false)
|
||||
|
||||
// Sessions
|
||||
d.vars.Register(value.NewBool(&d.Sessions.Enable, true), "sessions.enable", "CORE_SESSIONS_ENABLE", nil, "Enable collecting HLS session stats for /memfs", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.Sessions.IPIgnoreList, []string{"127.0.0.1/32", "::1/128"}, ","), "sessions.ip_ignorelist", "CORE_SESSIONS_IP_IGNORELIST", nil, "List of IP ranges in CIDR notation to ignore", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Sessions.SessionTimeout, 30), "sessions.session_timeout_sec", "CORE_SESSIONS_SESSION_TIMEOUT_SEC", nil, "Timeout for an idle session", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Sessions.Persist, false), "sessions.persist", "CORE_SESSIONS_PERSIST", nil, "Whether to persist session history. Will be stored as sessions.json in db.dir", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Sessions.PersistInterval, 300), "sessions.persist_interval_sec", "CORE_SESSIONS_PERSIST_INTERVAL_SEC", nil, "Interval in seconds in which to persist the current session history", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Sessions.MaxBitrate, 0), "sessions.max_bitrate_mbit", "CORE_SESSIONS_MAXBITRATE_MBIT", nil, "Max. allowed outgoing bitrate in mbit/s, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Sessions.MaxSessions, 0), "sessions.max_sessions", "CORE_SESSIONS_MAXSESSIONS", nil, "Max. allowed number of simultaneous sessions, 0 for unlimited", false, false)
|
||||
|
||||
// Service
|
||||
d.vars.Register(value.NewBool(&d.Service.Enable, false), "service.enable", "CORE_SERVICE_ENABLE", nil, "Enable connecting to the Restreamer Service", false, false)
|
||||
d.vars.Register(value.NewString(&d.Service.Token, ""), "service.token", "CORE_SERVICE_TOKEN", nil, "Restreamer Service account token", false, true)
|
||||
d.vars.Register(value.NewURL(&d.Service.URL, "https://service.datarhei.com"), "service.url", "CORE_SERVICE_URL", nil, "URL of the Restreamer Service", false, false)
|
||||
|
||||
// Router
|
||||
d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
|
||||
d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
|
||||
d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
|
||||
}
|
||||
|
||||
// Validate validates the current state of the Config for completeness and sanity. Errors are
|
||||
// written to the log. Use resetLogs to reset the logs prior to validation.
|
||||
func (d *Config) Validate(resetLogs bool) {
|
||||
if resetLogs {
|
||||
d.vars.ResetLogs()
|
||||
}
|
||||
|
||||
if d.Version != version {
|
||||
d.vars.Log("error", "version", "unknown configuration layout version (found version %d, expecting version %d)", d.Version, version)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
d.vars.Validate()
|
||||
|
||||
// Individual sanity checks
|
||||
|
||||
// If HTTP Auth is enabled, check that the username and password are set
|
||||
if d.API.Auth.Enable {
|
||||
if len(d.API.Auth.Username) == 0 || len(d.API.Auth.Password) == 0 {
|
||||
d.vars.Log("error", "api.auth.enable", "api.auth.username and api.auth.password must be set")
|
||||
}
|
||||
}
|
||||
|
||||
// If Auth0 is enabled, check that domain, audience, and clientid are set
|
||||
if d.API.Auth.Auth0.Enable {
|
||||
if len(d.API.Auth.Auth0.Tenants) == 0 {
|
||||
d.vars.Log("error", "api.auth.auth0.enable", "at least one tenants must be set")
|
||||
}
|
||||
|
||||
for i, t := range d.API.Auth.Auth0.Tenants {
|
||||
if len(t.Domain) == 0 || len(t.Audience) == 0 || len(t.ClientID) == 0 {
|
||||
d.vars.Log("error", "api.auth.auth0.tenants", "domain, audience, and clientid must be set (tenant %d)", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If TLS is enabled and Let's Encrypt is disabled, require certfile and keyfile
|
||||
if d.TLS.Enable && !d.TLS.Auto {
|
||||
if len(d.TLS.CertFile) == 0 || len(d.TLS.KeyFile) == 0 {
|
||||
d.vars.Log("error", "tls.enable", "tls.certfile and tls.keyfile must be set")
|
||||
}
|
||||
}
|
||||
|
||||
// If TLS and Let's Encrypt certificate is enabled, we require a public hostname
|
||||
if d.TLS.Enable && d.TLS.Auto {
|
||||
if len(d.Host.Name) == 0 {
|
||||
d.vars.Log("error", "host.name", "a hostname must be set in order to get an automatic TLS certificate")
|
||||
} else {
|
||||
r := &net.Resolver{
|
||||
PreferGo: true,
|
||||
StrictErrors: true,
|
||||
}
|
||||
|
||||
for _, host := range d.Host.Name {
|
||||
// Don't lookup IP addresses
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
d.vars.Log("error", "host.name", "only host names are allowed if automatic TLS is enabled, but found IP address: %s", host)
|
||||
}
|
||||
|
||||
// Lookup host name with a timeout
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
|
||||
_, err := r.LookupHost(ctx, host)
|
||||
if err != nil {
|
||||
d.vars.Log("error", "host.name", "the host '%s' can't be resolved and will not work with automatic TLS", host)
|
||||
}
|
||||
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If TLS for RTMP is enabled, TLS must be enabled
|
||||
if d.RTMP.EnableTLS {
|
||||
if !d.RTMP.Enable {
|
||||
d.vars.Log("error", "rtmp.enable", "RTMP server must be enabled if RTMPS server is enabled")
|
||||
}
|
||||
|
||||
if !d.TLS.Enable {
|
||||
d.vars.Log("error", "rtmp.enable_tls", "RTMPS server can only be enabled if TLS is enabled")
|
||||
}
|
||||
}
|
||||
|
||||
// If CORE_MEMFS_USERNAME and CORE_MEMFS_PASSWORD are set, automatically activate/deactivate Basic-Auth for memfs
|
||||
if d.vars.IsMerged("storage.memory.auth.username") && d.vars.IsMerged("storage.memory.auth.password") {
|
||||
d.Storage.Memory.Auth.Enable = true
|
||||
|
||||
if len(d.Storage.Memory.Auth.Username) == 0 && len(d.Storage.Memory.Auth.Password) == 0 {
|
||||
d.Storage.Memory.Auth.Enable = false
|
||||
}
|
||||
}
|
||||
|
||||
// If Basic-Auth for memfs is enabled, check that the username and password are set
|
||||
if d.Storage.Memory.Auth.Enable {
|
||||
if len(d.Storage.Memory.Auth.Username) == 0 || len(d.Storage.Memory.Auth.Password) == 0 {
|
||||
d.vars.Log("error", "storage.memory.auth.enable", "storage.memory.auth.username and storage.memory.auth.password must be set")
|
||||
}
|
||||
}
|
||||
|
||||
// If playout is enabled, check that the port range is sane
|
||||
if d.Playout.Enable {
|
||||
if d.Playout.MinPort >= d.Playout.MaxPort {
|
||||
d.vars.Log("error", "playout.min_port", "must be bigger than playout.max_port")
|
||||
}
|
||||
}
|
||||
|
||||
// If cache is enabled, a valid TTL has to be set to a useful value
|
||||
if d.Storage.Disk.Cache.Enable && d.Storage.Disk.Cache.TTL < 0 {
|
||||
d.vars.Log("error", "storage.disk.cache.ttl_seconds", "must be equal or greater than 0")
|
||||
}
|
||||
|
||||
// If the stats are enabled, the session timeout has to be set to a useful value
|
||||
if d.Sessions.Enable && d.Sessions.SessionTimeout < 1 {
|
||||
d.vars.Log("error", "stats.session_timeout_sec", "must be equal or greater than 1")
|
||||
}
|
||||
|
||||
// If the stats and their persistence are enabled, the persist interval has to be set to a useful value
|
||||
if d.Sessions.Enable && d.Sessions.PersistInterval < 0 {
|
||||
d.vars.Log("error", "stats.persist_interval_sec", "must be at equal or greater than 0")
|
||||
}
|
||||
|
||||
// If the service is enabled, the token and endpoint have to be defined
|
||||
if d.Service.Enable {
|
||||
if len(d.Service.Token) == 0 {
|
||||
d.vars.Log("error", "service.token", "must be non-empty")
|
||||
}
|
||||
|
||||
if len(d.Service.URL) == 0 {
|
||||
d.vars.Log("error", "service.url", "must be non-empty")
|
||||
}
|
||||
}
|
||||
|
||||
// If historic metrics are enabled, the timerange and interval have to be valid
|
||||
if d.Metrics.Enable {
|
||||
if d.Metrics.Range <= 0 {
|
||||
d.vars.Log("error", "metrics.range", "must be greater 0")
|
||||
}
|
||||
|
||||
if d.Metrics.Interval <= 0 {
|
||||
d.vars.Log("error", "metrics.interval", "must be greater 0")
|
||||
}
|
||||
|
||||
if d.Metrics.Interval > d.Metrics.Range {
|
||||
d.vars.Log("error", "metrics.interval", "must be smaller than the range")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Config) Merge() {
|
||||
d.vars.Merge()
|
||||
}
|
||||
|
||||
func (d *Config) Messages(logger func(level string, v vars.Variable, message string)) {
|
||||
d.vars.Messages(logger)
|
||||
}
|
||||
|
||||
func (d *Config) HasErrors() bool {
|
||||
return d.vars.HasErrors()
|
||||
}
|
||||
|
||||
func (d *Config) Overrides() []string {
|
||||
return d.vars.Overrides()
|
||||
}
|
@@ -1,8 +1,12 @@
|
||||
package config
|
||||
package v1
|
||||
|
||||
import "time"
|
||||
import (
|
||||
"time"
|
||||
|
||||
type dataV1 struct {
|
||||
"github.com/datarhei/core/v16/config/value"
|
||||
)
|
||||
|
||||
type Data struct {
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
LoadedAt time.Time `json:"-"`
|
||||
UpdatedAt time.Time `json:"-"`
|
||||
@@ -44,8 +48,8 @@ type dataV1 struct {
|
||||
Secret string `json:"secret"`
|
||||
} `json:"jwt"`
|
||||
Auth0 struct {
|
||||
Enable bool `json:"enable"`
|
||||
Tenants []Auth0Tenant `json:"tenants"`
|
||||
Enable bool `json:"enable"`
|
||||
Tenants []value.Auth0Tenant `json:"tenants"`
|
||||
} `json:"auth0"`
|
||||
} `json:"auth"`
|
||||
} `json:"api"`
|
398  config/v2/config.go  Normal file
@@ -0,0 +1,398 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/core/v16/config/copy"
|
||||
"github.com/datarhei/core/v16/config/value"
|
||||
"github.com/datarhei/core/v16/config/vars"
|
||||
"github.com/datarhei/core/v16/math/rand"
|
||||
|
||||
haikunator "github.com/atrox/haikunatorgo/v2"
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
const version int64 = 2
|
||||
|
||||
// Make sure that the config.Config interface is satisfied
|
||||
//var _ config.Config = &Config{}
|
||||
|
||||
// Config is a wrapper for Data
|
||||
type Config struct {
|
||||
vars vars.Variables
|
||||
|
||||
Data
|
||||
}
|
||||
|
||||
// New returns a Config which is initialized with its default values
|
||||
func New() *Config {
|
||||
cfg := &Config{}
|
||||
|
||||
cfg.init()
|
||||
|
||||
return cfg
|
||||
}
|
||||
|
||||
func (d *Config) Get(name string) (string, error) {
|
||||
return d.vars.Get(name)
|
||||
}
|
||||
|
||||
func (d *Config) Set(name, val string) error {
|
||||
return d.vars.Set(name, val)
|
||||
}
|
||||
|
||||
// NewConfigFrom returns a clone of a Config
|
||||
func (d *Config) Clone() *Config {
|
||||
data := New()
|
||||
|
||||
data.CreatedAt = d.CreatedAt
|
||||
data.LoadedAt = d.LoadedAt
|
||||
data.UpdatedAt = d.UpdatedAt
|
||||
|
||||
data.Version = d.Version
|
||||
data.ID = d.ID
|
||||
data.Name = d.Name
|
||||
data.Address = d.Address
|
||||
data.CheckForUpdates = d.CheckForUpdates
|
||||
|
||||
data.Log = d.Log
|
||||
data.DB = d.DB
|
||||
data.Host = d.Host
|
||||
data.API = d.API
|
||||
data.TLS = d.TLS
|
||||
data.Storage = d.Storage
|
||||
data.RTMP = d.RTMP
|
||||
data.SRT = d.SRT
|
||||
data.FFmpeg = d.FFmpeg
|
||||
data.Playout = d.Playout
|
||||
data.Debug = d.Debug
|
||||
data.Metrics = d.Metrics
|
||||
data.Sessions = d.Sessions
|
||||
data.Service = d.Service
|
||||
data.Router = d.Router
|
||||
|
||||
data.Log.Topics = copy.Slice(d.Log.Topics)
|
||||
|
||||
data.Host.Name = copy.Slice(d.Host.Name)
|
||||
|
||||
data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
|
||||
data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
|
||||
data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
|
||||
data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)
|
||||
|
||||
data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)
|
||||
|
||||
data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
|
||||
data.Storage.Disk.Cache.Types = copy.Slice(d.Storage.Disk.Cache.Types)
|
||||
|
||||
data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
|
||||
data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
|
||||
data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
|
||||
data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)
|
||||
|
||||
data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)
|
||||
|
||||
data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)
|
||||
|
||||
data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
|
||||
data.Router.Routes = copy.StringMap(d.Router.Routes)
|
||||
|
||||
data.vars.Transfer(&d.vars)
|
||||
|
||||
return data
|
||||
}
|
||||
|
||||
func (d *Config) init() {
|
||||
d.vars.Register(value.NewInt64(&d.Version, version), "version", "", nil, "Configuration file layout version", true, false)
|
||||
d.vars.Register(value.NewTime(&d.CreatedAt, time.Now()), "created_at", "", nil, "Configuration file creation time", false, false)
|
||||
d.vars.Register(value.NewString(&d.ID, uuid.New().String()), "id", "CORE_ID", nil, "ID for this instance", true, false)
|
||||
d.vars.Register(value.NewString(&d.Name, haikunator.New().Haikunate()), "name", "CORE_NAME", nil, "A human readable name for this instance", false, false)
|
||||
d.vars.Register(value.NewAddress(&d.Address, ":8080"), "address", "CORE_ADDRESS", nil, "HTTP listening address", false, false)
|
||||
d.vars.Register(value.NewBool(&d.CheckForUpdates, true), "update_check", "CORE_UPDATE_CHECK", nil, "Check for updates and send anonymized data", false, false)
|
||||
|
||||
// Log
|
||||
d.vars.Register(value.NewString(&d.Log.Level, "info"), "log.level", "CORE_LOG_LEVEL", nil, "Loglevel: silent, error, warn, info, debug", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.Log.Topics, []string{}, ","), "log.topics", "CORE_LOG_TOPICS", nil, "Show only selected log topics", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Log.MaxLines, 1000), "log.max_lines", "CORE_LOG_MAXLINES", nil, "Number of latest log lines to keep in memory", false, false)
|
||||
|
||||
// DB
|
||||
d.vars.Register(value.NewMustDir(&d.DB.Dir, "./config"), "db.dir", "CORE_DB_DIR", nil, "Directory for holding the operational data", false, false)
|
||||
|
||||
// Host
|
||||
d.vars.Register(value.NewStringList(&d.Host.Name, []string{}, ","), "host.name", "CORE_HOST_NAME", nil, "Comma separated list of public host/domain names or IPs", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Host.Auto, true), "host.auto", "CORE_HOST_AUTO", nil, "Enable detection of public IP addresses", false, false)
|
||||
|
||||
// API
|
||||
d.vars.Register(value.NewBool(&d.API.ReadOnly, false), "api.read_only", "CORE_API_READ_ONLY", nil, "Allow only ready only access to the API", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Allow, []string{}, ","), "api.access.http.allow", "CORE_API_ACCESS_HTTP_ALLOW", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTP.Block, []string{}, ","), "api.access.http.block", "CORE_API_ACCESS_HTTP_BLOCK", nil, "List of IPs in CIDR notation (HTTP traffic)", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Allow, []string{}, ","), "api.access.https.allow", "CORE_API_ACCESS_HTTPS_ALLOW", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.API.Access.HTTPS.Block, []string{}, ","), "api.access.https.block", "CORE_API_ACCESS_HTTPS_BLOCK", nil, "List of IPs in CIDR notation (HTTPS traffic)", false, false)
|
||||
d.vars.Register(value.NewBool(&d.API.Auth.Enable, false), "api.auth.enable", "CORE_API_AUTH_ENABLE", nil, "Enable authentication for all clients", false, false)
|
||||
d.vars.Register(value.NewBool(&d.API.Auth.DisableLocalhost, false), "api.auth.disable_localhost", "CORE_API_AUTH_DISABLE_LOCALHOST", nil, "Disable authentication for clients from localhost", false, false)
|
||||
d.vars.Register(value.NewString(&d.API.Auth.Username, ""), "api.auth.username", "CORE_API_AUTH_USERNAME", []string{"RS_USERNAME"}, "Username", false, false)
|
||||
d.vars.Register(value.NewString(&d.API.Auth.Password, ""), "api.auth.password", "CORE_API_AUTH_PASSWORD", []string{"RS_PASSWORD"}, "Password", false, true)
|
||||
|
||||
// Auth JWT
|
||||
d.vars.Register(value.NewString(&d.API.Auth.JWT.Secret, rand.String(32)), "api.auth.jwt.secret", "CORE_API_AUTH_JWT_SECRET", nil, "JWT secret, leave empty for generating a random value", false, true)
|
||||
|
||||
// Auth Auth0
|
||||
d.vars.Register(value.NewBool(&d.API.Auth.Auth0.Enable, false), "api.auth.auth0.enable", "CORE_API_AUTH_AUTH0_ENABLE", nil, "Enable Auth0", false, false)
|
||||
d.vars.Register(value.NewTenantList(&d.API.Auth.Auth0.Tenants, []value.Auth0Tenant{}, ","), "api.auth.auth0.tenants", "CORE_API_AUTH_AUTH0_TENANTS", nil, "List of Auth0 tenants", false, false)
|
||||
|
||||
// TLS
|
||||
d.vars.Register(value.NewAddress(&d.TLS.Address, ":8181"), "tls.address", "CORE_TLS_ADDRESS", nil, "HTTPS listening address", false, false)
|
||||
d.vars.Register(value.NewBool(&d.TLS.Enable, false), "tls.enable", "CORE_TLS_ENABLE", nil, "Enable HTTPS", false, false)
|
||||
d.vars.Register(value.NewBool(&d.TLS.Auto, false), "tls.auto", "CORE_TLS_AUTO", nil, "Enable Let's Encrypt certificate", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.CertFile, ""), "tls.cert_file", "CORE_TLS_CERTFILE", nil, "Path to certificate file in PEM format", false, false)
|
||||
d.vars.Register(value.NewFile(&d.TLS.KeyFile, ""), "tls.key_file", "CORE_TLS_KEYFILE", nil, "Path to key file in PEM format", false, false)
|
||||
|
||||
// Storage
|
||||
d.vars.Register(value.NewFile(&d.Storage.MimeTypes, "./mime.types"), "storage.mimetypes_file", "CORE_STORAGE_MIMETYPES_FILE", []string{"CORE_MIMETYPES_FILE"}, "Path to file with mime-types", false, false)
|
||||
|
||||
// Storage (Disk)
|
||||
d.vars.Register(value.NewMustDir(&d.Storage.Disk.Dir, "./data"), "storage.disk.dir", "CORE_STORAGE_DISK_DIR", nil, "Directory on disk, exposed on /", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Storage.Disk.Size, 0), "storage.disk.max_size_mbytes", "CORE_STORAGE_DISK_MAXSIZEMBYTES", nil, "Max. allowed megabytes for storage.disk.dir, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Storage.Disk.Cache.Enable, true), "storage.disk.cache.enable", "CORE_STORAGE_DISK_CACHE_ENABLE", nil, "Enable cache for /", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.Size, 0), "storage.disk.cache.max_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXSIZEMBYTES", nil, "Max. allowed cache size, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Storage.Disk.Cache.TTL, 300), "storage.disk.cache.ttl_seconds", "CORE_STORAGE_DISK_CACHE_TTLSECONDS", nil, "Seconds to keep files in cache", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Storage.Disk.Cache.FileSize, 1), "storage.disk.cache.max_file_size_mbytes", "CORE_STORAGE_DISK_CACHE_MAXFILESIZEMBYTES", nil, "Max. file size to put in cache", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.Storage.Disk.Cache.Types, []string{}, " "), "storage.disk.cache.types", "CORE_STORAGE_DISK_CACHE_TYPES_ALLOW", []string{"CORE_STORAGE_DISK_CACHE_TYPES"}, "File extensions to cache, empty for all", false, false)
|
||||
|
||||
// Storage (Memory)
|
||||
d.vars.Register(value.NewBool(&d.Storage.Memory.Auth.Enable, true), "storage.memory.auth.enable", "CORE_STORAGE_MEMORY_AUTH_ENABLE", nil, "Enable basic auth for PUT,POST, and DELETE on /memfs", false, false)
|
||||
d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Username, "admin"), "storage.memory.auth.username", "CORE_STORAGE_MEMORY_AUTH_USERNAME", nil, "Username for Basic-Auth of /memfs", false, false)
|
||||
d.vars.Register(value.NewString(&d.Storage.Memory.Auth.Password, rand.StringAlphanumeric(18)), "storage.memory.auth.password", "CORE_STORAGE_MEMORY_AUTH_PASSWORD", nil, "Password for Basic-Auth of /memfs", false, true)
|
||||
d.vars.Register(value.NewInt64(&d.Storage.Memory.Size, 0), "storage.memory.max_size_mbytes", "CORE_STORAGE_MEMORY_MAXSIZEMBYTES", nil, "Max. allowed megabytes for /memfs, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Storage.Memory.Purge, false), "storage.memory.purge", "CORE_STORAGE_MEMORY_PURGE", nil, "Automatically remove the oldest files if /memfs is full", false, false)
|
||||
|
||||
// Storage (CORS)
|
||||
d.vars.Register(value.NewCORSOrigins(&d.Storage.CORS.Origins, []string{"*"}, ","), "storage.cors.origins", "CORE_STORAGE_CORS_ORIGINS", nil, "Allowed CORS origins for /memfs and /data", false, false)
|
||||
|
||||
// RTMP
|
||||
d.vars.Register(value.NewBool(&d.RTMP.Enable, false), "rtmp.enable", "CORE_RTMP_ENABLE", nil, "Enable RTMP server", false, false)
|
||||
d.vars.Register(value.NewBool(&d.RTMP.EnableTLS, false), "rtmp.enable_tls", "CORE_RTMP_ENABLE_TLS", nil, "Enable RTMPS server instead of RTMP", false, false)
|
||||
d.vars.Register(value.NewAddress(&d.RTMP.Address, ":1935"), "rtmp.address", "CORE_RTMP_ADDRESS", nil, "RTMP server listen address", false, false)
|
||||
d.vars.Register(value.NewAddress(&d.RTMP.AddressTLS, ":1936"), "rtmp.address_tls", "CORE_RTMP_ADDRESS_TLS", nil, "RTMPS server listen address", false, false)
|
||||
d.vars.Register(value.NewAbsolutePath(&d.RTMP.App, "/"), "rtmp.app", "CORE_RTMP_APP", nil, "RTMP app for publishing", false, false)
|
||||
d.vars.Register(value.NewString(&d.RTMP.Token, ""), "rtmp.token", "CORE_RTMP_TOKEN", nil, "RTMP token for publishing and playing", false, true)
|
||||
|
||||
// SRT
|
||||
d.vars.Register(value.NewBool(&d.SRT.Enable, false), "srt.enable", "CORE_SRT_ENABLE", nil, "Enable SRT server", false, false)
|
||||
d.vars.Register(value.NewAddress(&d.SRT.Address, ":6000"), "srt.address", "CORE_SRT_ADDRESS", nil, "SRT server listen address", false, false)
|
||||
d.vars.Register(value.NewString(&d.SRT.Passphrase, ""), "srt.passphrase", "CORE_SRT_PASSPHRASE", nil, "SRT encryption passphrase", false, true)
|
||||
d.vars.Register(value.NewString(&d.SRT.Token, ""), "srt.token", "CORE_SRT_TOKEN", nil, "SRT token for publishing and playing", false, true)
|
||||
d.vars.Register(value.NewBool(&d.SRT.Log.Enable, false), "srt.log.enable", "CORE_SRT_LOG_ENABLE", nil, "Enable SRT server logging", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.SRT.Log.Topics, []string{}, ","), "srt.log.topics", "CORE_SRT_LOG_TOPICS", nil, "List of topics to log", false, false)
|
||||
|
||||
// FFmpeg
|
||||
d.vars.Register(value.NewExec(&d.FFmpeg.Binary, "ffmpeg"), "ffmpeg.binary", "CORE_FFMPEG_BINARY", nil, "Path to ffmpeg binary", true, false)
|
||||
d.vars.Register(value.NewInt64(&d.FFmpeg.MaxProcesses, 0), "ffmpeg.max_processes", "CORE_FFMPEG_MAXPROCESSES", nil, "Max. allowed simultaneously running ffmpeg instances, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Allow, []string{}, " "), "ffmpeg.access.input.allow", "CORE_FFMPEG_ACCESS_INPUT_ALLOW", nil, "List of allowed expression to match against the input addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Input.Block, []string{}, " "), "ffmpeg.access.input.block", "CORE_FFMPEG_ACCESS_INPUT_BLOCK", nil, "List of blocked expression to match against the input addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Allow, []string{}, " "), "ffmpeg.access.output.allow", "CORE_FFMPEG_ACCESS_OUTPUT_ALLOW", nil, "List of allowed expression to match against the output addresses", false, false)
|
||||
d.vars.Register(value.NewStringList(&d.FFmpeg.Access.Output.Block, []string{}, " "), "ffmpeg.access.output.block", "CORE_FFMPEG_ACCESS_OUTPUT_BLOCK", nil, "List of blocked expression to match against the output addresses", false, false)
|
||||
d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxLines, 50), "ffmpeg.log.max_lines", "CORE_FFMPEG_LOG_MAXLINES", nil, "Number of latest log lines to keep for each process", false, false)
|
||||
d.vars.Register(value.NewInt(&d.FFmpeg.Log.MaxHistory, 3), "ffmpeg.log.max_history", "CORE_FFMPEG_LOG_MAXHISTORY", nil, "Number of latest logs to keep for each process", false, false)
|
||||
|
||||
// Playout
|
||||
d.vars.Register(value.NewBool(&d.Playout.Enable, false), "playout.enable", "CORE_PLAYOUT_ENABLE", nil, "Enable playout proxy where available", false, false)
|
||||
d.vars.Register(value.NewPort(&d.Playout.MinPort, 0), "playout.min_port", "CORE_PLAYOUT_MINPORT", nil, "Min. playout server port", false, false)
|
||||
d.vars.Register(value.NewPort(&d.Playout.MaxPort, 0), "playout.max_port", "CORE_PLAYOUT_MAXPORT", nil, "Max. playout server port", false, false)
|
||||
|
||||
// Debug
|
||||
d.vars.Register(value.NewBool(&d.Debug.Profiling, false), "debug.profiling", "CORE_DEBUG_PROFILING", nil, "Enable profiling endpoint on /profiling", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Debug.ForceGC, 0), "debug.force_gc", "CORE_DEBUG_FORCEGC", nil, "Number of seconds between forcing GC to return memory to the OS", false, false)
|
||||
|
||||
// Metrics
|
||||
d.vars.Register(value.NewBool(&d.Metrics.Enable, false), "metrics.enable", "CORE_METRICS_ENABLE", nil, "Enable collecting historic metrics data", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Metrics.EnablePrometheus, false), "metrics.enable_prometheus", "CORE_METRICS_ENABLE_PROMETHEUS", nil, "Enable prometheus endpoint /metrics", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Metrics.Range, 300), "metrics.range_seconds", "CORE_METRICS_RANGE_SECONDS", nil, "Seconds to keep history data", false, false)
|
||||
d.vars.Register(value.NewInt64(&d.Metrics.Interval, 2), "metrics.interval_seconds", "CORE_METRICS_INTERVAL_SECONDS", nil, "Interval for collecting metrics", false, false)
|
||||
|
||||
// Sessions
|
||||
d.vars.Register(value.NewBool(&d.Sessions.Enable, true), "sessions.enable", "CORE_SESSIONS_ENABLE", nil, "Enable collecting HLS session stats for /memfs", false, false)
|
||||
d.vars.Register(value.NewCIDRList(&d.Sessions.IPIgnoreList, []string{"127.0.0.1/32", "::1/128"}, ","), "sessions.ip_ignorelist", "CORE_SESSIONS_IP_IGNORELIST", nil, "List of IP ranges in CIDR notation to ignore", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Sessions.SessionTimeout, 30), "sessions.session_timeout_sec", "CORE_SESSIONS_SESSION_TIMEOUT_SEC", nil, "Timeout for an idle session", false, false)
|
||||
d.vars.Register(value.NewBool(&d.Sessions.Persist, false), "sessions.persist", "CORE_SESSIONS_PERSIST", nil, "Whether to persist session history. Will be stored as sessions.json in db.dir", false, false)
|
||||
d.vars.Register(value.NewInt(&d.Sessions.PersistInterval, 300), "sessions.persist_interval_sec", "CORE_SESSIONS_PERSIST_INTERVAL_SEC", nil, "Interval in seconds in which to persist the current session history", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Sessions.MaxBitrate, 0), "sessions.max_bitrate_mbit", "CORE_SESSIONS_MAXBITRATE_MBIT", nil, "Max. allowed outgoing bitrate in mbit/s, 0 for unlimited", false, false)
|
||||
d.vars.Register(value.NewUint64(&d.Sessions.MaxSessions, 0), "sessions.max_sessions", "CORE_SESSIONS_MAXSESSIONS", nil, "Max. allowed number of simultaneous sessions, 0 for unlimited", false, false)
|
||||
|
||||
// Service
|
||||
d.vars.Register(value.NewBool(&d.Service.Enable, false), "service.enable", "CORE_SERVICE_ENABLE", nil, "Enable connecting to the Restreamer Service", false, false)
|
||||
d.vars.Register(value.NewString(&d.Service.Token, ""), "service.token", "CORE_SERVICE_TOKEN", nil, "Restreamer Service account token", false, true)
|
||||
d.vars.Register(value.NewURL(&d.Service.URL, "https://service.datarhei.com"), "service.url", "CORE_SERVICE_URL", nil, "URL of the Restreamer Service", false, false)
|
||||
|
||||
// Router
|
||||
d.vars.Register(value.NewStringList(&d.Router.BlockedPrefixes, []string{"/api"}, ","), "router.blocked_prefixes", "CORE_ROUTER_BLOCKED_PREFIXES", nil, "List of path prefixes that can't be routed", false, false)
|
||||
d.vars.Register(value.NewStringMapString(&d.Router.Routes, nil), "router.routes", "CORE_ROUTER_ROUTES", nil, "List of route mappings", false, false)
|
||||
d.vars.Register(value.NewDir(&d.Router.UIPath, ""), "router.ui_path", "CORE_ROUTER_UI_PATH", nil, "Path to a directory holding UI files mounted as /ui", false, false)
|
||||
}
|
||||
|
||||
// Validate validates the current state of the Config for completeness and sanity. Errors are
|
||||
// written to the log. Use resetLogs to reset the logs prior to validation.
|
||||
func (d *Config) Validate(resetLogs bool) {
|
||||
if resetLogs {
|
||||
d.vars.ResetLogs()
|
||||
}
|
||||
|
||||
if d.Version != version {
|
||||
d.vars.Log("error", "version", "unknown configuration layout version (found version %d, expecting version %d)", d.Version, version)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
d.vars.Validate()
|
||||
|
||||
// Individual sanity checks
|
||||
|
||||
// If HTTP Auth is enabled, check that the username and password are set
|
||||
if d.API.Auth.Enable {
|
||||
if len(d.API.Auth.Username) == 0 || len(d.API.Auth.Password) == 0 {
|
||||
d.vars.Log("error", "api.auth.enable", "api.auth.username and api.auth.password must be set")
|
||||
}
|
||||
}
|
||||
|
||||
// If Auth0 is enabled, check that domain, audience, and clientid are set
|
||||
if d.API.Auth.Auth0.Enable {
|
||||
if len(d.API.Auth.Auth0.Tenants) == 0 {
|
||||
d.vars.Log("error", "api.auth.auth0.enable", "at least one tenants must be set")
|
||||
}
|
||||
|
||||
for i, t := range d.API.Auth.Auth0.Tenants {
|
||||
if len(t.Domain) == 0 || len(t.Audience) == 0 || len(t.ClientID) == 0 {
|
||||
d.vars.Log("error", "api.auth.auth0.tenants", "domain, audience, and clientid must be set (tenant %d)", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If TLS is enabled and Let's Encrypt is disabled, require certfile and keyfile
|
||||
if d.TLS.Enable && !d.TLS.Auto {
|
||||
if len(d.TLS.CertFile) == 0 || len(d.TLS.KeyFile) == 0 {
|
||||
d.vars.Log("error", "tls.enable", "tls.certfile and tls.keyfile must be set")
|
||||
}
|
||||
}
|
||||
|
||||
// If TLS and Let's Encrypt certificate is enabled, we require a public hostname
|
||||
if d.TLS.Enable && d.TLS.Auto {
|
||||
if len(d.Host.Name) == 0 {
|
||||
d.vars.Log("error", "host.name", "a hostname must be set in order to get an automatic TLS certificate")
|
||||
} else {
|
||||
r := &net.Resolver{
|
||||
PreferGo: true,
|
||||
StrictErrors: true,
|
||||
}
|
||||
|
||||
for _, host := range d.Host.Name {
|
||||
// Don't lookup IP addresses
|
||||
if ip := net.ParseIP(host); ip != nil {
|
||||
d.vars.Log("error", "host.name", "only host names are allowed if automatic TLS is enabled, but found IP address: %s", host)
|
||||
}
|
||||
|
||||
// Lookup host name with a timeout
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
|
||||
_, err := r.LookupHost(ctx, host)
|
||||
if err != nil {
|
||||
d.vars.Log("error", "host.name", "the host '%s' can't be resolved and will not work with automatic TLS", host)
|
||||
}
|
||||
|
||||
cancel()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If TLS for RTMP is enabled, TLS must be enabled
|
||||
if d.RTMP.EnableTLS {
|
||||
if !d.RTMP.Enable {
|
||||
d.vars.Log("error", "rtmp.enable", "RTMP server must be enabled if RTMPS server is enabled")
|
||||
}
|
||||
|
||||
if !d.TLS.Enable {
|
||||
d.vars.Log("error", "rtmp.enable_tls", "RTMPS server can only be enabled if TLS is enabled")
|
||||
}
|
||||
}
|
||||
|
||||
// If CORE_MEMFS_USERNAME and CORE_MEMFS_PASSWORD are set, automatically activate/deactivate Basic-Auth for memfs
|
||||
if d.vars.IsMerged("storage.memory.auth.username") && d.vars.IsMerged("storage.memory.auth.password") {
|
||||
d.Storage.Memory.Auth.Enable = true
|
||||
|
||||
if len(d.Storage.Memory.Auth.Username) == 0 && len(d.Storage.Memory.Auth.Password) == 0 {
|
||||
d.Storage.Memory.Auth.Enable = false
|
||||
}
|
||||
}
|
||||
|
||||
// If Basic-Auth for memfs is enabled, check that the username and password are set
|
||||
if d.Storage.Memory.Auth.Enable {
|
||||
if len(d.Storage.Memory.Auth.Username) == 0 || len(d.Storage.Memory.Auth.Password) == 0 {
|
||||
d.vars.Log("error", "storage.memory.auth.enable", "storage.memory.auth.username and storage.memory.auth.password must be set")
|
||||
}
|
||||
}
|
||||
|
||||
// If playout is enabled, check that the port range is sane
|
||||
if d.Playout.Enable {
|
||||
if d.Playout.MinPort >= d.Playout.MaxPort {
|
||||
d.vars.Log("error", "playout.min_port", "must be bigger than playout.max_port")
|
||||
}
|
||||
}
|
||||
|
||||
// If cache is enabled, a valid TTL has to be set to a useful value
|
||||
if d.Storage.Disk.Cache.Enable && d.Storage.Disk.Cache.TTL < 0 {
|
||||
d.vars.Log("error", "storage.disk.cache.ttl_seconds", "must be equal or greater than 0")
|
||||
}
|
||||
|
||||
// If the stats are enabled, the session timeout has to be set to a useful value
|
||||
if d.Sessions.Enable && d.Sessions.SessionTimeout < 1 {
|
||||
d.vars.Log("error", "stats.session_timeout_sec", "must be equal or greater than 1")
|
||||
}
|
||||
|
||||
// If the stats and their persistence are enabled, the persist interval has to be set to a useful value
|
||||
if d.Sessions.Enable && d.Sessions.PersistInterval < 0 {
|
||||
d.vars.Log("error", "stats.persist_interval_sec", "must be at equal or greater than 0")
|
||||
}
|
||||
|
||||
// If the service is enabled, the token and endpoint have to be defined
|
||||
if d.Service.Enable {
|
||||
if len(d.Service.Token) == 0 {
|
||||
d.vars.Log("error", "service.token", "must be non-empty")
|
||||
}
|
||||
|
||||
if len(d.Service.URL) == 0 {
|
||||
d.vars.Log("error", "service.url", "must be non-empty")
|
||||
}
|
||||
}
|
||||
|
||||
// If historic metrics are enabled, the timerange and interval have to be valid
|
||||
if d.Metrics.Enable {
|
||||
if d.Metrics.Range <= 0 {
|
||||
d.vars.Log("error", "metrics.range", "must be greater 0")
|
||||
}
|
||||
|
||||
if d.Metrics.Interval <= 0 {
|
||||
d.vars.Log("error", "metrics.interval", "must be greater 0")
|
||||
}
|
||||
|
||||
if d.Metrics.Interval > d.Metrics.Range {
|
||||
d.vars.Log("error", "metrics.interval", "must be smaller than the range")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Config) Merge() {
|
||||
d.vars.Merge()
|
||||
}
|
||||
|
||||
func (d *Config) Messages(logger func(level string, v vars.Variable, message string)) {
|
||||
d.vars.Messages(logger)
|
||||
}
|
||||
|
||||
func (d *Config) HasErrors() bool {
|
||||
return d.vars.HasErrors()
|
||||
}
|
||||
|
||||
func (d *Config) Overrides() []string {
|
||||
return d.vars.Overrides()
|
||||
}
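For orientation, a minimal usage sketch of the v2 Config based only on the methods defined above (New, Set, Merge, Validate, HasErrors, Messages). It assumes, as IsMerged and the caller in app/api.go suggest, that Merge applies environment-variable overrides; it is not part of this change:

package main

import (
	"fmt"

	v2 "github.com/datarhei/core/v16/config/v2"
	"github.com/datarhei/core/v16/config/vars"
)

func main() {
	cfg := v2.New() // defaults, including a generated ID and name

	// Override a value programmatically; variables such as CORE_METRICS_ENABLE
	// are assumed to be picked up by Merge().
	if err := cfg.Set("metrics.enable", "true"); err != nil {
		fmt.Println("set:", err)
	}

	cfg.Merge()        // merge environment overrides into the registered variables
	cfg.Validate(true) // reset earlier logs, then run the sanity checks

	if cfg.HasErrors() {
		cfg.Messages(func(level string, v vars.Variable, message string) {
			if level == "error" {
				fmt.Printf("%s: %s\n", v.Name, message)
			}
		})
	}
}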
|
@@ -1,4 +1,4 @@
|
||||
package config
|
||||
package v2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
@@ -6,9 +6,13 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/datarhei/core/v16/config/copy"
|
||||
v1 "github.com/datarhei/core/v16/config/v1"
|
||||
"github.com/datarhei/core/v16/config/value"
|
||||
)
|
||||
|
||||
type dataV2 struct {
|
||||
type Data struct {
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
LoadedAt time.Time `json:"-"`
|
||||
UpdatedAt time.Time `json:"-"`
|
||||
@@ -50,8 +54,8 @@ type dataV2 struct {
|
||||
Secret string `json:"secret"`
|
||||
} `json:"jwt"`
|
||||
Auth0 struct {
|
||||
Enable bool `json:"enable"`
|
||||
Tenants []Auth0Tenant `json:"tenants"`
|
||||
Enable bool `json:"enable"`
|
||||
Tenants []value.Auth0Tenant `json:"tenants"`
|
||||
} `json:"auth0"`
|
||||
} `json:"auth"`
|
||||
} `json:"api"`
|
||||
@@ -160,11 +164,15 @@ type dataV2 struct {
|
||||
} `json:"router"`
|
||||
}
|
||||
|
||||
// Migrate will migrate some settings, depending on the version it finds. Migrations
|
||||
// are only going upwards, i.e. from a lower version to a higher version.
|
||||
func NewV2FromV1(d *dataV1) (*dataV2, error) {
|
||||
data := &dataV2{}
|
||||
func UpgradeV1ToV2(d *v1.Data) (*Data, error) {
|
||||
cfg := New()
|
||||
|
||||
return MergeV1ToV2(&cfg.Data, d)
|
||||
}
|
||||
|
||||
// Migrate will migrate some settings, depending on the version it finds. Migrations
|
||||
// are only going upwards, i.e. from a lower version to a higher version.
|
||||
func MergeV1ToV2(data *Data, d *v1.Data) (*Data, error) {
|
||||
data.CreatedAt = d.CreatedAt
|
||||
data.LoadedAt = d.LoadedAt
|
||||
data.UpdatedAt = d.UpdatedAt
|
||||
@@ -189,30 +197,30 @@ func NewV2FromV1(d *dataV1) (*dataV2, error) {
|
||||
data.Service = d.Service
|
||||
data.Router = d.Router
|
||||
|
||||
data.Log.Topics = copyStringSlice(d.Log.Topics)
|
||||
data.Log.Topics = copy.Slice(d.Log.Topics)
|
||||
|
||||
data.Host.Name = copyStringSlice(d.Host.Name)
|
||||
data.Host.Name = copy.Slice(d.Host.Name)
|
||||
|
||||
data.API.Access.HTTP.Allow = copyStringSlice(d.API.Access.HTTP.Allow)
|
||||
data.API.Access.HTTP.Block = copyStringSlice(d.API.Access.HTTP.Block)
|
||||
data.API.Access.HTTPS.Allow = copyStringSlice(d.API.Access.HTTPS.Allow)
|
||||
data.API.Access.HTTPS.Block = copyStringSlice(d.API.Access.HTTPS.Block)
|
||||
data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
|
||||
data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
|
||||
data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
|
||||
data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)
|
||||
|
||||
data.API.Auth.Auth0.Tenants = copyTenantSlice(d.API.Auth.Auth0.Tenants)
|
||||
data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)
|
||||
|
||||
data.Storage.CORS.Origins = copyStringSlice(d.Storage.CORS.Origins)
|
||||
data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
|
||||
|
||||
data.FFmpeg.Access.Input.Allow = copyStringSlice(d.FFmpeg.Access.Input.Allow)
|
||||
data.FFmpeg.Access.Input.Block = copyStringSlice(d.FFmpeg.Access.Input.Block)
|
||||
data.FFmpeg.Access.Output.Allow = copyStringSlice(d.FFmpeg.Access.Output.Allow)
|
||||
data.FFmpeg.Access.Output.Block = copyStringSlice(d.FFmpeg.Access.Output.Block)
|
||||
data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
|
||||
data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
|
||||
data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
|
||||
data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)
|
||||
|
||||
data.Sessions.IPIgnoreList = copyStringSlice(d.Sessions.IPIgnoreList)
|
||||
data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)
|
||||
|
||||
data.SRT.Log.Topics = copyStringSlice(d.SRT.Log.Topics)
|
||||
data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)
|
||||
|
||||
data.Router.BlockedPrefixes = copyStringSlice(d.Router.BlockedPrefixes)
|
||||
data.Router.Routes = copyStringMap(d.Router.Routes)
|
||||
data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
|
||||
data.Router.Routes = copy.StringMap(d.Router.Routes)
|
||||
|
||||
// Actual changes
|
||||
data.RTMP.Enable = d.RTMP.Enable
|
||||
@@ -245,3 +253,67 @@ func NewV2FromV1(d *dataV1) (*dataV2, error) {
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func DowngradeV2toV1(d *Data) (*v1.Data, error) {
|
||||
data := &v1.Data{}
|
||||
|
||||
data.CreatedAt = d.CreatedAt
|
||||
data.LoadedAt = d.LoadedAt
|
||||
data.UpdatedAt = d.UpdatedAt
|
||||
|
||||
data.ID = d.ID
|
||||
data.Name = d.Name
|
||||
data.Address = d.Address
|
||||
data.CheckForUpdates = d.CheckForUpdates
|
||||
|
||||
data.Log = d.Log
|
||||
data.DB = d.DB
|
||||
data.Host = d.Host
|
||||
data.API = d.API
|
||||
data.TLS = d.TLS
|
||||
data.Storage = d.Storage
|
||||
data.SRT = d.SRT
|
||||
data.FFmpeg = d.FFmpeg
|
||||
data.Playout = d.Playout
|
||||
data.Debug = d.Debug
|
||||
data.Metrics = d.Metrics
|
||||
data.Sessions = d.Sessions
|
||||
data.Service = d.Service
|
||||
data.Router = d.Router
|
||||
|
||||
data.Log.Topics = copy.Slice(d.Log.Topics)
|
||||
|
||||
data.Host.Name = copy.Slice(d.Host.Name)
|
||||
|
||||
data.API.Access.HTTP.Allow = copy.Slice(d.API.Access.HTTP.Allow)
|
||||
data.API.Access.HTTP.Block = copy.Slice(d.API.Access.HTTP.Block)
|
||||
data.API.Access.HTTPS.Allow = copy.Slice(d.API.Access.HTTPS.Allow)
|
||||
data.API.Access.HTTPS.Block = copy.Slice(d.API.Access.HTTPS.Block)
|
||||
|
||||
data.API.Auth.Auth0.Tenants = copy.TenantSlice(d.API.Auth.Auth0.Tenants)
|
||||
|
||||
data.Storage.CORS.Origins = copy.Slice(d.Storage.CORS.Origins)
|
||||
|
||||
data.FFmpeg.Access.Input.Allow = copy.Slice(d.FFmpeg.Access.Input.Allow)
|
||||
data.FFmpeg.Access.Input.Block = copy.Slice(d.FFmpeg.Access.Input.Block)
|
||||
data.FFmpeg.Access.Output.Allow = copy.Slice(d.FFmpeg.Access.Output.Allow)
|
||||
data.FFmpeg.Access.Output.Block = copy.Slice(d.FFmpeg.Access.Output.Block)
|
||||
|
||||
data.Sessions.IPIgnoreList = copy.Slice(d.Sessions.IPIgnoreList)
|
||||
|
||||
data.SRT.Log.Topics = copy.Slice(d.SRT.Log.Topics)
|
||||
|
||||
data.Router.BlockedPrefixes = copy.Slice(d.Router.BlockedPrefixes)
|
||||
data.Router.Routes = copy.StringMap(d.Router.Routes)
|
||||
|
||||
// Actual changes
|
||||
data.RTMP.Enable = d.RTMP.Enable
|
||||
data.RTMP.EnableTLS = d.RTMP.EnableTLS
|
||||
data.RTMP.Address = d.RTMP.Address
|
||||
data.RTMP.App = d.RTMP.App
|
||||
data.RTMP.Token = d.RTMP.Token
|
||||
|
||||
data.Version = 1
|
||||
|
||||
return data, nil
|
||||
}
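A hedged sketch of how these migration helpers could be exercised; the field values are illustrative, and the comment about the resulting version relies on New() registering 2 as the default, since the tail of MergeV1ToV2 is elided above:

package main

import (
	"fmt"

	v1 "github.com/datarhei/core/v16/config/v1"
	v2 "github.com/datarhei/core/v16/config/v2"
)

func main() {
	old := &v1.Data{}
	old.Version = 1
	old.RTMP.Enable = true

	upgraded, err := v2.UpgradeV1ToV2(old)
	if err != nil {
		fmt.Println("upgrade:", err)
		return
	}
	fmt.Println("layout version after upgrade:", upgraded.Version) // presumably 2, the default set by New()

	downgraded, err := v2.DowngradeV2toV1(upgraded)
	if err != nil {
		fmt.Println("downgrade:", err)
		return
	}
	fmt.Println("layout version after downgrade:", downgraded.Version) // 1, set explicitly above
}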
|
86  config/value/auth0.go  Normal file
@@ -0,0 +1,86 @@
|
||||
package value
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// array of auth0 tenants
|
||||
|
||||
type Auth0Tenant struct {
|
||||
Domain string `json:"domain"`
|
||||
Audience string `json:"audience"`
|
||||
ClientID string `json:"clientid"`
|
||||
Users []string `json:"users"`
|
||||
}
|
||||
|
||||
type TenantList struct {
|
||||
p *[]Auth0Tenant
|
||||
separator string
|
||||
}
|
||||
|
||||
func NewTenantList(p *[]Auth0Tenant, val []Auth0Tenant, separator string) *TenantList {
|
||||
v := &TenantList{
|
||||
p: p,
|
||||
separator: separator,
|
||||
}
|
||||
|
||||
*p = val
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *TenantList) Set(val string) error {
|
||||
list := []Auth0Tenant{}
|
||||
|
||||
for i, elm := range strings.Split(val, s.separator) {
|
||||
data, err := base64.StdEncoding.DecodeString(elm)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid base64 encoding of tenant %d: %w", i, err)
|
||||
}
|
||||
|
||||
t := Auth0Tenant{}
|
||||
if err := json.Unmarshal(data, &t); err != nil {
|
||||
return fmt.Errorf("invalid JSON in tenant %d: %w", i, err)
|
||||
}
|
||||
|
||||
list = append(list, t)
|
||||
}
|
||||
|
||||
*s.p = list
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *TenantList) String() string {
|
||||
if s.IsEmpty() {
|
||||
return "(empty)"
|
||||
}
|
||||
|
||||
list := []string{}
|
||||
|
||||
for _, t := range *s.p {
|
||||
list = append(list, fmt.Sprintf("%s (%d users)", t.Domain, len(t.Users)))
|
||||
}
|
||||
|
||||
return strings.Join(list, ",")
|
||||
}
|
||||
|
||||
func (s *TenantList) Validate() error {
|
||||
for i, t := range *s.p {
|
||||
if len(t.Domain) == 0 {
|
||||
return fmt.Errorf("the domain for tenant %d is missing", i)
|
||||
}
|
||||
|
||||
if len(t.Audience) == 0 {
|
||||
return fmt.Errorf("the audience for tenant %d is missing", i)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *TenantList) IsEmpty() bool {
|
||||
return len(*s.p) == 0
|
||||
}
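Set() expects each list element to be a base64-encoded JSON tenant, with elements joined by the configured separator ("," in the v2 registration above). A small sketch of how a value for CORE_API_AUTH_AUTH0_TENANTS could be produced; the tenant data is made up:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/datarhei/core/v16/config/value"
)

func main() {
	tenants := []value.Auth0Tenant{
		{
			Domain:   "example.eu.auth0.com",
			Audience: "https://api.example.com",
			ClientID: "abc123",
			Users:    []string{"auth0|alice"},
		},
	}

	// Encode each tenant as base64(JSON) and join with the list separator.
	encoded := make([]string, 0, len(tenants))
	for _, t := range tenants {
		data, _ := json.Marshal(t)
		encoded = append(encoded, base64.StdEncoding.EncodeToString(data))
	}

	fmt.Println(strings.Join(encoded, ",")) // value for CORE_API_AUTH_AUTH0_TENANTS
}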
|
269  config/value/network.go  Normal file
@@ -0,0 +1,269 @@
|
||||
package value
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/mail"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/datarhei/core/v16/http/cors"
|
||||
)
|
||||
|
||||
// address (host?:port)
|
||||
|
||||
type Address string
|
||||
|
||||
func NewAddress(p *string, val string) *Address {
|
||||
*p = val
|
||||
return (*Address)(p)
|
||||
}
|
||||
|
||||
func (s *Address) Set(val string) error {
|
||||
// Check if the new value is only a port number
|
||||
re := regexp.MustCompile("^[0-9]+$")
|
||||
if re.MatchString(val) {
|
||||
val = ":" + val
|
||||
}
|
||||
|
||||
*s = Address(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Address) String() string {
|
||||
return string(*s)
|
||||
}
|
||||
|
||||
func (s *Address) Validate() error {
|
||||
_, port, err := net.SplitHostPort(string(*s))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
re := regexp.MustCompile("^[0-9]+$")
|
||||
if !re.MatchString(port) {
|
||||
return fmt.Errorf("the port must be numerical")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Address) IsEmpty() bool {
|
||||
return s.Validate() != nil
|
||||
}
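As Set() shows, a value consisting only of digits is treated as a bare port and prefixed with ":". A quick sketch, not part of the diff:

package main

import (
	"fmt"

	"github.com/datarhei/core/v16/config/value"
)

func main() {
	var addr string
	a := value.NewAddress(&addr, ":8080")

	_ = a.Set("9090") // digits only: expanded to ":9090"
	fmt.Println(addr, a.Validate() == nil)

	_ = a.Set("localhost:8181") // host:port is accepted as-is
	fmt.Println(addr, a.Validate() == nil)
}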
|
||||
|
||||
// array of CIDR notation IP addresses
|
||||
|
||||
type CIDRList struct {
|
||||
p *[]string
|
||||
separator string
|
||||
}
|
||||
|
||||
func NewCIDRList(p *[]string, val []string, separator string) *CIDRList {
|
||||
v := &CIDRList{
|
||||
p: p,
|
||||
separator: separator,
|
||||
}
|
||||
*p = val
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *CIDRList) Set(val string) error {
|
||||
list := []string{}
|
||||
|
||||
for _, elm := range strings.Split(val, s.separator) {
|
||||
elm = strings.TrimSpace(elm)
|
||||
if len(elm) != 0 {
|
||||
list = append(list, elm)
|
||||
}
|
||||
}
|
||||
|
||||
*s.p = list
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CIDRList) String() string {
|
||||
if s.IsEmpty() {
|
||||
return "(empty)"
|
||||
}
|
||||
|
||||
return strings.Join(*s.p, s.separator)
|
||||
}
|
||||
|
||||
func (s *CIDRList) Validate() error {
|
||||
for _, cidr := range *s.p {
|
||||
_, _, err := net.ParseCIDR(cidr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CIDRList) IsEmpty() bool {
|
||||
return len(*s.p) == 0
|
||||
}
|
||||
|
||||
// array of origins for CORS
|
||||
|
||||
type CORSOrigins struct {
|
||||
p *[]string
|
||||
separator string
|
||||
}
|
||||
|
||||
func NewCORSOrigins(p *[]string, val []string, separator string) *CORSOrigins {
|
||||
v := &CORSOrigins{
|
||||
p: p,
|
||||
separator: separator,
|
||||
}
|
||||
*p = val
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *CORSOrigins) Set(val string) error {
|
||||
list := []string{}
|
||||
|
||||
for _, elm := range strings.Split(val, s.separator) {
|
||||
elm = strings.TrimSpace(elm)
|
||||
if len(elm) != 0 {
|
||||
list = append(list, elm)
|
||||
}
|
||||
}
|
||||
|
||||
*s.p = list
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *CORSOrigins) String() string {
|
||||
if s.IsEmpty() {
|
||||
return "(empty)"
|
||||
}
|
||||
|
||||
return strings.Join(*s.p, s.separator)
|
||||
}
|
||||
|
||||
func (s *CORSOrigins) Validate() error {
|
||||
return cors.Validate(*s.p)
|
||||
}
|
||||
|
||||
func (s *CORSOrigins) IsEmpty() bool {
|
||||
return len(*s.p) == 0
|
||||
}
|
||||
|
||||
// network port
|
||||
|
||||
type Port int
|
||||
|
||||
func NewPort(p *int, val int) *Port {
|
||||
*p = val
|
||||
return (*Port)(p)
|
||||
}
|
||||
|
||||
func (i *Port) Set(val string) error {
|
||||
v, err := strconv.Atoi(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*i = Port(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *Port) String() string {
|
||||
return strconv.Itoa(int(*i))
|
||||
}
|
||||
|
||||
func (i *Port) Validate() error {
|
||||
val := int(*i)
|
||||
|
||||
if val < 0 || val >= (1<<16) {
|
||||
return fmt.Errorf("%d is not in the range of [0, %d]", val, 1<<16-1)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *Port) IsEmpty() bool {
|
||||
return int(*i) == 0
|
||||
}
|
||||
|
||||
// url
|
||||
|
||||
type URL string
|
||||
|
||||
func NewURL(p *string, val string) *URL {
|
||||
*p = val
|
||||
return (*URL)(p)
|
||||
}
|
||||
|
||||
func (u *URL) Set(val string) error {
|
||||
*u = URL(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *URL) String() string {
|
||||
return string(*u)
|
||||
}
|
||||
|
||||
func (u *URL) Validate() error {
|
||||
val := string(*u)
|
||||
|
||||
if len(val) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
URL, err := url.Parse(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s is not a valid URL", val)
|
||||
}
|
||||
|
||||
if len(URL.Scheme) == 0 || len(URL.Host) == 0 {
|
||||
return fmt.Errorf("%s is not a valid URL", val)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *URL) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
}
|
||||
|
||||
// email address
|
||||
|
||||
type Email string
|
||||
|
||||
func NewEmail(p *string, val string) *Email {
|
||||
*p = val
|
||||
return (*Email)(p)
|
||||
}
|
||||
|
||||
func (s *Email) Set(val string) error {
|
||||
addr, err := mail.ParseAddress(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*s = Email(addr.Address)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Email) String() string {
|
||||
return string(*s)
|
||||
}
|
||||
|
||||
func (s *Email) Validate() error {
|
||||
if len(s.String()) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
_, err := mail.ParseAddress(s.String())
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *Email) IsEmpty() bool {
|
||||
return len(string(*s)) == 0
|
||||
}
|
197  config/value/os.go  Normal file
@@ -0,0 +1,197 @@
|
||||
package value
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// must directory
|
||||
|
||||
type MustDir string
|
||||
|
||||
func NewMustDir(p *string, val string) *MustDir {
|
||||
*p = val
|
||||
return (*MustDir)(p)
|
||||
}
|
||||
|
||||
func (u *MustDir) Set(val string) error {
|
||||
*u = MustDir(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *MustDir) String() string {
|
||||
return string(*u)
|
||||
}
|
||||
|
||||
func (u *MustDir) Validate() error {
|
||||
val := string(*u)
|
||||
|
||||
if len(strings.TrimSpace(val)) == 0 {
|
||||
return fmt.Errorf("path name must not be empty")
|
||||
}
|
||||
|
||||
finfo, err := os.Stat(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s does not exist", val)
|
||||
}
|
||||
|
||||
if !finfo.IsDir() {
|
||||
return fmt.Errorf("%s is not a directory", val)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *MustDir) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
}
|
||||
|
||||
// directory
|
||||
|
||||
type Dir string
|
||||
|
||||
func NewDir(p *string, val string) *Dir {
|
||||
*p = val
|
||||
return (*Dir)(p)
|
||||
}
|
||||
|
||||
func (u *Dir) Set(val string) error {
|
||||
*u = Dir(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Dir) String() string {
|
||||
return string(*u)
|
||||
}
|
||||
|
||||
func (u *Dir) Validate() error {
|
||||
val := string(*u)
|
||||
|
||||
if len(strings.TrimSpace(val)) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
finfo, err := os.Stat(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s does not exist", val)
|
||||
}
|
||||
|
||||
if !finfo.IsDir() {
|
||||
return fmt.Errorf("%s is not a directory", val)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Dir) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
}
|
||||
|
||||
// executable
|
||||
|
||||
type Exec string
|
||||
|
||||
func NewExec(p *string, val string) *Exec {
|
||||
*p = val
|
||||
return (*Exec)(p)
|
||||
}
|
||||
|
||||
func (u *Exec) Set(val string) error {
|
||||
*u = Exec(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Exec) String() string {
|
||||
return string(*u)
|
||||
}
|
||||
|
||||
func (u *Exec) Validate() error {
|
||||
val := string(*u)
|
||||
|
||||
_, err := exec.LookPath(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s not found or is not executable", val)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Exec) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
}
|
||||
|
||||
// regular file
|
||||
|
||||
type File string
|
||||
|
||||
func NewFile(p *string, val string) *File {
|
||||
*p = val
|
||||
return (*File)(p)
|
||||
}
|
||||
|
||||
func (u *File) Set(val string) error {
|
||||
*u = File(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *File) String() string {
|
||||
return string(*u)
|
||||
}
|
||||
|
||||
func (u *File) Validate() error {
|
||||
val := string(*u)
|
||||
|
||||
if len(val) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
finfo, err := os.Stat(val)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s does not exist", val)
|
||||
}
|
||||
|
||||
if !finfo.Mode().IsRegular() {
|
||||
return fmt.Errorf("%s is not a regular file", val)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *File) IsEmpty() bool {
|
||||
return len(string(*u)) == 0
|
||||
}
|
||||
|
||||
// absolute path
|
||||
|
||||
type AbsolutePath string
|
||||
|
||||
func NewAbsolutePath(p *string, val string) *AbsolutePath {
|
||||
*p = filepath.Clean(val)
|
||||
return (*AbsolutePath)(p)
|
||||
}
|
||||
|
||||
func (s *AbsolutePath) Set(val string) error {
|
||||
*s = AbsolutePath(filepath.Clean(val))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *AbsolutePath) String() string {
|
||||
return string(*s)
|
||||
}
|
||||
|
||||
func (s *AbsolutePath) Validate() error {
|
||||
path := string(*s)
|
||||
|
||||
if !filepath.IsAbs(path) {
|
||||
return fmt.Errorf("%s is not an absolute path", path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *AbsolutePath) IsEmpty() bool {
|
||||
return len(string(*s)) == 0
|
||||
}
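The practical difference between Dir and MustDir lies in Validate(): an empty Dir passes, while a MustDir has to point at an existing directory. A brief sketch with illustrative paths:

package main

import (
	"fmt"

	"github.com/datarhei/core/v16/config/value"
)

func main() {
	var ui, db string

	d := value.NewDir(&ui, "")             // optional directory, e.g. router.ui_path
	m := value.NewMustDir(&db, "./config") // required directory, e.g. db.dir

	fmt.Println(d.Validate()) // nil: an empty Dir is allowed
	fmt.Println(m.Validate()) // nil only if ./config exists and is a directory
}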
|
264  config/value/primitives.go  Normal file
@@ -0,0 +1,264 @@
|
||||
package value
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// string
|
||||
|
||||
type String string
|
||||
|
||||
func NewString(p *string, val string) *String {
|
||||
*p = val
|
||||
return (*String)(p)
|
||||
}
|
||||
|
||||
func (s *String) Set(val string) error {
|
||||
*s = String(val)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *String) String() string {
|
||||
return string(*s)
|
||||
}
|
||||
|
||||
func (s *String) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *String) IsEmpty() bool {
|
||||
return len(string(*s)) == 0
|
||||
}
|
||||
|
||||
// array of strings
|
||||
|
||||
type StringList struct {
|
||||
p *[]string
|
||||
separator string
|
||||
}
|
||||
|
||||
func NewStringList(p *[]string, val []string, separator string) *StringList {
|
||||
v := &StringList{
|
||||
p: p,
|
||||
separator: separator,
|
||||
}
|
||||
*p = val
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *StringList) Set(val string) error {
|
||||
list := []string{}
|
||||
|
||||
for _, elm := range strings.Split(val, s.separator) {
|
||||
elm = strings.TrimSpace(elm)
|
||||
if len(elm) != 0 {
|
||||
list = append(list, elm)
|
||||
}
|
||||
}
|
||||
|
||||
*s.p = list
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *StringList) String() string {
|
||||
if s.IsEmpty() {
|
||||
return "(empty)"
|
||||
}
|
||||
|
||||
return strings.Join(*s.p, s.separator)
|
||||
}
|
||||
|
||||
func (s *StringList) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *StringList) IsEmpty() bool {
|
||||
return len(*s.p) == 0
|
||||
}
|
||||
|
||||
// map of strings to strings
|
||||
|
||||
type StringMapString struct {
|
||||
p *map[string]string
|
||||
}
|
||||
|
||||
func NewStringMapString(p *map[string]string, val map[string]string) *StringMapString {
|
||||
v := &StringMapString{
|
||||
p: p,
|
||||
}
|
||||
|
||||
if *p == nil {
|
||||
*p = make(map[string]string)
|
||||
}
|
||||
|
||||
if val != nil {
|
||||
*p = val
|
||||
}
|
||||
|
||||
return v
|
||||
}
|
||||
|
||||
func (s *StringMapString) Set(val string) error {
|
||||
mappings := make(map[string]string)
|
||||
|
||||
for _, elm := range strings.Split(val, " ") {
|
||||
elm = strings.TrimSpace(elm)
|
||||
if len(elm) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
mapping := strings.SplitN(elm, ":", 2)
if len(mapping) != 2 {
	// Skip entries without a ":" separator; indexing mapping[1] would otherwise panic.
	continue
}

mappings[mapping[0]] = mapping[1]
|
||||
}
|
||||
|
||||
*s.p = mappings
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *StringMapString) String() string {
|
||||
if s.IsEmpty() {
|
||||
return "(empty)"
|
||||
}
|
||||
|
||||
mappings := make([]string, len(*s.p))
|
||||
|
||||
i := 0
|
||||
for k, v := range *s.p {
|
||||
mappings[i] = k + ":" + v
|
||||
i++
|
||||
}
|
||||
|
||||
return strings.Join(mappings, " ")
|
||||
}
|
||||
|
||||
func (s *StringMapString) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *StringMapString) IsEmpty() bool {
|
||||
return len(*s.p) == 0
|
||||
}
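Set() parses space-separated "key:value" pairs, which matches the format of router.routes / CORE_ROUTER_ROUTES registered above. A short sketch with made-up routes:

package main

import (
	"fmt"

	"github.com/datarhei/core/v16/config/value"
)

func main() {
	var routes map[string]string
	v := value.NewStringMapString(&routes, nil)

	// Mappings are separated by spaces, each entry split once on ":".
	if err := v.Set("/ui:/path/to/ui /live/stream.m3u8:/memfs/stream.m3u8"); err != nil {
		fmt.Println("set:", err)
		return
	}

	fmt.Println(routes["/ui"]) // "/path/to/ui"
}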
|
||||
|
||||
// boolean
|
||||
|
||||
type Bool bool
|
||||
|
||||
func NewBool(p *bool, val bool) *Bool {
|
||||
*p = val
|
||||
return (*Bool)(p)
|
||||
}
|
||||
|
||||
func (b *Bool) Set(val string) error {
|
||||
v, err := strconv.ParseBool(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*b = Bool(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Bool) String() string {
|
||||
return strconv.FormatBool(bool(*b))
|
||||
}
|
||||
|
||||
func (b *Bool) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Bool) IsEmpty() bool {
|
||||
return !bool(*b)
|
||||
}
|
||||
|
||||
// int
|
||||
|
||||
type Int int
|
||||
|
||||
func NewInt(p *int, val int) *Int {
|
||||
*p = val
|
||||
return (*Int)(p)
|
||||
}
|
||||
|
||||
func (i *Int) Set(val string) error {
|
||||
v, err := strconv.Atoi(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*i = Int(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *Int) String() string {
|
||||
return strconv.Itoa(int(*i))
|
||||
}
|
||||
|
||||
func (i *Int) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (i *Int) IsEmpty() bool {
|
||||
return int(*i) == 0
|
||||
}
|
||||
|
||||
// int64
|
||||
|
||||
type Int64 int64
|
||||
|
||||
func NewInt64(p *int64, val int64) *Int64 {
|
||||
*p = val
|
||||
return (*Int64)(p)
|
||||
}
|
||||
|
||||
func (u *Int64) Set(val string) error {
|
||||
v, err := strconv.ParseInt(val, 0, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*u = Int64(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Int64) String() string {
|
||||
return strconv.FormatInt(int64(*u), 10)
|
||||
}
|
||||
|
||||
func (u *Int64) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Int64) IsEmpty() bool {
|
||||
return int64(*u) == 0
|
||||
}
|
||||
|
||||
// uint64
|
||||
|
||||
type Uint64 uint64
|
||||
|
||||
func NewUint64(p *uint64, val uint64) *Uint64 {
|
||||
*p = val
|
||||
return (*Uint64)(p)
|
||||
}
|
||||
|
||||
func (u *Uint64) Set(val string) error {
|
||||
v, err := strconv.ParseUint(val, 0, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*u = Uint64(v)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Uint64) String() string {
|
||||
return strconv.FormatUint(uint64(*u), 10)
|
||||
}
|
||||
|
||||
func (u *Uint64) Validate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (u *Uint64) IsEmpty() bool {
|
||||
return uint64(*u) == 0
|
||||
}
|
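As an aside on the parsing convention used by StringMapString.Set above: the input is a space-separated list of key:value pairs, and every call replaces the whole map (as written, Set assumes each element contains a colon). A minimal usage sketch, not part of the commit, assuming the import path used elsewhere in this change:

package main

import (
    "fmt"

    "github.com/datarhei/core/v16/config/value"
)

func main() {
    m := map[string]string{}

    // Bind the map and parse space-separated key:value pairs.
    v := value.NewStringMapString(&m, nil)
    if err := v.Set("foo:bar baz:qux"); err != nil {
        fmt.Println(err)
        return
    }

    fmt.Println(m["foo"], m["baz"]) // bar qux
    fmt.Println(v.IsEmpty())        // false
}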
35  config/value/time.go  Normal file
@@ -0,0 +1,35 @@
package value

import "time"

// time

type Time time.Time

func NewTime(p *time.Time, val time.Time) *Time {
    *p = val
    return (*Time)(p)
}

func (u *Time) Set(val string) error {
    v, err := time.Parse(time.RFC3339, val)
    if err != nil {
        return err
    }
    *u = Time(v)
    return nil
}

func (u *Time) String() string {
    v := time.Time(*u)
    return v.Format(time.RFC3339)
}

func (u *Time) Validate() error {
    return nil
}

func (u *Time) IsEmpty() bool {
    v := time.Time(*u)
    return v.IsZero()
}
21  config/value/value.go  Normal file
@@ -0,0 +1,21 @@
package value

type Value interface {
    // String returns a string representation of the value.
    String() string

    // Set a new value for the value. Returns an
    // error if the given string representation can't
    // be transformed to the value. Returns nil
    // if the new value has been set.
    Set(string) error

    // Validate the value. The returned error will
    // indicate what is wrong with the current value.
    // Returns nil if the value is OK.
    Validate() error

    // IsEmpty returns whether the value represents an empty
    // representation for that value.
    IsEmpty() bool
}
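Any configuration field type only has to provide these four methods. For orientation (not part of the commit), a hypothetical Port value that binds an int field and rejects out-of-range values could look like this:

package value

import (
    "fmt"
    "strconv"
)

// Port is a hypothetical example type; it does not exist in this commit.
// It binds an int field and validates the TCP/UDP port range.
type Port int

func NewPort(p *int, val int) *Port {
    *p = val
    return (*Port)(p)
}

func (p *Port) Set(val string) error {
    v, err := strconv.Atoi(val)
    if err != nil {
        return err
    }
    *p = Port(v)
    return nil
}

func (p *Port) String() string {
    return strconv.Itoa(int(*p))
}

func (p *Port) Validate() error {
    if *p < 0 || *p > 65535 {
        return fmt.Errorf("%d is not a valid port number", int(*p))
    }
    return nil
}

func (p *Port) IsEmpty() bool {
    return int(*p) == 0
}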
@@ -1,4 +1,4 @@
package config
package value

import (
    "testing"
@@ -9,7 +9,7 @@ import (
func TestIntValue(t *testing.T) {
    var i int

    ivar := newIntValue(&i, 11)
    ivar := NewInt(&i, 11)

    assert.Equal(t, "11", ivar.String())
    assert.Equal(t, nil, ivar.Validate())
@@ -34,16 +34,16 @@ type testdata struct {
func TestCopyStruct(t *testing.T) {
    data1 := testdata{}

    newIntValue(&data1.value1, 1)
    newIntValue(&data1.value2, 2)
    NewInt(&data1.value1, 1)
    NewInt(&data1.value2, 2)

    assert.Equal(t, int(1), data1.value1)
    assert.Equal(t, int(2), data1.value2)

    data2 := testdata{}

    val21 := newIntValue(&data2.value1, 3)
    val22 := newIntValue(&data2.value2, 4)
    val21 := NewInt(&data2.value1, 3)
    val22 := NewInt(&data2.value2, 4)

    assert.Equal(t, int(3), data2.value1)
    assert.Equal(t, int(4), data2.value2)
216  config/vars/vars.go  Normal file
@@ -0,0 +1,216 @@
package vars

import (
    "fmt"
    "os"

    "github.com/datarhei/core/v16/config/value"
)

type variable struct {
    value       value.Value // The actual value
    defVal      string      // The default value in string representation
    name        string      // A name for this value
    envName     string      // The environment variable that corresponds to this value
    envAltNames []string    // Alternative environment variable names
    description string      // A description for this value
    required    bool        // Whether a non-empty value is required
    disguise    bool        // Whether the value should be disguised if printed
    merged      bool        // Whether this value has been replaced by its corresponding environment variable
}

type Variable struct {
    Value       string
    Name        string
    EnvName     string
    Description string
    Merged      bool
}

type message struct {
    message  string   // The log message
    variable Variable // The config field this message refers to
    level    string   // The loglevel for this message
}

type Variables struct {
    vars []*variable
    logs []message
}

func (vs *Variables) Register(val value.Value, name, envName string, envAltNames []string, description string, required, disguise bool) {
    vs.vars = append(vs.vars, &variable{
        value:       val,
        defVal:      val.String(),
        name:        name,
        envName:     envName,
        envAltNames: envAltNames,
        description: description,
        required:    required,
        disguise:    disguise,
    })
}

func (vs *Variables) Transfer(vss *Variables) {
    for _, v := range vs.vars {
        if vss.IsMerged(v.name) {
            v.merged = true
        }
    }
}

func (vs *Variables) SetDefault(name string) {
    v := vs.findVariable(name)
    if v == nil {
        return
    }

    v.value.Set(v.defVal)
}

func (vs *Variables) Get(name string) (string, error) {
    v := vs.findVariable(name)
    if v == nil {
        return "", fmt.Errorf("variable not found")
    }

    return v.value.String(), nil
}

func (vs *Variables) Set(name, val string) error {
    v := vs.findVariable(name)
    if v == nil {
        return fmt.Errorf("variable not found")
    }

    return v.value.Set(val)
}

func (vs *Variables) Log(level, name string, format string, args ...interface{}) {
    v := vs.findVariable(name)
    if v == nil {
        return
    }

    variable := Variable{
        Value:       v.value.String(),
        Name:        v.name,
        EnvName:     v.envName,
        Description: v.description,
        Merged:      v.merged,
    }

    if v.disguise {
        variable.Value = "***"
    }

    l := message{
        message:  fmt.Sprintf(format, args...),
        variable: variable,
        level:    level,
    }

    vs.logs = append(vs.logs, l)
}

func (vs *Variables) Merge() {
    for _, v := range vs.vars {
        if len(v.envName) == 0 {
            continue
        }

        var envval string
        var ok bool

        envval, ok = os.LookupEnv(v.envName)
        if !ok {
            foundAltName := false

            for _, envName := range v.envAltNames {
                envval, ok = os.LookupEnv(envName)
                if ok {
                    foundAltName = true
                    vs.Log("warn", v.name, "deprecated name, please use %s", v.envName)
                    break
                }
            }

            if !foundAltName {
                continue
            }
        }

        err := v.value.Set(envval)
        if err != nil {
            vs.Log("error", v.name, "%s", err.Error())
        }

        v.merged = true
    }
}

func (vs *Variables) IsMerged(name string) bool {
    v := vs.findVariable(name)
    if v == nil {
        return false
    }

    return v.merged
}

func (vs *Variables) Validate() {
    for _, v := range vs.vars {
        vs.Log("info", v.name, "%s", "")

        err := v.value.Validate()
        if err != nil {
            vs.Log("error", v.name, "%s", err.Error())
        }

        if v.required && v.value.IsEmpty() {
            vs.Log("error", v.name, "a value is required")
        }
    }
}

func (vs *Variables) ResetLogs() {
    vs.logs = nil
}

func (vs *Variables) Messages(logger func(level string, v Variable, message string)) {
    for _, l := range vs.logs {
        logger(l.level, l.variable, l.message)
    }
}

func (vs *Variables) HasErrors() bool {
    for _, l := range vs.logs {
        if l.level == "error" {
            return true
        }
    }

    return false
}

func (vs *Variables) Overrides() []string {
    overrides := []string{}

    for _, v := range vs.vars {
        if v.merged {
            overrides = append(overrides, v.name)
        }
    }

    return overrides
}

func (vs *Variables) findVariable(name string) *variable {
    for _, v := range vs.vars {
        if v.name == name {
            return v
        }
    }

    return nil
}
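A short usage sketch (illustration only, not from the commit) of how the pieces above fit together: register a value with its environment variable name, merge the environment on top of the registered default, validate, and read back the collected messages. The variable name CORE_EXAMPLE_NAME is made up for this sketch.

package main

import (
    "fmt"
    "os"

    "github.com/datarhei/core/v16/config/value"
    "github.com/datarhei/core/v16/config/vars"
)

func main() {
    name := ""

    v := vars.Variables{}

    // Register the field with its default value, env name, alternative env names,
    // description, required flag, and disguise flag.
    v.Register(value.NewString(&name, "core"), "name", "CORE_EXAMPLE_NAME", nil, "a name", true, false)

    // Environment variables win over the registered default.
    os.Setenv("CORE_EXAMPLE_NAME", "from-env")
    v.Merge()

    // Validate logs one info message per variable, plus errors for invalid
    // values or missing required values.
    v.Validate()

    v.Messages(func(level string, variable vars.Variable, message string) {
        fmt.Println(level, variable.Name, variable.Value, message)
    })

    fmt.Println(name, v.IsMerged("name")) // from-env true
}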
40  config/vars/vars_test.go  Normal file
@@ -0,0 +1,40 @@
package vars

import (
    "testing"

    "github.com/datarhei/core/v16/config/value"

    "github.com/stretchr/testify/require"
)

func TestVars(t *testing.T) {
    v1 := Variables{}

    s := ""

    v1.Register(value.NewString(&s, "foobar"), "string", "", nil, "a string", false, false)

    require.Equal(t, "foobar", s)
    x, _ := v1.Get("string")
    require.Equal(t, "foobar", x)

    v := v1.findVariable("string")
    v.value.Set("barfoo")

    require.Equal(t, "barfoo", s)
    x, _ = v1.Get("string")
    require.Equal(t, "barfoo", x)

    v1.Set("string", "foobaz")

    require.Equal(t, "foobaz", s)
    x, _ = v1.Get("string")
    require.Equal(t, "foobaz", x)

    v1.SetDefault("string")

    require.Equal(t, "foobar", s)
    x, _ = v1.Get("string")
    require.Equal(t, "foobar", x)
}
@@ -4,8 +4,16 @@ import (
    "time"

    "github.com/datarhei/core/v16/config"
    v1config "github.com/datarhei/core/v16/config/v1"
    v2config "github.com/datarhei/core/v16/config/v2"
)

// ConfigVersion is used to only unmarshal the version field in order
// to find out which SetConfig should be used.
type ConfigVersion struct {
    Version int64 `json:"version"`
}
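The idea behind ConfigVersion is a two-pass decode: read the body once to learn the version, then decode the same bytes again into the matching SetConfig type. A self-contained sketch (illustration only; it uses the standard library's encoding/json rather than the project's json wrapper, and the type name is local to the sketch):

package main

import (
    "encoding/json"
    "fmt"
)

type configVersion struct {
    Version int64 `json:"version"`
}

func main() {
    body := []byte(`{"version": 1, "storage": {"disk": {"cache": {"types": [".ccc"]}}}}`)

    // First pass: read only the version field, ignore everything else.
    v := configVersion{}
    if err := json.Unmarshal(body, &v); err != nil {
        fmt.Println(err)
        return
    }

    // Second pass (not shown): unmarshal the same body into SetConfigV1,
    // SetConfigV2, or SetConfig depending on v.Version.
    fmt.Println(v.Version) // 1
}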

// ConfigData embeds config.Data
type ConfigData struct {
    config.Data
@@ -22,11 +30,68 @@ type Config struct {
    Overrides []string `json:"overrides"`
}

type SetConfigV1 struct {
    v1config.Data
}

// NewSetConfigV1 creates a new SetConfigV1 based on the current
// config with downgrading.
func NewSetConfigV1(cfg *config.Config) SetConfigV1 {
    v2data, _ := config.DowngradeV3toV2(&cfg.Data)
    v1data, _ := v2config.DowngradeV2toV1(v2data)

    data := SetConfigV1{
        Data: *v1data,
    }

    return data
}

// MergeTo merges the v1 config into the current config.
func (s *SetConfigV1) MergeTo(cfg *config.Config) {
    v2data, _ := config.DowngradeV3toV2(&cfg.Data)

    v2config.MergeV1ToV2(v2data, &s.Data)
    config.MergeV2toV3(&cfg.Data, v2data)
}

type SetConfigV2 struct {
    v2config.Data
}

// NewSetConfigV2 creates a new SetConfigV2 based on the current
// config with downgrading.
func NewSetConfigV2(cfg *config.Config) SetConfigV2 {
    v2data, _ := config.DowngradeV3toV2(&cfg.Data)

    data := SetConfigV2{
        Data: *v2data,
    }

    return data
}

// MergeTo merges the v2 config into the current config.
func (s *SetConfigV2) MergeTo(cfg *config.Config) {
    config.MergeV2toV3(&cfg.Data, &s.Data)
}

// SetConfig embeds config.Data. It is used to send a new config to the server.
type SetConfig struct {
    config.Data
}

// NewSetConfig converts a config.Config into a SetConfig in order to prepopulate
// a SetConfig with the current values. The uploaded config can have missing fields that
// will be filled with the current values after unmarshalling the JSON.
func NewSetConfig(cfg *config.Config) SetConfig {
    data := SetConfig{
        cfg.Data,
    }

    return data
}
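Why prepopulating matters (illustration only, not part of the commit): JSON unmarshalling leaves struct fields untouched when they are absent from the input, so decoding a partial body over a SetConfig that already carries the current values keeps everything the client did not send. A minimal sketch with a hypothetical two-field struct and the standard library's encoding/json:

package main

import (
    "encoding/json"
    "fmt"
)

// settings is a stand-in for SetConfig; the field names are made up.
type settings struct {
    Name    string `json:"name"`
    Address string `json:"address"`
}

func main() {
    // Prepopulate with the "current" values, as NewSetConfig does with cfg.Data.
    current := settings{Name: "core", Address: ":8080"}

    // The client only sends one field.
    body := []byte(`{"address": ":9090"}`)

    if err := json.Unmarshal(body, &current); err != nil {
        fmt.Println(err)
        return
    }

    // The missing field keeps its current value.
    fmt.Println(current.Name, current.Address) // core :9090
}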

// MergeTo merges a sent config into a config.Config
func (rscfg *SetConfig) MergeTo(cfg *config.Config) {
    cfg.ID = rscfg.ID
@@ -51,18 +116,7 @@ func (rscfg *SetConfig) MergeTo(cfg *config.Config) {
    cfg.Router = rscfg.Router
}

// NewSetConfig converts a config.Config into a RestreamerSetConfig in order to prepopulate
// a RestreamerSetConfig with the current values. The uploaded config can have missing fields that
// will be filled with the current values after unmarshalling the JSON.
func NewSetConfig(cfg *config.Config) SetConfig {
    data := SetConfig{
        cfg.Data,
    }

    return data
}

// Unmarshal converts a config.Config to a RestreamerConfig.
// Unmarshal converts a config.Config to a Config.
func (c *Config) Unmarshal(cfg *config.Config) {
    if cfg == nil {
        return
@@ -1,11 +1,13 @@
package api

import (
    "io"
    "net/http"

    "github.com/datarhei/core/v16/config"
    cfgstore "github.com/datarhei/core/v16/config/store"
    cfgvars "github.com/datarhei/core/v16/config/vars"
    "github.com/datarhei/core/v16/encoding/json"
    "github.com/datarhei/core/v16/http/api"
    "github.com/datarhei/core/v16/http/handler/util"

    "github.com/labstack/echo/v4"
)
@@ -13,11 +15,11 @@ import (
// The ConfigHandler type provides handler functions for reading and manipulating
// the current config.
type ConfigHandler struct {
    store config.Store
    store cfgstore.Store
}

// NewConfig returns a new Config type. You have to provide a valid config store.
func NewConfig(store config.Store) *ConfigHandler {
func NewConfig(store cfgstore.Store) *ConfigHandler {
    return &ConfigHandler{
        store: store,
    }
@@ -53,25 +55,73 @@ func (p *ConfigHandler) Get(c echo.Context) error {
// @Security ApiKeyAuth
// @Router /api/v3/config [put]
func (p *ConfigHandler) Set(c echo.Context) error {
    cfg := p.store.Get()
    version := api.ConfigVersion{}

    // Set the current config as default config value. This will
    // allow to set a partial config without destroying the other
    // values.
    setConfig := api.NewSetConfig(cfg)
    req := c.Request()

    if err := util.ShouldBindJSON(c, &setConfig); err != nil {
    body, err := io.ReadAll(req.Body)
    if err != nil {
        return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err)
    }

    // Merge it into the current config
    setConfig.MergeTo(cfg)
    if err := json.Unmarshal(body, &version); err != nil {
        return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", json.FormatError(body, err))
    }

    cfg := p.store.Get()

    // For each version, set the current config as default config value. This will
    // allow to set a partial config without destroying the other values.
    if version.Version == 1 {
        // Downgrade to v1 in order to have a populated v1 config
        v1SetConfig := api.NewSetConfigV1(cfg)

        if err := json.Unmarshal(body, &v1SetConfig); err != nil {
            return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", json.FormatError(body, err))
        }

        if err := c.Validate(v1SetConfig); err != nil {
            return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err)
        }

        // Merge it into the current config
        v1SetConfig.MergeTo(cfg)
    } else if version.Version == 2 {
        // Downgrade to v2 in order to have a populated v2 config
        v2SetConfig := api.NewSetConfigV2(cfg)

        if err := json.Unmarshal(body, &v2SetConfig); err != nil {
            return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", json.FormatError(body, err))
        }

        if err := c.Validate(v2SetConfig); err != nil {
            return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err)
        }

        // Merge it into the current config
        v2SetConfig.MergeTo(cfg)
    } else if version.Version == 3 {
        v3SetConfig := api.NewSetConfig(cfg)

        if err := json.Unmarshal(body, &v3SetConfig); err != nil {
            return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", json.FormatError(body, err))
        }

        if err := c.Validate(v3SetConfig); err != nil {
            return api.Err(http.StatusBadRequest, "Invalid JSON", "%s", err)
        }

        // Merge it into the current config
        v3SetConfig.MergeTo(cfg)
    } else {
        return api.Err(http.StatusBadRequest, "Invalid config version", "version %d", version.Version)
    }

    // Now we make a copy from the config and merge it with the environment
    // variables. If this configuration is valid, we will store the un-merged
    // one to disk.

    mergedConfig := config.NewConfigFrom(cfg)
    mergedConfig := cfg.Clone()
    mergedConfig.Merge()

    // Validate the new merged config
@@ -79,7 +129,7 @@ func (p *ConfigHandler) Set(c echo.Context) error {
    if mergedConfig.HasErrors() {
        errors := make(map[string][]string)

        mergedConfig.Messages(func(level string, v config.Variable, message string) {
        mergedConfig.Messages(func(level string, v cfgvars.Variable, message string) {
            if level != "error" {
                return
            }
@@ -7,25 +7,28 @@ import (
    "testing"

    "github.com/datarhei/core/v16/config"
    "github.com/datarhei/core/v16/config/store"
    v1 "github.com/datarhei/core/v16/config/v1"
    "github.com/datarhei/core/v16/http/mock"
    "github.com/labstack/echo/v4"
    "github.com/stretchr/testify/require"
)

func getDummyConfigRouter() *echo.Echo {
func getDummyConfigRouter() (*echo.Echo, store.Store) {
    router := mock.DummyEcho()

    config := config.NewDummyStore()
    config := store.NewDummy()

    handler := NewConfig(config)

    router.Add("GET", "/", handler.Get)
    router.Add("PUT", "/", handler.Set)

    return router
    return router, config
}

func TestConfigGet(t *testing.T) {
    router := getDummyConfigRouter()
    router, _ := getDummyConfigRouter()

    mock.Request(t, http.StatusOK, router, "GET", "/", nil)

@@ -33,7 +36,7 @@ func TestConfigGet(t *testing.T) {
}

func TestConfigSetConflict(t *testing.T) {
    router := getDummyConfigRouter()
    router, _ := getDummyConfigRouter()

    var data bytes.Buffer

@@ -44,18 +47,86 @@ func TestConfigSetConflict(t *testing.T) {
}

func TestConfigSet(t *testing.T) {
    router := getDummyConfigRouter()
    router, store := getDummyConfigRouter()

    storedcfg := store.Get()

    require.Equal(t, []string{}, storedcfg.Host.Name)

    var data bytes.Buffer
    encoder := json.NewEncoder(&data)

    // Setting a new v3 config
    cfg := config.New()
    cfg.FFmpeg.Binary = "true"
    cfg.DB.Dir = "."
    cfg.Storage.Disk.Dir = "."
    cfg.Storage.MimeTypes = ""
    cfg.Storage.Disk.Cache.Types.Allow = []string{".aaa"}
    cfg.Storage.Disk.Cache.Types.Block = []string{".zzz"}
    cfg.Host.Name = []string{"foobar.com"}

    encoder := json.NewEncoder(&data)
    encoder.Encode(cfg)

    mock.Request(t, http.StatusOK, router, "PUT", "/", &data)

    storedcfg = store.Get()

    require.Equal(t, []string{"foobar.com"}, storedcfg.Host.Name)
    require.Equal(t, []string{".aaa"}, cfg.Storage.Disk.Cache.Types.Allow)
    require.Equal(t, []string{".zzz"}, cfg.Storage.Disk.Cache.Types.Block)
    require.Equal(t, "cert@datarhei.com", cfg.TLS.Email)

    // Setting a complete v1 config
    cfgv1 := v1.New()
    cfgv1.FFmpeg.Binary = "true"
    cfgv1.DB.Dir = "."
    cfgv1.Storage.Disk.Dir = "."
    cfgv1.Storage.MimeTypes = ""
    cfgv1.Storage.Disk.Cache.Types = []string{".bbb"}
    cfgv1.Host.Name = []string{"foobar.com"}

    data.Reset()

    encoder.Encode(cfgv1)

    mock.Request(t, http.StatusOK, router, "PUT", "/", &data)

    storedcfg = store.Get()

    require.Equal(t, []string{"foobar.com"}, storedcfg.Host.Name)
    require.Equal(t, []string{".bbb"}, storedcfg.Storage.Disk.Cache.Types.Allow)
    require.Equal(t, []string{".zzz"}, storedcfg.Storage.Disk.Cache.Types.Block)
    require.Equal(t, "cert@datarhei.com", cfg.TLS.Email)

    // Setting a partial v1 config
    type customconfig struct {
        Version int `json:"version"`
        Storage struct {
            Disk struct {
                Cache struct {
                    Types []string `json:"types"`
                } `json:"cache"`
            } `json:"disk"`
        } `json:"storage"`
    }

    customcfg := customconfig{
        Version: 1,
    }

    customcfg.Storage.Disk.Cache.Types = []string{".ccc"}

    data.Reset()

    encoder.Encode(customcfg)

    mock.Request(t, http.StatusOK, router, "PUT", "/", &data)

    storedcfg = store.Get()

    require.Equal(t, []string{"foobar.com"}, storedcfg.Host.Name)
    require.Equal(t, []string{".ccc"}, storedcfg.Storage.Disk.Cache.Types.Allow)
    require.Equal(t, []string{".zzz"}, storedcfg.Storage.Disk.Cache.Types.Block)
    require.Equal(t, "cert@datarhei.com", cfg.TLS.Email)
}
@@ -32,7 +32,7 @@ import (
    "net/http"
    "strings"

    "github.com/datarhei/core/v16/config"
    cfgstore "github.com/datarhei/core/v16/config/store"
    "github.com/datarhei/core/v16/http/cache"
    "github.com/datarhei/core/v16/http/errorhandler"
    "github.com/datarhei/core/v16/http/graph/resolver"
@@ -87,7 +87,7 @@ type Config struct {
    RTMP     rtmp.Server
    SRT      srt.Server
    JWT      jwt.JWT
    Config   config.Store
    Config   cfgstore.Store
    Cache    cache.Cacher
    Sessions session.RegistryReader
    Router   router.Router
70  net/ip.go
@@ -4,7 +4,11 @@ package net

import (
    "fmt"
    "io"
    "net"
    "net/http"
    "sync"
    "time"
)

var (
@@ -58,3 +62,69 @@ func ipVersion(ipAddress string) int {

    return 0
}

// GetPublicIPs will try to figure out the public IPs (v4 and v6)
// we're running on. If it fails, an empty list will be returned.
func GetPublicIPs(timeout time.Duration) []string {
    var wg sync.WaitGroup

    ipv4 := ""
    ipv6 := ""

    wg.Add(2)

    go func() {
        defer wg.Done()

        ipv4 = doRequest("https://api.ipify.org", timeout)
    }()

    go func() {
        defer wg.Done()

        ipv6 = doRequest("https://api6.ipify.org", timeout)
    }()

    wg.Wait()

    ips := []string{}

    if len(ipv4) != 0 {
        ips = append(ips, ipv4)
    }

    if len(ipv6) != 0 && ipv4 != ipv6 {
        ips = append(ips, ipv6)
    }

    return ips
}

func doRequest(url string, timeout time.Duration) string {
    client := &http.Client{
        Timeout: timeout,
    }

    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        return ""
    }

    resp, err := client.Do(req)
    if err != nil {
        return ""
    }

    defer resp.Body.Close()

    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return ""
    }

    if resp.StatusCode != 200 {
        return ""
    }

    return string(body)
}
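A small usage sketch of the helper above (illustration only, not part of the commit; the timeout value is arbitrary). Both ipify lookups run concurrently, so the call returns after at most roughly one timeout, and duplicate v4/v6 answers are collapsed:

package main

import (
    "fmt"
    "time"

    "github.com/datarhei/core/v16/net"
)

func main() {
    ips := net.GetPublicIPs(3 * time.Second)
    if len(ips) == 0 {
        fmt.Println("no public IPs could be determined")
        return
    }

    for _, ip := range ips {
        fmt.Println(ip)
    }
}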