Add v16.7.2

This commit is contained in:
Jan Stabenow
2022-05-13 19:26:45 +02:00
parent 8e70517dff
commit 9c0b535199
2368 changed files with 687657 additions and 1 deletions

68
monitor/cpu.go Normal file
View File

@@ -0,0 +1,68 @@
package monitor
import (
"github.com/datarhei/core/monitor/metric"
"github.com/datarhei/core/psutil"
)
// cpuCollector gathers the number of CPUs and the current CPU usage split
// into system, user, idle, and other time via psutil.
type cpuCollector struct {
	ncpuDescr   *metric.Description
	systemDescr *metric.Description
	userDescr   *metric.Description
	idleDescr   *metric.Description
	otherDescr  *metric.Description

	ncpu float64
}

// NewCPUCollector returns a collector for the "cpu" metric group.
func NewCPUCollector() metric.Collector {
	c := &cpuCollector{
		ncpu:        1, // fallback if the CPU count can't be determined
		ncpuDescr:   metric.NewDesc("cpu_ncpu", "", nil),
		systemDescr: metric.NewDesc("cpu_system", "", nil),
		userDescr:   metric.NewDesc("cpu_user", "", nil),
		idleDescr:   metric.NewDesc("cpu_idle", "", nil),
		otherDescr:  metric.NewDesc("cpu_other", "", nil),
	}

	if ncpu, err := psutil.CPUCounts(true); err == nil {
		c.ncpu = ncpu
	}

	return c
}

// Stop is a no-op; this collector holds no background resources.
func (c *cpuCollector) Stop() {}

// Prefix returns the metric name prefix of this collector.
func (c *cpuCollector) Prefix() string {
	return "cpu"
}

// Describe lists the descriptions of all metrics this collector emits.
func (c *cpuCollector) Describe() []*metric.Description {
	return []*metric.Description{
		c.ncpuDescr,
		c.systemDescr,
		c.userDescr,
		c.idleDescr,
		c.otherDescr,
	}
}

// Collect reads the current CPU statistics. On error, only the CPU count
// is reported.
func (c *cpuCollector) Collect() metric.Metrics {
	metrics := metric.NewMetrics()
	metrics.Add(metric.NewValue(c.ncpuDescr, c.ncpu))

	stat, err := psutil.CPUPercent()
	if err != nil {
		return metrics
	}

	metrics.Add(metric.NewValue(c.systemDescr, stat.System))
	metrics.Add(metric.NewValue(c.userDescr, stat.User))
	metrics.Add(metric.NewValue(c.idleDescr, stat.Idle))
	metrics.Add(metric.NewValue(c.otherDescr, stat.Other))

	return metrics
}

51
monitor/disk.go Normal file
View File

@@ -0,0 +1,51 @@
package monitor
import (
"github.com/datarhei/core/monitor/metric"
"github.com/datarhei/core/psutil"
)
// diskCollector reports total and used bytes of the filesystem at a
// configured path via psutil.
type diskCollector struct {
	path string

	totalDescr *metric.Description
	usageDescr *metric.Description
}

// NewDiskCollector returns a collector for the "disk" metric group that
// watches the given path.
func NewDiskCollector(path string) metric.Collector {
	return &diskCollector{
		path:       path,
		totalDescr: metric.NewDesc("disk_total", "", []string{"path"}),
		usageDescr: metric.NewDesc("disk_usage", "", []string{"path"}),
	}
}

// Prefix returns the metric name prefix of this collector.
func (c *diskCollector) Prefix() string {
	return "disk"
}

// Describe lists the descriptions of all metrics this collector emits.
func (c *diskCollector) Describe() []*metric.Description {
	return []*metric.Description{
		c.totalDescr,
		c.usageDescr,
	}
}

// Collect reads the disk usage of the configured path. On error an empty
// metrics collection is returned.
func (c *diskCollector) Collect() metric.Metrics {
	metrics := metric.NewMetrics()

	stat, err := psutil.DiskUsage(c.path)
	if err != nil {
		return metrics
	}

	metrics.Add(metric.NewValue(c.totalDescr, float64(stat.Total), c.path))
	metrics.Add(metric.NewValue(c.usageDescr, float64(stat.Used), c.path))

	return metrics
}

// Stop is a no-op; this collector holds no background resources.
func (c *diskCollector) Stop() {}

50
monitor/ffmpeg.go Normal file
View File

@@ -0,0 +1,50 @@
package monitor
import (
"github.com/datarhei/core/ffmpeg"
"github.com/datarhei/core/monitor/metric"
)
// ffmpegCollector reports how many ffmpeg processes are in each process
// state.
type ffmpegCollector struct {
	prefix string
	ffmpeg ffmpeg.FFmpeg

	processDescr *metric.Description
}

// NewFFmpegCollector returns a collector for the "ffmpeg" metric group.
func NewFFmpegCollector(f ffmpeg.FFmpeg) metric.Collector {
	return &ffmpegCollector{
		prefix:       "ffmpeg",
		ffmpeg:       f,
		processDescr: metric.NewDesc("ffmpeg_process", "", []string{"state"}),
	}
}

// Prefix returns the metric name prefix of this collector.
func (c *ffmpegCollector) Prefix() string {
	return c.prefix
}

// Describe lists the descriptions of all metrics this collector emits.
func (c *ffmpegCollector) Describe() []*metric.Description {
	return []*metric.Description{
		c.processDescr,
	}
}

// Collect reports the current process state counters, one value per state.
func (c *ffmpegCollector) Collect() metric.Metrics {
	metrics := metric.NewMetrics()

	states := c.ffmpeg.States()

	for _, s := range []struct {
		state string
		count float64
	}{
		{"finished", float64(states.Finished)},
		{"starting", float64(states.Starting)},
		{"running", float64(states.Running)},
		{"finishing", float64(states.Finishing)},
		{"failed", float64(states.Failed)},
		{"killed", float64(states.Killed)},
	} {
		metrics.Add(metric.NewValue(c.processDescr, s.count, s.state))
	}

	return metrics
}

// Stop is a no-op; this collector holds no background resources.
func (c *ffmpegCollector) Stop() {}

54
monitor/filesystem.go Normal file
View File

@@ -0,0 +1,54 @@
package monitor
import (
"github.com/datarhei/core/io/fs"
"github.com/datarhei/core/monitor/metric"
)
// filesystemCollector reports size limit, current usage, and file count of
// a named filesystem.
type filesystemCollector struct {
	fs   fs.Filesystem
	name string

	limitDescr *metric.Description
	usageDescr *metric.Description
	filesDescr *metric.Description
}

// NewFilesystemCollector returns a collector for the "filesystem" metric
// group, reporting on the given filesystem under the given name.
func NewFilesystemCollector(name string, fs fs.Filesystem) metric.Collector {
	return &filesystemCollector{
		fs:         fs,
		name:       name,
		limitDescr: metric.NewDesc("filesystem_limit", "", []string{"name"}),
		usageDescr: metric.NewDesc("filesystem_usage", "", []string{"name"}),
		filesDescr: metric.NewDesc("filesystem_files", "", []string{"name"}),
	}
}

// Prefix returns the metric name prefix of this collector.
func (c *filesystemCollector) Prefix() string {
	return "filesystem"
}

// Describe lists the descriptions of all metrics this collector emits.
func (c *filesystemCollector) Describe() []*metric.Description {
	return []*metric.Description{
		c.limitDescr,
		c.usageDescr,
		c.filesDescr,
	}
}

// Collect reports the filesystem's size limit, used size, and number of
// stored files.
func (c *filesystemCollector) Collect() metric.Metrics {
	size, limit := c.fs.Size()
	files := c.fs.Files()

	metrics := metric.NewMetrics()
	metrics.Add(metric.NewValue(c.limitDescr, float64(limit), c.name))
	metrics.Add(metric.NewValue(c.usageDescr, float64(size), c.name))
	metrics.Add(metric.NewValue(c.filesDescr, float64(files), c.name))

	return metrics
}

// Stop is a no-op; this collector holds no background resources.
func (c *filesystemCollector) Stop() {}

47
monitor/mem.go Normal file
View File

@@ -0,0 +1,47 @@
package monitor
import (
"github.com/datarhei/core/monitor/metric"
"github.com/datarhei/core/psutil"
)
// memCollector reports total and available memory in bytes via psutil.
type memCollector struct {
	totalDescr *metric.Description
	freeDescr  *metric.Description
}

// NewMemCollector returns a collector for the "mem" metric group.
func NewMemCollector() metric.Collector {
	return &memCollector{
		totalDescr: metric.NewDesc("mem_total", "", nil),
		freeDescr:  metric.NewDesc("mem_free", "", nil),
	}
}

// Prefix returns the metric name prefix of this collector.
func (c *memCollector) Prefix() string {
	return "mem"
}

// Describe lists the descriptions of all metrics this collector emits.
func (c *memCollector) Describe() []*metric.Description {
	return []*metric.Description{
		c.totalDescr,
		c.freeDescr,
	}
}

// Collect reads the current virtual memory statistics. On error an empty
// metrics collection is returned.
func (c *memCollector) Collect() metric.Metrics {
	metrics := metric.NewMetrics()

	stat, err := psutil.VirtualMemory()
	if err != nil {
		return metrics
	}

	// "free" reports the available amount of memory.
	metrics.Add(metric.NewValue(c.totalDescr, float64(stat.Total)))
	metrics.Add(metric.NewValue(c.freeDescr, float64(stat.Available)))

	return metrics
}

// Stop is a no-op; this collector holds no background resources.
func (c *memCollector) Stop() {}

324
monitor/metric/metric.go Normal file
View File

@@ -0,0 +1,324 @@
package metric
import (
"fmt"
"regexp"
"sort"
)
// Pattern describes a filter for metric values: a metric name plus a set of
// label/regular-expression pairs that a value's labels have to satisfy.
type Pattern interface {
	// Name returns the name of the metric this pattern is designated to.
	Name() string

	// Match returns whether a map of labels with its label values
	// match this pattern.
	Match(labels map[string]string) bool

	// IsValid returns whether the pattern is valid.
	IsValid() bool
}

type pattern struct {
	name   string
	labels map[string]*regexp.Regexp
	valid  bool
}

// NewPattern creates a new pattern with the given prefix and group name. There
// has to be an even number of parameter, which is ("label", "labelvalue", "label",
// "labelvalue" ...). The label value will be interpreted as regular expression.
func NewPattern(name string, labels ...string) Pattern {
	p := &pattern{
		name:   name,
		labels: make(map[string]*regexp.Regexp),
	}

	// An odd number of label arguments skips the loop entirely, which
	// leaves the pattern invalid below (unless only one extra argument
	// was given and no pairs were expected to compile).
	if len(labels)%2 == 0 {
		for i := 0; i < len(labels); i += 2 {
			exp, err := regexp.Compile(labels[i+1])
			if err != nil {
				// NOTE(review): the compile error is printed to stdout and
				// the pair is dropped, which marks the pattern invalid below.
				fmt.Printf("error: %s\n", err)
				continue
			}

			p.labels[labels[i]] = exp
		}
	}

	// Valid only if every given label pair compiled into an expression.
	if len(p.labels) == len(labels)/2 {
		p.valid = true
	}

	return p
}

// Name returns the metric name this pattern applies to.
func (p *pattern) Name() string {
	return p.name
}

// Match reports whether the given label set satisfies all of the pattern's
// label expressions. An invalid pattern never matches; a pattern without
// label expressions matches any label set.
func (p *pattern) Match(labels map[string]string) bool {
	if !p.valid {
		return false
	}

	if len(p.labels) == 0 {
		return true
	}

	for pname, pexp := range p.labels {
		value, ok := labels[pname]
		if !ok {
			return false
		}

		if !pexp.MatchString(value) {
			return false
		}
	}

	return true
}

// IsValid returns whether the pattern is valid.
func (p *pattern) IsValid() bool {
	return p.valid
}

// Metrics is a queryable collection of metric values.
type Metrics interface {
	Value(name string, labels ...string) Value
	Values(name string, labels ...string) []Value
	Labels(name string, label string) []string
	All() []Value
	Add(v Value)
	String() string
}

type metrics struct {
	values []Value
}

// NewMetrics returns an empty metrics collection.
func NewMetrics() *metrics {
	return &metrics{}
}

// String returns the concatenated string forms of all stored values.
func (m *metrics) String() string {
	s := ""

	for _, v := range m.values {
		s += v.String()
	}

	return s
}

// Values returns all values of the named metric whose labels match the given
// ("label", "valueRegexp", ...) pairs. An odd number of label arguments
// yields an empty result.
func (m *metrics) Values(name string, labels ...string) []Value {
	if len(labels)%2 != 0 {
		return []Value{}
	}

	patterns := []Pattern{
		NewPattern(name, labels...),
	}

	values := []Value{}

	for _, v := range m.values {
		if !v.Match(patterns) {
			continue
		}

		values = append(values, v)
	}

	return values
}

// Value returns the first value of the named metric matching the given label
// pairs, or the null value if there's no match.
func (m *metrics) Value(name string, labels ...string) Value {
	vs := m.Values(name, labels...)

	if len(vs) == 0 {
		return nullValue
	}

	return vs[0]
}

// All returns all stored values.
func (m *metrics) All() []Value {
	return m.values
}

// Labels returns the distinct values of the given label over all values of
// the named metric.
func (m *metrics) Labels(name string, label string) []string {
	vs := m.Values(name)

	values := make(map[string]struct{})

	for _, v := range vs {
		l := v.L(label)
		if len(l) == 0 {
			// NOTE(review): this stops at the first value that doesn't carry
			// the label instead of skipping it — confirm that all values of
			// one metric share the same label set.
			break
		}

		values[l] = struct{}{}
	}

	labelvalues := []string{}

	for v := range values {
		labelvalues = append(labelvalues, v)
	}

	return labelvalues
}

// Add appends a value to the collection. Nil values are ignored.
func (m *metrics) Add(v Value) {
	if v == nil {
		return
	}

	m.values = append(m.values, v)
}

// Value is a single metric value together with its labels.
type Value interface {
	Name() string
	Val() float64
	L(name string) string
	Labels() map[string]string
	Match(patterns []Pattern) bool
	Hash() string
	String() string
}

type value struct {
	name   string
	labels map[string]string
	value  float64
	hash   string
}

// nullValue is returned by metrics.Value when nothing matches.
var nullValue Value = NewValue(NewDesc("", "", nil), 0)

// NewValue creates a value for the given description. The elms are the label
// values in the order of the description's labels. It returns nil if the
// number of label values doesn't match the number of labels.
func NewValue(description *Description, v float64, elms ...string) Value {
	if len(description.labels) != len(elms) {
		return nil
	}

	val := &value{
		name:   description.name,
		value:  v,
		labels: make(map[string]string),
	}

	labels := []string{}

	for i, label := range description.labels {
		val.labels[label] = elms[i]
		labels = append(labels, label)
	}

	// The hash is the metric name followed by the labels in sorted order,
	// so it is stable regardless of the label order in the description.
	val.hash = fmt.Sprintf("%s:", val.name)

	sort.Strings(labels)

	for _, k := range labels {
		val.hash += k + "=" + val.labels[k] + " "
	}

	return val
}

// Hash returns a stable identifier built from the value's name and labels.
func (v *value) Hash() string {
	return v.hash
}

// String returns a human-readable representation of the value.
func (v *value) String() string {
	s := fmt.Sprintf("%s: %f {", v.name, v.value)

	for k, v := range v.labels {
		s += k + "=" + v + " "
	}

	s += "}"

	return s
}

// Name returns the metric name of the value.
func (v *value) Name() string {
	return v.name
}

// Val returns the numeric value.
func (v *value) Val() float64 {
	return v.value
}

// L returns the value of the given label, or an empty string if the label
// isn't present.
func (v *value) L(name string) string {
	l, ok := v.labels[name]
	if !ok {
		return ""
	}

	return l
}

// Labels returns a copy of the value's labels, or nil if there are none.
func (v *value) Labels() map[string]string {
	if len(v.labels) == 0 {
		return nil
	}

	l := make(map[string]string, len(v.labels))

	for k, v := range v.labels {
		l[k] = v
	}

	return l
}

// Match reports whether the value matches at least one of the given
// patterns. An empty pattern list matches everything.
func (v *value) Match(patterns []Pattern) bool {
	if len(patterns) == 0 {
		return true
	}

	for _, p := range patterns {
		if v.name != p.Name() {
			continue
		}

		if !p.Match(v.labels) {
			continue
		}

		return true
	}

	return false
}

// Description describes a metric: its name, a help text, and the names of
// its labels.
type Description struct {
	name        string
	description string
	labels      []string
}

// NewDesc creates a new description with the given name, help text, and
// label names.
func NewDesc(name, description string, labels []string) *Description {
	return &Description{
		name:        name,
		description: description,
		labels:      labels,
	}
}

// Name returns the name of the metric.
func (d *Description) Name() string {
	return d.name
}

// Description returns the help text of the metric.
func (d *Description) Description() string {
	return d.description
}

// Collector gathers values for a set of described metrics.
type Collector interface {
	Prefix() string
	Describe() []*Description
	Collect() Metrics
	Stop()
}

// Reader provides read access to collected metrics.
type Reader interface {
	Collect([]Pattern) Metrics
}

View File

@@ -0,0 +1,26 @@
package metric
import (
"testing"
)
// TestValue checks that a label is stored on a value and that both a
// name-only pattern and a name+label pattern match the value.
func TestValue(t *testing.T) {
	descr := NewDesc("group", "", []string{"name"})
	val := NewValue(descr, 42, "foobar")

	if val.L("name") != "foobar" {
		t.Fatalf("label name doesn't have the expected value")
	}

	if !val.Match([]Pattern{NewPattern("group")}) {
		t.Fatalf("pattern p1 should have matched")
	}

	if !val.Match([]Pattern{NewPattern("group", "name", "foobar")}) {
		t.Fatalf("pattern p2 should have matched")
	}
}

339
monitor/monitor.go Normal file
View File

@@ -0,0 +1,339 @@
package monitor
import (
"container/ring"
"fmt"
"math"
"sync"
"time"
"github.com/datarhei/core/monitor/metric"
)
// Monitor maintains a set of metric collectors and collects values from
// them on demand.
type Monitor interface {
	Register(c metric.Collector)
	Collect(patterns []metric.Pattern) metric.Metrics
	UnregisterAll()
}

// Config holds the configuration for a Monitor.
type Config struct{}

type monitor struct {
	lock       sync.RWMutex
	collectors map[string]metric.Collector // metric name -> owning collector
}

// New returns a Monitor with no collectors registered.
func New(config Config) Monitor {
	return &monitor{
		collectors: map[string]metric.Collector{},
	}
}
// Register adds a collector and indexes it under each metric name it
// describes. A nil collector is ignored.
func (m *monitor) Register(c metric.Collector) {
	if c == nil {
		return
	}

	// Ask the collector for its descriptions before taking the lock.
	descriptors := c.Describe()

	m.lock.Lock()
	defer m.lock.Unlock()

	for _, descr := range descriptors {
		m.collectors[descr.Name()] = c
	}
}
// Collect queries every collector that owns a requested metric and returns
// the values matching the given patterns.
func (m *monitor) Collect(patterns []metric.Pattern) metric.Metrics {
	collected := metric.NewMetrics()

	// Group the requested patterns by the collector that owns the metric.
	grouped := make(map[metric.Collector][]metric.Pattern)

	m.lock.Lock()
	defer m.lock.Unlock()

	for _, p := range patterns {
		if c, ok := m.collectors[p.Name()]; ok {
			grouped[c] = append(grouped[c], p)
		}
	}

	for c, ps := range grouped {
		for _, v := range c.Collect().All() {
			if v.Match(ps) {
				collected.Add(v)
			}
		}
	}

	return collected
}
// UnregisterAll stops every registered collector and empties the registry.
func (m *monitor) UnregisterAll() {
	m.lock.Lock()
	defer m.lock.Unlock()

	for _, c := range m.collectors {
		c.Stop()
	}

	m.collectors = map[string]metric.Collector{}
}
// HistoryMonitor is a Monitor that additionally keeps a history of
// collected metrics at a fixed interval and resolution.
type HistoryMonitor interface {
	Monitor

	History(timerange, interval time.Duration, patterns []metric.Pattern) []HistoryMetrics
	Resolution() (timerange, interval time.Duration)
}

type historyMonitor struct {
	monitor Monitor

	enable bool // whether history collection is active

	timerange time.Duration // total time span the history covers
	interval  time.Duration // time between two history snapshots

	metrics *ring.Ring // fixed-size ring of *HistoryMetrics snapshots
	lock    sync.RWMutex

	stopTicker chan struct{} // closed to stop the collecting goroutine

	patterns []metric.Pattern // one name-only pattern per registered metric
}

// HistoryMetrics is a snapshot of collected metrics at a point in time.
type HistoryMetrics struct {
	TS      time.Time
	Metrics metric.Metrics
}

// HistoryConfig is the configuration for NewHistory.
type HistoryConfig struct {
	Config    Config
	Enable    bool
	Timerange time.Duration
	Interval  time.Duration
}

// NewHistory returns a HistoryMonitor. If history collection is enabled,
// Interval and Timerange must be positive and Interval must not exceed
// Timerange; the ring buffer is sized to Timerange/Interval snapshots
// (at least one).
func NewHistory(config HistoryConfig) (HistoryMonitor, error) {
	m := &historyMonitor{
		monitor: New(config.Config),
		enable:  config.Enable,
	}

	if m.enable {
		if config.Interval <= 0 {
			return nil, fmt.Errorf("the interval must be greater than 0")
		}

		if config.Timerange <= 0 {
			return nil, fmt.Errorf("the timeframe must be greater than 0")
		}

		if config.Interval > config.Timerange {
			return nil, fmt.Errorf("the interval has to be shorter than the frame")
		}

		// Number of snapshots needed to cover the whole time range.
		n := config.Timerange / config.Interval
		if n > math.MaxInt {
			// Guards the int(n) conversion below on platforms where int is
			// smaller than time.Duration's int64.
			return nil, fmt.Errorf("too many intervals")
		}

		if n == 0 {
			n = 1
		}

		m.timerange = config.Timerange
		m.interval = config.Interval

		m.metrics = ring.New(int(n))
		m.stopTicker = make(chan struct{})
	}

	return m, nil
}
// tick takes a snapshot on every interval until stopTicker is closed. It
// runs in its own goroutine, started by the first Register call.
func (m *historyMonitor) tick() {
	ticker := time.NewTicker(m.interval)
	defer ticker.Stop()

	for {
		select {
		case <-m.stopTicker:
			return
		case ts := <-ticker.C:
			m.collectAll(ts)
		}
	}
}

// collectAll snapshots all registered metrics, stores the snapshot in the
// current ring slot, and advances the ring (overwriting the oldest slot
// once the ring is full).
func (m *historyMonitor) collectAll(ts time.Time) {
	m.lock.Lock()
	defer m.lock.Unlock()

	metrics := m.Collect(m.patterns)

	m.metrics.Value = &HistoryMetrics{
		TS:      ts,
		Metrics: metrics,
	}

	m.metrics = m.metrics.Next()
}
// Register adds the collector to the underlying monitor and, if history is
// enabled, starts tracking a name-only pattern for each of its metrics.
// The collecting goroutine is started with the first registered collector.
func (m *historyMonitor) Register(c metric.Collector) {
	m.monitor.Register(c)

	if !m.enable {
		return
	}

	m.lock.Lock()
	defer m.lock.Unlock()

	for _, d := range c.Describe() {
		m.patterns = append(m.patterns, metric.NewPattern(d.Name()))
	}

	if len(m.patterns) == 1 {
		// start collecting metrics with the first registered collector
		go m.tick()
	}
}

// Collect delegates to the underlying monitor.
func (m *historyMonitor) Collect(patterns []metric.Pattern) metric.Metrics {
	return m.monitor.Collect(patterns)
}

// UnregisterAll stops all collectors and, if history is enabled, the
// collecting goroutine.
//
// NOTE(review): stopTicker is closed here and never recreated — calling
// UnregisterAll a second time would panic on close of a closed channel;
// confirm callers invoke this at most once.
func (m *historyMonitor) UnregisterAll() {
	m.monitor.UnregisterAll()

	if !m.enable {
		return
	}

	m.lock.Lock()
	defer m.lock.Unlock()

	m.patterns = nil

	// stop collecting metrics if all collectors are unregistered
	close(m.stopTicker)
}
// History returns the stored snapshots covering the given timerange,
// resampled to the given interval and filtered by the given patterns. It
// returns an empty list if history collection is disabled.
func (m *historyMonitor) History(timerange, interval time.Duration, patterns []metric.Pattern) []HistoryMetrics {
	metricsList := []HistoryMetrics{}

	if !m.enable {
		return metricsList
	}

	// Ignore snapshots older than the requested range, with one collection
	// interval of slack.
	notBefore := time.Now().Add(-timerange - m.interval)

	m.lock.Lock()
	m.metrics.Do(func(l interface{}) {
		if l == nil {
			// This ring slot hasn't been filled yet.
			return
		}

		historyMetrics := l.(*HistoryMetrics)

		if historyMetrics.TS.Before(notBefore) {
			return
		}

		// Keep only the values matching the requested patterns.
		metrics := metric.NewMetrics()

		for _, v := range historyMetrics.Metrics.All() {
			if v.Match(patterns) {
				metrics.Add(v)
			}
		}

		metricsList = append(metricsList, HistoryMetrics{
			TS:      historyMetrics.TS,
			Metrics: metrics,
		})
	})
	m.lock.Unlock()

	metricsList = m.resample(metricsList, timerange, interval)

	return metricsList
}

// Resolution returns the configured timerange and interval of the history.
func (m *historyMonitor) Resolution() (timerange, interval time.Duration) {
	return m.timerange, m.interval
}

// resample maps the snapshots onto a regular grid of timerange/interval
// steps. Steps before the first snapshot get nil metrics, steps after the
// last snapshot repeat the last snapshot's metrics, and every other step
// gets the most recent snapshot at or before it (sample-and-hold).
func (m *historyMonitor) resample(values []HistoryMetrics, timerange, interval time.Duration) []HistoryMetrics {
	v := []HistoryMetrics{}

	nvalues := len(values)

	if nvalues == 0 || timerange == 0 || interval == 0 {
		return v
	}

	to := time.Now()
	from := to.Add(-timerange - m.interval)

	start := values[0].TS
	end := values[nvalues-1].TS

	//startValue := values[0].Metrics
	endValue := values[nvalues-1].Metrics

	steps := int(timerange / interval)

	// lastJ remembers where the previous step matched so the inner scan
	// doesn't restart from the beginning on every step.
	lastJ := 0

	for i := 0; i < steps; i++ {
		now := from.Add(time.Duration(i) * interval)

		if now.Before(start) {
			v = append(v, HistoryMetrics{
				TS:      now,
				Metrics: nil,
			})

			continue
		}

		if now.After(end) {
			v = append(v, HistoryMetrics{
				TS:      now,
				Metrics: endValue,
			})

			continue
		}

		// Find the snapshot interval [x, y) that contains this step.
		for j := lastJ; j < nvalues-1; j++ {
			x := values[j].TS
			y := values[j+1].TS

			if (now.Equal(x) || now.After(x)) && now.Before(y) {
				v = append(v, HistoryMetrics{
					TS:      now,
					Metrics: values[j].Metrics,
				})

				lastJ = j

				break
			}
		}
	}

	return v
}
// Reader provides read access to collected metrics.
type Reader interface {
	Collect(patterns []metric.Pattern) metric.Metrics
}

// HistoryReader provides read access to collected metrics and to their
// recorded history.
type HistoryReader interface {
	Reader

	History(timerange, interval time.Duration, patterns []metric.Pattern) []HistoryMetrics
	Resolution() (timerange, interval time.Duration)
}

49
monitor/net.go Normal file
View File

@@ -0,0 +1,49 @@
package monitor
import (
"github.com/datarhei/core/monitor/metric"
"github.com/datarhei/core/psutil"
)
// netCollector reports received and sent byte counters per network
// interface via psutil.
type netCollector struct {
	rxDescr *metric.Description
	txDescr *metric.Description
}

// NewNetCollector returns a collector for the "net" metric group.
func NewNetCollector() metric.Collector {
	return &netCollector{
		rxDescr: metric.NewDesc("net_rx", "", []string{"interface"}),
		txDescr: metric.NewDesc("net_tx", "", []string{"interface"}),
	}
}

// Prefix returns the metric name prefix of this collector.
func (c *netCollector) Prefix() string {
	return "net"
}

// Describe lists the descriptions of all metrics this collector emits.
func (c *netCollector) Describe() []*metric.Description {
	return []*metric.Description{
		c.rxDescr,
		c.txDescr,
	}
}

// Collect reads the per-interface IO counters. On error an empty metrics
// collection is returned.
func (c *netCollector) Collect() metric.Metrics {
	metrics := metric.NewMetrics()

	nics, err := psutil.NetIOCounters(true)
	if err != nil {
		return metrics
	}

	for _, nic := range nics {
		metrics.Add(metric.NewValue(c.rxDescr, float64(nic.BytesRecv), nic.Name))
		metrics.Add(metric.NewValue(c.txDescr, float64(nic.BytesSent), nic.Name))
	}

	return metrics
}

// Stop is a no-op; this collector holds no background resources.
func (c *netCollector) Stop() {}

159
monitor/restream.go Normal file
View File

@@ -0,0 +1,159 @@
package monitor
import (
"strconv"
"github.com/datarhei/core/monitor/metric"
"github.com/datarhei/core/restream"
)
// restreamCollector reports per-process progress, per-process state
// counters, per-IO stream progress, and global process state counts from a
// restreamer.
type restreamCollector struct {
	prefix string
	r      restream.Restreamer

	restreamProcessDescr       *metric.Description
	restreamProcessStatesDescr *metric.Description
	restreamProcessIODescr     *metric.Description
	restreamStatesDescr        *metric.Description
}

// NewRestreamCollector returns a collector for the "restream" metric group.
func NewRestreamCollector(r restream.Restreamer) metric.Collector {
	c := &restreamCollector{
		prefix: "restream",
		r:      r,
	}

	// "name" identifies which of the per-process values a metric carries
	// (frame, fps, speed, ...), added as the last label value below.
	c.restreamProcessDescr = metric.NewDesc("restream_process", "", []string{"processid", "state", "order", "name"})
	c.restreamProcessStatesDescr = metric.NewDesc("restream_process_states", "", []string{"processid", "state"})
	c.restreamProcessIODescr = metric.NewDesc("restream_io", "", []string{"processid", "type", "id", "address", "index", "stream", "media", "name"})
	c.restreamStatesDescr = metric.NewDesc("restream_state", "", []string{"state"})

	return c
}

// Prefix returns the metric name prefix of this collector.
func (c *restreamCollector) Prefix() string {
	return c.prefix
}

// Describe lists the descriptions of all metrics this collector emits.
func (c *restreamCollector) Describe() []*metric.Description {
	return []*metric.Description{
		c.restreamProcessDescr,
		c.restreamProcessStatesDescr,
		c.restreamProcessIODescr,
		c.restreamStatesDescr,
	}
}

// Collect walks all known processes and reports their progress values,
// their per-process state counters, the progress of each input and output
// stream, and finally the number of processes per state.
func (c *restreamCollector) Collect() metric.Metrics {
	metrics := metric.NewMetrics()

	value := float64(0)

	// Count of processes per state, reported at the end.
	states := map[string]float64{
		"failed":    0,
		"finished":  0,
		"finishing": 0,
		"killed":    0,
		"running":   0,
		"starting":  0,
	}

	ids := c.r.GetProcessIDs()

	for _, id := range ids {
		state, _ := c.r.GetProcessState(id)
		if state == nil {
			// Process vanished between listing and lookup; skip it.
			continue
		}

		proc, _ := c.r.GetProcess(id)
		if proc == nil {
			continue
		}

		states[state.State]++

		// Per-process progress values, distinguished by the "name" label.
		metrics.Add(metric.NewValue(c.restreamProcessDescr, float64(state.Progress.Frame), id, state.State, state.Order, "frame"))
		metrics.Add(metric.NewValue(c.restreamProcessDescr, float64(state.Progress.FPS), id, state.State, state.Order, "fps"))
		metrics.Add(metric.NewValue(c.restreamProcessDescr, float64(state.Progress.Speed), id, state.State, state.Order, "speed"))
		metrics.Add(metric.NewValue(c.restreamProcessDescr, state.Progress.Quantizer, id, state.State, state.Order, "q"))
		metrics.Add(metric.NewValue(c.restreamProcessDescr, float64(state.Progress.Size), id, state.State, state.Order, "size"))
		metrics.Add(metric.NewValue(c.restreamProcessDescr, state.Progress.Time, id, state.State, state.Order, "time"))
		metrics.Add(metric.NewValue(c.restreamProcessDescr, float64(state.Progress.Drop), id, state.State, state.Order, "drop"))
		metrics.Add(metric.NewValue(c.restreamProcessDescr, float64(state.Progress.Dup), id, state.State, state.Order, "dup"))
		metrics.Add(metric.NewValue(c.restreamProcessDescr, float64(state.Progress.Packet), id, state.State, state.Order, "packet"))
		metrics.Add(metric.NewValue(c.restreamProcessDescr, state.Progress.Bitrate, id, state.State, state.Order, "bitrate"))
		metrics.Add(metric.NewValue(c.restreamProcessDescr, state.CPU, id, state.State, state.Order, "cpu"))
		metrics.Add(metric.NewValue(c.restreamProcessDescr, float64(state.Memory), id, state.State, state.Order, "memory"))
		metrics.Add(metric.NewValue(c.restreamProcessDescr, state.Duration, id, state.State, state.Order, "uptime"))

		if proc.Config != nil {
			metrics.Add(metric.NewValue(c.restreamProcessDescr, proc.Config.LimitCPU, id, state.State, state.Order, "cpu_limit"))
			metrics.Add(metric.NewValue(c.restreamProcessDescr, float64(proc.Config.LimitMemory), id, state.State, state.Order, "memory_limit"))
		}

		// How often this process entered each state.
		metrics.Add(metric.NewValue(c.restreamProcessStatesDescr, float64(state.States.Failed), id, "failed"))
		metrics.Add(metric.NewValue(c.restreamProcessStatesDescr, float64(state.States.Finished), id, "finished"))
		metrics.Add(metric.NewValue(c.restreamProcessStatesDescr, float64(state.States.Finishing), id, "finishing"))
		metrics.Add(metric.NewValue(c.restreamProcessStatesDescr, float64(state.States.Killed), id, "killed"))
		metrics.Add(metric.NewValue(c.restreamProcessStatesDescr, float64(state.States.Running), id, "running"))
		metrics.Add(metric.NewValue(c.restreamProcessStatesDescr, float64(state.States.Starting), id, "starting"))

		// Progress of each input stream. Iterate by index to avoid copying
		// the progress structs.
		for i := range state.Progress.Input {
			io := &state.Progress.Input[i]

			index := strconv.FormatUint(io.Index, 10)
			stream := strconv.FormatUint(io.Stream, 10)

			metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(io.Frame), id, "input", io.ID, io.Address, index, stream, io.Type, "frame"))
			metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(io.FPS), id, "input", io.ID, io.Address, index, stream, io.Type, "fps"))
			metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(io.Packet), id, "input", io.ID, io.Address, index, stream, io.Type, "packet"))
			metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(io.PPS), id, "input", io.ID, io.Address, index, stream, io.Type, "pps"))
			metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(io.Size), id, "input", io.ID, io.Address, index, stream, io.Type, "size"))
			metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(io.Bitrate), id, "input", io.ID, io.Address, index, stream, io.Type, "bitrate"))

			if io.AVstream != nil {
				a := io.AVstream

				metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(a.Queue), id, "input", io.ID, io.Address, index, stream, io.Type, "avstream_queue"))
				metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(a.Dup), id, "input", io.ID, io.Address, index, stream, io.Type, "avstream_dup"))
				metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(a.Drop), id, "input", io.ID, io.Address, index, stream, io.Type, "avstream_drop"))
				metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(a.Enc), id, "input", io.ID, io.Address, index, stream, io.Type, "avstream_enc"))

				// Booleans are reported as 0/1 gauges.
				value = 0
				if a.Looping {
					value = 1
				}
				metrics.Add(metric.NewValue(c.restreamProcessIODescr, value, id, "input", io.ID, io.Address, index, stream, io.Type, "avstream_looping"))

				value = 0
				if a.Duplicating {
					value = 1
				}
				metrics.Add(metric.NewValue(c.restreamProcessIODescr, value, id, "input", io.ID, io.Address, index, stream, io.Type, "avstream_duplicating"))
			}
		}

		// Progress of each output stream.
		for i := range state.Progress.Output {
			io := &state.Progress.Output[i]

			index := strconv.FormatUint(io.Index, 10)
			stream := strconv.FormatUint(io.Stream, 10)

			metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(io.Frame), id, "output", io.ID, io.Address, index, stream, io.Type, "frame"))
			metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(io.FPS), id, "output", io.ID, io.Address, index, stream, io.Type, "fps"))
			metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(io.Packet), id, "output", io.ID, io.Address, index, stream, io.Type, "packet"))
			metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(io.PPS), id, "output", io.ID, io.Address, index, stream, io.Type, "pps"))
			metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(io.Size), id, "output", io.ID, io.Address, index, stream, io.Type, "size"))
			metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(io.Bitrate), id, "output", io.ID, io.Address, index, stream, io.Type, "bitrate"))
			metrics.Add(metric.NewValue(c.restreamProcessIODescr, float64(io.Quantizer), id, "output", io.ID, io.Address, index, stream, io.Type, "q"))
		}
	}

	// Global process counts per state.
	for state, value := range states {
		metrics.Add(metric.NewValue(c.restreamStatesDescr, value, state))
	}

	return metrics
}

// Stop is a no-op; this collector holds no background resources.
func (c *restreamCollector) Stop() {}

87
monitor/session.go Normal file
View File

@@ -0,0 +1,87 @@
package monitor
import (
"github.com/datarhei/core/monitor/metric"
"github.com/datarhei/core/session"
)
// sessionCollector reports session counts, traffic, and bitrates per
// session collector from a session registry.
type sessionCollector struct {
	prefix     string
	r          session.Registry
	collectors []string // names of the session collectors to report on

	totalDescr        *metric.Description
	limitDescr        *metric.Description
	activeDescr       *metric.Description
	rxBytesDescr      *metric.Description
	txBytesDescr      *metric.Description
	rxBitrateDescr    *metric.Description
	txBitrateDescr    *metric.Description
	maxTxBitrateDescr *metric.Description
	maxRxBitrateDescr *metric.Description
}

// NewSessionCollector returns a collector for the "session" metric group.
// If collectors is empty, all collectors known to the registry are used.
func NewSessionCollector(r session.Registry, collectors []string) metric.Collector {
	c := &sessionCollector{
		prefix:     "session",
		r:          r,
		collectors: collectors,
	}

	if len(collectors) == 0 {
		c.collectors = r.Collectors()
	}

	c.totalDescr = metric.NewDesc("session_total", "", []string{"collector"})
	c.limitDescr = metric.NewDesc("session_limit", "", []string{"collector"})
	c.activeDescr = metric.NewDesc("session_active", "", []string{"collector"})
	c.rxBytesDescr = metric.NewDesc("session_rxbytes", "", []string{"collector"})
	c.txBytesDescr = metric.NewDesc("session_txbytes", "", []string{"collector"})
	c.rxBitrateDescr = metric.NewDesc("session_rxbitrate", "", []string{"collector"})
	c.txBitrateDescr = metric.NewDesc("session_txbitrate", "", []string{"collector"})
	c.maxTxBitrateDescr = metric.NewDesc("session_maxtxbitrate", "", []string{"collector"})
	c.maxRxBitrateDescr = metric.NewDesc("session_maxrxbitrate", "", []string{"collector"})

	return c
}

// Prefix returns the metric name prefix of this collector.
func (c *sessionCollector) Prefix() string {
	return c.prefix
}

// Describe lists the descriptions of all metrics this collector emits.
func (c *sessionCollector) Describe() []*metric.Description {
	return []*metric.Description{
		c.totalDescr,
		c.limitDescr,
		c.activeDescr,
		c.rxBytesDescr,
		c.txBytesDescr,
		c.rxBitrateDescr,
		c.txBitrateDescr,
		c.maxTxBitrateDescr,
		c.maxRxBitrateDescr,
	}
}

// Collect reports the session summary of each configured collector.
func (c *sessionCollector) Collect() metric.Metrics {
	metrics := metric.NewMetrics()

	for _, name := range c.collectors {
		s := c.r.Summary(name)

		metrics.Add(metric.NewValue(c.totalDescr, float64(s.Summary.TotalSessions), name))
		metrics.Add(metric.NewValue(c.limitDescr, float64(s.MaxSessions), name))
		metrics.Add(metric.NewValue(c.activeDescr, float64(s.CurrentSessions), name))
		metrics.Add(metric.NewValue(c.rxBytesDescr, float64(s.Summary.TotalRxBytes), name))
		metrics.Add(metric.NewValue(c.txBytesDescr, float64(s.Summary.TotalTxBytes), name))
		metrics.Add(metric.NewValue(c.rxBitrateDescr, s.CurrentRxBitrate, name))
		metrics.Add(metric.NewValue(c.txBitrateDescr, s.CurrentTxBitrate, name))
		metrics.Add(metric.NewValue(c.maxTxBitrateDescr, s.MaxTxBitrate, name))
		// NOTE(review): a constant 0 is reported here while every other
		// metric comes from the summary — confirm whether a summary field
		// for the max RX bitrate was intended instead.
		metrics.Add(metric.NewValue(c.maxRxBitrateDescr, 0, name))
	}

	return metrics
}

// Stop is a no-op; this collector holds no background resources.
func (c *sessionCollector) Stop() {}

44
monitor/uptime.go Normal file
View File

@@ -0,0 +1,44 @@
package monitor
import (
"time"
"github.com/datarhei/core/monitor/metric"
)
// uptimeCollector reports the seconds elapsed since it was created.
type uptimeCollector struct {
	t time.Time

	uptimeDescr *metric.Description
}

// NewUptimeCollector returns a collector for the "uptime" metric group,
// measuring from the moment of this call.
func NewUptimeCollector() metric.Collector {
	return &uptimeCollector{
		t:           time.Now(),
		uptimeDescr: metric.NewDesc("uptime_uptime", "", nil),
	}
}

// Prefix returns the metric name prefix of this collector.
func (c *uptimeCollector) Prefix() string {
	return "uptime"
}

// Describe lists the descriptions of all metrics this collector emits.
func (c *uptimeCollector) Describe() []*metric.Description {
	return []*metric.Description{
		c.uptimeDescr,
	}
}

// Collect reports the current uptime in seconds.
func (c *uptimeCollector) Collect() metric.Metrics {
	metrics := metric.NewMetrics()
	metrics.Add(metric.NewValue(c.uptimeDescr, time.Since(c.t).Seconds()))

	return metrics
}

// Stop is a no-op; this collector holds no background resources.
func (c *uptimeCollector) Stop() {}