Mirror of https://github.com/datarhei/core.git, synced 2025-10-04 15:42:57 +08:00

WIP: allow update processes in cluster
@@ -141,7 +141,6 @@ func New(config Config) (RestClient, error) {
     u.Fragment = ""
 
     r.address = u.String()
-    fmt.Printf("address: %s\n", r.address)
 
     if r.client == nil {
         r.client = &http.Client{
@@ -9,6 +9,7 @@ import (
     "time"
 
     "github.com/datarhei/core/v16/cluster/proxy"
+    "github.com/datarhei/core/v16/cluster/store"
     "github.com/datarhei/core/v16/log"
     "github.com/datarhei/core/v16/restream/app"
 )
@@ -342,6 +343,12 @@ type processOpAdd struct {
     config *app.Config
 }
 
+type processOpUpdate struct {
+    nodeid    string
+    processid string
+    config    *app.Config
+}
+
 type processOpReject struct {
     processid string
     err       error
@@ -377,6 +384,19 @@ func (c *cluster) applyOpStack(stack []interface{}) {
                 "processid": v.config.ID,
                 "nodeid":    v.nodeid,
             }).Log("Adding process")
+        case processOpUpdate:
+            err := c.proxy.ProcessUpdate(v.nodeid, v.processid, v.config)
+            if err != nil {
+                c.logger.Info().WithError(err).WithFields(log.Fields{
+                    "processid": v.config.ID,
+                    "nodeid":    v.nodeid,
+                }).Log("Updating process")
+                break
+            }
+            c.logger.Info().WithFields(log.Fields{
+                "processid": v.config.ID,
+                "nodeid":    v.nodeid,
+            }).Log("Updating process")
         case processOpDelete:
             err := c.proxy.ProcessDelete(v.nodeid, v.processid)
             if err != nil {
@@ -451,7 +471,7 @@ func (c *cluster) applyOpStack(stack []interface{}) {
 
 func (c *cluster) doSynchronize() {
     want := c.store.ProcessList()
-    have := c.proxy.ProcessList()
+    have := c.proxy.ListProcesses()
     resources := c.proxy.Resources()
 
     c.logger.Debug().WithFields(log.Fields{
@@ -466,7 +486,7 @@ func (c *cluster) doSynchronize() {
 }
 
 func (c *cluster) doRebalance() {
-    have := c.proxy.ProcessList()
+    have := c.proxy.ListProcesses()
     resources := c.proxy.Resources()
 
     c.logger.Debug().WithFields(log.Fields{
@@ -482,7 +502,7 @@ func (c *cluster) doRebalance() {
 // normalizeProcessesAndResources normalizes the CPU and memory consumption of the processes and resources in-place.
 //
 // Deprecated: all values are absolute or already normed to 0-100*ncpu percent
-func normalizeProcessesAndResources(processes []proxy.ProcessConfig, resources map[string]proxy.NodeResources) {
+func normalizeProcessesAndResources(processes []proxy.Process, resources map[string]proxy.NodeResources) {
     maxNCPU := .0
 
     for _, r := range resources {
@@ -520,12 +540,12 @@ func normalizeProcessesAndResources(processes []proxy.ProcessConfig, resources m
 
 // synchronize returns a list of operations in order to adjust the "have" list to the "want" list
 // with taking the available resources on each node into account.
-func synchronize(want []app.Config, have []proxy.ProcessConfig, resources map[string]proxy.NodeResources) []interface{} {
+func synchronize(want []store.Process, have []proxy.Process, resources map[string]proxy.NodeResources) []interface{} {
     // A map from the process ID to the process config of the processes
     // we want to be running on the nodes.
-    wantMap := map[string]*app.Config{}
-    for _, config := range want {
-        wantMap[config.ID] = &config
+    wantMap := map[string]store.Process{}
+    for _, process := range want {
+        wantMap[process.Config.ID] = process
     }
 
     opStack := []interface{}{}
@@ -533,10 +553,10 @@ func synchronize(want []app.Config, have []proxy.ProcessConfig, resources map[st
     // Now we iterate through the processes we actually have running on the nodes
     // and remove them from the wantMap. We also make sure that they are running.
     // If a process is not on the wantMap, it will be deleted from the nodes.
-    haveAfterRemove := []proxy.ProcessConfig{}
+    haveAfterRemove := []proxy.Process{}
 
     for _, p := range have {
-        if _, ok := wantMap[p.Config.ID]; !ok {
+        if wantP, ok := wantMap[p.Config.ID]; !ok {
             opStack = append(opStack, processOpDelete{
                 nodeid:    p.NodeID,
                 processid: p.Config.ID,
@@ -551,6 +571,14 @@ func synchronize(want []app.Config, have []proxy.ProcessConfig, resources map[st
             }
 
             continue
+        } else {
+            if wantP.UpdatedAt.After(p.UpdatedAt) {
+                opStack = append(opStack, processOpUpdate{
+                    nodeid:    p.NodeID,
+                    processid: p.Config.ID,
+                    config:    wantP.Config,
+                })
+            }
         }
 
         delete(wantMap, p.Config.ID)
@@ -571,11 +599,11 @@ func synchronize(want []app.Config, have []proxy.ProcessConfig, resources map[st
     haveReferenceAffinityMap := createReferenceAffinityMap(have)
 
     // Now all remaining processes in the wantMap must be added to one of the nodes
-    for _, config := range wantMap {
+    for _, process := range wantMap {
         // If a process doesn't have any limits defined, reject that process
-        if config.LimitCPU <= 0 || config.LimitMemory <= 0 {
+        if process.Config.LimitCPU <= 0 || process.Config.LimitMemory <= 0 {
             opStack = append(opStack, processOpReject{
-                processid: config.ID,
+                processid: process.Config.ID,
                 err:       errNoLimitsDefined,
             })
 
@@ -589,11 +617,11 @@ func synchronize(want []app.Config, have []proxy.ProcessConfig, resources map[st
 
         // Try to add the process to a node where other processes with the same
         // reference currently reside.
-        if len(config.Reference) != 0 {
-            for _, count := range haveReferenceAffinityMap[config.Reference] {
+        if len(process.Config.Reference) != 0 {
+            for _, count := range haveReferenceAffinityMap[process.Config.Reference] {
                 r := resources[count.nodeid]
-                cpu := config.LimitCPU * r.NCPU // TODO: in the vod branch this changed if system-wide limits are given
-                mem := config.LimitMemory
+                cpu := process.Config.LimitCPU * r.NCPU // TODO: in the vod branch this changed if system-wide limits are given
+                mem := process.Config.LimitMemory
 
                 if r.CPU+cpu < r.CPULimit && r.Mem+mem < r.MemLimit {
                     nodeid = count.nodeid
@@ -605,8 +633,8 @@ func synchronize(want []app.Config, have []proxy.ProcessConfig, resources map[st
         // Find the node with the most resources available
         if len(nodeid) == 0 {
             for id, r := range resources {
-                cpu := config.LimitCPU * r.NCPU // TODO: in the vod branch this changed if system-wide limits are given
-                mem := config.LimitMemory
+                cpu := process.Config.LimitCPU * r.NCPU // TODO: in the vod branch this changed if system-wide limits are given
+                mem := process.Config.LimitMemory
 
                 if len(nodeid) == 0 {
                     if r.CPU+cpu < r.CPULimit && r.Mem+mem < r.MemLimit {
@@ -625,19 +653,19 @@ func synchronize(want []app.Config, have []proxy.ProcessConfig, resources map[st
         if len(nodeid) != 0 {
             opStack = append(opStack, processOpAdd{
                 nodeid: nodeid,
-                config: config,
+                config: process.Config,
             })
 
             // Adjust the resources
             r, ok := resources[nodeid]
             if ok {
-                r.CPU += config.LimitCPU * r.NCPU // TODO: in the vod branch this changed if system-wide limits are given
-                r.Mem += config.LimitMemory
+                r.CPU += process.Config.LimitCPU * r.NCPU // TODO: in the vod branch this changed if system-wide limits are given
+                r.Mem += process.Config.LimitMemory
                 resources[nodeid] = r
             }
         } else {
             opStack = append(opStack, processOpReject{
-                processid: config.ID,
+                processid: process.Config.ID,
                 err:       errNotEnoughResources,
             })
         }
@@ -651,7 +679,7 @@ type referenceAffinityNodeCount struct {
     count uint64
 }
 
-func createReferenceAffinityMap(processes []proxy.ProcessConfig) map[string][]referenceAffinityNodeCount {
+func createReferenceAffinityMap(processes []proxy.Process) map[string][]referenceAffinityNodeCount {
     referenceAffinityMap := map[string][]referenceAffinityNodeCount{}
     for _, p := range processes {
         if len(p.Config.Reference) == 0 {
@@ -696,9 +724,9 @@ func createReferenceAffinityMap(processes []proxy.ProcessConfig) map[string][]re
 
 // rebalance returns a list of operations that will move running processes away from nodes
 // that are overloaded.
-func rebalance(have []proxy.ProcessConfig, resources map[string]proxy.NodeResources) []interface{} {
+func rebalance(have []proxy.Process, resources map[string]proxy.NodeResources) []interface{} {
     // Group the processes by node
-    processNodeMap := map[string][]proxy.ProcessConfig{}
+    processNodeMap := map[string][]proxy.Process{}
 
     for _, p := range have {
         processNodeMap[p.NodeID] = append(processNodeMap[p.NodeID], p)
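Note on the change above: synchronize() now emits a processOpUpdate when a process already runs on a node but the stored ("want") entry carries a newer UpdatedAt than the deployed ("have") entry. The following is a minimal, self-contained sketch of that timestamp rule only; the trimmed-down types are illustrative stand-ins for store.Process and proxy.Process, not the structs from this commit.

package main

import (
    "fmt"
    "time"
)

// wantProcess stands in for store.Process (the desired state in the store).
type wantProcess struct {
    ID        string
    UpdatedAt time.Time
}

// haveProcess stands in for proxy.Process (what a node actually runs).
type haveProcess struct {
    ID        string
    NodeID    string
    UpdatedAt time.Time
}

func main() {
    now := time.Now()

    want := wantProcess{ID: "foobar", UpdatedAt: now}
    have := haveProcess{ID: "foobar", NodeID: "node1", UpdatedAt: now.Add(-time.Minute)}

    // Same rule as the new code path: update in place only when the desired
    // config is newer than what the node currently runs; otherwise leave it alone.
    if want.UpdatedAt.After(have.UpdatedAt) {
        fmt.Printf("update process %s on %s\n", have.ID, have.NodeID)
    } else {
        fmt.Printf("process %s on %s is up to date\n", have.ID, have.NodeID)
    }
}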
@@ -2,23 +2,28 @@ package cluster
 
 import (
     "testing"
+    "time"
 
     "github.com/datarhei/core/v16/cluster/proxy"
+    "github.com/datarhei/core/v16/cluster/store"
     "github.com/datarhei/core/v16/restream/app"
 
     "github.com/stretchr/testify/require"
 )
 
 func TestSynchronizeAdd(t *testing.T) {
-    want := []app.Config{
+    want := []store.Process{
         {
-            ID:          "foobar",
-            LimitCPU:    10,
-            LimitMemory: 50,
+            UpdatedAt: time.Now(),
+            Config: &app.Config{
+                ID:          "foobar",
+                LimitCPU:    10,
+                LimitMemory: 50,
+            },
         },
     }
 
-    have := []proxy.ProcessConfig{}
+    have := []proxy.Process{}
 
     resources := map[string]proxy.NodeResources{
         "node1": {
@@ -69,22 +74,28 @@ func TestSynchronizeAdd(t *testing.T) {
 }
 
 func TestSynchronizeAddReferenceAffinity(t *testing.T) {
-    want := []app.Config{
+    want := []store.Process{
         {
-            ID:          "foobar",
-            Reference:   "barfoo",
-            LimitCPU:    10,
-            LimitMemory: 20,
+            UpdatedAt: time.Now(),
+            Config: &app.Config{
+                ID:          "foobar",
+                Reference:   "barfoo",
+                LimitCPU:    10,
+                LimitMemory: 20,
+            },
         },
         {
-            ID:          "foobar2",
-            Reference:   "barfoo",
-            LimitCPU:    10,
-            LimitMemory: 30,
+            UpdatedAt: time.Now(),
+            Config: &app.Config{
+                ID:          "foobar2",
+                Reference:   "barfoo",
+                LimitCPU:    10,
+                LimitMemory: 30,
+            },
         },
     }
 
-    have := []proxy.ProcessConfig{
+    have := []proxy.Process{
         {
             NodeID: "node2",
             Order:  "start",
@@ -132,15 +143,18 @@ func TestSynchronizeAddReferenceAffinity(t *testing.T) {
 }
 
 func TestSynchronizeAddLimit(t *testing.T) {
-    want := []app.Config{
+    want := []store.Process{
         {
-            ID:          "foobar",
-            LimitCPU:    10,
-            LimitMemory: 5,
+            UpdatedAt: time.Now(),
+            Config: &app.Config{
+                ID:          "foobar",
+                LimitCPU:    10,
+                LimitMemory: 5,
+            },
         },
     }
 
-    have := []proxy.ProcessConfig{}
+    have := []proxy.Process{}
 
     resources := map[string]proxy.NodeResources{
         "node1": {
@@ -191,15 +205,18 @@ func TestSynchronizeAddLimit(t *testing.T) {
 }
 
 func TestSynchronizeAddNoResourcesCPU(t *testing.T) {
-    want := []app.Config{
+    want := []store.Process{
         {
-            ID:          "foobar",
-            LimitCPU:    30,
-            LimitMemory: 5,
+            UpdatedAt: time.Now(),
+            Config: &app.Config{
+                ID:          "foobar",
+                LimitCPU:    30,
+                LimitMemory: 5,
+            },
         },
     }
 
-    have := []proxy.ProcessConfig{}
+    have := []proxy.Process{}
 
     resources := map[string]proxy.NodeResources{
         "node1": {
@@ -229,15 +246,18 @@ func TestSynchronizeAddNoResourcesCPU(t *testing.T) {
 }
 
 func TestSynchronizeAddNoResourcesMemory(t *testing.T) {
-    want := []app.Config{
+    want := []store.Process{
         {
-            ID:          "foobar",
-            LimitCPU:    1,
-            LimitMemory: 50,
+            UpdatedAt: time.Now(),
+            Config: &app.Config{
+                ID:          "foobar",
+                LimitCPU:    1,
+                LimitMemory: 50,
+            },
         },
     }
 
-    have := []proxy.ProcessConfig{}
+    have := []proxy.Process{}
 
     resources := map[string]proxy.NodeResources{
         "node1": {
@@ -267,13 +287,16 @@ func TestSynchronizeAddNoResourcesMemory(t *testing.T) {
 }
 
 func TestSynchronizeAddNoLimits(t *testing.T) {
-    want := []app.Config{
+    want := []store.Process{
         {
-            ID: "foobar",
+            UpdatedAt: time.Now(),
+            Config: &app.Config{
+                ID: "foobar",
+            },
         },
     }
 
-    have := []proxy.ProcessConfig{}
+    have := []proxy.Process{}
 
     resources := map[string]proxy.NodeResources{
         "node1": {
@@ -303,9 +326,9 @@ func TestSynchronizeAddNoLimits(t *testing.T) {
 }
 
 func TestSynchronizeRemove(t *testing.T) {
-    want := []app.Config{}
+    want := []store.Process{}
 
-    have := []proxy.ProcessConfig{
+    have := []proxy.Process{
         {
             NodeID: "node2",
             Order:  "start",
@@ -364,15 +387,18 @@ func TestSynchronizeRemove(t *testing.T) {
 }
 
 func TestSynchronizeAddRemove(t *testing.T) {
-    want := []app.Config{
+    want := []store.Process{
         {
-            ID:          "foobar1",
-            LimitCPU:    10,
-            LimitMemory: 5,
+            UpdatedAt: time.Now(),
+            Config: &app.Config{
+                ID:          "foobar1",
+                LimitCPU:    10,
+                LimitMemory: 5,
+            },
         },
     }
 
-    have := []proxy.ProcessConfig{
+    have := []proxy.Process{
         {
             NodeID: "node2",
             Order:  "start",
@@ -439,7 +465,7 @@ func TestSynchronizeAddRemove(t *testing.T) {
 }
 
 func TestRebalanceNothingToDo(t *testing.T) {
-    processes := []proxy.ProcessConfig{
+    processes := []proxy.Process{
         {
             NodeID: "node1",
             Order:  "start",
@@ -487,7 +513,7 @@ func TestRebalanceNothingToDo(t *testing.T) {
 }
 
 func TestRebalanceOverload(t *testing.T) {
-    processes := []proxy.ProcessConfig{
+    processes := []proxy.Process{
         {
             NodeID: "node1",
             Order:  "start",
@@ -573,7 +599,7 @@ func TestRebalanceOverload(t *testing.T) {
 }
 
 func TestRebalanceSkip(t *testing.T) {
-    processes := []proxy.ProcessConfig{
+    processes := []proxy.Process{
         {
             NodeID: "node1",
             Order:  "start",
@@ -667,7 +693,7 @@ func TestRebalanceSkip(t *testing.T) {
 }
 
 func TestRebalanceReferenceAffinity(t *testing.T) {
-    processes := []proxy.ProcessConfig{
+    processes := []proxy.Process{
         {
             NodeID: "node1",
             Order:  "start",
@@ -794,7 +820,7 @@ func TestRebalanceReferenceAffinity(t *testing.T) {
 }
 
 func TestCreateReferenceAffinityNodeMap(t *testing.T) {
-    processes := []proxy.ProcessConfig{
+    processes := []proxy.Process{
         {
             NodeID: "node1",
             Order:  "start",
@@ -27,7 +27,7 @@ type Node interface {
     GetURL(path string) (string, error)
     GetFile(path string) (io.ReadCloser, error)
 
-    ProcessList() ([]ProcessConfig, error)
+    ProcessList() ([]Process, error)
     ProcessAdd(*app.Config) error
     ProcessStart(id string) error
     ProcessStop(id string) error
@@ -37,11 +37,10 @@ type Node interface {
 }
 
 type NodeReader interface {
-    ID() string
-    Address() string
     IPs() []string
     Files() NodeFiles
-    State() NodeState
+    About() NodeAbout
+    Version() NodeVersion
 }
 
 type NodeFiles struct {
@@ -58,14 +57,27 @@ type NodeResources struct {
     MemLimit uint64 // Defined memory limit in bytes
 }
 
-type NodeState struct {
+type NodeAbout struct {
     ID          string
+    Name        string
+    Address     string
     State       string
+    CreatedAt   time.Time
+    Uptime      time.Duration
     LastContact time.Time
     Latency     time.Duration
     Resources   NodeResources
 }
 
+type NodeVersion struct {
+    Number   string
+    Commit   string
+    Branch   string
+    Build    time.Time
+    Arch     string
+    Compiler string
+}
+
 type nodeState string
 
 func (n nodeState) String() string {
@@ -364,50 +376,34 @@ func (n *node) StopFiles() {
     n.cancelFiles()
 }
 
-func (n *node) Address() string {
-    return n.address
-}
-
-func (n *node) IPs() []string {
-    return n.ips
-}
-
-func (n *node) ID() string {
+func (n *node) About() NodeAbout {
     n.peerLock.RLock()
-    defer n.peerLock.RUnlock()
 
     if n.peer == nil {
-        return ""
+        n.peerLock.RUnlock()
+        return NodeAbout{}
     }
 
-    return n.peer.ID()
-}
+    about := n.peer.About()
 
-func (n *node) Files() NodeFiles {
+    n.peerLock.RUnlock()
+
+    createdAt, err := time.Parse(time.RFC3339, about.CreatedAt)
+    if err != nil {
+        createdAt = time.Now()
+    }
+
     n.stateLock.RLock()
     defer n.stateLock.RUnlock()
 
-    state := NodeFiles{
-        ID:         n.ID(),
-        LastUpdate: n.lastUpdate,
-    }
-
-    if n.state != stateDisconnected && time.Since(n.lastUpdate) <= 2*time.Second {
-        state.Files = make([]string, len(n.filesList))
-        copy(state.Files, n.filesList)
-    }
-
-    return state
-}
-
-func (n *node) State() NodeState {
-    n.stateLock.RLock()
-    defer n.stateLock.RUnlock()
-
-    state := NodeState{
-        ID:          n.ID(),
-        LastContact: n.lastContact,
+    state := NodeAbout{
+        ID:          about.ID,
+        Name:        about.Name,
+        Address:     n.address,
         State:       n.state.String(),
+        CreatedAt:   createdAt,
+        Uptime:      time.Since(createdAt),
+        LastContact: n.lastContact,
         Latency:     time.Duration(n.latency * float64(time.Second)),
         Resources: NodeResources{
             NCPU: n.resources.ncpu,
@@ -421,6 +417,54 @@ func (n *node) State() NodeState {
     return state
 }
 
+func (n *node) Version() NodeVersion {
+    n.peerLock.RLock()
+    defer n.peerLock.RUnlock()
+
+    if n.peer == nil {
+        return NodeVersion{}
+    }
+
+    about := n.peer.About()
+
+    build, err := time.Parse(time.RFC3339, about.Version.Build)
+    if err != nil {
+        build = time.Time{}
+    }
+
+    version := NodeVersion{
+        Number:   about.Version.Number,
+        Commit:   about.Version.Commit,
+        Branch:   about.Version.Branch,
+        Build:    build,
+        Arch:     about.Version.Arch,
+        Compiler: about.Version.Compiler,
+    }
+
+    return version
+}
+
+func (n *node) IPs() []string {
+    return n.ips
+}
+
+func (n *node) Files() NodeFiles {
+    n.stateLock.RLock()
+    defer n.stateLock.RUnlock()
+
+    state := NodeFiles{
+        ID:         n.About().ID,
+        LastUpdate: n.lastUpdate,
+    }
+
+    if n.state != stateDisconnected && time.Since(n.lastUpdate) <= 2*time.Second {
+        state.Files = make([]string, len(n.filesList))
+        copy(state.Files, n.filesList)
+    }
+
+    return state
+}
+
 func (n *node) files() {
     filesChan := make(chan string, 1024)
     filesList := []string{}
@@ -603,7 +647,7 @@ func (n *node) GetFile(path string) (io.ReadCloser, error) {
     return nil, fmt.Errorf("unknown prefix")
 }
 
-func (n *node) ProcessList() ([]ProcessConfig, error) {
+func (n *node) ProcessList() ([]Process, error) {
     n.peerLock.RLock()
     defer n.peerLock.RUnlock()
 
@@ -619,16 +663,17 @@ func (n *node) ProcessList() ([]ProcessConfig, error) {
         return nil, err
     }
 
-    processes := []ProcessConfig{}
+    processes := []Process{}
 
     for _, p := range list {
-        process := ProcessConfig{
-            NodeID:  n.ID(),
+        process := Process{
+            NodeID:    n.About().ID,
             Order:   p.State.Order,
             State:   p.State.State,
             Mem:     p.State.Memory,
             Runtime: time.Duration(p.State.Runtime) * time.Second,
-            Config:  p.Config.Marshal(),
+            UpdatedAt: time.Unix(p.UpdatedAt, 0),
+            Config:    p.Config.Marshal(),
         }
 
         if x, err := p.State.CPU.Float64(); err == nil {
@@ -23,67 +23,19 @@ type Proxy interface {
     ProxyReader
     Reader() ProxyReader
 
-    ProxyProcessor
-    Processor() ProxyProcessor
-}
-
-type ProxyProcessor interface {
-    Resources() map[string]NodeResources
-
-    ProcessList() []ProcessConfig
     ProcessAdd(nodeid string, config *app.Config) error
     ProcessDelete(nodeid string, id string) error
     ProcessStart(nodeid string, id string) error
-}
+    ProcessUpdate(nodeid string, id string, config *app.Config) error
 
-type proxyProcessor struct {
-    proxy *proxy
-}
-
-func (p *proxyProcessor) Resources() map[string]NodeResources {
-    if p.proxy == nil {
-        return nil
-    }
-
-    return p.proxy.Resources()
-}
-
-func (p *proxyProcessor) ProcessList() []ProcessConfig {
-    if p.proxy == nil {
-        return nil
-    }
-
-    return p.proxy.ProcessList()
-}
-
-func (p *proxyProcessor) ProcessAdd(nodeid string, config *app.Config) error {
-    if p.proxy == nil {
-        return fmt.Errorf("no proxy provided")
-    }
-
-    return p.proxy.ProcessAdd(nodeid, config)
-}
-
-func (p *proxyProcessor) ProcessDelete(nodeid string, id string) error {
-    if p.proxy == nil {
-        return fmt.Errorf("no proxy provided")
-    }
-
-    return p.proxy.ProcessDelete(nodeid, id)
-}
-
-func (p *proxyProcessor) ProcessStart(nodeid string, id string) error {
-    if p.proxy == nil {
-        return fmt.Errorf("no proxy provided")
-    }
-
-    return p.proxy.ProcessStart(nodeid, id)
 }
 
 type ProxyReader interface {
     ListNodes() []NodeReader
     GetNode(id string) (NodeReader, error)
 
+    Resources() map[string]NodeResources
+    ListProcesses() []Process
+
     GetURL(path string) (string, error)
     GetFile(path string) (io.ReadCloser, error)
 }
@@ -112,6 +64,22 @@ func (p *proxyReader) GetNode(id string) (NodeReader, error) {
     return p.proxy.GetNode(id)
 }
 
+func (p *proxyReader) Resources() map[string]NodeResources {
+    if p.proxy == nil {
+        return nil
+    }
+
+    return p.proxy.Resources()
+}
+
+func (p *proxyReader) ListProcesses() []Process {
+    if p.proxy == nil {
+        return nil
+    }
+
+    return p.proxy.ListProcesses()
+}
+
 func (p *proxyReader) GetURL(path string) (string, error) {
     if p.proxy == nil {
         return "", fmt.Errorf("no proxy provided")
@@ -264,12 +232,6 @@ func (p *proxy) Reader() ProxyReader {
     }
 }
 
-func (p *proxy) Processor() ProxyProcessor {
-    return &proxyProcessor{
-        proxy: p,
-    }
-}
-
 func (p *proxy) Resources() map[string]NodeResources {
     resources := map[string]NodeResources{}
 
@@ -277,15 +239,18 @@ func (p *proxy) Resources() map[string]NodeResources {
     defer p.lock.RUnlock()
 
     for _, node := range p.nodes {
-        resources[node.ID()] = node.State().Resources
+        about := node.About()
+        resources[about.ID] = about.Resources
     }
 
     return resources
 }
 
 func (p *proxy) AddNode(id string, node Node) (string, error) {
-    if id != node.ID() {
-        return "", fmt.Errorf("the provided (%s) and retrieved (%s) ID's don't match", id, node.ID())
+    about := node.About()
+
+    if id != about.ID {
+        return "", fmt.Errorf("the provided (%s) and retrieved (%s) ID's don't match", id, about.ID)
     }
 
     p.lock.Lock()
@@ -315,7 +280,8 @@ func (p *proxy) AddNode(id string, node Node) (string, error) {
     node.StartFiles(p.updates)
 
     p.logger.Info().WithFields(log.Fields{
-        "address": node.Address(),
+        "address": about.Address,
+        "name":    about.Name,
         "id":      id,
     }).Log("Added node")
 
@@ -449,19 +415,20 @@ func (p *proxy) GetFile(path string) (io.ReadCloser, error) {
     return data, nil
 }
 
-type ProcessConfig struct {
+type Process struct {
     NodeID  string
     Order   string
     State   string
     CPU     float64 // Current CPU load of this process, 0-100*ncpu
     Mem     uint64  // Currently consumed memory of this process in bytes
     Runtime time.Duration
-    Config  *app.Config
+    UpdatedAt time.Time
+    Config    *app.Config
 }
 
-func (p *proxy) ProcessList() []ProcessConfig {
-    processChan := make(chan ProcessConfig, 64)
-    processList := []ProcessConfig{}
+func (p *proxy) ListProcesses() []Process {
+    processChan := make(chan Process, 64)
+    processList := []Process{}
 
     wgList := sync.WaitGroup{}
     wgList.Add(1)
@@ -469,8 +436,8 @@ func (p *proxy) ProcessList() []ProcessConfig {
     go func() {
         defer wgList.Done()
 
-        for file := range processChan {
-            processList = append(processList, file)
+        for process := range processChan {
+            processList = append(processList, process)
         }
     }()
 
@@ -480,7 +447,7 @@ func (p *proxy) ProcessList() []ProcessConfig {
     for _, node := range p.nodes {
         wg.Add(1)
 
-        go func(node Node, p chan<- ProcessConfig) {
+        go func(node Node, p chan<- Process) {
             defer wg.Done()
 
             processes, err := node.ProcessList()
@@ -559,3 +526,15 @@ func (p *proxy) ProcessStart(nodeid string, id string) error {
 
     return nil
 }
+
+func (p *proxy) ProcessUpdate(nodeid string, id string, config *app.Config) error {
+    p.lock.RLock()
+    defer p.lock.RUnlock()
+
+    _, ok := p.nodes[nodeid]
+    if !ok {
+        return fmt.Errorf("node not found")
+    }
+
+    return fmt.Errorf("not implemented")
+}
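Note on the change above: ListProcesses() keeps the fan-out pattern of the former ProcessList(): one goroutine per node feeds a buffered channel while a single collector goroutine appends into the result slice. A reduced, runnable sketch of that pattern follows; the node names and the fetch helper are invented for illustration and are not part of the proxy API.

package main

import (
    "fmt"
    "sync"
)

// fetch stands in for node.ProcessList(): it returns the processes one node reports.
func fetch(node string) []string {
    return []string{node + "/process-1", node + "/process-2"}
}

func main() {
    nodes := []string{"node1", "node2", "node3"}

    results := make(chan string, 64)
    list := []string{}

    // Single collector, mirroring the wgList goroutine in ListProcesses().
    wgList := sync.WaitGroup{}
    wgList.Add(1)
    go func() {
        defer wgList.Done()
        for p := range results {
            list = append(list, p)
        }
    }()

    // One goroutine per node, mirroring the per-node fan-out.
    wg := sync.WaitGroup{}
    for _, node := range nodes {
        wg.Add(1)
        go func(node string) {
            defer wg.Done()
            for _, p := range fetch(node) {
                results <- p
            }
        }(node)
    }

    wg.Wait()
    close(results)
    wgList.Wait()

    fmt.Println(list)
}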
@@ -5,6 +5,7 @@ import (
     "fmt"
     "io"
     "sync"
+    "time"
 
     "github.com/datarhei/core/v16/log"
     "github.com/datarhei/core/v16/restream/app"
@@ -15,8 +16,13 @@ import (
 type Store interface {
     raft.FSM
 
-    ProcessList() []app.Config
-    GetProcess(id string) (app.Config, error)
+    ProcessList() []Process
+    GetProcess(id string) (Process, error)
+}
+
+type Process struct {
+    UpdatedAt time.Time
+    Config    *app.Config
 }
 
 type operation string
@@ -42,7 +48,7 @@ type CommandRemoveProcess struct {
 // Implement a FSM
 type store struct {
     lock    sync.RWMutex
-    Process map[string]app.Config
+    Process map[string]Process
 
     logger log.Logger
 }
@@ -53,7 +59,7 @@ type Config struct {
 
 func NewStore(config Config) (Store, error) {
     s := &store{
-        Process: map[string]app.Config{},
+        Process: map[string]Process{},
         logger:  config.Logger,
     }
 
@@ -89,7 +95,10 @@ func (s *store) Apply(entry *raft.Log) interface{} {
         json.Unmarshal(b, &cmd)
 
         s.lock.Lock()
-        s.Process[cmd.ID] = cmd.Config
+        s.Process[cmd.ID] = Process{
+            UpdatedAt: time.Now(),
+            Config:    &cmd.Config,
+        }
         s.lock.Unlock()
     case OpRemoveProcess:
         b, _ := json.Marshal(c.Data)
@@ -139,29 +148,35 @@ func (s *store) Restore(snapshot io.ReadCloser) error {
     return nil
 }
 
-func (s *store) ProcessList() []app.Config {
+func (s *store) ProcessList() []Process {
     s.lock.RLock()
     defer s.lock.RUnlock()
 
-    processes := []app.Config{}
+    processes := []Process{}
 
-    for _, cfg := range s.Process {
-        processes = append(processes, *cfg.Clone())
+    for _, p := range s.Process {
+        processes = append(processes, Process{
+            UpdatedAt: p.UpdatedAt,
+            Config:    p.Config.Clone(),
+        })
     }
 
     return processes
 }
 
-func (s *store) GetProcess(id string) (app.Config, error) {
+func (s *store) GetProcess(id string) (Process, error) {
     s.lock.RLock()
     defer s.lock.RUnlock()
 
-    cfg, ok := s.Process[id]
+    process, ok := s.Process[id]
     if !ok {
-        return app.Config{}, fmt.Errorf("not found")
+        return Process{}, fmt.Errorf("not found")
     }
 
-    return *cfg.Clone(), nil
+    return Process{
+        UpdatedAt: process.UpdatedAt,
+        Config:    process.Config.Clone(),
+    }, nil
 }
 
 type fsmSnapshot struct {
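Note on the change above: the store now wraps each app.Config in a Process record with an UpdatedAt timestamp, and ProcessList()/GetProcess() hand out a clone of the stored config rather than the stored pointer. A small illustrative sketch of why the clone matters; the Config type here is a simplified stand-in for app.Config.

package main

import (
    "fmt"
    "time"
)

// Config is a simplified stand-in for app.Config.
type Config struct {
    ID string
}

// Clone returns a copy, analogous to the Clone() used on the real config type.
func (c *Config) Clone() *Config {
    clone := *c
    return &clone
}

type Process struct {
    UpdatedAt time.Time
    Config    *Config
}

func main() {
    store := map[string]Process{
        "foobar": {UpdatedAt: time.Now(), Config: &Config{ID: "foobar"}},
    }

    // The read path returns a copy, so callers cannot mutate the stored config.
    p := store["foobar"]
    out := Process{UpdatedAt: p.UpdatedAt, Config: p.Config.Clone()}

    out.Config.ID = "changed"
    fmt.Println(store["foobar"].Config.ID) // still "foobar"
}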
docs/docs.go (224 changed lines)
@@ -219,6 +219,9 @@ const docTemplate = `{
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "v16.?.?"
+                ],
                 "summary": "List of nodes in the cluster",
                 "operationId": "cluster-3-get-cluster",
                 "responses": {
@@ -237,7 +240,7 @@ const docTemplate = `{
                 }
             }
         },
-        "/api/v3/cluster/proxy": {
+        "/api/v3/cluster/node": {
             "get": {
                 "security": [
                     {
@@ -248,8 +251,11 @@ const docTemplate = `{
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "v16.?.?"
+                ],
                 "summary": "List of proxy nodes in the cluster",
-                "operationId": "cluster-3-get-proxy-nodes",
+                "operationId": "cluster-3-get-nodes",
                 "responses": {
                     "200": {
                         "description": "OK",
@@ -269,7 +275,7 @@ const docTemplate = `{
                 }
             }
         },
-        "/api/v3/cluster/proxy/node/{id}": {
+        "/api/v3/cluster/node/{id}": {
            "get": {
                 "security": [
                     {
@@ -280,8 +286,11 @@ const docTemplate = `{
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "v16.?.?"
+                ],
                 "summary": "List a proxy node by its ID",
-                "operationId": "cluster-3-get-proxy-node",
+                "operationId": "cluster-3-get-node",
                 "parameters": [
                     {
                         "type": "string",
@@ -307,7 +316,7 @@ const docTemplate = `{
                 }
             }
         },
-        "/api/v3/cluster/proxy/node/{id}/files": {
+        "/api/v3/cluster/node/{id}/files": {
             "get": {
                 "security": [
                     {
@@ -318,8 +327,11 @@ const docTemplate = `{
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "v16.?.?"
+                ],
                 "summary": "List the files of a proxy node by its ID",
-                "operationId": "cluster-3-get-proxy-node-files",
+                "operationId": "cluster-3-get-node-files",
                 "parameters": [
                     {
                         "type": "string",
@@ -345,6 +357,120 @@ const docTemplate = `{
                 }
             }
         },
+        "/api/v3/cluster/process": {
+            "get": {
+                "security": [
+                    {
+                        "ApiKeyAuth": []
+                    }
+                ],
+                "description": "List of processes in the cluster",
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "v16.?.?"
+                ],
+                "summary": "List of processes in the cluster",
+                "operationId": "cluster-3-list-processes",
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "array",
+                            "items": {
+                                "$ref": "#/definitions/api.ClusterProcess"
+                            }
+                        }
+                    }
+                }
+            },
+            "post": {
+                "security": [
+                    {
+                        "ApiKeyAuth": []
+                    }
+                ],
+                "description": "Add a new FFmpeg process",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "v16.?.?"
+                ],
+                "summary": "Add a new process",
+                "operationId": "cluster-3-add-process",
+                "parameters": [
+                    {
+                        "description": "Process config",
+                        "name": "config",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/api.ProcessConfig"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/api.ProcessConfig"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.Error"
+                        }
+                    }
+                }
+            }
+        },
+        "/api/v3/cluster/process/{id}": {
+            "delete": {
+                "security": [
+                    {
+                        "ApiKeyAuth": []
+                    }
+                ],
+                "description": "Delete a process by its ID",
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "v16.?.?"
+                ],
+                "summary": "Delete a process by its ID",
+                "operationId": "cluster-3-delete-process",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "description": "Process ID",
+                        "name": "id",
+                        "in": "path",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "404": {
+                        "description": "Not Found",
+                        "schema": {
+                            "$ref": "#/definitions/api.Error"
+                        }
+                    }
+                }
+            }
+        },
         "/api/v3/config": {
             "get": {
                 "security": [
@@ -2251,7 +2377,7 @@ const docTemplate = `{
                 "id": {
                     "type": "string"
                 },
-                "nodes": {
+                "server": {
                     "type": "array",
                     "items": {
                         "$ref": "#/definitions/api.ClusterServer"
@@ -2268,29 +2394,99 @@ const docTemplate = `{
                 "address": {
                     "type": "string"
                 },
+                "created_at": {
+                    "type": "string"
+                },
                 "id": {
                     "type": "string"
                 },
-                "last_ping": {
-                    "type": "integer"
-                },
-                "last_update": {
+                "last_contact": {
+                    "description": "unix timestamp",
                     "type": "integer"
                 },
                 "latency_ms": {
                     "description": "milliseconds",
                     "type": "number"
                 },
+                "name": {
+                    "type": "string"
+                },
+                "resources": {
+                    "$ref": "#/definitions/api.ClusterNodeResources"
+                },
                 "state": {
                     "type": "string"
+                },
+                "uptime_seconds": {
+                    "type": "integer"
                 }
             }
         },
         "api.ClusterNodeFiles": {
             "type": "object",
-            "additionalProperties": {
-                "type": "array",
-                "items": {
+            "properties": {
+                "files": {
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "array",
+                        "items": {
+                            "type": "string"
+                        }
+                    }
+                },
+                "last_update": {
+                    "description": "unix timestamp",
+                    "type": "integer"
+                }
+            }
+        },
+        "api.ClusterNodeResources": {
+            "type": "object",
+            "properties": {
+                "cpu_limit": {
+                    "description": "percent 0-100*npcu",
+                    "type": "number"
+                },
+                "cpu_used": {
+                    "description": "percent 0-100*npcu",
+                    "type": "number"
+                },
+                "memory_limit_bytes": {
+                    "type": "integer"
+                },
+                "memory_used_bytes": {
+                    "type": "integer"
+                },
+                "ncpu": {
+                    "type": "number"
+                }
+            }
+        },
+        "api.ClusterProcess": {
+            "type": "object",
+            "properties": {
+                "cpu": {
+                    "type": "number"
+                },
+                "id": {
+                    "type": "string"
+                },
+                "memory_bytes": {
+                    "type": "integer"
+                },
+                "node_id": {
+                    "type": "string"
+                },
+                "order": {
+                    "type": "string"
+                },
+                "reference": {
+                    "type": "string"
+                },
+                "runtime_seconds": {
+                    "type": "integer"
+                },
+                "state": {
                     "type": "string"
                 }
             }
@@ -212,6 +212,9 @@
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "v16.?.?"
+                ],
                 "summary": "List of nodes in the cluster",
                 "operationId": "cluster-3-get-cluster",
                 "responses": {
@@ -230,7 +233,7 @@
                 }
             }
         },
-        "/api/v3/cluster/proxy": {
+        "/api/v3/cluster/node": {
             "get": {
                 "security": [
                     {
@@ -241,8 +244,11 @@
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "v16.?.?"
+                ],
                 "summary": "List of proxy nodes in the cluster",
-                "operationId": "cluster-3-get-proxy-nodes",
+                "operationId": "cluster-3-get-nodes",
                 "responses": {
                     "200": {
                         "description": "OK",
@@ -262,7 +268,7 @@
                 }
             }
         },
-        "/api/v3/cluster/proxy/node/{id}": {
+        "/api/v3/cluster/node/{id}": {
             "get": {
                 "security": [
                     {
@@ -273,8 +279,11 @@
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "v16.?.?"
+                ],
                 "summary": "List a proxy node by its ID",
-                "operationId": "cluster-3-get-proxy-node",
+                "operationId": "cluster-3-get-node",
                 "parameters": [
                     {
                         "type": "string",
@@ -300,7 +309,7 @@
                 }
             }
         },
-        "/api/v3/cluster/proxy/node/{id}/files": {
+        "/api/v3/cluster/node/{id}/files": {
             "get": {
                 "security": [
                     {
@@ -311,8 +320,11 @@
                 "produces": [
                     "application/json"
                 ],
+                "tags": [
+                    "v16.?.?"
+                ],
                 "summary": "List the files of a proxy node by its ID",
-                "operationId": "cluster-3-get-proxy-node-files",
+                "operationId": "cluster-3-get-node-files",
                 "parameters": [
                     {
                         "type": "string",
@@ -338,6 +350,120 @@
                 }
             }
         },
+        "/api/v3/cluster/process": {
+            "get": {
+                "security": [
+                    {
+                        "ApiKeyAuth": []
+                    }
+                ],
+                "description": "List of processes in the cluster",
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "v16.?.?"
+                ],
+                "summary": "List of processes in the cluster",
+                "operationId": "cluster-3-list-processes",
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "array",
+                            "items": {
+                                "$ref": "#/definitions/api.ClusterProcess"
+                            }
+                        }
+                    }
+                }
+            },
+            "post": {
+                "security": [
+                    {
+                        "ApiKeyAuth": []
+                    }
+                ],
+                "description": "Add a new FFmpeg process",
+                "consumes": [
+                    "application/json"
+                ],
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "v16.?.?"
+                ],
+                "summary": "Add a new process",
+                "operationId": "cluster-3-add-process",
+                "parameters": [
+                    {
+                        "description": "Process config",
+                        "name": "config",
+                        "in": "body",
+                        "required": true,
+                        "schema": {
+                            "$ref": "#/definitions/api.ProcessConfig"
+                        }
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "$ref": "#/definitions/api.ProcessConfig"
+                        }
+                    },
+                    "400": {
+                        "description": "Bad Request",
+                        "schema": {
+                            "$ref": "#/definitions/api.Error"
+                        }
+                    }
+                }
+            }
+        },
+        "/api/v3/cluster/process/{id}": {
+            "delete": {
+                "security": [
+                    {
+                        "ApiKeyAuth": []
+                    }
+                ],
+                "description": "Delete a process by its ID",
+                "produces": [
+                    "application/json"
+                ],
+                "tags": [
+                    "v16.?.?"
+                ],
+                "summary": "Delete a process by its ID",
+                "operationId": "cluster-3-delete-process",
+                "parameters": [
+                    {
+                        "type": "string",
+                        "description": "Process ID",
+                        "name": "id",
+                        "in": "path",
+                        "required": true
+                    }
+                ],
+                "responses": {
+                    "200": {
+                        "description": "OK",
+                        "schema": {
+                            "type": "string"
+                        }
+                    },
+                    "404": {
+                        "description": "Not Found",
+                        "schema": {
+                            "$ref": "#/definitions/api.Error"
+                        }
+                    }
+                }
+            }
+        },
         "/api/v3/config": {
             "get": {
                 "security": [
@@ -2244,7 +2370,7 @@
                 "id": {
                     "type": "string"
                 },
-                "nodes": {
+                "server": {
                     "type": "array",
                     "items": {
                         "$ref": "#/definitions/api.ClusterServer"
@@ -2261,29 +2387,99 @@
                 "address": {
                     "type": "string"
                 },
+                "created_at": {
+                    "type": "string"
+                },
                 "id": {
                     "type": "string"
                 },
-                "last_ping": {
-                    "type": "integer"
-                },
-                "last_update": {
+                "last_contact": {
+                    "description": "unix timestamp",
                     "type": "integer"
                 },
                 "latency_ms": {
                     "description": "milliseconds",
                     "type": "number"
                 },
+                "name": {
+                    "type": "string"
+                },
+                "resources": {
+                    "$ref": "#/definitions/api.ClusterNodeResources"
+                },
                 "state": {
                     "type": "string"
+                },
+                "uptime_seconds": {
+                    "type": "integer"
                 }
             }
         },
         "api.ClusterNodeFiles": {
             "type": "object",
-            "additionalProperties": {
-                "type": "array",
-                "items": {
+            "properties": {
+                "files": {
+                    "type": "object",
+                    "additionalProperties": {
+                        "type": "array",
+                        "items": {
+                            "type": "string"
+                        }
+                    }
+                },
+                "last_update": {
+                    "description": "unix timestamp",
+                    "type": "integer"
+                }
+            }
+        },
+        "api.ClusterNodeResources": {
+            "type": "object",
+            "properties": {
+                "cpu_limit": {
+                    "description": "percent 0-100*npcu",
+                    "type": "number"
+                },
+                "cpu_used": {
+                    "description": "percent 0-100*npcu",
+                    "type": "number"
+                },
+                "memory_limit_bytes": {
+                    "type": "integer"
+                },
+                "memory_used_bytes": {
+                    "type": "integer"
+                },
+                "ncpu": {
+                    "type": "number"
+                }
+            }
+        },
+        "api.ClusterProcess": {
+            "type": "object",
+            "properties": {
+                "cpu": {
+                    "type": "number"
+                },
+                "id": {
+                    "type": "string"
+                },
+                "memory_bytes": {
+                    "type": "integer"
+                },
+                "node_id": {
+                    "type": "string"
+                },
+                "order": {
+                    "type": "string"
+                },
+                "reference": {
+                    "type": "string"
+                },
+                "runtime_seconds": {
+                    "type": "integer"
                },
+                "state": {
                     "type": "string"
                 }
             }
@@ -75,7 +75,7 @@ definitions:
         type: string
     id:
       type: string
-    nodes:
+    server:
       items:
         $ref: '#/definitions/api.ClusterServer'
       type: array
@@ -86,23 +86,70 @@ definitions:
     properties:
       address:
         type: string
+      created_at:
+        type: string
       id:
         type: string
-      last_ping:
-        type: integer
-      last_update:
+      last_contact:
+        description: unix timestamp
         type: integer
       latency_ms:
         description: milliseconds
         type: number
+      name:
+        type: string
+      resources:
+        $ref: '#/definitions/api.ClusterNodeResources'
       state:
         type: string
+      uptime_seconds:
+        type: integer
     type: object
   api.ClusterNodeFiles:
-    additionalProperties:
-      items:
+    properties:
+      files:
+        additionalProperties:
+          items:
+            type: string
+          type: array
+        type: object
+      last_update:
+        description: unix timestamp
+        type: integer
+    type: object
+  api.ClusterNodeResources:
+    properties:
+      cpu_limit:
+        description: percent 0-100*npcu
+        type: number
+      cpu_used:
+        description: percent 0-100*npcu
+        type: number
+      memory_limit_bytes:
+        type: integer
+      memory_used_bytes:
+        type: integer
+      ncpu:
+        type: number
+    type: object
+  api.ClusterProcess:
+    properties:
+      cpu:
+        type: number
+      id:
+        type: string
+      memory_bytes:
+        type: integer
+      node_id:
+        type: string
+      order:
+        type: string
+      reference:
+        type: string
+      runtime_seconds:
+        type: integer
+      state:
         type: string
-      type: array
     type: object
   api.ClusterServer:
     properties:
@@ -2179,10 +2226,12 @@ paths:
       security:
       - ApiKeyAuth: []
       summary: List of nodes in the cluster
-  /api/v3/cluster/proxy:
+      tags:
+      - v16.?.?
+  /api/v3/cluster/node:
     get:
       description: List of proxy nodes in the cluster
-      operationId: cluster-3-get-proxy-nodes
+      operationId: cluster-3-get-nodes
       produces:
       - application/json
       responses:
@@ -2199,10 +2248,12 @@ paths:
       security:
       - ApiKeyAuth: []
       summary: List of proxy nodes in the cluster
-  /api/v3/cluster/proxy/node/{id}:
+      tags:
+      - v16.?.?
+  /api/v3/cluster/node/{id}:
     get:
       description: List a proxy node by its ID
-      operationId: cluster-3-get-proxy-node
+      operationId: cluster-3-get-node
       parameters:
       - description: Node ID
         in: path
@@ -2223,10 +2274,12 @@ paths:
       security:
       - ApiKeyAuth: []
       summary: List a proxy node by its ID
-  /api/v3/cluster/proxy/node/{id}/files:
+      tags:
+      - v16.?.?
+  /api/v3/cluster/node/{id}/files:
     get:
       description: List the files of a proxy node by its ID
-      operationId: cluster-3-get-proxy-node-files
+      operationId: cluster-3-get-node-files
       parameters:
       - description: Node ID
         in: path
@@ -2247,6 +2300,80 @@ paths:
       security:
       - ApiKeyAuth: []
       summary: List the files of a proxy node by its ID
+      tags:
+      - v16.?.?
+  /api/v3/cluster/process:
+    get:
+      description: List of processes in the cluster
+      operationId: cluster-3-list-processes
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            items:
+              $ref: '#/definitions/api.ClusterProcess'
+            type: array
+      security:
+      - ApiKeyAuth: []
+      summary: List of processes in the cluster
+      tags:
+      - v16.?.?
+    post:
+      consumes:
+      - application/json
+      description: Add a new FFmpeg process
+      operationId: cluster-3-add-process
+      parameters:
+      - description: Process config
+        in: body
+        name: config
+        required: true
+        schema:
+          $ref: '#/definitions/api.ProcessConfig'
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            $ref: '#/definitions/api.ProcessConfig'
+        "400":
+          description: Bad Request
+          schema:
+            $ref: '#/definitions/api.Error'
+      security:
+      - ApiKeyAuth: []
+      summary: Add a new process
+      tags:
+      - v16.?.?
+  /api/v3/cluster/process/{id}:
+    delete:
+      description: Delete a process by its ID
+      operationId: cluster-3-delete-process
+      parameters:
+      - description: Process ID
+        in: path
+        name: id
+        required: true
+        type: string
+      produces:
+      - application/json
+      responses:
+        "200":
+          description: OK
+          schema:
+            type: string
+        "404":
+          description: Not Found
+          schema:
+            $ref: '#/definitions/api.Error'
+      security:
+      - ApiKeyAuth: []
+      summary: Delete a process by its ID
+      tags:
+      - v16.?.?
   /api/v3/config:
     get:
       description: Retrieve the currently active Restreamer configuration
@@ -58,3 +58,15 @@ func lineAndCharacter(input []byte, offset int) (line int, character int, err er

 	return line, character, nil
 }
+
+func ToNumber(f float64) json.Number {
+	var s string
+
+	if f == float64(int64(f)) {
+		s = fmt.Sprintf("%.0f", f) // 0 decimal if integer
+	} else {
+		s = fmt.Sprintf("%.3f", f) // max. 3 decimal if float
+	}
+
+	return json.Number(s)
+}
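
Not part of the diff: a minimal sketch of calling the newly exported ToNumber from another package. The import path is the encoding/json wrapper package that the handler changes further down also use; the main package around it is only for illustration, and the expected output is derived from the Sprintf formats in the hunk above.

package main

import (
	"fmt"

	"github.com/datarhei/core/v16/encoding/json"
)

func main() {
	// Integral values keep zero decimals, everything else is capped at three,
	// mirroring the Sprintf formats above.
	fmt.Println(json.ToNumber(25))      // prints: 25
	fmt.Println(json.ToNumber(3.14159)) // prints: 3.142
}
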
@@ -1,19 +1,25 @@
 package api

-type ClusterNodeConfig struct {
-	Address  string `json:"address"`
-	Username string `json:"username"`
-	Password string `json:"password"`
-}
+import "encoding/json"

 type ClusterNode struct {
-	Address     string  `json:"address"`
-	ID          string  `json:"id"`
-	LastContact int64   `json:"last_contact"` // unix timestamp
-	Latency     float64 `json:"latency_ms"`   // milliseconds
-	State       string  `json:"state"`
-	CPU         float64 `json:"cpu_used"` // percent 0-100*npcu
-	Mem         uint64  `json:"mem_used" format:"uint64"` // bytes
+	ID          string               `json:"id"`
+	Name        string               `json:"name"`
+	Address     string               `json:"address"`
+	CreatedAt   string               `json:"created_at"`
+	Uptime      int64                `json:"uptime_seconds"`
+	LastContact int64                `json:"last_contact"` // unix timestamp
+	Latency     float64              `json:"latency_ms"`   // milliseconds
+	State       string               `json:"state"`
+	Resources   ClusterNodeResources `json:"resources"`
+}
+
+type ClusterNodeResources struct {
+	NCPU     float64 `json:"ncpu"`
+	CPU      float64 `json:"cpu_used"`  // percent 0-100*npcu
+	CPULimit float64 `json:"cpu_limit"` // percent 0-100*npcu
+	Mem      uint64  `json:"memory_used_bytes"`
+	MemLimit uint64  `json:"memory_limit_bytes"`
 }

 type ClusterNodeFiles struct {
@@ -28,6 +34,17 @@ type ClusterServer struct {
 	Leader  bool   `json:"leader"`
 }

+type ClusterProcess struct {
+	ProcessID string      `json:"id"`
+	NodeID    string      `json:"node_id"`
+	Reference string      `json:"reference"`
+	Order     string      `json:"order"`
+	State     string      `json:"state"`
+	CPU       json.Number `json:"cpu" swaggertype:"number" jsonschema:"type=number"`
+	Memory    uint64      `json:"memory_bytes"`
+	Runtime   int64       `json:"runtime_seconds"`
+}
+
 type ClusterStats struct {
 	State       string  `json:"state"`
 	LastContact float64 `json:"last_contact_ms"`
@@ -39,6 +56,6 @@ type ClusterAbout struct {
 	Address           string          `json:"address"`
 	ClusterAPIAddress string          `json:"cluster_api_address"`
 	CoreAPIAddress    string          `json:"core_api_address"`
-	Nodes             []ClusterServer `json:"nodes"`
+	Server            []ClusterServer `json:"server"`
 	Stats             ClusterStats    `json:"stats"`
 }
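
Not part of the diff: a hedged sketch of the wire format the reworked ClusterNode produces. The import path mirrors the handler imports below; all field values are made up and only the struct tags above are authoritative.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/datarhei/core/v16/http/api"
)

func main() {
	// Illustrative values only; what matters are the JSON keys the tags generate.
	node := api.ClusterNode{
		ID:          "node-1",
		Name:        "core-1",
		Address:     "10.0.0.1:8080",
		CreatedAt:   "2023-01-01T00:00:00Z",
		Uptime:      3600,
		LastContact: 1672531200,
		Latency:     1.5,
		State:       "connected",
		Resources: api.ClusterNodeResources{
			NCPU:     4,
			CPU:      25,
			CPULimit: 90,
			Mem:      512 * 1024 * 1024,
			MemLimit: 4 * 1024 * 1024 * 1024,
		},
	}

	out, _ := json.MarshalIndent(node, "", "  ")
	fmt.Println(string(out))
	// Keys, per the tags: id, name, address, created_at, uptime_seconds, last_contact,
	// latency_ms, state, resources{ncpu, cpu_used, cpu_limit, memory_used_bytes,
	// memory_limit_bytes}
}
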
@@ -5,7 +5,7 @@ import (
 	"fmt"
 )

-func toNumber(f float64) json.Number {
+func ToNumber(f float64) json.Number {
 	var s string

 	if f == float64(int64(f)) {
@@ -45,10 +45,10 @@ func (i *ProbeIO) Unmarshal(io *app.ProbeIO) {
 	i.Type = io.Type
 	i.Codec = io.Codec
 	i.Coder = io.Coder
-	i.Bitrate = toNumber(io.Bitrate)
-	i.Duration = toNumber(io.Duration)
+	i.Bitrate = ToNumber(io.Bitrate)
+	i.Duration = ToNumber(io.Duration)

-	i.FPS = toNumber(io.FPS)
+	i.FPS = ToNumber(io.FPS)
 	i.Pixfmt = io.Pixfmt
 	i.Width = io.Width
 	i.Height = io.Height
@@ -258,7 +258,7 @@ func (s *ProcessState) Unmarshal(state *app.State) {
 	s.LastLog = state.LastLog
 	s.Progress = &Progress{}
 	s.Memory = state.Memory
-	s.CPU = toNumber(state.CPU)
+	s.CPU = ToNumber(state.CPU)
 	s.Command = state.Command

 	s.Progress.Unmarshal(&state.Progress)
@@ -118,12 +118,12 @@ func (progress *Progress) Unmarshal(p *app.Progress) {
 	progress.Output = make([]ProgressIO, len(p.Output))
 	progress.Frame = p.Frame
 	progress.Packet = p.Packet
-	progress.FPS = toNumber(p.FPS)
-	progress.Quantizer = toNumber(p.Quantizer)
+	progress.FPS = ToNumber(p.FPS)
+	progress.Quantizer = ToNumber(p.Quantizer)
 	progress.Size = p.Size / 1024
-	progress.Time = toNumber(p.Time)
-	progress.Bitrate = toNumber(p.Bitrate / 1024)
-	progress.Speed = toNumber(p.Speed)
+	progress.Time = ToNumber(p.Time)
+	progress.Bitrate = ToNumber(p.Bitrate / 1024)
+	progress.Speed = ToNumber(p.Speed)
 	progress.Drop = p.Drop
 	progress.Dup = p.Dup

@@ -43,8 +43,8 @@ func (s *Session) Unmarshal(sess session.Session) {
 	s.Extra = sess.Extra
 	s.RxBytes = sess.RxBytes
 	s.TxBytes = sess.TxBytes
-	s.RxBitrate = toNumber(sess.RxBitrate / 1024)
-	s.TxBitrate = toNumber(sess.TxBitrate / 1024)
+	s.RxBitrate = ToNumber(sess.RxBitrate / 1024)
+	s.TxBitrate = ToNumber(sess.TxBitrate / 1024)
 }

 // SessionSummaryActive represents the currently active sessions
@@ -81,12 +81,12 @@ type SessionsActive map[string][]Session
 // Unmarshal creates a new SessionSummary from a session.Summary
 func (summary *SessionSummary) Unmarshal(sum session.Summary) {
 	summary.Active.MaxSessions = sum.MaxSessions
-	summary.Active.MaxRxBitrate = toNumber(sum.MaxRxBitrate / 1024 / 1024)
-	summary.Active.MaxTxBitrate = toNumber(sum.MaxTxBitrate / 1024 / 1024)
+	summary.Active.MaxRxBitrate = ToNumber(sum.MaxRxBitrate / 1024 / 1024)
+	summary.Active.MaxTxBitrate = ToNumber(sum.MaxTxBitrate / 1024 / 1024)

 	summary.Active.Sessions = sum.CurrentSessions
-	summary.Active.RxBitrate = toNumber(sum.CurrentRxBitrate / 1024 / 1024)
-	summary.Active.TxBitrate = toNumber(sum.CurrentTxBitrate / 1024 / 1024)
+	summary.Active.RxBitrate = ToNumber(sum.CurrentRxBitrate / 1024 / 1024)
+	summary.Active.TxBitrate = ToNumber(sum.CurrentTxBitrate / 1024 / 1024)

 	summary.Active.SessionList = make([]Session, len(sum.Active))

@@ -4,11 +4,14 @@ import (
 	"net/http"
 	"sort"
 	"strings"
+	"time"

 	"github.com/datarhei/core/v16/cluster"
 	"github.com/datarhei/core/v16/cluster/proxy"
+	"github.com/datarhei/core/v16/encoding/json"
 	"github.com/datarhei/core/v16/http/api"
 	"github.com/datarhei/core/v16/http/handler/util"

 	"github.com/labstack/echo/v4"
 	"github.com/lithammer/shortuuid/v4"
 )
@@ -27,31 +30,33 @@ func NewCluster(cluster cluster.Cluster) *ClusterHandler {
 	}
 }

-// GetProxyNodes returns the list of proxy nodes in the cluster
+// GetNodes returns the list of proxy nodes in the cluster
 // @Summary List of proxy nodes in the cluster
 // @Description List of proxy nodes in the cluster
 // @Tags v16.?.?
-// @ID cluster-3-get-proxy-nodes
+// @ID cluster-3-get-nodes
 // @Produce json
 // @Success 200 {array} api.ClusterNode
 // @Failure 404 {object} api.Error
 // @Security ApiKeyAuth
-// @Router /api/v3/cluster/proxy [get]
-func (h *ClusterHandler) GetProxyNodes(c echo.Context) error {
+// @Router /api/v3/cluster/node [get]
+func (h *ClusterHandler) GetNodes(c echo.Context) error {
 	nodes := h.proxy.ListNodes()

 	list := []api.ClusterNode{}

 	for _, node := range nodes {
-		state := node.State()
+		about := node.About()
 		n := api.ClusterNode{
-			Address:     node.Address(),
-			ID:          state.ID,
-			LastContact: state.LastContact.Unix(),
-			Latency:     state.Latency.Seconds() * 1000,
-			State:       state.State,
-			CPU:         state.Resources.CPU,
-			Mem:         state.Resources.Mem,
+			ID:          about.ID,
+			Name:        about.Name,
+			Address:     about.Address,
+			CreatedAt:   about.CreatedAt.Format(time.RFC3339),
+			Uptime:      int64(about.Uptime.Seconds()),
+			LastContact: about.LastContact.Unix(),
+			Latency:     about.Latency.Seconds() * 1000,
+			State:       about.State,
+			Resources:   api.ClusterNodeResources(about.Resources),
 		}

 		list = append(list, n)
@@ -60,18 +65,18 @@ func (h *ClusterHandler) GetProxyNodes(c echo.Context) error {
 	return c.JSON(http.StatusOK, list)
 }

-// GetProxyNode returns the proxy node with the given ID
+// GetNode returns the proxy node with the given ID
 // @Summary List a proxy node by its ID
 // @Description List a proxy node by its ID
 // @Tags v16.?.?
-// @ID cluster-3-get-proxy-node
+// @ID cluster-3-get-node
 // @Produce json
 // @Param id path string true "Node ID"
 // @Success 200 {object} api.ClusterNode
 // @Failure 404 {object} api.Error
 // @Security ApiKeyAuth
-// @Router /api/v3/cluster/proxy/node/{id} [get]
-func (h *ClusterHandler) GetProxyNode(c echo.Context) error {
+// @Router /api/v3/cluster/node/{id} [get]
+func (h *ClusterHandler) GetNode(c echo.Context) error {
 	id := util.PathParam(c, "id")

 	peer, err := h.proxy.GetNode(id)
@@ -79,33 +84,35 @@ func (h *ClusterHandler) GetProxyNode(c echo.Context) error {
 		return api.Err(http.StatusNotFound, "Node not found", "%s", err)
 	}

-	state := peer.State()
+	about := peer.About()

 	node := api.ClusterNode{
-		Address:     peer.Address(),
-		ID:          state.ID,
-		LastContact: state.LastContact.Unix(),
-		Latency:     state.Latency.Seconds() * 1000,
-		State:       state.State,
-		CPU:         state.Resources.CPU,
-		Mem:         state.Resources.Mem,
+		ID:          about.ID,
+		Name:        about.Name,
+		Address:     about.Address,
+		CreatedAt:   about.CreatedAt.Format(time.RFC3339),
+		Uptime:      int64(about.Uptime.Seconds()),
+		LastContact: about.LastContact.Unix(),
+		Latency:     about.Latency.Seconds() * 1000,
+		State:       about.State,
+		Resources:   api.ClusterNodeResources(about.Resources),
 	}

 	return c.JSON(http.StatusOK, node)
 }

-// GetProxyNodeFiles returns the files from the proxy node with the given ID
+// GetNodeFiles returns the files from the proxy node with the given ID
 // @Summary List the files of a proxy node by its ID
 // @Description List the files of a proxy node by its ID
 // @Tags v16.?.?
-// @ID cluster-3-get-proxy-node-files
+// @ID cluster-3-get-node-files
 // @Produce json
 // @Param id path string true "Node ID"
 // @Success 200 {object} api.ClusterNodeFiles
 // @Failure 404 {object} api.Error
 // @Security ApiKeyAuth
-// @Router /api/v3/cluster/proxy/node/{id}/files [get]
-func (h *ClusterHandler) GetProxyNodeFiles(c echo.Context) error {
+// @Router /api/v3/cluster/node/{id}/files [get]
+func (h *ClusterHandler) GetNodeFiles(c echo.Context) error {
 	id := util.PathParam(c, "id")

 	peer, err := h.proxy.GetNode(id)
@@ -153,7 +160,7 @@ func (h *ClusterHandler) About(c echo.Context) error {
 		Address:           state.Address,
 		ClusterAPIAddress: state.ClusterAPIAddress,
 		CoreAPIAddress:    state.CoreAPIAddress,
-		Nodes:             []api.ClusterServer{},
+		Server:            []api.ClusterServer{},
 		Stats: api.ClusterStats{
 			State:       state.Stats.State,
 			LastContact: state.Stats.LastContact.Seconds() * 1000,
@@ -162,7 +169,7 @@ func (h *ClusterHandler) About(c echo.Context) error {
 	}

 	for _, n := range state.Nodes {
-		about.Nodes = append(about.Nodes, api.ClusterServer{
+		about.Server = append(about.Server, api.ClusterServer{
 			ID:      n.ID,
 			Address: n.Address,
 			Voter:   n.Voter,
@@ -173,6 +180,36 @@ func (h *ClusterHandler) About(c echo.Context) error {
 	return c.JSON(http.StatusOK, about)
 }

+// ListProcesses returns the list of processes in the cluster
+// @Summary List of processes in the cluster
+// @Description List of processes in the cluster
+// @Tags v16.?.?
+// @ID cluster-3-list-processes
+// @Produce json
+// @Success 200 {array} api.ClusterProcess
+// @Security ApiKeyAuth
+// @Router /api/v3/cluster/process [get]
+func (h *ClusterHandler) ListProcesses(c echo.Context) error {
+	procs := h.proxy.ListProcesses()
+
+	processes := []api.ClusterProcess{}
+
+	for _, p := range procs {
+		processes = append(processes, api.ClusterProcess{
+			ProcessID: p.Config.ID,
+			NodeID:    p.NodeID,
+			Reference: p.Config.Reference,
+			Order:     p.Order,
+			State:     p.State,
+			CPU:       json.ToNumber(p.CPU),
+			Memory:    p.Mem,
+			Runtime:   int64(p.Runtime.Seconds()),
+		})
+	}
+
+	return c.JSON(http.StatusOK, processes)
+}
+
 // Add adds a new process to the cluster
 // @Summary Add a new process
 // @Description Add a new FFmpeg process
@@ -661,9 +661,11 @@ func (s *server) setRoutesV3(v3 *echo.Group) {
 	if s.v3handler.cluster != nil {
 		v3.GET("/cluster", s.v3handler.cluster.About)

-		v3.GET("/cluster/proxy", s.v3handler.cluster.GetProxyNodes)
-		v3.GET("/cluster/proxy/node/:id", s.v3handler.cluster.GetProxyNode)
-		v3.GET("/cluster/proxy/node/:id/files", s.v3handler.cluster.GetProxyNodeFiles)
+		v3.GET("/cluster/node", s.v3handler.cluster.GetNodes)
+		v3.GET("/cluster/node/:id", s.v3handler.cluster.GetNode)
+		v3.GET("/cluster/node/:id/files", s.v3handler.cluster.GetNodeFiles)
+
+		v3.GET("/cluster/process", s.v3handler.cluster.ListProcesses)

 		if !s.readOnly {
 			v3.POST("/cluster/process", s.v3handler.cluster.AddProcess)
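
Not part of the diff: a small client sketch against the renamed and newly registered routes. The base URL is an assumption and authentication (ApiKeyAuth in the Swagger spec) is omitted; only the paths come from the route changes above.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	base := "http://127.0.0.1:8080" // assumed address of a core instance

	for _, path := range []string{
		"/api/v3/cluster/node",
		"/api/v3/cluster/process",
	} {
		resp, err := http.Get(base + path)
		if err != nil {
			fmt.Println(path, err)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Println(path, resp.StatusCode, string(body))
	}
}
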