Allow adding self to nodes

Author: Ingo Oppermann
Date:   2023-05-08 11:39:43 +02:00
Parent: b8b2990e61
Commit: ab86b8fd5d
7 changed files with 205 additions and 233 deletions

View File

@@ -19,6 +19,7 @@ import (
"github.com/datarhei/core/v16/log" "github.com/datarhei/core/v16/log"
"github.com/datarhei/core/v16/net" "github.com/datarhei/core/v16/net"
"github.com/datarhei/core/v16/restream/app" "github.com/datarhei/core/v16/restream/app"
hclog "github.com/hashicorp/go-hclog" hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/raft" "github.com/hashicorp/raft"
raftboltdb "github.com/hashicorp/raft-boltdb/v2" raftboltdb "github.com/hashicorp/raft-boltdb/v2"
@@ -67,11 +68,6 @@ type Cluster interface {
     Shutdown() error

-    AddNode(id, address string) error
-    RemoveNode(id string) error
-
-    ListNodes() []addNodeCommand
-    GetNode(id string) (addNodeCommand, error)

     AddProcess(origin string, config *app.Config) error
     RemoveProcess(origin, id string) error
@@ -608,69 +604,6 @@ func (rcw *readCloserWrapper) Close() error {
     return nil
 }

-func (c *cluster) ListNodes() []addNodeCommand {
-    c.store.ListNodes()
-
-    return nil
-}
-
-func (c *cluster) GetNode(id string) (addNodeCommand, error) {
-    c.store.GetNode(id)
-
-    return addNodeCommand{}, nil
-}
-
-func (c *cluster) AddNode(id, address string) error {
-    if !c.IsRaftLeader() {
-        return fmt.Errorf("not leader")
-    }
-
-    com := &command{
-        Operation: opAddNode,
-        Data: &addNodeCommand{
-            ID:      id,
-            Address: address,
-        },
-    }
-
-    b, err := json.Marshal(com)
-    if err != nil {
-        return err
-    }
-
-    future := c.raft.Apply(b, 5*time.Second)
-    if err := future.Error(); err != nil {
-        return fmt.Errorf("applying command failed: %w", err)
-    }
-
-    return nil
-}
-
-func (c *cluster) RemoveNode(id string) error {
-    if !c.IsRaftLeader() {
-        return fmt.Errorf("not leader")
-    }
-
-    com := &command{
-        Operation: opRemoveNode,
-        Data: &removeNodeCommand{
-            ID: id,
-        },
-    }
-
-    b, err := json.Marshal(com)
-    if err != nil {
-        return err
-    }
-
-    future := c.raft.Apply(b, 5*time.Second)
-    if err := future.Error(); err != nil {
-        return fmt.Errorf("applying command failed: %w", err)
-    }
-
-    return nil
-}
-
 func (c *cluster) trackNodeChanges() {
     ticker := time.NewTicker(time.Second)
     defer ticker.Stop()
@@ -694,9 +627,6 @@ func (c *cluster) trackNodeChanges() {
         for _, server := range future.Configuration().Servers {
             id := string(server.ID)

-            if id == c.id {
-                continue
-            }

             _, ok := c.nodes[id]
             if !ok {
@@ -733,10 +663,14 @@ func (c *cluster) trackNodeChanges() {
         }

         for id := range removeNodes {
-            if node, ok := c.nodes[id]; ok {
-                node.Disconnect()
-                c.proxy.RemoveNode(id)
-            }
+            node, ok := c.nodes[id]
+            if !ok {
+                continue
+            }
+
+            node.Disconnect()
+            c.proxy.RemoveNode(id)
+            delete(c.nodes, id)
         }

         c.nodesLock.Unlock()

View File

@@ -8,20 +8,20 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"path/filepath" "path/filepath"
"regexp"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/datarhei/core/v16/client" "github.com/datarhei/core/v16/client"
httpapi "github.com/datarhei/core/v16/http/api"
) )
type Node interface { type Node interface {
Connect() error Connect() error
Disconnect() Disconnect()
Start(updates chan<- NodeState) error StartFiles(updates chan<- NodeFiles) error
Stop() StopFiles()
GetURL(path string) (string, error) GetURL(path string) (string, error)
GetFile(path string) (io.ReadCloser, error) GetFile(path string) (io.ReadCloser, error)
@@ -39,16 +39,23 @@ type NodeReader interface {
     ID() string
     Address() string
     IPs() []string
+    Files() NodeFiles
     State() NodeState
 }

+type NodeFiles struct {
+    ID         string
+    Files      []string
+    LastUpdate time.Time
+}
+
 type NodeState struct {
     ID          string
     State       string
-    Files       []string
-    LastPing    time.Time
-    LastUpdate  time.Time
+    LastContact time.Time
     Latency     time.Duration
+    CPU         float64
+    Mem         float64
 }

 type nodeState string
@@ -68,13 +75,19 @@ type node struct {
     peer     client.RestClient
     peerLock sync.RWMutex

-    lastPing   time.Time
     cancelPing context.CancelFunc

+    lastContact time.Time
+
+    resources struct {
+        cpu float64
+        mem float64
+    }
+
     state     nodeState
     latency   float64 // Seconds
     stateLock sync.RWMutex

-    updates     chan<- NodeState
+    updates     chan<- NodeFiles
     filesList   []string
     lastUpdate  time.Time
     cancelFiles context.CancelFunc
@@ -91,15 +104,12 @@ type node struct {
     srtAddress    string
     srtPassphrase string
     srtToken      string
-
-    prefix *regexp.Regexp
 }

 func NewNode(address string) Node {
     n := &node{
         address: address,
         state:   stateDisconnected,
-        prefix:  regexp.MustCompile(`^[a-z]+:`),
         secure:  strings.HasPrefix(address, "https://"),
     }
@@ -195,14 +205,14 @@ func (n *node) Connect() error {
         for {
             select {
             case <-ticker.C:
-                // ping
+                // Ping
                 ok, latency := n.peer.Ping()

                 n.stateLock.Lock()
                 if !ok {
                     n.state = stateDisconnected
                 } else {
-                    n.lastPing = time.Now()
+                    n.lastContact = time.Now()
                     n.state = stateConnected
                 }
                 n.latency = n.latency*0.2 + latency.Seconds()*0.8
@@ -213,6 +223,63 @@ func (n *node) Connect() error {
         }
     }(ctx)

+    go func(ctx context.Context) {
+        ticker := time.NewTicker(time.Second)
+        defer ticker.Stop()
+
+        for {
+            select {
+            case <-ticker.C:
+                // Metrics
+                metrics, err := n.peer.Metrics(httpapi.MetricsQuery{
+                    Metrics: []httpapi.MetricsQueryMetric{
+                        {
+                            Name: "cpu_idle",
+                        },
+                        {
+                            Name: "mem_total",
+                        },
+                        {
+                            Name: "mem_free",
+                        },
+                    },
+                })
+                if err != nil {
+                    n.stateLock.Lock()
+                    n.resources.cpu = 100
+                    n.resources.mem = 100
+                    n.stateLock.Unlock()
+                }
+
+                cpu_idle := .0
+                mem_total := .0
+                mem_free := .0
+
+                for _, x := range metrics.Metrics {
+                    if x.Name == "cpu_idle" {
+                        cpu_idle = x.Values[0].Value
+                    } else if x.Name == "mem_total" {
+                        mem_total = x.Values[0].Value
+                    } else if x.Name == "mem_free" {
+                        mem_free = x.Values[0].Value
+                    }
+                }
+
+                n.stateLock.Lock()
+                n.resources.cpu = 100 - cpu_idle
+                if mem_total != 0 {
+                    n.resources.mem = (mem_total - mem_free) / mem_total * 100
+                } else {
+                    n.resources.mem = 100
+                }
+                n.lastContact = time.Now()
+                n.stateLock.Unlock()
+            case <-ctx.Done():
+                return
+            }
+        }
+    }(ctx)
+
     return nil
 }
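For readers following the new metrics goroutine above: the node turns the peer's cpu_idle, mem_total and mem_free metrics into "used" percentages. A minimal standalone sketch of that arithmetic (the sample values are invented for illustration):

package main

import "fmt"

func main() {
    // Values a peer might report; invented for illustration.
    cpuIdle := 92.5    // percent idle
    memTotal := 8192.0 // e.g. MiB
    memFree := 2048.0

    // Same derivation as in the goroutine above.
    cpuUsed := 100 - cpuIdle
    memUsed := 100.0
    if memTotal != 0 {
        memUsed = (memTotal - memFree) / memTotal * 100
    }

    fmt.Printf("cpu_used=%.1f%% mem_used=%.1f%%\n", cpuUsed, memUsed)
    // cpu_used=7.5% mem_used=75.0%
}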
@@ -228,7 +295,7 @@ func (n *node) Disconnect() {
     n.peer = nil
 }

-func (n *node) Start(updates chan<- NodeState) error {
+func (n *node) StartFiles(updates chan<- NodeFiles) error {
     n.runningLock.Lock()
     defer n.runningLock.Unlock()
@@ -254,7 +321,7 @@ func (n *node) Start(updates chan<- NodeState) error {
             n.files()

             select {
-            case n.updates <- n.State():
+            case n.updates <- n.Files():
             default:
             }
         }
@@ -264,7 +331,7 @@ func (n *node) Start(updates chan<- NodeState) error {
     return nil
 }

-func (n *node) Stop() {
+func (n *node) StopFiles() {
     n.runningLock.Lock()
     defer n.runningLock.Unlock()
@@ -289,23 +356,34 @@ func (n *node) ID() string {
     return n.peer.ID()
 }

+func (n *node) Files() NodeFiles {
+    n.stateLock.RLock()
+    defer n.stateLock.RUnlock()
+
+    state := NodeFiles{
+        ID:         n.peer.ID(),
+        LastUpdate: n.lastUpdate,
+    }
+
+    if n.state != stateDisconnected && time.Since(n.lastUpdate) <= 2*time.Second {
+        state.Files = make([]string, len(n.filesList))
+        copy(state.Files, n.filesList)
+    }
+
+    return state
+}
+
 func (n *node) State() NodeState {
     n.stateLock.RLock()
     defer n.stateLock.RUnlock()

     state := NodeState{
         ID:          n.peer.ID(),
-        LastPing:    n.lastPing,
-        LastUpdate:  n.lastUpdate,
+        LastContact: n.lastContact,
+        State:       n.state.String(),
         Latency:     time.Duration(n.latency * float64(time.Second)),
-    }
-
-    if n.state == stateDisconnected || time.Since(n.lastUpdate) > 2*time.Second {
-        state.State = stateDisconnected.String()
-    } else {
-        state.State = n.state.String()
-        state.Files = make([]string, len(n.filesList))
-        copy(state.Files, n.filesList)
+        CPU:         n.resources.cpu,
+        Mem:         n.resources.mem,
     }

     return state
@@ -416,27 +494,29 @@ func (n *node) files() {
     n.filesList = make([]string, len(filesList))
     copy(n.filesList, filesList)
     n.lastUpdate = time.Now()
+    n.lastContact = time.Now()
     n.stateLock.Unlock()
 }

 func (n *node) GetURL(path string) (string, error) {
-    // Remove prefix from path
-    prefix := n.prefix.FindString(path)
-    path = n.prefix.ReplaceAllString(path, "")
+    prefix, path, found := strings.Cut(path, ":")
+    if !found {
+        return "", fmt.Errorf("no prefix provided")
+    }

     u := ""

-    if prefix == "mem:" {
+    if prefix == "mem" {
         u = n.address + "/" + filepath.Join("memfs", path)
-    } else if prefix == "disk:" {
+    } else if prefix == "disk" {
         u = n.address + path
-    } else if prefix == "rtmp:" {
+    } else if prefix == "rtmp" {
         u = n.rtmpAddress + path
         if len(n.rtmpToken) != 0 {
             u += "?token=" + url.QueryEscape(n.rtmpToken)
         }
-    } else if prefix == "srt:" {
+    } else if prefix == "srt" {
         u = n.srtAddress + "?mode=caller"
         if len(n.srtPassphrase) != 0 {
             u += "&passphrase=" + url.QueryEscape(n.srtPassphrase)
@@ -454,16 +534,17 @@ func (n *node) GetURL(path string) (string, error) {
 }

 func (n *node) GetFile(path string) (io.ReadCloser, error) {
-    // Remove prefix from path
-    prefix := n.prefix.FindString(path)
-    path = n.prefix.ReplaceAllString(path, "")
+    prefix, path, found := strings.Cut(path, ":")
+    if !found {
+        return nil, fmt.Errorf("no prefix provided")
+    }

     n.peerLock.RLock()
     defer n.peerLock.RUnlock()

-    if prefix == "mem:" {
+    if prefix == "mem" {
         return n.peer.MemFSGetFile(path)
-    } else if prefix == "disk:" {
+    } else if prefix == "disk" {
         return n.peer.DiskFSGetFile(path)
     }
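The regexp-based prefix handling in GetURL and GetFile is replaced by strings.Cut, which splits the path at the first colon and reports whether a separator was present. A minimal standalone sketch of that behaviour, with made-up paths:

package main

import (
    "fmt"
    "strings"
)

func main() {
    for _, p := range []string{"mem:/foo/bar.ts", "disk:/data/file.mp4", "noprefix"} {
        // strings.Cut returns the part before the first ":", the part after it,
        // and whether a ":" was found at all.
        prefix, rest, found := strings.Cut(p, ":")
        if !found {
            fmt.Printf("%q: no prefix provided\n", p)
            continue
        }
        fmt.Printf("%q -> prefix=%q path=%q\n", p, prefix, rest)
    }
}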

View File

@@ -90,7 +90,7 @@ type proxy struct {
     limiter net.IPLimiter

-    updates chan NodeState
+    updates chan NodeFiles

     lock   sync.RWMutex
     cancel context.CancelFunc
@@ -109,7 +109,7 @@ func NewProxy(config ProxyConfig) (Proxy, error) {
         idupdate: map[string]time.Time{},
         fileid:   map[string]string{},
         limiter:  config.IPLimiter,
-        updates:  make(chan NodeState, 64),
+        updates:  make(chan NodeFiles, 64),
         logger:   config.Logger,
     }
@@ -144,31 +144,32 @@ func (p *proxy) Start() {
             select {
             case <-ctx.Done():
                 return
-            case state := <-p.updates:
+            case update := <-p.updates:
                 p.logger.Debug().WithFields(log.Fields{
-                    "node":  state.ID,
-                    "state": state.State,
-                    "files": len(state.Files),
+                    "node":  update.ID,
+                    "files": len(update.Files),
                 }).Log("Got update")

+                if p.id == update.ID {
+                    continue
+                }
+
                 p.lock.Lock()

                 // Cleanup
-                files := p.idfiles[state.ID]
+                files := p.idfiles[update.ID]
                 for _, file := range files {
                     delete(p.fileid, file)
                 }
-                delete(p.idfiles, state.ID)
-                delete(p.idupdate, state.ID)
+                delete(p.idfiles, update.ID)
+                delete(p.idupdate, update.ID)

-                if state.State == "connected" {
-                    // Add files
-                    for _, file := range state.Files {
-                        p.fileid[file] = state.ID
-                    }
-
-                    p.idfiles[state.ID] = files
-                    p.idupdate[state.ID] = state.LastUpdate
+                // Add files
+                for _, file := range update.Files {
+                    p.fileid[file] = update.ID
                 }

+                p.idfiles[update.ID] = files
+                p.idupdate[update.ID] = update.LastUpdate
+
                 p.lock.Unlock()
@@ -192,7 +193,7 @@ func (p *proxy) Stop() {
     p.cancel = nil

     for _, node := range p.nodes {
-        node.Stop()
+        node.StopFiles()
     }

     p.nodes = map[string]Node{}
@@ -205,10 +206,6 @@ func (p *proxy) Reader() ProxyReader {
 }

 func (p *proxy) AddNode(id string, node Node) (string, error) {
-    if id == p.id {
-        return "", fmt.Errorf("can't add myself as node or a node with the same ID")
-    }
-
     if id != node.ID() {
         return "", fmt.Errorf("the provided (%s) and retrieved (%s) ID's don't match", id, node.ID())
     }
@@ -217,7 +214,7 @@ func (p *proxy) AddNode(id string, node Node) (string, error) {
     defer p.lock.Unlock()

     if n, ok := p.nodes[id]; ok {
-        n.Stop()
+        n.StopFiles()

         delete(p.nodes, id)
@@ -237,7 +234,7 @@ func (p *proxy) AddNode(id string, node Node) (string, error) {
     p.nodes[id] = node

-    node.Start(p.updates)
+    node.StartFiles(p.updates)

     p.logger.Info().WithFields(log.Fields{
         "address": node.Address(),
@@ -256,7 +253,7 @@ func (p *proxy) RemoveNode(id string) error {
         return ErrNodeNotFound
     }

-    node.Stop()
+    node.StopFiles()

     delete(p.nodes, id)

View File

@@ -14,9 +14,6 @@ import (
 type Store interface {
     raft.FSM

-    ListNodes() []StoreNode
-    GetNode(id string) (StoreNode, error)
-
     ListProcesses() []app.Config
     GetProcess(id string) (app.Config, error)
 }
@@ -24,8 +21,6 @@ type Store interface {
 type operation string

 const (
-    opAddNode       operation = "addNode"
-    opRemoveNode    operation = "removeNode"
     opAddProcess    operation = "addProcess"
     opRemoveProcess operation = "removeProcess"
 )
@@ -35,15 +30,6 @@ type command struct {
     Data      interface{}
 }

-type addNodeCommand struct {
-    ID      string
-    Address string
-}
-
-type removeNodeCommand struct {
-    ID string
-}
-
 type StoreNode struct {
     ID      string
     Address string
@@ -60,13 +46,11 @@ type removeProcessCommand struct {
 // Implement a FSM
 type store struct {
     lock sync.RWMutex

-    Nodes   map[string]string
     Process map[string]app.Config
 }

 func NewStore() (Store, error) {
     return &store{
-        Nodes:   map[string]string{},
         Process: map[string]app.Config{},
     }, nil
 }
@@ -86,26 +70,6 @@ func (s *store) Apply(log *raft.Log) interface{} {
     fmt.Printf("op: %+v\n", c)

     switch c.Operation {
-    case opAddNode:
-        b, _ := json.Marshal(c.Data)
-
-        cmd := addNodeCommand{}
-        json.Unmarshal(b, &cmd)
-
-        fmt.Printf("addNode: %+v\n", cmd)
-        s.lock.Lock()
-        s.Nodes[cmd.ID] = cmd.Address
-        s.lock.Unlock()
-    case opRemoveNode:
-        b, _ := json.Marshal(c.Data)
-
-        cmd := removeNodeCommand{}
-        json.Unmarshal(b, &cmd)
-
-        fmt.Printf("removeNode: %+v\n", cmd)
-        s.lock.Lock()
-        delete(s.Nodes, cmd.ID)
-        s.lock.Unlock()
     case opAddProcess:
         b, _ := json.Marshal(c.Data)

         cmd := addProcessCommand{}
@@ -162,37 +126,6 @@ func (s *store) Restore(snapshot io.ReadCloser) error {
     return nil
 }

-func (s *store) ListNodes() []StoreNode {
-    nodes := []StoreNode{}
-
-    s.lock.Lock()
-    defer s.lock.Unlock()
-
-    for id, address := range s.Nodes {
-        nodes = append(nodes, StoreNode{
-            ID:      id,
-            Address: address,
-        })
-    }
-
-    return nodes
-}
-
-func (s *store) GetNode(id string) (StoreNode, error) {
-    s.lock.Lock()
-    defer s.lock.Unlock()
-
-    address, ok := s.Nodes[id]
-    if !ok {
-        return StoreNode{}, fmt.Errorf("not found")
-    }
-
-    return StoreNode{
-        ID:      id,
-        Address: address,
-    }, nil
-}
-
 func (s *store) ListProcesses() []app.Config {
     s.lock.RLock()
     defer s.lock.RUnlock()

View File

@@ -9,13 +9,17 @@ type ClusterNodeConfig struct {
 type ClusterNode struct {
     Address     string  `json:"address"`
     ID          string  `json:"id"`
-    LastPing    int64   `json:"last_ping"`
-    LastUpdate  int64   `json:"last_update"`
+    LastContact int64   `json:"last_contact"` // unix timestamp
     Latency     float64 `json:"latency_ms"` // milliseconds
     State       string  `json:"state"`
+    CPU         float64 `json:"cpu_used"` // percent
+    Mem         float64 `json:"mem_used"` // percent
 }

-type ClusterNodeFiles map[string][]string
+type ClusterNodeFiles struct {
+    LastUpdate int64               `json:"last_update"` // unix timestamp
+    Files      map[string][]string `json:"files"`
+}

 type ClusterServer struct {
     ID string `json:"id"`

View File

@@ -1,6 +1,7 @@
 package api

 import (
+    "encoding/json"
     "fmt"
     "time"
@@ -50,6 +51,21 @@ func (v MetricsResponseValue) MarshalJSON() ([]byte, error) {
     return []byte(s), nil
 }

+// UnmarshalJSON unmarshals a JSON array into a MetricsResponseValue
+func (v *MetricsResponseValue) UnmarshalJSON(data []byte) error {
+    x := []float64{}
+
+    err := json.Unmarshal(data, &x)
+    if err != nil {
+        return err
+    }
+
+    v.TS = time.Unix(int64(x[0]), 0)
+    v.Value = x[1]
+
+    return nil
+}
+
 type MetricsResponse struct {
     Timerange int64 `json:"timerange_sec" format:"int64"`
     Interval  int64 `json:"interval_sec" format:"int64"`
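The new UnmarshalJSON assumes a metrics value arrives as a two-element [unix_timestamp, value] JSON array, mirroring the existing MarshalJSON. A standalone sketch of that decoding, using a local stand-in type rather than the api package's own (the length check is an addition of this sketch):

package main

import (
    "encoding/json"
    "fmt"
    "time"
)

// metricsValue is a local stand-in for api.MetricsResponseValue.
type metricsValue struct {
    TS    time.Time
    Value float64
}

func (v *metricsValue) UnmarshalJSON(data []byte) error {
    x := []float64{}
    if err := json.Unmarshal(data, &x); err != nil {
        return err
    }
    if len(x) != 2 {
        return fmt.Errorf("expected [timestamp, value], got %d elements", len(x))
    }
    v.TS = time.Unix(int64(x[0]), 0)
    v.Value = x[1]
    return nil
}

func main() {
    v := metricsValue{}
    if err := json.Unmarshal([]byte("[1683538783, 42.5]"), &v); err != nil {
        panic(err)
    }
    fmt.Println(v.TS.UTC(), v.Value) // 2023-05-08 09:39:43 +0000 UTC 42.5
}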

View File

@@ -2,7 +2,6 @@ package api
 import (
     "net/http"
-    "regexp"
     "sort"
     "strings"
@@ -17,7 +16,6 @@ import (
 type ClusterHandler struct {
     cluster cluster.Cluster
     proxy   cluster.ProxyReader
-    prefix  *regexp.Regexp
 }

 // NewCluster return a new ClusterHandler type. You have to provide a cluster.
@@ -25,7 +23,6 @@ func NewCluster(cluster cluster.Cluster) *ClusterHandler {
     return &ClusterHandler{
         cluster: cluster,
         proxy:   cluster.ProxyReader(),
-        prefix:  regexp.MustCompile(`^[a-z]+:`),
     }
 }
@@ -49,10 +46,11 @@ func (h *ClusterHandler) GetProxyNodes(c echo.Context) error {
         n := api.ClusterNode{
             Address:     node.Address(),
             ID:          state.ID,
-            LastPing:    state.LastPing.Unix(),
-            LastUpdate:  state.LastUpdate.Unix(),
+            LastContact: state.LastContact.Unix(),
             Latency:     state.Latency.Seconds() * 1000,
             State:       state.State,
+            CPU:         state.CPU,
+            Mem:         state.Mem,
         }

         list = append(list, n)
@@ -85,8 +83,11 @@ func (h *ClusterHandler) GetProxyNode(c echo.Context) error {
     node := api.ClusterNode{
         Address:     peer.Address(),
         ID:          state.ID,
-        LastUpdate:  state.LastUpdate.Unix(),
+        LastContact: state.LastContact.Unix(),
+        Latency:     state.Latency.Seconds() * 1000,
         State:       state.State,
+        CPU:         state.CPU,
+        Mem:         state.Mem,
     }

     return c.JSON(http.StatusOK, node)
@@ -111,17 +112,23 @@ func (h *ClusterHandler) GetProxyNodeFiles(c echo.Context) error {
         return api.Err(http.StatusNotFound, "Node not found", "%s", err)
     }

-    files := api.ClusterNodeFiles{}
+    files := api.ClusterNodeFiles{
+        Files: make(map[string][]string),
+    }

-    state := peer.State()
+    peerFiles := peer.Files()

-    sort.Strings(state.Files)
+    files.LastUpdate = peerFiles.LastUpdate.Unix()
+
+    sort.Strings(peerFiles.Files)

-    for _, path := range state.Files {
-        prefix := strings.TrimSuffix(h.prefix.FindString(path), ":")
-        path = h.prefix.ReplaceAllString(path, "")
+    for _, path := range peerFiles.Files {
+        prefix, path, found := strings.Cut(path, ":")
+        if !found {
+            continue
+        }

-        files[prefix] = append(files[prefix], path)
+        files.Files[prefix] = append(files.Files[prefix], path)
     }

     return c.JSON(http.StatusOK, files)
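With ClusterNodeFiles now a struct, GetProxyNodeFiles groups paths by their storage prefix and reports when the file list was last updated. A standalone sketch of the resulting JSON shape, using a local stand-in type and invented paths:

package main

import (
    "encoding/json"
    "fmt"
)

// clusterNodeFiles is a local stand-in for api.ClusterNodeFiles.
type clusterNodeFiles struct {
    LastUpdate int64               `json:"last_update"` // unix timestamp
    Files      map[string][]string `json:"files"`
}

func main() {
    files := clusterNodeFiles{
        LastUpdate: 1683538783,
        Files: map[string][]string{
            "mem":  {"/foo.ts"},
            "disk": {"/data/file.mp4"},
        },
    }

    b, _ := json.Marshal(files)
    fmt.Println(string(b))
    // {"last_update":1683538783,"files":{"disk":["/data/file.mp4"],"mem":["/foo.ts"]}}
}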