Mirror of https://github.com/gravitl/netmaker.git (synced 2025-10-06 09:22:42 +08:00)
added context to peer updates, moved nodes to memory
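The commit threads a cancelable context through peer-update calculation (a package-level PeerUpdateCtx/PeerUpdateStop pair plus a ResetPeerUpdateContext helper, and a new ctx parameter on GetPeerUpdateForHost and its callers), and it replaces per-network database lookups with filtering over a node list fetched once into memory. The snippet below is an illustrative sketch only, with simplified names, not code from this commit; it shows the cancellation pattern in isolation: a reset helper built on context.WithCancel, and a calculation loop that stops at the next peer once its context is cancelled.

// Illustrative sketch (not from the commit): shared cancelable context for peer updates.
package main

import (
	"context"
	"fmt"
)

var (
	peerUpdateCtx  context.Context    // shared context handed to update calculations
	peerUpdateStop context.CancelFunc // cancels the round currently in flight
)

// resetPeerUpdateContext cancels any running update round and starts a fresh context.
func resetPeerUpdateContext() {
	if peerUpdateCtx != nil && peerUpdateStop != nil {
		peerUpdateStop()
	}
	peerUpdateCtx, peerUpdateStop = context.WithCancel(context.Background())
}

// calculatePeerUpdate walks the peer list, bailing out at the next peer once ctx is cancelled.
func calculatePeerUpdate(ctx context.Context, peers []string) error {
	for _, p := range peers {
		select {
		case <-ctx.Done():
			return fmt.Errorf("peer update cancelled")
		default:
			fmt.Println("processing peer", p)
		}
	}
	return nil
}

func main() {
	resetPeerUpdateContext()
	stale := peerUpdateCtx
	resetPeerUpdateContext() // a newer update request supersedes the previous round
	fmt.Println(calculatePeerUpdate(stale, []string{"a", "b"}))         // cancelled at the first peer
	fmt.Println(calculatePeerUpdate(peerUpdateCtx, []string{"a", "b"})) // runs to completion
}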
@@ -1,6 +1,7 @@
package controller

import (
+	"context"
	"encoding/json"
	"errors"
	"fmt"
@@ -433,7 +434,7 @@ func getNode(w http.ResponseWriter, r *http.Request) {
		logic.ReturnErrorResponse(w, r, logic.FormatError(err, "internal"))
		return
	}
-	hostPeerUpdate, err := logic.GetPeerUpdateForHost(node.Network, host, nil)
+	hostPeerUpdate, err := logic.GetPeerUpdateForHost(node.Network, host, nil, context.Background())
	if err != nil && !database.IsEmptyRecord(err) {
		logger.Log(0, r.Header.Get("user"),
			fmt.Sprintf("error fetching wg peers config for host [ %s ]: %v", host.ID.String(), err))
@@ -622,7 +623,7 @@ func createNode(w http.ResponseWriter, r *http.Request) {
			return
		}
	}
-	hostPeerUpdate, err := logic.GetPeerUpdateForHost(networkName, &data.Host, nil)
+	hostPeerUpdate, err := logic.GetPeerUpdateForHost(networkName, &data.Host, nil, context.Background())
	if err != nil && !database.IsEmptyRecord(err) {
		logger.Log(0, r.Header.Get("user"),
			fmt.Sprintf("error fetching wg peers config for host [ %s ]: %v", data.Host.ID.String(), err))
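Note that these handler call sites pass context.Background(): a single API request computes one host's peer list and is presumably not meant to be cancelled by a later broadcast, whereas the MQ publishers later in the diff pass the shared logic.PeerUpdateCtx so that a newer broadcast can supersede one still in flight. A hypothetical, self-contained illustration of that distinction (the function below is a stand-in, not the project's API):

// Hypothetical stand-in, not logic.GetPeerUpdateForHost itself.
package main

import (
	"context"
	"fmt"
)

// getPeerUpdate only reports whether the context still allows it to run.
func getPeerUpdate(ctx context.Context, host string) error {
	select {
	case <-ctx.Done():
		return fmt.Errorf("update for %s cancelled", host)
	default:
		fmt.Println("computed update for", host)
		return nil
	}
}

func main() {
	// One-off caller (like an API handler): nothing should cancel this work.
	_ = getPeerUpdate(context.Background(), "host-a")

	// Broadcast-style caller: a shared cancelable context lets a newer
	// broadcast abort one that is still being calculated.
	shared, cancel := context.WithCancel(context.Background())
	cancel() // pretend a newer broadcast just reset the context
	fmt.Println(getPeerUpdate(shared, "host-b"))
}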
@@ -32,17 +32,24 @@ const (

// GetNetworkNodes - gets the nodes of a network
func GetNetworkNodes(network string) ([]models.Node, error) {
-	var nodes []models.Node
	allnodes, err := GetAllNodes()
	if err != nil {
		return []models.Node{}, err
	}
-	for _, node := range allnodes {

+	return GetNetworkNodesMemory(allnodes, network), nil
+}
+
+// GetNetworkNodesMemory - gets all nodes belonging to a network from list in memory
+func GetNetworkNodesMemory(allNodes []models.Node, network string) []models.Node {
+	var nodes = []models.Node{}
+	for i := range allNodes {
+		node := allNodes[i]
		if node.Network == network {
			nodes = append(nodes, node)
		}
	}
-	return nodes, nil
+	return nodes
}

// UpdateNode - takes a node and updates another node with it's values
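This hunk is the "moved nodes to memory" half of the commit: GetNetworkNodes now loads all nodes once and delegates to GetNetworkNodesMemory, so callers that already hold the full list can slice it per network without another storage round trip. A minimal, hypothetical sketch of the same pattern with a simplified Node type (not the repository's models.Node):

// Hypothetical sketch of in-memory filtering, mirroring GetNetworkNodesMemory.
package main

import "fmt"

// Node is a simplified stand-in for models.Node.
type Node struct {
	ID      string
	Network string
}

// getNetworkNodesMemory filters an already-loaded node list by network.
func getNetworkNodesMemory(allNodes []Node, network string) []Node {
	nodes := []Node{}
	for i := range allNodes {
		if allNodes[i].Network == network {
			nodes = append(nodes, allNodes[i])
		}
	}
	return nodes
}

func main() {
	all := []Node{{ID: "a", Network: "office"}, {ID: "b", Network: "lab"}, {ID: "c", Network: "office"}}
	fmt.Println(getNetworkNodesMemory(all, "office")) // [{a office} {c office}]
}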
logic/peers.go (358 changed lines)
@@ -1,6 +1,7 @@
package logic

import (
+	"context"
	"errors"
	"fmt"
	"net"
@@ -15,8 +16,15 @@ import (
	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)

+var (
+	// PeerUpdateCtx context to send to host peer updates
+	PeerUpdateCtx context.Context
+	// PeerUpdateStop - the cancel for PeerUpdateCtx
+	PeerUpdateStop context.CancelFunc
+)
+
// GetProxyUpdateForHost - gets the proxy update for host
-func GetProxyUpdateForHost(host *models.Host) (models.ProxyManagerPayload, error) {
+func GetProxyUpdateForHost(host *models.Host, ctx context.Context) (models.ProxyManagerPayload, error) {
	proxyPayload := models.ProxyManagerPayload{
		Action: models.ProxyUpdate,
	}
@@ -39,7 +47,7 @@ func GetProxyUpdateForHost(host *models.Host) (models.ProxyManagerPayload, error
	relayPeersMap := make(map[string]models.RelayedConf)
	for _, relayedHost := range relayedHosts {
		relayedHost := relayedHost
-		payload, err := GetPeerUpdateForHost("", &relayedHost, nil)
+		payload, err := GetPeerUpdateForHost("", &relayedHost, nil, ctx)
		if err == nil {
			relayedEndpoint, udpErr := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", relayedHost.EndpointIP, GetPeerListenPort(&relayedHost)))
			if udpErr == nil {
@@ -115,11 +123,24 @@ func GetProxyUpdateForHost(host *models.Host) (models.ProxyManagerPayload, error
	return proxyPayload, nil
}

+// ResetPeerUpdateContext - kills any current peer updates and resets the context
+func ResetPeerUpdateContext() {
+	if PeerUpdateCtx != nil && PeerUpdateStop != nil {
+		PeerUpdateStop() // tell any current peer updates to stop
+	}
+
+	PeerUpdateCtx, PeerUpdateStop = context.WithCancel(context.Background())
+}
+
// GetPeerUpdateForHost - gets the consolidated peer update for the host from all networks
-func GetPeerUpdateForHost(network string, host *models.Host, deletedNode *models.Node) (models.HostPeerUpdate, error) {
+func GetPeerUpdateForHost(network string, host *models.Host, deletedNode *models.Node, ctx context.Context) (models.HostPeerUpdate, error) {
	if host == nil {
		return models.HostPeerUpdate{}, errors.New("host is nil")
	}
+	allNodes, err := GetAllNodes()
+	if err != nil {
+		return models.HostPeerUpdate{}, err
+	}
	// track which nodes are deleted
	// after peer calculation, if peer not in list, add delete config of peer
	hostPeerUpdate := models.HostPeerUpdate{
@@ -148,197 +169,200 @@ func GetPeerUpdateForHost(network string, host *models.Host, deletedNode *models
if !node.Connected || node.PendingDelete || node.Action == models.NODE_DELETE {
continue
}
currentPeers, err := GetNetworkNodes(node.Network)
if err != nil {
return models.HostPeerUpdate{}, err
}
currentPeers := GetNetworkNodesMemory(allNodes, node.Network)
var nodePeerMap map[string]models.PeerRouteInfo
if node.IsIngressGateway || node.IsEgressGateway {
nodePeerMap = make(map[string]models.PeerRouteInfo)
}
for _, peer := range currentPeers {
peer := peer
if peer.ID.String() == node.ID.String() {
logger.Log(2, "peer update, skipping self")
//skip yourself
continue
}
var peerConfig wgtypes.PeerConfig
peerHost, err := GetHost(peer.HostID.String())
if err != nil {
logger.Log(1, "no peer host", peer.HostID.String(), err.Error())
return models.HostPeerUpdate{}, err
}

peerConfig.PublicKey = peerHost.PublicKey
peerConfig.PersistentKeepaliveInterval = &peer.PersistentKeepalive
peerConfig.ReplaceAllowedIPs = true
uselocal := false
if host.EndpointIP.String() == peerHost.EndpointIP.String() {
//peer is on same network
// set to localaddress
uselocal = true
if node.LocalAddress.IP == nil {
// use public endpint
uselocal = false
select {
case <-ctx.Done():
logger.Log(2, "cancelled peer update for host", host.Name, host.ID.String())
return models.HostPeerUpdate{}, fmt.Errorf("peer update cancelled")
default:
peer := peer
if peer.ID.String() == node.ID.String() {
logger.Log(2, "peer update, skipping self")
//skip yourself
continue
}
if node.LocalAddress.String() == peer.LocalAddress.String() {
uselocal = false
var peerConfig wgtypes.PeerConfig
peerHost, err := GetHost(peer.HostID.String())
if err != nil {
logger.Log(1, "no peer host", peer.HostID.String(), err.Error())
return models.HostPeerUpdate{}, err
}
}
peerConfig.Endpoint = &net.UDPAddr{
IP: peerHost.EndpointIP,
Port: GetPeerListenPort(peerHost),
}

if uselocal {
peerConfig.Endpoint.IP = peer.LocalAddress.IP
}
allowedips := GetAllowedIPs(&node, &peer, nil)
if peer.IsIngressGateway {
for _, entry := range peer.IngressGatewayRange {
_, cidr, err := net.ParseCIDR(string(entry))
if err == nil {
allowedips = append(allowedips, *cidr)
peerConfig.PublicKey = peerHost.PublicKey
peerConfig.PersistentKeepaliveInterval = &peer.PersistentKeepalive
peerConfig.ReplaceAllowedIPs = true
uselocal := false
if host.EndpointIP.String() == peerHost.EndpointIP.String() {
//peer is on same network
// set to localaddress
uselocal = true
if node.LocalAddress.IP == nil {
// use public endpint
uselocal = false
}
if node.LocalAddress.String() == peer.LocalAddress.String() {
uselocal = false
}
}
}
if peer.IsEgressGateway {
allowedips = append(allowedips, getEgressIPs(&node, &peer)...)
}
if peer.Action != models.NODE_DELETE &&
!peer.PendingDelete &&
peer.Connected &&
nodeacls.AreNodesAllowed(nodeacls.NetworkID(node.Network), nodeacls.NodeID(node.ID.String()), nodeacls.NodeID(peer.ID.String())) &&
(deletedNode == nil || (deletedNode != nil && peer.ID.String() != deletedNode.ID.String())) {
peerConfig.AllowedIPs = allowedips // only append allowed IPs if valid connection
}
peerConfig.Endpoint = &net.UDPAddr{
IP: peerHost.EndpointIP,
Port: GetPeerListenPort(peerHost),
}

if node.IsIngressGateway || node.IsEgressGateway {
if uselocal {
peerConfig.Endpoint.IP = peer.LocalAddress.IP
}
allowedips := GetAllowedIPs(&node, &peer, nil)
if peer.IsIngressGateway {
_, extPeerIDAndAddrs, err := getExtPeers(&peer)
if err == nil {
for _, extPeerIdAndAddr := range extPeerIDAndAddrs {
extPeerIdAndAddr := extPeerIdAndAddr
nodePeerMap[extPeerIdAndAddr.ID] = models.PeerRouteInfo{
PeerAddr: net.IPNet{
IP: net.ParseIP(extPeerIdAndAddr.Address),
Mask: getCIDRMaskFromAddr(extPeerIdAndAddr.Address),
},
PeerKey: extPeerIdAndAddr.ID,
Allow: true,
}
for _, entry := range peer.IngressGatewayRange {
_, cidr, err := net.ParseCIDR(string(entry))
if err == nil {
allowedips = append(allowedips, *cidr)
}
}
}
if node.IsIngressGateway && peer.IsEgressGateway {
hostPeerUpdate.IngressInfo.EgressRanges = append(hostPeerUpdate.IngressInfo.EgressRanges,
peer.EgressGatewayRanges...)
if peer.IsEgressGateway {
allowedips = append(allowedips, getEgressIPs(&node, &peer)...)
}
nodePeerMap[peerHost.PublicKey.String()] = models.PeerRouteInfo{
PeerAddr: net.IPNet{
IP: net.ParseIP(peer.PrimaryAddress()),
Mask: getCIDRMaskFromAddr(peer.PrimaryAddress()),
},
PeerKey: peerHost.PublicKey.String(),
Allow: true,
if peer.Action != models.NODE_DELETE &&
!peer.PendingDelete &&
peer.Connected &&
nodeacls.AreNodesAllowed(nodeacls.NetworkID(node.Network), nodeacls.NodeID(node.ID.String()), nodeacls.NodeID(peer.ID.String())) &&
(deletedNode == nil || (deletedNode != nil && peer.ID.String() != deletedNode.ID.String())) {
peerConfig.AllowedIPs = allowedips // only append allowed IPs if valid connection
}
}

var nodePeer wgtypes.PeerConfig
if _, ok := hostPeerUpdate.HostPeerIDs[peerHost.PublicKey.String()]; !ok {
hostPeerUpdate.HostPeerIDs[peerHost.PublicKey.String()] = make(map[string]models.IDandAddr)
hostPeerUpdate.Peers = append(hostPeerUpdate.Peers, peerConfig)
peerIndexMap[peerHost.PublicKey.String()] = len(hostPeerUpdate.Peers) - 1
hostPeerUpdate.HostPeerIDs[peerHost.PublicKey.String()][peer.ID.String()] = models.IDandAddr{
ID: peer.ID.String(),
Address: peer.PrimaryAddress(),
Name: peerHost.Name,
Network: peer.Network,
}
nodePeer = peerConfig
} else {
peerAllowedIPs := hostPeerUpdate.Peers[peerIndexMap[peerHost.PublicKey.String()]].AllowedIPs
peerAllowedIPs = append(peerAllowedIPs, allowedips...)
hostPeerUpdate.Peers[peerIndexMap[peerHost.PublicKey.String()]].AllowedIPs = peerAllowedIPs
hostPeerUpdate.HostPeerIDs[peerHost.PublicKey.String()][peer.ID.String()] = models.IDandAddr{
ID: peer.ID.String(),
Address: peer.PrimaryAddress(),
Name: peerHost.Name,
Network: peer.Network,
}
nodePeer = hostPeerUpdate.Peers[peerIndexMap[peerHost.PublicKey.String()]]
}

if node.Network == network { // add to peers map for metrics
hostPeerUpdate.PeerIDs[peerHost.PublicKey.String()] = models.IDandAddr{
ID: peer.ID.String(),
Address: peer.PrimaryAddress(),
Name: peerHost.Name,
Network: peer.Network,
}
hostPeerUpdate.NodePeers = append(hostPeerUpdate.NodePeers, nodePeer)
}
}
var extPeers []wgtypes.PeerConfig
var extPeerIDAndAddrs []models.IDandAddr
if node.IsIngressGateway {
extPeers, extPeerIDAndAddrs, err = getExtPeers(&node)
if err == nil {
for _, extPeerIdAndAddr := range extPeerIDAndAddrs {
extPeerIdAndAddr := extPeerIdAndAddr
nodePeerMap[extPeerIdAndAddr.ID] = models.PeerRouteInfo{
if node.IsIngressGateway || node.IsEgressGateway {
if peer.IsIngressGateway {
_, extPeerIDAndAddrs, err := getExtPeers(&peer)
if err == nil {
for _, extPeerIdAndAddr := range extPeerIDAndAddrs {
extPeerIdAndAddr := extPeerIdAndAddr
nodePeerMap[extPeerIdAndAddr.ID] = models.PeerRouteInfo{
PeerAddr: net.IPNet{
IP: net.ParseIP(extPeerIdAndAddr.Address),
Mask: getCIDRMaskFromAddr(extPeerIdAndAddr.Address),
},
PeerKey: extPeerIdAndAddr.ID,
Allow: true,
}
}
}
}
if node.IsIngressGateway && peer.IsEgressGateway {
hostPeerUpdate.IngressInfo.EgressRanges = append(hostPeerUpdate.IngressInfo.EgressRanges,
peer.EgressGatewayRanges...)
}
nodePeerMap[peerHost.PublicKey.String()] = models.PeerRouteInfo{
PeerAddr: net.IPNet{
IP: net.ParseIP(extPeerIdAndAddr.Address),
Mask: getCIDRMaskFromAddr(extPeerIdAndAddr.Address),
IP: net.ParseIP(peer.PrimaryAddress()),
Mask: getCIDRMaskFromAddr(peer.PrimaryAddress()),
},
PeerKey: extPeerIdAndAddr.ID,
PeerKey: peerHost.PublicKey.String(),
Allow: true,
}
}
hostPeerUpdate.Peers = append(hostPeerUpdate.Peers, extPeers...)
for _, extPeerIdAndAddr := range extPeerIDAndAddrs {
extPeerIdAndAddr := extPeerIdAndAddr
hostPeerUpdate.HostPeerIDs[extPeerIdAndAddr.ID] = make(map[string]models.IDandAddr)
hostPeerUpdate.HostPeerIDs[extPeerIdAndAddr.ID][extPeerIdAndAddr.ID] = models.IDandAddr{
ID: extPeerIdAndAddr.ID,
Address: extPeerIdAndAddr.Address,
Name: extPeerIdAndAddr.Name,
Network: node.Network,

var nodePeer wgtypes.PeerConfig
if _, ok := hostPeerUpdate.HostPeerIDs[peerHost.PublicKey.String()]; !ok {
hostPeerUpdate.HostPeerIDs[peerHost.PublicKey.String()] = make(map[string]models.IDandAddr)
hostPeerUpdate.Peers = append(hostPeerUpdate.Peers, peerConfig)
peerIndexMap[peerHost.PublicKey.String()] = len(hostPeerUpdate.Peers) - 1
hostPeerUpdate.HostPeerIDs[peerHost.PublicKey.String()][peer.ID.String()] = models.IDandAddr{
ID: peer.ID.String(),
Address: peer.PrimaryAddress(),
Name: peerHost.Name,
Network: peer.Network,
}
hostPeerUpdate.IngressInfo.ExtPeers[extPeerIdAndAddr.ID] = models.ExtClientInfo{
Masquerade: true,
IngGwAddr: net.IPNet{
IP: net.ParseIP(node.PrimaryAddress()),
Mask: getCIDRMaskFromAddr(node.PrimaryAddress()),
},
Network: node.PrimaryNetworkRange(),
ExtPeerAddr: net.IPNet{
IP: net.ParseIP(extPeerIdAndAddr.Address),
Mask: getCIDRMaskFromAddr(extPeerIdAndAddr.Address),
},
ExtPeerKey: extPeerIdAndAddr.ID,
Peers: nodePeerMap,
}
if node.Network == network {
hostPeerUpdate.PeerIDs[extPeerIdAndAddr.ID] = extPeerIdAndAddr
hostPeerUpdate.NodePeers = append(hostPeerUpdate.NodePeers, extPeers...)
nodePeer = peerConfig
} else {
peerAllowedIPs := hostPeerUpdate.Peers[peerIndexMap[peerHost.PublicKey.String()]].AllowedIPs
peerAllowedIPs = append(peerAllowedIPs, allowedips...)
hostPeerUpdate.Peers[peerIndexMap[peerHost.PublicKey.String()]].AllowedIPs = peerAllowedIPs
hostPeerUpdate.HostPeerIDs[peerHost.PublicKey.String()][peer.ID.String()] = models.IDandAddr{
ID: peer.ID.String(),
Address: peer.PrimaryAddress(),
Name: peerHost.Name,
Network: peer.Network,
}
nodePeer = hostPeerUpdate.Peers[peerIndexMap[peerHost.PublicKey.String()]]
}

if node.Network == network { // add to peers map for metrics
hostPeerUpdate.PeerIDs[peerHost.PublicKey.String()] = models.IDandAddr{
ID: peer.ID.String(),
Address: peer.PrimaryAddress(),
Name: peerHost.Name,
Network: peer.Network,
}
hostPeerUpdate.NodePeers = append(hostPeerUpdate.NodePeers, nodePeer)
}
} else if !database.IsEmptyRecord(err) {
logger.Log(1, "error retrieving external clients:", err.Error())
}
}
if node.IsEgressGateway {
hostPeerUpdate.EgressInfo[node.ID.String()] = models.EgressInfo{
EgressID: node.ID.String(),
Network: node.PrimaryNetworkRange(),
EgressGwAddr: net.IPNet{
IP: net.ParseIP(node.PrimaryAddress()),
Mask: getCIDRMaskFromAddr(node.PrimaryAddress()),
},
GwPeers: nodePeerMap,
EgressGWCfg: node.EgressGatewayRequest,
var extPeers []wgtypes.PeerConfig
var extPeerIDAndAddrs []models.IDandAddr
if node.IsIngressGateway {
extPeers, extPeerIDAndAddrs, err = getExtPeers(&node)
if err == nil {
for _, extPeerIdAndAddr := range extPeerIDAndAddrs {
extPeerIdAndAddr := extPeerIdAndAddr
nodePeerMap[extPeerIdAndAddr.ID] = models.PeerRouteInfo{
PeerAddr: net.IPNet{
IP: net.ParseIP(extPeerIdAndAddr.Address),
Mask: getCIDRMaskFromAddr(extPeerIdAndAddr.Address),
},
PeerKey: extPeerIdAndAddr.ID,
Allow: true,
}
}
hostPeerUpdate.Peers = append(hostPeerUpdate.Peers, extPeers...)
for _, extPeerIdAndAddr := range extPeerIDAndAddrs {
extPeerIdAndAddr := extPeerIdAndAddr
hostPeerUpdate.HostPeerIDs[extPeerIdAndAddr.ID] = make(map[string]models.IDandAddr)
hostPeerUpdate.HostPeerIDs[extPeerIdAndAddr.ID][extPeerIdAndAddr.ID] = models.IDandAddr{
ID: extPeerIdAndAddr.ID,
Address: extPeerIdAndAddr.Address,
Name: extPeerIdAndAddr.Name,
Network: node.Network,
}
hostPeerUpdate.IngressInfo.ExtPeers[extPeerIdAndAddr.ID] = models.ExtClientInfo{
Masquerade: true,
IngGwAddr: net.IPNet{
IP: net.ParseIP(node.PrimaryAddress()),
Mask: getCIDRMaskFromAddr(node.PrimaryAddress()),
},
Network: node.PrimaryNetworkRange(),
ExtPeerAddr: net.IPNet{
IP: net.ParseIP(extPeerIdAndAddr.Address),
Mask: getCIDRMaskFromAddr(extPeerIdAndAddr.Address),
},
ExtPeerKey: extPeerIdAndAddr.ID,
Peers: nodePeerMap,
}
if node.Network == network {
hostPeerUpdate.PeerIDs[extPeerIdAndAddr.ID] = extPeerIdAndAddr
hostPeerUpdate.NodePeers = append(hostPeerUpdate.NodePeers, extPeers...)
}
}
} else if !database.IsEmptyRecord(err) {
logger.Log(1, "error retrieving external clients:", err.Error())
}
}
if node.IsEgressGateway {
hostPeerUpdate.EgressInfo[node.ID.String()] = models.EgressInfo{
EgressID: node.ID.String(),
Network: node.PrimaryNetworkRange(),
EgressGwAddr: net.IPNet{
IP: net.ParseIP(node.PrimaryAddress()),
Mask: getCIDRMaskFromAddr(node.PrimaryAddress()),
},
GwPeers: nodePeerMap,
EgressGWCfg: node.EgressGatewayRequest,
}
}
}
}
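One detail that is easy to lose in the re-indented hunk above (the consolidation logic itself is unchanged by this commit, only wrapped in the select/ctx check): a peer host that shows up in several of the node's networks is appended to hostPeerUpdate.Peers only once, and later encounters merge their AllowedIPs into that entry through an index map keyed by public key. A simplified, hypothetical sketch of that consolidation:

// Hypothetical sketch of per-host AllowedIPs merging across networks.
package main

import "fmt"

// peerEntry is a simplified stand-in for wgtypes.PeerConfig.
type peerEntry struct {
	PublicKey  string
	AllowedIPs []string
}

func main() {
	// (publicKey, allowedIP) pairs as they might be discovered per network.
	found := [][2]string{
		{"keyA", "10.0.0.2/32"},
		{"keyB", "10.0.0.3/32"},
		{"keyA", "10.10.0.2/32"}, // same host seen again in a second network
	}

	peers := []peerEntry{}
	index := map[string]int{} // public key -> position in peers

	for _, f := range found {
		key, ip := f[0], f[1]
		if i, ok := index[key]; ok {
			peers[i].AllowedIPs = append(peers[i].AllowedIPs, ip) // merge, don't duplicate the peer
			continue
		}
		peers = append(peers, peerEntry{PublicKey: key, AllowedIPs: []string{ip}})
		index[key] = len(peers) - 1
	}

	fmt.Println(peers) // keyA appears once, carrying both allowed IPs
}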
@@ -1,6 +1,7 @@
package mq

import (
+	"context"
	"encoding/json"
	"fmt"
	"time"
@@ -184,7 +185,7 @@ func UpdateHost(client mqtt.Client, msg mqtt.Message) {
			logger.Log(0, "failed to send new node to host", hostUpdate.Host.Name, currentHost.ID.String(), err.Error())
			return
		} else {
-			if err = PublishSingleHostPeerUpdate(currentHost, nil); err != nil {
+			if err = PublishSingleHostPeerUpdate(currentHost, nil, context.Background()); err != nil {
				logger.Log(0, "failed peers publish after join acknowledged", hostUpdate.Host.Name, currentHost.ID.String(), err.Error())
				return
			}
@@ -278,7 +279,7 @@ func UpdateMetrics(client mqtt.Client, msg mqtt.Message) {
		logger.Log(2, "updating peers after node", currentNode.ID.String(), currentNode.Network, "detected connectivity issues")
		host, err := logic.GetHost(currentNode.HostID.String())
		if err == nil {
-			if err = PublishSingleHostPeerUpdate(host, nil); err != nil {
+			if err = PublishSingleHostPeerUpdate(host, nil, context.Background()); err != nil {
				logger.Log(0, "failed to publish update after failover peer change for node", currentNode.ID.String(), currentNode.Network)
			}
		}
@@ -1,6 +1,7 @@
package mq

import (
+	"context"
	"encoding/json"
	"errors"
	"fmt"
@@ -23,10 +24,10 @@ func PublishPeerUpdate() error {
		logger.Log(1, "err getting all hosts", err.Error())
		return err
	}
+	logic.ResetPeerUpdateContext()
	for _, host := range hosts {
		host := host
-		err = PublishSingleHostPeerUpdate(&host, nil)
-		if err != nil {
+		if err = PublishSingleHostPeerUpdate(&host, nil, logic.PeerUpdateCtx); err != nil {
			logger.Log(1, "failed to publish peer update to host", host.ID.String(), ": ", err.Error())
		}
	}
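The host := host lines kept in these loops (like the peer := peer and relayedHost := relayedHost copies elsewhere in the diff) are the pre-Go 1.22 idiom for taking a defensive per-iteration copy before passing &host onward; without the copy, every iteration of the loop shares a single loop variable. A small standalone illustration (not project code):

// Illustrative only: per-iteration copies of a range variable.
package main

import "fmt"

func main() {
	hosts := []string{"alpha", "beta", "gamma"}

	collected := []*string{}
	for _, h := range hosts {
		h := h // per-iteration copy, as in the loops above (pre-Go 1.22 semantics)
		collected = append(collected, &h)
	}

	for _, p := range collected {
		fmt.Println(*p) // alpha, beta, gamma - each pointer refers to its own copy
	}
}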
@@ -45,9 +46,10 @@ func PublishDeletedNodePeerUpdate(delNode *models.Node) error {
		logger.Log(1, "err getting all hosts", err.Error())
		return err
	}
+	logic.ResetPeerUpdateContext()
	for _, host := range hosts {
		host := host
-		if err = PublishSingleHostPeerUpdate(&host, delNode); err != nil {
+		if err = PublishSingleHostPeerUpdate(&host, delNode, logic.PeerUpdateCtx); err != nil {
			logger.Log(1, "failed to publish peer update to host", host.ID.String(), ": ", err.Error())
		}
	}
@@ -55,9 +57,9 @@ func PublishDeletedNodePeerUpdate(delNode *models.Node) error {
}

// PublishSingleHostPeerUpdate --- determines and publishes a peer update to one host
-func PublishSingleHostPeerUpdate(host *models.Host, deletedNode *models.Node) error {
+func PublishSingleHostPeerUpdate(host *models.Host, deletedNode *models.Node, ctx context.Context) error {

-	peerUpdate, err := logic.GetPeerUpdateForHost("", host, deletedNode)
+	peerUpdate, err := logic.GetPeerUpdateForHost("", host, deletedNode, ctx)
	if err != nil {
		return err
	}
@@ -65,7 +67,7 @@ func PublishSingleHostPeerUpdate(host *models.Host, deletedNode *models.Node) er
		return nil
	}
	if host.ProxyEnabled {
-		proxyUpdate, err := logic.GetProxyUpdateForHost(host)
+		proxyUpdate, err := logic.GetProxyUpdateForHost(host, ctx)
		if err != nil {
			return err
		}
@@ -422,13 +424,12 @@ func sendPeers() {

		//collectServerMetrics(networks[:])
	}

-	for _, host := range hosts {
-		if force {
+	if force {
+		logic.ResetPeerUpdateContext()
+		for _, host := range hosts {
+			host := host
			logger.Log(2, "sending scheduled peer update (5 min)")
-			err = PublishSingleHostPeerUpdate(&host, nil)
-			if err != nil {
+			if err = PublishSingleHostPeerUpdate(&host, nil, logic.PeerUpdateCtx); err != nil {
				logger.Log(1, "error publishing peer updates for host: ", host.ID.String(), " Err: ", err.Error())
			}
		}