mirror of https://github.com/gravitl/netmaker.git
synced 2025-10-05 08:47:35 +08:00
cleaned up logs, go initial updates working
@@ -35,17 +35,7 @@ type cachedMessage struct {
 // Daemon runs netclient daemon from command line
 func Daemon() error {
-	commsNetworks, err := getCommsNetworks()
-	if err != nil {
-		return errors.New("no comm networks exist")
-	}
-	for net := range commsNetworks {
-		ncutils.PrintLog("started comms network daemon, "+net, 1)
-		client := setupMQTT(false, net)
-		defer client.Disconnect(250)
-	}
-	wg := sync.WaitGroup{}
-	ctx, cancel := context.WithCancel(context.Background())
 	// == initial pull of all networks ==
 	networks, _ := ncutils.GetSystemNetworks()
 	for _, network := range networks {
 		var cfg config.ClientConfig
@@ -53,37 +43,61 @@ func Daemon() error {
 		cfg.ReadConfig()
 		initialPull(cfg.Network)
 	}
+
+	// == get all the comms networks on machine ==
+	commsNetworks, err := getCommsNetworks(networks[:])
+	if err != nil {
+		return errors.New("no comm networks exist")
+	}
+
+	// == subscribe to all nodes on each comms network on machine ==
+	for currCommsNet := range commsNetworks {
+		ncutils.PrintLog("started comms network daemon, "+currCommsNet, 1)
+		ctx, cancel := context.WithCancel(context.Background())
+		networkcontext.Store(currCommsNet, cancel)
+		go messageQueue(ctx, currCommsNet)
+	}
+
+	// == add waitgroup and cancel for checkin routine ==
+	wg := sync.WaitGroup{}
+	ctx, cancel := context.WithCancel(context.Background())
 	wg.Add(1)
-	go Checkin(ctx, wg)
+	go Checkin(ctx, &wg, commsNetworks)
 	quit := make(chan os.Signal, 1)
 	signal.Notify(quit, syscall.SIGTERM, os.Interrupt)
 	<-quit
+	for currCommsNet := range commsNetworks {
+		if cancel, ok := networkcontext.Load(currCommsNet); ok {
+			cancel.(context.CancelFunc)()
+		}
+	}
 	cancel()
-	ncutils.Log("shutting down message queue ")
+	ncutils.Log("shutting down netclient daemon")
 	wg.Wait()
 	ncutils.Log("shutdown complete")
 	return nil
 }
 
 // UpdateKeys -- updates private key and returns new publickey
-func UpdateKeys(cfg *config.ClientConfig, client mqtt.Client) error {
+func UpdateKeys(nodeCfg *config.ClientConfig, client mqtt.Client) error {
 	ncutils.Log("received message to update keys")
 	key, err := wgtypes.GeneratePrivateKey()
 	if err != nil {
 		ncutils.Log("error generating privatekey " + err.Error())
 		return err
 	}
-	file := ncutils.GetNetclientPathSpecific() + cfg.Node.Interface + ".conf"
+	file := ncutils.GetNetclientPathSpecific() + nodeCfg.Node.Interface + ".conf"
 	if err := wireguard.UpdatePrivateKey(file, key.String()); err != nil {
 		ncutils.Log("error updating wireguard key " + err.Error())
 		return err
 	}
-	cfg.Node.PublicKey = key.PublicKey().String()
-	if err := config.ModConfig(&cfg.Node); err != nil {
+	nodeCfg.Node.PublicKey = key.PublicKey().String()
+	if err := config.ModConfig(&nodeCfg.Node); err != nil {
 		ncutils.Log("error updating local config " + err.Error())
 	}
-	PublishNodeUpdate(cfg)
-	if err = wireguard.ApplyConf(&cfg.Node, cfg.Node.Interface, file); err != nil {
+	var commsCfg = getCommsCfgByNode(&nodeCfg.Node)
+	PublishNodeUpdate(&commsCfg, nodeCfg)
+	if err = wireguard.ApplyConf(&nodeCfg.Node, nodeCfg.Node.Interface, file); err != nil {
 		ncutils.Log("error applying new config " + err.Error())
 		return err
 	}
@@ -91,8 +105,9 @@ func UpdateKeys(cfg *config.ClientConfig, client mqtt.Client) error {
 }
 
 // PingServer -- checks if server is reachable
-func PingServer(cfg *config.ClientConfig) error {
-	node := getServerAddress(cfg)
+// use commsCfg only*
+func PingServer(commsCfg *config.ClientConfig) error {
+	node := getServerAddress(commsCfg)
 	pinger, err := ping.NewPinger(node)
 	if err != nil {
 		return err
@@ -108,16 +123,71 @@ func PingServer(cfg *config.ClientConfig) error {
 
 // == Private ==
 
+// sets MQ client subscriptions for a specific node config
+// should be called for each node belonging to a given comms network
+func setSubscriptions(client mqtt.Client, nodeCfg *config.ClientConfig) {
+	if nodeCfg.DebugOn {
+		if token := client.Subscribe("#", 0, nil); token.Wait() && token.Error() != nil {
+			ncutils.Log(token.Error().Error())
+			return
+		}
+		ncutils.Log("subscribed to all topics for debugging purposes")
+	}
+
+	if token := client.Subscribe(fmt.Sprintf("update/%s/%s", nodeCfg.Node.Network, nodeCfg.Node.ID), 0, mqtt.MessageHandler(NodeUpdate)); token.Wait() && token.Error() != nil {
+		ncutils.Log(token.Error().Error())
+		return
+	}
+	if nodeCfg.DebugOn {
+		ncutils.Log(fmt.Sprintf("subscribed to node updates for node %s update/%s/%s", nodeCfg.Node.Name, nodeCfg.Node.Network, nodeCfg.Node.ID))
+	}
+	if token := client.Subscribe(fmt.Sprintf("peers/%s/%s", nodeCfg.Node.Network, nodeCfg.Node.ID), 0, mqtt.MessageHandler(UpdatePeers)); token.Wait() && token.Error() != nil {
+		ncutils.Log(token.Error().Error())
+		return
+	}
+	if nodeCfg.DebugOn {
+		ncutils.Log(fmt.Sprintf("subscribed to peer updates for node %s peers/%s/%s", nodeCfg.Node.Name, nodeCfg.Node.Network, nodeCfg.Node.ID))
+	}
+}
+
+// on a delete usually, pass in the nodecfg to unsubscribe client broker communications
+// for the node in nodeCfg
+func unsubscribeNode(client mqtt.Client, nodeCfg *config.ClientConfig) {
+	client.Unsubscribe(fmt.Sprintf("update/%s/%s", nodeCfg.Node.Network, nodeCfg.Node.ID))
+	var ok = true
+	if token := client.Unsubscribe(fmt.Sprintf("update/%s/%s", nodeCfg.Node.Network, nodeCfg.Node.ID)); token.Wait() && token.Error() != nil {
+		ncutils.PrintLog("unable to unsubscribe from updates for node "+nodeCfg.Node.Name+"\n"+token.Error().Error(), 1)
+		ok = false
+	}
+	if token := client.Unsubscribe(fmt.Sprintf("peers/%s/%s", nodeCfg.Node.Network, nodeCfg.Node.ID)); token.Wait() && token.Error() != nil {
+		ncutils.PrintLog("unable to unsubscribe from peer updates for node "+nodeCfg.Node.Name+"\n"+token.Error().Error(), 1)
+		ok = false
+	}
+	if ok {
+		ncutils.PrintLog("successfully unsubscribed node "+nodeCfg.Node.ID+" : "+nodeCfg.Node.Name, 0)
+	}
+}
+
+// sets up Message Queue and subsribes/publishes updates to/from server
+// the client should subscribe to ALL nodes that exist on unique comms network locally
+func messageQueue(ctx context.Context, commsNet string) {
+	var commsCfg config.ClientConfig
+	commsCfg.Network = commsNet
+	commsCfg.ReadConfig()
+	ncutils.Log("netclient daemon started for network: " + commsNet)
+	client := setupMQTT(&commsCfg, false)
+	defer client.Disconnect(250)
+	<-ctx.Done()
+	ncutils.Log("shutting down daemon for comms network " + commsNet)
+}
+
 // setupMQTT creates a connection to broker and return client
-func setupMQTT(publish bool, networkName string) mqtt.Client {
-	var cfg *config.ClientConfig
-	cfg.Network = networkName
-	cfg.ReadConfig()
+// utilizes comms client configs to setup connections
+func setupMQTT(commsCfg *config.ClientConfig, publish bool) mqtt.Client {
 	opts := mqtt.NewClientOptions()
-	server := getServerAddress(cfg)
-	opts.AddBroker(server + ":1883") // TODO get the appropriate port of the comms mq server
-	id := ncutils.MakeRandomString(23)
-	opts.ClientID = id
+	server := getServerAddress(commsCfg)
+	opts.AddBroker(server + ":1883") // TODO get the appropriate port of the comms mq server
+	opts.ClientID = ncutils.MakeRandomString(23) // helps avoid id duplication on broker
 	opts.SetDefaultPublishHandler(All)
 	opts.SetAutoReconnect(true)
 	opts.SetConnectRetry(true)
@@ -131,18 +201,18 @@ func setupMQTT(publish bool, networkName string) mqtt.Client {
 				ncutils.Log("error retriving networks " + err.Error())
 			}
 			for _, network := range networks {
-				var currConf config.ClientConfig
-				currConf.Network = network
-				currConf.ReadConfig()
-				SetSubscriptions(client, &currConf)
+				var currNodeCfg config.ClientConfig
+				currNodeCfg.Network = network
+				currNodeCfg.ReadConfig()
+				setSubscriptions(client, &currNodeCfg)
 			}
 		}
 	})
 	opts.SetOrderMatters(true)
 	opts.SetResumeSubs(true)
 	opts.SetConnectionLostHandler(func(c mqtt.Client, e error) {
-		ncutils.Log("detected broker connection lost, running pull for " + cfg.Node.Network)
-		_, err := Pull(cfg.Node.Network, true)
+		ncutils.Log("detected broker connection lost, running pull for " + commsCfg.Node.Network)
+		_, err := Pull(commsCfg.Node.Network, true)
 		if err != nil {
 			ncutils.Log("could not run pull, server unreachable: " + err.Error())
 			ncutils.Log("waiting to retry...")
@@ -155,10 +225,10 @@ func setupMQTT(publish bool, networkName string) mqtt.Client {
 	for {
 		//if after 12 seconds, try a gRPC pull on the last try
 		if time.Now().After(tperiod) {
-			ncutils.Log("running pull for " + cfg.Node.Network)
-			_, err := Pull(cfg.Node.Network, true)
+			ncutils.Log("running pull for " + commsCfg.Node.Network)
+			_, err := Pull(commsCfg.Node.Network, true)
 			if err != nil {
-				ncutils.Log("could not run pull, exiting " + cfg.Node.Network + " setup: " + err.Error())
+				ncutils.Log("could not run pull, exiting " + commsCfg.Node.Network + " setup: " + err.Error())
 				return client
 			}
 			time.Sleep(time.Second)
@@ -166,10 +236,10 @@ func setupMQTT(publish bool, networkName string) mqtt.Client {
 		if token := client.Connect(); token.Wait() && token.Error() != nil {
 			ncutils.Log("unable to connect to broker, retrying ...")
 			if time.Now().After(tperiod) {
-				ncutils.Log("could not connect to broker, exiting " + cfg.Node.Network + " setup: " + token.Error().Error())
+				ncutils.Log("could not connect to broker, exiting " + commsCfg.Node.Network + " setup: " + token.Error().Error())
 				if strings.Contains(token.Error().Error(), "connectex") || strings.Contains(token.Error().Error(), "i/o timeout") {
 					ncutils.PrintLog("connection issue detected.. pulling and restarting daemon", 0)
-					Pull(cfg.Node.Network, true)
+					Pull(commsCfg.Node.Network, true)
 					daemon.Restart()
 				}
 				return client
@@ -182,35 +252,9 @@ func setupMQTT(publish bool, networkName string) mqtt.Client {
 	return client
 }
 
-// SetSubscriptions - sets MQ subscriptions
-func SetSubscriptions(client mqtt.Client, cfg *config.ClientConfig) {
-	if cfg.DebugOn {
-		if token := client.Subscribe("#", 0, nil); token.Wait() && token.Error() != nil {
-			ncutils.Log(token.Error().Error())
-			return
-		}
-		ncutils.Log("subscribed to all topics for debugging purposes")
-	}
-
-	if token := client.Subscribe(fmt.Sprintf("update/%s/%s", cfg.Node.Network, cfg.Node.ID), 0, mqtt.MessageHandler(NodeUpdate)); token.Wait() && token.Error() != nil {
-		ncutils.Log(token.Error().Error())
-		return
-	}
-	if cfg.DebugOn {
-		ncutils.Log(fmt.Sprintf("subscribed to node updates for node %s update/%s/%s", cfg.Node.Name, cfg.Node.Network, cfg.Node.ID))
-	}
-	if token := client.Subscribe(fmt.Sprintf("peers/%s/%s", cfg.Node.Network, cfg.Node.ID), 0, mqtt.MessageHandler(UpdatePeers)); token.Wait() && token.Error() != nil {
-		ncutils.Log(token.Error().Error())
-		return
-	}
-	if cfg.DebugOn {
-		ncutils.Log(fmt.Sprintf("subscribed to peer updates for node %s peers/%s/%s", cfg.Node.Name, cfg.Node.Network, cfg.Node.ID))
-	}
-}
-
 // publishes a message to server to update peers on this peer's behalf
-func publishSignal(cfg *config.ClientConfig, signal byte) error {
-	if err := publish(cfg, fmt.Sprintf("signal/%s", cfg.Node.ID), []byte{signal}, 1); err != nil {
+func publishSignal(commsCfg, nodeCfg *config.ClientConfig, signal byte) error {
+	if err := publish(commsCfg, nodeCfg, fmt.Sprintf("signal/%s", nodeCfg.Node.ID), []byte{signal}, 1); err != nil {
 		return err
 	}
 	return nil
@@ -248,18 +292,19 @@ func parseNetworkFromTopic(topic string) string {
 	return strings.Split(topic, "/")[1]
 }
 
-func decryptMsg(cfg *config.ClientConfig, msg []byte) ([]byte, error) {
+// should only ever use node client configs
+func decryptMsg(nodeCfg *config.ClientConfig, msg []byte) ([]byte, error) {
 	if len(msg) <= 24 { // make sure message is of appropriate length
 		return nil, fmt.Errorf("recieved invalid message from broker %v", msg)
 	}
 
 	// setup the keys
-	diskKey, keyErr := auth.RetrieveTrafficKey(cfg.Node.Network)
+	diskKey, keyErr := auth.RetrieveTrafficKey(nodeCfg.Node.Network)
 	if keyErr != nil {
 		return nil, keyErr
 	}
 
-	serverPubKey, err := ncutils.ConvertBytesToKey(cfg.Node.TrafficKeys.Server)
+	serverPubKey, err := ncutils.ConvertBytesToKey(nodeCfg.Node.TrafficKeys.Server)
 	if err != nil {
 		return nil, err
 	}
@@ -277,12 +322,8 @@ func getServerAddress(cfg *config.ClientConfig) string {
 	return server.Address
 }
 
-func getCommsNetworks() (map[string]bool, error) {
+func getCommsNetworks(networks []string) (map[string]bool, error) {
 	var cfg config.ClientConfig
-	networks, err := ncutils.GetSystemNetworks()
-	if err != nil {
-		return nil, err
-	}
 	var response = make(map[string]bool, 1)
 	for _, network := range networks {
 		cfg.Network = network
@@ -292,6 +333,13 @@ func getCommsNetworks() (map[string]bool, error) {
 	return response, nil
 }
 
+func getCommsCfgByNode(node *models.Node) config.ClientConfig {
+	var commsCfg config.ClientConfig
+	commsCfg.Network = node.Network
+	commsCfg.ReadConfig()
+	return commsCfg
+}
+
 // == Message Caches ==
 
 func insert(network, which, cache string) {
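For orientation, below is a minimal, self-contained sketch of the daemon flow this commit moves to: one message-queue goroutine per comms network, each with its cancel func stored in a sync.Map, plus a separate cancel and waitgroup for the checkin routine. This is not the netclient source; the stubbed messageQueue and checkin goroutine stand in for the real messageQueue and Checkin functions shown in the diff, and the hard-coded comms network list stands in for getCommsNetworks.

package main

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"sync"
	"syscall"
)

// networkcontext maps a comms network name to the cancel func of its daemon goroutine.
var networkcontext = sync.Map{}

// messageQueue stands in for the per-comms-network MQTT subscriber loop from the diff.
func messageQueue(ctx context.Context, commsNet string) {
	fmt.Println("started comms network daemon,", commsNet)
	<-ctx.Done()
	fmt.Println("shutting down daemon for comms network", commsNet)
}

func main() {
	// stand-in for the result of getCommsNetworks(networks[:]) in the diff
	commsNetworks := map[string]bool{"commsnet": true}

	// one goroutine and one cancel func per comms network
	for currCommsNet := range commsNetworks {
		ctx, cancel := context.WithCancel(context.Background())
		networkcontext.Store(currCommsNet, cancel)
		go messageQueue(ctx, currCommsNet)
	}

	// separate waitgroup and cancel for the checkin routine
	wg := sync.WaitGroup{}
	ctx, cancel := context.WithCancel(context.Background())
	wg.Add(1)
	go func() { // stands in for Checkin(ctx, &wg, commsNetworks)
		defer wg.Done()
		<-ctx.Done()
	}()

	quit := make(chan os.Signal, 1)
	signal.Notify(quit, syscall.SIGTERM, os.Interrupt)
	<-quit

	// cancel every per-network daemon, then the checkin routine
	for currCommsNet := range commsNetworks {
		if cancel, ok := networkcontext.Load(currCommsNet); ok {
			cancel.(context.CancelFunc)()
		}
	}
	cancel()
	wg.Wait()
	fmt.Println("shutdown complete")
}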