Update On Fri Sep 6 20:35:08 CEST 2024

This commit is contained in:
github-action[bot]
2024-09-06 20:35:09 +02:00
parent d6a419be50
commit 1a9fdafeaa
118 changed files with 7507 additions and 4309 deletions

View File

@@ -11,6 +11,8 @@ import (
var (
doOnce sync.Once
globalInitd bool
globalWebSocketSyncher *WebSocketLogSyncher
)
func initLogger(logLevel string, replaceGlobal bool) (*zap.Logger, error) {
@@ -18,8 +20,8 @@ func initLogger(logLevel string, replaceGlobal bool) (*zap.Logger, error) {
if err := level.UnmarshalText([]byte(logLevel)); err != nil {
return nil, err
}
writers := []zapcore.WriteSyncer{zapcore.AddSync(os.Stdout)}
encoder := zapcore.EncoderConfig{
consoleEncoder := zapcore.NewConsoleEncoder(zapcore.EncoderConfig{
TimeKey: "ts",
LevelKey: "level",
MessageKey: "msg",
@@ -27,12 +29,29 @@ func initLogger(logLevel string, replaceGlobal bool) (*zap.Logger, error) {
EncodeLevel: zapcore.LowercaseColorLevelEncoder,
EncodeTime: zapcore.RFC3339TimeEncoder,
EncodeName: zapcore.FullNameEncoder,
}
core := zapcore.NewCore(
zapcore.NewConsoleEncoder(encoder),
zapcore.NewMultiWriteSyncer(writers...),
level,
)
})
stdoutCore := zapcore.NewCore(consoleEncoder, zapcore.AddSync(os.Stdout), level)
jsonEncoder := zapcore.NewJSONEncoder(zapcore.EncoderConfig{
TimeKey: "ts",
LevelKey: "level",
NameKey: "logger",
CallerKey: "caller",
MessageKey: "msg",
StacktraceKey: "stacktrace",
LineEnding: zapcore.DefaultLineEnding,
EncodeLevel: zapcore.LowercaseLevelEncoder,
EncodeTime: zapcore.ISO8601TimeEncoder,
EncodeDuration: zapcore.SecondsDurationEncoder,
EncodeCaller: zapcore.ShortCallerEncoder,
})
globalWebSocketSyncher = NewWebSocketLogSyncher()
wsCore := zapcore.NewCore(jsonEncoder, globalWebSocketSyncher, level)
// Tee the two cores so every log entry goes to both stdout and the websocket.
core := zapcore.NewTee(stdoutCore, wsCore)
l := zap.New(core)
if replaceGlobal {
zap.ReplaceGlobals(l)

52
echo/pkg/log/ws.go Normal file
View File

@@ -0,0 +1,52 @@
package log
import (
"encoding/json"
"net"
"sync"
"github.com/gobwas/ws"
)
// WebSocketLogSyncher is a zapcore.WriteSyncer that forwards JSON log lines
// to a single attached websocket client. The zero value is usable; a
// connection is attached later via SetWSConn.
type WebSocketLogSyncher struct {
	conn net.Conn   // current websocket connection; nil until SetWSConn is called
	mu   sync.Mutex // guards conn against concurrent Write/SetWSConn
}
// NewWebSocketLogSyncher returns a syncher with no websocket client attached
// yet; writes are silently dropped until SetWSConn provides a connection.
func NewWebSocketLogSyncher() *WebSocketLogSyncher {
	syncher := new(WebSocketLogSyncher)
	return syncher
}
// Write implements io.Writer (and thus zapcore.WriteSyncer). Each log line is
// parsed as JSON and forwarded to the attached websocket client as a text
// frame. Writes never fail from the logger's point of view: non-JSON payloads
// and frame-write failures are dropped on a best-effort basis so a broken
// client can never block or break logging.
func (wsSync *WebSocketLogSyncher) Write(p []byte) (int, error) {
	wsSync.mu.Lock()
	defer wsSync.mu.Unlock()
	// No client attached: report success so the logger keeps running.
	if wsSync.conn == nil {
		return len(p), nil
	}
	// BUG FIX: the original shadowed the named return `err` inside the
	// `if err := json.Unmarshal(...)` statement, so its trailing
	// `if err != nil { return 0, err }` branch was dead code.
	var logEntry map[string]interface{}
	if err := json.Unmarshal(p, &logEntry); err != nil {
		// Not a JSON log line; skip forwarding but report the bytes consumed.
		return len(p), nil
	}
	jsonData, err := json.Marshal(logEntry)
	if err != nil {
		return len(p), nil
	}
	// Best-effort send; ignore the frame-write error deliberately.
	_ = ws.WriteFrame(wsSync.conn, ws.NewTextFrame(jsonData))
	return len(p), nil
}
// Sync implements zapcore.WriteSyncer. Frames are written out immediately in
// Write, so there is nothing buffered to flush here.
func (wsSync *WebSocketLogSyncher) Sync() error {
	return nil
}
// SetWSConn replaces the websocket connection that log frames are written to.
// Safe for concurrent use with Write.
func (wsSync *WebSocketLogSyncher) SetWSConn(conn net.Conn) {
	wsSync.mu.Lock()
	wsSync.conn = conn
	wsSync.mu.Unlock()
}
// SetWebSocketConn attaches conn to the process-wide websocket log syncher.
// It is a no-op when the logger has not been initialized yet.
func SetWebSocketConn(conn net.Conn) {
	syncher := globalWebSocketSyncher
	if syncher == nil {
		return
	}
	syncher.SetWSConn(conn)
}

View File

@@ -0,0 +1,165 @@
package metric_reader
import (
"fmt"
"math"
"strings"
"time"
dto "github.com/prometheus/client_model/go"
)
// Metric family names scraped from node_exporter. The node_memory_*_bytes
// (lowercase) families are emitted by the macOS exporter; the
// node_memory_Mem* families by the Linux exporter.
const (
	metricCPUSecondsTotal           = "node_cpu_seconds_total"
	metricLoad1                     = "node_load1"
	metricLoad5                     = "node_load5"
	metricLoad15                    = "node_load15"
	metricMemoryTotalBytes          = "node_memory_total_bytes"      // macOS only
	metricMemoryActiveBytes         = "node_memory_active_bytes"     // macOS only
	metricMemoryWiredBytes          = "node_memory_wired_bytes"      // macOS only
	metricMemoryMemTotalBytes       = "node_memory_MemTotal_bytes"   // Linux only
	metricMemoryMemAvailableBytes   = "node_memory_MemAvailable_bytes" // Linux only
	metricFilesystemSizeBytes       = "node_filesystem_size_bytes"
	metricFilesystemAvailBytes      = "node_filesystem_avail_bytes"
	metricNetworkReceiveBytesTotal  = "node_network_receive_bytes_total"
	metricNetworkTransmitBytesTotal = "node_network_transmit_bytes_total"
)
// NodeMetrics is one snapshot of host-level metrics parsed from a
// node_exporter scrape. Byte totals are cumulative counters; the *Rate fields
// are derived from the difference against the previous snapshot.
type NodeMetrics struct {
	// cpu
	CpuCoreCount    int     `json:"cpu_core_count"`
	CpuLoadInfo     string  `json:"cpu_load_info"` // "load1|load5|load15", each %.2f
	CpuUsagePercent float64 `json:"cpu_usage_percent"`
	// memory
	MemoryTotalBytes   int64   `json:"memory_total_bytes"`
	MemoryUsageBytes   int64   `json:"memory_usage_bytes"`
	MemoryUsagePercent float64 `json:"memory_usage_percent"`
	// disk
	DiskTotalBytes   int64   `json:"disk_total_bytes"`
	DiskUsageBytes   int64   `json:"disk_usage_bytes"`
	DiskUsagePercent float64 `json:"disk_usage_percent"`
	// network
	NetworkReceiveBytesTotal  int64   `json:"network_receive_bytes_total"`
	NetworkTransmitBytesTotal int64   `json:"network_transmit_bytes_total"`
	NetworkReceiveBytesRate   float64 `json:"network_receive_bytes_rate"`  // bytes/sec since last snapshot
	NetworkTransmitBytesRate  float64 `json:"network_transmit_bytes_rate"` // bytes/sec since last snapshot
	// SyncTime records when this snapshot was taken; used for rate calculation.
	SyncTime time.Time
}
// cpuStats accumulates raw CPU counters while walking node_cpu_seconds_total;
// the final usage percentage is derived from it in calculateFinalMetrics.
type cpuStats struct {
	totalTime float64 // sum of all mode counters across all cores
	idleTime  float64 // sum of the "idle" mode counters
	cores     int     // number of cores (one "idle" sample per core)
}
// ParseNodeMetrics fills nm from the scraped metric families: CPU, memory,
// disk, network and load, then derives the percentage/rate fields.
func (b *readerImpl) ParseNodeMetrics(metricMap map[string]*dto.MetricFamily, nm *NodeMetrics) error {
	// The macOS exporter is recognized by its node_memory_total_bytes family.
	onMac := metricMap[metricMemoryTotalBytes] != nil
	stats := &cpuStats{}
	b.processCPUMetrics(metricMap, stats)
	b.processMemoryMetrics(metricMap, nm, onMac)
	b.processDiskMetrics(metricMap, nm)
	b.processNetworkMetrics(metricMap, nm)
	b.processLoadMetrics(metricMap, nm)
	b.calculateFinalMetrics(nm, stats)
	return nil
}
// processCPUMetrics accumulates node_cpu_seconds_total into cpu. Every sample
// counts toward totalTime; "idle" samples additionally feed idleTime and are
// used to count cores (one idle series per core).
func (b *readerImpl) processCPUMetrics(metricMap map[string]*dto.MetricFamily, cpu *cpuStats) {
	family, found := metricMap[metricCPUSecondsTotal]
	if !found {
		return
	}
	for _, sample := range family.Metric {
		v := getMetricValue(sample, family.GetType())
		cpu.totalTime += v
		if getLabel(sample, "mode") != "idle" {
			continue
		}
		cpu.idleTime += v
		cpu.cores++
	}
}
// processMemoryMetrics fills the memory totals. macOS counts active + wired
// pages as "used"; Linux derives usage as MemTotal - MemAvailable.
func (b *readerImpl) processMemoryMetrics(metricMap map[string]*dto.MetricFamily, nm *NodeMetrics, isMac bool) {
	if !isMac {
		// Linux path.
		total := sumInt64Metric(metricMap, metricMemoryMemTotalBytes)
		available := sumInt64Metric(metricMap, metricMemoryMemAvailableBytes)
		nm.MemoryTotalBytes = total
		nm.MemoryUsageBytes = total - available
		return
	}
	// macOS path.
	nm.MemoryTotalBytes = sumInt64Metric(metricMap, metricMemoryTotalBytes)
	active := sumInt64Metric(metricMap, metricMemoryActiveBytes)
	wired := sumInt64Metric(metricMap, metricMemoryWiredBytes)
	nm.MemoryUsageBytes = active + wired
}
// processDiskMetrics sums filesystem size/avail across all mounts and derives
// usage as size - avail.
func (b *readerImpl) processDiskMetrics(metricMap map[string]*dto.MetricFamily, nm *NodeMetrics) {
	size := sumInt64Metric(metricMap, metricFilesystemSizeBytes)
	avail := sumInt64Metric(metricMap, metricFilesystemAvailBytes)
	nm.DiskTotalBytes = size
	nm.DiskUsageBytes = size - avail
}
// processNetworkMetrics records the cumulative rx/tx byte counters summed over
// all interfaces; rates are derived later in calculateFinalMetrics.
func (b *readerImpl) processNetworkMetrics(metricMap map[string]*dto.MetricFamily, nm *NodeMetrics) {
	rx := sumInt64Metric(metricMap, metricNetworkReceiveBytesTotal)
	tx := sumInt64Metric(metricMap, metricNetworkTransmitBytesTotal)
	nm.NetworkReceiveBytesTotal = rx
	nm.NetworkTransmitBytesTotal = tx
}
// processLoadMetrics renders the 1/5/15-minute load averages into
// CpuLoadInfo as "l1|l5|l15" with two decimals each.
func (b *readerImpl) processLoadMetrics(metricMap map[string]*dto.MetricFamily, nm *NodeMetrics) {
	parts := make([]string, 0, 3)
	for _, name := range []string{metricLoad1, metricLoad5, metricLoad15} {
		parts = append(parts, fmt.Sprintf("%.2f", sumFloat64Metric(metricMap, name)))
	}
	nm.CpuLoadInfo += strings.Join(parts, "|")
}
// calculateFinalMetrics derives the percentage fields (rounded to two
// decimals) and, when a previous snapshot exists, the network byte rates
// (rounded to whole bytes/sec) from the raw totals gathered by the process*
// helpers.
//
// BUG FIX: the original divided unconditionally, producing NaN percentages
// when cpu.totalTime, MemoryTotalBytes or DiskTotalBytes was 0 (e.g. metrics
// missing from the scrape). Zero denominators now leave the percentage at 0,
// matching the guards the pre-refactor parse* functions had.
func (b *readerImpl) calculateFinalMetrics(nm *NodeMetrics, cpu *cpuStats) {
	nm.CpuCoreCount = cpu.cores
	if cpu.totalTime > 0 {
		nm.CpuUsagePercent = math.Round(100*(cpu.totalTime-cpu.idleTime)/cpu.totalTime*100) / 100
	}
	if nm.MemoryTotalBytes > 0 {
		nm.MemoryUsagePercent = math.Round(100*float64(nm.MemoryUsageBytes)/float64(nm.MemoryTotalBytes)*100) / 100
	}
	if nm.DiskTotalBytes > 0 {
		nm.DiskUsagePercent = math.Round(100*float64(nm.DiskUsageBytes)/float64(nm.DiskTotalBytes)*100) / 100
	}
	if b.lastMetrics == nil {
		return
	}
	// Require a minimal interval so a near-zero duration cannot inflate the rates.
	duration := time.Since(b.lastMetrics.SyncTime).Seconds()
	if duration <= 0.1 {
		return
	}
	// math.Max(0, ...) guards against counter resets between snapshots.
	nm.NetworkReceiveBytesRate = math.Round(math.Max(0, float64(nm.NetworkReceiveBytesTotal-b.lastMetrics.NetworkReceiveBytesTotal)/duration))
	nm.NetworkTransmitBytesRate = math.Round(math.Max(0, float64(nm.NetworkTransmitBytesTotal-b.lastMetrics.NetworkTransmitBytesTotal)/duration))
}
func sumInt64Metric(metricMap map[string]*dto.MetricFamily, metricName string) int64 {
ret := int64(0)
if metric, ok := metricMap[metricName]; ok && len(metric.Metric) > 0 {
for _, m := range metric.Metric {
ret += int64(getMetricValue(m, metric.GetType()))
}
}
return ret
}
// sumFloat64Metric sums the values of every sample in the named metric
// family, returning 0 when the family is absent.
//
// BUG FIX: the original accumulated into ret but then `return 0`, discarding
// the sum — every caller (e.g. the load averages) always saw 0.
func sumFloat64Metric(metricMap map[string]*dto.MetricFamily, metricName string) float64 {
	ret := float64(0)
	if metric, ok := metricMap[metricName]; ok {
		for _, m := range metric.Metric {
			ret += getMetricValue(m, metric.GetType())
		}
	}
	return ret
}
// getLabel returns the value of the label called name on metric, or "" when
// no such label exists.
func getLabel(metric *dto.Metric, name string) string {
	for _, pair := range metric.Label {
		if pair.GetName() != name {
			continue
		}
		return pair.GetValue()
	}
	return ""
}

View File

@@ -2,25 +2,28 @@ package metric_reader
import (
"context"
"fmt"
"io"
"net/http"
"strings"
"time"
"github.com/pkg/errors"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
"go.uber.org/zap"
)
type Reader interface {
ReadOnce(ctx context.Context) (*NodeMetrics, error)
ReadOnce(ctx context.Context) (*NodeMetrics, map[string]*RuleMetrics, error)
}
type readerImpl struct {
metricsURL string
httpClient *http.Client
lastMetrics *NodeMetrics
metricsURL string
httpClient *http.Client
lastMetrics *NodeMetrics
lastRuleMetrics map[string]*RuleMetrics // key: label value: RuleMetrics
l *zap.SugaredLogger
}
func NewReader(metricsURL string) *readerImpl {
@@ -28,267 +31,47 @@ func NewReader(metricsURL string) *readerImpl {
return &readerImpl{
httpClient: c,
metricsURL: metricsURL,
l: zap.S().Named("metric_reader"),
}
}
// parsePingInfo extracts the mean ping latency (in ms) per target IP from the
// ehco_ping_response_duration_seconds histogram. The metric family is
// optional (absent when enable_ping = false).
func (b *readerImpl) parsePingInfo(metricMap map[string]*dto.MetricFamily, nm *NodeMetrics) error {
	metric, ok := metricMap["ehco_ping_response_duration_seconds"]
	if !ok {
		// this metric is optional when enable_ping = false
		zap.S().Debug("ping metric not found")
		return nil
	}
	for _, m := range metric.Metric {
		g := m.GetHistogram()
		// BUG FIX: guard against an empty histogram — sum/count with count==0
		// previously produced a NaN latency entry.
		count := g.GetSampleCount()
		if count == 0 {
			continue
		}
		val := g.GetSampleSum() / float64(count) * 1000 // mean latency, seconds -> ms
		ip := ""
		for _, label := range m.GetLabel() {
			if label.GetName() == "ip" {
				ip = label.GetValue()
			}
		}
		nm.PingMetrics = append(nm.PingMetrics, PingMetric{Latency: val, Target: ip})
	}
	return nil
}
func (b *readerImpl) parseCpuInfo(metricMap map[string]*dto.MetricFamily, nm *NodeMetrics) error {
handleMetric := func(metricName string, handleValue func(float64, string)) error {
metric, ok := metricMap[metricName]
if !ok {
return fmt.Errorf("%s not found", metricName)
}
for _, m := range metric.Metric {
g := m.GetCounter()
mode := ""
for _, label := range m.GetLabel() {
if label.GetName() == "mode" {
mode = label.GetValue()
}
}
handleValue(g.GetValue(), mode)
}
return nil
}
var (
totalIdleTime float64
totalCpuTime float64
cpuCores int
)
err := handleMetric("node_cpu_seconds_total", func(val float64, mode string) {
totalCpuTime += val
if mode == "idle" {
totalIdleTime += val
cpuCores++
}
})
func (b *readerImpl) ReadOnce(ctx context.Context) (*NodeMetrics, map[string]*RuleMetrics, error) {
metricMap, err := b.fetchMetrics(ctx)
if err != nil {
return err
return nil, nil, errors.Wrap(err, "failed to fetch metrics")
}
nm := &NodeMetrics{SyncTime: time.Now()}
if err := b.ParseNodeMetrics(metricMap, nm); err != nil {
return nil, nil, err
}
nm.CpuCoreCount = cpuCores
nm.CpuUsagePercent = 100 * (totalCpuTime - totalIdleTime) / totalCpuTime
for _, load := range []string{"1", "5", "15"} {
loadMetricName := fmt.Sprintf("node_load%s", load)
loadMetric, ok := metricMap[loadMetricName]
if !ok {
return fmt.Errorf("%s not found", loadMetricName)
}
for _, m := range loadMetric.Metric {
g := m.GetGauge()
nm.CpuLoadInfo += fmt.Sprintf("%.2f|", g.GetValue())
}
}
nm.CpuLoadInfo = strings.TrimRight(nm.CpuLoadInfo, "|")
return nil
}
// parseMemoryInfo fills nm's memory fields from the scraped gauges. The macOS
// node_exporter exposes node_memory_total_bytes while Linux exposes
// node_memory_MemTotal_bytes; the presence of the former selects the mac path.
func (b *readerImpl) parseMemoryInfo(metricMap map[string]*dto.MetricFamily, nm *NodeMetrics) error {
	// handleMetric applies handleValue to every gauge sample of metricName,
	// returning an error when the family is missing entirely.
	handleMetric := func(metricName string, handleValue func(float64)) error {
		metric, ok := metricMap[metricName]
		if !ok {
			return fmt.Errorf("%s not found", metricName)
		}
		for _, m := range metric.Metric {
			g := m.GetGauge()
			handleValue(g.GetValue())
		}
		return nil
	}
	isMac := false
	if _, ok := metricMap["node_memory_total_bytes"]; ok {
		isMac = true
	}
	if isMac {
		// macOS: usage = active + wired memory.
		err := handleMetric("node_memory_total_bytes", func(val float64) {
			nm.MemoryTotalBytes = val
		})
		if err != nil {
			return err
		}
		err = handleMetric("node_memory_active_bytes", func(val float64) {
			nm.MemoryUsageBytes += val
		})
		if err != nil {
			return err
		}
		err = handleMetric("node_memory_wired_bytes", func(val float64) {
			nm.MemoryUsageBytes += val
		})
		if err != nil {
			return err
		}
	} else {
		// Linux: usage = MemTotal - MemAvailable.
		err := handleMetric("node_memory_MemTotal_bytes", func(val float64) {
			nm.MemoryTotalBytes = val
		})
		if err != nil {
			return err
		}
		err = handleMetric("node_memory_MemAvailable_bytes", func(val float64) {
			nm.MemoryUsageBytes = nm.MemoryTotalBytes - val
		})
		if err != nil {
			return err
		}
	}
	// Guard the division so a zero/absent total leaves the percentage at 0.
	if nm.MemoryTotalBytes != 0 {
		nm.MemoryUsagePercent = 100 * nm.MemoryUsageBytes / nm.MemoryTotalBytes
	}
	return nil
}
// parseDiskInfo fills nm's disk fields by summing the filesystem size/avail
// gauges per physical disk, with apfs-specific de-duplication (see below).
func (b *readerImpl) parseDiskInfo(metricMap map[string]*dto.MetricFamily, nm *NodeMetrics) error {
	// handleMetric collapses the family's samples by disk name (via
	// getDiskName on the "device" label) before applying handleValue.
	handleMetric := func(metricName string, handleValue func(float64)) error {
		forMac := false
		diskMap := make(map[string]float64)
		metric, ok := metricMap[metricName]
		if !ok {
			return fmt.Errorf("%s not found", metricName)
		}
		for _, m := range metric.Metric {
			g := m.GetGauge()
			disk := ""
			for _, label := range m.GetLabel() {
				if label.GetName() == "device" {
					disk = getDiskName(label.GetValue())
				}
				if label.GetName() == "fstype" && label.GetValue() == "apfs" {
					forMac = true
				}
			}
			diskMap[disk] = g.GetValue()
		}
		// On macOS, apfs volumes share the physical disk, so several volumes
		// can report the same size; count each distinct size only once then.
		seenVal := map[float64]bool{}
		for _, val := range diskMap {
			if seenVal[val] && forMac {
				continue
			}
			handleValue(val)
			seenVal[val] = true
		}
		return nil
	}
	err := handleMetric("node_filesystem_size_bytes", func(val float64) {
		nm.DiskTotalBytes += val
	})
	if err != nil {
		return err
	}
	var availBytes float64
	err = handleMetric("node_filesystem_avail_bytes", func(val float64) {
		availBytes += val
	})
	if err != nil {
		return err
	}
	nm.DiskUsageBytes = nm.DiskTotalBytes - availBytes
	// Guard the division so a zero/absent total leaves the percentage at 0.
	if nm.DiskTotalBytes != 0 {
		nm.DiskUsagePercent = 100 * nm.DiskUsageBytes / nm.DiskTotalBytes
	}
	return nil
}
// parseNetworkInfo accumulates the rx/tx byte counters across all interfaces
// and, when a previous snapshot exists, derives byte rates from the elapsed
// time since it was taken.
func (b *readerImpl) parseNetworkInfo(metricMap map[string]*dto.MetricFamily, nm *NodeMetrics) error {
	now := time.Now()
	// handleMetric applies handleValue to every counter sample of metricName,
	// returning an error when the family is missing entirely.
	handleMetric := func(metricName string, handleValue func(float64)) error {
		metric, ok := metricMap[metricName]
		if !ok {
			return fmt.Errorf("%s not found", metricName)
		}
		for _, m := range metric.Metric {
			g := m.GetCounter()
			handleValue(g.GetValue())
		}
		return nil
	}
	err := handleMetric("node_network_receive_bytes_total", func(val float64) {
		nm.NetworkReceiveBytesTotal += val
	})
	if err != nil {
		return err
	}
	err = handleMetric("node_network_transmit_bytes_total", func(val float64) {
		nm.NetworkTransmitBytesTotal += val
	})
	if err != nil {
		return err
	}
	if b.lastMetrics != nil {
		passedTime := now.Sub(b.lastMetrics.SyncTime).Seconds()
		// BUG FIX: guard against a zero (or negative) interval, which
		// previously divided by zero and produced +/-Inf rates.
		if passedTime > 0 {
			nm.NetworkReceiveBytesRate = (nm.NetworkReceiveBytesTotal - b.lastMetrics.NetworkReceiveBytesTotal) / passedTime
			nm.NetworkTransmitBytesRate = (nm.NetworkTransmitBytesTotal - b.lastMetrics.NetworkTransmitBytesTotal) / passedTime
		}
	}
	return nil
}
func (b *readerImpl) ReadOnce(ctx context.Context) (*NodeMetrics, error) {
response, err := b.httpClient.Get(b.metricsURL)
if err != nil {
return nil, err
}
defer response.Body.Close()
body, err := io.ReadAll(response.Body)
if err != nil {
return nil, err
}
var parser expfmt.TextParser
parsed, err := parser.TextToMetricFamilies(strings.NewReader(string(body)))
if err != nil {
return nil, err
}
nm := &NodeMetrics{SyncTime: time.Now(), PingMetrics: []PingMetric{}}
if err := b.parseCpuInfo(parsed, nm); err != nil {
return nil, err
}
if err := b.parseMemoryInfo(parsed, nm); err != nil {
return nil, err
}
if err := b.parseDiskInfo(parsed, nm); err != nil {
return nil, err
}
if err := b.parseNetworkInfo(parsed, nm); err != nil {
return nil, err
}
if err := b.parsePingInfo(parsed, nm); err != nil {
return nil, err
rm := make(map[string]*RuleMetrics)
if err := b.ParseRuleMetrics(metricMap, rm); err != nil {
return nil, nil, err
}
b.lastMetrics = nm
return nm, nil
b.lastRuleMetrics = rm
return nm, rm, nil
}
// fetchMetrics scrapes the metrics endpoint and parses the Prometheus text
// exposition format into a map keyed by metric family name.
//
// Improvements over the original: the response body is streamed straight into
// the parser instead of being buffered via io.ReadAll + string + NewReader,
// non-200 responses are rejected instead of being parsed as garbage, and the
// receiver is named b for consistency with the other readerImpl methods.
func (b *readerImpl) fetchMetrics(ctx context.Context) (map[string]*dto.MetricFamily, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, b.metricsURL, nil)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create request")
	}
	resp, err := b.httpClient.Do(req)
	if err != nil {
		return nil, errors.Wrap(err, "failed to send request")
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("unexpected status code %d from %s", resp.StatusCode, b.metricsURL)
	}
	var parser expfmt.TextParser
	mfs, err := parser.TextToMetricFamilies(resp.Body)
	if err != nil {
		return nil, errors.Wrap(err, "failed to parse metrics")
	}
	return mfs, nil
}

View File

@@ -0,0 +1,146 @@
package metric_reader
import (
"time"
dto "github.com/prometheus/client_model/go"
)
// Metric family names exported by the ehco relay, and the label keys used to
// slice those families per rule and per remote.
const (
	metricConnectionCount   = "ehco_traffic_current_connection_count"
	metricNetworkTransmit   = "ehco_traffic_network_transmit_bytes"
	metricPingResponse      = "ehco_ping_response_duration_milliseconds"
	metricHandshakeDuration = "ehco_traffic_handshake_duration_milliseconds"
	labelKey                = "label"     // rule label; samples without it are skipped
	remoteKey               = "remote"    // remote endpoint address
	connTypeKey             = "conn_type" // "tcp" or anything else (treated as udp)
	flowKey                 = "flow"      // traffic direction; only "read" is counted
	ipKey                   = "ip"        // ping target ip
)
// PingMetric is one latency observation for a single ping target.
type PingMetric struct {
	Latency int64  `json:"latency"` // in ms
	Target  string `json:"target"`  // target ip of the ping
}
// RuleMetrics groups all per-rule relay metrics, each broken down by remote
// endpoint. Maps are keyed by the "remote" label value.
type RuleMetrics struct {
	Label                   string                 // rule label
	PingMetrics             map[string]*PingMetric // key: remote
	TCPConnectionCount      map[string]int64       // key: remote
	TCPHandShakeDuration    map[string]int64       // key: remote in ms
	TCPNetworkTransmitBytes map[string]int64       // key: remote
	UDPConnectionCount      map[string]int64       // key: remote
	UDPHandShakeDuration    map[string]int64       // key: remote in ms
	UDPNetworkTransmitBytes map[string]int64       // key: remote
	// SyncTime records when this RuleMetrics entry was first created.
	SyncTime time.Time
}
// ParseRuleMetrics walks the relay's rule-level metric families and fills rm,
// keyed by the "label" label. Samples without a rule label are ignored, and
// absent families are skipped (they are all optional).
func (b *readerImpl) ParseRuleMetrics(metricMap map[string]*dto.MetricFamily, rm map[string]*RuleMetrics) error {
	wanted := []string{
		metricConnectionCount,
		metricNetworkTransmit,
		metricPingResponse,
		metricHandshakeDuration,
	}
	for _, name := range wanted {
		family, found := metricMap[name]
		if !found {
			continue
		}
		for _, sample := range family.Metric {
			labels := getLabelMap(sample)
			ruleLabel := labels[labelKey]
			if ruleLabel == "" {
				continue
			}
			value := int64(getMetricValue(sample, family.GetType()))
			target := b.ensureRuleMetric(rm, ruleLabel)
			switch name {
			case metricConnectionCount:
				b.updateConnectionCount(target, labels, value)
			case metricNetworkTransmit:
				b.updateNetworkTransmit(target, labels, value)
			case metricPingResponse:
				b.updatePingMetrics(target, labels, value)
			case metricHandshakeDuration:
				b.updateHandshakeDuration(target, labels, value)
			}
		}
	}
	return nil
}
// ensureRuleMetric returns the RuleMetrics entry for label, creating and
// registering a fully-initialized one on first sight.
func (b *readerImpl) ensureRuleMetric(rm map[string]*RuleMetrics, label string) *RuleMetrics {
	if existing, ok := rm[label]; ok {
		return existing
	}
	created := &RuleMetrics{
		Label:                   label,
		PingMetrics:             make(map[string]*PingMetric),
		TCPConnectionCount:      make(map[string]int64),
		TCPHandShakeDuration:    make(map[string]int64),
		TCPNetworkTransmitBytes: make(map[string]int64),
		UDPConnectionCount:      make(map[string]int64),
		UDPHandShakeDuration:    make(map[string]int64),
		UDPNetworkTransmitBytes: make(map[string]int64),
		SyncTime:                time.Now(),
	}
	rm[label] = created
	return created
}
// updateConnectionCount records the current connection count for the sample's
// remote, bucketed by conn_type ("tcp" vs everything else = udp).
func (b *readerImpl) updateConnectionCount(rm *RuleMetrics, labels map[string]string, value int64) {
	remote := labels[remoteKey]
	if labels[connTypeKey] == "tcp" {
		rm.TCPConnectionCount[remote] = value
		return
	}
	rm.UDPConnectionCount[remote] = value
}
// updateNetworkTransmit accumulates transmitted bytes per remote, counting
// only "read"-flow samples to avoid double counting both directions.
func (b *readerImpl) updateNetworkTransmit(rm *RuleMetrics, labels map[string]string, value int64) {
	if labels[flowKey] != "read" {
		return
	}
	remote := labels[remoteKey]
	if labels[connTypeKey] == "tcp" {
		rm.TCPNetworkTransmitBytes[remote] += value
		return
	}
	rm.UDPNetworkTransmitBytes[remote] += value
}
// updatePingMetrics records the latest ping latency (ms) for the sample's
// remote, keeping the resolved target ip alongside it.
func (b *readerImpl) updatePingMetrics(rm *RuleMetrics, labels map[string]string, value int64) {
	entry := &PingMetric{
		Latency: value,
		Target:  labels[ipKey],
	}
	rm.PingMetrics[labels[remoteKey]] = entry
}
// updateHandshakeDuration records the handshake duration (ms) for the
// sample's remote, bucketed by conn_type ("tcp" vs everything else = udp).
func (b *readerImpl) updateHandshakeDuration(rm *RuleMetrics, labels map[string]string, value int64) {
	remote := labels[remoteKey]
	if labels[connTypeKey] == "tcp" {
		rm.TCPHandShakeDuration[remote] = value
		return
	}
	rm.UDPHandShakeDuration[remote] = value
}
// getLabelMap flattens a sample's label pairs into a name -> value map.
func getLabelMap(metric *dto.Metric) map[string]string {
	out := make(map[string]string, len(metric.Label))
	for _, pair := range metric.Label {
		out[pair.GetName()] = pair.GetValue()
	}
	return out
}

View File

@@ -1,38 +0,0 @@
package metric_reader
import (
"time"
)
// NodeMetrics is one snapshot of host-level metrics parsed from a
// node_exporter scrape. Byte totals are cumulative counters; the *Rate fields
// are derived from the difference against the previous snapshot.
type NodeMetrics struct {
	// cpu
	CpuCoreCount    int     `json:"cpu_core_count"`
	CpuLoadInfo     string  `json:"cpu_load_info"` // "load1|load5|load15", each %.2f
	CpuUsagePercent float64 `json:"cpu_usage_percent"`
	// memory
	MemoryTotalBytes   float64 `json:"memory_total_bytes"`
	MemoryUsageBytes   float64 `json:"memory_usage_bytes"`
	MemoryUsagePercent float64 `json:"memory_usage_percent"`
	// disk
	DiskTotalBytes   float64 `json:"disk_total_bytes"`
	DiskUsageBytes   float64 `json:"disk_usage_bytes"`
	DiskUsagePercent float64 `json:"disk_usage_percent"`
	// network
	NetworkReceiveBytesTotal  float64 `json:"network_receive_bytes_total"`
	NetworkTransmitBytesTotal float64 `json:"network_transmit_bytes_total"`
	NetworkReceiveBytesRate   float64 `json:"network_receive_bytes_rate"`  // bytes/sec since last snapshot
	NetworkTransmitBytesRate  float64 `json:"network_transmit_bytes_rate"` // bytes/sec since last snapshot
	// ping
	PingMetrics []PingMetric `json:"ping_metrics"`
	// SyncTime records when this snapshot was taken; used for rate calculation.
	SyncTime time.Time
}
// PingMetric is one latency observation for a single ping target.
type PingMetric struct {
	Latency float64 `json:"latency"` // in ms
	Target  string  `json:"target"`  // target ip of the ping
}

View File

@@ -1,22 +1,46 @@
package metric_reader
import "regexp"
import (
"math"
// Parse the disk name from a device path, such as:
// e.g. /dev/disk1s1 -> disk1
// e.g. /dev/disk1s2 -> disk1
// e.g. ntfs://disk1s1 -> disk1
// e.g. ntfs://disk1s2 -> disk1
// e.g. /dev/sda1 -> sda
// e.g. /dev/sda2 -> sda
var diskNameRegex = regexp.MustCompile(`/dev/disk(\d+)|ntfs://disk(\d+)|/dev/sd[a-zA-Z]`)
dto "github.com/prometheus/client_model/go"
)
func getDiskName(devicePath string) string {
matches := diskNameRegex.FindStringSubmatch(devicePath)
for _, match := range matches {
if match != "" {
return match
// calculatePercentile estimates the given percentile (0..1) from a Prometheus
// histogram using linear interpolation within the bucket that contains the
// target rank, mirroring PromQL's histogram_quantile.
//
// BUG FIX: in client_model, Bucket.CumulativeCount is already cumulative
// (count of samples <= upper_bound). The original summed these cumulative
// counts, double-counting lower buckets and returning a percentile far below
// the true one. It also divided by the raw cumulative count instead of the
// per-bucket count when interpolating. Also guards total == 0 now.
func calculatePercentile(histogram *dto.Histogram, percentile float64) float64 {
	if histogram == nil {
		return 0
	}
	total := histogram.GetSampleCount()
	if total == 0 {
		return 0
	}
	target := percentile * float64(total)
	var prevCumulative uint64
	var prevBound float64
	for _, bucket := range histogram.Bucket {
		cumulative := bucket.GetCumulativeCount() // cumulative per client_model spec
		if float64(cumulative) >= target {
			inBucket := cumulative - prevCumulative
			// Linear interpolation between bucket boundaries.
			if inBucket > 0 && prevBound != bucket.GetUpperBound() {
				return prevBound + (target-float64(prevCumulative))/float64(inBucket)*(bucket.GetUpperBound()-prevBound)
			}
			return bucket.GetUpperBound()
		}
		prevCumulative = cumulative
		prevBound = bucket.GetUpperBound()
	}
	// Rank not reached by any bucket (e.g. missing +Inf bucket).
	return math.NaN()
}
// getMetricValue extracts a single float64 from a sample according to its
// family type: the raw value for counters and gauges, and the estimated 90th
// percentile for histograms. Unknown types (or a nil histogram) yield 0.
func getMetricValue(metric *dto.Metric, metricType dto.MetricType) float64 {
	switch metricType {
	case dto.MetricType_COUNTER:
		return metric.Counter.GetValue()
	case dto.MetricType_GAUGE:
		return metric.Gauge.GetValue()
	case dto.MetricType_HISTOGRAM:
		// BUG FIX: removed a stray `return ""` (diff residue from the old
		// getDiskName function) that cannot compile in a float64 function.
		if histogram := metric.Histogram; histogram != nil {
			return calculatePercentile(histogram, 0.9)
		}
	}
	return 0
}