Mirror of https://github.com/oarkflow/mq.git (synced 2025-09-26 20:11:16 +08:00)

Commit: update: dependencies
dag/context_keys.go (new file, 33 lines)

@@ -0,0 +1,33 @@
// context_keys.go
package dag

import (
    "time"

    "github.com/oarkflow/mq"
    "github.com/oarkflow/mq/logger"
)

type contextKey string

const (
    contextKeyTaskID      contextKey = "task_id"
    contextKeyMethod      contextKey = "method"
    contextKeyInitialNode contextKey = "initial_node"
)

// updateTaskMetrics is a placeholder for updating metrics for the task.
// In a real implementation, you could update a persistent store or an in-memory metrics structure.
func (tm *DAG) updateTaskMetrics(taskID string, result mq.Result, duration time.Duration) {
    // Example: update the last-executed timestamp, last error, total execution count, success count, etc.
    // For demonstration, we simply log the KPI updates.
    success := result.Error == nil
    tm.Logger().Info("Updating task metrics",
        logger.Field{Key: "taskID", Value: taskID},
        logger.Field{Key: "lastExecuted", Value: time.Now()},
        logger.Field{Key: "duration", Value: duration},
        logger.Field{Key: "success", Value: success},
    )
}
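A note on the typed key: storing values under a package-private contextKey prevents collisions with string keys set by other packages, because ctx.Value("task_id") and ctx.Value(contextKeyTaskID) are distinct lookups. A minimal sketch of how the constants above would be written and read back (the helper names are illustrative, not part of this commit):

package dag

import "context"

// withTaskID and taskIDFromContext are hypothetical helpers showing the
// intended use of the typed keys above; note that ProcessTask in dag.go
// below still stores the ID under the plain string "task_id".
func withTaskID(ctx context.Context, id string) context.Context {
    return context.WithValue(ctx, contextKeyTaskID, id)
}

func taskIDFromContext(ctx context.Context) (string, bool) {
    id, ok := ctx.Value(contextKeyTaskID).(string)
    return id, ok
}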

dag/dag.go (17 changed lines)

@@ -4,6 +4,7 @@ import (
    "context"
    "encoding/json"
    "fmt"
+   "github.com/oarkflow/mq/logger"
    "log"
    "strings"
    "time"

@@ -53,7 +54,6 @@ type DAG struct {
    startNode   string
    name        string
    report      string
    opts        []mq.Option
    hasPageNode bool
    paused      bool
}

@@ -75,7 +75,6 @@ func NewDAG(name, key string, finalResultCallback func(taskID string, result mq.
        mq.WithConsumerOnClose(d.onConsumerClose),
    )
    d.server = mq.NewBroker(opts...)
    d.opts = opts
    options := d.server.Options()
    d.pool = mq.NewPool(
        options.NumOfWorkers(),

@@ -230,6 +229,10 @@ func (tm *DAG) getCurrentNode(manager *TaskManager) string {
    return manager.currentNodePayload.Keys()[0]
}

+func (tm *DAG) Logger() logger.Logger {
+   return tm.server.Options().Logger()
+}
+
func (tm *DAG) ProcessTask(ctx context.Context, task *mq.Task) mq.Result {
    ctx = context.WithValue(ctx, "task_id", task.ID)
    userContext := form.UserContext(ctx)

@@ -282,11 +285,10 @@ func (tm *DAG) ProcessTask(ctx context.Context, task *mq.Task) mq.Result {
    if tm.hasPageNode {
        return <-resultCh
    }
    // Timeout handling
    select {
    case result := <-resultCh:
        return result
-   case <-time.After(30 * time.Second): // Set a timeout duration
+   case <-time.After(30 * time.Second):
        return mq.Result{
            Error: fmt.Errorf("timeout waiting for task result"),
            Ctx:   ctx,

@@ -335,7 +337,7 @@ func (tm *DAG) AddDAGNode(nodeType NodeType, name string, key string, dag *DAG,
}

func (tm *DAG) Start(ctx context.Context, addr string) error {
    // Start the server in a separate goroutine

    go func() {
        defer mq.RecoverPanic(mq.RecoverTitle)
        if err := tm.server.Start(ctx); err != nil {

@@ -343,12 +345,11 @@ func (tm *DAG) Start(ctx context.Context, addr string) error {
        }
    }()

    // Start the node consumers if not in sync mode
    if !tm.server.SyncMode() {
        tm.nodes.ForEach(func(_ string, con *Node) bool {
            go func(con *Node) {
                defer mq.RecoverPanic(mq.RecoverTitle)
-               limiter := rate.NewLimiter(rate.Every(1*time.Second), 1) // Retry every second
+               limiter := rate.NewLimiter(rate.Every(1*time.Second), 1)
                for {
                    err := con.processor.Consume(ctx)
                    if err != nil {

@@ -357,7 +358,7 @@ func (tm *DAG) Start(ctx context.Context, addr string) error {
                    log.Printf("[INFO] - Consumer %s started successfully", con.ID)
                    break
                }
-               limiter.Wait(ctx) // Wait with rate limiting before retrying
+               limiter.Wait(ctx)
            }
        }(con)
        return true
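
Note on the consumer bootstrap above: rate.NewLimiter(rate.Every(1*time.Second), 1) caps reconnect attempts at one per second until Consume succeeds. A self-contained sketch of the same retry pattern (the connect function is a hypothetical stand-in for con.processor.Consume):

package main

import (
    "context"
    "errors"
    "log"
    "time"

    "golang.org/x/time/rate"
)

func main() {
    ctx := context.Background()
    // One attempt per second, burst of one — the same settings used above.
    limiter := rate.NewLimiter(rate.Every(1*time.Second), 1)
    attempts := 0
    connect := func() error { // hypothetical stand-in for con.processor.Consume(ctx)
        attempts++
        if attempts < 3 {
            return errors.New("broker not ready")
        }
        return nil
    }
    for {
        if err := connect(); err != nil {
            log.Printf("[ERROR] - consumer failed to start: %v", err)
        } else {
            log.Printf("[INFO] - consumer started after %d attempts", attempts)
            break
        }
        limiter.Wait(ctx) // block until the limiter grants the next retry slot
    }
}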

dag/v1/api.go (deleted, 209 lines)

@@ -1,209 +0,0 @@
package v1

import (
    "context"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "os"
    "time"

    "github.com/oarkflow/mq/jsonparser"
    "github.com/oarkflow/mq/sio"

    "github.com/oarkflow/mq"
    "github.com/oarkflow/mq/consts"
    "github.com/oarkflow/mq/metrics"
)

type Request struct {
    Payload   json.RawMessage `json:"payload"`
    Interval  time.Duration   `json:"interval"`
    Schedule  bool            `json:"schedule"`
    Overlap   bool            `json:"overlap"`
    Recurring bool            `json:"recurring"`
}

func (tm *DAG) SetupWS() *sio.Server {
    ws := sio.New(sio.Config{
        CheckOrigin:       func(r *http.Request) bool { return true },
        EnableCompression: true,
    })
    WsEvents(ws)
    tm.Notifier = ws
    return ws
}

func (tm *DAG) Handlers() {
    metrics.HandleHTTP()
    http.Handle("/", http.FileServer(http.Dir("webroot")))
    http.Handle("/notify", tm.SetupWS())
    http.HandleFunc("GET /render", tm.Render)
    http.HandleFunc("POST /request", tm.Request)
    http.HandleFunc("POST /publish", tm.Publish)
    http.HandleFunc("POST /schedule", tm.Schedule)
    http.HandleFunc("/pause-consumer/{id}", func(writer http.ResponseWriter, request *http.Request) {
        id := request.PathValue("id")
        if id != "" {
            tm.PauseConsumer(request.Context(), id)
        }
    })
    http.HandleFunc("/resume-consumer/{id}", func(writer http.ResponseWriter, request *http.Request) {
        id := request.PathValue("id")
        if id != "" {
            tm.ResumeConsumer(request.Context(), id)
        }
    })
    http.HandleFunc("/pause", func(w http.ResponseWriter, request *http.Request) {
        err := tm.Pause(request.Context())
        if err != nil {
            http.Error(w, "Failed to pause", http.StatusBadRequest)
            return
        }
        json.NewEncoder(w).Encode(map[string]string{"status": "paused"})
    })
    http.HandleFunc("/resume", func(w http.ResponseWriter, request *http.Request) {
        err := tm.Resume(request.Context())
        if err != nil {
            http.Error(w, "Failed to resume", http.StatusBadRequest)
            return
        }
        json.NewEncoder(w).Encode(map[string]string{"status": "resumed"})
    })
    http.HandleFunc("/stop", func(w http.ResponseWriter, request *http.Request) {
        err := tm.Stop(request.Context())
        if err != nil {
            http.Error(w, "Failed to stop", http.StatusBadRequest)
            return
        }
        json.NewEncoder(w).Encode(map[string]string{"status": "stopped"})
    })
    http.HandleFunc("/close", func(w http.ResponseWriter, request *http.Request) {
        err := tm.Close()
        if err != nil {
            http.Error(w, "Failed to close", http.StatusBadRequest)
            return
        }
        json.NewEncoder(w).Encode(map[string]string{"status": "closed"})
    })
    http.HandleFunc("/dot", func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Content-Type", "text/plain")
        fmt.Fprintln(w, tm.ExportDOT())
    })
    http.HandleFunc("/ui", func(w http.ResponseWriter, r *http.Request) {
        image := fmt.Sprintf("%s.svg", mq.NewID())
        err := tm.SaveSVG(image)
        if err != nil {
            http.Error(w, "Failed to generate SVG", http.StatusBadRequest)
            return
        }
        defer os.Remove(image)
        svgBytes, err := os.ReadFile(image)
        if err != nil {
            http.Error(w, "Could not read SVG file", http.StatusInternalServerError)
            return
        }
        w.Header().Set("Content-Type", "image/svg+xml")
        if _, err := w.Write(svgBytes); err != nil {
            http.Error(w, "Could not write SVG response", http.StatusInternalServerError)
            return
        }
    })
}

func (tm *DAG) request(w http.ResponseWriter, r *http.Request, async bool) {
    if r.Method != http.MethodPost {
        http.Error(w, "Invalid request method", http.StatusMethodNotAllowed)
        return
    }
    var request Request
    if r.Body != nil {
        defer r.Body.Close()
        payload, err := io.ReadAll(r.Body)
        if err != nil {
            http.Error(w, "Failed to read request body", http.StatusBadRequest)
            return
        }
        err = json.Unmarshal(payload, &request)
        if err != nil {
            http.Error(w, "Failed to unmarshal body", http.StatusBadRequest)
            return
        }
    } else {
        http.Error(w, "Empty request body", http.StatusBadRequest)
        return
    }
    ctx := r.Context()
    if async {
        ctx = mq.SetHeaders(ctx, map[string]string{consts.AwaitResponseKey: "true"})
    }
    var opts []mq.SchedulerOption
    if request.Interval > 0 {
        opts = append(opts, mq.WithInterval(request.Interval))
    }
    if request.Overlap {
        opts = append(opts, mq.WithOverlap())
    }
    if request.Recurring {
        opts = append(opts, mq.WithRecurring())
    }
    ctx = context.WithValue(ctx, "query_params", r.URL.Query())
    var rs mq.Result
    if request.Schedule {
        rs = tm.ScheduleTask(ctx, request.Payload, opts...)
    } else {
        rs = tm.Process(ctx, request.Payload)
    }
    w.Header().Set("Content-Type", "application/json")
    json.NewEncoder(w).Encode(rs)
}

func (tm *DAG) Render(w http.ResponseWriter, r *http.Request) {
    ctx := mq.SetHeaders(r.Context(), map[string]string{consts.AwaitResponseKey: "true", "request_type": "render"})
    ctx = context.WithValue(ctx, "query_params", r.URL.Query())
    rs := tm.Process(ctx, nil)
    content, err := jsonparser.GetString(rs.Payload, "html_content")
    if err != nil {
        http.Error(w, "Failed to extract html_content", http.StatusBadRequest)
        return
    }
    w.Header().Set("Content-Type", consts.TypeHtml)
    w.Write([]byte(content))
}

func (tm *DAG) Request(w http.ResponseWriter, r *http.Request) {
    tm.request(w, r, true)
}

func (tm *DAG) Publish(w http.ResponseWriter, r *http.Request) {
    tm.request(w, r, false)
}

func (tm *DAG) Schedule(w http.ResponseWriter, r *http.Request) {
    tm.request(w, r, false)
}

func GetTaskID(ctx context.Context) string {
    if queryParams := ctx.Value("query_params"); queryParams != nil {
        if params, ok := queryParams.(url.Values); ok {
            if id := params.Get("taskID"); id != "" {
                return id
            }
        }
    }
    return ""
}

func CanNextNode(ctx context.Context) string {
    if queryParams := ctx.Value("query_params"); queryParams != nil {
        if params, ok := queryParams.(url.Values); ok {
            if id := params.Get("next"); id != "" {
                return id
            }
        }
    }
    return ""
}

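For reference, all three POST endpoints above decode the Request struct; because Interval is a Go time.Duration, JSON clients must send it as integer nanoseconds. A sketch of a client for POST /schedule (the address and payload are illustrative, assuming the DAG server listens on :8080):

package main

import (
    "bytes"
    "encoding/json"
    "net/http"
    "time"
)

func main() {
    // time.Duration marshals as integer nanoseconds, so "interval" below
    // serializes as 5000000000 (five seconds).
    body, _ := json.Marshal(map[string]any{
        "payload":   json.RawMessage(`{"user_id": 42}`),
        "interval":  5 * time.Second,
        "schedule":  true,
        "recurring": true,
    })
    resp, err := http.Post("http://localhost:8080/schedule", "application/json", bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
}
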
@@ -1,26 +0,0 @@
package v1

type NodeStatus int

func (c NodeStatus) IsValid() bool { return c >= Pending && c <= Failed }

func (c NodeStatus) String() string {
    switch c {
    case Pending:
        return "Pending"
    case Processing:
        return "Processing"
    case Completed:
        return "Completed"
    case Failed:
        return "Failed"
    }
    return ""
}

const (
    Pending NodeStatus = iota
    Processing
    Completed
    Failed
)

dag/v1/dag.go (deleted, 612 lines)

@@ -1,612 +0,0 @@
package v1

import (
    "context"
    "fmt"
    "log"
    "net/http"
    "time"

    "github.com/oarkflow/mq/storage"
    "github.com/oarkflow/mq/storage/memory"

    "github.com/oarkflow/mq/sio"

    "golang.org/x/time/rate"

    "github.com/oarkflow/mq"
    "github.com/oarkflow/mq/consts"
    "github.com/oarkflow/mq/metrics"
)

type EdgeType int

func (c EdgeType) IsValid() bool { return c >= Simple && c <= Iterator }

const (
    Simple EdgeType = iota
    Iterator
)

type NodeType int

func (c NodeType) IsValid() bool { return c >= Process && c <= Page }

const (
    Process NodeType = iota
    Page
)

type Node struct {
    processor mq.Processor
    Name      string
    Type      NodeType
    Key       string
    Edges     []Edge
    isReady   bool
}

func (n *Node) ProcessTask(ctx context.Context, msg *mq.Task) mq.Result {
    return n.processor.ProcessTask(ctx, msg)
}

func (n *Node) Close() error {
    return n.processor.Close()
}

type Edge struct {
    Label string
    From  *Node
    To    []*Node
    Type  EdgeType
}

type (
    FromNode string
    When     string
    Then     string
)

type DAG struct {
    server                   *mq.Broker
    consumer                 *mq.Consumer
    taskContext              storage.IMap[string, *TaskManager]
    nodes                    map[string]*Node
    iteratorNodes            storage.IMap[string, []Edge]
    conditions               map[FromNode]map[When]Then
    pool                     *mq.Pool
    taskCleanupCh            chan string
    name                     string
    key                      string
    startNode                string
    consumerTopic            string
    opts                     []mq.Option
    reportNodeResultCallback func(mq.Result)
    Notifier                 *sio.Server
    paused                   bool
    Error                    error
    report                   string
    index                    string
}

func (tm *DAG) SetKey(key string) {
    tm.key = key
}

func (tm *DAG) ReportNodeResult(callback func(mq.Result)) {
    tm.reportNodeResultCallback = callback
}

func (tm *DAG) GetType() string {
    return tm.key
}

func (tm *DAG) listenForTaskCleanup() {
    for taskID := range tm.taskCleanupCh {
        if tm.server.Options().CleanTaskOnComplete() {
            tm.taskCleanup(taskID)
        }
    }
}

func (tm *DAG) taskCleanup(taskID string) {
    tm.taskContext.Del(taskID)
    log.Printf("DAG - Task %s cleaned up", taskID)
}

func (tm *DAG) Consume(ctx context.Context) error {
    if tm.consumer != nil {
        tm.server.Options().SetSyncMode(true)
        return tm.consumer.Consume(ctx)
    }
    return nil
}

func (tm *DAG) Stop(ctx context.Context) error {
    for _, n := range tm.nodes {
        err := n.processor.Stop(ctx)
        if err != nil {
            return err
        }
    }
    return nil
}

func (tm *DAG) GetKey() string {
    return tm.key
}

func (tm *DAG) AssignTopic(topic string) {
    tm.consumer = mq.NewConsumer(topic, topic, tm.ProcessTask, mq.WithRespondPendingResult(false), mq.WithBrokerURL(tm.server.URL()))
    tm.consumerTopic = topic
}

func NewDAG(name, key string, opts ...mq.Option) *DAG {
    callback := func(ctx context.Context, result mq.Result) error { return nil }
    d := &DAG{
        name:          name,
        key:           key,
        nodes:         make(map[string]*Node),
        iteratorNodes: memory.New[string, []Edge](),
        taskContext:   memory.New[string, *TaskManager](),
        conditions:    make(map[FromNode]map[When]Then),
        taskCleanupCh: make(chan string),
    }
    opts = append(opts, mq.WithCallback(d.onTaskCallback), mq.WithConsumerOnSubscribe(d.onConsumerJoin), mq.WithConsumerOnClose(d.onConsumerClose))
    d.server = mq.NewBroker(opts...)
    d.opts = opts
    options := d.server.Options()
    d.pool = mq.NewPool(
        options.NumOfWorkers(),
        mq.WithTaskQueueSize(options.QueueSize()),
        mq.WithMaxMemoryLoad(options.MaxMemoryLoad()),
        mq.WithHandler(d.ProcessTask),
        mq.WithPoolCallback(callback),
        mq.WithTaskStorage(options.Storage()),
    )
    d.pool.Start(d.server.Options().NumOfWorkers())
    go d.listenForTaskCleanup()
    return d
}

func (tm *DAG) callbackToConsumer(ctx context.Context, result mq.Result) {
    if tm.consumer != nil {
        result.Topic = tm.consumerTopic
        if tm.consumer.Conn() == nil {
            tm.onTaskCallback(ctx, result)
        } else {
            tm.consumer.OnResponse(ctx, result)
        }
    }
}

func (tm *DAG) onTaskCallback(ctx context.Context, result mq.Result) mq.Result {
    if taskContext, ok := tm.taskContext.Get(result.TaskID); ok && result.Topic != "" {
        return taskContext.handleNextTask(ctx, result)
    }
    return mq.Result{}
}

func (tm *DAG) onConsumerJoin(_ context.Context, topic, _ string) {
    if node, ok := tm.nodes[topic]; ok {
        log.Printf("DAG - CONSUMER ~> ready on %s", topic)
        node.isReady = true
    }
}

func (tm *DAG) onConsumerClose(_ context.Context, topic, _ string) {
    if node, ok := tm.nodes[topic]; ok {
        log.Printf("DAG - CONSUMER ~> down on %s", topic)
        node.isReady = false
    }
}

func (tm *DAG) SetStartNode(node string) {
    tm.startNode = node
}

func (tm *DAG) GetStartNode() string {
    return tm.startNode
}

func (tm *DAG) Start(ctx context.Context, addr string) error {
    // Start the server in a separate goroutine
    go func() {
        defer mq.RecoverPanic(mq.RecoverTitle)
        if err := tm.server.Start(ctx); err != nil {
            panic(err)
        }
    }()

    // Start the node consumers if not in sync mode
    if !tm.server.SyncMode() {
        for _, con := range tm.nodes {
            go func(con *Node) {
                defer mq.RecoverPanic(mq.RecoverTitle)
                limiter := rate.NewLimiter(rate.Every(1*time.Second), 1) // Retry every second
                for {
                    err := con.processor.Consume(ctx)
                    if err != nil {
                        log.Printf("[ERROR] - Consumer %s failed to start: %v", con.Key, err)
                    } else {
                        log.Printf("[INFO] - Consumer %s started successfully", con.Key)
                        break
                    }
                    limiter.Wait(ctx) // Wait with rate limiting before retrying
                }
            }(con)
        }
    }
    log.Printf("DAG - HTTP_SERVER ~> started on http://localhost%s", addr)
    tm.Handlers()
    config := tm.server.TLSConfig()
    if config.UseTLS {
        return http.ListenAndServeTLS(addr, config.CertPath, config.KeyPath, nil)
    }
    return http.ListenAndServe(addr, nil)
}

func (tm *DAG) AddDAGNode(name string, key string, dag *DAG, firstNode ...bool) *DAG {
    dag.AssignTopic(key)
    tm.nodes[key] = &Node{
        Name:      name,
        Key:       key,
        processor: dag,
        isReady:   true,
    }
    if len(firstNode) > 0 && firstNode[0] {
        tm.startNode = key
    }
    return tm
}

func (tm *DAG) AddNode(name, key string, handler mq.Processor, firstNode ...bool) *DAG {
    con := mq.NewConsumer(key, key, handler.ProcessTask, tm.opts...)
    n := &Node{
        Name:      name,
        Key:       key,
        processor: con,
    }
    if handler.GetType() == "page" {
        n.Type = Page
    }
    if tm.server.SyncMode() {
        n.isReady = true
    }
    tm.nodes[key] = n
    if len(firstNode) > 0 && firstNode[0] {
        tm.startNode = key
    }
    return tm
}

func (tm *DAG) AddDeferredNode(name, key string, firstNode ...bool) error {
    if tm.server.SyncMode() {
        return fmt.Errorf("DAG cannot have deferred node in Sync Mode")
    }
    tm.nodes[key] = &Node{
        Name: name,
        Key:  key,
    }
    if len(firstNode) > 0 && firstNode[0] {
        tm.startNode = key
    }
    return nil
}

func (tm *DAG) IsReady() bool {
    for _, node := range tm.nodes {
        if !node.isReady {
            return false
        }
    }
    return true
}

func (tm *DAG) AddCondition(fromNode FromNode, conditions map[When]Then) *DAG {
    tm.conditions[fromNode] = conditions
    return tm
}

func (tm *DAG) AddIterator(label, from string, targets ...string) *DAG {
    tm.Error = tm.addEdge(Iterator, label, from, targets...)
    tm.iteratorNodes.Set(from, []Edge{})
    return tm
}

func (tm *DAG) AddEdge(label, from string, targets ...string) *DAG {
    tm.Error = tm.addEdge(Simple, label, from, targets...)
    return tm
}

func (tm *DAG) addEdge(edgeType EdgeType, label, from string, targets ...string) error {
    fromNode, ok := tm.nodes[from]
    if !ok {
        return fmt.Errorf("'from' node %s does not exist", from)
    }
    var nodes []*Node
    for _, target := range targets {
        toNode, ok := tm.nodes[target]
        if !ok {
            return fmt.Errorf("'to' node %s does not exist", target)
        }
        nodes = append(nodes, toNode)
    }
    edge := Edge{From: fromNode, To: nodes, Type: edgeType, Label: label}
    fromNode.Edges = append(fromNode.Edges, edge)
    if edgeType != Iterator {
        if edges, ok := tm.iteratorNodes.Get(fromNode.Key); ok {
            edges = append(edges, edge)
            tm.iteratorNodes.Set(fromNode.Key, edges)
        }
    }
    return nil
}

func (tm *DAG) Validate() error {
    report, hasCycle, err := tm.ClassifyEdges()
    if hasCycle || err != nil {
        tm.Error = err
        return err
    }
    tm.report = report
    return nil
}

func (tm *DAG) GetReport() string {
    return tm.report
}

func (tm *DAG) ProcessTask(ctx context.Context, task *mq.Task) mq.Result {
    if task.ID == "" {
        task.ID = mq.NewID()
    }
    if index, ok := mq.GetHeader(ctx, "index"); ok {
        tm.index = index
    }
    manager, exists := tm.taskContext.Get(task.ID)
    if !exists {
        manager = NewTaskManager(tm, task.ID, tm.iteratorNodes)
        manager.createdAt = task.CreatedAt
        tm.taskContext.Set(task.ID, manager)
    }

    if tm.consumer != nil {
        initialNode, err := tm.parseInitialNode(ctx)
        if err != nil {
            metrics.TasksErrors.WithLabelValues("unknown").Inc() // Increase error count
            return mq.Result{Error: err}
        }
        task.Topic = initialNode
    }
    if manager.topic != "" {
        task.Topic = manager.topic
        canNext := CanNextNode(ctx)
        if canNext != "" {
            if n, ok := tm.nodes[task.Topic]; ok {
                if len(n.Edges) > 0 {
                    task.Topic = n.Edges[0].To[0].Key
                }
            }
        }
    }
    result := manager.processTask(ctx, task.Topic, task.Payload)
    if result.Ctx != nil && tm.index != "" {
        result.Ctx = mq.SetHeaders(result.Ctx, map[string]string{"index": tm.index})
    }
    if result.Error != nil {
        metrics.TasksErrors.WithLabelValues(task.Topic).Inc() // Increase error count
    } else {
        metrics.TasksProcessed.WithLabelValues("success").Inc() // Increase processed task count
    }
    return result
}

func (tm *DAG) check(ctx context.Context, payload []byte) (context.Context, *mq.Task, error) {
    if tm.paused {
        return ctx, nil, fmt.Errorf("unable to process task, error: DAG is not accepting any task")
    }
    if !tm.IsReady() {
        return ctx, nil, fmt.Errorf("unable to process task, error: DAG is not ready yet")
    }
    initialNode, err := tm.parseInitialNode(ctx)
    if err != nil {
        return ctx, nil, err
    }
    if tm.server.SyncMode() {
        ctx = mq.SetHeaders(ctx, map[string]string{consts.AwaitResponseKey: "true"})
    }
    taskID := GetTaskID(ctx)
    if taskID != "" {
        if _, exists := tm.taskContext.Get(taskID); !exists {
            return ctx, nil, fmt.Errorf("provided task ID doesn't exist")
        }
    }
    if taskID == "" {
        taskID = mq.NewID()
    }
    return ctx, mq.NewTask(taskID, payload, initialNode), nil
}

func (tm *DAG) Process(ctx context.Context, payload []byte) mq.Result {
    ctx, task, err := tm.check(ctx, payload)
    if err != nil {
        return mq.Result{Error: err}
    }
    awaitResponse, _ := mq.GetAwaitResponse(ctx)
    if awaitResponse != "true" {
        headers, ok := mq.GetHeaders(ctx)
        ctxx := context.Background()
        if ok {
            ctxx = mq.SetHeaders(ctxx, headers.AsMap())
        }
        if err := tm.pool.EnqueueTask(ctxx, task, 0); err != nil {
            return mq.Result{CreatedAt: task.CreatedAt, TaskID: task.ID, Topic: task.Topic, Status: "FAILED", Error: err}
        }
        return mq.Result{CreatedAt: task.CreatedAt, TaskID: task.ID, Topic: task.Topic, Status: "PENDING"}
    }
    return tm.ProcessTask(ctx, task)
}

func (tm *DAG) ScheduleTask(ctx context.Context, payload []byte, opts ...mq.SchedulerOption) mq.Result {
    ctx, task, err := tm.check(ctx, payload)
    if err != nil {
        return mq.Result{Error: err}
    }
    headers, ok := mq.GetHeaders(ctx)
    ctxx := context.Background()
    if ok {
        ctxx = mq.SetHeaders(ctxx, headers.AsMap())
    }
    tm.pool.Scheduler().AddTask(ctxx, task, opts...)
    return mq.Result{CreatedAt: task.CreatedAt, TaskID: task.ID, Topic: task.Topic, Status: "PENDING"}
}

func (tm *DAG) parseInitialNode(ctx context.Context) (string, error) {
    val := ctx.Value("initial_node")
    initialNode, ok := val.(string)
    if ok {
        return initialNode, nil
    }
    if tm.startNode == "" {
        firstNode := tm.findStartNode()
        if firstNode != nil {
            tm.startNode = firstNode.Key
        }
    }

    if tm.startNode == "" {
        return "", fmt.Errorf("initial node not found")
    }
    return tm.startNode, nil
}

func (tm *DAG) findStartNode() *Node {
    incomingEdges := make(map[string]bool)
    connectedNodes := make(map[string]bool)
    for _, node := range tm.nodes {
        for _, edge := range node.Edges {
            if edge.Type.IsValid() {
                connectedNodes[node.Key] = true
                for _, to := range edge.To {
                    connectedNodes[to.Key] = true
                    incomingEdges[to.Key] = true
                }
            }
        }
        if cond, ok := tm.conditions[FromNode(node.Key)]; ok {
            for _, target := range cond {
                connectedNodes[string(target)] = true
                incomingEdges[string(target)] = true
            }
        }
    }
    for nodeID, node := range tm.nodes {
        if !incomingEdges[nodeID] && connectedNodes[nodeID] {
            return node
        }
    }
    return nil
}

func (tm *DAG) Pause(_ context.Context) error {
    tm.paused = true
    return nil
}

func (tm *DAG) Resume(_ context.Context) error {
    tm.paused = false
    return nil
}

func (tm *DAG) Close() error {
    for _, n := range tm.nodes {
        err := n.Close()
        if err != nil {
            return err
        }
    }
    return nil
}

func (tm *DAG) PauseConsumer(ctx context.Context, id string) {
    tm.doConsumer(ctx, id, consts.CONSUMER_PAUSE)
}

func (tm *DAG) ResumeConsumer(ctx context.Context, id string) {
    tm.doConsumer(ctx, id, consts.CONSUMER_RESUME)
}

func (tm *DAG) doConsumer(ctx context.Context, id string, action consts.CMD) {
    if node, ok := tm.nodes[id]; ok {
        switch action {
        case consts.CONSUMER_PAUSE:
            err := node.processor.Pause(ctx)
            if err == nil {
                node.isReady = false
                log.Printf("[INFO] - Consumer %s paused successfully", node.Key)
            } else {
                log.Printf("[ERROR] - Failed to pause consumer %s: %v", node.Key, err)
            }
        case consts.CONSUMER_RESUME:
            err := node.processor.Resume(ctx)
            if err == nil {
                node.isReady = true
                log.Printf("[INFO] - Consumer %s resumed successfully", node.Key)
            } else {
                log.Printf("[ERROR] - Failed to resume consumer %s: %v", node.Key, err)
            }
        }
    } else {
        log.Printf("[WARNING] - Consumer %s not found", id)
    }
}

func (tm *DAG) SetNotifyResponse(callback mq.Callback) {
    tm.server.SetNotifyHandler(callback)
}

func (tm *DAG) GetNextNodes(key string) ([]*Node, error) {
    node, exists := tm.nodes[key]
    if !exists {
        return nil, fmt.Errorf("node with key %s does not exist", key)
    }
    var successors []*Node
    for _, edge := range node.Edges {
        successors = append(successors, edge.To...)
    }
    if conds, exists := tm.conditions[FromNode(key)]; exists {
        for _, targetKey := range conds {
            if targetNode, exists := tm.nodes[string(targetKey)]; exists {
                successors = append(successors, targetNode)
            }
        }
    }
    return successors, nil
}

func (tm *DAG) GetPreviousNodes(key string) ([]*Node, error) {
    var predecessors []*Node
    for _, node := range tm.nodes {
        for _, edge := range node.Edges {
            for _, target := range edge.To {
                if target.Key == key {
                    predecessors = append(predecessors, node)
                }
            }
        }
    }
    for fromNode, conds := range tm.conditions {
        for _, targetKey := range conds {
            if string(targetKey) == key {
                node, exists := tm.nodes[string(fromNode)]
                if !exists {
                    return nil, fmt.Errorf("node with key %s does not exist", fromNode)
                }
                predecessors = append(predecessors, node)
            }
        }
    }
    return predecessors, nil
}

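For context on what this commit removes: the v1 API assembled a pipeline by registering nodes and edges on a DAG, then validating the graph for cycles. A reconstructed sketch, under the assumption that *Operation (defined in the next file) satisfies mq.Processor, as its method set suggests; node names and keys are made up:

package main

import (
    "fmt"

    v1 "github.com/oarkflow/mq/dag/v1"
)

func main() {
    // Wire two pass-through nodes with a simple edge, then validate.
    d := v1.NewDAG("example", "example")
    d.AddNode("Validate", "validate", &v1.Operation{Type: "process"}, true)
    d.AddNode("Store", "store", &v1.Operation{Type: "process"})
    d.AddEdge("then", "validate", "store")
    if err := d.Validate(); err != nil {
        panic(err) // a cycle or classification error was found
    }
    fmt.Println(d.GetReport())
}
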
@@ -1,477 +0,0 @@
package v1

import (
    "context"
    "fmt"
    "slices"
    "strings"
    "time"

    "github.com/oarkflow/json"

    "github.com/oarkflow/date"
    "github.com/oarkflow/dipper"
    "github.com/oarkflow/errors"
    "github.com/oarkflow/expr"
    "github.com/oarkflow/xid"
    "golang.org/x/exp/maps"

    "github.com/oarkflow/mq"
)

type Processor interface {
    mq.Processor
    SetConfig(Payload)
}

type Condition interface {
    Match(data any) bool
}

type ConditionProcessor interface {
    Processor
    SetConditions(map[string]Condition)
}

type Provider struct {
    Mapping       map[string]any `json:"mapping"`
    UpdateMapping map[string]any `json:"update_mapping"`
    InsertMapping map[string]any `json:"insert_mapping"`
    Defaults      map[string]any `json:"defaults"`
    ProviderType  string         `json:"provider_type"`
    Database      string         `json:"database"`
    Source        string         `json:"source"`
    Query         string         `json:"query"`
}

type Payload struct {
    Data            map[string]any    `json:"data"`
    Mapping         map[string]string `json:"mapping"`
    GeneratedFields []string          `json:"generated_fields"`
    Providers       []Provider        `json:"providers"`
}

type Operation struct {
    ID              string   `json:"id"`
    Type            string   `json:"type"`
    Key             string   `json:"key"`
    RequiredFields  []string `json:"required_fields"`
    OptionalFields  []string `json:"optional_fields"`
    GeneratedFields []string `json:"generated_fields"`
    Payload         Payload
}

func (e *Operation) Consume(_ context.Context) error {
    return nil
}

func (e *Operation) Pause(_ context.Context) error {
    return nil
}

func (e *Operation) Resume(_ context.Context) error {
    return nil
}

func (e *Operation) Stop(_ context.Context) error {
    return nil
}

func (e *Operation) Close() error {
    return nil
}

func (e *Operation) ProcessTask(_ context.Context, task *mq.Task) mq.Result {
    return mq.Result{Payload: task.Payload}
}

func (e *Operation) SetConfig(payload Payload) {
    e.Payload = payload
    e.GeneratedFields = slices.Compact(append(e.GeneratedFields, payload.GeneratedFields...))
}

func (e *Operation) GetType() string {
    return e.Type
}

func (e *Operation) GetKey() string {
    return e.Key
}

func (e *Operation) SetKey(key string) {
    e.Key = key
}

func (e *Operation) ValidateFields(c context.Context, payload []byte) (map[string]any, error) {
    var keys []string
    var data map[string]any
    err := json.Unmarshal(payload, &data)
    if err != nil {
        return nil, err
    }
    for k, v := range e.Payload.Mapping {
        _, val := GetVal(c, v, data)
        if val != nil {
            keys = append(keys, k)
        }
    }
    for k := range e.Payload.Data {
        keys = append(keys, k)
    }
    for _, k := range e.RequiredFields {
        if !slices.Contains(keys, k) {
            return nil, errors.New("required field doesn't exist")
        }
    }
    return data, nil
}

func GetVal(c context.Context, v string, data map[string]any) (key string, val any) {
    key, val = getVal(c, v, data)
    if val == nil {
        if strings.Contains(v, "+") {
            vPartsG := strings.Split(v, "+")
            var value []string
            for _, v := range vPartsG {
                key, val = getVal(c, strings.TrimSpace(v), data)
                if val == nil {
                    continue
                }
                value = append(value, val.(string))
            }
            val = strings.Join(value, "")
        } else {
            key, val = getVal(c, v, data)
        }
    }

    return
}

func Header(c context.Context, headerKey string) (val map[string]any, exists bool) {
    header := c.Value("header")
    switch header := header.(type) {
    case map[string]any:
        if p, exist := header[headerKey]; exist && p != nil {
            val = p.(map[string]any)
            exists = exist
        }
    }
    return
}

func HeaderVal(c context.Context, headerKey string, key string) (val any) {
    header := c.Value("header")
    switch header := header.(type) {
    case map[string]any:
        if p, exists := header[headerKey]; exists && p != nil {
            headerField := p.(map[string]any)
            if v, e := headerField[key]; e {
                val = v
            }
        }
    }
    return
}

func getVal(c context.Context, v string, data map[string]any) (key string, val any) {
    var param, query, consts map[string]any
    var enums map[string]map[string]any
    headerData := make(map[string]any)
    header := c.Value("header")
    switch header := header.(type) {
    case map[string]any:
        if p, exists := header["param"]; exists && p != nil {
            param = p.(map[string]any)
        }
        if p, exists := header["query"]; exists && p != nil {
            query = p.(map[string]any)
        }
        if p, exists := header["consts"]; exists && p != nil {
            consts = p.(map[string]any)
        }
        if p, exists := header["enums"]; exists && p != nil {
            enums = p.(map[string]map[string]any)
        }
        params := []string{"param", "query", "consts", "enums", "scopes"}
        // add header entries other than param, query, consts, enums to headerData
        for k, v := range header {
            if !slices.Contains(params, k) {
                headerData[k] = v
            }
        }
    }
    v = strings.TrimPrefix(v, "header.")
    vParts := strings.Split(v, ".")
    switch vParts[0] {
    case "body":
        v := vParts[1]
        if strings.Contains(v, "*_") {
            fieldSuffix := strings.ReplaceAll(v, "*", "")
            for k, vt := range data {
                if strings.HasSuffix(k, fieldSuffix) {
                    val = vt
                    key = k
                }
            }
        } else {
            if vd, ok := data[v]; ok {
                val = vd
                key = v
            }
        }
    case "param":
        v := vParts[1]
        if strings.Contains(v, "*_") {
            fieldSuffix := strings.ReplaceAll(v, "*", "")
            for k, vt := range param {
                if strings.HasSuffix(k, fieldSuffix) {
                    val = vt
                    key = k
                }
            }
        } else {
            if vd, ok := param[v]; ok {
                val = vd
                key = v
            }
        }
    case "query":
        v := vParts[1]
        if strings.Contains(v, "*_") {
            fieldSuffix := strings.ReplaceAll(v, "*", "")
            for k, vt := range query {
                if strings.HasSuffix(k, fieldSuffix) {
                    val = vt
                    key = k
                }
            }
        } else {
            if vd, ok := query[v]; ok {
                val = vd
                key = v
            }
        }
    case "eval":
        // join the remaining parts when more than two exist
        var v string
        if len(vParts) > 2 {
            v = strings.Join(vParts[1:], ".")
        } else {
            v = vParts[1]
        }
        // remove '{{' and '}}'
        v = v[2 : len(v)-2]

        // parse the expression
        p, err := expr.Parse(v)
        if err != nil {
            return "", nil
        }
        // evaluate the expression
        val, err := p.Eval(data)
        if err != nil {
            val, err := p.Eval(headerData)
            if err == nil {
                return v, val
            }
            return "", nil
        } else {
            return v, val
        }
    case "eval_raw", "gorm_eval":
        // join the remaining parts when more than two exist
        var v string
        if len(vParts) > 2 {
            v = strings.Join(vParts[1:], ".")
        } else {
            v = vParts[1]
        }
        // remove '{{' and '}}'
        v = v[2 : len(v)-2]

        // parse the expression
        p, err := expr.Parse(v)
        if err != nil {
            return "", nil
        }
        dt := map[string]any{
            "header": header,
        }
        for k, vt := range data {
            dt[k] = vt
        }
        // evaluate the expression
        val, err := p.Eval(dt)
        if err != nil {
            val, err := p.Eval(headerData)
            if err == nil {
                return v, val
            }
            return "", nil
        } else {
            return v, val
        }
    case "consts":
        constG := vParts[1]
        if constVal, ok := consts[constG]; ok {
            val = constVal
            key = v
        }
    case "enums":
        enumG := vParts[1]
        if enumGVal, ok := enums[enumG]; ok {
            if enumVal, ok := enumGVal[vParts[2]]; ok {
                val = enumVal
                key = v
            }
        }
    default:
        if strings.Contains(v, "*_") {
            fieldSuffix := strings.ReplaceAll(v, "*", "")
            for k, vt := range data {
                if strings.HasSuffix(k, fieldSuffix) {
                    val = vt
                    key = k
                }
            }
        } else {
            vd, err := dipper.Get(data, v)
            if err == nil {
                val = vd
                key = v
            } else {
                vd, err := dipper.Get(headerData, v)
                if err == nil {
                    val = vd
                    key = v
                }
            }
        }
    }
    return
}

func init() {
    // define custom functions for use in config
    expr.AddFunction("trim", func(params ...interface{}) (interface{}, error) {
        if len(params) == 0 || len(params) > 1 || params[0] == nil {
            return nil, errors.New("invalid number of arguments")
        }
        val, ok := params[0].(string)
        if !ok {
            return nil, errors.New("invalid argument type")
        }
        return strings.TrimSpace(val), nil
    })
    expr.AddFunction("upper", func(params ...interface{}) (interface{}, error) {
        if len(params) == 0 || len(params) > 1 || params[0] == nil {
            return nil, errors.New("invalid number of arguments")
        }
        val, ok := params[0].(string)
        if !ok {
            return nil, errors.New("invalid argument type")
        }
        return strings.ToUpper(val), nil
    })
    expr.AddFunction("lower", func(params ...interface{}) (interface{}, error) {
        if len(params) == 0 || len(params) > 1 || params[0] == nil {
            return nil, errors.New("invalid number of arguments")
        }
        val, ok := params[0].(string)
        if !ok {
            return nil, errors.New("invalid argument type")
        }
        return strings.ToLower(val), nil
    })
    expr.AddFunction("date", func(params ...interface{}) (interface{}, error) {
        if len(params) == 0 || len(params) > 1 || params[0] == nil {
            return nil, errors.New("invalid number of arguments")
        }
        val, ok := params[0].(string)
        if !ok {
            return nil, errors.New("invalid argument type")
        }
        t, err := date.Parse(val)
        if err != nil {
            return nil, err
        }
        return t.Format("2006-01-02"), nil
    })
    expr.AddFunction("datetime", func(params ...interface{}) (interface{}, error) {
        if len(params) == 0 || len(params) > 1 || params[0] == nil {
            return nil, errors.New("invalid number of arguments")
        }
        val, ok := params[0].(string)
        if !ok {
            return nil, errors.New("invalid argument type")
        }
        t, err := date.Parse(val)
        if err != nil {
            return nil, err
        }
        return t.Format(time.RFC3339), nil
    })
    expr.AddFunction("addSecondsToNow", func(params ...interface{}) (interface{}, error) {
        if len(params) == 0 || len(params) > 1 || params[0] == nil {
            return nil, errors.New("invalid number of arguments")
        }
        // if params[0] is neither float64 nor int, return an error
        tt, isFloat := params[0].(float64)
        if !isFloat {
            if _, ok := params[0].(int); !ok {
                return nil, errors.New("invalid argument type")
            }
        }
        // add the expiry to the current time;
        // convert params[0] from float64 to int if needed
        if isFloat {
            params[0] = int(tt)
        }
        t := time.Now().UTC()
        t = t.Add(time.Duration(params[0].(int)) * time.Second)
        return t, nil
    })
    expr.AddFunction("values", func(params ...interface{}) (interface{}, error) {
        if len(params) == 0 || len(params) > 2 {
            return nil, errors.New("invalid number of arguments")
        }
        // get values from a list of maps
        mapList, ok := params[0].([]any)
        if !ok {
            return nil, errors.New("invalid argument type")
        }
        // the optional second argument selects a single key
        var keyToGet string
        var hasKey bool
        if len(params) == 2 {
            keyToGet, hasKey = params[1].(string)
        }
        var values []any
        if hasKey {
            for _, m := range mapList {
                mp := m.(map[string]any)
                if val, ok := mp[keyToGet]; ok {
                    values = append(values, val)
                }
            }
        } else {
            for _, m := range mapList {
                mp := m.(map[string]any)
                vals := maps.Values(mp)
                values = append(values, vals...)
            }
        }
        return values, nil
    })
    expr.AddFunction("uniqueid", func(params ...interface{}) (interface{}, error) {
        // create a new xid
        return xid.New().String(), nil
    })
    expr.AddFunction("now", func(params ...interface{}) (interface{}, error) {
        // get the current time in UTC
        return time.Now().UTC(), nil
    })
    expr.AddFunction("toString", func(params ...interface{}) (interface{}, error) {
        if len(params) == 0 || len(params) > 1 || params[0] == nil {
            return nil, errors.New("invalid number of arguments")
        }
        // convert to string
        return fmt.Sprint(params[0]), nil
    })
}

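The init block above registers helpers with the expression engine (github.com/oarkflow/expr). A minimal sketch of invoking a registered function through the same Parse/Eval API that getVal uses; the expression syntax and data map are illustrative, and expr.Parse is assumed to return a program with an Eval(map[string]any) method, as the code above uses it:

package main

import (
    "fmt"
    "strings"

    "github.com/oarkflow/expr"
)

func main() {
    // Register a helper, mirroring the init() pattern above.
    expr.AddFunction("upper", func(params ...interface{}) (interface{}, error) {
        if len(params) != 1 || params[0] == nil {
            return nil, fmt.Errorf("upper expects exactly one argument")
        }
        s, ok := params[0].(string)
        if !ok {
            return nil, fmt.Errorf("upper expects a string")
        }
        return strings.ToUpper(s), nil
    })

    // Parse and evaluate an expression against a data map, as getVal does.
    p, err := expr.Parse(`upper(first_name)`)
    if err != nil {
        panic(err)
    }
    val, err := p.Eval(map[string]any{"first_name": "alice"})
    if err != nil {
        panic(err)
    }
    fmt.Println(val) // ALICE
}
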
@@ -1,64 +0,0 @@
package v1

import (
    "sync"

    "github.com/oarkflow/mq"
)

type Operations struct {
    mu       *sync.RWMutex
    Handlers map[string]func(string) mq.Processor
}

var ops = &Operations{mu: &sync.RWMutex{}, Handlers: make(map[string]func(string) mq.Processor)}

func AddHandler(key string, handler func(string) mq.Processor) {
    ops.mu.Lock()
    ops.Handlers[key] = handler
    ops.mu.Unlock()
}

func GetHandler(key string) func(string) mq.Processor {
    return ops.Handlers[key]
}

func AvailableHandlers() []string {
    var op []string
    for opt := range ops.Handlers {
        op = append(op, opt)
    }
    return op
}

type List struct {
    mu       *sync.RWMutex
    Handlers map[string]*DAG
}

var dags = &List{mu: &sync.RWMutex{}, Handlers: make(map[string]*DAG)}

func AddDAG(key string, handler *DAG) {
    dags.mu.Lock()
    dags.Handlers[key] = handler
    dags.mu.Unlock()
}

func GetDAG(key string) *DAG {
    return dags.Handlers[key]
}

func ClearDAG() {
    dags.mu.Lock()
    clear(dags.Handlers)
    dags.mu.Unlock()
    dags.Handlers = make(map[string]*DAG)
}

func AvailableDAG() []string {
    var op []string
    for opt := range dags.Handlers {
        op = append(op, opt)
    }
    return op
}

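The registry above is a plain mutex-guarded map of processor factories. A sketch of registering a factory and listing what is available, using the package removed by this commit (the "noop" key is illustrative):

package main

import (
    "fmt"

    "github.com/oarkflow/mq"
    v1 "github.com/oarkflow/mq/dag/v1"
)

func main() {
    // Register a factory that builds a pass-through Operation per node ID.
    v1.AddHandler("noop", func(id string) mq.Processor {
        op := &v1.Operation{ID: id, Type: "process"}
        op.SetKey(id)
        return op
    })
    fmt.Println(v1.AvailableHandlers()) // [noop]
    processor := v1.GetHandler("noop")("node-1")
    fmt.Println(processor.GetKey()) // node-1
}
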
@@ -1,374 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/oarkflow/mq/consts"
|
||||
"github.com/oarkflow/mq/storage"
|
||||
"github.com/oarkflow/mq/storage/memory"
|
||||
|
||||
"github.com/oarkflow/mq"
|
||||
)
|
||||
|
||||
type TaskManager struct {
|
||||
createdAt time.Time
|
||||
processedAt time.Time
|
||||
status string
|
||||
dag *DAG
|
||||
taskID string
|
||||
wg *WaitGroup
|
||||
topic string
|
||||
result mq.Result
|
||||
|
||||
iteratorNodes storage.IMap[string, []Edge]
|
||||
taskNodeStatus storage.IMap[string, *taskNodeStatus]
|
||||
}
|
||||
|
||||
func NewTaskManager(d *DAG, taskID string, iteratorNodes storage.IMap[string, []Edge]) *TaskManager {
|
||||
return &TaskManager{
|
||||
dag: d,
|
||||
taskNodeStatus: memory.New[string, *taskNodeStatus](),
|
||||
taskID: taskID,
|
||||
iteratorNodes: iteratorNodes,
|
||||
wg: NewWaitGroup(),
|
||||
}
|
||||
}
|
||||
|
||||
func (tm *TaskManager) dispatchFinalResult(ctx context.Context) mq.Result {
|
||||
tm.updateTS(&tm.result)
|
||||
tm.dag.callbackToConsumer(ctx, tm.result)
|
||||
if tm.dag.server.NotifyHandler() != nil {
|
||||
_ = tm.dag.server.NotifyHandler()(ctx, tm.result)
|
||||
}
|
||||
tm.dag.taskCleanupCh <- tm.taskID
|
||||
tm.topic = tm.result.Topic
|
||||
return tm.result
|
||||
}
|
||||
|
||||
func (tm *TaskManager) reportNodeResult(result mq.Result, final bool) {
|
||||
if tm.dag.reportNodeResultCallback != nil {
|
||||
tm.dag.reportNodeResultCallback(result)
|
||||
}
|
||||
}
|
||||
|
||||
func (tm *TaskManager) SetTotalItems(topic string, i int) {
|
||||
if nodeStatus, ok := tm.taskNodeStatus.Get(topic); ok {
|
||||
nodeStatus.totalItems = i
|
||||
}
|
||||
}
|
||||
|
||||
func (tm *TaskManager) processNode(ctx context.Context, node *Node, payload json.RawMessage) {
|
||||
topic := getTopic(ctx, node.Key)
|
||||
tm.taskNodeStatus.Set(topic, newNodeStatus(topic))
|
||||
defer mq.RecoverPanic(mq.RecoverTitle)
|
||||
dag, isDAG := isDAGNode(node)
|
||||
if isDAG {
|
||||
if tm.dag.server.SyncMode() && !dag.server.SyncMode() {
|
||||
dag.server.Options().SetSyncMode(true)
|
||||
}
|
||||
}
|
||||
tm.ChangeNodeStatus(ctx, node.Key, Processing, mq.Result{Payload: payload, Topic: node.Key})
|
||||
var result mq.Result
|
||||
if tm.dag.server.SyncMode() {
|
||||
defer func() {
|
||||
if isDAG {
|
||||
result.Topic = dag.consumerTopic
|
||||
result.TaskID = tm.taskID
|
||||
tm.reportNodeResult(result, false)
|
||||
tm.handleNextTask(result.Ctx, result)
|
||||
} else {
|
||||
result.Topic = node.Key
|
||||
tm.reportNodeResult(result, false)
|
||||
tm.handleNextTask(ctx, result)
|
||||
}
|
||||
}()
|
||||
}
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
result = mq.Result{TaskID: tm.taskID, Topic: node.Key, Error: ctx.Err(), Ctx: ctx}
|
||||
tm.reportNodeResult(result, true)
|
||||
tm.ChangeNodeStatus(ctx, node.Key, Failed, result)
|
||||
return
|
||||
default:
|
||||
ctx = mq.SetHeaders(ctx, map[string]string{consts.QueueKey: node.Key})
|
||||
if tm.dag.server.SyncMode() {
|
||||
result = node.ProcessTask(ctx, mq.NewTask(tm.taskID, payload, node.Key))
|
||||
if result.Error != nil {
|
||||
tm.reportNodeResult(result, true)
|
||||
tm.ChangeNodeStatus(ctx, node.Key, Failed, result)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
err := tm.dag.server.Publish(ctx, mq.NewTask(tm.taskID, payload, node.Key), node.Key)
|
||||
if err != nil {
|
||||
tm.reportNodeResult(mq.Result{Error: err}, true)
|
||||
tm.ChangeNodeStatus(ctx, node.Key, Failed, result)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (tm *TaskManager) processTask(ctx context.Context, nodeID string, payload json.RawMessage) mq.Result {
|
||||
defer mq.RecoverPanic(mq.RecoverTitle)
|
||||
node, ok := tm.dag.nodes[nodeID]
|
||||
if !ok {
|
||||
return mq.Result{Error: fmt.Errorf("nodeID %s not found", nodeID)}
|
||||
}
|
||||
if tm.createdAt.IsZero() {
|
||||
tm.createdAt = time.Now()
|
||||
}
|
||||
tm.wg.Add(1)
|
||||
go func() {
|
||||
ctxx := context.Background()
|
||||
if headers, ok := mq.GetHeaders(ctx); ok {
|
||||
headers.Set(consts.QueueKey, node.Key)
|
||||
headers.Set("index", fmt.Sprintf("%s__%d", node.Key, 0))
|
||||
ctxx = mq.SetHeaders(ctxx, headers.AsMap())
|
||||
}
|
||||
go tm.processNode(ctx, node, payload)
|
||||
}()
|
||||
tm.wg.Wait()
|
||||
requestType, ok := mq.GetHeader(ctx, "request_type")
|
||||
if ok && requestType == "render" {
|
||||
return tm.renderResult(ctx)
|
||||
}
|
||||
return tm.dispatchFinalResult(ctx)
|
||||
}
|
||||
|
||||
func (tm *TaskManager) handleNextTask(ctx context.Context, result mq.Result) mq.Result {
|
||||
tm.topic = result.Topic
|
||||
defer func() {
|
||||
tm.wg.Done()
|
||||
mq.RecoverPanic(mq.RecoverTitle)
|
||||
}()
|
||||
if result.Ctx != nil {
|
||||
if headers, ok := mq.GetHeaders(ctx); ok {
|
||||
ctx = mq.SetHeaders(result.Ctx, headers.AsMap())
|
||||
}
|
||||
}
|
||||
node, ok := tm.dag.nodes[result.Topic]
|
||||
if !ok {
|
||||
return result
|
||||
}
|
||||
if result.Error != nil {
|
||||
tm.reportNodeResult(result, true)
|
||||
tm.ChangeNodeStatus(ctx, node.Key, Failed, result)
|
||||
return result
|
||||
}
|
||||
edges := tm.getConditionalEdges(node, result)
|
||||
if len(edges) == 0 {
|
||||
tm.reportNodeResult(result, true)
|
||||
tm.ChangeNodeStatus(ctx, node.Key, Completed, result)
|
||||
return result
|
||||
} else {
|
||||
tm.reportNodeResult(result, false)
|
||||
}
|
||||
if node.Type == Page {
|
||||
return result
|
||||
}
|
||||
for _, edge := range edges {
|
||||
switch edge.Type {
|
||||
case Iterator:
|
||||
var items []json.RawMessage
|
||||
err := json.Unmarshal(result.Payload, &items)
|
||||
if err != nil {
|
||||
tm.reportNodeResult(mq.Result{TaskID: tm.taskID, Topic: node.Key, Error: err}, false)
|
||||
result.Error = err
|
||||
tm.ChangeNodeStatus(ctx, node.Key, Failed, result)
|
||||
return result
|
||||
}
|
||||
tm.SetTotalItems(getTopic(ctx, edge.From.Key), len(items)*len(edge.To))
|
||||
for _, target := range edge.To {
|
||||
for i, item := range items {
|
||||
tm.wg.Add(1)
|
||||
go func(ctx context.Context, target *Node, item json.RawMessage, i int) {
|
||||
ctxx := context.Background()
|
||||
if headers, ok := mq.GetHeaders(ctx); ok {
|
||||
headers.Set(consts.QueueKey, target.Key)
|
||||
headers.Set("index", fmt.Sprintf("%s__%d", target.Key, i))
|
||||
ctxx = mq.SetHeaders(ctxx, headers.AsMap())
|
||||
}
|
||||
tm.processNode(ctxx, target, item)
|
||||
}(ctx, target, item, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, edge := range edges {
|
||||
switch edge.Type {
|
||||
case Simple:
|
||||
if _, ok := tm.iteratorNodes.Get(edge.From.Key); ok {
|
||||
continue
|
||||
}
|
||||
tm.processEdge(ctx, edge, result)
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func (tm *TaskManager) processEdge(ctx context.Context, edge Edge, result mq.Result) {
|
||||
tm.SetTotalItems(getTopic(ctx, edge.From.Key), len(edge.To))
|
||||
index, _ := mq.GetHeader(ctx, "index")
|
||||
if index != "" && strings.Contains(index, "__") {
|
||||
index = strings.Split(index, "__")[1]
|
||||
} else {
|
||||
index = "0"
|
||||
}
|
||||
for _, target := range edge.To {
|
||||
tm.wg.Add(1)
|
||||
go func(ctx context.Context, target *Node, result mq.Result) {
|
||||
ctxx := context.Background()
|
||||
if headers, ok := mq.GetHeaders(ctx); ok {
|
||||
headers.Set(consts.QueueKey, target.Key)
|
||||
headers.Set("index", fmt.Sprintf("%s__%s", target.Key, index))
|
||||
ctxx = mq.SetHeaders(ctxx, headers.AsMap())
|
||||
}
|
||||
tm.processNode(ctxx, target, result.Payload)
|
||||
}(ctx, target, result)
|
||||
}
|
||||
}
|
||||
|
||||
func (tm *TaskManager) getConditionalEdges(node *Node, result mq.Result) []Edge {
|
||||
edges := make([]Edge, len(node.Edges))
|
||||
copy(edges, node.Edges)
|
||||
if result.ConditionStatus != "" {
|
||||
if conditions, ok := tm.dag.conditions[FromNode(result.Topic)]; ok {
|
||||
if targetNodeKey, ok := conditions[When(result.ConditionStatus)]; ok {
|
||||
if targetNode, ok := tm.dag.nodes[string(targetNodeKey)]; ok {
|
||||
edges = append(edges, Edge{From: node, To: []*Node{targetNode}})
|
||||
}
|
||||
} else if targetNodeKey, ok = conditions["default"]; ok {
|
||||
if targetNode, ok := tm.dag.nodes[string(targetNodeKey)]; ok {
|
||||
edges = append(edges, Edge{From: node, To: []*Node{targetNode}})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return edges
|
||||
}
|
||||
|
||||
func (tm *TaskManager) renderResult(ctx context.Context) mq.Result {
|
||||
var rs mq.Result
|
||||
tm.updateTS(&rs)
|
||||
tm.dag.callbackToConsumer(ctx, rs)
|
||||
tm.topic = rs.Topic
|
||||
return rs
|
||||
}
|
||||
|
||||
func (tm *TaskManager) ChangeNodeStatus(ctx context.Context, nodeID string, status NodeStatus, rs mq.Result) {
|
||||
topic := nodeID
|
||||
if !strings.Contains(nodeID, "__") {
|
||||
nodeID = getTopic(ctx, nodeID)
|
||||
} else {
|
||||
topic = strings.Split(nodeID, "__")[0]
|
||||
}
|
||||
nodeStatus, ok := tm.taskNodeStatus.Get(nodeID)
|
||||
if !ok || nodeStatus == nil {
|
||||
return
|
||||
}
|
||||
|
||||
nodeStatus.markAs(rs, status)
|
||||
switch status {
|
||||
case Completed:
|
||||
canProceed := false
|
||||
edges, ok := tm.iteratorNodes.Get(topic)
|
||||
if ok {
|
||||
if len(edges) == 0 {
|
||||
canProceed = true
|
||||
} else {
|
||||
nodeStatus.status = Processing
|
||||
nodeStatus.totalItems = 1
|
||||
nodeStatus.itemResults.Clear()
|
||||
for _, edge := range edges {
|
||||
tm.processEdge(ctx, edge, rs)
|
||||
}
|
||||
tm.iteratorNodes.Del(topic)
|
||||
}
|
||||
}
|
||||
if canProceed || !ok {
|
||||
if topic == tm.dag.startNode {
|
||||
tm.result = rs
|
||||
} else {
|
||||
tm.markParentTask(ctx, topic, nodeID, status, rs)
|
||||
}
|
||||
}
|
||||
case Failed:
|
||||
if topic == tm.dag.startNode {
|
||||
tm.result = rs
|
||||
} else {
|
||||
tm.markParentTask(ctx, topic, nodeID, status, rs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (tm *TaskManager) markParentTask(ctx context.Context, topic, nodeID string, status NodeStatus, rs mq.Result) {
	parentNodes, err := tm.dag.GetPreviousNodes(topic)
	if err != nil {
		return
	}
	var index string
	nodeParts := strings.Split(nodeID, "__")
	if len(nodeParts) == 2 {
		index = nodeParts[1]
	}
	for _, parentNode := range parentNodes {
		parentKey := fmt.Sprintf("%s__%s", parentNode.Key, index)
		parentNodeStatus, exists := tm.taskNodeStatus.Get(parentKey)
		if !exists {
			parentKey = fmt.Sprintf("%s__%s", parentNode.Key, "0")
			parentNodeStatus, exists = tm.taskNodeStatus.Get(parentKey)
		}
		if exists {
			parentNodeStatus.itemResults.Set(nodeID, rs)
			if parentNodeStatus.IsDone() {
				rt := tm.prepareResult(ctx, parentNodeStatus)
				tm.ChangeNodeStatus(ctx, parentKey, status, rt)
			}
		}
	}
}

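markParentTask keys fan-out items as "<parentKey>__<index>", falling back to index "0"; a tiny runnable illustration of that naming convention (the node name is hypothetical):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Compose the per-item key the way markParentTask does.
	parentKey := fmt.Sprintf("%s__%s", "resize", "2")
	fmt.Println(parentKey) // resize__2

	// Recover the topic the way ChangeNodeStatus does.
	parts := strings.Split(parentKey, "__")
	fmt.Println(parts[0]) // resize
}
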
func (tm *TaskManager) prepareResult(ctx context.Context, nodeStatus *taskNodeStatus) mq.Result {
	aggregatedOutput := make([]json.RawMessage, 0)
	var status mq.Status
	var topic string
	var err1 error
	if nodeStatus.totalItems == 1 {
		rs := nodeStatus.itemResults.Values()[0]
		if rs.Ctx == nil {
			rs.Ctx = ctx
		}
		return rs
	}
	nodeStatus.itemResults.ForEach(func(key string, result mq.Result) bool {
		if topic == "" {
			topic = result.Topic
			status = result.Status
		}
		if result.Error != nil {
			err1 = result.Error
			return false
		}
		var item json.RawMessage
		err := json.Unmarshal(result.Payload, &item)
		if err != nil {
			err1 = err
			return false
		}
		aggregatedOutput = append(aggregatedOutput, item)
		return true
	})
	if err1 != nil {
		return mq.HandleError(ctx, err1)
	}
	finalOutput, err := json.Marshal(aggregatedOutput)
	if err != nil {
		return mq.HandleError(ctx, err)
	}
	return mq.Result{TaskID: tm.taskID, Payload: finalOutput, Status: status, Topic: topic, Ctx: ctx}
}
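When a node fans out over several items, prepareResult merges the per-item payloads into a single JSON array; a standalone sketch of that merge step, with literal payloads standing in for itemResults:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	payloads := [][]byte{[]byte(`{"id":1}`), []byte(`{"id":2}`)}
	aggregated := make([]json.RawMessage, 0, len(payloads))
	for _, p := range payloads {
		var item json.RawMessage
		if err := json.Unmarshal(p, &item); err != nil {
			panic(err) // the real code stores the error and stops iterating
		}
		aggregated = append(aggregated, item)
	}
	out, err := json.Marshal(aggregated)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // [{"id":1},{"id":2}]
}
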
258
dag/v1/ui.go
258
dag/v1/ui.go
@@ -1,258 +0,0 @@
package v1

import (
	"fmt"
	"os"
	"os/exec"
	"strings"
)

func (tm *DAG) PrintGraph() {
	fmt.Println("DAG Graph structure:")
	for _, node := range tm.nodes {
		fmt.Printf("Node: %s (%s) -> ", node.Name, node.Key)
		if conditions, ok := tm.conditions[FromNode(node.Key)]; ok {
			var c []string
			for when, then := range conditions {
				if target, ok := tm.nodes[string(then)]; ok {
					c = append(c, fmt.Sprintf("If [%s] Then %s (%s)", when, target.Name, target.Key))
				}
			}
			fmt.Println(strings.Join(c, ", "))
		}
		var edges []string
		for _, edge := range node.Edges {
			for _, target := range edge.To {
				edges = append(edges, fmt.Sprintf("%s (%s)", target.Name, target.Key))
			}
		}
		fmt.Println(strings.Join(edges, ", "))
	}
}

func (tm *DAG) ClassifyEdges(startNodes ...string) (string, bool, error) {
	builder := &strings.Builder{}
	startNode := tm.GetStartNode()
	if len(startNodes) > 0 && startNodes[0] != "" {
		startNode = startNodes[0]
	}
	visited := make(map[string]bool)
	discoveryTime := make(map[string]int)
	finishedTime := make(map[string]int)
	timeVal := 0
	inRecursionStack := make(map[string]bool) // track nodes in the recursion stack for cycle detection
	if startNode == "" {
		firstNode := tm.findStartNode()
		if firstNode != nil {
			startNode = firstNode.Key
		}
	}
	if startNode == "" {
		return "", false, fmt.Errorf("no start node found")
	}
	hasCycle, cycleErr := tm.dfs(startNode, visited, discoveryTime, finishedTime, &timeVal, inRecursionStack, builder)
	if cycleErr != nil {
		return builder.String(), hasCycle, cycleErr
	}
	return builder.String(), hasCycle, nil
}

func (tm *DAG) dfs(v string, visited map[string]bool, discoveryTime, finishedTime map[string]int, timeVal *int, inRecursionStack map[string]bool, builder *strings.Builder) (bool, error) {
	visited[v] = true
	inRecursionStack[v] = true // mark node as part of recursion stack
	*timeVal++
	discoveryTime[v] = *timeVal
	node := tm.nodes[v]
	hasCycle := false
	var err error
	for _, edge := range node.Edges {
		for _, adj := range edge.To {
			if !visited[adj.Key] {
				builder.WriteString(fmt.Sprintf("Traversing Edge: %s -> %s\n", v, adj.Key))
				hasCycle, err := tm.dfs(adj.Key, visited, discoveryTime, finishedTime, timeVal, inRecursionStack, builder)
				if err != nil {
					return true, err
				}
				if hasCycle {
					return true, nil
				}
			} else if inRecursionStack[adj.Key] {
				cycleMsg := fmt.Sprintf("Cycle detected: %s -> %s\n", v, adj.Key)
				return true, fmt.Errorf(cycleMsg)
			}
		}
	}
	hasCycle, err = tm.handleConditionalEdges(v, visited, discoveryTime, finishedTime, timeVal, inRecursionStack, builder)
	if err != nil {
		return true, err
	}
	*timeVal++
	finishedTime[v] = *timeVal
	inRecursionStack[v] = false // remove from recursion stack after finishing processing
	return hasCycle, nil
}

func (tm *DAG) handleConditionalEdges(v string, visited map[string]bool, discoveryTime, finishedTime map[string]int, time *int, inRecursionStack map[string]bool, builder *strings.Builder) (bool, error) {
	node := tm.nodes[v]
	for when, then := range tm.conditions[FromNode(node.Key)] {
		if targetNode, ok := tm.nodes[string(then)]; ok {
			if !visited[targetNode.Key] {
				builder.WriteString(fmt.Sprintf("Traversing Conditional Edge [%s]: %s -> %s\n", when, v, targetNode.Key))
				hasCycle, err := tm.dfs(targetNode.Key, visited, discoveryTime, finishedTime, time, inRecursionStack, builder)
				if err != nil {
					return true, err
				}
				if hasCycle {
					return true, nil
				}
			} else if inRecursionStack[targetNode.Key] {
				cycleMsg := fmt.Sprintf("Cycle detected in Conditional Edge [%s]: %s -> %s\n", when, v, targetNode.Key)
				return true, fmt.Errorf(cycleMsg)
			}
		}
	}
	return false, nil
}

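dfs and handleConditionalEdges together implement the standard back-edge test: a directed graph has a cycle exactly when DFS reaches a node that is still on the recursion stack. The same idea in isolation, over a plain adjacency map with hypothetical node names:

package main

import "fmt"

func hasCycle(v string, adj map[string][]string, visited, onStack map[string]bool) bool {
	visited[v] = true
	onStack[v] = true
	for _, w := range adj[v] {
		if !visited[w] {
			if hasCycle(w, adj, visited, onStack) {
				return true
			}
		} else if onStack[w] {
			return true // back edge: w is an ancestor still being explored
		}
	}
	onStack[v] = false
	return false
}

func main() {
	adj := map[string][]string{"a": {"b"}, "b": {"c"}, "c": {"a"}}
	fmt.Println(hasCycle("a", adj, map[string]bool{}, map[string]bool{})) // true
}
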
func (tm *DAG) SaveDOTFile(filename string) error {
	dotContent := tm.ExportDOT()
	return os.WriteFile(filename, []byte(dotContent), 0644)
}

func (tm *DAG) SaveSVG(svgFile string) error {
	return tm.saveImage(svgFile, "-Tsvg")
}

func (tm *DAG) SavePNG(pngFile string) error {
	return tm.saveImage(pngFile, "-Tpng")
}

func (tm *DAG) saveImage(fileName string, arg string) error {
	dotFile := fileName[:len(fileName)-4] + ".dot"
	if err := tm.SaveDOTFile(dotFile); err != nil {
		return err
	}
	defer func() {
		_ = os.Remove(dotFile)
	}()
	cmd := exec.Command("dot", arg, dotFile, "-o", fileName)
	if err := cmd.Run(); err != nil {
		return fmt.Errorf("failed to convert image: %w", err)
	}
	return nil
}

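saveImage writes a temporary .dot file and shells out to Graphviz, so the dot binary must be on PATH; the same pipeline, reduced to a runnable sketch with a hard-coded graph:

package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	dot := `digraph "demo" { "a" -> "b"; }`
	if err := os.WriteFile("demo.dot", []byte(dot), 0644); err != nil {
		panic(err)
	}
	defer os.Remove("demo.dot")
	if err := exec.Command("dot", "-Tpng", "demo.dot", "-o", "demo.png").Run(); err != nil {
		fmt.Println("failed to convert image:", err) // usually: Graphviz not installed
		return
	}
	fmt.Println("wrote demo.png")
}
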
func (tm *DAG) ExportDOT() string {
	var sb strings.Builder
	sb.WriteString(fmt.Sprintf(`digraph "%s" {`, tm.name))
	sb.WriteString("\n")
	sb.WriteString(fmt.Sprintf(` label="%s";`, tm.name))
	sb.WriteString("\n")
	sb.WriteString(` labelloc="t";`)
	sb.WriteString("\n")
	sb.WriteString(` fontsize=20;`)
	sb.WriteString("\n")
	sb.WriteString(` node [shape=box, style="rounded,filled", fillcolor="lightgray", fontname="Arial", margin="0.2,0.1"];`)
	sb.WriteString("\n")
	sb.WriteString(` edge [fontname="Arial", fontsize=12, arrowsize=0.8];`)
	sb.WriteString("\n")
	sb.WriteString(` size="10,10";`)
	sb.WriteString("\n")
	sb.WriteString(` ratio="fill";`)
	sb.WriteString("\n")
	sortedNodes := tm.TopologicalSort()
	for _, nodeKey := range sortedNodes {
		node := tm.nodes[nodeKey]
		nodeColor := "lightblue"
		sb.WriteString(fmt.Sprintf(` "%s" [label=" %s", fillcolor="%s", id="node_%s"];`, node.Key, node.Name, nodeColor, node.Key))
		sb.WriteString("\n")
	}
	for _, nodeKey := range sortedNodes {
		node := tm.nodes[nodeKey]
		for _, edge := range node.Edges {
			var edgeStyle string
			switch edge.Type {
			case Iterator:
				edgeStyle = "dashed"
			default:
				edgeStyle = "solid"
			}
			edgeColor := "black"
			for _, to := range edge.To {
				sb.WriteString(fmt.Sprintf(` "%s" -> "%s" [label=" %s", color="%s", style=%s, fontsize=10, arrowsize=0.6];`, node.Key, to.Key, edge.Label, edgeColor, edgeStyle))
				sb.WriteString("\n")
			}
		}
	}
	for fromNodeKey, conditions := range tm.conditions {
		for when, then := range conditions {
			if toNode, ok := tm.nodes[string(then)]; ok {
				sb.WriteString(fmt.Sprintf(` "%s" -> "%s" [label=" %s", color="purple", style=dotted, fontsize=10, arrowsize=0.6];`, fromNodeKey, toNode.Key, when))
				sb.WriteString("\n")
			}
		}
	}
	for _, nodeKey := range sortedNodes {
		node := tm.nodes[nodeKey]
		if node.processor != nil {
			subDAG, _ := isDAGNode(node)
			if subDAG != nil {
				sb.WriteString(fmt.Sprintf(` subgraph "cluster_%s" {`, subDAG.name))
				sb.WriteString("\n")
				sb.WriteString(fmt.Sprintf(` label=" %s";`, subDAG.name))
				sb.WriteString("\n")
				sb.WriteString(` style=dashed;`)
				sb.WriteString("\n")
				sb.WriteString(` bgcolor="lightgray";`)
				sb.WriteString("\n")
				sb.WriteString(` node [shape=rectangle, style="filled", fillcolor="lightblue", fontname="Arial", margin="0.2,0.1"];`)
				sb.WriteString("\n")
				for subNodeKey, subNode := range subDAG.nodes {
					sb.WriteString(fmt.Sprintf(` "%s" [label=" %s"];`, subNodeKey, subNode.Name))
					sb.WriteString("\n")
				}
				for subNodeKey, subNode := range subDAG.nodes {
					for _, edge := range subNode.Edges {
						for _, to := range edge.To {
							sb.WriteString(fmt.Sprintf(` "%s" -> "%s" [label=" %s", color="black", style=solid, arrowsize=0.6];`, subNodeKey, to.Key, edge.Label))
							sb.WriteString("\n")
						}
					}
				}
				sb.WriteString(` }`)
				sb.WriteString("\n")
				sb.WriteString(fmt.Sprintf(` "%s" -> "%s" [label=" %s", color="black", style=solid, arrowsize=0.6];`, node.Key, subDAG.startNode, subDAG.name))
				sb.WriteString("\n")
			}
		}
	}
	sb.WriteString(`}`)
	sb.WriteString("\n")
	return sb.String()
}

func (tm *DAG) TopologicalSort() (stack []string) {
	visited := make(map[string]bool)
	for _, node := range tm.nodes {
		if !visited[node.Key] {
			tm.topologicalSortUtil(node.Key, visited, &stack)
		}
	}
	for i, j := 0, len(stack)-1; i < j; i, j = i+1, j-1 {
		stack[i], stack[j] = stack[j], stack[i]
	}
	return
}

func (tm *DAG) topologicalSortUtil(v string, visited map[string]bool, stack *[]string) {
	visited[v] = true
	node := tm.nodes[v]
	for _, edge := range node.Edges {
		for _, to := range edge.To {
			if !visited[to.Key] {
				tm.topologicalSortUtil(to.Key, visited, stack)
			}
		}
	}
	*stack = append(*stack, v)
}
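TopologicalSort is reverse DFS postorder, which yields a valid ordering only on an acyclic graph (hence the ClassifyEdges cycle check); the core idea reduced to a runnable sketch with hypothetical node names:

package main

import "fmt"

func main() {
	adj := map[string][]string{"fetch": {"parse"}, "parse": {"store"}}
	visited := map[string]bool{}
	var order []string
	var visit func(v string)
	visit = func(v string) {
		visited[v] = true
		for _, w := range adj[v] {
			if !visited[w] {
				visit(w)
			}
		}
		order = append(order, v) // postorder: children first
	}
	for _, v := range []string{"fetch", "parse", "store"} {
		if !visited[v] {
			visit(v)
		}
	}
	for i, j := 0, len(order)-1; i < j; i, j = i+1, j-1 {
		order[i], order[j] = order[j], order[i] // reverse => topological order
	}
	fmt.Println(order) // [fetch parse store]
}
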
@@ -1,57 +0,0 @@
package v1

import (
	"context"
	"time"

	"github.com/oarkflow/mq"
	"github.com/oarkflow/mq/storage"
	"github.com/oarkflow/mq/storage/memory"
)

type taskNodeStatus struct {
	node        string
	itemResults storage.IMap[string, mq.Result]
	status      NodeStatus
	result      mq.Result
	totalItems  int
}

func newNodeStatus(node string) *taskNodeStatus {
	return &taskNodeStatus{
		node:        node,
		itemResults: memory.New[string, mq.Result](),
		status:      Pending,
	}
}

func (t *taskNodeStatus) IsDone() bool {
	return t.itemResults.Size() >= t.totalItems
}

func (t *taskNodeStatus) markAs(rs mq.Result, status NodeStatus) {
	t.result = rs
	t.status = status
}

func isDAGNode(node *Node) (*DAG, bool) {
	switch node := node.processor.(type) {
	case *DAG:
		return node, true
	default:
		return nil, false
	}
}

func (tm *TaskManager) updateTS(result *mq.Result) {
	result.CreatedAt = tm.createdAt
	result.ProcessedAt = time.Now()
	result.Latency = time.Since(tm.createdAt).String()
}

func getTopic(ctx context.Context, topic string) string {
	if index, ok := mq.GetHeader(ctx, "index"); ok && index != "" {
		topic = index
	}
	return topic
}
@@ -1,51 +0,0 @@
package v1

import (
	"sync"
)

type WaitGroup struct {
	cond    *sync.Cond
	counter int
	sync.Mutex
}

func NewWaitGroup() *WaitGroup {
	awg := &WaitGroup{}
	awg.cond = sync.NewCond(&awg.Mutex)
	return awg
}

// Add increments the counter for an async task
func (awg *WaitGroup) Add(delta int) {
	awg.Lock()
	awg.counter += delta
	awg.Unlock()
}

// Reset sets the counter to zero and notifies waiting goroutines
func (awg *WaitGroup) Reset() {
	awg.Lock()
	awg.counter = 0
	awg.cond.Broadcast() // Notify any waiting goroutines that we're done
	awg.Unlock()
}

// Done decrements the counter when a task is completed
func (awg *WaitGroup) Done() {
	awg.Lock()
	awg.counter--
	if awg.counter == 0 {
		awg.cond.Broadcast() // Notify all waiting goroutines
	}
	awg.Unlock()
}

// Wait blocks until the counter is zero
func (awg *WaitGroup) Wait() {
	awg.Lock()
	for awg.counter > 0 {
		awg.cond.Wait() // Wait for notification
	}
	awg.Unlock()
}
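This condition-variable variant behaves like sync.WaitGroup but adds Reset, which zeroes the counter and releases all waiters at once; a minimal usage sketch (a fragment, assuming the type above is in scope):

awg := NewWaitGroup()
for i := 0; i < 3; i++ {
	awg.Add(1)
	go func(n int) {
		defer awg.Done() // the final Done broadcasts to waiters
		fmt.Println("worker", n, "finished")
	}(i)
}
awg.Wait() // blocks until the counter reaches zero; Reset() would release it early
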
@@ -1,32 +0,0 @@
package v1

import (
	"encoding/json"
	"github.com/oarkflow/mq/sio"
)

func WsEvents(s *sio.Server) {
	s.On("join", join)
	s.On("message", message)
}

func join(s *sio.Socket, data []byte) {
	// just one room at a time for the simple example
	currentRooms := s.GetRooms()
	for _, room := range currentRooms {
		s.Leave(room)
	}
	s.Join(string(data))
	s.Emit("joinedRoom", string(data))
}

type msg struct {
	Room    string
	Message string
}

func message(s *sio.Socket, data []byte) {
	var m msg
	json.Unmarshal(data, &m)
	s.ToRoom(m.Room, "message", m.Message)
}
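The message handler unmarshals the raw event data into msg, so clients are expected to send a JSON object with Room and Message; a runnable sketch of building that payload (the room name is hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

type msg struct {
	Room    string
	Message string
}

func main() {
	payload, err := json.Marshal(msg{Room: "lobby", Message: "hello"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(payload)) // {"Room":"lobby","Message":"hello"}
}
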
5
go.mod
5
go.mod
@@ -12,6 +12,7 @@ require (
	github.com/oarkflow/form v0.0.0-20241203111156-b1be5636af43
	github.com/oarkflow/jet v0.0.4
	github.com/oarkflow/json v0.0.13
	github.com/oarkflow/log v1.0.79
	github.com/oarkflow/xid v1.2.5
	github.com/prometheus/client_golang v1.20.5
	golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
@@ -35,6 +36,6 @@ require (
	github.com/valyala/bytebufferpool v1.0.0 // indirect
	github.com/valyala/fasthttp v1.51.0 // indirect
	github.com/valyala/tcplisten v1.0.0 // indirect
	golang.org/x/sys v0.29.0 // indirect
	google.golang.org/protobuf v1.36.3 // indirect
	golang.org/x/sys v0.30.0 // indirect
	google.golang.org/protobuf v1.36.5 // indirect
)

20
go.sum
20
go.sum
@@ -41,6 +41,8 @@ github.com/oarkflow/jet v0.0.4 h1:rs0nTzodye/9zhrSX7FlR80Gjaty6ei2Ln0pmaUrdwg=
github.com/oarkflow/jet v0.0.4/go.mod h1:YXIc47aYyx1xKpnmuz1Z9o88cxxa47r7X3lfUAxZ0Qg=
github.com/oarkflow/json v0.0.13 h1:/ZKW924/v4U1ht34WY7rj/GC/qW9+10IiV5+MR2vO0A=
github.com/oarkflow/json v0.0.13/go.mod h1:S5BZA4/rM87+MY8mFrga3jISzxCL9RtLE6xHSk63VxI=
github.com/oarkflow/log v1.0.79 h1:DxhtkBGG+pUu6cudSVw5g75FbKEQJkij5w7n5AEN00M=
github.com/oarkflow/log v1.0.79/go.mod h1:U/4chr1DyOiQvS6JiQpjYTCJhK7RGR8xrXPsGlouLzM=
github.com/oarkflow/xid v1.2.5 h1:6RcNJm9+oZ/B647gkME9trCzhpxGQaSdNoD56Vmkeho=
github.com/oarkflow/xid v1.2.5/go.mod h1:jG4YBh+swbjlWApGWDBYnsJEa7hi3CCpmuqhB3RAxVo=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@@ -49,16 +51,14 @@ github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.60.1 h1:FUas6GcOw66yB/73KC+BOZoFJmbo/1pojoILArPAaSc=
github.com/prometheus/common v0.60.1/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/valyala/fasthttp v1.51.0 h1:8b30A5JlZ6C7AS81RsWjYMQmrZG6feChmgAolCl1SqA=
@@ -69,15 +69,11 @@ golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBn
golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU=
google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=

60
logger/log.go
Normal file
60
logger/log.go
Normal file
@@ -0,0 +1,60 @@
// log.go
package logger

import "github.com/oarkflow/log"

// DefaultLogger implements the Logger interface using phuslu/log.
type DefaultLogger struct {
	logger *log.Logger
}

func NewDefaultLogger(loggers ...*log.Logger) *DefaultLogger {
	var logger *log.Logger
	if len(loggers) > 0 {
		logger = loggers[0]
	} else {
		logger = &log.DefaultLogger
	}
	return &DefaultLogger{logger: logger}
}

// Debug logs a debug-level message.
func (l *DefaultLogger) Debug(msg string, fields ...Field) {
	if l.logger == nil {
		return
	}
	l.logger.Debug().Map(flattenFields(fields)).Msg(msg)
}

// Info logs an info-level message.
func (l *DefaultLogger) Info(msg string, fields ...Field) {
	if l.logger == nil {
		return
	}
	l.logger.Info().Map(flattenFields(fields)).Msg(msg)
}

// Warn logs a warn-level message.
func (l *DefaultLogger) Warn(msg string, fields ...Field) {
	if l.logger == nil {
		return
	}
	l.logger.Warn().Map(flattenFields(fields)).Msg(msg)
}

// Error logs an error-level message.
func (l *DefaultLogger) Error(msg string, fields ...Field) {
	if l.logger == nil {
		return
	}
	l.logger.Error().Map(flattenFields(fields)).Msg(msg)
}

// flattenFields converts a slice of Field into a map of key/value pairs.
func flattenFields(fields []Field) map[string]any {
	kv := make(map[string]any)
	for _, field := range fields {
		kv[field.Key] = field.Value
	}
	return kv
}
15
logger/logger.go
Normal file
15
logger/logger.go
Normal file
@@ -0,0 +1,15 @@
package logger

// Field represents a key-value pair used for structured logging.
type Field struct {
	Key   string
	Value any
}

// Logger is an interface that provides logging at various levels.
type Logger interface {
	Debug(msg string, fields ...Field)
	Info(msg string, fields ...Field)
	Warn(msg string, fields ...Field)
	Error(msg string, fields ...Field)
}
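A short sketch of the interface in use, combining NewDefaultLogger from log.go above with structured fields (the field values are hypothetical):

package main

import "github.com/oarkflow/mq/logger"

func main() {
	var l logger.Logger = logger.NewDefaultLogger()
	l.Info("task completed",
		logger.Field{Key: "taskID", Value: "task-123"},
		logger.Field{Key: "durationMs", Value: 42},
	)
}
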
15
options.go
15
options.go
@@ -4,6 +4,7 @@ import (
	"context"
	"encoding/json"
	"fmt"
	"github.com/oarkflow/mq/logger"
	"runtime"
	"time"

@@ -128,6 +129,7 @@ type Options struct {
	cleanTaskOnComplete  bool
	enableWorkerPool     bool
	respondPendingResult bool
	logger               logger.Logger
}

func (o *Options) SetSyncMode(sync bool) {
@@ -138,6 +140,10 @@ func (o *Options) NumOfWorkers() int {
	return o.numOfWorkers
}

func (o *Options) Logger() logger.Logger {
	return o.logger
}

func (o *Options) Storage() TaskStorage {
	return o.storage
}
@@ -156,7 +162,7 @@ func (o *Options) MaxMemoryLoad() int64 {

func defaultOptions() *Options {
	return &Options{
		brokerAddr:           ":8080",
		brokerAddr:           ":8081",
		maxRetries:           5,
		respondPendingResult: true,
		initialDelay:         2 * time.Second,
@@ -166,6 +172,7 @@ func defaultOptions() *Options {
		numOfWorkers:  runtime.NumCPU(),
		maxMemoryLoad: 5000000,
		storage:       NewMemoryTaskStorage(10 * time.Minute),
		logger:        logger.NewDefaultLogger(),
	}
}

@@ -186,6 +193,12 @@ func WithNotifyResponse(callback Callback) Option {
	}
}

func WithLogger(log logger.Logger) Option {
	return func(opts *Options) {
		opts.logger = log
	}
}

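WithLogger lets callers swap in any Logger implementation when constructing a broker; a sketch of the call site, assuming the NewBroker constructor from this package (a fragment, not a full program):

b := mq.NewBroker(
	mq.WithLogger(logger.NewDefaultLogger()), // any logger.Logger works; this is the default anyway
)
_ = b
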

func WithWorkerPool(queueSize, numOfWorkers int, maxMemoryLoad int64) Option {
	return func(opts *Options) {
		opts.enableWorkerPool = true