package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/oarkflow/mq"
	"github.com/oarkflow/mq/dag"
)

// StartProcessor - Initial node that receives and processes the input data
type StartProcessor struct {
	dag.Operation
}

func (p *StartProcessor) ProcessTask(ctx context.Context, task *mq.Task) mq.Result {
	fmt.Printf("[START] Processing task %s - Initial data processing\n", task.ID)

	// Simulate initial processing work
	time.Sleep(100 * time.Millisecond)

	// Create detailed result with node-specific information
	processingResult := map[string]interface{}{
		"node_name":    "start",
		"task_id":      task.ID,
		"processed_at": time.Now().Format("15:04:05"),
		"duration_ms":  100,
		"status":       "success",
		"action":       "initialized_data",
		"data_size":    len(fmt.Sprintf("%v", task.Payload)),
	}

	// Initialize or update node results in context
	var nodeResults map[string]interface{}
	if existing, ok := ctx.Value("nodeResults").(map[string]interface{}); ok {
		nodeResults = existing
	} else {
		nodeResults = make(map[string]interface{})
	}
	nodeResults["start"] = processingResult

	// Create new context with updated results
	newCtx := context.WithValue(ctx, "nodeResults", nodeResults)

	fmt.Printf("[START] Node completed - data initialized for task %s\n", task.ID)

	return mq.Result{
		TaskID:  task.ID,
		Status:  mq.Completed,
		Payload: task.Payload,
		Ctx:     newCtx,
	}
}

// ProcessorNode - Processes and transforms the data
type ProcessorNode struct {
	dag.Operation
}

func (p *ProcessorNode) ProcessTask(ctx context.Context, task *mq.Task) mq.Result {
	fmt.Printf("[PROCESS] Processing task %s - Data transformation\n", task.ID)

	// Simulate processing work
	time.Sleep(100 * time.Millisecond)

	processingResult := map[string]interface{}{
		"node_name":    "process",
		"task_id":      task.ID,
		"processed_at": time.Now().Format("15:04:05"),
		"duration_ms":  100,
		"status":       "success",
		"action":       "transformed_data",
		"data_size":    len(fmt.Sprintf("%v", task.Payload)),
	}

	// Update node results in context
	var nodeResults map[string]interface{}
	if existing, ok := ctx.Value("nodeResults").(map[string]interface{}); ok {
		nodeResults = existing
	} else {
		nodeResults = make(map[string]interface{})
	}
	nodeResults["process"] = processingResult

	newCtx := context.WithValue(ctx, "nodeResults", nodeResults)

	fmt.Printf("[PROCESS] Node completed - data transformed for task %s\n", task.ID)

	return mq.Result{
		TaskID:  task.ID,
		Status:  mq.Completed,
		Payload: task.Payload,
		Ctx:     newCtx,
	}
}

// ValidatorNode - Validates the processed data
type ValidatorNode struct {
	dag.Operation
}

func (p *ValidatorNode) ProcessTask(ctx context.Context, task *mq.Task) mq.Result {
	fmt.Printf("[VALIDATE] Processing task %s - Data validation\n", task.ID)

	// Simulate validation work
	time.Sleep(100 * time.Millisecond)

	processingResult := map[string]interface{}{
		"node_name":    "validate",
		"task_id":      task.ID,
		"processed_at": time.Now().Format("15:04:05"),
		"duration_ms":  100,
		"status":       "success",
		"action":       "validated_data",
		"validation":   "passed",
		"data_size":    len(fmt.Sprintf("%v", task.Payload)),
	}

	// Update node results in context
	var nodeResults map[string]interface{}
	if existing, ok := ctx.Value("nodeResults").(map[string]interface{}); ok {
		nodeResults = existing
	} else {
		nodeResults = make(map[string]interface{})
	}
	nodeResults["validate"] = processingResult

	newCtx := context.WithValue(ctx, "nodeResults", nodeResults)

	fmt.Printf("[VALIDATE] Node completed - data validated for task %s\n", task.ID)

	return mq.Result{
		TaskID:  task.ID,
		Status:  mq.Completed,
		Payload: task.Payload,
		Ctx:     newCtx,
	}
}
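// Each ProcessTask above repeats the same context bookkeeping: read the
// "nodeResults" map, add this node's entry, and re-wrap the context. As a
// minimal sketch of how that pattern could be factored out (the helper name
// is hypothetical and nothing in this example calls it):
func recordNodeResult(ctx context.Context, nodeName string, result map[string]interface{}) context.Context {
	// Reuse the accumulated map when a previous node already created it.
	nodeResults, ok := ctx.Value("nodeResults").(map[string]interface{})
	if !ok {
		nodeResults = make(map[string]interface{})
	}
	nodeResults[nodeName] = result
	return context.WithValue(ctx, "nodeResults", nodeResults)
}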
// EndProcessor - Final node that completes the processing
type EndProcessor struct {
	dag.Operation
}

func (p *EndProcessor) ProcessTask(ctx context.Context, task *mq.Task) mq.Result {
	fmt.Printf("[END] Processing task %s - Final processing\n", task.ID)

	// Simulate final processing work
	time.Sleep(100 * time.Millisecond)

	processingResult := map[string]interface{}{
		"node_name":    "end",
		"task_id":      task.ID,
		"processed_at": time.Now().Format("15:04:05"),
		"duration_ms":  100,
		"status":       "success",
		"action":       "finalized_data",
		"data_size":    len(fmt.Sprintf("%v", task.Payload)),
	}

	// Update node results in context
	var nodeResults map[string]interface{}
	if existing, ok := ctx.Value("nodeResults").(map[string]interface{}); ok {
		nodeResults = existing
	} else {
		nodeResults = make(map[string]interface{})
	}
	nodeResults["end"] = processingResult

	newCtx := context.WithValue(ctx, "nodeResults", nodeResults)

	fmt.Printf("[END] Node completed - processing finished for task %s\n", task.ID)

	return mq.Result{
		TaskID:  task.ID,
		Status:  mq.Completed,
		Payload: task.Payload,
		Ctx:     newCtx,
	}
}

func main() {
	// Create a new DAG with enhanced features
	d := dag.NewDAG("enhanced-example", "example", finalResultCallback)

	// Build the DAG structure (avoiding cycles)
	buildDAG(d)
	fmt.Println("DAG validation passed! (cycle-free structure)")

	// Set up basic API endpoints
	setupAPI(d)

	// Process some tasks
	processTasks(d)

	// Display basic statistics
	displayStatistics(d)

	// Start HTTP server for API
	fmt.Println("Starting HTTP server on :8080")
	fmt.Println("Visit http://localhost:8080 for the dashboard")
	log.Fatal(http.ListenAndServe(":8080", nil))
}

func finalResultCallback(taskID string, result mq.Result) {
	fmt.Printf("Task %s completed with status: %v\n", taskID, result.Status)
}

func buildDAG(d *dag.DAG) {
	// Add nodes in a linear flow to avoid cycles - using proper processor types
	d.AddNode(dag.Function, "Start Node", "start", &StartProcessor{}, true)
	d.AddNode(dag.Function, "Process Node", "process", &ProcessorNode{})
	d.AddNode(dag.Function, "Validate Node", "validate", &ValidatorNode{})
	d.AddNode(dag.Function, "End Node", "end", &EndProcessor{})

	// Add edges in a linear fashion (no cycles)
	d.AddEdge(dag.Simple, "start-to-process", "start", "process")
	d.AddEdge(dag.Simple, "process-to-validate", "process", "validate")
	d.AddEdge(dag.Simple, "validate-to-end", "validate", "end")

	fmt.Println("DAG structure built successfully")
}
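// As a minimal, illustrative sketch of submitting a single task to the DAG
// directly (the function name and payload below are hypothetical, and nothing
// in this example calls it; it simply mirrors how the /api/process handler
// drives d.Process):
func runOneTask(d *dag.DAG) {
	payload, _ := json.Marshal(map[string]interface{}{
		"id":      "demo-task",
		"payload": "demo-data",
	})
	// Seed the context so the processors can accumulate per-node results.
	ctx := context.WithValue(context.Background(), "nodeResults", make(map[string]interface{}))
	result := d.Process(ctx, payload)
	fmt.Printf("Demo task finished with status: %v\n", result.Status)
}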
func setupAPI(d *dag.DAG) {
	// Basic status endpoint
	http.HandleFunc("/api/status", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		status := map[string]interface{}{
			"status":    "running",
			"dag_name":  d.GetType(),
			"timestamp": time.Now(),
		}
		json.NewEncoder(w).Encode(status)
	})

	// Task metrics endpoint
	http.HandleFunc("/api/metrics", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		metrics := d.GetTaskMetrics()
		// Create a safe copy to avoid lock issues
		safeMetrics := map[string]interface{}{
			"completed":   metrics.Completed,
			"failed":      metrics.Failed,
			"cancelled":   metrics.Cancelled,
			"not_started": metrics.NotStarted,
			"queued":      metrics.Queued,
		}
		json.NewEncoder(w).Encode(safeMetrics)
	})

	// API endpoint to process a task and return results
	http.HandleFunc("/api/process", func(w http.ResponseWriter, r *http.Request) {
		taskData := map[string]interface{}{
			"id":        fmt.Sprintf("api-task-%d", time.Now().UnixNano()),
			"payload":   "api-data",
			"timestamp": time.Now(),
		}
		payload, _ := json.Marshal(taskData)
		fmt.Printf("Processing API request with payload: %s\n", string(payload))

		// Initialize context with empty node results
		ctx := context.WithValue(context.Background(), "nodeResults", make(map[string]interface{}))
		result := d.Process(ctx, payload)
		fmt.Printf("Processing completed. Status: %v\n", result.Status)

		// Get the actual execution order from DAG topology
		executionOrder := d.TopologicalSort()
		fmt.Printf("DAG execution order: %v\n", executionOrder)

		resp := map[string]interface{}{
			"overall_result":  fmt.Sprintf("%v", result.Status),
			"task_id":         result.TaskID,
			"payload":         result.Payload,
			"timestamp":       time.Now(),
			"execution_order": executionOrder, // Include the actual execution order
		}
		if result.Error != nil {
			resp["error"] = result.Error.Error()
			fmt.Printf("Error occurred: %v\n", result.Error)
		}

		// Extract node results from context
		if nodeResults, ok := result.Ctx.Value("nodeResults").(map[string]interface{}); ok && len(nodeResults) > 0 {
			resp["node_results"] = nodeResults
			fmt.Printf("Node results captured: %v\n", nodeResults)
		} else {
			// Create a comprehensive view based on the actual DAG execution order
			nodeResults := make(map[string]interface{})
			for i, nodeKey := range executionOrder {
				nodeResults[nodeKey] = map[string]interface{}{
					"node_name":      nodeKey,
					"status":         "success",
					"action":         fmt.Sprintf("executed_step_%d", i+1),
					"executed_at":    time.Now().Format("15:04:05"),
					"execution_step": i + 1,
				}
			}
			resp["node_results"] = nodeResults
			fmt.Printf("📝 Created node results based on DAG topology: %v\n", nodeResults)
		}

		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(resp)
	})

	// DAG Diagram endpoint - generates and serves PNG diagram
	http.HandleFunc("/api/diagram", func(w http.ResponseWriter, r *http.Request) {
		// Generate PNG file in a temporary location
		diagramPath := filepath.Join(os.TempDir(), fmt.Sprintf("dag-%d.png", time.Now().UnixNano()))
		fmt.Printf("Generating DAG diagram at: %s\n", diagramPath)

		// Generate the PNG diagram
		if err := d.SavePNG(diagramPath); err != nil {
			fmt.Printf("Failed to generate diagram: %v\n", err)
			http.Error(w, fmt.Sprintf("Failed to generate diagram: %v", err), http.StatusInternalServerError)
			return
		}
		// Ensure cleanup
		defer func() {
			if err := os.Remove(diagramPath); err != nil {
				fmt.Printf("Failed to cleanup diagram file: %v\n", err)
			}
		}()

		// Serve the PNG file
		w.Header().Set("Content-Type", "image/png")
		w.Header().Set("Cache-Control", "no-cache")
		http.ServeFile(w, r, diagramPath)
		fmt.Printf("DAG diagram served successfully\n")
	})

	// DAG DOT source endpoint - returns the DOT source code
	http.HandleFunc("/api/dot", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/plain")
		dotContent := d.ExportDOT()
		w.Write([]byte(dotContent))
	})

	// DAG structure and execution order endpoint
	http.HandleFunc("/api/structure", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		executionOrder := d.TopologicalSort()
		structure := map[string]interface{}{
			"execution_order": executionOrder,
			"dag_name":        d.GetType(),
			"dag_key":         d.GetKey(),
			"total_nodes":     len(executionOrder),
			"timestamp":       time.Now(),
		}
		json.NewEncoder(w).Encode(structure)
	})
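	// Once the server started in main() is running, the endpoints above can be
	// exercised from the command line, for example (illustrative only):
	//
	//   curl http://localhost:8080/api/process              # run the DAG and return per-node results
	//   curl http://localhost:8080/api/structure            # execution order and node count as JSON
	//   curl http://localhost:8080/api/dot                  # DOT source for the DAG
	//   curl -o dag.png http://localhost:8080/api/diagram   # PNG rendering of the DAG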
	// Root dashboard
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "text/html")
		fmt.Fprintf(w, `
DAG is running successfully!
Flow: Start -> Process -> Validate -> End
Type: Linear (Cycle-free)
This structure ensures no circular dependencies while demonstrating the enhanced features.