Mirror of https://github.com/oarkflow/mq.git (synced 2025-10-05 16:06:55 +08:00)

Commit: update
dag/dag.go (175 lines changed)

@@ -40,6 +40,7 @@ type Node struct {
 	NodeType NodeType
 	isReady  bool
 	Timeout  time.Duration // ...new field for node-level timeout...
+	Debug    bool          // Individual node debug mode
 }

 // SetTimeout allows setting a maximum processing duration for the node.
@@ -47,6 +48,16 @@ func (n *Node) SetTimeout(d time.Duration) {
 	n.Timeout = d
 }

+// SetDebug enables or disables debug mode for this specific node.
+func (n *Node) SetDebug(enabled bool) {
+	n.Debug = enabled
+}
+
+// IsDebugEnabled checks if debug is enabled for this node or globally.
+func (n *Node) IsDebugEnabled(dagDebug bool) bool {
+	return n.Debug || dagDebug
+}
+
 type Edge struct {
 	From       *Node
 	FromSource string
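
A minimal sketch (not part of the commit) of how the two flags combine; the import path and the directly constructed node are assumptions made only for illustration:

package main

import (
	"fmt"

	"github.com/oarkflow/mq/dag" // assumed import path for this repository's dag package
)

func main() {
	n := &dag.Node{ID: "validate"} // hypothetical node, built directly only for this sketch
	n.SetDebug(true)
	fmt.Println(n.IsDebugEnabled(false)) // true: the per-node flag alone enables debug
	n.SetDebug(false)
	fmt.Println(n.IsDebugEnabled(true))  // true: the DAG-wide flag also enables it
	fmt.Println(n.IsDebugEnabled(false)) // false: neither flag is set
}
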
@@ -100,6 +111,9 @@ type DAG struct {
 	// Circuit breakers per node
 	circuitBreakers   map[string]*CircuitBreaker
 	circuitBreakersMu sync.RWMutex
+
+	// Debug configuration
+	debug bool // Global debug mode for the entire DAG
 }

 // SetPreProcessHook configures a function to be called before each node is processed.
@@ -112,6 +126,63 @@ func (tm *DAG) SetPostProcessHook(hook func(ctx context.Context, node *Node, tas
 	tm.PostProcessHook = hook
 }

+// SetDebug enables or disables debug mode for the entire DAG.
+func (tm *DAG) SetDebug(enabled bool) {
+	tm.debug = enabled
+}
+
+// IsDebugEnabled returns whether debug mode is enabled for the DAG.
+func (tm *DAG) IsDebugEnabled() bool {
+	return tm.debug
+}
+
+// SetNodeDebug enables or disables debug mode for a specific node.
+func (tm *DAG) SetNodeDebug(nodeID string, enabled bool) error {
+	node, exists := tm.nodes.Get(nodeID)
+	if !exists {
+		return fmt.Errorf("node with ID '%s' not found", nodeID)
+	}
+	node.SetDebug(enabled)
+	return nil
+}
+
+// SetAllNodesDebug enables or disables debug mode for all nodes in the DAG.
+func (tm *DAG) SetAllNodesDebug(enabled bool) {
+	tm.nodes.ForEach(func(nodeID string, node *Node) bool {
+		node.SetDebug(enabled)
+		return true
+	})
+}
+
+// GetDebugInfo returns debug information about the DAG and its nodes.
+func (tm *DAG) GetDebugInfo() map[string]interface{} {
+	debugInfo := map[string]interface{}{
+		"dag_name":          tm.name,
+		"dag_key":           tm.key,
+		"dag_debug_enabled": tm.debug,
+		"start_node":        tm.startNode,
+		"has_page_node":     tm.hasPageNode,
+		"is_paused":         tm.paused,
+		"nodes":             make(map[string]map[string]interface{}),
+	}
+
+	nodesInfo := debugInfo["nodes"].(map[string]map[string]interface{})
+	tm.nodes.ForEach(func(nodeID string, node *Node) bool {
+		nodesInfo[nodeID] = map[string]interface{}{
+			"id":            node.ID,
+			"label":         node.Label,
+			"type":          node.NodeType.String(),
+			"debug_enabled": node.Debug,
+			"has_timeout":   node.Timeout > 0,
+			"timeout":       node.Timeout.String(),
+			"edge_count":    len(node.Edges),
+		}
+		return true
+	})
+
+	return debugInfo
+}
+
 func NewDAG(name, key string, finalResultCallback func(taskID string, result mq.Result), opts ...mq.Option) *DAG {
 	callback := func(ctx context.Context, result mq.Result) error { return nil }
 	d := &DAG{
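
A short usage sketch of the new DAG-level switches (this fragment assumes a *DAG named flow whose nodes are already registered; the node ID "validate" is a placeholder):

flow.SetDebug(true)                            // DAG-wide debug
if err := flow.SetNodeDebug("validate", true); err != nil {
	log.Println(err)                           // returned when the node ID is unknown
}
flow.SetAllNodesDebug(false)                   // clear the per-node flags in one pass

info := flow.GetDebugInfo()                    // dag_name, dag_key, per-node summaries, ...
fmt.Printf("debug info: %+v\n", info)
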
@@ -284,6 +355,15 @@ func (tm *DAG) AddNode(nodeType NodeType, name, nodeID string, handler mq.Proces
 	return tm
 }

+// AddNodeWithDebug adds a node to the DAG with optional debug mode enabled
+func (tm *DAG) AddNodeWithDebug(nodeType NodeType, name, nodeID string, handler mq.Processor, debug bool, startNode ...bool) *DAG {
+	dag := tm.AddNode(nodeType, name, nodeID, handler, startNode...)
+	if dag.Error == nil {
+		dag.SetNodeDebug(nodeID, debug)
+	}
+	return dag
+}
+
 func (tm *DAG) AddDeferredNode(nodeType NodeType, name, key string, firstNode ...bool) error {
 	if tm.server.SyncMode() {
 		return fmt.Errorf("DAG cannot have deferred node in Sync Mode")
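
AddNodeWithDebug is a convenience wrapper around AddNode followed by SetNodeDebug; a hedged sketch, where nodeType and emailProcessor stand in for values the caller already has:

flow.AddNodeWithDebug(nodeType, "Send Email", "send-email", emailProcessor, true)
if flow.Error != nil {
	log.Fatal(flow.Error) // AddNode reports registration failures through flow.Error
}
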
@@ -361,6 +441,11 @@ func (tm *DAG) ProcessTask(ctx context.Context, task *mq.Task) mq.Result {
 	// Enhanced processing with monitoring and rate limiting
 	startTime := time.Now()
+
+	// Debug logging at DAG level
+	if tm.IsDebugEnabled() {
+		tm.debugDAGTaskStart(ctx, task, startTime)
+	}

 	// Record task start in monitoring
 	if tm.monitor != nil {
 		tm.monitor.metrics.RecordTaskStart(task.ID)
@@ -402,6 +487,11 @@ func (tm *DAG) ProcessTask(ctx context.Context, task *mq.Task) mq.Result {
 	// Update internal metrics
 	tm.updateTaskMetrics(task.ID, result, duration)
+
+	// Debug logging at DAG level for task completion
+	if tm.IsDebugEnabled() {
+		tm.debugDAGTaskComplete(ctx, task, result, duration, startTime)
+	}

 	// Trigger webhooks if configured
 	if tm.webhookManager != nil {
 		event := WebhookEvent{
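
With the DAG-level flag on, ProcessTask brackets the normal pipeline with the start and completion records shown above; a hedged sketch (task construction follows the mq.NewTask call visible later in this commit, and the arguments are placeholders):

flow.SetDebug(true)
task := mq.NewTask("task-1", payload, "start-node") // placeholder ID, payload bytes, and node/topic
result := flow.ProcessTask(context.Background(), task)
if result.Error != nil {
	log.Println("task failed:", result.Error)
}
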
@@ -1608,3 +1698,88 @@ func (h *ActivityAlertHandler) HandleAlert(alert Alert) error {
 	}
 	return nil
 }
+
+// debugDAGTaskStart logs debug information when a task starts at DAG level
+func (tm *DAG) debugDAGTaskStart(ctx context.Context, task *mq.Task, startTime time.Time) {
+	var payload map[string]any
+	if err := json.Unmarshal(task.Payload, &payload); err != nil {
+		payload = map[string]any{"raw_payload": string(task.Payload)}
+	}
+	tm.Logger().Info("🚀 [DEBUG] DAG task processing started",
+		logger.Field{Key: "dag_name", Value: tm.name},
+		logger.Field{Key: "dag_key", Value: tm.key},
+		logger.Field{Key: "task_id", Value: task.ID},
+		logger.Field{Key: "task_topic", Value: task.Topic},
+		logger.Field{Key: "timestamp", Value: startTime.Format(time.RFC3339)},
+		logger.Field{Key: "start_node", Value: tm.startNode},
+		logger.Field{Key: "has_page_node", Value: tm.hasPageNode},
+		logger.Field{Key: "is_paused", Value: tm.paused},
+		logger.Field{Key: "payload_size", Value: len(task.Payload)},
+		logger.Field{Key: "payload_preview", Value: tm.getDAGPayloadPreview(payload)},
+		logger.Field{Key: "debug_enabled", Value: tm.debug},
+	)
+}
+
+// debugDAGTaskComplete logs debug information when a task completes at DAG level
+func (tm *DAG) debugDAGTaskComplete(ctx context.Context, task *mq.Task, result mq.Result, duration time.Duration, startTime time.Time) {
+	var resultPayload map[string]any
+	if len(result.Payload) > 0 {
+		if err := json.Unmarshal(result.Payload, &resultPayload); err != nil {
+			resultPayload = map[string]any{"raw_payload": string(result.Payload)}
+		}
+	}
+
+	tm.Logger().Info("🏁 [DEBUG] DAG task processing completed",
+		logger.Field{Key: "dag_name", Value: tm.name},
+		logger.Field{Key: "dag_key", Value: tm.key},
+		logger.Field{Key: "task_id", Value: task.ID},
+		logger.Field{Key: "task_topic", Value: task.Topic},
+		logger.Field{Key: "result_topic", Value: result.Topic},
+		logger.Field{Key: "timestamp", Value: time.Now().Format(time.RFC3339)},
+		logger.Field{Key: "total_duration", Value: duration.String()},
+		logger.Field{Key: "status", Value: string(result.Status)},
+		logger.Field{Key: "has_error", Value: result.Error != nil},
+		logger.Field{Key: "error_message", Value: tm.getDAGErrorMessage(result.Error)},
+		logger.Field{Key: "result_size", Value: len(result.Payload)},
+		logger.Field{Key: "result_preview", Value: tm.getDAGPayloadPreview(resultPayload)},
+		logger.Field{Key: "is_last", Value: result.Last},
+		logger.Field{Key: "metrics", Value: tm.GetTaskMetrics()},
+	)
+}
+
+// getDAGPayloadPreview returns a truncated version of the payload for debug logging
+func (tm *DAG) getDAGPayloadPreview(payload map[string]any) string {
+	if payload == nil {
+		return "null"
+	}
+
+	preview := make(map[string]any)
+	count := 0
+	maxFields := 3 // Limit to first 3 fields for DAG level logging
+
+	for key, value := range payload {
+		if count >= maxFields {
+			preview["..."] = fmt.Sprintf("and %d more fields", len(payload)-maxFields)
+			break
+		}
+
+		// Truncate string values if they're too long
+		if strVal, ok := value.(string); ok && len(strVal) > 50 {
+			preview[key] = strVal[:47] + "..."
+		} else {
+			preview[key] = value
+		}
+		count++
+	}
+
+	previewBytes, _ := json.Marshal(preview)
+	return string(previewBytes)
+}
+
+// getDAGErrorMessage safely extracts error message
+func (tm *DAG) getDAGErrorMessage(err error) string {
+	if err == nil {
+		return ""
+	}
+	return err.Error()
+}
@@ -209,6 +209,12 @@ func (tm *TaskManager) processNode(exec *task) {
 	if tm.dag.PreProcessHook != nil {
 		exec.ctx = tm.dag.PreProcessHook(exec.ctx, node, exec.taskID, exec.payload)
 	}
+
+	// Debug logging before processing
+	if node.IsDebugEnabled(tm.dag.IsDebugEnabled()) {
+		tm.debugNodeStart(exec, node)
+	}
+
 	state, _ := tm.taskStates.Get(exec.nodeID)
 	if state == nil {
 		tm.dag.Logger().Warn("State not found; creating new state", logger.Field{Key: "nodeID", Value: exec.nodeID})
@@ -260,6 +266,11 @@ func (tm *TaskManager) processNode(exec *task) {
 		tm.dag.PostProcessHook(exec.ctx, node, exec.taskID, result)
 	}
+
+	// Debug logging after processing
+	if node.IsDebugEnabled(tm.dag.IsDebugEnabled()) {
+		tm.debugNodeComplete(exec, node, result, nodeLatency, attempts)
+	}

 	if result.Error != nil {
 		result.Status = mq.Failed
 		state.Status = mq.Failed
@@ -614,3 +625,94 @@ func (tm *TaskManager) Stop() {
 	tm.currentNodePayload.Clear()
 	tm.currentNodeResult.Clear()
 }
+
+// debugNodeStart logs debug information when a node starts processing
+func (tm *TaskManager) debugNodeStart(exec *task, node *Node) {
+	var payload map[string]any
+	if err := json.Unmarshal(exec.payload, &payload); err != nil {
+		payload = map[string]any{"raw_payload": string(exec.payload)}
+	}
+
+	tm.dag.Logger().Info("🐛 [DEBUG] Node processing started",
+		logger.Field{Key: "dag_name", Value: tm.dag.name},
+		logger.Field{Key: "task_id", Value: exec.taskID},
+		logger.Field{Key: "node_id", Value: node.ID},
+		logger.Field{Key: "node_type", Value: node.NodeType.String()},
+		logger.Field{Key: "node_label", Value: node.Label},
+		logger.Field{Key: "timestamp", Value: time.Now().Format(time.RFC3339)},
+		logger.Field{Key: "has_timeout", Value: node.Timeout > 0},
+		logger.Field{Key: "timeout_duration", Value: node.Timeout.String()},
+		logger.Field{Key: "payload_size", Value: len(exec.payload)},
+		logger.Field{Key: "payload_preview", Value: tm.getPayloadPreview(payload)},
+		logger.Field{Key: "debug_mode", Value: "individual_node:" + fmt.Sprintf("%t", node.Debug) + ", dag_global:" + fmt.Sprintf("%t", tm.dag.IsDebugEnabled())},
+	)
+
+	// Log processor type if it implements the Debugger interface
+	if debugger, ok := node.processor.(Debugger); ok {
+		debugger.Debug(exec.ctx, mq.NewTask(exec.taskID, exec.payload, exec.nodeID, mq.WithDAG(tm.dag)))
+	}
+}
+
+// debugNodeComplete logs debug information when a node completes processing
+func (tm *TaskManager) debugNodeComplete(exec *task, node *Node, result mq.Result, latency time.Duration, attempts int) {
+	var resultPayload map[string]any
+	if len(result.Payload) > 0 {
+		if err := json.Unmarshal(result.Payload, &resultPayload); err != nil {
+			resultPayload = map[string]any{"raw_payload": string(result.Payload)}
+		}
+	}
+
+	tm.dag.Logger().Info("🐛 [DEBUG] Node processing completed",
+		logger.Field{Key: "dag_name", Value: tm.dag.name},
+		logger.Field{Key: "task_id", Value: exec.taskID},
+		logger.Field{Key: "node_id", Value: node.ID},
+		logger.Field{Key: "node_type", Value: node.NodeType.String()},
+		logger.Field{Key: "node_label", Value: node.Label},
+		logger.Field{Key: "timestamp", Value: time.Now().Format(time.RFC3339)},
+		logger.Field{Key: "status", Value: string(result.Status)},
+		logger.Field{Key: "latency", Value: latency.String()},
+		logger.Field{Key: "attempts", Value: attempts + 1},
+		logger.Field{Key: "has_error", Value: result.Error != nil},
+		logger.Field{Key: "error_message", Value: tm.getErrorMessage(result.Error)},
+		logger.Field{Key: "result_size", Value: len(result.Payload)},
+		logger.Field{Key: "result_preview", Value: tm.getPayloadPreview(resultPayload)},
+		logger.Field{Key: "is_last_node", Value: result.Last},
+	)
+}
+
+// getPayloadPreview returns a truncated version of the payload for debug logging
+func (tm *TaskManager) getPayloadPreview(payload map[string]any) string {
+	if payload == nil {
+		return "null"
+	}
+
+	preview := make(map[string]any)
+	count := 0
+	maxFields := 5 // Limit to first 5 fields to avoid log spam
+
+	for key, value := range payload {
+		if count >= maxFields {
+			preview["..."] = fmt.Sprintf("and %d more fields", len(payload)-maxFields)
+			break
+		}
+
+		// Truncate string values if they're too long
+		if strVal, ok := value.(string); ok && len(strVal) > 100 {
+			preview[key] = strVal[:97] + "..."
+		} else {
+			preview[key] = value
+		}
+		count++
+	}
+
+	previewBytes, _ := json.Marshal(preview)
+	return string(previewBytes)
+}
+
+// getErrorMessage safely extracts error message
+func (tm *TaskManager) getErrorMessage(err error) string {
+	if err == nil {
+		return ""
+	}
+	return err.Error()
+}
@@ -45,7 +45,6 @@ func main() {
 		panic(flow.Error)
 	}

-	fmt.Println(flow.ExportDOT())
 	rs := flow.Process(context.Background(), data)
 	if rs.Error != nil {
 		panic(rs.Error)
services/examples/app/data/login_output.json (new file, 3 lines)
File diff suppressed because one or more lines are too long
@@ -1,7 +1,6 @@
 {
   "name": "Login Flow",
   "key": "login:flow",
-  "disable_log": true,
   "nodes": [
     {
       "id": "LoginForm",
@@ -1,7 +1,6 @@
 {
   "name": "Email Notification System",
   "key": "email:notification",
-  "disable_log": true,
   "nodes": [
     {
       "id": "Login",
@@ -55,6 +55,7 @@ func SetupHandler(handler Handler, brokerAddr string, async ...bool) *dag.DAG {
 		opts = append(opts, mq.WithLogger(nil))
 	}
 	flow := dag.NewDAG(handler.Name, handler.Key, nil, opts...)
+	flow.SetDebug(handler.Debug)
 	for _, node := range handler.Nodes {
 		if node.Node == "" && node.NodeKey == "" {
 			flow.Error = errors.New("Node not defined " + node.ID)
@@ -182,7 +183,7 @@ func prepareNode(flow *dag.DAG, node Node) error {
 	if node.Name == "" {
 		node.Name = node.ID
 	}
-	flow.AddNode(nodeType, node.Name, node.ID, nodeHandler, node.FirstNode)
+	flow.AddNodeWithDebug(nodeType, node.Name, node.ID, nodeHandler, node.Debug, node.FirstNode)
 	return nil
 }
@@ -139,6 +139,7 @@ type Node struct {
 	NodeKey   string `json:"node_key" yaml:"node_key"`
 	Node      string `json:"node" yaml:"node"`
 	Data      Data   `json:"data" yaml:"data"`
+	Debug     bool   `json:"debug" yaml:"debug"`
 	FirstNode bool   `json:"first_node" yaml:"first_node"`
 }
@@ -163,6 +164,7 @@ type Handler struct {
 	Name       string   `json:"name" yaml:"name"`
 	Key        string   `json:"key" yaml:"key"`
 	DisableLog bool     `json:"disable_log" yaml:"disable_log"`
+	Debug      bool     `json:"debug" yaml:"debug"`
 	Nodes      []Node   `json:"nodes,omitempty" yaml:"nodes,omitempty"`
 	Edges      []Edge   `json:"edges,omitempty" yaml:"edges,omitempty"`
 	Branches   []Branch `json:"branches,omitempty" yaml:"branches,omitempty"`
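
With the new Debug fields on Handler and Node, a handler definition can opt into debug logging per flow or per node; a hedged sketch written as if inside the same package (field names follow the structs above, the concrete values are placeholders):

handler := Handler{
	Name:  "Login Flow",
	Key:   "login:flow",
	Debug: true, // DAG-wide debug, forwarded to flow.SetDebug in SetupHandler
	Nodes: []Node{
		{ID: "LoginForm", Node: "login-form", FirstNode: true},
		{ID: "Login", Node: "login", Debug: true}, // per-node debug via AddNodeWithDebug
	},
}
flow := SetupHandler(handler, ":8081") // broker address is a placeholder
_ = flow
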