feat: use "GetTags" and "SetTags"

commit 28870f1629
parent b5e6c29ffb
Author: sujit
Date:   2025-05-05 09:07:01 +05:45

4 changed files with 150 additions and 45 deletions

examples/dedup.go (new file)

@@ -0,0 +1,58 @@
package main

import (
    "context"
    "fmt"
    "time"

    "github.com/oarkflow/json"

    "github.com/oarkflow/mq"
    "github.com/oarkflow/mq/examples/tasks"
)

func main() {
    handler := tasks.SchedulerHandler
    callback := tasks.SchedulerCallback

    // Initialize the pool with various parameters.
    pool := mq.NewPool(3,
        mq.WithTaskQueueSize(5),
        mq.WithMaxMemoryLoad(1000),
        mq.WithHandler(handler),
        mq.WithPoolCallback(callback),
        mq.WithTaskStorage(mq.NewMemoryTaskStorage(10*time.Minute)),
    )
    scheduler := mq.NewScheduler(pool)
    scheduler.Start()
    ctx := context.Background()

    // Example: Schedule an email task with deduplication.
    // DedupKey here is set to the recipient's email (e.g., "user@example.com")
    // to avoid duplicate email tasks.
    emailPayload := json.RawMessage(`{"email": "user@example.com", "message": "Hello, Customer!"}`)
    scheduler.AddTask(ctx, mq.NewTask("Email Task", emailPayload, "email",
        mq.WithDedupKey("user@example.com"),
    ),
        mq.WithScheduleSpec("@every 1m"), // runs every minute for demonstration
        mq.WithRecurring(),
    )

    // This second task shares the DedupKey, so the scheduler rejects it.
    scheduler.AddTask(ctx, mq.NewTask("Duplicate Email Task", emailPayload, "email",
        mq.WithDedupKey("user@example.com"),
    ),
        mq.WithScheduleSpec("@every 1m"),
        mq.WithRecurring(),
    )

    // Periodically print the scheduled tasks.
    go func() {
        for {
            for _, task := range scheduler.ListScheduledTasks() {
                fmt.Println("Scheduled.....", task)
            }
            time.Sleep(1 * time.Minute)
        }
    }()
    time.Sleep(10 * time.Minute)
}
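After this commit, AddTask (changed below) returns the scheduler-assigned task ID, and an empty string when a dedup-key match rejects the task, so a caller can detect the duplicate case directly. A minimal sketch reusing the names from the example above:

    id := scheduler.AddTask(ctx, mq.NewTask("Duplicate Email Task", emailPayload, "email",
        mq.WithDedupKey("user@example.com"),
    ), mq.WithScheduleSpec("@every 1m"), mq.WithRecurring())
    if id == "" {
        // The scheduler logged "Duplicate scheduled task prevented" and skipped the task.
        fmt.Println("duplicate task rejected")
    }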

@@ -10,6 +10,7 @@ import (
     "time"

     "github.com/oarkflow/log"
+    "github.com/oarkflow/xid"

     "github.com/oarkflow/mq/storage"
     "github.com/oarkflow/mq/storage/memory"
@@ -351,8 +352,8 @@ type ScheduledTask struct {
     schedule         *Schedule
     stop             chan struct{}
     executionHistory []ExecutionHistory
-    // running is used to indicate whether the task is currently executing (if !Overlap).
     running          int32
+    id               string
 }

 type SchedulerConfig struct {
@@ -368,8 +369,6 @@ type ExecutionHistory struct {

 // Scheduler manages scheduling and executing tasks.
 type Scheduler struct {
     pool    *Pool
-    tasks   []*ScheduledTask
-    mu      sync.Mutex
     storage storage.IMap[string, *ScheduledTask] // added storage field
 }
@@ -397,11 +396,10 @@ func NewScheduler(pool *Pool, opts ...SchedulerOpt) *Scheduler {

 // Start begins executing scheduled tasks.
 func (s *Scheduler) Start() {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-    for _, task := range s.tasks {
+    s.storage.ForEach(func(_ string, task *ScheduledTask) bool {
         go s.schedule(task)
-    }
+        return true
+    })
 }

 func (s *Scheduler) Close() error {
@@ -552,9 +550,20 @@ func (s *Scheduler) executeTask(task *ScheduledTask) {
 }

 // AddTask adds a new scheduled task using the supplied context, payload, and options.
-func (s *Scheduler) AddTask(ctx context.Context, payload *Task, opts ...SchedulerOption) {
-    s.mu.Lock()
-    defer s.mu.Unlock()
+func (s *Scheduler) AddTask(ctx context.Context, payload *Task, opts ...SchedulerOption) string {
+    var hasDuplicate bool
+    if payload.DedupKey != "" {
+        s.storage.ForEach(func(_ string, task *ScheduledTask) bool {
+            if task.payload.DedupKey == payload.DedupKey {
+                hasDuplicate = true
+                Logger.Warn().Str("dedup", payload.DedupKey).Msg("Duplicate scheduled task prevented")
+            }
+            return true
+        })
+    }
+    if hasDuplicate {
+        return ""
+    }
     options := defaultSchedulerOptions()
     for _, opt := range opts {
         opt(options)
@@ -581,6 +590,7 @@ func (s *Scheduler) AddTask(ctx context.Context, payload *Task, opts ...Schedule
     stop := make(chan struct{})
     newTask := &ScheduledTask{
+        id:      xid.New().String(),
         ctx:     ctx,
         handler: options.Handler,
         payload: payload,
@@ -591,55 +601,43 @@ func (s *Scheduler) AddTask(ctx context.Context, payload *Task, opts ...Schedule
         },
         schedule: sched,
     }
-    s.tasks = append(s.tasks, newTask)
-    // Persist the task in storage if available.
-    if s.storage != nil {
-        s.storage.Set(newTask.payload.ID, newTask)
-    }
+    s.storage.Set(newTask.id, newTask)
     go s.schedule(newTask)
+    return newTask.id
 }

 // RemoveTask stops and removes a task by its payload ID.
-func (s *Scheduler) RemoveTask(payloadID string) {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-    for i, task := range s.tasks {
-        if task.payload.ID == payloadID {
-            close(task.stop)
-            s.tasks = append(s.tasks[:i], s.tasks[i+1:]...)
-            // Remove task from storage if available.
-            if s.storage != nil {
-                s.storage.Del(payloadID)
-            }
-            break
-        }
-    }
+func (s *Scheduler) RemoveTask(id string) error {
+    task, ok := s.storage.Get(id)
+    if !ok {
+        return fmt.Errorf("No task found with ID: %s\n", id)
+    }
+    close(task.stop)
+    if s.storage != nil {
+        s.storage.Del(id)
+    }
+    return nil
 }

 // PrintAllTasks prints a summary of all scheduled tasks.
 func (s *Scheduler) PrintAllTasks() {
-    s.mu.Lock()
-    defer s.mu.Unlock()
     fmt.Println("Scheduled Tasks:")
-    for _, task := range s.tasks {
-        fmt.Printf("Task ID: %s, Next Execution: %s\n", task.payload.ID, task.getNextRunTime(time.Now()))
-    }
+    s.storage.ForEach(func(_ string, task *ScheduledTask) bool {
+        fmt.Printf("Task ID: %s, Next Execution: %s\n", task.payload.ID, task.getNextRunTime(time.Now()))
+        return true
+    })
 }

 // PrintExecutionHistory prints the execution history for a task by its ID.
-func (s *Scheduler) PrintExecutionHistory(taskID string) {
-    s.mu.Lock()
-    defer s.mu.Unlock()
-    for _, task := range s.tasks {
-        if task.payload.ID == taskID {
-            fmt.Printf("Execution History for Task ID: %s\n", taskID)
-            for _, history := range task.executionHistory {
-                fmt.Printf("Timestamp: %s, Result: %v\n", history.Timestamp, history.Result.Error)
-            }
-            return
-        }
-    }
-    fmt.Printf("No task found with ID: %s\n", taskID)
+func (s *Scheduler) PrintExecutionHistory(id string) error {
+    task, ok := s.storage.Get(id)
+    if !ok {
+        return fmt.Errorf("No task found with ID: %s\n", id)
+    }
+    for _, history := range task.executionHistory {
+        fmt.Printf("Timestamp: %s, Result: %v\n", history.Timestamp, history.Result.Error)
+    }
+    return nil
 }

 // getNextRunTime computes the next run time for the task.
@@ -667,6 +665,26 @@ func (task *ScheduledTask) getNextRunTime(now time.Time) time.Time {
     return now
 }

+// New type to hold scheduled task information.
+type TaskInfo struct {
+    TaskID      string    `json:"task_id"`
+    NextRunTime time.Time `json:"next_run_time"`
+}
+
+// ListScheduledTasks returns details of all scheduled tasks along with their next run time.
+func (s *Scheduler) ListScheduledTasks() []TaskInfo {
+    now := time.Now()
+    infos := make([]TaskInfo, 0, s.storage.Size())
+    s.storage.ForEach(func(_ string, task *ScheduledTask) bool {
+        infos = append(infos, TaskInfo{
+            TaskID:      task.payload.ID,
+            NextRunTime: task.getNextRunTime(now),
+        })
+        return true
+    })
+    return infos
+}
+
 // --------------------------------------------------------
 // Additional Helper Functions and Stubs
 // --------------------------------------------------------
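A short usage sketch for the new scheduler APIs. Note that RemoveTask and PrintExecutionHistory are keyed by the scheduler-assigned ID that AddTask now returns, while TaskInfo.TaskID carries the payload ID; `id` below is assumed to hold an AddTask return value:

    for _, info := range scheduler.ListScheduledTasks() {
        fmt.Printf("task %s next runs at %s\n", info.TaskID, info.NextRunTime)
    }
    if err := scheduler.RemoveTask(id); err != nil {
        fmt.Println(err) // "No task found with ID: ..." for an unknown ID
    }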

@@ -20,18 +20,29 @@ type MemoryTaskStorage struct {
     tasks      PriorityQueue
     taskLock   sync.Mutex
     expiryTime time.Duration
+    dedup      map[string]*QueueTask
 }

 func NewMemoryTaskStorage(expiryTime time.Duration) *MemoryTaskStorage {
     return &MemoryTaskStorage{
         tasks:      make(PriorityQueue, 0),
         expiryTime: expiryTime,
+        dedup:      make(map[string]*QueueTask),
     }
 }

 func (m *MemoryTaskStorage) SaveTask(task *QueueTask) error {
     m.taskLock.Lock()
     defer m.taskLock.Unlock()
+    if key := task.payload.DedupKey; key != "" {
+        if existing, ok := m.dedup[key]; ok {
+            if existing.payload.CreatedAt.Add(m.expiryTime).After(time.Now()) {
+                return fmt.Errorf("duplicate task with dedup key: %s", key)
+            }
+            delete(m.dedup, key)
+        }
+        m.dedup[key] = task
+    }
     heap.Push(&m.tasks, task)
     return nil
 }
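The SaveTask guard above is an expiry-window dedup map: a key is rejected while an earlier entry is still younger than expiryTime, and replaced once that window has passed. A self-contained sketch of the same pattern (the names here are illustrative, not part of the package):

    package main

    import (
        "fmt"
        "time"
    )

    type entry struct{ created time.Time }

    // saveKey rejects key while a previous entry is within ttl, otherwise replaces it.
    func saveKey(dedup map[string]entry, key string, ttl time.Duration) error {
        if e, ok := dedup[key]; ok {
            if e.created.Add(ttl).After(time.Now()) {
                return fmt.Errorf("duplicate task with dedup key: %s", key)
            }
            delete(dedup, key) // stale entry: drop it and accept the new one
        }
        dedup[key] = entry{created: time.Now()}
        return nil
    }

    func main() {
        dedup := make(map[string]entry)
        fmt.Println(saveKey(dedup, "user@example.com", time.Minute)) // <nil>
        fmt.Println(saveKey(dedup, "user@example.com", time.Minute)) // duplicate task ... error
    }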
@@ -52,6 +63,9 @@ func (m *MemoryTaskStorage) DeleteTask(taskID string) error {
     defer m.taskLock.Unlock()
     for i, task := range m.tasks {
         if task.payload.ID == taskID {
+            if key := task.payload.DedupKey; key != "" {
+                delete(m.dedup, key)
+            }
             heap.Remove(&m.tasks, i)
             return nil
         }
@@ -81,6 +95,9 @@ func (m *MemoryTaskStorage) FetchNextTask() (*QueueTask, error) {
         m.DeleteTask(task.payload.ID)
         return m.FetchNextTask()
     }
+    if key := task.payload.DedupKey; key != "" {
+        delete(m.dedup, key)
+    }
     return task, nil
 }
@@ -91,6 +108,9 @@ func (m *MemoryTaskStorage) CleanupExpiredTasks() error {
     for i := 0; i < len(m.tasks); i++ {
         task := m.tasks[i]
         if task.payload.CreatedAt.Add(m.expiryTime).Before(time.Now()) {
+            if key := task.payload.DedupKey; key != "" {
+                delete(m.dedup, key)
+            }
             heap.Remove(&m.tasks, i)
             i-- // Adjust index after removal
         }

@@ -68,6 +68,8 @@ type Task struct {
     Status  string          `json:"status"`
     Payload json.RawMessage `json:"payload"`
     dag     any
+    // new deduplication field
+    DedupKey string `json:"dedup_key,omitempty"`
 }

 func (t *Task) GetFlow() any {
@@ -84,3 +86,10 @@ func NewTask(id string, payload json.RawMessage, nodeKey string, opts ...TaskOpt
     }
     return task
 }
+
+// new TaskOption for deduplication:
+func WithDedupKey(key string) TaskOption {
+    return func(t *Task) {
+        t.DedupKey = key
+    }
+}
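WithDedupKey follows the package's functional-option pattern: it only sets the exported DedupKey field, which both dedup paths in this commit read (the duplicate check in Scheduler.AddTask and the dedup map in MemoryTaskStorage.SaveTask). A minimal sketch, where `payload` is any json.RawMessage:

    t := mq.NewTask("Email Task", payload, "email", mq.WithDedupKey("user@example.com"))
    fmt.Println(t.DedupKey) // user@example.com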