Oarkflow
2025-05-10 18:24:12 +05:45
parent 711441a87a
commit 4ce78c4848
4 changed files with 293 additions and 60 deletions

View File

@@ -16,6 +16,8 @@ import (
     "github.com/oarkflow/mq/codec"
     "github.com/oarkflow/mq/consts"
+    "github.com/oarkflow/mq/storage"
+    "github.com/oarkflow/mq/storage/memory"
     "github.com/oarkflow/mq/utils"
 )
@@ -38,6 +40,7 @@ type Consumer struct {
     opts    *Options
     id      string
     queue   string
+    pIDs    storage.IMap[string, bool]
 }

 func NewConsumer(id string, queue string, handler Handler, opts ...Option) *Consumer {
@@ -47,6 +50,7 @@ func NewConsumer(id string, queue string, handler Handler, opts ...Option) *Cons
     opts:    options,
     queue:   queue,
     handler: handler,
+    pIDs:    memory.New[string, bool](),
 }
 }
@@ -154,11 +158,33 @@ func (c *Consumer) ConsumeMessage(ctx context.Context, msg *codec.Message, conn
     log.Printf("Error unmarshalling message: %v", err)
     return
 }
-    ctx = SetHeaders(ctx, map[string]string{consts.QueueKey: msg.Queue})
-    if err := c.pool.EnqueueTask(ctx, &task, 1); err != nil {
-        c.sendDenyMessage(ctx, task.ID, msg.Queue, err)
-        return
-    }
+    // Check if the task has already been processed
+    if _, exists := c.pIDs.Get(task.ID); exists {
+        log.Printf("Task %s already processed, skipping...", task.ID)
+        return
+    }
+    ctx = SetHeaders(ctx, map[string]string{consts.QueueKey: msg.Queue})
+    retryCount := 0
+    for {
+        err := c.pool.EnqueueTask(ctx, &task, 1)
+        if err == nil {
+            // Mark the task as processed
+            c.pIDs.Set(task.ID, true)
+            break
+        }
+        if retryCount >= c.opts.maxRetries {
+            c.sendDenyMessage(ctx, task.ID, msg.Queue, err)
+            return
+        }
+        retryCount++
+        backoffDuration := utils.CalculateJitter(c.opts.initialDelay*(1<<retryCount), c.opts.jitterPercent)
+        log.Printf("Retrying task %s after %v (attempt %d/%d)", task.ID, backoffDuration, retryCount, c.opts.maxRetries)
+        time.Sleep(backoffDuration)
+    }
 }

 func (c *Consumer) ProcessTask(ctx context.Context, msg *Task) Result {
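The reworked ConsumeMessage first consults the consumer's pIDs map to drop duplicates, then retries EnqueueTask with an exponential, jittered backoff before denying the message. A minimal standalone sketch of that enqueue-with-backoff pattern, with a hypothetical calculateJitter helper standing in for utils.CalculateJitter and illustrative maxRetries/initialDelay values:

    package main

    import (
        "errors"
        "fmt"
        "math/rand"
        "time"
    )

    // calculateJitter is a hypothetical stand-in for utils.CalculateJitter:
    // it spreads the delay by +/- percent so retries do not synchronize.
    func calculateJitter(d time.Duration, percent float64) time.Duration {
        jitter := (rand.Float64()*2 - 1) * percent / 100 * float64(d)
        return d + time.Duration(jitter)
    }

    func enqueueWithRetry(enqueue func() error, maxRetries int, initialDelay time.Duration) error {
        retryCount := 0
        for {
            err := enqueue()
            if err == nil {
                return nil // success: the caller can now mark the task ID as processed
            }
            if retryCount >= maxRetries {
                return fmt.Errorf("giving up after %d retries: %w", retryCount, err)
            }
            retryCount++
            // exponential backoff: initialDelay * 2^retryCount, then jittered
            delay := calculateJitter(initialDelay*time.Duration(1<<retryCount), 20)
            time.Sleep(delay)
        }
    }

    func main() {
        attempts := 0
        err := enqueueWithRetry(func() error {
            attempts++
            if attempts < 3 {
                return errors.New("pool full") // simulated transient failure
            }
            return nil
        }, 5, 100*time.Millisecond)
        fmt.Println(err, "after", attempts, "attempts")
    }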

View File

@@ -8,27 +8,36 @@ import (
 )

 var (
-    TasksProcessed = prometheus.NewCounterVec(
+    taskProcessed = prometheus.NewCounterVec(
         prometheus.CounterOpts{
             Name: "tasks_processed_total",
-            Help: "Total number of processed tasks.",
+            Help: "Total number of tasks processed.",
         },
         []string{"status"},
     )
-    TasksErrors = prometheus.NewCounterVec(
-        prometheus.CounterOpts{
-            Name: "tasks_errors_total",
-            Help: "Total number of errors encountered while processing tasks.",
+    taskProcessingTime = prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Name:    "task_processing_time_seconds",
+            Help:    "Histogram of task processing times.",
+            Buckets: prometheus.DefBuckets,
         },
-        []string{"node"},
     )
 )

 func init() {
-    prometheus.MustRegister(TasksProcessed)
-    prometheus.MustRegister(TasksErrors)
+    prometheus.MustRegister(taskProcessed)
+    prometheus.MustRegister(taskProcessingTime)
 }

-func HandleHTTP() {
-    http.Handle("/metrics", promhttp.Handler())
+func RecordTaskProcessed(status string) {
+    taskProcessed.WithLabelValues(status).Inc()
+}
+
+func RecordTaskProcessingTime(duration float64) {
+    taskProcessingTime.Observe(duration)
+}
+
+func StartMetricsServer(port string) {
+    http.Handle("/metrics", promhttp.Handler())
+    go http.ListenAndServe(port, nil)
 }
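The metrics file now splits responsibilities: RecordTaskProcessed and RecordTaskProcessingTime update the collectors, while StartMetricsServer exposes /metrics and hands the address straight to http.ListenAndServe. A self-contained sketch of the intended wiring, using the Prometheus client directly; the ":2112" address and the inline timing are illustrative, not part of the commit:

    package main

    import (
        "net/http"
        "time"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    // Local mirrors of the collectors defined in the diff, so the sketch is self-contained.
    var taskProcessed = prometheus.NewCounterVec(
        prometheus.CounterOpts{Name: "tasks_processed_total", Help: "Total number of tasks processed."},
        []string{"status"},
    )
    var taskProcessingTime = prometheus.NewHistogram(
        prometheus.HistogramOpts{Name: "task_processing_time_seconds", Help: "Histogram of task processing times.", Buckets: prometheus.DefBuckets},
    )

    func main() {
        prometheus.MustRegister(taskProcessed, taskProcessingTime)

        // Equivalent of StartMetricsServer(":2112"): expose /metrics and serve in the background.
        http.Handle("/metrics", promhttp.Handler())
        go http.ListenAndServe(":2112", nil)

        // Per task: record the outcome label and the elapsed time in seconds.
        start := time.Now()
        // ... process a task here ...
        taskProcessed.WithLabelValues("success").Inc()           // RecordTaskProcessed("success")
        taskProcessingTime.Observe(time.Since(start).Seconds())  // RecordTaskProcessingTime(...)

        select {} // block so the metrics endpoint stays reachable
    }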

mq.go (218 changed lines)
View File

@@ -283,21 +283,23 @@ type publisher struct {
 }

 type Broker struct {
-    queues     storage.IMap[string, *Queue]
+    queues     storage.IMap[string, storage.IMap[string, *Queue]] // Modified to support tenant-specific queues
     consumers  storage.IMap[string, *consumer]
     publishers storage.IMap[string, *publisher]
     deadLetter storage.IMap[string, *Queue]
     opts       *Options
+    pIDs       storage.IMap[string, bool]
     listener   net.Listener
 }

 func NewBroker(opts ...Option) *Broker {
     options := SetupOptions(opts...)
     return &Broker{
-        queues:     memory.New[string, *Queue](),
+        queues:     memory.New[string, storage.IMap[string, *Queue]](),
         publishers: memory.New[string, *publisher](),
         consumers:  memory.New[string, *consumer](),
         deadLetter: memory.New[string, *Queue](),
+        pIDs:       memory.New[string, bool](),
         opts:       options,
     }
 }
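The Broker's queues field becomes a two-level map: the outer key identifies a tenant (or, on the legacy NewQueue path, the queue name itself) and the inner IMap holds that tenant's queues. A small sketch of the resulting two-step lookup, assuming the storage/memory map behaves as it is used in this diff and using a simplified stand-in for the unexported Queue type:

    package main

    import (
        "fmt"

        "github.com/oarkflow/mq/storage"
        "github.com/oarkflow/mq/storage/memory"
    )

    // queue is a simplified stand-in for mq.Queue, only to show the nested-map shape.
    type queue struct{ name string }

    func main() {
        // Outer map: tenant ID -> inner map; inner map: queue name -> queue.
        queues := memory.New[string, storage.IMap[string, *queue]]()

        queues.Set("tenant-a", memory.New[string, *queue]())
        if tenantQueues, ok := queues.Get("tenant-a"); ok {
            tenantQueues.Set("orders", &queue{name: "orders"})
        }

        // Resolving a queue is always a two-step lookup: tenant first, then queue name.
        if tenantQueues, ok := queues.Get("tenant-a"); ok {
            if q, ok := tenantQueues.Get("orders"); ok {
                fmt.Println("found queue:", q.name)
            }
        }
    }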
@@ -314,13 +316,16 @@ func (b *Broker) OnClose(ctx context.Context, conn net.Conn) error {
         con.conn.Close()
         b.consumers.Del(consumerID)
     }
-    b.queues.ForEach(func(_ string, queue *Queue) bool {
-        if _, ok := queue.consumers.Get(consumerID); ok {
-            if b.opts.consumerOnClose != nil {
-                b.opts.consumerOnClose(ctx, queue.name, consumerID)
-            }
-            queue.consumers.Del(consumerID)
-        }
-        return true
-    })
+    b.queues.ForEach(func(_ string, tenantQueues storage.IMap[string, *Queue]) bool {
+        tenantQueues.ForEach(func(_ string, queue *Queue) bool {
+            if _, ok := queue.consumers.Get(consumerID); ok {
+                if b.opts.consumerOnClose != nil {
+                    b.opts.consumerOnClose(ctx, queue.name, consumerID)
+                }
+                queue.consumers.Del(consumerID)
+            }
+            return true
+        })
+        return true
+    })
 } else {
@@ -329,13 +334,16 @@ func (b *Broker) OnClose(ctx context.Context, conn net.Conn) error {
     log.Printf("Broker: Consumer connection closed: %s, address: %s", consumerID, conn.RemoteAddr())
     con.conn.Close()
     b.consumers.Del(consumerID)
-    b.queues.ForEach(func(_ string, queue *Queue) bool {
-        queue.consumers.Del(consumerID)
-        if _, ok := queue.consumers.Get(consumerID); ok {
-            if b.opts.consumerOnClose != nil {
-                b.opts.consumerOnClose(ctx, queue.name, consumerID)
-            }
-        }
-        return true
-    })
+    b.queues.ForEach(func(_ string, tenantQueues storage.IMap[string, *Queue]) bool {
+        tenantQueues.ForEach(func(_ string, queue *Queue) bool {
+            queue.consumers.Del(consumerID)
+            if _, ok := queue.consumers.Get(consumerID); ok {
+                if b.opts.consumerOnClose != nil {
+                    b.opts.consumerOnClose(ctx, queue.name, consumerID)
+                }
+            }
+            return true
+        })
+        return true
+    })
 }
@@ -591,9 +599,14 @@ func (b *Broker) receive(ctx context.Context, c net.Conn) (*codec.Message, error
 }

 func (b *Broker) broadcastToConsumers(msg *codec.Message) {
-    if queue, ok := b.queues.Get(msg.Queue); ok {
-        task := &QueuedTask{Message: msg, RetryCount: 0}
-        queue.tasks <- task
+    if tenantQueues, ok := b.queues.Get(msg.Queue); ok {
+        tenantQueues.ForEach(func(_, queueName string) bool {
+            if queue, ok := tenantQueues.Get(queueName); ok {
+                task := &QueuedTask{Message: msg, RetryCount: 0}
+                queue.tasks <- task
+            }
+            return true
+        })
     }
 }
@@ -755,6 +768,7 @@ func (b *Broker) dispatchWorker(ctx context.Context, queue *Queue) {
     for !success && task.RetryCount <= b.opts.maxRetries {
         if b.dispatchTaskToConsumer(ctx, queue, task) {
             success = true
+            b.acknowledgeTask(ctx, task.Message.Queue, queue.name)
         } else {
             task.RetryCount++
             delay = b.backoffRetry(queue, task, delay)
@@ -779,6 +793,14 @@ func (b *Broker) sendToDLQ(queue *Queue, task *QueuedTask) {
 func (b *Broker) dispatchTaskToConsumer(ctx context.Context, queue *Queue, task *QueuedTask) bool {
     var consumerFound bool
     var err error
+    // Deduplication: Check if the task has already been processed
+    taskID, _ := jsonparser.GetString(task.Message.Payload, "id")
+    if _, exists := b.pIDs.Get(taskID); exists {
+        log.Printf("Task %s already processed, skipping...", taskID)
+        return true
+    }
     queue.consumers.ForEach(func(_ string, con *consumer) bool {
         if con.state != consts.ConsumerStateActive {
             err = fmt.Errorf("consumer %s is not active", con.id)
@@ -786,10 +808,13 @@ func (b *Broker) dispatchTaskToConsumer(ctx context.Context, queue *Queue, task
         }
         if err := b.send(ctx, con.conn, task.Message); err == nil {
             consumerFound = true
+            // Mark the task as processed
+            b.pIDs.Set(taskID, true)
             return false
         }
         return true
     })
     if err != nil {
         log.Println(err.Error())
         return false
@@ -800,7 +825,7 @@ func (b *Broker) dispatchTaskToConsumer(ctx context.Context, queue *Queue, task
     result := Result{
         Status: "NO_CONSUMER",
         Topic:  queue.name,
-        TaskID: "",
+        TaskID: taskID,
         Ctx:    ctx,
     }
     _ = b.opts.notifyResponse(ctx, result)
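Broker-side deduplication extracts the task ID from the message payload with jsonparser, skips IDs already present in pIDs, and records an ID only after a send succeeds, so a failed dispatch stays eligible for retry. A compact sketch of that pattern, assuming the jsonparser identifier refers to github.com/buger/jsonparser and modeling the processed-ID set as a plain map rather than storage.IMap:

    package main

    import (
        "fmt"

        "github.com/buger/jsonparser" // assumed source of the jsonparser package used in the diff
    )

    // dedupSend skips payloads whose "id" field has already been dispatched.
    // processed stands in for the broker's pIDs map (storage.IMap[string, bool]).
    func dedupSend(processed map[string]bool, payload []byte, send func([]byte) error) error {
        taskID, _ := jsonparser.GetString(payload, "id")
        if processed[taskID] {
            fmt.Printf("Task %s already processed, skipping...\n", taskID)
            return nil
        }
        if err := send(payload); err != nil {
            return err // not marked, so a later attempt may retry it
        }
        processed[taskID] = true // mark only after a successful send
        return nil
    }

    func main() {
        processed := map[string]bool{}
        payload := []byte(`{"id":"task-1","data":"hello"}`)
        send := func(b []byte) error { fmt.Println("sent:", string(b)); return nil }

        _ = dedupSend(processed, payload, send) // dispatches and marks task-1
        _ = dedupSend(processed, payload, send) // skipped: task-1 already processed
    }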
@@ -843,7 +868,31 @@ func (b *Broker) NewQueue(name string) *Queue {
         tasks:     make(chan *QueuedTask, b.opts.queueSize),
         consumers: memory.New[string, *consumer](),
     }
-    b.queues.Set(name, q)
+    b.queues.Set(name, memory.New[string, *Queue]())
+    b.queues.Get(name).Set(name, q)
+    // Create DLQ for the queue
+    dlq := &Queue{
+        name:      name + "_dlq",
+        tasks:     make(chan *QueuedTask, b.opts.queueSize),
+        consumers: memory.New[string, *consumer](),
+    }
+    b.deadLetter.Set(name, dlq)
+    ctx := context.Background()
+    go b.dispatchWorker(ctx, q)
+    go b.dispatchWorker(ctx, dlq)
+    return q
+}
+
+// Ensure message ordering in task queues
+func (b *Broker) NewQueueWithOrdering(name string) *Queue {
+    q := &Queue{
+        name:      name,
+        tasks:     make(chan *QueuedTask, b.opts.queueSize),
+        consumers: memory.New[string, *consumer](),
+    }
+    b.queues.Set(name, memory.New[string, *Queue]())
+    b.queues.Get(name).Set(name, q)
     // Create DLQ for the queue
     dlq := &Queue{
@@ -885,3 +934,134 @@ func (b *Broker) HandleCallback(ctx context.Context, msg *codec.Message) {
         }
     }
 }
+
+// Add explicit acknowledgment for successful task processing
+func (b *Broker) acknowledgeTask(ctx context.Context, taskID string, queueName string) {
+    log.Printf("Acknowledging task %s on queue %s", taskID, queueName)
+    if b.opts.notifyResponse != nil {
+        result := Result{
+            Status: "ACKNOWLEDGED",
+            Topic:  queueName,
+            TaskID: taskID,
+            Ctx:    ctx,
+        }
+        _ = b.opts.notifyResponse(ctx, result)
+    }
+}
+
+// Add authentication and authorization for publishers and consumers
+func (b *Broker) Authenticate(ctx context.Context, credentials map[string]string) error {
+    username, userExists := credentials["username"]
+    password, passExists := credentials["password"]
+    if !userExists || !passExists {
+        return fmt.Errorf("missing credentials")
+    }
+    // Example: Hardcoded credentials for simplicity
+    if username != "admin" || password != "password" {
+        return fmt.Errorf("invalid credentials")
+    }
+    return nil
+}
+
+func (b *Broker) Authorize(ctx context.Context, role string, action string) error {
+    // Example: Simple role-based authorization
+    if role == "publisher" && action == "publish" {
+        return nil
+    }
+    if role == "consumer" && action == "consume" {
+        return nil
+    }
+    return fmt.Errorf("unauthorized action")
+}
+
+// Add support for multi-tenancy
+func (b *Broker) AddTenant(tenantID string) error {
+    if _, exists := b.queues.Get(tenantID); exists {
+        return fmt.Errorf("tenant %s already exists", tenantID)
+    }
+    b.queues.Set(tenantID, memory.New[string, *Queue]())
+    return nil
+}
+
+func (b *Broker) RemoveTenant(tenantID string) error {
+    if _, exists := b.queues.Get(tenantID); !exists {
+        return fmt.Errorf("tenant %s does not exist", tenantID)
+    }
+    b.queues.Del(tenantID)
+    return nil
+}
+
+// Ensure tenant-specific queues and operations
+func (b *Broker) NewQueueForTenant(tenantID, queueName string) (*Queue, error) {
+    tenantQueues, ok := b.queues.Get(tenantID)
+    if !ok {
+        return nil, fmt.Errorf("tenant %s does not exist", tenantID)
+    }
+    if _, exists := tenantQueues.Get(queueName); exists {
+        return nil, fmt.Errorf("queue %s already exists for tenant %s", queueName, tenantID)
+    }
+    q := &Queue{
+        name:      queueName,
+        tasks:     make(chan *QueuedTask, b.opts.queueSize),
+        consumers: memory.New[string, *consumer](),
+    }
+    tenantQueues.Set(queueName, q)
+    // Create tenant-specific DLQ
+    dlq := &Queue{
+        name:      queueName + "_dlq",
+        tasks:     make(chan *QueuedTask, b.opts.queueSize),
+        consumers: memory.New[string, *consumer](),
+    }
+    tenantQueues.Set(queueName+"_dlq", dlq)
+    ctx := context.Background()
+    go b.dispatchWorker(ctx, q)
+    go b.dispatchWorker(ctx, dlq)
+    return q, nil
+}
+
+func (b *Broker) PublishForTenant(ctx context.Context, tenantID string, task *Task, queueName string) error {
+    tenantQueues, ok := b.queues.Get(tenantID)
+    if !ok {
+        return fmt.Errorf("tenant %s does not exist", tenantID)
+    }
+    queue, ok := tenantQueues.Get(queueName)
+    if !ok {
+        return fmt.Errorf("queue %s does not exist for tenant %s", queueName, tenantID)
+    }
+    taskID := task.ID
+    if taskID == "" {
+        taskID = NewID()
+        task.ID = taskID
+    }
+    queuedTask := &QueuedTask{Message: codec.NewMessage(consts.PUBLISH, task.Payload, queueName, nil), RetryCount: 0}
+    queue.tasks <- queuedTask
+    return nil
+}
+
+func (b *Broker) SubscribeForTenant(ctx context.Context, tenantID, queueName string, conn net.Conn) error {
+    tenantQueues, ok := b.queues.Get(tenantID)
+    if !ok {
+        return fmt.Errorf("tenant %s does not exist", tenantID)
+    }
+    queue, ok := tenantQueues.Get(queueName)
+    if !ok {
+        return fmt.Errorf("queue %s does not exist for tenant %s", queueName, tenantID)
+    }
+    consumerID := b.AddConsumer(ctx, queueName, conn)
+    queue.consumers.Set(consumerID, &consumer{id: consumerID, conn: conn})
+    return nil
+}
+
+func (b *Broker) ListQueuesForTenant(tenantID string) ([]string, error) {
+    tenantQueues, ok := b.queues.Get(tenantID)
+    if !ok {
+        return nil, fmt.Errorf("tenant %s does not exist", tenantID)
+    }
+    var queueNames []string
+    tenantQueues.ForEach(func(queueName string, _ *Queue) bool {
+        queueNames = append(queueNames, queueName)
+        return true
+    })
+    return queueNames, nil
+}
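Taken together, the tenant APIs imply a register-then-use flow: authenticate and authorize the client, add the tenant, create its queue (which also creates the _dlq and starts dispatch workers), then publish into it. A rough end-to-end sketch under those assumptions; the tenant and queue names are arbitrary, and Task is assumed to accept raw JSON bytes in its Payload field:

    package main

    import (
        "context"
        "log"

        "github.com/oarkflow/mq" // assumed module root for the Broker API shown above
    )

    func main() {
        ctx := context.Background()
        b := mq.NewBroker()

        // Gate the client with the new auth hooks (credentials are the example ones from Authenticate).
        creds := map[string]string{"username": "admin", "password": "password"}
        if err := b.Authenticate(ctx, creds); err != nil {
            log.Fatal(err)
        }
        if err := b.Authorize(ctx, "publisher", "publish"); err != nil {
            log.Fatal(err)
        }

        // Tenant setup: tenant first, then its queue (DLQ and dispatch workers come along).
        if err := b.AddTenant("tenant-a"); err != nil {
            log.Fatal(err)
        }
        if _, err := b.NewQueueForTenant("tenant-a", "orders"); err != nil {
            log.Fatal(err)
        }

        // Publish into the tenant's queue; an empty ID is filled in by PublishForTenant.
        task := &mq.Task{Payload: []byte(`{"id":"","order":"42"}`)} // assumes Payload takes raw JSON bytes
        if err := b.PublishForTenant(ctx, "tenant-a", task, "orders"); err != nil {
            log.Fatal(err)
        }

        queues, _ := b.ListQueuesForTenant("tenant-a")
        log.Println("tenant-a queues:", queues) // expect "orders" and "orders_dlq"
    }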

task.go (18 changed lines)
View File

@@ -2,6 +2,7 @@ package mq

 import (
     "context"
+    "fmt"
     "time"

     "github.com/oarkflow/json"
@@ -93,3 +94,20 @@ func WithDedupKey(key string) TaskOption {
         t.DedupKey = key
     }
 }
+
+// Add advanced dead-letter queue management
+func (b *Broker) ReprocessDLQ(queueName string) error {
+    dlqName := queueName + "_dlq"
+    dlq, ok := b.deadLetter.Get(dlqName)
+    if !ok {
+        return fmt.Errorf("dead-letter queue %s does not exist", dlqName)
+    }
+    for {
+        select {
+        case task := <-dlq.tasks:
+            b.NewQueue(queueName).tasks <- task
+        default:
+            return nil
+        }
+    }
+}
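ReprocessDLQ drains the dead-letter channel for a queue back onto the main queue in a non-blocking loop and returns once the channel is empty; the DLQ is looked up under the queue name with the _dlq suffix. A brief usage sketch, assuming a broker and queue created through the API shown earlier in this commit:

    package main

    import (
        "log"

        "github.com/oarkflow/mq" // assumed module root for the Broker API shown above
    )

    func main() {
        b := mq.NewBroker()
        b.NewQueue("orders") // creates the queue plus its dead-letter counterpart

        // After some tasks have landed in the DLQ, move them back onto the main queue.
        // ReprocessDLQ returns as soon as the dead-letter channel is empty.
        if err := b.ReprocessDLQ("orders"); err != nil {
            log.Println("reprocess failed:", err)
        }
    }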