Oarkflow
2025-05-10 18:24:12 +05:45
parent 711441a87a
commit 4ce78c4848
4 changed files with 293 additions and 60 deletions


@@ -9,13 +9,15 @@ import (
 	"net/http"
 	"strings"
 	"time"
 	"github.com/oarkflow/json"
 	"github.com/oarkflow/json/jsonparser"
 	"github.com/oarkflow/mq/codec"
 	"github.com/oarkflow/mq/consts"
+	"github.com/oarkflow/mq/storage"
+	"github.com/oarkflow/mq/storage/memory"
 	"github.com/oarkflow/mq/utils"
 )
@@ -38,6 +40,7 @@ type Consumer struct {
 	opts    *Options
 	id      string
 	queue   string
+	pIDs    storage.IMap[string, bool]
 }
 func NewConsumer(id string, queue string, handler Handler, opts ...Option) *Consumer {
@@ -47,6 +50,7 @@ func NewConsumer(id string, queue string, handler Handler, opts ...Option) *Cons
 		opts:    options,
 		queue:   queue,
 		handler: handler,
+		pIDs:    memory.New[string, bool](),
 	}
 }
@@ -154,11 +158,33 @@ func (c *Consumer) ConsumeMessage(ctx context.Context, msg *codec.Message, conn
 		log.Printf("Error unmarshalling message: %v", err)
 		return
 	}
-	ctx = SetHeaders(ctx, map[string]string{consts.QueueKey: msg.Queue})
-	if err := c.pool.EnqueueTask(ctx, &task, 1); err != nil {
-		c.sendDenyMessage(ctx, task.ID, msg.Queue, err)
-		return
-	}
+	// Check if the task has already been processed
+	if _, exists := c.pIDs.Get(task.ID); exists {
+		log.Printf("Task %s already processed, skipping...", task.ID)
+		return
+	}
+	ctx = SetHeaders(ctx, map[string]string{consts.QueueKey: msg.Queue})
+	retryCount := 0
+	for {
+		err := c.pool.EnqueueTask(ctx, &task, 1)
+		if err == nil {
+			// Mark the task as processed
+			c.pIDs.Set(task.ID, true)
+			break
+		}
+		if retryCount >= c.opts.maxRetries {
+			c.sendDenyMessage(ctx, task.ID, msg.Queue, err)
+			return
+		}
+		retryCount++
+		backoffDuration := utils.CalculateJitter(c.opts.initialDelay*(1<<retryCount), c.opts.jitterPercent)
+		log.Printf("Retrying task %s after %v (attempt %d/%d)", task.ID, backoffDuration, retryCount, c.opts.maxRetries)
+		time.Sleep(backoffDuration)
+	}
 }
 func (c *Consumer) ProcessTask(ctx context.Context, msg *Task) Result {
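Not part of the diff: the enqueue loop above backs off exponentially and adds jitter before each retry. A minimal standalone sketch of that pattern, assuming utils.CalculateJitter spreads the base delay by roughly the given percentage (its exact behavior is not shown in this commit):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// calculateJitter is a stand-in for utils.CalculateJitter: it spreads the base
// delay by +/- jitterPercent so retrying consumers do not wake in lockstep.
func calculateJitter(base time.Duration, jitterPercent float64) time.Duration {
	span := float64(base) * jitterPercent / 100
	return base + time.Duration((rand.Float64()*2-1)*span)
}

func main() {
	initialDelay := 100 * time.Millisecond // illustrative option values
	maxRetries := 5
	for retryCount := 1; retryCount <= maxRetries; retryCount++ {
		// Same growth as the loop above: initialDelay * 2^retryCount, then jitter.
		backoff := calculateJitter(initialDelay*time.Duration(1<<retryCount), 10)
		fmt.Printf("attempt %d/%d: would sleep %v\n", retryCount, maxRetries, backoff)
	}
}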
@@ -218,7 +244,7 @@ func (c *Consumer) attemptConnect() error {
 			delay = c.opts.maxBackoff
 		}
 	}
 	return fmt.Errorf("could not connect to server %s after %d attempts: %w", c.opts.brokerAddr, c.opts.maxRetries, err)
 }
@@ -362,7 +388,7 @@ func (c *Consumer) StartHTTPAPI() (int, error) {
 		return 0, fmt.Errorf("failed to start listener: %w", err)
 	}
 	port := ln.Addr().(*net.TCPAddr).Port
 	// Create a new HTTP mux and register endpoints.
 	mux := http.NewServeMux()
 	mux.HandleFunc("/stats", c.handleStats)
@@ -370,7 +396,7 @@ func (c *Consumer) StartHTTPAPI() (int, error) {
 	mux.HandleFunc("/pause", c.handlePause)
 	mux.HandleFunc("/resume", c.handleResume)
 	mux.HandleFunc("/stop", c.handleStop)
 	// Start the server in a new goroutine.
 	go func() {
 		// Log errors if the HTTP server stops.
@@ -378,7 +404,7 @@ func (c *Consumer) StartHTTPAPI() (int, error) {
 			log.Printf("HTTP server error on port %d: %v", port, err)
 		}
 	}()
 	log.Printf("HTTP API for consumer %s started on port %d", c.id, port)
 	return port, nil
 }
@@ -389,14 +415,14 @@ func (c *Consumer) handleStats(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
 		return
 	}
 	// Gather consumer and pool stats using formatted metrics.
 	stats := map[string]interface{}{
 		"consumer_id":  c.id,
 		"queue":        c.queue,
 		"pool_metrics": c.pool.FormattedMetrics(),
 	}
 	w.Header().Set("Content-Type", "application/json")
 	if err := json.NewEncoder(w).Encode(stats); err != nil {
 		http.Error(w, fmt.Sprintf("failed to encode stats: %v", err), http.StatusInternalServerError)
@@ -410,7 +436,7 @@ func (c *Consumer) handleUpdate(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
 		return
 	}
 	// Read the request body.
 	body, err := io.ReadAll(r.Body)
 	if err != nil {
@@ -418,13 +444,13 @@ func (c *Consumer) handleUpdate(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 	defer r.Body.Close()
 	// Call the Update method on the consumer (which in turn updates the pool configuration).
 	if err := c.Update(r.Context(), body); err != nil {
 		http.Error(w, fmt.Sprintf("failed to update configuration: %v", err), http.StatusInternalServerError)
 		return
 	}
 	w.Header().Set("Content-Type", "application/json")
 	resp := map[string]string{"status": "configuration updated"}
 	if err := json.NewEncoder(w).Encode(resp); err != nil {
@@ -438,12 +464,12 @@ func (c *Consumer) handlePause(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
 		return
 	}
 	if err := c.Pause(r.Context()); err != nil {
 		http.Error(w, fmt.Sprintf("failed to pause consumer: %v", err), http.StatusInternalServerError)
 		return
 	}
 	w.Header().Set("Content-Type", "application/json")
 	resp := map[string]string{"status": "consumer paused"}
 	json.NewEncoder(w).Encode(resp)
@@ -455,12 +481,12 @@ func (c *Consumer) handleResume(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
 		return
 	}
 	if err := c.Resume(r.Context()); err != nil {
 		http.Error(w, fmt.Sprintf("failed to resume consumer: %v", err), http.StatusInternalServerError)
 		return
 	}
 	w.Header().Set("Content-Type", "application/json")
 	resp := map[string]string{"status": "consumer resumed"}
 	json.NewEncoder(w).Encode(resp)
@@ -472,13 +498,13 @@ func (c *Consumer) handleStop(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "Method Not Allowed", http.StatusMethodNotAllowed)
 		return
 	}
 	// Stop the consumer.
 	if err := c.Stop(r.Context()); err != nil {
 		http.Error(w, fmt.Sprintf("failed to stop consumer: %v", err), http.StatusInternalServerError)
 		return
 	}
 	w.Header().Set("Content-Type", "application/json")
 	resp := map[string]string{"status": "consumer stopped"}
 	json.NewEncoder(w).Encode(resp)
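Not part of the diff: the /stats, /pause, /resume and /stop handlers registered by StartHTTPAPI can be driven with any HTTP client. A rough sketch follows; the port is hard-coded for illustration and the HTTP methods are assumptions, since the hunks above do not show which method each handler accepts:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	// In real use this would be the port returned by consumer.StartHTTPAPI().
	base := fmt.Sprintf("http://localhost:%d", 8081)

	// Pause task processing (method assumed to be POST).
	if _, err := http.Post(base+"/pause", "application/json", nil); err != nil {
		log.Printf("pause failed: %v", err)
	}

	// Fetch the formatted pool metrics (method assumed to be GET).
	resp, err := http.Get(base + "/stats")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	io.Copy(os.Stdout, resp.Body) // JSON with consumer_id, queue and pool_metrics
}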


@@ -8,27 +8,36 @@ import (
 )
 var (
-	TasksProcessed = prometheus.NewCounterVec(
+	taskProcessed = prometheus.NewCounterVec(
 		prometheus.CounterOpts{
 			Name: "tasks_processed_total",
-			Help: "Total number of processed tasks.",
+			Help: "Total number of tasks processed.",
 		},
 		[]string{"status"},
 	)
-	TasksErrors = prometheus.NewCounterVec(
-		prometheus.CounterOpts{
-			Name: "tasks_errors_total",
-			Help: "Total number of errors encountered while processing tasks.",
-		},
-		[]string{"node"},
-	)
+	taskProcessingTime = prometheus.NewHistogram(
+		prometheus.HistogramOpts{
+			Name:    "task_processing_time_seconds",
+			Help:    "Histogram of task processing times.",
+			Buckets: prometheus.DefBuckets,
+		},
+	)
 )
 func init() {
-	prometheus.MustRegister(TasksProcessed)
-	prometheus.MustRegister(TasksErrors)
+	prometheus.MustRegister(taskProcessed)
+	prometheus.MustRegister(taskProcessingTime)
 }
-func HandleHTTP() {
-	http.Handle("/metrics", promhttp.Handler())
+func RecordTaskProcessed(status string) {
+	taskProcessed.WithLabelValues(status).Inc()
+}
+func RecordTaskProcessingTime(duration float64) {
+	taskProcessingTime.Observe(duration)
+}
+func StartMetricsServer(port string) {
+	http.Handle("/metrics", promhttp.Handler())
+	go http.ListenAndServe(port, nil)
 }
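Not part of the diff: a sketch of how the new helpers could be called around task handling. The import path and status labels are assumptions, since neither appears in the hunk above:

package main

import (
	"time"

	"github.com/oarkflow/mq/metrics" // assumed import path for the package above
)

func main() {
	// Serve Prometheus metrics in the background (port format matches ListenAndServe).
	metrics.StartMetricsServer(":2112")

	start := time.Now()
	err := handleTask() // placeholder for real task processing
	metrics.RecordTaskProcessingTime(time.Since(start).Seconds())
	if err != nil {
		metrics.RecordTaskProcessed("failure") // label values are illustrative
	} else {
		metrics.RecordTaskProcessed("success")
	}

	select {} // keep the process alive so /metrics stays reachable
}

func handleTask() error { return nil }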

mq.go

@@ -9,12 +9,12 @@ import (
 	"strings"
 	"sync"
 	"time"
 	"github.com/oarkflow/errors"
 	"github.com/oarkflow/json"
 	"github.com/oarkflow/json/jsonparser"
 	"github.com/oarkflow/mq/codec"
 	"github.com/oarkflow/mq/consts"
 	"github.com/oarkflow/mq/logger"
@@ -69,7 +69,7 @@ func (r *Result) UnmarshalJSON(data []byte) error {
 	}{
 		Alias: (*Alias)(r),
 	}
 	if err := json.Unmarshal(data, &aux); err != nil {
 		return err
 	}
@@ -78,7 +78,7 @@ func (r *Result) UnmarshalJSON(data []byte) error {
 	} else {
 		r.Error = nil
 	}
 	return nil
 }
@@ -174,7 +174,7 @@ func (rl *RateLimiter) Wait() {
 func (rl *RateLimiter) Update(newRate, newBurst int) {
 	rl.mu.Lock()
 	defer rl.mu.Unlock()
 	// Stop the old ticker.
 	rl.ticker.Stop()
 	// Replace the channel with a new one of the new burst capacity.
@@ -283,21 +283,23 @@ type publisher struct {
 }
 type Broker struct {
-	queues     storage.IMap[string, *Queue]
+	queues     storage.IMap[string, storage.IMap[string, *Queue]] // Modified to support tenant-specific queues
 	consumers  storage.IMap[string, *consumer]
 	publishers storage.IMap[string, *publisher]
 	deadLetter storage.IMap[string, *Queue]
 	opts       *Options
+	pIDs       storage.IMap[string, bool]
 	listener   net.Listener
 }
 func NewBroker(opts ...Option) *Broker {
 	options := SetupOptions(opts...)
 	return &Broker{
-		queues:     memory.New[string, *Queue](),
+		queues:     memory.New[string, storage.IMap[string, *Queue]](),
 		publishers: memory.New[string, *publisher](),
 		consumers:  memory.New[string, *consumer](),
 		deadLetter: memory.New[string, *Queue](),
+		pIDs:       memory.New[string, bool](),
 		opts:       options,
 	}
 }
@@ -314,13 +316,16 @@ func (b *Broker) OnClose(ctx context.Context, conn net.Conn) error {
 		con.conn.Close()
 		b.consumers.Del(consumerID)
 	}
-	b.queues.ForEach(func(_ string, queue *Queue) bool {
-		if _, ok := queue.consumers.Get(consumerID); ok {
-			if b.opts.consumerOnClose != nil {
-				b.opts.consumerOnClose(ctx, queue.name, consumerID)
-			}
-			queue.consumers.Del(consumerID)
-		}
-		return true
-	})
+	b.queues.ForEach(func(_ string, tenantQueues storage.IMap[string, *Queue]) bool {
+		tenantQueues.ForEach(func(_ string, queue *Queue) bool {
+			if _, ok := queue.consumers.Get(consumerID); ok {
+				if b.opts.consumerOnClose != nil {
+					b.opts.consumerOnClose(ctx, queue.name, consumerID)
+				}
+				queue.consumers.Del(consumerID)
+			}
+			return true
+		})
+		return true
+	})
 } else {
@@ -329,20 +334,23 @@ func (b *Broker) OnClose(ctx context.Context, conn net.Conn) error {
 	log.Printf("Broker: Consumer connection closed: %s, address: %s", consumerID, conn.RemoteAddr())
 	con.conn.Close()
 	b.consumers.Del(consumerID)
-	b.queues.ForEach(func(_ string, queue *Queue) bool {
-		queue.consumers.Del(consumerID)
-		if _, ok := queue.consumers.Get(consumerID); ok {
-			if b.opts.consumerOnClose != nil {
-				b.opts.consumerOnClose(ctx, queue.name, consumerID)
-			}
-		}
-		return true
-	})
+	b.queues.ForEach(func(_ string, tenantQueues storage.IMap[string, *Queue]) bool {
+		tenantQueues.ForEach(func(_ string, queue *Queue) bool {
+			queue.consumers.Del(consumerID)
+			if _, ok := queue.consumers.Get(consumerID); ok {
+				if b.opts.consumerOnClose != nil {
+					b.opts.consumerOnClose(ctx, queue.name, consumerID)
+				}
+			}
+			return true
+		})
+		return true
+	})
 	}
 	return true
 })
 }
 publisherID, ok := GetPublisherID(ctx)
 if ok && publisherID != "" {
 	log.Printf("Broker: Publisher connection closed: %s, address: %s", publisherID, conn.RemoteAddr())
@@ -496,7 +504,7 @@ func (b *Broker) PublishHandler(ctx context.Context, conn net.Conn, msg *codec.M
 	pub := b.addPublisher(ctx, msg.Queue, conn)
 	taskID, _ := jsonparser.GetString(msg.Payload, "id")
 	log.Printf("BROKER - PUBLISH ~> received from %s on %s for Task %s", pub.id, msg.Queue, taskID)
 	ack := codec.NewMessage(consts.PUBLISH_ACK, utils.ToByte(fmt.Sprintf(`{"id":"%s"}`, taskID)), msg.Queue, msg.Headers)
 	if err := b.send(ctx, conn, ack); err != nil {
 		log.Printf("Error sending PUBLISH_ACK: %v\n", err)
@@ -591,9 +599,14 @@ func (b *Broker) receive(ctx context.Context, c net.Conn) (*codec.Message, error
 }
 func (b *Broker) broadcastToConsumers(msg *codec.Message) {
-	if queue, ok := b.queues.Get(msg.Queue); ok {
-		task := &QueuedTask{Message: msg, RetryCount: 0}
-		queue.tasks <- task
+	if tenantQueues, ok := b.queues.Get(msg.Queue); ok {
+		tenantQueues.ForEach(func(_, queueName string) bool {
+			if queue, ok := tenantQueues.Get(queueName); ok {
+				task := &QueuedTask{Message: msg, RetryCount: 0}
+				queue.tasks <- task
+			}
+			return true
+		})
 	}
 }
@@ -755,6 +768,7 @@ func (b *Broker) dispatchWorker(ctx context.Context, queue *Queue) {
 	for !success && task.RetryCount <= b.opts.maxRetries {
 		if b.dispatchTaskToConsumer(ctx, queue, task) {
 			success = true
+			b.acknowledgeTask(ctx, task.Message.Queue, queue.name)
 		} else {
 			task.RetryCount++
 			delay = b.backoffRetry(queue, task, delay)
@@ -779,6 +793,14 @@ func (b *Broker) sendToDLQ(queue *Queue, task *QueuedTask) {
 func (b *Broker) dispatchTaskToConsumer(ctx context.Context, queue *Queue, task *QueuedTask) bool {
 	var consumerFound bool
 	var err error
+	// Deduplication: Check if the task has already been processed
+	taskID, _ := jsonparser.GetString(task.Message.Payload, "id")
+	if _, exists := b.pIDs.Get(taskID); exists {
+		log.Printf("Task %s already processed, skipping...", taskID)
+		return true
+	}
 	queue.consumers.ForEach(func(_ string, con *consumer) bool {
 		if con.state != consts.ConsumerStateActive {
 			err = fmt.Errorf("consumer %s is not active", con.id)
@@ -786,10 +808,13 @@ func (b *Broker) dispatchTaskToConsumer(ctx context.Context, queue *Queue, task
 		}
 		if err := b.send(ctx, con.conn, task.Message); err == nil {
 			consumerFound = true
+			// Mark the task as processed
+			b.pIDs.Set(taskID, true)
 			return false
 		}
 		return true
 	})
 	if err != nil {
 		log.Println(err.Error())
 		return false
@@ -800,7 +825,7 @@ func (b *Broker) dispatchTaskToConsumer(ctx context.Context, queue *Queue, task
 		result := Result{
 			Status: "NO_CONSUMER",
 			Topic:  queue.name,
-			TaskID: "",
+			TaskID: taskID,
 			Ctx:    ctx,
 		}
 		_ = b.opts.notifyResponse(ctx, result)
@@ -843,8 +868,32 @@ func (b *Broker) NewQueue(name string) *Queue {
 		tasks:     make(chan *QueuedTask, b.opts.queueSize),
 		consumers: memory.New[string, *consumer](),
 	}
-	b.queues.Set(name, q)
+	b.queues.Set(name, memory.New[string, *Queue]())
+	b.queues.Get(name).Set(name, q)
+	// Create DLQ for the queue
+	dlq := &Queue{
+		name:      name + "_dlq",
+		tasks:     make(chan *QueuedTask, b.opts.queueSize),
+		consumers: memory.New[string, *consumer](),
+	}
+	b.deadLetter.Set(name, dlq)
+	ctx := context.Background()
+	go b.dispatchWorker(ctx, q)
+	go b.dispatchWorker(ctx, dlq)
+	return q
+}
+// Ensure message ordering in task queues
+func (b *Broker) NewQueueWithOrdering(name string) *Queue {
+	q := &Queue{
+		name:      name,
+		tasks:     make(chan *QueuedTask, b.opts.queueSize),
+		consumers: memory.New[string, *consumer](),
+	}
+	b.queues.Set(name, memory.New[string, *Queue]())
+	b.queues.Get(name).Set(name, q)
 	// Create DLQ for the queue
 	dlq := &Queue{
 		name: name + "_dlq",
@@ -885,3 +934,134 @@ func (b *Broker) HandleCallback(ctx context.Context, msg *codec.Message) {
 		}
 	}
 }
+// Add explicit acknowledgment for successful task processing
+func (b *Broker) acknowledgeTask(ctx context.Context, taskID string, queueName string) {
+	log.Printf("Acknowledging task %s on queue %s", taskID, queueName)
+	if b.opts.notifyResponse != nil {
+		result := Result{
+			Status: "ACKNOWLEDGED",
+			Topic:  queueName,
+			TaskID: taskID,
+			Ctx:    ctx,
+		}
+		_ = b.opts.notifyResponse(ctx, result)
+	}
+}
+// Add authentication and authorization for publishers and consumers
+func (b *Broker) Authenticate(ctx context.Context, credentials map[string]string) error {
+	username, userExists := credentials["username"]
+	password, passExists := credentials["password"]
+	if !userExists || !passExists {
+		return fmt.Errorf("missing credentials")
+	}
+	// Example: Hardcoded credentials for simplicity
+	if username != "admin" || password != "password" {
+		return fmt.Errorf("invalid credentials")
+	}
+	return nil
+}
+func (b *Broker) Authorize(ctx context.Context, role string, action string) error {
+	// Example: Simple role-based authorization
+	if role == "publisher" && action == "publish" {
+		return nil
+	}
+	if role == "consumer" && action == "consume" {
+		return nil
+	}
+	return fmt.Errorf("unauthorized action")
+}
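Not part of the diff: a sketch of gating a publish behind the new checks. The credential values mirror the hard-coded example above; the surrounding wiring is illustrative rather than an API added by this commit:

package main

import (
	"context"
	"fmt"

	"github.com/oarkflow/mq"
)

// publishIfAllowed runs the new authentication and authorization checks
// before handing work to the broker.
func publishIfAllowed(ctx context.Context, b *mq.Broker) error {
	creds := map[string]string{"username": "admin", "password": "password"}
	if err := b.Authenticate(ctx, creds); err != nil {
		return fmt.Errorf("authentication failed: %w", err)
	}
	if err := b.Authorize(ctx, "publisher", "publish"); err != nil {
		return fmt.Errorf("authorization failed: %w", err)
	}
	// Credentials and role accepted; the actual publish would happen here.
	return nil
}

func main() {
	if err := publishIfAllowed(context.Background(), mq.NewBroker()); err != nil {
		fmt.Println(err)
	}
}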
+// Add support for multi-tenancy
+func (b *Broker) AddTenant(tenantID string) error {
+	if _, exists := b.queues.Get(tenantID); exists {
+		return fmt.Errorf("tenant %s already exists", tenantID)
+	}
+	b.queues.Set(tenantID, memory.New[string, *Queue]())
+	return nil
+}
+func (b *Broker) RemoveTenant(tenantID string) error {
+	if _, exists := b.queues.Get(tenantID); !exists {
+		return fmt.Errorf("tenant %s does not exist", tenantID)
+	}
+	b.queues.Del(tenantID)
+	return nil
+}
+// Ensure tenant-specific queues and operations
+func (b *Broker) NewQueueForTenant(tenantID, queueName string) (*Queue, error) {
+	tenantQueues, ok := b.queues.Get(tenantID)
+	if !ok {
+		return nil, fmt.Errorf("tenant %s does not exist", tenantID)
+	}
+	if _, exists := tenantQueues.Get(queueName); exists {
+		return nil, fmt.Errorf("queue %s already exists for tenant %s", queueName, tenantID)
+	}
+	q := &Queue{
+		name:      queueName,
+		tasks:     make(chan *QueuedTask, b.opts.queueSize),
+		consumers: memory.New[string, *consumer](),
+	}
+	tenantQueues.Set(queueName, q)
+	// Create tenant-specific DLQ
+	dlq := &Queue{
+		name:      queueName + "_dlq",
+		tasks:     make(chan *QueuedTask, b.opts.queueSize),
+		consumers: memory.New[string, *consumer](),
+	}
+	tenantQueues.Set(queueName+"_dlq", dlq)
+	ctx := context.Background()
+	go b.dispatchWorker(ctx, q)
+	go b.dispatchWorker(ctx, dlq)
+	return q, nil
+}
+func (b *Broker) PublishForTenant(ctx context.Context, tenantID string, task *Task, queueName string) error {
+	tenantQueues, ok := b.queues.Get(tenantID)
+	if !ok {
+		return fmt.Errorf("tenant %s does not exist", tenantID)
+	}
+	queue, ok := tenantQueues.Get(queueName)
+	if !ok {
+		return fmt.Errorf("queue %s does not exist for tenant %s", queueName, tenantID)
+	}
+	taskID := task.ID
+	if taskID == "" {
+		taskID = NewID()
+		task.ID = taskID
+	}
+	queuedTask := &QueuedTask{Message: codec.NewMessage(consts.PUBLISH, task.Payload, queueName, nil), RetryCount: 0}
+	queue.tasks <- queuedTask
+	return nil
+}
+func (b *Broker) SubscribeForTenant(ctx context.Context, tenantID, queueName string, conn net.Conn) error {
+	tenantQueues, ok := b.queues.Get(tenantID)
+	if !ok {
+		return fmt.Errorf("tenant %s does not exist", tenantID)
+	}
+	queue, ok := tenantQueues.Get(queueName)
+	if !ok {
+		return fmt.Errorf("queue %s does not exist for tenant %s", queueName, tenantID)
+	}
+	consumerID := b.AddConsumer(ctx, queueName, conn)
+	queue.consumers.Set(consumerID, &consumer{id: consumerID, conn: conn})
+	return nil
+}
+func (b *Broker) ListQueuesForTenant(tenantID string) ([]string, error) {
+	tenantQueues, ok := b.queues.Get(tenantID)
+	if !ok {
+		return nil, fmt.Errorf("tenant %s does not exist", tenantID)
+	}
+	var queueNames []string
+	tenantQueues.ForEach(func(queueName string, _ *Queue) bool {
+		queueNames = append(queueNames, queueName)
+		return true
+	})
+	return queueNames, nil
+}
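Not part of the diff: taken together, the tenant APIs above support flows like the following sketch. Tenant and queue names are illustrative, and the Task payload field is assumed to accept raw JSON bytes:

package main

import (
	"context"
	"log"

	"github.com/oarkflow/mq"
)

func main() {
	b := mq.NewBroker()
	ctx := context.Background()

	// Register a tenant, then create one of its queues (a "_dlq" queue is created alongside).
	if err := b.AddTenant("acme"); err != nil {
		log.Fatal(err)
	}
	if _, err := b.NewQueueForTenant("acme", "orders"); err != nil {
		log.Fatal(err)
	}

	// Publish a task into the tenant's queue.
	task := &mq.Task{Payload: []byte(`{"order_id": 42}`)}
	if err := b.PublishForTenant(ctx, "acme", task, "orders"); err != nil {
		log.Fatal(err)
	}

	// List what the tenant currently owns (includes the DLQ entry).
	queues, err := b.ListQueuesForTenant("acme")
	if err != nil {
		log.Fatal(err)
	}
	log.Println(queues)
}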

task.go

@@ -2,6 +2,7 @@ package mq
 import (
 	"context"
+	"fmt"
 	"time"
 	"github.com/oarkflow/json"
@@ -93,3 +94,20 @@ func WithDedupKey(key string) TaskOption {
 		t.DedupKey = key
 	}
 }
+// Add advanced dead-letter queue management
+func (b *Broker) ReprocessDLQ(queueName string) error {
+	dlqName := queueName + "_dlq"
+	dlq, ok := b.deadLetter.Get(dlqName)
+	if !ok {
+		return fmt.Errorf("dead-letter queue %s does not exist", dlqName)
+	}
+	for {
+		select {
+		case task := <-dlq.tasks:
+			b.NewQueue(queueName).tasks <- task
+		default:
+			return nil
+		}
+	}
+}
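Not part of the diff: a brief sketch of draining a dead-letter queue back onto its primary queue once consumers are healthy again. The queue name is illustrative:

package main

import (
	"log"

	"github.com/oarkflow/mq"
)

func main() {
	b := mq.NewBroker()
	// Move any tasks parked on the "orders" DLQ back onto the primary queue.
	if err := b.ReprocessDLQ("orders"); err != nil {
		log.Printf("DLQ reprocess failed: %v", err)
	}
}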