Mirror of https://github.com/oarkflow/mq.git (synced 2025-10-06 00:16:49 +08:00)
fix: ui for DOT
broker.go (18 changed lines)
@@ -38,7 +38,7 @@ type Broker struct {
 	queues     storage.IMap[string, *Queue]
 	consumers  storage.IMap[string, *consumer]
 	publishers storage.IMap[string, *publisher]
-	deadLetter storage.IMap[string, *Queue] // DLQ mapping for each queue
+	deadLetter storage.IMap[string, *Queue]
 	opts       *Options
 }
 
@@ -241,7 +241,6 @@ func (b *Broker) SubscribeHandler(ctx context.Context, conn net.Conn, msg *codec
 func (b *Broker) Start(ctx context.Context) error {
	var listener net.Listener
	var err error
-
	if b.opts.tlsConfig.UseTLS {
		cert, err := tls.LoadX509KeyPair(b.opts.tlsConfig.CertPath, b.opts.tlsConfig.KeyPath)
		if err != nil {
@@ -263,37 +262,30 @@ func (b *Broker) Start(ctx context.Context) error {
 		log.Println("BROKER - RUNNING ~> started on", b.opts.brokerAddr)
 	}
 	defer listener.Close()
-
-	// Limit the number of concurrent connections
 	const maxConcurrentConnections = 100
 	sem := make(chan struct{}, maxConcurrentConnections)
-
 	for {
 		conn, err := listener.Accept()
 		if err != nil {
 			b.OnError(ctx, conn, err)
-			time.Sleep(50 * time.Millisecond) // Slow down retry on errors
+			time.Sleep(50 * time.Millisecond)
 			continue
 		}
-
-		// Control concurrency by using a semaphore
 		sem <- struct{}{}
 		go func(c net.Conn) {
 			defer func() {
-				<-sem // Release semaphore
+				<-sem
 				c.Close()
 			}()
-
 			for {
-				// Attempt to read the message
 				err := b.readMessage(ctx, c)
 				if err != nil {
 					if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
 						log.Println("Temporary network error, retrying:", netErr)
-						continue // Retry on temporary errors
+						continue
 					}
 					log.Println("Connection closed due to error:", err)
-					break // Break the loop and close the connection
+					break
 				}
 			}
 		}(conn)
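
The accept loop above bounds concurrency with a buffered channel used as a counting semaphore; this commit only strips the explanatory comments, the mechanism is unchanged. A minimal standalone sketch of the same pattern, assuming a hypothetical handle callback for the per-connection work:

package example

import (
	"log"
	"net"
)

// serve accepts connections and bounds concurrent handlers with a buffered
// channel acting as a counting semaphore: a send acquires a slot, a receive
// releases one, so at most cap(sem) handlers run at once.
func serve(listener net.Listener, handle func(net.Conn)) {
	const maxConcurrentConnections = 100
	sem := make(chan struct{}, maxConcurrentConnections)
	for {
		conn, err := listener.Accept()
		if err != nil {
			log.Println("accept error:", err)
			continue
		}
		sem <- struct{}{} // blocks once maxConcurrentConnections handlers are in flight
		go func(c net.Conn) {
			defer func() {
				<-sem // release the semaphore slot
				c.Close()
			}()
			handle(c)
		}(conn)
	}
}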
@@ -56,7 +56,6 @@ func SendMessage(ctx context.Context, conn net.Conn, msg *Message) error {
 	if err != nil {
 		return err
 	}
-
 	totalLength := 4 + len(data)
 	buffer := byteBufferPool.Get().([]byte)
 	if cap(buffer) < totalLength {
@@ -65,10 +64,8 @@ func SendMessage(ctx context.Context, conn net.Conn, msg *Message) error {
 		buffer = buffer[:totalLength]
 	}
 	defer byteBufferPool.Put(buffer)
-
 	binary.BigEndian.PutUint32(buffer[:4], uint32(len(data)))
 	copy(buffer[4:], data)
-
 	writer := bufio.NewWriter(conn)
 	select {
 	case <-ctx.Done():
@@ -78,7 +75,6 @@ func SendMessage(ctx context.Context, conn net.Conn, msg *Message) error {
 			return err
 		}
 	}
-
 	return writer.Flush()
 }
 
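
SendMessage frames each message as a 4-byte big-endian length header followed by the payload, written from a pooled buffer. A sketch of the matching read side under that framing; readFrame is a hypothetical helper, not the repo's actual readMessage:

package example

import (
	"encoding/binary"
	"io"
)

// readFrame reads one length-prefixed frame: a 4-byte big-endian length
// header followed by exactly that many payload bytes.
func readFrame(r io.Reader) ([]byte, error) {
	var header [4]byte
	if _, err := io.ReadFull(r, header[:]); err != nil {
		return nil, err
	}
	n := binary.BigEndian.Uint32(header[:])
	payload := make([]byte, n)
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	return payload, nil
}

io.ReadFull reports a truncated frame as io.ErrUnexpectedEOF, so a short read surfaces as an error instead of a silently partial payload.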
@@ -71,6 +71,10 @@ func (c *Consumer) SetKey(key string) {
 	c.id = key
 }
 
+func (c *Consumer) Metrics() Metrics {
+	return c.pool.Metrics()
+}
+
 func (c *Consumer) subscribe(ctx context.Context, queue string) error {
 	headers := HeadersWithConsumerID(ctx, c.id)
 	msg := codec.NewMessage(consts.SUBSCRIBE, utils.ToByte("{}"), queue, headers)
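
The added Metrics accessor simply forwards to the consumer's worker pool. A hedged usage sketch; logConsumerMetrics is illustrative, not part of the repo:

package example

import (
	"log"

	"github.com/oarkflow/mq"
)

// logConsumerMetrics dumps the worker-pool metrics that the new
// accessor proxies; c is an already-constructed, subscribed consumer.
func logConsumerMetrics(c *mq.Consumer) {
	log.Printf("consumer metrics: %+v", c.Metrics())
}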
dag/dag.go (40 changed lines)
@@ -358,24 +358,31 @@ func (tm *DAG) ProcessTask(ctx context.Context, task *mq.Task) mq.Result {
 	return result
 }
 
-func (tm *DAG) Process(ctx context.Context, payload []byte) mq.Result {
+func (tm *DAG) check(ctx context.Context, payload []byte) (context.Context, *mq.Task, error) {
 	tm.mu.RLock()
 	if tm.paused {
 		tm.mu.RUnlock()
-		return mq.Result{Error: fmt.Errorf("unable to process task, error: DAG is not accepting any task")}
+		return ctx, nil, fmt.Errorf("unable to process task, error: DAG is not accepting any task")
 	}
 	tm.mu.RUnlock()
 	if !tm.IsReady() {
-		return mq.Result{Error: fmt.Errorf("unable to process task, error: DAG is not ready yet")}
+		return ctx, nil, fmt.Errorf("unable to process task, error: DAG is not ready yet")
 	}
 	initialNode, err := tm.parseInitialNode(ctx)
 	if err != nil {
-		return mq.Result{Error: err}
+		return ctx, nil, err
 	}
 	if tm.server.SyncMode() {
 		ctx = mq.SetHeaders(ctx, map[string]string{consts.AwaitResponseKey: "true"})
 	}
-	task := mq.NewTask(mq.NewID(), payload, initialNode)
+	return ctx, mq.NewTask(mq.NewID(), payload, initialNode), nil
+}
+
+func (tm *DAG) Process(ctx context.Context, payload []byte) mq.Result {
+	ctx, task, err := tm.check(ctx, payload)
+	if err != nil {
+		return mq.Result{Error: fmt.Errorf("unable to process task, error: DAG is not accepting any task")}
+	}
 	awaitResponse, _ := mq.GetAwaitResponse(ctx)
 	if awaitResponse != "true" {
 		headers, ok := mq.GetHeaders(ctx)
@@ -384,38 +391,25 @@ func (tm *DAG) Process(ctx context.Context, payload []byte) mq.Result {
|
|||||||
ctxx = mq.SetHeaders(ctxx, headers.AsMap())
|
ctxx = mq.SetHeaders(ctxx, headers.AsMap())
|
||||||
}
|
}
|
||||||
if err := tm.pool.EnqueueTask(ctxx, task, 0); err != nil {
|
if err := tm.pool.EnqueueTask(ctxx, task, 0); err != nil {
|
||||||
return mq.Result{CreatedAt: task.CreatedAt, TaskID: task.ID, Topic: initialNode, Status: "FAILED", Error: err}
|
return mq.Result{CreatedAt: task.CreatedAt, TaskID: task.ID, Topic: task.Topic, Status: "FAILED", Error: err}
|
||||||
}
|
}
|
||||||
return mq.Result{CreatedAt: task.CreatedAt, TaskID: task.ID, Topic: initialNode, Status: "PENDING"}
|
return mq.Result{CreatedAt: task.CreatedAt, TaskID: task.ID, Topic: task.Topic, Status: "PENDING"}
|
||||||
}
|
}
|
||||||
return tm.ProcessTask(ctx, task)
|
return tm.ProcessTask(ctx, task)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tm *DAG) ScheduleTask(ctx context.Context, payload []byte, opts ...mq.SchedulerOption) mq.Result {
|
func (tm *DAG) ScheduleTask(ctx context.Context, payload []byte, opts ...mq.SchedulerOption) mq.Result {
|
||||||
tm.mu.RLock()
|
ctx, task, err := tm.check(ctx, payload)
|
||||||
if tm.paused {
|
if err != nil {
|
||||||
tm.mu.RUnlock()
|
|
||||||
return mq.Result{Error: fmt.Errorf("unable to process task, error: DAG is not accepting any task")}
|
return mq.Result{Error: fmt.Errorf("unable to process task, error: DAG is not accepting any task")}
|
||||||
}
|
}
|
||||||
tm.mu.RUnlock()
|
|
||||||
if !tm.IsReady() {
|
|
||||||
return mq.Result{Error: fmt.Errorf("unable to process task, error: DAG is not ready yet")}
|
|
||||||
}
|
|
||||||
initialNode, err := tm.parseInitialNode(ctx)
|
|
||||||
if err != nil {
|
|
||||||
return mq.Result{Error: err}
|
|
||||||
}
|
|
||||||
if tm.server.SyncMode() {
|
|
||||||
ctx = mq.SetHeaders(ctx, map[string]string{consts.AwaitResponseKey: "true"})
|
|
||||||
}
|
|
||||||
task := mq.NewTask(mq.NewID(), payload, initialNode)
|
|
||||||
headers, ok := mq.GetHeaders(ctx)
|
headers, ok := mq.GetHeaders(ctx)
|
||||||
ctxx := context.Background()
|
ctxx := context.Background()
|
||||||
if ok {
|
if ok {
|
||||||
ctxx = mq.SetHeaders(ctxx, headers.AsMap())
|
ctxx = mq.SetHeaders(ctxx, headers.AsMap())
|
||||||
}
|
}
|
||||||
tm.pool.Scheduler().AddTask(ctxx, task, opts...)
|
tm.pool.Scheduler().AddTask(ctxx, task, opts...)
|
||||||
return mq.Result{CreatedAt: task.CreatedAt, TaskID: task.ID, Topic: initialNode, Status: "PENDING"}
|
return mq.Result{CreatedAt: task.CreatedAt, TaskID: task.ID, Topic: task.Topic, Status: "PENDING"}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tm *DAG) parseInitialNode(ctx context.Context) (string, error) {
|
func (tm *DAG) parseInitialNode(ctx context.Context) (string, error) {
|
||||||
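
The refactor above extracts the shared guard logic (pause flag, readiness check, initial-node resolution, task construction) into check, so Process and ScheduleTask can no longer drift apart. A caller-side sketch, assuming d is a configured *dag.DAG whose setup is outside this diff:

package example

import (
	"context"
	"log"

	"github.com/oarkflow/mq/dag"
)

// run is a hypothetical caller; both entry points go through check, so a
// paused or not-ready DAG fails the same way for inline and scheduled tasks.
func run(ctx context.Context, d *dag.DAG) {
	payload := []byte(`{"input": "value"}`) // illustrative payload

	// Inline processing: check validates, then the task runs (or is enqueued).
	if res := d.Process(ctx, payload); res.Error != nil {
		log.Println("process failed:", res.Error)
	}

	// Scheduled processing enqueues through the pool's scheduler instead.
	res := d.ScheduleTask(ctx, payload)
	log.Println("scheduled", res.TaskID, "status:", res.Status)
}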
@@ -14,6 +14,7 @@ import (
 type TaskManager struct {
 	createdAt   time.Time
 	processedAt time.Time
+	status      string
 	dag         *DAG
 	nodeResults map[string]mq.Result
 	wg          *WaitGroup