feat: remove unnecessary dependencies

Author: sujit
Date:   2024-10-14 22:21:53 +05:45
Parent: fb3be07d6b
Commit: aa0cecf1fe

10 changed files with 185 additions and 42 deletions


@@ -10,11 +10,11 @@ import (
 	"strings"
 	"time"
-	"github.com/oarkflow/xsync"
 	"github.com/oarkflow/mq/codec"
 	"github.com/oarkflow/mq/consts"
 	"github.com/oarkflow/mq/jsonparser"
+	"github.com/oarkflow/mq/storage"
+	"github.com/oarkflow/mq/storage/memory"
 	"github.com/oarkflow/mq/utils"
 )
@@ -35,18 +35,18 @@ type publisher struct {
 }
 
 type Broker struct {
-	queues     xsync.IMap[string, *Queue]
-	consumers  xsync.IMap[string, *consumer]
-	publishers xsync.IMap[string, *publisher]
+	queues     storage.IMap[string, *Queue]
+	consumers  storage.IMap[string, *consumer]
+	publishers storage.IMap[string, *publisher]
 	opts       *Options
 }
 
 func NewBroker(opts ...Option) *Broker {
 	options := SetupOptions(opts...)
 	return &Broker{
-		queues:     xsync.NewMap[string, *Queue](),
-		publishers: xsync.NewMap[string, *publisher](),
-		consumers:  xsync.NewMap[string, *consumer](),
+		queues:     memory.New[string, *Queue](),
+		publishers: memory.New[string, *publisher](),
+		consumers:  memory.New[string, *consumer](),
 		opts:       options,
 	}
 }


@@ -5,8 +5,6 @@ import (
 	"net"
 	"sync"
-	"github.com/oarkflow/msgpack"
 	"github.com/oarkflow/mq/consts"
 )
@@ -30,7 +28,7 @@ func NewMessage(cmd consts.CMD, payload []byte, queue string, headers map[string
 func (m *Message) Serialize() ([]byte, error) {
 	m.m.RLock()
 	defer m.m.RUnlock()
-	data, err := msgpack.Marshal(m)
+	data, err := Marshal(m)
 	if err != nil {
 		return nil, err
 	}
@@ -39,7 +37,7 @@ func (m *Message) Serialize() ([]byte, error) {
 func Deserialize(data []byte) (*Message, error) {
 	var msg Message
-	if err := msgpack.Unmarshal(data, &msg); err != nil {
+	if err := Unmarshal(data, &msg); err != nil {
 		return nil, err
 	}

codec/serializer.go (new file)

@@ -0,0 +1,37 @@
+package codec
+
+import (
+	"encoding/json"
+)
+
+type MarshallerFunc func(v any) ([]byte, error)
+
+type UnmarshallerFunc func(data []byte, v any) error
+
+func (f MarshallerFunc) Marshal(v any) ([]byte, error) {
+	return f(v)
+}
+
+func (f UnmarshallerFunc) Unmarshal(data []byte, v any) error {
+	return f(data, v)
+}
+
+var defaultMarshaller MarshallerFunc = json.Marshal
+
+var defaultUnmarshaller UnmarshallerFunc = json.Unmarshal
+
+func SetMarshaller(marshaller MarshallerFunc) {
+	defaultMarshaller = marshaller
+}
+
+func SetUnmarshaller(unmarshaller UnmarshallerFunc) {
+	defaultUnmarshaller = unmarshaller
+}
+
+func Marshal(v any) ([]byte, error) {
+	return defaultMarshaller(v)
+}
+
+func Unmarshal(data []byte, v any) error {
+	return defaultUnmarshaller(data, v)
+}
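
The new serializer defaults to encoding/json, so msgpack is no longer a hard dependency of this module; callers that still want msgpack can inject it themselves. A minimal sketch, assuming the caller re-adds github.com/oarkflow/msgpack to its own go.mod and its Marshal/Unmarshal match the shapes used in the old code (the payload value is illustrative):

package main

import (
	"fmt"

	"github.com/oarkflow/msgpack" // assumption: re-added by the caller, not by this repo
	"github.com/oarkflow/mq/codec"
)

func main() {
	// Swap the default JSON (un)marshaller for msgpack at startup.
	codec.SetMarshaller(msgpack.Marshal)
	codec.SetUnmarshaller(msgpack.Unmarshal)

	data, err := codec.Marshal(map[string]string{"queue": "demo"})
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded %d bytes with msgpack\n", len(data))
}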

ctx.go

@@ -11,9 +11,10 @@ import (
 	"time"
 
 	"github.com/oarkflow/xid"
-	"github.com/oarkflow/xsync"
 	"github.com/oarkflow/mq/consts"
+	"github.com/oarkflow/mq/storage"
+	"github.com/oarkflow/mq/storage/memory"
 )
 
 type Task struct {
@@ -41,7 +42,7 @@ func IsClosed(conn net.Conn) bool {
 func SetHeaders(ctx context.Context, headers map[string]string) context.Context {
 	hd, _ := GetHeaders(ctx)
 	if hd == nil {
-		hd = xsync.NewMap[string, string]()
+		hd = memory.New[string, string]()
 	}
 	for key, val := range headers {
 		hd.Set(key, val)
@@ -52,7 +53,7 @@ func SetHeaders(ctx context.Context, headers map[string]string) context.Context {
 func WithHeaders(ctx context.Context, headers map[string]string) map[string]string {
 	hd, _ := GetHeaders(ctx)
 	if hd == nil {
-		hd = xsync.NewMap[string, string]()
+		hd = memory.New[string, string]()
 	}
 	for key, val := range headers {
 		hd.Set(key, val)
@@ -60,8 +61,8 @@ func WithHeaders(ctx context.Context, headers map[string]string) map[string]string {
 	return hd.AsMap()
 }
 
-func GetHeaders(ctx context.Context) (xsync.IMap[string, string], bool) {
-	headers, ok := ctx.Value(consts.HeaderKey).(xsync.IMap[string, string])
+func GetHeaders(ctx context.Context) (storage.IMap[string, string], bool) {
+	headers, ok := ctx.Value(consts.HeaderKey).(storage.IMap[string, string])
 	return headers, ok
 }
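
With xsync gone, header maps created by SetHeaders are backed by the new memory package and read back through storage.IMap. A minimal usage sketch (the header key and value are illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/oarkflow/mq"
)

func main() {
	// SetHeaders lazily creates a memory-backed map and stores it on the context.
	ctx := mq.SetHeaders(context.Background(), map[string]string{"tenant": "acme"})

	// GetHeaders returns the map through the storage.IMap interface.
	if hd, ok := mq.GetHeaders(ctx); ok {
		hd.ForEach(func(k, v string) bool {
			fmt.Println(k, "=", v)
			return true // continue iteration
		})
	}
}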


@@ -25,22 +25,26 @@ var (
 )
 
 func main() {
+	subDag := dag.NewDAG(
+		"Sub DAG",
+		"D",
+		mq.WithNotifyResponse(tasks.NotifySubDAGResponse),
+	)
+	subDag.AddNode("D", "D", tasks.Node4, true)
+	subDag.AddNode("F", "F", tasks.Node6)
+	subDag.AddNode("G", "G", tasks.Node7)
+	subDag.AddNode("H", "H", tasks.Node8)
+	subDag.AddEdge("Label 2", "D", "F")
+	subDag.AddEdge("Label 4", "F", "G", "H")
+
 	d.AddNode("A", "A", tasks.Node1, true)
 	d.AddNode("B", "B", tasks.Node2)
 	d.AddNode("C", "C", tasks.Node3)
-	d.AddNode("D", "D", tasks.Node4)
+	d.AddDAGNode("D", "D", subDag)
 	d.AddNode("E", "E", tasks.Node5)
-	d.AddNode("F", "F", tasks.Node6)
-	d.AddNode("G", "G", tasks.Node7)
-	d.AddNode("H", "H", tasks.Node8)
 	d.AddLoop("Send each item", "A", "B")
 	d.AddCondition("C", map[dag.When]dag.Then{"PASS": "D", "FAIL": "E"})
 	d.AddEdge("Label 1", "B", "C")
-	d.AddEdge("Label 2", "D", "F")
 	d.AddEdge("Label 3", "E", "F")
-	d.AddEdge("Label 4", "F", "G", "H")
 	// Classify edges
 	// d.ClassifyEdges()
 	// fmt.Println(d.ExportDOT())

go.mod

@@ -2,10 +2,4 @@ module github.com/oarkflow/mq
 
 go 1.23.0
 
-require (
-	github.com/oarkflow/msgpack v0.0.1
-	github.com/oarkflow/xid v1.2.5
-	github.com/oarkflow/xsync v0.0.5
-)
-
-require github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
+require github.com/oarkflow/xid v1.2.5

go.sum

@@ -1,8 +1,2 @@
-github.com/oarkflow/msgpack v0.0.1 h1:q7CvtT1/2TU+qoXgiQZ6BW4E9fAGOQ9bATWFFdFNZUI=
-github.com/oarkflow/msgpack v0.0.1/go.mod h1:LthukEYeLGz+NEYzN6voNUVHAiLR8A3HX2DM40O3QBg=
 github.com/oarkflow/xid v1.2.5 h1:6RcNJm9+oZ/B647gkME9trCzhpxGQaSdNoD56Vmkeho=
 github.com/oarkflow/xid v1.2.5/go.mod h1:jG4YBh+swbjlWApGWDBYnsJEa7hi3CCpmuqhB3RAxVo=
-github.com/oarkflow/xsync v0.0.5 h1:7HBQjmDus4YFLQFC5D197TB4c2YJTVwsTFuqk5zWKBM=
-github.com/oarkflow/xsync v0.0.5/go.mod h1:KAaEc506OEd3ISxfhgUBKxk8eQzkz+mb0JkpGGd/QwU=
-github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
-github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=


@@ -1,11 +1,12 @@
 package mq
 
 import (
-	"github.com/oarkflow/xsync"
+	"github.com/oarkflow/mq/storage"
+	"github.com/oarkflow/mq/storage/memory"
 )
 
 type Queue struct {
-	consumers xsync.IMap[string, *consumer]
+	consumers storage.IMap[string, *consumer]
 	tasks     chan *QueuedTask // channel to hold tasks
 	name      string
 }
@@ -13,7 +14,7 @@ type Queue struct {
 func newQueue(name string, queueSize int) *Queue {
 	return &Queue{
 		name:      name,
-		consumers: xsync.NewMap[string, *consumer](),
+		consumers: memory.New[string, *consumer](),
 		tasks:     make(chan *QueuedTask, queueSize), // buffer size for tasks
 	}
 }

storage/interface.go (new file)

@@ -0,0 +1,15 @@
+package storage
+
+// IMap is a thread-safe map interface.
+type IMap[K comparable, V any] interface {
+	Get(K) (V, bool)
+	Set(K, V)
+	Del(K)
+	ForEach(func(K, V) bool)
+	Clear()
+	Size() int
+	Keys() []K
+	Values() []V
+	AsMap() map[K]V
+	Clone() IMap[K, V]
+}

storage/memory/memory.go (new file)

@@ -0,0 +1,99 @@
+package memory
+
+import (
+	"sync"
+
+	"github.com/oarkflow/mq/storage"
+)
+
+type Map[K comparable, V any] struct {
+	data map[K]V
+	mu   sync.RWMutex
+}
+
+func New[K comparable, V any]() *Map[K, V] {
+	return &Map[K, V]{
+		data: make(map[K]V),
+	}
+}
+
+func (m *Map[K, V]) Get(key K) (V, bool) {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	val, exists := m.data[key]
+	return val, exists
+}
+
+func (m *Map[K, V]) Set(key K, value V) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.data[key] = value
+}
+
+func (m *Map[K, V]) Del(key K) {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	delete(m.data, key)
+}
+
+func (m *Map[K, V]) ForEach(f func(K, V) bool) {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	for k, v := range m.data {
+		if !f(k, v) {
+			break
+		}
+	}
+}
+
+func (m *Map[K, V]) Clear() {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+	m.data = make(map[K]V)
+}
+
+func (m *Map[K, V]) Size() int {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	return len(m.data)
+}
+
+func (m *Map[K, V]) Keys() []K {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	keys := make([]K, 0, len(m.data))
+	for k := range m.data {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+func (m *Map[K, V]) Values() []V {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	values := make([]V, 0, len(m.data))
+	for _, v := range m.data {
+		values = append(values, v)
+	}
+	return values
+}
+
+func (m *Map[K, V]) AsMap() map[K]V {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	copiedMap := make(map[K]V, len(m.data))
+	for k, v := range m.data {
+		copiedMap[k] = v
+	}
+	return copiedMap
+}
+
+func (m *Map[K, V]) Clone() storage.IMap[K, V] {
+	m.mu.RLock()
+	defer m.mu.RUnlock()
+	clonedMap := New[K, V]()
+	for k, v := range m.data {
+		clonedMap.Set(k, v)
+	}
+	return clonedMap
+}
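
A short sketch of the replacement map in use, with a compile-time check that *memory.Map satisfies storage.IMap (the keys and values are illustrative):

package main

import (
	"fmt"

	"github.com/oarkflow/mq/storage"
	"github.com/oarkflow/mq/storage/memory"
)

// Compile-time assertion that *memory.Map implements storage.IMap.
var _ storage.IMap[string, int] = (*memory.Map[string, int])(nil)

func main() {
	m := memory.New[string, int]()
	m.Set("pending", 3)
	m.Set("done", 7)

	if v, ok := m.Get("pending"); ok {
		fmt.Println("pending:", v)
	}
	fmt.Println("size:", m.Size(), "keys:", m.Keys())

	// Clone returns an independent copy behind the same interface.
	snapshot := m.Clone()
	snapshot.Del("done")
	fmt.Println("original unchanged:", m.Size() == 2)
}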