mirror of https://github.com/HDT3213/delayqueue.git
synced 2025-09-27 03:26:05 +08:00

Commit: travis ci
.travis.yml (new file, 9 additions)

@@ -0,0 +1,9 @@
+language: go
+go:
+  - 1.16.x
+before_install:
+  - sudo apt-get install redis-server; redis-server &
+  - go install github.com/mattn/goveralls@latest
+script:
+  - $GOPATH/bin/goveralls -service=travis-ci
README.md (15 additions)

@@ -1,5 +1,11 @@
 # DelayQueue
 
+[](https://app.travis-ci.com/github/HDT3213/delayqueue)
+[](https://coveralls.io/github/HDT3213/delayqueue?branch=master)
+[](https://goreportcard.com/report/github.com/HDT3213/delayqueue)
+[](https://pkg.go.dev/github.com/hdt3213/delayqueue)
+
 DelayQueue is a message queue supporting delayed/scheduled delivery based on redis.
 
 DelayQueue guarantees to deliver at least once.
@@ -76,3 +82,12 @@ WithFetchLimit(limit uint)
 ```
 
 WithFetchLimit limits the max number of messages at one time
+
+```
+WithDefaultRetryCount(count uint)
+```
+
+WithDefaultRetryCount customizes the maximum number of retries; it affects all messages in this queue.
+
+Use WithRetryCount during DelayQueue.SendScheduleMsg or DelayQueue.SendDelayMsg to specify the retry count of a particular message.
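For illustration, a minimal sketch of how these options might be combined in client code. The constructor shape (delayqueue.NewQueue(name, redisClient, callback)) and the go-redis/v8 import are assumptions inferred from the test code touched in this commit, not confirmed by this diff; only SendDelayMsg, WithRetryCount, WithDefaultRetryCount and WithFetchLimit appear in the changes themselves, so check the package docs for exact signatures.

```go
package main

import (
	"log"
	"time"

	"github.com/go-redis/redis/v8"
	"github.com/hdt3213/delayqueue"
)

func main() {
	redisCli := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})

	// Assumed constructor shape (name, redis client, consume callback), based on
	// the test file in this commit; the callback returns true to ack the message.
	queue := delayqueue.NewQueue("example", redisCli, func(payload string) bool {
		log.Println("consumed:", payload)
		return true
	}).
		WithDefaultRetryCount(3). // queue-wide default introduced by this commit
		WithFetchLimit(10)        // fetch at most 10 messages at a time

	// Per-message override via WithRetryCount, as described in the README section above.
	if err := queue.SendDelayMsg("hello", time.Second, delayqueue.WithRetryCount(5)); err != nil {
		log.Fatal(err)
	}
}
```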
@@ -29,7 +29,7 @@ type DelayQueue struct {
 	maxConsumeDuration time.Duration
 	msgTTL             time.Duration
-	defaultRetryCount  int
+	defaultRetryCount  uint
 	fetchInterval      time.Duration
 	fetchLimit         uint
 }
@@ -92,6 +92,13 @@ func (q *DelayQueue) WithFetchLimit(limit uint) *DelayQueue {
 	return q
 }
 
+// WithDefaultRetryCount customizes the maximum number of retries; it affects all messages in this queue.
+// Use WithRetryCount during DelayQueue.SendScheduleMsg or DelayQueue.SendDelayMsg to specify the retry count of a particular message.
+func (q *DelayQueue) WithDefaultRetryCount(count uint) *DelayQueue {
+	q.defaultRetryCount = count
+	return q
+}
+
 func (q *DelayQueue) genMsgKey(idStr string) string {
 	return "dp:" + q.name + ":msg:" + idStr
 }
@@ -127,7 +134,7 @@ func (q *DelayQueue) SendScheduleMsg(payload string, t time.Time, opts ...interf
 	for _, opt := range opts {
 		switch o := opt.(type) {
 		case retryCountOpt:
-			retryCount = int(o)
+			retryCount = uint(o)
 		}
 	}
 	// generate id
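The retryCountOpt handling above uses Go's empty-interface option pattern: options are passed as ...interface{} values and recovered with a type switch. A self-contained sketch of that pattern follows; names other than retryCountOpt are illustrative stand-ins, not the package's actual API.

```go
package main

import "fmt"

// retryCountOpt mirrors the pattern in the diff: a small defined type that is
// passed through an ...interface{} parameter and recovered with a type switch.
type retryCountOpt uint

// WithRetryCount wraps a count so callers can pass it as an option value.
func WithRetryCount(count uint) interface{} { return retryCountOpt(count) }

// send applies any recognized options, falling back to a default retry count.
func send(payload string, opts ...interface{}) {
	retryCount := uint(3) // stand-in for the queue's defaultRetryCount
	for _, opt := range opts {
		switch o := opt.(type) {
		case retryCountOpt:
			retryCount = uint(o)
		}
	}
	fmt.Printf("sending %q with retryCount=%d\n", payload, retryCount)
}

func main() {
	send("uses-default")
	send("overridden", WithRetryCount(5))
}
```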
@@ -71,7 +71,7 @@ func TestDelayQueue_StopConsume(t *testing.T) {
 			queue.StopConsume()
 		}
 		return true
-	})
+	}).WithDefaultRetryCount(1)
 	for i := 0; i < size; i++ {
 		err := queue.SendDelayMsg(strconv.Itoa(i), 0)
 		if err != nil {
@@ -102,7 +102,3 @@ func TestIDOverflow(t *testing.T) {
 		}
 	}
 }
-
-func TestDelayQUeu(t *testing.T) {
-
-}