Compare commits

...

29 Commits

Author SHA1 Message Date
mochi-co
074e1b06ae fix docker image 2024-03-18 04:10:16 +00:00
JB
26418c6fd8 Implement File based configuration (#351)
* Implement file-based configuration

* Implement file-based configuration

* Replace DefaultServerCapabilities with NewDefaultServerCapabilities() to avoid data race (#360)

Co-authored-by: JB <28275108+mochi-co@users.noreply.github.com>

* Only pass a copy of system.Info to hooks (#365)

* Only pass a copy of system.Info to hooks

* Rename Itoa to Int64toa

---------

Co-authored-by: JB <28275108+mochi-co@users.noreply.github.com>

* Allow configurable max stored qos > 0 messages (#359)

* Allow configurable max stored qos > 0 messages

* Only rollback Inflight if QoS > 0

* Only rollback Inflight if QoS > 0

* Minor refactor

* Update server version

* Implement file-based configuration

* Implement file-based configuration

* update configs with maximum_inflight value

* update docker configuration

* fix tests

---------

Co-authored-by: mochi-co <moumochi@icloud.com>
Co-authored-by: thedevop <60499013+thedevop@users.noreply.github.com>
2024-03-18 03:28:12 +00:00
werben
26720c2f6e Add BadgerDB garbage collection. (#371)
* For issues #370, #369, and #363, add BadgerDB garbage collection.

* Add default configuration for defaultGcInterval.

* Solve DATA RACE.

* Place Badger's configuration in main.go for users to adjust as needed.

* Add TestGcLoop() for coverage.

* Modify GcInterval to shorten test time.

* Add the GcDiscardRatio option for the Badger hook, and include more detailed comments in the example.

---------

Co-authored-by: JB <28275108+mochi-co@users.noreply.github.com>
2024-03-18 00:16:55 +00:00
dependabot[bot]
d30592b95b Bump google.golang.org/protobuf from 1.28.1 to 1.33.0 (#372)
Bumps google.golang.org/protobuf from 1.28.1 to 1.33.0.

---
updated-dependencies:
- dependency-name: google.golang.org/protobuf
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-17 22:18:03 +00:00
mochi-co
40e9cdb383 Update server version 2024-02-05 21:37:41 +00:00
thedevop
e9f72154b6 Allow configurable max stored qos > 0 messages (#359)
* Allow configurable max stored qos > 0 messages

* Only rollback Inflight if QoS > 0

* Only rollback Inflight if QoS > 0

* Minor refactor
2024-02-05 21:24:34 +00:00
thedevop
69412dd23c Only pass a copy of system.Info to hooks (#365)
* Only pass a copy of system.Info to hooks

* Rename Itoa to Int64toa

---------

Co-authored-by: JB <28275108+mochi-co@users.noreply.github.com>
2024-02-04 13:36:50 +00:00
thedevop
686c35ac0c Replace DefaultServerCapabilities with NewDefaultServerCapabilities() to avoid data race (#360)
Co-authored-by: JB <28275108+mochi-co@users.noreply.github.com>
2024-02-04 09:23:26 +00:00
mochi-co
65c78534dc Update server version 2024-01-11 22:56:55 +00:00
thedevop
83db7fff56 Buffer optimizations (#355)
* Avoid creating buffer if pkt larger than ClientNetWriteBufferSize

* Use mempool for Properties Encode

* Use the more efficient WriteTo instead of Write for Buffer to Buffer write

---------

Co-authored-by: JB <28275108+mochi-co@users.noreply.github.com>
2024-01-10 08:15:06 +00:00
werben
10a02ab3c7 Fix #349, MQTT v3 unsuback does not require Reason Code. (#353) 2024-01-10 07:55:41 +00:00
mochi-co
5058333f36 Update server version 2023-12-22 00:40:56 +00:00
werben
b2ab984949 Move cl.WriteLoop() to attachClient() and call cl.Stop() in loadClients() to update client.State. (#344)
* Moving go cl.WriteLoop() out of NewClient() and placing it in server.attachClient().

* Call cl.Stop() to cancel the context, update cl.State with information such as disconnected time, and set the stopCause.

* update README-CN.md

---------

Co-authored-by: JB <28275108+mochi-co@users.noreply.github.com>
2023-12-22 00:34:29 +00:00
Aaron France
5523d15a9b feat: return tcp.Address from listener, if exists (#336)
* This is the more accurate and correct address of the listener
* Useful if you want to listen on port 0 to dynamically create
listeners (think of unit/integration tests)

Co-authored-by: JB <28275108+mochi-co@users.noreply.github.com>
2023-12-21 23:59:51 +00:00
thedevop
c6c7c296f6 Packet encoding optimization (#343)
* Dynamically allocate buffer for writes if needed

* Remove unused net.Buffer

* Return bytes written to buffer instead of conn

* Dynamic write buffer

* Reduce double write of pk.Payload

* Use memory pool for packet encode

* Pool doesn't guarantee value between Put and Get

* Add benchmark for bufpool

* Fix issue #346

* Change default pool not to have size cap

---------

Co-authored-by: JB <28275108+mochi-co@users.noreply.github.com>
2023-12-21 23:23:35 +00:00
thedevop
4c682384c5 Fix data race issue with write buffer (#347) 2023-12-21 23:13:36 +00:00
werben
624dde0986 Handle expired clients in server.loadClients(). (#341)
* Handle expired clients in server.loadClients().

* No need to call s.Clients.Delete().

---------

Co-authored-by: JB <28275108+mochi-co@users.noreply.github.com>
2023-12-12 23:45:56 +00:00
thedevop
dc4eecdfb7 Dynamically allocate write buffer if needed. (ready for merge) (#324)
* Dynamically allocate buffer for writes if needed

* Remove unused net.Buffer

* Return bytes written to buffer instead of conn

* Dynamic write buffer

---------

Co-authored-by: JB <28275108+mochi-co@users.noreply.github.com>
2023-12-12 23:43:50 +00:00
Naohiro Heya
e8f151bf1f Add a Japanese version of README.md (#338)
* Add a Japanese version of README.md

* add jp link
2023-12-03 10:04:04 +00:00
werben
4983b6b977 Add a demonstration in examples/hooks on how to subscribe to a topic and publish messages directly within the hook. (#333) 2023-11-24 11:25:21 +00:00
JB
99e50ae74e Revert "improve transport performance with bufio (#321)" (#323)
This reverts commit 8e52e49b94.
2023-10-24 18:28:39 +01:00
x20080406
8e52e49b94 improve transport performance with bufio (#321)
* improve transport performance with bufio

* fix issue of unit test

* fix issue

* optimize code
2023-10-22 11:41:28 +01:00
Marco Debus
4c0c862dcd Fix for unlimited maximum message expiry interval (#315)
* fix when no max msg expiry interval is set

* fix expiry handling of clearExpiredInflights

* Modify it to handle cases where the MaximumMessageExpiryInterval is set to 0 or math.MaxInt64 for no expiry, and optimize some of the code and test cases.

* Set MaximumMessageExpiryInterval to 0 or math.MaxInt64 for no expiration, and optimize some of the code and test cases.

* Addressing the issue of numeric overflow with expiration values.

* Only when server.Options.Capabilities.MaximumMessageExpiryInterval is set to math.MaxInt64 for no expiry.

* fix typo in README.md

* There is no need to verify whether 'maximumExpiry' is 'math.MaxInt64' within 'client.ClearInflight()

* Optimize the code to make it easier to understand.

* Differentiate the handling of 'expire' in MQTTv5 and MQTTv3; skip expiration checks if MaximumMessageExpiryInterval is set to 0; optimize code and test cases.

* When MaximumMessageExpiryInterval is set to 0, it should not affect the message's own expiration(for v5) evaluation.

* Adding client.ClearExpiredInflights() to clear expired messages, while client.ClearInflights() is used to clear all inflight messages.

---------

Co-authored-by: JB <28275108+mochi-co@users.noreply.github.com>
Co-authored-by: werben <werben@aliyun.com>
Co-authored-by: werben <werben@qq.com>
2023-10-21 12:10:02 +01:00
JB
2f2d867170 Remove vendor folder (#319)
* Remove vendor folder

* Add vendor to gitignore

---------

Co-authored-by: mochi-co <moumochi@icloud.com>
2023-10-16 18:03:54 +01:00
dependabot[bot]
916d022093 Bump golang.org/x/net from 0.7.0 to 0.17.0 (#316)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.7.0 to 0.17.0.
- [Commits](https://github.com/golang/net/compare/v0.7.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-12 10:25:47 +01:00
werbenhu
11e0256959 update README-CN.md (#312) 2023-10-03 20:45:34 +01:00
JB
858a28ea4b Update README.md 2023-10-02 19:29:24 +01:00
JB
82f32cf53d Indicate translators wanted 2023-10-02 19:17:03 +01:00
JB
b4e2c61a72 Emit warning if client keepalive is less than recommended minimum (#305)
Co-authored-by: mochi-co <moumochi@icloud.com>
2023-10-01 21:49:30 +01:00
933 changed files with 2517 additions and 346547 deletions

3
.gitignore vendored
View File

@@ -1,4 +1,5 @@
cmd/mqtt
.DS_Store
*.db
.idea
.idea
vendor

View File

@@ -11,21 +11,12 @@ RUN go mod download
COPY . ./
RUN go build -o /app/mochi ./cmd
RUN go build -o /app/mochi ./cmd/docker
FROM alpine
WORKDIR /
COPY --from=builder /app/mochi .
# tcp
EXPOSE 1883
# websockets
EXPOSE 1882
# dashboard
EXPOSE 8080
ENTRYPOINT [ "/mochi" ]
CMD ["/cmd/docker", "--config", "config.yaml"]

View File

@@ -10,7 +10,8 @@
</p>
[English](README.md) | [简体中文](README-CN.md)
[English](README.md) | [简体中文](README-CN.md) | [日本語](README-JP.md) | [招募翻译者!](https://github.com/orgs/mochi-mqtt/discussions/310)
🎆 **mochi-co/mqtt 现在已经是新的 mochi-mqtt 组织的一部分。** 详细信息请[阅读公告.](https://github.com/orgs/mochi-mqtt/discussions/271)
@@ -50,7 +51,7 @@ MQTT 代表 MQ Telemetry Transport。它是一种发布/订阅、非常简单和
### 兼容性说明(Compatibility Notes)
由于 v5 规范与 MQTT 的早期版本存在重叠,因此服务器可以接受 v5 和 v3 客户端,但在连接了 v5 和 v3 客户端的情况下,为 v5 客户端提供的属性和功能将会对 v3 客户端进行降级处理(例如用户属性)。
对于 MQTT v3.0.0 和 v3.1.1 的支持被视为混合兼容性。在 v3 规范中没有明确限制的情况下,将使用更新的和以安全为首要考虑的 v5 规范 - 例如保留的消息(retained messages)的过期处理,传输中的消息(inflight messages)的过期处理、客户端过期处理以及QOS消息数量的限制等。
对于 MQTT v3.0.0 和 v3.1.1 的支持被视为混合兼容性。在 v3 规范中没有明确限制的情况下,将使用更新的和以安全为首要考虑的 v5 规范 - 例如保留的消息(retained messages)的过期处理,待发送消息(inflight messages)的过期处理、客户端过期处理以及QOS消息数量的限制等。
#### 版本更新时间
除非涉及关键问题,新版本通常在周末发布。
@@ -72,12 +73,22 @@ go build -o mqtt && ./mqtt
### 使用 Docker
我们提供了一个简单的 Dockerfile,用于运行 cmd/main.go 中的 Websocket、TCP 和统计信息服务器:
你现在可以从 Docker Hub 仓库中拉取并运行Mochi MQTT[官方镜像](https://hub.docker.com/r/mochimqtt/server)
```sh
docker pull mochimqtt/server
或者
docker run mochimqtt/server
```
我们还在积极完善这部分的工作,现在正在实现使用[配置文件的启动](https://github.com/orgs/mochi-mqtt/projects/2)方式。更多关于 Docker 的支持正在[这里](https://github.com/orgs/mochi-mqtt/discussions/281#discussion-5544545)和[这里](https://github.com/orgs/mochi-mqtt/discussions/209)进行讨论。如果你有在这个场景下使用 Mochi-MQTT也可以参与到讨论中来。
我们提供了一个简单的 Dockerfile用于运行 cmd/main.go 中的 Websocket(:1882)、TCP(:1883) 和服务端状态信息(:8080)这三个服务监听:
```sh
docker build -t mochi:latest .
docker run -p 1883:1883 -p 1882:1882 -p 8080:8080 mochi:latest
```
更多关于 Docker 的支持正在[这里](https://github.com/orgs/mochi-mqtt/discussions/281#discussion-5544545)和[这里](https://github.com/orgs/mochi-mqtt/discussions/209)进行讨论。如果你有在这个场景下使用 Mochi-MQTT也可以参与到讨论中来。
## 使用 Mochi MQTT 进行开发
### 将Mochi MQTT作为包导入使用
@@ -168,6 +179,11 @@ server := mqtt.New(&mqtt.Options{
```
请参考 mqtt.Options、mqtt.Capabilities 和 mqtt.Compatibilities 结构体以查看完整的所有服务端选项。ClientNetWriteBufferSize 和 ClientNetReadBufferSize 可以根据你的需求配置调整每个客户端的内存使用状况。
### 默认配置说明(Default Configuration Notes)
关于决定默认配置的值,在这里进行一些说明:
- 默认情况下server.Options.Capabilities.MaximumMessageExpiryInterval 的值被设置为 8640024小时以防止在使用默认配置时网络上暴露服务器而受到恶意DOS攻击如果不配置到期时间将允许无限数量的保留retained/待发送inflight消息累积。如果您在一个受信任的环境中运行或者您有更大的保留期容量您可以选择覆盖此设置设置为0 以取消到期限制)。
## 事件钩子(Event Hooks)
@@ -184,7 +200,7 @@ server := mqtt.New(&mqtt.Options{
| 数据持久性 | [mochi-mqtt/server/hooks/storage/redis](hooks/storage/redis/redis.go) | 使用 [Redis](https://redis.io) 进行持久性存储。 |
| 调试跟踪 | [mochi-mqtt/server/hooks/debug](hooks/debug/debug.go) | 调试输出以查看数据包在服务端的链路追踪。 |
许多内部函数都已开放给开发者你可以参考上述示例创建自己的Hook钩子。如果你有更好的关于Hook钩子方面的建议或者疑问你可以[提交问题](https://github.com/mochi-mqtt/server/issues)给我们。 |
许多内部函数都已开放给开发者你可以参考上述示例创建自己的Hook钩子。如果你有更好的关于Hook钩子方面的建议或者疑问你可以[提交问题](https://github.com/mochi-mqtt/server/issues)给我们。
### 访问控制(Access Control)
@@ -263,7 +279,7 @@ err := server.AddHook(new(auth.Hook), &auth.Options{
```
详细信息请参阅 [examples/auth/encoded/main.go](examples/auth/encoded/main.go)。
### 持久化存储
### 持久化存储(Persistent Storage)
#### Redis
@@ -343,7 +359,7 @@ if err != nil {
| OnRetainedExpired | 在保留的消息已过期并应删除时调用。| |
| StoredClients | 这个接口需要返回客户端列表,例如从持久化数据库中获取客户端列表。 |
| StoredSubscriptions | 返回客户端的所有订阅,例如从持久化数据库中获取客户端的订阅列表。 |
| StoredInflightMessages | 返回正在传输中的消息inflight messages例如从持久化数据库中获取到还有哪些消息未完成传输。 |
| StoredInflightMessages | 返回待发送消息inflight messages例如从持久化数据库中获取到还有哪些消息未完成传输。 |
| StoredRetainedMessages | 返回保留的消息,例如从持久化数据库获取保留的消息。 |
| StoredSysInfo | 返回存储的系统状态信息,例如从持久化数据库获取的系统状态信息。 |

494
README-JP.md Normal file
View File

@@ -0,0 +1,494 @@
# Mochi-MQTT Server
<p align="center">
![build status](https://github.com/mochi-mqtt/server/actions/workflows/build.yml/badge.svg)
[![Coverage Status](https://coveralls.io/repos/github/mochi-mqtt/server/badge.svg?branch=master&v2)](https://coveralls.io/github/mochi-mqtt/server?branch=master)
[![Go Report Card](https://goreportcard.com/badge/github.com/mochi-mqtt/server)](https://goreportcard.com/report/github.com/mochi-mqtt/server/v2)
[![Go Reference](https://pkg.go.dev/badge/github.com/mochi-mqtt/server.svg)](https://pkg.go.dev/github.com/mochi-mqtt/server/v2)
[![contributions welcome](https://img.shields.io/badge/contributions-welcome-brightgreen.svg?style=flat)](https://github.com/mochi-mqtt/server/issues)
</p>
[English](README.md) | [简体中文](README-CN.md) | [日本語](README-JP.md) | [Translators Wanted!](https://github.com/orgs/mochi-mqtt/discussions/310)
🎆 **mochi-co/mqtt は新しい mochi-mqtt organisation の一部です.** [このページをお読みください](https://github.com/orgs/mochi-mqtt/discussions/271)
### Mochi-MQTTは MQTT v5 (と v3.1.1)に完全に準拠しているアプリケーションに組み込み可能なハイパフォーマンスなbroker/serverです.
Mochi MQTT は Goで書かれたMQTT v5に完全に[準拠](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html)しているMQTTブローカーで、IoTプロジェクトやテレメトリの開発プロジェクト向けに設計されています。 スタンドアロンのバイナリで使ったり、アプリケーションにライブラリとして組み込むことができ、プロジェクトのメンテナンス性と品質を確保できるように配慮しながら、 軽量で可能な限り速く動作するように設計されています。
#### MQTTとは?
MQTT は [MQ Telemetry Transport](https://en.wikipedia.org/wiki/MQTT)を意味します。 Pub/Sub型のシンプルで軽量なメッセージプロトコルで、低帯域、高遅延、不安定なネットワーク下での制約を考慮して設計されています([MQTTについて詳しくはこちら](https://mqtt.org/faq))。 Mochi MQTTはMQTTプロトコルv5.0.0に完全準拠した実装をしています。
#### Mochi-MQTTのもつ機能
- MQTTv5への完全な準拠とMQTT v3.1.1 および v3.0.0 との互換性:
- MQTT v5で拡張されたユーザープロパティ
- トピック・エイリアス
- 共有サブスクリプション
- サブスクリプションオプションとサブスクリプションID
- メッセージの有効期限
- クライアントセッション
- 送受信QoSフロー制御クォータ
- サーバサイド切断と認証パケット
- Will遅延間隔
- 上記に加えてQoS(0,1,2)、$SYSトピック、retain機能などすべてのMQTT v1の特徴を持ちます
- Developer-centric:
- 開発者が制御できるように、ほとんどのコアブローカーのコードをエクスポートしてアクセスできるようにしました。
- フル機能で柔軟なフックベースのインターフェイスにすることで簡単に'プラグイン'を開発できるようにしました。
- 特別なインラインクライアントを利用することでパケットインジェクションを行うか、既存のクライアントとしてマスカレードすることができます。
- パフォーマンスと安定性:
- 古典的なツリーベースのトピックサブスクリプションモデル
- クライアント固有に書き込みバッファーをもたせることにより、読み込みの遅さや不規則なクライアントの挙動の問題を回避しています。
- MQTT v5 and MQTT v3のすべての[Paho互換性テスト](https://github.com/eclipse/paho.mqtt.testing/tree/master/interoperability)をpassしています。
- 慎重に検討された多くのユニットテストシナリオでテストされています。
- TCP, Websocket (SSL/TLSを含む), $SYSのダッシュボードリスナー
- フックを利用した保存機能としてRedis, Badger, Boltを使うことができます自作のHookも可能です
- フックを利用したルールベース認証機能とアクセス制御リストLedgerを使うことができます自作のHookも可能です
### 互換性に関する注意事項
MQTTv5とそれ以前との互換性から、サーバーはv5とv3両方のクライアントを受け入れることができますが、v5とv3のクライアントが接続された場合はv5でクライアント向けの特徴と機能はv3クライアントにダウングレードされますユーザープロパティなど
MQTT v3.0.0 と v3.1.1 のサポートはハイブリッド互換性があるとみなされます。それはv3の仕様で制限されていない場合、例えば送信メッセージや保持メッセージの有効期限、QoSフロー制御制限などについては、よりモダンで安全なv5の動作が使用されるということです。
#### リリースされる時期について
クリティカルなイシューでない限り、新しいリリースがされるのは週末です。
## Roadmap
- 新しい特徴やイベントフックのリクエストは [open an issue](https://github.com/mochi-mqtt/server/issues) へ!
- クラスターのサポート
- メトリックスサポートの強化
- ファイルベースの設定(Dockerイメージのサポート)
## Quick Start
### GoでのBrokerの動かし方
Mochi MQTTはスタンドアロンのブローカーとして使うことができます。単純にこのレポジトリーをチェックアウトして、[cmd/main.go](cmd/main.go) を起動すると内部の [cmd](cmd) フォルダのエントリポイントにしてtcp (:1883), websocket (:1882), dashboard (:8080)のポートを外部にEXPOSEします。
```
cd cmd
go build -o mqtt && ./mqtt
```
### Dockerで利用する
Dockerレポジトリの [official Mochi MQTT image](https://hub.docker.com/r/mochimqtt/server) から Pullして起動することができます。
```sh
docker pull mochimqtt/server
or
docker run mochimqtt/server
```
これは実装途中です。[file-based configuration](https://github.com/orgs/mochi-mqtt/projects/2) は、この実装をよりよくサポートするために開発中です。
より実質的なdockerのサポートが議論されています。_Docker環境で使っている方は是非この議論に参加してください。_ [ここ](https://github.com/orgs/mochi-mqtt/discussions/281#discussion-5544545) や [ここ](https://github.com/orgs/mochi-mqtt/discussions/209)。
[cmd/main.go](cmd/main.go)の Websocket, TCP, Statsサーバを実行するために、シンプルなDockerfileが提供されます。
```sh
docker build -t mochi:latest .
docker run -p 1883:1883 -p 1882:1882 -p 8080:8080 mochi:latest
```
## Mochi MQTTを使って開発するには
### パッケージをインポート
Mochi MQTTをパッケージとしてインポートするにはほんの数行のコードで始めることができます。
``` go
import (
"log"
mqtt "github.com/mochi-mqtt/server/v2"
"github.com/mochi-mqtt/server/v2/hooks/auth"
"github.com/mochi-mqtt/server/v2/listeners"
)
func main() {
// Create signals channel to run server until interrupted
sigs := make(chan os.Signal, 1)
done := make(chan bool, 1)
signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
go func() {
<-sigs
done <- true
}()
// Create the new MQTT Server.
server := mqtt.New(nil)
// Allow all connections.
_ = server.AddHook(new(auth.AllowHook), nil)
// Create a TCP listener on a standard port.
tcp := listeners.NewTCP("t1", ":1883", nil)
err := server.AddListener(tcp)
if err != nil {
log.Fatal(err)
}
go func() {
err := server.Serve()
if err != nil {
log.Fatal(err)
}
}()
// Run server until interrupted
<-done
// Cleanup
}
```
ブローカーの動作例は [examples](examples)フォルダにあります。
#### Network Listeners
サーバは様々なプロトコルのコネクションのリスナーに対応しています。現在の対応リスナーは、
| Listener | Usage |
|------------------------------|----------------------------------------------------------------------------------------------|
| listeners.NewTCP | TCPリスナー |
| listeners.NewUnixSock | Unixソケットリスナー |
| listeners.NewNet | net.Listenerリスナー |
| listeners.NewWebsocket | Websocketリスナー |
| listeners.NewHTTPStats | HTTP $SYSダッシュボード |
| listeners.NewHTTPHealthCheck | ヘルスチェック応答を提供するためのHTTPヘルスチェックリスナー(クラウドインフラ) |
> 新しいリスナーを開発するためには `listeners.Listener` を使ってください。使ったら是非教えてください!
TLSを設定するには`*listeners.Config`を渡すことができます。
[examples](examples) フォルダと [cmd/main.go](cmd/main.go)に使用例があります。
## 設定できるオプションと機能
たくさんのオプションが利用可能です。サーバーの動作を変更したり、特定の機能へのアクセスを制限することができます。
```go
server := mqtt.New(&mqtt.Options{
Capabilities: mqtt.Capabilities{
MaximumSessionExpiryInterval: 3600,
Compatibilities: mqtt.Compatibilities{
ObscureNotAuthorized: true,
},
},
ClientNetWriteBufferSize: 4096,
ClientNetReadBufferSize: 4096,
SysTopicResendInterval: 10,
InlineClient: false,
})
```
mqtt.Options、mqtt.Capabilities、mqtt.Compatibilitiesの構造体はオプションの理解に役立ちます。
必要に応じて`ClientNetWriteBufferSize`と`ClientNetReadBufferSize`はクライアントの使用するメモリに合わせて設定できます。
### デフォルト設定に関する注意事項
デフォルト設定の値を決める際に行ったいくつかの決定について、ここに記しておきます:
- デフォルトとして、敵対的なネットワーク上のDoSアタックにさらされるのを防ぐために `server.Options.Capabilities.MaximumMessageExpiryInterval`は86400 (24時間)に、とセットされています。有効期限を無限にすると、保持、送信メッセージが無限に蓄積されるからです。もし信頼できる環境であったり、より大きな保存期間が可能であれば、この設定はオーバーライドできます(`0` を設定すると有効期限はなくなります。)
## Event Hooks
ユニバーサルイベントフックシステムは、開発者にサーバとクライアントの様々なライフサイクルをフックすることができ、ブローカーの機能を追加/変更することができます。それらのユニバーサルフックは認証、永続ストレージ、デバッグツールなど、あらゆるものに使用されています。
フックは複数重ねることができ、サーバに複数のフックを設定することができます。それらは追加した順番に動作します。いくつかのフックは値を変えて、その値は動作コードに返される前にあとに続くフックに渡されます。
| Type | Import | Info |
|----------------|--------------------------------------------------------------------------|----------------------------------------------------------------------------|
| Access Control | [mochi-mqtt/server/hooks/auth . AllowHook](hooks/auth/allow_all.go) | すべてのトピックに対しての読み書きをすべてのクライアントに対して許可します。 |
| Access Control | [mochi-mqtt/server/hooks/auth . Auth](hooks/auth/auth.go) | ルールベースのアクセスコントロール台帳です。 |
| Persistence | [mochi-mqtt/server/hooks/storage/bolt](hooks/storage/bolt/bolt.go) | [BoltDB](https://dbdb.io/db/boltdb) を使った永続ストレージ (非推奨). |
| Persistence | [mochi-mqtt/server/hooks/storage/badger](hooks/storage/badger/badger.go) | [BadgerDB](https://github.com/dgraph-io/badger)を使った永続ストレージ |
| Persistence | [mochi-mqtt/server/hooks/storage/redis](hooks/storage/redis/redis.go) | [Redis](https://redis.io)を使った永続ストレージ |
| Debugging | [mochi-mqtt/server/hooks/debug](hooks/debug/debug.go) | パケットフローを可視化するデバッグ用のフック |
たくさんの内部関数が開発者に公開されています、なので、上記の例を使って自分でフックを作ることができます。もし作ったら是非[Open an issue](https://github.com/mochi-mqtt/server/issues)に投稿して教えてください!
### アクセスコントロール
#### Allow Hook
デフォルトで、Mochi MQTTはアクセスコントロールルールにDENY-ALLを使用しています。コネクションを許可するためには、アクセスコントロールフックを上書きする必要があります。一番単純なのは`auth.AllowAll`フックで、ALLOW-ALLルールがすべてのコネクション、サブスクリプション、パブリッシュに適用されます。使い方は下記のようにするだけです:
```go
server := mqtt.New(nil)
_ = server.AddHook(new(auth.AllowHook), nil)
```
> もしインターネットや信頼できないネットワークにさらされる場合は行わないでください。これは開発・テスト・デバッグ用途のみであるべきです。
#### Auth Ledger
Auth Ledgerは構造体で定義したアクセスルールの洗練された仕組みを提供します。Auth Ledgerルールは2つの形式から成ります。認証ルール(コネクション)とACLルール(パブリッシュ、サブスクライブ)です。
認証ルールは4つのクライテリアとアサーションフラグがあります:
| Criteria | Usage |
| -- | -- |
| Client | 接続クライアントのID |
| Username | 接続クライアントのユーザー名 |
| Password | 接続クライアントのパスワード |
| Remote | クライアントのリモートアドレスもしくはIP |
| Allow | true(このユーザーを許可する)もしくはfalse(このユーザを拒否する) |
アクセスコントロールルールは3つのクライテリアとフィルターマッチがあります:
| Criteria | Usage |
| -- | -- |
| Client | 接続クライアントのID |
| Username | 接続クライアントのユーザー名 |
| Remote | クライアントのリモートアドレスもしくはIP |
| Filters | 合致するフィルターの配列 |
ルールはインデックス順(0,1,2,3)に処理され、はじめに合致したルールが適用されます。 [hooks/auth/ledger.go](hooks/auth/ledger.go) の構造体を見てください。
```go
server := mqtt.New(nil)
err := server.AddHook(new(auth.Hook), &auth.Options{
Ledger: &auth.Ledger{
Auth: auth.AuthRules{ // Auth disallows all by default
{Username: "peach", Password: "password1", Allow: true},
{Username: "melon", Password: "password2", Allow: true},
{Remote: "127.0.0.1:*", Allow: true},
{Remote: "localhost:*", Allow: true},
},
ACL: auth.ACLRules{ // ACL allows all by default
{Remote: "127.0.0.1:*"}, // local superuser allow all
{
// user melon can read and write to their own topic
Username: "melon", Filters: auth.Filters{
"melon/#": auth.ReadWrite,
"updates/#": auth.WriteOnly, // can write to updates, but can't read updates from others
},
},
{
// Otherwise, no clients have publishing permissions
Filters: auth.Filters{
"#": auth.ReadOnly,
"updates/#": auth.Deny,
},
},
},
}
})
```
ledgeはデータフィールドを使用してJSONもしくはYAML形式で保存したものを使用することもできます。
```go
err := server.AddHook(new(auth.Hook), &auth.Options{
Data: data, // build ledger from byte slice: yaml or json
})
```
より詳しくは[examples/auth/encoded/main.go](examples/auth/encoded/main.go)を見てください。
### 永続ストレージ
#### Redis
ブローカーに永続性を提供する基本的な Redis ストレージフックが利用可能です。他のフックと同じ方法で、いくつかのオプションを使用してサーバーに追加できます。それはフック内部で github.com/go-redis/redis/v8 を使用し、Optionsの値で詳しい設定を行うことができます。
```go
err := server.AddHook(new(redis.Hook), &redis.Options{
Options: &rv8.Options{
Addr: "localhost:6379", // default redis address
Password: "", // your password
DB: 0, // your redis db
},
})
if err != nil {
log.Fatal(err)
}
```
Redisフックがどのように動くか、どのように使用するかについての詳しくは、[examples/persistence/redis/main.go](examples/persistence/redis/main.go) か [hooks/storage/redis](hooks/storage/redis) のソースコードを見てください。
#### Badger DB
もしファイルベースのストレージのほうが適しているのであれば、BadgerDBストレージも使用することができます。それもまた、他のフックと同様に追加、設定することができますオプションは若干少ないです
```go
err := server.AddHook(new(badger.Hook), &badger.Options{
Path: badgerPath,
})
if err != nil {
log.Fatal(err)
}
```
badgerフックがどのように動くか、どのように使用するかについての詳しくは、[examples/persistence/badger/main.go](examples/persistence/badger/main.go) か [hooks/storage/badger](hooks/storage/badger) のソースコードを見てください。
BoltDBフックはBadgerに代わって非推奨となりましたが、もし必要ならば [examples/persistence/bolt/main.go](examples/persistence/bolt/main.go)をチェックしてください。
## イベントフックを利用した開発
ブローカーとクライアントのライフサイクルに関わるたくさんのフックが利用できます。
そのすべてのフックと`mqtt.Hook`インターフェイスの関数シグネチャは[hooks.go](hooks.go)に記載されています。
> もっと柔軟なイベントフックはOnPacketRead、OnPacketEncodeとOnPacketSentです。それらは、すべての流入パケットと流出パケットをコントロール及び変更に使用されるフックです。
| Function | Usage |
|------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
| OnStarted | サーバーが正常にスタートした際に呼ばれます。 |
| OnStopped | サーバーが正常に終了した際に呼ばれます。 |
| OnConnectAuthenticate | ユーザーがサーバと認証を試みた際に呼ばれます。このメソッドはサーバーへのアクセス許可もしくは拒否するためには必ず使用する必要がありますhooks/auth/allow_all or basicを見てください。これは、データベースにユーザーが存在するか照合してチェックするカスタムフックに利用できます。許可する場合はtrueを返す実装をします。|
| OnACLCheck | ユーザーがあるトピックフィルタにpublishかsubscribeした際に呼ばれます。上と同様です |
| OnSysInfoTick | $SYSトピック値がpublishされた場合に呼ばれます。 |
| OnConnect | 新しいクライアントが接続した際によばれます、エラーかパケットコードを返して切断する場合があります。 |
| OnSessionEstablish | 新しいクライアントが接続された後すぐ、セッションが確立されてCONNACKが送信される前に呼ばれます。 |
| OnSessionEstablished | 新しいクライアントがセッションを確立した際(OnConnectの後)に呼ばれます。 |
| OnDisconnect | クライアントが何らかの理由で切断された場合に呼ばれます。 |
| OnAuthPacket | 認証パケットを受け取ったときに呼ばれます。これは開発者にmqtt v5の認証パケットを取り扱う仕組みを作成することを意図しています。パケットを変更することができます。 |
| OnPacketRead | クライアントからパケットを受け取った際に呼ばれます。パケットを変更することができます。 |
| OnPacketEncode | エンコードされたパケットがクライアントに送信する直前に呼ばれます。パケットを変更することができます。 |
| OnPacketSent | クライアントにパケットが送信された際に呼ばれます。 |
| OnPacketProcessed | パケットが届いてブローカーが正しく処理できた場合に呼ばれます。 |
| OnSubscribe | クライアントが1つ以上のフィルタをsubscribeした場合に呼ばれます。パケットの変更ができます。 |
| OnSubscribed | クライアントが1つ以上のフィルタをsubscribeに成功した場合に呼ばれます。 |
| OnSelectSubscribers | サブスクライバーがトピックに収集されたとき、共有サブスクライバーが選択される前に呼ばれる。受信者は変更可能。 |
| OnUnsubscribe | 1つ以上のアンサブスクライブが呼ばれた場合。パケットの変更は可能。 |
| OnUnsubscribed | クライアントが正常に1つ以上のトピックフィルタをサブスクライブ解除した場合。 |
| OnPublish | クライアントがメッセージをパブリッシュした場合。パケットの変更は可能。 |
| OnPublished | クライアントがサブスクライバーにメッセージをパブリッシュし終わった場合。 |
| OnPublishDropped | あるクライアントが反応に時間がかかった場合等のようにクライアントに到達する前にメッセージが失われた場合に呼ばれる。 |
| OnRetainMessage | パブリッシュされたメッセージが保持された場合に呼ばれる。 |
| OnRetainPublished | 保持されたメッセージがクライアントに到達した場合に呼ばれる。 |
| OnQosPublish | QoSが1以上のパケットがサブスクライバーに発行された場合。 |
| OnQosComplete | そのメッセージQoSフローが完了した場合に呼ばれる。 |
| OnQosDropped | インフライトメッセージが完了前に期限切れになった場合に呼ばれる。 |
| OnPacketIDExhausted | クライアントがパケットに割り当てるIDが枯渇した場合に呼ばれる。 |
| OnWill | クライアントが切断し、WILLメッセージを発行しようとした場合に呼ばれる。パケットの変更が可能。 |
| OnWillSent | LWTメッセージが切断されたクライアントから発行された場合に呼ばれる |
| OnClientExpired | クライアントセッションが期限切れで削除するべき場合に呼ばれる。 |
| OnRetainedExpired | 保持メッセージが期限切れで削除すべき場合に呼ばれる。 |
| StoredClients | クライアントを返す。例えば永続ストレージから。 |
| StoredSubscriptions | クライアントのサブスクリプションを返す。例えば永続ストレージから。 |
| StoredInflightMessages | インフライトメッセージを返す。例えば永続ストレージから。 |
| StoredRetainedMessages | 保持されたメッセージを返す。例えば永続ストレージから。 |
| StoredSysInfo | システム情報の値を返す。例えば永続ストレージから。 |
もし永続ストレージフックを作成しようとしているのであれば、すでに存在する永続的なフックを見てインスピレーションとどのようなパターンがあるか見てみてください。もし認証フックを作成しようとしているのであれば、`OnACLCheck`と`OnConnectAuthenticate`が役立つでしょう。
### Inline Client (v2.4.0+)
トピックに対して埋め込まれたコードから直接サブスクライブとパブリッシュできます。そうするには`inline client`機能を使うことができます。インラインクライアント機能はサーバの一部として組み込まれているクライアントでサーバーのオプションとしてEnableにできます。
```go
server := mqtt.New(&mqtt.Options{
InlineClient: true,
})
```
Enableにすると、`server.Publish`, `server.Subscribe`, `server.Unsubscribe`のメソッドを利用できて、ブローカーから直接メッセージを送受信できます。
> 実際の使用例は[direct examples](examples/direct/main.go)を見てください。
#### Inline Publish
組み込まれたアプリケーションからメッセージをパブリッシュするには`server.Publish(topic string, payload []byte, retain bool, qos byte) error`メソッドを利用します。
```go
err := server.Publish("direct/publish", []byte("packet scheduled message"), false, 0)
```
> このケースでのQoSはサブスクライバーに設定できる上限でしか使用されません。これはMQTTv5の仕様に従っています。
#### Inline Subscribe
組み込まれたアプリケーション内部からトピックフィルタをサブスクライブするには、`server.Subscribe(filter string, subscriptionId int, handler InlineSubFn) error`メソッドがコールバックも含めて使用できます。
インラインサブスクリプションではQoS0のみが適用されます。もし複数のコールバックを同じフィルタに設定したい場合は、MQTTv5の`subscriptionId`のプロパティがその区別に使用できます。
```go
callbackFn := func(cl *mqtt.Client, sub packets.Subscription, pk packets.Packet) {
server.Log.Info("inline client received message from subscription", "client", cl.ID, "subscriptionId", sub.Identifier, "topic", pk.TopicName, "payload", string(pk.Payload))
}
server.Subscribe("direct/#", 1, callbackFn)
```
#### Inline Unsubscribe
インラインクライアントでサブスクリプション解除をしたい場合は、`server.Unsubscribe(filter string, subscriptionId int) error` メソッドで行うことができます。
```go
server.Unsubscribe("direct/#", 1)
```
### Packet Injection
もし、より制御したい場合や、特定のMQTTv5のプロパティやその他の値をセットしたい場合は、クライアントからのパブリッシュパケットを自ら作成することができます。この方法は単なるパブリッシュではなく、MQTTパケットをまるで特定のクライアントから受け取ったかのようにランタイムに直接インジェクションすることができます。
このパケットインジェクションは例えばPING ReqやサブスクリプションなどのどんなMQTTパケットでも使用できます。そしてクライアントの構造体とメソッドはエクスポートされているので、(もし、非常にカスタマイズ性の高い要求がある場合には)まるで接続されたクライアントに代わってパケットをインジェクションすることさえできます。
たいていの場合は上記のインラインクライアントを使用するのが良いでしょう、それはACLとトピックバリデーションをバイパスできる特権があるからです。これは$SYSトピックにさえパブリッシュできることも意味します。ビルトインのクライアントと同様に振る舞うインラインクライアントを作成できます。
```go
cl := server.NewClient(nil, "local", "inline", true)
server.InjectPacket(cl, packets.Packet{
FixedHeader: packets.FixedHeader{
Type: packets.Publish,
},
TopicName: "direct/publish",
Payload: []byte("scheduled message"),
})
```
> MQTTのパケットは正しく構成する必要があり、なので[the test packets catalogue](packets/tpackets.go)と[MQTTv5 Specification](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html)を参照してください。
この機能の動作を確認するには[hooks example](examples/hooks/main.go) を見てください。
### Testing
#### ユニットテスト
それぞれの関数が期待通りの動作をするように考えられてMochi MQTTテストが作成されています。テストを走らせるには:
```
go run --cover ./...
```
#### Paho相互運用性テスト
`examples/paho/main.go`を使用してブローカーを起動し、_interoperability_フォルダの`python3 client_test5.py`のmqttv5とv3のテストを実行することで、[Paho Interoperability Test](https://github.com/eclipse/paho.mqtt.testing/tree/master/interoperability)を確認することができます。
> pahoスイートには現在は何個かの偽陰性に関わるissueがあるので、`paho/main.go`の例ではいくつかの互換性モードがオンになっていることに注意してください。
## ベンチマーク
Mochi MQTTのパフォーマンスはMosquitto、EMQX、その他などの有名なブローカーに匹敵します。
ベンチマークはApple Macbook Air M2上で[MQTT-Stresser](https://github.com/inovex/mqtt-stresser)、セッティングとして`cmd/main.go`のデフォルト設定を使用しています。高スループットと低スループットのバーストを考慮すると、中央値のスコアが最も信頼できます。この値は高いほど良いです。
> ベンチマークの値は1秒あたりのメッセージ数のスループットのそのものを表しているわけではありません。これは、mqtt-stresserによる固有の計算に依存するものではありますが、すべてのブローカーに渡って一貫性のある値として利用しています。
> ベンチマークは一般的なパフォーマンス予測ガイドラインとしてのみ提供されます。比較はそのまま使用したデフォルトの設定値で実行しています。
`mqtt-stresser -broker tcp://localhost:1883 -num-clients=2 -num-messages=10000`
| Broker | publish fastest | median | slowest | receive fastest | median | slowest |
| -- | -- | -- | -- | -- | -- | -- |
| Mochi v2.2.10 | 124,772 | 125,456 | 124,614 | 314,461 | 313,186 | 311,910 |
| [Mosquitto v2.0.15](https://github.com/eclipse/mosquitto) | 155,920 | 155,919 | 155,918 | 185,485 | 185,097 | 184,709 |
| [EMQX v5.0.11](https://github.com/emqx/emqx) | 156,945 | 156,257 | 155,568 | 17,918 | 17,783 | 17,649 |
| [Rumqtt v0.21.0](https://github.com/bytebeamio/rumqtt) | 112,208 | 108,480 | 104,753 | 135,784 | 126,446 | 117,108 |
`mqtt-stresser -broker tcp://localhost:1883 -num-clients=10 -num-messages=10000`
| Broker | publish fastest | median | slowest | receive fastest | median | slowest |
| -- | -- | -- | -- | -- | -- | -- |
| Mochi v2.2.10 | 41,825 | 31,663| 23,008 | 144,058 | 65,903 | 37,618 |
| Mosquitto v2.0.15 | 42,729 | 38,633 | 29,879 | 23,241 | 19,714 | 18,806 |
| EMQX v5.0.11 | 21,553 | 17,418 | 14,356 | 4,257 | 3,980 | 3,756 |
| Rumqtt v0.21.0 | 42,213 | 23,153 | 20,814 | 49,465 | 36,626 | 19,283 |
100万メッセージ試験 (100 万メッセージを一斉にサーバーに送信します):
`mqtt-stresser -broker tcp://localhost:1883 -num-clients=100 -num-messages=10000`
| Broker | publish fastest | median | slowest | receive fastest | median | slowest |
| -- | -- | -- | -- | -- | -- | -- |
| Mochi v2.2.10 | 13,532 | 4,425 | 2,344 | 52,120 | 7,274 | 2,701 |
| Mosquitto v2.0.15 | 3,826 | 3,395 | 3,032 | 1,200 | 1,150 | 1,118 |
| EMQX v5.0.11 | 4,086 | 2,432 | 2,274 | 434 | 333 | 311 |
| Rumqtt v0.21.0 | 78,972 | 5,047 | 3,804 | 4,286 | 3,249 | 2,027 |
> EMQXのここでの結果は何が起きているのかわかりませんが、おそらくDockerのそのままの設定が最適ではなかったのでしょう、なので、この結果はソフトウェアのひとつの側面にしか過ぎないと捉えてください。
## Contribution Guidelines
コントリビューションとフィードバックは両方とも歓迎しています![Open an issue](https://github.com/mochi-mqtt/server/issues)でバグを報告したり、質問したり、新機能のリクエストをしてください。もしプルリクエストするならば下記のガイドラインに従うようにしてください。
- 合理的で可能な限りテストカバレッジを維持してください
- なぜPRをしたのかとそのPRの内容について明確にしてください。
- 有意義な貢献をした場合はSPDX FileContributorタグをファイルにつけてください。
[SPDX Annotations](https://spdx.dev)はそのライセンス、著作権表記、コントリビューターについて明確するのために、それぞれのファイルに機械可読な形式で記されています。もし、新しいファイルをレポジトリに追加した場合は、下記のようなSPDXヘッダーを付与していることを確かめてください。
```go
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2023 mochi-mqtt
// SPDX-FileContributor: Your name or alias <optional@email.address>
package name
```
ファイルにそれぞれのコントリビューターの`SPDX-FileContributor`が追加されていることを確認してください。他のファイルを参考にしてください。あなたのこのプロジェクトへのコントリビュートは価値があり、高く評価されます!
## Stargazers over time 🥰
[![Stargazers over time](https://starchart.cc/mochi-mqtt/server.svg)](https://starchart.cc/mochi-mqtt/server)
Mochi MQTTをプロジェクトで使用していますか [是非私達に教えてください!](https://github.com/mochi-mqtt/server/issues)

View File

@@ -10,7 +10,7 @@
</p>
[English](README.md) | [简体中文](README-CN.md)
[English](README.md) | [简体中文](README-CN.md) | [日本語](README-JP.md) | [Translators Wanted!](https://github.com/orgs/mochi-mqtt/discussions/310)
🎆 **mochi-co/mqtt is now part of the new mochi-mqtt organisation.** [Read about this announcement here.](https://github.com/orgs/mochi-mqtt/discussions/271)
@@ -60,7 +60,6 @@ Unless it's a critical issue, new releases typically go out over the weekend.
- Please [open an issue](https://github.com/mochi-mqtt/server/issues) to request new features or event hooks!
- Cluster support.
- Enhanced Metrics support.
- File-based server configuration (supporting docker).
## Quick Start
### Running the Broker with Go
@@ -72,13 +71,54 @@ go build -o mqtt && ./mqtt
```
### Using Docker
A simple Dockerfile is provided for running the [cmd/main.go](cmd/main.go) Websocket, TCP, and Stats server:
You can now pull and run the [official Mochi MQTT image](https://hub.docker.com/r/mochimqtt/server) from our Docker repo:
```sh
docker pull mochimqtt/server
# or
docker run -v $(pwd)/config.yaml:/config.yaml mochimqtt/server
```
For most use cases, you can use File Based Configuration to configure the server, by specifying a valid `yaml` or `json` config file.
A simple Dockerfile is provided for running the [cmd/main.go](cmd/main.go) Websocket, TCP, and Stats server, using the `allow-all` auth hook.
```sh
docker build -t mochi:latest .
docker run -p 1883:1883 -p 1882:1882 -p 8080:8080 mochi:latest
docker run -p 1883:1883 -p 1882:1882 -p 8080:8080 -v $(pwd)/config.yaml:/config.yaml mochi:latest
```
_More substantial docker support is being discussed [here](https://github.com/orgs/mochi-mqtt/discussions/281#discussion-5544545) and [here](https://github.com/orgs/mochi-mqtt/discussions/209). Please join the discussion if you use Mochi-MQTT in this environment._
### File Based Configuration
You can use File Based Configuration with either the Docker image (described above), or by running the build binary with the `--config=config.yaml` or `--config=config.json` parameter.
Configuration files provide a convenient mechanism for easily preparing a server with the most common configurations. You can enable and configure built-in hooks and listeners, and specify server options and compatibilities:
```yaml
listeners:
- type: "tcp"
id: "tcp12"
address: ":1883"
- type: "ws"
id: "ws1"
address: ":1882"
- type: "sysinfo"
id: "stats"
address: ":1880"
hooks:
auth:
allow_all: true
options:
inline_client: true
```
Please review the examples found in `examples/config` for all available configuration options.
There are a few conditions to note:
1. If you use file-based configuration, you can only have one of each hook type.
2. You can only use built in hooks with file-based configuration, as the type and configuration structure needs to be known by the server in order for it to be applied.
3. You can only use built in listeners, for the reasons above.
If you need to implement custom hooks or listeners, please do so using the traditional manner indicated in `cmd/main.go`.
## Developing with Mochi MQTT
### Importing as a package
@@ -150,7 +190,8 @@ A `*listeners.Config` may be passed to configure TLS.
Examples of usage can be found in the [examples](examples) folder or [cmd/main.go](cmd/main.go).
### Server Options and Capabilities
## Server Options and Capabilities
A number of configurable options are available which can be used to alter the behaviour or restrict access to certain features in the server.
```go
@@ -170,6 +211,11 @@ server := mqtt.New(&mqtt.Options{
Review the mqtt.Options, mqtt.Capabilities, and mqtt.Compatibilities structs for a comprehensive list of options. `ClientNetWriteBufferSize` and `ClientNetReadBufferSize` can be configured to adjust memory usage per client, based on your needs.
### Default Configuration Notes
Some choices were made when deciding the default configuration that need to be mentioned here:
- By default, the value of `server.Options.Capabilities.MaximumMessageExpiryInterval` is set to 86400 (24 hours), in order to prevent exposing the broker to DOS attacks on hostile networks when using the out-of-the-box configuration (as an infinite expiry would allow an infinite number of retained/inflight messages to accumulate). If you are operating in a trusted environment, or you have capacity for a larger retention period, you may wish to override this (set to `0` for no expiry).
## Event Hooks
A universal event hooks system allows developers to hook into various parts of the server and client life cycle to add and modify functionality of the broker. These universal hooks are used to provide everything from authentication, persistent storage, to debugging tools.

View File

@@ -8,6 +8,7 @@ import (
"bufio"
"bytes"
"context"
"errors"
"fmt"
"io"
"net"
@@ -21,8 +22,13 @@ import (
)
const (
defaultKeepalive uint16 = 10 // the default connection keepalive value in seconds
defaultKeepalive uint16 = 10 // the default connection keepalive value in seconds.
defaultClientProtocolVersion byte = 4 // the default mqtt protocol version of connecting clients (if somehow unspecified).
minimumKeepalive uint16 = 5 // the minimum recommended keepalive; keepalives at or below this value log a warning on connect.
)
var (
// ErrMinimumKeepalive is used to warn (via the server log) when a connecting client's keepalive is at or below minimumKeepalive.
ErrMinimumKeepalive = errors.New("client keepalive is below minimum recommended value and may exhibit connection instability")
)
// ReadFn is the function signature for the function used for reading and processing new packets.
@@ -107,11 +113,12 @@ type Client struct {
// ClientConnection contains the connection transport and metadata for the client.
type ClientConnection struct {
Conn net.Conn // the net.Conn used to establish the connection
bconn *bufio.ReadWriter // a buffered net.Conn for reading packets
Remote string // the remote address of the client
Listener string // listener id of the client
Inline bool // if true, the client is the built-in 'inline' embedded client
Conn net.Conn // the net.Conn used to establish the connection
bconn *bufio.Reader // a buffered net.Conn for reading packets
outbuf *bytes.Buffer // a buffer for writing packets
Remote string // the remote address of the client
Listener string // listener id of the client
Inline bool // if true, the client is the built-in 'inline' embedded client
}
// ClientProperties contains the properties which define the client behaviour.
@@ -174,11 +181,8 @@ func newClient(c net.Conn, o *ops) *Client {
if c != nil {
cl.Net = ClientConnection{
Conn: c,
bconn: bufio.NewReadWriter(
bufio.NewReaderSize(c, o.options.ClientNetReadBufferSize),
bufio.NewWriterSize(c, o.options.ClientNetWriteBufferSize),
),
Conn: c,
bconn: bufio.NewReaderSize(c, o.options.ClientNetReadBufferSize),
Remote: c.RemoteAddr().String(),
}
}
@@ -211,6 +215,19 @@ func (cl *Client) ParseConnect(lid string, pk packets.Packet) {
cl.Properties.Clean = pk.Connect.Clean
cl.Properties.Props = pk.Properties.Copy(false)
if cl.Properties.Props.ReceiveMaximum > cl.ops.options.Capabilities.MaximumInflight { // 3.3.4 Non-normative
cl.Properties.Props.ReceiveMaximum = cl.ops.options.Capabilities.MaximumInflight
}
if pk.Connect.Keepalive <= minimumKeepalive {
cl.ops.log.Warn(
ErrMinimumKeepalive.Error(),
"client", cl.ID,
"keepalive", pk.Connect.Keepalive,
"recommended", minimumKeepalive,
)
}
cl.State.Keepalive = pk.Connect.Keepalive // [MQTT-3.2.2-22]
cl.State.Inflight.ResetReceiveQuota(int32(cl.ops.options.Capabilities.ReceiveMaximum)) // server receive max per client
cl.State.Inflight.ResetSendQuota(int32(cl.Properties.Props.ReceiveMaximum)) // client receive max
@@ -312,10 +329,26 @@ func (cl *Client) ResendInflightMessages(force bool) error {
}
// ClearInflights deletes all inflight messages for the client, e.g. for a disconnected user with a clean session.
func (cl *Client) ClearInflights(now, maximumExpiry int64) []uint16 {
// ClearInflights deletes every inflight message held for the client, regardless
// of age or expiry, e.g. for a disconnected user with a clean session. The
// OnQosDropped hook fires and the inflight counter decrements per deleted message.
func (cl *Client) ClearInflights() {
	for _, pending := range cl.State.Inflight.GetAll(false) {
		if !cl.State.Inflight.Delete(pending.PacketID) {
			continue
		}
		cl.ops.hooks.OnQosDropped(cl, pending)
		atomic.AddInt64(&cl.ops.info.Inflight, -1)
	}
}
// ClearExpiredInflights deletes any inflight messages which have expired.
func (cl *Client) ClearExpiredInflights(now, maximumExpiry int64) []uint16 {
deleted := []uint16{}
for _, tk := range cl.State.Inflight.GetAll(false) {
if (tk.Expiry > 0 && tk.Expiry < now) || tk.Created+maximumExpiry < now {
expired := tk.ProtocolVersion == 5 && tk.Expiry > 0 && tk.Expiry < now // [MQTT-3.3.2-5]
// If the maximum message expiry interval is set (greater than 0), and the message
// retention period exceeds the maximum expiry, the message will be forcibly removed.
enforced := maximumExpiry > 0 && now-tk.Created > maximumExpiry
if expired || enforced {
if ok := cl.State.Inflight.Delete(tk.PacketID); ok {
cl.ops.hooks.OnQosDropped(cl, tk)
atomic.AddInt64(&cl.ops.info.Inflight, -1)
@@ -553,11 +586,35 @@ func (cl *Client) WritePacket(pk packets.Packet) error {
return packets.ErrPacketTooLarge // [MQTT-3.1.2-24] [MQTT-3.1.2-25]
}
nb := net.Buffers{buf.Bytes()}
n, err := func() (int64, error) {
cl.Lock()
defer cl.Unlock()
return nb.WriteTo(cl.Net.Conn)
if len(cl.State.outbound) == 0 {
if cl.Net.outbuf == nil {
return buf.WriteTo(cl.Net.Conn)
}
// first write to buffer, then flush buffer
n, _ := cl.Net.outbuf.Write(buf.Bytes()) // will always be successful
err = cl.flushOutbuf()
return int64(n), err
}
// there are more writes in the queue
if cl.Net.outbuf == nil {
if buf.Len() >= cl.ops.options.ClientNetWriteBufferSize {
return buf.WriteTo(cl.Net.Conn)
}
cl.Net.outbuf = new(bytes.Buffer)
}
n, _ := cl.Net.outbuf.Write(buf.Bytes()) // will always be successful
if cl.Net.outbuf.Len() < cl.ops.options.ClientNetWriteBufferSize {
return int64(n), nil
}
err = cl.flushOutbuf()
return int64(n), err
}()
if err != nil {
return err
@@ -573,3 +630,15 @@ func (cl *Client) WritePacket(pk packets.Packet) error {
return err
}
// flushOutbuf writes any buffered outgoing bytes to the client connection.
// The buffer is released only once it has been fully drained without error.
func (cl *Client) flushOutbuf() error {
	if cl.Net.outbuf == nil {
		return nil
	}
	if _, err := cl.Net.outbuf.WriteTo(cl.Net.Conn); err != nil {
		return err
	}
	cl.Net.outbuf = nil
	return nil
}

View File

@@ -5,10 +5,14 @@
package mqtt
import (
"bufio"
"bytes"
"context"
"errors"
"io"
"log/slog"
"net"
"strings"
"sync/atomic"
"testing"
"time"
@@ -33,6 +37,7 @@ func newTestClient() (cl *Client, r net.Conn, w net.Conn) {
options: &Options{
Capabilities: &Capabilities{
ReceiveMaximum: 10,
MaximumInflight: 5,
TopicAliasMaximum: 10000,
MaximumClientWritesPending: 3,
maximumPacketID: 10,
@@ -179,6 +184,45 @@ func TestClientParseConnect(t *testing.T) {
require.Equal(t, int32(pk.Properties.ReceiveMaximum), cl.State.Inflight.maximumSendQuota)
}
// TestClientParseConnectReceiveMaxExceedMaxInflight verifies that when a client
// requests a ReceiveMaximum (5) greater than the server's MaximumInflight
// capability (1), ParseConnect clamps the client's send quota to MaximumInflight.
func TestClientParseConnectReceiveMaxExceedMaxInflight(t *testing.T) {
const MaxInflight uint16 = 1
cl, _, _ := newTestClient()
cl.ops.options.Capabilities.MaximumInflight = MaxInflight
pk := packets.Packet{
ProtocolVersion: 4,
Connect: packets.ConnectParams{
ProtocolName: []byte{'M', 'Q', 'T', 'T'},
Clean: true,
Keepalive: 60,
ClientIdentifier: "mochi",
WillFlag: true,
WillTopic: "lwt",
WillPayload: []byte("lol gg"),
WillQos: 1,
WillRetain: false,
},
Properties: packets.Properties{
ReceiveMaximum: uint16(5), // deliberately above MaximumInflight to trigger the clamp
},
}
cl.ParseConnect("tcp1", pk)
require.Equal(t, pk.Connect.ClientIdentifier, cl.ID)
require.Equal(t, pk.Connect.Keepalive, cl.State.Keepalive)
require.Equal(t, pk.Connect.Clean, cl.Properties.Clean)
require.Equal(t, pk.Connect.ClientIdentifier, cl.ID)
require.Equal(t, pk.Connect.WillTopic, cl.Properties.Will.TopicName)
require.Equal(t, pk.Connect.WillPayload, cl.Properties.Will.Payload)
require.Equal(t, pk.Connect.WillQos, cl.Properties.Will.Qos)
require.Equal(t, pk.Connect.WillRetain, cl.Properties.Will.Retain)
require.Equal(t, uint32(1), cl.Properties.Will.Flag)
// Receive quotas come from the server's own ReceiveMaximum capability.
require.Equal(t, int32(cl.ops.options.Capabilities.ReceiveMaximum), cl.State.Inflight.receiveQuota)
require.Equal(t, int32(cl.ops.options.Capabilities.ReceiveMaximum), cl.State.Inflight.maximumReceiveQuota)
// Send quotas must be clamped down to MaximumInflight, not the requested 5.
require.Equal(t, int32(MaxInflight), cl.State.Inflight.sendQuota)
require.Equal(t, int32(MaxInflight), cl.State.Inflight.maximumSendQuota)
}
func TestClientParseConnectOverrideWillDelay(t *testing.T) {
cl, _, _ := newTestClient()
@@ -210,6 +254,27 @@ func TestClientParseConnectNoID(t *testing.T) {
require.NotEmpty(t, cl.ID)
}
// TestClientParseConnectBelowMinimumKeepalive verifies that a connecting client
// with a keepalive below the recommended minimum causes ErrMinimumKeepalive to
// be written to the server log (the connection itself is still accepted).
func TestClientParseConnectBelowMinimumKeepalive(t *testing.T) {
cl, _, _ := newTestClient()
// Capture log output in a buffer so the warning text can be asserted on.
var b bytes.Buffer
x := bufio.NewWriter(&b)
cl.ops.log = slog.New(slog.NewTextHandler(x, nil))
pk := packets.Packet{
ProtocolVersion: 4,
Connect: packets.ConnectParams{
ProtocolName: []byte{'M', 'Q', 'T', 'T'},
Keepalive: minimumKeepalive - 1,
ClientIdentifier: "mochi",
},
}
cl.ParseConnect("tcp1", pk)
err := x.Flush()
require.NoError(t, err)
require.True(t, strings.Contains(b.String(), ErrMinimumKeepalive.Error()))
require.NotEmpty(t, cl.ID)
}
func TestClientNextPacketID(t *testing.T) {
cl, _, _ := newTestClient()
@@ -277,19 +342,56 @@ func TestClientNextPacketIDOverflow(t *testing.T) {
// TestClientClearInflights verifies that ClearInflights removes every inflight
// message unconditionally — expired, unexpired, and freshly created alike.
func TestClientClearInflights(t *testing.T) {
cl, _, _ := newTestClient()
n := time.Now().Unix()
cl.State.Inflight.Set(packets.Packet{ProtocolVersion: 5, PacketID: 1, Expiry: n - 1})
cl.State.Inflight.Set(packets.Packet{ProtocolVersion: 5, PacketID: 2, Expiry: n - 2})
cl.State.Inflight.Set(packets.Packet{ProtocolVersion: 5, PacketID: 3, Created: n - 3}) // within bounds
cl.State.Inflight.Set(packets.Packet{ProtocolVersion: 5, PacketID: 5, Created: n - 5}) // over max server expiry limit
cl.State.Inflight.Set(packets.Packet{ProtocolVersion: 5, PacketID: 7, Created: n})
require.Equal(t, 5, cl.State.Inflight.Len())
cl.ClearInflights()
require.Equal(t, 0, cl.State.Inflight.Len())
}
func TestClientClearExpiredInflights(t *testing.T) {
cl, _, _ := newTestClient()
n := time.Now().Unix()
cl.State.Inflight.Set(packets.Packet{PacketID: 1, Expiry: n - 1})
cl.State.Inflight.Set(packets.Packet{PacketID: 2, Expiry: n - 2})
cl.State.Inflight.Set(packets.Packet{PacketID: 3, Created: n - 3}) // within bounds
cl.State.Inflight.Set(packets.Packet{PacketID: 5, Created: n - 5}) // over max server expiry limit
cl.State.Inflight.Set(packets.Packet{PacketID: 7, Created: n})
cl.State.Inflight.Set(packets.Packet{ProtocolVersion: 5, PacketID: 1, Expiry: n - 1})
cl.State.Inflight.Set(packets.Packet{ProtocolVersion: 5, PacketID: 2, Expiry: n - 2})
cl.State.Inflight.Set(packets.Packet{ProtocolVersion: 5, PacketID: 3, Created: n - 3}) // within bounds
cl.State.Inflight.Set(packets.Packet{ProtocolVersion: 5, PacketID: 5, Created: n - 5}) // over max server expiry limit
cl.State.Inflight.Set(packets.Packet{ProtocolVersion: 5, PacketID: 7, Created: n})
require.Equal(t, 5, cl.State.Inflight.Len())
deleted := cl.ClearInflights(n, 4)
deleted := cl.ClearExpiredInflights(n, 4)
require.Len(t, deleted, 3)
require.ElementsMatch(t, []uint16{1, 2, 5}, deleted)
require.Equal(t, 2, cl.State.Inflight.Len())
cl.State.Inflight.Set(packets.Packet{PacketID: 11, Expiry: n - 1})
cl.State.Inflight.Set(packets.Packet{PacketID: 12, Expiry: n - 2}) // expiry is ineffective for v3.
cl.State.Inflight.Set(packets.Packet{PacketID: 13, Created: n - 3}) // within bounds for v3
cl.State.Inflight.Set(packets.Packet{PacketID: 15, Created: n - 5}) // over max server expiry limit
require.Equal(t, 6, cl.State.Inflight.Len())
deleted = cl.ClearExpiredInflights(n, 4)
require.Len(t, deleted, 3)
require.ElementsMatch(t, []uint16{11, 12, 15}, deleted)
require.Equal(t, 3, cl.State.Inflight.Len())
cl.State.Inflight.Set(packets.Packet{PacketID: 17, Created: n - 1})
deleted = cl.ClearExpiredInflights(n, 0) // maximumExpiry = 0 do not process abandon messages
require.Len(t, deleted, 0)
require.Equal(t, 4, cl.State.Inflight.Len())
cl.State.Inflight.Set(packets.Packet{ProtocolVersion: 5, PacketID: 18, Expiry: n - 1})
deleted = cl.ClearExpiredInflights(n, 0) // maximumExpiry = 0 do not abandon messages
require.ElementsMatch(t, []uint16{18}, deleted) // expiry is still effective for v5.
require.Len(t, deleted, 1)
require.Equal(t, 4, cl.State.Inflight.Len())
}
func TestClientResendInflightMessages(t *testing.T) {
@@ -647,6 +749,86 @@ func TestClientWritePacket(t *testing.T) {
}
}
// TestClientWritePacketBuffer exercises the outbound write-buffering path of
// WritePacket: with pending writes queued in State.outbound, small packets are
// accumulated in Net.outbuf until ClientNetWriteBufferSize (10 here) is reached,
// while packets at or above the buffer size bypass buffering entirely.
// NOTE(review): the expected sizes (18/20/0) depend on the TPacketData fixture
// encodings — confirm against packets test data if they drift.
func TestClientWritePacketBuffer(t *testing.T) {
r, w := net.Pipe()
cl := newClient(w, &ops{
info: new(system.Info),
hooks: new(Hooks),
log: logger,
options: &Options{
Capabilities: &Capabilities{
ReceiveMaximum: 10,
TopicAliasMaximum: 10000,
MaximumClientWritesPending: 3,
maximumPacketID: 10,
},
},
})
cl.ID = "mochi"
cl.State.Inflight.maximumSendQuota = 5
cl.State.Inflight.sendQuota = 5
cl.State.Inflight.maximumReceiveQuota = 10
cl.State.Inflight.receiveQuota = 10
cl.Properties.Props.TopicAliasMaximum = 0
cl.Properties.Props.RequestResponseInfo = 0x1
cl.ops.options.ClientNetWriteBufferSize = 10 // small threshold so two small packets trigger a flush
defer cl.Stop(errClientStop)
small := packets.TPacketData[packets.Publish].Get(packets.TPublishNoPayload).Packet
large := packets.TPacketData[packets.Publish].Get(packets.TPublishBasic).Packet
// Occupy the outbound queue so WritePacket takes the buffered branch.
cl.State.outbound <- small
tt := []struct {
pks []*packets.Packet
size int
}{
{
pks: []*packets.Packet{small, small}, // two smalls exceed the 10-byte buffer -> flushed together
size: 18,
},
{
pks: []*packets.Packet{large}, // >= buffer size -> written directly
size: 20,
},
{
pks: []*packets.Packet{small}, // stays buffered; only seen after Conn close drains the pipe
size: 0,
},
}
go func() {
for i, tx := range tt {
for _, pk := range tx.pks {
cl.Properties.ProtocolVersion = pk.ProtocolVersion
err := cl.WritePacket(*pk)
require.NoError(t, err, "index: %d", i)
if i == len(tt)-1 {
cl.Net.Conn.Close()
}
time.Sleep(100 * time.Millisecond)
}
}
}()
var n int
var err error
for i, tx := range tt {
buf := make([]byte, 100)
if i == len(tt)-1 {
// Final case: read until EOF (connection closed above).
buf, err = io.ReadAll(r)
n = len(buf)
} else {
n, err = io.ReadAtLeast(r, buf, 1)
}
require.NoError(t, err, "index: %d", i)
require.Equal(t, tx.size, n, "index: %d", i)
}
}
func TestWriteClientOversizePacket(t *testing.T) {
cl, _, _ := newTestClient()
cl.Properties.Props.MaximumPacketSize = 2

56
cmd/docker/main.go Normal file
View File

@@ -0,0 +1,56 @@
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2023 mochi-mqtt
// SPDX-FileContributor: dgduncan, mochi-co
package main
import (
"flag"
"github.com/mochi-mqtt/server/v2/config"
"log"
"log/slog"
"os"
"os/signal"
"syscall"
mqtt "github.com/mochi-mqtt/server/v2"
)
// main loads a yaml or json configuration file (default: config.yaml), starts
// the mochi-mqtt broker with it, and blocks until SIGINT or SIGTERM arrives.
func main() {
	// Install a basic text logger so anything logged before the configured
	// server takes over uses a consistent format.
	slog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, nil)))

	configFile := flag.String("config", "config.yaml", "path to mochi config yaml or json file")
	flag.Parse()

	done := make(chan bool, 1)
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-sigs
		done <- true
	}()

	configBytes, err := os.ReadFile(*configFile)
	if err != nil {
		log.Fatal(err)
	}

	options, err := config.FromBytes(configBytes)
	if err != nil {
		log.Fatal(err)
	}

	server := mqtt.New(options)
	go func() {
		if err := server.Serve(); err != nil {
			log.Fatal(err)
		}
	}()

	<-done
	server.Log.Warn("caught signal, stopping...")
	_ = server.Close()
	server.Log.Info("mochi mqtt shutdown complete")
}

View File

@@ -33,19 +33,31 @@ func main() {
server := mqtt.New(nil)
_ = server.AddHook(new(auth.AllowHook), nil)
tcp := listeners.NewTCP("t1", *tcpAddr, nil)
tcp := listeners.NewTCP(listeners.Config{
ID: "t1",
Address: *tcpAddr,
})
err := server.AddListener(tcp)
if err != nil {
log.Fatal(err)
}
ws := listeners.NewWebsocket("ws1", *wsAddr, nil)
ws := listeners.NewWebsocket(listeners.Config{
ID: "ws1",
Address: *wsAddr,
})
err = server.AddListener(ws)
if err != nil {
log.Fatal(err)
}
stats := listeners.NewHTTPStats("stats", *infoAddr, nil, server.Info)
stats := listeners.NewHTTPStats(
listeners.Config{
ID: "info",
Address: *infoAddr,
},
server.Info,
)
err = server.AddListener(stats)
if err != nil {
log.Fatal(err)
@@ -61,6 +73,5 @@ func main() {
<-done
server.Log.Warn("caught signal, stopping...")
_ = server.Close()
server.Log.Info("main.go finished")
server.Log.Info("mochi mqtt shutdown complete")
}

15
config.yaml Normal file
View File

@@ -0,0 +1,15 @@
listeners:
- type: "tcp"
id: "tcp1"
address: ":1883"
- type: "ws"
id: "ws1"
address: ":1882"
- type: "sysinfo"
id: "stats"
address: ":1880"
hooks:
auth:
allow_all: true
options:
inline_client: true

144
config/config.go Normal file
View File

@@ -0,0 +1,144 @@
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co
// SPDX-FileContributor: mochi-co
package config
import (
"encoding/json"
"github.com/mochi-mqtt/server/v2/hooks/auth"
"github.com/mochi-mqtt/server/v2/hooks/debug"
"github.com/mochi-mqtt/server/v2/hooks/storage/badger"
"github.com/mochi-mqtt/server/v2/hooks/storage/bolt"
"github.com/mochi-mqtt/server/v2/hooks/storage/redis"
"github.com/mochi-mqtt/server/v2/listeners"
"gopkg.in/yaml.v3"
mqtt "github.com/mochi-mqtt/server/v2"
)
// config defines the structure of configuration data to be parsed from a config source.
type config struct {
	// Options was previously untagged while its siblings carried explicit tags;
	// the tags below are decode-compatible (yaml.v3 lowercases untagged field
	// names and encoding/json matches keys case-insensitively) and make the
	// expected "options" key explicit.
	Options     mqtt.Options       `yaml:"options" json:"options"`
	Listeners   []listeners.Config `yaml:"listeners" json:"listeners"`
	HookConfigs HookConfigs        `yaml:"hooks" json:"hooks"`
}
// HookConfigs contains configurations to enable individual hooks.
// A nil field leaves that hook category disabled (see ToHooks).
type HookConfigs struct {
Auth *HookAuthConfig `yaml:"auth" json:"auth"` // authentication hook configuration; nil disables auth hooks
Storage *HookStorageConfig `yaml:"storage" json:"storage"` // persistent storage hook configurations; nil disables storage hooks
Debug *debug.Options `yaml:"debug" json:"debug"` // debug hook options; nil disables the debug hook
}
// HookAuthConfig contains configurations for the auth hook.
type HookAuthConfig struct {
Ledger auth.Ledger `yaml:"ledger" json:"ledger"` // rule-based auth ledger; used when AllowAll is false
AllowAll bool `yaml:"allow_all" json:"allow_all"` // if true, the allow-all auth hook is used and Ledger is ignored
}
// HookStorageConfig contains configurations for the different storage hooks.
// Each non-nil field enables the corresponding storage hook (see toHooksStorage).
type HookStorageConfig struct {
Badger *badger.Options `yaml:"badger" json:"badger"` // BadgerDB storage hook options
Bolt *bolt.Options `yaml:"bolt" json:"bolt"` // BoltDB storage hook options
Redis *redis.Options `yaml:"redis" json:"redis"` // Redis storage hook options
}
// ToHooks converts Hook file configurations into Hooks to be added to the server.
// Nil hook categories are skipped entirely.
func (hc HookConfigs) ToHooks() []mqtt.HookLoadConfig {
	var loadConfigs []mqtt.HookLoadConfig

	if hc.Auth != nil {
		loadConfigs = append(loadConfigs, hc.toHooksAuth()...)
	}

	if hc.Storage != nil {
		loadConfigs = append(loadConfigs, hc.toHooksStorage()...)
	}

	if hc.Debug != nil {
		loadConfigs = append(loadConfigs, mqtt.HookLoadConfig{
			Hook:   new(debug.Hook),
			Config: hc.Debug,
		})
	}

	return loadConfigs
}
// toHooksAuth converts auth hook configurations into auth hooks: either a
// single allow-all hook, or a ledger-backed auth hook.
func (hc HookConfigs) toHooksAuth() []mqtt.HookLoadConfig {
	if hc.Auth.AllowAll {
		return []mqtt.HookLoadConfig{
			{Hook: new(auth.AllowHook)},
		}
	}

	// Rebuild the ledger field-by-field rather than copying the struct, which
	// would copy its embedded sync.Locker.
	ledger := &auth.Ledger{
		Users: hc.Auth.Ledger.Users,
		Auth:  hc.Auth.Ledger.Auth,
		ACL:   hc.Auth.Ledger.ACL,
	}

	return []mqtt.HookLoadConfig{
		{
			Hook:   new(auth.Hook),
			Config: &auth.Options{Ledger: ledger},
		},
	}
}
// toHooksStorage converts storage hook configurations into storage hooks,
// one per non-nil storage backend configuration.
func (hc HookConfigs) toHooksStorage() []mqtt.HookLoadConfig {
	var loadConfigs []mqtt.HookLoadConfig

	if cfg := hc.Storage.Badger; cfg != nil {
		loadConfigs = append(loadConfigs, mqtt.HookLoadConfig{Hook: new(badger.Hook), Config: cfg})
	}

	if cfg := hc.Storage.Bolt; cfg != nil {
		loadConfigs = append(loadConfigs, mqtt.HookLoadConfig{Hook: new(bolt.Hook), Config: cfg})
	}

	if cfg := hc.Storage.Redis; cfg != nil {
		loadConfigs = append(loadConfigs, mqtt.HookLoadConfig{Hook: new(redis.Hook), Config: cfg})
	}

	return loadConfigs
}
// FromBytes unmarshals a byte slice of JSON or YAML config data into a valid
// server options value. Any hooks configurations are converted into Hooks using
// the toHooks methods in this package. Empty input yields (nil, nil).
func FromBytes(b []byte) (*mqtt.Options, error) {
	if len(b) == 0 {
		return nil, nil
	}

	c := new(config)

	// A leading '{' indicates JSON; anything else is treated as YAML.
	unmarshal := yaml.Unmarshal
	if b[0] == '{' {
		unmarshal = json.Unmarshal
	}
	if err := unmarshal(b, c); err != nil {
		return nil, err
	}

	o := c.Options
	o.Hooks = c.HookConfigs.ToHooks()
	o.Listeners = c.Listeners
	return &o, nil
}

212
config/config_test.go Normal file
View File

@@ -0,0 +1,212 @@
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co
// SPDX-FileContributor: mochi-co
package config
import (
"testing"
"github.com/stretchr/testify/require"
"github.com/mochi-mqtt/server/v2/hooks/auth"
"github.com/mochi-mqtt/server/v2/hooks/storage/badger"
"github.com/mochi-mqtt/server/v2/hooks/storage/bolt"
"github.com/mochi-mqtt/server/v2/hooks/storage/redis"
"github.com/mochi-mqtt/server/v2/listeners"
mqtt "github.com/mochi-mqtt/server/v2"
)
var (
// yamlBytes is a minimal YAML server configuration fixture used as parser input.
yamlBytes = []byte(`
listeners:
- type: "tcp"
id: "file-tcp1"
address: ":1883"
hooks:
auth:
allow_all: true
options:
client_net_write_buffer_size: 2048
capabilities:
minimum_protocol_version: 3
compatibilities:
restore_sys_info_on_restart: true
`)
// jsonBytes is the JSON equivalent of yamlBytes.
jsonBytes = []byte(`{
"listeners": [
{
"type": "tcp",
"id": "file-tcp1",
"address": ":1883"
}
],
"hooks": {
"auth": {
"allow_all": true
}
},
"options": {
"client_net_write_buffer_size": 2048,
"capabilities": {
"minimum_protocol_version": 3,
"compatibilities": {
"restore_sys_info_on_restart": true
}
}
}
}
`)
// parsedOptions is the expected result of parsing either fixture above.
parsedOptions = mqtt.Options{
Listeners: []listeners.Config{
{
Type: listeners.TypeTCP,
ID: "file-tcp1",
Address: ":1883",
},
},
Hooks: []mqtt.HookLoadConfig{
{
Hook: new(auth.AllowHook),
},
},
ClientNetWriteBufferSize: 2048,
Capabilities: &mqtt.Capabilities{
MinimumProtocolVersion: 3,
Compatibilities: mqtt.Compatibilities{
RestoreSysInfoOnRestart: true,
},
},
}
)
// TestFromBytesEmptyL asserts that empty config input produces no error.
func TestFromBytesEmptyL(t *testing.T) {
	_, err := FromBytes([]byte{})
	require.NoError(t, err)
}
// TestFromBytesYAML asserts the YAML fixture parses into the expected options.
func TestFromBytesYAML(t *testing.T) {
	parsed, err := FromBytes(yamlBytes)
	require.NoError(t, err)
	require.Equal(t, parsedOptions, *parsed)
}
func TestFromBytesYAMLError(t *testing.T) {
_, err := FromBytes(append(yamlBytes, 'a'))
require.Error(t, err)
}
// TestFromBytesJSON asserts the JSON fixture parses into the expected options.
func TestFromBytesJSON(t *testing.T) {
	parsed, err := FromBytes(jsonBytes)
	require.NoError(t, err)
	require.Equal(t, parsedOptions, *parsed)
}
func TestFromBytesJSONError(t *testing.T) {
_, err := FromBytes(append(jsonBytes, 'a'))
require.Error(t, err)
}
// TestToHooksAuthAllowAll asserts that allow_all produces a single allow-all auth hook.
func TestToHooksAuthAllowAll(t *testing.T) {
	hc := HookConfigs{Auth: &HookAuthConfig{AllowAll: true}}
	expect := []mqtt.HookLoadConfig{{Hook: new(auth.AllowHook)}}
	require.Equal(t, expect, hc.toHooksAuth())
}
func TestToHooksAuthAllowLedger(t *testing.T) {
hc := HookConfigs{
Auth: &HookAuthConfig{
Ledger: auth.Ledger{
Auth: auth.AuthRules{
{Username: "peach", Password: "password1", Allow: true},
},
},
},
}
th := hc.toHooksAuth()
expect := []mqtt.HookLoadConfig{
{
Hook: new(auth.Hook),
Config: &auth.Options{
Ledger: &auth.Ledger{ // avoid copying sync.Locker
Auth: auth.AuthRules{
{Username: "peach", Password: "password1", Allow: true},
},
},
},
},
}
require.Equal(t, expect, th)
}
// TestToHooksStorageBadger asserts that a badger config produces a badger storage hook.
func TestToHooksStorageBadger(t *testing.T) {
	hc := HookConfigs{
		Storage: &HookStorageConfig{Badger: &badger.Options{Path: "badger"}},
	}
	expect := []mqtt.HookLoadConfig{
		{Hook: new(badger.Hook), Config: hc.Storage.Badger},
	}
	require.Equal(t, expect, hc.toHooksStorage())
}
// TestToHooksStorageBolt asserts that a bolt config produces a bolt storage hook.
func TestToHooksStorageBolt(t *testing.T) {
	hc := HookConfigs{
		Storage: &HookStorageConfig{Bolt: &bolt.Options{Path: "bolt"}},
	}
	expect := []mqtt.HookLoadConfig{
		{Hook: new(bolt.Hook), Config: hc.Storage.Bolt},
	}
	require.Equal(t, expect, hc.toHooksStorage())
}
// TestToHooksStorageRedis asserts that a redis config produces a redis storage hook.
func TestToHooksStorageRedis(t *testing.T) {
	hc := HookConfigs{
		Storage: &HookStorageConfig{Redis: &redis.Options{Username: "test"}},
	}
	expect := []mqtt.HookLoadConfig{
		{Hook: new(redis.Hook), Config: hc.Storage.Redis},
	}
	require.Equal(t, expect, hc.toHooksStorage())
}

View File

@@ -63,7 +63,10 @@ func main() {
log.Fatal(err)
}
tcp := listeners.NewTCP("t1", ":1883", nil)
tcp := listeners.NewTCP(listeners.Config{
ID: "t1",
Address: ":1883",
})
err = server.AddListener(tcp)
if err != nil {
log.Fatal(err)

View File

@@ -45,7 +45,10 @@ func main() {
log.Fatal(err)
}
tcp := listeners.NewTCP("t1", ":1883", nil)
tcp := listeners.NewTCP(listeners.Config{
ID: "t1",
Address: ":1883",
})
err = server.AddListener(tcp)
if err != nil {
log.Fatal(err)

View File

@@ -32,7 +32,10 @@ func main() {
server.Options.Capabilities.MaximumClientWritesPending = 16 * 1024
_ = server.AddHook(new(auth.AllowHook), nil)
tcp := listeners.NewTCP("t1", *tcpAddr, nil)
tcp := listeners.NewTCP(listeners.Config{
ID: "t1",
Address: *tcpAddr,
})
err := server.AddListener(tcp)
if err != nil {
log.Fatal(err)

View File

@@ -0,0 +1,92 @@
{
"listeners": [
{
"type": "tcp",
"id": "file-tcp1",
"address": ":1883"
},
{
"type": "ws",
"id": "file-websocket",
"address": ":1882"
},
{
"type": "healthcheck",
"id": "file-healthcheck",
"address": ":1880"
}
],
"hooks": {
"debug": {
"enable": true
},
"storage": {
"badger": {
"path": "badger.db",
"gc_interval": 3,
"gc_discard_ratio": 0.5
},
"bolt": {
"path": "bolt.db"
},
"redis": {
"h_prefix": "mc",
"username": "mochi",
"password": "melon",
"address": "localhost:6379",
"database": 1
}
},
"auth": {
"allow_all": false,
"ledger": {
"auth": [
{
"username": "peach",
"password": "password1",
"allow": true
}
],
"acl": [
{
"remote": "127.0.0.1:*"
},
{
"username": "melon",
"filters": null,
"melon/#": 3,
"updates/#": 2
}
]
}
}
},
"options": {
"client_net_write_buffer_size": 2048,
"client_net_read_buffer_size": 2048,
"sys_topic_resend_interval": 10,
"inline_client": true,
"capabilities": {
"maximum_message_expiry_interval": 100,
"maximum_client_writes_pending": 8192,
"maximum_session_expiry_interval": 86400,
"maximum_packet_size": 0,
"receive_maximum": 1024,
"maximum_inflight": 8192,
"topic_alias_maximum": 65535,
"shared_sub_available": 1,
"minimum_protocol_version": 3,
"maximum_qos": 2,
"retain_available": 1,
"wildcard_sub_available": 1,
"sub_id_available": 1,
"compatibilities": {
"obscure_not_authorized": true,
"passive_client_disconnect": false,
"always_return_response_info": false,
"restore_sys_info_on_restart": false,
"no_inherited_properties_on_ack": false
}
}
}
}

View File

@@ -0,0 +1,64 @@
listeners:
- type: "tcp"
id: "file-tcp1"
address: ":1883"
- type: "ws"
id: "file-websocket"
address: ":1882"
- type: "healthcheck"
id: "file-healthcheck"
address: ":1880"
hooks:
debug:
enable: true
storage:
badger:
path: badger.db
gc_interval: 3
gc_discard_ratio: 0.5
bolt:
path: bolt.db
redis:
h_prefix: "mc"
username: "mochi"
password: "melon"
address: "localhost:6379"
database: 1
auth:
allow_all: false
ledger:
auth:
- username: peach
password: password1
allow: true
acl:
- remote: 127.0.0.1:*
- username: melon
filters:
melon/#: 3
updates/#: 2
options:
client_net_write_buffer_size: 2048
client_net_read_buffer_size: 2048
sys_topic_resend_interval: 10
inline_client: true
capabilities:
maximum_message_expiry_interval: 100
maximum_client_writes_pending: 8192
maximum_session_expiry_interval: 86400
maximum_packet_size: 0
receive_maximum: 1024
maximum_inflight: 8192
topic_alias_maximum: 65535
shared_sub_available: 1
minimum_protocol_version: 3
maximum_qos: 2
retain_available: 1
wildcard_sub_available: 1
sub_id_available: 1
compatibilities:
obscure_not_authorized: true
passive_client_disconnect: false
always_return_response_info: false
restore_sys_info_on_restart: false
no_inherited_properties_on_ack: false

49
examples/config/main.go Normal file
View File

@@ -0,0 +1,49 @@
// SPDX-License-Identifier: MIT
// SPDX-FileCopyrightText: 2023 mochi-mqtt, mochi-co
// SPDX-FileContributor: mochi-co
package main
import (
"github.com/mochi-mqtt/server/v2/config"
"log"
"os"
"os/signal"
"syscall"
mqtt "github.com/mochi-mqtt/server/v2"
)
// main starts a broker configured entirely from the config.json file in the
// working directory and runs until SIGINT or SIGTERM is received.
func main() {
	done := make(chan bool, 1)
	sigs := make(chan os.Signal, 1)
	signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-sigs
		done <- true
	}()

	configBytes, err := os.ReadFile("config.json")
	if err != nil {
		log.Fatal(err)
	}

	options, err := config.FromBytes(configBytes)
	if err != nil {
		log.Fatal(err)
	}

	server := mqtt.New(options)
	go func() {
		if err := server.Serve(); err != nil {
			log.Fatal(err)
		}
	}()

	<-done
	server.Log.Warn("caught signal, stopping...")
	_ = server.Close()
	server.Log.Info("main.go finished")
}

View File

@@ -46,7 +46,10 @@ func main() {
log.Fatal(err)
}
tcp := listeners.NewTCP("t1", ":1883", nil)
tcp := listeners.NewTCP(listeners.Config{
ID: "t1",
Address: ":1883",
})
err = server.AddListener(tcp)
if err != nil {
log.Fatal(err)

View File

@@ -28,15 +28,25 @@ func main() {
done <- true
}()
server := mqtt.New(nil)
server := mqtt.New(&mqtt.Options{
InlineClient: true, // you must enable inline client to use direct publishing and subscribing.
})
_ = server.AddHook(new(auth.AllowHook), nil)
tcp := listeners.NewTCP("t1", ":1883", nil)
tcp := listeners.NewTCP(listeners.Config{
ID: "t1",
Address: ":1883",
})
err := server.AddListener(tcp)
if err != nil {
log.Fatal(err)
}
err = server.AddHook(new(ExampleHook), map[string]any{})
// Add custom hook (ExampleHook) to the server
err = server.AddHook(new(ExampleHook), &ExampleHookOptions{
Server: server,
})
if err != nil {
log.Fatal(err)
}
@@ -87,8 +97,14 @@ func main() {
server.Log.Info("main.go finished")
}
// Options contains configuration settings for the hook.
type ExampleHookOptions struct {
Server *mqtt.Server
}
type ExampleHook struct {
mqtt.HookBase
config *ExampleHookOptions
}
func (h *ExampleHook) ID() string {
@@ -108,11 +124,34 @@ func (h *ExampleHook) Provides(b byte) bool {
func (h *ExampleHook) Init(config any) error {
h.Log.Info("initialised")
if _, ok := config.(*ExampleHookOptions); !ok && config != nil {
return mqtt.ErrInvalidConfigType
}
h.config = config.(*ExampleHookOptions)
if h.config.Server == nil {
return mqtt.ErrInvalidConfigType
}
return nil
}
// subscribeCallback handles messages for subscribed topics
func (h *ExampleHook) subscribeCallback(cl *mqtt.Client, sub packets.Subscription, pk packets.Packet) {
h.Log.Info("hook subscribed message", "client", cl.ID, "topic", pk.TopicName)
}
func (h *ExampleHook) OnConnect(cl *mqtt.Client, pk packets.Packet) error {
h.Log.Info("client connected", "client", cl.ID)
// Example demonstrating how to subscribe to a topic within the hook.
h.config.Server.Subscribe("hook/direct/publish", 1, h.subscribeCallback)
// Example demonstrating how to publish a message within the hook
err := h.config.Server.Publish("hook/direct/publish", []byte("packet hook message"), false, 0)
if err != nil {
h.Log.Error("hook.publish", "error", err)
}
return nil
}

View File

@@ -31,7 +31,10 @@ func main() {
server.Options.Capabilities.Compatibilities.NoInheritedPropertiesOnAck = true
_ = server.AddHook(new(pahoAuthHook), nil)
tcp := listeners.NewTCP("t1", ":1883", nil)
tcp := listeners.NewTCP(listeners.Config{
ID: "t1",
Address: ":1883",
})
err := server.AddListener(tcp)
if err != nil {
log.Fatal(err)

View File

@@ -5,15 +5,16 @@
package main
import (
"log"
"os"
"os/signal"
"syscall"
badgerdb "github.com/dgraph-io/badger"
mqtt "github.com/mochi-mqtt/server/v2"
"github.com/mochi-mqtt/server/v2/hooks/auth"
"github.com/mochi-mqtt/server/v2/hooks/storage/badger"
"github.com/mochi-mqtt/server/v2/listeners"
"github.com/timshannon/badgerhold"
"log"
"os"
"os/signal"
"syscall"
)
func main() {
@@ -31,14 +32,39 @@ func main() {
server := mqtt.New(nil)
_ = server.AddHook(new(auth.AllowHook), nil)
// AddHook adds a BadgerDB hook to the server with the specified options.
// GcInterval specifies the interval at which BadgerDB garbage collection process runs.
// Refer to https://dgraph.io/docs/badger/get-started/#garbage-collection for more information.
err := server.AddHook(new(badger.Hook), &badger.Options{
Path: badgerPath,
// Set the interval for garbage collection. Adjust according to your actual scenario.
GcInterval: 5 * 60,
// GcDiscardRatio specifies the ratio of log discard compared to the maximum possible log discard.
// Setting it to a higher value would result in fewer space reclaims, while setting it to a lower value
// would result in more space reclaims at the cost of increased activity on the LSM tree.
// discardRatio must be in the range (0.0, 1.0), both endpoints excluded, otherwise, it will be set to the default value of 0.5.
// Adjust according to your actual scenario.
GcDiscardRatio: 0.5,
Options: &badgerhold.Options{
// BadgerDB options. Adjust according to your actual scenario.
Options: badgerdb.Options{
NumCompactors: 2, // Number of compactors. Compactions can be expensive.
MaxTableSize: 64 << 20, // Maximum size of each table (64 MB).
ValueLogFileSize: 100 * (1 << 20), // Set the default size of the log file to 100 MB.
},
},
})
if err != nil {
log.Fatal(err)
}
tcp := listeners.NewTCP("t1", ":1883", nil)
tcp := listeners.NewTCP(listeners.Config{
ID: "t1",
Address: ":1883",
})
err = server.AddListener(tcp)
if err != nil {
log.Fatal(err)

View File

@@ -40,7 +40,10 @@ func main() {
log.Fatal(err)
}
tcp := listeners.NewTCP("t1", ":1883", nil)
tcp := listeners.NewTCP(listeners.Config{
ID: "t1",
Address: ":1883",
})
err = server.AddListener(tcp)
if err != nil {
log.Fatal(err)

View File

@@ -48,7 +48,10 @@ func main() {
log.Fatal(err)
}
tcp := listeners.NewTCP("t1", ":1883", nil)
tcp := listeners.NewTCP(listeners.Config{
ID: "t1",
Address: ":1883",
})
err = server.AddListener(tcp)
if err != nil {
log.Fatal(err)

View File

@@ -38,7 +38,10 @@ func main() {
log.Fatal(err)
}
tcp := listeners.NewTCP("t1", ":1883", nil)
tcp := listeners.NewTCP(listeners.Config{
ID: "t1",
Address: ":1883",
})
err = server.AddListener(tcp)
if err != nil {
log.Fatal(err)

View File

@@ -79,7 +79,9 @@ func main() {
server := mqtt.New(nil)
_ = server.AddHook(new(auth.AllowHook), nil)
tcp := listeners.NewTCP("t1", ":1883", &listeners.Config{
tcp := listeners.NewTCP(listeners.Config{
ID: "t1",
Address: ":1883",
TLSConfig: tlsConfig,
})
err = server.AddListener(tcp)
@@ -87,7 +89,9 @@ func main() {
log.Fatal(err)
}
ws := listeners.NewWebsocket("ws1", ":1882", &listeners.Config{
ws := listeners.NewWebsocket(listeners.Config{
ID: "ws1",
Address: ":1882",
TLSConfig: tlsConfig,
})
err = server.AddListener(ws)
@@ -95,9 +99,13 @@ func main() {
log.Fatal(err)
}
stats := listeners.NewHTTPStats("stats", ":8080", &listeners.Config{
TLSConfig: tlsConfig,
}, server.Info)
stats := listeners.NewHTTPStats(
listeners.Config{
ID: "stats",
Address: ":8080",
TLSConfig: tlsConfig,
}, server.Info,
)
err = server.AddListener(stats)
if err != nil {
log.Fatal(err)

View File

@@ -27,7 +27,10 @@ func main() {
server := mqtt.New(nil)
_ = server.AddHook(new(auth.AllowHook), nil)
ws := listeners.NewWebsocket("ws1", ":1882", nil)
ws := listeners.NewWebsocket(listeners.Config{
ID: "ws1",
Address: ":1882",
})
err := server.AddListener(ws)
if err != nil {
log.Fatal(err)

6
go.mod
View File

@@ -31,7 +31,7 @@ require (
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/sys v0.5.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/sys v0.13.0 // indirect
google.golang.org/protobuf v1.33.0 // indirect
)

16
go.sum
View File

@@ -106,26 +106,26 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191105084925-a882066a44e0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@@ -62,6 +62,12 @@ var (
ErrInvalidConfigType = errors.New("invalid config type provided")
)
// HookLoadConfig contains the hook and configuration as loaded from a configuration (usually file).
type HookLoadConfig struct {
Hook Hook
Config any
}
// Hook provides an interface of handlers for different events which occur
// during the lifecycle of the broker.
type Hook interface {
@@ -70,6 +76,7 @@ type Hook interface {
Init(config any) error
Stop() error
SetOpts(l *slog.Logger, o *HookOptions)
OnStarted()
OnStopped()
OnConnectAuthenticate(cl *Client, pk packets.Packet) bool

View File

@@ -16,9 +16,10 @@ import (
// Options contains configuration settings for the debug output.
type Options struct {
ShowPacketData bool // include decoded packet data (default false)
ShowPings bool // show ping requests and responses (default false)
ShowPasswords bool // show connecting user passwords (default false)
Enable bool `yaml:"enable" json:"enable"` // non-zero field for enabling hook using file-based config
ShowPacketData bool `yaml:"show_packet_data" json:"show_packet_data"` // include decoded packet data (default false)
ShowPings bool `yaml:"show_pings" json:"show_pings"` // show ping requests and responses (default false)
ShowPasswords bool `yaml:"show_passwords" json:"show_passwords"` // show connecting user passwords (default false)
}
// Hook is a debugging hook which logs additional low-level information from the server.

View File

@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"strings"
"time"
mqtt "github.com/mochi-mqtt/server/v2"
"github.com/mochi-mqtt/server/v2/hooks/storage"
@@ -20,7 +21,9 @@ import (
const (
// defaultDbFile is the default file path for the badger db file.
defaultDbFile = ".badger"
defaultDbFile = ".badger"
defaultGcInterval = 5 * 60 // gc interval in seconds
defaultGcDiscardRatio = 0.5
)
// clientKey returns a primary key for a client.
@@ -51,14 +54,21 @@ func sysInfoKey() string {
// Options contains configuration settings for the BadgerDB instance.
type Options struct {
Options *badgerhold.Options
Path string
Path string `yaml:"path" json:"path"`
// GcDiscardRatio specifies the ratio of log discard compared to the maximum possible log discard.
// Setting it to a higher value would result in fewer space reclaims, while setting it to a lower value
// would result in more space reclaims at the cost of increased activity on the LSM tree.
// discardRatio must be in the range (0.0, 1.0), both endpoints excluded, otherwise, it will be set to the default value of 0.5.
GcDiscardRatio float64 `yaml:"gc_discard_ratio" json:"gc_discard_ratio"`
GcInterval int64 `yaml:"gc_interval" json:"gc_interval"`
}
// Hook is a persistent storage hook based using BadgerDB file store as a backend.
type Hook struct {
mqtt.HookBase
config *Options // options for configuring the BadgerDB instance.
db *badgerhold.Store // the BadgerDB instance.
config *Options // options for configuring the BadgerDB instance.
gcTicker *time.Ticker // Ticker for BadgerDB garbage collection.
db *badgerhold.Store // the BadgerDB instance.
}
// ID returns the id of the hook.
@@ -89,6 +99,21 @@ func (h *Hook) Provides(b byte) bool {
}, []byte{b})
}
// GcLoop periodically runs the garbage collection process to reclaim space in the value log files.
// It uses a ticker to trigger the garbage collection at regular intervals specified by the configuration.
// Refer to: https://dgraph.io/docs/badger/get-started/#garbage-collection
func (h *Hook) GcLoop() {
for range h.gcTicker.C {
again:
// Run the garbage collection process with a threshold.
// If the process returns nil (success), repeat the process.
err := h.db.Badger().RunValueLogGC(h.config.GcDiscardRatio)
if err == nil {
goto again // Retry garbage collection if successful.
}
}
}
// Init initializes and connects to the badger instance.
func (h *Hook) Init(config any) error {
if _, ok := config.(*Options); !ok && config != nil {
@@ -104,6 +129,14 @@ func (h *Hook) Init(config any) error {
h.config.Path = defaultDbFile
}
if h.config.GcInterval == 0 {
h.config.GcInterval = defaultGcInterval
}
if h.config.GcDiscardRatio <= 0.0 || h.config.GcDiscardRatio >= 1.0 {
h.config.GcDiscardRatio = defaultGcDiscardRatio
}
options := badgerhold.DefaultOptions
options.Dir = h.config.Path
options.ValueDir = h.config.Path
@@ -115,11 +148,17 @@ func (h *Hook) Init(config any) error {
return err
}
h.gcTicker = time.NewTicker(time.Duration(h.config.GcInterval) * time.Second)
go h.GcLoop()
return nil
}
// Stop closes the badger instance.
func (h *Hook) Stop() error {
if h.gcTicker != nil {
h.gcTicker.Stop()
}
return h.db.Close()
}
@@ -182,7 +221,7 @@ func (h *Hook) OnDisconnect(cl *mqtt.Client, _ error, expire bool) {
return
}
if cl.StopCause() == packets.ErrSessionTakenOver {
if errors.Is(cl.StopCause(), packets.ErrSessionTakenOver) {
return
}

View File

@@ -11,6 +11,7 @@ import (
"testing"
"time"
badgerdb "github.com/dgraph-io/badger"
mqtt "github.com/mochi-mqtt/server/v2"
"github.com/mochi-mqtt/server/v2/hooks/storage"
"github.com/mochi-mqtt/server/v2/packets"
@@ -702,3 +703,21 @@ func TestDebugf(t *testing.T) {
h.SetOpts(logger, nil)
h.Debugf("test", 1, 2, 3)
}
func TestGcLoop(t *testing.T) {
h := new(Hook)
h.SetOpts(logger, nil)
h.Init(&Options{
GcInterval: 2, // Set the interval for garbage collection.
Options: &badgerhold.Options{
// BadgerDB options. Modify as needed.
Options: badgerdb.Options{
ValueLogFileSize: 1 << 20, // Set the default size of the log file to 1 MB.
},
},
})
defer teardown(t, h.config.Path, h)
h.OnSessionEstablished(client, packets.Packet{})
h.OnDisconnect(client, nil, true)
time.Sleep(3 * time.Second)
}

View File

@@ -56,7 +56,7 @@ func sysInfoKey() string {
// Options contains configuration settings for the bolt instance.
type Options struct {
Options *bbolt.Options
Path string
Path string `yaml:"path" json:"path"`
}
// Hook is a persistent storage hook based using boltdb file store as a backend.

View File

@@ -51,8 +51,12 @@ func sysInfoKey() string {
// Options contains configuration settings for the bolt instance.
type Options struct {
HPrefix string
Options *redis.Options
Address string `yaml:"address" json:"address"`
Username string `yaml:"username" json:"username"`
Password string `yaml:"password" json:"password"`
Database int `yaml:"database" json:"database"`
HPrefix string `yaml:"h_prefix" json:"h_prefix"`
Options *redis.Options
}
// Hook is a persistent storage hook based using Redis as a backend.
@@ -105,23 +109,31 @@ func (h *Hook) Init(config any) error {
h.ctx = context.Background()
if config == nil {
config = &Options{
Options: &redis.Options{
Addr: defaultAddr,
},
config = new(Options)
}
h.config = config.(*Options)
if h.config.Options == nil {
h.config.Options = &redis.Options{
Addr: defaultAddr,
}
h.config.Options.Addr = h.config.Address
h.config.Options.DB = h.config.Database
h.config.Options.Username = h.config.Username
h.config.Options.Password = h.config.Password
}
h.config = config.(*Options)
if h.config.HPrefix == "" {
h.config.HPrefix = defaultHPrefix
}
h.Log.Info("connecting to redis service",
h.Log.Info(
"connecting to redis service",
"prefix", h.config.HPrefix,
"address", h.config.Options.Addr,
"username", h.config.Options.Username,
"password-len", len(h.config.Options.Password),
"db", h.config.Options.DB)
"db", h.config.Options.DB,
)
h.db = redis.NewClient(h.config.Options)
_, err := h.db.Ping(context.Background()).Result()

View File

@@ -135,6 +135,29 @@ func TestInitUseDefaults(t *testing.T) {
require.Equal(t, defaultAddr, h.config.Options.Addr)
}
func TestInitUsePassConfig(t *testing.T) {
s := miniredis.RunT(t)
s.StartAddr(defaultAddr)
defer s.Close()
h := newHook(t, "")
h.SetOpts(logger, nil)
err := h.Init(&Options{
Address: defaultAddr,
Username: "username",
Password: "password",
Database: 2,
})
require.Error(t, err)
h.db.FlushAll(h.ctx)
require.Equal(t, defaultAddr, h.config.Options.Addr)
require.Equal(t, "username", h.config.Options.Username)
require.Equal(t, "password", h.config.Options.Password)
require.Equal(t, 2, h.config.Options.DB)
}
func TestInitBadConfig(t *testing.T) {
h := new(Hook)
h.SetOpts(logger, nil)

View File

@@ -13,24 +13,23 @@ import (
"time"
)
const TypeHealthCheck = "healthcheck"
// HTTPHealthCheck is a listener for providing an HTTP healthcheck endpoint.
type HTTPHealthCheck struct {
sync.RWMutex
id string // the internal id of the listener
address string // the network address to bind to
config *Config // configuration values for the listener
config Config // configuration values for the listener
listen *http.Server // the http server
end uint32 // ensure the close methods are only called once
}
// NewHTTPHealthCheck initialises and returns a new HTTP listener, listening on an address.
func NewHTTPHealthCheck(id, address string, config *Config) *HTTPHealthCheck {
if config == nil {
config = new(Config)
}
// NewHTTPHealthCheck initializes and returns a new HTTP listener, listening on an address.
func NewHTTPHealthCheck(config Config) *HTTPHealthCheck {
return &HTTPHealthCheck{
id: id,
address: address,
id: config.ID,
address: config.Address,
config: config,
}
}

View File

@@ -14,47 +14,44 @@ import (
)
func TestNewHTTPHealthCheck(t *testing.T) {
l := NewHTTPHealthCheck("healthcheck", testAddr, nil)
require.Equal(t, "healthcheck", l.id)
require.Equal(t, testAddr, l.address)
l := NewHTTPHealthCheck(basicConfig)
require.Equal(t, basicConfig.ID, l.id)
require.Equal(t, basicConfig.Address, l.address)
}
func TestHTTPHealthCheckID(t *testing.T) {
l := NewHTTPHealthCheck("healthcheck", testAddr, nil)
require.Equal(t, "healthcheck", l.ID())
l := NewHTTPHealthCheck(basicConfig)
require.Equal(t, basicConfig.ID, l.ID())
}
func TestHTTPHealthCheckAddress(t *testing.T) {
l := NewHTTPHealthCheck("healthcheck", testAddr, nil)
require.Equal(t, testAddr, l.Address())
l := NewHTTPHealthCheck(basicConfig)
require.Equal(t, basicConfig.Address, l.Address())
}
func TestHTTPHealthCheckProtocol(t *testing.T) {
l := NewHTTPHealthCheck("healthcheck", testAddr, nil)
l := NewHTTPHealthCheck(basicConfig)
require.Equal(t, "http", l.Protocol())
}
func TestHTTPHealthCheckTLSProtocol(t *testing.T) {
l := NewHTTPHealthCheck("healthcheck", testAddr, &Config{
TLSConfig: tlsConfigBasic,
})
l := NewHTTPHealthCheck(tlsConfig)
_ = l.Init(logger)
require.Equal(t, "https", l.Protocol())
}
func TestHTTPHealthCheckInit(t *testing.T) {
l := NewHTTPHealthCheck("healthcheck", testAddr, nil)
l := NewHTTPHealthCheck(basicConfig)
err := l.Init(logger)
require.NoError(t, err)
require.NotNil(t, l.listen)
require.Equal(t, testAddr, l.listen.Addr)
require.Equal(t, basicConfig.Address, l.listen.Addr)
}
func TestHTTPHealthCheckServeAndClose(t *testing.T) {
// setup http stats listener
l := NewHTTPHealthCheck("healthcheck", testAddr, nil)
l := NewHTTPHealthCheck(basicConfig)
err := l.Init(logger)
require.NoError(t, err)
@@ -90,7 +87,7 @@ func TestHTTPHealthCheckServeAndClose(t *testing.T) {
func TestHTTPHealthCheckServeAndCloseMethodNotAllowed(t *testing.T) {
// setup http stats listener
l := NewHTTPHealthCheck("healthcheck", testAddr, nil)
l := NewHTTPHealthCheck(basicConfig)
err := l.Init(logger)
require.NoError(t, err)
@@ -125,10 +122,7 @@ func TestHTTPHealthCheckServeAndCloseMethodNotAllowed(t *testing.T) {
}
func TestHTTPHealthCheckServeTLSAndClose(t *testing.T) {
l := NewHTTPHealthCheck("healthcheck", testAddr, &Config{
TLSConfig: tlsConfigBasic,
})
l := NewHTTPHealthCheck(tlsConfig)
err := l.Init(logger)
require.NoError(t, err)

View File

@@ -17,27 +17,26 @@ import (
"github.com/mochi-mqtt/server/v2/system"
)
const TypeSysInfo = "sysinfo"
// HTTPStats is a listener for presenting the server $SYS stats on a JSON http endpoint.
type HTTPStats struct {
sync.RWMutex
id string // the internal id of the listener
address string // the network address to bind to
config *Config // configuration values for the listener
config Config // configuration values for the listener
listen *http.Server // the http server
sysInfo *system.Info // pointers to the server data
log *slog.Logger // server logger
end uint32 // ensure the close methods are only called once
}
// NewHTTPStats initialises and returns a new HTTP listener, listening on an address.
func NewHTTPStats(id, address string, config *Config, sysInfo *system.Info) *HTTPStats {
if config == nil {
config = new(Config)
}
// NewHTTPStats initializes and returns a new HTTP listener, listening on an address.
func NewHTTPStats(config Config, sysInfo *system.Info) *HTTPStats {
return &HTTPStats{
id: id,
address: address,
sysInfo: sysInfo,
id: config.ID,
address: config.Address,
config: config,
}
}

View File

@@ -17,38 +17,35 @@ import (
)
func TestNewHTTPStats(t *testing.T) {
l := NewHTTPStats("t1", testAddr, nil, nil)
l := NewHTTPStats(basicConfig, nil)
require.Equal(t, "t1", l.id)
require.Equal(t, testAddr, l.address)
}
func TestHTTPStatsID(t *testing.T) {
l := NewHTTPStats("t1", testAddr, nil, nil)
l := NewHTTPStats(basicConfig, nil)
require.Equal(t, "t1", l.ID())
}
func TestHTTPStatsAddress(t *testing.T) {
l := NewHTTPStats("t1", testAddr, nil, nil)
l := NewHTTPStats(basicConfig, nil)
require.Equal(t, testAddr, l.Address())
}
func TestHTTPStatsProtocol(t *testing.T) {
l := NewHTTPStats("t1", testAddr, nil, nil)
l := NewHTTPStats(basicConfig, nil)
require.Equal(t, "http", l.Protocol())
}
func TestHTTPStatsTLSProtocol(t *testing.T) {
l := NewHTTPStats("t1", testAddr, &Config{
TLSConfig: tlsConfigBasic,
}, nil)
l := NewHTTPStats(tlsConfig, nil)
_ = l.Init(logger)
require.Equal(t, "https", l.Protocol())
}
func TestHTTPStatsInit(t *testing.T) {
sysInfo := new(system.Info)
l := NewHTTPStats("t1", testAddr, nil, sysInfo)
l := NewHTTPStats(basicConfig, sysInfo)
err := l.Init(logger)
require.NoError(t, err)
@@ -64,7 +61,7 @@ func TestHTTPStatsServeAndClose(t *testing.T) {
}
// setup http stats listener
l := NewHTTPStats("t1", testAddr, nil, sysInfo)
l := NewHTTPStats(basicConfig, sysInfo)
err := l.Init(logger)
require.NoError(t, err)
@@ -109,9 +106,7 @@ func TestHTTPStatsServeTLSAndClose(t *testing.T) {
Version: "test",
}
l := NewHTTPStats("t1", testAddr, &Config{
TLSConfig: tlsConfigBasic,
}, sysInfo)
l := NewHTTPStats(tlsConfig, sysInfo)
err := l.Init(logger)
require.NoError(t, err)
@@ -132,7 +127,9 @@ func TestHTTPStatsFailedToServe(t *testing.T) {
}
// setup http stats listener
l := NewHTTPStats("t1", "wrong_addr", nil, sysInfo)
config := basicConfig
config.Address = "wrong_addr"
l := NewHTTPStats(config, sysInfo)
err := l.Init(logger)
require.NoError(t, err)

View File

@@ -14,8 +14,10 @@ import (
// Config contains configuration values for a listener.
type Config struct {
// TLSConfig is a tls.Config configuration to be used with the listener.
// See examples folder for basic and mutual-tls use.
Type string
ID string
Address string
// TLSConfig is a tls.Config configuration to be used with the listener. See examples folder for basic and mutual-tls use.
TLSConfig *tls.Config
}

View File

@@ -19,6 +19,9 @@ import (
const testAddr = ":22222"
var (
basicConfig = Config{ID: "t1", Address: testAddr}
tlsConfig = Config{ID: "t1", Address: testAddr, TLSConfig: tlsConfigBasic}
logger = slog.New(slog.NewTextHandler(os.Stdout, nil))
testCertificate = []byte(`-----BEGIN CERTIFICATE-----
@@ -65,6 +68,7 @@ func init() {
MinVersion: tls.VersionTLS12,
Certificates: []tls.Certificate{cert},
}
tlsConfig.TLSConfig = tlsConfigBasic
}
func TestNew(t *testing.T) {

View File

@@ -12,6 +12,8 @@ import (
"log/slog"
)
const TypeMock = "mock"
// MockEstablisher is a function signature which can be used in testing.
func MockEstablisher(id string, c net.Conn) error {
return nil

View File

@@ -13,26 +13,24 @@ import (
"log/slog"
)
const TypeTCP = "tcp"
// TCP is a listener for establishing client connections on basic TCP protocol.
type TCP struct { // [MQTT-4.2.0-1]
sync.RWMutex
id string // the internal id of the listener
address string // the network address to bind to
listen net.Listener // a net.Listener which will listen for new clients
config *Config // configuration values for the listener
config Config // configuration values for the listener
log *slog.Logger // server logger
end uint32 // ensure the close methods are only called once
}
// NewTCP initialises and returns a new TCP listener, listening on an address.
func NewTCP(id, address string, config *Config) *TCP {
if config == nil {
config = new(Config)
}
// NewTCP initializes and returns a new TCP listener, listening on an address.
func NewTCP(config Config) *TCP {
return &TCP{
id: id,
address: address,
id: config.ID,
address: config.Address,
config: config,
}
}
@@ -44,6 +42,9 @@ func (l *TCP) ID() string {
// Address returns the address of the listener.
func (l *TCP) Address() string {
if l.listen != nil {
return l.listen.Addr().String()
}
return l.address
}

View File

@@ -14,45 +14,40 @@ import (
)
func TestNewTCP(t *testing.T) {
l := NewTCP("t1", testAddr, nil)
l := NewTCP(basicConfig)
require.Equal(t, "t1", l.id)
require.Equal(t, testAddr, l.address)
}
func TestTCPID(t *testing.T) {
l := NewTCP("t1", testAddr, nil)
l := NewTCP(basicConfig)
require.Equal(t, "t1", l.ID())
}
func TestTCPAddress(t *testing.T) {
l := NewTCP("t1", testAddr, nil)
l := NewTCP(basicConfig)
require.Equal(t, testAddr, l.Address())
}
func TestTCPProtocol(t *testing.T) {
l := NewTCP("t1", testAddr, nil)
l := NewTCP(basicConfig)
require.Equal(t, "tcp", l.Protocol())
}
func TestTCPProtocolTLS(t *testing.T) {
l := NewTCP("t1", testAddr, &Config{
TLSConfig: tlsConfigBasic,
})
l := NewTCP(tlsConfig)
_ = l.Init(logger)
defer l.listen.Close()
require.Equal(t, "tcp", l.Protocol())
}
func TestTCPInit(t *testing.T) {
l := NewTCP("t1", testAddr, nil)
l := NewTCP(basicConfig)
err := l.Init(logger)
l.Close(MockCloser)
require.NoError(t, err)
l2 := NewTCP("t2", testAddr, &Config{
TLSConfig: tlsConfigBasic,
})
l2 := NewTCP(tlsConfig)
err = l2.Init(logger)
l2.Close(MockCloser)
require.NoError(t, err)
@@ -60,7 +55,7 @@ func TestTCPInit(t *testing.T) {
}
func TestTCPServeAndClose(t *testing.T) {
l := NewTCP("t1", testAddr, nil)
l := NewTCP(basicConfig)
err := l.Init(logger)
require.NoError(t, err)
@@ -85,9 +80,7 @@ func TestTCPServeAndClose(t *testing.T) {
}
func TestTCPServeTLSAndClose(t *testing.T) {
l := NewTCP("t1", testAddr, &Config{
TLSConfig: tlsConfigBasic,
})
l := NewTCP(tlsConfig)
err := l.Init(logger)
require.NoError(t, err)
@@ -109,7 +102,7 @@ func TestTCPServeTLSAndClose(t *testing.T) {
}
func TestTCPEstablishThenEnd(t *testing.T) {
l := NewTCP("t1", testAddr, nil)
l := NewTCP(basicConfig)
err := l.Init(logger)
require.NoError(t, err)

View File

@@ -13,21 +13,25 @@ import (
"log/slog"
)
const TypeUnix = "unix"
// UnixSock is a listener for establishing client connections on basic UnixSock protocol.
type UnixSock struct {
sync.RWMutex
id string // the internal id of the listener.
address string // the network address to bind to.
config Config // configuration values for the listener
listen net.Listener // a net.Listener which will listen for new clients.
log *slog.Logger // server logger
end uint32 // ensure the close methods are only called once.
}
// NewUnixSock initialises and returns a new UnixSock listener, listening on an address.
func NewUnixSock(id, address string) *UnixSock {
// NewUnixSock initializes and returns a new UnixSock listener, listening on an address.
func NewUnixSock(config Config) *UnixSock {
return &UnixSock{
id: id,
address: address,
id: config.ID,
address: config.Address,
config: config,
}
}

View File

@@ -15,41 +15,47 @@ import (
const testUnixAddr = "mochi.sock"
var (
unixConfig = Config{ID: "t1", Address: testUnixAddr}
)
func TestNewUnixSock(t *testing.T) {
l := NewUnixSock("t1", testUnixAddr)
l := NewUnixSock(unixConfig)
require.Equal(t, "t1", l.id)
require.Equal(t, testUnixAddr, l.address)
}
func TestUnixSockID(t *testing.T) {
l := NewUnixSock("t1", testUnixAddr)
l := NewUnixSock(unixConfig)
require.Equal(t, "t1", l.ID())
}
func TestUnixSockAddress(t *testing.T) {
l := NewUnixSock("t1", testUnixAddr)
l := NewUnixSock(unixConfig)
require.Equal(t, testUnixAddr, l.Address())
}
func TestUnixSockProtocol(t *testing.T) {
l := NewUnixSock("t1", testUnixAddr)
l := NewUnixSock(unixConfig)
require.Equal(t, "unix", l.Protocol())
}
func TestUnixSockInit(t *testing.T) {
l := NewUnixSock("t1", testUnixAddr)
l := NewUnixSock(unixConfig)
err := l.Init(logger)
l.Close(MockCloser)
require.NoError(t, err)
l2 := NewUnixSock("t2", testUnixAddr)
t2Config := unixConfig
t2Config.ID = "t2"
l2 := NewUnixSock(t2Config)
err = l2.Init(logger)
l2.Close(MockCloser)
require.NoError(t, err)
}
func TestUnixSockServeAndClose(t *testing.T) {
l := NewUnixSock("t1", testUnixAddr)
l := NewUnixSock(unixConfig)
err := l.Init(logger)
require.NoError(t, err)
@@ -74,7 +80,7 @@ func TestUnixSockServeAndClose(t *testing.T) {
}
func TestUnixSockEstablishThenEnd(t *testing.T) {
l := NewUnixSock("t1", testUnixAddr)
l := NewUnixSock(unixConfig)
err := l.Init(logger)
require.NoError(t, err)

View File

@@ -19,6 +19,8 @@ import (
"github.com/gorilla/websocket"
)
const TypeWS = "ws"
var (
// ErrInvalidMessage indicates that a message payload was not valid.
ErrInvalidMessage = errors.New("message type not binary")
@@ -29,7 +31,7 @@ type Websocket struct { // [MQTT-4.2.0-1]
sync.RWMutex
id string // the internal id of the listener
address string // the network address to bind to
config *Config // configuration values for the listener
config Config // configuration values for the listener
listen *http.Server // a http server for serving websocket connections
log *slog.Logger // server logger
establish EstablishFn // the server's establish connection handler
@@ -37,15 +39,11 @@ type Websocket struct { // [MQTT-4.2.0-1]
end uint32 // ensure the close methods are only called once
}
// NewWebsocket initialises and returns a new Websocket listener, listening on an address.
func NewWebsocket(id, address string, config *Config) *Websocket {
if config == nil {
config = new(Config)
}
// NewWebsocket initializes and returns a new Websocket listener, listening on an address.
func NewWebsocket(config Config) *Websocket {
return &Websocket{
id: id,
address: address,
id: config.ID,
address: config.Address,
config: config,
upgrader: &websocket.Upgrader{
Subprotocols: []string{"mqtt"},

View File

@@ -17,35 +17,33 @@ import (
)
func TestNewWebsocket(t *testing.T) {
l := NewWebsocket("t1", testAddr, nil)
l := NewWebsocket(basicConfig)
require.Equal(t, "t1", l.id)
require.Equal(t, testAddr, l.address)
}
func TestWebsocketID(t *testing.T) {
l := NewWebsocket("t1", testAddr, nil)
l := NewWebsocket(basicConfig)
require.Equal(t, "t1", l.ID())
}
func TestWebsocketAddress(t *testing.T) {
l := NewWebsocket("t1", testAddr, nil)
l := NewWebsocket(basicConfig)
require.Equal(t, testAddr, l.Address())
}
func TestWebsocketProtocol(t *testing.T) {
l := NewWebsocket("t1", testAddr, nil)
l := NewWebsocket(basicConfig)
require.Equal(t, "ws", l.Protocol())
}
func TestWebsocketProtocolTLS(t *testing.T) {
l := NewWebsocket("t1", testAddr, &Config{
TLSConfig: tlsConfigBasic,
})
l := NewWebsocket(tlsConfig)
require.Equal(t, "wss", l.Protocol())
}
func TestWebsocketInit(t *testing.T) {
l := NewWebsocket("t1", testAddr, nil)
l := NewWebsocket(basicConfig)
require.Nil(t, l.listen)
err := l.Init(logger)
require.NoError(t, err)
@@ -53,7 +51,7 @@ func TestWebsocketInit(t *testing.T) {
}
func TestWebsocketServeAndClose(t *testing.T) {
l := NewWebsocket("t1", testAddr, nil)
l := NewWebsocket(basicConfig)
_ = l.Init(logger)
o := make(chan bool)
@@ -74,9 +72,7 @@ func TestWebsocketServeAndClose(t *testing.T) {
}
func TestWebsocketServeTLSAndClose(t *testing.T) {
l := NewWebsocket("t1", testAddr, &Config{
TLSConfig: tlsConfigBasic,
})
l := NewWebsocket(tlsConfig)
err := l.Init(logger)
require.NoError(t, err)
@@ -96,9 +92,9 @@ func TestWebsocketServeTLSAndClose(t *testing.T) {
}
func TestWebsocketFailedToServe(t *testing.T) {
l := NewWebsocket("t1", "wrong_addr", &Config{
TLSConfig: tlsConfigBasic,
})
config := tlsConfig
config.Address = "wrong_addr"
l := NewWebsocket(config)
err := l.Init(logger)
require.NoError(t, err)
@@ -117,7 +113,7 @@ func TestWebsocketFailedToServe(t *testing.T) {
}
func TestWebsocketUpgrade(t *testing.T) {
l := NewWebsocket("t1", testAddr, nil)
l := NewWebsocket(basicConfig)
_ = l.Init(logger)
e := make(chan bool)
@@ -136,7 +132,7 @@ func TestWebsocketUpgrade(t *testing.T) {
}
func TestWebsocketConnectionReads(t *testing.T) {
l := NewWebsocket("t1", testAddr, nil)
l := NewWebsocket(basicConfig)
_ = l.Init(nil)
recv := make(chan []byte)

81
mempool/bufpool.go Normal file
View File

@@ -0,0 +1,81 @@
package mempool
import (
"bytes"
"sync"
)
// bufPool is the package-level default pool with no capacity limit,
// backing GetBuffer and PutBuffer.
var bufPool = NewBuffer(0)

// GetBuffer takes a Buffer from the default buffer pool.
func GetBuffer() *bytes.Buffer { return bufPool.Get() }

// PutBuffer returns Buffer to the default buffer pool.
func PutBuffer(x *bytes.Buffer) { bufPool.Put(x) }

// BufferPool is implemented by pools that hand out and reclaim reusable
// *bytes.Buffer values.
type BufferPool interface {
	Get() *bytes.Buffer  // Get returns an empty buffer ready for use.
	Put(x *bytes.Buffer) // Put returns a buffer to the pool for reuse.
}

// NewBuffer returns a buffer pool. max specifies the maximum capacity of
// the Buffers the pool will retain. If a Buffer becomes larger than max,
// it will no longer be returned to the pool. If max <= 0, no limit is
// enforced.
func NewBuffer(max int) BufferPool {
	if max > 0 {
		return newBufferWithCap(max)
	}
	return newBuffer()
}

// Buffer is an unbounded Buffer pool.
type Buffer struct {
	pool *sync.Pool // New always produces an empty *bytes.Buffer
}

// newBuffer constructs an unbounded Buffer pool.
func newBuffer() *Buffer {
	return &Buffer{
		pool: &sync.Pool{
			New: func() any { return new(bytes.Buffer) },
		},
	}
}

// Get a Buffer from the pool.
func (b *Buffer) Get() *bytes.Buffer {
	return b.pool.Get().(*bytes.Buffer)
}

// Put the Buffer back into the pool. It resets the Buffer for reuse.
func (b *Buffer) Put(x *bytes.Buffer) {
	x.Reset()
	b.pool.Put(x)
}

// BufferWithCap is a Buffer pool that discards buffers whose capacity has
// grown beyond a configured maximum, so the pool never pins unusually
// large allocations in memory.
type BufferWithCap struct {
	bp  *Buffer // the underlying unbounded pool
	max int     // maximum capacity (in bytes) accepted back by Put
}

// newBufferWithCap constructs a pool that drops buffers larger than max.
func newBufferWithCap(max int) *BufferWithCap {
	return &BufferWithCap{
		bp:  newBuffer(),
		max: max,
	}
}

// Get a Buffer from the pool.
func (b *BufferWithCap) Get() *bytes.Buffer {
	return b.bp.Get()
}

// Put the Buffer back into the pool if the capacity doesn't exceed the
// limit. It resets the Buffer for reuse.
func (b *BufferWithCap) Put(x *bytes.Buffer) {
	if x.Cap() > b.max {
		return // too large; let the GC reclaim it instead of pooling it
	}
	b.bp.Put(x)
}

96
mempool/bufpool_test.go Normal file
View File

@@ -0,0 +1,96 @@
package mempool
import (
"bytes"
"reflect"
"runtime/debug"
"testing"
"github.com/stretchr/testify/require"
)
// TestNewBuffer checks that NewBuffer selects the capped implementation
// only when max is positive, and the plain pool otherwise.
func TestNewBuffer(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	cases := []struct {
		max  int
		want string
	}{
		{1000, "*mempool.BufferWithCap"},
		{0, "*mempool.Buffer"},
		{-1, "*mempool.Buffer"},
	}
	for _, tc := range cases {
		bp := NewBuffer(tc.max)
		require.Equal(t, tc.want, reflect.TypeOf(bp).String())
	}
}
// TestBuffer ensures an unbounded pool hands back reset (empty) buffers.
func TestBuffer(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	const size = 101
	bp := NewBuffer(0)
	buf := bp.Get()
	buf.Write(bytes.Repeat([]byte{'a'}, size))
	bp.Put(buf)
	buf = bp.Get()
	require.Equal(t, 0, buf.Len())
}
// TestBufferWithCap ensures a buffer grown past the cap is dropped rather
// than pooled, so the next Get yields a brand-new, zero-capacity buffer.
func TestBufferWithCap(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(-1))
	const size = 101
	bp := NewBuffer(100)
	buf := bp.Get()
	buf.Write(bytes.Repeat([]byte{'a'}, size))
	bp.Put(buf) // capacity exceeds 100, so the buffer must be discarded
	buf = bp.Get()
	require.Equal(t, 0, buf.Len())
	require.Equal(t, 0, buf.Cap())
}
// BenchmarkBufferPool measures Get/Put round trips on an unbounded pool.
func BenchmarkBufferPool(b *testing.B) {
	bp := NewBuffer(0)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf := bp.Get() // renamed from b to avoid shadowing *testing.B
		buf.WriteString("this is a test")
		bp.Put(buf)
	}
}
// BenchmarkBufferPoolWithCapLarger measures a capped pool whose limit is
// far above the written size, so every buffer is returned to the pool.
func BenchmarkBufferPoolWithCapLarger(b *testing.B) {
	bp := NewBuffer(64 * 1024)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf := bp.Get() // renamed from b to avoid shadowing *testing.B
		buf.WriteString("this is a test")
		bp.Put(buf)
	}
}
// BenchmarkBufferPoolWithCapLesser measures a capped pool whose limit is
// below the written size, so every Put discards the buffer.
func BenchmarkBufferPoolWithCapLesser(b *testing.B) {
	bp := NewBuffer(10)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf := bp.Get() // renamed from b to avoid shadowing *testing.B
		buf.WriteString("this is a test")
		bp.Put(buf)
	}
}
// BenchmarkBufferWithoutPool allocates a fresh buffer every iteration as
// a no-pool baseline for the pooled benchmarks above.
func BenchmarkBufferWithoutPool(b *testing.B) {
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		buf := new(bytes.Buffer) // renamed from b to avoid shadowing *testing.B
		buf.WriteString("this is a test")
		_ = buf // keep the result referenced
	}
}

View File

@@ -12,6 +12,8 @@ import (
"strconv"
"strings"
"sync"
"github.com/mochi-mqtt/server/v2/mempool"
)
// All valid packet types and their packet identifiers.
@@ -298,7 +300,8 @@ func (s *Subscription) decode(b byte) {
// ConnectEncode encodes a connect packet.
func (pk *Packet) ConnectEncode(buf *bytes.Buffer) error {
nb := bytes.NewBuffer([]byte{})
nb := mempool.GetBuffer()
defer mempool.PutBuffer(nb)
nb.Write(encodeBytes(pk.Connect.ProtocolName))
nb.WriteByte(pk.ProtocolVersion)
@@ -315,7 +318,8 @@ func (pk *Packet) ConnectEncode(buf *bytes.Buffer) error {
nb.Write(encodeUint16(pk.Connect.Keepalive))
if pk.ProtocolVersion == 5 {
pb := bytes.NewBuffer([]byte{})
pb := mempool.GetBuffer()
defer mempool.PutBuffer(pb)
(&pk.Properties).Encode(pk.FixedHeader.Type, pk.Mods, pb, 0)
nb.Write(pb.Bytes())
}
@@ -324,7 +328,8 @@ func (pk *Packet) ConnectEncode(buf *bytes.Buffer) error {
if pk.Connect.WillFlag {
if pk.ProtocolVersion == 5 {
pb := bytes.NewBuffer([]byte{})
pb := mempool.GetBuffer()
defer mempool.PutBuffer(pb)
(&pk.Connect).WillProperties.Encode(WillProperties, pk.Mods, pb, 0)
nb.Write(pb.Bytes())
}
@@ -343,7 +348,7 @@ func (pk *Packet) ConnectEncode(buf *bytes.Buffer) error {
pk.FixedHeader.Remaining = nb.Len()
pk.FixedHeader.Encode(buf)
_, _ = nb.WriteTo(buf)
buf.Write(nb.Bytes())
return nil
}
@@ -493,19 +498,22 @@ func (pk *Packet) ConnectValidate() Code {
// ConnackEncode encodes a Connack packet.
func (pk *Packet) ConnackEncode(buf *bytes.Buffer) error {
nb := bytes.NewBuffer([]byte{})
nb := mempool.GetBuffer()
defer mempool.PutBuffer(nb)
nb.WriteByte(encodeBool(pk.SessionPresent))
nb.WriteByte(pk.ReasonCode)
if pk.ProtocolVersion == 5 {
pb := bytes.NewBuffer([]byte{})
pb := mempool.GetBuffer()
defer mempool.PutBuffer(pb)
pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+2) // +SessionPresent +ReasonCode
nb.Write(pb.Bytes())
}
pk.FixedHeader.Remaining = nb.Len()
pk.FixedHeader.Encode(buf)
_, _ = nb.WriteTo(buf)
buf.Write(nb.Bytes())
return nil
}
@@ -536,19 +544,21 @@ func (pk *Packet) ConnackDecode(buf []byte) error {
// DisconnectEncode encodes a Disconnect packet.
func (pk *Packet) DisconnectEncode(buf *bytes.Buffer) error {
nb := bytes.NewBuffer([]byte{})
nb := mempool.GetBuffer()
defer mempool.PutBuffer(nb)
if pk.ProtocolVersion == 5 {
nb.WriteByte(pk.ReasonCode)
pb := bytes.NewBuffer([]byte{})
pb := mempool.GetBuffer()
defer mempool.PutBuffer(pb)
pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len())
nb.Write(pb.Bytes())
}
pk.FixedHeader.Remaining = nb.Len()
pk.FixedHeader.Encode(buf)
_, _ = nb.WriteTo(buf)
buf.Write(nb.Bytes())
return nil
}
@@ -598,7 +608,8 @@ func (pk *Packet) PingrespDecode(buf []byte) error {
// PublishEncode encodes a Publish packet.
func (pk *Packet) PublishEncode(buf *bytes.Buffer) error {
nb := bytes.NewBuffer([]byte{})
nb := mempool.GetBuffer()
defer mempool.PutBuffer(nb)
nb.Write(encodeString(pk.TopicName)) // [MQTT-3.3.2-1]
@@ -610,16 +621,16 @@ func (pk *Packet) PublishEncode(buf *bytes.Buffer) error {
}
if pk.ProtocolVersion == 5 {
pb := bytes.NewBuffer([]byte{})
pb := mempool.GetBuffer()
defer mempool.PutBuffer(pb)
pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+len(pk.Payload))
nb.Write(pb.Bytes())
}
nb.Write(pk.Payload)
pk.FixedHeader.Remaining = nb.Len()
pk.FixedHeader.Remaining = nb.Len() + len(pk.Payload)
pk.FixedHeader.Encode(buf)
_, _ = nb.WriteTo(buf)
buf.Write(nb.Bytes())
buf.Write(pk.Payload)
return nil
}
@@ -690,11 +701,13 @@ func (pk *Packet) PublishValidate(topicAliasMaximum uint16) Code {
// encodePubAckRelRecComp encodes a Puback, Pubrel, Pubrec, or Pubcomp packet.
func (pk *Packet) encodePubAckRelRecComp(buf *bytes.Buffer) error {
nb := bytes.NewBuffer([]byte{})
nb := mempool.GetBuffer()
defer mempool.PutBuffer(nb)
nb.Write(encodeUint16(pk.PacketID))
if pk.ProtocolVersion == 5 {
pb := bytes.NewBuffer([]byte{})
pb := mempool.GetBuffer()
defer mempool.PutBuffer(pb)
pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len())
if pk.ReasonCode >= ErrUnspecifiedError.Code || pb.Len() > 1 {
nb.WriteByte(pk.ReasonCode)
@@ -707,7 +720,7 @@ func (pk *Packet) encodePubAckRelRecComp(buf *bytes.Buffer) error {
pk.FixedHeader.Remaining = nb.Len()
pk.FixedHeader.Encode(buf)
_, _ = nb.WriteTo(buf)
buf.Write(nb.Bytes())
return nil
}
@@ -831,11 +844,13 @@ func (pk *Packet) ReasonCodeValid() bool {
// SubackEncode encodes a Suback packet.
func (pk *Packet) SubackEncode(buf *bytes.Buffer) error {
nb := bytes.NewBuffer([]byte{})
nb := mempool.GetBuffer()
defer mempool.PutBuffer(nb)
nb.Write(encodeUint16(pk.PacketID))
if pk.ProtocolVersion == 5 {
pb := bytes.NewBuffer([]byte{})
pb := mempool.GetBuffer()
defer mempool.PutBuffer(pb)
pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+len(pk.ReasonCodes))
nb.Write(pb.Bytes())
}
@@ -844,7 +859,7 @@ func (pk *Packet) SubackEncode(buf *bytes.Buffer) error {
pk.FixedHeader.Remaining = nb.Len()
pk.FixedHeader.Encode(buf)
_, _ = nb.WriteTo(buf)
buf.Write(nb.Bytes())
return nil
}
@@ -878,10 +893,12 @@ func (pk *Packet) SubscribeEncode(buf *bytes.Buffer) error {
return ErrProtocolViolationNoPacketID
}
nb := bytes.NewBuffer([]byte{})
nb := mempool.GetBuffer()
defer mempool.PutBuffer(nb)
nb.Write(encodeUint16(pk.PacketID))
xb := bytes.NewBuffer([]byte{}) // capture and write filters after length checks
xb := mempool.GetBuffer() // capture and write filters after length checks
defer mempool.PutBuffer(xb)
for _, opts := range pk.Filters {
xb.Write(encodeString(opts.Filter)) // [MQTT-3.8.3-1]
if pk.ProtocolVersion == 5 {
@@ -892,7 +909,8 @@ func (pk *Packet) SubscribeEncode(buf *bytes.Buffer) error {
}
if pk.ProtocolVersion == 5 {
pb := bytes.NewBuffer([]byte{})
pb := mempool.GetBuffer()
defer mempool.PutBuffer(pb)
pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+xb.Len())
nb.Write(pb.Bytes())
}
@@ -901,7 +919,7 @@ func (pk *Packet) SubscribeEncode(buf *bytes.Buffer) error {
pk.FixedHeader.Remaining = nb.Len()
pk.FixedHeader.Encode(buf)
_, _ = nb.WriteTo(buf)
buf.Write(nb.Bytes())
return nil
}
@@ -983,20 +1001,21 @@ func (pk *Packet) SubscribeValidate() Code {
// UnsubackEncode encodes an Unsuback packet.
func (pk *Packet) UnsubackEncode(buf *bytes.Buffer) error {
nb := bytes.NewBuffer([]byte{})
nb := mempool.GetBuffer()
defer mempool.PutBuffer(nb)
nb.Write(encodeUint16(pk.PacketID))
if pk.ProtocolVersion == 5 {
pb := bytes.NewBuffer([]byte{})
pb := mempool.GetBuffer()
defer mempool.PutBuffer(pb)
pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len())
nb.Write(pb.Bytes())
nb.Write(pk.ReasonCodes)
}
nb.Write(pk.ReasonCodes)
pk.FixedHeader.Remaining = nb.Len()
pk.FixedHeader.Encode(buf)
_, _ = nb.WriteTo(buf)
buf.Write(nb.Bytes())
return nil
}
@@ -1031,16 +1050,19 @@ func (pk *Packet) UnsubscribeEncode(buf *bytes.Buffer) error {
return ErrProtocolViolationNoPacketID
}
nb := bytes.NewBuffer([]byte{})
nb := mempool.GetBuffer()
defer mempool.PutBuffer(nb)
nb.Write(encodeUint16(pk.PacketID))
xb := bytes.NewBuffer([]byte{}) // capture filters and write after length checks
xb := mempool.GetBuffer() // capture filters and write after length checks
defer mempool.PutBuffer(xb)
for _, sub := range pk.Filters {
xb.Write(encodeString(sub.Filter)) // [MQTT-3.10.3-1]
}
if pk.ProtocolVersion == 5 {
pb := bytes.NewBuffer([]byte{})
pb := mempool.GetBuffer()
defer mempool.PutBuffer(pb)
pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len()+xb.Len())
nb.Write(pb.Bytes())
}
@@ -1049,7 +1071,7 @@ func (pk *Packet) UnsubscribeEncode(buf *bytes.Buffer) error {
pk.FixedHeader.Remaining = nb.Len()
pk.FixedHeader.Encode(buf)
_, _ = nb.WriteTo(buf)
buf.Write(nb.Bytes())
return nil
}
@@ -1100,16 +1122,18 @@ func (pk *Packet) UnsubscribeValidate() Code {
// AuthEncode encodes an Auth packet.
func (pk *Packet) AuthEncode(buf *bytes.Buffer) error {
nb := bytes.NewBuffer([]byte{})
nb := mempool.GetBuffer()
defer mempool.PutBuffer(nb)
nb.WriteByte(pk.ReasonCode)
pb := bytes.NewBuffer([]byte{})
pb := mempool.GetBuffer()
defer mempool.PutBuffer(pb)
pk.Properties.Encode(pk.FixedHeader.Type, pk.Mods, pb, nb.Len())
nb.Write(pb.Bytes())
pk.FixedHeader.Remaining = nb.Len()
pk.FixedHeader.Encode(buf)
_, _ = nb.WriteTo(buf)
buf.Write(nb.Bytes())
return nil
}

View File

@@ -8,6 +8,8 @@ import (
"bytes"
"fmt"
"strings"
"github.com/mochi-mqtt/server/v2/mempool"
)
const (
@@ -199,7 +201,8 @@ func (p *Properties) Encode(pkt byte, mods Mods, b *bytes.Buffer, n int) {
return
}
var buf bytes.Buffer
buf := mempool.GetBuffer()
defer mempool.PutBuffer(buf)
if p.canEncode(pkt, PropPayloadFormat) && p.PayloadFormatFlag {
buf.WriteByte(PropPayloadFormat)
buf.WriteByte(p.PayloadFormat)
@@ -230,7 +233,7 @@ func (p *Properties) Encode(pkt byte, mods Mods, b *bytes.Buffer, n int) {
for _, v := range p.SubscriptionIdentifier {
if v > 0 {
buf.WriteByte(PropSubscriptionIdentifier)
encodeLength(&buf, int64(v))
encodeLength(buf, int64(v))
}
}
}
@@ -321,7 +324,8 @@ func (p *Properties) Encode(pkt byte, mods Mods, b *bytes.Buffer, n int) {
}
if !mods.DisallowProblemInfo && p.canEncode(pkt, PropUser) {
pb := bytes.NewBuffer([]byte{})
pb := mempool.GetBuffer()
defer mempool.PutBuffer(pb)
for _, v := range p.User {
pb.WriteByte(PropUser)
pb.Write(encodeString(v.Key))
@@ -355,7 +359,7 @@ func (p *Properties) Encode(pkt byte, mods Mods, b *bytes.Buffer, n int) {
}
encodeLength(b, int64(buf.Len()))
_, _ = buf.WriteTo(b) // [MQTT-3.1.3-10]
b.Write(buf.Bytes()) // [MQTT-3.1.3-10]
}
// Decode decodes property bytes into a properties struct.

257
server.go
View File

@@ -14,6 +14,7 @@ import (
"runtime"
"sort"
"strconv"
"strings"
"sync/atomic"
"time"
@@ -26,91 +27,106 @@ import (
)
const (
Version = "2.4.0" // the current server version.
Version = "2.6.0" // the current server version.
defaultSysTopicInterval int64 = 1 // the interval between $SYS topic publishes
LocalListener = "local"
InlineClientId = "inline"
)
var (
// DefaultServerCapabilities defines the default features and capabilities provided by the server.
DefaultServerCapabilities = &Capabilities{
MaximumSessionExpiryInterval: math.MaxUint32, // maximum number of seconds to keep disconnected sessions
MaximumMessageExpiryInterval: 60 * 60 * 24, // maximum message expiry if message expiry is 0 or over
ReceiveMaximum: 1024, // maximum number of concurrent qos messages per client
MaximumQos: 2, // maximum qos value available to clients
RetainAvailable: 1, // retain messages is available
MaximumPacketSize: 0, // no maximum packet size
TopicAliasMaximum: math.MaxUint16, // maximum topic alias value
WildcardSubAvailable: 1, // wildcard subscriptions are available
SubIDAvailable: 1, // subscription identifiers are available
SharedSubAvailable: 1, // shared subscriptions are available
MinimumProtocolVersion: 3, // minimum supported mqtt version (3.0.0)
MaximumClientWritesPending: 1024 * 8, // maximum number of pending message writes for a client
}
// Deprecated: Use NewDefaultServerCapabilities to avoid data race issue.
DefaultServerCapabilities = NewDefaultServerCapabilities()
ErrListenerIDExists = errors.New("listener id already exists") // a listener with the same id already exists
ErrConnectionClosed = errors.New("connection not open") // connection is closed
ErrInlineClientNotEnabled = errors.New("please set Options.InlineClient=true to use this feature") // inline client is not enabled by default
ErrOptionsUnreadable = errors.New("unable to read options from bytes")
)
// Capabilities indicates the capabilities and features provided by the server.
type Capabilities struct {
MaximumMessageExpiryInterval int64
MaximumClientWritesPending int32
MaximumSessionExpiryInterval uint32
MaximumPacketSize uint32
maximumPacketID uint32 // unexported, used for testing only
ReceiveMaximum uint16
TopicAliasMaximum uint16
SharedSubAvailable byte
MinimumProtocolVersion byte
Compatibilities Compatibilities
MaximumQos byte
RetainAvailable byte
WildcardSubAvailable byte
SubIDAvailable byte
MaximumMessageExpiryInterval int64 `yaml:"maximum_message_expiry_interval" json:"maximum_message_expiry_interval"` // maximum message expiry if message expiry is 0 or over
MaximumClientWritesPending int32 `yaml:"maximum_client_writes_pending" json:"maximum_client_writes_pending"` // maximum number of pending message writes for a client
MaximumSessionExpiryInterval uint32 `yaml:"maximum_session_expiry_interval" json:"maximum_session_expiry_interval"` // maximum number of seconds to keep disconnected sessions
MaximumPacketSize uint32 `yaml:"maximum_packet_size" json:"maximum_packet_size"` // maximum packet size, no limit if 0
maximumPacketID uint32 // unexported, used for testing only
ReceiveMaximum uint16 `yaml:"receive_maximum" json:"receive_maximum"` // maximum number of concurrent qos messages per client
MaximumInflight uint16 `yaml:"maximum_inflight" json:"maximum_inflight"` // maximum number of qos > 0 messages can be stored, 0(=8192)-65535
TopicAliasMaximum uint16 `yaml:"topic_alias_maximum" json:"topic_alias_maximum"` // maximum topic alias value
SharedSubAvailable byte `yaml:"shared_sub_available" json:"shared_sub_available"` // support of shared subscriptions
MinimumProtocolVersion byte `yaml:"minimum_protocol_version" json:"minimum_protocol_version"` // minimum supported mqtt version
Compatibilities Compatibilities `yaml:"compatibilities" json:"compatibilities"` // version compatibilities the server provides
MaximumQos byte `yaml:"maximum_qos" json:"maximum_qos"` // maximum qos value available to clients
RetainAvailable byte `yaml:"retain_available" json:"retain_available"` // support of retain messages
WildcardSubAvailable byte `yaml:"wildcard_sub_available" json:"wildcard_sub_available"` // support of wildcard subscriptions
SubIDAvailable byte `yaml:"sub_id_available" json:"sub_id_available"` // support of subscription identifiers
}
// NewDefaultServerCapabilities defines the default features and capabilities provided by the server.
func NewDefaultServerCapabilities() *Capabilities {
return &Capabilities{
MaximumMessageExpiryInterval: 60 * 60 * 24, // maximum message expiry if message expiry is 0 or over
MaximumClientWritesPending: 1024 * 8, // maximum number of pending message writes for a client
MaximumSessionExpiryInterval: math.MaxUint32, // maximum number of seconds to keep disconnected sessions
MaximumPacketSize: 0, // no maximum packet size
maximumPacketID: math.MaxUint16,
ReceiveMaximum: 1024, // maximum number of concurrent qos messages per client
MaximumInflight: 1024 * 8, // maximum number of qos > 0 messages can be stored
TopicAliasMaximum: math.MaxUint16, // maximum topic alias value
SharedSubAvailable: 1, // shared subscriptions are available
MinimumProtocolVersion: 3, // minimum supported mqtt version (3.0.0)
MaximumQos: 2, // maximum qos value available to clients
RetainAvailable: 1, // retain messages is available
WildcardSubAvailable: 1, // wildcard subscriptions are available
SubIDAvailable: 1, // subscription identifiers are available
}
}
// Compatibilities provides flags for using compatibility modes.
type Compatibilities struct {
ObscureNotAuthorized bool // return unspecified errors instead of not authorized
PassiveClientDisconnect bool // don't disconnect the client forcefully after sending disconnect packet (paho - spec violation)
AlwaysReturnResponseInfo bool // always return response info (useful for testing)
RestoreSysInfoOnRestart bool // restore system info from store as if server never stopped
NoInheritedPropertiesOnAck bool // don't allow inherited user properties on ack (paho - spec violation)
ObscureNotAuthorized bool `yaml:"obscure_not_authorized" json:"obscure_not_authorized"` // return unspecified errors instead of not authorized
PassiveClientDisconnect bool `yaml:"passive_client_disconnect" json:"passive_client_disconnect"` // don't disconnect the client forcefully after sending disconnect packet (paho - spec violation)
AlwaysReturnResponseInfo bool `yaml:"always_return_response_info" json:"always_return_response_info"` // always return response info (useful for testing)
RestoreSysInfoOnRestart bool `yaml:"restore_sys_info_on_restart" json:"restore_sys_info_on_restart"` // restore system info from store as if server never stopped
NoInheritedPropertiesOnAck bool `yaml:"no_inherited_properties_on_ack" json:"no_inherited_properties_on_ack"` // don't allow inherited user properties on ack (paho - spec violation)
}
// Options contains configurable options for the server.
type Options struct {
// Listeners specifies any listeners which should be dynamically added on serve. Used when setting listeners by config.
Listeners []listeners.Config `yaml:"listeners" json:"listeners"`
// Hooks specifies any hooks which should be dynamically added on serve. Used when setting hooks by config.
Hooks []HookLoadConfig `yaml:"hooks" json:"hooks"`
// Capabilities defines the server features and behaviour. If you only wish to modify
// several of these values, set them explicitly - e.g.
// server.Options.Capabilities.MaximumClientWritesPending = 16 * 1024
Capabilities *Capabilities
Capabilities *Capabilities `yaml:"capabilities" json:"capabilities"`
// ClientNetWriteBufferSize specifies the size of the client *bufio.Writer write buffer.
ClientNetWriteBufferSize int
ClientNetWriteBufferSize int `yaml:"client_net_write_buffer_size" json:"client_net_write_buffer_size"`
// ClientNetReadBufferSize specifies the size of the client *bufio.Reader read buffer.
ClientNetReadBufferSize int
ClientNetReadBufferSize int `yaml:"client_net_read_buffer_size" json:"client_net_read_buffer_size"`
// Logger specifies a custom configured implementation of zerolog to override
// the servers default logger configuration. If you wish to change the log level,
// of the default logger, you can do so by setting
// server := mqtt.New(nil)
// of the default logger, you can do so by setting:
// server := mqtt.New(nil)
// level := new(slog.LevelVar)
// server.Slog = slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
// Level: level,
// }))
// level.Set(slog.LevelDebug)
Logger *slog.Logger
Logger *slog.Logger `yaml:"-" json:"-"`
// SysTopicResendInterval specifies the interval between $SYS topic updates in seconds.
SysTopicResendInterval int64
SysTopicResendInterval int64 `yaml:"sys_topic_resend_interval" json:"sys_topic_resend_interval"`
// Enable Inline client to allow direct subscribing and publishing from the parent codebase,
// with negligible performance difference (disabled by default to prevent confusion in statistics).
InlineClient bool
InlineClient bool `yaml:"inline_client" json:"inline_client"`
}
// Server is an MQTT broker server. It should be created with server.New()
@@ -190,11 +206,15 @@ func New(opts *Options) *Server {
// ensureDefaults ensures that the server starts with sane default values, if none are provided.
func (o *Options) ensureDefaults() {
if o.Capabilities == nil {
o.Capabilities = DefaultServerCapabilities
o.Capabilities = NewDefaultServerCapabilities()
}
o.Capabilities.maximumPacketID = math.MaxUint16 // spec maximum is 65535
if o.Capabilities.MaximumInflight == 0 {
o.Capabilities.MaximumInflight = 1024 * 8
}
if o.SysTopicResendInterval == 0 {
o.SysTopicResendInterval = defaultSysTopicInterval
}
@@ -233,8 +253,6 @@ func (s *Server) NewClient(c net.Conn, listener string, id string, inline bool)
// By default, we don't want to restrict developer publishes,
// but if you do, reset this after creating inline client.
cl.State.Inflight.ResetReceiveQuota(math.MaxInt32)
} else {
go cl.WriteLoop() // can only write to real clients
}
return cl
@@ -252,6 +270,17 @@ func (s *Server) AddHook(hook Hook, config any) error {
return s.hooks.Add(hook, config)
}
// AddHooksFromConfig adds hooks to the server which were specified in the hooks config (usually from a config file).
// New built-in hooks should be added to this list.
func (s *Server) AddHooksFromConfig(hooks []HookLoadConfig) error {
for _, h := range hooks {
if err := s.AddHook(h.Hook, h.Config); err != nil {
return err
}
}
return nil
}
// AddListener adds a new network listener to the server, for receiving incoming client connections.
func (s *Server) AddListener(l listeners.Listener) error {
if _, ok := s.Listeners.Get(l.ID()); ok {
@@ -270,12 +299,55 @@ func (s *Server) AddListener(l listeners.Listener) error {
return nil
}
// AddListenersFromConfig adds listeners to the server which were specified in the listeners config (usually from a config file).
// New built-in listeners should be added to this list.
func (s *Server) AddListenersFromConfig(configs []listeners.Config) error {
for _, conf := range configs {
var l listeners.Listener
switch strings.ToLower(conf.Type) {
case listeners.TypeTCP:
l = listeners.NewTCP(conf)
case listeners.TypeWS:
l = listeners.NewWebsocket(conf)
case listeners.TypeUnix:
l = listeners.NewUnixSock(conf)
case listeners.TypeHealthCheck:
l = listeners.NewHTTPHealthCheck(conf)
case listeners.TypeSysInfo:
l = listeners.NewHTTPStats(conf, s.Info)
case listeners.TypeMock:
l = listeners.NewMockListener(conf.ID, conf.Address)
default:
s.Log.Error("listener type unavailable by config", "listener", conf.Type)
continue
}
if err := s.AddListener(l); err != nil {
return err
}
}
return nil
}
// Serve starts the event loops responsible for establishing client connections
// on all attached listeners, publishing the system topics, and starting all hooks.
func (s *Server) Serve() error {
s.Log.Info("mochi mqtt starting", "version", Version)
defer s.Log.Info("mochi mqtt server started")
if len(s.Options.Listeners) > 0 {
err := s.AddListenersFromConfig(s.Options.Listeners)
if err != nil {
return err
}
}
if len(s.Options.Hooks) > 0 {
err := s.AddHooksFromConfig(s.Options.Hooks)
if err != nil {
return err
}
}
if s.hooks.Provides(
StoredClients,
StoredInflightMessages,
@@ -332,6 +404,8 @@ func (s *Server) EstablishConnection(listener string, c net.Conn) error {
func (s *Server) attachClient(cl *Client, listener string) error {
defer s.Listeners.ClientsWg.Done()
s.Listeners.ClientsWg.Add(1)
go cl.WriteLoop()
defer cl.Stop(nil)
pk, err := s.readConnectionPacket(cl)
@@ -400,7 +474,7 @@ func (s *Server) attachClient(cl *Client, listener string) error {
s.hooks.OnDisconnect(cl, err, expire)
if expire && atomic.LoadUint32(&cl.State.isTakenOver) == 0 {
cl.ClearInflights(math.MaxInt64, 0)
cl.ClearInflights()
s.UnsubscribeClient(cl)
s.Clients.Delete(cl.ID) // [MQTT-4.1.0-2] ![MQTT-3.1.2-23]
}
@@ -478,7 +552,7 @@ func (s *Server) inheritClientSession(pk packets.Packet, cl *Client) bool {
_ = s.DisconnectClient(existing, packets.ErrSessionTakenOver) // [MQTT-3.1.4-3]
if pk.Connect.Clean || (existing.Properties.Clean && existing.Properties.ProtocolVersion < 5) { // [MQTT-3.1.2-4] [MQTT-3.1.4-4]
s.UnsubscribeClient(existing)
existing.ClearInflights(math.MaxInt64, 0)
existing.ClearInflights()
atomic.StoreUint32(&existing.State.isTakenOver, 1) // only set isTakenOver after unsubscribe has occurred
return false // [MQTT-3.2.2-3]
}
@@ -503,7 +577,7 @@ func (s *Server) inheritClientSession(pk packets.Packet, cl *Client) bool {
// Clean the state of the existing client to prevent sequential take-overs
// from increasing memory usage by inflights + subs * client-id.
s.UnsubscribeClient(existing)
existing.ClearInflights(math.MaxInt64, 0)
existing.ClearInflights()
s.Log.Debug("session taken over", "client", cl.ID, "old_remote", existing.Net.Remote, "new_remote", cl.Net.Remote)
@@ -969,9 +1043,17 @@ func (s *Server) publishToClient(cl *Client, sub packets.Subscription, pk packet
}
if out.FixedHeader.Qos > 0 {
if cl.State.Inflight.Len() >= int(s.Options.Capabilities.MaximumInflight) {
// add hook?
atomic.AddInt64(&s.Info.InflightDropped, 1)
s.Log.Warn("client store quota reached", "client", cl.ID, "listener", cl.Net.Listener)
return out, packets.ErrQuotaExceeded
}
i, err := cl.NextPacketID() // [MQTT-4.3.2-1] [MQTT-4.3.3-1]
if err != nil {
s.hooks.OnPacketIDExhausted(cl, pk)
atomic.AddInt64(&s.Info.InflightDropped, 1)
s.Log.Warn("packet ids exhausted", "error", err, "client", cl.ID, "listener", cl.Net.Listener)
return out, packets.ErrQuotaExceeded
}
@@ -1002,8 +1084,10 @@ func (s *Server) publishToClient(cl *Client, sub packets.Subscription, pk packet
default:
atomic.AddInt64(&s.Info.MessagesDropped, 1)
cl.ops.hooks.OnPublishDropped(cl, pk)
cl.State.Inflight.Delete(out.PacketID) // packet was dropped due to irregular circumstances, so rollback inflight.
cl.State.Inflight.IncreaseSendQuota()
if out.FixedHeader.Qos > 0 {
cl.State.Inflight.Delete(out.PacketID) // packet was dropped due to irregular circumstances, so rollback inflight.
cl.State.Inflight.IncreaseSendQuota()
}
return out, packets.ErrPendingClientWritesExceeded
}
@@ -1351,27 +1435,28 @@ func (s *Server) publishSysTopics() {
atomic.StoreInt64(&s.Info.ClientsTotal, int64(s.Clients.Len()))
atomic.StoreInt64(&s.Info.ClientsDisconnected, atomic.LoadInt64(&s.Info.ClientsTotal)-atomic.LoadInt64(&s.Info.ClientsConnected))
info := s.Info.Clone()
topics := map[string]string{
SysPrefix + "/broker/version": s.Info.Version,
SysPrefix + "/broker/time": AtomicItoa(&s.Info.Time),
SysPrefix + "/broker/uptime": AtomicItoa(&s.Info.Uptime),
SysPrefix + "/broker/started": AtomicItoa(&s.Info.Started),
SysPrefix + "/broker/load/bytes/received": AtomicItoa(&s.Info.BytesReceived),
SysPrefix + "/broker/load/bytes/sent": AtomicItoa(&s.Info.BytesSent),
SysPrefix + "/broker/clients/connected": AtomicItoa(&s.Info.ClientsConnected),
SysPrefix + "/broker/clients/disconnected": AtomicItoa(&s.Info.ClientsDisconnected),
SysPrefix + "/broker/clients/maximum": AtomicItoa(&s.Info.ClientsMaximum),
SysPrefix + "/broker/clients/total": AtomicItoa(&s.Info.ClientsTotal),
SysPrefix + "/broker/packets/received": AtomicItoa(&s.Info.PacketsReceived),
SysPrefix + "/broker/packets/sent": AtomicItoa(&s.Info.PacketsSent),
SysPrefix + "/broker/messages/received": AtomicItoa(&s.Info.MessagesReceived),
SysPrefix + "/broker/messages/sent": AtomicItoa(&s.Info.MessagesSent),
SysPrefix + "/broker/messages/dropped": AtomicItoa(&s.Info.MessagesDropped),
SysPrefix + "/broker/messages/inflight": AtomicItoa(&s.Info.Inflight),
SysPrefix + "/broker/retained": AtomicItoa(&s.Info.Retained),
SysPrefix + "/broker/subscriptions": AtomicItoa(&s.Info.Subscriptions),
SysPrefix + "/broker/system/memory": AtomicItoa(&s.Info.MemoryAlloc),
SysPrefix + "/broker/system/threads": AtomicItoa(&s.Info.Threads),
SysPrefix + "/broker/time": Int64toa(info.Time),
SysPrefix + "/broker/uptime": Int64toa(info.Uptime),
SysPrefix + "/broker/started": Int64toa(info.Started),
SysPrefix + "/broker/load/bytes/received": Int64toa(info.BytesReceived),
SysPrefix + "/broker/load/bytes/sent": Int64toa(info.BytesSent),
SysPrefix + "/broker/clients/connected": Int64toa(info.ClientsConnected),
SysPrefix + "/broker/clients/disconnected": Int64toa(info.ClientsDisconnected),
SysPrefix + "/broker/clients/maximum": Int64toa(info.ClientsMaximum),
SysPrefix + "/broker/clients/total": Int64toa(info.ClientsTotal),
SysPrefix + "/broker/packets/received": Int64toa(info.PacketsReceived),
SysPrefix + "/broker/packets/sent": Int64toa(info.PacketsSent),
SysPrefix + "/broker/messages/received": Int64toa(info.MessagesReceived),
SysPrefix + "/broker/messages/sent": Int64toa(info.MessagesSent),
SysPrefix + "/broker/messages/dropped": Int64toa(info.MessagesDropped),
SysPrefix + "/broker/messages/inflight": Int64toa(info.Inflight),
SysPrefix + "/broker/retained": Int64toa(info.Retained),
SysPrefix + "/broker/subscriptions": Int64toa(info.Subscriptions),
SysPrefix + "/broker/system/memory": Int64toa(info.MemoryAlloc),
SysPrefix + "/broker/system/threads": Int64toa(info.Threads),
}
for topic, payload := range topics {
@@ -1381,7 +1466,7 @@ func (s *Server) publishSysTopics() {
s.publishToSubscribers(pk)
}
s.hooks.OnSysInfoTick(s.Info)
s.hooks.OnSysInfoTick(info)
}
// Close attempts to gracefully shut down the server, all listeners, clients, and stores.
@@ -1553,7 +1638,18 @@ func (s *Server) loadClients(v []storage.Client) {
MaximumPacketSize: c.Properties.MaximumPacketSize,
}
cl.Properties.Will = Will(c.Will)
s.Clients.Add(cl)
// cancel the context, update cl.State such as disconnected time and stopCause.
cl.Stop(packets.ErrServerShuttingDown)
expire := (cl.Properties.ProtocolVersion == 5 && cl.Properties.Props.SessionExpiryInterval == 0) || (cl.Properties.ProtocolVersion < 5 && cl.Properties.Clean)
s.hooks.OnDisconnect(cl, packets.ErrServerShuttingDown, expire)
if expire {
cl.ClearInflights()
s.UnsubscribeClient(cl)
} else {
s.Clients.Add(cl)
}
}
}
@@ -1597,7 +1693,14 @@ func (s *Server) clearExpiredClients(dt int64) {
// clearExpiredRetainedMessage deletes retained messages from topics if they have expired.
func (s *Server) clearExpiredRetainedMessages(now int64) {
for filter, pk := range s.Topics.Retained.GetAll() {
if (pk.Expiry > 0 && pk.Expiry < now) || pk.Created+s.Options.Capabilities.MaximumMessageExpiryInterval < now {
expired := pk.ProtocolVersion == 5 && pk.Expiry > 0 && pk.Expiry < now // [MQTT-3.3.2-5]
// If the maximum message expiry interval is set (greater than 0), and the message
// retention period exceeds the maximum expiry, the message will be forcibly removed.
enforced := s.Options.Capabilities.MaximumMessageExpiryInterval > 0 &&
now-pk.Created > s.Options.Capabilities.MaximumMessageExpiryInterval
if expired || enforced {
s.Topics.Retained.Delete(filter)
s.hooks.OnRetainedExpired(filter)
}
@@ -1607,7 +1710,7 @@ func (s *Server) clearExpiredRetainedMessages(now int64) {
// clearExpiredInflights deletes any inflight messages which have expired.
func (s *Server) clearExpiredInflights(now int64) {
for _, client := range s.Clients.GetAll() {
if deleted := client.ClearInflights(now, s.Options.Capabilities.MaximumMessageExpiryInterval); len(deleted) > 0 {
if deleted := client.ClearExpiredInflights(now, s.Options.Capabilities.MaximumMessageExpiryInterval); len(deleted) > 0 {
for _, id := range deleted {
s.hooks.OnQosDropped(client, packets.Packet{PacketID: id})
}
@@ -1632,7 +1735,7 @@ func (s *Server) sendDelayedLWT(dt int64) {
}
}
// AtomicItoa converts an int64 point to a string.
func AtomicItoa(ptr *int64) string {
return strconv.FormatInt(atomic.LoadInt64(ptr), 10)
// Int64toa returns the base-10 string representation of v.
func Int64toa(v int64) string {
	return string(strconv.AppendInt(nil, v, 10))
}

View File

@@ -96,24 +96,24 @@ func (h *DelayHook) OnDisconnect(cl *Client, err error, expire bool) {
}
func newServer() *Server {
cc := *DefaultServerCapabilities
cc := NewDefaultServerCapabilities()
cc.MaximumMessageExpiryInterval = 0
cc.ReceiveMaximum = 0
s := New(&Options{
Logger: logger,
Capabilities: &cc,
Capabilities: cc,
})
_ = s.AddHook(new(AllowHook), nil)
return s
}
func newServerWithInlineClient() *Server {
cc := *DefaultServerCapabilities
cc := NewDefaultServerCapabilities()
cc.MaximumMessageExpiryInterval = 0
cc.ReceiveMaximum = 0
s := New(&Options{
Logger: logger,
Capabilities: &cc,
Capabilities: cc,
InlineClient: true,
})
_ = s.AddHook(new(AllowHook), nil)
@@ -125,7 +125,7 @@ func TestOptionsSetDefaults(t *testing.T) {
opts.ensureDefaults()
require.Equal(t, defaultSysTopicInterval, opts.SysTopicResendInterval)
require.Equal(t, DefaultServerCapabilities, opts.Capabilities)
require.Equal(t, NewDefaultServerCapabilities(), opts.Capabilities)
opts = new(Options)
opts.ensureDefaults()
@@ -220,6 +220,34 @@ func TestServerAddListener(t *testing.T) {
require.Equal(t, ErrListenerIDExists, err)
}
// TestServerAddHooksFromConfig verifies that hooks supplied as
// HookLoadConfig entries with no per-hook config are added without error.
func TestServerAddHooksFromConfig(t *testing.T) {
	s := newServer()
	defer s.Close()
	require.NotNil(t, s)
	s.Log = logger
	hooks := []HookLoadConfig{
		{Hook: new(modifiedHookBase)},
	}
	err := s.AddHooksFromConfig(hooks)
	require.NoError(t, err)
}
// TestServerAddHooksFromConfigError verifies that supplying a non-nil
// config map to modifiedHookBase makes AddHooksFromConfig return an error.
func TestServerAddHooksFromConfigError(t *testing.T) {
	s := newServer()
	defer s.Close()
	require.NotNil(t, s)
	s.Log = logger
	hooks := []HookLoadConfig{
		{Hook: new(modifiedHookBase), Config: map[string]interface{}{}},
	}
	err := s.AddHooksFromConfig(hooks)
	require.Error(t, err)
}
func TestServerAddListenerInitFailure(t *testing.T) {
s := newServer()
defer s.Close()
@@ -232,6 +260,60 @@ func TestServerAddListenerInitFailure(t *testing.T) {
require.Error(t, err)
}
// TestServerAddListenersFromConfig verifies that listeners of every known
// config type are created and registered with the expected addresses, and
// that an unknown type produces no listener and no error.
func TestServerAddListenersFromConfig(t *testing.T) {
	s := newServer()
	defer s.Close()
	require.NotNil(t, s)
	s.Log = logger
	lc := []listeners.Config{
		{Type: listeners.TypeTCP, ID: "tcp", Address: ":1883"},
		{Type: listeners.TypeWS, ID: "ws", Address: ":1882"},
		{Type: listeners.TypeHealthCheck, ID: "health", Address: ":1881"},
		{Type: listeners.TypeSysInfo, ID: "info", Address: ":1880"},
		{Type: listeners.TypeUnix, ID: "unix", Address: "mochi.sock"},
		{Type: listeners.TypeMock, ID: "mock", Address: "0"},
		{Type: "unknown", ID: "unknown"},
	}
	err := s.AddListenersFromConfig(lc)
	require.NoError(t, err)
	// 6 of the 7 entries are registered; the "unknown" type is not added.
	require.Equal(t, 6, s.Listeners.Len())
	tcp, _ := s.Listeners.Get("tcp")
	require.Equal(t, "[::]:1883", tcp.Address())
	ws, _ := s.Listeners.Get("ws")
	require.Equal(t, ":1882", ws.Address())
	health, _ := s.Listeners.Get("health")
	require.Equal(t, ":1881", health.Address())
	info, _ := s.Listeners.Get("info")
	require.Equal(t, ":1880", info.Address())
	unix, _ := s.Listeners.Get("unix")
	require.Equal(t, "mochi.sock", unix.Address())
	mock, _ := s.Listeners.Get("mock")
	require.Equal(t, "0", mock.Address())
}
// TestServerAddListenersFromConfigError verifies that an invalid listener
// address surfaces an error and registers no listeners.
func TestServerAddListenersFromConfigError(t *testing.T) {
	s := newServer()
	defer s.Close()
	require.NotNil(t, s)
	s.Log = logger
	lc := []listeners.Config{
		{Type: listeners.TypeTCP, ID: "tcp", Address: "x"}, // not a valid TCP address
	}
	err := s.AddListenersFromConfig(lc)
	require.Error(t, err)
	require.Equal(t, 0, s.Listeners.Len())
}
func TestServerServe(t *testing.T) {
s := newServer()
defer s.Close()
@@ -253,6 +335,57 @@ func TestServerServe(t *testing.T) {
require.Equal(t, true, listener.(*listeners.MockListener).IsServing())
}
// TestServerServeFromConfig verifies that Serve picks up listeners and hooks
// declared in Options and brings the configured listener into a serving state.
func TestServerServeFromConfig(t *testing.T) {
	s := newServer()
	defer s.Close()
	require.NotNil(t, s)
	s.Options.Listeners = []listeners.Config{
		{Type: listeners.TypeMock, ID: "mock", Address: "0"},
	}
	s.Options.Hooks = []HookLoadConfig{
		{Hook: new(modifiedHookBase)},
	}
	err := s.Serve()
	require.NoError(t, err)
	time.Sleep(time.Millisecond) // allow the listener goroutine to start
	require.Equal(t, 1, s.Listeners.Len())
	listener, ok := s.Listeners.Get("mock")
	require.Equal(t, true, ok)
	require.Equal(t, true, listener.(*listeners.MockListener).IsServing())
}
// TestServerServeFromConfigListenerError verifies that Serve returns an
// error when a configured listener has an invalid address.
func TestServerServeFromConfigListenerError(t *testing.T) {
	s := newServer()
	defer s.Close()
	require.NotNil(t, s)
	s.Options.Listeners = []listeners.Config{
		{Type: listeners.TypeTCP, ID: "tcp", Address: "x"}, // not a valid TCP address
	}
	err := s.Serve()
	require.Error(t, err)
}
// TestServerServeFromConfigHookError verifies that Serve returns an error
// when a configured hook rejects its config (modifiedHookBase with a
// non-nil config map).
func TestServerServeFromConfigHookError(t *testing.T) {
	s := newServer()
	defer s.Close()
	require.NotNil(t, s)
	s.Options.Hooks = []HookLoadConfig{
		{Hook: new(modifiedHookBase), Config: map[string]interface{}{}},
	}
	err := s.Serve()
	require.Error(t, err)
}
func TestServerServeReadStoreFailure(t *testing.T) {
s := newServer()
defer s.Close()
@@ -1529,10 +1662,10 @@ func TestServerProcessPublishACLCheckDeny(t *testing.T) {
for _, tx := range tt {
t.Run(tx.name, func(t *testing.T) {
cc := *DefaultServerCapabilities
cc := NewDefaultServerCapabilities()
s := New(&Options{
Logger: logger,
Capabilities: &cc,
Capabilities: cc,
})
_ = s.AddHook(new(DenyHook), nil)
_ = s.Serve()
@@ -1907,6 +2040,7 @@ func TestPublishToClientSubscriptionDowngradeQos(t *testing.T) {
}
func TestPublishToClientExceedClientWritesPending(t *testing.T) {
var sendQuota uint16 = 5
s := newServer()
_, w := net.Pipe()
@@ -1917,9 +2051,12 @@ func TestPublishToClientExceedClientWritesPending(t *testing.T) {
options: &Options{
Capabilities: &Capabilities{
MaximumClientWritesPending: 3,
maximumPacketID: 10,
},
},
})
cl.Properties.Props.ReceiveMaximum = sendQuota
cl.State.Inflight.ResetSendQuota(int32(cl.Properties.Props.ReceiveMaximum))
s.Clients.Add(cl)
@@ -1928,9 +2065,20 @@ func TestPublishToClientExceedClientWritesPending(t *testing.T) {
atomic.AddInt32(&cl.State.outboundQty, 1)
}
id, _ := cl.NextPacketID()
cl.State.Inflight.Set(packets.Packet{PacketID: uint16(id)})
cl.State.Inflight.DecreaseSendQuota()
sendQuota--
_, err := s.publishToClient(cl, packets.Subscription{Filter: "a/b/c", Qos: 2}, packets.Packet{})
require.Error(t, err)
require.ErrorIs(t, packets.ErrPendingClientWritesExceeded, err)
require.Equal(t, int32(sendQuota), atomic.LoadInt32(&cl.State.Inflight.sendQuota))
_, err = s.publishToClient(cl, packets.Subscription{Filter: "a/b/c", Qos: 2}, packets.Packet{FixedHeader: packets.FixedHeader{Qos: 1}})
require.Error(t, err)
require.ErrorIs(t, packets.ErrPendingClientWritesExceeded, err)
require.Equal(t, int32(sendQuota), atomic.LoadInt32(&cl.State.Inflight.sendQuota))
}
func TestPublishToClientServerTopicAlias(t *testing.T) {
@@ -1986,6 +2134,22 @@ func TestPublishToClientMqtt5RetainAsPublishedTrueLeverageNoConn(t *testing.T) {
require.ErrorIs(t, err, packets.CodeDisconnect)
}
// TestPublishToClientExceedMaximumInflight fills the client's inflight queue
// to the configured maximum and verifies that a further QoS 1 publish is
// rejected with ErrQuotaExceeded and counted in Info.InflightDropped.
func TestPublishToClientExceedMaximumInflight(t *testing.T) {
	const MaxInflight uint16 = 5
	s := newServer()
	cl, _, _ := newTestClient()
	s.Options.Capabilities.MaximumInflight = MaxInflight
	cl.ops.options.Capabilities.MaximumInflight = MaxInflight
	for i := uint16(0); i < MaxInflight; i++ {
		cl.State.Inflight.Set(packets.Packet{PacketID: i})
	}
	_, err := s.publishToClient(cl, packets.Subscription{Filter: "a/b/c", Qos: 1}, *packets.TPacketData[packets.Publish].Get(packets.TPublishQos1).Packet)
	require.Error(t, err)
	require.ErrorIs(t, err, packets.ErrQuotaExceeded)
	require.Equal(t, int64(1), atomic.LoadInt64(&s.Info.InflightDropped))
}
func TestPublishToClientExhaustedPacketID(t *testing.T) {
s := newServer()
cl, _, _ := newTestClient()
@@ -1996,6 +2160,7 @@ func TestPublishToClientExhaustedPacketID(t *testing.T) {
_, err := s.publishToClient(cl, packets.Subscription{Filter: "a/b/c", Qos: 1}, *packets.TPacketData[packets.Publish].Get(packets.TPublishQos1).Packet)
require.Error(t, err)
require.ErrorIs(t, err, packets.ErrQuotaExceeded)
require.Equal(t, int64(1), atomic.LoadInt64(&s.Info.InflightDropped))
}
func TestPublishToClientACLNotAuthorized(t *testing.T) {
@@ -3128,15 +3293,50 @@ func TestServerLoadClients(t *testing.T) {
{ID: "mochi"},
{ID: "zen"},
{ID: "mochi-co"},
{ID: "v3-clean", ProtocolVersion: 4, Clean: true},
{ID: "v3-not-clean", ProtocolVersion: 4, Clean: false},
{
ID: "v5-clean",
ProtocolVersion: 5,
Clean: true,
Properties: storage.ClientProperties{
SessionExpiryInterval: 10,
},
},
{
ID: "v5-expire-interval-0",
ProtocolVersion: 5,
Properties: storage.ClientProperties{
SessionExpiryInterval: 0,
},
},
{
ID: "v5-expire-interval-not-0",
ProtocolVersion: 5,
Properties: storage.ClientProperties{
SessionExpiryInterval: 10,
},
},
}
s := newServer()
require.Equal(t, 0, s.Clients.Len())
s.loadClients(v)
require.Equal(t, 3, s.Clients.Len())
require.Equal(t, 6, s.Clients.Len())
cl, ok := s.Clients.Get("mochi")
require.True(t, ok)
require.Equal(t, "mochi", cl.ID)
_, ok = s.Clients.Get("v3-clean")
require.False(t, ok)
_, ok = s.Clients.Get("v3-not-clean")
require.True(t, ok)
_, ok = s.Clients.Get("v5-clean")
require.True(t, ok)
_, ok = s.Clients.Get("v5-expire-interval-0")
require.False(t, ok)
_, ok = s.Clients.Get("v5-expire-interval-not-0")
require.True(t, ok)
}
func TestServerLoadSubscriptions(t *testing.T) {
@@ -3259,6 +3459,11 @@ func TestServerClearExpiredInflights(t *testing.T) {
s.clearExpiredInflights(n)
require.Len(t, cl.State.Inflight.GetAll(false), 2)
require.Equal(t, int64(-3), s.Info.Inflight)
s.Options.Capabilities.MaximumMessageExpiryInterval = 0
cl.State.Inflight.Set(packets.Packet{PacketID: 8, Expiry: n - 8})
s.clearExpiredInflights(n)
require.Len(t, cl.State.Inflight.GetAll(false), 3)
}
func TestServerClearExpiredRetained(t *testing.T) {
@@ -3267,15 +3472,28 @@ func TestServerClearExpiredRetained(t *testing.T) {
s.Options.Capabilities.MaximumMessageExpiryInterval = 4
n := time.Now().Unix()
s.Topics.Retained.Add("a/b/c", packets.Packet{Created: n, Expiry: n - 1})
s.Topics.Retained.Add("d/e/f", packets.Packet{Created: n, Expiry: n - 2})
s.Topics.Retained.Add("g/h/i", packets.Packet{Created: n - 3}) // within bounds
s.Topics.Retained.Add("j/k/l", packets.Packet{Created: n - 5}) // over max server expiry limit
s.Topics.Retained.Add("m/n/o", packets.Packet{Created: n})
s.Topics.Retained.Add("a/b/c", packets.Packet{ProtocolVersion: 5, Created: n, Expiry: n - 1})
s.Topics.Retained.Add("d/e/f", packets.Packet{ProtocolVersion: 5, Created: n, Expiry: n - 2})
s.Topics.Retained.Add("g/h/i", packets.Packet{ProtocolVersion: 5, Created: n - 3}) // within bounds
s.Topics.Retained.Add("j/k/l", packets.Packet{ProtocolVersion: 5, Created: n - 5}) // over max server expiry limit
s.Topics.Retained.Add("m/n/o", packets.Packet{ProtocolVersion: 5, Created: n})
require.Len(t, s.Topics.Retained.GetAll(), 5)
s.clearExpiredRetainedMessages(n)
require.Len(t, s.Topics.Retained.GetAll(), 2)
s.Topics.Retained.Add("p/q/r", packets.Packet{Created: n, Expiry: n - 1})
s.Topics.Retained.Add("s/t/u", packets.Packet{Created: n, Expiry: n - 2}) // expiry is ineffective for v3.
s.Topics.Retained.Add("v/w/x", packets.Packet{Created: n - 3}) // within bounds for v3
s.Topics.Retained.Add("y/z/1", packets.Packet{Created: n - 5}) // over max server expiry limit
require.Len(t, s.Topics.Retained.GetAll(), 6)
s.clearExpiredRetainedMessages(n)
require.Len(t, s.Topics.Retained.GetAll(), 5)
s.Options.Capabilities.MaximumMessageExpiryInterval = 0
s.Topics.Retained.Add("2/3/4", packets.Packet{Created: n - 8})
s.clearExpiredRetainedMessages(n)
require.Len(t, s.Topics.Retained.GetAll(), 6)
}
func TestServerClearExpiredClients(t *testing.T) {
@@ -3335,10 +3553,9 @@ func TestLoadServerInfoRestoreOnRestart(t *testing.T) {
require.Equal(t, int64(60), s.Info.BytesReceived)
}
func TestAtomicItoa(t *testing.T) {
func TestItoa(t *testing.T) {
i := int64(22)
ip := &i
require.Equal(t, "22", AtomicItoa(ip))
require.Equal(t, "22", Int64toa(i))
}
func TestServerSubscribe(t *testing.T) {

View File

@@ -1 +0,0 @@
language: go

View File

@@ -1,35 +0,0 @@
bbloom.go
// The MIT License (MIT)
// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
siphash.go
// https://github.com/dchest/siphash
//
// Written in 2012 by Dmitry Chestnykh.
//
// To the extent possible under law, the author have dedicated all copyright
// and related and neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
// http://creativecommons.org/publicdomain/zero/1.0/
//
// Package siphash implements SipHash-2-4, a fast short-input PRF
// created by Jean-Philippe Aumasson and Daniel J. Bernstein.

View File

@@ -1,131 +0,0 @@
## bbloom: a bitset Bloom filter for go/golang
===
[![Build Status](https://travis-ci.org/AndreasBriese/bbloom.png?branch=master)](http://travis-ci.org/AndreasBriese/bbloom)
package implements a fast bloom filter with real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter.
NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom
===
changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache.
This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html".
Nonetheless bbloom should work with any other form of entries.
~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~
Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein) to be about as fast. sipHash had been ported by Dmitry Chestnykh to Go (github.com/dchest/siphash).
Minimum hashset size is: 512 ([4]uint64; will be set automatically).
### install
```sh
go get github.com/AndreasBriese/bbloom
```
### test
+ change to folder ../bbloom
+ create wordlist in file "words.txt" (you might use `python permut.py`)
+ run 'go test -bench=.' within the folder
```go
go test -bench=.
```
~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~
using go's testing framework now (have in mind that the op timing is related to 65536 operations of Add, Has, AddIfNotHas respectively)
### usage
after installation add
```go
import (
...
"github.com/AndreasBriese/bbloom"
...
)
```
at your header. In the program use
```go
// create a bloom filter for 65536 items and 1 % wrong-positive ratio
bf := bbloom.New(float64(1<<16), float64(0.01))
// or
// create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly
// bf = bbloom.New(float64(650000), float64(7))
// or
bf = bbloom.New(650000.0, 7.0)
// add one item
bf.Add([]byte("butter"))
// Number of elements added is exposed now
// Note: ElemNum will not be included in JSON export (for compatibility with older versions)
nOfElementsInFilter := bf.ElemNum
// check if item is in the filter
isIn := bf.Has([]byte("butter")) // should be true
isNotIn := bf.Has([]byte("Butter")) // should be false
// 'add only if item is new' to the bloomfilter
added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set
added = bf.AddIfNotHas([]byte("buTTer")) // should be true because 'buTTer' is new
// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS
// add one item
bf.AddTS([]byte("peanutbutter"))
// check if item is in the filter
isIn = bf.HasTS([]byte("peanutbutter")) // should be true
isNotIn = bf.HasTS([]byte("peanutButter")) // should be false
// 'add only if item is new' to the bloomfilter
added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'butter' is already in the set
added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'peanutbuTTer' is new
// convert to JSON ([]byte)
Json := bf.JSONMarshal()
// bloomfilters Mutex is exposed for external un-/locking
// i.e. mutex lock while doing JSON conversion
bf.Mtx.Lock()
Json = bf.JSONMarshal()
bf.Mtx.Unlock()
// restore a bloom filter from storage
bfNew := bbloom.JSONUnmarshal(Json)
isInNew := bfNew.Has([]byte("butter")) // should be true
isNotInNew := bfNew.Has([]byte("Butter")) // should be false
```
to work with the bloom filter.
### why 'fast'?
It's about 3 times faster than William Fitzgeralds bitset bloom filter https://github.com/willf/bloom . And it is about so fast as my []bool set variant for Boom filters (see https://github.com/AndreasBriese/bloom ) but having a 8times smaller memory footprint:
Bloom filter (filter size 524288, 7 hashlocs)
github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op)
github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op)
github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op)
github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op)
github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op)
github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op)
github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op)
github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op)
github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op)
github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op)
(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz)
With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one subtraction per byte. sdbm is about as fast as fnv64a but gives fewer collisions with the dataset (see mask above). bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions.

View File

@@ -1,284 +0,0 @@
// The MIT License (MIT)
// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// 2019/08/25 code revision to reduce unsafe use
// Parts are adopted from the fork at ipfs/bbloom after performance rev by
// Steve Allen (https://github.com/Stebalien)
// (see https://github.com/ipfs/bbloom/blob/master/bbloom.go)
// -> func Has
// -> func set
// -> func add
package bbloom
import (
"bytes"
"encoding/json"
"log"
"math"
"sync"
"unsafe"
)
// helper
// not needed anymore by Set
// var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128}
// getSize rounds ui64 up to the next power of two (minimum 512) and also
// returns the exponent such that size == 1<<exponent.
func getSize(ui64 uint64) (size uint64, exponent uint64) {
	const minEntries = uint64(512)
	if ui64 < minEntries {
		ui64 = minEntries
	}
	for size = 1; size < ui64; exponent++ {
		size <<= 1
	}
	return size, exponent
}
// calcSizeByWrongPositives derives the bitset size and the number of hash
// locations per entry from the expected entry count and the desired
// wrong-positive ratio.
func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) {
	const ln2 = float64(0.69314718056)
	size := -1 * numEntries * math.Log(wrongs) / math.Pow(ln2, 2)
	locs := math.Ceil(ln2 * size / numEntries)
	return uint64(size), uint64(locs)
}
// New returns a new bloomfilter. Called with two parameters it accepts
// either (number_of_entries, number_of_hashlocations) or, when the second
// value is below 1, (number_of_entries, wrong_positive_ratio). Any other
// parameter count is fatal.
func New(params ...float64) (bloomfilter Bloom) {
	if len(params) != 2 {
		log.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(0.03))")
	}
	var entries, locs uint64
	if params[1] < 1 {
		entries, locs = calcSizeByWrongPositives(params[0], params[1])
	} else {
		entries, locs = uint64(params[0]), uint64(params[1])
	}
	// Round the bitset size up to a power of two so masking with size-1
	// replaces a modulo.
	size, exponent := getSize(entries)
	bloomfilter = Bloom{
		Mtx:     &sync.Mutex{},
		sizeExp: exponent,
		size:    size - 1,
		setLocs: locs,
		shift:   64 - exponent,
	}
	bloomfilter.Size(size)
	return bloomfilter
}
// NewWithBoolset
// takes a []byte slice and number of locs per entry
// returns the bloomfilter with a bitset populated according to the input []byte
func NewWithBoolset(bs *[]byte, locs uint64) (bloomfilter Bloom) {
	// Allocate a filter whose bitset holds exactly len(*bs) bytes (len*8 bits).
	bloomfilter = New(float64(len(*bs)<<3), float64(locs))
	for i, b := range *bs {
		// Copy each input byte over the backing array of the []uint64 bitset
		// via pointer arithmetic; NOTE(review): host byte-order dependent.
		*(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bloomfilter.bitset[0])) + uintptr(i))) = b
	}
	return bloomfilter
}
// bloomJSONImExport
// Im/Export structure used by JSONMarshal / JSONUnmarshal
type bloomJSONImExport struct {
	FilterSet []byte // raw bytes of the bitset
	SetLocs   uint64 // number of hash locations per entry
}
// JSONUnmarshal
// takes JSON-Object (type bloomJSONImExport) as []bytes
// returns Bloom object
func JSONUnmarshal(dbData []byte) Bloom {
	bloomImEx := bloomJSONImExport{}
	// NOTE(review): the unmarshal error is ignored; malformed input silently
	// yields an empty filter instead of an error.
	json.Unmarshal(dbData, &bloomImEx)
	buf := bytes.NewBuffer(bloomImEx.FilterSet)
	bs := buf.Bytes()
	bf := NewWithBoolset(&bs, bloomImEx.SetLocs)
	return bf
}
//
// Bloom filter
type Bloom struct {
	Mtx     *sync.Mutex // guards the *TS (thread-safe) method variants
	ElemNum uint64      // incremented once per hash location in Add
	bitset  []uint64    // the bit array backing the filter
	sizeExp uint64      // exponent of the bitset size in bits (bits == 1<<sizeExp)
	size    uint64      // bit-count minus one, used as an index mask
	setLocs uint64      // number of hash locations set/checked per entry
	shift   uint64      // 64 - sizeExp, used when fragmenting the hash
}
// <--- http://www.cse.yorku.ca/~oz/hash.html
// modified Berkeley DB Hash (32bit)
// hash is casted to l, h = 16bit fragments
// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) {
// hash := uint64(len(*b))
// for _, c := range *b {
// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash
// }
// h = hash >> bl.shift
// l = hash << bl.shift >> bl.shift
// return l, h
// }
// Update: found sipHash of Jean-Philippe Aumasson & Daniel J. Bernstein to be even faster than absdbm()
// https://131002.net/siphash/
// siphash was implemented for Go by Dmitry Chestnykh https://github.com/dchest/siphash
// Add
// set the bit(s) for entry; Adds an entry to the Bloom filter
func (bl *Bloom) Add(entry []byte) {
	l, h := bl.sipHash(entry)
	for i := uint64(0); i < bl.setLocs; i++ {
		bl.set((h + i*l) & bl.size)
	}
	// Count each added entry exactly once. Previously the counter was
	// incremented inside the loop, which inflated ElemNum by a factor of
	// setLocs per entry, contradicting its documented meaning of
	// "number of elements added".
	bl.ElemNum++
}
// AddTS
// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry
func (bl *Bloom) AddTS(entry []byte) {
	bl.Mtx.Lock()
	defer bl.Mtx.Unlock()
	// Delegate to the unsynchronized Add while holding the shared mutex.
	bl.Add(entry)
}
// Has
// check if bit(s) for entry is/are set
// returns true if the entry was added to the Bloom Filter
// (false positives are possible; false negatives are not)
func (bl Bloom) Has(entry []byte) bool {
	l, h := bl.sipHash(entry)
	res := true
	for i := uint64(0); i < bl.setLocs; i++ {
		// All setLocs positions must be set for a hit; the author deliberately
		// keeps the branch-free &&-accumulation (see note below).
		res = res && bl.isSet((h+i*l)&bl.size)
		// https://github.com/ipfs/bbloom/commit/84e8303a9bfb37b2658b85982921d15bbb0fecff
		// // Branching here (early escape) is not worth it
		// // This is my conclusion from benchmarks
		// // (prevents loop unrolling)
		// switch bl.IsSet((h + i*l) & bl.size) {
		// case false:
		// return false
		// }
	}
	return res
}
// HasTS
// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry
func (bl *Bloom) HasTS(entry []byte) bool {
	bl.Mtx.Lock()
	defer bl.Mtx.Unlock()
	// Delegate to the unsynchronized Has while holding the shared mutex.
	return bl.Has(entry)
}
// AddIfNotHas
// Only Add entry if it's not present in the bloomfilter
// returns true if entry was added
// returns false if entry was already registered in the bloomfilter
func (bl Bloom) AddIfNotHas(entry []byte) (added bool) {
	// NOTE(review): value receiver — Add mutates the shared bitset backing
	// array (bits persist), but its ElemNum increment lands on the receiver
	// copy and is lost.
	if bl.Has(entry) {
		return added
	}
	bl.Add(entry)
	return true
}
// AddIfNotHasTS
// Thread safe: Only Add entry if it's not present in the bloomfilter
// returns true if entry was added
// returns false if entry was already registered in the bloomfilter
func (bl *Bloom) AddIfNotHasTS(entry []byte) (added bool) {
	bl.Mtx.Lock()
	defer bl.Mtx.Unlock()
	return bl.AddIfNotHas(entry)
}
// Size allocates the backing bitset with capacity for sz bits
// (sz/64 words of 64 bits each; sz is expected to be a power of two).
func (bl *Bloom) Size(sz uint64) {
	bl.bitset = make([]uint64, sz/64)
}
// Clear zeroes every word of the bitset, emptying the Bloom filter.
func (bl *Bloom) Clear() {
	for i := range bl.bitset {
		bl.bitset[i] = 0
	}
}
// Set
// set the bit[idx] of bitsit
func (bl *Bloom) set(idx uint64) {
	// ommit unsafe
	// *(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))) |= mask[idx%8]
	// idx>>6 selects the 64-bit word; idx%64 selects the bit within it.
	bl.bitset[idx>>6] |= 1 << (idx % 64)
}
// IsSet
// check if bit[idx] of bitset is set
// returns true/false
func (bl *Bloom) isSet(idx uint64) bool {
	// ommit unsafe
	// return (((*(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)))) >> (idx % 8)) & 1) == 1
	// idx>>6 selects the 64-bit word; idx%64 selects the bit within it.
	return bl.bitset[idx>>6]&(1<<(idx%64)) != 0
}
// JSONMarshal
// returns JSON-object (type bloomJSONImExport) as []byte
func (bl Bloom) JSONMarshal() []byte {
	bloomImEx := bloomJSONImExport{}
	bloomImEx.SetLocs = uint64(bl.setLocs)
	// Export the bitset byte-by-byte via pointer arithmetic (len<<3 bytes).
	// NOTE(review): host byte-order dependent; mirrors the import loop in
	// NewWithBoolset.
	bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3)
	for i := range bloomImEx.FilterSet {
		bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[0])) + uintptr(i)))
	}
	data, err := json.Marshal(bloomImEx)
	if err != nil {
		log.Fatal("json.Marshal failed: ", err)
	}
	return data
}
// // alternative hashFn
// func (bl Bloom) fnv64a(b *[]byte) (l, h uint64) {
// h64 := fnv.New64a()
// h64.Write(*b)
// hash := h64.Sum64()
// h = hash >> 32
// l = hash << 32 >> 32
// return l, h
// }
//
// // <-- http://partow.net/programming/hashfunctions/index.html
// // citation: An algorithm proposed by Donald E. Knuth in The Art Of Computer Programming Volume 3,
// // under the topic of sorting and search chapter 6.4.
// // modified to fit with boolset-length
// func (bl Bloom) DEKHash(b *[]byte) (l, h uint64) {
// hash := uint64(len(*b))
// for _, c := range *b {
// hash = ((hash << 5) ^ (hash >> bl.shift)) ^ uint64(c)
// }
// h = hash >> bl.shift
// l = hash << bl.sizeExp >> bl.sizeExp
// return l, h
// }

View File

@@ -1,225 +0,0 @@
// Written in 2012 by Dmitry Chestnykh.
//
// To the extent possible under law, the author have dedicated all copyright
// and related and neighboring rights to this software to the public domain
// worldwide. This software is distributed without any warranty.
// http://creativecommons.org/publicdomain/zero/1.0/
//
// Package siphash implements SipHash-2-4, a fast short-input PRF
// created by Jean-Philippe Aumasson and Daniel J. Bernstein.
package bbloom
// sipHash computes the 64-bit SipHash-2-4 of p under the fixed 128-bit key
// baked into the v0..v3 initialization constants, then splits the result
// into a high part h (hash >> bl.shift) and a low part l (the remaining
// low bits), as consumed by the filter's hashing scheme.
func (bl Bloom) sipHash(p []byte) (l, h uint64) {
	// Initialization: the key XORed with the SipHash "somepseudorandomly..."
	// constants, pre-computed into plain integers.
	v0 := uint64(8317987320269560794) // k0 ^ 0x736f6d6570736575
	v1 := uint64(7237128889637516672) // k1 ^ 0x646f72616e646f6d
	v2 := uint64(7816392314733513934) // k0 ^ 0x6c7967656e657261
	v3 := uint64(8387220255325274014) // k1 ^ 0x7465646279746573

	// t carries the input length in its top byte and the trailing input
	// bytes below it.
	t := uint64(len(p)) << 56

	// round is one SipRound; SipHash-2-4 applies it twice per message block
	// and four times during finalization.
	round := func() {
		v0 += v1
		v1 = v1<<13 | v1>>51
		v1 ^= v0
		v0 = v0<<32 | v0>>32
		v2 += v3
		v3 = v3<<16 | v3>>48
		v3 ^= v2
		v0 += v3
		v3 = v3<<21 | v3>>43
		v3 ^= v0
		v2 += v1
		v1 = v1<<17 | v1>>47
		v1 ^= v2
		v2 = v2<<32 | v2>>32
	}

	// Compression: consume full 8-byte little-endian blocks.
	for len(p) >= 8 {
		m := uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 |
			uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
		v3 ^= m
		round()
		round()
		v0 ^= m
		p = p[8:]
	}

	// Last block: fold the remaining 0..7 bytes into t below the length byte.
	switch len(p) {
	case 7:
		t |= uint64(p[6]) << 48
		fallthrough
	case 6:
		t |= uint64(p[5]) << 40
		fallthrough
	case 5:
		t |= uint64(p[4]) << 32
		fallthrough
	case 4:
		t |= uint64(p[3]) << 24
		fallthrough
	case 3:
		t |= uint64(p[2]) << 16
		fallthrough
	case 2:
		t |= uint64(p[1]) << 8
		fallthrough
	case 1:
		t |= uint64(p[0])
	}
	v3 ^= t
	round()
	round()
	v0 ^= t

	// Finalization: four rounds after flipping the low byte of v2.
	v2 ^= 0xff
	round()
	round()
	round()
	round()

	hash := v0 ^ v1 ^ v2 ^ v3
	h = hash >> bl.shift
	l = hash << bl.shift >> bl.shift
	return l, h
}

View File

@@ -1,140 +0,0 @@
2014/01/01 00:00:00 /info.html
2014/01/01 00:00:00 /info.html
2014/01/01 00:00:01 /info.html
2014/01/01 00:00:02 /info.html
2014/01/01 00:00:03 /info.html
2014/01/01 00:00:04 /info.html
2014/01/01 00:00:05 /info.html
2014/01/01 00:00:06 /info.html
2014/01/01 00:00:07 /info.html
2014/01/01 00:00:08 /info.html
2014/01/01 00:00:09 /info.html
2014/01/01 00:00:10 /info.html
2014/01/01 00:00:11 /info.html
2014/01/01 00:00:12 /info.html
2014/01/01 00:00:13 /info.html
2014/01/01 00:00:14 /info.html
2014/01/01 00:00:15 /info.html
2014/01/01 00:00:16 /info.html
2014/01/01 00:00:17 /info.html
2014/01/01 00:00:18 /info.html
2014/01/01 00:00:19 /info.html
2014/01/01 00:00:20 /info.html
2014/01/01 00:00:21 /info.html
2014/01/01 00:00:22 /info.html
2014/01/01 00:00:23 /info.html
2014/01/01 00:00:24 /info.html
2014/01/01 00:00:25 /info.html
2014/01/01 00:00:26 /info.html
2014/01/01 00:00:27 /info.html
2014/01/01 00:00:28 /info.html
2014/01/01 00:00:29 /info.html
2014/01/01 00:00:30 /info.html
2014/01/01 00:00:31 /info.html
2014/01/01 00:00:32 /info.html
2014/01/01 00:00:33 /info.html
2014/01/01 00:00:34 /info.html
2014/01/01 00:00:35 /info.html
2014/01/01 00:00:36 /info.html
2014/01/01 00:00:37 /info.html
2014/01/01 00:00:38 /info.html
2014/01/01 00:00:39 /info.html
2014/01/01 00:00:40 /info.html
2014/01/01 00:00:41 /info.html
2014/01/01 00:00:42 /info.html
2014/01/01 00:00:43 /info.html
2014/01/01 00:00:44 /info.html
2014/01/01 00:00:45 /info.html
2014/01/01 00:00:46 /info.html
2014/01/01 00:00:47 /info.html
2014/01/01 00:00:48 /info.html
2014/01/01 00:00:49 /info.html
2014/01/01 00:00:50 /info.html
2014/01/01 00:00:51 /info.html
2014/01/01 00:00:52 /info.html
2014/01/01 00:00:53 /info.html
2014/01/01 00:00:54 /info.html
2014/01/01 00:00:55 /info.html
2014/01/01 00:00:56 /info.html
2014/01/01 00:00:57 /info.html
2014/01/01 00:00:58 /info.html
2014/01/01 00:00:59 /info.html
2014/01/01 00:01:00 /info.html
2014/01/01 00:01:01 /info.html
2014/01/01 00:01:02 /info.html
2014/01/01 00:01:03 /info.html
2014/01/01 00:01:04 /info.html
2014/01/01 00:01:05 /info.html
2014/01/01 00:01:06 /info.html
2014/01/01 00:01:07 /info.html
2014/01/01 00:01:08 /info.html
2014/01/01 00:01:09 /info.html
2014/01/01 00:01:10 /info.html
2014/01/01 00:01:11 /info.html
2014/01/01 00:01:12 /info.html
2014/01/01 00:01:13 /info.html
2014/01/01 00:01:14 /info.html
2014/01/01 00:01:15 /info.html
2014/01/01 00:01:16 /info.html
2014/01/01 00:01:17 /info.html
2014/01/01 00:01:18 /info.html
2014/01/01 00:01:19 /info.html
2014/01/01 00:01:20 /info.html
2014/01/01 00:01:21 /info.html
2014/01/01 00:01:22 /info.html
2014/01/01 00:01:23 /info.html
2014/01/01 00:01:24 /info.html
2014/01/01 00:01:25 /info.html
2014/01/01 00:01:26 /info.html
2014/01/01 00:01:27 /info.html
2014/01/01 00:01:28 /info.html
2014/01/01 00:01:29 /info.html
2014/01/01 00:01:30 /info.html
2014/01/01 00:01:31 /info.html
2014/01/01 00:01:32 /info.html
2014/01/01 00:01:33 /info.html
2014/01/01 00:01:34 /info.html
2014/01/01 00:01:35 /info.html
2014/01/01 00:01:36 /info.html
2014/01/01 00:01:37 /info.html
2014/01/01 00:01:38 /info.html
2014/01/01 00:01:39 /info.html
2014/01/01 00:01:40 /info.html
2014/01/01 00:01:41 /info.html
2014/01/01 00:01:42 /info.html
2014/01/01 00:01:43 /info.html
2014/01/01 00:01:44 /info.html
2014/01/01 00:01:45 /info.html
2014/01/01 00:01:46 /info.html
2014/01/01 00:01:47 /info.html
2014/01/01 00:01:48 /info.html
2014/01/01 00:01:49 /info.html
2014/01/01 00:01:50 /info.html
2014/01/01 00:01:51 /info.html
2014/01/01 00:01:52 /info.html
2014/01/01 00:01:53 /info.html
2014/01/01 00:01:54 /info.html
2014/01/01 00:01:55 /info.html
2014/01/01 00:01:56 /info.html
2014/01/01 00:01:57 /info.html
2014/01/01 00:01:58 /info.html
2014/01/01 00:01:59 /info.html
2014/01/01 00:02:00 /info.html
2014/01/01 00:02:01 /info.html
2014/01/01 00:02:02 /info.html
2014/01/01 00:02:03 /info.html
2014/01/01 00:02:04 /info.html
2014/01/01 00:02:05 /info.html
2014/01/01 00:02:06 /info.html
2014/01/01 00:02:07 /info.html
2014/01/01 00:02:08 /info.html
2014/01/01 00:02:09 /info.html
2014/01/01 00:02:10 /info.html
2014/01/01 00:02:11 /info.html
2014/01/01 00:02:12 /info.html
2014/01/01 00:02:13 /info.html
2014/01/01 00:02:14 /info.html
2014/01/01 00:02:15 /info.html
2014/01/01 00:02:16 /info.html
2014/01/01 00:02:17 /info.html
2014/01/01 00:02:18 /info.html

View File

@@ -1,24 +0,0 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>

View File

@@ -1,7 +0,0 @@
# gopher-json [![GoDoc](https://godoc.org/layeh.com/gopher-json?status.svg)](https://godoc.org/layeh.com/gopher-json)
Package json is a simple JSON encoder/decoder for [gopher-lua](https://github.com/yuin/gopher-lua).
## License
Public domain

View File

@@ -1,33 +0,0 @@
// Package json is a simple JSON encoder/decoder for gopher-lua.
//
// Documentation
//
// The following functions are exposed by the library:
// decode(string): Decodes a JSON string. Returns nil and an error string if
// the string could not be decoded.
// encode(value): Encodes a value into a JSON string. Returns nil and an error
// string if the value could not be encoded.
//
// The following types are supported:
//
// Lua | JSON
// ---------+-----
// nil | null
// number | number
// string | string
// table | object: when table is non-empty and has only string keys
// | array: when table is empty, or has only sequential numeric keys
// | starting from 1
//
// Attempting to encode any other Lua type will result in an error.
//
// Example
//
// Below is an example usage of the library:
// import (
// luajson "layeh.com/gopher-json"
// )
//
// L := lua.NewState()
// luajson.Preload(L)
package json

View File

@@ -1,189 +0,0 @@
package json
import (
"encoding/json"
"errors"
"github.com/yuin/gopher-lua"
)
// Preload adds json to the given Lua state's package.preload table. After it
// has been preloaded, it can be loaded using require:
//
//	local json = require("json")
func Preload(L *lua.LState) {
	L.PreloadModule("json", Loader)
}
// Loader is the module loader function.
// It builds the module table from the decode/encode API and pushes it as the
// single return value.
func Loader(L *lua.LState) int {
	t := L.NewTable()
	L.SetFuncs(t, api)
	L.Push(t)
	return 1
}
// api maps the Lua-visible function names to their Go implementations.
var api = map[string]lua.LGFunction{
	"decode": apiDecode,
	"encode": apiEncode,
}
// apiDecode implements the Lua-facing decode(string) function: it parses the
// given JSON string and pushes the resulting Lua value, or nil plus an error
// message when decoding fails.
func apiDecode(L *lua.LState) int {
	if L.GetTop() != 1 {
		L.Error(lua.LString("bad argument #1 to decode"), 1)
		return 0
	}
	decoded, err := Decode(L, []byte(L.CheckString(1)))
	if err != nil {
		L.Push(lua.LNil)
		L.Push(lua.LString(err.Error()))
		return 2
	}
	L.Push(decoded)
	return 1
}
// apiEncode implements the Lua-facing encode(value) function: it serializes
// the given Lua value to a JSON string, or returns nil plus an error message
// when the value cannot be encoded.
func apiEncode(L *lua.LState) int {
	if L.GetTop() != 1 {
		L.Error(lua.LString("bad argument #1 to encode"), 1)
		return 0
	}
	encoded, err := Encode(L.CheckAny(1))
	if err != nil {
		L.Push(lua.LNil)
		L.Push(lua.LString(err.Error()))
		return 2
	}
	L.Push(lua.LString(string(encoded)))
	return 1
}
// Sentinel errors reported by Encode for tables that cannot be represented
// in JSON.
var (
	errNested      = errors.New("cannot encode recursively nested tables to JSON")
	errSparseArray = errors.New("cannot encode sparse array")
	errInvalidKeys = errors.New("cannot encode mixed or invalid key types")
)
// invalidTypeError reports a Lua value type that has no JSON representation.
type invalidTypeError lua.LValueType

// Error implements the error interface.
func (i invalidTypeError) Error() string {
	return `cannot encode ` + lua.LValueType(i).String() + ` to JSON`
}
// Encode returns the JSON encoding of value.
// A fresh visited set is created per call so cycle detection is scoped to
// this encoding.
func Encode(value lua.LValue) ([]byte, error) {
	return json.Marshal(jsonValue{
		LValue:  value,
		visited: make(map[*lua.LTable]bool),
	})
}
// jsonValue wraps a Lua value together with the set of tables already seen
// on the current encoding path, used to reject recursively nested tables.
type jsonValue struct {
	lua.LValue
	visited map[*lua.LTable]bool
}
// MarshalJSON encodes the wrapped Lua value, tracking visited tables in
// j.visited to reject recursively nested tables with errNested.
func (j jsonValue) MarshalJSON() (data []byte, err error) {
	switch converted := j.LValue.(type) {
	case lua.LBool:
		data, err = json.Marshal(bool(converted))
	case lua.LNumber:
		data, err = json.Marshal(float64(converted))
	case *lua.LNilType:
		data = []byte(`null`)
	case lua.LString:
		data, err = json.Marshal(string(converted))
	case *lua.LTable:
		if j.visited[converted] {
			return nil, errNested
		}
		j.visited[converted] = true
		// The type of the first key decides whether the table becomes a
		// JSON array (numeric keys) or object (string keys).
		key, value := converted.Next(lua.LNil)
		switch key.Type() {
		case lua.LTNil: // empty table
			data = []byte(`[]`)
		case lua.LTNumber:
			arr := make([]jsonValue, 0, converted.Len())
			expectedKey := lua.LNumber(1)
			for key != lua.LNil {
				if key.Type() != lua.LTNumber {
					err = errInvalidKeys
					return
				}
				// Keys must be exactly 1,2,3,...; any gap is a sparse array.
				if expectedKey != key {
					err = errSparseArray
					return
				}
				arr = append(arr, jsonValue{value, j.visited})
				expectedKey++
				key, value = converted.Next(key)
			}
			data, err = json.Marshal(arr)
		case lua.LTString:
			obj := make(map[string]jsonValue)
			for key != lua.LNil {
				if key.Type() != lua.LTString {
					err = errInvalidKeys
					return
				}
				obj[key.String()] = jsonValue{value, j.visited}
				key, value = converted.Next(key)
			}
			data, err = json.Marshal(obj)
		default:
			err = errInvalidKeys
		}
	default:
		// Functions, userdata, channels, etc. cannot be encoded.
		err = invalidTypeError(j.LValue.Type())
	}
	return
}
// Decode converts the JSON encoded data to Lua values.
// It unmarshals into a generic interface{} first and then maps the result to
// Lua via DecodeValue; a JSON syntax error is returned unchanged.
func Decode(L *lua.LState, data []byte) (lua.LValue, error) {
	var value interface{}
	err := json.Unmarshal(data, &value)
	if err != nil {
		return nil, err
	}
	return DecodeValue(L, value), nil
}
// DecodeValue converts the value to a Lua value.
//
// This function only converts values that the encoding/json package decodes
// to. All other values (including nil) return lua.LNil.
func DecodeValue(L *lua.LState, value interface{}) lua.LValue {
	switch v := value.(type) {
	case bool:
		return lua.LBool(v)
	case float64:
		return lua.LNumber(v)
	case string:
		return lua.LString(v)
	case json.Number:
		// json.Number is carried over verbatim as a string.
		return lua.LString(v)
	case []interface{}:
		arr := L.CreateTable(len(v), 0)
		for _, elem := range v {
			arr.Append(DecodeValue(L, elem))
		}
		return arr
	case map[string]interface{}:
		tbl := L.CreateTable(0, len(v))
		for name, elem := range v {
			tbl.RawSetH(lua.LString(name), DecodeValue(L, elem))
		}
		return tbl
	}
	// nil and every unhandled type decode to Lua nil.
	return lua.LNil
}

View File

@@ -1,6 +0,0 @@
/integration/redis_src/
/integration/dump.rdb
*.swp
/integration/nodes.conf
.idea/
miniredis.iml

View File

@@ -1,225 +0,0 @@
## Changelog
### v2.23.0
- basic INFO support (thanks @kirill-a-belov)
- support COUNT in SSCAN (thanks @Abdi-dd)
- test and support Go 1.19
- support LPOS (thanks @ianstarz)
- support XPENDING, XGROUP {CREATECONSUMER,DESTROY,DELCONSUMER}, XINFO {CONSUMERS,GROUPS}, XCLAIM (thanks @sandyharvie)
### v2.22.0
- set miniredis.DumpMaxLineLen to get more Dump() info (thanks @afjoseph)
- fix invalid response of COMMAND (thanks @zsh1995)
- fix possibility to generate duplicate IDs in XADD (thanks @readams)
- adds support for XAUTOCLAIM min-idle parameter (thanks @readams)
### v2.21.0
- support for GETEX (thanks @dntj)
- support for GT and LT in ZADD (thanks @lsgndln)
- support for XAUTOCLAIM (thanks @randall-fulton)
### v2.20.0
- back to support Go >= 1.14 (thanks @ajatprabha and @marcind)
### v2.19.0
- support for TYPE in SCAN (thanks @0xDiddi)
- update BITPOS (thanks @dirkm)
- fix a lua redis.call() return value (thanks @mpetronic)
- update ZRANGE (thanks @valdemarpereira)
### v2.18.0
- support for ZUNION (thanks @propan)
- support for COPY (thanks @matiasinsaurralde and @rockitbaby)
- support for LMOVE (thanks @btwear)
### v2.17.0
- added miniredis.RunT(t)
### v2.16.1
- fix ZINTERSTORE with wets (thanks @lingjl2010 and @okhowang)
- fix exclusive ranges in XRANGE (thanks @joseotoro)
### v2.16.0
- simplify some code (thanks @zonque)
- support for EXAT/PXAT in SET
- support for XTRIM (thanks @joseotoro)
- support for ZRANDMEMBER
- support for redis.log() in lua (thanks @dirkm)
### v2.15.2
- Fix race condition in blocking code (thanks @zonque and @robx)
- XREAD accepts '$' as ID (thanks @bradengroom)
### v2.15.1
- EVAL should cache the script (thanks @guoshimin)
### v2.15.0
- target redis 6.2 and added new args to various commands
- support for all hyperlog commands (thanks @ilbaktin)
- support for GETDEL (thanks @wszaranski)
### v2.14.5
- added XPENDING
- support for BLOCK option in XREAD and XREADGROUP
### v2.14.4
- fix BITPOS error (thanks @xiaoyuzdy)
- small fixes for XREAD, XACK, and XDEL. Mostly error cases.
- fix empty EXEC return type (thanks @ashanbrown)
- fix XDEL (thanks @svakili and @yvesf)
- fix FLUSHALL for streams (thanks @svakili)
### v2.14.3
- fix problem where Lua code didn't set the selected DB
- update to redis 6.0.10 (thanks @lazappa)
### v2.14.2
- update LUA dependency
- deal with (p)unsubscribe when there are no channels
### v2.14.1
- mod tidy
### v2.14.0
- support for HELLO and the RESP3 protocol
- KEEPTTL in SET (thanks @johnpena)
### v2.13.3
- support Go 1.14 and 1.15
- update the `Check...()` methods
- support for XREAD (thanks @pieterlexis)
### v2.13.2
- Use SAN instead of CN in self signed cert for testing (thanks @johejo)
- Travis CI now tests against the most recent two versions of Go (thanks @johejo)
- changed unit and integration tests to compare raw payloads, not parsed payloads
- remove "redigo" dependency
### v2.13.1
- added HSTRLEN
- minimal support for ACL users in AUTH
### v2.13.0
- added RunTLS(...)
- added SetError(...)
### v2.12.0
- redis 6
- Lua json update (thanks @gsmith85)
- CLUSTER commands (thanks @kratisto)
- fix TOUCH
- fix a shutdown race condition
### v2.11.4
- ZUNIONSTORE now supports standard set types (thanks @wshirey)
### v2.11.3
- support for TOUCH (thanks @cleroux)
- support for cluster and stream commands (thanks @kak-tus)
### v2.11.2
- make sure Lua code is executed concurrently
- add command GEORADIUSBYMEMBER (thanks @kyeett)
### v2.11.1
- globals protection for Lua code (thanks @vk-outreach)
- HSET update (thanks @carlgreen)
- fix BLPOP block on shutdown (thanks @Asalle)
### v2.11.0
- added XRANGE/XREVRANGE, XADD, and XLEN (thanks @skateinmars)
- added GEODIST
- improved precision for geohashes, closer to what real redis does
- use 128bit floats internally for INCRBYFLOAT and related (thanks @timnd)
### v2.10.1
- added m.Server()
### v2.10.0
- added UNLINK
- fix DEL zero-argument case
- cleanup some direct access commands
- added GEOADD, GEOPOS, GEORADIUS, and GEORADIUS_RO
### v2.9.1
- fix issue with ZRANGEBYLEX
- fix issue with BRPOPLPUSH and direct access
### v2.9.0
- proper versioned import of github.com/gomodule/redigo (thanks @yfei1)
- fix messages generated by PSUBSCRIBE
- optional internal seed (thanks @zikaeroh)
### v2.8.0
Proper `v2` in go.mod.
### older
See https://github.com/alicebob/miniredis/releases for the full changelog

View File

@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Harmen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,12 +0,0 @@
.PHONY: all test testrace int
all: test
test:
go test ./...
testrace:
go test -race ./...
int:
${MAKE} -C integration all

View File

@@ -1,333 +0,0 @@
# Miniredis
Pure Go Redis test server, used in Go unittests.
##
Sometimes you want to test code which uses Redis, without making it a full-blown
integration test.
Miniredis implements (parts of) the Redis server, to be used in unittests. It
enables a simple, cheap, in-memory, Redis replacement, with a real TCP interface. Think of it as the Redis version of `net/http/httptest`.
It saves you from using mock code, and since the redis server lives in the
test process you can query for values directly, without going through the server
stack.
There are no dependencies on external binaries, so you can easily integrate it in automated build processes.
Be sure to import v2:
```
import "github.com/alicebob/miniredis/v2"
```
## Commands
Implemented commands:
- Connection (complete)
- AUTH -- see RequireAuth()
- ECHO
- HELLO -- see RequireUserAuth()
- PING
- SELECT
- SWAPDB
- QUIT
- Key
- COPY
- DEL
- EXISTS
- EXPIRE
- EXPIREAT
- KEYS
- MOVE
- PERSIST
- PEXPIRE
- PEXPIREAT
- PTTL
- RENAME
- RENAMENX
- RANDOMKEY -- see m.Seed(...)
- SCAN
- TOUCH
- TTL
- TYPE
- UNLINK
- Transactions (complete)
- DISCARD
- EXEC
- MULTI
- UNWATCH
- WATCH
- Server
- DBSIZE
- FLUSHALL
- FLUSHDB
- TIME -- returns time.Now() or value set by SetTime()
- COMMAND -- partly
- INFO -- partly, returns only "clients" section with one field "connected_clients"
- String keys (complete)
- APPEND
- BITCOUNT
- BITOP
- BITPOS
- DECR
- DECRBY
- GET
- GETBIT
- GETRANGE
- GETSET
- GETDEL
- GETEX
- INCR
- INCRBY
- INCRBYFLOAT
- MGET
- MSET
- MSETNX
- PSETEX
- SET
- SETBIT
- SETEX
- SETNX
- SETRANGE
- STRLEN
- Hash keys (complete)
- HDEL
- HEXISTS
- HGET
- HGETALL
- HINCRBY
- HINCRBYFLOAT
- HKEYS
- HLEN
- HMGET
- HMSET
- HSET
- HSETNX
- HSTRLEN
- HVALS
- HSCAN
- List keys (complete)
- BLPOP
- BRPOP
- BRPOPLPUSH
- LINDEX
- LINSERT
- LLEN
- LPOP
- LPUSH
- LPUSHX
- LRANGE
- LREM
- LSET
- LTRIM
- RPOP
- RPOPLPUSH
- RPUSH
- RPUSHX
- LMOVE
- Pub/Sub (complete)
- PSUBSCRIBE
- PUBLISH
- PUBSUB
- PUNSUBSCRIBE
- SUBSCRIBE
- UNSUBSCRIBE
- Set keys (complete)
- SADD
- SCARD
- SDIFF
- SDIFFSTORE
- SINTER
- SINTERSTORE
- SISMEMBER
- SMEMBERS
- SMOVE
- SPOP -- see m.Seed(...)
- SRANDMEMBER -- see m.Seed(...)
- SREM
- SUNION
- SUNIONSTORE
- SSCAN
- Sorted Set keys (complete)
- ZADD
- ZCARD
- ZCOUNT
- ZINCRBY
- ZINTERSTORE
- ZLEXCOUNT
- ZPOPMIN
- ZPOPMAX
- ZRANDMEMBER
- ZRANGE
- ZRANGEBYLEX
- ZRANGEBYSCORE
- ZRANK
- ZREM
- ZREMRANGEBYLEX
- ZREMRANGEBYRANK
- ZREMRANGEBYSCORE
- ZREVRANGE
- ZREVRANGEBYLEX
- ZREVRANGEBYSCORE
- ZREVRANK
- ZSCORE
- ZUNION
- ZUNIONSTORE
- ZSCAN
- Stream keys
- XACK
- XADD
- XAUTOCLAIM
- XCLAIM
- XDEL
- XGROUP CREATE
- XGROUP CREATECONSUMER
- XGROUP DESTROY
- XGROUP DELCONSUMER
- XINFO STREAM -- partly
- XINFO GROUPS
- XINFO CONSUMERS -- partly
- XLEN
- XRANGE
- XREAD
- XREADGROUP
- XREVRANGE
- XPENDING
- XTRIM
- Scripting
- EVAL
- EVALSHA
- SCRIPT LOAD
- SCRIPT EXISTS
- SCRIPT FLUSH
- GEO
- GEOADD
- GEODIST
- ~~GEOHASH~~
- GEOPOS
- GEORADIUS
- GEORADIUS_RO
- GEORADIUSBYMEMBER
- GEORADIUSBYMEMBER_RO
- Cluster
- CLUSTER SLOTS
- CLUSTER KEYSLOT
- CLUSTER NODES
- HyperLogLog (complete)
- PFADD
- PFCOUNT
- PFMERGE
## TTLs, key expiration, and time
Since miniredis is intended to be used in unittests TTLs don't decrease
automatically. You can use `TTL()` to get the TTL (as a time.Duration) of a
key. It will return 0 when no TTL is set.
`m.FastForward(d)` can be used to decrement all TTLs. All TTLs which become <=
0 will be removed.
EXPIREAT and PEXPIREAT values will be
converted to a duration. For that you can either set m.SetTime(t) to use that
time as the base for the (P)EXPIREAT conversion, or don't call SetTime(), in
which case time.Now() will be used.
SetTime() also sets the value returned by TIME, which defaults to time.Now().
It is not updated by FastForward, only by SetTime.
## Randomness and Seed()
Miniredis will use `math/rand`'s global RNG for randomness unless a seed is
provided by calling `m.Seed(...)`. If a seed is provided, then miniredis will
use its own RNG based on that seed.
Commands which use randomness are: RANDOMKEY, SPOP, and SRANDMEMBER.
## Example
``` Go
import (
...
"github.com/alicebob/miniredis/v2"
...
)
func TestSomething(t *testing.T) {
s := miniredis.RunT(t)
// Optionally set some keys your code expects:
s.Set("foo", "bar")
s.HSet("some", "other", "key")
// Run your code and see if it behaves.
// An example using the redigo library from "github.com/gomodule/redigo/redis":
c, err := redis.Dial("tcp", s.Addr())
_, err = c.Do("SET", "foo", "bar")
// Optionally check values in redis...
if got, err := s.Get("foo"); err != nil || got != "bar" {
t.Error("'foo' has the wrong value")
}
// ... or use a helper for that:
s.CheckGet(t, "foo", "bar")
// TTL and expiration:
s.Set("foo", "bar")
s.SetTTL("foo", 10*time.Second)
s.FastForward(11 * time.Second)
if s.Exists("foo") {
t.Fatal("'foo' should not have existed anymore")
}
}
```
## Not supported
Commands which will probably not be implemented:
- CLUSTER (all)
- ~~CLUSTER *~~
- ~~READONLY~~
- ~~READWRITE~~
- Key
- ~~DUMP~~
- ~~MIGRATE~~
- ~~OBJECT~~
- ~~RESTORE~~
- ~~WAIT~~
- Scripting
- ~~SCRIPT DEBUG~~
- ~~SCRIPT KILL~~
- Server
- ~~BGSAVE~~
- ~~BGREWRITEAOF~~
- ~~CLIENT *~~
- ~~CONFIG *~~
- ~~DEBUG *~~
- ~~LASTSAVE~~
- ~~MONITOR~~
- ~~ROLE~~
- ~~SAVE~~
- ~~SHUTDOWN~~
- ~~SLAVEOF~~
- ~~SLOWLOG~~
- ~~SYNC~~
## &c.
Integration tests are run against Redis 6.2.6. The [./integration](./integration/) subdir
compares miniredis against a real redis instance.
The Redis 6 RESP3 protocol is supported. If there are problems, please open
an issue.
If you want to test Redis Sentinel have a look at [minisentinel](https://github.com/Bose/minisentinel).
A changelog is kept at [CHANGELOG.md](https://github.com/alicebob/miniredis/blob/master/CHANGELOG.md).
[![Go Reference](https://pkg.go.dev/badge/github.com/alicebob/miniredis/v2.svg)](https://pkg.go.dev/github.com/alicebob/miniredis/v2)

View File

@@ -1,63 +0,0 @@
package miniredis
import (
"reflect"
"sort"
)
// T is implemented by Testing.T
type T interface {
Helper()
Errorf(string, ...interface{})
}
// CheckGet does not call Errorf() iff there is a string key with the
// expected value. Normal use case is `m.CheckGet(t, "username", "theking")`.
func (m *Miniredis) CheckGet(t T, key, expected string) {
	t.Helper()
	found, err := m.Get(key)
	if err != nil {
		t.Errorf("GET error, key %#v: %v", key, err)
		return
	}
	if found != expected {
		t.Errorf("GET error, key %#v: Expected %#v, got %#v", key, expected, found)
		return
	}
}
// CheckList does not call Errorf() iff there is a list key with the
// expected values, in the expected order.
// Normal use case is `m.CheckList(t, "favorite_colors", "red", "green", "infrared")`.
func (m *Miniredis) CheckList(t T, key string, expected ...string) {
	t.Helper()
	found, err := m.List(key)
	if err != nil {
		t.Errorf("List error, key %#v: %v", key, err)
		return
	}
	if !reflect.DeepEqual(expected, found) {
		t.Errorf("List error, key %#v: Expected %#v, got %#v", key, expected, found)
		return
	}
}
// CheckSet does not call Errorf() iff there is a set key with the
// expected values (order-insensitive: expected is sorted before comparing).
// Normal use case is `m.CheckSet(t, "visited", "Rome", "Stockholm", "Dublin")`.
func (m *Miniredis) CheckSet(t T, key string, expected ...string) {
	t.Helper()
	found, err := m.Members(key)
	if err != nil {
		t.Errorf("Set error, key %#v: %v", key, err)
		return
	}
	// Only expected is sorted here; NOTE(review): presumably Members()
	// already returns sorted members — confirm.
	sort.Strings(expected)
	if !reflect.DeepEqual(expected, found) {
		t.Errorf("Set error, key %#v: Expected %#v, got %#v", key, expected, found)
		return
	}
}

View File

@@ -1,67 +0,0 @@
// Commands from https://redis.io/commands#cluster
package miniredis
import (
"fmt"
"strings"
"github.com/alicebob/miniredis/v2/server"
)
// commandsCluster handles some cluster operations.
// It registers the CLUSTER command dispatcher on the server.
func commandsCluster(m *Miniredis) {
	m.srv.Register("CLUSTER", m.cmdCluster)
}
// cmdCluster dispatches the supported CLUSTER subcommands (SLOTS, KEYSLOT,
// NODES); any other subcommand yields a "not supported" error reply.
func (m *Miniredis) cmdCluster(c *server.Peer, cmd string, args []string) {
	if !m.handleAuth(c) {
		return
	}
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	sub := strings.ToUpper(args[0])
	switch sub {
	case "SLOTS":
		m.cmdClusterSlots(c, cmd, args)
	case "KEYSLOT":
		m.cmdClusterKeySlot(c, cmd, args)
	case "NODES":
		m.cmdClusterNodes(c, cmd, args)
	default:
		setDirty(c)
		c.WriteError(fmt.Sprintf("ERR 'CLUSTER %s' not supported", strings.Join(args, " ")))
	}
}
// CLUSTER SLOTS
// cmdClusterSlots replies with a single static slot range (0-16383) owned by
// this server's own address, under a fixed node ID.
func (m *Miniredis) cmdClusterSlots(c *server.Peer, cmd string, args []string) {
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		c.WriteLen(1)
		c.WriteLen(3)
		c.WriteInt(0)
		c.WriteInt(16383)
		c.WriteLen(3)
		c.WriteBulk(m.srv.Addr().IP.String())
		c.WriteInt(m.srv.Addr().Port)
		c.WriteBulk("09dbe9720cda62f7865eabc5fd8857c5d2678366")
	})
}
// CLUSTER KEYSLOT
// cmdClusterKeySlot is a stub: it replies with the fixed slot 163 regardless
// of the key, rather than computing a real hash slot.
func (m *Miniredis) cmdClusterKeySlot(c *server.Peer, cmd string, args []string) {
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		c.WriteInt(163)
	})
}
// CLUSTER NODES
// cmdClusterNodes replies with a single static node line describing this
// instance as a standalone master owning all slots.
func (m *Miniredis) cmdClusterNodes(c *server.Peer, cmd string, args []string) {
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		c.WriteBulk("e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:7000@7000 myself,master - 0 0 1 connected 0-16383")
	})
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,284 +0,0 @@
// Commands from https://redis.io/commands#connection
package miniredis
import (
"fmt"
"strings"
"github.com/alicebob/miniredis/v2/server"
)
// commandsConnection registers the connection-level command handlers
// (AUTH, ECHO, HELLO, PING, QUIT, SELECT, SWAPDB) on the server.
func commandsConnection(m *Miniredis) {
	m.srv.Register("AUTH", m.cmdAuth)
	m.srv.Register("ECHO", m.cmdEcho)
	m.srv.Register("HELLO", m.cmdHello)
	m.srv.Register("PING", m.cmdPing)
	m.srv.Register("QUIT", m.cmdQuit)
	m.srv.Register("SELECT", m.cmdSelect)
	m.srv.Register("SWAPDB", m.cmdSwapdb)
}
// PING
// cmdPing replies PONG (inline) or echoes the optional payload. While the
// connection is in subscribed state it replies with the two-element array
// ["pong", payload] outside of any transaction.
func (m *Miniredis) cmdPing(c *server.Peer, cmd string, args []string) {
	if !m.handleAuth(c) {
		return
	}
	if len(args) > 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	payload := ""
	if len(args) > 0 {
		payload = args[0]
	}
	// PING is allowed in subscribed state
	if sub := getCtx(c).subscriber; sub != nil {
		c.Block(func(c *server.Writer) {
			c.WriteLen(2)
			c.WriteBulk("pong")
			c.WriteBulk(payload)
		})
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		if payload == "" {
			// Bare PING gets the inline +PONG reply.
			c.WriteInline("PONG")
			return
		}
		c.WriteBulk(payload)
	})
}
// AUTH
// cmdAuth handles AUTH [username] password. With one argument it
// authenticates the "default" user; with two it authenticates the named
// user against m.passwords. On success the connection is marked
// authenticated.
func (m *Miniredis) cmdAuth(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if len(args) > 2 {
		c.WriteError(msgSyntaxError)
		return
	}
	// AUTH is refused while subscribed and from within scripts.
	if m.checkPubsub(c, cmd) {
		return
	}
	if getCtx(c).nested {
		c.WriteError(msgNotFromScripts)
		return
	}
	var opts = struct {
		username string
		password string
	}{
		// Single-argument form targets the default user.
		username: "default",
		password: args[0],
	}
	if len(args) == 2 {
		opts.username, opts.password = args[0], args[1]
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		if len(m.passwords) == 0 && opts.username == "default" {
			c.WriteError("ERR AUTH <password> called without any password configured for the default user. Are you sure your configuration is correct?")
			return
		}
		setPW, ok := m.passwords[opts.username]
		if !ok {
			// Same reply for unknown user and wrong password, like Redis.
			c.WriteError("WRONGPASS invalid username-password pair")
			return
		}
		if setPW != opts.password {
			c.WriteError("WRONGPASS invalid username-password pair")
			return
		}
		ctx.authenticated = true
		c.WriteOK()
	})
}
// HELLO
// cmdHello handles HELLO protover [AUTH user pass] [SETNAME name]. It
// switches the connection to RESP2 or RESP3, optionally authenticates, and
// replies with a 7-entry map describing the server.
func (m *Miniredis) cmdHello(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		c.WriteError(errWrongNumber(cmd))
		return
	}
	var opts struct {
		version  int
		username string
		password string
	}
	if ok := optIntErr(c, args[0], &opts.version, "ERR Protocol version is not an integer or out of range"); !ok {
		return
	}
	args = args[1:]
	// Only protocol versions 2 and 3 are accepted.
	switch opts.version {
	case 2, 3:
	default:
		c.WriteError("NOPROTO unsupported protocol version")
		return
	}
	var checkAuth bool
	for len(args) > 0 {
		switch strings.ToUpper(args[0]) {
		case "AUTH":
			if len(args) < 3 {
				c.WriteError(fmt.Sprintf("ERR Syntax error in HELLO option '%s'", args[0]))
				return
			}
			opts.username, opts.password, args = args[1], args[2], args[3:]
			checkAuth = true
		case "SETNAME":
			// The connection name is accepted but discarded.
			if len(args) < 2 {
				c.WriteError(fmt.Sprintf("ERR Syntax error in HELLO option '%s'", args[0]))
				return
			}
			_, args = args[1], args[2:]
		default:
			c.WriteError(fmt.Sprintf("ERR Syntax error in HELLO option '%s'", args[0]))
			return
		}
	}
	if len(m.passwords) == 0 && opts.username == "default" {
		// redis ignores legacy "AUTH" if it's not enabled.
		checkAuth = false
	}
	if checkAuth {
		setPW, ok := m.passwords[opts.username]
		if !ok {
			c.WriteError("WRONGPASS invalid username-password pair")
			return
		}
		if setPW != opts.password {
			c.WriteError("WRONGPASS invalid username-password pair")
			return
		}
		getCtx(c).authenticated = true
	}
	// Switch the wire protocol, then send the server-description map.
	c.Resp3 = opts.version == 3
	c.WriteMapLen(7)
	c.WriteBulk("server")
	c.WriteBulk("miniredis")
	c.WriteBulk("version")
	c.WriteBulk("6.0.5")
	c.WriteBulk("proto")
	c.WriteInt(opts.version)
	c.WriteBulk("id")
	c.WriteInt(42)
	c.WriteBulk("mode")
	c.WriteBulk("standalone")
	c.WriteBulk("role")
	c.WriteBulk("master")
	c.WriteBulk("modules")
	c.WriteLen(0)
}
// ECHO
// cmdEcho replies with its single argument, unchanged, as a bulk string.
func (m *Miniredis) cmdEcho(c *server.Peer, cmd string, args []string) {
	// ECHO takes exactly one argument.
	if len(args) != 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	reply := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		// No state is touched; just mirror the payload back.
		c.WriteBulk(reply)
	})
}
// SELECT
// cmdSelect switches the connection to another database index. Databases
// are created lazily, so any non-negative index is accepted.
func (m *Miniredis) cmdSelect(c *server.Peer, cmd string, args []string) {
	if len(args) != 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.isValidCMD(c, cmd) {
		return
	}

	var opts struct {
		id int
	}

	if ok := optInt(c, args[0], &opts.id); !ok {
		return
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		if opts.id < 0 {
			// Negative index: reply with an error and poison any MULTI.
			c.WriteError(msgDBIndexOutOfRange)
			setDirty(c)
			return
		}

		ctx.selectedDB = opts.id

		c.WriteOK()
	})
}
// SWAPDB
// cmdSwapdb exchanges the contents of two databases by index. Connections
// keep their selected index, so they observe the swapped data.
func (m *Miniredis) cmdSwapdb(c *server.Peer, cmd string, args []string) {
	if len(args) != 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}

	var opts struct {
		id1 int
		id2 int
	}

	if ok := optIntErr(c, args[0], &opts.id1, "ERR invalid first DB index"); !ok {
		return
	}
	if ok := optIntErr(c, args[1], &opts.id2, "ERR invalid second DB index"); !ok {
		return
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		if opts.id1 < 0 || opts.id2 < 0 {
			// Out-of-range index: error and poison any MULTI.
			c.WriteError(msgDBIndexOutOfRange)
			setDirty(c)
			return
		}

		// Swapping a DB with itself is a no-op and still replies OK.
		m.swapDB(opts.id1, opts.id2)

		c.WriteOK()
	})
}
// QUIT
// cmdQuit acknowledges with OK and closes the connection.
func (m *Miniredis) cmdQuit(c *server.Peer, cmd string, args []string) {
	// QUIT isn't transactionfied and accepts any arguments.
	c.WriteOK()
	c.Close()
}

View File

@@ -1,669 +0,0 @@
// Commands from https://redis.io/commands#generic
package miniredis
import (
"sort"
"strconv"
"strings"
"time"
"github.com/alicebob/miniredis/v2/server"
)
// commandsGeneric handles EXPIRE, TTL, PERSIST, &c.
func commandsGeneric(m *Miniredis) {
	// Table-driven registration of all generic key commands. Commands that
	// real Redis has but miniredis does not implement are noted inline.
	for _, reg := range []struct {
		name    string
		handler func(*server.Peer, string, []string)
	}{
		{"COPY", m.cmdCopy},
		{"DEL", m.cmdDel},
		// DUMP is not implemented
		{"EXISTS", m.cmdExists},
		{"EXPIRE", makeCmdExpire(m, false, time.Second)},
		{"EXPIREAT", makeCmdExpire(m, true, time.Second)},
		{"KEYS", m.cmdKeys},
		// MIGRATE is not implemented
		{"MOVE", m.cmdMove},
		// OBJECT is not implemented
		{"PERSIST", m.cmdPersist},
		{"PEXPIRE", makeCmdExpire(m, false, time.Millisecond)},
		{"PEXPIREAT", makeCmdExpire(m, true, time.Millisecond)},
		{"PTTL", m.cmdPTTL},
		{"RANDOMKEY", m.cmdRandomkey},
		{"RENAME", m.cmdRename},
		{"RENAMENX", m.cmdRenamenx},
		// RESTORE is not implemented
		{"TOUCH", m.cmdTouch},
		{"TTL", m.cmdTTL},
		{"TYPE", m.cmdType},
		{"SCAN", m.cmdScan},
		// SORT is not implemented
		{"UNLINK", m.cmdDel}, // UNLINK behaves exactly like DEL here
	} {
		m.srv.Register(reg.name, reg.handler)
	}
}
// generic expire command for EXPIRE, PEXPIRE, EXPIREAT, PEXPIREAT
// d is the time unit. If unix is set it'll be seen as a unixtimestamp and
// converted to a duration.
// Returns a handler that replies 1 when the TTL was set and 0 when the key
// does not exist.
func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, string, []string) {
	return func(c *server.Peer, cmd string, args []string) {
		if len(args) != 2 {
			setDirty(c)
			c.WriteError(errWrongNumber(cmd))
			return
		}
		if !m.handleAuth(c) {
			return
		}
		if m.checkPubsub(c, cmd) {
			return
		}

		var opts struct {
			key   string
			value int
		}
		opts.key = args[0]
		if ok := optInt(c, args[1], &opts.value); !ok {
			return
		}

		withTx(m, c, func(c *server.Peer, ctx *connCtx) {
			db := m.db(ctx.selectedDB)

			// Key must be present.
			if _, ok := db.keys[opts.key]; !ok {
				c.WriteInt(0)
				return
			}
			if unix {
				// *AT variant: value is an absolute timestamp in units of d.
				db.ttl[opts.key] = m.at(opts.value, d)
			} else {
				// Relative variant: value is a duration in units of d.
				db.ttl[opts.key] = time.Duration(opts.value) * d
			}
			db.keyVersion[opts.key]++
			// A zero or negative TTL deletes the key immediately.
			db.checkTTL(opts.key)
			c.WriteInt(1)
		})
	}
}
// TOUCH
// cmdTouch reports how many of the given keys exist. miniredis keeps no
// access times, so nothing else is updated.
func (m *Miniredis) cmdTouch(c *server.Peer, cmd string, args []string) {
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	if len(args) == 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		// Duplicate keys are counted once per occurrence, like Redis.
		touched := 0
		for _, k := range args {
			if db.exists(k) {
				touched++
			}
		}
		c.WriteInt(touched)
	})
}
// TTL
// cmdTTL replies with the remaining time to live of a key in whole
// seconds, -2 when the key does not exist, and -1 when it has no expiry.
func (m *Miniredis) cmdTTL(c *server.Peer, cmd string, args []string) {
	if len(args) != 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if _, ok := db.keys[key]; !ok {
			// No such key
			c.WriteInt(-2)
			return
		}

		v, ok := db.ttl[key]
		if !ok {
			// no expire value
			c.WriteInt(-1)
			return
		}
		// Truncates toward zero; sub-second remainders are dropped.
		c.WriteInt(int(v.Seconds()))
	})
}
// PTTL
// cmdPTTL replies with the remaining time to live of a key in
// milliseconds, -2 when the key does not exist, and -1 when it exists but
// has no expiry set.
func (m *Miniredis) cmdPTTL(c *server.Peer, cmd string, args []string) {
	if len(args) != 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if _, ok := db.keys[key]; !ok {
			// no such key
			c.WriteInt(-2)
			return
		}

		v, ok := db.ttl[key]
		if !ok {
			// no expire value
			c.WriteInt(-1)
			return
		}
		// Duration.Milliseconds replaces the hand-rolled
		// Nanoseconds()/1000000 division; same truncation semantics.
		c.WriteInt(int(v.Milliseconds()))
	})
}
// PERSIST
// cmdPersist removes the expiry from a key. Replies 1 when a TTL was
// cleared, 0 when the key is missing or had no TTL.
func (m *Miniredis) cmdPersist(c *server.Peer, cmd string, args []string) {
	if len(args) != 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if _, ok := db.keys[key]; !ok {
			// no such key
			c.WriteInt(0)
			return
		}

		if _, ok := db.ttl[key]; !ok {
			// no expire value
			c.WriteInt(0)
			return
		}
		delete(db.ttl, key)
		// Bump the version so WATCHers see the change.
		db.keyVersion[key]++
		c.WriteInt(1)
	})
}
// DEL and UNLINK
// cmdDel deletes the given keys (UNLINK is registered to this same
// handler) and replies with the number of keys that existed.
func (m *Miniredis) cmdDel(c *server.Peer, cmd string, args []string) {
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	if len(args) == 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		count := 0
		for _, key := range args {
			// Count before deleting; db.del on a missing key is a no-op.
			if db.exists(key) {
				count++
			}
			db.del(key, true) // delete expire
		}
		c.WriteInt(count)
	})
}
// TYPE
// cmdType replies with the type of a key ("string", "hash", ...) as an
// inline string, or "none" when the key does not exist.
func (m *Miniredis) cmdType(c *server.Peer, cmd string, args []string) {
	if len(args) != 1 {
		setDirty(c)
		// Standard wrong-arity error, consistent with every other handler
		// (previously replied a bare "usage error").
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		t, ok := db.keys[key]
		if !ok {
			c.WriteInline("none")
			return
		}

		c.WriteInline(t)
	})
}
// EXISTS
// cmdExists replies with the number of the given keys that exist;
// repeated keys are counted every time they appear.
func (m *Miniredis) cmdExists(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		n := 0
		for _, key := range args {
			if db.exists(key) {
				n++
			}
		}
		c.WriteInt(n)
	})
}
// MOVE
// cmdMove moves a key from the selected database to another one. Replies
// 1 on success, 0 when the key is absent or already exists in the target.
func (m *Miniredis) cmdMove(c *server.Peer, cmd string, args []string) {
	if len(args) != 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	var opts struct {
		key      string
		targetDB int
	}

	opts.key = args[0]
	// Validate the DB index like the other handlers do; previously a
	// non-numeric index was silently treated as DB 0.
	if ok := optInt(c, args[1], &opts.targetDB); !ok {
		return
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		if ctx.selectedDB == opts.targetDB {
			c.WriteError("ERR source and destination objects are the same")
			return
		}
		db := m.db(ctx.selectedDB)
		targetDB := m.db(opts.targetDB)

		if !db.move(opts.key, targetDB) {
			// Key missing, or it already exists in the target DB.
			c.WriteInt(0)
			return
		}
		c.WriteInt(1)
	})
}
// KEYS
// cmdKeys replies with all keys in the selected DB matching the given
// glob-style pattern.
func (m *Miniredis) cmdKeys(c *server.Peer, cmd string, args []string) {
	if len(args) != 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		// allKeys() returns a sorted copy, so replies are deterministic.
		keys, _ := matchKeys(db.allKeys(), key)
		c.WriteLen(len(keys))
		for _, s := range keys {
			c.WriteBulk(s)
		}
	})
}
// RANDOMKEY
// cmdRandomkey replies with a uniformly random key from the selected DB,
// or nil when the DB is empty.
func (m *Miniredis) cmdRandomkey(c *server.Peer, cmd string, args []string) {
	if len(args) != 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if len(db.keys) == 0 {
			c.WriteNull()
			return
		}
		// Pick the nr-th key in (arbitrary) map iteration order; nr is
		// drawn from the server's seeded RNG so tests can be reproducible.
		nr := m.randIntn(len(db.keys))
		for k := range db.keys {
			if nr == 0 {
				c.WriteBulk(k)
				return
			}
			nr--
		}
	})
}
// RENAME
// cmdRename renames a key, overwriting the destination if it exists.
// Errors when the source key is missing.
func (m *Miniredis) cmdRename(c *server.Peer, cmd string, args []string) {
	if len(args) != 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	opts := struct {
		from string
		to   string
	}{
		from: args[0],
		to:   args[1],
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if !db.exists(opts.from) {
			c.WriteError(msgKeyNotFound)
			return
		}

		// rename moves value, TTL, and bumps versions of both keys.
		db.rename(opts.from, opts.to)
		c.WriteOK()
	})
}
// RENAMENX
// cmdRenamenx renames a key only when the destination does not yet exist.
// Replies 1 on rename, 0 when the destination exists; errors when the
// source is missing.
func (m *Miniredis) cmdRenamenx(c *server.Peer, cmd string, args []string) {
	if len(args) != 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	opts := struct {
		from string
		to   string
	}{
		from: args[0],
		to:   args[1],
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if !db.exists(opts.from) {
			c.WriteError(msgKeyNotFound)
			return
		}

		if db.exists(opts.to) {
			// Destination taken: NX semantics forbid the overwrite.
			c.WriteInt(0)
			return
		}

		db.rename(opts.from, opts.to)
		c.WriteInt(1)
	})
}
// SCAN
// cmdScan implements SCAN cursor [MATCH pat] [COUNT n] [TYPE t]. miniredis
// returns every (matching) key in one pass: cursor 0 yields everything and
// the reply cursor is always "0"; any other cursor yields an empty result.
func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	var opts struct {
		cursor    int
		withMatch bool
		match     string
		withType  bool
		_type     string
	}

	if ok := optIntErr(c, args[0], &opts.cursor, msgInvalidCursor); !ok {
		return
	}
	args = args[1:]

	// MATCH, COUNT and TYPE options
	for len(args) > 0 {
		if strings.ToLower(args[0]) == "count" {
			// we do nothing with count, but it must still be a valid int
			if len(args) < 2 {
				setDirty(c)
				c.WriteError(msgSyntaxError)
				return
			}
			if _, err := strconv.Atoi(args[1]); err != nil {
				setDirty(c)
				c.WriteError(msgInvalidInt)
				return
			}
			args = args[2:]
			continue
		}
		if strings.ToLower(args[0]) == "match" {
			if len(args) < 2 {
				setDirty(c)
				c.WriteError(msgSyntaxError)
				return
			}
			opts.withMatch = true
			opts.match, args = args[1], args[2:]
			continue
		}
		if strings.ToLower(args[0]) == "type" {
			if len(args) < 2 {
				setDirty(c)
				c.WriteError(msgSyntaxError)
				return
			}
			opts.withType = true
			// Type names are compared case-insensitively.
			opts._type, args = strings.ToLower(args[1]), args[2:]
			continue
		}
		setDirty(c)
		c.WriteError(msgSyntaxError)
		return
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		// We return _all_ (matched) keys every time.

		if opts.cursor != 0 {
			// Invalid cursor.
			c.WriteLen(2)
			c.WriteBulk("0") // no next cursor
			c.WriteLen(0)    // no elements
			return
		}

		var keys []string

		if opts.withType {
			keys = make([]string, 0)
			for k, t := range db.keys {
				// type must be given exactly; no pattern matching is performed
				if t == opts._type {
					keys = append(keys, k)
				}
			}
			sort.Strings(keys) // To make things deterministic.
		} else {
			keys = db.allKeys()
		}

		if opts.withMatch {
			keys, _ = matchKeys(keys, opts.match)
		}

		c.WriteLen(2)
		c.WriteBulk("0") // no next cursor
		c.WriteLen(len(keys))
		for _, k := range keys {
			c.WriteBulk(k)
		}
	})
}
// COPY
// cmdCopy implements COPY src dst [DB n] [REPLACE]. Replies 1 when the
// value was copied and 0 when the source is missing or the destination
// already exists (without REPLACE).
func (m *Miniredis) cmdCopy(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	var opts = struct {
		from          string
		to            string
		destinationDB int
		replace       bool
	}{
		// -1 means "no DB option given": copy within the selected DB.
		destinationDB: -1,
	}

	opts.from, opts.to, args = args[0], args[1], args[2:]
	for len(args) > 0 {
		switch strings.ToLower(args[0]) {
		case "db":
			if len(args) < 2 {
				setDirty(c)
				c.WriteError(msgSyntaxError)
				return
			}
			db, err := strconv.Atoi(args[1])
			if err != nil {
				setDirty(c)
				c.WriteError(msgInvalidInt)
				return
			}
			if db < 0 {
				setDirty(c)
				c.WriteError(msgDBIndexOutOfRange)
				return
			}
			opts.destinationDB = db
			args = args[2:]
		case "replace":
			opts.replace = true
			args = args[1:]
		default:
			setDirty(c)
			c.WriteError(msgSyntaxError)
			return
		}
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		fromDB, toDB := ctx.selectedDB, opts.destinationDB
		if toDB == -1 {
			toDB = fromDB
		}

		if fromDB == toDB && opts.from == opts.to {
			c.WriteError("ERR source and destination objects are the same")
			return
		}

		if !m.db(fromDB).exists(opts.from) {
			c.WriteInt(0)
			return
		}

		if !opts.replace {
			// Without REPLACE an existing destination blocks the copy.
			if m.db(toDB).exists(opts.to) {
				c.WriteInt(0)
				return
			}
		}

		m.copy(m.db(fromDB), opts.from, m.db(toDB), opts.to)
		c.WriteInt(1)
	})
}

View File

@@ -1,609 +0,0 @@
// Commands from https://redis.io/commands#geo
package miniredis
import (
"fmt"
"sort"
"strconv"
"strings"
"github.com/alicebob/miniredis/v2/server"
)
// commandsGeo handles GEOADD, GEORADIUS etc.
func commandsGeo(m *Miniredis) {
	// Table-driven registration. The _RO variants share the same handlers,
	// which reject STORE/STOREDIST themselves based on the command name.
	for _, reg := range []struct {
		name    string
		handler func(*server.Peer, string, []string)
	}{
		{"GEOADD", m.cmdGeoadd},
		{"GEODIST", m.cmdGeodist},
		{"GEOPOS", m.cmdGeopos},
		{"GEORADIUS", m.cmdGeoradius},
		{"GEORADIUS_RO", m.cmdGeoradius},
		{"GEORADIUSBYMEMBER", m.cmdGeoradiusbymember},
		{"GEORADIUSBYMEMBER_RO", m.cmdGeoradiusbymember},
	} {
		m.srv.Register(reg.name, reg.handler)
	}
}
// GEOADD
// cmdGeoadd adds longitude/latitude/member triples to a sorted set, with
// the geohash of the position as the score. Replies with the number of
// newly added members.
func (m *Miniredis) cmdGeoadd(c *server.Peer, cmd string, args []string) {
	// Arguments after the key must come in complete triples.
	if len(args) < 3 || len(args[1:])%3 != 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key, args := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if db.exists(key) && db.t(key) != "zset" {
			c.WriteError(ErrWrongType.Error())
			return
		}

		// Validate and collect all triples before touching the DB, so an
		// invalid coordinate aborts without partial writes.
		toSet := map[string]float64{}
		for len(args) > 2 {
			rawLong, rawLat, name := args[0], args[1], args[2]
			args = args[3:]
			longitude, err := strconv.ParseFloat(rawLong, 64)
			if err != nil {
				c.WriteError("ERR value is not a valid float")
				return
			}
			latitude, err := strconv.ParseFloat(rawLat, 64)
			if err != nil {
				c.WriteError("ERR value is not a valid float")
				return
			}

			// Latitude range matches the geohash-encodable web-mercator
			// limits used by Redis.
			if latitude < -85.05112878 ||
				latitude > 85.05112878 ||
				longitude < -180 ||
				longitude > 180 {
				c.WriteError(fmt.Sprintf("ERR invalid longitude,latitude pair %.6f,%.6f", longitude, latitude))
				return
			}

			toSet[name] = float64(toGeohash(longitude, latitude))
		}

		set := 0
		for name, score := range toSet {
			// ssetAdd reports true only for members that did not exist.
			if db.ssetAdd(key, score, name) {
				set++
			}
		}
		c.WriteInt(set)
	})
}
// GEODIST
// cmdGeodist replies with the distance between two members of a geo set,
// formatted with 4 decimals in the requested unit (default meters). Nil
// when the key or either member is missing.
func (m *Miniredis) cmdGeodist(c *server.Peer, cmd string, args []string) {
	if len(args) < 3 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key, from, to, args := args[0], args[1], args[2], args[3:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if !db.exists(key) {
			c.WriteNull()
			return
		}
		if db.t(key) != "zset" {
			c.WriteError(ErrWrongType.Error())
			return
		}

		// Optional trailing unit argument; anything after it is an error.
		unit := "m"
		if len(args) > 0 {
			unit, args = args[0], args[1:]
		}
		if len(args) > 0 {
			c.WriteError(msgSyntaxError)
			return
		}

		toMeter := parseUnit(unit)
		if toMeter == 0 {
			// parseUnit returns 0 for unsupported units.
			c.WriteError(msgUnsupportedUnit)
			return
		}

		members := db.sortedsetKeys[key]
		fromD, okFrom := members.get(from)
		toD, okTo := members.get(to)
		if !okFrom || !okTo {
			c.WriteNull()
			return
		}

		// Scores are geohashes; decode both and compute the great-circle
		// distance in meters, then convert to the requested unit.
		fromLo, fromLat := fromGeohash(uint64(fromD))
		toLo, toLat := fromGeohash(uint64(toD))
		dist := distance(fromLat, fromLo, toLat, toLo) / toMeter
		c.WriteBulk(fmt.Sprintf("%.4f", dist))
	})
}
// GEOPOS
// cmdGeopos replies with the [longitude, latitude] pair of each requested
// member, or a nil array entry for members not in the set.
func (m *Miniredis) cmdGeopos(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key, args := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if db.exists(key) && db.t(key) != "zset" {
			c.WriteError(ErrWrongType.Error())
			return
		}

		// One array entry per requested member, in request order.
		c.WriteLen(len(args))
		for _, l := range args {
			if !db.ssetExists(key, l) {
				// Nil array for a missing member.
				c.WriteLen(-1)
				continue
			}
			score := db.ssetScore(key, l)
			c.WriteLen(2)
			// Decode the stored geohash back into coordinates; precision
			// is limited by the hash, so values are approximate.
			long, lat := fromGeohash(uint64(score))
			c.WriteBulk(fmt.Sprintf("%f", long))
			c.WriteBulk(fmt.Sprintf("%f", lat))
		}
	})
}
// geoDistance is a single GEORADIUS* match: the member plus its decoded
// position and its distance from the query center.
type geoDistance struct {
	Name      string  // member name in the sorted set
	Score     float64 // raw geohash score as stored in the zset
	Distance  float64 // distance from the search origin, in meters
	Longitude float64 // decoded from the geohash
	Latitude  float64 // decoded from the geohash
}
// GEORADIUS and GEORADIUS_RO
// cmdGeoradius finds the members of a geo set within a radius of a given
// coordinate. Supports WITHCOORD/WITHDIST, ASC/DESC, COUNT, and (for the
// non-_RO command) STORE/STOREDIST.
func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) {
	if len(args) < 5 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	// Positional arguments: key longitude latitude radius unit.
	key := args[0]
	longitude, err := strconv.ParseFloat(args[1], 64)
	if err != nil {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	latitude, err := strconv.ParseFloat(args[2], 64)
	if err != nil {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	radius, err := strconv.ParseFloat(args[3], 64)
	if err != nil || radius < 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	toMeter := parseUnit(args[4])
	if toMeter == 0 {
		// parseUnit returns 0 for unsupported units.
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	args = args[5:]

	var opts struct {
		withDist      bool
		withCoord     bool
		direction     direction // unsorted
		count         int
		withStore     bool
		storeKey      string
		withStoredist bool
		storedistKey  string
	}

	// Optional flags, in any order.
	for len(args) > 0 {
		arg := args[0]
		args = args[1:]
		switch strings.ToUpper(arg) {
		case "WITHCOORD":
			opts.withCoord = true
		case "WITHDIST":
			opts.withDist = true
		case "ASC":
			opts.direction = asc
		case "DESC":
			opts.direction = desc
		case "COUNT":
			if len(args) == 0 {
				setDirty(c)
				c.WriteError("ERR syntax error")
				return
			}
			n, err := strconv.Atoi(args[0])
			if err != nil {
				setDirty(c)
				c.WriteError(msgInvalidInt)
				return
			}
			if n <= 0 {
				setDirty(c)
				c.WriteError("ERR COUNT must be > 0")
				return
			}
			args = args[1:]
			opts.count = n
		case "STORE":
			if len(args) == 0 {
				setDirty(c)
				c.WriteError("ERR syntax error")
				return
			}
			opts.withStore = true
			opts.storeKey = args[0]
			args = args[1:]
		case "STOREDIST":
			if len(args) == 0 {
				setDirty(c)
				c.WriteError("ERR syntax error")
				return
			}
			opts.withStoredist = true
			opts.storedistKey = args[0]
			args = args[1:]
		default:
			setDirty(c)
			c.WriteError("ERR syntax error")
			return
		}
	}

	// The read-only variant must not write anywhere.
	if strings.ToUpper(cmd) == "GEORADIUS_RO" && (opts.withStore || opts.withStoredist) {
		setDirty(c)
		c.WriteError("ERR syntax error")
		return
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		if (opts.withStore || opts.withStoredist) && (opts.withDist || opts.withCoord) {
			c.WriteError("ERR STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options")
			return
		}

		db := m.db(ctx.selectedDB)
		members := db.ssetElements(key)

		// Radius is converted to meters to match withinRadius.
		matches := withinRadius(members, longitude, latitude, radius*toMeter)

		// deal with ASC/DESC
		if opts.direction != unsorted {
			sort.Slice(matches, func(i, j int) bool {
				if opts.direction == desc {
					return matches[i].Distance > matches[j].Distance
				}
				return matches[i].Distance < matches[j].Distance
			})
		}

		// deal with COUNT (applied after sorting, like Redis)
		if opts.count > 0 && len(matches) > opts.count {
			matches = matches[:opts.count]
		}

		// deal with "STORE x": store matches as a zset keyed by geohash score.
		if opts.withStore {
			db.del(opts.storeKey, true)
			for _, member := range matches {
				db.ssetAdd(opts.storeKey, member.Score, member.Name)
			}
			c.WriteInt(len(matches))
			return
		}

		// deal with "STOREDIST x": store matches keyed by distance in the
		// requested unit.
		if opts.withStoredist {
			db.del(opts.storedistKey, true)
			for _, member := range matches {
				db.ssetAdd(opts.storedistKey, member.Distance/toMeter, member.Name)
			}
			c.WriteInt(len(matches))
			return
		}

		c.WriteLen(len(matches))
		for _, member := range matches {
			if !opts.withDist && !opts.withCoord {
				// Plain reply: just the member name.
				c.WriteBulk(member.Name)
				continue
			}

			// NOTE(review): "len" shadows the builtin here; harmless in
			// this scope but worth renaming eventually.
			len := 1
			if opts.withDist {
				len++
			}
			if opts.withCoord {
				len++
			}
			c.WriteLen(len)
			c.WriteBulk(member.Name)
			if opts.withDist {
				c.WriteBulk(fmt.Sprintf("%.4f", member.Distance/toMeter))
			}
			if opts.withCoord {
				c.WriteLen(2)
				c.WriteBulk(fmt.Sprintf("%f", member.Longitude))
				c.WriteBulk(fmt.Sprintf("%f", member.Latitude))
			}
		}
	})
}
// GEORADIUSBYMEMBER and GEORADIUSBYMEMBER_RO
// cmdGeoradiusbymember is GEORADIUS with the search center taken from an
// existing member's position instead of explicit coordinates.
func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []string) {
	if len(args) < 4 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	opts := struct {
		key           string
		member        string
		radius        float64
		toMeter       float64
		withDist      bool
		withCoord     bool
		direction     direction // unsorted
		count         int
		withStore     bool
		storeKey      string
		withStoredist bool
		storedistKey  string
	}{
		key:    args[0],
		member: args[1],
	}

	r, err := strconv.ParseFloat(args[2], 64)
	if err != nil || r < 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	opts.radius = r

	opts.toMeter = parseUnit(args[3])
	if opts.toMeter == 0 {
		// parseUnit returns 0 for unsupported units.
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	args = args[4:]

	// Optional flags, in any order.
	for len(args) > 0 {
		arg := args[0]
		args = args[1:]
		switch strings.ToUpper(arg) {
		case "WITHCOORD":
			opts.withCoord = true
		case "WITHDIST":
			opts.withDist = true
		case "ASC":
			opts.direction = asc
		case "DESC":
			opts.direction = desc
		case "COUNT":
			if len(args) == 0 {
				setDirty(c)
				c.WriteError("ERR syntax error")
				return
			}
			n, err := strconv.Atoi(args[0])
			if err != nil {
				setDirty(c)
				c.WriteError(msgInvalidInt)
				return
			}
			if n <= 0 {
				setDirty(c)
				c.WriteError("ERR COUNT must be > 0")
				return
			}
			args = args[1:]
			opts.count = n
		case "STORE":
			if len(args) == 0 {
				setDirty(c)
				c.WriteError("ERR syntax error")
				return
			}
			opts.withStore = true
			opts.storeKey = args[0]
			args = args[1:]
		case "STOREDIST":
			if len(args) == 0 {
				setDirty(c)
				c.WriteError("ERR syntax error")
				return
			}
			opts.withStoredist = true
			opts.storedistKey = args[0]
			args = args[1:]
		default:
			setDirty(c)
			c.WriteError("ERR syntax error")
			return
		}
	}

	// The read-only variant must not write anywhere.
	if strings.ToUpper(cmd) == "GEORADIUSBYMEMBER_RO" && (opts.withStore || opts.withStoredist) {
		setDirty(c)
		c.WriteError("ERR syntax error")
		return
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		if (opts.withStore || opts.withStoredist) && (opts.withDist || opts.withCoord) {
			c.WriteError("ERR STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options")
			return
		}

		db := m.db(ctx.selectedDB)
		if !db.exists(opts.key) {
			c.WriteNull()
			return
		}

		if db.t(opts.key) != "zset" {
			c.WriteError(ErrWrongType.Error())
			return
		}

		// get position of member
		if !db.ssetExists(opts.key, opts.member) {
			c.WriteError("ERR could not decode requested zset member")
			return
		}
		score := db.ssetScore(opts.key, opts.member)
		longitude, latitude := fromGeohash(uint64(score))

		members := db.ssetElements(opts.key)
		// Radius is converted to meters to match withinRadius.
		matches := withinRadius(members, longitude, latitude, opts.radius*opts.toMeter)

		// deal with ASC/DESC
		if opts.direction != unsorted {
			sort.Slice(matches, func(i, j int) bool {
				if opts.direction == desc {
					return matches[i].Distance > matches[j].Distance
				}
				return matches[i].Distance < matches[j].Distance
			})
		}

		// deal with COUNT (applied after sorting, like Redis)
		if opts.count > 0 && len(matches) > opts.count {
			matches = matches[:opts.count]
		}

		// deal with "STORE x": store matches as a zset keyed by geohash score.
		if opts.withStore {
			db.del(opts.storeKey, true)
			for _, member := range matches {
				db.ssetAdd(opts.storeKey, member.Score, member.Name)
			}
			c.WriteInt(len(matches))
			return
		}

		// deal with "STOREDIST x": store matches keyed by distance in the
		// requested unit.
		if opts.withStoredist {
			db.del(opts.storedistKey, true)
			for _, member := range matches {
				db.ssetAdd(opts.storedistKey, member.Distance/opts.toMeter, member.Name)
			}
			c.WriteInt(len(matches))
			return
		}

		c.WriteLen(len(matches))
		for _, member := range matches {
			if !opts.withDist && !opts.withCoord {
				// Plain reply: just the member name.
				c.WriteBulk(member.Name)
				continue
			}

			// NOTE(review): "len" shadows the builtin here; harmless in
			// this scope but worth renaming eventually.
			len := 1
			if opts.withDist {
				len++
			}
			if opts.withCoord {
				len++
			}
			c.WriteLen(len)
			c.WriteBulk(member.Name)
			if opts.withDist {
				c.WriteBulk(fmt.Sprintf("%.4f", member.Distance/opts.toMeter))
			}
			if opts.withCoord {
				c.WriteLen(2)
				c.WriteBulk(fmt.Sprintf("%f", member.Longitude))
				c.WriteBulk(fmt.Sprintf("%f", member.Latitude))
			}
		}
	})
}
// withinRadius selects the members whose geohash-decoded position lies
// within radius meters of (longitude, latitude), returning each match
// together with its decoded coordinates and distance from the center.
func withinRadius(members []ssElem, longitude, latitude, radius float64) []geoDistance {
	res := []geoDistance{}
	for _, elem := range members {
		lo, lat := fromGeohash(uint64(elem.score))
		d := distance(latitude, longitude, lat, lo)
		if d > radius {
			continue
		}
		res = append(res, geoDistance{
			Name:      elem.member,
			Score:     elem.score,
			Distance:  d,
			Longitude: lo,
			Latitude:  lat,
		})
	}
	return res
}
// parseUnit translates a distance unit name into its size in meters.
// Unknown units yield 0, which callers treat as an error.
func parseUnit(u string) float64 {
	factors := map[string]float64{
		"m":  1,
		"km": 1000,
		"mi": 1609.34,
		"ft": 0.3048,
	}
	// The zero value covers unsupported units.
	return factors[u]
}

View File

@@ -1,683 +0,0 @@
// Commands from https://redis.io/commands#hash
package miniredis
import (
"math/big"
"strconv"
"strings"
"github.com/alicebob/miniredis/v2/server"
)
// commandsHash handles all hash value operations.
func commandsHash(m *Miniredis) {
	// Table-driven registration of all hash commands.
	for _, reg := range []struct {
		name    string
		handler func(*server.Peer, string, []string)
	}{
		{"HDEL", m.cmdHdel},
		{"HEXISTS", m.cmdHexists},
		{"HGET", m.cmdHget},
		{"HGETALL", m.cmdHgetall},
		{"HINCRBY", m.cmdHincrby},
		{"HINCRBYFLOAT", m.cmdHincrbyfloat},
		{"HKEYS", m.cmdHkeys},
		{"HLEN", m.cmdHlen},
		{"HMGET", m.cmdHmget},
		{"HMSET", m.cmdHmset},
		{"HSET", m.cmdHset},
		{"HSETNX", m.cmdHsetnx},
		{"HSTRLEN", m.cmdHstrlen},
		{"HVALS", m.cmdHvals},
		{"HSCAN", m.cmdHscan},
	} {
		m.srv.Register(reg.name, reg.handler)
	}
}
// HSET
// cmdHset stores one or more field/value pairs in a hash and replies with
// the number of fields that were newly created.
func (m *Miniredis) cmdHset(c *server.Peer, cmd string, args []string) {
	if len(args) < 3 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key, pairs := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		// Field/value arguments must come in complete pairs.
		if len(pairs)%2 == 1 {
			c.WriteError(errWrongNumber(cmd))
			return
		}
		if t, ok := db.keys[key]; ok && t != "hash" {
			c.WriteError(msgWrongType)
			return
		}

		added := db.hashSet(key, pairs...)
		c.WriteInt(added)
	})
}
// HSETNX
// cmdHsetnx sets a hash field only when it does not exist yet. Replies 1
// when the field was set, 0 when it already existed.
func (m *Miniredis) cmdHsetnx(c *server.Peer, cmd string, args []string) {
	if len(args) != 3 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	opts := struct {
		key   string
		field string
		value string
	}{
		key:   args[0],
		field: args[1],
		value: args[2],
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if t, ok := db.keys[opts.key]; ok && t != "hash" {
			c.WriteError(msgWrongType)
			return
		}

		// Create the hash lazily when the key does not exist yet.
		if _, ok := db.hashKeys[opts.key]; !ok {
			db.hashKeys[opts.key] = map[string]string{}
			db.keys[opts.key] = "hash"
		}
		_, ok := db.hashKeys[opts.key][opts.field]
		if ok {
			// NX semantics: an existing field is left untouched.
			c.WriteInt(0)
			return
		}
		db.hashKeys[opts.key][opts.field] = opts.value
		db.keyVersion[opts.key]++
		c.WriteInt(1)
	})
}
// HMSET
// cmdHmset sets multiple field/value pairs in a hash and replies OK
// (the deprecated predecessor of variadic HSET).
func (m *Miniredis) cmdHmset(c *server.Peer, cmd string, args []string) {
	if len(args) < 3 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key, args := args[0], args[1:]
	// Field/value arguments must come in complete pairs.
	if len(args)%2 != 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if t, ok := db.keys[key]; ok && t != "hash" {
			c.WriteError(msgWrongType)
			return
		}

		for len(args) > 0 {
			field, value := args[0], args[1]
			args = args[2:]

			db.hashSet(key, field, value)
		}
		c.WriteOK()
	})
}
// HGET
// cmdHget replies with the value of a single hash field, or nil when the
// key or the field does not exist.
func (m *Miniredis) cmdHget(c *server.Peer, cmd string, args []string) {
	if len(args) != 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	hashKey, field := args[0], args[1]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		keyType, exists := db.keys[hashKey]
		switch {
		case !exists:
			// Missing key: nil reply.
			c.WriteNull()
		case keyType != "hash":
			c.WriteError(msgWrongType)
		default:
			if value, ok := db.hashKeys[hashKey][field]; ok {
				c.WriteBulk(value)
			} else {
				// Missing field: nil reply.
				c.WriteNull()
			}
		}
	})
}
// HDEL
// cmdHdel removes fields from a hash and replies with the number of
// fields actually deleted. An emptied hash is removed entirely.
func (m *Miniredis) cmdHdel(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	opts := struct {
		key    string
		fields []string
	}{
		key:    args[0],
		fields: args[1:],
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		t, ok := db.keys[opts.key]
		if !ok {
			// No key is zero deleted
			c.WriteInt(0)
			return
		}
		if t != "hash" {
			c.WriteError(msgWrongType)
			return
		}

		deleted := 0
		for _, f := range opts.fields {
			_, ok := db.hashKeys[opts.key][f]
			if !ok {
				continue
			}
			delete(db.hashKeys[opts.key], f)
			deleted++
		}
		c.WriteInt(deleted)

		// Nothing left. Remove the whole key.
		if len(db.hashKeys[opts.key]) == 0 {
			db.del(opts.key, true)
		}
	})
}
// HEXISTS
// cmdHexists replies 1 when the field is present in the hash and 0 when
// either the key or the field is missing.
func (m *Miniredis) cmdHexists(c *server.Peer, cmd string, args []string) {
	if len(args) != 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	hashKey, field := args[0], args[1]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		keyType, exists := db.keys[hashKey]
		switch {
		case !exists:
			c.WriteInt(0)
		case keyType != "hash":
			c.WriteError(msgWrongType)
		default:
			if _, present := db.hashKeys[hashKey][field]; present {
				c.WriteInt(1)
			} else {
				c.WriteInt(0)
			}
		}
	})
}
// HGETALL
// cmdHgetall replies with every field and value of a hash as a map
// (RESP3) or flat array (RESP2); an empty map for a missing key.
func (m *Miniredis) cmdHgetall(c *server.Peer, cmd string, args []string) {
	if len(args) != 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		t, ok := db.keys[key]
		if !ok {
			c.WriteMapLen(0)
			return
		}
		if t != "hash" {
			c.WriteError(msgWrongType)
			return
		}

		c.WriteMapLen(len(db.hashKeys[key]))
		// hashFields returns sorted field names, so output is deterministic.
		for _, k := range db.hashFields(key) {
			c.WriteBulk(k)
			c.WriteBulk(db.hashGet(key, k))
		}
	})
}
// HKEYS
// cmdHkeys replies with all field names of a hash; an empty array for a
// missing key.
func (m *Miniredis) cmdHkeys(c *server.Peer, cmd string, args []string) {
	if len(args) != 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if !db.exists(key) {
			c.WriteLen(0)
			return
		}
		if db.t(key) != "hash" {
			c.WriteError(msgWrongType)
			return
		}

		// hashFields returns sorted field names, so output is deterministic.
		fields := db.hashFields(key)
		c.WriteLen(len(fields))
		for _, f := range fields {
			c.WriteBulk(f)
		}
	})
}
// HSTRLEN
// cmdHstrlen replies with the byte length of a hash field's value; 0 when
// the key or field does not exist.
func (m *Miniredis) cmdHstrlen(c *server.Peer, cmd string, args []string) {
	if len(args) != 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	hash, key := args[0], args[1]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		t, ok := db.keys[hash]
		if !ok {
			c.WriteInt(0)
			return
		}
		if t != "hash" {
			c.WriteError(msgWrongType)
			return
		}

		// Missing fields index to the empty string, so the length is 0.
		keys := db.hashKeys[hash]
		c.WriteInt(len(keys[key]))
	})
}
// HVALS
// cmdHvals replies with all values of a hash; an empty array for a
// missing key.
func (m *Miniredis) cmdHvals(c *server.Peer, cmd string, args []string) {
	if len(args) != 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		t, ok := db.keys[key]
		if !ok {
			c.WriteLen(0)
			return
		}
		if t != "hash" {
			c.WriteError(msgWrongType)
			return
		}

		vals := db.hashValues(key)
		c.WriteLen(len(vals))
		for _, v := range vals {
			c.WriteBulk(v)
		}
	})
}
// HLEN
// cmdHlen replies with the number of fields in a hash; 0 for a missing
// key.
func (m *Miniredis) cmdHlen(c *server.Peer, cmd string, args []string) {
	if len(args) != 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	hashKey := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		keyType, exists := db.keys[hashKey]
		switch {
		case !exists:
			c.WriteInt(0)
		case keyType != "hash":
			c.WriteError(msgWrongType)
		default:
			c.WriteInt(len(db.hashKeys[hashKey]))
		}
	})
}
// HMGET
// cmdHmget implements HMGET: for every requested field of the hash at
// args[0] it writes the field's value, or a nil reply when the field is
// absent. A missing key behaves like an empty hash.
func (m *Miniredis) cmdHmget(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	key := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		// A key of a different type is an error; a missing key is not.
		if t, ok := db.keys[key]; ok && t != "hash" {
			c.WriteError(msgWrongType)
			return
		}
		f, ok := db.hashKeys[key]
		if !ok {
			// No such key: treat as an empty hash so every field reads as nil.
			f = map[string]string{}
		}
		// Reply length equals the number of requested fields.
		c.WriteLen(len(args) - 1)
		for _, k := range args[1:] {
			v, ok := f[k]
			if !ok {
				c.WriteNull()
				continue
			}
			c.WriteBulk(v)
		}
	})
}
// HINCRBY
// cmdHincrby implements HINCRBY: add an integer delta to a hash field and
// reply with the resulting value.
func (m *Miniredis) cmdHincrby(c *server.Peer, cmd string, args []string) {
	if len(args) != 3 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	opts := struct {
		key   string
		field string
		delta int
	}{
		key:   args[0],
		field: args[1],
	}
	// optInt writes the protocol error itself when args[2] is not an integer.
	if ok := optInt(c, args[2], &opts.delta); !ok {
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if t, ok := db.keys[opts.key]; ok && t != "hash" {
			c.WriteError(msgWrongType)
			return
		}
		// Relay any error from hashIncr verbatim to the client.
		v, err := db.hashIncr(opts.key, opts.field, opts.delta)
		if err != nil {
			c.WriteError(err.Error())
			return
		}
		c.WriteInt(v)
	})
}
// HINCRBYFLOAT
// cmdHincrbyfloat implements HINCRBYFLOAT: add a decimal delta to a hash
// field and reply with the formatted result.
func (m *Miniredis) cmdHincrbyfloat(c *server.Peer, cmd string, args []string) {
	if len(args) != 3 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	opts := struct {
		key   string
		field string
		delta *big.Float
	}{
		key:   args[0],
		field: args[1],
	}
	// Base 10, 128-bit mantissa, rounding mode 0 (big.ToNearestEven).
	delta, _, err := big.ParseFloat(args[2], 10, 128, 0)
	if err != nil {
		setDirty(c)
		c.WriteError(msgInvalidFloat)
		return
	}
	opts.delta = delta
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if t, ok := db.keys[opts.key]; ok && t != "hash" {
			c.WriteError(msgWrongType)
			return
		}
		v, err := db.hashIncrfloat(opts.key, opts.field, opts.delta)
		if err != nil {
			c.WriteError(err.Error())
			return
		}
		c.WriteBulk(formatBig(v))
	})
}
// HSCAN
// cmdHscan implements HSCAN. Unlike real Redis, this implementation does
// not paginate: every (matched) field is returned on cursor 0 and any
// other cursor yields an empty result. COUNT is parsed but ignored.
func (m *Miniredis) cmdHscan(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	opts := struct {
		key       string
		cursor    int
		withMatch bool
		match     string
	}{
		key: args[0],
	}
	if ok := optIntErr(c, args[1], &opts.cursor, msgInvalidCursor); !ok {
		return
	}
	args = args[2:]
	// MATCH and COUNT options
	// Options may repeat; the last occurrence wins for MATCH.
	for len(args) > 0 {
		if strings.ToLower(args[0]) == "count" {
			// we do nothing with count
			if len(args) < 2 {
				setDirty(c)
				c.WriteError(msgSyntaxError)
				return
			}
			// Still validated so malformed COUNT values error out.
			_, err := strconv.Atoi(args[1])
			if err != nil {
				setDirty(c)
				c.WriteError(msgInvalidInt)
				return
			}
			args = args[2:]
			continue
		}
		if strings.ToLower(args[0]) == "match" {
			if len(args) < 2 {
				setDirty(c)
				c.WriteError(msgSyntaxError)
				return
			}
			opts.withMatch = true
			opts.match, args = args[1], args[2:]
			continue
		}
		setDirty(c)
		c.WriteError(msgSyntaxError)
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		// return _all_ (matched) keys every time
		if opts.cursor != 0 {
			// Invalid cursor.
			c.WriteLen(2)
			c.WriteBulk("0") // no next cursor
			c.WriteLen(0)    // no elements
			return
		}
		// NOTE(review): this uses ErrWrongType.Error() where sibling hash
		// commands use msgWrongType — confirm the wire text is intended.
		if db.exists(opts.key) && db.t(opts.key) != "hash" {
			c.WriteError(ErrWrongType.Error())
			return
		}
		members := db.hashFields(opts.key)
		if opts.withMatch {
			members, _ = matchKeys(members, opts.match)
		}
		c.WriteLen(2)
		c.WriteBulk("0") // no next cursor
		// HSCAN gives key, values.
		c.WriteLen(len(members) * 2)
		for _, k := range members {
			c.WriteBulk(k)
			c.WriteBulk(db.hashGet(opts.key, k))
		}
	})
}

View File

@@ -1,95 +0,0 @@
package miniredis
import "github.com/alicebob/miniredis/v2/server"
// commandsHll handles all hll related operations.
func commandsHll(m *Miniredis) {
	type registration struct {
		name    string
		handler func(*server.Peer, string, []string)
	}
	// Register the HyperLogLog commands in a fixed order.
	for _, r := range []registration{
		{"PFADD", m.cmdPfadd},
		{"PFCOUNT", m.cmdPfcount},
		{"PFMERGE", m.cmdPfmerge},
	} {
		m.srv.Register(r.name, r.handler)
	}
}
// PFADD
// cmdPfadd adds the given items to the HyperLogLog at args[0] and replies
// with 1 when the estimate changed, 0 otherwise.
func (m *Miniredis) cmdPfadd(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	hllKey, members := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if db.exists(hllKey) && db.t(hllKey) != "hll" {
			c.WriteError(ErrNotValidHllValue.Error())
			return
		}
		c.WriteInt(db.hllAdd(hllKey, members...))
	})
}
// PFCOUNT
// cmdPfcount replies with the approximated cardinality of the union of
// the given HyperLogLog keys.
func (m *Miniredis) cmdPfcount(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	hllKeys := args
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		total, err := db.hllCount(hllKeys)
		if err != nil {
			c.WriteError(err.Error())
			return
		}
		c.WriteInt(total)
	})
}
// PFMERGE
// cmdPfmerge merges the given HyperLogLog keys into the first one and
// replies OK.
func (m *Miniredis) cmdPfmerge(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	hllKeys := args
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if mergeErr := db.hllMerge(hllKeys); mergeErr != nil {
			c.WriteError(mergeErr.Error())
			return
		}
		c.WriteOK()
	})
}

View File

@@ -1,40 +0,0 @@
package miniredis
import (
"fmt"
"github.com/alicebob/miniredis/v2/server"
)
// Command 'INFO' from https://redis.io/commands/info/
// cmdInfo only implements the "clients" section; requesting any other
// section is an error. With no arguments the clients section is returned.
func (m *Miniredis) cmdInfo(c *server.Peer, cmd string, args []string) {
	if !m.isValidCMD(c, cmd) {
		return
	}
	if len(args) > 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		const (
			clientsSectionName    = "clients"
			clientsSectionContent = "# Clients\nconnected_clients:%d\r\n"
		)
		var result string
		// Reject any explicitly requested section other than "clients".
		for _, key := range args {
			if key != clientsSectionName {
				setDirty(c)
				c.WriteError(fmt.Sprintf("section (%s) is not supported", key))
				return
			}
		}
		result = fmt.Sprintf(clientsSectionContent, m.Server().ClientsLen())
		c.WriteBulk(result)
	})
}

View File

@@ -1,986 +0,0 @@
// Commands from https://redis.io/commands#list
package miniredis
import (
"strconv"
"strings"
"time"
"github.com/alicebob/miniredis/v2/server"
)
// leftright selects which end of a list an operation works on; it is the
// shared direction flag for the L*/R* command pairs (e.g. LPOP vs RPOP).
type leftright int

const (
	left  leftright = iota // head of the list
	right                  // tail of the list
)
// commandsList handles list commands (mostly L*)
func commandsList(m *Miniredis) {
	type registration struct {
		name    string
		handler func(*server.Peer, string, []string)
	}
	// Register every list command in a fixed order.
	for _, r := range []registration{
		{"BLPOP", m.cmdBlpop},
		{"BRPOP", m.cmdBrpop},
		{"BRPOPLPUSH", m.cmdBrpoplpush},
		{"LINDEX", m.cmdLindex},
		{"LPOS", m.cmdLpos},
		{"LINSERT", m.cmdLinsert},
		{"LLEN", m.cmdLlen},
		{"LPOP", m.cmdLpop},
		{"LPUSH", m.cmdLpush},
		{"LPUSHX", m.cmdLpushx},
		{"LRANGE", m.cmdLrange},
		{"LREM", m.cmdLrem},
		{"LSET", m.cmdLset},
		{"LTRIM", m.cmdLtrim},
		{"RPOP", m.cmdRpop},
		{"RPOPLPUSH", m.cmdRpoplpush},
		{"RPUSH", m.cmdRpush},
		{"RPUSHX", m.cmdRpushx},
		{"LMOVE", m.cmdLmove},
	} {
		m.srv.Register(r.name, r.handler)
	}
}
// BLPOP
// cmdBlpop is the blocking head-pop; it delegates to cmdBXpop.
func (m *Miniredis) cmdBlpop(c *server.Peer, cmd string, args []string) {
	m.cmdBXpop(c, cmd, args, left)
}

// BRPOP
// cmdBrpop is the blocking tail-pop; it delegates to cmdBXpop.
func (m *Miniredis) cmdBrpop(c *server.Peer, cmd string, args []string) {
	m.cmdBXpop(c, cmd, args, right)
}
// cmdBXpop is the shared implementation of BLPOP and BRPOP: block until one
// of the given lists has an element, pop it from the side selected by lr,
// and reply with [key, value]. The last argument is the timeout in seconds;
// 0 means wait forever, and a timeout replies with a nil array.
func (m *Miniredis) cmdBXpop(c *server.Peer, cmd string, args []string, lr leftright) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	timeoutS := args[len(args)-1]
	keys := args[:len(args)-1]
	timeout, err := strconv.Atoi(timeoutS)
	if err != nil {
		setDirty(c)
		c.WriteError(msgInvalidTimeout)
		return
	}
	if timeout < 0 {
		setDirty(c)
		c.WriteError(msgNegTimeout)
		return
	}
	blocking(
		m,
		c,
		time.Duration(timeout)*time.Second,
		// Poll callback: returns true once a reply (value or error) was written.
		func(c *server.Peer, ctx *connCtx) bool {
			db := m.db(ctx.selectedDB)
			// Keys are tried in the order given; the first non-empty list wins.
			for _, key := range keys {
				if !db.exists(key) {
					continue
				}
				if db.t(key) != "list" {
					c.WriteError(msgWrongType)
					return true
				}
				if len(db.listKeys[key]) == 0 {
					continue
				}
				c.WriteLen(2)
				c.WriteBulk(key)
				var v string
				switch lr {
				case left:
					v = db.listLpop(key)
				case right:
					v = db.listPop(key)
				}
				c.WriteBulk(v)
				return true
			}
			// Nothing available yet: keep blocking.
			return false
		},
		func(c *server.Peer) {
			// timeout
			c.WriteLen(-1)
		},
	)
}
// LINDEX
// cmdLindex replies with the element at the given index of a list.
// Negative indexes count from the tail; out-of-range indexes and missing
// keys reply nil. The literal string "-0" is rejected as an invalid int.
func (m *Miniredis) cmdLindex(c *server.Peer, cmd string, args []string) {
	if len(args) != 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	key, offsets := args[0], args[1]
	offset, err := strconv.Atoi(offsets)
	// "-0" parses as 0 but is explicitly rejected, matching Redis.
	if err != nil || offsets == "-0" {
		setDirty(c)
		c.WriteError(msgInvalidInt)
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		t, ok := db.keys[key]
		if !ok {
			// No such key
			c.WriteNull()
			return
		}
		if t != "list" {
			c.WriteError(msgWrongType)
			return
		}
		l := db.listKeys[key]
		if offset < 0 {
			// Translate a tail-relative index into an absolute one.
			offset = len(l) + offset
		}
		if offset < 0 || offset > len(l)-1 {
			c.WriteNull()
			return
		}
		c.WriteBulk(l[offset])
	})
}
// LPOS key element [RANK rank] [COUNT num-matches] [MAXLEN len]
// cmdLpos implements LPOS: find the index(es) of an element in a list.
// Without COUNT a single integer (or nil) is returned; with COUNT an
// array of up to `count` indexes is returned (0 meaning unlimited via
// the count==0 never-equal trick below).
func (m *Miniredis) cmdLpos(c *server.Peer, cmd string, args []string) {
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	if len(args) == 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	// Extract options from arguments if present.
	//
	// Redis allows duplicate options and uses the last specified.
	// `LPOS key term RANK 1 RANK 2` is effectively the same as
	// `LPOS key term RANK 2`
	if len(args)%2 == 1 {
		setDirty(c)
		c.WriteError(msgSyntaxError)
		return
	}
	rank, count := 1, 1 // Default values
	var maxlen int      // Default value is the list length (see below)
	var countSpecified, maxlenSpecified bool
	if len(args) > 2 {
		// Options start at index 2 and come in name/value pairs.
		for i := 2; i < len(args); i++ {
			if i%2 == 0 {
				val := args[i+1]
				var err error
				switch strings.ToLower(args[i]) {
				case "rank":
					if rank, err = strconv.Atoi(val); err != nil {
						setDirty(c)
						c.WriteError(msgInvalidInt)
						return
					}
					if rank == 0 {
						setDirty(c)
						c.WriteError(msgRankIsZero)
						return
					}
				case "count":
					countSpecified = true
					if count, err = strconv.Atoi(val); err != nil || count < 0 {
						setDirty(c)
						c.WriteError(msgCountIsNegative)
						return
					}
				case "maxlen":
					maxlenSpecified = true
					if maxlen, err = strconv.Atoi(val); err != nil || maxlen < 0 {
						setDirty(c)
						c.WriteError(msgMaxLengthIsNegative)
						return
					}
				default:
					setDirty(c)
					c.WriteError(msgSyntaxError)
					return
				}
			}
		}
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		key, element := args[0], args[1]
		t, ok := db.keys[key]
		if !ok {
			// No such key
			c.WriteNull()
			return
		}
		if t != "list" {
			c.WriteError(msgWrongType)
			return
		}
		l := db.listKeys[key]
		// RANK cannot be zero (see above).
		// If RANK is positive search forward (left to right).
		// If RANK is negative search backward (right to left).
		// Iterator returns true to continue iterating.
		iterate := func(iterator func(i int, e string) bool) {
			comparisons := len(l)
			// Only use max length if specified, not zero, and less than total length.
			// When max length is specified, but is zero, this means "unlimited".
			if maxlenSpecified && maxlen != 0 && maxlen < len(l) {
				comparisons = maxlen
			}
			if rank > 0 {
				for i := 0; i < comparisons; i++ {
					if resume := iterator(i, l[i]); !resume {
						return
					}
				}
			} else if rank < 0 {
				start := len(l) - 1
				end := len(l) - comparisons
				for i := start; i >= end; i-- {
					if resume := iterator(i, l[i]); !resume {
						return
					}
				}
			}
		}
		var currentRank, currentCount int
		vals := make([]int, 0, count)
		iterate(func(i int, e string) bool {
			if e == element {
				currentRank++
				// Only collect values only after surpassing the absolute value of rank.
				if rank > 0 && currentRank < rank {
					return true
				}
				if rank < 0 && currentRank < -rank {
					return true
				}
				vals = append(vals, i)
				currentCount++
				// When count is 0 ("unlimited") this never triggers, so the
				// whole list is scanned.
				if currentCount == count {
					return false
				}
			}
			return true
		})
		// Without COUNT the reply is scalar: nil or a single index.
		if !countSpecified && len(vals) == 0 {
			c.WriteNull()
			return
		}
		if !countSpecified && len(vals) == 1 {
			c.WriteInt(vals[0])
			return
		}
		c.WriteLen(len(vals))
		for _, val := range vals {
			c.WriteInt(val)
		}
	})
}
// LINSERT
// cmdLinsert inserts a value BEFORE or AFTER the first occurrence of a
// pivot element. Replies: new list length on success, 0 when the key is
// missing, -1 when the pivot is not found.
func (m *Miniredis) cmdLinsert(c *server.Peer, cmd string, args []string) {
	if len(args) != 4 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	key := args[0]
	// where: -1 = before the pivot, +1 = after the pivot.
	where := 0
	switch strings.ToLower(args[1]) {
	case "before":
		where = -1
	case "after":
		where = +1
	default:
		setDirty(c)
		c.WriteError(msgSyntaxError)
		return
	}
	pivot := args[2]
	value := args[3]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		t, ok := db.keys[key]
		if !ok {
			// No such key
			c.WriteInt(0)
			return
		}
		if t != "list" {
			c.WriteError(msgWrongType)
			return
		}
		l := db.listKeys[key]
		for i, el := range l {
			if el != pivot {
				continue
			}
			if where < 0 {
				l = append(l[:i], append(listKey{value}, l[i:]...)...)
			} else {
				// AFTER: appending at the very end needs no splice.
				if i == len(l)-1 {
					l = append(l, value)
				} else {
					l = append(l[:i+1], append(listKey{value}, l[i+1:]...)...)
				}
			}
			db.listKeys[key] = l
			db.keyVersion[key]++
			c.WriteInt(len(l))
			return
		}
		// Pivot not found.
		c.WriteInt(-1)
	})
}
// LLEN
// cmdLlen reports the length of the list at the given key; a missing key
// counts as an empty list.
func (m *Miniredis) cmdLlen(c *server.Peer, cmd string, args []string) {
	if len(args) != 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	listKey := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		kind, found := db.keys[listKey]
		switch {
		case !found:
			// No such key. That's zero length.
			c.WriteInt(0)
		case kind != "list":
			c.WriteError(msgWrongType)
		default:
			c.WriteInt(len(db.listKeys[listKey]))
		}
	})
}
// LPOP
// cmdLpop pops from the head of a list; it delegates to cmdXpop.
func (m *Miniredis) cmdLpop(c *server.Peer, cmd string, args []string) {
	m.cmdXpop(c, cmd, args, left)
}

// RPOP
// cmdRpop pops from the tail of a list; it delegates to cmdXpop.
func (m *Miniredis) cmdRpop(c *server.Peer, cmd string, args []string) {
	m.cmdXpop(c, cmd, args, right)
}
// cmdXpop is the shared implementation of LPOP and RPOP. With the optional
// COUNT argument it pops up to count elements and replies with an array
// (nil array when nothing was popped); without it, a single bulk reply.
func (m *Miniredis) cmdXpop(c *server.Peer, cmd string, args []string, lr leftright) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	var opts struct {
		key       string
		withCount bool
		count     int
	}
	opts.key, args = args[0], args[1:]
	// Optional COUNT argument.
	if len(args) > 0 {
		if ok := optInt(c, args[0], &opts.count); !ok {
			return
		}
		if opts.count < 0 {
			setDirty(c)
			c.WriteError(msgOutOfRange)
			return
		}
		opts.withCount = true
		args = args[1:]
	}
	// Anything after COUNT is an arity error.
	if len(args) > 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if !db.exists(opts.key) {
			// non-existing key is fine
			c.WriteNull()
			return
		}
		if db.t(opts.key) != "list" {
			c.WriteError(msgWrongType)
			return
		}
		if opts.withCount {
			// Pop up to count elements, stopping early when the list drains.
			var popped []string
			for opts.count > 0 && len(db.listKeys[opts.key]) > 0 {
				switch lr {
				case left:
					popped = append(popped, db.listLpop(opts.key))
				case right:
					popped = append(popped, db.listPop(opts.key))
				}
				opts.count -= 1
			}
			if len(popped) == 0 {
				// COUNT given but nothing popped: nil array.
				c.WriteLen(-1)
			} else {
				c.WriteStrings(popped)
			}
			return
		}
		var elem string
		switch lr {
		case left:
			elem = db.listLpop(opts.key)
		case right:
			elem = db.listPop(opts.key)
		}
		c.WriteBulk(elem)
	})
}
// LPUSH
// cmdLpush pushes onto the head of a list; it delegates to cmdXpush.
func (m *Miniredis) cmdLpush(c *server.Peer, cmd string, args []string) {
	m.cmdXpush(c, cmd, args, left)
}

// RPUSH
// cmdRpush pushes onto the tail of a list; it delegates to cmdXpush.
func (m *Miniredis) cmdRpush(c *server.Peer, cmd string, args []string) {
	m.cmdXpush(c, cmd, args, right)
}
// cmdXpush is the shared implementation of LPUSH and RPUSH: push every
// given value onto the selected end of the list (creating the key if
// needed) and reply with the final list length.
func (m *Miniredis) cmdXpush(c *server.Peer, cmd string, args []string, lr leftright) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	listKey, values := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if db.exists(listKey) && db.t(listKey) != "list" {
			c.WriteError(msgWrongType)
			return
		}
		length := 0
		for _, v := range values {
			if lr == left {
				length = db.listLpush(listKey, v)
			} else {
				length = db.listPush(listKey, v)
			}
		}
		c.WriteInt(length)
	})
}
// LPUSHX
// cmdLpushx pushes onto the head only if the key exists; delegates to cmdXpushx.
func (m *Miniredis) cmdLpushx(c *server.Peer, cmd string, args []string) {
	m.cmdXpushx(c, cmd, args, left)
}

// RPUSHX
// cmdRpushx pushes onto the tail only if the key exists; delegates to cmdXpushx.
func (m *Miniredis) cmdRpushx(c *server.Peer, cmd string, args []string) {
	m.cmdXpushx(c, cmd, args, right)
}
// cmdXpushx is the shared implementation of LPUSHX and RPUSHX: like
// cmdXpush, but it never creates the key — a missing key replies 0.
func (m *Miniredis) cmdXpushx(c *server.Peer, cmd string, args []string, lr leftright) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	listKey, values := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if !db.exists(listKey) {
			// PUSHX never creates the key.
			c.WriteInt(0)
			return
		}
		if db.t(listKey) != "list" {
			c.WriteError(msgWrongType)
			return
		}
		length := 0
		for _, v := range values {
			if lr == left {
				length = db.listLpush(listKey, v)
			} else {
				length = db.listPush(listKey, v)
			}
		}
		c.WriteInt(length)
	})
}
// LRANGE
// cmdLrange replies with the elements of a list between start and end
// (inclusive, negative indexes counting from the tail, clamped by
// redisRange). A missing key yields an empty array.
func (m *Miniredis) cmdLrange(c *server.Peer, cmd string, args []string) {
	if len(args) != 3 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	opts := struct {
		key   string
		start int
		end   int
	}{
		key: args[0],
	}
	if ok := optInt(c, args[1], &opts.start); !ok {
		return
	}
	if ok := optInt(c, args[2], &opts.end); !ok {
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if t, ok := db.keys[opts.key]; ok && t != "list" {
			c.WriteError(msgWrongType)
			return
		}
		l := db.listKeys[opts.key]
		if len(l) == 0 {
			c.WriteLen(0)
			return
		}
		// redisRange clamps/normalizes the pair into valid slice bounds.
		rs, re := redisRange(len(l), opts.start, opts.end, false)
		c.WriteLen(re - rs)
		for _, el := range l[rs:re] {
			c.WriteBulk(el)
		}
	})
}
// LREM
// cmdLrem removes occurrences of a value from a list. count > 0 removes
// from the head, count < 0 from the tail, count == 0 removes all.
// Replies with the number of removed elements.
func (m *Miniredis) cmdLrem(c *server.Peer, cmd string, args []string) {
	if len(args) != 3 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	var opts struct {
		key   string
		count int
		value string
	}
	opts.key = args[0]
	if ok := optInt(c, args[1], &opts.count); !ok {
		return
	}
	opts.value = args[2]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if !db.exists(opts.key) {
			c.WriteInt(0)
			return
		}
		if db.t(opts.key) != "list" {
			c.WriteError(msgWrongType)
			return
		}
		l := db.listKeys[opts.key]
		// Tail-first removal is implemented by reversing, filtering from the
		// head, then reversing back. Note reverseSlice mutates the shared
		// backing array of the stored list in place.
		if opts.count < 0 {
			reverseSlice(l)
		}
		deleted := 0
		newL := []string{}
		// toDelete: how many matches may still be removed; count==0 lets
		// every match through (bounded by the list length).
		toDelete := len(l)
		if opts.count < 0 {
			toDelete = -opts.count
		}
		if opts.count > 0 {
			toDelete = opts.count
		}
		for _, el := range l {
			if el == opts.value {
				if toDelete > 0 {
					deleted++
					toDelete--
					continue
				}
			}
			newL = append(newL, el)
		}
		if opts.count < 0 {
			// Restore original order after the tail-first pass.
			reverseSlice(newL)
		}
		if len(newL) == 0 {
			db.del(opts.key, true)
		} else {
			db.listKeys[opts.key] = newL
			db.keyVersion[opts.key]++
		}
		c.WriteInt(deleted)
	})
}
// LSET
// cmdLset overwrites the element at the given index of a list (negative
// indexes count from the tail). Missing key and out-of-range index are
// errors; success replies OK.
func (m *Miniredis) cmdLset(c *server.Peer, cmd string, args []string) {
	if len(args) != 3 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	var opts struct {
		key   string
		index int
		value string
	}
	opts.key = args[0]
	if ok := optInt(c, args[1], &opts.index); !ok {
		return
	}
	opts.value = args[2]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if !db.exists(opts.key) {
			// Unlike most list commands, LSET on a missing key is an error.
			c.WriteError(msgKeyNotFound)
			return
		}
		if db.t(opts.key) != "list" {
			c.WriteError(msgWrongType)
			return
		}
		l := db.listKeys[opts.key]
		index := opts.index
		if index < 0 {
			index = len(l) + index
		}
		if index < 0 || index > len(l)-1 {
			c.WriteError(msgOutOfRange)
			return
		}
		// l shares backing storage with db.listKeys, so this writes through.
		l[index] = opts.value
		db.keyVersion[opts.key]++
		c.WriteOK()
	})
}
// LTRIM
// cmdLtrim trims a list to the given inclusive range (negative indexes
// from the tail). An empty result deletes the key. Always replies OK,
// including on a missing key.
func (m *Miniredis) cmdLtrim(c *server.Peer, cmd string, args []string) {
	if len(args) != 3 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	var opts struct {
		key   string
		start int
		end   int
	}
	opts.key = args[0]
	if ok := optInt(c, args[1], &opts.start); !ok {
		return
	}
	if ok := optInt(c, args[2], &opts.end); !ok {
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		t, ok := db.keys[opts.key]
		if !ok {
			// Missing key: nothing to trim, still OK.
			c.WriteOK()
			return
		}
		if t != "list" {
			c.WriteError(msgWrongType)
			return
		}
		l := db.listKeys[opts.key]
		rs, re := redisRange(len(l), opts.start, opts.end, false)
		l = l[rs:re]
		if len(l) == 0 {
			// Trimming away everything removes the key entirely.
			db.del(opts.key, true)
		} else {
			db.listKeys[opts.key] = l
			db.keyVersion[opts.key]++
		}
		c.WriteOK()
	})
}
// RPOPLPUSH
// cmdRpoplpush atomically pops the tail of src, pushes it onto the head
// of dst, and replies with the moved element. A missing src replies nil.
func (m *Miniredis) cmdRpoplpush(c *server.Peer, cmd string, args []string) {
	if len(args) != 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	src, dst := args[0], args[1]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		switch {
		case !db.exists(src):
			c.WriteNull()
		case db.t(src) != "list", db.exists(dst) && db.t(dst) != "list":
			c.WriteError(msgWrongType)
		default:
			moved := db.listPop(src)
			db.listLpush(dst, moved)
			c.WriteBulk(moved)
		}
	})
}
// BRPOPLPUSH
// cmdBrpoplpush is the blocking variant of RPOPLPUSH: wait up to the given
// timeout (seconds; 0 = forever) for src to become non-empty, then move
// its tail element to the head of dst. A timeout replies with a nil array.
func (m *Miniredis) cmdBrpoplpush(c *server.Peer, cmd string, args []string) {
	if len(args) != 3 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	var opts struct {
		src     string
		dst     string
		timeout int
	}
	opts.src = args[0]
	opts.dst = args[1]
	if ok := optIntErr(c, args[2], &opts.timeout, msgInvalidTimeout); !ok {
		return
	}
	if opts.timeout < 0 {
		setDirty(c)
		c.WriteError(msgNegTimeout)
		return
	}
	blocking(
		m,
		c,
		time.Duration(opts.timeout)*time.Second,
		// Poll callback: returns true once a reply (value or error) was written.
		func(c *server.Peer, ctx *connCtx) bool {
			db := m.db(ctx.selectedDB)
			if !db.exists(opts.src) {
				return false
			}
			if db.t(opts.src) != "list" || (db.exists(opts.dst) && db.t(opts.dst) != "list") {
				c.WriteError(msgWrongType)
				return true
			}
			if len(db.listKeys[opts.src]) == 0 {
				return false
			}
			elem := db.listPop(opts.src)
			db.listLpush(opts.dst, elem)
			c.WriteBulk(elem)
			return true
		},
		func(c *server.Peer) {
			// timeout
			c.WriteLen(-1)
		},
	)
}
// LMOVE
// cmdLmove atomically pops from one end of src (LEFT/RIGHT) and pushes
// onto one end of dst, replying with the moved element. A missing src
// replies nil.
func (m *Miniredis) cmdLmove(c *server.Peer, cmd string, args []string) {
	if len(args) != 4 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	opts := struct {
		src    string
		dst    string
		srcDir string
		dstDir string
	}{
		src: args[0],
		dst: args[1],
		// Direction arguments are case-insensitive.
		srcDir: strings.ToLower(args[2]),
		dstDir: strings.ToLower(args[3]),
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if !db.exists(opts.src) {
			c.WriteNull()
			return
		}
		if db.t(opts.src) != "list" || (db.exists(opts.dst) && db.t(opts.dst) != "list") {
			c.WriteError(msgWrongType)
			return
		}
		var elem string
		switch opts.srcDir {
		case "left":
			elem = db.listLpop(opts.src)
		case "right":
			elem = db.listPop(opts.src)
		default:
			c.WriteError(msgSyntaxError)
			return
		}
		// NOTE: an invalid dstDir is only detected after the source pop above
		// has already happened.
		switch opts.dstDir {
		case "left":
			db.listLpush(opts.dst, elem)
		case "right":
			db.listPush(opts.dst, elem)
		default:
			c.WriteError(msgSyntaxError)
			return
		}
		c.WriteBulk(elem)
	})
}

View File

@@ -1,256 +0,0 @@
// Commands from https://redis.io/commands#pubsub
package miniredis
import (
"fmt"
"strings"
"github.com/alicebob/miniredis/v2/server"
)
// commandsPubsub handles all PUB/SUB operations.
func commandsPubsub(m *Miniredis) {
	type registration struct {
		name    string
		handler func(*server.Peer, string, []string)
	}
	// Register the pub/sub commands in a fixed order.
	for _, r := range []registration{
		{"SUBSCRIBE", m.cmdSubscribe},
		{"UNSUBSCRIBE", m.cmdUnsubscribe},
		{"PSUBSCRIBE", m.cmdPsubscribe},
		{"PUNSUBSCRIBE", m.cmdPunsubscribe},
		{"PUBLISH", m.cmdPublish},
		{"PUBSUB", m.cmdPubSub},
	} {
		m.srv.Register(r.name, r.handler)
	}
}
// SUBSCRIBE
// cmdSubscribe subscribes the connection to every given channel and
// writes one push reply per channel with the running subscription count.
func (m *Miniredis) cmdSubscribe(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	// Not allowed from inside scripts (nested execution).
	if getCtx(c).nested {
		c.WriteError(msgNotFromScripts)
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		sub := m.subscribedState(c)
		for _, channel := range args {
			n := sub.Subscribe(channel)
			// Block emits the three-element push reply atomically.
			c.Block(func(w *server.Writer) {
				w.WritePushLen(3)
				w.WriteBulk("subscribe")
				w.WriteBulk(channel)
				w.WriteInt(n)
			})
		}
	})
}
// UNSUBSCRIBE
// cmdUnsubscribe unsubscribes from the given channels, or from every
// currently subscribed channel when none are given, writing one push
// reply per channel. When no subscriptions remain the connection leaves
// subscriber mode.
func (m *Miniredis) cmdUnsubscribe(c *server.Peer, cmd string, args []string) {
	if !m.handleAuth(c) {
		return
	}
	// Not allowed from inside scripts (nested execution).
	if getCtx(c).nested {
		c.WriteError(msgNotFromScripts)
		return
	}
	channels := args
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		sub := m.subscribedState(c)
		if len(channels) == 0 {
			// No channels given: unsubscribe from all current ones.
			channels = sub.Channels()
		}
		// there is no de-duplication
		for _, channel := range channels {
			n := sub.Unsubscribe(channel)
			c.Block(func(w *server.Writer) {
				w.WritePushLen(3)
				w.WriteBulk("unsubscribe")
				w.WriteBulk(channel)
				w.WriteInt(n)
			})
		}
		if len(channels) == 0 {
			// special case: there is always a reply
			c.Block(func(w *server.Writer) {
				w.WritePushLen(3)
				w.WriteBulk("unsubscribe")
				w.WriteNull()
				w.WriteInt(0)
			})
		}
		if sub.Count() == 0 {
			endSubscriber(m, c)
		}
	})
}
// PSUBSCRIBE
// cmdPsubscribe subscribes the connection to every given channel pattern
// and writes one push reply per pattern with the subscription count.
func (m *Miniredis) cmdPsubscribe(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	// Not allowed from inside scripts (nested execution).
	if getCtx(c).nested {
		c.WriteError(msgNotFromScripts)
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		sub := m.subscribedState(c)
		for _, pat := range args {
			n := sub.Psubscribe(pat)
			c.Block(func(w *server.Writer) {
				w.WritePushLen(3)
				w.WriteBulk("psubscribe")
				w.WriteBulk(pat)
				w.WriteInt(n)
			})
		}
	})
}
// PUNSUBSCRIBE
// cmdPunsubscribe removes the given pattern subscriptions, or all of them
// when no patterns are given, writing one push reply per pattern. When no
// subscriptions remain the connection leaves subscriber mode.
func (m *Miniredis) cmdPunsubscribe(c *server.Peer, cmd string, args []string) {
	if !m.handleAuth(c) {
		return
	}
	// Not allowed from inside scripts (nested execution).
	if getCtx(c).nested {
		c.WriteError(msgNotFromScripts)
		return
	}
	patterns := args
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		sub := m.subscribedState(c)
		if len(patterns) == 0 {
			// No patterns given: unsubscribe from all current ones.
			patterns = sub.Patterns()
		}
		// there is no de-duplication
		for _, pat := range patterns {
			n := sub.Punsubscribe(pat)
			c.Block(func(w *server.Writer) {
				w.WritePushLen(3)
				w.WriteBulk("punsubscribe")
				w.WriteBulk(pat)
				w.WriteInt(n)
			})
		}
		if len(patterns) == 0 {
			// special case: there is always a reply
			c.Block(func(w *server.Writer) {
				w.WritePushLen(3)
				w.WriteBulk("punsubscribe")
				w.WriteNull()
				w.WriteInt(0)
			})
		}
		if sub.Count() == 0 {
			endSubscriber(m, c)
		}
	})
}
// PUBLISH
// cmdPublish delivers a message to a channel and replies with the number
// of subscribers that received it.
func (m *Miniredis) cmdPublish(c *server.Peer, cmd string, args []string) {
	if len(args) != 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	channelName, payload := args[0], args[1]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		receivers := m.publish(channelName, payload)
		c.WriteInt(receivers)
	})
}
// PUBSUB
// cmdPubSub implements the PUBSUB introspection command with the
// CHANNELS [pattern], NUMSUB [channel...] and NUMPAT subcommands.
func (m *Miniredis) cmdPubSub(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	subcommand := strings.ToUpper(args[0])
	subargs := args[1:]
	// Per-subcommand arity validation.
	var argsOk bool
	switch subcommand {
	case "CHANNELS":
		argsOk = len(subargs) < 2
	case "NUMSUB":
		argsOk = true
	case "NUMPAT":
		argsOk = len(subargs) == 0
	default:
		argsOk = false
	}
	if !argsOk {
		setDirty(c)
		c.WriteError(fmt.Sprintf(msgFPubsubUsage, subcommand))
		return
	}
	// NOTE(review): unlike the other commands, auth is checked after arity
	// validation here — confirm this ordering is intentional.
	if !m.handleAuth(c) {
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		switch subcommand {
		case "CHANNELS":
			// Optional glob pattern; empty matches every channel.
			pat := ""
			if len(subargs) == 1 {
				pat = subargs[0]
			}
			allsubs := m.allSubscribers()
			channels := activeChannels(allsubs, pat)
			c.WriteLen(len(channels))
			for _, channel := range channels {
				c.WriteBulk(channel)
			}
		case "NUMSUB":
			// Pairs of (channel, subscriber count).
			subs := m.allSubscribers()
			c.WriteLen(len(subargs) * 2)
			for _, channel := range subargs {
				c.WriteBulk(channel)
				c.WriteInt(countSubs(subs, channel))
			}
		case "NUMPAT":
			c.WriteInt(countPsubs(m.allSubscribers()))
		}
	})
}

View File

@@ -1,281 +0,0 @@
package miniredis
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"io"
"strconv"
"strings"
luajson "github.com/alicebob/gopher-json"
lua "github.com/yuin/gopher-lua"
"github.com/yuin/gopher-lua/parse"
"github.com/alicebob/miniredis/v2/server"
)
// commandsScripting registers the Lua scripting commands.
func commandsScripting(m *Miniredis) {
	type registration struct {
		name    string
		handler func(*server.Peer, string, []string)
	}
	for _, r := range []registration{
		{"EVAL", m.cmdEval},
		{"EVALSHA", m.cmdEvalsha},
		{"SCRIPT", m.cmdScript},
	} {
		m.srv.Register(r.name, r.handler)
	}
}
// Execute lua. Needs to run m.Lock()ed, from within withTx().
// Returns true if the lua was OK (and hence should be cached).
//
// The function builds a fresh Lua state per call: it loads a fixed set of
// safe standard libraries, preloads cjson, populates the KEYS and ARGV
// globals from args (args[0] is the key count), installs the "redis"
// module, locks down globals, and finally runs the user script.
func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) bool {
	l := lua.NewState(lua.Options{SkipOpenLibs: true})
	defer l.Close()

	// Taken from the go-lua manual
	// Open only a curated set of libraries (no os/io).
	for _, pair := range []struct {
		n string
		f lua.LGFunction
	}{
		{lua.LoadLibName, lua.OpenPackage},
		{lua.BaseLibName, lua.OpenBase},
		{lua.CoroutineLibName, lua.OpenCoroutine},
		{lua.TabLibName, lua.OpenTable},
		{lua.StringLibName, lua.OpenString},
		{lua.MathLibName, lua.OpenMath},
		{lua.DebugLibName, lua.OpenDebug},
	} {
		if err := l.CallByParam(lua.P{
			Fn:      l.NewFunction(pair.f),
			NRet:    0,
			Protect: true,
		}, lua.LString(pair.n)); err != nil {
			panic(err)
		}
	}

	luajson.Preload(l)
	requireGlobal(l, "cjson", "json")

	// set global variable KEYS
	keysTable := l.NewTable()
	keysS, args := args[0], args[1:]
	keysLen, err := strconv.Atoi(keysS)
	if err != nil {
		c.WriteError(msgInvalidInt)
		return false
	}
	if keysLen < 0 {
		c.WriteError(msgNegativeKeysNumber)
		return false
	}
	if keysLen > len(args) {
		c.WriteError(msgInvalidKeysNumber)
		return false
	}
	// Lua tables are 1-indexed.
	keys, args := args[:keysLen], args[keysLen:]
	for i, k := range keys {
		l.RawSet(keysTable, lua.LNumber(i+1), lua.LString(k))
	}
	l.SetGlobal("KEYS", keysTable)

	argvTable := l.NewTable()
	for i, a := range args {
		l.RawSet(argvTable, lua.LNumber(i+1), lua.LString(a))
	}
	l.SetGlobal("ARGV", argvTable)

	redisFuncs, redisConstants := mkLua(m.srv, c)
	// Register command handlers
	l.Push(l.NewFunction(func(l *lua.LState) int {
		mod := l.RegisterModule("redis", redisFuncs).(*lua.LTable)
		for k, v := range redisConstants {
			mod.RawSetString(k, v)
		}
		l.Push(mod)
		return 1
	}))

	// NOTE(review): the error from DoString(protectGlobals) is ignored —
	// presumably the embedded script cannot fail, but confirm.
	l.DoString(protectGlobals)

	l.Push(lua.LString("redis"))
	l.Call(1, 0)

	if err := l.DoString(script); err != nil {
		c.WriteError(errLuaParseError(err))
		return false
	}

	// Convert the script's return value into a Redis reply.
	luaToRedis(l, c, l.Get(1))
	return true
}
// cmdEval implements EVAL: run a Lua script with the given key count and
// arguments, caching the script under its SHA1 when it ran successfully.
func (m *Miniredis) cmdEval(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	// Scripts may not start other scripts.
	if getCtx(c).nested {
		c.WriteError(msgNotFromScripts)
		return
	}
	script, args := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		ok := m.runLuaScript(c, script, args)
		if ok {
			// Cache for later EVALSHA calls.
			sha := sha1Hex(script)
			m.scripts[sha] = script
		}
	})
}
// cmdEvalsha implements EVALSHA: run a previously cached script by its
// SHA1; an unknown digest is an error.
func (m *Miniredis) cmdEvalsha(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	// Scripts may not start other scripts.
	if getCtx(c).nested {
		c.WriteError(msgNotFromScripts)
		return
	}
	sha, args := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		script, ok := m.scripts[sha]
		if !ok {
			c.WriteError(msgNoScriptFound)
			return
		}
		m.runLuaScript(c, script, args)
	})
}
// cmdScript implements SCRIPT with the LOAD, EXISTS and FLUSH
// subcommands (subcommand matching is case-insensitive).
func (m *Miniredis) cmdScript(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	// Scripts may not manage scripts.
	if getCtx(c).nested {
		c.WriteError(msgNotFromScripts)
		return
	}
	subcmd, args := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		switch strings.ToLower(subcmd) {
		case "load":
			if len(args) != 1 {
				c.WriteError(fmt.Sprintf(msgFScriptUsage, "LOAD"))
				return
			}
			script := args[0]
			// Parse only — the script is validated, not executed.
			if _, err := parse.Parse(strings.NewReader(script), "user_script"); err != nil {
				c.WriteError(errLuaParseError(err))
				return
			}
			sha := sha1Hex(script)
			m.scripts[sha] = script
			c.WriteBulk(sha)
		case "exists":
			// One 0/1 reply per requested digest.
			c.WriteLen(len(args))
			for _, arg := range args {
				if _, ok := m.scripts[arg]; ok {
					c.WriteInt(1)
				} else {
					c.WriteInt(0)
				}
			}
		case "flush":
			// Optional SYNC/ASYNC modifier is accepted and ignored.
			if len(args) == 1 {
				switch strings.ToUpper(args[0]) {
				case "SYNC", "ASYNC":
					args = args[1:]
				default:
				}
			}
			if len(args) != 0 {
				c.WriteError(msgScriptFlush)
				return
			}
			m.scripts = map[string]string{}
			c.WriteOK()
		default:
			c.WriteError(fmt.Sprintf(msgFScriptUsage, strings.ToUpper(subcmd)))
		}
	})
}
// sha1Hex returns the lowercase hex encoding of the SHA-1 digest of s.
func sha1Hex(s string) string {
	digest := sha1.Sum([]byte(s))
	return hex.EncodeToString(digest[:])
}
// requireGlobal imports module modName into the global namespace with the
// identifier id. panics if an error results from the function execution
func requireGlobal(l *lua.LState, id, modName string) {
	if err := l.CallByParam(lua.P{
		Fn:      l.GetGlobal("require"),
		NRet:    1,
		Protect: true,
	}, lua.LString(modName)); err != nil {
		panic(err)
	}
	// require left the module table on the stack; pop it and bind it to id.
	mod := l.Get(-1)
	l.Pop(1)
	l.SetGlobal(id, mod)
}
// the following script protects globals
// it is based on: http://metalua.luaforge.net/src/lib/strict.lua.html
// It installs a metatable on _G that raises an error when Lua code (but not
// C code) creates or reads an undeclared global, and hides the debug library
// from user scripts afterwards.
var protectGlobals = `
local dbg=debug
local mt = {}
setmetatable(_G, mt)
mt.__newindex = function (t, n, v)
if dbg.getinfo(2) then
local w = dbg.getinfo(2, "S").what
if w ~= "C" then
error("Script attempted to create global variable '"..tostring(n).."'", 2)
end
end
rawset(t, n, v)
end
mt.__index = function (t, n)
if dbg.getinfo(2) and dbg.getinfo(2, "S").what ~= "C" then
error("Script attempted to access nonexistent global variable '"..tostring(n).."'", 2)
end
return rawget(t, n)
end
debug = nil
`

View File

@@ -1,112 +0,0 @@
// Commands from https://redis.io/commands#server
package miniredis
import (
"strconv"
"strings"
"github.com/alicebob/miniredis/v2/server"
)
// commandsServer registers the server-administration commands.
func commandsServer(m *Miniredis) {
	for _, h := range []struct {
		name string
		fn   func(*server.Peer, string, []string)
	}{
		{"COMMAND", m.cmdCommand},
		{"DBSIZE", m.cmdDbsize},
		{"FLUSHALL", m.cmdFlushall},
		{"FLUSHDB", m.cmdFlushdb},
		{"INFO", m.cmdInfo},
		{"TIME", m.cmdTime},
	} {
		m.srv.Register(h.name, h.fn)
	}
}
// DBSIZE
// cmdDbsize replies with the number of keys in the currently selected database.
func (m *Miniredis) cmdDbsize(c *server.Peer, cmd string, args []string) {
	if len(args) > 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		c.WriteInt(len(db.keys))
	})
}
// FLUSHALL [ASYNC|SYNC]
// cmdFlushall removes all keys from all databases. The ASYNC and SYNC
// modifiers (both valid in real Redis) are accepted but ignored: miniredis
// always flushes synchronously. Previously only ASYNC was accepted.
func (m *Miniredis) cmdFlushall(c *server.Peer, cmd string, args []string) {
	if len(args) > 0 {
		switch strings.ToLower(args[0]) {
		case "async", "sync":
			args = args[1:]
		}
	}
	if len(args) > 0 {
		setDirty(c)
		c.WriteError(msgSyntaxError)
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		m.flushAll()
		c.WriteOK()
	})
}
// FLUSHDB [ASYNC|SYNC]
// cmdFlushdb removes all keys from the selected database. The ASYNC and
// SYNC modifiers (both valid in real Redis) are accepted but ignored:
// miniredis always flushes synchronously. Previously only ASYNC was accepted.
func (m *Miniredis) cmdFlushdb(c *server.Peer, cmd string, args []string) {
	if len(args) > 0 {
		switch strings.ToLower(args[0]) {
		case "async", "sync":
			args = args[1:]
		}
	}
	if len(args) > 0 {
		setDirty(c)
		c.WriteError(msgSyntaxError)
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		m.db(ctx.selectedDB).flush()
		c.WriteOK()
	})
}
// TIME
// cmdTime replies with the server's notion of "now" as a two-element array:
// unix seconds and the microsecond fraction within that second.
func (m *Miniredis) cmdTime(c *server.Peer, cmd string, args []string) {
	if len(args) > 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		ns := m.effectiveNow().UnixNano()
		c.WriteLen(2)
		c.WriteBulk(strconv.FormatInt(ns/1_000_000_000, 10))
		c.WriteBulk(strconv.FormatInt((ns/1_000)%1_000_000, 10))
	})
}

View File

@@ -1,704 +0,0 @@
// Commands from https://redis.io/commands#set
package miniredis
import (
"fmt"
"strconv"
"strings"
"github.com/alicebob/miniredis/v2/server"
)
// commandsSet handles all set value operations.
func commandsSet(m *Miniredis) {
	for _, h := range []struct {
		name string
		fn   func(*server.Peer, string, []string)
	}{
		{"SADD", m.cmdSadd},
		{"SCARD", m.cmdScard},
		{"SDIFF", m.cmdSdiff},
		{"SDIFFSTORE", m.cmdSdiffstore},
		{"SINTER", m.cmdSinter},
		{"SINTERSTORE", m.cmdSinterstore},
		{"SISMEMBER", m.cmdSismember},
		{"SMEMBERS", m.cmdSmembers},
		{"SMOVE", m.cmdSmove},
		{"SPOP", m.cmdSpop},
		{"SRANDMEMBER", m.cmdSrandmember},
		{"SREM", m.cmdSrem},
		{"SUNION", m.cmdSunion},
		{"SUNIONSTORE", m.cmdSunionstore},
		{"SSCAN", m.cmdSscan},
	} {
		m.srv.Register(h.name, h.fn)
	}
}
// SADD key member [member ...]
// cmdSadd adds members to a set and replies with the number of members that
// were newly added.
func (m *Miniredis) cmdSadd(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	key, members := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if db.exists(key) && db.t(key) != "set" {
			c.WriteError(ErrWrongType.Error())
			return
		}
		c.WriteInt(db.setAdd(key, members...))
	})
}
// SCARD
// cmdScard replies with the cardinality of the set, or 0 when the key is absent.
func (m *Miniredis) cmdScard(c *server.Peer, cmd string, args []string) {
	if len(args) != 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	key := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if !db.exists(key) {
			c.WriteInt(0)
			return
		}
		if db.t(key) != "set" {
			c.WriteError(ErrWrongType.Error())
			return
		}
		members := db.setMembers(key)
		c.WriteInt(len(members))
	})
}
// SDIFF
// cmdSdiff replies with the members of the first set minus all following sets.
func (m *Miniredis) cmdSdiff(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	keys := args
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		set, err := db.setDiff(keys)
		if err != nil {
			c.WriteError(err.Error())
			return
		}
		// Replies with a RESP set type; map iteration makes the order random.
		c.WriteSetLen(len(set))
		for k := range set {
			c.WriteBulk(k)
		}
	})
}
// SDIFFSTORE
// cmdSdiffstore stores the diff of the given sets under dest, replacing any
// previous value, and replies with the resulting cardinality.
func (m *Miniredis) cmdSdiffstore(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	dest, keys := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		set, err := db.setDiff(keys)
		if err != nil {
			c.WriteError(err.Error())
			return
		}
		db.del(dest, true)
		db.setSet(dest, set)
		c.WriteInt(len(set))
	})
}
// SINTER
// cmdSinter replies with the intersection of the given sets.
func (m *Miniredis) cmdSinter(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	keys := args
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		set, err := db.setInter(keys)
		if err != nil {
			c.WriteError(err.Error())
			return
		}
		// NOTE(review): SDIFF uses WriteSetLen but SINTER uses WriteLen —
		// confirm whether this asymmetry is intended.
		c.WriteLen(len(set))
		for k := range set {
			c.WriteBulk(k)
		}
	})
}
// SINTERSTORE
// cmdSinterstore stores the intersection of the given sets under dest,
// replacing any previous value, and replies with the resulting cardinality.
func (m *Miniredis) cmdSinterstore(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	dest, keys := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		set, err := db.setInter(keys)
		if err != nil {
			c.WriteError(err.Error())
			return
		}
		db.del(dest, true)
		db.setSet(dest, set)
		c.WriteInt(len(set))
	})
}
// SISMEMBER
// cmdSismember replies 1 when value is a member of the set at key, else 0.
func (m *Miniredis) cmdSismember(c *server.Peer, cmd string, args []string) {
	if len(args) != 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	key, value := args[0], args[1]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if !db.exists(key) {
			c.WriteInt(0)
			return
		}
		if db.t(key) != "set" {
			c.WriteError(ErrWrongType.Error())
			return
		}
		if db.setIsMember(key, value) {
			c.WriteInt(1)
			return
		}
		c.WriteInt(0)
	})
}
// SMEMBERS
// cmdSmembers replies with all members of the set, sorted (setMembers sorts).
func (m *Miniredis) cmdSmembers(c *server.Peer, cmd string, args []string) {
	if len(args) != 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	key := args[0]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if !db.exists(key) {
			// Missing key acts as an empty set.
			c.WriteSetLen(0)
			return
		}
		if db.t(key) != "set" {
			c.WriteError(ErrWrongType.Error())
			return
		}
		members := db.setMembers(key)
		c.WriteSetLen(len(members))
		for _, elem := range members {
			c.WriteBulk(elem)
		}
	})
}
// SMOVE
// cmdSmove moves member from the set at src to the set at dst. Replies 1
// when the member was moved, 0 when src is missing or doesn't contain member.
func (m *Miniredis) cmdSmove(c *server.Peer, cmd string, args []string) {
	if len(args) != 3 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	src, dst, member := args[0], args[1], args[2]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if !db.exists(src) {
			c.WriteInt(0)
			return
		}
		if db.t(src) != "set" {
			c.WriteError(ErrWrongType.Error())
			return
		}
		if db.exists(dst) && db.t(dst) != "set" {
			c.WriteError(ErrWrongType.Error())
			return
		}
		if !db.setIsMember(src, member) {
			c.WriteInt(0)
			return
		}
		// src == dst works: the member is removed and re-added.
		db.setRem(src, member)
		db.setAdd(dst, member)
		c.WriteInt(1)
	})
}
// SPOP key [count]
// cmdSpop removes and returns random members. Without count it replies with
// a single bulk (or nil); with count it replies with an array.
func (m *Miniredis) cmdSpop(c *server.Peer, cmd string, args []string) {
	if len(args) == 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	opts := struct {
		key       string
		withCount bool
		count     int
	}{
		count: 1,
	}
	opts.key, args = args[0], args[1:]
	if len(args) > 0 {
		v, err := strconv.Atoi(args[0])
		if err != nil {
			setDirty(c)
			c.WriteError(msgInvalidInt)
			return
		}
		if v < 0 {
			setDirty(c)
			c.WriteError(msgOutOfRange)
			return
		}
		opts.count = v
		opts.withCount = true
		args = args[1:]
	}
	if len(args) > 0 {
		// NOTE(review): extra arguments reply msgInvalidInt here — confirm
		// whether a syntax error would match real Redis more closely.
		setDirty(c)
		c.WriteError(msgInvalidInt)
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if !db.exists(opts.key) {
			if !opts.withCount {
				c.WriteNull()
				return
			}
			c.WriteLen(0)
			return
		}
		if db.t(opts.key) != "set" {
			c.WriteError(ErrWrongType.Error())
			return
		}
		var deleted []string
		for i := 0; i < opts.count; i++ {
			members := db.setMembers(opts.key)
			if len(members) == 0 {
				break
			}
			// Pick via the server's seeded RNG so runs are reproducible.
			member := members[m.randIntn(len(members))]
			db.setRem(opts.key, member)
			deleted = append(deleted, member)
		}
		// without `count` return a single value
		if !opts.withCount {
			if len(deleted) == 0 {
				c.WriteNull()
				return
			}
			c.WriteBulk(deleted[0])
			return
		}
		// with `count` return a list
		c.WriteLen(len(deleted))
		for _, v := range deleted {
			c.WriteBulk(v)
		}
	})
}
// SRANDMEMBER key [count]
// cmdSrandmember returns random members without removing them. A negative
// count allows repeated members; a positive count returns distinct members.
func (m *Miniredis) cmdSrandmember(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if len(args) > 2 {
		setDirty(c)
		c.WriteError(msgSyntaxError)
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	key := args[0]
	count := 0
	withCount := false
	if len(args) == 2 {
		var err error
		count, err = strconv.Atoi(args[1])
		if err != nil {
			setDirty(c)
			c.WriteError(msgInvalidInt)
			return
		}
		withCount = true
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if !db.exists(key) {
			// NOTE(review): replies nil even when a count was given —
			// confirm real Redis returns an empty array in that case.
			c.WriteNull()
			return
		}
		if db.t(key) != "set" {
			c.WriteError(ErrWrongType.Error())
			return
		}
		members := db.setMembers(key)
		if count < 0 {
			// Non-unique elements is allowed with negative count.
			c.WriteLen(-count)
			for count != 0 {
				member := members[m.randIntn(len(members))]
				c.WriteBulk(member)
				count++
			}
			return
		}
		// Must be unique elements.
		m.shuffle(members)
		if count > len(members) {
			count = len(members)
		}
		if !withCount {
			c.WriteBulk(members[0])
			return
		}
		c.WriteLen(count)
		for i := range make([]struct{}, count) {
			c.WriteBulk(members[i])
		}
	})
}
// SREM
// cmdSrem removes the given members from a set and replies with the number
// actually removed.
func (m *Miniredis) cmdSrem(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	key, fields := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		if !db.exists(key) {
			c.WriteInt(0)
			return
		}
		if db.t(key) != "set" {
			c.WriteError(ErrWrongType.Error())
			return
		}
		c.WriteInt(db.setRem(key, fields...))
	})
}
// SUNION
// cmdSunion replies with the union of the given sets.
func (m *Miniredis) cmdSunion(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	keys := args
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		set, err := db.setUnion(keys)
		if err != nil {
			c.WriteError(err.Error())
			return
		}
		// Map iteration: reply order is unspecified.
		c.WriteLen(len(set))
		for k := range set {
			c.WriteBulk(k)
		}
	})
}
// SUNIONSTORE
// cmdSunionstore stores the union of the given sets under dest, replacing
// any previous value, and replies with the resulting cardinality.
func (m *Miniredis) cmdSunionstore(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	dest, keys := args[0], args[1:]
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		set, err := db.setUnion(keys)
		if err != nil {
			c.WriteError(err.Error())
			return
		}
		db.del(dest, true)
		db.setSet(dest, set)
		c.WriteInt(len(set))
	})
}
// SSCAN key cursor [MATCH pattern] [COUNT count]
// cmdSscan incrementally iterates a set. The cursor is implemented as a
// plain offset into the sorted member list; without COUNT all (matching)
// members are returned in a single pass.
// Fix: dropped the `value int` field from the local options struct — it was
// never read or written.
func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	var opts struct {
		key       string
		cursor    int
		count     int
		withMatch bool
		match     string
	}
	opts.key = args[0]
	if ok := optIntErr(c, args[1], &opts.cursor, msgInvalidCursor); !ok {
		return
	}
	args = args[2:]
	// MATCH and COUNT options
	for len(args) > 0 {
		if strings.ToLower(args[0]) == "count" {
			if len(args) < 2 {
				setDirty(c)
				c.WriteError(msgSyntaxError)
				return
			}
			count, err := strconv.Atoi(args[1])
			if err != nil || count < 0 {
				setDirty(c)
				c.WriteError(msgInvalidInt)
				return
			}
			if count == 0 {
				setDirty(c)
				c.WriteError(msgSyntaxError)
				return
			}
			opts.count = count
			args = args[2:]
			continue
		}
		if strings.ToLower(args[0]) == "match" {
			if len(args) < 2 {
				setDirty(c)
				c.WriteError(msgSyntaxError)
				return
			}
			opts.withMatch = true
			opts.match = args[1]
			args = args[2:]
			continue
		}
		setDirty(c)
		c.WriteError(msgSyntaxError)
		return
	}
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		// return _all_ (matched) keys every time
		if db.exists(opts.key) && db.t(opts.key) != "set" {
			c.WriteError(ErrWrongType.Error())
			return
		}
		members := db.setMembers(opts.key)
		if opts.withMatch {
			members, _ = matchKeys(members, opts.match)
		}
		low := opts.cursor
		high := low + opts.count
		// validate high is correct; opts.count == 0 means "everything"
		if high > len(members) || high == 0 {
			high = len(members)
		}
		if opts.cursor > high {
			// invalid cursor
			c.WriteLen(2)
			c.WriteBulk("0") // no next cursor
			c.WriteLen(0)    // no elements
			return
		}
		cursorValue := low + opts.count
		if cursorValue > len(members) {
			cursorValue = 0 // no next cursor
		}
		members = members[low:high]
		c.WriteLen(2)
		c.WriteBulk(fmt.Sprintf("%d", cursorValue))
		c.WriteLen(len(members))
		for _, k := range members {
			c.WriteBulk(k)
		}
	})
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,179 +0,0 @@
// Commands from https://redis.io/commands#transactions
package miniredis
import (
"github.com/alicebob/miniredis/v2/server"
)
// commandsTransaction handles MULTI &c.
func commandsTransaction(m *Miniredis) {
	for _, h := range []struct {
		name string
		fn   func(*server.Peer, string, []string)
	}{
		{"DISCARD", m.cmdDiscard},
		{"EXEC", m.cmdExec},
		{"MULTI", m.cmdMulti},
		{"UNWATCH", m.cmdUnwatch},
		{"WATCH", m.cmdWatch},
	} {
		m.srv.Register(h.name, h.fn)
	}
}
// MULTI
// cmdMulti starts a transaction: subsequent commands are queued until EXEC.
func (m *Miniredis) cmdMulti(c *server.Peer, cmd string, args []string) {
	if len(args) != 0 {
		// NOTE(review): unlike the other handlers, this arity error does not
		// call setDirty(c) — confirm whether that is intentional.
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	ctx := getCtx(c)
	// Transactions are not available from Lua scripts.
	if ctx.nested {
		c.WriteError(msgNotFromScripts)
		return
	}
	if inTx(ctx) {
		c.WriteError("ERR MULTI calls can not be nested")
		return
	}
	startTx(ctx)
	c.WriteOK()
}
// EXEC
// cmdExec runs all queued transaction commands, unless a queuing error was
// seen (EXECABORT) or a WATCHed key changed since WATCH (nil reply).
func (m *Miniredis) cmdExec(c *server.Peer, cmd string, args []string) {
	if len(args) != 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	ctx := getCtx(c)
	if ctx.nested {
		c.WriteError(msgNotFromScripts)
		return
	}
	if !inTx(ctx) {
		c.WriteError("ERR EXEC without MULTI")
		return
	}
	if ctx.dirtyTransaction {
		c.WriteError("EXECABORT Transaction discarded because of previous errors.")
		// a failed EXEC finishes the tx
		stopTx(ctx)
		return
	}
	// Hold the server lock for the whole transaction so it runs atomically.
	m.Lock()
	defer m.Unlock()
	// Check WATCHed keys.
	for t, version := range ctx.watch {
		if m.db(t.db).keyVersion[t.key] > version {
			// Abort! Abort!
			stopTx(ctx)
			c.WriteLen(-1)
			return
		}
	}
	c.WriteLen(len(ctx.transaction))
	for _, cb := range ctx.transaction {
		cb(c, ctx)
	}
	// wake up anyone who waits on anything.
	m.signal.Broadcast()
	stopTx(ctx)
}
// DISCARD
// cmdDiscard throws away the queued transaction commands.
func (m *Miniredis) cmdDiscard(c *server.Peer, cmd string, args []string) {
	if len(args) != 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	ctx := getCtx(c)
	if !inTx(ctx) {
		c.WriteError("ERR DISCARD without MULTI")
		return
	}
	stopTx(ctx)
	c.WriteOK()
}
// WATCH
// cmdWatch records the current version of each given key; a later EXEC
// aborts if any watched key changed in the meantime.
func (m *Miniredis) cmdWatch(c *server.Peer, cmd string, args []string) {
	if len(args) == 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	ctx := getCtx(c)
	if ctx.nested {
		c.WriteError(msgNotFromScripts)
		return
	}
	if inTx(ctx) {
		c.WriteError("ERR WATCH in MULTI")
		return
	}
	m.Lock()
	defer m.Unlock()
	db := m.db(ctx.selectedDB)
	for _, key := range args {
		watch(db, ctx, key)
	}
	c.WriteOK()
}
// UNWATCH
// cmdUnwatch drops all watched keys for this connection.
func (m *Miniredis) cmdUnwatch(c *server.Peer, cmd string, args []string) {
	if len(args) != 0 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}
	// Doesn't matter if UNWATCH is in a TX or not. Looks like a Redis bug to me.
	unwatch(getCtx(c))
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		// Do nothing if it's called in a transaction.
		c.WriteOK()
	})
}

View File

@@ -1,708 +0,0 @@
package miniredis
import (
"errors"
"fmt"
"math/big"
"sort"
"strconv"
"time"
)
var (
	// errInvalidEntryID is returned for malformed stream entry IDs.
	errInvalidEntryID = errors.New("stream ID is invalid")
)
// exists reports whether key k is present in this database.
func (db *RedisDB) exists(k string) bool {
	if _, found := db.keys[k]; found {
		return true
	}
	return false
}
// t gives the type of a key, or ""
// (one of "string", "hash", "list", "set", "zset", "stream", "hll").
func (db *RedisDB) t(k string) string {
	return db.keys[k]
}
// allKeys returns every key in the database, sorted so callers get a
// deterministic order.
func (db *RedisDB) allKeys() []string {
	keys := make([]string, 0, len(db.keys))
	for key := range db.keys {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	return keys
}
// flush removes all keys and values.
// Every per-type map is replaced wholesale; TTLs are dropped as well.
func (db *RedisDB) flush() {
	db.keys = map[string]string{}
	db.stringKeys = map[string]string{}
	db.hashKeys = map[string]hashKey{}
	db.listKeys = map[string]listKey{}
	db.setKeys = map[string]setKey{}
	db.hllKeys = map[string]*hll{}
	db.sortedsetKeys = map[string]sortedSet{}
	db.ttl = map[string]time.Duration{}
	db.streamKeys = map[string]*streamKey{}
}
// move something to another db. Will return ok. Or not.
// Fails when the key already exists in the target db or is missing here.
// The TTL moves along with the key. Panics on an unhandled key type.
func (db *RedisDB) move(key string, to *RedisDB) bool {
	if _, ok := to.keys[key]; ok {
		return false
	}
	t, ok := db.keys[key]
	if !ok {
		return false
	}
	to.keys[key] = db.keys[key]
	switch t {
	case "string":
		to.stringKeys[key] = db.stringKeys[key]
	case "hash":
		to.hashKeys[key] = db.hashKeys[key]
	case "list":
		to.listKeys[key] = db.listKeys[key]
	case "set":
		to.setKeys[key] = db.setKeys[key]
	case "zset":
		to.sortedsetKeys[key] = db.sortedsetKeys[key]
	case "stream":
		to.streamKeys[key] = db.streamKeys[key]
	case "hll":
		to.hllKeys[key] = db.hllKeys[key]
	default:
		panic("unhandled key type")
	}
	// Bump the version so WATCHes on the target key fire.
	to.keyVersion[key]++
	if v, ok := db.ttl[key]; ok {
		to.ttl[key] = v
	}
	db.del(key, true)
	return true
}
// rename renames key `from` to `to`, overwriting `to` and carrying the TTL
// along. The caller must ensure `from` exists; an unknown type panics.
func (db *RedisDB) rename(from, to string) {
	db.del(to, true)
	switch db.t(from) {
	case "string":
		db.stringKeys[to] = db.stringKeys[from]
	case "hash":
		db.hashKeys[to] = db.hashKeys[from]
	case "list":
		db.listKeys[to] = db.listKeys[from]
	case "set":
		db.setKeys[to] = db.setKeys[from]
	case "zset":
		db.sortedsetKeys[to] = db.sortedsetKeys[from]
	case "stream":
		db.streamKeys[to] = db.streamKeys[from]
	case "hll":
		db.hllKeys[to] = db.hllKeys[from]
	default:
		panic("missing case")
	}
	db.keys[to] = db.keys[from]
	db.keyVersion[to]++
	if v, ok := db.ttl[from]; ok {
		db.ttl[to] = v
	}
	db.del(from, true)
}
// del removes key k and its type-specific value. delTTL controls whether
// the TTL entry is removed too (rename/move keep it). Bumps the key
// version; no-op when the key is absent. Panics on an unknown type.
func (db *RedisDB) del(k string, delTTL bool) {
	if !db.exists(k) {
		return
	}
	t := db.t(k)
	delete(db.keys, k)
	db.keyVersion[k]++
	if delTTL {
		delete(db.ttl, k)
	}
	switch t {
	case "string":
		delete(db.stringKeys, k)
	case "hash":
		delete(db.hashKeys, k)
	case "list":
		delete(db.listKeys, k)
	case "set":
		delete(db.setKeys, k)
	case "zset":
		delete(db.sortedsetKeys, k)
	case "stream":
		delete(db.streamKeys, k)
	case "hll":
		delete(db.hllKeys, k)
	default:
		panic("Unknown key type: " + t)
	}
}
// stringGet returns the value of string key k, or "" when the key is
// missing or holds a non-string type.
func (db *RedisDB) stringGet(k string) string {
	typ, ok := db.keys[k]
	if !ok || typ != "string" {
		return ""
	}
	return db.stringKeys[k]
}
// stringSet force set()s a key. Does not touch expire.
// Any existing value of another type is removed first.
func (db *RedisDB) stringSet(k, v string) {
	db.del(k, false)
	db.keys[k] = "string"
	db.stringKeys[k] = v
	db.keyVersion[k]++
}
// stringIncr changes an int string value by delta and returns the new value.
// Missing key counts as 0; non-integer values yield ErrIntValueError.
// NOTE(review): delta addition is not checked for int overflow — confirm
// whether that matters for callers.
func (db *RedisDB) stringIncr(k string, delta int) (int, error) {
	v := 0
	if sv, ok := db.stringKeys[k]; ok {
		var err error
		v, err = strconv.Atoi(sv)
		if err != nil {
			return 0, ErrIntValueError
		}
	}
	v += delta
	db.stringSet(k, strconv.Itoa(v))
	return v, nil
}
// stringIncrfloat changes a float string value by delta using 128-bit
// big.Float precision and returns the new value. Missing key counts as 0;
// unparseable values yield ErrFloatValueError.
func (db *RedisDB) stringIncrfloat(k string, delta *big.Float) (*big.Float, error) {
	v := big.NewFloat(0.0)
	v.SetPrec(128)
	if sv, ok := db.stringKeys[k]; ok {
		var err error
		v, _, err = big.ParseFloat(sv, 10, 128, 0)
		if err != nil {
			return nil, ErrFloatValueError
		}
	}
	v.Add(v, delta)
	db.stringSet(k, formatBig(v))
	return v, nil
}
// listLpush is 'left push', aka unshift. Returns the new length.
// Creates the list when missing.
func (db *RedisDB) listLpush(k, v string) int {
	l, ok := db.listKeys[k]
	if !ok {
		db.keys[k] = "list"
	}
	l = append([]string{v}, l...)
	db.listKeys[k] = l
	db.keyVersion[k]++
	return len(l)
}
// listLpop is 'left pop', aka shift. The key is deleted when the last
// element is removed. Panics (index out of range) when the list is missing
// or empty — callers must check first.
func (db *RedisDB) listLpop(k string) string {
	l := db.listKeys[k]
	el := l[0]
	l = l[1:]
	if len(l) == 0 {
		db.del(k, true)
	} else {
		db.listKeys[k] = l
	}
	db.keyVersion[k]++
	return el
}
// listPush appends values to the right of list k, creating it when missing.
// Returns the new length.
func (db *RedisDB) listPush(k string, v ...string) int {
	l, ok := db.listKeys[k]
	if !ok {
		db.keys[k] = "list"
	}
	l = append(l, v...)
	db.listKeys[k] = l
	db.keyVersion[k]++
	return len(l)
}
// listPop removes and returns the rightmost element. The key is deleted
// when the last element is removed (db.del bumps the version in that case).
// Panics when the list is missing or empty — callers must check first.
func (db *RedisDB) listPop(k string) string {
	l := db.listKeys[k]
	el := l[len(l)-1]
	l = l[:len(l)-1]
	if len(l) == 0 {
		db.del(k, true)
	} else {
		db.listKeys[k] = l
		db.keyVersion[k]++
	}
	return el
}
// setSet replaces a whole set.
func (db *RedisDB) setSet(k string, set setKey) {
	db.keys[k] = "set"
	db.setKeys[k] = set
	db.keyVersion[k]++
}
// setAdd adds members to a set, creating it when missing. Returns the
// number of members that were not already present.
func (db *RedisDB) setAdd(k string, elems ...string) int {
	set, present := db.setKeys[k]
	if !present {
		set = setKey{}
		db.keys[k] = "set"
	}
	var created int
	for _, member := range elems {
		if _, known := set[member]; !known {
			created++
			set[member] = struct{}{}
		}
	}
	db.setKeys[k] = set
	db.keyVersion[k]++
	return created
}
// setRem removes members from a set. Returns nr of deleted keys.
// The key is deleted entirely when the set becomes empty. The version is
// bumped even when nothing was removed.
func (db *RedisDB) setRem(k string, fields ...string) int {
	s, ok := db.setKeys[k]
	if !ok {
		return 0
	}
	removed := 0
	for _, f := range fields {
		if _, ok := s[f]; ok {
			removed++
			delete(s, f)
		}
	}
	if len(s) == 0 {
		db.del(k, true)
	} else {
		db.setKeys[k] = s
	}
	db.keyVersion[k]++
	return removed
}
// setMembers returns all members of set k, sorted.
func (db *RedisDB) setMembers(k string) []string {
	elems := db.setKeys[k]
	res := make([]string, 0, len(elems))
	for member := range elems {
		res = append(res, member)
	}
	sort.Strings(res)
	return res
}
// setIsMember reports whether v is a member of set k.
func (db *RedisDB) setIsMember(k, v string) bool {
	if set, ok := db.setKeys[k]; ok {
		_, found := set[v]
		return found
	}
	return false
}
// hashFields returns the sorted field names of hash k (nil when the hash
// is missing or empty).
func (db *RedisDB) hashFields(k string) []string {
	var fields []string
	for field := range db.hashKeys[k] {
		fields = append(fields, field)
	}
	sort.Strings(fields)
	return fields
}
// hashValues returns all (sorted) values a hash key.
// Duplicate values appear as often as they occur.
func (db *RedisDB) hashValues(k string) []string {
	h := db.hashKeys[k]
	var r []string
	for _, v := range h {
		r = append(r, v)
	}
	sort.Strings(r)
	return r
}
// hashGet returns the value of a hash field, or "" when the hash or field
// is missing.
func (db *RedisDB) hashGet(key, field string) string {
	return db.hashKeys[key][field]
}
// hashSet sets field/value pairs on hash k, creating the hash as needed and
// replacing any key of a different type. fv is a flat field1, value1,
// field2, value2, ... list; a trailing field without a value is ignored.
// Returns the number of newly created fields.
// Fix: the counter was named `new`, shadowing the predeclared identifier.
func (db *RedisDB) hashSet(k string, fv ...string) int {
	if t, ok := db.keys[k]; ok && t != "hash" {
		db.del(k, true)
	}
	db.keys[k] = "hash"
	if _, ok := db.hashKeys[k]; !ok {
		db.hashKeys[k] = map[string]string{}
	}
	added := 0
	for idx := 0; idx < len(fv)-1; idx = idx + 2 {
		f, v := fv[idx], fv[idx+1]
		_, ok := db.hashKeys[k][f]
		db.hashKeys[k][f] = v
		// One version bump per field, matching the original behavior.
		db.keyVersion[k]++
		if !ok {
			added++
		}
	}
	return added
}
// hashIncr changes an int hash field by delta and returns the new value.
// A missing hash or field counts as 0; non-integer values yield
// ErrIntValueError.
func (db *RedisDB) hashIncr(key, field string, delta int) (int, error) {
	v := 0
	if h, ok := db.hashKeys[key]; ok {
		if f, ok := h[field]; ok {
			var err error
			v, err = strconv.Atoi(f)
			if err != nil {
				return 0, ErrIntValueError
			}
		}
	}
	v += delta
	db.hashSet(key, field, strconv.Itoa(v))
	return v, nil
}
// hashIncrfloat changes a float hash field by delta using 128-bit big.Float
// precision. A missing hash or field counts as 0; unparseable values yield
// ErrFloatValueError.
func (db *RedisDB) hashIncrfloat(key, field string, delta *big.Float) (*big.Float, error) {
	v := big.NewFloat(0.0)
	v.SetPrec(128)
	if h, ok := db.hashKeys[key]; ok {
		if f, ok := h[field]; ok {
			var err error
			v, _, err = big.ParseFloat(f, 10, 128, 0)
			if err != nil {
				return nil, ErrFloatValueError
			}
		}
	}
	v.Add(v, delta)
	db.hashSet(key, field, formatBig(v))
	return v, nil
}
// sortedSet returns the sorted set at key as a member→score map.
// The map shares storage with the stored set; mutations are visible.
func (db *RedisDB) sortedSet(key string) map[string]float64 {
	ss := db.sortedsetKeys[key]
	return map[string]float64(ss)
}
// ssetSet sets a complete sorted set.
func (db *RedisDB) ssetSet(key string, sset sortedSet) {
	db.keys[key] = "zset"
	db.keyVersion[key]++
	db.sortedsetKeys[key] = sset
}
// ssetAdd adds member to a sorted set. Returns whether this was a new member.
// An existing member has its score overwritten.
func (db *RedisDB) ssetAdd(key string, score float64, member string) bool {
	ss, ok := db.sortedsetKeys[key]
	if !ok {
		ss = newSortedSet()
		db.keys[key] = "zset"
	}
	_, ok = ss[member]
	ss[member] = score
	db.sortedsetKeys[key] = ss
	db.keyVersion[key]++
	return !ok
}
// ssetMembers returns every member of the sorted set at key, ordered by
// ascending score. Returns nil when the key is absent.
func (db *RedisDB) ssetMembers(key string) []string {
	set, ok := db.sortedsetKeys[key]
	if !ok {
		return nil
	}
	ordered := set.byScore(asc)
	names := make([]string, 0, len(ordered))
	for _, el := range ordered {
		names = append(names, el.member)
	}
	return names
}
// ssetElements returns all members+scores from a sorted set, ordered by
// ascending score. Returns nil when the key is absent.
func (db *RedisDB) ssetElements(key string) ssElems {
	ss, ok := db.sortedsetKeys[key]
	if !ok {
		return nil
	}
	return ss.byScore(asc)
}
// ssetRandomMember returns a random member of the sorted set at key, using
// the server's seeded RNG, or "" when the set is empty or missing.
func (db *RedisDB) ssetRandomMember(key string) string {
	elems := db.ssetElements(key)
	if len(elems) == 0 {
		return ""
	}
	return elems[db.master.randIntn(len(elems))].member
}
// ssetCard is the sorted set cardinality.
func (db *RedisDB) ssetCard(key string) int {
	ss := db.sortedsetKeys[key]
	return ss.card()
}
// ssetRank is the sorted set rank of member in direction d; the bool is
// false when the member is absent.
func (db *RedisDB) ssetRank(key, member string, d direction) (int, bool) {
	ss := db.sortedsetKeys[key]
	return ss.rankByScore(member, d)
}
// ssetScore is sorted set score. A missing member yields 0.
func (db *RedisDB) ssetScore(key, member string) float64 {
	ss := db.sortedsetKeys[key]
	return ss[member]
}
// ssetRem removes member from the sorted set; the key is deleted when the
// last member is removed. Returns whether the member existed.
// Note: unlike setRem, the key version is not bumped here except via db.del.
func (db *RedisDB) ssetRem(key, member string) bool {
	ss := db.sortedsetKeys[key]
	_, ok := ss[member]
	delete(ss, member)
	if len(ss) == 0 {
		// Delete key on removal of last member
		db.del(key, true)
	}
	return ok
}
// ssetExists tells if a member exists in a sorted set.
func (db *RedisDB) ssetExists(key, member string) bool {
	ss := db.sortedsetKeys[key]
	_, ok := ss[member]
	return ok
}
// ssetIncrby changes float sorted set score by delta, creating the set and
// member (from 0) as needed, and returns the new score.
func (db *RedisDB) ssetIncrby(k, m string, delta float64) float64 {
	ss, ok := db.sortedsetKeys[k]
	if !ok {
		ss = newSortedSet()
		db.keys[k] = "zset"
		db.sortedsetKeys[k] = ss
	}
	v, _ := ss.get(m)
	v += delta
	ss.set(v, m)
	db.keyVersion[k]++
	return v
}
// setDiff implements the logic behind SDIFF*: members of keys[0] minus the
// members of all other keys. A missing first key acts as an empty set; any
// key of a non-set type yields ErrWrongType.
func (db *RedisDB) setDiff(keys []string) (setKey, error) {
	key := keys[0]
	keys = keys[1:]
	if db.exists(key) && db.t(key) != "set" {
		return nil, ErrWrongType
	}
	s := setKey{}
	for k := range db.setKeys[key] {
		s[k] = struct{}{}
	}
	for _, sk := range keys {
		if !db.exists(sk) {
			continue
		}
		if db.t(sk) != "set" {
			return nil, ErrWrongType
		}
		for e := range db.setKeys[sk] {
			delete(s, e)
		}
	}
	return s, nil
}
// setInter implements the logic behind SINTER*
// len keys needs to be > 0
// All keys are type-checked up front so a wrong type errors even when an
// earlier key is missing. A missing key yields an empty result.
func (db *RedisDB) setInter(keys []string) (setKey, error) {
	// all keys must either not exist, or be of type "set".
	for _, key := range keys {
		if db.exists(key) && db.t(key) != "set" {
			return nil, ErrWrongType
		}
	}
	key := keys[0]
	keys = keys[1:]
	if !db.exists(key) {
		return nil, nil
	}
	// Redundant with the loop above, but kept as a cheap invariant check.
	if db.t(key) != "set" {
		return nil, ErrWrongType
	}
	s := setKey{}
	for k := range db.setKeys[key] {
		s[k] = struct{}{}
	}
	for _, sk := range keys {
		if !db.exists(sk) {
			return setKey{}, nil
		}
		if db.t(sk) != "set" {
			return nil, ErrWrongType
		}
		other := db.setKeys[sk]
		for e := range s {
			if _, ok := other[e]; ok {
				continue
			}
			delete(s, e)
		}
	}
	return s, nil
}
// setUnion implements the logic behind SUNION*: the union of all given
// sets. Missing keys act as empty sets; any key of a non-set type yields
// ErrWrongType.
func (db *RedisDB) setUnion(keys []string) (setKey, error) {
	key := keys[0]
	keys = keys[1:]
	if db.exists(key) && db.t(key) != "set" {
		return nil, ErrWrongType
	}
	s := setKey{}
	for k := range db.setKeys[key] {
		s[k] = struct{}{}
	}
	for _, sk := range keys {
		if !db.exists(sk) {
			continue
		}
		if db.t(sk) != "set" {
			return nil, ErrWrongType
		}
		for e := range db.setKeys[sk] {
			s[e] = struct{}{}
		}
	}
	return s, nil
}
// newStream creates a new, empty stream at key. Errors when the key already
// holds a stream or a value of another type.
// NOTE(review): the "ErrAlreadyExists" error is an ad-hoc fmt.Errorf string,
// not a sentinel — callers cannot errors.Is it; confirm nobody matches on it.
func (db *RedisDB) newStream(key string) (*streamKey, error) {
	if s, err := db.stream(key); err != nil {
		return nil, err
	} else if s != nil {
		return nil, fmt.Errorf("ErrAlreadyExists")
	}
	db.keys[key] = "stream"
	s := newStreamKey()
	db.streamKeys[key] = s
	db.keyVersion[key]++
	return s, nil
}
// stream returns the existing stream at key, or nil when the key is absent.
// A key of a non-stream type yields ErrWrongType.
func (db *RedisDB) stream(key string) (*streamKey, error) {
	if db.exists(key) {
		if db.t(key) != "stream" {
			return nil, ErrWrongType
		}
	}
	return db.streamKeys[key], nil
}
// streamGroup returns the existing stream group, or nil when the stream or
// group is absent. Propagates the wrong-type error from stream().
func (db *RedisDB) streamGroup(key, group string) (*streamGroup, error) {
	s, err := db.stream(key)
	if err != nil || s == nil {
		return nil, err
	}
	return s.groups[group], nil
}
// fastForward proceeds the current timestamp with duration, works as a time machine
// by shrinking every TTL and expiring keys that reach zero.
func (db *RedisDB) fastForward(duration time.Duration) {
	for _, key := range db.allKeys() {
		if value, ok := db.ttl[key]; ok {
			db.ttl[key] = value - duration
			db.checkTTL(key)
		}
	}
}
// checkTTL deletes key when its TTL has been used up. Keys without a TTL,
// or with time left, are untouched.
func (db *RedisDB) checkTTL(key string) {
	ttl, ok := db.ttl[key]
	if !ok {
		return
	}
	if ttl > 0 {
		return
	}
	db.del(key, true)
}
// hllAdd adds members to a hll. Returns 1 if at least 1 if internal HyperLogLog was altered, otherwise 0
// The hll is created when missing.
func (db *RedisDB) hllAdd(k string, elems ...string) int {
	s, ok := db.hllKeys[k]
	if !ok {
		s = newHll()
		db.keys[k] = "hll"
	}
	hllAltered := 0
	for _, e := range elems {
		if s.Add([]byte(e)) {
			hllAltered = 1
		}
	}
	db.hllKeys[k] = s
	db.keyVersion[k]++
	return hllAltered
}
// hllCount estimates the amount of members added to hll by hllAdd. If called with several arguments, hllCount returns a sum of estimations
// Missing keys count as 0; a key of a non-hll type yields ErrNotValidHllValue.
func (db *RedisDB) hllCount(keys []string) (int, error) {
	countOverall := 0
	for _, key := range keys {
		if db.exists(key) && db.t(key) != "hll" {
			return 0, ErrNotValidHllValue
		}
		if !db.exists(key) {
			continue
		}
		countOverall += db.hllKeys[key].Count()
	}
	return countOverall, nil
}
// hllMerge merges all the hlls provided as keys to the first key. Creates a new hll in the first key if it contains nothing
// All keys are type-checked before any mutation happens.
func (db *RedisDB) hllMerge(keys []string) error {
	for _, key := range keys {
		if db.exists(key) && db.t(key) != "hll" {
			return ErrNotValidHllValue
		}
	}
	destKey := keys[0]
	restKeys := keys[1:]
	var destHll *hll
	if db.exists(destKey) {
		destHll = db.hllKeys[destKey]
	} else {
		destHll = newHll()
	}
	for _, key := range restKeys {
		if !db.exists(key) {
			continue
		}
		destHll.Merge(db.hllKeys[key])
	}
	db.hllKeys[destKey] = destHll
	db.keys[destKey] = "hll"
	db.keyVersion[destKey]++
	return nil
}

View File

@@ -1,803 +0,0 @@
package miniredis
// Commands to modify and query our databases directly.
import (
"errors"
"math/big"
"time"
)
var (
	// ErrKeyNotFound is returned when a key doesn't exist.
	ErrKeyNotFound = errors.New(msgKeyNotFound)
	// ErrWrongType when a key is not the right type.
	ErrWrongType = errors.New(msgWrongType)
	// ErrNotValidHllValue when a key is not a valid HyperLogLog string value.
	ErrNotValidHllValue = errors.New(msgNotValidHllValue)
	// ErrIntValueError can be returned by INCRBY
	ErrIntValueError = errors.New(msgInvalidInt)
	// ErrFloatValueError can be returned by INCRBYFLOAT
	ErrFloatValueError = errors.New(msgInvalidFloat)
)
// Select sets the DB id used by all direct commands.
// It takes the instance-wide lock, so it is safe to call concurrently.
func (m *Miniredis) Select(i int) {
	m.Lock()
	defer m.Unlock()
	m.selectedDB = i
}
// Keys returns all keys from the selected database, sorted.
func (m *Miniredis) Keys() []string {
return m.DB(m.selectedDB).Keys()
}
// Keys returns all keys, sorted.
func (db *RedisDB) Keys() []string {
db.master.Lock()
defer db.master.Unlock()
return db.allKeys()
}
// FlushAll removes all keys from all databases.
func (m *Miniredis) FlushAll() {
m.Lock()
defer m.Unlock()
defer m.signal.Broadcast()
m.flushAll()
}
func (m *Miniredis) flushAll() {
for _, db := range m.dbs {
db.flush()
}
}
// FlushDB removes all keys from the selected database.
func (m *Miniredis) FlushDB() {
m.DB(m.selectedDB).FlushDB()
}
// FlushDB removes all keys.
func (db *RedisDB) FlushDB() {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
db.flush()
}
// Get returns string keys added with SET.
func (m *Miniredis) Get(k string) (string, error) {
return m.DB(m.selectedDB).Get(k)
}
// Get returns a string key.
func (db *RedisDB) Get(k string) (string, error) {
db.master.Lock()
defer db.master.Unlock()
if !db.exists(k) {
return "", ErrKeyNotFound
}
if db.t(k) != "string" {
return "", ErrWrongType
}
return db.stringGet(k), nil
}
// Set sets a string key. Removes expire.
func (m *Miniredis) Set(k, v string) error {
return m.DB(m.selectedDB).Set(k, v)
}
// Set sets a string key. Removes expire.
// Unlike redis the key can't be an existing non-string key.
func (db *RedisDB) Set(k, v string) error {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
if db.exists(k) && db.t(k) != "string" {
return ErrWrongType
}
db.del(k, true) // Remove expire
db.stringSet(k, v)
return nil
}
// Incr changes a int string value by delta.
func (m *Miniredis) Incr(k string, delta int) (int, error) {
return m.DB(m.selectedDB).Incr(k, delta)
}
// Incr changes a int string value by delta.
func (db *RedisDB) Incr(k string, delta int) (int, error) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
if db.exists(k) && db.t(k) != "string" {
return 0, ErrWrongType
}
return db.stringIncr(k, delta)
}
// IncrByFloat increments the float value of a key by the given delta.
// is an alias for Miniredis.Incrfloat
func (m *Miniredis) IncrByFloat(k string, delta float64) (float64, error) {
return m.Incrfloat(k, delta)
}
// Incrfloat changes a float string value by delta.
func (m *Miniredis) Incrfloat(k string, delta float64) (float64, error) {
return m.DB(m.selectedDB).Incrfloat(k, delta)
}
// Incrfloat changes a float string value by delta.
func (db *RedisDB) Incrfloat(k string, delta float64) (float64, error) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
if db.exists(k) && db.t(k) != "string" {
return 0, ErrWrongType
}
v, err := db.stringIncrfloat(k, big.NewFloat(delta))
if err != nil {
return 0, err
}
vf, _ := v.Float64()
return vf, nil
}
// List returns the list k, or an error if it's not there or something else.
// This is the same as the Redis command `LRANGE 0 -1`, but you can do your own
// range-ing.
func (m *Miniredis) List(k string) ([]string, error) {
return m.DB(m.selectedDB).List(k)
}
// List returns the list k, or an error if it's not there or something else.
// This is the same as the Redis command `LRANGE 0 -1`, but you can do your own
// range-ing.
func (db *RedisDB) List(k string) ([]string, error) {
db.master.Lock()
defer db.master.Unlock()
if !db.exists(k) {
return nil, ErrKeyNotFound
}
if db.t(k) != "list" {
return nil, ErrWrongType
}
return db.listKeys[k], nil
}
// Lpush prepends one value to a list. Returns the new length.
func (m *Miniredis) Lpush(k, v string) (int, error) {
return m.DB(m.selectedDB).Lpush(k, v)
}
// Lpush prepends one value to a list. Returns the new length.
func (db *RedisDB) Lpush(k, v string) (int, error) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
if db.exists(k) && db.t(k) != "list" {
return 0, ErrWrongType
}
return db.listLpush(k, v), nil
}
// Lpop removes and returns the last element in a list.
func (m *Miniredis) Lpop(k string) (string, error) {
return m.DB(m.selectedDB).Lpop(k)
}
// Lpop removes and returns the last element in a list.
func (db *RedisDB) Lpop(k string) (string, error) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
if !db.exists(k) {
return "", ErrKeyNotFound
}
if db.t(k) != "list" {
return "", ErrWrongType
}
return db.listLpop(k), nil
}
// RPush appends one or multiple values to a list. Returns the new length.
// An alias for Push
func (m *Miniredis) RPush(k string, v ...string) (int, error) {
return m.Push(k, v...)
}
// Push add element at the end. Returns the new length.
func (m *Miniredis) Push(k string, v ...string) (int, error) {
return m.DB(m.selectedDB).Push(k, v...)
}
// Push add element at the end. Is called RPUSH in redis. Returns the new length.
func (db *RedisDB) Push(k string, v ...string) (int, error) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
if db.exists(k) && db.t(k) != "list" {
return 0, ErrWrongType
}
return db.listPush(k, v...), nil
}
// RPop is an alias for Pop
func (m *Miniredis) RPop(k string) (string, error) {
return m.Pop(k)
}
// Pop removes and returns the last element. Is called RPOP in Redis.
func (m *Miniredis) Pop(k string) (string, error) {
return m.DB(m.selectedDB).Pop(k)
}
// Pop removes and returns the last element. Is called RPOP in Redis.
func (db *RedisDB) Pop(k string) (string, error) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
if !db.exists(k) {
return "", ErrKeyNotFound
}
if db.t(k) != "list" {
return "", ErrWrongType
}
return db.listPop(k), nil
}
// SAdd adds keys to a set. Returns the number of new keys.
// Alias for SetAdd
func (m *Miniredis) SAdd(k string, elems ...string) (int, error) {
return m.SetAdd(k, elems...)
}
// SetAdd adds keys to a set. Returns the number of new keys.
func (m *Miniredis) SetAdd(k string, elems ...string) (int, error) {
return m.DB(m.selectedDB).SetAdd(k, elems...)
}
// SetAdd adds keys to a set. Returns the number of new keys.
func (db *RedisDB) SetAdd(k string, elems ...string) (int, error) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
if db.exists(k) && db.t(k) != "set" {
return 0, ErrWrongType
}
return db.setAdd(k, elems...), nil
}
// SMembers returns all keys in a set, sorted.
// Alias for Members.
func (m *Miniredis) SMembers(k string) ([]string, error) {
return m.Members(k)
}
// Members returns all keys in a set, sorted.
func (m *Miniredis) Members(k string) ([]string, error) {
return m.DB(m.selectedDB).Members(k)
}
// Members gives all set keys. Sorted.
func (db *RedisDB) Members(k string) ([]string, error) {
db.master.Lock()
defer db.master.Unlock()
if !db.exists(k) {
return nil, ErrKeyNotFound
}
if db.t(k) != "set" {
return nil, ErrWrongType
}
return db.setMembers(k), nil
}
// SIsMember tells if value is in the set.
// Alias for IsMember
func (m *Miniredis) SIsMember(k, v string) (bool, error) {
return m.IsMember(k, v)
}
// IsMember tells if value is in the set.
func (m *Miniredis) IsMember(k, v string) (bool, error) {
return m.DB(m.selectedDB).IsMember(k, v)
}
// IsMember tells if value is in the set.
func (db *RedisDB) IsMember(k, v string) (bool, error) {
db.master.Lock()
defer db.master.Unlock()
if !db.exists(k) {
return false, ErrKeyNotFound
}
if db.t(k) != "set" {
return false, ErrWrongType
}
return db.setIsMember(k, v), nil
}
// HKeys returns all (sorted) keys ('fields') for a hash key.
func (m *Miniredis) HKeys(k string) ([]string, error) {
return m.DB(m.selectedDB).HKeys(k)
}
// HKeys returns all (sorted) keys ('fields') for a hash key.
func (db *RedisDB) HKeys(key string) ([]string, error) {
db.master.Lock()
defer db.master.Unlock()
if !db.exists(key) {
return nil, ErrKeyNotFound
}
if db.t(key) != "hash" {
return nil, ErrWrongType
}
return db.hashFields(key), nil
}
// Del deletes a key and any expiration value. Returns whether there was a key.
func (m *Miniredis) Del(k string) bool {
return m.DB(m.selectedDB).Del(k)
}
// Del deletes a key and any expiration value. Returns whether there was a key.
func (db *RedisDB) Del(k string) bool {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
if !db.exists(k) {
return false
}
db.del(k, true)
return true
}
// Unlink deletes a key and any expiration value. Returns whether there was a key.
// It's exactly the same as Del() and is not async. It is here for consistency.
func (m *Miniredis) Unlink(k string) bool {
	return m.Del(k)
}
// Unlink deletes a key and any expiration value. Returns whether there was a key.
// It's exactly the same as Del() and is not async. It is here for consistency.
func (db *RedisDB) Unlink(k string) bool {
	return db.Del(k)
}
// TTL is the left over time to live. As set via EXPIRE, PEXPIRE, EXPIREAT,
// PEXPIREAT.
// Note: this direct function returns 0 if there is no TTL set, unlike redis,
// which returns -1.
func (m *Miniredis) TTL(k string) time.Duration {
return m.DB(m.selectedDB).TTL(k)
}
// TTL is the left over time to live. As set via EXPIRE, PEXPIRE, EXPIREAT,
// PEXPIREAT.
// 0 if not set.
func (db *RedisDB) TTL(k string) time.Duration {
db.master.Lock()
defer db.master.Unlock()
return db.ttl[k]
}
// SetTTL sets the TTL of a key.
func (m *Miniredis) SetTTL(k string, ttl time.Duration) {
m.DB(m.selectedDB).SetTTL(k, ttl)
}
// SetTTL sets the time to live of a key.
func (db *RedisDB) SetTTL(k string, ttl time.Duration) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
db.ttl[k] = ttl
db.keyVersion[k]++
}
// Type gives the type of a key, or ""
func (m *Miniredis) Type(k string) string {
return m.DB(m.selectedDB).Type(k)
}
// Type gives the type of a key, or ""
func (db *RedisDB) Type(k string) string {
db.master.Lock()
defer db.master.Unlock()
return db.t(k)
}
// Exists tells whether a key exists.
func (m *Miniredis) Exists(k string) bool {
return m.DB(m.selectedDB).Exists(k)
}
// Exists tells whether a key exists.
func (db *RedisDB) Exists(k string) bool {
db.master.Lock()
defer db.master.Unlock()
return db.exists(k)
}
// HGet returns hash keys added with HSET.
// This will return an empty string if the key is not set. Redis would return
// a nil.
// Returns empty string when the key is of a different type.
func (m *Miniredis) HGet(k, f string) string {
return m.DB(m.selectedDB).HGet(k, f)
}
// HGet returns hash keys added with HSET.
// Returns empty string when the key is of a different type.
func (db *RedisDB) HGet(k, f string) string {
db.master.Lock()
defer db.master.Unlock()
h, ok := db.hashKeys[k]
if !ok {
return ""
}
return h[f]
}
// HSet sets hash keys.
// If there is another key by the same name it will be gone.
func (m *Miniredis) HSet(k string, fv ...string) {
m.DB(m.selectedDB).HSet(k, fv...)
}
// HSet sets hash keys.
// If there is another key by the same name it will be gone.
func (db *RedisDB) HSet(k string, fv ...string) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
db.hashSet(k, fv...)
}
// HDel deletes a hash key.
func (m *Miniredis) HDel(k, f string) {
m.DB(m.selectedDB).HDel(k, f)
}
// HDel deletes a hash key.
func (db *RedisDB) HDel(k, f string) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
db.hdel(k, f)
}
func (db *RedisDB) hdel(k, f string) {
if _, ok := db.hashKeys[k]; !ok {
return
}
delete(db.hashKeys[k], f)
db.keyVersion[k]++
}
// HIncrBy increases the integer value of a hash field by delta (int).
func (m *Miniredis) HIncrBy(k, f string, delta int) (int, error) {
return m.HIncr(k, f, delta)
}
// HIncr increases a key/field by delta (int).
func (m *Miniredis) HIncr(k, f string, delta int) (int, error) {
return m.DB(m.selectedDB).HIncr(k, f, delta)
}
// HIncr increases a key/field by delta (int).
func (db *RedisDB) HIncr(k, f string, delta int) (int, error) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
return db.hashIncr(k, f, delta)
}
// HIncrByFloat increases a key/field by delta (float).
func (m *Miniredis) HIncrByFloat(k, f string, delta float64) (float64, error) {
return m.HIncrfloat(k, f, delta)
}
// HIncrfloat increases a key/field by delta (float).
func (m *Miniredis) HIncrfloat(k, f string, delta float64) (float64, error) {
return m.DB(m.selectedDB).HIncrfloat(k, f, delta)
}
// HIncrfloat increases a key/field by delta (float).
func (db *RedisDB) HIncrfloat(k, f string, delta float64) (float64, error) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
v, err := db.hashIncrfloat(k, f, big.NewFloat(delta))
if err != nil {
return 0, err
}
vf, _ := v.Float64()
return vf, nil
}
// SRem removes fields from a set. Returns number of deleted fields.
func (m *Miniredis) SRem(k string, fields ...string) (int, error) {
return m.DB(m.selectedDB).SRem(k, fields...)
}
// SRem removes fields from a set. Returns number of deleted fields.
func (db *RedisDB) SRem(k string, fields ...string) (int, error) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
if !db.exists(k) {
return 0, ErrKeyNotFound
}
if db.t(k) != "set" {
return 0, ErrWrongType
}
return db.setRem(k, fields...), nil
}
// ZAdd adds a score,member to a sorted set.
func (m *Miniredis) ZAdd(k string, score float64, member string) (bool, error) {
return m.DB(m.selectedDB).ZAdd(k, score, member)
}
// ZAdd adds a score,member to a sorted set.
func (db *RedisDB) ZAdd(k string, score float64, member string) (bool, error) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
if db.exists(k) && db.t(k) != "zset" {
return false, ErrWrongType
}
return db.ssetAdd(k, score, member), nil
}
// ZMembers returns all members of a sorted set by score
func (m *Miniredis) ZMembers(k string) ([]string, error) {
return m.DB(m.selectedDB).ZMembers(k)
}
// ZMembers returns all members of a sorted set by score
func (db *RedisDB) ZMembers(k string) ([]string, error) {
db.master.Lock()
defer db.master.Unlock()
if !db.exists(k) {
return nil, ErrKeyNotFound
}
if db.t(k) != "zset" {
return nil, ErrWrongType
}
return db.ssetMembers(k), nil
}
// SortedSet returns a raw string->float64 map.
func (m *Miniredis) SortedSet(k string) (map[string]float64, error) {
return m.DB(m.selectedDB).SortedSet(k)
}
// SortedSet returns a raw string->float64 map.
func (db *RedisDB) SortedSet(k string) (map[string]float64, error) {
db.master.Lock()
defer db.master.Unlock()
if !db.exists(k) {
return nil, ErrKeyNotFound
}
if db.t(k) != "zset" {
return nil, ErrWrongType
}
return db.sortedSet(k), nil
}
// ZRem deletes a member from a sorted set. Returns whether there was such a key.
func (m *Miniredis) ZRem(k, member string) (bool, error) {
	return m.DB(m.selectedDB).ZRem(k, member)
}
// ZRem deletes a member from a sorted set. Returns whether there was such a key.
// Errors when the key is missing or is not a sorted set.
func (db *RedisDB) ZRem(k, member string) (bool, error) {
	db.master.Lock()
	defer db.master.Unlock()
	defer db.master.signal.Broadcast()
	if !db.exists(k) {
		return false, ErrKeyNotFound
	}
	if db.t(k) != "zset" {
		return false, ErrWrongType
	}
	return db.ssetRem(k, member), nil
}
// ZScore gives the score of a sorted set member.
func (m *Miniredis) ZScore(k, member string) (float64, error) {
return m.DB(m.selectedDB).ZScore(k, member)
}
// ZScore gives the score of a sorted set member.
func (db *RedisDB) ZScore(k, member string) (float64, error) {
db.master.Lock()
defer db.master.Unlock()
if !db.exists(k) {
return 0, ErrKeyNotFound
}
if db.t(k) != "zset" {
return 0, ErrWrongType
}
return db.ssetScore(k, member), nil
}
// XAdd adds an entry to a stream. `id` can be left empty or be '*'.
// If a value is given normal XADD rules apply. Values should be an even
// length.
func (m *Miniredis) XAdd(k string, id string, values []string) (string, error) {
return m.DB(m.selectedDB).XAdd(k, id, values)
}
// XAdd adds an entry to a stream. `id` can be left empty or be '*'.
// If a value is given normal XADD rules apply. Values should be an even
// length.
func (db *RedisDB) XAdd(k string, id string, values []string) (string, error) {
db.master.Lock()
defer db.master.Unlock()
defer db.master.signal.Broadcast()
s, err := db.stream(k)
if err != nil {
return "", err
}
if s == nil {
s, _ = db.newStream(k)
}
return s.add(id, values, db.master.effectiveNow())
}
// Stream returns a slice of stream entries. Oldest first.
func (m *Miniredis) Stream(k string) ([]StreamEntry, error) {
return m.DB(m.selectedDB).Stream(k)
}
// Stream returns a slice of stream entries. Oldest first.
func (db *RedisDB) Stream(key string) ([]StreamEntry, error) {
db.master.Lock()
defer db.master.Unlock()
s, err := db.stream(key)
if err != nil {
return nil, err
}
if s == nil {
return nil, nil
}
return s.entries, nil
}
// Publish a message to subscribers. Returns the number of receivers.
func (m *Miniredis) Publish(channel, message string) int {
m.Lock()
defer m.Unlock()
return m.publish(channel, message)
}
// PubSubChannels is "PUBSUB CHANNELS <pattern>". An empty pattern is fine
// (meaning all channels).
// Returned channels will be ordered alphabetically.
func (m *Miniredis) PubSubChannels(pattern string) []string {
m.Lock()
defer m.Unlock()
return activeChannels(m.allSubscribers(), pattern)
}
// PubSubNumSub is "PUBSUB NUMSUB [channels]". It returns all channels with their
// subscriber count.
func (m *Miniredis) PubSubNumSub(channels ...string) map[string]int {
m.Lock()
defer m.Unlock()
subs := m.allSubscribers()
res := map[string]int{}
for _, channel := range channels {
res[channel] = countSubs(subs, channel)
}
return res
}
// PubSubNumPat is "PUBSUB NUMPAT"
func (m *Miniredis) PubSubNumPat() int {
m.Lock()
defer m.Unlock()
return countPsubs(m.allSubscribers())
}
// PfAdd adds keys to a hll. Returns the flag which equals to 1 if the inner hll value has been changed.
func (m *Miniredis) PfAdd(k string, elems ...string) (int, error) {
return m.DB(m.selectedDB).HllAdd(k, elems...)
}
// HllAdd adds keys to a hll. Returns the flag which equals to true if the inner hll value has been changed.
func (db *RedisDB) HllAdd(k string, elems ...string) (int, error) {
db.master.Lock()
defer db.master.Unlock()
if db.exists(k) && db.t(k) != "hll" {
return 0, ErrWrongType
}
return db.hllAdd(k, elems...), nil
}
// PfCount returns an estimation of the amount of elements previously added to a hll.
func (m *Miniredis) PfCount(keys ...string) (int, error) {
return m.DB(m.selectedDB).HllCount(keys...)
}
// HllCount returns an estimation of the amount of elements previously added to a hll.
func (db *RedisDB) HllCount(keys ...string) (int, error) {
db.master.Lock()
defer db.master.Unlock()
return db.hllCount(keys)
}
// PfMerge merges all the input hlls into a hll under destKey key.
func (m *Miniredis) PfMerge(destKey string, sourceKeys ...string) error {
return m.DB(m.selectedDB).HllMerge(destKey, sourceKeys...)
}
// HllMerge merges all the input hlls into a hll under destKey key.
func (db *RedisDB) HllMerge(destKey string, sourceKeys ...string) error {
db.master.Lock()
defer db.master.Unlock()
return db.hllMerge(append([]string{destKey}, sourceKeys...))
}
// Copy a value.
// Needs the IDs of both the source and dest DBs (which can differ).
// Returns ErrKeyNotFound if src does not exist.
// Overwrites dest if it already exists (unlike the redis command, which needs a flag to allow that).
func (m *Miniredis) Copy(srcDB int, src string, destDB int, dest string) error {
return m.copy(m.DB(srcDB), src, m.DB(destDB), dest)
}

View File

@@ -1,46 +0,0 @@
package miniredis
import (
"math"
"github.com/alicebob/miniredis/v2/geohash"
)
func toGeohash(long, lat float64) uint64 {
return geohash.EncodeIntWithPrecision(lat, long, 52)
}
func fromGeohash(score uint64) (float64, float64) {
lat, long := geohash.DecodeIntWithPrecision(score, 52)
return long, lat
}
// hsin computes the haversine of theta: sin²(theta/2).
func hsin(theta float64) float64 {
	return math.Pow(math.Sin(theta/2), 2)
}

// distance returns the distance (in meters) between two points of a given
// longitude and latitude, relatively accurately, using the haversine
// formula on a spherical approximation of the Earth. Point coordinates are
// supplied in degrees; the result is in meters.
// http://en.wikipedia.org/wiki/Haversine_formula
// Source: https://gist.github.com/cdipaolo/d3f8db3848278b49db68
func distance(lat1, lon1, lat2, lon2 float64) float64 {
	// Earth radius in METERS, according to src/geohash_helper.c.
	const earth = 6372797.560856

	// Convert all four coordinates from degrees to radians.
	la1 := lat1 * math.Pi / 180
	lo1 := lon1 * math.Pi / 180
	la2 := lat2 * math.Pi / 180
	lo2 := lon2 * math.Pi / 180

	// Haversine of the central angle, then back to arc length.
	h := hsin(la2-la1) + math.Cos(la1)*math.Cos(la2)*hsin(lo2-lo1)
	return 2 * earth * math.Asin(math.Sqrt(h))
}

View File

@@ -1,22 +0,0 @@
The MIT License (MIT)
Copyright (c) 2015 Michael McLoughlin
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,2 +0,0 @@
This is a (selected) copy of github.com/mmcloughlin/geohash with the latitude
range changed from 90 to ~85, to align with the algorithm use by Redis.

View File

@@ -1,44 +0,0 @@
package geohash
// encoding encapsulates an encoding defined by a given base32 alphabet.
type encoding struct {
	encode string
	decode [256]byte
}

// newEncoding constructs a new encoding defined by the given alphabet,
// which must be a 32-byte string. Bytes outside the alphabet decode to 0xff.
func newEncoding(encoder string) *encoding {
	enc := &encoding{encode: encoder}
	for i := range enc.decode {
		enc.decode[i] = 0xff
	}
	for pos := 0; pos < len(encoder); pos++ {
		enc.decode[encoder[pos]] = byte(pos)
	}
	return enc
}

// Decode string into bits of a 64-bit word. The string s may be at most 12
// characters.
func (e *encoding) Decode(s string) uint64 {
	var bits uint64
	for pos := 0; pos < len(s); pos++ {
		bits = bits<<5 | uint64(e.decode[s[pos]])
	}
	return bits
}

// Encode bits of a 64-bit word into a 12-character string.
func (e *encoding) Encode(x uint64) string {
	var out [12]byte
	for pos := 11; pos >= 0; pos-- {
		out[pos] = e.encode[x&0x1f]
		x >>= 5
	}
	return string(out[:])
}

// base32encoding is the encoding with the Geohash alphabet.
var base32encoding = newEncoding("0123456789bcdefghjkmnpqrstuvwxyz")

View File

@@ -1,269 +0,0 @@
// Package geohash provides encoding and decoding of string and integer
// geohashes.
package geohash
import (
"math"
)
const (
ENC_LAT = 85.05112878
ENC_LONG = 180.0
)
// Direction represents directions in the latitute/longitude space.
type Direction int
// Cardinal and intercardinal directions
const (
North Direction = iota
NorthEast
East
SouthEast
South
SouthWest
West
NorthWest
)
// Encode the point (lat, lng) as a string geohash with the standard 12
// characters of precision.
func Encode(lat, lng float64) string {
return EncodeWithPrecision(lat, lng, 12)
}
// EncodeWithPrecision encodes the point (lat, lng) as a string geohash with
// the specified number of characters of precision (max 12).
func EncodeWithPrecision(lat, lng float64, chars uint) string {
bits := 5 * chars
inthash := EncodeIntWithPrecision(lat, lng, bits)
enc := base32encoding.Encode(inthash)
return enc[12-chars:]
}
// encodeInt provides a Go implementation of integer geohash. This is the
// default implementation of EncodeInt, but optimized versions are provided
// for certain architectures.
func EncodeInt(lat, lng float64) uint64 {
latInt := encodeRange(lat, ENC_LAT)
lngInt := encodeRange(lng, ENC_LONG)
return interleave(latInt, lngInt)
}
// EncodeIntWithPrecision encodes the point (lat, lng) to an integer with the
// specified number of bits.
func EncodeIntWithPrecision(lat, lng float64, bits uint) uint64 {
hash := EncodeInt(lat, lng)
return hash >> (64 - bits)
}
// Box represents a rectangle in latitude/longitude space.
type Box struct {
	MinLat float64
	MaxLat float64
	MinLng float64
	MaxLng float64
}

// Center returns the center point of the box.
func (b Box) Center() (lat, lng float64) {
	return (b.MinLat + b.MaxLat) / 2.0, (b.MinLng + b.MaxLng) / 2.0
}

// Contains reports whether (lat, lng) is contained in the box. The
// containment test is inclusive of the edges and corners.
func (b Box) Contains(lat, lng float64) bool {
	if lat < b.MinLat || lat > b.MaxLat {
		return false
	}
	return lng >= b.MinLng && lng <= b.MaxLng
}
// errorWithPrecision returns the error range in latitude and longitude for in
// integer geohash with bits of precision.
func errorWithPrecision(bits uint) (latErr, lngErr float64) {
b := int(bits)
latBits := b / 2
lngBits := b - latBits
latErr = math.Ldexp(180.0, -latBits)
lngErr = math.Ldexp(360.0, -lngBits)
return
}
// BoundingBox returns the region encoded by the given string geohash.
func BoundingBox(hash string) Box {
bits := uint(5 * len(hash))
inthash := base32encoding.Decode(hash)
return BoundingBoxIntWithPrecision(inthash, bits)
}
// BoundingBoxIntWithPrecision returns the region encoded by the integer
// geohash with the specified precision.
func BoundingBoxIntWithPrecision(hash uint64, bits uint) Box {
fullHash := hash << (64 - bits)
latInt, lngInt := deinterleave(fullHash)
lat := decodeRange(latInt, ENC_LAT)
lng := decodeRange(lngInt, ENC_LONG)
latErr, lngErr := errorWithPrecision(bits)
return Box{
MinLat: lat,
MaxLat: lat + latErr,
MinLng: lng,
MaxLng: lng + lngErr,
}
}
// BoundingBoxInt returns the region encoded by the given 64-bit integer
// geohash.
func BoundingBoxInt(hash uint64) Box {
return BoundingBoxIntWithPrecision(hash, 64)
}
// DecodeCenter decodes the string geohash to the central point of the bounding box.
func DecodeCenter(hash string) (lat, lng float64) {
box := BoundingBox(hash)
return box.Center()
}
// DecodeIntWithPrecision decodes the provided integer geohash with bits of
// precision to a (lat, lng) point.
func DecodeIntWithPrecision(hash uint64, bits uint) (lat, lng float64) {
box := BoundingBoxIntWithPrecision(hash, bits)
return box.Center()
}
// DecodeInt decodes the provided 64-bit integer geohash to a (lat, lng) point.
func DecodeInt(hash uint64) (lat, lng float64) {
return DecodeIntWithPrecision(hash, 64)
}
// Neighbors returns a slice of geohash strings that correspond to the provided
// geohash's neighbors.
func Neighbors(hash string) []string {
box := BoundingBox(hash)
lat, lng := box.Center()
latDelta := box.MaxLat - box.MinLat
lngDelta := box.MaxLng - box.MinLng
precision := uint(len(hash))
return []string{
// N
EncodeWithPrecision(lat+latDelta, lng, precision),
// NE,
EncodeWithPrecision(lat+latDelta, lng+lngDelta, precision),
// E,
EncodeWithPrecision(lat, lng+lngDelta, precision),
// SE,
EncodeWithPrecision(lat-latDelta, lng+lngDelta, precision),
// S,
EncodeWithPrecision(lat-latDelta, lng, precision),
// SW,
EncodeWithPrecision(lat-latDelta, lng-lngDelta, precision),
// W,
EncodeWithPrecision(lat, lng-lngDelta, precision),
// NW
EncodeWithPrecision(lat+latDelta, lng-lngDelta, precision),
}
}
// NeighborsInt returns a slice of uint64s that correspond to the provided hash's
// neighbors at 64-bit precision.
func NeighborsInt(hash uint64) []uint64 {
return NeighborsIntWithPrecision(hash, 64)
}
// NeighborsIntWithPrecision returns a slice of uint64s that correspond to the
// provided hash's neighbors at the given precision.
func NeighborsIntWithPrecision(hash uint64, bits uint) []uint64 {
box := BoundingBoxIntWithPrecision(hash, bits)
lat, lng := box.Center()
latDelta := box.MaxLat - box.MinLat
lngDelta := box.MaxLng - box.MinLng
return []uint64{
// N
EncodeIntWithPrecision(lat+latDelta, lng, bits),
// NE,
EncodeIntWithPrecision(lat+latDelta, lng+lngDelta, bits),
// E,
EncodeIntWithPrecision(lat, lng+lngDelta, bits),
// SE,
EncodeIntWithPrecision(lat-latDelta, lng+lngDelta, bits),
// S,
EncodeIntWithPrecision(lat-latDelta, lng, bits),
// SW,
EncodeIntWithPrecision(lat-latDelta, lng-lngDelta, bits),
// W,
EncodeIntWithPrecision(lat, lng-lngDelta, bits),
// NW
EncodeIntWithPrecision(lat+latDelta, lng-lngDelta, bits),
}
}
// Neighbor returns a geohash string that corresponds to the provided
// geohash's neighbor in the provided direction
func Neighbor(hash string, direction Direction) string {
return Neighbors(hash)[direction]
}
// NeighborInt returns a uint64 that corresponds to the provided hash's
// neighbor in the provided direction at 64-bit precision.
func NeighborInt(hash uint64, direction Direction) uint64 {
return NeighborsIntWithPrecision(hash, 64)[direction]
}
// NeighborIntWithPrecision returns a uint64s that corresponds to the
// provided hash's neighbor in the provided direction at the given precision.
func NeighborIntWithPrecision(hash uint64, bits uint, direction Direction) uint64 {
return NeighborsIntWithPrecision(hash, bits)[direction]
}
// exp232 is 2^32, precalculated for performance.
var exp232 = math.Exp2(32)

// encodeRange encodes the position of x within the range -r to +r as a
// 32-bit integer.
func encodeRange(x, r float64) uint32 {
	fraction := (x + r) / (2 * r)
	return uint32(fraction * exp232)
}
// decodeRange decodes the 32-bit range encoding X back to a value in the
// range -r to +r.
func decodeRange(X uint32, r float64) float64 {
	fraction := float64(X) / exp232
	return 2*r*fraction - r
}
// spread distributes the 32 bits of x over the even bit positions of a
// 64-bit word, leaving the odd positions zero.
func spread(x uint32) uint64 {
	v := uint64(x)
	v = (v | v<<16) & 0x0000ffff0000ffff
	v = (v | v<<8) & 0x00ff00ff00ff00ff
	v = (v | v<<4) & 0x0f0f0f0f0f0f0f0f
	v = (v | v<<2) & 0x3333333333333333
	v = (v | v<<1) & 0x5555555555555555
	return v
}
// interleave merges the bits of x and y into one 64-bit word, with x on the
// even bitlevels and y on the odd ones.
func interleave(x, y uint32) uint64 {
	even := spread(x)
	odd := spread(y) << 1
	return even | odd
}
// squash packs the even bitlevels of X into a 32-bit word. Odd bitlevels of
// X are ignored, and may take any value.
func squash(X uint64) uint32 {
	v := X & 0x5555555555555555
	v = (v | v>>1) & 0x3333333333333333
	v = (v | v>>2) & 0x0f0f0f0f0f0f0f0f
	v = (v | v>>4) & 0x00ff00ff00ff00ff
	v = (v | v>>8) & 0x0000ffff0000ffff
	v = (v | v>>16) & 0x00000000ffffffff
	return uint32(v)
}
// deinterleave splits X into two 32-bit words holding the even and the odd
// bitlevels of X, respectively.
func deinterleave(X uint64) (uint32, uint32) {
	even := squash(X)
	odd := squash(X >> 1)
	return even, odd
}

View File

@@ -1,42 +0,0 @@
package miniredis
import (
"github.com/alicebob/miniredis/v2/hyperloglog"
)
// hll is a thin wrapper around a HyperLogLog sketch from the vendored
// hyperloglog package.
type hll struct {
	inner *hyperloglog.Sketch
}

// newHll returns an empty hll backed by a fresh 14-precision sketch.
func newHll() *hll {
	return &hll{
		inner: hyperloglog.New14(),
	}
}

// Add returns true if cardinality has been changed, or false otherwise.
func (h *hll) Add(item []byte) bool {
	return h.inner.Insert(item)
}

// Count returns the estimation of the set cardinality.
func (h *hll) Count() int {
	return int(h.inner.Estimate())
}

// Merge merges the other hll into the original one (not making a copy but
// doing this in place).
// NOTE(review): the error from the inner Merge is discarded — presumably
// merging sketches of equal precision cannot fail; confirm in hyperloglog.
func (h *hll) Merge(other *hll) {
	_ = h.inner.Merge(other.inner)
}

// Bytes returns the raw-bytes representation of the hll data structure.
// The marshal error is ignored; the sketch lives in memory only.
func (h *hll) Bytes() []byte {
	dataBytes, _ := h.inner.MarshalBinary()
	return dataBytes
}

// copy returns a deep copy of the hll.
func (h *hll) copy() *hll {
	return &hll{
		inner: h.inner.Clone(),
	}
}

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2017 Axiom Inc. <seif@axiom.sh>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1 +0,0 @@
This is a copy of github.com/axiomhq/hyperloglog.

View File

@@ -1,180 +0,0 @@
package hyperloglog
import "encoding/binary"
// Original author of this file is github.com/clarkduvall/hyperloglog
// iterable is the common interface over the sparse encodings
// (compressedList and variableLengthList) that iterator walks.
type iterable interface {
	// decode reads the value starting at index i, given the previously
	// decoded value last, and returns the value plus the next index.
	decode(i int, last uint32) (uint32, int)
	// Len returns the size of the underlying storage in bytes.
	Len() int
	// Iter returns an iterator positioned at the start.
	Iter() *iterator
}
// iterator walks an iterable, tracking the byte position and the last
// decoded value (compressedList stores deltas, so decoding a value
// requires the previous one).
type iterator struct {
	i    int      // current index into v's storage
	last uint32   // last value returned by Next
	v    iterable // sequence being traversed
}
// Next decodes and consumes the next value, advancing the iterator.
func (iter *iterator) Next() uint32 {
	value, next := iter.v.decode(iter.i, iter.last)
	iter.last, iter.i = value, next
	return value
}
// Peek decodes the next value without advancing the iterator.
func (iter *iterator) Peek() uint32 {
	value, _ := iter.v.decode(iter.i, iter.last)
	return value
}
// HasNext reports whether there are values left to decode.
//
// Changed from a value receiver to a pointer receiver for consistency
// with Next and Peek; behavior is unchanged since HasNext only reads,
// and all call sites already hold a *iterator (Iter returns a pointer).
func (iter *iterator) HasNext() bool {
	return iter.i < iter.v.Len()
}
// compressedList stores a sequence of uint32 values as variable-length
// encoded deltas between consecutive entries (Append computes x - last),
// so values are expected to be appended in non-decreasing order.
type compressedList struct {
	count uint32             // number of values appended
	last  uint32             // most recently appended value
	b     variableLengthList // delta-encoded payload
}
// Clone returns a deep copy of v, or nil when v is nil.
func (v *compressedList) Clone() *compressedList {
	if v == nil {
		return nil
	}
	dup := &compressedList{
		count: v.count,
		last:  v.last,
		b:     make(variableLengthList, len(v.b)),
	}
	copy(dup.b, v.b)
	return dup
}
// MarshalBinary serializes the list as big-endian count and last values
// (4 bytes each) followed by the marshaled variableLengthList payload.
func (v *compressedList) MarshalBinary() (data []byte, err error) {
	// Serialize the underlying variable-length list first.
	bdata, err := v.b.MarshalBinary()
	if err != nil {
		return nil, err
	}
	// Fixed 8-byte header (count, last), then the payload.
	data = make([]byte, 8, 8+len(bdata))
	binary.BigEndian.PutUint32(data[0:4], v.count)
	binary.BigEndian.PutUint32(data[4:8], v.last)
	return append(data, bdata...), nil
}
// UnmarshalBinary restores the list from the MarshalBinary layout:
// big-endian count, last, payload size, then the payload bytes.
// Returns ErrorTooShort when data cannot contain a complete list.
func (v *compressedList) UnmarshalBinary(data []byte) error {
	// 12 bytes of fixed header: count, last, payload size.
	if len(data) < 12 {
		return ErrorTooShort
	}
	v.count = binary.BigEndian.Uint32(data[0:4])
	v.last = binary.BigEndian.Uint32(data[4:8])
	sz := binary.BigEndian.Uint32(data[8:12])
	rest := data[12:]
	// Validate the payload length BEFORE allocating, so a corrupt or
	// truncated header cannot trigger a huge allocation.
	if uint32(len(rest)) < sz {
		return ErrorTooShort
	}
	// Bulk copy instead of a byte-at-a-time loop.
	v.b = make(variableLengthList, sz)
	copy(v.b, rest[:sz])
	return nil
}
// newCompressedList returns an empty, ready-to-append compressedList.
func newCompressedList() *compressedList {
	return &compressedList{b: make(variableLengthList, 0)}
}
// Len returns the encoded size of the list in bytes.
func (v *compressedList) Len() int {
	return v.b.Len()
}
// decode reads the delta stored at index i and reconstructs the absolute
// value by adding last; it also returns the index of the next entry.
func (v *compressedList) decode(i int, last uint32) (uint32, int) {
	delta, next := v.b.decode(i, last)
	return last + delta, next
}
// Append stores x as a delta against the previously appended value.
func (v *compressedList) Append(x uint32) {
	delta := x - v.last
	v.b = v.b.Append(delta)
	v.last = x
	v.count++
}
// Iter returns an iterator positioned at the start of the list.
func (v *compressedList) Iter() *iterator {
	return &iterator{i: 0, last: 0, v: v}
}
type variableLengthList []uint8
// MarshalBinary serializes the list as a big-endian uint32 length
// followed by the raw bytes of the list. It never returns an error.
func (v variableLengthList) MarshalBinary() (data []byte, err error) {
	// 4 bytes for the size of the list, and a byte for each element.
	// We only need 32 bits for the length because the size of the set
	// couldn't exceed that on 32-bit architectures.
	sz := v.Len()
	data = make([]byte, 4, 4+sz)
	binary.BigEndian.PutUint32(data, uint32(sz))
	// Bulk-append the payload instead of copying element by element.
	return append(data, v...), nil
}
// Len returns the size of the list in bytes (not the number of encoded
// values, which varint encoding makes variable).
func (v variableLengthList) Len() int {
	return len(v)
}
// Iter returns an iterator positioned at the start of the list.
func (v *variableLengthList) Iter() *iterator {
	return &iterator{i: 0, last: 0, v: v}
}
// decode reads the varint starting at byte index i and returns the
// value plus the index of the following entry. The last parameter is
// unused here; it exists to satisfy the iterable signature.
func (v variableLengthList) decode(i int, last uint32) (uint32, int) {
	var val uint32
	shift := uint(0)
	j := i
	for {
		b := v[j]
		j++
		val |= uint32(b&0x7f) << shift
		if b&0x80 == 0 {
			// High bit clear marks the final byte of the value.
			return val, j
		}
		shift += 7
	}
}
// Append varint-encodes x onto the list: 7 bits per byte, low bits
// first, continuation flag (0x80) on every byte except the last.
func (v variableLengthList) Append(x uint32) variableLengthList {
	for x >= 0x80 {
		v = append(v, uint8(x)|0x80)
		x >>= 7
	}
	return append(v, uint8(x))
}

View File

@@ -1,424 +0,0 @@
package hyperloglog
import (
"encoding/binary"
"errors"
"fmt"
"math"
"sort"
)
const (
	// capacity is the per-register overflow threshold: dense registers
	// store values relative to the shared base b, clamped to capacity-1
	// (see Sketch.insert, which rebases when r-b >= capacity).
	capacity = uint8(16)
	// pp is the precision used for the sparse encoding.
	pp = uint8(25)
	// mp is the number of sparse registers, 2^pp (used by linear counting).
	mp = uint32(1) << pp
	// version is the serialization format marker written by MarshalBinary.
	version = 1
)
// Sketch is a HyperLogLog data-structure for the count-distinct problem,
// approximating the number of distinct elements in a multiset.
//
// A sketch starts in a memory-efficient sparse representation (tmpSet +
// sparseList) and converts to dense registers (regs) once it grows;
// sparse() reports which mode is active.
type Sketch struct {
	p          uint8           // precision: the dense representation has 2^p registers
	b          uint8           // shared base offset for dense registers (tailcut encoding)
	m          uint32          // number of dense registers (2^p)
	alpha      float64         // bias-correction constant derived from m
	tmpSet     set             // sparse mode: buffered encoded hashes not yet merged
	sparseList *compressedList // sparse mode: sorted, delta-compressed values; nil when dense
	regs       *registers      // dense mode register array; nil while sparse
}
// New returns a HyperLogLog Sketch with 2^14 registers (precision 14).
func New() *Sketch {
	return New14()
}

// New14 returns a HyperLogLog Sketch with 2^14 registers (precision 14).
// The newSketch error is ignored: 14 is always a valid precision.
func New14() *Sketch {
	sk, _ := newSketch(14, true)
	return sk
}

// New16 returns a HyperLogLog Sketch with 2^16 registers (precision 16).
// The newSketch error is ignored: 16 is always a valid precision.
func New16() *Sketch {
	sk, _ := newSketch(16, true)
	return sk
}

// NewNoSparse returns a HyperLogLog Sketch with 2^14 registers (precision 14)
// that will not use a sparse representation.
func NewNoSparse() *Sketch {
	sk, _ := newSketch(14, false)
	return sk
}

// New16NoSparse returns a HyperLogLog Sketch with 2^16 registers (precision 16)
// that will not use a sparse representation.
func New16NoSparse() *Sketch {
	sk, _ := newSketch(16, false)
	return sk
}
// newSketch returns a HyperLogLog Sketch with 2^precision registers,
// starting sparse or dense according to the sparse flag. Precision must
// lie in [4, 18].
func newSketch(precision uint8, sparse bool) (*Sketch, error) {
	if precision < 4 || precision > 18 {
		return nil, fmt.Errorf("p has to be >= 4 and <= 18")
	}
	m := uint32(math.Pow(2, float64(precision)))
	sk := &Sketch{
		p:     precision,
		m:     m,
		alpha: alpha(float64(m)),
	}
	if !sparse {
		sk.regs = newRegisters(m)
		return sk, nil
	}
	sk.tmpSet = set{}
	sk.sparseList = newCompressedList()
	return sk, nil
}
// sparse reports whether sk is still using the sparse representation
// (sparseList stays non-nil until toNormal converts to dense registers).
func (sk *Sketch) sparse() bool {
	return sk.sparseList != nil
}
// Clone returns a deep copy of sk: the sparse structures and dense
// registers are all duplicated, so the clone can be mutated freely.
func (sk *Sketch) Clone() *Sketch {
	cp := &Sketch{}
	cp.b, cp.p, cp.m, cp.alpha = sk.b, sk.p, sk.m, sk.alpha
	cp.tmpSet = sk.tmpSet.Clone()
	cp.sparseList = sk.sparseList.Clone()
	cp.regs = sk.regs.clone()
	return cp
}
// maybeToNormal flushes the tmpSet and converts the sketch to the dense
// representation when the sparse structures have grown past their
// thresholds (both relative to m, the dense register count).
func (sk *Sketch) maybeToNormal() {
	if uint32(len(sk.tmpSet))*100 <= sk.m {
		return
	}
	sk.mergeSparse()
	if uint32(sk.sparseList.Len()) > sk.m {
		sk.toNormal()
	}
}
// Merge takes another Sketch and combines it with Sketch h.
// If Sketch h is using the sparse Sketch, it will be converted
// to the normal Sketch. Returns an error when the precisions differ.
func (sk *Sketch) Merge(other *Sketch) error {
	if other == nil {
		// Nothing to do
		return nil
	}
	// Work on a copy so rebasing below cannot mutate the caller's sketch.
	cpOther := other.Clone()
	if sk.p != cpOther.p {
		return errors.New("precisions must be equal")
	}
	if sk.sparse() && other.sparse() {
		// Both sparse: union other's pending set and encoded list into
		// sk's tmpSet, then let maybeToNormal decide on conversion.
		// NOTE(review): this branch reads `other` directly rather than
		// cpOther — confirm that is intentional (these reads do not mutate).
		for k := range other.tmpSet {
			sk.tmpSet.add(k)
		}
		for iter := other.sparseList.Iter(); iter.HasNext(); {
			sk.tmpSet.add(iter.Next())
		}
		sk.maybeToNormal()
		return nil
	}
	// Mixed modes: force sk dense first.
	if sk.sparse() {
		sk.toNormal()
	}
	if cpOther.sparse() {
		// Replay the other sketch's sparse entries into sk's dense registers.
		for k := range cpOther.tmpSet {
			i, r := decodeHash(k, cpOther.p, pp)
			sk.insert(i, r)
		}
		for iter := cpOther.sparseList.Iter(); iter.HasNext(); {
			i, r := decodeHash(iter.Next(), cpOther.p, pp)
			sk.insert(i, r)
		}
	} else {
		// Both dense: align the shared base offsets, then take the
		// per-register maximum.
		if sk.b < cpOther.b {
			sk.regs.rebase(cpOther.b - sk.b)
			sk.b = cpOther.b
		} else {
			cpOther.regs.rebase(sk.b - cpOther.b)
			cpOther.b = sk.b
		}
		// Each tailcut element packs two registers (halves 0 and 1).
		for i, v := range cpOther.regs.tailcuts {
			v1 := v.get(0)
			if v1 > sk.regs.get(uint32(i)*2) {
				sk.regs.set(uint32(i)*2, v1)
			}
			v2 := v.get(1)
			if v2 > sk.regs.get(1+uint32(i)*2) {
				sk.regs.set(1+uint32(i)*2, v2)
			}
		}
	}
	return nil
}
// toNormal converts the sketch from the sparse representation to the
// dense register representation, replaying every stored value.
func (sk *Sketch) toNormal() {
	// Flush any values still buffered in tmpSet into the sparse list
	// before replaying it.
	if len(sk.tmpSet) > 0 {
		sk.mergeSparse()
	}
	sk.regs = newRegisters(sk.m)
	for iter := sk.sparseList.Iter(); iter.HasNext(); {
		i, r := decodeHash(iter.Next(), sk.p, pp)
		sk.insert(i, r)
	}
	// Dropping these marks the sketch as dense (see sparse()).
	sk.tmpSet = nil
	sk.sparseList = nil
}
// insert records rank r for dense register i, using the shared base
// offset b. Registers hold r - b clamped to capacity-1.
// Returns true if any register (or the base) changed.
func (sk *Sketch) insert(i uint32, r uint8) bool {
	changed := false
	if r-sk.b >= capacity {
		// Overflow: r - b no longer fits a register. Raise the shared
		// base by the minimum register value so every register stays
		// representable.
		db := sk.regs.min()
		if db > 0 {
			sk.b += db
			sk.regs.rebase(db)
			changed = true
		}
	}
	if r > sk.b {
		val := r - sk.b
		// Clamp to the largest storable relative value (capacity-1).
		if c1 := capacity - 1; c1 < val {
			val = c1
		}
		// Only ever raise a register; HyperLogLog keeps the maximum rank.
		if val > sk.regs.get(i) {
			sk.regs.set(i, val)
			changed = true
		}
	}
	return changed
}
// Insert adds element e to sketch, returning whether any internal
// state changed.
func (sk *Sketch) Insert(e []byte) bool {
	return sk.InsertHash(hash(e))
}
// InsertHash adds the pre-computed hash x to the sketch, returning
// whether any internal state changed.
func (sk *Sketch) InsertHash(x uint64) bool {
	// Dense mode: update the register directly.
	if !sk.sparse() {
		i, r := getPosVal(x, sk.p)
		return sk.insert(uint32(i), r)
	}
	// Sparse mode: buffer the encoded hash.
	if !sk.tmpSet.add(encodeHash(x, sk.p, pp)) {
		return false
	}
	// Flush / convert once the sparse structures outgrow their budgets.
	if uint32(len(sk.tmpSet))*100 > sk.m/2 {
		sk.mergeSparse()
		if uint32(sk.sparseList.Len()) > sk.m/2 {
			sk.toNormal()
		}
	}
	return true
}
// Estimate returns the cardinality of the Sketch.
//
// Note: on a sparse sketch this first flushes tmpSet into the sparse
// list, so Estimate mutates internal state (it is not a pure read).
func (sk *Sketch) Estimate() uint64 {
	if sk.sparse() {
		sk.mergeSparse()
		// Linear counting over the 2^pp sparse register space.
		return uint64(linearCount(mp, mp-sk.sparseList.count))
	}
	sum, ez := sk.regs.sumAndZeros(sk.b)
	m := float64(sk.m)
	var est float64
	var beta func(float64) float64
	// The bias-correction polynomial depends on the precision.
	if sk.p < 16 {
		beta = beta14
	} else {
		beta = beta16
	}
	if sk.b == 0 {
		est = (sk.alpha * m * (m - ez) / (sum + beta(ez)))
	} else {
		// With a non-zero base offset the zero-count correction is skipped.
		est = (sk.alpha * m * m / sum)
	}
	// Round to nearest integer.
	return uint64(est + 0.5)
}
// mergeSparse flushes the buffered tmpSet into sparseList, keeping the
// list sorted and duplicate-free, then resets tmpSet.
func (sk *Sketch) mergeSparse() {
	if len(sk.tmpSet) == 0 {
		return
	}
	// Sort the buffered values so they can be merge-joined against the
	// already-sorted sparse list.
	keys := make(uint64Slice, 0, len(sk.tmpSet))
	for k := range sk.tmpSet {
		keys = append(keys, k)
	}
	sort.Sort(keys)
	// Classic two-way sorted merge into a fresh list.
	newList := newCompressedList()
	for iter, i := sk.sparseList.Iter(), 0; iter.HasNext() || i < len(keys); {
		if !iter.HasNext() {
			// Old list exhausted: drain the remaining sorted keys.
			newList.Append(keys[i])
			i++
			continue
		}
		if i >= len(keys) {
			// Keys exhausted: drain the remaining old list.
			newList.Append(iter.Next())
			continue
		}
		x1, x2 := iter.Peek(), keys[i]
		if x1 == x2 {
			// Same value on both sides: emit once (deduplicate).
			newList.Append(iter.Next())
			i++
		} else if x1 > x2 {
			newList.Append(x2)
			i++
		} else {
			newList.Append(iter.Next())
		}
	}
	sk.sparseList = newList
	sk.tmpSet = set{}
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
//
// Layout: version byte, p, b, a sparse/dense flag byte, then either the
// marshaled tmpSet followed by the sparse list, or a big-endian uint32
// register count followed by the raw tailcut bytes.
func (sk *Sketch) MarshalBinary() (data []byte, err error) {
	// Marshal a version marker.
	data = append(data, version)
	// Marshal p.
	data = append(data, sk.p)
	// Marshal b
	data = append(data, sk.b)
	if sk.sparse() {
		// It's using the sparse Sketch.
		data = append(data, byte(1))
		// Add the tmp_set
		tsdata, err := sk.tmpSet.MarshalBinary()
		if err != nil {
			return nil, err
		}
		data = append(data, tsdata...)
		// Add the sparse Sketch
		sdata, err := sk.sparseList.MarshalBinary()
		if err != nil {
			return nil, err
		}
		return append(data, sdata...), nil
	}
	// It's using the dense Sketch.
	data = append(data, byte(0))
	// Big-endian length prefix for the tailcut array.
	sz := len(sk.regs.tailcuts)
	data = append(data, []byte{
		byte(sz >> 24),
		byte(sz >> 16),
		byte(sz >> 8),
		byte(sz),
	}...)
	// Marshal each element in the list.
	for i := 0; i < len(sk.regs.tailcuts); i++ {
		data = append(data, byte(sk.regs.tailcuts[i]))
	}
	return data, nil
}
// ErrorTooShort is returned by UnmarshalBinary when the input is too
// short to contain a valid serialized sketch.
var ErrorTooShort = errors.New("too short binary")
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
//
// The layout must mirror MarshalBinary: version byte, p, b, sparse flag,
// then either tmpSet + sparse list or the dense register bytes.
//
// NOTE(review): beyond the initial len(data) < 8 check there are no
// bounds checks; a truncated or corrupt payload (e.g. an oversized
// tmpSet count) panics instead of returning ErrorTooShort — confirm
// inputs are trusted before exposing this to external data.
func (sk *Sketch) UnmarshalBinary(data []byte) error {
	if len(data) < 8 {
		return ErrorTooShort
	}
	// Unmarshal version. We may need this in the future if we make
	// non-compatible changes.
	_ = data[0]
	// Unmarshal p.
	p := data[1]
	// Unmarshal b.
	sk.b = data[2]
	// Determine if we need a sparse Sketch
	sparse := data[3] == byte(1)
	// Make a newSketch Sketch if the precision doesn't match or if the Sketch was used
	if sk.p != p || sk.regs != nil || len(sk.tmpSet) > 0 || (sk.sparseList != nil && sk.sparseList.Len() > 0) {
		newh, err := newSketch(p, sparse)
		if err != nil {
			return err
		}
		newh.b = sk.b
		*sk = *newh
	}
	// h is now initialised with the correct p. We just need to fill the
	// rest of the details out.
	if sparse {
		// Using the sparse Sketch.
		// Unmarshal the tmp_set.
		tssz := binary.BigEndian.Uint32(data[4:8])
		sk.tmpSet = make(map[uint32]struct{}, tssz)
		// We need to unmarshal tssz values in total, and each value requires us
		// to read 4 bytes.
		tsLastByte := int((tssz * 4) + 8)
		for i := 8; i < tsLastByte; i += 4 {
			k := binary.BigEndian.Uint32(data[i : i+4])
			sk.tmpSet[k] = struct{}{}
		}
		// Unmarshal the sparse Sketch.
		return sk.sparseList.UnmarshalBinary(data[tsLastByte:])
	}
	// Using the dense Sketch.
	sk.sparseList = nil
	sk.tmpSet = nil
	dsz := binary.BigEndian.Uint32(data[4:8])
	sk.regs = newRegisters(dsz * 2)
	data = data[8:]
	for i, val := range data {
		// Each byte packs two register halves; nz (presumably the
		// zero-register counter — confirm in registers) is decremented
		// once for every non-zero half.
		sk.regs.tailcuts[i] = reg(val)
		if uint8(sk.regs.tailcuts[i]<<4>>4) > 0 {
			sk.regs.nz--
		}
		if uint8(sk.regs.tailcuts[i]>>4) > 0 {
			sk.regs.nz--
		}
	}
	return nil
}

Some files were not shown because too many files have changed in this diff Show More