Mirror of https://github.com/mochi-mqtt/server.git (synced 2025-11-03 10:31:11 +08:00)

Compare commits: main...remove-ven (2 commits)
| Author | SHA1 | Date |
|---|---|---|
|  | d9c3de2c8c |  |
|  | 787390e7ba |  |
3 .gitignore (vendored)

@@ -1,4 +1,5 @@
 cmd/mqtt
 .DS_Store
 *.db
-.idea
+.idea
+vendor
1 vendor/github.com/AndreasBriese/bbloom/.travis.yml (generated, vendored)

@@ -1 +0,0 @@
-language: go
35 vendor/github.com/AndreasBriese/bbloom/LICENSE (generated, vendored)
@@ -1,35 +0,0 @@
|
||||
bbloom.go
|
||||
|
||||
// The MIT License (MIT)
|
||||
// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
|
||||
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
// subject to the following conditions:
|
||||
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
siphash.go
|
||||
|
||||
// https://github.com/dchest/siphash
|
||||
//
|
||||
// Written in 2012 by Dmitry Chestnykh.
|
||||
//
|
||||
// To the extent possible under law, the author have dedicated all copyright
|
||||
// and related and neighboring rights to this software to the public domain
|
||||
// worldwide. This software is distributed without any warranty.
|
||||
// http://creativecommons.org/publicdomain/zero/1.0/
|
||||
//
|
||||
// Package siphash implements SipHash-2-4, a fast short-input PRF
|
||||
// created by Jean-Philippe Aumasson and Daniel J. Bernstein.
|
||||
131 vendor/github.com/AndreasBriese/bbloom/README.md (generated, vendored)
@@ -1,131 +0,0 @@
|
||||
## bbloom: a bitset Bloom filter for go/golang
|
||||
===
|
||||
|
||||
[](http://travis-ci.org/AndreasBriese/bbloom)
|
||||
|
||||
package implements a fast bloom filter with real 'bitset' and JSONMarshal/JSONUnmarshal to store/reload the Bloom filter.
|
||||
|
||||
NOTE: the package uses unsafe.Pointer to set and read the bits from the bitset. If you're uncomfortable with using the unsafe package, please consider using my bloom filter package at github.com/AndreasBriese/bloom
|
||||
|
||||
===
|
||||
|
||||
changelog 11/2015: new thread safe methods AddTS(), HasTS(), AddIfNotHasTS() following a suggestion from Srdjan Marinovic (github @a-little-srdjan), who used this to code a bloomfilter cache.
|
||||
|
||||
This bloom filter was developed to strengthen a website-log database and was tested and optimized for this log-entry mask: "2014/%02i/%02i %02i:%02i:%02i /info.html".
|
||||
Nonetheless bbloom should work with any other form of entries.
|
||||
|
||||
~~Hash function is a modified Berkeley DB sdbm hash (to optimize for smaller strings). sdbm http://www.cse.yorku.ca/~oz/hash.html~~
|
||||
|
||||
Found sipHash (SipHash-2-4, a fast short-input PRF created by Jean-Philippe Aumasson and Daniel J. Bernstein.) to be about as fast. sipHash had been ported by Dimtry Chestnyk to Go (github.com/dchest/siphash )
|
||||
|
||||
Minimum hashset size is: 512 ([4]uint64; will be set automatically).
|
||||
|
||||
###install
|
||||
|
||||
```sh
|
||||
go get github.com/AndreasBriese/bbloom
|
||||
```
|
||||
|
||||
###test
|
||||
+ change to folder ../bbloom
|
||||
+ create wordlist in file "words.txt" (you might use `python permut.py`)
|
||||
+ run 'go test -bench=.' within the folder
|
||||
|
||||
```go
|
||||
go test -bench=.
|
||||
```
|
||||
|
||||
~~If you've installed the GOCONVEY TDD-framework http://goconvey.co/ you can run the tests automatically.~~
|
||||
|
||||
using go's testing framework now (have in mind that the op timing is related to 65536 operations of Add, Has, AddIfNotHas respectively)
|
||||
|
||||
### usage
|
||||
|
||||
after installation add
|
||||
|
||||
```go
|
||||
import (
|
||||
...
|
||||
"github.com/AndreasBriese/bbloom"
|
||||
...
|
||||
)
|
||||
```
|
||||
|
||||
at your header. In the program use
|
||||
|
||||
```go
|
||||
// create a bloom filter for 65536 items and 1 % wrong-positive ratio
|
||||
bf := bbloom.New(float64(1<<16), float64(0.01))
|
||||
|
||||
// or
|
||||
// create a bloom filter with 650000 for 65536 items and 7 locs per hash explicitly
|
||||
// bf = bbloom.New(float64(650000), float64(7))
|
||||
// or
|
||||
bf = bbloom.New(650000.0, 7.0)
|
||||
|
||||
// add one item
|
||||
bf.Add([]byte("butter"))
|
||||
|
||||
// Number of elements added is exposed now
|
||||
// Note: ElemNum will not be included in JSON export (for compatability to older version)
|
||||
nOfElementsInFilter := bf.ElemNum
|
||||
|
||||
// check if item is in the filter
|
||||
isIn := bf.Has([]byte("butter")) // should be true
|
||||
isNotIn := bf.Has([]byte("Butter")) // should be false
|
||||
|
||||
// 'add only if item is new' to the bloomfilter
|
||||
added := bf.AddIfNotHas([]byte("butter")) // should be false because 'butter' is already in the set
|
||||
added = bf.AddIfNotHas([]byte("buTTer")) // should be true because 'buTTer' is new
|
||||
|
||||
// thread safe versions for concurrent use: AddTS, HasTS, AddIfNotHasTS
|
||||
// add one item
|
||||
bf.AddTS([]byte("peanutbutter"))
|
||||
// check if item is in the filter
|
||||
isIn = bf.HasTS([]byte("peanutbutter")) // should be true
|
||||
isNotIn = bf.HasTS([]byte("peanutButter")) // should be false
|
||||
// 'add only if item is new' to the bloomfilter
|
||||
added = bf.AddIfNotHasTS([]byte("butter")) // should be false because 'peanutbutter' is already in the set
|
||||
added = bf.AddIfNotHasTS([]byte("peanutbuTTer")) // should be true because 'penutbuTTer' is new
|
||||
|
||||
// convert to JSON ([]byte)
|
||||
Json := bf.JSONMarshal()
|
||||
|
||||
// bloomfilters Mutex is exposed for external un-/locking
|
||||
// i.e. mutex lock while doing JSON conversion
|
||||
bf.Mtx.Lock()
|
||||
Json = bf.JSONMarshal()
|
||||
bf.Mtx.Unlock()
|
||||
|
||||
// restore a bloom filter from storage
|
||||
bfNew := bbloom.JSONUnmarshal(Json)
|
||||
|
||||
isInNew := bfNew.Has([]byte("butter")) // should be true
|
||||
isNotInNew := bfNew.Has([]byte("Butter")) // should be false
|
||||
|
||||
```
|
||||
|
||||
to work with the bloom filter.
|
||||
|
||||
### why 'fast'?
|
||||
|
||||
It's about 3 times faster than William Fitzgeralds bitset bloom filter https://github.com/willf/bloom . And it is about so fast as my []bool set variant for Boom filters (see https://github.com/AndreasBriese/bloom ) but having a 8times smaller memory footprint:
|
||||
|
||||
|
||||
Bloom filter (filter size 524288, 7 hashlocs)
|
||||
github.com/AndreasBriese/bbloom 'Add' 65536 items (10 repetitions): 6595800 ns (100 ns/op)
|
||||
github.com/AndreasBriese/bbloom 'Has' 65536 items (10 repetitions): 5986600 ns (91 ns/op)
|
||||
github.com/AndreasBriese/bloom 'Add' 65536 items (10 repetitions): 6304684 ns (96 ns/op)
|
||||
github.com/AndreasBriese/bloom 'Has' 65536 items (10 repetitions): 6568663 ns (100 ns/op)
|
||||
|
||||
github.com/willf/bloom 'Add' 65536 items (10 repetitions): 24367224 ns (371 ns/op)
|
||||
github.com/willf/bloom 'Test' 65536 items (10 repetitions): 21881142 ns (333 ns/op)
|
||||
github.com/dataence/bloom/standard 'Add' 65536 items (10 repetitions): 23041644 ns (351 ns/op)
|
||||
github.com/dataence/bloom/standard 'Check' 65536 items (10 repetitions): 19153133 ns (292 ns/op)
|
||||
github.com/cabello/bloom 'Add' 65536 items (10 repetitions): 131921507 ns (2012 ns/op)
|
||||
github.com/cabello/bloom 'Contains' 65536 items (10 repetitions): 131108962 ns (2000 ns/op)
|
||||
|
||||
(on MBPro15 OSX10.8.5 i7 4Core 2.4Ghz)
|
||||
|
||||
|
||||
With 32bit bloom filters (bloom32) using modified sdbm, bloom32 does hashing with only 2 bit shifts, one xor and one substraction per byte. smdb is about as fast as fnv64a but gives less collisions with the dataset (see mask above). bloom.New(float64(10 * 1<<16),float64(7)) populated with 1<<16 random items from the dataset (see above) and tested against the rest results in less than 0.05% collisions.
|
||||
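The sizing figures in the usage example above (a filter for 65536 items at a 1 % wrong-positive ratio, or explicitly 650000 bits with 7 locs) follow from the standard Bloom filter formula that `calcSizeByWrongPositives` in bbloom.go applies. A minimal illustrative sketch of that arithmetic, separate from the package itself:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	n := float64(1 << 16) // 65536 expected entries
	p := 0.01             // 1 % wrong-positive ratio

	// Same formula as bbloom's calcSizeByWrongPositives:
	// bits = -n*ln(p)/ln(2)^2, locs = ceil(ln(2)*bits/n).
	bits := -n * math.Log(p) / math.Pow(math.Ln2, 2)
	locs := math.Ceil(math.Ln2 * bits / n)

	// Prints roughly 628000 bits and 7 hash locations, in line with the
	// explicit New(650000.0, 7.0) example above; New() then rounds the bit
	// count up to the next power of two via getSize().
	fmt.Printf("bits ≈ %.0f, locs = %.0f\n", bits, locs)
}
```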
284 vendor/github.com/AndreasBriese/bbloom/bbloom.go (generated, vendored)
@@ -1,284 +0,0 @@
|
||||
// The MIT License (MIT)
|
||||
// Copyright (c) 2014 Andreas Briese, eduToolbox@Bri-C GmbH, Sarstedt
|
||||
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
// this software and associated documentation files (the "Software"), to deal in
|
||||
// the Software without restriction, including without limitation the rights to
|
||||
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
// the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
// subject to the following conditions:
|
||||
|
||||
// The above copyright notice and this permission notice shall be included in all
|
||||
// copies or substantial portions of the Software.
|
||||
|
||||
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
// 2019/08/25 code revision to reduce unsafe use
|
||||
// Parts are adopted from the fork at ipfs/bbloom after performance rev by
|
||||
// Steve Allen (https://github.com/Stebalien)
|
||||
// (see https://github.com/ipfs/bbloom/blob/master/bbloom.go)
|
||||
// -> func Has
|
||||
// -> func set
|
||||
// -> func add
|
||||
|
||||
package bbloom
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"log"
|
||||
"math"
|
||||
"sync"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// helper
|
||||
// not needed anymore by Set
|
||||
// var mask = []uint8{1, 2, 4, 8, 16, 32, 64, 128}
|
||||
|
||||
func getSize(ui64 uint64) (size uint64, exponent uint64) {
|
||||
if ui64 < uint64(512) {
|
||||
ui64 = uint64(512)
|
||||
}
|
||||
size = uint64(1)
|
||||
for size < ui64 {
|
||||
size <<= 1
|
||||
exponent++
|
||||
}
|
||||
return size, exponent
|
||||
}
|
||||
|
||||
func calcSizeByWrongPositives(numEntries, wrongs float64) (uint64, uint64) {
|
||||
size := -1 * numEntries * math.Log(wrongs) / math.Pow(float64(0.69314718056), 2)
|
||||
locs := math.Ceil(float64(0.69314718056) * size / numEntries)
|
||||
return uint64(size), uint64(locs)
|
||||
}
|
||||
|
||||
// New
|
||||
// returns a new bloomfilter
|
||||
func New(params ...float64) (bloomfilter Bloom) {
|
||||
var entries, locs uint64
|
||||
if len(params) == 2 {
|
||||
if params[1] < 1 {
|
||||
entries, locs = calcSizeByWrongPositives(params[0], params[1])
|
||||
} else {
|
||||
entries, locs = uint64(params[0]), uint64(params[1])
|
||||
}
|
||||
} else {
|
||||
log.Fatal("usage: New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(3)) or New(float64(number_of_entries), float64(number_of_hashlocations)) i.e. New(float64(1000), float64(0.03))")
|
||||
}
|
||||
size, exponent := getSize(uint64(entries))
|
||||
bloomfilter = Bloom{
|
||||
Mtx: &sync.Mutex{},
|
||||
sizeExp: exponent,
|
||||
size: size - 1,
|
||||
setLocs: locs,
|
||||
shift: 64 - exponent,
|
||||
}
|
||||
bloomfilter.Size(size)
|
||||
return bloomfilter
|
||||
}
|
||||
|
||||
// NewWithBoolset
|
||||
// takes a []byte slice and number of locs per entry
|
||||
// returns the bloomfilter with a bitset populated according to the input []byte
|
||||
func NewWithBoolset(bs *[]byte, locs uint64) (bloomfilter Bloom) {
|
||||
bloomfilter = New(float64(len(*bs)<<3), float64(locs))
|
||||
for i, b := range *bs {
|
||||
*(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bloomfilter.bitset[0])) + uintptr(i))) = b
|
||||
}
|
||||
return bloomfilter
|
||||
}
|
||||
|
||||
// bloomJSONImExport
|
||||
// Im/Export structure used by JSONMarshal / JSONUnmarshal
|
||||
type bloomJSONImExport struct {
|
||||
FilterSet []byte
|
||||
SetLocs uint64
|
||||
}
|
||||
|
||||
// JSONUnmarshal
|
||||
// takes JSON-Object (type bloomJSONImExport) as []bytes
|
||||
// returns Bloom object
|
||||
func JSONUnmarshal(dbData []byte) Bloom {
|
||||
bloomImEx := bloomJSONImExport{}
|
||||
json.Unmarshal(dbData, &bloomImEx)
|
||||
buf := bytes.NewBuffer(bloomImEx.FilterSet)
|
||||
bs := buf.Bytes()
|
||||
bf := NewWithBoolset(&bs, bloomImEx.SetLocs)
|
||||
return bf
|
||||
}
|
||||
|
||||
//
|
||||
// Bloom filter
|
||||
type Bloom struct {
|
||||
Mtx *sync.Mutex
|
||||
ElemNum uint64
|
||||
bitset []uint64
|
||||
sizeExp uint64
|
||||
size uint64
|
||||
setLocs uint64
|
||||
shift uint64
|
||||
}
|
||||
|
||||
// <--- http://www.cse.yorku.ca/~oz/hash.html
|
||||
// modified Berkeley DB Hash (32bit)
|
||||
// hash is casted to l, h = 16bit fragments
|
||||
// func (bl Bloom) absdbm(b *[]byte) (l, h uint64) {
|
||||
// hash := uint64(len(*b))
|
||||
// for _, c := range *b {
|
||||
// hash = uint64(c) + (hash << 6) + (hash << bl.sizeExp) - hash
|
||||
// }
|
||||
// h = hash >> bl.shift
|
||||
// l = hash << bl.shift >> bl.shift
|
||||
// return l, h
|
||||
// }
|
||||
|
||||
// Update: found sipHash of Jean-Philippe Aumasson & Daniel J. Bernstein to be even faster than absdbm()
|
||||
// https://131002.net/siphash/
|
||||
// siphash was implemented for Go by Dmitry Chestnykh https://github.com/dchest/siphash
|
||||
|
||||
// Add
|
||||
// set the bit(s) for entry; Adds an entry to the Bloom filter
|
||||
func (bl *Bloom) Add(entry []byte) {
|
||||
l, h := bl.sipHash(entry)
|
||||
for i := uint64(0); i < bl.setLocs; i++ {
|
||||
bl.set((h + i*l) & bl.size)
|
||||
bl.ElemNum++
|
||||
}
|
||||
}
|
||||
|
||||
// AddTS
|
||||
// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry
|
||||
func (bl *Bloom) AddTS(entry []byte) {
|
||||
bl.Mtx.Lock()
|
||||
defer bl.Mtx.Unlock()
|
||||
bl.Add(entry)
|
||||
}
|
||||
|
||||
// Has
|
||||
// check if bit(s) for entry is/are set
|
||||
// returns true if the entry was added to the Bloom Filter
|
||||
func (bl Bloom) Has(entry []byte) bool {
|
||||
l, h := bl.sipHash(entry)
|
||||
res := true
|
||||
for i := uint64(0); i < bl.setLocs; i++ {
|
||||
res = res && bl.isSet((h+i*l)&bl.size)
|
||||
// https://github.com/ipfs/bbloom/commit/84e8303a9bfb37b2658b85982921d15bbb0fecff
|
||||
// // Branching here (early escape) is not worth it
|
||||
// // This is my conclusion from benchmarks
|
||||
// // (prevents loop unrolling)
|
||||
// switch bl.IsSet((h + i*l) & bl.size) {
|
||||
// case false:
|
||||
// return false
|
||||
// }
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// HasTS
|
||||
// Thread safe: Mutex.Lock the bloomfilter for the time of processing the entry
|
||||
func (bl *Bloom) HasTS(entry []byte) bool {
|
||||
bl.Mtx.Lock()
|
||||
defer bl.Mtx.Unlock()
|
||||
return bl.Has(entry)
|
||||
}
|
||||
|
||||
// AddIfNotHas
|
||||
// Only Add entry if it's not present in the bloomfilter
|
||||
// returns true if entry was added
|
||||
// returns false if entry was allready registered in the bloomfilter
|
||||
func (bl Bloom) AddIfNotHas(entry []byte) (added bool) {
|
||||
if bl.Has(entry) {
|
||||
return added
|
||||
}
|
||||
bl.Add(entry)
|
||||
return true
|
||||
}
|
||||
|
||||
// AddIfNotHasTS
|
||||
// Tread safe: Only Add entry if it's not present in the bloomfilter
|
||||
// returns true if entry was added
|
||||
// returns false if entry was allready registered in the bloomfilter
|
||||
func (bl *Bloom) AddIfNotHasTS(entry []byte) (added bool) {
|
||||
bl.Mtx.Lock()
|
||||
defer bl.Mtx.Unlock()
|
||||
return bl.AddIfNotHas(entry)
|
||||
}
|
||||
|
||||
// Size
|
||||
// make Bloom filter with as bitset of size sz
|
||||
func (bl *Bloom) Size(sz uint64) {
|
||||
bl.bitset = make([]uint64, sz>>6)
|
||||
}
|
||||
|
||||
// Clear
|
||||
// resets the Bloom filter
|
||||
func (bl *Bloom) Clear() {
|
||||
bs := bl.bitset
|
||||
for i := range bs {
|
||||
bs[i] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Set
|
||||
// set the bit[idx] of bitsit
|
||||
func (bl *Bloom) set(idx uint64) {
|
||||
// ommit unsafe
|
||||
// *(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3))) |= mask[idx%8]
|
||||
bl.bitset[idx>>6] |= 1 << (idx % 64)
|
||||
}
|
||||
|
||||
// IsSet
|
||||
// check if bit[idx] of bitset is set
|
||||
// returns true/false
|
||||
func (bl *Bloom) isSet(idx uint64) bool {
|
||||
// ommit unsafe
|
||||
// return (((*(*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[idx>>6])) + uintptr((idx%64)>>3)))) >> (idx % 8)) & 1) == 1
|
||||
return bl.bitset[idx>>6]&(1<<(idx%64)) != 0
|
||||
}
|
||||
|
||||
// JSONMarshal
|
||||
// returns JSON-object (type bloomJSONImExport) as []byte
|
||||
func (bl Bloom) JSONMarshal() []byte {
|
||||
bloomImEx := bloomJSONImExport{}
|
||||
bloomImEx.SetLocs = uint64(bl.setLocs)
|
||||
bloomImEx.FilterSet = make([]byte, len(bl.bitset)<<3)
|
||||
for i := range bloomImEx.FilterSet {
|
||||
bloomImEx.FilterSet[i] = *(*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(&bl.bitset[0])) + uintptr(i)))
|
||||
}
|
||||
data, err := json.Marshal(bloomImEx)
|
||||
if err != nil {
|
||||
log.Fatal("json.Marshal failed: ", err)
|
||||
}
|
||||
return data
|
||||
}
|
||||
|
||||
// // alternative hashFn
|
||||
// func (bl Bloom) fnv64a(b *[]byte) (l, h uint64) {
|
||||
// h64 := fnv.New64a()
|
||||
// h64.Write(*b)
|
||||
// hash := h64.Sum64()
|
||||
// h = hash >> 32
|
||||
// l = hash << 32 >> 32
|
||||
// return l, h
|
||||
// }
|
||||
//
|
||||
// // <-- http://partow.net/programming/hashfunctions/index.html
|
||||
// // citation: An algorithm proposed by Donald E. Knuth in The Art Of Computer Programming Volume 3,
|
||||
// // under the topic of sorting and search chapter 6.4.
|
||||
// // modified to fit with boolset-length
|
||||
// func (bl Bloom) DEKHash(b *[]byte) (l, h uint64) {
|
||||
// hash := uint64(len(*b))
|
||||
// for _, c := range *b {
|
||||
// hash = ((hash << 5) ^ (hash >> bl.shift)) ^ uint64(c)
|
||||
// }
|
||||
// h = hash >> bl.shift
|
||||
// l = hash << bl.sizeExp >> bl.sizeExp
|
||||
// return l, h
|
||||
// }
|
||||
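The `Add` and `Has` methods above derive all `setLocs` bit positions from a single 64-bit SipHash value by double hashing: the hash is split into a low half `l` and a high half `h`, and position `i` is `(h + i*l) & size`. A standalone sketch of just that indexing step, with hypothetical filter parameters (not part of the package's API):

```go
package main

import "fmt"

// bitPositions mirrors the (h + i*l) & size indexing used by Add/Has above:
// shift selects how the 64-bit hash is split, sizeMask is the bitset size
// minus one (a power of two minus one), and setLocs is the number of hash
// locations per entry.
func bitPositions(hash, shift, setLocs, sizeMask uint64) []uint64 {
	h := hash >> shift
	l := hash << shift >> shift
	idxs := make([]uint64, 0, setLocs)
	for i := uint64(0); i < setLocs; i++ {
		idxs = append(idxs, (h+i*l)&sizeMask)
	}
	return idxs
}

func main() {
	// Hypothetical values: a 2^20-bit filter (shift = 64-20 = 44) and 7 locations.
	fmt.Println(bitPositions(0x9e3779b97f4a7c15, 44, 7, 1<<20-1))
}
```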
225 vendor/github.com/AndreasBriese/bbloom/sipHash.go (generated, vendored)
@@ -1,225 +0,0 @@
|
||||
// Written in 2012 by Dmitry Chestnykh.
|
||||
//
|
||||
// To the extent possible under law, the author have dedicated all copyright
|
||||
// and related and neighboring rights to this software to the public domain
|
||||
// worldwide. This software is distributed without any warranty.
|
||||
// http://creativecommons.org/publicdomain/zero/1.0/
|
||||
//
|
||||
// Package siphash implements SipHash-2-4, a fast short-input PRF
|
||||
// created by Jean-Philippe Aumasson and Daniel J. Bernstein.
|
||||
|
||||
package bbloom
|
||||
|
||||
// Hash returns the 64-bit SipHash-2-4 of the given byte slice with two 64-bit
|
||||
// parts of 128-bit key: k0 and k1.
|
||||
func (bl Bloom) sipHash(p []byte) (l, h uint64) {
|
||||
// Initialization.
|
||||
v0 := uint64(8317987320269560794) // k0 ^ 0x736f6d6570736575
|
||||
v1 := uint64(7237128889637516672) // k1 ^ 0x646f72616e646f6d
|
||||
v2 := uint64(7816392314733513934) // k0 ^ 0x6c7967656e657261
|
||||
v3 := uint64(8387220255325274014) // k1 ^ 0x7465646279746573
|
||||
t := uint64(len(p)) << 56
|
||||
|
||||
// Compression.
|
||||
for len(p) >= 8 {
|
||||
|
||||
m := uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 |
|
||||
uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
|
||||
|
||||
v3 ^= m
|
||||
|
||||
// Round 1.
|
||||
v0 += v1
|
||||
v1 = v1<<13 | v1>>51
|
||||
v1 ^= v0
|
||||
v0 = v0<<32 | v0>>32
|
||||
|
||||
v2 += v3
|
||||
v3 = v3<<16 | v3>>48
|
||||
v3 ^= v2
|
||||
|
||||
v0 += v3
|
||||
v3 = v3<<21 | v3>>43
|
||||
v3 ^= v0
|
||||
|
||||
v2 += v1
|
||||
v1 = v1<<17 | v1>>47
|
||||
v1 ^= v2
|
||||
v2 = v2<<32 | v2>>32
|
||||
|
||||
// Round 2.
|
||||
v0 += v1
|
||||
v1 = v1<<13 | v1>>51
|
||||
v1 ^= v0
|
||||
v0 = v0<<32 | v0>>32
|
||||
|
||||
v2 += v3
|
||||
v3 = v3<<16 | v3>>48
|
||||
v3 ^= v2
|
||||
|
||||
v0 += v3
|
||||
v3 = v3<<21 | v3>>43
|
||||
v3 ^= v0
|
||||
|
||||
v2 += v1
|
||||
v1 = v1<<17 | v1>>47
|
||||
v1 ^= v2
|
||||
v2 = v2<<32 | v2>>32
|
||||
|
||||
v0 ^= m
|
||||
p = p[8:]
|
||||
}
|
||||
|
||||
// Compress last block.
|
||||
switch len(p) {
|
||||
case 7:
|
||||
t |= uint64(p[6]) << 48
|
||||
fallthrough
|
||||
case 6:
|
||||
t |= uint64(p[5]) << 40
|
||||
fallthrough
|
||||
case 5:
|
||||
t |= uint64(p[4]) << 32
|
||||
fallthrough
|
||||
case 4:
|
||||
t |= uint64(p[3]) << 24
|
||||
fallthrough
|
||||
case 3:
|
||||
t |= uint64(p[2]) << 16
|
||||
fallthrough
|
||||
case 2:
|
||||
t |= uint64(p[1]) << 8
|
||||
fallthrough
|
||||
case 1:
|
||||
t |= uint64(p[0])
|
||||
}
|
||||
|
||||
v3 ^= t
|
||||
|
||||
// Round 1.
|
||||
v0 += v1
|
||||
v1 = v1<<13 | v1>>51
|
||||
v1 ^= v0
|
||||
v0 = v0<<32 | v0>>32
|
||||
|
||||
v2 += v3
|
||||
v3 = v3<<16 | v3>>48
|
||||
v3 ^= v2
|
||||
|
||||
v0 += v3
|
||||
v3 = v3<<21 | v3>>43
|
||||
v3 ^= v0
|
||||
|
||||
v2 += v1
|
||||
v1 = v1<<17 | v1>>47
|
||||
v1 ^= v2
|
||||
v2 = v2<<32 | v2>>32
|
||||
|
||||
// Round 2.
|
||||
v0 += v1
|
||||
v1 = v1<<13 | v1>>51
|
||||
v1 ^= v0
|
||||
v0 = v0<<32 | v0>>32
|
||||
|
||||
v2 += v3
|
||||
v3 = v3<<16 | v3>>48
|
||||
v3 ^= v2
|
||||
|
||||
v0 += v3
|
||||
v3 = v3<<21 | v3>>43
|
||||
v3 ^= v0
|
||||
|
||||
v2 += v1
|
||||
v1 = v1<<17 | v1>>47
|
||||
v1 ^= v2
|
||||
v2 = v2<<32 | v2>>32
|
||||
|
||||
v0 ^= t
|
||||
|
||||
// Finalization.
|
||||
v2 ^= 0xff
|
||||
|
||||
// Round 1.
|
||||
v0 += v1
|
||||
v1 = v1<<13 | v1>>51
|
||||
v1 ^= v0
|
||||
v0 = v0<<32 | v0>>32
|
||||
|
||||
v2 += v3
|
||||
v3 = v3<<16 | v3>>48
|
||||
v3 ^= v2
|
||||
|
||||
v0 += v3
|
||||
v3 = v3<<21 | v3>>43
|
||||
v3 ^= v0
|
||||
|
||||
v2 += v1
|
||||
v1 = v1<<17 | v1>>47
|
||||
v1 ^= v2
|
||||
v2 = v2<<32 | v2>>32
|
||||
|
||||
// Round 2.
|
||||
v0 += v1
|
||||
v1 = v1<<13 | v1>>51
|
||||
v1 ^= v0
|
||||
v0 = v0<<32 | v0>>32
|
||||
|
||||
v2 += v3
|
||||
v3 = v3<<16 | v3>>48
|
||||
v3 ^= v2
|
||||
|
||||
v0 += v3
|
||||
v3 = v3<<21 | v3>>43
|
||||
v3 ^= v0
|
||||
|
||||
v2 += v1
|
||||
v1 = v1<<17 | v1>>47
|
||||
v1 ^= v2
|
||||
v2 = v2<<32 | v2>>32
|
||||
|
||||
// Round 3.
|
||||
v0 += v1
|
||||
v1 = v1<<13 | v1>>51
|
||||
v1 ^= v0
|
||||
v0 = v0<<32 | v0>>32
|
||||
|
||||
v2 += v3
|
||||
v3 = v3<<16 | v3>>48
|
||||
v3 ^= v2
|
||||
|
||||
v0 += v3
|
||||
v3 = v3<<21 | v3>>43
|
||||
v3 ^= v0
|
||||
|
||||
v2 += v1
|
||||
v1 = v1<<17 | v1>>47
|
||||
v1 ^= v2
|
||||
v2 = v2<<32 | v2>>32
|
||||
|
||||
// Round 4.
|
||||
v0 += v1
|
||||
v1 = v1<<13 | v1>>51
|
||||
v1 ^= v0
|
||||
v0 = v0<<32 | v0>>32
|
||||
|
||||
v2 += v3
|
||||
v3 = v3<<16 | v3>>48
|
||||
v3 ^= v2
|
||||
|
||||
v0 += v3
|
||||
v3 = v3<<21 | v3>>43
|
||||
v3 ^= v0
|
||||
|
||||
v2 += v1
|
||||
v1 = v1<<17 | v1>>47
|
||||
v1 ^= v2
|
||||
v2 = v2<<32 | v2>>32
|
||||
|
||||
// return v0 ^ v1 ^ v2 ^ v3
|
||||
|
||||
hash := v0 ^ v1 ^ v2 ^ v3
|
||||
h = hash >> bl.shift
|
||||
l = hash << bl.shift >> bl.shift
|
||||
return l, h
|
||||
|
||||
}
|
||||
140 vendor/github.com/AndreasBriese/bbloom/words.txt (generated, vendored)
@@ -1,140 +0,0 @@
|
||||
2014/01/01 00:00:00 /info.html
|
||||
2014/01/01 00:00:00 /info.html
|
||||
2014/01/01 00:00:01 /info.html
|
||||
2014/01/01 00:00:02 /info.html
|
||||
2014/01/01 00:00:03 /info.html
|
||||
2014/01/01 00:00:04 /info.html
|
||||
2014/01/01 00:00:05 /info.html
|
||||
2014/01/01 00:00:06 /info.html
|
||||
2014/01/01 00:00:07 /info.html
|
||||
2014/01/01 00:00:08 /info.html
|
||||
2014/01/01 00:00:09 /info.html
|
||||
2014/01/01 00:00:10 /info.html
|
||||
2014/01/01 00:00:11 /info.html
|
||||
2014/01/01 00:00:12 /info.html
|
||||
2014/01/01 00:00:13 /info.html
|
||||
2014/01/01 00:00:14 /info.html
|
||||
2014/01/01 00:00:15 /info.html
|
||||
2014/01/01 00:00:16 /info.html
|
||||
2014/01/01 00:00:17 /info.html
|
||||
2014/01/01 00:00:18 /info.html
|
||||
2014/01/01 00:00:19 /info.html
|
||||
2014/01/01 00:00:20 /info.html
|
||||
2014/01/01 00:00:21 /info.html
|
||||
2014/01/01 00:00:22 /info.html
|
||||
2014/01/01 00:00:23 /info.html
|
||||
2014/01/01 00:00:24 /info.html
|
||||
2014/01/01 00:00:25 /info.html
|
||||
2014/01/01 00:00:26 /info.html
|
||||
2014/01/01 00:00:27 /info.html
|
||||
2014/01/01 00:00:28 /info.html
|
||||
2014/01/01 00:00:29 /info.html
|
||||
2014/01/01 00:00:30 /info.html
|
||||
2014/01/01 00:00:31 /info.html
|
||||
2014/01/01 00:00:32 /info.html
|
||||
2014/01/01 00:00:33 /info.html
|
||||
2014/01/01 00:00:34 /info.html
|
||||
2014/01/01 00:00:35 /info.html
|
||||
2014/01/01 00:00:36 /info.html
|
||||
2014/01/01 00:00:37 /info.html
|
||||
2014/01/01 00:00:38 /info.html
|
||||
2014/01/01 00:00:39 /info.html
|
||||
2014/01/01 00:00:40 /info.html
|
||||
2014/01/01 00:00:41 /info.html
|
||||
2014/01/01 00:00:42 /info.html
|
||||
2014/01/01 00:00:43 /info.html
|
||||
2014/01/01 00:00:44 /info.html
|
||||
2014/01/01 00:00:45 /info.html
|
||||
2014/01/01 00:00:46 /info.html
|
||||
2014/01/01 00:00:47 /info.html
|
||||
2014/01/01 00:00:48 /info.html
|
||||
2014/01/01 00:00:49 /info.html
|
||||
2014/01/01 00:00:50 /info.html
|
||||
2014/01/01 00:00:51 /info.html
|
||||
2014/01/01 00:00:52 /info.html
|
||||
2014/01/01 00:00:53 /info.html
|
||||
2014/01/01 00:00:54 /info.html
|
||||
2014/01/01 00:00:55 /info.html
|
||||
2014/01/01 00:00:56 /info.html
|
||||
2014/01/01 00:00:57 /info.html
|
||||
2014/01/01 00:00:58 /info.html
|
||||
2014/01/01 00:00:59 /info.html
|
||||
2014/01/01 00:01:00 /info.html
|
||||
2014/01/01 00:01:01 /info.html
|
||||
2014/01/01 00:01:02 /info.html
|
||||
2014/01/01 00:01:03 /info.html
|
||||
2014/01/01 00:01:04 /info.html
|
||||
2014/01/01 00:01:05 /info.html
|
||||
2014/01/01 00:01:06 /info.html
|
||||
2014/01/01 00:01:07 /info.html
|
||||
2014/01/01 00:01:08 /info.html
|
||||
2014/01/01 00:01:09 /info.html
|
||||
2014/01/01 00:01:10 /info.html
|
||||
2014/01/01 00:01:11 /info.html
|
||||
2014/01/01 00:01:12 /info.html
|
||||
2014/01/01 00:01:13 /info.html
|
||||
2014/01/01 00:01:14 /info.html
|
||||
2014/01/01 00:01:15 /info.html
|
||||
2014/01/01 00:01:16 /info.html
|
||||
2014/01/01 00:01:17 /info.html
|
||||
2014/01/01 00:01:18 /info.html
|
||||
2014/01/01 00:01:19 /info.html
|
||||
2014/01/01 00:01:20 /info.html
|
||||
2014/01/01 00:01:21 /info.html
|
||||
2014/01/01 00:01:22 /info.html
|
||||
2014/01/01 00:01:23 /info.html
|
||||
2014/01/01 00:01:24 /info.html
|
||||
2014/01/01 00:01:25 /info.html
|
||||
2014/01/01 00:01:26 /info.html
|
||||
2014/01/01 00:01:27 /info.html
|
||||
2014/01/01 00:01:28 /info.html
|
||||
2014/01/01 00:01:29 /info.html
|
||||
2014/01/01 00:01:30 /info.html
|
||||
2014/01/01 00:01:31 /info.html
|
||||
2014/01/01 00:01:32 /info.html
|
||||
2014/01/01 00:01:33 /info.html
|
||||
2014/01/01 00:01:34 /info.html
|
||||
2014/01/01 00:01:35 /info.html
|
||||
2014/01/01 00:01:36 /info.html
|
||||
2014/01/01 00:01:37 /info.html
|
||||
2014/01/01 00:01:38 /info.html
|
||||
2014/01/01 00:01:39 /info.html
|
||||
2014/01/01 00:01:40 /info.html
|
||||
2014/01/01 00:01:41 /info.html
|
||||
2014/01/01 00:01:42 /info.html
|
||||
2014/01/01 00:01:43 /info.html
|
||||
2014/01/01 00:01:44 /info.html
|
||||
2014/01/01 00:01:45 /info.html
|
||||
2014/01/01 00:01:46 /info.html
|
||||
2014/01/01 00:01:47 /info.html
|
||||
2014/01/01 00:01:48 /info.html
|
||||
2014/01/01 00:01:49 /info.html
|
||||
2014/01/01 00:01:50 /info.html
|
||||
2014/01/01 00:01:51 /info.html
|
||||
2014/01/01 00:01:52 /info.html
|
||||
2014/01/01 00:01:53 /info.html
|
||||
2014/01/01 00:01:54 /info.html
|
||||
2014/01/01 00:01:55 /info.html
|
||||
2014/01/01 00:01:56 /info.html
|
||||
2014/01/01 00:01:57 /info.html
|
||||
2014/01/01 00:01:58 /info.html
|
||||
2014/01/01 00:01:59 /info.html
|
||||
2014/01/01 00:02:00 /info.html
|
||||
2014/01/01 00:02:01 /info.html
|
||||
2014/01/01 00:02:02 /info.html
|
||||
2014/01/01 00:02:03 /info.html
|
||||
2014/01/01 00:02:04 /info.html
|
||||
2014/01/01 00:02:05 /info.html
|
||||
2014/01/01 00:02:06 /info.html
|
||||
2014/01/01 00:02:07 /info.html
|
||||
2014/01/01 00:02:08 /info.html
|
||||
2014/01/01 00:02:09 /info.html
|
||||
2014/01/01 00:02:10 /info.html
|
||||
2014/01/01 00:02:11 /info.html
|
||||
2014/01/01 00:02:12 /info.html
|
||||
2014/01/01 00:02:13 /info.html
|
||||
2014/01/01 00:02:14 /info.html
|
||||
2014/01/01 00:02:15 /info.html
|
||||
2014/01/01 00:02:16 /info.html
|
||||
2014/01/01 00:02:17 /info.html
|
||||
2014/01/01 00:02:18 /info.html
|
||||
24 vendor/github.com/alicebob/gopher-json/LICENSE (generated, vendored)
@@ -1,24 +0,0 @@
|
||||
This is free and unencumbered software released into the public domain.
|
||||
|
||||
Anyone is free to copy, modify, publish, use, compile, sell, or
|
||||
distribute this software, either in source code form or as a compiled
|
||||
binary, for any purpose, commercial or non-commercial, and by any
|
||||
means.
|
||||
|
||||
In jurisdictions that recognize copyright laws, the author or authors
|
||||
of this software dedicate any and all copyright interest in the
|
||||
software to the public domain. We make this dedication for the benefit
|
||||
of the public at large and to the detriment of our heirs and
|
||||
successors. We intend this dedication to be an overt act of
|
||||
relinquishment in perpetuity of all present and future rights to this
|
||||
software under copyright law.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
For more information, please refer to <http://unlicense.org/>
|
||||
7 vendor/github.com/alicebob/gopher-json/README.md (generated, vendored)

@@ -1,7 +0,0 @@
-# gopher-json [](https://godoc.org/layeh.com/gopher-json)
-
-Package json is a simple JSON encoder/decoder for [gopher-lua](https://github.com/yuin/gopher-lua).
-
-## License
-
-Public domain
33 vendor/github.com/alicebob/gopher-json/doc.go (generated, vendored)
@@ -1,33 +0,0 @@
|
||||
// Package json is a simple JSON encoder/decoder for gopher-lua.
|
||||
//
|
||||
// Documentation
|
||||
//
|
||||
// The following functions are exposed by the library:
|
||||
// decode(string): Decodes a JSON string. Returns nil and an error string if
|
||||
// the string could not be decoded.
|
||||
// encode(value): Encodes a value into a JSON string. Returns nil and an error
|
||||
// string if the value could not be encoded.
|
||||
//
|
||||
// The following types are supported:
|
||||
//
|
||||
// Lua | JSON
|
||||
// ---------+-----
|
||||
// nil | null
|
||||
// number | number
|
||||
// string | string
|
||||
// table | object: when table is non-empty and has only string keys
|
||||
// | array: when table is empty, or has only sequential numeric keys
|
||||
// | starting from 1
|
||||
//
|
||||
// Attempting to encode any other Lua type will result in an error.
|
||||
//
|
||||
// Example
|
||||
//
|
||||
// Below is an example usage of the library:
|
||||
// import (
|
||||
// luajson "layeh.com/gopher-json"
|
||||
// )
|
||||
//
|
||||
// L := lua.NewState()
|
||||
// luajson.Preload(s)
|
||||
package json
|
||||
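A hedged usage sketch of the module documented above, assuming the `layeh.com/gopher-json` import path from the doc comment and the gopher-lua API imported by json.go below; it preloads the module and round-trips a value through `json.decode` and `json.encode`:

```go
package main

import (
	"fmt"

	lua "github.com/yuin/gopher-lua"
	luajson "layeh.com/gopher-json"
)

func main() {
	L := lua.NewState()
	defer L.Close()

	// Makes require("json") available to Lua code run in this state.
	luajson.Preload(L)

	err := L.DoString(`
		local json = require("json")
		local t = json.decode('{"broker":"mochi","qos":1}')
		print(json.encode({t.broker, t.qos}))
	`)
	if err != nil {
		fmt.Println("lua error:", err)
	}
}
```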
189 vendor/github.com/alicebob/gopher-json/json.go (generated, vendored)
@@ -1,189 +0,0 @@
|
||||
package json
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
|
||||
"github.com/yuin/gopher-lua"
|
||||
)
|
||||
|
||||
// Preload adds json to the given Lua state's package.preload table. After it
|
||||
// has been preloaded, it can be loaded using require:
|
||||
//
|
||||
// local json = require("json")
|
||||
func Preload(L *lua.LState) {
|
||||
L.PreloadModule("json", Loader)
|
||||
}
|
||||
|
||||
// Loader is the module loader function.
|
||||
func Loader(L *lua.LState) int {
|
||||
t := L.NewTable()
|
||||
L.SetFuncs(t, api)
|
||||
L.Push(t)
|
||||
return 1
|
||||
}
|
||||
|
||||
var api = map[string]lua.LGFunction{
|
||||
"decode": apiDecode,
|
||||
"encode": apiEncode,
|
||||
}
|
||||
|
||||
func apiDecode(L *lua.LState) int {
|
||||
if L.GetTop() != 1 {
|
||||
L.Error(lua.LString("bad argument #1 to decode"), 1)
|
||||
return 0
|
||||
}
|
||||
str := L.CheckString(1)
|
||||
|
||||
value, err := Decode(L, []byte(str))
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(err.Error()))
|
||||
return 2
|
||||
}
|
||||
L.Push(value)
|
||||
return 1
|
||||
}
|
||||
|
||||
func apiEncode(L *lua.LState) int {
|
||||
if L.GetTop() != 1 {
|
||||
L.Error(lua.LString("bad argument #1 to encode"), 1)
|
||||
return 0
|
||||
}
|
||||
value := L.CheckAny(1)
|
||||
|
||||
data, err := Encode(value)
|
||||
if err != nil {
|
||||
L.Push(lua.LNil)
|
||||
L.Push(lua.LString(err.Error()))
|
||||
return 2
|
||||
}
|
||||
L.Push(lua.LString(string(data)))
|
||||
return 1
|
||||
}
|
||||
|
||||
var (
|
||||
errNested = errors.New("cannot encode recursively nested tables to JSON")
|
||||
errSparseArray = errors.New("cannot encode sparse array")
|
||||
errInvalidKeys = errors.New("cannot encode mixed or invalid key types")
|
||||
)
|
||||
|
||||
type invalidTypeError lua.LValueType
|
||||
|
||||
func (i invalidTypeError) Error() string {
|
||||
return `cannot encode ` + lua.LValueType(i).String() + ` to JSON`
|
||||
}
|
||||
|
||||
// Encode returns the JSON encoding of value.
|
||||
func Encode(value lua.LValue) ([]byte, error) {
|
||||
return json.Marshal(jsonValue{
|
||||
LValue: value,
|
||||
visited: make(map[*lua.LTable]bool),
|
||||
})
|
||||
}
|
||||
|
||||
type jsonValue struct {
|
||||
lua.LValue
|
||||
visited map[*lua.LTable]bool
|
||||
}
|
||||
|
||||
func (j jsonValue) MarshalJSON() (data []byte, err error) {
|
||||
switch converted := j.LValue.(type) {
|
||||
case lua.LBool:
|
||||
data, err = json.Marshal(bool(converted))
|
||||
case lua.LNumber:
|
||||
data, err = json.Marshal(float64(converted))
|
||||
case *lua.LNilType:
|
||||
data = []byte(`null`)
|
||||
case lua.LString:
|
||||
data, err = json.Marshal(string(converted))
|
||||
case *lua.LTable:
|
||||
if j.visited[converted] {
|
||||
return nil, errNested
|
||||
}
|
||||
j.visited[converted] = true
|
||||
|
||||
key, value := converted.Next(lua.LNil)
|
||||
|
||||
switch key.Type() {
|
||||
case lua.LTNil: // empty table
|
||||
data = []byte(`[]`)
|
||||
case lua.LTNumber:
|
||||
arr := make([]jsonValue, 0, converted.Len())
|
||||
expectedKey := lua.LNumber(1)
|
||||
for key != lua.LNil {
|
||||
if key.Type() != lua.LTNumber {
|
||||
err = errInvalidKeys
|
||||
return
|
||||
}
|
||||
if expectedKey != key {
|
||||
err = errSparseArray
|
||||
return
|
||||
}
|
||||
arr = append(arr, jsonValue{value, j.visited})
|
||||
expectedKey++
|
||||
key, value = converted.Next(key)
|
||||
}
|
||||
data, err = json.Marshal(arr)
|
||||
case lua.LTString:
|
||||
obj := make(map[string]jsonValue)
|
||||
for key != lua.LNil {
|
||||
if key.Type() != lua.LTString {
|
||||
err = errInvalidKeys
|
||||
return
|
||||
}
|
||||
obj[key.String()] = jsonValue{value, j.visited}
|
||||
key, value = converted.Next(key)
|
||||
}
|
||||
data, err = json.Marshal(obj)
|
||||
default:
|
||||
err = errInvalidKeys
|
||||
}
|
||||
default:
|
||||
err = invalidTypeError(j.LValue.Type())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Decode converts the JSON encoded data to Lua values.
|
||||
func Decode(L *lua.LState, data []byte) (lua.LValue, error) {
|
||||
var value interface{}
|
||||
err := json.Unmarshal(data, &value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return DecodeValue(L, value), nil
|
||||
}
|
||||
|
||||
// DecodeValue converts the value to a Lua value.
|
||||
//
|
||||
// This function only converts values that the encoding/json package decodes to.
|
||||
// All other values will return lua.LNil.
|
||||
func DecodeValue(L *lua.LState, value interface{}) lua.LValue {
|
||||
switch converted := value.(type) {
|
||||
case bool:
|
||||
return lua.LBool(converted)
|
||||
case float64:
|
||||
return lua.LNumber(converted)
|
||||
case string:
|
||||
return lua.LString(converted)
|
||||
case json.Number:
|
||||
return lua.LString(converted)
|
||||
case []interface{}:
|
||||
arr := L.CreateTable(len(converted), 0)
|
||||
for _, item := range converted {
|
||||
arr.Append(DecodeValue(L, item))
|
||||
}
|
||||
return arr
|
||||
case map[string]interface{}:
|
||||
tbl := L.CreateTable(0, len(converted))
|
||||
for key, item := range converted {
|
||||
tbl.RawSetH(lua.LString(key), DecodeValue(L, item))
|
||||
}
|
||||
return tbl
|
||||
case nil:
|
||||
return lua.LNil
|
||||
}
|
||||
|
||||
return lua.LNil
|
||||
}
|
||||
6 vendor/github.com/alicebob/miniredis/v2/.gitignore (generated, vendored)

@@ -1,6 +0,0 @@
-/integration/redis_src/
-/integration/dump.rdb
-*.swp
-/integration/nodes.conf
-.idea/
-miniredis.iml
225 vendor/github.com/alicebob/miniredis/v2/CHANGELOG.md (generated, vendored)
@@ -1,225 +0,0 @@
|
||||
## Changelog
|
||||
|
||||
|
||||
### v2.23.0
|
||||
|
||||
- basic INFO support (thanks @kirill-a-belov)
|
||||
- support COUNT in SSCAN (thanks @Abdi-dd)
|
||||
- test and support Go 1.19
|
||||
- support LPOS (thanks @ianstarz)
|
||||
- support XPENDING, XGROUP {CREATECONSUMER,DESTROY,DELCONSUMER}, XINFO {CONSUMERS,GROUPS}, XCLAIM (thanks @sandyharvie)
|
||||
|
||||
|
||||
### v2.22.0
|
||||
|
||||
- set miniredis.DumpMaxLineLen to get more Dump() info (thanks @afjoseph)
|
||||
- fix invalid resposne of COMMAND (thanks @zsh1995)
|
||||
- fix possibility to generate duplicate IDs in XADD (thanks @readams)
|
||||
- adds support for XAUTOCLAIM min-idle parameter (thanks @readams)
|
||||
|
||||
|
||||
### v2.21.0
|
||||
|
||||
- support for GETEX (thanks @dntj)
|
||||
- support for GT and LT in ZADD (thanks @lsgndln)
|
||||
- support for XAUTOCLAIM (thanks @randall-fulton)
|
||||
|
||||
|
||||
### v2.20.0
|
||||
|
||||
- back to support Go >= 1.14 (thanks @ajatprabha and @marcind)
|
||||
|
||||
|
||||
### v2.19.0
|
||||
|
||||
- support for TYPE in SCAN (thanks @0xDiddi)
|
||||
- update BITPOS (thanks @dirkm)
|
||||
- fix a lua redis.call() return value (thanks @mpetronic)
|
||||
- update ZRANGE (thanks @valdemarpereira)
|
||||
|
||||
|
||||
### v2.18.0
|
||||
|
||||
- support for ZUNION (thanks @propan)
|
||||
- support for COPY (thanks @matiasinsaurralde and @rockitbaby)
|
||||
- support for LMOVE (thanks @btwear)
|
||||
|
||||
|
||||
### v2.17.0
|
||||
|
||||
- added miniredis.RunT(t)
|
||||
|
||||
|
||||
### v2.16.1
|
||||
|
||||
- fix ZINTERSTORE with wets (thanks @lingjl2010 and @okhowang)
|
||||
- fix exclusive ranges in XRANGE (thanks @joseotoro)
|
||||
|
||||
|
||||
### v2.16.0
|
||||
|
||||
- simplify some code (thanks @zonque)
|
||||
- support for EXAT/PXAT in SET
|
||||
- support for XTRIM (thanks @joseotoro)
|
||||
- support for ZRANDMEMBER
|
||||
- support for redis.log() in lua (thanks @dirkm)
|
||||
|
||||
|
||||
### v2.15.2
|
||||
|
||||
- Fix race condition in blocking code (thanks @zonque and @robx)
|
||||
- XREAD accepts '$' as ID (thanks @bradengroom)
|
||||
|
||||
|
||||
### v2.15.1
|
||||
|
||||
- EVAL should cache the script (thanks @guoshimin)
|
||||
|
||||
|
||||
### v2.15.0
|
||||
|
||||
- target redis 6.2 and added new args to various commands
|
||||
- support for all hyperlog commands (thanks @ilbaktin)
|
||||
- support for GETDEL (thanks @wszaranski)
|
||||
|
||||
|
||||
### v2.14.5
|
||||
|
||||
- added XPENDING
|
||||
- support for BLOCK option in XREAD and XREADGROUP
|
||||
|
||||
|
||||
### v2.14.4
|
||||
|
||||
- fix BITPOS error (thanks @xiaoyuzdy)
|
||||
- small fixes for XREAD, XACK, and XDEL. Mostly error cases.
|
||||
- fix empty EXEC return type (thanks @ashanbrown)
|
||||
- fix XDEL (thanks @svakili and @yvesf)
|
||||
- fix FLUSHALL for streams (thanks @svakili)
|
||||
|
||||
|
||||
### v2.14.3
|
||||
|
||||
- fix problem where Lua code didn't set the selected DB
|
||||
- update to redis 6.0.10 (thanks @lazappa)
|
||||
|
||||
|
||||
### v2.14.2
|
||||
|
||||
- update LUA dependency
|
||||
- deal with (p)unsubscribe when there are no channels
|
||||
|
||||
|
||||
### v2.14.1
|
||||
|
||||
- mod tidy
|
||||
|
||||
|
||||
### v2.14.0
|
||||
|
||||
- support for HELLO and the RESP3 protocol
|
||||
- KEEPTTL in SET (thanks @johnpena)
|
||||
|
||||
|
||||
### v2.13.3
|
||||
|
||||
- support Go 1.14 and 1.15
|
||||
- update the `Check...()` methods
|
||||
- support for XREAD (thanks @pieterlexis)
|
||||
|
||||
|
||||
### v2.13.2
|
||||
|
||||
- Use SAN instead of CN in self signed cert for testing (thanks @johejo)
|
||||
- Travis CI now tests against the most recent two versions of Go (thanks @johejo)
|
||||
- changed unit and integration tests to compare raw payloads, not parsed payloads
|
||||
- remove "redigo" dependency
|
||||
|
||||
|
||||
### v2.13.1
|
||||
|
||||
- added HSTRLEN
|
||||
- minimal support for ACL users in AUTH
|
||||
|
||||
|
||||
### v2.13.0
|
||||
|
||||
- added RunTLS(...)
|
||||
- added SetError(...)
|
||||
|
||||
|
||||
### v2.12.0
|
||||
|
||||
- redis 6
|
||||
- Lua json update (thanks @gsmith85)
|
||||
- CLUSTER commands (thanks @kratisto)
|
||||
- fix TOUCH
|
||||
- fix a shutdown race condition
|
||||
|
||||
|
||||
### v2.11.4
|
||||
|
||||
- ZUNIONSTORE now supports standard set types (thanks @wshirey)
|
||||
|
||||
|
||||
### v2.11.3
|
||||
|
||||
- support for TOUCH (thanks @cleroux)
|
||||
- support for cluster and stream commands (thanks @kak-tus)
|
||||
|
||||
|
||||
### v2.11.2
|
||||
|
||||
- make sure Lua code is executed concurrently
|
||||
- add command GEORADIUSBYMEMBER (thanks @kyeett)
|
||||
|
||||
|
||||
### v2.11.1
|
||||
|
||||
- globals protection for Lua code (thanks @vk-outreach)
|
||||
- HSET update (thanks @carlgreen)
|
||||
- fix BLPOP block on shutdown (thanks @Asalle)
|
||||
|
||||
|
||||
### v2.11.0
|
||||
|
||||
- added XRANGE/XREVRANGE, XADD, and XLEN (thanks @skateinmars)
|
||||
- added GEODIST
|
||||
- improved precision for geohashes, closer to what real redis does
|
||||
- use 128bit floats internally for INCRBYFLOAT and related (thanks @timnd)
|
||||
|
||||
|
||||
### v2.10.1
|
||||
|
||||
- added m.Server()
|
||||
|
||||
|
||||
### v2.10.0
|
||||
|
||||
- added UNLINK
|
||||
- fix DEL zero-argument case
|
||||
- cleanup some direct access commands
|
||||
- added GEOADD, GEOPOS, GEORADIUS, and GEORADIUS_RO
|
||||
|
||||
|
||||
### v2.9.1
|
||||
|
||||
- fix issue with ZRANGEBYLEX
|
||||
- fix issue with BRPOPLPUSH and direct access
|
||||
|
||||
|
||||
### v2.9.0
|
||||
|
||||
- proper versioned import of github.com/gomodule/redigo (thanks @yfei1)
|
||||
- fix messages generated by PSUBSCRIBE
|
||||
- optional internal seed (thanks @zikaeroh)
|
||||
|
||||
|
||||
### v2.8.0
|
||||
|
||||
Proper `v2` in go.mod.
|
||||
|
||||
|
||||
### older
|
||||
|
||||
See https://github.com/alicebob/miniredis/releases for the full changelog
|
||||
21 vendor/github.com/alicebob/miniredis/v2/LICENSE (generated, vendored)
@@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Harmen
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
12 vendor/github.com/alicebob/miniredis/v2/Makefile (generated, vendored)

@@ -1,12 +0,0 @@
-.PHONY: all test testrace int
-
-all: test
-
-test:
-	go test ./...
-
-testrace:
-	go test -race ./...
-
-int:
-	${MAKE} -C integration all
333 vendor/github.com/alicebob/miniredis/v2/README.md (generated, vendored)
@@ -1,333 +0,0 @@
|
||||
# Miniredis
|
||||
|
||||
Pure Go Redis test server, used in Go unittests.
|
||||
|
||||
|
||||
##
|
||||
|
||||
Sometimes you want to test code which uses Redis, without making it a full-blown
|
||||
integration test.
|
||||
Miniredis implements (parts of) the Redis server, to be used in unittests. It
|
||||
enables a simple, cheap, in-memory, Redis replacement, with a real TCP interface. Think of it as the Redis version of `net/http/httptest`.
|
||||
|
||||
It saves you from using mock code, and since the redis server lives in the
|
||||
test process you can query for values directly, without going through the server
|
||||
stack.
|
||||
|
||||
There are no dependencies on external binaries, so you can easily integrate it in automated build processes.
|
||||
|
||||
Be sure to import v2:
|
||||
```
|
||||
import "github.com/alicebob/miniredis/v2"
|
||||
```
|
||||
|
||||
## Commands
|
||||
|
||||
Implemented commands:
|
||||
|
||||
- Connection (complete)
|
||||
- AUTH -- see RequireAuth()
|
||||
- ECHO
|
||||
- HELLO -- see RequireUserAuth()
|
||||
- PING
|
||||
- SELECT
|
||||
- SWAPDB
|
||||
- QUIT
|
||||
- Key
|
||||
- COPY
|
||||
- DEL
|
||||
- EXISTS
|
||||
- EXPIRE
|
||||
- EXPIREAT
|
||||
- KEYS
|
||||
- MOVE
|
||||
- PERSIST
|
||||
- PEXPIRE
|
||||
- PEXPIREAT
|
||||
- PTTL
|
||||
- RENAME
|
||||
- RENAMENX
|
||||
- RANDOMKEY -- see m.Seed(...)
|
||||
- SCAN
|
||||
- TOUCH
|
||||
- TTL
|
||||
- TYPE
|
||||
- UNLINK
|
||||
- Transactions (complete)
|
||||
- DISCARD
|
||||
- EXEC
|
||||
- MULTI
|
||||
- UNWATCH
|
||||
- WATCH
|
||||
- Server
|
||||
- DBSIZE
|
||||
- FLUSHALL
|
||||
- FLUSHDB
|
||||
- TIME -- returns time.Now() or value set by SetTime()
|
||||
- COMMAND -- partly
|
||||
- INFO -- partly, returns only "clients" section with one field "connected_clients"
|
||||
- String keys (complete)
|
||||
- APPEND
|
||||
- BITCOUNT
|
||||
- BITOP
|
||||
- BITPOS
|
||||
- DECR
|
||||
- DECRBY
|
||||
- GET
|
||||
- GETBIT
|
||||
- GETRANGE
|
||||
- GETSET
|
||||
- GETDEL
|
||||
- GETEX
|
||||
- INCR
|
||||
- INCRBY
|
||||
- INCRBYFLOAT
|
||||
- MGET
|
||||
- MSET
|
||||
- MSETNX
|
||||
- PSETEX
|
||||
- SET
|
||||
- SETBIT
|
||||
- SETEX
|
||||
- SETNX
|
||||
- SETRANGE
|
||||
- STRLEN
|
||||
- Hash keys (complete)
|
||||
- HDEL
|
||||
- HEXISTS
|
||||
- HGET
|
||||
- HGETALL
|
||||
- HINCRBY
|
||||
- HINCRBYFLOAT
|
||||
- HKEYS
|
||||
- HLEN
|
||||
- HMGET
|
||||
- HMSET
|
||||
- HSET
|
||||
- HSETNX
|
||||
- HSTRLEN
|
||||
- HVALS
|
||||
- HSCAN
|
||||
- List keys (complete)
|
||||
- BLPOP
|
||||
- BRPOP
|
||||
- BRPOPLPUSH
|
||||
- LINDEX
|
||||
- LINSERT
|
||||
- LLEN
|
||||
- LPOP
|
||||
- LPUSH
|
||||
- LPUSHX
|
||||
- LRANGE
|
||||
- LREM
|
||||
- LSET
|
||||
- LTRIM
|
||||
- RPOP
|
||||
- RPOPLPUSH
|
||||
- RPUSH
|
||||
- RPUSHX
|
||||
- LMOVE
|
||||
- Pub/Sub (complete)
|
||||
- PSUBSCRIBE
|
||||
- PUBLISH
|
||||
- PUBSUB
|
||||
- PUNSUBSCRIBE
|
||||
- SUBSCRIBE
|
||||
- UNSUBSCRIBE
|
||||
- Set keys (complete)
|
||||
- SADD
|
||||
- SCARD
|
||||
- SDIFF
|
||||
- SDIFFSTORE
|
||||
- SINTER
|
||||
- SINTERSTORE
|
||||
- SISMEMBER
|
||||
- SMEMBERS
|
||||
- SMOVE
|
||||
- SPOP -- see m.Seed(...)
|
||||
- SRANDMEMBER -- see m.Seed(...)
|
||||
- SREM
|
||||
- SUNION
|
||||
- SUNIONSTORE
|
||||
- SSCAN
|
||||
- Sorted Set keys (complete)
|
||||
- ZADD
|
||||
- ZCARD
|
||||
- ZCOUNT
|
||||
- ZINCRBY
|
||||
- ZINTERSTORE
|
||||
- ZLEXCOUNT
|
||||
- ZPOPMIN
|
||||
- ZPOPMAX
|
||||
- ZRANDMEMBER
|
||||
- ZRANGE
|
||||
- ZRANGEBYLEX
|
||||
- ZRANGEBYSCORE
|
||||
- ZRANK
|
||||
- ZREM
|
||||
- ZREMRANGEBYLEX
|
||||
- ZREMRANGEBYRANK
|
||||
- ZREMRANGEBYSCORE
|
||||
- ZREVRANGE
|
||||
- ZREVRANGEBYLEX
|
||||
- ZREVRANGEBYSCORE
|
||||
- ZREVRANK
|
||||
- ZSCORE
|
||||
- ZUNION
|
||||
- ZUNIONSTORE
|
||||
- ZSCAN
|
||||
- Stream keys
|
||||
- XACK
|
||||
- XADD
|
||||
- XAUTOCLAIM
|
||||
- XCLAIM
|
||||
- XDEL
|
||||
- XGROUP CREATE
|
||||
- XGROUP CREATECONSUMER
|
||||
- XGROUP DESTROY
|
||||
- XGROUP DELCONSUMER
|
||||
- XINFO STREAM -- partly
|
||||
- XINFO GROUPS
|
||||
- XINFO CONSUMERS -- partly
|
||||
- XLEN
|
||||
- XRANGE
|
||||
- XREAD
|
||||
- XREADGROUP
|
||||
- XREVRANGE
|
||||
- XPENDING
|
||||
- XTRIM
|
||||
- Scripting
|
||||
- EVAL
|
||||
- EVALSHA
|
||||
- SCRIPT LOAD
|
||||
- SCRIPT EXISTS
|
||||
- SCRIPT FLUSH
|
||||
- GEO
|
||||
- GEOADD
|
||||
- GEODIST
|
||||
- ~~GEOHASH~~
|
||||
- GEOPOS
|
||||
- GEORADIUS
|
||||
- GEORADIUS_RO
|
||||
- GEORADIUSBYMEMBER
|
||||
- GEORADIUSBYMEMBER_RO
|
||||
- Cluster
|
||||
- CLUSTER SLOTS
|
||||
- CLUSTER KEYSLOT
|
||||
- CLUSTER NODES
|
||||
- HyperLogLog (complete)
|
||||
- PFADD
|
||||
- PFCOUNT
|
||||
- PFMERGE
|
||||
|
||||
|
||||
## TTLs, key expiration, and time
|
||||
|
||||
Since miniredis is intended to be used in unittests TTLs don't decrease
|
||||
automatically. You can use `TTL()` to get the TTL (as a time.Duration) of a
|
||||
key. It will return 0 when no TTL is set.
|
||||
|
||||
`m.FastForward(d)` can be used to decrement all TTLs. All TTLs which become <=
|
||||
0 will be removed.
|
||||
|
||||
EXPIREAT and PEXPIREAT values will be
|
||||
converted to a duration. For that you can either set m.SetTime(t) to use that
|
||||
time as the base for the (P)EXPIREAT conversion, or don't call SetTime(), in
|
||||
which case time.Now() will be used.
|
||||
|
||||
SetTime() also sets the value returned by TIME, which defaults to time.Now().
|
||||
It is not updated by FastForward, only by SetTime.
|
||||
|
||||
## Randomness and Seed()
|
||||
|
||||
Miniredis will use `math/rand`'s global RNG for randomness unless a seed is
|
||||
provided by calling `m.Seed(...)`. If a seed is provided, then miniredis will
|
||||
use its own RNG based on that seed.
|
||||
|
||||
Commands which use randomness are: RANDOMKEY, SPOP, and SRANDMEMBER.
|
||||
|
||||
## Example
|
||||
|
||||
``` Go
|
||||
|
||||
import (
|
||||
...
|
||||
"github.com/alicebob/miniredis/v2"
|
||||
...
|
||||
)
|
||||
|
||||
func TestSomething(t *testing.T) {
|
||||
s := miniredis.RunT(t)
|
||||
|
||||
// Optionally set some keys your code expects:
|
||||
s.Set("foo", "bar")
|
||||
s.HSet("some", "other", "key")
|
||||
|
||||
// Run your code and see if it behaves.
|
||||
// An example using the redigo library from "github.com/gomodule/redigo/redis":
|
||||
c, err := redis.Dial("tcp", s.Addr())
|
||||
_, err = c.Do("SET", "foo", "bar")
|
||||
|
||||
// Optionally check values in redis...
|
||||
if got, err := s.Get("foo"); err != nil || got != "bar" {
|
||||
t.Error("'foo' has the wrong value")
|
||||
}
|
||||
// ... or use a helper for that:
|
||||
s.CheckGet(t, "foo", "bar")
|
||||
|
||||
// TTL and expiration:
|
||||
s.Set("foo", "bar")
|
||||
s.SetTTL("foo", 10*time.Second)
|
||||
s.FastForward(11 * time.Second)
|
||||
if s.Exists("foo") {
|
||||
t.Fatal("'foo' should not have existed anymore")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Not supported
|
||||
|
||||
Commands which will probably not be implemented:
|
||||
|
||||
- CLUSTER (all)
|
||||
- ~~CLUSTER *~~
|
||||
- ~~READONLY~~
|
||||
- ~~READWRITE~~
|
||||
- Key
|
||||
- ~~DUMP~~
|
||||
- ~~MIGRATE~~
|
||||
- ~~OBJECT~~
|
||||
- ~~RESTORE~~
|
||||
- ~~WAIT~~
|
||||
- Scripting
|
||||
- ~~SCRIPT DEBUG~~
|
||||
- ~~SCRIPT KILL~~
|
||||
- Server
|
||||
- ~~BGSAVE~~
|
||||
- ~~BGWRITEAOF~~
|
||||
- ~~CLIENT *~~
|
||||
- ~~CONFIG *~~
|
||||
- ~~DEBUG *~~
|
||||
- ~~LASTSAVE~~
|
||||
- ~~MONITOR~~
|
||||
- ~~ROLE~~
|
||||
- ~~SAVE~~
|
||||
- ~~SHUTDOWN~~
|
||||
- ~~SLAVEOF~~
|
||||
- ~~SLOWLOG~~
|
||||
- ~~SYNC~~
|
||||
|
||||
|
||||
## &c.
|
||||
|
||||
Integration tests are run against Redis 6.2.6. The [./integration](./integration/) subdir
|
||||
compares miniredis against a real redis instance.
|
||||
|
||||
The Redis 6 RESP3 protocol is supported. If there are problems, please open
|
||||
an issue.
|
||||
|
||||
If you want to test Redis Sentinel have a look at [minisentinel](https://github.com/Bose/minisentinel).
|
||||
|
||||
A changelog is kept at [CHANGELOG.md](https://github.com/alicebob/miniredis/blob/master/CHANGELOG.md).
|
||||
|
||||
[](https://pkg.go.dev/github.com/alicebob/miniredis/v2)
|
||||
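The clock behaviour described in the "TTLs, key expiration, and time" section above can be exercised without any Redis client, using the same direct-access helpers that appear in the example (`Set`, `SetTTL`, `FastForward`, `Exists`); a small hedged sketch:

```go
package main

import (
	"fmt"
	"time"

	"github.com/alicebob/miniredis/v2"
)

func main() {
	m, err := miniredis.Run()
	if err != nil {
		panic(err)
	}
	defer m.Close()

	if err := m.Set("session", "abc123"); err != nil {
		panic(err)
	}
	m.SetTTL("session", 30*time.Second)

	// TTLs never decrease on their own; advance the fake clock explicitly.
	m.FastForward(31 * time.Second)

	fmt.Println(m.Exists("session")) // false: the TTL dropped to <= 0 and the key was removed
}
```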
63 vendor/github.com/alicebob/miniredis/v2/check.go (generated, vendored)
@@ -1,63 +0,0 @@
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// T is implemented by Testing.T
|
||||
type T interface {
|
||||
Helper()
|
||||
Errorf(string, ...interface{})
|
||||
}
|
||||
|
||||
// CheckGet does not call Errorf() iff there is a string key with the
|
||||
// expected value. Normal use case is `m.CheckGet(t, "username", "theking")`.
|
||||
func (m *Miniredis) CheckGet(t T, key, expected string) {
|
||||
t.Helper()
|
||||
|
||||
found, err := m.Get(key)
|
||||
if err != nil {
|
||||
t.Errorf("GET error, key %#v: %v", key, err)
|
||||
return
|
||||
}
|
||||
if found != expected {
|
||||
t.Errorf("GET error, key %#v: Expected %#v, got %#v", key, expected, found)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// CheckList does not call Errorf() iff there is a list key with the
|
||||
// expected values.
|
||||
// Normal use case is `m.CheckGet(t, "favorite_colors", "red", "green", "infrared")`.
|
||||
func (m *Miniredis) CheckList(t T, key string, expected ...string) {
|
||||
t.Helper()
|
||||
|
||||
found, err := m.List(key)
|
||||
if err != nil {
|
||||
t.Errorf("List error, key %#v: %v", key, err)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(expected, found) {
|
||||
t.Errorf("List error, key %#v: Expected %#v, got %#v", key, expected, found)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// CheckSet does not call Errorf() iff there is a set key with the
|
||||
// expected values.
|
||||
// Normal use case is `m.CheckSet(t, "visited", "Rome", "Stockholm", "Dublin")`.
|
||||
func (m *Miniredis) CheckSet(t T, key string, expected ...string) {
|
||||
t.Helper()
|
||||
|
||||
found, err := m.Members(key)
|
||||
if err != nil {
|
||||
t.Errorf("Set error, key %#v: %v", key, err)
|
||||
return
|
||||
}
|
||||
sort.Strings(expected)
|
||||
if !reflect.DeepEqual(expected, found) {
|
||||
t.Errorf("Set error, key %#v: Expected %#v, got %#v", key, expected, found)
|
||||
return
|
||||
}
|
||||
}
|
||||
67 vendor/github.com/alicebob/miniredis/v2/cmd_cluster.go generated vendored
@@ -1,67 +0,0 @@
// Commands from https://redis.io/commands#cluster

package miniredis

import (
	"fmt"
	"strings"

	"github.com/alicebob/miniredis/v2/server"
)

// commandsCluster handles some cluster operations.
func commandsCluster(m *Miniredis) {
	m.srv.Register("CLUSTER", m.cmdCluster)
}

func (m *Miniredis) cmdCluster(c *server.Peer, cmd string, args []string) {
	if !m.handleAuth(c) {
		return
	}

	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	switch strings.ToUpper(args[0]) {
	case "SLOTS":
		m.cmdClusterSlots(c, cmd, args)
	case "KEYSLOT":
		m.cmdClusterKeySlot(c, cmd, args)
	case "NODES":
		m.cmdClusterNodes(c, cmd, args)
	default:
		setDirty(c)
		c.WriteError(fmt.Sprintf("ERR 'CLUSTER %s' not supported", strings.Join(args, " ")))
		return
	}
}

// CLUSTER SLOTS
func (m *Miniredis) cmdClusterSlots(c *server.Peer, cmd string, args []string) {
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		c.WriteLen(1)
		c.WriteLen(3)
		c.WriteInt(0)
		c.WriteInt(16383)
		c.WriteLen(3)
		c.WriteBulk(m.srv.Addr().IP.String())
		c.WriteInt(m.srv.Addr().Port)
		c.WriteBulk("09dbe9720cda62f7865eabc5fd8857c5d2678366")
	})
}

// CLUSTER KEYSLOT
func (m *Miniredis) cmdClusterKeySlot(c *server.Peer, cmd string, args []string) {
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		c.WriteInt(163)
	})
}

// CLUSTER NODES
func (m *Miniredis) cmdClusterNodes(c *server.Peer, cmd string, args []string) {
	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		c.WriteBulk("e7d1eecce10fd6bb5eb35b9f99a514335d9ba9ca 127.0.0.1:7000@7000 myself,master - 0 0 1 connected 0-16383")
	})
}
2045 vendor/github.com/alicebob/miniredis/v2/cmd_command.go generated vendored
File diff suppressed because it is too large
284 vendor/github.com/alicebob/miniredis/v2/cmd_connection.go generated vendored
@@ -1,284 +0,0 @@
|
||||
// Commands from https://redis.io/commands#connection
|
||||
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
func commandsConnection(m *Miniredis) {
|
||||
m.srv.Register("AUTH", m.cmdAuth)
|
||||
m.srv.Register("ECHO", m.cmdEcho)
|
||||
m.srv.Register("HELLO", m.cmdHello)
|
||||
m.srv.Register("PING", m.cmdPing)
|
||||
m.srv.Register("QUIT", m.cmdQuit)
|
||||
m.srv.Register("SELECT", m.cmdSelect)
|
||||
m.srv.Register("SWAPDB", m.cmdSwapdb)
|
||||
}
|
||||
|
||||
// PING
|
||||
func (m *Miniredis) cmdPing(c *server.Peer, cmd string, args []string) {
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
|
||||
if len(args) > 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
|
||||
payload := ""
|
||||
if len(args) > 0 {
|
||||
payload = args[0]
|
||||
}
|
||||
|
||||
// PING is allowed in subscribed state
|
||||
if sub := getCtx(c).subscriber; sub != nil {
|
||||
c.Block(func(c *server.Writer) {
|
||||
c.WriteLen(2)
|
||||
c.WriteBulk("pong")
|
||||
c.WriteBulk(payload)
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
if payload == "" {
|
||||
c.WriteInline("PONG")
|
||||
return
|
||||
}
|
||||
c.WriteBulk(payload)
|
||||
})
|
||||
}
|
||||
|
||||
// AUTH
|
||||
func (m *Miniredis) cmdAuth(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
|
||||
if len(args) > 2 {
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
if getCtx(c).nested {
|
||||
c.WriteError(msgNotFromScripts)
|
||||
return
|
||||
}
|
||||
|
||||
var opts = struct {
|
||||
username string
|
||||
password string
|
||||
}{
|
||||
username: "default",
|
||||
password: args[0],
|
||||
}
|
||||
if len(args) == 2 {
|
||||
opts.username, opts.password = args[0], args[1]
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
if len(m.passwords) == 0 && opts.username == "default" {
|
||||
c.WriteError("ERR AUTH <password> called without any password configured for the default user. Are you sure your configuration is correct?")
|
||||
return
|
||||
}
|
||||
setPW, ok := m.passwords[opts.username]
|
||||
if !ok {
|
||||
c.WriteError("WRONGPASS invalid username-password pair")
|
||||
return
|
||||
}
|
||||
if setPW != opts.password {
|
||||
c.WriteError("WRONGPASS invalid username-password pair")
|
||||
return
|
||||
}
|
||||
|
||||
ctx.authenticated = true
|
||||
c.WriteOK()
|
||||
})
|
||||
}
|
||||
|
||||
// HELLO
|
||||
func (m *Miniredis) cmdHello(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 1 {
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
|
||||
var opts struct {
|
||||
version int
|
||||
username string
|
||||
password string
|
||||
}
|
||||
|
||||
if ok := optIntErr(c, args[0], &opts.version, "ERR Protocol version is not an integer or out of range"); !ok {
|
||||
return
|
||||
}
|
||||
args = args[1:]
|
||||
|
||||
switch opts.version {
|
||||
case 2, 3:
|
||||
default:
|
||||
c.WriteError("NOPROTO unsupported protocol version")
|
||||
return
|
||||
}
|
||||
|
||||
var checkAuth bool
|
||||
for len(args) > 0 {
|
||||
switch strings.ToUpper(args[0]) {
|
||||
case "AUTH":
|
||||
if len(args) < 3 {
|
||||
c.WriteError(fmt.Sprintf("ERR Syntax error in HELLO option '%s'", args[0]))
|
||||
return
|
||||
}
|
||||
opts.username, opts.password, args = args[1], args[2], args[3:]
|
||||
checkAuth = true
|
||||
case "SETNAME":
|
||||
if len(args) < 2 {
|
||||
c.WriteError(fmt.Sprintf("ERR Syntax error in HELLO option '%s'", args[0]))
|
||||
return
|
||||
}
|
||||
_, args = args[1], args[2:]
|
||||
default:
|
||||
c.WriteError(fmt.Sprintf("ERR Syntax error in HELLO option '%s'", args[0]))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if len(m.passwords) == 0 && opts.username == "default" {
|
||||
// redis ignores legacy "AUTH" if it's not enabled.
|
||||
checkAuth = false
|
||||
}
|
||||
if checkAuth {
|
||||
setPW, ok := m.passwords[opts.username]
|
||||
if !ok {
|
||||
c.WriteError("WRONGPASS invalid username-password pair")
|
||||
return
|
||||
}
|
||||
if setPW != opts.password {
|
||||
c.WriteError("WRONGPASS invalid username-password pair")
|
||||
return
|
||||
}
|
||||
getCtx(c).authenticated = true
|
||||
}
|
||||
|
||||
c.Resp3 = opts.version == 3
|
||||
|
||||
c.WriteMapLen(7)
|
||||
c.WriteBulk("server")
|
||||
c.WriteBulk("miniredis")
|
||||
c.WriteBulk("version")
|
||||
c.WriteBulk("6.0.5")
|
||||
c.WriteBulk("proto")
|
||||
c.WriteInt(opts.version)
|
||||
c.WriteBulk("id")
|
||||
c.WriteInt(42)
|
||||
c.WriteBulk("mode")
|
||||
c.WriteBulk("standalone")
|
||||
c.WriteBulk("role")
|
||||
c.WriteBulk("master")
|
||||
c.WriteBulk("modules")
|
||||
c.WriteLen(0)
|
||||
}
|
||||
|
||||
// ECHO
|
||||
func (m *Miniredis) cmdEcho(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
msg := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
c.WriteBulk(msg)
|
||||
})
|
||||
}
|
||||
|
||||
// SELECT
|
||||
func (m *Miniredis) cmdSelect(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.isValidCMD(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
var opts struct {
|
||||
id int
|
||||
}
|
||||
if ok := optInt(c, args[0], &opts.id); !ok {
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
if opts.id < 0 {
|
||||
c.WriteError(msgDBIndexOutOfRange)
|
||||
setDirty(c)
|
||||
return
|
||||
}
|
||||
|
||||
ctx.selectedDB = opts.id
|
||||
c.WriteOK()
|
||||
})
|
||||
}
|
||||
|
||||
// SWAPDB
|
||||
func (m *Miniredis) cmdSwapdb(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
|
||||
var opts struct {
|
||||
id1 int
|
||||
id2 int
|
||||
}
|
||||
|
||||
if ok := optIntErr(c, args[0], &opts.id1, "ERR invalid first DB index"); !ok {
|
||||
return
|
||||
}
|
||||
if ok := optIntErr(c, args[1], &opts.id2, "ERR invalid second DB index"); !ok {
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
if opts.id1 < 0 || opts.id2 < 0 {
|
||||
c.WriteError(msgDBIndexOutOfRange)
|
||||
setDirty(c)
|
||||
return
|
||||
}
|
||||
|
||||
m.swapDB(opts.id1, opts.id2)
|
||||
|
||||
c.WriteOK()
|
||||
})
|
||||
}
|
||||
|
||||
// QUIT
|
||||
func (m *Miniredis) cmdQuit(c *server.Peer, cmd string, args []string) {
|
||||
// QUIT isn't transactionfied and accepts any arguments.
|
||||
c.WriteOK()
|
||||
c.Close()
|
||||
}
|
||||
669 vendor/github.com/alicebob/miniredis/v2/cmd_generic.go generated vendored
@@ -1,669 +0,0 @@
|
||||
// Commands from https://redis.io/commands#generic
|
||||
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
// commandsGeneric handles EXPIRE, TTL, PERSIST, &c.
|
||||
func commandsGeneric(m *Miniredis) {
|
||||
m.srv.Register("COPY", m.cmdCopy)
|
||||
m.srv.Register("DEL", m.cmdDel)
|
||||
// DUMP
|
||||
m.srv.Register("EXISTS", m.cmdExists)
|
||||
m.srv.Register("EXPIRE", makeCmdExpire(m, false, time.Second))
|
||||
m.srv.Register("EXPIREAT", makeCmdExpire(m, true, time.Second))
|
||||
m.srv.Register("KEYS", m.cmdKeys)
|
||||
// MIGRATE
|
||||
m.srv.Register("MOVE", m.cmdMove)
|
||||
// OBJECT
|
||||
m.srv.Register("PERSIST", m.cmdPersist)
|
||||
m.srv.Register("PEXPIRE", makeCmdExpire(m, false, time.Millisecond))
|
||||
m.srv.Register("PEXPIREAT", makeCmdExpire(m, true, time.Millisecond))
|
||||
m.srv.Register("PTTL", m.cmdPTTL)
|
||||
m.srv.Register("RANDOMKEY", m.cmdRandomkey)
|
||||
m.srv.Register("RENAME", m.cmdRename)
|
||||
m.srv.Register("RENAMENX", m.cmdRenamenx)
|
||||
// RESTORE
|
||||
m.srv.Register("TOUCH", m.cmdTouch)
|
||||
m.srv.Register("TTL", m.cmdTTL)
|
||||
m.srv.Register("TYPE", m.cmdType)
|
||||
m.srv.Register("SCAN", m.cmdScan)
|
||||
// SORT
|
||||
m.srv.Register("UNLINK", m.cmdDel)
|
||||
}
|
||||
|
||||
// generic expire command for EXPIRE, PEXPIRE, EXPIREAT, PEXPIREAT
|
||||
// d is the time unit. If unix is set it'll be seen as a unixtimestamp and
|
||||
// converted to a duration.
|
||||
func makeCmdExpire(m *Miniredis, unix bool, d time.Duration) func(*server.Peer, string, []string) {
|
||||
return func(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
var opts struct {
|
||||
key string
|
||||
value int
|
||||
}
|
||||
opts.key = args[0]
|
||||
if ok := optInt(c, args[1], &opts.value); !ok {
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
// Key must be present.
|
||||
if _, ok := db.keys[opts.key]; !ok {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
if unix {
|
||||
db.ttl[opts.key] = m.at(opts.value, d)
|
||||
} else {
|
||||
db.ttl[opts.key] = time.Duration(opts.value) * d
|
||||
}
|
||||
db.keyVersion[opts.key]++
|
||||
db.checkTTL(opts.key)
|
||||
c.WriteInt(1)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TOUCH
|
||||
func (m *Miniredis) cmdTouch(c *server.Peer, cmd string, args []string) {
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
count := 0
|
||||
for _, key := range args {
|
||||
if db.exists(key) {
|
||||
count++
|
||||
}
|
||||
}
|
||||
c.WriteInt(count)
|
||||
})
|
||||
}
|
||||
|
||||
// TTL
|
||||
func (m *Miniredis) cmdTTL(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if _, ok := db.keys[key]; !ok {
|
||||
// No such key
|
||||
c.WriteInt(-2)
|
||||
return
|
||||
}
|
||||
|
||||
v, ok := db.ttl[key]
|
||||
if !ok {
|
||||
// no expire value
|
||||
c.WriteInt(-1)
|
||||
return
|
||||
}
|
||||
c.WriteInt(int(v.Seconds()))
|
||||
})
|
||||
}
|
||||
|
||||
// PTTL
|
||||
func (m *Miniredis) cmdPTTL(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if _, ok := db.keys[key]; !ok {
|
||||
// no such key
|
||||
c.WriteInt(-2)
|
||||
return
|
||||
}
|
||||
|
||||
v, ok := db.ttl[key]
|
||||
if !ok {
|
||||
// no expire value
|
||||
c.WriteInt(-1)
|
||||
return
|
||||
}
|
||||
c.WriteInt(int(v.Nanoseconds() / 1000000))
|
||||
})
|
||||
}
|
||||
|
||||
// PERSIST
|
||||
func (m *Miniredis) cmdPersist(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if _, ok := db.keys[key]; !ok {
|
||||
// no such key
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
|
||||
if _, ok := db.ttl[key]; !ok {
|
||||
// no expire value
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
delete(db.ttl, key)
|
||||
db.keyVersion[key]++
|
||||
c.WriteInt(1)
|
||||
})
|
||||
}
|
||||
|
||||
// DEL and UNLINK
|
||||
func (m *Miniredis) cmdDel(c *server.Peer, cmd string, args []string) {
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
if len(args) == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
count := 0
|
||||
for _, key := range args {
|
||||
if db.exists(key) {
|
||||
count++
|
||||
}
|
||||
db.del(key, true) // delete expire
|
||||
}
|
||||
c.WriteInt(count)
|
||||
})
|
||||
}
|
||||
|
||||
// TYPE
|
||||
func (m *Miniredis) cmdType(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError("usage error")
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
t, ok := db.keys[key]
|
||||
if !ok {
|
||||
c.WriteInline("none")
|
||||
return
|
||||
}
|
||||
|
||||
c.WriteInline(t)
|
||||
})
|
||||
}
|
||||
|
||||
// EXISTS
|
||||
func (m *Miniredis) cmdExists(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
found := 0
|
||||
for _, k := range args {
|
||||
if db.exists(k) {
|
||||
found++
|
||||
}
|
||||
}
|
||||
c.WriteInt(found)
|
||||
})
|
||||
}
|
||||
|
||||
// MOVE
|
||||
func (m *Miniredis) cmdMove(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
var opts struct {
|
||||
key string
|
||||
targetDB int
|
||||
}
|
||||
|
||||
opts.key = args[0]
|
||||
opts.targetDB, _ = strconv.Atoi(args[1])
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
if ctx.selectedDB == opts.targetDB {
|
||||
c.WriteError("ERR source and destination objects are the same")
|
||||
return
|
||||
}
|
||||
db := m.db(ctx.selectedDB)
|
||||
targetDB := m.db(opts.targetDB)
|
||||
|
||||
if !db.move(opts.key, targetDB) {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
c.WriteInt(1)
|
||||
})
|
||||
}
|
||||
|
||||
// KEYS
|
||||
func (m *Miniredis) cmdKeys(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
keys, _ := matchKeys(db.allKeys(), key)
|
||||
c.WriteLen(len(keys))
|
||||
for _, s := range keys {
|
||||
c.WriteBulk(s)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// RANDOMKEY
|
||||
func (m *Miniredis) cmdRandomkey(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if len(db.keys) == 0 {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
nr := m.randIntn(len(db.keys))
|
||||
for k := range db.keys {
|
||||
if nr == 0 {
|
||||
c.WriteBulk(k)
|
||||
return
|
||||
}
|
||||
nr--
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// RENAME
|
||||
func (m *Miniredis) cmdRename(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
opts := struct {
|
||||
from string
|
||||
to string
|
||||
}{
|
||||
from: args[0],
|
||||
to: args[1],
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(opts.from) {
|
||||
c.WriteError(msgKeyNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
db.rename(opts.from, opts.to)
|
||||
c.WriteOK()
|
||||
})
|
||||
}
|
||||
|
||||
// RENAMENX
|
||||
func (m *Miniredis) cmdRenamenx(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
opts := struct {
|
||||
from string
|
||||
to string
|
||||
}{
|
||||
from: args[0],
|
||||
to: args[1],
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(opts.from) {
|
||||
c.WriteError(msgKeyNotFound)
|
||||
return
|
||||
}
|
||||
|
||||
if db.exists(opts.to) {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
|
||||
db.rename(opts.from, opts.to)
|
||||
c.WriteInt(1)
|
||||
})
|
||||
}
|
||||
|
||||
// SCAN
|
||||
func (m *Miniredis) cmdScan(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
var opts struct {
|
||||
cursor int
|
||||
withMatch bool
|
||||
match string
|
||||
withType bool
|
||||
_type string
|
||||
}
|
||||
|
||||
if ok := optIntErr(c, args[0], &opts.cursor, msgInvalidCursor); !ok {
|
||||
return
|
||||
}
|
||||
args = args[1:]
|
||||
|
||||
// MATCH, COUNT and TYPE options
|
||||
for len(args) > 0 {
|
||||
if strings.ToLower(args[0]) == "count" {
|
||||
// we do nothing with count
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
if _, err := strconv.Atoi(args[1]); err != nil {
|
||||
setDirty(c)
|
||||
c.WriteError(msgInvalidInt)
|
||||
return
|
||||
}
|
||||
args = args[2:]
|
||||
continue
|
||||
}
|
||||
if strings.ToLower(args[0]) == "match" {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
opts.withMatch = true
|
||||
opts.match, args = args[1], args[2:]
|
||||
continue
|
||||
}
|
||||
if strings.ToLower(args[0]) == "type" {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
opts.withType = true
|
||||
opts._type, args = strings.ToLower(args[1]), args[2:]
|
||||
continue
|
||||
}
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
// We return _all_ (matched) keys every time.
|
||||
|
||||
if opts.cursor != 0 {
|
||||
// Invalid cursor.
|
||||
c.WriteLen(2)
|
||||
c.WriteBulk("0") // no next cursor
|
||||
c.WriteLen(0) // no elements
|
||||
return
|
||||
}
|
||||
|
||||
var keys []string
|
||||
|
||||
if opts.withType {
|
||||
keys = make([]string, 0)
|
||||
for k, t := range db.keys {
|
||||
// type must be given exactly; no pattern matching is performed
|
||||
if t == opts._type {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
}
|
||||
sort.Strings(keys) // To make things deterministic.
|
||||
} else {
|
||||
keys = db.allKeys()
|
||||
}
|
||||
|
||||
if opts.withMatch {
|
||||
keys, _ = matchKeys(keys, opts.match)
|
||||
}
|
||||
|
||||
c.WriteLen(2)
|
||||
c.WriteBulk("0") // no next cursor
|
||||
c.WriteLen(len(keys))
|
||||
for _, k := range keys {
|
||||
c.WriteBulk(k)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// COPY
|
||||
func (m *Miniredis) cmdCopy(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
var opts = struct {
|
||||
from string
|
||||
to string
|
||||
destinationDB int
|
||||
replace bool
|
||||
}{
|
||||
destinationDB: -1,
|
||||
}
|
||||
|
||||
opts.from, opts.to, args = args[0], args[1], args[2:]
|
||||
for len(args) > 0 {
|
||||
switch strings.ToLower(args[0]) {
|
||||
case "db":
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
db, err := strconv.Atoi(args[1])
|
||||
if err != nil {
|
||||
setDirty(c)
|
||||
c.WriteError(msgInvalidInt)
|
||||
return
|
||||
}
|
||||
if db < 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgDBIndexOutOfRange)
|
||||
return
|
||||
}
|
||||
opts.destinationDB = db
|
||||
args = args[2:]
|
||||
case "replace":
|
||||
opts.replace = true
|
||||
args = args[1:]
|
||||
default:
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
fromDB, toDB := ctx.selectedDB, opts.destinationDB
|
||||
if toDB == -1 {
|
||||
toDB = fromDB
|
||||
}
|
||||
|
||||
if fromDB == toDB && opts.from == opts.to {
|
||||
c.WriteError("ERR source and destination objects are the same")
|
||||
return
|
||||
}
|
||||
|
||||
if !m.db(fromDB).exists(opts.from) {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
|
||||
if !opts.replace {
|
||||
if m.db(toDB).exists(opts.to) {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
m.copy(m.db(fromDB), opts.from, m.db(toDB), opts.to)
|
||||
c.WriteInt(1)
|
||||
})
|
||||
}
|
||||
609 vendor/github.com/alicebob/miniredis/v2/cmd_geo.go generated vendored
@@ -1,609 +0,0 @@
|
||||
// Commands from https://redis.io/commands#geo
|
||||
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
// commandsGeo handles GEOADD, GEORADIUS etc.
|
||||
func commandsGeo(m *Miniredis) {
|
||||
m.srv.Register("GEOADD", m.cmdGeoadd)
|
||||
m.srv.Register("GEODIST", m.cmdGeodist)
|
||||
m.srv.Register("GEOPOS", m.cmdGeopos)
|
||||
m.srv.Register("GEORADIUS", m.cmdGeoradius)
|
||||
m.srv.Register("GEORADIUS_RO", m.cmdGeoradius)
|
||||
m.srv.Register("GEORADIUSBYMEMBER", m.cmdGeoradiusbymember)
|
||||
m.srv.Register("GEORADIUSBYMEMBER_RO", m.cmdGeoradiusbymember)
|
||||
}
|
||||
|
||||
// GEOADD
|
||||
func (m *Miniredis) cmdGeoadd(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 3 || len(args[1:])%3 != 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
key, args := args[0], args[1:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if db.exists(key) && db.t(key) != "zset" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
|
||||
toSet := map[string]float64{}
|
||||
for len(args) > 2 {
|
||||
rawLong, rawLat, name := args[0], args[1], args[2]
|
||||
args = args[3:]
|
||||
longitude, err := strconv.ParseFloat(rawLong, 64)
|
||||
if err != nil {
|
||||
c.WriteError("ERR value is not a valid float")
|
||||
return
|
||||
}
|
||||
latitude, err := strconv.ParseFloat(rawLat, 64)
|
||||
if err != nil {
|
||||
c.WriteError("ERR value is not a valid float")
|
||||
return
|
||||
}
|
||||
|
||||
if latitude < -85.05112878 ||
|
||||
latitude > 85.05112878 ||
|
||||
longitude < -180 ||
|
||||
longitude > 180 {
|
||||
c.WriteError(fmt.Sprintf("ERR invalid longitude,latitude pair %.6f,%.6f", longitude, latitude))
|
||||
return
|
||||
}
|
||||
|
||||
toSet[name] = float64(toGeohash(longitude, latitude))
|
||||
}
|
||||
|
||||
set := 0
|
||||
for name, score := range toSet {
|
||||
if db.ssetAdd(key, score, name) {
|
||||
set++
|
||||
}
|
||||
}
|
||||
c.WriteInt(set)
|
||||
})
|
||||
}
|
||||
|
||||
// GEODIST
|
||||
func (m *Miniredis) cmdGeodist(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 3 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key, from, to, args := args[0], args[1], args[2], args[3:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
if !db.exists(key) {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
if db.t(key) != "zset" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
|
||||
unit := "m"
|
||||
if len(args) > 0 {
|
||||
unit, args = args[0], args[1:]
|
||||
}
|
||||
if len(args) > 0 {
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
|
||||
toMeter := parseUnit(unit)
|
||||
if toMeter == 0 {
|
||||
c.WriteError(msgUnsupportedUnit)
|
||||
return
|
||||
}
|
||||
|
||||
members := db.sortedsetKeys[key]
|
||||
fromD, okFrom := members.get(from)
|
||||
toD, okTo := members.get(to)
|
||||
if !okFrom || !okTo {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
|
||||
fromLo, fromLat := fromGeohash(uint64(fromD))
|
||||
toLo, toLat := fromGeohash(uint64(toD))
|
||||
|
||||
dist := distance(fromLat, fromLo, toLat, toLo) / toMeter
|
||||
c.WriteBulk(fmt.Sprintf("%.4f", dist))
|
||||
})
|
||||
}
|
||||
|
||||
// GEOPOS
|
||||
func (m *Miniredis) cmdGeopos(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
key, args := args[0], args[1:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if db.exists(key) && db.t(key) != "zset" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
|
||||
c.WriteLen(len(args))
|
||||
for _, l := range args {
|
||||
if !db.ssetExists(key, l) {
|
||||
c.WriteLen(-1)
|
||||
continue
|
||||
}
|
||||
score := db.ssetScore(key, l)
|
||||
c.WriteLen(2)
|
||||
long, lat := fromGeohash(uint64(score))
|
||||
c.WriteBulk(fmt.Sprintf("%f", long))
|
||||
c.WriteBulk(fmt.Sprintf("%f", lat))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
type geoDistance struct {
|
||||
Name string
|
||||
Score float64
|
||||
Distance float64
|
||||
Longitude float64
|
||||
Latitude float64
|
||||
}
|
||||
|
||||
// GEORADIUS and GEORADIUS_RO
|
||||
func (m *Miniredis) cmdGeoradius(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 5 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
longitude, err := strconv.ParseFloat(args[1], 64)
|
||||
if err != nil {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
latitude, err := strconv.ParseFloat(args[2], 64)
|
||||
if err != nil {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
radius, err := strconv.ParseFloat(args[3], 64)
|
||||
if err != nil || radius < 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
toMeter := parseUnit(args[4])
|
||||
if toMeter == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
args = args[5:]
|
||||
|
||||
var opts struct {
|
||||
withDist bool
|
||||
withCoord bool
|
||||
direction direction // unsorted
|
||||
count int
|
||||
withStore bool
|
||||
storeKey string
|
||||
withStoredist bool
|
||||
storedistKey string
|
||||
}
|
||||
for len(args) > 0 {
|
||||
arg := args[0]
|
||||
args = args[1:]
|
||||
switch strings.ToUpper(arg) {
|
||||
case "WITHCOORD":
|
||||
opts.withCoord = true
|
||||
case "WITHDIST":
|
||||
opts.withDist = true
|
||||
case "ASC":
|
||||
opts.direction = asc
|
||||
case "DESC":
|
||||
opts.direction = desc
|
||||
case "COUNT":
|
||||
if len(args) == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError("ERR syntax error")
|
||||
return
|
||||
}
|
||||
n, err := strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
setDirty(c)
|
||||
c.WriteError(msgInvalidInt)
|
||||
return
|
||||
}
|
||||
if n <= 0 {
|
||||
setDirty(c)
|
||||
c.WriteError("ERR COUNT must be > 0")
|
||||
return
|
||||
}
|
||||
args = args[1:]
|
||||
opts.count = n
|
||||
case "STORE":
|
||||
if len(args) == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError("ERR syntax error")
|
||||
return
|
||||
}
|
||||
opts.withStore = true
|
||||
opts.storeKey = args[0]
|
||||
args = args[1:]
|
||||
case "STOREDIST":
|
||||
if len(args) == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError("ERR syntax error")
|
||||
return
|
||||
}
|
||||
opts.withStoredist = true
|
||||
opts.storedistKey = args[0]
|
||||
args = args[1:]
|
||||
default:
|
||||
setDirty(c)
|
||||
c.WriteError("ERR syntax error")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if strings.ToUpper(cmd) == "GEORADIUS_RO" && (opts.withStore || opts.withStoredist) {
|
||||
setDirty(c)
|
||||
c.WriteError("ERR syntax error")
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
if (opts.withStore || opts.withStoredist) && (opts.withDist || opts.withCoord) {
|
||||
c.WriteError("ERR STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options")
|
||||
return
|
||||
}
|
||||
|
||||
db := m.db(ctx.selectedDB)
|
||||
members := db.ssetElements(key)
|
||||
|
||||
matches := withinRadius(members, longitude, latitude, radius*toMeter)
|
||||
|
||||
// deal with ASC/DESC
|
||||
if opts.direction != unsorted {
|
||||
sort.Slice(matches, func(i, j int) bool {
|
||||
if opts.direction == desc {
|
||||
return matches[i].Distance > matches[j].Distance
|
||||
}
|
||||
return matches[i].Distance < matches[j].Distance
|
||||
})
|
||||
}
|
||||
|
||||
// deal with COUNT
|
||||
if opts.count > 0 && len(matches) > opts.count {
|
||||
matches = matches[:opts.count]
|
||||
}
|
||||
|
||||
// deal with "STORE x"
|
||||
if opts.withStore {
|
||||
db.del(opts.storeKey, true)
|
||||
for _, member := range matches {
|
||||
db.ssetAdd(opts.storeKey, member.Score, member.Name)
|
||||
}
|
||||
c.WriteInt(len(matches))
|
||||
return
|
||||
}
|
||||
|
||||
// deal with "STOREDIST x"
|
||||
if opts.withStoredist {
|
||||
db.del(opts.storedistKey, true)
|
||||
for _, member := range matches {
|
||||
db.ssetAdd(opts.storedistKey, member.Distance/toMeter, member.Name)
|
||||
}
|
||||
c.WriteInt(len(matches))
|
||||
return
|
||||
}
|
||||
|
||||
c.WriteLen(len(matches))
|
||||
for _, member := range matches {
|
||||
if !opts.withDist && !opts.withCoord {
|
||||
c.WriteBulk(member.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
len := 1
|
||||
if opts.withDist {
|
||||
len++
|
||||
}
|
||||
if opts.withCoord {
|
||||
len++
|
||||
}
|
||||
c.WriteLen(len)
|
||||
c.WriteBulk(member.Name)
|
||||
if opts.withDist {
|
||||
c.WriteBulk(fmt.Sprintf("%.4f", member.Distance/toMeter))
|
||||
}
|
||||
if opts.withCoord {
|
||||
c.WriteLen(2)
|
||||
c.WriteBulk(fmt.Sprintf("%f", member.Longitude))
|
||||
c.WriteBulk(fmt.Sprintf("%f", member.Latitude))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// GEORADIUSBYMEMBER and GEORADIUSBYMEMBER_RO
|
||||
func (m *Miniredis) cmdGeoradiusbymember(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 4 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
opts := struct {
|
||||
key string
|
||||
member string
|
||||
radius float64
|
||||
toMeter float64
|
||||
|
||||
withDist bool
|
||||
withCoord bool
|
||||
direction direction // unsorted
|
||||
count int
|
||||
withStore bool
|
||||
storeKey string
|
||||
withStoredist bool
|
||||
storedistKey string
|
||||
}{
|
||||
key: args[0],
|
||||
member: args[1],
|
||||
}
|
||||
|
||||
r, err := strconv.ParseFloat(args[2], 64)
|
||||
if err != nil || r < 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
opts.radius = r
|
||||
|
||||
opts.toMeter = parseUnit(args[3])
|
||||
if opts.toMeter == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
args = args[4:]
|
||||
|
||||
for len(args) > 0 {
|
||||
arg := args[0]
|
||||
args = args[1:]
|
||||
switch strings.ToUpper(arg) {
|
||||
case "WITHCOORD":
|
||||
opts.withCoord = true
|
||||
case "WITHDIST":
|
||||
opts.withDist = true
|
||||
case "ASC":
|
||||
opts.direction = asc
|
||||
case "DESC":
|
||||
opts.direction = desc
|
||||
case "COUNT":
|
||||
if len(args) == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError("ERR syntax error")
|
||||
return
|
||||
}
|
||||
n, err := strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
setDirty(c)
|
||||
c.WriteError(msgInvalidInt)
|
||||
return
|
||||
}
|
||||
if n <= 0 {
|
||||
setDirty(c)
|
||||
c.WriteError("ERR COUNT must be > 0")
|
||||
return
|
||||
}
|
||||
args = args[1:]
|
||||
opts.count = n
|
||||
case "STORE":
|
||||
if len(args) == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError("ERR syntax error")
|
||||
return
|
||||
}
|
||||
opts.withStore = true
|
||||
opts.storeKey = args[0]
|
||||
args = args[1:]
|
||||
case "STOREDIST":
|
||||
if len(args) == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError("ERR syntax error")
|
||||
return
|
||||
}
|
||||
opts.withStoredist = true
|
||||
opts.storedistKey = args[0]
|
||||
args = args[1:]
|
||||
default:
|
||||
setDirty(c)
|
||||
c.WriteError("ERR syntax error")
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if strings.ToUpper(cmd) == "GEORADIUSBYMEMBER_RO" && (opts.withStore || opts.withStoredist) {
|
||||
setDirty(c)
|
||||
c.WriteError("ERR syntax error")
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
if (opts.withStore || opts.withStoredist) && (opts.withDist || opts.withCoord) {
|
||||
c.WriteError("ERR STORE option in GEORADIUS is not compatible with WITHDIST, WITHHASH and WITHCOORDS options")
|
||||
return
|
||||
}
|
||||
|
||||
db := m.db(ctx.selectedDB)
|
||||
if !db.exists(opts.key) {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
|
||||
if db.t(opts.key) != "zset" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// get position of member
|
||||
if !db.ssetExists(opts.key, opts.member) {
|
||||
c.WriteError("ERR could not decode requested zset member")
|
||||
return
|
||||
}
|
||||
score := db.ssetScore(opts.key, opts.member)
|
||||
longitude, latitude := fromGeohash(uint64(score))
|
||||
|
||||
members := db.ssetElements(opts.key)
|
||||
matches := withinRadius(members, longitude, latitude, opts.radius*opts.toMeter)
|
||||
|
||||
// deal with ASC/DESC
|
||||
if opts.direction != unsorted {
|
||||
sort.Slice(matches, func(i, j int) bool {
|
||||
if opts.direction == desc {
|
||||
return matches[i].Distance > matches[j].Distance
|
||||
}
|
||||
return matches[i].Distance < matches[j].Distance
|
||||
})
|
||||
}
|
||||
|
||||
// deal with COUNT
|
||||
if opts.count > 0 && len(matches) > opts.count {
|
||||
matches = matches[:opts.count]
|
||||
}
|
||||
|
||||
// deal with "STORE x"
|
||||
if opts.withStore {
|
||||
db.del(opts.storeKey, true)
|
||||
for _, member := range matches {
|
||||
db.ssetAdd(opts.storeKey, member.Score, member.Name)
|
||||
}
|
||||
c.WriteInt(len(matches))
|
||||
return
|
||||
}
|
||||
|
||||
// deal with "STOREDIST x"
|
||||
if opts.withStoredist {
|
||||
db.del(opts.storedistKey, true)
|
||||
for _, member := range matches {
|
||||
db.ssetAdd(opts.storedistKey, member.Distance/opts.toMeter, member.Name)
|
||||
}
|
||||
c.WriteInt(len(matches))
|
||||
return
|
||||
}
|
||||
|
||||
c.WriteLen(len(matches))
|
||||
for _, member := range matches {
|
||||
if !opts.withDist && !opts.withCoord {
|
||||
c.WriteBulk(member.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
len := 1
|
||||
if opts.withDist {
|
||||
len++
|
||||
}
|
||||
if opts.withCoord {
|
||||
len++
|
||||
}
|
||||
c.WriteLen(len)
|
||||
c.WriteBulk(member.Name)
|
||||
if opts.withDist {
|
||||
c.WriteBulk(fmt.Sprintf("%.4f", member.Distance/opts.toMeter))
|
||||
}
|
||||
if opts.withCoord {
|
||||
c.WriteLen(2)
|
||||
c.WriteBulk(fmt.Sprintf("%f", member.Longitude))
|
||||
c.WriteBulk(fmt.Sprintf("%f", member.Latitude))
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func withinRadius(members []ssElem, longitude, latitude, radius float64) []geoDistance {
|
||||
matches := []geoDistance{}
|
||||
for _, el := range members {
|
||||
elLo, elLat := fromGeohash(uint64(el.score))
|
||||
distanceInMeter := distance(latitude, longitude, elLat, elLo)
|
||||
|
||||
if distanceInMeter <= radius {
|
||||
matches = append(matches, geoDistance{
|
||||
Name: el.member,
|
||||
Score: el.score,
|
||||
Distance: distanceInMeter,
|
||||
Longitude: elLo,
|
||||
Latitude: elLat,
|
||||
})
|
||||
}
|
||||
}
|
||||
return matches
|
||||
}
|
||||
|
||||
func parseUnit(u string) float64 {
|
||||
switch u {
|
||||
case "m":
|
||||
return 1
|
||||
case "km":
|
||||
return 1000
|
||||
case "mi":
|
||||
return 1609.34
|
||||
case "ft":
|
||||
return 0.3048
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
683 vendor/github.com/alicebob/miniredis/v2/cmd_hash.go generated vendored
@@ -1,683 +0,0 @@
|
||||
// Commands from https://redis.io/commands#hash
|
||||
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"math/big"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
// commandsHash handles all hash value operations.
|
||||
func commandsHash(m *Miniredis) {
|
||||
m.srv.Register("HDEL", m.cmdHdel)
|
||||
m.srv.Register("HEXISTS", m.cmdHexists)
|
||||
m.srv.Register("HGET", m.cmdHget)
|
||||
m.srv.Register("HGETALL", m.cmdHgetall)
|
||||
m.srv.Register("HINCRBY", m.cmdHincrby)
|
||||
m.srv.Register("HINCRBYFLOAT", m.cmdHincrbyfloat)
|
||||
m.srv.Register("HKEYS", m.cmdHkeys)
|
||||
m.srv.Register("HLEN", m.cmdHlen)
|
||||
m.srv.Register("HMGET", m.cmdHmget)
|
||||
m.srv.Register("HMSET", m.cmdHmset)
|
||||
m.srv.Register("HSET", m.cmdHset)
|
||||
m.srv.Register("HSETNX", m.cmdHsetnx)
|
||||
m.srv.Register("HSTRLEN", m.cmdHstrlen)
|
||||
m.srv.Register("HVALS", m.cmdHvals)
|
||||
m.srv.Register("HSCAN", m.cmdHscan)
|
||||
}
|
||||
|
||||
// HSET
|
||||
func (m *Miniredis) cmdHset(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 3 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key, pairs := args[0], args[1:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if len(pairs)%2 == 1 {
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
|
||||
if t, ok := db.keys[key]; ok && t != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
new := db.hashSet(key, pairs...)
|
||||
c.WriteInt(new)
|
||||
})
|
||||
}
|
||||
|
||||
// HSETNX
|
||||
func (m *Miniredis) cmdHsetnx(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 3 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
opts := struct {
|
||||
key string
|
||||
field string
|
||||
value string
|
||||
}{
|
||||
key: args[0],
|
||||
field: args[1],
|
||||
value: args[2],
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if t, ok := db.keys[opts.key]; ok && t != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
if _, ok := db.hashKeys[opts.key]; !ok {
|
||||
db.hashKeys[opts.key] = map[string]string{}
|
||||
db.keys[opts.key] = "hash"
|
||||
}
|
||||
_, ok := db.hashKeys[opts.key][opts.field]
|
||||
if ok {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
db.hashKeys[opts.key][opts.field] = opts.value
|
||||
db.keyVersion[opts.key]++
|
||||
c.WriteInt(1)
|
||||
})
|
||||
}
|
||||
|
||||
// HMSET
|
||||
func (m *Miniredis) cmdHmset(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 3 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key, args := args[0], args[1:]
|
||||
if len(args)%2 != 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if t, ok := db.keys[key]; ok && t != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
for len(args) > 0 {
|
||||
field, value := args[0], args[1]
|
||||
args = args[2:]
|
||||
db.hashSet(key, field, value)
|
||||
}
|
||||
c.WriteOK()
|
||||
})
|
||||
}
|
||||
|
||||
// HGET
|
||||
func (m *Miniredis) cmdHget(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key, field := args[0], args[1]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
t, ok := db.keys[key]
|
||||
if !ok {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
if t != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
value, ok := db.hashKeys[key][field]
|
||||
if !ok {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
c.WriteBulk(value)
|
||||
})
|
||||
}
|
||||
|
||||
// HDEL
|
||||
func (m *Miniredis) cmdHdel(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
opts := struct {
|
||||
key string
|
||||
fields []string
|
||||
}{
|
||||
key: args[0],
|
||||
fields: args[1:],
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
t, ok := db.keys[opts.key]
|
||||
if !ok {
|
||||
// No key is zero deleted
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
if t != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
deleted := 0
|
||||
for _, f := range opts.fields {
|
||||
_, ok := db.hashKeys[opts.key][f]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
delete(db.hashKeys[opts.key], f)
|
||||
deleted++
|
||||
}
|
||||
c.WriteInt(deleted)
|
||||
|
||||
// Nothing left. Remove the whole key.
|
||||
if len(db.hashKeys[opts.key]) == 0 {
|
||||
db.del(opts.key, true)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// HEXISTS
|
||||
func (m *Miniredis) cmdHexists(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
opts := struct {
|
||||
key string
|
||||
field string
|
||||
}{
|
||||
key: args[0],
|
||||
field: args[1],
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
t, ok := db.keys[opts.key]
|
||||
if !ok {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
if t != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
if _, ok := db.hashKeys[opts.key][opts.field]; !ok {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
c.WriteInt(1)
|
||||
})
|
||||
}
|
||||
|
||||
// HGETALL
|
||||
func (m *Miniredis) cmdHgetall(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
t, ok := db.keys[key]
|
||||
if !ok {
|
||||
c.WriteMapLen(0)
|
||||
return
|
||||
}
|
||||
if t != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
c.WriteMapLen(len(db.hashKeys[key]))
|
||||
for _, k := range db.hashFields(key) {
|
||||
c.WriteBulk(k)
|
||||
c.WriteBulk(db.hashGet(key, k))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// HKEYS
|
||||
func (m *Miniredis) cmdHkeys(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(key) {
|
||||
c.WriteLen(0)
|
||||
return
|
||||
}
|
||||
if db.t(key) != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
fields := db.hashFields(key)
|
||||
c.WriteLen(len(fields))
|
||||
for _, f := range fields {
|
||||
c.WriteBulk(f)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// HSTRLEN
|
||||
func (m *Miniredis) cmdHstrlen(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
hash, key := args[0], args[1]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
t, ok := db.keys[hash]
|
||||
if !ok {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
if t != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
keys := db.hashKeys[hash]
|
||||
c.WriteInt(len(keys[key]))
|
||||
})
|
||||
}
|
||||
|
||||
// HVALS
|
||||
func (m *Miniredis) cmdHvals(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
t, ok := db.keys[key]
|
||||
if !ok {
|
||||
c.WriteLen(0)
|
||||
return
|
||||
}
|
||||
if t != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
vals := db.hashValues(key)
|
||||
c.WriteLen(len(vals))
|
||||
for _, v := range vals {
|
||||
c.WriteBulk(v)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// HLEN
|
||||
func (m *Miniredis) cmdHlen(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
t, ok := db.keys[key]
|
||||
if !ok {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
if t != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
c.WriteInt(len(db.hashKeys[key]))
|
||||
})
|
||||
}
|
||||
|
||||
// HMGET
|
||||
func (m *Miniredis) cmdHmget(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if t, ok := db.keys[key]; ok && t != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
f, ok := db.hashKeys[key]
|
||||
if !ok {
|
||||
f = map[string]string{}
|
||||
}
|
||||
|
||||
c.WriteLen(len(args) - 1)
|
||||
for _, k := range args[1:] {
|
||||
v, ok := f[k]
|
||||
if !ok {
|
||||
c.WriteNull()
|
||||
continue
|
||||
}
|
||||
c.WriteBulk(v)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// HINCRBY
|
||||
func (m *Miniredis) cmdHincrby(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 3 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
opts := struct {
|
||||
key string
|
||||
field string
|
||||
delta int
|
||||
}{
|
||||
key: args[0],
|
||||
field: args[1],
|
||||
}
|
||||
if ok := optInt(c, args[2], &opts.delta); !ok {
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if t, ok := db.keys[opts.key]; ok && t != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
v, err := db.hashIncr(opts.key, opts.field, opts.delta)
|
||||
if err != nil {
|
||||
c.WriteError(err.Error())
|
||||
return
|
||||
}
|
||||
c.WriteInt(v)
|
||||
})
|
||||
}
|
||||
|
||||
// HINCRBYFLOAT
|
||||
func (m *Miniredis) cmdHincrbyfloat(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 3 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
opts := struct {
|
||||
key string
|
||||
field string
|
||||
delta *big.Float
|
||||
}{
|
||||
key: args[0],
|
||||
field: args[1],
|
||||
}
|
||||
delta, _, err := big.ParseFloat(args[2], 10, 128, 0)
|
||||
if err != nil {
|
||||
setDirty(c)
|
||||
c.WriteError(msgInvalidFloat)
|
||||
return
|
||||
}
|
||||
opts.delta = delta
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if t, ok := db.keys[opts.key]; ok && t != "hash" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
v, err := db.hashIncrfloat(opts.key, opts.field, opts.delta)
|
||||
if err != nil {
|
||||
c.WriteError(err.Error())
|
||||
return
|
||||
}
|
||||
c.WriteBulk(formatBig(v))
|
||||
})
|
||||
}

// HSCAN
func (m *Miniredis) cmdHscan(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	opts := struct {
		key       string
		cursor    int
		withMatch bool
		match     string
	}{
		key: args[0],
	}
	if ok := optIntErr(c, args[1], &opts.cursor, msgInvalidCursor); !ok {
		return
	}
	args = args[2:]

	// MATCH and COUNT options
	for len(args) > 0 {
		if strings.ToLower(args[0]) == "count" {
			// we do nothing with count
			if len(args) < 2 {
				setDirty(c)
				c.WriteError(msgSyntaxError)
				return
			}
			_, err := strconv.Atoi(args[1])
			if err != nil {
				setDirty(c)
				c.WriteError(msgInvalidInt)
				return
			}
			args = args[2:]
			continue
		}
		if strings.ToLower(args[0]) == "match" {
			if len(args) < 2 {
				setDirty(c)
				c.WriteError(msgSyntaxError)
				return
			}
			opts.withMatch = true
			opts.match, args = args[1], args[2:]
			continue
		}
		setDirty(c)
		c.WriteError(msgSyntaxError)
		return
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)
		// return _all_ (matched) keys every time

		if opts.cursor != 0 {
			// Invalid cursor.
			c.WriteLen(2)
			c.WriteBulk("0") // no next cursor
			c.WriteLen(0)    // no elements
			return
		}
		if db.exists(opts.key) && db.t(opts.key) != "hash" {
			c.WriteError(ErrWrongType.Error())
			return
		}

		members := db.hashFields(opts.key)
		if opts.withMatch {
			members, _ = matchKeys(members, opts.match)
		}

		c.WriteLen(2)
		c.WriteBulk("0") // no next cursor
		// HSCAN gives key, values.
		c.WriteLen(len(members) * 2)
		for _, k := range members {
			c.WriteBulk(k)
			c.WriteBulk(db.hashGet(opts.key, k))
		}
	})
}
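As the comments above note, this HSCAN returns every (matched) field/value pair in a single page: COUNT is validated but ignored, and any non-zero cursor yields an empty reply with next cursor "0". A small test sketch of that behaviour; it assumes the go-redis v9 client (github.com/redis/go-redis/v9), which is not part of this repository:

```go
package example

import (
	"context"
	"testing"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
)

func TestHScanIsSinglePage(t *testing.T) {
	s := miniredis.RunT(t) // in-memory server, stopped with the test
	rdb := redis.NewClient(&redis.Options{Addr: s.Addr()})
	ctx := context.Background()

	s.HSet("h", "field1", "a")
	s.HSet("h", "field2", "b")

	// COUNT 1 is accepted but has no effect: everything comes back at once.
	pairs, cursor, err := rdb.HScan(ctx, "h", 0, "field*", 1).Result()
	if err != nil {
		t.Fatal(err)
	}
	if cursor != 0 || len(pairs) != 4 { // field1, a, field2, b
		t.Fatalf("unexpected reply: cursor=%d pairs=%v", cursor, pairs)
	}
}
```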
95
vendor/github.com/alicebob/miniredis/v2/cmd_hll.go
generated
vendored
@@ -1,95 +0,0 @@
package miniredis

import "github.com/alicebob/miniredis/v2/server"

// commandsHll handles all hll related operations.
func commandsHll(m *Miniredis) {
	m.srv.Register("PFADD", m.cmdPfadd)
	m.srv.Register("PFCOUNT", m.cmdPfcount)
	m.srv.Register("PFMERGE", m.cmdPfmerge)
}

// PFADD
func (m *Miniredis) cmdPfadd(c *server.Peer, cmd string, args []string) {
	if len(args) < 2 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	key, items := args[0], args[1:]

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if db.exists(key) && db.t(key) != "hll" {
			c.WriteError(ErrNotValidHllValue.Error())
			return
		}

		altered := db.hllAdd(key, items...)
		c.WriteInt(altered)
	})
}

// PFCOUNT
func (m *Miniredis) cmdPfcount(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	keys := args

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		count, err := db.hllCount(keys)
		if err != nil {
			c.WriteError(err.Error())
			return
		}

		c.WriteInt(count)
	})
}

// PFMERGE
func (m *Miniredis) cmdPfmerge(c *server.Peer, cmd string, args []string) {
	if len(args) < 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}
	if !m.handleAuth(c) {
		return
	}
	if m.checkPubsub(c, cmd) {
		return
	}

	keys := args

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		db := m.db(ctx.selectedDB)

		if err := db.hllMerge(keys); err != nil {
			c.WriteError(err.Error())
			return
		}
		c.WriteOK()
	})
}
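The three handlers above back miniredis' HyperLogLog support: PFADD rejects keys of any other type, PFCOUNT counts across one or more keys, and PFMERGE writes the merged estimate into the destination. A usage sketch, again assuming the go-redis v9 client as the caller:

```go
package example

import (
	"context"
	"testing"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
)

func TestPfCommands(t *testing.T) {
	s := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: s.Addr()})
	ctx := context.Background()

	// PFADD registers three distinct elements in the sketch.
	if err := rdb.PFAdd(ctx, "visitors", "alice", "bob", "carol").Err(); err != nil {
		t.Fatal(err)
	}
	n, err := rdb.PFCount(ctx, "visitors").Result()
	if err != nil {
		t.Fatal(err)
	}
	if n != 3 { // for this small a set the estimate should be exact
		t.Fatalf("expected an estimate of 3, got %d", n)
	}
}
```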
40
vendor/github.com/alicebob/miniredis/v2/cmd_info.go
generated
vendored
@@ -1,40 +0,0 @@
package miniredis

import (
	"fmt"

	"github.com/alicebob/miniredis/v2/server"
)

// Command 'INFO' from https://redis.io/commands/info/
func (m *Miniredis) cmdInfo(c *server.Peer, cmd string, args []string) {
	if !m.isValidCMD(c, cmd) {
		return
	}

	if len(args) > 1 {
		setDirty(c)
		c.WriteError(errWrongNumber(cmd))
		return
	}

	withTx(m, c, func(c *server.Peer, ctx *connCtx) {
		const (
			clientsSectionName    = "clients"
			clientsSectionContent = "# Clients\nconnected_clients:%d\r\n"
		)

		var result string

		for _, key := range args {
			if key != clientsSectionName {
				setDirty(c)
				c.WriteError(fmt.Sprintf("section (%s) is not supported", key))
				return
			}
		}
		result = fmt.Sprintf(clientsSectionContent, m.Server().ClientsLen())

		c.WriteBulk(result)
	})
}
986
vendor/github.com/alicebob/miniredis/v2/cmd_list.go
generated
vendored
@@ -1,986 +0,0 @@
|
||||
// Commands from https://redis.io/commands#list
|
||||
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
type leftright int
|
||||
|
||||
const (
|
||||
left leftright = iota
|
||||
right
|
||||
)
|
||||
|
||||
// commandsList handles list commands (mostly L*)
|
||||
func commandsList(m *Miniredis) {
|
||||
m.srv.Register("BLPOP", m.cmdBlpop)
|
||||
m.srv.Register("BRPOP", m.cmdBrpop)
|
||||
m.srv.Register("BRPOPLPUSH", m.cmdBrpoplpush)
|
||||
m.srv.Register("LINDEX", m.cmdLindex)
|
||||
m.srv.Register("LPOS", m.cmdLpos)
|
||||
m.srv.Register("LINSERT", m.cmdLinsert)
|
||||
m.srv.Register("LLEN", m.cmdLlen)
|
||||
m.srv.Register("LPOP", m.cmdLpop)
|
||||
m.srv.Register("LPUSH", m.cmdLpush)
|
||||
m.srv.Register("LPUSHX", m.cmdLpushx)
|
||||
m.srv.Register("LRANGE", m.cmdLrange)
|
||||
m.srv.Register("LREM", m.cmdLrem)
|
||||
m.srv.Register("LSET", m.cmdLset)
|
||||
m.srv.Register("LTRIM", m.cmdLtrim)
|
||||
m.srv.Register("RPOP", m.cmdRpop)
|
||||
m.srv.Register("RPOPLPUSH", m.cmdRpoplpush)
|
||||
m.srv.Register("RPUSH", m.cmdRpush)
|
||||
m.srv.Register("RPUSHX", m.cmdRpushx)
|
||||
m.srv.Register("LMOVE", m.cmdLmove)
|
||||
}
|
||||
|
||||
// BLPOP
|
||||
func (m *Miniredis) cmdBlpop(c *server.Peer, cmd string, args []string) {
|
||||
m.cmdBXpop(c, cmd, args, left)
|
||||
}
|
||||
|
||||
// BRPOP
|
||||
func (m *Miniredis) cmdBrpop(c *server.Peer, cmd string, args []string) {
|
||||
m.cmdBXpop(c, cmd, args, right)
|
||||
}
|
||||
|
||||
func (m *Miniredis) cmdBXpop(c *server.Peer, cmd string, args []string, lr leftright) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
timeoutS := args[len(args)-1]
|
||||
keys := args[:len(args)-1]
|
||||
|
||||
timeout, err := strconv.Atoi(timeoutS)
|
||||
if err != nil {
|
||||
setDirty(c)
|
||||
c.WriteError(msgInvalidTimeout)
|
||||
return
|
||||
}
|
||||
if timeout < 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgNegTimeout)
|
||||
return
|
||||
}
|
||||
|
||||
blocking(
|
||||
m,
|
||||
c,
|
||||
time.Duration(timeout)*time.Second,
|
||||
func(c *server.Peer, ctx *connCtx) bool {
|
||||
db := m.db(ctx.selectedDB)
|
||||
for _, key := range keys {
|
||||
if !db.exists(key) {
|
||||
continue
|
||||
}
|
||||
if db.t(key) != "list" {
|
||||
c.WriteError(msgWrongType)
|
||||
return true
|
||||
}
|
||||
|
||||
if len(db.listKeys[key]) == 0 {
|
||||
continue
|
||||
}
|
||||
c.WriteLen(2)
|
||||
c.WriteBulk(key)
|
||||
var v string
|
||||
switch lr {
|
||||
case left:
|
||||
v = db.listLpop(key)
|
||||
case right:
|
||||
v = db.listPop(key)
|
||||
}
|
||||
c.WriteBulk(v)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
},
|
||||
func(c *server.Peer) {
|
||||
// timeout
|
||||
c.WriteLen(-1)
|
||||
},
|
||||
)
|
||||
}
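cmdBXpop parses the trailing timeout (whole, non-negative seconds) and delegates to the blocking helper, which keeps re-running the callback until one of the listed keys has an element, or the timeout fires and a nil array is written. A sketch of the happy path, assuming the go-redis v9 client; note this vendored copy only accepts whole-second timeouts:

```go
package example

import (
	"context"
	"testing"
	"time"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
)

func TestBlpop(t *testing.T) {
	s := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: s.Addr()})
	ctx := context.Background()

	// With data present BLPOP answers immediately with a two-element reply: [key, value].
	rdb.RPush(ctx, "jobs", "job-1")
	res, err := rdb.BLPop(ctx, time.Second, "jobs").Result()
	if err != nil {
		t.Fatal(err)
	}
	if len(res) != 2 || res[0] != "jobs" || res[1] != "job-1" {
		t.Fatalf("unexpected reply: %v", res)
	}
	// On an empty key the handler blocks until the timeout expires and then
	// writes a nil array, which go-redis surfaces as redis.Nil.
}
```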
|
||||
|
||||
// LINDEX
|
||||
func (m *Miniredis) cmdLindex(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key, offsets := args[0], args[1]
|
||||
|
||||
offset, err := strconv.Atoi(offsets)
|
||||
if err != nil || offsets == "-0" {
|
||||
setDirty(c)
|
||||
c.WriteError(msgInvalidInt)
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
t, ok := db.keys[key]
|
||||
if !ok {
|
||||
// No such key
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
if t != "list" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
l := db.listKeys[key]
|
||||
if offset < 0 {
|
||||
offset = len(l) + offset
|
||||
}
|
||||
if offset < 0 || offset > len(l)-1 {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
c.WriteBulk(l[offset])
|
||||
})
|
||||
}
|
||||
|
||||
// LPOS key element [RANK rank] [COUNT num-matches] [MAXLEN len]
|
||||
func (m *Miniredis) cmdLpos(c *server.Peer, cmd string, args []string) {
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
if len(args) == 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
|
||||
// Extract options from arguments if present.
|
||||
//
|
||||
// Redis allows duplicate options and uses the last specified.
|
||||
// `LPOS key term RANK 1 RANK 2` is effectively the same as
|
||||
// `LPOS key term RANK 2`
|
||||
if len(args)%2 == 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
rank, count := 1, 1 // Default values
|
||||
var maxlen int // Default value is the list length (see below)
|
||||
var countSpecified, maxlenSpecified bool
|
||||
if len(args) > 2 {
|
||||
for i := 2; i < len(args); i++ {
|
||||
if i%2 == 0 {
|
||||
val := args[i+1]
|
||||
var err error
|
||||
switch strings.ToLower(args[i]) {
|
||||
case "rank":
|
||||
if rank, err = strconv.Atoi(val); err != nil {
|
||||
setDirty(c)
|
||||
c.WriteError(msgInvalidInt)
|
||||
return
|
||||
}
|
||||
if rank == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgRankIsZero)
|
||||
return
|
||||
}
|
||||
case "count":
|
||||
countSpecified = true
|
||||
if count, err = strconv.Atoi(val); err != nil || count < 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgCountIsNegative)
|
||||
return
|
||||
}
|
||||
case "maxlen":
|
||||
maxlenSpecified = true
|
||||
if maxlen, err = strconv.Atoi(val); err != nil || maxlen < 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgMaxLengthIsNegative)
|
||||
return
|
||||
}
|
||||
default:
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
key, element := args[0], args[1]
|
||||
t, ok := db.keys[key]
|
||||
if !ok {
|
||||
// No such key
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
if t != "list" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
l := db.listKeys[key]
|
||||
|
||||
// RANK cannot be zero (see above).
|
||||
// If RANK is positive search forward (left to right).
|
||||
// If RANK is negative search backward (right to left).
|
||||
// Iterator returns true to continue iterating.
|
||||
iterate := func(iterator func(i int, e string) bool) {
|
||||
comparisons := len(l)
|
||||
// Only use max length if specified, not zero, and less than total length.
|
||||
// When max length is specified, but is zero, this means "unlimited".
|
||||
if maxlenSpecified && maxlen != 0 && maxlen < len(l) {
|
||||
comparisons = maxlen
|
||||
}
|
||||
if rank > 0 {
|
||||
for i := 0; i < comparisons; i++ {
|
||||
if resume := iterator(i, l[i]); !resume {
|
||||
return
|
||||
}
|
||||
}
|
||||
} else if rank < 0 {
|
||||
start := len(l) - 1
|
||||
end := len(l) - comparisons
|
||||
for i := start; i >= end; i-- {
|
||||
if resume := iterator(i, l[i]); !resume {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var currentRank, currentCount int
|
||||
vals := make([]int, 0, count)
|
||||
iterate(func(i int, e string) bool {
|
||||
if e == element {
|
||||
currentRank++
|
||||
// Only collect values only after surpassing the absolute value of rank.
|
||||
if rank > 0 && currentRank < rank {
|
||||
return true
|
||||
}
|
||||
if rank < 0 && currentRank < -rank {
|
||||
return true
|
||||
}
|
||||
vals = append(vals, i)
|
||||
currentCount++
|
||||
if currentCount == count {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
|
||||
if !countSpecified && len(vals) == 0 {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
if !countSpecified && len(vals) == 1 {
|
||||
c.WriteInt(vals[0])
|
||||
return
|
||||
}
|
||||
c.WriteLen(len(vals))
|
||||
for _, val := range vals {
|
||||
c.WriteInt(val)
|
||||
}
|
||||
})
|
||||
}
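LPOS follows the semantics the comments above spell out: RANK chooses the starting match and scan direction (negative ranks scan from the tail), COUNT returns that many indexes (0 meaning all), MAXLEN caps how many elements are compared, and duplicated options keep the last value. A sketch using the raw command through go-redis' generic Do, to stay independent of client helper signatures; the list contents are made up:

```go
package example

import (
	"context"
	"testing"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
)

func TestLpos(t *testing.T) {
	s := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: s.Addr()})
	ctx := context.Background()

	rdb.RPush(ctx, "l", "a", "b", "c", "b", "b") // indexes 0..4

	// First match from the head: index 1.
	if v, err := rdb.Do(ctx, "LPOS", "l", "b").Int64(); err != nil || v != 1 {
		t.Fatalf("v=%d err=%v", v, err)
	}
	// RANK -1 starts from the tail, so the first match is index 4.
	if v, err := rdb.Do(ctx, "LPOS", "l", "b", "RANK", -1).Int64(); err != nil || v != 4 {
		t.Fatalf("v=%d err=%v", v, err)
	}
}
```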
|
||||
|
||||
// LINSERT
|
||||
func (m *Miniredis) cmdLinsert(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 4 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
where := 0
|
||||
switch strings.ToLower(args[1]) {
|
||||
case "before":
|
||||
where = -1
|
||||
case "after":
|
||||
where = +1
|
||||
default:
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
pivot := args[2]
|
||||
value := args[3]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
t, ok := db.keys[key]
|
||||
if !ok {
|
||||
// No such key
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
if t != "list" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
l := db.listKeys[key]
|
||||
for i, el := range l {
|
||||
if el != pivot {
|
||||
continue
|
||||
}
|
||||
|
||||
if where < 0 {
|
||||
l = append(l[:i], append(listKey{value}, l[i:]...)...)
|
||||
} else {
|
||||
if i == len(l)-1 {
|
||||
l = append(l, value)
|
||||
} else {
|
||||
l = append(l[:i+1], append(listKey{value}, l[i+1:]...)...)
|
||||
}
|
||||
}
|
||||
db.listKeys[key] = l
|
||||
db.keyVersion[key]++
|
||||
c.WriteInt(len(l))
|
||||
return
|
||||
}
|
||||
c.WriteInt(-1)
|
||||
})
|
||||
}
|
||||
|
||||
// LLEN
|
||||
func (m *Miniredis) cmdLlen(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
t, ok := db.keys[key]
|
||||
if !ok {
|
||||
// No such key. That's zero length.
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
if t != "list" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
c.WriteInt(len(db.listKeys[key]))
|
||||
})
|
||||
}
|
||||
|
||||
// LPOP
|
||||
func (m *Miniredis) cmdLpop(c *server.Peer, cmd string, args []string) {
|
||||
m.cmdXpop(c, cmd, args, left)
|
||||
}
|
||||
|
||||
// RPOP
|
||||
func (m *Miniredis) cmdRpop(c *server.Peer, cmd string, args []string) {
|
||||
m.cmdXpop(c, cmd, args, right)
|
||||
}
|
||||
|
||||
func (m *Miniredis) cmdXpop(c *server.Peer, cmd string, args []string, lr leftright) {
|
||||
if len(args) < 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
var opts struct {
|
||||
key string
|
||||
withCount bool
|
||||
count int
|
||||
}
|
||||
|
||||
opts.key, args = args[0], args[1:]
|
||||
if len(args) > 0 {
|
||||
if ok := optInt(c, args[0], &opts.count); !ok {
|
||||
return
|
||||
}
|
||||
if opts.count < 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgOutOfRange)
|
||||
return
|
||||
}
|
||||
opts.withCount = true
|
||||
args = args[1:]
|
||||
}
|
||||
if len(args) > 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(opts.key) {
|
||||
// non-existing key is fine
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
if db.t(opts.key) != "list" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
if opts.withCount {
|
||||
var popped []string
|
||||
for opts.count > 0 && len(db.listKeys[opts.key]) > 0 {
|
||||
switch lr {
|
||||
case left:
|
||||
popped = append(popped, db.listLpop(opts.key))
|
||||
case right:
|
||||
popped = append(popped, db.listPop(opts.key))
|
||||
}
|
||||
opts.count -= 1
|
||||
}
|
||||
if len(popped) == 0 {
|
||||
c.WriteLen(-1)
|
||||
} else {
|
||||
c.WriteStrings(popped)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var elem string
|
||||
switch lr {
|
||||
case left:
|
||||
elem = db.listLpop(opts.key)
|
||||
case right:
|
||||
elem = db.listPop(opts.key)
|
||||
}
|
||||
c.WriteBulk(elem)
|
||||
})
|
||||
}
|
||||
|
||||
// LPUSH
|
||||
func (m *Miniredis) cmdLpush(c *server.Peer, cmd string, args []string) {
|
||||
m.cmdXpush(c, cmd, args, left)
|
||||
}
|
||||
|
||||
// RPUSH
|
||||
func (m *Miniredis) cmdRpush(c *server.Peer, cmd string, args []string) {
|
||||
m.cmdXpush(c, cmd, args, right)
|
||||
}
|
||||
|
||||
func (m *Miniredis) cmdXpush(c *server.Peer, cmd string, args []string, lr leftright) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key, args := args[0], args[1:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if db.exists(key) && db.t(key) != "list" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
var newLen int
|
||||
for _, value := range args {
|
||||
switch lr {
|
||||
case left:
|
||||
newLen = db.listLpush(key, value)
|
||||
case right:
|
||||
newLen = db.listPush(key, value)
|
||||
}
|
||||
}
|
||||
c.WriteInt(newLen)
|
||||
})
|
||||
}
|
||||
|
||||
// LPUSHX
|
||||
func (m *Miniredis) cmdLpushx(c *server.Peer, cmd string, args []string) {
|
||||
m.cmdXpushx(c, cmd, args, left)
|
||||
}
|
||||
|
||||
// RPUSHX
|
||||
func (m *Miniredis) cmdRpushx(c *server.Peer, cmd string, args []string) {
|
||||
m.cmdXpushx(c, cmd, args, right)
|
||||
}
|
||||
|
||||
func (m *Miniredis) cmdXpushx(c *server.Peer, cmd string, args []string, lr leftright) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key, args := args[0], args[1:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(key) {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
if db.t(key) != "list" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
var newLen int
|
||||
for _, value := range args {
|
||||
switch lr {
|
||||
case left:
|
||||
newLen = db.listLpush(key, value)
|
||||
case right:
|
||||
newLen = db.listPush(key, value)
|
||||
}
|
||||
}
|
||||
c.WriteInt(newLen)
|
||||
})
|
||||
}
|
||||
|
||||
// LRANGE
|
||||
func (m *Miniredis) cmdLrange(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 3 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
opts := struct {
|
||||
key string
|
||||
start int
|
||||
end int
|
||||
}{
|
||||
key: args[0],
|
||||
}
|
||||
if ok := optInt(c, args[1], &opts.start); !ok {
|
||||
return
|
||||
}
|
||||
if ok := optInt(c, args[2], &opts.end); !ok {
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if t, ok := db.keys[opts.key]; ok && t != "list" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
l := db.listKeys[opts.key]
|
||||
if len(l) == 0 {
|
||||
c.WriteLen(0)
|
||||
return
|
||||
}
|
||||
|
||||
rs, re := redisRange(len(l), opts.start, opts.end, false)
|
||||
c.WriteLen(re - rs)
|
||||
for _, el := range l[rs:re] {
|
||||
c.WriteBulk(el)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// LREM
|
||||
func (m *Miniredis) cmdLrem(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 3 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
var opts struct {
|
||||
key string
|
||||
count int
|
||||
value string
|
||||
}
|
||||
opts.key = args[0]
|
||||
if ok := optInt(c, args[1], &opts.count); !ok {
|
||||
return
|
||||
}
|
||||
opts.value = args[2]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(opts.key) {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
if db.t(opts.key) != "list" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
l := db.listKeys[opts.key]
|
||||
if opts.count < 0 {
|
||||
reverseSlice(l)
|
||||
}
|
||||
deleted := 0
|
||||
newL := []string{}
|
||||
toDelete := len(l)
|
||||
if opts.count < 0 {
|
||||
toDelete = -opts.count
|
||||
}
|
||||
if opts.count > 0 {
|
||||
toDelete = opts.count
|
||||
}
|
||||
for _, el := range l {
|
||||
if el == opts.value {
|
||||
if toDelete > 0 {
|
||||
deleted++
|
||||
toDelete--
|
||||
continue
|
||||
}
|
||||
}
|
||||
newL = append(newL, el)
|
||||
}
|
||||
if opts.count < 0 {
|
||||
reverseSlice(newL)
|
||||
}
|
||||
if len(newL) == 0 {
|
||||
db.del(opts.key, true)
|
||||
} else {
|
||||
db.listKeys[opts.key] = newL
|
||||
db.keyVersion[opts.key]++
|
||||
}
|
||||
|
||||
c.WriteInt(deleted)
|
||||
})
|
||||
}
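LREM's count argument works as in Redis: positive removes the first count matches from the head, negative removes from the tail, and 0 removes every match; a list that ends up empty deletes the key. A brief sketch, again assuming the go-redis v9 client:

```go
package example

import (
	"context"
	"testing"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
)

func TestLrem(t *testing.T) {
	s := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: s.Addr()})
	ctx := context.Background()

	rdb.RPush(ctx, "l", "x", "y", "x", "y", "x")

	// count = -2: drop the last two "x" occurrences, keeping the first one.
	removed, err := rdb.LRem(ctx, "l", -2, "x").Result()
	if err != nil || removed != 2 {
		t.Fatalf("removed=%d err=%v", removed, err)
	}
	rest, _ := rdb.LRange(ctx, "l", 0, -1).Result()
	if len(rest) != 3 || rest[0] != "x" { // x y y
		t.Fatalf("unexpected list: %v", rest)
	}
}
```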
|
||||
|
||||
// LSET
|
||||
func (m *Miniredis) cmdLset(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 3 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
var opts struct {
|
||||
key string
|
||||
index int
|
||||
value string
|
||||
}
|
||||
opts.key = args[0]
|
||||
if ok := optInt(c, args[1], &opts.index); !ok {
|
||||
return
|
||||
}
|
||||
opts.value = args[2]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(opts.key) {
|
||||
c.WriteError(msgKeyNotFound)
|
||||
return
|
||||
}
|
||||
if db.t(opts.key) != "list" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
l := db.listKeys[opts.key]
|
||||
index := opts.index
|
||||
if index < 0 {
|
||||
index = len(l) + index
|
||||
}
|
||||
if index < 0 || index > len(l)-1 {
|
||||
c.WriteError(msgOutOfRange)
|
||||
return
|
||||
}
|
||||
l[index] = opts.value
|
||||
db.keyVersion[opts.key]++
|
||||
|
||||
c.WriteOK()
|
||||
})
|
||||
}
|
||||
|
||||
// LTRIM
|
||||
func (m *Miniredis) cmdLtrim(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 3 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
var opts struct {
|
||||
key string
|
||||
start int
|
||||
end int
|
||||
}
|
||||
|
||||
opts.key = args[0]
|
||||
if ok := optInt(c, args[1], &opts.start); !ok {
|
||||
return
|
||||
}
|
||||
if ok := optInt(c, args[2], &opts.end); !ok {
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
t, ok := db.keys[opts.key]
|
||||
if !ok {
|
||||
c.WriteOK()
|
||||
return
|
||||
}
|
||||
if t != "list" {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
|
||||
l := db.listKeys[opts.key]
|
||||
rs, re := redisRange(len(l), opts.start, opts.end, false)
|
||||
l = l[rs:re]
|
||||
if len(l) == 0 {
|
||||
db.del(opts.key, true)
|
||||
} else {
|
||||
db.listKeys[opts.key] = l
|
||||
db.keyVersion[opts.key]++
|
||||
}
|
||||
c.WriteOK()
|
||||
})
|
||||
}
|
||||
|
||||
// RPOPLPUSH
|
||||
func (m *Miniredis) cmdRpoplpush(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
src, dst := args[0], args[1]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(src) {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
if db.t(src) != "list" || (db.exists(dst) && db.t(dst) != "list") {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
elem := db.listPop(src)
|
||||
db.listLpush(dst, elem)
|
||||
c.WriteBulk(elem)
|
||||
})
|
||||
}
|
||||
|
||||
// BRPOPLPUSH
|
||||
func (m *Miniredis) cmdBrpoplpush(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 3 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
var opts struct {
|
||||
src string
|
||||
dst string
|
||||
timeout int
|
||||
}
|
||||
opts.src = args[0]
|
||||
opts.dst = args[1]
|
||||
if ok := optIntErr(c, args[2], &opts.timeout, msgInvalidTimeout); !ok {
|
||||
return
|
||||
}
|
||||
if opts.timeout < 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgNegTimeout)
|
||||
return
|
||||
}
|
||||
|
||||
blocking(
|
||||
m,
|
||||
c,
|
||||
time.Duration(opts.timeout)*time.Second,
|
||||
func(c *server.Peer, ctx *connCtx) bool {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(opts.src) {
|
||||
return false
|
||||
}
|
||||
if db.t(opts.src) != "list" || (db.exists(opts.dst) && db.t(opts.dst) != "list") {
|
||||
c.WriteError(msgWrongType)
|
||||
return true
|
||||
}
|
||||
if len(db.listKeys[opts.src]) == 0 {
|
||||
return false
|
||||
}
|
||||
elem := db.listPop(opts.src)
|
||||
db.listLpush(opts.dst, elem)
|
||||
c.WriteBulk(elem)
|
||||
return true
|
||||
},
|
||||
func(c *server.Peer) {
|
||||
// timeout
|
||||
c.WriteLen(-1)
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
// LMOVE
|
||||
func (m *Miniredis) cmdLmove(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 4 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
opts := struct {
|
||||
src string
|
||||
dst string
|
||||
srcDir string
|
||||
dstDir string
|
||||
}{
|
||||
src: args[0],
|
||||
dst: args[1],
|
||||
srcDir: strings.ToLower(args[2]),
|
||||
dstDir: strings.ToLower(args[3]),
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(opts.src) {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
if db.t(opts.src) != "list" || (db.exists(opts.dst) && db.t(opts.dst) != "list") {
|
||||
c.WriteError(msgWrongType)
|
||||
return
|
||||
}
|
||||
var elem string
|
||||
switch opts.srcDir {
|
||||
case "left":
|
||||
elem = db.listLpop(opts.src)
|
||||
case "right":
|
||||
elem = db.listPop(opts.src)
|
||||
default:
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
|
||||
switch opts.dstDir {
|
||||
case "left":
|
||||
db.listLpush(opts.dst, elem)
|
||||
case "right":
|
||||
db.listPush(opts.dst, elem)
|
||||
default:
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
c.WriteBulk(elem)
|
||||
})
|
||||
}
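LMOVE pops from the source on one side and pushes to the destination on the other requested side, so `LMOVE src dst RIGHT LEFT` is the classic queue rotation (equivalent to RPOPLPUSH). A short sketch assuming the go-redis v9 client:

```go
package example

import (
	"context"
	"testing"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
)

func TestLmove(t *testing.T) {
	s := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: s.Addr()})
	ctx := context.Background()

	rdb.RPush(ctx, "todo", "a", "b", "c")

	// Take from the right of "todo", put on the left of "doing"; the moved
	// element is returned, here "c".
	moved, err := rdb.LMove(ctx, "todo", "doing", "RIGHT", "LEFT").Result()
	if err != nil || moved != "c" {
		t.Fatalf("moved=%q err=%v", moved, err)
	}
}
```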
256
vendor/github.com/alicebob/miniredis/v2/cmd_pubsub.go
generated
vendored
@@ -1,256 +0,0 @@
|
||||
// Commands from https://redis.io/commands#pubsub
|
||||
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
// commandsPubsub handles all PUB/SUB operations.
|
||||
func commandsPubsub(m *Miniredis) {
|
||||
m.srv.Register("SUBSCRIBE", m.cmdSubscribe)
|
||||
m.srv.Register("UNSUBSCRIBE", m.cmdUnsubscribe)
|
||||
m.srv.Register("PSUBSCRIBE", m.cmdPsubscribe)
|
||||
m.srv.Register("PUNSUBSCRIBE", m.cmdPunsubscribe)
|
||||
m.srv.Register("PUBLISH", m.cmdPublish)
|
||||
m.srv.Register("PUBSUB", m.cmdPubSub)
|
||||
}
|
||||
|
||||
// SUBSCRIBE
|
||||
func (m *Miniredis) cmdSubscribe(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if getCtx(c).nested {
|
||||
c.WriteError(msgNotFromScripts)
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
sub := m.subscribedState(c)
|
||||
for _, channel := range args {
|
||||
n := sub.Subscribe(channel)
|
||||
c.Block(func(w *server.Writer) {
|
||||
w.WritePushLen(3)
|
||||
w.WriteBulk("subscribe")
|
||||
w.WriteBulk(channel)
|
||||
w.WriteInt(n)
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// UNSUBSCRIBE
|
||||
func (m *Miniredis) cmdUnsubscribe(c *server.Peer, cmd string, args []string) {
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if getCtx(c).nested {
|
||||
c.WriteError(msgNotFromScripts)
|
||||
return
|
||||
}
|
||||
|
||||
channels := args
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
sub := m.subscribedState(c)
|
||||
|
||||
if len(channels) == 0 {
|
||||
channels = sub.Channels()
|
||||
}
|
||||
|
||||
// there is no de-duplication
|
||||
for _, channel := range channels {
|
||||
n := sub.Unsubscribe(channel)
|
||||
c.Block(func(w *server.Writer) {
|
||||
w.WritePushLen(3)
|
||||
w.WriteBulk("unsubscribe")
|
||||
w.WriteBulk(channel)
|
||||
w.WriteInt(n)
|
||||
})
|
||||
}
|
||||
if len(channels) == 0 {
|
||||
// special case: there is always a reply
|
||||
c.Block(func(w *server.Writer) {
|
||||
w.WritePushLen(3)
|
||||
w.WriteBulk("unsubscribe")
|
||||
w.WriteNull()
|
||||
w.WriteInt(0)
|
||||
})
|
||||
}
|
||||
|
||||
if sub.Count() == 0 {
|
||||
endSubscriber(m, c)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// PSUBSCRIBE
|
||||
func (m *Miniredis) cmdPsubscribe(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if getCtx(c).nested {
|
||||
c.WriteError(msgNotFromScripts)
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
sub := m.subscribedState(c)
|
||||
for _, pat := range args {
|
||||
n := sub.Psubscribe(pat)
|
||||
c.Block(func(w *server.Writer) {
|
||||
w.WritePushLen(3)
|
||||
w.WriteBulk("psubscribe")
|
||||
w.WriteBulk(pat)
|
||||
w.WriteInt(n)
|
||||
})
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// PUNSUBSCRIBE
|
||||
func (m *Miniredis) cmdPunsubscribe(c *server.Peer, cmd string, args []string) {
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if getCtx(c).nested {
|
||||
c.WriteError(msgNotFromScripts)
|
||||
return
|
||||
}
|
||||
|
||||
patterns := args
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
sub := m.subscribedState(c)
|
||||
|
||||
if len(patterns) == 0 {
|
||||
patterns = sub.Patterns()
|
||||
}
|
||||
|
||||
// there is no de-duplication
|
||||
for _, pat := range patterns {
|
||||
n := sub.Punsubscribe(pat)
|
||||
c.Block(func(w *server.Writer) {
|
||||
w.WritePushLen(3)
|
||||
w.WriteBulk("punsubscribe")
|
||||
w.WriteBulk(pat)
|
||||
w.WriteInt(n)
|
||||
})
|
||||
}
|
||||
if len(patterns) == 0 {
|
||||
// special case: there is always a reply
|
||||
c.Block(func(w *server.Writer) {
|
||||
w.WritePushLen(3)
|
||||
w.WriteBulk("punsubscribe")
|
||||
w.WriteNull()
|
||||
w.WriteInt(0)
|
||||
})
|
||||
}
|
||||
|
||||
if sub.Count() == 0 {
|
||||
endSubscriber(m, c)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// PUBLISH
|
||||
func (m *Miniredis) cmdPublish(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
channel, mesg := args[0], args[1]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
c.WriteInt(m.publish(channel, mesg))
|
||||
})
|
||||
}
|
||||
|
||||
// PUBSUB
|
||||
func (m *Miniredis) cmdPubSub(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
subcommand := strings.ToUpper(args[0])
|
||||
subargs := args[1:]
|
||||
var argsOk bool
|
||||
|
||||
switch subcommand {
|
||||
case "CHANNELS":
|
||||
argsOk = len(subargs) < 2
|
||||
case "NUMSUB":
|
||||
argsOk = true
|
||||
case "NUMPAT":
|
||||
argsOk = len(subargs) == 0
|
||||
default:
|
||||
argsOk = false
|
||||
}
|
||||
|
||||
if !argsOk {
|
||||
setDirty(c)
|
||||
c.WriteError(fmt.Sprintf(msgFPubsubUsage, subcommand))
|
||||
return
|
||||
}
|
||||
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
switch subcommand {
|
||||
case "CHANNELS":
|
||||
pat := ""
|
||||
if len(subargs) == 1 {
|
||||
pat = subargs[0]
|
||||
}
|
||||
|
||||
allsubs := m.allSubscribers()
|
||||
channels := activeChannels(allsubs, pat)
|
||||
|
||||
c.WriteLen(len(channels))
|
||||
for _, channel := range channels {
|
||||
c.WriteBulk(channel)
|
||||
}
|
||||
|
||||
case "NUMSUB":
|
||||
subs := m.allSubscribers()
|
||||
c.WriteLen(len(subargs) * 2)
|
||||
for _, channel := range subargs {
|
||||
c.WriteBulk(channel)
|
||||
c.WriteInt(countSubs(subs, channel))
|
||||
}
|
||||
|
||||
case "NUMPAT":
|
||||
c.WriteInt(countPsubs(m.allSubscribers()))
|
||||
}
|
||||
})
|
||||
}
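PUBSUB supports the three introspection subcommands handled above: CHANNELS (optionally filtered by a pattern), NUMSUB, and NUMPAT. A sketch showing CHANNELS reflecting an active subscriber, assuming the go-redis v9 client:

```go
package example

import (
	"context"
	"testing"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
)

func TestPubSubChannels(t *testing.T) {
	s := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: s.Addr()})
	ctx := context.Background()

	// Open a subscriber connection on the "events" channel.
	sub := rdb.Subscribe(ctx, "events")
	defer sub.Close()
	if _, err := sub.Receive(ctx); err != nil { // wait for the subscribe confirmation
		t.Fatal(err)
	}

	channels, err := rdb.PubSubChannels(ctx, "*").Result()
	if err != nil || len(channels) != 1 || channels[0] != "events" {
		t.Fatalf("channels=%v err=%v", channels, err)
	}
}
```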
281
vendor/github.com/alicebob/miniredis/v2/cmd_scripting.go
generated
vendored
@@ -1,281 +0,0 @@
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"crypto/sha1"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
luajson "github.com/alicebob/gopher-json"
|
||||
lua "github.com/yuin/gopher-lua"
|
||||
"github.com/yuin/gopher-lua/parse"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
func commandsScripting(m *Miniredis) {
|
||||
m.srv.Register("EVAL", m.cmdEval)
|
||||
m.srv.Register("EVALSHA", m.cmdEvalsha)
|
||||
m.srv.Register("SCRIPT", m.cmdScript)
|
||||
}
|
||||
|
||||
// Execute lua. Needs to run m.Lock()ed, from within withTx().
|
||||
// Returns true if the lua was OK (and hence should be cached).
|
||||
func (m *Miniredis) runLuaScript(c *server.Peer, script string, args []string) bool {
|
||||
l := lua.NewState(lua.Options{SkipOpenLibs: true})
|
||||
defer l.Close()
|
||||
|
||||
// Taken from the go-lua manual
|
||||
for _, pair := range []struct {
|
||||
n string
|
||||
f lua.LGFunction
|
||||
}{
|
||||
{lua.LoadLibName, lua.OpenPackage},
|
||||
{lua.BaseLibName, lua.OpenBase},
|
||||
{lua.CoroutineLibName, lua.OpenCoroutine},
|
||||
{lua.TabLibName, lua.OpenTable},
|
||||
{lua.StringLibName, lua.OpenString},
|
||||
{lua.MathLibName, lua.OpenMath},
|
||||
{lua.DebugLibName, lua.OpenDebug},
|
||||
} {
|
||||
if err := l.CallByParam(lua.P{
|
||||
Fn: l.NewFunction(pair.f),
|
||||
NRet: 0,
|
||||
Protect: true,
|
||||
}, lua.LString(pair.n)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
luajson.Preload(l)
|
||||
requireGlobal(l, "cjson", "json")
|
||||
|
||||
// set global variable KEYS
|
||||
keysTable := l.NewTable()
|
||||
keysS, args := args[0], args[1:]
|
||||
keysLen, err := strconv.Atoi(keysS)
|
||||
if err != nil {
|
||||
c.WriteError(msgInvalidInt)
|
||||
return false
|
||||
}
|
||||
if keysLen < 0 {
|
||||
c.WriteError(msgNegativeKeysNumber)
|
||||
return false
|
||||
}
|
||||
if keysLen > len(args) {
|
||||
c.WriteError(msgInvalidKeysNumber)
|
||||
return false
|
||||
}
|
||||
keys, args := args[:keysLen], args[keysLen:]
|
||||
for i, k := range keys {
|
||||
l.RawSet(keysTable, lua.LNumber(i+1), lua.LString(k))
|
||||
}
|
||||
l.SetGlobal("KEYS", keysTable)
|
||||
|
||||
argvTable := l.NewTable()
|
||||
for i, a := range args {
|
||||
l.RawSet(argvTable, lua.LNumber(i+1), lua.LString(a))
|
||||
}
|
||||
l.SetGlobal("ARGV", argvTable)
|
||||
|
||||
redisFuncs, redisConstants := mkLua(m.srv, c)
|
||||
// Register command handlers
|
||||
l.Push(l.NewFunction(func(l *lua.LState) int {
|
||||
mod := l.RegisterModule("redis", redisFuncs).(*lua.LTable)
|
||||
for k, v := range redisConstants {
|
||||
mod.RawSetString(k, v)
|
||||
}
|
||||
l.Push(mod)
|
||||
return 1
|
||||
}))
|
||||
|
||||
l.DoString(protectGlobals)
|
||||
|
||||
l.Push(lua.LString("redis"))
|
||||
l.Call(1, 0)
|
||||
|
||||
if err := l.DoString(script); err != nil {
|
||||
c.WriteError(errLuaParseError(err))
|
||||
return false
|
||||
}
|
||||
|
||||
luaToRedis(l, c, l.Get(1))
|
||||
return true
|
||||
}
|
||||
|
||||
func (m *Miniredis) cmdEval(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
if getCtx(c).nested {
|
||||
c.WriteError(msgNotFromScripts)
|
||||
return
|
||||
}
|
||||
|
||||
script, args := args[0], args[1:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
ok := m.runLuaScript(c, script, args)
|
||||
if ok {
|
||||
sha := sha1Hex(script)
|
||||
m.scripts[sha] = script
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func (m *Miniredis) cmdEvalsha(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
if getCtx(c).nested {
|
||||
c.WriteError(msgNotFromScripts)
|
||||
return
|
||||
}
|
||||
|
||||
sha, args := args[0], args[1:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
script, ok := m.scripts[sha]
|
||||
if !ok {
|
||||
c.WriteError(msgNoScriptFound)
|
||||
return
|
||||
}
|
||||
|
||||
m.runLuaScript(c, script, args)
|
||||
})
|
||||
}
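EVAL populates the Lua globals KEYS and ARGV from the numkeys argument, caches the script under its SHA1 on success, and EVALSHA replays it by hash (an unknown hash gets a NOSCRIPT error). A sketch of that flow, assuming the go-redis v9 client:

```go
package example

import (
	"context"
	"testing"

	"github.com/alicebob/miniredis/v2"
	"github.com/redis/go-redis/v9"
)

func TestEvalAndEvalSha(t *testing.T) {
	s := miniredis.RunT(t)
	rdb := redis.NewClient(&redis.Options{Addr: s.Addr()})
	ctx := context.Background()

	script := "return {KEYS[1], ARGV[1]}"

	// EVAL runs the script and, as above, stores it under sha1(script).
	res, err := rdb.Eval(ctx, script, []string{"k1"}, "v1").Result()
	if err != nil {
		t.Fatal(err)
	}
	if got := res.([]interface{}); got[0] != "k1" || got[1] != "v1" {
		t.Fatalf("unexpected reply: %v", got)
	}

	// The same script can now be invoked by hash.
	sha, _ := rdb.ScriptLoad(ctx, script).Result()
	if _, err := rdb.EvalSha(ctx, sha, []string{"k1"}, "v1").Result(); err != nil {
		t.Fatal(err)
	}
}
```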
|
||||
|
||||
func (m *Miniredis) cmdScript(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
if getCtx(c).nested {
|
||||
c.WriteError(msgNotFromScripts)
|
||||
return
|
||||
}
|
||||
|
||||
subcmd, args := args[0], args[1:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
switch strings.ToLower(subcmd) {
|
||||
case "load":
|
||||
if len(args) != 1 {
|
||||
c.WriteError(fmt.Sprintf(msgFScriptUsage, "LOAD"))
|
||||
return
|
||||
}
|
||||
script := args[0]
|
||||
|
||||
if _, err := parse.Parse(strings.NewReader(script), "user_script"); err != nil {
|
||||
c.WriteError(errLuaParseError(err))
|
||||
return
|
||||
}
|
||||
sha := sha1Hex(script)
|
||||
m.scripts[sha] = script
|
||||
c.WriteBulk(sha)
|
||||
|
||||
case "exists":
|
||||
c.WriteLen(len(args))
|
||||
for _, arg := range args {
|
||||
if _, ok := m.scripts[arg]; ok {
|
||||
c.WriteInt(1)
|
||||
} else {
|
||||
c.WriteInt(0)
|
||||
}
|
||||
}
|
||||
|
||||
case "flush":
|
||||
if len(args) == 1 {
|
||||
switch strings.ToUpper(args[0]) {
|
||||
case "SYNC", "ASYNC":
|
||||
args = args[1:]
|
||||
default:
|
||||
}
|
||||
}
|
||||
if len(args) != 0 {
|
||||
c.WriteError(msgScriptFlush)
|
||||
return
|
||||
}
|
||||
|
||||
m.scripts = map[string]string{}
|
||||
c.WriteOK()
|
||||
|
||||
default:
|
||||
c.WriteError(fmt.Sprintf(msgFScriptUsage, strings.ToUpper(subcmd)))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func sha1Hex(s string) string {
|
||||
h := sha1.New()
|
||||
io.WriteString(h, s)
|
||||
return hex.EncodeToString(h.Sum(nil))
|
||||
}
|
||||
|
||||
// requireGlobal imports module modName into the global namespace with the
|
||||
// identifier id. panics if an error results from the function execution
|
||||
func requireGlobal(l *lua.LState, id, modName string) {
|
||||
if err := l.CallByParam(lua.P{
|
||||
Fn: l.GetGlobal("require"),
|
||||
NRet: 1,
|
||||
Protect: true,
|
||||
}, lua.LString(modName)); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
mod := l.Get(-1)
|
||||
l.Pop(1)
|
||||
|
||||
l.SetGlobal(id, mod)
|
||||
}
|
||||
|
||||
// the following script protects globals
|
||||
// it is based on: http://metalua.luaforge.net/src/lib/strict.lua.html
|
||||
var protectGlobals = `
|
||||
local dbg=debug
|
||||
local mt = {}
|
||||
setmetatable(_G, mt)
|
||||
mt.__newindex = function (t, n, v)
|
||||
if dbg.getinfo(2) then
|
||||
local w = dbg.getinfo(2, "S").what
|
||||
if w ~= "C" then
|
||||
error("Script attempted to create global variable '"..tostring(n).."'", 2)
|
||||
end
|
||||
end
|
||||
rawset(t, n, v)
|
||||
end
|
||||
mt.__index = function (t, n)
|
||||
if dbg.getinfo(2) and dbg.getinfo(2, "S").what ~= "C" then
|
||||
error("Script attempted to access nonexistent global variable '"..tostring(n).."'", 2)
|
||||
end
|
||||
return rawget(t, n)
|
||||
end
|
||||
debug = nil
|
||||
|
||||
`
112
vendor/github.com/alicebob/miniredis/v2/cmd_server.go
generated
vendored
@@ -1,112 +0,0 @@
|
||||
// Commands from https://redis.io/commands#server
|
||||
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
func commandsServer(m *Miniredis) {
|
||||
m.srv.Register("COMMAND", m.cmdCommand)
|
||||
m.srv.Register("DBSIZE", m.cmdDbsize)
|
||||
m.srv.Register("FLUSHALL", m.cmdFlushall)
|
||||
m.srv.Register("FLUSHDB", m.cmdFlushdb)
|
||||
m.srv.Register("INFO", m.cmdInfo)
|
||||
m.srv.Register("TIME", m.cmdTime)
|
||||
}
|
||||
|
||||
// DBSIZE
|
||||
func (m *Miniredis) cmdDbsize(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) > 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
c.WriteInt(len(db.keys))
|
||||
})
|
||||
}
|
||||
|
||||
// FLUSHALL
|
||||
func (m *Miniredis) cmdFlushall(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) > 0 && strings.ToLower(args[0]) == "async" {
|
||||
args = args[1:]
|
||||
}
|
||||
if len(args) > 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
m.flushAll()
|
||||
c.WriteOK()
|
||||
})
|
||||
}
|
||||
|
||||
// FLUSHDB
|
||||
func (m *Miniredis) cmdFlushdb(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) > 0 && strings.ToLower(args[0]) == "async" {
|
||||
args = args[1:]
|
||||
}
|
||||
if len(args) > 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
m.db(ctx.selectedDB).flush()
|
||||
c.WriteOK()
|
||||
})
|
||||
}
|
||||
|
||||
// TIME
|
||||
func (m *Miniredis) cmdTime(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) > 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
now := m.effectiveNow()
|
||||
nanos := now.UnixNano()
|
||||
seconds := nanos / 1_000_000_000
|
||||
microseconds := (nanos / 1_000) % 1_000_000
|
||||
|
||||
c.WriteLen(2)
|
||||
c.WriteBulk(strconv.FormatInt(seconds, 10))
|
||||
c.WriteBulk(strconv.FormatInt(microseconds, 10))
|
||||
})
|
||||
}
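TIME replies with two bulk strings, the Unix seconds and the microsecond remainder, both derived from a single UnixNano reading as above. A standalone sketch of that split using only the standard library:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	nanos := time.Now().UnixNano()
	seconds := nanos / 1_000_000_000
	microseconds := (nanos / 1_000) % 1_000_000

	// TIME returns both numbers as strings, e.g. ["1700000000", "123456"].
	fmt.Println(seconds, microseconds)
}
```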
704
vendor/github.com/alicebob/miniredis/v2/cmd_set.go
generated
vendored
@@ -1,704 +0,0 @@
|
||||
// Commands from https://redis.io/commands#set
|
||||
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
// commandsSet handles all set value operations.
|
||||
func commandsSet(m *Miniredis) {
|
||||
m.srv.Register("SADD", m.cmdSadd)
|
||||
m.srv.Register("SCARD", m.cmdScard)
|
||||
m.srv.Register("SDIFF", m.cmdSdiff)
|
||||
m.srv.Register("SDIFFSTORE", m.cmdSdiffstore)
|
||||
m.srv.Register("SINTER", m.cmdSinter)
|
||||
m.srv.Register("SINTERSTORE", m.cmdSinterstore)
|
||||
m.srv.Register("SISMEMBER", m.cmdSismember)
|
||||
m.srv.Register("SMEMBERS", m.cmdSmembers)
|
||||
m.srv.Register("SMOVE", m.cmdSmove)
|
||||
m.srv.Register("SPOP", m.cmdSpop)
|
||||
m.srv.Register("SRANDMEMBER", m.cmdSrandmember)
|
||||
m.srv.Register("SREM", m.cmdSrem)
|
||||
m.srv.Register("SUNION", m.cmdSunion)
|
||||
m.srv.Register("SUNIONSTORE", m.cmdSunionstore)
|
||||
m.srv.Register("SSCAN", m.cmdSscan)
|
||||
}
|
||||
|
||||
// SADD
|
||||
func (m *Miniredis) cmdSadd(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key, elems := args[0], args[1:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if db.exists(key) && db.t(key) != "set" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
|
||||
added := db.setAdd(key, elems...)
|
||||
c.WriteInt(added)
|
||||
})
|
||||
}
|
||||
|
||||
// SCARD
|
||||
func (m *Miniredis) cmdScard(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(key) {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
|
||||
if db.t(key) != "set" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
|
||||
members := db.setMembers(key)
|
||||
c.WriteInt(len(members))
|
||||
})
|
||||
}
|
||||
|
||||
// SDIFF
|
||||
func (m *Miniredis) cmdSdiff(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
keys := args
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
set, err := db.setDiff(keys)
|
||||
if err != nil {
|
||||
c.WriteError(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
c.WriteSetLen(len(set))
|
||||
for k := range set {
|
||||
c.WriteBulk(k)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// SDIFFSTORE
|
||||
func (m *Miniredis) cmdSdiffstore(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
dest, keys := args[0], args[1:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
set, err := db.setDiff(keys)
|
||||
if err != nil {
|
||||
c.WriteError(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
db.del(dest, true)
|
||||
db.setSet(dest, set)
|
||||
c.WriteInt(len(set))
|
||||
})
|
||||
}
|
||||
|
||||
// SINTER
|
||||
func (m *Miniredis) cmdSinter(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
keys := args
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
set, err := db.setInter(keys)
|
||||
if err != nil {
|
||||
c.WriteError(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
c.WriteLen(len(set))
|
||||
for k := range set {
|
||||
c.WriteBulk(k)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// SINTERSTORE
|
||||
func (m *Miniredis) cmdSinterstore(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
dest, keys := args[0], args[1:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
set, err := db.setInter(keys)
|
||||
if err != nil {
|
||||
c.WriteError(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
db.del(dest, true)
|
||||
db.setSet(dest, set)
|
||||
c.WriteInt(len(set))
|
||||
})
|
||||
}
|
||||
|
||||
// SISMEMBER
|
||||
func (m *Miniredis) cmdSismember(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key, value := args[0], args[1]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(key) {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
|
||||
if db.t(key) != "set" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if db.setIsMember(key, value) {
|
||||
c.WriteInt(1)
|
||||
return
|
||||
}
|
||||
c.WriteInt(0)
|
||||
})
|
||||
}
|
||||
|
||||
// SMEMBERS
|
||||
func (m *Miniredis) cmdSmembers(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(key) {
|
||||
c.WriteSetLen(0)
|
||||
return
|
||||
}
|
||||
|
||||
if db.t(key) != "set" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
|
||||
members := db.setMembers(key)
|
||||
|
||||
c.WriteSetLen(len(members))
|
||||
for _, elem := range members {
|
||||
c.WriteBulk(elem)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// SMOVE
|
||||
func (m *Miniredis) cmdSmove(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 3 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
src, dst, member := args[0], args[1], args[2]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(src) {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
|
||||
if db.t(src) != "set" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if db.exists(dst) && db.t(dst) != "set" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if !db.setIsMember(src, member) {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
db.setRem(src, member)
|
||||
db.setAdd(dst, member)
|
||||
c.WriteInt(1)
|
||||
})
|
||||
}
|
||||
|
||||
// SPOP
|
||||
func (m *Miniredis) cmdSpop(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
opts := struct {
|
||||
key string
|
||||
withCount bool
|
||||
count int
|
||||
}{
|
||||
count: 1,
|
||||
}
|
||||
opts.key, args = args[0], args[1:]
|
||||
|
||||
if len(args) > 0 {
|
||||
v, err := strconv.Atoi(args[0])
|
||||
if err != nil {
|
||||
setDirty(c)
|
||||
c.WriteError(msgInvalidInt)
|
||||
return
|
||||
}
|
||||
if v < 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgOutOfRange)
|
||||
return
|
||||
}
|
||||
opts.count = v
|
||||
opts.withCount = true
|
||||
args = args[1:]
|
||||
}
|
||||
if len(args) > 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgInvalidInt)
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(opts.key) {
|
||||
if !opts.withCount {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
c.WriteLen(0)
|
||||
return
|
||||
}
|
||||
|
||||
if db.t(opts.key) != "set" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
|
||||
var deleted []string
|
||||
for i := 0; i < opts.count; i++ {
|
||||
members := db.setMembers(opts.key)
|
||||
if len(members) == 0 {
|
||||
break
|
||||
}
|
||||
member := members[m.randIntn(len(members))]
|
||||
db.setRem(opts.key, member)
|
||||
deleted = append(deleted, member)
|
||||
}
|
||||
// without `count` return a single value
|
||||
if !opts.withCount {
|
||||
if len(deleted) == 0 {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
c.WriteBulk(deleted[0])
|
||||
return
|
||||
}
|
||||
// with `count` return a list
|
||||
c.WriteLen(len(deleted))
|
||||
for _, v := range deleted {
|
||||
c.WriteBulk(v)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// SRANDMEMBER
|
||||
func (m *Miniredis) cmdSrandmember(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if len(args) > 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key := args[0]
|
||||
count := 0
|
||||
withCount := false
|
||||
if len(args) == 2 {
|
||||
var err error
|
||||
count, err = strconv.Atoi(args[1])
|
||||
if err != nil {
|
||||
setDirty(c)
|
||||
c.WriteError(msgInvalidInt)
|
||||
return
|
||||
}
|
||||
withCount = true
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(key) {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
|
||||
if db.t(key) != "set" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
|
||||
members := db.setMembers(key)
|
||||
if count < 0 {
|
||||
// Non-unique elements is allowed with negative count.
|
||||
c.WriteLen(-count)
|
||||
for count != 0 {
|
||||
member := members[m.randIntn(len(members))]
|
||||
c.WriteBulk(member)
|
||||
count++
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Must be unique elements.
|
||||
m.shuffle(members)
|
||||
if count > len(members) {
|
||||
count = len(members)
|
||||
}
|
||||
if !withCount {
|
||||
c.WriteBulk(members[0])
|
||||
return
|
||||
}
|
||||
c.WriteLen(count)
|
||||
for i := range make([]struct{}, count) {
|
||||
c.WriteBulk(members[i])
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// SREM
|
||||
func (m *Miniredis) cmdSrem(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
key, fields := args[0], args[1:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
if !db.exists(key) {
|
||||
c.WriteInt(0)
|
||||
return
|
||||
}
|
||||
|
||||
if db.t(key) != "set" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
|
||||
c.WriteInt(db.setRem(key, fields...))
|
||||
})
|
||||
}
|
||||
|
||||
// SUNION
|
||||
func (m *Miniredis) cmdSunion(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 1 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
keys := args
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
set, err := db.setUnion(keys)
|
||||
if err != nil {
|
||||
c.WriteError(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
c.WriteLen(len(set))
|
||||
for k := range set {
|
||||
c.WriteBulk(k)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// SUNIONSTORE
|
||||
func (m *Miniredis) cmdSunionstore(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
dest, keys := args[0], args[1:]
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
set, err := db.setUnion(keys)
|
||||
if err != nil {
|
||||
c.WriteError(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
db.del(dest, true)
|
||||
db.setSet(dest, set)
|
||||
c.WriteInt(len(set))
|
||||
})
|
||||
}
|
||||
|
||||
// SSCAN
|
||||
func (m *Miniredis) cmdSscan(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
var opts struct {
|
||||
key string
|
||||
value int
|
||||
cursor int
|
||||
count int
|
||||
withMatch bool
|
||||
match string
|
||||
}
|
||||
|
||||
opts.key = args[0]
|
||||
if ok := optIntErr(c, args[1], &opts.cursor, msgInvalidCursor); !ok {
|
||||
return
|
||||
}
|
||||
args = args[2:]
|
||||
|
||||
// MATCH and COUNT options
|
||||
for len(args) > 0 {
|
||||
if strings.ToLower(args[0]) == "count" {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
count, err := strconv.Atoi(args[1])
|
||||
if err != nil || count < 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgInvalidInt)
|
||||
return
|
||||
}
|
||||
if count == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
opts.count = count
|
||||
args = args[2:]
|
||||
continue
|
||||
}
|
||||
if strings.ToLower(args[0]) == "match" {
|
||||
if len(args) < 2 {
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
opts.withMatch = true
|
||||
opts.match = args[1]
|
||||
args = args[2:]
|
||||
continue
|
||||
}
|
||||
setDirty(c)
|
||||
c.WriteError(msgSyntaxError)
|
||||
return
|
||||
}
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
db := m.db(ctx.selectedDB)
|
||||
// return _all_ (matched) keys every time
|
||||
if db.exists(opts.key) && db.t(opts.key) != "set" {
|
||||
c.WriteError(ErrWrongType.Error())
|
||||
return
|
||||
}
|
||||
members := db.setMembers(opts.key)
|
||||
if opts.withMatch {
|
||||
members, _ = matchKeys(members, opts.match)
|
||||
}
|
||||
low := opts.cursor
|
||||
high := low + opts.count
|
||||
// validate high is correct
|
||||
if high > len(members) || high == 0 {
|
||||
high = len(members)
|
||||
}
|
||||
if opts.cursor > high {
|
||||
// invalid cursor
|
||||
c.WriteLen(2)
|
||||
c.WriteBulk("0") // no next cursor
|
||||
c.WriteLen(0) // no elements
|
||||
return
|
||||
}
|
||||
cursorValue := low + opts.count
|
||||
if cursorValue > len(members) {
|
||||
cursorValue = 0 // no next cursor
|
||||
}
|
||||
members = members[low:high]
|
||||
c.WriteLen(2)
|
||||
c.WriteBulk(fmt.Sprintf("%d", cursorValue))
|
||||
c.WriteLen(len(members))
|
||||
for _, k := range members {
|
||||
c.WriteBulk(k)
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
1880
vendor/github.com/alicebob/miniredis/v2/cmd_sorted_set.go
generated
vendored
File diff suppressed because it is too large
1704
vendor/github.com/alicebob/miniredis/v2/cmd_stream.go
generated
vendored
File diff suppressed because it is too large
1350
vendor/github.com/alicebob/miniredis/v2/cmd_string.go
generated
vendored
File diff suppressed because it is too large
179
vendor/github.com/alicebob/miniredis/v2/cmd_transactions.go
generated
vendored
@@ -1,179 +0,0 @@
|
||||
// Commands from https://redis.io/commands#transactions
|
||||
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
// commandsTransaction handles MULTI &c.
|
||||
func commandsTransaction(m *Miniredis) {
|
||||
m.srv.Register("DISCARD", m.cmdDiscard)
|
||||
m.srv.Register("EXEC", m.cmdExec)
|
||||
m.srv.Register("MULTI", m.cmdMulti)
|
||||
m.srv.Register("UNWATCH", m.cmdUnwatch)
|
||||
m.srv.Register("WATCH", m.cmdWatch)
|
||||
}
|
||||
|
||||
// MULTI
|
||||
func (m *Miniredis) cmdMulti(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 0 {
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
ctx := getCtx(c)
|
||||
if ctx.nested {
|
||||
c.WriteError(msgNotFromScripts)
|
||||
return
|
||||
}
|
||||
if inTx(ctx) {
|
||||
c.WriteError("ERR MULTI calls can not be nested")
|
||||
return
|
||||
}
|
||||
|
||||
startTx(ctx)
|
||||
|
||||
c.WriteOK()
|
||||
}
|
||||
|
||||
// EXEC
|
||||
func (m *Miniredis) cmdExec(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
ctx := getCtx(c)
|
||||
if ctx.nested {
|
||||
c.WriteError(msgNotFromScripts)
|
||||
return
|
||||
}
|
||||
if !inTx(ctx) {
|
||||
c.WriteError("ERR EXEC without MULTI")
|
||||
return
|
||||
}
|
||||
|
||||
if ctx.dirtyTransaction {
|
||||
c.WriteError("EXECABORT Transaction discarded because of previous errors.")
|
||||
// a failed EXEC finishes the tx
|
||||
stopTx(ctx)
|
||||
return
|
||||
}
|
||||
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
// Check WATCHed keys.
|
||||
for t, version := range ctx.watch {
|
||||
if m.db(t.db).keyVersion[t.key] > version {
|
||||
// Abort! Abort!
|
||||
stopTx(ctx)
|
||||
c.WriteLen(-1)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
c.WriteLen(len(ctx.transaction))
|
||||
for _, cb := range ctx.transaction {
|
||||
cb(c, ctx)
|
||||
}
|
||||
// wake up anyone who waits on anything.
|
||||
m.signal.Broadcast()
|
||||
|
||||
stopTx(ctx)
|
||||
}
|
||||
|
||||
// DISCARD
|
||||
func (m *Miniredis) cmdDiscard(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
ctx := getCtx(c)
|
||||
if !inTx(ctx) {
|
||||
c.WriteError("ERR DISCARD without MULTI")
|
||||
return
|
||||
}
|
||||
|
||||
stopTx(ctx)
|
||||
c.WriteOK()
|
||||
}
|
||||
|
||||
// WATCH
|
||||
func (m *Miniredis) cmdWatch(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) == 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
ctx := getCtx(c)
|
||||
if ctx.nested {
|
||||
c.WriteError(msgNotFromScripts)
|
||||
return
|
||||
}
|
||||
if inTx(ctx) {
|
||||
c.WriteError("ERR WATCH in MULTI")
|
||||
return
|
||||
}
|
||||
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
db := m.db(ctx.selectedDB)
|
||||
|
||||
for _, key := range args {
|
||||
watch(db, ctx, key)
|
||||
}
|
||||
c.WriteOK()
|
||||
}
|
||||
|
||||
// UNWATCH
|
||||
func (m *Miniredis) cmdUnwatch(c *server.Peer, cmd string, args []string) {
|
||||
if len(args) != 0 {
|
||||
setDirty(c)
|
||||
c.WriteError(errWrongNumber(cmd))
|
||||
return
|
||||
}
|
||||
if !m.handleAuth(c) {
|
||||
return
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return
|
||||
}
|
||||
|
||||
// Doesn't matter if UNWATCH is in a TX or not. Looks like a Redis bug to me.
|
||||
unwatch(getCtx(c))
|
||||
|
||||
withTx(m, c, func(c *server.Peer, ctx *connCtx) {
|
||||
// Do nothing if it's called in a transaction.
|
||||
c.WriteOK()
|
||||
})
|
||||
}
|
||||
708
vendor/github.com/alicebob/miniredis/v2/db.go
generated
vendored
@@ -1,708 +0,0 @@
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
errInvalidEntryID = errors.New("stream ID is invalid")
|
||||
)
|
||||
|
||||
func (db *RedisDB) exists(k string) bool {
|
||||
_, ok := db.keys[k]
|
||||
return ok
|
||||
}
|
||||
|
||||
// t gives the type of a key, or ""
|
||||
func (db *RedisDB) t(k string) string {
|
||||
return db.keys[k]
|
||||
}
|
||||
|
||||
// allKeys returns all keys. Sorted.
|
||||
func (db *RedisDB) allKeys() []string {
|
||||
res := make([]string, 0, len(db.keys))
|
||||
for k := range db.keys {
|
||||
res = append(res, k)
|
||||
}
|
||||
sort.Strings(res) // To make things deterministic.
|
||||
return res
|
||||
}
|
||||
|
||||
// flush removes all keys and values.
|
||||
func (db *RedisDB) flush() {
|
||||
db.keys = map[string]string{}
|
||||
db.stringKeys = map[string]string{}
|
||||
db.hashKeys = map[string]hashKey{}
|
||||
db.listKeys = map[string]listKey{}
|
||||
db.setKeys = map[string]setKey{}
|
||||
db.hllKeys = map[string]*hll{}
|
||||
db.sortedsetKeys = map[string]sortedSet{}
|
||||
db.ttl = map[string]time.Duration{}
|
||||
db.streamKeys = map[string]*streamKey{}
|
||||
}
|
||||
|
||||
// move something to another db. Will return ok. Or not.
|
||||
func (db *RedisDB) move(key string, to *RedisDB) bool {
|
||||
if _, ok := to.keys[key]; ok {
|
||||
return false
|
||||
}
|
||||
|
||||
t, ok := db.keys[key]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
to.keys[key] = db.keys[key]
|
||||
switch t {
|
||||
case "string":
|
||||
to.stringKeys[key] = db.stringKeys[key]
|
||||
case "hash":
|
||||
to.hashKeys[key] = db.hashKeys[key]
|
||||
case "list":
|
||||
to.listKeys[key] = db.listKeys[key]
|
||||
case "set":
|
||||
to.setKeys[key] = db.setKeys[key]
|
||||
case "zset":
|
||||
to.sortedsetKeys[key] = db.sortedsetKeys[key]
|
||||
case "stream":
|
||||
to.streamKeys[key] = db.streamKeys[key]
|
||||
case "hll":
|
||||
to.hllKeys[key] = db.hllKeys[key]
|
||||
default:
|
||||
panic("unhandled key type")
|
||||
}
|
||||
to.keyVersion[key]++
|
||||
if v, ok := db.ttl[key]; ok {
|
||||
to.ttl[key] = v
|
||||
}
|
||||
db.del(key, true)
|
||||
return true
|
||||
}
|
||||
|
||||
func (db *RedisDB) rename(from, to string) {
|
||||
db.del(to, true)
|
||||
switch db.t(from) {
|
||||
case "string":
|
||||
db.stringKeys[to] = db.stringKeys[from]
|
||||
case "hash":
|
||||
db.hashKeys[to] = db.hashKeys[from]
|
||||
case "list":
|
||||
db.listKeys[to] = db.listKeys[from]
|
||||
case "set":
|
||||
db.setKeys[to] = db.setKeys[from]
|
||||
case "zset":
|
||||
db.sortedsetKeys[to] = db.sortedsetKeys[from]
|
||||
case "stream":
|
||||
db.streamKeys[to] = db.streamKeys[from]
|
||||
case "hll":
|
||||
db.hllKeys[to] = db.hllKeys[from]
|
||||
default:
|
||||
panic("missing case")
|
||||
}
|
||||
db.keys[to] = db.keys[from]
|
||||
db.keyVersion[to]++
|
||||
if v, ok := db.ttl[from]; ok {
|
||||
db.ttl[to] = v
|
||||
}
|
||||
|
||||
db.del(from, true)
|
||||
}
|
||||
|
||||
func (db *RedisDB) del(k string, delTTL bool) {
|
||||
if !db.exists(k) {
|
||||
return
|
||||
}
|
||||
t := db.t(k)
|
||||
delete(db.keys, k)
|
||||
db.keyVersion[k]++
|
||||
if delTTL {
|
||||
delete(db.ttl, k)
|
||||
}
|
||||
switch t {
|
||||
case "string":
|
||||
delete(db.stringKeys, k)
|
||||
case "hash":
|
||||
delete(db.hashKeys, k)
|
||||
case "list":
|
||||
delete(db.listKeys, k)
|
||||
case "set":
|
||||
delete(db.setKeys, k)
|
||||
case "zset":
|
||||
delete(db.sortedsetKeys, k)
|
||||
case "stream":
|
||||
delete(db.streamKeys, k)
|
||||
case "hll":
|
||||
delete(db.hllKeys, k)
|
||||
default:
|
||||
panic("Unknown key type: " + t)
|
||||
}
|
||||
}
|
||||
|
||||
// stringGet returns the string key, or "" if the key doesn't exist or is not a string.
|
||||
func (db *RedisDB) stringGet(k string) string {
|
||||
if t, ok := db.keys[k]; !ok || t != "string" {
|
||||
return ""
|
||||
}
|
||||
return db.stringKeys[k]
|
||||
}
|
||||
|
||||
// stringSet force set()s a key. Does not touch expire.
|
||||
func (db *RedisDB) stringSet(k, v string) {
|
||||
db.del(k, false)
|
||||
db.keys[k] = "string"
|
||||
db.stringKeys[k] = v
|
||||
db.keyVersion[k]++
|
||||
}
|
||||
|
||||
// change int key value
|
||||
func (db *RedisDB) stringIncr(k string, delta int) (int, error) {
|
||||
v := 0
|
||||
if sv, ok := db.stringKeys[k]; ok {
|
||||
var err error
|
||||
v, err = strconv.Atoi(sv)
|
||||
if err != nil {
|
||||
return 0, ErrIntValueError
|
||||
}
|
||||
}
|
||||
v += delta
|
||||
db.stringSet(k, strconv.Itoa(v))
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// change float key value
|
||||
func (db *RedisDB) stringIncrfloat(k string, delta *big.Float) (*big.Float, error) {
|
||||
v := big.NewFloat(0.0)
|
||||
v.SetPrec(128)
|
||||
if sv, ok := db.stringKeys[k]; ok {
|
||||
var err error
|
||||
v, _, err = big.ParseFloat(sv, 10, 128, 0)
|
||||
if err != nil {
|
||||
return nil, ErrFloatValueError
|
||||
}
|
||||
}
|
||||
v.Add(v, delta)
|
||||
db.stringSet(k, formatBig(v))
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// listLpush is 'left push', aka unshift. Returns the new length.
|
||||
func (db *RedisDB) listLpush(k, v string) int {
|
||||
l, ok := db.listKeys[k]
|
||||
if !ok {
|
||||
db.keys[k] = "list"
|
||||
}
|
||||
l = append([]string{v}, l...)
|
||||
db.listKeys[k] = l
|
||||
db.keyVersion[k]++
|
||||
return len(l)
|
||||
}
|
||||
|
||||
// 'left pop', aka shift.
|
||||
func (db *RedisDB) listLpop(k string) string {
|
||||
l := db.listKeys[k]
|
||||
el := l[0]
|
||||
l = l[1:]
|
||||
if len(l) == 0 {
|
||||
db.del(k, true)
|
||||
} else {
|
||||
db.listKeys[k] = l
|
||||
}
|
||||
db.keyVersion[k]++
|
||||
return el
|
||||
}
|
||||
|
||||
func (db *RedisDB) listPush(k string, v ...string) int {
|
||||
l, ok := db.listKeys[k]
|
||||
if !ok {
|
||||
db.keys[k] = "list"
|
||||
}
|
||||
l = append(l, v...)
|
||||
db.listKeys[k] = l
|
||||
db.keyVersion[k]++
|
||||
return len(l)
|
||||
}
|
||||
|
||||
func (db *RedisDB) listPop(k string) string {
|
||||
l := db.listKeys[k]
|
||||
el := l[len(l)-1]
|
||||
l = l[:len(l)-1]
|
||||
if len(l) == 0 {
|
||||
db.del(k, true)
|
||||
} else {
|
||||
db.listKeys[k] = l
|
||||
db.keyVersion[k]++
|
||||
}
|
||||
return el
|
||||
}
|
||||
|
||||
// setset replaces a whole set.
|
||||
func (db *RedisDB) setSet(k string, set setKey) {
|
||||
db.keys[k] = "set"
|
||||
db.setKeys[k] = set
|
||||
db.keyVersion[k]++
|
||||
}
|
||||
|
||||
// setadd adds members to a set. Returns nr of new keys.
|
||||
func (db *RedisDB) setAdd(k string, elems ...string) int {
|
||||
s, ok := db.setKeys[k]
|
||||
if !ok {
|
||||
s = setKey{}
|
||||
db.keys[k] = "set"
|
||||
}
|
||||
added := 0
|
||||
for _, e := range elems {
|
||||
if _, ok := s[e]; !ok {
|
||||
added++
|
||||
}
|
||||
s[e] = struct{}{}
|
||||
}
|
||||
db.setKeys[k] = s
|
||||
db.keyVersion[k]++
|
||||
return added
|
||||
}
|
||||
|
||||
// setrem removes members from a set. Returns nr of deleted keys.
|
||||
func (db *RedisDB) setRem(k string, fields ...string) int {
|
||||
s, ok := db.setKeys[k]
|
||||
if !ok {
|
||||
return 0
|
||||
}
|
||||
removed := 0
|
||||
for _, f := range fields {
|
||||
if _, ok := s[f]; ok {
|
||||
removed++
|
||||
delete(s, f)
|
||||
}
|
||||
}
|
||||
if len(s) == 0 {
|
||||
db.del(k, true)
|
||||
} else {
|
||||
db.setKeys[k] = s
|
||||
}
|
||||
db.keyVersion[k]++
|
||||
return removed
|
||||
}
|
||||
|
||||
// All members of a set.
|
||||
func (db *RedisDB) setMembers(k string) []string {
|
||||
set := db.setKeys[k]
|
||||
members := make([]string, 0, len(set))
|
||||
for k := range set {
|
||||
members = append(members, k)
|
||||
}
|
||||
sort.Strings(members)
|
||||
return members
|
||||
}
|
||||
|
||||
// Is a SET value present?
|
||||
func (db *RedisDB) setIsMember(k, v string) bool {
|
||||
set, ok := db.setKeys[k]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
_, ok = set[v]
|
||||
return ok
|
||||
}
|
||||
|
||||
// hashFields returns all (sorted) keys ('fields') for a hash key.
|
||||
func (db *RedisDB) hashFields(k string) []string {
|
||||
v := db.hashKeys[k]
|
||||
var r []string
|
||||
for k := range v {
|
||||
r = append(r, k)
|
||||
}
|
||||
sort.Strings(r)
|
||||
return r
|
||||
}
|
||||
|
||||
// hashValues returns all (sorted) values a hash key.
|
||||
func (db *RedisDB) hashValues(k string) []string {
|
||||
h := db.hashKeys[k]
|
||||
var r []string
|
||||
for _, v := range h {
|
||||
r = append(r, v)
|
||||
}
|
||||
sort.Strings(r)
|
||||
return r
|
||||
}
|
||||
|
||||
// hashGet a value
|
||||
func (db *RedisDB) hashGet(key, field string) string {
|
||||
return db.hashKeys[key][field]
|
||||
}
|
||||
|
||||
// hashSet returns the number of new keys
|
||||
func (db *RedisDB) hashSet(k string, fv ...string) int {
|
||||
if t, ok := db.keys[k]; ok && t != "hash" {
|
||||
db.del(k, true)
|
||||
}
|
||||
db.keys[k] = "hash"
|
||||
if _, ok := db.hashKeys[k]; !ok {
|
||||
db.hashKeys[k] = map[string]string{}
|
||||
}
|
||||
new := 0
|
||||
for idx := 0; idx < len(fv)-1; idx = idx + 2 {
|
||||
f, v := fv[idx], fv[idx+1]
|
||||
_, ok := db.hashKeys[k][f]
|
||||
db.hashKeys[k][f] = v
|
||||
db.keyVersion[k]++
|
||||
if !ok {
|
||||
new++
|
||||
}
|
||||
}
|
||||
return new
|
||||
}
|
||||
|
||||
// hashIncr changes int key value
|
||||
func (db *RedisDB) hashIncr(key, field string, delta int) (int, error) {
|
||||
v := 0
|
||||
if h, ok := db.hashKeys[key]; ok {
|
||||
if f, ok := h[field]; ok {
|
||||
var err error
|
||||
v, err = strconv.Atoi(f)
|
||||
if err != nil {
|
||||
return 0, ErrIntValueError
|
||||
}
|
||||
}
|
||||
}
|
||||
v += delta
|
||||
db.hashSet(key, field, strconv.Itoa(v))
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// hashIncrfloat changes float key value
|
||||
func (db *RedisDB) hashIncrfloat(key, field string, delta *big.Float) (*big.Float, error) {
|
||||
v := big.NewFloat(0.0)
|
||||
v.SetPrec(128)
|
||||
if h, ok := db.hashKeys[key]; ok {
|
||||
if f, ok := h[field]; ok {
|
||||
var err error
|
||||
v, _, err = big.ParseFloat(f, 10, 128, 0)
|
||||
if err != nil {
|
||||
return nil, ErrFloatValueError
|
||||
}
|
||||
}
|
||||
}
|
||||
v.Add(v, delta)
|
||||
db.hashSet(key, field, formatBig(v))
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// sortedSet returns a sortedSet as a map
|
||||
func (db *RedisDB) sortedSet(key string) map[string]float64 {
|
||||
ss := db.sortedsetKeys[key]
|
||||
return map[string]float64(ss)
|
||||
}
|
||||
|
||||
// ssetSet sets a complete sorted set.
|
||||
func (db *RedisDB) ssetSet(key string, sset sortedSet) {
|
||||
db.keys[key] = "zset"
|
||||
db.keyVersion[key]++
|
||||
db.sortedsetKeys[key] = sset
|
||||
}
|
||||
|
||||
// ssetAdd adds member to a sorted set. Returns whether this was a new member.
|
||||
func (db *RedisDB) ssetAdd(key string, score float64, member string) bool {
|
||||
ss, ok := db.sortedsetKeys[key]
|
||||
if !ok {
|
||||
ss = newSortedSet()
|
||||
db.keys[key] = "zset"
|
||||
}
|
||||
_, ok = ss[member]
|
||||
ss[member] = score
|
||||
db.sortedsetKeys[key] = ss
|
||||
db.keyVersion[key]++
|
||||
return !ok
|
||||
}
|
||||
|
||||
// All members from a sorted set, ordered by score.
|
||||
func (db *RedisDB) ssetMembers(key string) []string {
|
||||
ss, ok := db.sortedsetKeys[key]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
elems := ss.byScore(asc)
|
||||
members := make([]string, 0, len(elems))
|
||||
for _, e := range elems {
|
||||
members = append(members, e.member)
|
||||
}
|
||||
return members
|
||||
}
|
||||
|
||||
// All members+scores from a sorted set, ordered by score.
|
||||
func (db *RedisDB) ssetElements(key string) ssElems {
|
||||
ss, ok := db.sortedsetKeys[key]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
return ss.byScore(asc)
|
||||
}
|
||||
|
||||
func (db *RedisDB) ssetRandomMember(key string) string {
|
||||
elems := db.ssetElements(key)
|
||||
if len(elems) == 0 {
|
||||
return ""
|
||||
}
|
||||
return elems[db.master.randIntn(len(elems))].member
|
||||
}
|
||||
|
||||
// ssetCard is the sorted set cardinality.
|
||||
func (db *RedisDB) ssetCard(key string) int {
|
||||
ss := db.sortedsetKeys[key]
|
||||
return ss.card()
|
||||
}
|
||||
|
||||
// ssetRank is the sorted set rank.
|
||||
func (db *RedisDB) ssetRank(key, member string, d direction) (int, bool) {
|
||||
ss := db.sortedsetKeys[key]
|
||||
return ss.rankByScore(member, d)
|
||||
}
|
||||
|
||||
// ssetScore is sorted set score.
|
||||
func (db *RedisDB) ssetScore(key, member string) float64 {
|
||||
ss := db.sortedsetKeys[key]
|
||||
return ss[member]
|
||||
}
|
||||
|
||||
// ssetRem is sorted set key delete.
|
||||
func (db *RedisDB) ssetRem(key, member string) bool {
|
||||
ss := db.sortedsetKeys[key]
|
||||
_, ok := ss[member]
|
||||
delete(ss, member)
|
||||
if len(ss) == 0 {
|
||||
// Delete key on removal of last member
|
||||
db.del(key, true)
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
// ssetExists tells if a member exists in a sorted set.
|
||||
func (db *RedisDB) ssetExists(key, member string) bool {
|
||||
ss := db.sortedsetKeys[key]
|
||||
_, ok := ss[member]
|
||||
return ok
|
||||
}
|
||||
|
||||
// ssetIncrby changes float sorted set score.
|
||||
func (db *RedisDB) ssetIncrby(k, m string, delta float64) float64 {
|
||||
ss, ok := db.sortedsetKeys[k]
|
||||
if !ok {
|
||||
ss = newSortedSet()
|
||||
db.keys[k] = "zset"
|
||||
db.sortedsetKeys[k] = ss
|
||||
}
|
||||
|
||||
v, _ := ss.get(m)
|
||||
v += delta
|
||||
ss.set(v, m)
|
||||
db.keyVersion[k]++
|
||||
return v
|
||||
}
|
||||
|
||||
// setDiff implements the logic behind SDIFF*
|
||||
func (db *RedisDB) setDiff(keys []string) (setKey, error) {
|
||||
key := keys[0]
|
||||
keys = keys[1:]
|
||||
if db.exists(key) && db.t(key) != "set" {
|
||||
return nil, ErrWrongType
|
||||
}
|
||||
s := setKey{}
|
||||
for k := range db.setKeys[key] {
|
||||
s[k] = struct{}{}
|
||||
}
|
||||
for _, sk := range keys {
|
||||
if !db.exists(sk) {
|
||||
continue
|
||||
}
|
||||
if db.t(sk) != "set" {
|
||||
return nil, ErrWrongType
|
||||
}
|
||||
for e := range db.setKeys[sk] {
|
||||
delete(s, e)
|
||||
}
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// setInter implements the logic behind SINTER*
|
||||
// len keys needs to be > 0
|
||||
func (db *RedisDB) setInter(keys []string) (setKey, error) {
|
||||
// all keys must either not exist, or be of type "set".
|
||||
for _, key := range keys {
|
||||
if db.exists(key) && db.t(key) != "set" {
|
||||
return nil, ErrWrongType
|
||||
}
|
||||
}
|
||||
|
||||
key := keys[0]
|
||||
keys = keys[1:]
|
||||
if !db.exists(key) {
|
||||
return nil, nil
|
||||
}
|
||||
if db.t(key) != "set" {
|
||||
return nil, ErrWrongType
|
||||
}
|
||||
s := setKey{}
|
||||
for k := range db.setKeys[key] {
|
||||
s[k] = struct{}{}
|
||||
}
|
||||
for _, sk := range keys {
|
||||
if !db.exists(sk) {
|
||||
return setKey{}, nil
|
||||
}
|
||||
if db.t(sk) != "set" {
|
||||
return nil, ErrWrongType
|
||||
}
|
||||
other := db.setKeys[sk]
|
||||
for e := range s {
|
||||
if _, ok := other[e]; ok {
|
||||
continue
|
||||
}
|
||||
delete(s, e)
|
||||
}
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// setUnion implements the logic behind SUNION*
|
||||
func (db *RedisDB) setUnion(keys []string) (setKey, error) {
|
||||
key := keys[0]
|
||||
keys = keys[1:]
|
||||
if db.exists(key) && db.t(key) != "set" {
|
||||
return nil, ErrWrongType
|
||||
}
|
||||
s := setKey{}
|
||||
for k := range db.setKeys[key] {
|
||||
s[k] = struct{}{}
|
||||
}
|
||||
for _, sk := range keys {
|
||||
if !db.exists(sk) {
|
||||
continue
|
||||
}
|
||||
if db.t(sk) != "set" {
|
||||
return nil, ErrWrongType
|
||||
}
|
||||
for e := range db.setKeys[sk] {
|
||||
s[e] = struct{}{}
|
||||
}
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (db *RedisDB) newStream(key string) (*streamKey, error) {
|
||||
if s, err := db.stream(key); err != nil {
|
||||
return nil, err
|
||||
} else if s != nil {
|
||||
return nil, fmt.Errorf("ErrAlreadyExists")
|
||||
}
|
||||
|
||||
db.keys[key] = "stream"
|
||||
s := newStreamKey()
|
||||
db.streamKeys[key] = s
|
||||
db.keyVersion[key]++
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// return existing stream, or nil.
|
||||
func (db *RedisDB) stream(key string) (*streamKey, error) {
|
||||
if db.exists(key) && db.t(key) != "stream" {
|
||||
return nil, ErrWrongType
|
||||
}
|
||||
|
||||
return db.streamKeys[key], nil
|
||||
}
|
||||
|
||||
// return existing stream group, or nil.
|
||||
func (db *RedisDB) streamGroup(key, group string) (*streamGroup, error) {
|
||||
s, err := db.stream(key)
|
||||
if err != nil || s == nil {
|
||||
return nil, err
|
||||
}
|
||||
return s.groups[group], nil
|
||||
}
|
||||
|
||||
// fastForward advances the current timestamp by duration; works as a time machine
|
||||
func (db *RedisDB) fastForward(duration time.Duration) {
|
||||
for _, key := range db.allKeys() {
|
||||
if value, ok := db.ttl[key]; ok {
|
||||
db.ttl[key] = value - duration
|
||||
db.checkTTL(key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (db *RedisDB) checkTTL(key string) {
|
||||
if v, ok := db.ttl[key]; ok && v <= 0 {
|
||||
db.del(key, true)
|
||||
}
|
||||
}
|
||||
|
||||
// hllAdd adds members to a hll. Returns 1 if the internal HyperLogLog was altered, otherwise 0
|
||||
func (db *RedisDB) hllAdd(k string, elems ...string) int {
|
||||
s, ok := db.hllKeys[k]
|
||||
if !ok {
|
||||
s = newHll()
|
||||
db.keys[k] = "hll"
|
||||
}
|
||||
hllAltered := 0
|
||||
for _, e := range elems {
|
||||
if s.Add([]byte(e)) {
|
||||
hllAltered = 1
|
||||
}
|
||||
}
|
||||
db.hllKeys[k] = s
|
||||
db.keyVersion[k]++
|
||||
return hllAltered
|
||||
}
|
||||
|
||||
// hllCount estimates the number of members added to the hll by hllAdd. If called with several arguments, hllCount returns the sum of the estimates
|
||||
func (db *RedisDB) hllCount(keys []string) (int, error) {
|
||||
countOverall := 0
|
||||
for _, key := range keys {
|
||||
if db.exists(key) && db.t(key) != "hll" {
|
||||
return 0, ErrNotValidHllValue
|
||||
}
|
||||
if !db.exists(key) {
|
||||
continue
|
||||
}
|
||||
countOverall += db.hllKeys[key].Count()
|
||||
}
|
||||
|
||||
return countOverall, nil
|
||||
}
|
||||
|
||||
// hllMerge merges all the hlls provided as keys into the first key. Creates a new hll in the first key if it contains nothing
|
||||
func (db *RedisDB) hllMerge(keys []string) error {
|
||||
for _, key := range keys {
|
||||
if db.exists(key) && db.t(key) != "hll" {
|
||||
return ErrNotValidHllValue
|
||||
}
|
||||
}
|
||||
|
||||
destKey := keys[0]
|
||||
restKeys := keys[1:]
|
||||
|
||||
var destHll *hll
|
||||
if db.exists(destKey) {
|
||||
destHll = db.hllKeys[destKey]
|
||||
} else {
|
||||
destHll = newHll()
|
||||
}
|
||||
|
||||
for _, key := range restKeys {
|
||||
if !db.exists(key) {
|
||||
continue
|
||||
}
|
||||
destHll.Merge(db.hllKeys[key])
|
||||
}
|
||||
|
||||
db.hllKeys[destKey] = destHll
|
||||
db.keys[destKey] = "hll"
|
||||
db.keyVersion[destKey]++
|
||||
|
||||
return nil
|
||||
}
|
||||
803
vendor/github.com/alicebob/miniredis/v2/direct.go
generated
vendored
@@ -1,803 +0,0 @@
|
||||
package miniredis
|
||||
|
||||
// Commands to modify and query our databases directly.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/big"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrKeyNotFound is returned when a key doesn't exist.
|
||||
ErrKeyNotFound = errors.New(msgKeyNotFound)
|
||||
|
||||
// ErrWrongType when a key is not the right type.
|
||||
ErrWrongType = errors.New(msgWrongType)
|
||||
|
||||
// ErrNotValidHllValue when a key is not a valid HyperLogLog string value.
|
||||
ErrNotValidHllValue = errors.New(msgNotValidHllValue)
|
||||
|
||||
// ErrIntValueError can be returned by INCRBY
|
||||
ErrIntValueError = errors.New(msgInvalidInt)
|
||||
|
||||
// ErrFloatValueError can be returned by INCRBYFLOAT
|
||||
ErrFloatValueError = errors.New(msgInvalidFloat)
|
||||
)
|
||||
|
||||
// Select sets the DB id for all direct commands.
|
||||
func (m *Miniredis) Select(i int) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
m.selectedDB = i
|
||||
}
|
||||
|
||||
// Keys returns all keys from the selected database, sorted.
|
||||
func (m *Miniredis) Keys() []string {
|
||||
return m.DB(m.selectedDB).Keys()
|
||||
}
|
||||
|
||||
// Keys returns all keys, sorted.
|
||||
func (db *RedisDB) Keys() []string {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
return db.allKeys()
|
||||
}
|
||||
|
||||
// FlushAll removes all keys from all databases.
|
||||
func (m *Miniredis) FlushAll() {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
defer m.signal.Broadcast()
|
||||
|
||||
m.flushAll()
|
||||
}
|
||||
|
||||
func (m *Miniredis) flushAll() {
|
||||
for _, db := range m.dbs {
|
||||
db.flush()
|
||||
}
|
||||
}
|
||||
|
||||
// FlushDB removes all keys from the selected database.
|
||||
func (m *Miniredis) FlushDB() {
|
||||
m.DB(m.selectedDB).FlushDB()
|
||||
}
|
||||
|
||||
// FlushDB removes all keys.
|
||||
func (db *RedisDB) FlushDB() {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
db.flush()
|
||||
}
|
||||
|
||||
// Get returns string keys added with SET.
|
||||
func (m *Miniredis) Get(k string) (string, error) {
|
||||
return m.DB(m.selectedDB).Get(k)
|
||||
}
|
||||
|
||||
// Get returns a string key.
|
||||
func (db *RedisDB) Get(k string) (string, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
if !db.exists(k) {
|
||||
return "", ErrKeyNotFound
|
||||
}
|
||||
if db.t(k) != "string" {
|
||||
return "", ErrWrongType
|
||||
}
|
||||
return db.stringGet(k), nil
|
||||
}
|
||||
|
||||
// Set sets a string key. Removes expire.
|
||||
func (m *Miniredis) Set(k, v string) error {
|
||||
return m.DB(m.selectedDB).Set(k, v)
|
||||
}
|
||||
|
||||
// Set sets a string key. Removes expire.
|
||||
// Unlike redis the key can't be an existing non-string key.
|
||||
func (db *RedisDB) Set(k, v string) error {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
if db.exists(k) && db.t(k) != "string" {
|
||||
return ErrWrongType
|
||||
}
|
||||
db.del(k, true) // Remove expire
|
||||
db.stringSet(k, v)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Incr changes an int string value by delta.
|
||||
func (m *Miniredis) Incr(k string, delta int) (int, error) {
|
||||
return m.DB(m.selectedDB).Incr(k, delta)
|
||||
}
|
||||
|
||||
// Incr changes an int string value by delta.
|
||||
func (db *RedisDB) Incr(k string, delta int) (int, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
if db.exists(k) && db.t(k) != "string" {
|
||||
return 0, ErrWrongType
|
||||
}
|
||||
|
||||
return db.stringIncr(k, delta)
|
||||
}
|
||||
|
||||
// IncrByFloat increments the float value of a key by the given delta.
|
||||
// It is an alias for Miniredis.Incrfloat.
|
||||
func (m *Miniredis) IncrByFloat(k string, delta float64) (float64, error) {
|
||||
return m.Incrfloat(k, delta)
|
||||
}
|
||||
|
||||
// Incrfloat changes a float string value by delta.
|
||||
func (m *Miniredis) Incrfloat(k string, delta float64) (float64, error) {
|
||||
return m.DB(m.selectedDB).Incrfloat(k, delta)
|
||||
}
|
||||
|
||||
// Incrfloat changes a float string value by delta.
|
||||
func (db *RedisDB) Incrfloat(k string, delta float64) (float64, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
if db.exists(k) && db.t(k) != "string" {
|
||||
return 0, ErrWrongType
|
||||
}
|
||||
|
||||
v, err := db.stringIncrfloat(k, big.NewFloat(delta))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
vf, _ := v.Float64()
|
||||
return vf, nil
|
||||
}
|
||||
|
||||
// List returns the list k, or an error if it's not there or something else.
|
||||
// This is the same as the Redis command `LRANGE 0 -1`, but you can do your own
|
||||
// range-ing.
|
||||
func (m *Miniredis) List(k string) ([]string, error) {
|
||||
return m.DB(m.selectedDB).List(k)
|
||||
}
|
||||
|
||||
// List returns the list k, or an error if it's not there or something else.
|
||||
// This is the same as the Redis command `LRANGE 0 -1`, but you can do your own
|
||||
// range-ing.
|
||||
func (db *RedisDB) List(k string) ([]string, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
if !db.exists(k) {
|
||||
return nil, ErrKeyNotFound
|
||||
}
|
||||
if db.t(k) != "list" {
|
||||
return nil, ErrWrongType
|
||||
}
|
||||
return db.listKeys[k], nil
|
||||
}
|
||||
|
||||
// Lpush prepends one value to a list. Returns the new length.
|
||||
func (m *Miniredis) Lpush(k, v string) (int, error) {
|
||||
return m.DB(m.selectedDB).Lpush(k, v)
|
||||
}
|
||||
|
||||
// Lpush prepends one value to a list. Returns the new length.
|
||||
func (db *RedisDB) Lpush(k, v string) (int, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
if db.exists(k) && db.t(k) != "list" {
|
||||
return 0, ErrWrongType
|
||||
}
|
||||
return db.listLpush(k, v), nil
|
||||
}
|
||||
|
||||
// Lpop removes and returns the first element in a list.
|
||||
func (m *Miniredis) Lpop(k string) (string, error) {
|
||||
return m.DB(m.selectedDB).Lpop(k)
|
||||
}
|
||||
|
||||
// Lpop removes and returns the first element in a list.
|
||||
func (db *RedisDB) Lpop(k string) (string, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
if !db.exists(k) {
|
||||
return "", ErrKeyNotFound
|
||||
}
|
||||
if db.t(k) != "list" {
|
||||
return "", ErrWrongType
|
||||
}
|
||||
return db.listLpop(k), nil
|
||||
}
|
||||
|
||||
// RPush appends one or multiple values to a list. Returns the new length.
|
||||
// An alias for Push
|
||||
func (m *Miniredis) RPush(k string, v ...string) (int, error) {
|
||||
return m.Push(k, v...)
|
||||
}
|
||||
|
||||
// Push adds an element at the end. Returns the new length.
|
||||
func (m *Miniredis) Push(k string, v ...string) (int, error) {
|
||||
return m.DB(m.selectedDB).Push(k, v...)
|
||||
}
|
||||
|
||||
// Push adds an element at the end. Is called RPUSH in redis. Returns the new length.
|
||||
func (db *RedisDB) Push(k string, v ...string) (int, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
if db.exists(k) && db.t(k) != "list" {
|
||||
return 0, ErrWrongType
|
||||
}
|
||||
return db.listPush(k, v...), nil
|
||||
}
|
||||
|
||||
// RPop is an alias for Pop
|
||||
func (m *Miniredis) RPop(k string) (string, error) {
|
||||
return m.Pop(k)
|
||||
}
|
||||
|
||||
// Pop removes and returns the last element. Is called RPOP in Redis.
|
||||
func (m *Miniredis) Pop(k string) (string, error) {
|
||||
return m.DB(m.selectedDB).Pop(k)
|
||||
}
|
||||
|
||||
// Pop removes and returns the last element. Is called RPOP in Redis.
|
||||
func (db *RedisDB) Pop(k string) (string, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
if !db.exists(k) {
|
||||
return "", ErrKeyNotFound
|
||||
}
|
||||
if db.t(k) != "list" {
|
||||
return "", ErrWrongType
|
||||
}
|
||||
|
||||
return db.listPop(k), nil
|
||||
}
|
||||
|
||||
// SAdd adds keys to a set. Returns the number of new keys.
|
||||
// Alias for SetAdd
|
||||
func (m *Miniredis) SAdd(k string, elems ...string) (int, error) {
|
||||
return m.SetAdd(k, elems...)
|
||||
}
|
||||
|
||||
// SetAdd adds keys to a set. Returns the number of new keys.
|
||||
func (m *Miniredis) SetAdd(k string, elems ...string) (int, error) {
|
||||
return m.DB(m.selectedDB).SetAdd(k, elems...)
|
||||
}
|
||||
|
||||
// SetAdd adds keys to a set. Returns the number of new keys.
|
||||
func (db *RedisDB) SetAdd(k string, elems ...string) (int, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
if db.exists(k) && db.t(k) != "set" {
|
||||
return 0, ErrWrongType
|
||||
}
|
||||
return db.setAdd(k, elems...), nil
|
||||
}
|
||||
|
||||
// SMembers returns all keys in a set, sorted.
|
||||
// Alias for Members.
|
||||
func (m *Miniredis) SMembers(k string) ([]string, error) {
|
||||
return m.Members(k)
|
||||
}
|
||||
|
||||
// Members returns all keys in a set, sorted.
|
||||
func (m *Miniredis) Members(k string) ([]string, error) {
|
||||
return m.DB(m.selectedDB).Members(k)
|
||||
}
|
||||
|
||||
// Members gives all set keys. Sorted.
|
||||
func (db *RedisDB) Members(k string) ([]string, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
if !db.exists(k) {
|
||||
return nil, ErrKeyNotFound
|
||||
}
|
||||
if db.t(k) != "set" {
|
||||
return nil, ErrWrongType
|
||||
}
|
||||
return db.setMembers(k), nil
|
||||
}
|
||||
|
||||
// SIsMember tells if value is in the set.
|
||||
// Alias for IsMember
|
||||
func (m *Miniredis) SIsMember(k, v string) (bool, error) {
|
||||
return m.IsMember(k, v)
|
||||
}
|
||||
|
||||
// IsMember tells if value is in the set.
|
||||
func (m *Miniredis) IsMember(k, v string) (bool, error) {
|
||||
return m.DB(m.selectedDB).IsMember(k, v)
|
||||
}
|
||||
|
||||
// IsMember tells if value is in the set.
|
||||
func (db *RedisDB) IsMember(k, v string) (bool, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
if !db.exists(k) {
|
||||
return false, ErrKeyNotFound
|
||||
}
|
||||
if db.t(k) != "set" {
|
||||
return false, ErrWrongType
|
||||
}
|
||||
return db.setIsMember(k, v), nil
|
||||
}
|
||||
|
||||
// HKeys returns all (sorted) keys ('fields') for a hash key.
|
||||
func (m *Miniredis) HKeys(k string) ([]string, error) {
|
||||
return m.DB(m.selectedDB).HKeys(k)
|
||||
}
|
||||
|
||||
// HKeys returns all (sorted) keys ('fields') for a hash key.
|
||||
func (db *RedisDB) HKeys(key string) ([]string, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
if !db.exists(key) {
|
||||
return nil, ErrKeyNotFound
|
||||
}
|
||||
if db.t(key) != "hash" {
|
||||
return nil, ErrWrongType
|
||||
}
|
||||
return db.hashFields(key), nil
|
||||
}
|
||||
|
||||
// Del deletes a key and any expiration value. Returns whether there was a key.
|
||||
func (m *Miniredis) Del(k string) bool {
|
||||
return m.DB(m.selectedDB).Del(k)
|
||||
}
|
||||
|
||||
// Del deletes a key and any expiration value. Returns whether there was a key.
|
||||
func (db *RedisDB) Del(k string) bool {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
if !db.exists(k) {
|
||||
return false
|
||||
}
|
||||
db.del(k, true)
|
||||
return true
|
||||
}
|
||||
|
||||
// Unlink deletes a key and any expiration value. Returns whether there was a key.
|
||||
// It's exactly the same as Del() and is not async. It is here for consistency.
|
||||
func (m *Miniredis) Unlink(k string) bool {
|
||||
return m.Del(k)
|
||||
}
|
||||
|
||||
// Unlink deletes a key and any expiration value. Returns whether there was a key.
|
||||
// It's exactly the same as Del() and is not async. It is here for consistency.
|
||||
func (db *RedisDB) Unlink(k string) bool {
|
||||
return db.Del(k)
|
||||
}
|
||||
|
||||
// TTL is the left over time to live. As set via EXPIRE, PEXPIRE, EXPIREAT,
|
||||
// PEXPIREAT.
|
||||
// Note: this direct function returns 0 if there is no TTL set, unlike redis,
|
||||
// which returns -1.
|
||||
func (m *Miniredis) TTL(k string) time.Duration {
|
||||
return m.DB(m.selectedDB).TTL(k)
|
||||
}
|
||||
|
||||
// TTL is the left over time to live. As set via EXPIRE, PEXPIRE, EXPIREAT,
|
||||
// PEXPIREAT.
|
||||
// 0 if not set.
|
||||
func (db *RedisDB) TTL(k string) time.Duration {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
return db.ttl[k]
|
||||
}
|
||||
|
||||
// SetTTL sets the TTL of a key.
|
||||
func (m *Miniredis) SetTTL(k string, ttl time.Duration) {
|
||||
m.DB(m.selectedDB).SetTTL(k, ttl)
|
||||
}
|
||||
|
||||
// SetTTL sets the time to live of a key.
|
||||
func (db *RedisDB) SetTTL(k string, ttl time.Duration) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
db.ttl[k] = ttl
|
||||
db.keyVersion[k]++
|
||||
}
|
||||
|
||||
// Type gives the type of a key, or ""
|
||||
func (m *Miniredis) Type(k string) string {
|
||||
return m.DB(m.selectedDB).Type(k)
|
||||
}
|
||||
|
||||
// Type gives the type of a key, or ""
|
||||
func (db *RedisDB) Type(k string) string {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
return db.t(k)
|
||||
}
|
||||
|
||||
// Exists tells whether a key exists.
|
||||
func (m *Miniredis) Exists(k string) bool {
|
||||
return m.DB(m.selectedDB).Exists(k)
|
||||
}
|
||||
|
||||
// Exists tells whether a key exists.
|
||||
func (db *RedisDB) Exists(k string) bool {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
return db.exists(k)
|
||||
}
|
||||
|
||||
// HGet returns hash keys added with HSET.
|
||||
// This will return an empty string if the key is not set. Redis would return
|
||||
// a nil.
|
||||
// Returns empty string when the key is of a different type.
|
||||
func (m *Miniredis) HGet(k, f string) string {
|
||||
return m.DB(m.selectedDB).HGet(k, f)
|
||||
}
|
||||
|
||||
// HGet returns hash keys added with HSET.
|
||||
// Returns empty string when the key is of a different type.
|
||||
func (db *RedisDB) HGet(k, f string) string {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
h, ok := db.hashKeys[k]
|
||||
if !ok {
|
||||
return ""
|
||||
}
|
||||
return h[f]
|
||||
}
|
||||
|
||||
// HSet sets hash keys.
|
||||
// If there is another key by the same name it will be gone.
|
||||
func (m *Miniredis) HSet(k string, fv ...string) {
|
||||
m.DB(m.selectedDB).HSet(k, fv...)
|
||||
}
|
||||
|
||||
// HSet sets hash keys.
|
||||
// If there is another key by the same name it will be gone.
|
||||
func (db *RedisDB) HSet(k string, fv ...string) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
db.hashSet(k, fv...)
|
||||
}
|
||||
|
||||
// HDel deletes a hash key.
|
||||
func (m *Miniredis) HDel(k, f string) {
|
||||
m.DB(m.selectedDB).HDel(k, f)
|
||||
}
|
||||
|
||||
// HDel deletes a hash key.
|
||||
func (db *RedisDB) HDel(k, f string) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
db.hdel(k, f)
|
||||
}
|
||||
|
||||
func (db *RedisDB) hdel(k, f string) {
|
||||
if _, ok := db.hashKeys[k]; !ok {
|
||||
return
|
||||
}
|
||||
delete(db.hashKeys[k], f)
|
||||
db.keyVersion[k]++
|
||||
}
|
||||
|
||||
// HIncrBy increases the integer value of a hash field by delta (int).
|
||||
func (m *Miniredis) HIncrBy(k, f string, delta int) (int, error) {
|
||||
return m.HIncr(k, f, delta)
|
||||
}
|
||||
|
||||
// HIncr increases a key/field by delta (int).
|
||||
func (m *Miniredis) HIncr(k, f string, delta int) (int, error) {
|
||||
return m.DB(m.selectedDB).HIncr(k, f, delta)
|
||||
}
|
||||
|
||||
// HIncr increases a key/field by delta (int).
|
||||
func (db *RedisDB) HIncr(k, f string, delta int) (int, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
return db.hashIncr(k, f, delta)
|
||||
}
|
||||
|
||||
// HIncrByFloat increases a key/field by delta (float).
|
||||
func (m *Miniredis) HIncrByFloat(k, f string, delta float64) (float64, error) {
|
||||
return m.HIncrfloat(k, f, delta)
|
||||
}
|
||||
|
||||
// HIncrfloat increases a key/field by delta (float).
|
||||
func (m *Miniredis) HIncrfloat(k, f string, delta float64) (float64, error) {
|
||||
return m.DB(m.selectedDB).HIncrfloat(k, f, delta)
|
||||
}
|
||||
|
||||
// HIncrfloat increases a key/field by delta (float).
|
||||
func (db *RedisDB) HIncrfloat(k, f string, delta float64) (float64, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
v, err := db.hashIncrfloat(k, f, big.NewFloat(delta))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
vf, _ := v.Float64()
|
||||
return vf, nil
|
||||
}
|
||||
|
||||
// SRem removes fields from a set. Returns number of deleted fields.
|
||||
func (m *Miniredis) SRem(k string, fields ...string) (int, error) {
|
||||
return m.DB(m.selectedDB).SRem(k, fields...)
|
||||
}
|
||||
|
||||
// SRem removes fields from a set. Returns number of deleted fields.
|
||||
func (db *RedisDB) SRem(k string, fields ...string) (int, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
if !db.exists(k) {
|
||||
return 0, ErrKeyNotFound
|
||||
}
|
||||
if db.t(k) != "set" {
|
||||
return 0, ErrWrongType
|
||||
}
|
||||
return db.setRem(k, fields...), nil
|
||||
}
|
||||
|
||||
// ZAdd adds a score,member to a sorted set.
|
||||
func (m *Miniredis) ZAdd(k string, score float64, member string) (bool, error) {
|
||||
return m.DB(m.selectedDB).ZAdd(k, score, member)
|
||||
}
|
||||
|
||||
// ZAdd adds a score,member to a sorted set.
|
||||
func (db *RedisDB) ZAdd(k string, score float64, member string) (bool, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
if db.exists(k) && db.t(k) != "zset" {
|
||||
return false, ErrWrongType
|
||||
}
|
||||
return db.ssetAdd(k, score, member), nil
|
||||
}
|
||||
|
||||
// ZMembers returns all members of a sorted set by score
|
||||
func (m *Miniredis) ZMembers(k string) ([]string, error) {
|
||||
return m.DB(m.selectedDB).ZMembers(k)
|
||||
}
|
||||
|
||||
// ZMembers returns all members of a sorted set by score
|
||||
func (db *RedisDB) ZMembers(k string) ([]string, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
if !db.exists(k) {
|
||||
return nil, ErrKeyNotFound
|
||||
}
|
||||
if db.t(k) != "zset" {
|
||||
return nil, ErrWrongType
|
||||
}
|
||||
return db.ssetMembers(k), nil
|
||||
}
|
||||
|
||||
// SortedSet returns a raw string->float64 map.
|
||||
func (m *Miniredis) SortedSet(k string) (map[string]float64, error) {
|
||||
return m.DB(m.selectedDB).SortedSet(k)
|
||||
}
|
||||
|
||||
// SortedSet returns a raw string->float64 map.
|
||||
func (db *RedisDB) SortedSet(k string) (map[string]float64, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
if !db.exists(k) {
|
||||
return nil, ErrKeyNotFound
|
||||
}
|
||||
if db.t(k) != "zset" {
|
||||
return nil, ErrWrongType
|
||||
}
|
||||
return db.sortedSet(k), nil
|
||||
}
|
||||
|
||||
// ZRem deletes a member. Returns whether the member was there.
|
||||
func (m *Miniredis) ZRem(k, member string) (bool, error) {
|
||||
return m.DB(m.selectedDB).ZRem(k, member)
|
||||
}
|
||||
|
||||
// ZRem deletes a member. Returns whether the member was there.
|
||||
func (db *RedisDB) ZRem(k, member string) (bool, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
if !db.exists(k) {
|
||||
return false, ErrKeyNotFound
|
||||
}
|
||||
if db.t(k) != "zset" {
|
||||
return false, ErrWrongType
|
||||
}
|
||||
return db.ssetRem(k, member), nil
|
||||
}
|
||||
|
||||
// ZScore gives the score of a sorted set member.
|
||||
func (m *Miniredis) ZScore(k, member string) (float64, error) {
|
||||
return m.DB(m.selectedDB).ZScore(k, member)
|
||||
}
|
||||
|
||||
// ZScore gives the score of a sorted set member.
|
||||
func (db *RedisDB) ZScore(k, member string) (float64, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
if !db.exists(k) {
|
||||
return 0, ErrKeyNotFound
|
||||
}
|
||||
if db.t(k) != "zset" {
|
||||
return 0, ErrWrongType
|
||||
}
|
||||
return db.ssetScore(k, member), nil
|
||||
}
|
||||
|
||||
// XAdd adds an entry to a stream. `id` can be left empty or be '*'.
|
||||
// If a value is given normal XADD rules apply. Values should be an even
|
||||
// length.
|
||||
func (m *Miniredis) XAdd(k string, id string, values []string) (string, error) {
|
||||
return m.DB(m.selectedDB).XAdd(k, id, values)
|
||||
}
|
||||
|
||||
// XAdd adds an entry to a stream. `id` can be left empty or be '*'.
|
||||
// If a value is given normal XADD rules apply. Values should be an even
|
||||
// length.
|
||||
func (db *RedisDB) XAdd(k string, id string, values []string) (string, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
defer db.master.signal.Broadcast()
|
||||
|
||||
s, err := db.stream(k)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if s == nil {
|
||||
s, _ = db.newStream(k)
|
||||
}
|
||||
|
||||
return s.add(id, values, db.master.effectiveNow())
|
||||
}
|
||||
|
||||
// Stream returns a slice of stream entries. Oldest first.
|
||||
func (m *Miniredis) Stream(k string) ([]StreamEntry, error) {
|
||||
return m.DB(m.selectedDB).Stream(k)
|
||||
}
|
||||
|
||||
// Stream returns a slice of stream entries. Oldest first.
|
||||
func (db *RedisDB) Stream(key string) ([]StreamEntry, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
s, err := db.stream(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if s == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return s.entries, nil
|
||||
}
|
||||
|
||||
// Publish a message to subscribers. Returns the number of receivers.
|
||||
func (m *Miniredis) Publish(channel, message string) int {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
return m.publish(channel, message)
|
||||
}
|
||||
|
||||
// PubSubChannels is "PUBSUB CHANNELS <pattern>". An empty pattern is fine
|
||||
// (meaning all channels).
|
||||
// Returned channels will be ordered alphabetically.
|
||||
func (m *Miniredis) PubSubChannels(pattern string) []string {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
return activeChannels(m.allSubscribers(), pattern)
|
||||
}
|
||||
|
||||
// PubSubNumSub is "PUBSUB NUMSUB [channels]". It returns all channels with their
|
||||
// subscriber count.
|
||||
func (m *Miniredis) PubSubNumSub(channels ...string) map[string]int {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
subs := m.allSubscribers()
|
||||
res := map[string]int{}
|
||||
for _, channel := range channels {
|
||||
res[channel] = countSubs(subs, channel)
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// PubSubNumPat is "PUBSUB NUMPAT"
|
||||
func (m *Miniredis) PubSubNumPat() int {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
return countPsubs(m.allSubscribers())
|
||||
}
|
||||
|
||||
// PfAdd adds keys to a hll. Returns 1 if the inner hll value has been changed.
|
||||
func (m *Miniredis) PfAdd(k string, elems ...string) (int, error) {
|
||||
return m.DB(m.selectedDB).HllAdd(k, elems...)
|
||||
}
|
||||
|
||||
// HllAdd adds keys to a hll. Returns 1 if the inner hll value has been changed.
|
||||
func (db *RedisDB) HllAdd(k string, elems ...string) (int, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
if db.exists(k) && db.t(k) != "hll" {
|
||||
return 0, ErrWrongType
|
||||
}
|
||||
return db.hllAdd(k, elems...), nil
|
||||
}
|
||||
|
||||
// PfCount returns an estimation of the amount of elements previously added to a hll.
|
||||
func (m *Miniredis) PfCount(keys ...string) (int, error) {
|
||||
return m.DB(m.selectedDB).HllCount(keys...)
|
||||
}
|
||||
|
||||
// HllCount returns an estimation of the amount of elements previously added to a hll.
|
||||
func (db *RedisDB) HllCount(keys ...string) (int, error) {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
return db.hllCount(keys)
|
||||
}
|
||||
|
||||
// PfMerge merges all the input hlls into a hll under destKey key.
|
||||
func (m *Miniredis) PfMerge(destKey string, sourceKeys ...string) error {
|
||||
return m.DB(m.selectedDB).HllMerge(destKey, sourceKeys...)
|
||||
}
|
||||
|
||||
// HllMerge merges all the input hlls into a hll under destKey key.
|
||||
func (db *RedisDB) HllMerge(destKey string, sourceKeys ...string) error {
|
||||
db.master.Lock()
|
||||
defer db.master.Unlock()
|
||||
|
||||
return db.hllMerge(append([]string{destKey}, sourceKeys...))
|
||||
}
|
||||
|
||||
// Copy a value.
|
||||
// Needs the IDs of both the source and dest DBs (which can differ).
|
||||
// Returns ErrKeyNotFound if src does not exist.
|
||||
// Overwrites dest if it already exists (unlike the redis command, which needs a flag to allow that).
|
||||
func (m *Miniredis) Copy(srcDB int, src string, destDB int, dest string) error {
|
||||
return m.copy(m.DB(srcDB), src, m.DB(destDB), dest)
|
||||
}
|
||||
46
vendor/github.com/alicebob/miniredis/v2/geo.go
generated
vendored
@@ -1,46 +0,0 @@
package miniredis

import (
	"math"

	"github.com/alicebob/miniredis/v2/geohash"
)

func toGeohash(long, lat float64) uint64 {
	return geohash.EncodeIntWithPrecision(lat, long, 52)
}

func fromGeohash(score uint64) (float64, float64) {
	lat, long := geohash.DecodeIntWithPrecision(score, 52)
	return long, lat
}

// haversin(θ) function
func hsin(theta float64) float64 {
	return math.Pow(math.Sin(theta/2), 2)
}

// distance function returns the distance (in meters) between two points of
// a given longitude and latitude relatively accurately (using a spherical
// approximation of the Earth) through the Haversin Distance Formula for
// great arc distance on a sphere with accuracy for small distances
// point coordinates are supplied in degrees and converted into rad. in the func
// distance returned is meters
// http://en.wikipedia.org/wiki/Haversine_formula
// Source: https://gist.github.com/cdipaolo/d3f8db3848278b49db68
func distance(lat1, lon1, lat2, lon2 float64) float64 {
	// convert to radians
	// must cast radius as float to multiply later
	var la1, lo1, la2, lo2 float64
	la1 = lat1 * math.Pi / 180
	lo1 = lon1 * math.Pi / 180
	la2 = lat2 * math.Pi / 180
	lo2 = lon2 * math.Pi / 180

	earth := 6372797.560856 // Earth radius in METERS, according to src/geohash_helper.c

	// calculate
	h := hsin(la2-la1) + math.Cos(la1)*math.Cos(la2)*hsin(lo2-lo1)

	return 2 * earth * math.Asin(math.Sqrt(h))
}
22
vendor/github.com/alicebob/miniredis/v2/geohash/LICENSE
generated
vendored
@@ -1,22 +0,0 @@
The MIT License (MIT)

Copyright (c) 2015 Michael McLoughlin

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
2
vendor/github.com/alicebob/miniredis/v2/geohash/README.md
generated
vendored
@@ -1,2 +0,0 @@
This is a (selected) copy of github.com/mmcloughlin/geohash with the latitude
range changed from 90 to ~85, to align with the algorithm used by Redis.
44
vendor/github.com/alicebob/miniredis/v2/geohash/base32.go
generated
vendored
@@ -1,44 +0,0 @@
package geohash

// encoding encapsulates an encoding defined by a given base32 alphabet.
type encoding struct {
	encode string
	decode [256]byte
}

// newEncoding constructs a new encoding defined by the given alphabet,
// which must be a 32-byte string.
func newEncoding(encoder string) *encoding {
	e := new(encoding)
	e.encode = encoder
	for i := 0; i < len(e.decode); i++ {
		e.decode[i] = 0xff
	}
	for i := 0; i < len(encoder); i++ {
		e.decode[encoder[i]] = byte(i)
	}
	return e
}

// Decode string into bits of a 64-bit word. The string s may be at most 12
// characters.
func (e *encoding) Decode(s string) uint64 {
	x := uint64(0)
	for i := 0; i < len(s); i++ {
		x = (x << 5) | uint64(e.decode[s[i]])
	}
	return x
}

// Encode bits of 64-bit word into a string.
func (e *encoding) Encode(x uint64) string {
	b := [12]byte{}
	for i := 0; i < 12; i++ {
		b[11-i] = e.encode[x&0x1f]
		x >>= 5
	}
	return string(b[:])
}

// Base32Encoding with the Geohash alphabet.
var base32encoding = newEncoding("0123456789bcdefghjkmnpqrstuvwxyz")
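A standalone sketch of the same 5-bits-per-character scheme, as a round trip over the geohash alphabet (the helper names are illustrative, not part of the vendored package):

// Illustrative sketch, not part of the diff.
package main

import (
	"fmt"
	"strings"
)

const alphabet = "0123456789bcdefghjkmnpqrstuvwxyz"

// encode12 writes the low 60 bits of x as 12 base32 characters.
func encode12(x uint64) string {
	b := make([]byte, 12)
	for i := 11; i >= 0; i-- {
		b[i] = alphabet[x&0x1f] // low 5 bits pick one character
		x >>= 5
	}
	return string(b)
}

// decode reverses encode12 for strings of up to 12 characters.
func decode(s string) uint64 {
	var x uint64
	for i := 0; i < len(s); i++ {
		x = x<<5 | uint64(strings.IndexByte(alphabet, s[i]))
	}
	return x
}

func main() {
	h := encode12(0x123456789abcdef) // any value under 60 bits fits in 12 characters
	fmt.Println(h, decode(h) == 0x123456789abcdef)
}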
269
vendor/github.com/alicebob/miniredis/v2/geohash/geohash.go
generated
vendored
@@ -1,269 +0,0 @@
// Package geohash provides encoding and decoding of string and integer
|
||||
// geohashes.
|
||||
package geohash
|
||||
|
||||
import (
|
||||
"math"
|
||||
)
|
||||
|
||||
const (
|
||||
ENC_LAT = 85.05112878
|
||||
ENC_LONG = 180.0
|
||||
)
|
||||
|
||||
// Direction represents directions in the latitute/longitude space.
|
||||
type Direction int
|
||||
|
||||
// Cardinal and intercardinal directions
|
||||
const (
|
||||
North Direction = iota
|
||||
NorthEast
|
||||
East
|
||||
SouthEast
|
||||
South
|
||||
SouthWest
|
||||
West
|
||||
NorthWest
|
||||
)
|
||||
|
||||
// Encode the point (lat, lng) as a string geohash with the standard 12
|
||||
// characters of precision.
|
||||
func Encode(lat, lng float64) string {
|
||||
return EncodeWithPrecision(lat, lng, 12)
|
||||
}
|
||||
|
||||
// EncodeWithPrecision encodes the point (lat, lng) as a string geohash with
|
||||
// the specified number of characters of precision (max 12).
|
||||
func EncodeWithPrecision(lat, lng float64, chars uint) string {
|
||||
bits := 5 * chars
|
||||
inthash := EncodeIntWithPrecision(lat, lng, bits)
|
||||
enc := base32encoding.Encode(inthash)
|
||||
return enc[12-chars:]
|
||||
}
|
||||
|
||||
// encodeInt provides a Go implementation of integer geohash. This is the
|
||||
// default implementation of EncodeInt, but optimized versions are provided
|
||||
// for certain architectures.
|
||||
func EncodeInt(lat, lng float64) uint64 {
|
||||
latInt := encodeRange(lat, ENC_LAT)
|
||||
lngInt := encodeRange(lng, ENC_LONG)
|
||||
return interleave(latInt, lngInt)
|
||||
}
|
||||
|
||||
// EncodeIntWithPrecision encodes the point (lat, lng) to an integer with the
|
||||
// specified number of bits.
|
||||
func EncodeIntWithPrecision(lat, lng float64, bits uint) uint64 {
|
||||
hash := EncodeInt(lat, lng)
|
||||
return hash >> (64 - bits)
|
||||
}
|
||||
|
||||
// Box represents a rectangle in latitude/longitude space.
|
||||
type Box struct {
|
||||
MinLat float64
|
||||
MaxLat float64
|
||||
MinLng float64
|
||||
MaxLng float64
|
||||
}
|
||||
|
||||
// Center returns the center of the box.
|
||||
func (b Box) Center() (lat, lng float64) {
|
||||
lat = (b.MinLat + b.MaxLat) / 2.0
|
||||
lng = (b.MinLng + b.MaxLng) / 2.0
|
||||
return
|
||||
}
|
||||
|
||||
// Contains decides whether (lat, lng) is contained in the box. The
|
||||
// containment test is inclusive of the edges and corners.
|
||||
func (b Box) Contains(lat, lng float64) bool {
|
||||
return (b.MinLat <= lat && lat <= b.MaxLat &&
|
||||
b.MinLng <= lng && lng <= b.MaxLng)
|
||||
}
|
||||
|
||||
// errorWithPrecision returns the error range in latitude and longitude for in
|
||||
// integer geohash with bits of precision.
|
||||
func errorWithPrecision(bits uint) (latErr, lngErr float64) {
|
||||
b := int(bits)
|
||||
latBits := b / 2
|
||||
lngBits := b - latBits
|
||||
latErr = math.Ldexp(180.0, -latBits)
|
||||
lngErr = math.Ldexp(360.0, -lngBits)
|
||||
return
|
||||
}
|
||||
|
||||
// BoundingBox returns the region encoded by the given string geohash.
|
||||
func BoundingBox(hash string) Box {
|
||||
bits := uint(5 * len(hash))
|
||||
inthash := base32encoding.Decode(hash)
|
||||
return BoundingBoxIntWithPrecision(inthash, bits)
|
||||
}
|
||||
|
||||
// BoundingBoxIntWithPrecision returns the region encoded by the integer
|
||||
// geohash with the specified precision.
|
||||
func BoundingBoxIntWithPrecision(hash uint64, bits uint) Box {
|
||||
fullHash := hash << (64 - bits)
|
||||
latInt, lngInt := deinterleave(fullHash)
|
||||
lat := decodeRange(latInt, ENC_LAT)
|
||||
lng := decodeRange(lngInt, ENC_LONG)
|
||||
latErr, lngErr := errorWithPrecision(bits)
|
||||
return Box{
|
||||
MinLat: lat,
|
||||
MaxLat: lat + latErr,
|
||||
MinLng: lng,
|
||||
MaxLng: lng + lngErr,
|
||||
}
|
||||
}
|
||||
|
||||
// BoundingBoxInt returns the region encoded by the given 64-bit integer
|
||||
// geohash.
|
||||
func BoundingBoxInt(hash uint64) Box {
|
||||
return BoundingBoxIntWithPrecision(hash, 64)
|
||||
}
|
||||
|
||||
// DecodeCenter decodes the string geohash to the central point of the bounding box.
|
||||
func DecodeCenter(hash string) (lat, lng float64) {
|
||||
box := BoundingBox(hash)
|
||||
return box.Center()
|
||||
}
|
||||
|
||||
// DecodeIntWithPrecision decodes the provided integer geohash with bits of
|
||||
// precision to a (lat, lng) point.
|
||||
func DecodeIntWithPrecision(hash uint64, bits uint) (lat, lng float64) {
|
||||
box := BoundingBoxIntWithPrecision(hash, bits)
|
||||
return box.Center()
|
||||
}
|
||||
|
||||
// DecodeInt decodes the provided 64-bit integer geohash to a (lat, lng) point.
|
||||
func DecodeInt(hash uint64) (lat, lng float64) {
|
||||
return DecodeIntWithPrecision(hash, 64)
|
||||
}
|
||||
|
||||
// Neighbors returns a slice of geohash strings that correspond to the provided
|
||||
// geohash's neighbors.
|
||||
func Neighbors(hash string) []string {
|
||||
box := BoundingBox(hash)
|
||||
lat, lng := box.Center()
|
||||
latDelta := box.MaxLat - box.MinLat
|
||||
lngDelta := box.MaxLng - box.MinLng
|
||||
precision := uint(len(hash))
|
||||
return []string{
|
||||
// N
|
||||
EncodeWithPrecision(lat+latDelta, lng, precision),
|
||||
// NE,
|
||||
EncodeWithPrecision(lat+latDelta, lng+lngDelta, precision),
|
||||
// E,
|
||||
EncodeWithPrecision(lat, lng+lngDelta, precision),
|
||||
// SE,
|
||||
EncodeWithPrecision(lat-latDelta, lng+lngDelta, precision),
|
||||
// S,
|
||||
EncodeWithPrecision(lat-latDelta, lng, precision),
|
||||
// SW,
|
||||
EncodeWithPrecision(lat-latDelta, lng-lngDelta, precision),
|
||||
// W,
|
||||
EncodeWithPrecision(lat, lng-lngDelta, precision),
|
||||
// NW
|
||||
EncodeWithPrecision(lat+latDelta, lng-lngDelta, precision),
|
||||
}
|
||||
}
|
||||
|
||||
// NeighborsInt returns a slice of uint64s that correspond to the provided hash's
|
||||
// neighbors at 64-bit precision.
|
||||
func NeighborsInt(hash uint64) []uint64 {
|
||||
return NeighborsIntWithPrecision(hash, 64)
|
||||
}
|
||||
|
||||
// NeighborsIntWithPrecision returns a slice of uint64s that correspond to the
|
||||
// provided hash's neighbors at the given precision.
|
||||
func NeighborsIntWithPrecision(hash uint64, bits uint) []uint64 {
|
||||
box := BoundingBoxIntWithPrecision(hash, bits)
|
||||
lat, lng := box.Center()
|
||||
latDelta := box.MaxLat - box.MinLat
|
||||
lngDelta := box.MaxLng - box.MinLng
|
||||
return []uint64{
|
||||
// N
|
||||
EncodeIntWithPrecision(lat+latDelta, lng, bits),
|
||||
// NE,
|
||||
EncodeIntWithPrecision(lat+latDelta, lng+lngDelta, bits),
|
||||
// E,
|
||||
EncodeIntWithPrecision(lat, lng+lngDelta, bits),
|
||||
// SE,
|
||||
EncodeIntWithPrecision(lat-latDelta, lng+lngDelta, bits),
|
||||
// S,
|
||||
EncodeIntWithPrecision(lat-latDelta, lng, bits),
|
||||
// SW,
|
||||
EncodeIntWithPrecision(lat-latDelta, lng-lngDelta, bits),
|
||||
// W,
|
||||
EncodeIntWithPrecision(lat, lng-lngDelta, bits),
|
||||
// NW
|
||||
EncodeIntWithPrecision(lat+latDelta, lng-lngDelta, bits),
|
||||
}
|
||||
}
|
||||
|
||||
// Neighbor returns a geohash string that corresponds to the provided
|
||||
// geohash's neighbor in the provided direction
|
||||
func Neighbor(hash string, direction Direction) string {
|
||||
return Neighbors(hash)[direction]
|
||||
}
|
||||
|
||||
// NeighborInt returns a uint64 that corresponds to the provided hash's
|
||||
// neighbor in the provided direction at 64-bit precision.
|
||||
func NeighborInt(hash uint64, direction Direction) uint64 {
|
||||
return NeighborsIntWithPrecision(hash, 64)[direction]
|
||||
}
|
||||
|
||||
// NeighborIntWithPrecision returns a uint64s that corresponds to the
|
||||
// provided hash's neighbor in the provided direction at the given precision.
|
||||
func NeighborIntWithPrecision(hash uint64, bits uint, direction Direction) uint64 {
|
||||
return NeighborsIntWithPrecision(hash, bits)[direction]
|
||||
}
|
||||
|
||||
// precalculated for performance
|
||||
var exp232 = math.Exp2(32)
|
||||
|
||||
// Encode the position of x within the range -r to +r as a 32-bit integer.
|
||||
func encodeRange(x, r float64) uint32 {
|
||||
p := (x + r) / (2 * r)
|
||||
return uint32(p * exp232)
|
||||
}
|
||||
|
||||
// Decode the 32-bit range encoding X back to a value in the range -r to +r.
|
||||
func decodeRange(X uint32, r float64) float64 {
|
||||
p := float64(X) / exp232
|
||||
x := 2*r*p - r
|
||||
return x
|
||||
}
|
||||
|
||||
// Spread out the 32 bits of x into 64 bits, where the bits of x occupy even
|
||||
// bit positions.
|
||||
func spread(x uint32) uint64 {
|
||||
X := uint64(x)
|
||||
X = (X | (X << 16)) & 0x0000ffff0000ffff
|
||||
X = (X | (X << 8)) & 0x00ff00ff00ff00ff
|
||||
X = (X | (X << 4)) & 0x0f0f0f0f0f0f0f0f
|
||||
X = (X | (X << 2)) & 0x3333333333333333
|
||||
X = (X | (X << 1)) & 0x5555555555555555
|
||||
return X
|
||||
}
|
||||
|
||||
// Interleave the bits of x and y. In the result, x and y occupy even and odd
|
||||
// bitlevels, respectively.
|
||||
func interleave(x, y uint32) uint64 {
|
||||
return spread(x) | (spread(y) << 1)
|
||||
}
|
||||
|
||||
// Squash the even bitlevels of X into a 32-bit word. Odd bitlevels of X are
|
||||
// ignored, and may take any value.
|
||||
func squash(X uint64) uint32 {
|
||||
X &= 0x5555555555555555
|
||||
X = (X | (X >> 1)) & 0x3333333333333333
|
||||
X = (X | (X >> 2)) & 0x0f0f0f0f0f0f0f0f
|
||||
X = (X | (X >> 4)) & 0x00ff00ff00ff00ff
|
||||
X = (X | (X >> 8)) & 0x0000ffff0000ffff
|
||||
X = (X | (X >> 16)) & 0x00000000ffffffff
|
||||
return uint32(X)
|
||||
}
|
||||
|
||||
// Deinterleave the bits of X into 32-bit words containing the even and odd
|
||||
// bitlevels of X, respectively.
|
||||
func deinterleave(X uint64) (uint32, uint32) {
|
||||
return squash(X), squash(X >> 1)
|
||||
}
|
||||
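The geo.go helpers above call EncodeIntWithPrecision with 52 bits, the precision Redis itself uses. A standalone sketch of that path — range-encode latitude and longitude against ENC_LAT/ENC_LONG, interleave the bits, keep the top 52 — with illustrative names and sample coordinates:

// Illustrative sketch, not part of the diff.
package main

import (
	"fmt"
	"math"
)

var exp232 = math.Exp2(32)

// encodeRange maps x in [-r, +r] to a 32-bit integer, as in geohash.go above.
func encodeRange(x, r float64) uint32 { return uint32(((x + r) / (2 * r)) * exp232) }

// spread places the 32 bits of x on the even bit positions of a 64-bit word.
func spread(x uint32) uint64 {
	X := uint64(x)
	X = (X | (X << 16)) & 0x0000ffff0000ffff
	X = (X | (X << 8)) & 0x00ff00ff00ff00ff
	X = (X | (X << 4)) & 0x0f0f0f0f0f0f0f0f
	X = (X | (X << 2)) & 0x3333333333333333
	X = (X | (X << 1)) & 0x5555555555555555
	return X
}

func main() {
	lat, lng := 52.37, 4.90 // Amsterdam
	latInt := encodeRange(lat, 85.05112878) // ENC_LAT
	lngInt := encodeRange(lng, 180.0)       // ENC_LONG
	full := spread(latInt) | spread(lngInt)<<1 // interleave lat/lng bits
	fmt.Printf("%013x\n", full>>(64-52))       // keep the top 52 bits
}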
42
vendor/github.com/alicebob/miniredis/v2/hll.go
generated
vendored
@@ -1,42 +0,0 @@
package miniredis

import (
	"github.com/alicebob/miniredis/v2/hyperloglog"
)

type hll struct {
	inner *hyperloglog.Sketch
}

func newHll() *hll {
	return &hll{
		inner: hyperloglog.New14(),
	}
}

// Add returns true if cardinality has been changed, or false otherwise.
func (h *hll) Add(item []byte) bool {
	return h.inner.Insert(item)
}

// Count returns the estimation of a set cardinality.
func (h *hll) Count() int {
	return int(h.inner.Estimate())
}

// Merge merges the other hll into original one (not making a copy but doing this in place).
func (h *hll) Merge(other *hll) {
	_ = h.inner.Merge(other.inner)
}

// Bytes returns raw-bytes representation of hll data structure.
func (h *hll) Bytes() []byte {
	dataBytes, _ := h.inner.MarshalBinary()
	return dataBytes
}

func (h *hll) copy() *hll {
	return &hll{
		inner: h.inner.Clone(),
	}
}
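The hll wrapper above delegates to the vendored hyperloglog Sketch. A minimal sketch of that API, using the upstream package this copy mirrors (github.com/axiomhq/hyperloglog, per the README below) as an assumption, since the vendored path is not importable from outside:

// Illustrative sketch, not part of the diff.
package main

import (
	"fmt"

	"github.com/axiomhq/hyperloglog"
)

func main() {
	a, b := hyperloglog.New14(), hyperloglog.New14()
	for i := 0; i < 1000; i++ {
		a.Insert([]byte(fmt.Sprintf("user-%d", i)))
		b.Insert([]byte(fmt.Sprintf("user-%d", i+500))) // half overlaps with a
	}
	if err := a.Merge(b); err != nil { // union, in place
		panic(err)
	}
	fmt.Println(a.Estimate()) // roughly 1,500 distinct items, within HLL error bounds
}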
21
vendor/github.com/alicebob/miniredis/v2/hyperloglog/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
MIT License
|
||||
|
||||
Copyright (c) 2017 Axiom Inc. <seif@axiom.sh>
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
1
vendor/github.com/alicebob/miniredis/v2/hyperloglog/README.md
generated
vendored
@@ -1 +0,0 @@
This is a copy of github.com/axiomhq/hyperloglog.
180
vendor/github.com/alicebob/miniredis/v2/hyperloglog/compressed.go
generated
vendored
@@ -1,180 +0,0 @@
package hyperloglog
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
// Original author of this file is github.com/clarkduvall/hyperloglog
|
||||
type iterable interface {
|
||||
decode(i int, last uint32) (uint32, int)
|
||||
Len() int
|
||||
Iter() *iterator
|
||||
}
|
||||
|
||||
type iterator struct {
|
||||
i int
|
||||
last uint32
|
||||
v iterable
|
||||
}
|
||||
|
||||
func (iter *iterator) Next() uint32 {
|
||||
n, i := iter.v.decode(iter.i, iter.last)
|
||||
iter.last = n
|
||||
iter.i = i
|
||||
return n
|
||||
}
|
||||
|
||||
func (iter *iterator) Peek() uint32 {
|
||||
n, _ := iter.v.decode(iter.i, iter.last)
|
||||
return n
|
||||
}
|
||||
|
||||
func (iter iterator) HasNext() bool {
|
||||
return iter.i < iter.v.Len()
|
||||
}
|
||||
|
||||
type compressedList struct {
|
||||
count uint32
|
||||
last uint32
|
||||
b variableLengthList
|
||||
}
|
||||
|
||||
func (v *compressedList) Clone() *compressedList {
|
||||
if v == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
newV := &compressedList{
|
||||
count: v.count,
|
||||
last: v.last,
|
||||
}
|
||||
|
||||
newV.b = make(variableLengthList, len(v.b))
|
||||
copy(newV.b, v.b)
|
||||
return newV
|
||||
}
|
||||
|
||||
func (v *compressedList) MarshalBinary() (data []byte, err error) {
|
||||
// Marshal the variableLengthList
|
||||
bdata, err := v.b.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// At least 4 bytes for the two fixed sized values plus the size of bdata.
|
||||
data = make([]byte, 0, 4+4+len(bdata))
|
||||
|
||||
// Marshal the count and last values.
|
||||
data = append(data, []byte{
|
||||
// Number of items in the list.
|
||||
byte(v.count >> 24),
|
||||
byte(v.count >> 16),
|
||||
byte(v.count >> 8),
|
||||
byte(v.count),
|
||||
// The last item in the list.
|
||||
byte(v.last >> 24),
|
||||
byte(v.last >> 16),
|
||||
byte(v.last >> 8),
|
||||
byte(v.last),
|
||||
}...)
|
||||
|
||||
// Append the list
|
||||
return append(data, bdata...), nil
|
||||
}
|
||||
|
||||
func (v *compressedList) UnmarshalBinary(data []byte) error {
|
||||
if len(data) < 12 {
|
||||
return ErrorTooShort
|
||||
}
|
||||
|
||||
// Set the count.
|
||||
v.count, data = binary.BigEndian.Uint32(data[:4]), data[4:]
|
||||
|
||||
// Set the last value.
|
||||
v.last, data = binary.BigEndian.Uint32(data[:4]), data[4:]
|
||||
|
||||
// Set the list.
|
||||
sz, data := binary.BigEndian.Uint32(data[:4]), data[4:]
|
||||
v.b = make([]uint8, sz)
|
||||
if uint32(len(data)) < sz {
|
||||
return ErrorTooShort
|
||||
}
|
||||
for i := uint32(0); i < sz; i++ {
|
||||
v.b[i] = data[i]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newCompressedList() *compressedList {
|
||||
v := &compressedList{}
|
||||
v.b = make(variableLengthList, 0)
|
||||
return v
|
||||
}
|
||||
|
||||
func (v *compressedList) Len() int {
|
||||
return len(v.b)
|
||||
}
|
||||
|
||||
func (v *compressedList) decode(i int, last uint32) (uint32, int) {
|
||||
n, i := v.b.decode(i, last)
|
||||
return n + last, i
|
||||
}
|
||||
|
||||
func (v *compressedList) Append(x uint32) {
|
||||
v.count++
|
||||
v.b = v.b.Append(x - v.last)
|
||||
v.last = x
|
||||
}
|
||||
|
||||
func (v *compressedList) Iter() *iterator {
|
||||
return &iterator{0, 0, v}
|
||||
}
|
||||
|
||||
type variableLengthList []uint8
|
||||
|
||||
func (v variableLengthList) MarshalBinary() (data []byte, err error) {
|
||||
// 4 bytes for the size of the list, and a byte for each element in the
|
||||
// list.
|
||||
data = make([]byte, 0, 4+v.Len())
|
||||
|
||||
// Length of the list. We only need 32 bits because the size of the set
|
||||
// couldn't exceed that on 32 bit architectures.
|
||||
sz := v.Len()
|
||||
data = append(data, []byte{
|
||||
byte(sz >> 24),
|
||||
byte(sz >> 16),
|
||||
byte(sz >> 8),
|
||||
byte(sz),
|
||||
}...)
|
||||
|
||||
// Marshal each element in the list.
|
||||
for i := 0; i < sz; i++ {
|
||||
data = append(data, v[i])
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func (v variableLengthList) Len() int {
|
||||
return len(v)
|
||||
}
|
||||
|
||||
func (v *variableLengthList) Iter() *iterator {
|
||||
return &iterator{0, 0, v}
|
||||
}
|
||||
|
||||
func (v variableLengthList) decode(i int, last uint32) (uint32, int) {
|
||||
var x uint32
|
||||
j := i
|
||||
for ; v[j]&0x80 != 0; j++ {
|
||||
x |= uint32(v[j]&0x7f) << (uint(j-i) * 7)
|
||||
}
|
||||
x |= uint32(v[j]) << (uint(j-i) * 7)
|
||||
return x, j + 1
|
||||
}
|
||||
|
||||
func (v variableLengthList) Append(x uint32) variableLengthList {
|
||||
for x&0xffffff80 != 0 {
|
||||
v = append(v, uint8((x&0x7f)|0x80))
|
||||
x >>= 7
|
||||
}
|
||||
return append(v, uint8(x&0x7f))
|
||||
}
|
||||
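compressedList above stores a sorted list of encoded hashes as deltas, each delta written as a 7-bit varint with a continuation bit. A standalone sketch of that encoding (names are illustrative):

// Illustrative sketch, not part of the diff.
package main

import "fmt"

// appendVarint mirrors variableLengthList.Append: 7 payload bits per byte,
// high bit set on every byte except the last.
func appendVarint(buf []byte, x uint32) []byte {
	for x&0xffffff80 != 0 {
		buf = append(buf, uint8(x&0x7f)|0x80) // continuation bit set
		x >>= 7
	}
	return append(buf, uint8(x&0x7f))
}

func main() {
	values := []uint32{3, 70, 300, 1_000_000}
	var buf []byte
	last := uint32(0)
	for _, v := range values {
		buf = appendVarint(buf, v-last) // store the delta to the previous value
		last = v
	}
	// Small gaps cost a single byte each, regardless of the absolute values.
	fmt.Printf("%d values in %d bytes: %x\n", len(values), len(buf), buf)
}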
424
vendor/github.com/alicebob/miniredis/v2/hyperloglog/hyperloglog.go
generated
vendored
@@ -1,424 +0,0 @@
package hyperloglog
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
const (
|
||||
capacity = uint8(16)
|
||||
pp = uint8(25)
|
||||
mp = uint32(1) << pp
|
||||
version = 1
|
||||
)
|
||||
|
||||
// Sketch is a HyperLogLog data-structure for the count-distinct problem,
|
||||
// approximating the number of distinct elements in a multiset.
|
||||
type Sketch struct {
|
||||
p uint8
|
||||
b uint8
|
||||
m uint32
|
||||
alpha float64
|
||||
tmpSet set
|
||||
sparseList *compressedList
|
||||
regs *registers
|
||||
}
|
||||
|
||||
// New returns a HyperLogLog Sketch with 2^14 registers (precision 14)
|
||||
func New() *Sketch {
|
||||
return New14()
|
||||
}
|
||||
|
||||
// New14 returns a HyperLogLog Sketch with 2^14 registers (precision 14)
|
||||
func New14() *Sketch {
|
||||
sk, _ := newSketch(14, true)
|
||||
return sk
|
||||
}
|
||||
|
||||
// New16 returns a HyperLogLog Sketch with 2^16 registers (precision 16)
|
||||
func New16() *Sketch {
|
||||
sk, _ := newSketch(16, true)
|
||||
return sk
|
||||
}
|
||||
|
||||
// NewNoSparse returns a HyperLogLog Sketch with 2^14 registers (precision 14)
|
||||
// that will not use a sparse representation
|
||||
func NewNoSparse() *Sketch {
|
||||
sk, _ := newSketch(14, false)
|
||||
return sk
|
||||
}
|
||||
|
||||
// New16NoSparse returns a HyperLogLog Sketch with 2^16 registers (precision 16)
|
||||
// that will not use a sparse representation
|
||||
func New16NoSparse() *Sketch {
|
||||
sk, _ := newSketch(16, false)
|
||||
return sk
|
||||
}
|
||||
|
||||
// newSketch returns a HyperLogLog Sketch with 2^precision registers
|
||||
func newSketch(precision uint8, sparse bool) (*Sketch, error) {
|
||||
if precision < 4 || precision > 18 {
|
||||
return nil, fmt.Errorf("p has to be >= 4 and <= 18")
|
||||
}
|
||||
m := uint32(math.Pow(2, float64(precision)))
|
||||
s := &Sketch{
|
||||
m: m,
|
||||
p: precision,
|
||||
alpha: alpha(float64(m)),
|
||||
}
|
||||
if sparse {
|
||||
s.tmpSet = set{}
|
||||
s.sparseList = newCompressedList()
|
||||
} else {
|
||||
s.regs = newRegisters(m)
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (sk *Sketch) sparse() bool {
|
||||
return sk.sparseList != nil
|
||||
}
|
||||
|
||||
// Clone returns a deep copy of sk.
|
||||
func (sk *Sketch) Clone() *Sketch {
|
||||
return &Sketch{
|
||||
b: sk.b,
|
||||
p: sk.p,
|
||||
m: sk.m,
|
||||
alpha: sk.alpha,
|
||||
tmpSet: sk.tmpSet.Clone(),
|
||||
sparseList: sk.sparseList.Clone(),
|
||||
regs: sk.regs.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
// Converts to normal if the sparse list is too large.
|
||||
func (sk *Sketch) maybeToNormal() {
|
||||
if uint32(len(sk.tmpSet))*100 > sk.m {
|
||||
sk.mergeSparse()
|
||||
if uint32(sk.sparseList.Len()) > sk.m {
|
||||
sk.toNormal()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Merge takes another Sketch and combines it with Sketch h.
|
||||
// If Sketch h is using the sparse Sketch, it will be converted
|
||||
// to the normal Sketch.
|
||||
func (sk *Sketch) Merge(other *Sketch) error {
|
||||
if other == nil {
|
||||
// Nothing to do
|
||||
return nil
|
||||
}
|
||||
cpOther := other.Clone()
|
||||
|
||||
if sk.p != cpOther.p {
|
||||
return errors.New("precisions must be equal")
|
||||
}
|
||||
|
||||
if sk.sparse() && other.sparse() {
|
||||
for k := range other.tmpSet {
|
||||
sk.tmpSet.add(k)
|
||||
}
|
||||
for iter := other.sparseList.Iter(); iter.HasNext(); {
|
||||
sk.tmpSet.add(iter.Next())
|
||||
}
|
||||
sk.maybeToNormal()
|
||||
return nil
|
||||
}
|
||||
|
||||
if sk.sparse() {
|
||||
sk.toNormal()
|
||||
}
|
||||
|
||||
if cpOther.sparse() {
|
||||
for k := range cpOther.tmpSet {
|
||||
i, r := decodeHash(k, cpOther.p, pp)
|
||||
sk.insert(i, r)
|
||||
}
|
||||
|
||||
for iter := cpOther.sparseList.Iter(); iter.HasNext(); {
|
||||
i, r := decodeHash(iter.Next(), cpOther.p, pp)
|
||||
sk.insert(i, r)
|
||||
}
|
||||
} else {
|
||||
if sk.b < cpOther.b {
|
||||
sk.regs.rebase(cpOther.b - sk.b)
|
||||
sk.b = cpOther.b
|
||||
} else {
|
||||
cpOther.regs.rebase(sk.b - cpOther.b)
|
||||
cpOther.b = sk.b
|
||||
}
|
||||
|
||||
for i, v := range cpOther.regs.tailcuts {
|
||||
v1 := v.get(0)
|
||||
if v1 > sk.regs.get(uint32(i)*2) {
|
||||
sk.regs.set(uint32(i)*2, v1)
|
||||
}
|
||||
v2 := v.get(1)
|
||||
if v2 > sk.regs.get(1+uint32(i)*2) {
|
||||
sk.regs.set(1+uint32(i)*2, v2)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert from sparse Sketch to dense Sketch.
|
||||
func (sk *Sketch) toNormal() {
|
||||
if len(sk.tmpSet) > 0 {
|
||||
sk.mergeSparse()
|
||||
}
|
||||
|
||||
sk.regs = newRegisters(sk.m)
|
||||
for iter := sk.sparseList.Iter(); iter.HasNext(); {
|
||||
i, r := decodeHash(iter.Next(), sk.p, pp)
|
||||
sk.insert(i, r)
|
||||
}
|
||||
|
||||
sk.tmpSet = nil
|
||||
sk.sparseList = nil
|
||||
}
|
||||
|
||||
func (sk *Sketch) insert(i uint32, r uint8) bool {
|
||||
changed := false
|
||||
if r-sk.b >= capacity {
|
||||
//overflow
|
||||
db := sk.regs.min()
|
||||
if db > 0 {
|
||||
sk.b += db
|
||||
sk.regs.rebase(db)
|
||||
changed = true
|
||||
}
|
||||
}
|
||||
if r > sk.b {
|
||||
val := r - sk.b
|
||||
if c1 := capacity - 1; c1 < val {
|
||||
val = c1
|
||||
}
|
||||
|
||||
if val > sk.regs.get(i) {
|
||||
sk.regs.set(i, val)
|
||||
changed = true
|
||||
}
|
||||
}
|
||||
return changed
|
||||
}
|
||||
|
||||
// Insert adds element e to sketch
|
||||
func (sk *Sketch) Insert(e []byte) bool {
|
||||
x := hash(e)
|
||||
return sk.InsertHash(x)
|
||||
}
|
||||
|
||||
// InsertHash adds hash x to sketch
|
||||
func (sk *Sketch) InsertHash(x uint64) bool {
|
||||
if sk.sparse() {
|
||||
changed := sk.tmpSet.add(encodeHash(x, sk.p, pp))
|
||||
if !changed {
|
||||
return false
|
||||
}
|
||||
if uint32(len(sk.tmpSet))*100 > sk.m/2 {
|
||||
sk.mergeSparse()
|
||||
if uint32(sk.sparseList.Len()) > sk.m/2 {
|
||||
sk.toNormal()
|
||||
}
|
||||
}
|
||||
return true
|
||||
} else {
|
||||
i, r := getPosVal(x, sk.p)
|
||||
return sk.insert(uint32(i), r)
|
||||
}
|
||||
}
|
||||
|
||||
// Estimate returns the cardinality of the Sketch
|
||||
func (sk *Sketch) Estimate() uint64 {
|
||||
if sk.sparse() {
|
||||
sk.mergeSparse()
|
||||
return uint64(linearCount(mp, mp-sk.sparseList.count))
|
||||
}
|
||||
|
||||
sum, ez := sk.regs.sumAndZeros(sk.b)
|
||||
m := float64(sk.m)
|
||||
var est float64
|
||||
|
||||
var beta func(float64) float64
|
||||
if sk.p < 16 {
|
||||
beta = beta14
|
||||
} else {
|
||||
beta = beta16
|
||||
}
|
||||
|
||||
if sk.b == 0 {
|
||||
est = (sk.alpha * m * (m - ez) / (sum + beta(ez)))
|
||||
} else {
|
||||
est = (sk.alpha * m * m / sum)
|
||||
}
|
||||
|
||||
return uint64(est + 0.5)
|
||||
}
|
||||
|
||||
func (sk *Sketch) mergeSparse() {
|
||||
if len(sk.tmpSet) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
keys := make(uint64Slice, 0, len(sk.tmpSet))
|
||||
for k := range sk.tmpSet {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Sort(keys)
|
||||
|
||||
newList := newCompressedList()
|
||||
for iter, i := sk.sparseList.Iter(), 0; iter.HasNext() || i < len(keys); {
|
||||
if !iter.HasNext() {
|
||||
newList.Append(keys[i])
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
if i >= len(keys) {
|
||||
newList.Append(iter.Next())
|
||||
continue
|
||||
}
|
||||
|
||||
x1, x2 := iter.Peek(), keys[i]
|
||||
if x1 == x2 {
|
||||
newList.Append(iter.Next())
|
||||
i++
|
||||
} else if x1 > x2 {
|
||||
newList.Append(x2)
|
||||
i++
|
||||
} else {
|
||||
newList.Append(iter.Next())
|
||||
}
|
||||
}
|
||||
|
||||
sk.sparseList = newList
|
||||
sk.tmpSet = set{}
|
||||
}
|
||||
|
||||
// MarshalBinary implements the encoding.BinaryMarshaler interface.
|
||||
func (sk *Sketch) MarshalBinary() (data []byte, err error) {
|
||||
// Marshal a version marker.
|
||||
data = append(data, version)
|
||||
// Marshal p.
|
||||
data = append(data, sk.p)
|
||||
// Marshal b
|
||||
data = append(data, sk.b)
|
||||
|
||||
if sk.sparse() {
|
||||
// It's using the sparse Sketch.
|
||||
data = append(data, byte(1))
|
||||
|
||||
// Add the tmp_set
|
||||
tsdata, err := sk.tmpSet.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
data = append(data, tsdata...)
|
||||
|
||||
// Add the sparse Sketch
|
||||
sdata, err := sk.sparseList.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return append(data, sdata...), nil
|
||||
}
|
||||
|
||||
// It's using the dense Sketch.
|
||||
data = append(data, byte(0))
|
||||
|
||||
// Add the dense sketch Sketch.
|
||||
sz := len(sk.regs.tailcuts)
|
||||
data = append(data, []byte{
|
||||
byte(sz >> 24),
|
||||
byte(sz >> 16),
|
||||
byte(sz >> 8),
|
||||
byte(sz),
|
||||
}...)
|
||||
|
||||
// Marshal each element in the list.
|
||||
for i := 0; i < len(sk.regs.tailcuts); i++ {
|
||||
data = append(data, byte(sk.regs.tailcuts[i]))
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// ErrorTooShort is an error that UnmarshalBinary try to parse too short
|
||||
// binary.
|
||||
var ErrorTooShort = errors.New("too short binary")
|
||||
|
||||
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
|
||||
func (sk *Sketch) UnmarshalBinary(data []byte) error {
|
||||
if len(data) < 8 {
|
||||
return ErrorTooShort
|
||||
}
|
||||
|
||||
// Unmarshal version. We may need this in the future if we make
|
||||
// non-compatible changes.
|
||||
_ = data[0]
|
||||
|
||||
// Unmarshal p.
|
||||
p := data[1]
|
||||
|
||||
// Unmarshal b.
|
||||
sk.b = data[2]
|
||||
|
||||
// Determine if we need a sparse Sketch
|
||||
sparse := data[3] == byte(1)
|
||||
|
||||
// Make a newSketch Sketch if the precision doesn't match or if the Sketch was used
|
||||
if sk.p != p || sk.regs != nil || len(sk.tmpSet) > 0 || (sk.sparseList != nil && sk.sparseList.Len() > 0) {
|
||||
newh, err := newSketch(p, sparse)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newh.b = sk.b
|
||||
*sk = *newh
|
||||
}
|
||||
|
||||
// h is now initialised with the correct p. We just need to fill the
|
||||
// rest of the details out.
|
||||
if sparse {
|
||||
// Using the sparse Sketch.
|
||||
|
||||
// Unmarshal the tmp_set.
|
||||
tssz := binary.BigEndian.Uint32(data[4:8])
|
||||
sk.tmpSet = make(map[uint32]struct{}, tssz)
|
||||
|
||||
// We need to unmarshal tssz values in total, and each value requires us
|
||||
// to read 4 bytes.
|
||||
tsLastByte := int((tssz * 4) + 8)
|
||||
for i := 8; i < tsLastByte; i += 4 {
|
||||
k := binary.BigEndian.Uint32(data[i : i+4])
|
||||
sk.tmpSet[k] = struct{}{}
|
||||
}
|
||||
|
||||
// Unmarshal the sparse Sketch.
|
||||
return sk.sparseList.UnmarshalBinary(data[tsLastByte:])
|
||||
}
|
||||
|
||||
// Using the dense Sketch.
|
||||
sk.sparseList = nil
|
||||
sk.tmpSet = nil
|
||||
dsz := binary.BigEndian.Uint32(data[4:8])
|
||||
sk.regs = newRegisters(dsz * 2)
|
||||
data = data[8:]
|
||||
|
||||
for i, val := range data {
|
||||
sk.regs.tailcuts[i] = reg(val)
|
||||
if uint8(sk.regs.tailcuts[i]<<4>>4) > 0 {
|
||||
sk.regs.nz--
|
||||
}
|
||||
if uint8(sk.regs.tailcuts[i]>>4) > 0 {
|
||||
sk.regs.nz--
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
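MarshalBinary/UnmarshalBinary above write a version byte, p, b, a sparse flag, and then either the tmp_set plus sparse list or the packed registers. A round-trip sketch, again assuming the upstream github.com/axiomhq/hyperloglog package rather than this vendored copy:

// Illustrative sketch, not part of the diff.
package main

import (
	"fmt"

	"github.com/axiomhq/hyperloglog"
)

func main() {
	sk := hyperloglog.New14()
	sk.Insert([]byte("a"))
	sk.Insert([]byte("b"))

	raw, err := sk.MarshalBinary() // version, p, b, sparse flag, then payload
	if err != nil {
		panic(err)
	}

	restored := hyperloglog.New14()
	if err := restored.UnmarshalBinary(raw); err != nil {
		panic(err)
	}
	fmt.Println(restored.Estimate()) // 2, exact at this size
}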
114
vendor/github.com/alicebob/miniredis/v2/hyperloglog/registers.go
generated
vendored
@@ -1,114 +0,0 @@
package hyperloglog
|
||||
|
||||
import (
|
||||
"math"
|
||||
)
|
||||
|
||||
type reg uint8
|
||||
type tailcuts []reg
|
||||
|
||||
type registers struct {
|
||||
tailcuts
|
||||
nz uint32
|
||||
}
|
||||
|
||||
func (r *reg) set(offset, val uint8) bool {
|
||||
var isZero bool
|
||||
if offset == 0 {
|
||||
isZero = *r < 16
|
||||
tmpVal := uint8((*r) << 4 >> 4)
|
||||
*r = reg(tmpVal | (val << 4))
|
||||
} else {
|
||||
isZero = *r&0x0f == 0
|
||||
tmpVal := uint8((*r) >> 4 << 4)
|
||||
*r = reg(tmpVal | val)
|
||||
}
|
||||
return isZero
|
||||
}
|
||||
|
||||
func (r *reg) get(offset uint8) uint8 {
|
||||
if offset == 0 {
|
||||
return uint8((*r) >> 4)
|
||||
}
|
||||
return uint8((*r) << 4 >> 4)
|
||||
}
|
||||
|
||||
func newRegisters(size uint32) *registers {
|
||||
return ®isters{
|
||||
tailcuts: make(tailcuts, size/2),
|
||||
nz: size,
|
||||
}
|
||||
}
|
||||
|
||||
func (rs *registers) clone() *registers {
|
||||
if rs == nil {
|
||||
return nil
|
||||
}
|
||||
tc := make([]reg, len(rs.tailcuts))
|
||||
copy(tc, rs.tailcuts)
|
||||
return ®isters{
|
||||
tailcuts: tc,
|
||||
nz: rs.nz,
|
||||
}
|
||||
}
|
||||
|
||||
func (rs *registers) rebase(delta uint8) {
|
||||
nz := uint32(len(rs.tailcuts)) * 2
|
||||
for i := range rs.tailcuts {
|
||||
for j := uint8(0); j < 2; j++ {
|
||||
val := rs.tailcuts[i].get(j)
|
||||
if val >= delta {
|
||||
rs.tailcuts[i].set(j, val-delta)
|
||||
if val-delta > 0 {
|
||||
nz--
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
rs.nz = nz
|
||||
}
|
||||
|
||||
func (rs *registers) set(i uint32, val uint8) {
|
||||
offset, index := uint8(i)&1, i/2
|
||||
if rs.tailcuts[index].set(offset, val) {
|
||||
rs.nz--
|
||||
}
|
||||
}
|
||||
|
||||
func (rs *registers) get(i uint32) uint8 {
|
||||
offset, index := uint8(i)&1, i/2
|
||||
return rs.tailcuts[index].get(offset)
|
||||
}
|
||||
|
||||
func (rs *registers) sumAndZeros(base uint8) (res, ez float64) {
|
||||
for _, r := range rs.tailcuts {
|
||||
for j := uint8(0); j < 2; j++ {
|
||||
v := float64(base + r.get(j))
|
||||
if v == 0 {
|
||||
ez++
|
||||
}
|
||||
res += 1.0 / math.Pow(2.0, v)
|
||||
}
|
||||
}
|
||||
rs.nz = uint32(ez)
|
||||
return res, ez
|
||||
}
|
||||
|
||||
func (rs *registers) min() uint8 {
|
||||
if rs.nz > 0 {
|
||||
return 0
|
||||
}
|
||||
min := uint8(math.MaxUint8)
|
||||
for _, r := range rs.tailcuts {
|
||||
if r == 0 || min == 0 {
|
||||
return 0
|
||||
}
|
||||
if val := uint8(r << 4 >> 4); val < min {
|
||||
min = val
|
||||
}
|
||||
if val := uint8(r >> 4); val < min {
|
||||
min = val
|
||||
}
|
||||
}
|
||||
return min
|
||||
}
|
||||
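registers above packs two 4-bit register values into each byte (offset 0 in the high nibble, offset 1 in the low nibble). A standalone sketch of that layout, with illustrative helper names:

// Illustrative sketch, not part of the diff.
package main

import "fmt"

// set stores a 4-bit value in one of the two nibbles of b.
func set(b, offset, val uint8) uint8 {
	if offset == 0 {
		return b&0x0f | val<<4 // high nibble
	}
	return b&0xf0 | val // low nibble
}

// get reads one of the two nibbles of b back.
func get(b, offset uint8) uint8 {
	if offset == 0 {
		return b >> 4
	}
	return b & 0x0f
}

func main() {
	var b uint8
	b = set(b, 0, 9) // register 0 -> 9
	b = set(b, 1, 3) // register 1 -> 3
	fmt.Println(get(b, 0), get(b, 1), b) // 9 3 147 (0x93)
}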
92
vendor/github.com/alicebob/miniredis/v2/hyperloglog/sparse.go
generated
vendored
@@ -1,92 +0,0 @@
package hyperloglog
|
||||
|
||||
import (
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
func getIndex(k uint32, p, pp uint8) uint32 {
|
||||
if k&1 == 1 {
|
||||
return bextr32(k, 32-p, p)
|
||||
}
|
||||
return bextr32(k, pp-p+1, p)
|
||||
}
|
||||
|
||||
// Encode a hash to be used in the sparse representation.
|
||||
func encodeHash(x uint64, p, pp uint8) uint32 {
|
||||
idx := uint32(bextr(x, 64-pp, pp))
|
||||
if bextr(x, 64-pp, pp-p) == 0 {
|
||||
zeros := bits.LeadingZeros64((bextr(x, 0, 64-pp)<<pp)|(1<<pp-1)) + 1
|
||||
return idx<<7 | uint32(zeros<<1) | 1
|
||||
}
|
||||
return idx << 1
|
||||
}
|
||||
|
||||
// Decode a hash from the sparse representation.
|
||||
func decodeHash(k uint32, p, pp uint8) (uint32, uint8) {
|
||||
var r uint8
|
||||
if k&1 == 1 {
|
||||
r = uint8(bextr32(k, 1, 6)) + pp - p
|
||||
} else {
|
||||
// We can use the 64bit clz implementation and reduce the result
|
||||
// by 32 to get a clz for a 32bit word.
|
||||
r = uint8(bits.LeadingZeros64(uint64(k<<(32-pp+p-1))) - 31) // -32 + 1
|
||||
}
|
||||
return getIndex(k, p, pp), r
|
||||
}
|
||||
|
||||
type set map[uint32]struct{}
|
||||
|
||||
func (s set) add(v uint32) bool {
|
||||
_, ok := s[v]
|
||||
if ok {
|
||||
return false
|
||||
}
|
||||
s[v] = struct{}{}
|
||||
return true
|
||||
}
|
||||
|
||||
func (s set) Clone() set {
|
||||
if s == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
newS := make(map[uint32]struct{}, len(s))
|
||||
for k, v := range s {
|
||||
newS[k] = v
|
||||
}
|
||||
return newS
|
||||
}
|
||||
|
||||
func (s set) MarshalBinary() (data []byte, err error) {
|
||||
// 4 bytes for the size of the set, and 4 bytes for each key.
|
||||
// list.
|
||||
data = make([]byte, 0, 4+(4*len(s)))
|
||||
|
||||
// Length of the set. We only need 32 bits because the size of the set
|
||||
// couldn't exceed that on 32 bit architectures.
|
||||
sl := len(s)
|
||||
data = append(data, []byte{
|
||||
byte(sl >> 24),
|
||||
byte(sl >> 16),
|
||||
byte(sl >> 8),
|
||||
byte(sl),
|
||||
}...)
|
||||
|
||||
// Marshal each element in the set.
|
||||
for k := range s {
|
||||
data = append(data, []byte{
|
||||
byte(k >> 24),
|
||||
byte(k >> 16),
|
||||
byte(k >> 8),
|
||||
byte(k),
|
||||
}...)
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
type uint64Slice []uint32
|
||||
|
||||
func (p uint64Slice) Len() int { return len(p) }
|
||||
func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
|
||||
func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
69
vendor/github.com/alicebob/miniredis/v2/hyperloglog/utils.go
generated
vendored
@@ -1,69 +0,0 @@
package hyperloglog
|
||||
|
||||
import (
|
||||
"github.com/alicebob/miniredis/v2/metro"
|
||||
"math"
|
||||
"math/bits"
|
||||
)
|
||||
|
||||
var hash = hashFunc
|
||||
|
||||
func beta14(ez float64) float64 {
|
||||
zl := math.Log(ez + 1)
|
||||
return -0.370393911*ez +
|
||||
0.070471823*zl +
|
||||
0.17393686*math.Pow(zl, 2) +
|
||||
0.16339839*math.Pow(zl, 3) +
|
||||
-0.09237745*math.Pow(zl, 4) +
|
||||
0.03738027*math.Pow(zl, 5) +
|
||||
-0.005384159*math.Pow(zl, 6) +
|
||||
0.00042419*math.Pow(zl, 7)
|
||||
}
|
||||
|
||||
func beta16(ez float64) float64 {
|
||||
zl := math.Log(ez + 1)
|
||||
return -0.37331876643753059*ez +
|
||||
-1.41704077448122989*zl +
|
||||
0.40729184796612533*math.Pow(zl, 2) +
|
||||
1.56152033906584164*math.Pow(zl, 3) +
|
||||
-0.99242233534286128*math.Pow(zl, 4) +
|
||||
0.26064681399483092*math.Pow(zl, 5) +
|
||||
-0.03053811369682807*math.Pow(zl, 6) +
|
||||
0.00155770210179105*math.Pow(zl, 7)
|
||||
}
|
||||
|
||||
func alpha(m float64) float64 {
|
||||
switch m {
|
||||
case 16:
|
||||
return 0.673
|
||||
case 32:
|
||||
return 0.697
|
||||
case 64:
|
||||
return 0.709
|
||||
}
|
||||
return 0.7213 / (1 + 1.079/m)
|
||||
}
|
||||
|
||||
func getPosVal(x uint64, p uint8) (uint64, uint8) {
|
||||
i := bextr(x, 64-p, p) // {x63,...,x64-p}
|
||||
w := x<<p | 1<<(p-1) // {x63-p,...,x0}
|
||||
rho := uint8(bits.LeadingZeros64(w)) + 1
|
||||
return i, rho
|
||||
}
|
||||
|
||||
func linearCount(m uint32, v uint32) float64 {
|
||||
fm := float64(m)
|
||||
return fm * math.Log(fm/float64(v))
|
||||
}
|
||||
|
||||
func bextr(v uint64, start, length uint8) uint64 {
|
||||
return (v >> start) & ((1 << length) - 1)
|
||||
}
|
||||
|
||||
func bextr32(v uint32, start, length uint8) uint32 {
|
||||
return (v >> start) & ((1 << length) - 1)
|
||||
}
|
||||
|
||||
func hashFunc(e []byte) uint64 {
|
||||
return metro.Hash64(e, 1337)
|
||||
}
|
||||
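getPosVal above splits a 64-bit hash into a register index (the top p bits) and a rank (leading zeros of the remaining bits, plus one). A worked standalone sketch with an illustrative input value:

// Illustrative sketch, not part of the diff.
package main

import (
	"fmt"
	"math/bits"
)

// posVal mirrors getPosVal: index from the top p bits, rank from the rest.
func posVal(x uint64, p uint8) (uint64, uint8) {
	i := (x >> (64 - p)) & ((1 << p) - 1) // register index: top p bits
	w := x<<p | 1<<(p-1)                  // remaining bits, padded so the rank is bounded
	return i, uint8(bits.LeadingZeros64(w)) + 1
}

func main() {
	x := uint64(0x00000fffffffffff) // 20 leading zero bits
	i, rho := posVal(x, 14)
	// 0 7: the first 14 zero bits form the index, 6 zeros remain, rank = 6+1.
	fmt.Println(i, rho)
}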
83
vendor/github.com/alicebob/miniredis/v2/keys.go
generated
vendored
@@ -1,83 +0,0 @@
package miniredis
|
||||
|
||||
// Translate the 'KEYS' or 'PSUBSCRIBE' argument ('foo*', 'f??', &c.) into a regexp.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// patternRE compiles a glob to a regexp. Returns nil if the given
|
||||
// pattern will never match anything.
|
||||
// The general strategy is to sandwich all non-meta characters between \Q...\E.
|
||||
func patternRE(k string) *regexp.Regexp {
|
||||
re := bytes.Buffer{}
|
||||
re.WriteString(`(?s)^\Q`)
|
||||
for i := 0; i < len(k); i++ {
|
||||
p := k[i]
|
||||
switch p {
|
||||
case '*':
|
||||
re.WriteString(`\E.*\Q`)
|
||||
case '?':
|
||||
re.WriteString(`\E.\Q`)
|
||||
case '[':
|
||||
charClass := bytes.Buffer{}
|
||||
i++
|
||||
for ; i < len(k); i++ {
|
||||
if k[i] == ']' {
|
||||
break
|
||||
}
|
||||
if k[i] == '\\' {
|
||||
if i == len(k)-1 {
|
||||
// Ends with a '\'. U-huh.
|
||||
return nil
|
||||
}
|
||||
charClass.WriteByte(k[i])
|
||||
i++
|
||||
charClass.WriteByte(k[i])
|
||||
continue
|
||||
}
|
||||
charClass.WriteByte(k[i])
|
||||
}
|
||||
if charClass.Len() == 0 {
|
||||
// '[]' is valid in Redis, but matches nothing.
|
||||
return nil
|
||||
}
|
||||
re.WriteString(`\E[`)
|
||||
re.Write(charClass.Bytes())
|
||||
re.WriteString(`]\Q`)
|
||||
|
||||
case '\\':
|
||||
if i == len(k)-1 {
|
||||
// Ends with a '\'. U-huh.
|
||||
return nil
|
||||
}
|
||||
// Forget the \, keep the next char.
|
||||
i++
|
||||
re.WriteByte(k[i])
|
||||
continue
|
||||
default:
|
||||
re.WriteByte(p)
|
||||
}
|
||||
}
|
||||
re.WriteString(`\E$`)
|
||||
return regexp.MustCompile(re.String())
|
||||
}
|
||||
|
||||
// matchKeys filters only matching keys.
|
||||
// The returned boolean is whether the match pattern was valid
|
||||
func matchKeys(keys []string, match string) ([]string, bool) {
|
||||
re := patternRE(match)
|
||||
if re == nil {
|
||||
// Special case: the given pattern won't match anything or is invalid.
|
||||
return nil, false
|
||||
}
|
||||
var res []string
|
||||
for _, k := range keys {
|
||||
if !re.MatchString(k) {
|
||||
continue
|
||||
}
|
||||
res = append(res, k)
|
||||
}
|
||||
return res, true
|
||||
}
|
||||
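patternRE above sandwiches literal runs between \Q and \E and rewrites the glob metacharacters ('*' to '.*', '?' to '.'). A hand-expanded sketch of what the glob user:?-* would compile to (the sample pattern is illustrative; patternRE itself is unexported):

// Illustrative sketch, not part of the diff.
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Literal runs stay quoted; '?' becomes '.', '*' becomes '.*'.
	re := regexp.MustCompile(`(?s)^\Quser:\E.\Q-\E.*\Q\E$`)
	fmt.Println(re.MatchString("user:a-42"))  // true
	fmt.Println(re.MatchString("users:a-42")) // false
}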
242
vendor/github.com/alicebob/miniredis/v2/lua.go
generated
vendored
@@ -1,242 +0,0 @@
package miniredis
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
lua "github.com/yuin/gopher-lua"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
var luaRedisConstants = map[string]lua.LValue{
|
||||
"LOG_DEBUG": lua.LNumber(0),
|
||||
"LOG_VERBOSE": lua.LNumber(1),
|
||||
"LOG_NOTICE": lua.LNumber(2),
|
||||
"LOG_WARNING": lua.LNumber(3),
|
||||
}
|
||||
|
||||
func mkLua(srv *server.Server, c *server.Peer) (map[string]lua.LGFunction, map[string]lua.LValue) {
|
||||
mkCall := func(failFast bool) func(l *lua.LState) int {
|
||||
// one server.Ctx for a single Lua run
|
||||
pCtx := &connCtx{}
|
||||
if getCtx(c).authenticated {
|
||||
pCtx.authenticated = true
|
||||
}
|
||||
pCtx.nested = true
|
||||
pCtx.selectedDB = getCtx(c).selectedDB
|
||||
|
||||
return func(l *lua.LState) int {
|
||||
top := l.GetTop()
|
||||
if top == 0 {
|
||||
l.Error(lua.LString("Please specify at least one argument for redis.call()"), 1)
|
||||
return 0
|
||||
}
|
||||
var args []string
|
||||
for i := 1; i <= top; i++ {
|
||||
switch a := l.Get(i).(type) {
|
||||
case lua.LNumber:
|
||||
args = append(args, a.String())
|
||||
case lua.LString:
|
||||
args = append(args, string(a))
|
||||
default:
|
||||
l.Error(lua.LString("Lua redis() command arguments must be strings or integers"), 1)
|
||||
return 0
|
||||
}
|
||||
}
|
||||
if len(args) == 0 {
|
||||
l.Error(lua.LString(msgNotFromScripts), 1)
|
||||
return 0
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
wr := bufio.NewWriter(buf)
|
||||
peer := server.NewPeer(wr)
|
||||
peer.Ctx = pCtx
|
||||
srv.Dispatch(peer, args)
|
||||
wr.Flush()
|
||||
|
||||
res, err := server.ParseReply(bufio.NewReader(buf))
|
||||
if err != nil {
|
||||
if failFast {
|
||||
// call() mode
|
||||
if strings.Contains(err.Error(), "ERR unknown command") {
|
||||
l.Error(lua.LString("Unknown Redis command called from Lua script"), 1)
|
||||
} else {
|
||||
l.Error(lua.LString(err.Error()), 1)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
// pcall() mode
|
||||
l.Push(lua.LNil)
|
||||
return 1
|
||||
}
|
||||
|
||||
if res == nil {
|
||||
l.Push(lua.LFalse)
|
||||
} else {
|
||||
switch r := res.(type) {
|
||||
case int64:
|
||||
l.Push(lua.LNumber(r))
|
||||
case int:
|
||||
l.Push(lua.LNumber(r))
|
||||
case []uint8:
|
||||
l.Push(lua.LString(string(r)))
|
||||
case []interface{}:
|
||||
l.Push(redisToLua(l, r))
|
||||
case server.Simple:
|
||||
l.Push(luaStatusReply(string(r)))
|
||||
case string:
|
||||
l.Push(lua.LString(r))
|
||||
case error:
|
||||
l.Error(lua.LString(r.Error()), 1)
|
||||
return 0
|
||||
default:
|
||||
panic(fmt.Sprintf("type not handled (%T)", r))
|
||||
}
|
||||
}
|
||||
return 1
|
||||
}
|
||||
}
|
||||
|
||||
return map[string]lua.LGFunction{
|
||||
"call": mkCall(true),
|
||||
"pcall": mkCall(false),
|
||||
"error_reply": func(l *lua.LState) int {
|
||||
v := l.Get(1)
|
||||
msg, ok := v.(lua.LString)
|
||||
if !ok {
|
||||
l.Error(lua.LString("wrong number or type of arguments"), 1)
|
||||
return 0
|
||||
}
|
||||
res := &lua.LTable{}
|
||||
res.RawSetString("err", lua.LString(msg))
|
||||
l.Push(res)
|
||||
return 1
|
||||
},
|
||||
"log": func(l *lua.LState) int {
|
||||
level := l.CheckInt(1)
|
||||
msg := l.CheckString(2)
|
||||
_, _ = level, msg
|
||||
// do nothing by default. To see logs uncomment:
|
||||
// fmt.Printf("%v: %v", level, msg)
|
||||
return 0
|
||||
},
|
||||
"status_reply": func(l *lua.LState) int {
|
||||
v := l.Get(1)
|
||||
msg, ok := v.(lua.LString)
|
||||
if !ok {
|
||||
l.Error(lua.LString("wrong number or type of arguments"), 1)
|
||||
return 0
|
||||
}
|
||||
res := luaStatusReply(string(msg))
|
||||
l.Push(res)
|
||||
return 1
|
||||
},
|
||||
"sha1hex": func(l *lua.LState) int {
|
||||
top := l.GetTop()
|
||||
if top != 1 {
|
||||
l.Error(lua.LString("wrong number of arguments"), 1)
|
||||
return 0
|
||||
}
|
||||
msg := lua.LVAsString(l.Get(1))
|
||||
l.Push(lua.LString(sha1Hex(msg)))
|
||||
return 1
|
||||
},
|
||||
"replicate_commands": func(l *lua.LState) int {
|
||||
// ignored
|
||||
return 1
|
||||
},
|
||||
}, luaRedisConstants
|
||||
}
|
||||
|
||||
func luaToRedis(l *lua.LState, c *server.Peer, value lua.LValue) {
|
||||
if value == nil {
|
||||
c.WriteNull()
|
||||
return
|
||||
}
|
||||
|
||||
switch t := value.(type) {
|
||||
case *lua.LNilType:
|
||||
c.WriteNull()
|
||||
case lua.LBool:
|
||||
if lua.LVAsBool(value) {
|
||||
c.WriteInt(1)
|
||||
} else {
|
||||
c.WriteNull()
|
||||
}
|
||||
case lua.LNumber:
|
||||
c.WriteInt(int(lua.LVAsNumber(value)))
|
||||
case lua.LString:
|
||||
s := lua.LVAsString(value)
|
||||
c.WriteBulk(s)
|
||||
case *lua.LTable:
|
||||
// special case for tables with an 'err' or 'ok' field
|
||||
// note: according to the docs this only counts when 'err' or 'ok' is
|
||||
// the only field.
|
||||
if s := t.RawGetString("err"); s.Type() != lua.LTNil {
|
||||
c.WriteError(s.String())
|
||||
return
|
||||
}
|
||||
if s := t.RawGetString("ok"); s.Type() != lua.LTNil {
|
||||
c.WriteInline(s.String())
|
||||
return
|
||||
}
|
||||
|
||||
result := []lua.LValue{}
|
||||
for j := 1; true; j++ {
|
||||
val := l.GetTable(value, lua.LNumber(j))
|
||||
if val == nil {
|
||||
result = append(result, val)
|
||||
continue
|
||||
}
|
||||
|
||||
if val.Type() == lua.LTNil {
|
||||
break
|
||||
}
|
||||
|
||||
result = append(result, val)
|
||||
}
|
||||
|
||||
c.WriteLen(len(result))
|
||||
for _, r := range result {
|
||||
luaToRedis(l, c, r)
|
||||
}
|
||||
default:
|
||||
panic("....")
|
||||
}
|
||||
}
|
||||
|
||||
func redisToLua(l *lua.LState, res []interface{}) *lua.LTable {
|
||||
rettb := l.NewTable()
|
||||
for _, e := range res {
|
||||
var v lua.LValue
|
||||
if e == nil {
|
||||
v = lua.LFalse
|
||||
} else {
|
||||
switch et := e.(type) {
|
||||
case int64:
|
||||
v = lua.LNumber(et)
|
||||
case []uint8:
|
||||
v = lua.LString(string(et))
|
||||
case []interface{}:
|
||||
v = redisToLua(l, et)
|
||||
case string:
|
||||
v = lua.LString(et)
|
||||
default:
|
||||
// TODO: oops?
|
||||
v = lua.LString(e.(string))
|
||||
}
|
||||
}
|
||||
l.RawSet(rettb, lua.LNumber(rettb.Len()+1), v)
|
||||
}
|
||||
return rettb
|
||||
}
|
||||
|
||||
func luaStatusReply(msg string) *lua.LTable {
|
||||
tab := &lua.LTable{}
|
||||
tab.RawSetString("ok", lua.LString(msg))
|
||||
return tab
|
||||
}
|
||||
24
vendor/github.com/alicebob/miniredis/v2/metro/LICENSE
generated
vendored
@@ -1,24 +0,0 @@
This package is a mechanical translation of the reference C++ code for
|
||||
MetroHash, available at https://github.com/jandrewrogers/MetroHash
|
||||
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 Damian Gryski
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
1
vendor/github.com/alicebob/miniredis/v2/metro/README.md
generated
vendored
@@ -1 +0,0 @@
This is a partial copy of github.com/dgryski/go-metro.
87
vendor/github.com/alicebob/miniredis/v2/metro/metro64.go
generated
vendored
@@ -1,87 +0,0 @@
package metro
|
||||
|
||||
import "encoding/binary"
|
||||
|
||||
func Hash64(buffer []byte, seed uint64) uint64 {
|
||||
|
||||
const (
|
||||
k0 = 0xD6D018F5
|
||||
k1 = 0xA2AA033B
|
||||
k2 = 0x62992FC1
|
||||
k3 = 0x30BC5B29
|
||||
)
|
||||
|
||||
ptr := buffer
|
||||
|
||||
hash := (seed + k2) * k0
|
||||
|
||||
if len(ptr) >= 32 {
|
||||
v := [4]uint64{hash, hash, hash, hash}
|
||||
|
||||
for len(ptr) >= 32 {
|
||||
v[0] += binary.LittleEndian.Uint64(ptr[:8]) * k0
|
||||
v[0] = rotate_right(v[0], 29) + v[2]
|
||||
v[1] += binary.LittleEndian.Uint64(ptr[8:16]) * k1
|
||||
v[1] = rotate_right(v[1], 29) + v[3]
|
||||
v[2] += binary.LittleEndian.Uint64(ptr[16:24]) * k2
|
||||
v[2] = rotate_right(v[2], 29) + v[0]
|
||||
v[3] += binary.LittleEndian.Uint64(ptr[24:32]) * k3
|
||||
v[3] = rotate_right(v[3], 29) + v[1]
|
||||
ptr = ptr[32:]
|
||||
}
|
||||
|
||||
v[2] ^= rotate_right(((v[0]+v[3])*k0)+v[1], 37) * k1
|
||||
v[3] ^= rotate_right(((v[1]+v[2])*k1)+v[0], 37) * k0
|
||||
v[0] ^= rotate_right(((v[0]+v[2])*k0)+v[3], 37) * k1
|
||||
v[1] ^= rotate_right(((v[1]+v[3])*k1)+v[2], 37) * k0
|
||||
hash += v[0] ^ v[1]
|
||||
}
|
||||
|
||||
if len(ptr) >= 16 {
|
||||
v0 := hash + (binary.LittleEndian.Uint64(ptr[:8]) * k2)
|
||||
v0 = rotate_right(v0, 29) * k3
|
||||
v1 := hash + (binary.LittleEndian.Uint64(ptr[8:16]) * k2)
|
||||
v1 = rotate_right(v1, 29) * k3
|
||||
v0 ^= rotate_right(v0*k0, 21) + v1
|
||||
v1 ^= rotate_right(v1*k3, 21) + v0
|
||||
hash += v1
|
||||
ptr = ptr[16:]
|
||||
}
|
||||
|
||||
if len(ptr) >= 8 {
|
||||
hash += binary.LittleEndian.Uint64(ptr[:8]) * k3
|
||||
ptr = ptr[8:]
|
||||
hash ^= rotate_right(hash, 55) * k1
|
||||
}
|
||||
|
||||
if len(ptr) >= 4 {
|
||||
hash += uint64(binary.LittleEndian.Uint32(ptr[:4])) * k3
|
||||
hash ^= rotate_right(hash, 26) * k1
|
||||
ptr = ptr[4:]
|
||||
}
|
||||
|
||||
if len(ptr) >= 2 {
|
||||
hash += uint64(binary.LittleEndian.Uint16(ptr[:2])) * k3
|
||||
ptr = ptr[2:]
|
||||
hash ^= rotate_right(hash, 48) * k1
|
||||
}
|
||||
|
||||
if len(ptr) >= 1 {
|
||||
hash += uint64(ptr[0]) * k3
|
||||
hash ^= rotate_right(hash, 37) * k1
|
||||
}
|
||||
|
||||
hash ^= rotate_right(hash, 28)
|
||||
hash *= k0
|
||||
hash ^= rotate_right(hash, 29)
|
||||
|
||||
return hash
|
||||
}
|
||||
|
||||
func Hash64Str(buffer string, seed uint64) uint64 {
|
||||
return Hash64([]byte(buffer), seed)
|
||||
}
|
||||
|
||||
func rotate_right(v uint64, k uint) uint64 {
|
||||
return (v >> k) | (v << (64 - k))
|
||||
}
|
||||
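Hash64 above is the seeded MetroHash variant the hyperloglog package calls with the fixed seed 1337 (see utils.go). A usage sketch via the upstream package this file is a partial copy of (github.com/dgryski/go-metro); the import path is an assumption here, since the vendored copy is not importable from outside:

// Illustrative sketch, not part of the diff.
package main

import (
	"fmt"

	metro "github.com/dgryski/go-metro"
)

func main() {
	// The same bytes hash differently under different seeds.
	fmt.Println(metro.Hash64([]byte("example"), 1337))
	fmt.Println(metro.Hash64([]byte("example"), 0))
}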
720
vendor/github.com/alicebob/miniredis/v2/miniredis.go
generated
vendored
@@ -1,720 +0,0 @@
// Package miniredis is a pure Go Redis test server, for use in Go unittests.
|
||||
// There are no dependencies on system binaries, and every server you start
|
||||
// will be empty.
|
||||
//
|
||||
// import "github.com/alicebob/miniredis/v2"
|
||||
//
|
||||
// Start a server with `s := miniredis.RunT(t)`, it'll be shutdown via a t.Cleanup().
|
||||
// Or do everything manual: `s, err := miniredis.Run(); defer s.Close()`
|
||||
//
|
||||
// Point your Redis client to `s.Addr()` or `s.Host(), s.Port()`.
|
||||
//
|
||||
// Set keys directly via s.Set(...) and similar commands, or use a Redis client.
|
||||
//
|
||||
// For direct use you can select a Redis database with either `s.Select(12);
|
||||
// s.Get("foo")` or `s.DB(12).Get("foo")`.
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
var DumpMaxLineLen = 60
|
||||
|
||||
type hashKey map[string]string
|
||||
type listKey []string
|
||||
type setKey map[string]struct{}
|
||||
|
||||
// RedisDB holds a single (numbered) Redis database.
|
||||
type RedisDB struct {
|
||||
master *Miniredis // pointer to the lock in Miniredis
|
||||
id int // db id
|
||||
keys map[string]string // Master map of keys with their type
|
||||
stringKeys map[string]string // GET/SET &c. keys
|
||||
hashKeys map[string]hashKey // MGET/MSET &c. keys
|
||||
listKeys map[string]listKey // LPUSH &c. keys
|
||||
setKeys map[string]setKey // SADD &c. keys
|
||||
hllKeys map[string]*hll // PFADD &c. keys
|
||||
sortedsetKeys map[string]sortedSet // ZADD &c. keys
|
||||
streamKeys map[string]*streamKey // XADD &c. keys
|
||||
ttl map[string]time.Duration // effective TTL values
|
||||
keyVersion map[string]uint // used to watch values
|
||||
}
|
||||
|
||||
// Miniredis is a Redis server implementation.
|
||||
type Miniredis struct {
|
||||
sync.Mutex
|
||||
srv *server.Server
|
||||
port int
|
||||
passwords map[string]string // username password
|
||||
dbs map[int]*RedisDB
|
||||
selectedDB int // DB id used in the direct Get(), Set() &c.
|
||||
scripts map[string]string // sha1 -> lua src
|
||||
signal *sync.Cond
|
||||
now time.Time // time.Now() if not set.
|
||||
subscribers map[*Subscriber]struct{}
|
||||
rand *rand.Rand
|
||||
Ctx context.Context
|
||||
CtxCancel context.CancelFunc
|
||||
}
|
||||
|
||||
type txCmd func(*server.Peer, *connCtx)
|
||||
|
||||
// database id + key combo
|
||||
type dbKey struct {
|
||||
db int
|
||||
key string
|
||||
}
|
||||
|
||||
// connCtx has all state for a single connection.
|
||||
type connCtx struct {
|
||||
selectedDB int // selected DB
|
||||
authenticated bool // auth enabled and a valid AUTH seen
|
||||
transaction []txCmd // transaction callbacks. Or nil.
|
||||
dirtyTransaction bool // any error during QUEUEing
|
||||
watch map[dbKey]uint // WATCHed keys
|
||||
subscriber *Subscriber // client is in PUBSUB mode if not nil
|
||||
nested bool // this is called via Lua
|
||||
}
|
||||
|
||||
// NewMiniRedis makes a new, non-started, Miniredis object.
|
||||
func NewMiniRedis() *Miniredis {
|
||||
m := Miniredis{
|
||||
dbs: map[int]*RedisDB{},
|
||||
scripts: map[string]string{},
|
||||
subscribers: map[*Subscriber]struct{}{},
|
||||
}
|
||||
m.Ctx, m.CtxCancel = context.WithCancel(context.Background())
|
||||
m.signal = sync.NewCond(&m)
|
||||
return &m
|
||||
}
|
||||
|
||||
func newRedisDB(id int, m *Miniredis) RedisDB {
|
||||
return RedisDB{
|
||||
id: id,
|
||||
master: m,
|
||||
keys: map[string]string{},
|
||||
stringKeys: map[string]string{},
|
||||
hashKeys: map[string]hashKey{},
|
||||
listKeys: map[string]listKey{},
|
||||
setKeys: map[string]setKey{},
|
||||
hllKeys: map[string]*hll{},
|
||||
sortedsetKeys: map[string]sortedSet{},
|
||||
streamKeys: map[string]*streamKey{},
|
||||
ttl: map[string]time.Duration{},
|
||||
keyVersion: map[string]uint{},
|
||||
}
|
||||
}
|
||||
|
||||
// Run creates and Start()s a Miniredis.
|
||||
func Run() (*Miniredis, error) {
|
||||
m := NewMiniRedis()
|
||||
return m, m.Start()
|
||||
}
|
||||
|
||||
// RunTLS creates and Start()s a Miniredis, TLS version.
|
||||
func RunTLS(cfg *tls.Config) (*Miniredis, error) {
|
||||
m := NewMiniRedis()
|
||||
return m, m.StartTLS(cfg)
|
||||
}
|
||||
|
||||
// Tester is a minimal version of a testing.T
|
||||
type Tester interface {
|
||||
Fatalf(string, ...interface{})
|
||||
Cleanup(func())
|
||||
}
|
||||
|
||||
// RunT starts a new miniredis; pass it a testing.T. It also registers the cleanup to run after your test is done.
|
||||
func RunT(t Tester) *Miniredis {
|
||||
m := NewMiniRedis()
|
||||
if err := m.Start(); err != nil {
|
||||
t.Fatalf("could not start miniredis: %s", err)
|
||||
// not reached
|
||||
}
|
||||
t.Cleanup(m.Close)
|
||||
return m
|
||||
}
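
// Illustrative sketch (not part of the original vendored file): a minimal test
// using RunT. It seeds a key via the exported Set method mentioned in the
// package comment (an assumption here), then talks RESP to the server over a
// raw TCP connection. Assumes imports "bufio", "fmt", "net" and "testing".
func TestRunTSketch(t *testing.T) {
	s := RunT(t) // closed automatically via t.Cleanup()
	if err := s.Set("greeting", "hello"); err != nil {
		t.Fatalf("set: %s", err)
	}

	conn, err := net.Dial("tcp", s.Addr())
	if err != nil {
		t.Fatalf("dial: %s", err)
	}
	defer conn.Close()

	// "GET greeting" as a RESP array of bulk strings.
	fmt.Fprintf(conn, "*2\r\n$3\r\nGET\r\n$8\r\ngreeting\r\n")
	if line, _ := bufio.NewReader(conn).ReadString('\n'); line != "$5\r\n" {
		t.Fatalf("unexpected reply header: %q", line)
	}
}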
|
||||
|
||||
// Start starts a server. It listens on a random port on localhost. See also
|
||||
// Addr().
|
||||
func (m *Miniredis) Start() error {
|
||||
s, err := server.NewServer(fmt.Sprintf("127.0.0.1:%d", m.port))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return m.start(s)
|
||||
}
|
||||
|
||||
// StartTLS starts a server, TLS version.
|
||||
func (m *Miniredis) StartTLS(cfg *tls.Config) error {
|
||||
s, err := server.NewServerTLS(fmt.Sprintf("127.0.0.1:%d", m.port), cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return m.start(s)
|
||||
}
|
||||
|
||||
// StartAddr runs miniredis with a given addr. Examples: "127.0.0.1:6379",
|
||||
// ":6379", or "127.0.0.1:0"
|
||||
func (m *Miniredis) StartAddr(addr string) error {
|
||||
s, err := server.NewServer(addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return m.start(s)
|
||||
}
|
||||
|
||||
func (m *Miniredis) start(s *server.Server) error {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
m.srv = s
|
||||
m.port = s.Addr().Port
|
||||
|
||||
commandsConnection(m)
|
||||
commandsGeneric(m)
|
||||
commandsServer(m)
|
||||
commandsString(m)
|
||||
commandsHash(m)
|
||||
commandsList(m)
|
||||
commandsPubsub(m)
|
||||
commandsSet(m)
|
||||
commandsSortedSet(m)
|
||||
commandsStream(m)
|
||||
commandsTransaction(m)
|
||||
commandsScripting(m)
|
||||
commandsGeo(m)
|
||||
commandsCluster(m)
|
||||
commandsHll(m)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Restart restarts a Close()d server on the same port. Values will be
|
||||
// preserved.
|
||||
func (m *Miniredis) Restart() error {
|
||||
return m.Start()
|
||||
}
|
||||
|
||||
// Close shuts down a Miniredis.
|
||||
func (m *Miniredis) Close() {
|
||||
m.Lock()
|
||||
|
||||
if m.srv == nil {
|
||||
m.Unlock()
|
||||
return
|
||||
}
|
||||
srv := m.srv
|
||||
m.srv = nil
|
||||
m.CtxCancel()
|
||||
m.Unlock()
|
||||
|
||||
// the OnDisconnect callbacks can lock m, so run Close() outside the lock.
|
||||
srv.Close()
|
||||
|
||||
}
|
||||
|
||||
// RequireAuth makes every connection need to AUTH first. This is the old 'AUTH [password]' command.
|
||||
// Remove it by setting an empty string.
|
||||
func (m *Miniredis) RequireAuth(pw string) {
|
||||
m.RequireUserAuth("default", pw)
|
||||
}
|
||||
|
||||
// Add a username/password, for use with 'AUTH [username] [password]'.
|
||||
// There are currently no access controls for commands implemented.
|
||||
// Disable access for the user with an empty password.
|
||||
func (m *Miniredis) RequireUserAuth(username, pw string) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
if m.passwords == nil {
|
||||
m.passwords = map[string]string{}
|
||||
}
|
||||
if pw == "" {
|
||||
delete(m.passwords, username)
|
||||
return
|
||||
}
|
||||
m.passwords[username] = pw
|
||||
}
|
||||
|
||||
// DB returns a DB by ID.
|
||||
func (m *Miniredis) DB(i int) *RedisDB {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
return m.db(i)
|
||||
}
|
||||
|
||||
// get DB. No locks!
|
||||
func (m *Miniredis) db(i int) *RedisDB {
|
||||
if db, ok := m.dbs[i]; ok {
|
||||
return db
|
||||
}
|
||||
db := newRedisDB(i, m) // main miniredis has our mutex.
|
||||
m.dbs[i] = &db
|
||||
return &db
|
||||
}
|
||||
|
||||
// SwapDB swaps DBs by IDs.
|
||||
func (m *Miniredis) SwapDB(i, j int) bool {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
return m.swapDB(i, j)
|
||||
}
|
||||
|
||||
// swap DB. No locks!
|
||||
func (m *Miniredis) swapDB(i, j int) bool {
|
||||
db1 := m.db(i)
|
||||
db2 := m.db(j)
|
||||
|
||||
db1.id = j
|
||||
db2.id = i
|
||||
|
||||
m.dbs[i] = db2
|
||||
m.dbs[j] = db1
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// Addr returns '127.0.0.1:12345'. Can be given to a Dial(). See also Host()
|
||||
// and Port(), which return the same values separately.
|
||||
func (m *Miniredis) Addr() string {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
return m.srv.Addr().String()
|
||||
}
|
||||
|
||||
// Host returns the host part of Addr().
|
||||
func (m *Miniredis) Host() string {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
return m.srv.Addr().IP.String()
|
||||
}
|
||||
|
||||
// Port returns the (random) port part of Addr().
|
||||
func (m *Miniredis) Port() string {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
return strconv.Itoa(m.srv.Addr().Port)
|
||||
}
|
||||
|
||||
// CommandCount returns the number of processed commands.
|
||||
func (m *Miniredis) CommandCount() int {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
return int(m.srv.TotalCommands())
|
||||
}
|
||||
|
||||
// CurrentConnectionCount returns the number of currently connected clients.
|
||||
func (m *Miniredis) CurrentConnectionCount() int {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
return m.srv.ClientsLen()
|
||||
}
|
||||
|
||||
// TotalConnectionCount returns the number of client connections since server start.
|
||||
func (m *Miniredis) TotalConnectionCount() int {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
return int(m.srv.TotalConnections())
|
||||
}
|
||||
|
||||
// FastForward decreases all TTLs by the given duration. All TTLs <= 0 will be
|
||||
// expired.
|
||||
func (m *Miniredis) FastForward(duration time.Duration) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
for _, db := range m.dbs {
|
||||
db.fastForward(duration)
|
||||
}
|
||||
}
|
||||
|
||||
// Server returns the underlying server to allow custom commands to be implemented
|
||||
func (m *Miniredis) Server() *server.Server {
|
||||
return m.srv
|
||||
}
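
// Illustrative sketch (not part of the original vendored file): Server()
// exposes the underlying *server.Server, so a test can register a custom
// command using the Register and Peer helpers shown further down in this
// diff. Assumes a running instance `m` obtained from Run() or RunT().
func registerEcho2(m *Miniredis) error {
	return m.Server().Register("ECHO2", func(c *server.Peer, cmd string, args []string) {
		if len(args) != 1 {
			c.WriteError("ERR wrong number of arguments for 'echo2' command")
			return
		}
		c.WriteBulk(args[0])
	})
}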
|
||||
|
||||
// Dump returns a text version of the selected DB, usable for debugging.
|
||||
//
|
||||
// Dump limits the maximum length of each key:value to "DumpMaxLineLen" characters.
|
||||
// To increase that, call something like:
|
||||
//
|
||||
// miniredis.DumpMaxLineLen = 1024
|
||||
// mr, _ = miniredis.Run()
|
||||
// mr.Dump()
|
||||
func (m *Miniredis) Dump() string {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
var (
|
||||
maxLen = DumpMaxLineLen
|
||||
indent = " "
|
||||
db = m.db(m.selectedDB)
|
||||
r = ""
|
||||
v = func(s string) string {
|
||||
suffix := ""
|
||||
if len(s) > maxLen {
|
||||
suffix = fmt.Sprintf("...(%d)", len(s))
|
||||
s = s[:maxLen-len(suffix)]
|
||||
}
|
||||
return fmt.Sprintf("%q%s", s, suffix)
|
||||
}
|
||||
)
|
||||
|
||||
for _, k := range db.allKeys() {
|
||||
r += fmt.Sprintf("- %s\n", k)
|
||||
t := db.t(k)
|
||||
switch t {
|
||||
case "string":
|
||||
r += fmt.Sprintf("%s%s\n", indent, v(db.stringKeys[k]))
|
||||
case "hash":
|
||||
for _, hk := range db.hashFields(k) {
|
||||
r += fmt.Sprintf("%s%s: %s\n", indent, hk, v(db.hashGet(k, hk)))
|
||||
}
|
||||
case "list":
|
||||
for _, lk := range db.listKeys[k] {
|
||||
r += fmt.Sprintf("%s%s\n", indent, v(lk))
|
||||
}
|
||||
case "set":
|
||||
for _, mk := range db.setMembers(k) {
|
||||
r += fmt.Sprintf("%s%s\n", indent, v(mk))
|
||||
}
|
||||
case "zset":
|
||||
for _, el := range db.ssetElements(k) {
|
||||
r += fmt.Sprintf("%s%f: %s\n", indent, el.score, v(el.member))
|
||||
}
|
||||
case "stream":
|
||||
for _, entry := range db.streamKeys[k].entries {
|
||||
r += fmt.Sprintf("%s%s\n", indent, entry.ID)
|
||||
ev := entry.Values
|
||||
for i := 0; i < len(ev)/2; i++ {
|
||||
r += fmt.Sprintf("%s%s%s: %s\n", indent, indent, v(ev[2*i]), v(ev[2*i+1]))
|
||||
}
|
||||
}
|
||||
case "hll":
|
||||
for _, entry := range db.hllKeys {
|
||||
r += fmt.Sprintf("%s%s\n", indent, v(string(entry.Bytes())))
|
||||
}
|
||||
default:
|
||||
r += fmt.Sprintf("%s(a %s, fixme!)\n", indent, t)
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// SetTime sets the time against which EXPIREAT values are compared, and the
|
||||
// time used in stream entry IDs. Will use time.Now() if this is not set.
|
||||
func (m *Miniredis) SetTime(t time.Time) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
m.now = t
|
||||
}
|
||||
|
||||
// SetError makes every command return this message. For example:
|
||||
//
|
||||
// LOADING Redis is loading the dataset in memory
|
||||
// MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'.
|
||||
//
|
||||
// Clear it with an empty string. Don't add newlines.
|
||||
func (m *Miniredis) SetError(msg string) {
|
||||
cb := server.Hook(nil)
|
||||
if msg != "" {
|
||||
cb = func(c *server.Peer, cmd string, args ...string) bool {
|
||||
c.WriteError(msg)
|
||||
return true
|
||||
}
|
||||
}
|
||||
m.srv.SetPreHook(cb)
|
||||
}
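
// Illustrative sketch (not part of the original vendored file): SetError is
// handy for testing client retry behaviour, since every subsequent command
// gets the given error line back. Clearing it restores normal processing.
func exampleSetError(m *Miniredis) {
	m.SetError("LOADING Redis is loading the dataset in memory")
	// ... exercise the client under test; every command now fails ...
	m.SetError("") // back to normal
}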
|
||||
|
||||
// isValidCMD returns true if command is valid and can be executed.
|
||||
func (m *Miniredis) isValidCMD(c *server.Peer, cmd string) bool {
|
||||
if !m.handleAuth(c) {
|
||||
return false
|
||||
}
|
||||
if m.checkPubsub(c, cmd) {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// handleAuth returns false if connection has no access. It sends the reply.
|
||||
func (m *Miniredis) handleAuth(c *server.Peer) bool {
|
||||
if getCtx(c).nested {
|
||||
return true
|
||||
}
|
||||
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
if len(m.passwords) == 0 {
|
||||
return true
|
||||
}
|
||||
if !getCtx(c).authenticated {
|
||||
c.WriteError("NOAUTH Authentication required.")
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// checkPubsub sends an error to the user if the connection is in PUBSUB mode.
|
||||
// It'll return true if it did.
|
||||
func (m *Miniredis) checkPubsub(c *server.Peer, cmd string) bool {
|
||||
if getCtx(c).nested {
|
||||
return false
|
||||
}
|
||||
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
ctx := getCtx(c)
|
||||
if ctx.subscriber == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
prefix := "ERR "
|
||||
if strings.ToLower(cmd) == "exec" {
|
||||
prefix = "EXECABORT Transaction discarded because of: "
|
||||
}
|
||||
c.WriteError(fmt.Sprintf(
|
||||
"%sCan't execute '%s': only (P)SUBSCRIBE / (P)UNSUBSCRIBE / PING / QUIT are allowed in this context",
|
||||
prefix,
|
||||
strings.ToLower(cmd),
|
||||
))
|
||||
return true
|
||||
}
|
||||
|
||||
func getCtx(c *server.Peer) *connCtx {
|
||||
if c.Ctx == nil {
|
||||
c.Ctx = &connCtx{}
|
||||
}
|
||||
return c.Ctx.(*connCtx)
|
||||
}
|
||||
|
||||
func startTx(ctx *connCtx) {
|
||||
ctx.transaction = []txCmd{}
|
||||
ctx.dirtyTransaction = false
|
||||
}
|
||||
|
||||
func stopTx(ctx *connCtx) {
|
||||
ctx.transaction = nil
|
||||
unwatch(ctx)
|
||||
}
|
||||
|
||||
func inTx(ctx *connCtx) bool {
|
||||
return ctx.transaction != nil
|
||||
}
|
||||
|
||||
func addTxCmd(ctx *connCtx, cb txCmd) {
|
||||
ctx.transaction = append(ctx.transaction, cb)
|
||||
}
|
||||
|
||||
func watch(db *RedisDB, ctx *connCtx, key string) {
|
||||
if ctx.watch == nil {
|
||||
ctx.watch = map[dbKey]uint{}
|
||||
}
|
||||
ctx.watch[dbKey{db: db.id, key: key}] = db.keyVersion[key] // Can be 0.
|
||||
}
|
||||
|
||||
func unwatch(ctx *connCtx) {
|
||||
ctx.watch = nil
|
||||
}
|
||||
|
||||
// setDirty can be called even when not in a tx. It is a no-op then.
|
||||
func setDirty(c *server.Peer) {
|
||||
if c.Ctx == nil {
|
||||
// No transaction. Not relevant.
|
||||
return
|
||||
}
|
||||
getCtx(c).dirtyTransaction = true
|
||||
}
|
||||
|
||||
func (m *Miniredis) addSubscriber(s *Subscriber) {
|
||||
m.subscribers[s] = struct{}{}
|
||||
}
|
||||
|
||||
// closes and removes the subscriber.
|
||||
func (m *Miniredis) removeSubscriber(s *Subscriber) {
|
||||
_, ok := m.subscribers[s]
|
||||
delete(m.subscribers, s)
|
||||
if ok {
|
||||
s.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Miniredis) publish(c, msg string) int {
|
||||
n := 0
|
||||
for s := range m.subscribers {
|
||||
n += s.Publish(c, msg)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// enter 'subscribed state', or return the existing one.
|
||||
func (m *Miniredis) subscribedState(c *server.Peer) *Subscriber {
|
||||
ctx := getCtx(c)
|
||||
sub := ctx.subscriber
|
||||
if sub != nil {
|
||||
return sub
|
||||
}
|
||||
|
||||
sub = newSubscriber()
|
||||
m.addSubscriber(sub)
|
||||
|
||||
c.OnDisconnect(func() {
|
||||
m.Lock()
|
||||
m.removeSubscriber(sub)
|
||||
m.Unlock()
|
||||
})
|
||||
|
||||
ctx.subscriber = sub
|
||||
|
||||
go monitorPublish(c, sub.publish)
|
||||
go monitorPpublish(c, sub.ppublish)
|
||||
|
||||
return sub
|
||||
}
|
||||
|
||||
// whenever the p?sub count drops to 0 the subscribed state should be stopped, and
|
||||
// all redis commands are allowed again.
|
||||
func endSubscriber(m *Miniredis, c *server.Peer) {
|
||||
ctx := getCtx(c)
|
||||
if sub := ctx.subscriber; sub != nil {
|
||||
m.removeSubscriber(sub) // will Close() the sub
|
||||
}
|
||||
ctx.subscriber = nil
|
||||
}
|
||||
|
||||
// NewSubscriber starts a new pubsub subscriber. It can (un)subscribe to channels and
|
||||
// patterns, and has a channel to get published messages. Close it with
|
||||
// Close().
|
||||
// Does not close itself when there are no subscriptions left.
|
||||
func (m *Miniredis) NewSubscriber() *Subscriber {
|
||||
sub := newSubscriber()
|
||||
|
||||
m.Lock()
|
||||
m.addSubscriber(sub)
|
||||
m.Unlock()
|
||||
|
||||
return sub
|
||||
}
|
||||
|
||||
func (m *Miniredis) allSubscribers() []*Subscriber {
|
||||
var subs []*Subscriber
|
||||
for s := range m.subscribers {
|
||||
subs = append(subs, s)
|
||||
}
|
||||
return subs
|
||||
}
|
||||
|
||||
func (m *Miniredis) Seed(seed int) {
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
|
||||
// m.rand is not safe for concurrent use.
|
||||
m.rand = rand.New(rand.NewSource(int64(seed)))
|
||||
}
|
||||
|
||||
func (m *Miniredis) randIntn(n int) int {
|
||||
if m.rand == nil {
|
||||
return rand.Intn(n)
|
||||
}
|
||||
return m.rand.Intn(n)
|
||||
}
|
||||
|
||||
// shuffle shuffles a list of strings. Kinda.
|
||||
func (m *Miniredis) shuffle(l []string) {
|
||||
for range l {
|
||||
i := m.randIntn(len(l))
|
||||
j := m.randIntn(len(l))
|
||||
l[i], l[j] = l[j], l[i]
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Miniredis) effectiveNow() time.Time {
|
||||
if !m.now.IsZero() {
|
||||
return m.now
|
||||
}
|
||||
return time.Now().UTC()
|
||||
}
|
||||
|
||||
// convert a unixtimestamp to a duration, to use an absolute time as TTL.
|
||||
// d can be either time.Second or time.Millisecond.
|
||||
func (m *Miniredis) at(i int, d time.Duration) time.Duration {
|
||||
var ts time.Time
|
||||
switch d {
|
||||
case time.Millisecond:
|
||||
ts = time.Unix(int64(i/1000), 1000000*int64(i%1000))
|
||||
case time.Second:
|
||||
ts = time.Unix(int64(i), 0)
|
||||
default:
|
||||
panic("invalid time unit (d). Fixme!")
|
||||
}
|
||||
now := m.effectiveNow()
|
||||
return ts.Sub(now)
|
||||
}
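
// Illustrative sketch (not part of the original vendored file): a worked
// example of at(), which is only callable from inside the package (the method
// is unexported). With SetTime pinning the clock, an absolute EXPIREAT-style
// timestamp becomes a relative TTL.
func exampleAt(m *Miniredis) time.Duration {
	m.SetTime(time.Unix(1_700_000_000, 0))
	// 1_700_000_060 is 60 seconds past the pinned clock, so this yields 60s.
	return m.at(1_700_000_060, time.Second)
}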
|
||||
|
||||
// copy does not mind if dst already exists.
|
||||
func (m *Miniredis) copy(
|
||||
srcDB *RedisDB, src string,
|
||||
destDB *RedisDB, dst string,
|
||||
) error {
|
||||
if !srcDB.exists(src) {
|
||||
return ErrKeyNotFound
|
||||
}
|
||||
|
||||
switch srcDB.t(src) {
|
||||
case "string":
|
||||
destDB.stringKeys[dst] = srcDB.stringKeys[src]
|
||||
case "hash":
|
||||
destDB.hashKeys[dst] = copyHashKey(srcDB.hashKeys[src])
|
||||
case "list":
|
||||
destDB.listKeys[dst] = srcDB.listKeys[src]
|
||||
case "set":
|
||||
destDB.setKeys[dst] = copySetKey(srcDB.setKeys[src])
|
||||
case "zset":
|
||||
destDB.sortedsetKeys[dst] = copySortedSet(srcDB.sortedsetKeys[src])
|
||||
case "stream":
|
||||
destDB.streamKeys[dst] = srcDB.streamKeys[src].copy()
|
||||
case "hll":
|
||||
destDB.hllKeys[dst] = srcDB.hllKeys[src].copy()
|
||||
default:
|
||||
panic("missing case")
|
||||
}
|
||||
destDB.keys[dst] = srcDB.keys[src]
|
||||
destDB.keyVersion[dst]++
|
||||
if v, ok := srcDB.ttl[src]; ok {
|
||||
destDB.ttl[dst] = v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyHashKey(orig hashKey) hashKey {
|
||||
cpy := hashKey{}
|
||||
for k, v := range orig {
|
||||
cpy[k] = v
|
||||
}
|
||||
return cpy
|
||||
}
|
||||
|
||||
func copySetKey(orig setKey) setKey {
|
||||
cpy := setKey{}
|
||||
for k, v := range orig {
|
||||
cpy[k] = v
|
||||
}
|
||||
return cpy
|
||||
}
|
||||
|
||||
func copySortedSet(orig sortedSet) sortedSet {
|
||||
cpy := sortedSet{}
|
||||
for k, v := range orig {
|
||||
cpy[k] = v
|
||||
}
|
||||
return cpy
|
||||
}
|
||||
25
vendor/github.com/alicebob/miniredis/v2/opts.go
generated
vendored
@@ -1,25 +0,0 @@
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
// optInt parses an int option in a command.
|
||||
// Writes "invalid integer" error to c if it's not a valid integer. Returns
|
||||
// whether or not things were okay.
|
||||
func optInt(c *server.Peer, src string, dest *int) bool {
|
||||
return optIntErr(c, src, dest, msgInvalidInt)
|
||||
}
|
||||
|
||||
func optIntErr(c *server.Peer, src string, dest *int, errMsg string) bool {
|
||||
n, err := strconv.Atoi(src)
|
||||
if err != nil {
|
||||
setDirty(c)
|
||||
c.WriteError(errMsg)
|
||||
return false
|
||||
}
|
||||
*dest = n
|
||||
return true
|
||||
}
|
||||
240
vendor/github.com/alicebob/miniredis/v2/pubsub.go
generated
vendored
@@ -1,240 +0,0 @@
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
// PubsubMessage is what gets broadcasted over pubsub channels.
|
||||
type PubsubMessage struct {
|
||||
Channel string
|
||||
Message string
|
||||
}
|
||||
|
||||
type PubsubPmessage struct {
|
||||
Pattern string
|
||||
Channel string
|
||||
Message string
|
||||
}
|
||||
|
||||
// Subscriber has the (p)subscriptions.
|
||||
type Subscriber struct {
|
||||
publish chan PubsubMessage
|
||||
ppublish chan PubsubPmessage
|
||||
channels map[string]struct{}
|
||||
patterns map[string]*regexp.Regexp
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// Make a new subscriber. The channel is not buffered, so you will need to keep
|
||||
// reading using Messages(). Use Close() when done, or unsubscribe.
|
||||
func newSubscriber() *Subscriber {
|
||||
return &Subscriber{
|
||||
publish: make(chan PubsubMessage),
|
||||
ppublish: make(chan PubsubPmessage),
|
||||
channels: map[string]struct{}{},
|
||||
patterns: map[string]*regexp.Regexp{},
|
||||
}
|
||||
}
|
||||
|
||||
// Close the listening channel
|
||||
func (s *Subscriber) Close() {
|
||||
close(s.publish)
|
||||
close(s.ppublish)
|
||||
}
|
||||
|
||||
// Count the total number of channels and patterns
|
||||
func (s *Subscriber) Count() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.count()
|
||||
}
|
||||
|
||||
func (s *Subscriber) count() int {
|
||||
return len(s.channels) + len(s.patterns)
|
||||
}
|
||||
|
||||
// Subscribe to a channel. Returns the total number of (p)subscriptions after
|
||||
// subscribing.
|
||||
func (s *Subscriber) Subscribe(c string) int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.channels[c] = struct{}{}
|
||||
return s.count()
|
||||
}
|
||||
|
||||
// Unsubscribe a channel. Returns the total number of (p)subscriptions after
|
||||
// unsubscribing.
|
||||
func (s *Subscriber) Unsubscribe(c string) int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
delete(s.channels, c)
|
||||
return s.count()
|
||||
}
|
||||
|
||||
// Subscribe to a pattern. Returns the total number of (p)subscriptions after
|
||||
// subscribing.
|
||||
func (s *Subscriber) Psubscribe(pat string) int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
s.patterns[pat] = patternRE(pat)
|
||||
return s.count()
|
||||
}
|
||||
|
||||
// Unsubscribe a pattern. Returns the total number of (p)subscriptions after
|
||||
// unsubscribing.
|
||||
func (s *Subscriber) Punsubscribe(pat string) int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
delete(s.patterns, pat)
|
||||
return s.count()
|
||||
}
|
||||
|
||||
// List all subscribed channels, in alphabetical order
|
||||
func (s *Subscriber) Channels() []string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
var cs []string
|
||||
for c := range s.channels {
|
||||
cs = append(cs, c)
|
||||
}
|
||||
sort.Strings(cs)
|
||||
return cs
|
||||
}
|
||||
|
||||
// List all subscribed patterns, in alphabetical order
|
||||
func (s *Subscriber) Patterns() []string {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
var ps []string
|
||||
for p := range s.patterns {
|
||||
ps = append(ps, p)
|
||||
}
|
||||
sort.Strings(ps)
|
||||
return ps
|
||||
}
|
||||
|
||||
// Publish a message. Returns how often we sent the message (it can be
|
||||
// a match for a subscription and for a psubscription).
|
||||
func (s *Subscriber) Publish(c, msg string) int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
found := 0
|
||||
|
||||
subs:
|
||||
for sub := range s.channels {
|
||||
if sub == c {
|
||||
s.publish <- PubsubMessage{c, msg}
|
||||
found++
|
||||
break subs
|
||||
}
|
||||
}
|
||||
|
||||
pats:
|
||||
for orig, pat := range s.patterns {
|
||||
if pat != nil && pat.MatchString(c) {
|
||||
s.ppublish <- PubsubPmessage{orig, c, msg}
|
||||
found++
|
||||
break pats
|
||||
}
|
||||
}
|
||||
|
||||
return found
|
||||
}
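
// Illustrative sketch (not part of the original vendored file): because the
// publish/ppublish channels are unbuffered, a reader on Messages() must be
// running before Publish is called, otherwise Publish blocks. A minimal usage
// sketch, assuming an existing Miniredis instance `m`:
func exampleSubscriber(m *Miniredis) string {
	sub := m.NewSubscriber()
	defer sub.Close()
	sub.Subscribe("events")

	got := make(chan string, 1)
	go func() {
		msg := <-sub.Messages()
		got <- msg.Message
	}()

	sub.Publish("events", "hello") // delivered once: one channel match
	return <-got
}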
|
||||
|
||||
// The channel to read messages for this subscriber. Only for messages matching
|
||||
// a SUBSCRIBE.
|
||||
func (s *Subscriber) Messages() <-chan PubsubMessage {
|
||||
return s.publish
|
||||
}
|
||||
|
||||
// The channel to read messages for this subscriber. Only for messages matching
|
||||
// a PSUBSCRIBE.
|
||||
func (s *Subscriber) Pmessages() <-chan PubsubPmessage {
|
||||
return s.ppublish
|
||||
}
|
||||
|
||||
// List all pubsub channels. If `pat` isn't empty channel names must match the
|
||||
// pattern. Channels are returned alphabetically.
|
||||
func activeChannels(subs []*Subscriber, pat string) []string {
|
||||
channels := map[string]struct{}{}
|
||||
for _, s := range subs {
|
||||
for c := range s.channels {
|
||||
channels[c] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
var cpat *regexp.Regexp
|
||||
if pat != "" {
|
||||
cpat = patternRE(pat)
|
||||
}
|
||||
|
||||
var cs []string
|
||||
for k := range channels {
|
||||
if cpat != nil && !cpat.MatchString(k) {
|
||||
continue
|
||||
}
|
||||
cs = append(cs, k)
|
||||
}
|
||||
sort.Strings(cs)
|
||||
return cs
|
||||
}
|
||||
|
||||
// Count all subscribed (not psubscribed) clients for the given channel.
|
||||
func countSubs(subs []*Subscriber, channel string) int {
|
||||
n := 0
|
||||
for _, p := range subs {
|
||||
for c := range p.channels {
|
||||
if c == channel {
|
||||
n++
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// Count the total of all client psubscriptions.
|
||||
func countPsubs(subs []*Subscriber) int {
|
||||
n := 0
|
||||
for _, p := range subs {
|
||||
n += len(p.patterns)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func monitorPublish(conn *server.Peer, msgs <-chan PubsubMessage) {
|
||||
for msg := range msgs {
|
||||
conn.Block(func(c *server.Writer) {
|
||||
c.WritePushLen(3)
|
||||
c.WriteBulk("message")
|
||||
c.WriteBulk(msg.Channel)
|
||||
c.WriteBulk(msg.Message)
|
||||
c.Flush()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func monitorPpublish(conn *server.Peer, msgs <-chan PubsubPmessage) {
|
||||
for msg := range msgs {
|
||||
conn.Block(func(c *server.Writer) {
|
||||
c.WritePushLen(4)
|
||||
c.WriteBulk("pmessage")
|
||||
c.WriteBulk(msg.Pattern)
|
||||
c.WriteBulk(msg.Channel)
|
||||
c.WriteBulk(msg.Message)
|
||||
c.Flush()
|
||||
})
|
||||
}
|
||||
}
|
||||
235
vendor/github.com/alicebob/miniredis/v2/redis.go
generated
vendored
@@ -1,235 +0,0 @@
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/alicebob/miniredis/v2/server"
|
||||
)
|
||||
|
||||
const (
|
||||
msgWrongType = "WRONGTYPE Operation against a key holding the wrong kind of value"
|
||||
msgNotValidHllValue = "WRONGTYPE Key is not a valid HyperLogLog string value."
|
||||
msgInvalidInt = "ERR value is not an integer or out of range"
|
||||
msgInvalidFloat = "ERR value is not a valid float"
|
||||
msgInvalidMinMax = "ERR min or max is not a float"
|
||||
msgInvalidRangeItem = "ERR min or max not valid string range item"
|
||||
msgInvalidTimeout = "ERR timeout is not a float or out of range"
|
||||
msgSyntaxError = "ERR syntax error"
|
||||
msgKeyNotFound = "ERR no such key"
|
||||
msgOutOfRange = "ERR index out of range"
|
||||
msgInvalidCursor = "ERR invalid cursor"
|
||||
msgXXandNX = "ERR XX and NX options at the same time are not compatible"
|
||||
msgNegTimeout = "ERR timeout is negative"
|
||||
msgInvalidSETime = "ERR invalid expire time in set"
|
||||
msgInvalidSETEXTime = "ERR invalid expire time in setex"
|
||||
msgInvalidPSETEXTime = "ERR invalid expire time in psetex"
|
||||
msgInvalidKeysNumber = "ERR Number of keys can't be greater than number of args"
|
||||
msgNegativeKeysNumber = "ERR Number of keys can't be negative"
|
||||
msgFScriptUsage = "ERR Unknown subcommand or wrong number of arguments for '%s'. Try SCRIPT HELP."
|
||||
msgFPubsubUsage = "ERR Unknown subcommand or wrong number of arguments for '%s'. Try PUBSUB HELP."
|
||||
msgScriptFlush = "ERR SCRIPT FLUSH only support SYNC|ASYNC option"
|
||||
msgSingleElementPair = "ERR INCR option supports a single increment-element pair"
|
||||
msgGTLTandNX = "ERR GT, LT, and/or NX options at the same time are not compatible"
|
||||
msgInvalidStreamID = "ERR Invalid stream ID specified as stream command argument"
|
||||
msgStreamIDTooSmall = "ERR The ID specified in XADD is equal or smaller than the target stream top item"
|
||||
msgStreamIDZero = "ERR The ID specified in XADD must be greater than 0-0"
|
||||
msgNoScriptFound = "NOSCRIPT No matching script. Please use EVAL."
|
||||
msgUnsupportedUnit = "ERR unsupported unit provided. please use m, km, ft, mi"
|
||||
msgNotFromScripts = "This Redis command is not allowed from scripts"
|
||||
msgXreadUnbalanced = "ERR Unbalanced XREAD list of streams: for each stream key an ID or '$' must be specified."
|
||||
msgXgroupKeyNotFound = "ERR The XGROUP subcommand requires the key to exist. Note that for CREATE you may want to use the MKSTREAM option to create an empty stream automatically."
|
||||
msgXtrimInvalidStrategy = "ERR unsupported XTRIM strategy. Please use MAXLEN, MINID"
|
||||
msgXtrimInvalidMaxLen = "ERR value is not an integer or out of range"
|
||||
msgXtrimInvalidLimit = "ERR syntax error, LIMIT cannot be used without the special ~ option"
|
||||
msgDBIndexOutOfRange = "ERR DB index is out of range"
|
||||
msgLimitCombination = "ERR syntax error, LIMIT is only supported in combination with either BYSCORE or BYLEX"
|
||||
msgRankIsZero = "ERR RANK can't be zero: use 1 to start from the first match, 2 from the second ... or use negative to start from the end of the list"
|
||||
msgCountIsNegative = "ERR COUNT can't be negative"
|
||||
msgMaxLengthIsNegative = "ERR MAXLEN can't be negative"
|
||||
)
|
||||
|
||||
func errWrongNumber(cmd string) string {
|
||||
return fmt.Sprintf("ERR wrong number of arguments for '%s' command", strings.ToLower(cmd))
|
||||
}
|
||||
|
||||
func errLuaParseError(err error) string {
|
||||
return fmt.Sprintf("ERR Error compiling script (new function): %s", err.Error())
|
||||
}
|
||||
|
||||
func errReadgroup(key, group string) error {
|
||||
return fmt.Errorf("NOGROUP No such key '%s' or consumer group '%s'", key, group)
|
||||
}
|
||||
|
||||
func errXreadgroup(key, group string) error {
|
||||
return fmt.Errorf("NOGROUP No such key '%s' or consumer group '%s' in XREADGROUP with GROUP option", key, group)
|
||||
}
|
||||
|
||||
// withTx wraps the non-argument-checking part of command handling code in
|
||||
// transaction logic.
|
||||
func withTx(
|
||||
m *Miniredis,
|
||||
c *server.Peer,
|
||||
cb txCmd,
|
||||
) {
|
||||
ctx := getCtx(c)
|
||||
|
||||
if ctx.nested {
|
||||
// this is a call via Lua's .call(). It's already locked.
|
||||
cb(c, ctx)
|
||||
m.signal.Broadcast()
|
||||
return
|
||||
}
|
||||
|
||||
if inTx(ctx) {
|
||||
addTxCmd(ctx, cb)
|
||||
c.WriteInline("QUEUED")
|
||||
return
|
||||
}
|
||||
m.Lock()
|
||||
cb(c, ctx)
|
||||
// done, wake up anyone who waits on anything.
|
||||
m.signal.Broadcast()
|
||||
m.Unlock()
|
||||
}
|
||||
|
||||
// a blockCmd, when executed, returns whether it is done
|
||||
type blockCmd func(*server.Peer, *connCtx) bool
|
||||
|
||||
// blocking keeps trying a command until the callback returns true. Calls
|
||||
// onTimeout after the timeout (or when we call this in a transaction).
|
||||
func blocking(
|
||||
m *Miniredis,
|
||||
c *server.Peer,
|
||||
timeout time.Duration,
|
||||
cb blockCmd,
|
||||
onTimeout func(*server.Peer),
|
||||
) {
|
||||
var (
|
||||
ctx = getCtx(c)
|
||||
)
|
||||
if inTx(ctx) {
|
||||
addTxCmd(ctx, func(c *server.Peer, ctx *connCtx) {
|
||||
if !cb(c, ctx) {
|
||||
onTimeout(c)
|
||||
}
|
||||
})
|
||||
c.WriteInline("QUEUED")
|
||||
return
|
||||
}
|
||||
|
||||
localCtx, cancel := context.WithCancel(m.Ctx)
|
||||
defer cancel()
|
||||
timedOut := false
|
||||
if timeout != 0 {
|
||||
go setCondTimer(localCtx, m.signal, &timedOut, timeout)
|
||||
}
|
||||
go func() {
|
||||
<-localCtx.Done()
|
||||
m.signal.Broadcast() // main loop might miss this signal
|
||||
}()
|
||||
|
||||
m.Lock()
|
||||
defer m.Unlock()
|
||||
for {
|
||||
done := cb(c, ctx)
|
||||
if done {
|
||||
return
|
||||
}
|
||||
|
||||
if m.Ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
if timedOut {
|
||||
onTimeout(c)
|
||||
return
|
||||
}
|
||||
|
||||
m.signal.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func setCondTimer(ctx context.Context, sig *sync.Cond, timedOut *bool, timeout time.Duration) {
|
||||
dl := time.NewTimer(timeout)
|
||||
defer dl.Stop()
|
||||
select {
|
||||
case <-dl.C:
|
||||
sig.L.Lock() // for timedOut
|
||||
*timedOut = true
|
||||
sig.Broadcast() // main loop might miss this signal
|
||||
sig.L.Unlock()
|
||||
case <-ctx.Done():
|
||||
}
|
||||
}
|
||||
|
||||
// formatBig formats a float the way redis does
|
||||
func formatBig(v *big.Float) string {
|
||||
// Format with %f and strip trailing 0s.
|
||||
if v.IsInf() {
|
||||
return "inf"
|
||||
}
|
||||
// if math.IsInf(v, -1) {
|
||||
// return "-inf"
|
||||
// }
|
||||
return stripZeros(fmt.Sprintf("%.17f", v))
|
||||
}
|
||||
|
||||
func stripZeros(sv string) string {
|
||||
for strings.Contains(sv, ".") {
|
||||
if sv[len(sv)-1] != '0' {
|
||||
break
|
||||
}
|
||||
// Remove trailing 0s.
|
||||
sv = sv[:len(sv)-1]
|
||||
// Ends with a '.'.
|
||||
if sv[len(sv)-1] == '.' {
|
||||
sv = sv[:len(sv)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
return sv
|
||||
}
|
||||
|
||||
// redisRange gives Go offsets for something l long with start/end in
|
||||
// Redis semantics. Both start and end can be negative.
|
||||
// Used for string range and list range things.
|
||||
// The results can be used as: v[start:end]
|
||||
// Note that GETRANGE (on a string key) never returns an empty string when end
|
||||
// is a large negative number.
|
||||
func redisRange(l, start, end int, stringSymantics bool) (int, int) {
|
||||
if start < 0 {
|
||||
start = l + start
|
||||
if start < 0 {
|
||||
start = 0
|
||||
}
|
||||
}
|
||||
if start > l {
|
||||
start = l
|
||||
}
|
||||
|
||||
if end < 0 {
|
||||
end = l + end
|
||||
if end < 0 {
|
||||
end = -1
|
||||
if stringSymantics {
|
||||
end = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
if end < math.MaxInt32 {
|
||||
end++ // end argument is inclusive in Redis.
|
||||
}
|
||||
if end > l {
|
||||
end = l
|
||||
}
|
||||
|
||||
if end < start {
|
||||
return 0, 0
|
||||
}
|
||||
return start, end
|
||||
}
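
// Illustrative sketch (not part of the original vendored file): a few worked
// examples of redisRange. Redis ranges are inclusive and may be negative
// (counted from the end), while the returned pair is a half-open Go slice range.
func exampleRedisRange() {
	s, e := redisRange(10, 2, 5, false)  // -> 2, 6   (elements 2..5)
	s, e = redisRange(10, -3, -1, false) // -> 7, 10  (the last three elements)
	_, _ = s, e
}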
|
||||
9
vendor/github.com/alicebob/miniredis/v2/server/Makefile
generated
vendored
@@ -1,9 +0,0 @@
|
||||
.PHONY: all build test
|
||||
|
||||
all: build test
|
||||
|
||||
build:
|
||||
go build
|
||||
|
||||
test:
|
||||
go test
|
||||
157
vendor/github.com/alicebob/miniredis/v2/server/proto.go
generated
vendored
@@ -1,157 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
type Simple string
|
||||
|
||||
// ErrProtocol is the general error for unexpected input
|
||||
var ErrProtocol = errors.New("invalid request")
|
||||
|
||||
// client always sends arrays with bulk strings
|
||||
func readArray(rd *bufio.Reader) ([]string, error) {
|
||||
line, err := rd.ReadString('\n')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(line) < 3 {
|
||||
return nil, ErrProtocol
|
||||
}
|
||||
|
||||
switch line[0] {
|
||||
default:
|
||||
return nil, ErrProtocol
|
||||
case '*':
|
||||
l, err := strconv.Atoi(line[1 : len(line)-2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// l can be -1
|
||||
var fields []string
|
||||
for ; l > 0; l-- {
|
||||
s, err := readString(rd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fields = append(fields, s)
|
||||
}
|
||||
return fields, nil
|
||||
}
|
||||
}
|
||||
|
||||
func readString(rd *bufio.Reader) (string, error) {
|
||||
line, err := rd.ReadString('\n')
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(line) < 3 {
|
||||
return "", ErrProtocol
|
||||
}
|
||||
|
||||
switch line[0] {
|
||||
default:
|
||||
return "", ErrProtocol
|
||||
case '+', '-', ':':
|
||||
// +: simple string
|
||||
// -: errors
|
||||
// :: integer
|
||||
// Simple line based replies.
|
||||
return string(line[1 : len(line)-2]), nil
|
||||
case '$':
|
||||
// bulk strings are: `$5\r\nhello\r\n`
|
||||
length, err := strconv.Atoi(line[1 : len(line)-2])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if length < 0 {
|
||||
// -1 is a nil response
|
||||
return "", nil
|
||||
}
|
||||
var (
|
||||
buf = make([]byte, length+2)
|
||||
pos = 0
|
||||
)
|
||||
for pos < length+2 {
|
||||
n, err := rd.Read(buf[pos:])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
pos += n
|
||||
}
|
||||
return string(buf[:length]), nil
|
||||
}
|
||||
}
|
||||
|
||||
// parse a reply
|
||||
func ParseReply(rd *bufio.Reader) (interface{}, error) {
|
||||
line, err := rd.ReadString('\n')
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(line) < 3 {
|
||||
return nil, ErrProtocol
|
||||
}
|
||||
|
||||
switch line[0] {
|
||||
default:
|
||||
return nil, ErrProtocol
|
||||
case '+':
|
||||
// +: simple string
|
||||
return Simple(line[1 : len(line)-2]), nil
|
||||
case '-':
|
||||
// -: errors
|
||||
return nil, errors.New(string(line[1 : len(line)-2]))
|
||||
case ':':
|
||||
// :: integer
|
||||
v := line[1 : len(line)-2]
|
||||
if v == "" {
|
||||
return 0, nil
|
||||
}
|
||||
n, err := strconv.Atoi(v)
|
||||
if err != nil {
|
||||
return nil, ErrProtocol
|
||||
}
|
||||
return n, nil
|
||||
case '$':
|
||||
// bulk strings are: `$5\r\nhello\r\n`
|
||||
length, err := strconv.Atoi(line[1 : len(line)-2])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if length < 0 {
|
||||
// -1 is a nil response
|
||||
return nil, nil
|
||||
}
|
||||
var (
|
||||
buf = make([]byte, length+2)
|
||||
pos = 0
|
||||
)
|
||||
for pos < length+2 {
|
||||
n, err := rd.Read(buf[pos:])
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
pos += n
|
||||
}
|
||||
return string(buf[:length]), nil
|
||||
case '*':
|
||||
// array
|
||||
l, err := strconv.Atoi(line[1 : len(line)-2])
|
||||
if err != nil {
|
||||
return nil, ErrProtocol
|
||||
}
|
||||
// l can be -1
|
||||
var fields []interface{}
|
||||
for ; l > 0; l-- {
|
||||
s, err := ParseReply(rd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fields = append(fields, s)
|
||||
}
|
||||
return fields, nil
|
||||
}
|
||||
}
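
// Illustrative sketch (not part of the original vendored file): feeding
// ParseReply a raw RESP bulk string. Assumes imports "bufio" and "strings".
func exampleParseReply() (interface{}, error) {
	// "$5\r\nhello\r\n" is the bulk-string encoding of "hello".
	rd := bufio.NewReader(strings.NewReader("$5\r\nhello\r\n"))
	return ParseReply(rd) // -> "hello", nil
}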
|
||||
487
vendor/github.com/alicebob/miniredis/v2/server/server.go
generated
vendored
@@ -1,487 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"unicode"
|
||||
)
|
||||
|
||||
func errUnknownCommand(cmd string, args []string) string {
|
||||
s := fmt.Sprintf("ERR unknown command `%s`, with args beginning with: ", cmd)
|
||||
if len(args) > 20 {
|
||||
args = args[:20]
|
||||
}
|
||||
for _, a := range args {
|
||||
s += fmt.Sprintf("`%s`, ", a)
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Cmd is what Register expects
|
||||
type Cmd func(c *Peer, cmd string, args []string)
|
||||
|
||||
type DisconnectHandler func(c *Peer)
|
||||
|
||||
// A Hook can be added to run before every cmd. Return true if the command is done.
|
||||
type Hook func(*Peer, string, ...string) bool
|
||||
|
||||
// Server is a simple redis server
|
||||
type Server struct {
|
||||
l net.Listener
|
||||
cmds map[string]Cmd
|
||||
preHook Hook
|
||||
peers map[net.Conn]struct{}
|
||||
mu sync.Mutex
|
||||
wg sync.WaitGroup
|
||||
infoConns int
|
||||
infoCmds int
|
||||
}
|
||||
|
||||
// NewServer makes a server listening on addr. Close with .Close().
|
||||
func NewServer(addr string) (*Server, error) {
|
||||
l, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newServer(l), nil
|
||||
}
|
||||
|
||||
func NewServerTLS(addr string, cfg *tls.Config) (*Server, error) {
|
||||
l, err := tls.Listen("tcp", addr, cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return newServer(l), nil
|
||||
}
|
||||
|
||||
func newServer(l net.Listener) *Server {
|
||||
s := Server{
|
||||
cmds: map[string]Cmd{},
|
||||
peers: map[net.Conn]struct{}{},
|
||||
l: l,
|
||||
}
|
||||
|
||||
s.wg.Add(1)
|
||||
go func() {
|
||||
defer s.wg.Done()
|
||||
s.serve(l)
|
||||
|
||||
s.mu.Lock()
|
||||
for c := range s.peers {
|
||||
c.Close()
|
||||
}
|
||||
s.mu.Unlock()
|
||||
}()
|
||||
return &s
|
||||
}
|
||||
|
||||
// (un)set a hook which is run before every call. It returns true if the command is done.
|
||||
func (s *Server) SetPreHook(h Hook) {
|
||||
s.mu.Lock()
|
||||
s.preHook = h
|
||||
s.mu.Unlock()
|
||||
}
|
||||
|
||||
func (s *Server) serve(l net.Listener) {
|
||||
for {
|
||||
conn, err := l.Accept()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
s.ServeConn(conn)
|
||||
}
|
||||
}
|
||||
|
||||
// ServeConn handles a net.Conn. Nice with net.Pipe()
|
||||
func (s *Server) ServeConn(conn net.Conn) {
|
||||
s.wg.Add(1)
|
||||
s.mu.Lock()
|
||||
s.peers[conn] = struct{}{}
|
||||
s.infoConns++
|
||||
s.mu.Unlock()
|
||||
|
||||
go func() {
|
||||
defer s.wg.Done()
|
||||
defer conn.Close()
|
||||
|
||||
s.servePeer(conn)
|
||||
|
||||
s.mu.Lock()
|
||||
delete(s.peers, conn)
|
||||
s.mu.Unlock()
|
||||
}()
|
||||
}
|
||||
|
||||
// Addr has the net.Addr struct
|
||||
func (s *Server) Addr() *net.TCPAddr {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
if s.l == nil {
|
||||
return nil
|
||||
}
|
||||
return s.l.Addr().(*net.TCPAddr)
|
||||
}
|
||||
|
||||
// Close a server started with NewServer. It will wait until all clients are
|
||||
// closed.
|
||||
func (s *Server) Close() {
|
||||
s.mu.Lock()
|
||||
if s.l != nil {
|
||||
s.l.Close()
|
||||
}
|
||||
s.l = nil
|
||||
s.mu.Unlock()
|
||||
|
||||
s.wg.Wait()
|
||||
}
|
||||
|
||||
// Register a command. It can't have been registered before. Safe to call on a
|
||||
// running server.
|
||||
func (s *Server) Register(cmd string, f Cmd) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
cmd = strings.ToUpper(cmd)
|
||||
if _, ok := s.cmds[cmd]; ok {
|
||||
return fmt.Errorf("command already registered: %s", cmd)
|
||||
}
|
||||
s.cmds[cmd] = f
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Server) servePeer(c net.Conn) {
|
||||
r := bufio.NewReader(c)
|
||||
peer := &Peer{
|
||||
w: bufio.NewWriter(c),
|
||||
}
|
||||
defer func() {
|
||||
for _, f := range peer.onDisconnect {
|
||||
f()
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
args, err := readArray(r)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
s.Dispatch(peer, args)
|
||||
peer.Flush()
|
||||
|
||||
s.mu.Lock()
|
||||
closed := peer.closed
|
||||
s.mu.Unlock()
|
||||
if closed {
|
||||
c.Close()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Server) Dispatch(c *Peer, args []string) {
|
||||
cmd, args := args[0], args[1:]
|
||||
cmdUp := strings.ToUpper(cmd)
|
||||
s.mu.Lock()
|
||||
h := s.preHook
|
||||
s.mu.Unlock()
|
||||
if h != nil {
|
||||
if h(c, cmdUp, args...) {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
cb, ok := s.cmds[cmdUp]
|
||||
s.mu.Unlock()
|
||||
if !ok {
|
||||
c.WriteError(errUnknownCommand(cmd, args))
|
||||
return
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.infoCmds++
|
||||
s.mu.Unlock()
|
||||
cb(c, cmdUp, args)
|
||||
}
|
||||
|
||||
// TotalCommands is the total of (known) commands since the server started
|
||||
func (s *Server) TotalCommands() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.infoCmds
|
||||
}
|
||||
|
||||
// ClientsLen gives the number of connected clients right now
|
||||
func (s *Server) ClientsLen() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return len(s.peers)
|
||||
}
|
||||
|
||||
// TotalConnections gives the number of clients connected since the server
|
||||
// started, including the currently connected ones
|
||||
func (s *Server) TotalConnections() int {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.infoConns
|
||||
}
|
||||
|
||||
// Peer is a client connected to the server
|
||||
type Peer struct {
|
||||
w *bufio.Writer
|
||||
closed bool
|
||||
Resp3 bool
|
||||
Ctx interface{} // anything goes, server won't touch this
|
||||
onDisconnect []func() // list of callbacks
|
||||
mu sync.Mutex // for Block()
|
||||
}
|
||||
|
||||
func NewPeer(w *bufio.Writer) *Peer {
|
||||
return &Peer{
|
||||
w: w,
|
||||
}
|
||||
}
|
||||
|
||||
// Flush the write buffer. Called automatically after every redis command
|
||||
func (c *Peer) Flush() {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.w.Flush()
|
||||
}
|
||||
|
||||
// Close the client connection after the current command is done.
|
||||
func (c *Peer) Close() {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.closed = true
|
||||
}
|
||||
|
||||
// Register a function to execute on disconnect. There can be multiple
|
||||
// functions registered.
|
||||
func (c *Peer) OnDisconnect(f func()) {
|
||||
c.onDisconnect = append(c.onDisconnect, f)
|
||||
}
|
||||
|
||||
// issue multiple calls, guarded with a mutex
|
||||
func (c *Peer) Block(f func(*Writer)) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
f(&Writer{c.w, c.Resp3})
|
||||
}
|
||||
|
||||
// WriteError writes a redis 'Error'
|
||||
func (c *Peer) WriteError(e string) {
|
||||
c.Block(func(w *Writer) {
|
||||
w.WriteError(e)
|
||||
})
|
||||
}
|
||||
|
||||
// WriteInline writes a redis inline string
|
||||
func (c *Peer) WriteInline(s string) {
|
||||
c.Block(func(w *Writer) {
|
||||
w.WriteInline(s)
|
||||
})
|
||||
}
|
||||
|
||||
// WriteOK write the inline string `OK`
|
||||
func (c *Peer) WriteOK() {
|
||||
c.WriteInline("OK")
|
||||
}
|
||||
|
||||
// WriteBulk writes a bulk string
|
||||
func (c *Peer) WriteBulk(s string) {
|
||||
c.Block(func(w *Writer) {
|
||||
w.WriteBulk(s)
|
||||
})
|
||||
}
|
||||
|
||||
// WriteNull writes a redis Null element
|
||||
func (c *Peer) WriteNull() {
|
||||
c.Block(func(w *Writer) {
|
||||
w.WriteNull()
|
||||
})
|
||||
}
|
||||
|
||||
// WriteLen starts an array with the given length
|
||||
func (c *Peer) WriteLen(n int) {
|
||||
c.Block(func(w *Writer) {
|
||||
w.WriteLen(n)
|
||||
})
|
||||
}
|
||||
|
||||
// WriteMapLen starts a map with the given length (number of keys)
|
||||
func (c *Peer) WriteMapLen(n int) {
|
||||
c.Block(func(w *Writer) {
|
||||
w.WriteMapLen(n)
|
||||
})
|
||||
}
|
||||
|
||||
// WriteSetLen starts a set with the given length (number of elements)
|
||||
func (c *Peer) WriteSetLen(n int) {
|
||||
c.Block(func(w *Writer) {
|
||||
w.WriteSetLen(n)
|
||||
})
|
||||
}
|
||||
|
||||
// WritePushLen starts a push-data array with the given length
|
||||
func (c *Peer) WritePushLen(n int) {
|
||||
c.Block(func(w *Writer) {
|
||||
w.WritePushLen(n)
|
||||
})
|
||||
}
|
||||
|
||||
// WriteInt writes an integer
|
||||
func (c *Peer) WriteInt(n int) {
|
||||
c.Block(func(w *Writer) {
|
||||
w.WriteInt(n)
|
||||
})
|
||||
}
|
||||
|
||||
// WriteFloat writes a float
|
||||
func (c *Peer) WriteFloat(n float64) {
|
||||
c.Block(func(w *Writer) {
|
||||
w.WriteFloat(n)
|
||||
})
|
||||
}
|
||||
|
||||
// WriteRaw writes a raw redis response
|
||||
func (c *Peer) WriteRaw(s string) {
|
||||
c.Block(func(w *Writer) {
|
||||
w.WriteRaw(s)
|
||||
})
|
||||
}
|
||||
|
||||
// WriteStrings is a helper to (bulk)write a string list
|
||||
func (c *Peer) WriteStrings(strs []string) {
|
||||
c.Block(func(w *Writer) {
|
||||
w.WriteStrings(strs)
|
||||
})
|
||||
}
|
||||
|
||||
func toInline(s string) string {
|
||||
return strings.Map(func(r rune) rune {
|
||||
if unicode.IsSpace(r) {
|
||||
return ' '
|
||||
}
|
||||
return r
|
||||
}, s)
|
||||
}
|
||||
|
||||
// A Writer is given to the callback in Block()
|
||||
type Writer struct {
|
||||
w *bufio.Writer
|
||||
resp3 bool
|
||||
}
|
||||
|
||||
// WriteError writes a redis 'Error'
|
||||
func (w *Writer) WriteError(e string) {
|
||||
fmt.Fprintf(w.w, "-%s\r\n", toInline(e))
|
||||
}
|
||||
|
||||
func (w *Writer) WriteLen(n int) {
|
||||
fmt.Fprintf(w.w, "*%d\r\n", n)
|
||||
}
|
||||
|
||||
func (w *Writer) WriteMapLen(n int) {
|
||||
if w.resp3 {
|
||||
fmt.Fprintf(w.w, "%%%d\r\n", n)
|
||||
return
|
||||
}
|
||||
w.WriteLen(n * 2)
|
||||
}
|
||||
|
||||
func (w *Writer) WriteSetLen(n int) {
|
||||
if w.resp3 {
|
||||
fmt.Fprintf(w.w, "~%d\r\n", n)
|
||||
return
|
||||
}
|
||||
w.WriteLen(n)
|
||||
}
|
||||
|
||||
func (w *Writer) WritePushLen(n int) {
|
||||
if w.resp3 {
|
||||
fmt.Fprintf(w.w, ">%d\r\n", n)
|
||||
return
|
||||
}
|
||||
w.WriteLen(n)
|
||||
}
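
// Illustrative sketch (not part of the original vendored file): how the resp3
// flag changes the aggregate headers written above. A map of 2 pairs is
// "%2\r\n" in RESP3 but degrades to a flat 4-element array "*4\r\n" in RESP2;
// sets and push frames degrade to plain arrays the same way.
func exampleRespHeaders(w *Writer) {
	w.WriteMapLen(2)  // RESP3: "%2\r\n"   RESP2: "*4\r\n"
	w.WriteSetLen(3)  // RESP3: "~3\r\n"   RESP2: "*3\r\n"
	w.WritePushLen(3) // RESP3: ">3\r\n"   RESP2: "*3\r\n"
}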
|
||||
|
||||
// WriteBulk writes a bulk string
|
||||
func (w *Writer) WriteBulk(s string) {
|
||||
fmt.Fprintf(w.w, "$%d\r\n%s\r\n", len(s), s)
|
||||
}
|
||||
|
||||
// WriteStrings writes a list of strings (bulk)
|
||||
func (w *Writer) WriteStrings(strs []string) {
|
||||
w.WriteLen(len(strs))
|
||||
for _, s := range strs {
|
||||
w.WriteBulk(s)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteInt writes an integer
|
||||
func (w *Writer) WriteInt(n int) {
|
||||
fmt.Fprintf(w.w, ":%d\r\n", n)
|
||||
}
|
||||
|
||||
// WriteFloat writes a float
|
||||
func (w *Writer) WriteFloat(n float64) {
|
||||
if w.resp3 {
|
||||
fmt.Fprintf(w.w, ",%s\r\n", formatFloat(n))
|
||||
return
|
||||
}
|
||||
w.WriteBulk(formatFloat(n))
|
||||
}
|
||||
|
||||
// WriteNull writes a redis Null element
|
||||
func (w *Writer) WriteNull() {
|
||||
if w.resp3 {
|
||||
fmt.Fprint(w.w, "_\r\n")
|
||||
return
|
||||
}
|
||||
fmt.Fprintf(w.w, "$-1\r\n")
|
||||
}
|
||||
|
||||
// WriteInline writes a redis inline string
|
||||
func (w *Writer) WriteInline(s string) {
|
||||
fmt.Fprintf(w.w, "+%s\r\n", toInline(s))
|
||||
}
|
||||
|
||||
// WriteRaw writes a raw redis response
|
||||
func (w *Writer) WriteRaw(s string) {
|
||||
fmt.Fprint(w.w, s)
|
||||
}
|
||||
|
||||
func (w *Writer) Flush() {
|
||||
w.w.Flush()
|
||||
}
|
||||
|
||||
// formatFloat formats a float the way redis does (sort-of)
|
||||
func formatFloat(v float64) string {
|
||||
if math.IsInf(v, 1) {
|
||||
return "inf"
|
||||
}
|
||||
if math.IsInf(v, -1) {
|
||||
return "-inf"
|
||||
}
|
||||
return stripZeros(fmt.Sprintf("%.12f", v))
|
||||
}
|
||||
|
||||
func stripZeros(sv string) string {
|
||||
for strings.Contains(sv, ".") {
|
||||
if sv[len(sv)-1] != '0' {
|
||||
break
|
||||
}
|
||||
// Remove trailing 0s.
|
||||
sv = sv[:len(sv)-1]
|
||||
// Ends with a '.'.
|
||||
if sv[len(sv)-1] == '.' {
|
||||
sv = sv[:len(sv)-1]
|
||||
break
|
||||
}
|
||||
}
|
||||
return sv
|
||||
}
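
// Illustrative sketch (not part of the original vendored file): worked
// examples of formatFloat/stripZeros, which mimic how redis prints floats.
func exampleFormatFloat() {
	_ = formatFloat(3.14)        // "3.14" (trailing zeros stripped)
	_ = formatFloat(2.0)         // "2"    (trailing dot stripped too)
	_ = formatFloat(math.Inf(1)) // "inf"
}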
|
||||
98
vendor/github.com/alicebob/miniredis/v2/sorted_set.go
generated
vendored
@@ -1,98 +0,0 @@
|
||||
package miniredis
|
||||
|
||||
// The most KISS way to implement a sorted set. Luckily we don't care about
|
||||
// performance that much.
|
||||
|
||||
import (
|
||||
"sort"
|
||||
)
|
||||
|
||||
type direction int
|
||||
|
||||
const (
|
||||
unsorted direction = iota
|
||||
asc
|
||||
desc
|
||||
)
|
||||
|
||||
type sortedSet map[string]float64
|
||||
|
||||
type ssElem struct {
|
||||
score float64
|
||||
member string
|
||||
}
|
||||
type ssElems []ssElem
|
||||
|
||||
type byScore ssElems
|
||||
|
||||
func (sse byScore) Len() int { return len(sse) }
|
||||
func (sse byScore) Swap(i, j int) { sse[i], sse[j] = sse[j], sse[i] }
|
||||
func (sse byScore) Less(i, j int) bool {
|
||||
if sse[i].score != sse[j].score {
|
||||
return sse[i].score < sse[j].score
|
||||
}
|
||||
return sse[i].member < sse[j].member
|
||||
}
|
||||
|
||||
func newSortedSet() sortedSet {
|
||||
return sortedSet{}
|
||||
}
|
||||
|
||||
func (ss *sortedSet) card() int {
|
||||
return len(*ss)
|
||||
}
|
||||
|
||||
func (ss *sortedSet) set(score float64, member string) {
|
||||
(*ss)[member] = score
|
||||
}
|
||||
|
||||
func (ss *sortedSet) get(member string) (float64, bool) {
|
||||
v, ok := (*ss)[member]
|
||||
return v, ok
|
||||
}
|
||||
|
||||
// elems gives the list of ssElem, ready to sort.
|
||||
func (ss *sortedSet) elems() ssElems {
|
||||
elems := make(ssElems, 0, len(*ss))
|
||||
for e, s := range *ss {
|
||||
elems = append(elems, ssElem{s, e})
|
||||
}
|
||||
return elems
|
||||
}
|
||||
|
||||
func (ss *sortedSet) byScore(d direction) ssElems {
|
||||
elems := ss.elems()
|
||||
sort.Sort(byScore(elems))
|
||||
if d == desc {
|
||||
reverseElems(elems)
|
||||
}
|
||||
return ssElems(elems)
|
||||
}
|
||||
|
||||
// rankByScore gives the (0-based) index of member, or returns false.
|
||||
func (ss *sortedSet) rankByScore(member string, d direction) (int, bool) {
|
||||
if _, ok := (*ss)[member]; !ok {
|
||||
return 0, false
|
||||
}
|
||||
for i, e := range ss.byScore(d) {
|
||||
if e.member == member {
|
||||
return i, true
|
||||
}
|
||||
}
|
||||
// Can't happen
|
||||
return 0, false
|
||||
}
|
||||
|
||||
func reverseSlice(o []string) {
|
||||
for i := range make([]struct{}, len(o)/2) {
|
||||
other := len(o) - 1 - i
|
||||
o[i], o[other] = o[other], o[i]
|
||||
}
|
||||
}
|
||||
|
||||
func reverseElems(o ssElems) {
|
||||
for i := range make([]struct{}, len(o)/2) {
|
||||
other := len(o) - 1 - i
|
||||
o[i], o[other] = o[other], o[i]
|
||||
}
|
||||
}
|
||||
419
vendor/github.com/alicebob/miniredis/v2/stream.go
generated
vendored
@@ -1,419 +0,0 @@
|
||||
// Basic stream implementation.
|
||||
|
||||
package miniredis
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// a Stream is a list of entries, lowest ID (oldest) first, and all "groups".
|
||||
type streamKey struct {
|
||||
entries []StreamEntry
|
||||
groups map[string]*streamGroup
|
||||
lastAllocatedID string
|
||||
}
|
||||
|
||||
// a StreamEntry is an entry in a stream. The ID is always of the form
|
||||
// "123-123".
|
||||
// Values is an ordered list of key-value pairs.
|
||||
type StreamEntry struct {
|
||||
ID string
|
||||
Values []string
|
||||
}
|
||||
|
||||
type streamGroup struct {
|
||||
stream *streamKey
|
||||
lastID string
|
||||
pending []pendingEntry
|
||||
consumers map[string]*consumer
|
||||
}
|
||||
|
||||
type consumer struct {
|
||||
numPendingEntries int
|
||||
// TODO: "last seen" timestamp
|
||||
}
|
||||
|
||||
type pendingEntry struct {
|
||||
id string
|
||||
consumer string
|
||||
deliveryCount int
|
||||
lastDelivery time.Time
|
||||
}
|
||||
|
||||
func newStreamKey() *streamKey {
|
||||
return &streamKey{
|
||||
groups: map[string]*streamGroup{},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *streamKey) generateID(now time.Time) string {
|
||||
ts := uint64(now.UnixNano()) / 1_000_000
|
||||
|
||||
next := fmt.Sprintf("%d-%d", ts, 0)
|
||||
if s.lastAllocatedID != "" && streamCmp(s.lastAllocatedID, next) >= 0 {
|
||||
last, _ := parseStreamID(s.lastAllocatedID)
|
||||
next = fmt.Sprintf("%d-%d", last[0], last[1]+1)
|
||||
}
|
||||
|
||||
lastID := s.lastID()
|
||||
if streamCmp(lastID, next) >= 0 {
|
||||
last, _ := parseStreamID(lastID)
|
||||
next = fmt.Sprintf("%d-%d", last[0], last[1]+1)
|
||||
}
|
||||
|
||||
s.lastAllocatedID = next
|
||||
return next
|
||||
}
|
||||
|
||||
func (s *streamKey) lastID() string {
|
||||
if len(s.entries) == 0 {
|
||||
return "0-0"
|
||||
}
|
||||
|
||||
return s.entries[len(s.entries)-1].ID
|
||||
}
|
||||
|
||||
func (s *streamKey) copy() *streamKey {
|
||||
cpy := &streamKey{
|
||||
entries: s.entries,
|
||||
}
|
||||
groups := map[string]*streamGroup{}
|
||||
for k, v := range s.groups {
|
||||
gr := v.copy()
|
||||
gr.stream = cpy
|
||||
groups[k] = gr
|
||||
}
|
||||
cpy.groups = groups
|
||||
return cpy
|
||||
}
|
||||
|
||||
func parseStreamID(id string) ([2]uint64, error) {
|
||||
var (
|
||||
res [2]uint64
|
||||
err error
|
||||
)
|
||||
parts := strings.SplitN(id, "-", 2)
|
||||
res[0], err = strconv.ParseUint(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
return res, errors.New(msgInvalidStreamID)
|
||||
}
|
||||
if len(parts) == 2 {
|
||||
res[1], err = strconv.ParseUint(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
return res, errors.New(msgInvalidStreamID)
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// compares two stream IDs (of the full format: "123-123"). Returns: -1, 0, 1
|
||||
// The given IDs should be valid stream IDs.
|
||||
func streamCmp(a, b string) int {
|
||||
ap, _ := parseStreamID(a)
|
||||
bp, _ := parseStreamID(b)
|
||||
|
||||
switch {
|
||||
case ap[0] < bp[0]:
|
||||
return -1
|
||||
case ap[0] > bp[0]:
|
||||
return 1
|
||||
case ap[1] < bp[1]:
|
||||
return -1
|
||||
case ap[1] > bp[1]:
|
||||
return 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
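
// Illustrative sketch (not part of the original vendored file): worked
// examples of streamCmp ordering. The timestamp part is compared first, then
// the sequence part.
func exampleStreamCmp() {
	_ = streamCmp("5-1", "5-2")   // -1: same timestamp, lower sequence
	_ = streamCmp("10-0", "9-99") //  1: higher timestamp wins
	_ = streamCmp("7-3", "7-3")   //  0: equal IDs
}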
|
||||
|
||||
// formatStreamID normalizes a partial id ("42") into a full one ("42-0")
|
||||
func formatStreamID(id string) (string, error) {
|
||||
var ts [2]uint64
|
||||
parts := strings.SplitN(id, "-", 2)
|
||||
|
||||
if len(parts) > 0 {
|
||||
p, err := strconv.ParseUint(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
return "", errInvalidEntryID
|
||||
}
|
||||
ts[0] = p
|
||||
}
|
||||
if len(parts) > 1 {
|
||||
p, err := strconv.ParseUint(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
return "", errInvalidEntryID
|
||||
}
|
||||
ts[1] = p
|
||||
}
|
||||
return fmt.Sprintf("%d-%d", ts[0], ts[1]), nil
|
||||
}
|
||||
|
||||
func formatStreamRangeBound(id string, start bool, reverse bool) (string, error) {
|
||||
if id == "-" {
|
||||
return "0-0", nil
|
||||
}
|
||||
|
||||
if id == "+" {
|
||||
return fmt.Sprintf("%d-%d", uint64(math.MaxUint64), uint64(math.MaxUint64)), nil
|
||||
}
|
||||
|
||||
if id == "0" {
|
||||
return "0-0", nil
|
||||
}
|
||||
|
||||
parts := strings.Split(id, "-")
|
||||
if len(parts) == 2 {
|
||||
return formatStreamID(id)
|
||||
}
|
||||
|
||||
// Incomplete IDs case
|
||||
ts, err := strconv.ParseUint(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
return "", errInvalidEntryID
|
||||
}
|
||||
|
||||
if (!start && !reverse) || (start && reverse) {
|
||||
return fmt.Sprintf("%d-%d", ts, uint64(math.MaxUint64)), nil
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%d-%d", ts, 0), nil
|
||||
}
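
// For example, "-" and "+" expand to the smallest and largest possible IDs, and
// an incomplete bound such as "5" expands to "5-0" or "5-18446744073709551615"
// depending on whether it is the start or the end of the range and whether the
// range is reversed.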

func reversedStreamEntries(o []StreamEntry) []StreamEntry {
	newStream := make([]StreamEntry, len(o))
	for i, e := range o {
		newStream[len(o)-i-1] = e
	}
	return newStream
}

func (s *streamKey) createGroup(group, id string) error {
	if _, ok := s.groups[group]; ok {
		return errors.New("BUSYGROUP Consumer Group name already exists")
	}

	if id == "$" {
		id = s.lastID()
	}
	s.groups[group] = &streamGroup{
		stream:    s,
		lastID:    id,
		consumers: map[string]*consumer{},
	}
	return nil
}

// add adds an entry to a stream. Returns the new entry ID.
// If id is empty or "*" the ID will be generated automatically.
// `values` should have an even length.
func (s *streamKey) add(entryID string, values []string, now time.Time) (string, error) {
	if entryID == "" || entryID == "*" {
		entryID = s.generateID(now)
	}

	entryID, err := formatStreamID(entryID)
	if err != nil {
		return "", err
	}
	if entryID == "0-0" {
		return "", errors.New(msgStreamIDZero)
	}
	if streamCmp(s.lastID(), entryID) != -1 {
		return "", errors.New(msgStreamIDTooSmall)
	}

	s.entries = append(s.entries, StreamEntry{
		ID:     entryID,
		Values: values,
	})
	return entryID, nil
}
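
// For example, add("*", values, now) with now at 2022-01-01T00:00:00 UTC yields
// an ID like "1640995200000-0", and a second add within the same millisecond
// yields "1640995200000-1"; an explicit ID that is not strictly greater than
// lastID() is rejected with msgStreamIDTooSmall.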

func (s *streamKey) trim(n int) {
	if len(s.entries) > n {
		s.entries = s.entries[len(s.entries)-n:]
	}
}

// all entries after "id"
func (s *streamKey) after(id string) []StreamEntry {
	pos := sort.Search(len(s.entries), func(i int) bool {
		return streamCmp(id, s.entries[i].ID) < 0
	})
	return s.entries[pos:]
}

// get a stream entry by ID
// Also returns the position in the entries slice, if found.
func (s *streamKey) get(id string) (int, *StreamEntry) {
	pos := sort.Search(len(s.entries), func(i int) bool {
		return streamCmp(id, s.entries[i].ID) <= 0
	})
	if len(s.entries) <= pos || s.entries[pos].ID != id {
		return 0, nil
	}
	return pos, &s.entries[pos]
}

func (g *streamGroup) readGroup(
	now time.Time,
	consumerID,
	id string,
	count int,
	noack bool,
) []StreamEntry {
	if id == ">" {
		// undelivered messages
		msgs := g.stream.after(g.lastID)
		if len(msgs) == 0 {
			return nil
		}

		if count > 0 && len(msgs) > count {
			msgs = msgs[:count]
		}

		if !noack {
			shouldAppend := len(g.pending) == 0
			for _, msg := range msgs {
				if !shouldAppend {
					shouldAppend = streamCmp(msg.ID, g.pending[len(g.pending)-1].id) == 1
				}

				var entry *pendingEntry
				if shouldAppend {
					g.pending = append(g.pending, pendingEntry{})
					entry = &g.pending[len(g.pending)-1]
				} else {
					var pos int
					pos, entry = g.searchPending(msg.ID)
					if entry == nil {
						g.pending = append(g.pending[:pos+1], g.pending[pos:]...)
						entry = &g.pending[pos]
					} else {
						g.consumers[entry.consumer].numPendingEntries--
					}
				}

				*entry = pendingEntry{
					id:            msg.ID,
					consumer:      consumerID,
					deliveryCount: 1,
					lastDelivery:  now,
				}
			}
		}
		if _, ok := g.consumers[consumerID]; !ok {
			g.consumers[consumerID] = &consumer{}
		}
		g.consumers[consumerID].numPendingEntries += len(msgs)
		g.lastID = msgs[len(msgs)-1].ID
		return msgs
	}

	// re-deliver messages from the pending list.
	// con := gr.consumers[consumerID]
	msgs := g.pendingAfter(id)
	var res []StreamEntry
	for i, p := range msgs {
		if p.consumer != consumerID {
			continue
		}
		_, entry := g.stream.get(p.id)
		// not found. Weird?
		if entry == nil {
			continue
		}
		p.deliveryCount += 1
		p.lastDelivery = now
		msgs[i] = p
		res = append(res, *entry)
	}
	return res
}
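
// For example, readGroup(now, "c1", ">", 10, false) delivers up to 10 entries
// the group has not seen yet and records them in the pending entries list for
// consumer "c1", while readGroup(now, "c1", "0", 10, false) re-delivers entries
// already pending for "c1" and bumps their delivery count.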

func (g *streamGroup) searchPending(id string) (int, *pendingEntry) {
	pos := sort.Search(len(g.pending), func(i int) bool {
		return streamCmp(id, g.pending[i].id) <= 0
	})
	if pos >= len(g.pending) || g.pending[pos].id != id {
		return pos, nil
	}
	return pos, &g.pending[pos]
}

func (g *streamGroup) ack(ids []string) (int, error) {
	count := 0
	for _, id := range ids {
		if _, err := parseStreamID(id); err != nil {
			return 0, errors.New(msgInvalidStreamID)
		}

		pos, entry := g.searchPending(id)
		if entry == nil {
			continue
		}

		consumer := g.consumers[entry.consumer]
		consumer.numPendingEntries--

		g.pending = append(g.pending[:pos], g.pending[pos+1:]...)
		count++
	}
	return count, nil
}
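
// For example, ack([]string{"5-1", "5-2"}) removes those entries from the
// pending list and decrements the owning consumers' pending counters; IDs that
// are valid but not pending are skipped, and a malformed ID aborts with
// msgInvalidStreamID.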

func (s *streamKey) delete(ids []string) (int, error) {
	count := 0
	for _, id := range ids {
		if _, err := parseStreamID(id); err != nil {
			return 0, errors.New(msgInvalidStreamID)
		}

		i, entry := s.get(id)
		if entry == nil {
			continue
		}

		s.entries = append(s.entries[:i], s.entries[i+1:]...)
		count++
	}
	return count, nil
}

func (g *streamGroup) pendingAfter(id string) []pendingEntry {
	pos := sort.Search(len(g.pending), func(i int) bool {
		return streamCmp(id, g.pending[i].id) < 0
	})
	return g.pending[pos:]
}

func (g *streamGroup) pendingCount(consumer string) int {
	n := 0
	for _, p := range g.pending {
		if p.consumer == consumer {
			n++
		}
	}
	return n
}

func (g *streamGroup) copy() *streamGroup {
	cns := map[string]*consumer{}
	for k, v := range g.consumers {
		c := *v
		cns[k] = &c
	}
	return &streamGroup{
		// don't copy stream
		lastID:    g.lastID,
		pending:   g.pending,
		consumers: cns,
	}
}

21 vendor/github.com/asdine/storm/LICENSE generated vendored
@@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) [2017] [Asdine El Hrychy]
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
35 vendor/github.com/asdine/storm/codec/gob/gob.go generated vendored
@@ -1,35 +0,0 @@
|
||||
// Package gob contains a codec to encode and decode entities in Gob format
|
||||
package gob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
)
|
||||
|
||||
const name = "gob"
|
||||
|
||||
// Codec serializing objects using the gob package.
|
||||
// See https://golang.org/pkg/encoding/gob/
|
||||
var Codec = new(gobCodec)
|
||||
|
||||
type gobCodec int
|
||||
|
||||
func (c gobCodec) Marshal(v interface{}) ([]byte, error) {
|
||||
var b bytes.Buffer
|
||||
enc := gob.NewEncoder(&b)
|
||||
err := enc.Encode(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func (c gobCodec) Unmarshal(b []byte, v interface{}) error {
|
||||
r := bytes.NewReader(b)
|
||||
dec := gob.NewDecoder(r)
|
||||
return dec.Decode(v)
|
||||
}
|
||||
|
||||
func (c gobCodec) Name() string {
|
||||
return name
|
||||
}
|
||||
32 vendor/github.com/asdine/storm/v3/.gitignore generated vendored
@@ -1,32 +0,0 @@
|
||||
# IDE
|
||||
.idea/
|
||||
.vscode/
|
||||
*.iml
|
||||
|
||||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
# Golang vendor folder
|
||||
/vendor/
|
||||
19 vendor/github.com/asdine/storm/v3/.travis.yml generated vendored
@@ -1,19 +0,0 @@
|
||||
language: go
|
||||
|
||||
before_install:
|
||||
- go get github.com/stretchr/testify
|
||||
|
||||
env: GO111MODULE=on
|
||||
|
||||
go:
|
||||
- "1.13.x"
|
||||
- "1.14.x"
|
||||
- tip
|
||||
|
||||
matrix:
|
||||
allow_failures:
|
||||
- go: tip
|
||||
|
||||
script:
|
||||
- go mod vendor
|
||||
- go test -mod vendor -race -v ./...
|
||||
21 vendor/github.com/asdine/storm/v3/LICENSE generated vendored
@@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) [2017] [Asdine El Hrychy]
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
643 vendor/github.com/asdine/storm/v3/README.md generated vendored
@@ -1,643 +0,0 @@
|
||||
# Storm
|
||||
|
||||
[](https://travis-ci.org/asdine/storm)
|
||||
[](https://godoc.org/github.com/asdine/storm)
|
||||
|
||||
Storm is a simple and powerful toolkit for [BoltDB](https://github.com/coreos/bbolt). Basically, Storm provides indexes, a wide range of methods to store and fetch data, an advanced query system, and much more.
|
||||
|
||||
In addition to the examples below, see also the [examples in the GoDoc](https://godoc.org/github.com/asdine/storm#pkg-examples).
|
||||
|
||||
_For extended queries and support for [Badger](https://github.com/dgraph-io/badger), see also [Genji](https://github.com/asdine/genji)_
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Getting Started](#getting-started)
|
||||
- [Import Storm](#import-storm)
|
||||
- [Open a database](#open-a-database)
|
||||
- [Simple CRUD system](#simple-crud-system)
|
||||
- [Declare your structures](#declare-your-structures)
|
||||
- [Save your object](#save-your-object)
|
||||
- [Auto Increment](#auto-increment)
|
||||
- [Simple queries](#simple-queries)
|
||||
- [Fetch one object](#fetch-one-object)
|
||||
- [Fetch multiple objects](#fetch-multiple-objects)
|
||||
- [Fetch all objects](#fetch-all-objects)
|
||||
- [Fetch all objects sorted by index](#fetch-all-objects-sorted-by-index)
|
||||
- [Fetch a range of objects](#fetch-a-range-of-objects)
|
||||
- [Fetch objects by prefix](#fetch-objects-by-prefix)
|
||||
- [Skip, Limit and Reverse](#skip-limit-and-reverse)
|
||||
- [Delete an object](#delete-an-object)
|
||||
- [Update an object](#update-an-object)
|
||||
- [Initialize buckets and indexes before saving an object](#initialize-buckets-and-indexes-before-saving-an-object)
|
||||
- [Drop a bucket](#drop-a-bucket)
|
||||
- [Re-index a bucket](#re-index-a-bucket)
|
||||
- [Advanced queries](#advanced-queries)
|
||||
- [Transactions](#transactions)
|
||||
- [Options](#options)
|
||||
- [BoltOptions](#boltoptions)
|
||||
- [MarshalUnmarshaler](#marshalunmarshaler)
|
||||
- [Provided Codecs](#provided-codecs)
|
||||
- [Use existing Bolt connection](#use-existing-bolt-connection)
|
||||
- [Batch mode](#batch-mode)
|
||||
- [Nodes and nested buckets](#nodes-and-nested-buckets)
|
||||
- [Node options](#node-options)
|
||||
- [Simple Key/Value store](#simple-keyvalue-store)
|
||||
- [BoltDB](#boltdb)
|
||||
- [License](#license)
|
||||
- [Credits](#credits)
|
||||
|
||||
## Getting Started
|
||||
|
||||
```bash
|
||||
GO111MODULE=on go get -u github.com/asdine/storm/v3
|
||||
```
|
||||
|
||||
## Import Storm
|
||||
|
||||
```go
|
||||
import "github.com/asdine/storm/v3"
|
||||
```
|
||||
|
||||
## Open a database
|
||||
|
||||
Quick way of opening a database
|
||||
|
||||
```go
|
||||
db, err := storm.Open("my.db")
|
||||
|
||||
defer db.Close()
|
||||
```
|
||||
|
||||
`Open` can receive multiple options to customize the way it behaves. See [Options](#options) below
|
||||
|
||||
## Simple CRUD system
|
||||
|
||||
### Declare your structures
|
||||
|
||||
```go
|
||||
type User struct {
|
||||
ID int // primary key
|
||||
Group string `storm:"index"` // this field will be indexed
|
||||
Email string `storm:"unique"` // this field will be indexed with a unique constraint
|
||||
Name string // this field will not be indexed
|
||||
Age int `storm:"index"`
|
||||
}
|
||||
```
|
||||
|
||||
The primary key can be of any type as long as it is not a zero value. Storm will search for the tag `id`, if not present Storm will search for a field named `ID`.
|
||||
|
||||
```go
|
||||
type User struct {
|
||||
ThePrimaryKey string `storm:"id"`// primary key
|
||||
Group string `storm:"index"` // this field will be indexed
|
||||
Email string `storm:"unique"` // this field will be indexed with a unique constraint
|
||||
Name string // this field will not be indexed
|
||||
}
|
||||
```
|
||||
|
||||
Storm handles tags in nested structures with the `inline` tag
|
||||
|
||||
```go
|
||||
type Base struct {
|
||||
Ident bson.ObjectId `storm:"id"`
|
||||
}
|
||||
|
||||
type User struct {
|
||||
Base `storm:"inline"`
|
||||
Group string `storm:"index"`
|
||||
Email string `storm:"unique"`
|
||||
Name string
|
||||
CreatedAt time.Time `storm:"index"`
|
||||
}
|
||||
```
|
||||
|
||||
### Save your object
|
||||
|
||||
```go
|
||||
user := User{
|
||||
ID: 10,
|
||||
Group: "staff",
|
||||
Email: "john@provider.com",
|
||||
Name: "John",
|
||||
Age: 21,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
err := db.Save(&user)
|
||||
// err == nil
|
||||
|
||||
user.ID++
|
||||
err = db.Save(&user)
|
||||
// err == storm.ErrAlreadyExists
|
||||
```
|
||||
|
||||
That's it.
|
||||
|
||||
`Save` creates or updates all the required indexes and buckets, checks the unique constraints and saves the object to the store.
|
||||
|
||||
#### Auto Increment
|
||||
|
||||
Storm can auto increment integer values so you don't have to worry about that when saving your objects. Also, the new value is automatically inserted in your field.
|
||||
|
||||
```go
|
||||
|
||||
type Product struct {
|
||||
Pk int `storm:"id,increment"` // primary key with auto increment
|
||||
Name string
|
||||
IntegerField uint64 `storm:"increment"`
|
||||
IndexedIntegerField uint32 `storm:"index,increment"`
|
||||
UniqueIntegerField int16 `storm:"unique,increment=100"` // the starting value can be set
|
||||
}
|
||||
|
||||
p := Product{Name: "Vacuum Cleaner"}
|
||||
|
||||
fmt.Println(p.Pk)
|
||||
fmt.Println(p.IntegerField)
|
||||
fmt.Println(p.IndexedIntegerField)
|
||||
fmt.Println(p.UniqueIntegerField)
|
||||
// 0
|
||||
// 0
|
||||
// 0
|
||||
// 0
|
||||
|
||||
_ = db.Save(&p)
|
||||
|
||||
fmt.Println(p.Pk)
|
||||
fmt.Println(p.IntegerField)
|
||||
fmt.Println(p.IndexedIntegerField)
|
||||
fmt.Println(p.UniqueIntegerField)
|
||||
// 1
|
||||
// 1
|
||||
// 1
|
||||
// 100
|
||||
|
||||
```
|
||||
|
||||
### Simple queries
|
||||
|
||||
Any object can be fetched, indexed or not. Storm uses indexes when available, otherwise it uses the [query system](#advanced-queries).
|
||||
|
||||
#### Fetch one object
|
||||
|
||||
```go
|
||||
var user User
|
||||
err := db.One("Email", "john@provider.com", &user)
|
||||
// err == nil
|
||||
|
||||
err = db.One("Name", "John", &user)
|
||||
// err == nil
|
||||
|
||||
err = db.One("Name", "Jack", &user)
|
||||
// err == storm.ErrNotFound
|
||||
```
|
||||
|
||||
#### Fetch multiple objects
|
||||
|
||||
```go
|
||||
var users []User
|
||||
err := db.Find("Group", "staff", &users)
|
||||
```
|
||||
|
||||
#### Fetch all objects
|
||||
|
||||
```go
|
||||
var users []User
|
||||
err := db.All(&users)
|
||||
```
|
||||
|
||||
#### Fetch all objects sorted by index
|
||||
|
||||
```go
|
||||
var users []User
|
||||
err := db.AllByIndex("CreatedAt", &users)
|
||||
```
|
||||
|
||||
#### Fetch a range of objects
|
||||
|
||||
```go
|
||||
var users []User
|
||||
err := db.Range("Age", 10, 21, &users)
|
||||
```
|
||||
|
||||
#### Fetch objects by prefix
|
||||
|
||||
```go
|
||||
var users []User
|
||||
err := db.Prefix("Name", "Jo", &users)
|
||||
```
|
||||
|
||||
#### Skip, Limit and Reverse
|
||||
|
||||
```go
|
||||
var users []User
|
||||
err := db.Find("Group", "staff", &users, storm.Skip(10))
|
||||
err = db.Find("Group", "staff", &users, storm.Limit(10))
|
||||
err = db.Find("Group", "staff", &users, storm.Reverse())
|
||||
err = db.Find("Group", "staff", &users, storm.Limit(10), storm.Skip(10), storm.Reverse())
|
||||
|
||||
err = db.All(&users, storm.Limit(10), storm.Skip(10), storm.Reverse())
|
||||
err = db.AllByIndex("CreatedAt", &users, storm.Limit(10), storm.Skip(10), storm.Reverse())
|
||||
err = db.Range("Age", 10, 21, &users, storm.Limit(10), storm.Skip(10), storm.Reverse())
|
||||
```
|
||||
|
||||
#### Delete an object
|
||||
|
||||
```go
|
||||
err := db.DeleteStruct(&user)
|
||||
```
|
||||
|
||||
#### Update an object
|
||||
|
||||
```go
|
||||
// Update multiple fields
|
||||
err := db.Update(&User{ID: 10, Name: "Jack", Age: 45})
|
||||
|
||||
// Update a single field
|
||||
err := db.UpdateField(&User{ID: 10}, "Age", 0)
|
||||
```
|
||||
|
||||
#### Initialize buckets and indexes before saving an object
|
||||
|
||||
```go
|
||||
err := db.Init(&User{})
|
||||
```
|
||||
|
||||
Useful when starting your application
|
||||
|
||||
#### Drop a bucket
|
||||
|
||||
Using the struct
|
||||
|
||||
```go
|
||||
err := db.Drop(&User)
|
||||
```
|
||||
|
||||
Using the bucket name
|
||||
|
||||
```go
|
||||
err := db.Drop("User")
|
||||
```
|
||||
|
||||
#### Re-index a bucket
|
||||
|
||||
```go
|
||||
err := db.ReIndex(&User{})
|
||||
```
|
||||
|
||||
Useful when the structure has changed
|
||||
|
||||
### Advanced queries
|
||||
|
||||
For more complex queries, you can use the `Select` method.
|
||||
`Select` takes any number of [`Matcher`](https://godoc.org/github.com/asdine/storm/q#Matcher) from the [`q`](https://godoc.org/github.com/asdine/storm/q) package.
|
||||
|
||||
Here are some common Matchers:
|
||||
|
||||
```go
|
||||
// Equality
|
||||
q.Eq("Name", "John")
|
||||
|
||||
// Strictly greater than
|
||||
q.Gt("Age", 7)
|
||||
|
||||
// Lesser than or equal to
|
||||
q.Lte("Age", 77)
|
||||
|
||||
// Regex with name that starts with the letter D
|
||||
q.Re("Name", "^D")
|
||||
|
||||
// In the given slice of values
|
||||
q.In("Group", []string{"Staff", "Admin"})
|
||||
|
||||
// Comparing fields
|
||||
q.EqF("FieldName", "SecondFieldName")
|
||||
q.LtF("FieldName", "SecondFieldName")
|
||||
q.GtF("FieldName", "SecondFieldName")
|
||||
q.LteF("FieldName", "SecondFieldName")
|
||||
q.GteF("FieldName", "SecondFieldName")
|
||||
```
|
||||
|
||||
Matchers can also be combined with `And`, `Or` and `Not`:
|
||||
|
||||
```go
|
||||
|
||||
// Match if all match
|
||||
q.And(
|
||||
q.Gt("Age", 7),
|
||||
q.Re("Name", "^D")
|
||||
)
|
||||
|
||||
// Match if one matches
|
||||
q.Or(
|
||||
q.Re("Name", "^A"),
|
||||
q.Not(
|
||||
q.Re("Name", "^B")
|
||||
),
|
||||
q.Re("Name", "^C"),
|
||||
q.In("Group", []string{"Staff", "Admin"}),
|
||||
q.And(
|
||||
q.StrictEq("Password", []byte(password)),
|
||||
q.Eq("Registered", true)
|
||||
)
|
||||
)
|
||||
```
|
||||
|
||||
You can find the complete list in the [documentation](https://godoc.org/github.com/asdine/storm/q#Matcher).
|
||||
|
||||
`Select` takes any number of matchers and wraps them into a `q.And()` so it's not necessary to specify it. It returns a [`Query`](https://godoc.org/github.com/asdine/storm#Query) type.
|
||||
|
||||
```go
|
||||
query := db.Select(q.Gte("Age", 7), q.Lte("Age", 77))
|
||||
```
|
||||
|
||||
The `Query` type contains methods to filter and order the records.
|
||||
|
||||
```go
|
||||
// Limit
|
||||
query = query.Limit(10)
|
||||
|
||||
// Skip
|
||||
query = query.Skip(20)
|
||||
|
||||
// Calls can also be chained
|
||||
query = query.Limit(10).Skip(20).OrderBy("Age").Reverse()
|
||||
```
|
||||
|
||||
But also to specify how to fetch them.
|
||||
|
||||
```go
|
||||
var users []User
|
||||
err = query.Find(&users)
|
||||
|
||||
var user User
|
||||
err = query.First(&user)
|
||||
```
|
||||
|
||||
Examples with `Select`:
|
||||
|
||||
```go
|
||||
// Find all users with an ID between 10 and 100
|
||||
err = db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Find(&users)
|
||||
|
||||
// Nested matchers
|
||||
err = db.Select(q.Or(
|
||||
q.Gt("ID", 50),
|
||||
q.Lt("Age", 21),
|
||||
q.And(
|
||||
q.Eq("Group", "admin"),
|
||||
q.Gte("Age", 21),
|
||||
),
|
||||
)).Find(&users)
|
||||
|
||||
query := db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Limit(10).Skip(5).Reverse().OrderBy("Age", "Name")
|
||||
|
||||
// Find multiple records
|
||||
err = query.Find(&users)
|
||||
// or
|
||||
err = db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Limit(10).Skip(5).Reverse().OrderBy("Age", "Name").Find(&users)
|
||||
|
||||
// Find first record
|
||||
err = query.First(&user)
|
||||
// or
|
||||
err = db.Select(q.Gte("ID", 10), q.Lte("ID", 100)).Limit(10).Skip(5).Reverse().OrderBy("Age", "Name").First(&user)
|
||||
|
||||
// Delete all matching records
|
||||
err = query.Delete(new(User))
|
||||
|
||||
// Fetching records one by one (useful when the bucket contains a lot of records)
|
||||
query = db.Select(q.Gte("ID", 10),q.Lte("ID", 100)).OrderBy("Age", "Name")
|
||||
|
||||
err = query.Each(new(User), func(record interface{}) error {
	u := record.(*User)
	...
	return nil
})
|
||||
```
|
||||
|
||||
See the [documentation](https://godoc.org/github.com/asdine/storm#Query) for a complete list of methods.
|
||||
|
||||
### Transactions
|
||||
|
||||
```go
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
accountA.Amount -= 100
|
||||
accountB.Amount += 100
|
||||
|
||||
err = tx.Save(accountA)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = tx.Save(accountB)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return tx.Commit()
|
||||
```
|
||||
|
||||
### Options
|
||||
|
||||
Storm options are functions that can be passed when constructing your Storm instance. You can pass any number of options.
|
||||
|
||||
#### BoltOptions
|
||||
|
||||
By default, Storm opens a database with the mode `0600` and a timeout of one second.
|
||||
You can change this behavior by using `BoltOptions`
|
||||
|
||||
```go
|
||||
db, err := storm.Open("my.db", storm.BoltOptions(0600, &bolt.Options{Timeout: 1 * time.Second}))
|
||||
```
|
||||
|
||||
#### MarshalUnmarshaler
|
||||
|
||||
To store the data in BoltDB, Storm marshals it in JSON by default. If you wish to change this behavior you can pass a codec that implements [`codec.MarshalUnmarshaler`](https://godoc.org/github.com/asdine/storm/codec#MarshalUnmarshaler) via the [`storm.Codec`](https://godoc.org/github.com/asdine/storm#Codec) option:
|
||||
|
||||
```go
|
||||
db := storm.Open("my.db", storm.Codec(myCodec))
|
||||
```
|
||||
|
||||
##### Provided Codecs
|
||||
|
||||
You can easily implement your own `MarshalUnmarshaler`, but Storm comes with built-in support for [JSON](https://godoc.org/github.com/asdine/storm/codec/json) (default), [GOB](https://godoc.org/github.com/asdine/storm/codec/gob), [Sereal](https://godoc.org/github.com/asdine/storm/codec/sereal), [Protocol Buffers](https://godoc.org/github.com/asdine/storm/codec/protobuf) and [MessagePack](https://godoc.org/github.com/asdine/storm/codec/msgpack).
|
||||
|
||||
These can be used by importing the relevant package and using that codec to configure Storm. The example below shows all variants (without proper error handling):
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/asdine/storm/v3"
|
||||
"github.com/asdine/storm/v3/codec/gob"
|
||||
"github.com/asdine/storm/v3/codec/json"
|
||||
"github.com/asdine/storm/v3/codec/sereal"
|
||||
"github.com/asdine/storm/v3/codec/protobuf"
|
||||
"github.com/asdine/storm/v3/codec/msgpack"
|
||||
)
|
||||
|
||||
var gobDb, _ = storm.Open("gob.db", storm.Codec(gob.Codec))
|
||||
var jsonDb, _ = storm.Open("json.db", storm.Codec(json.Codec))
|
||||
var serealDb, _ = storm.Open("sereal.db", storm.Codec(sereal.Codec))
|
||||
var protobufDb, _ = storm.Open("protobuf.db", storm.Codec(protobuf.Codec))
|
||||
var msgpackDb, _ = storm.Open("msgpack.db", storm.Codec(msgpack.Codec))
|
||||
```
|
||||
|
||||
**Tip**: Adding Storm tags to generated Protobuf files can be tricky. A good solution is to use [this tool](https://github.com/favadi/protoc-go-inject-tag) to inject the tags during the compilation.
|
||||
|
||||
#### Use existing Bolt connection
|
||||
|
||||
You can use an existing connection and pass it to Storm
|
||||
|
||||
```go
|
||||
bDB, _ := bolt.Open(filepath.Join(dir, "bolt.db"), 0600, &bolt.Options{Timeout: 10 * time.Second})
|
||||
db := storm.Open("my.db", storm.UseDB(bDB))
|
||||
```
|
||||
|
||||
#### Batch mode
|
||||
|
||||
Batch mode can be enabled to speed up concurrent writes (see [Batch read-write transactions](https://github.com/coreos/bbolt#batch-read-write-transactions))
|
||||
|
||||
```go
|
||||
db := storm.Open("my.db", storm.Batch())
|
||||
```
|
||||
|
||||
## Nodes and nested buckets
|
||||
|
||||
Storm takes advantage of BoltDB's nested buckets feature by using `storm.Node`.
|
||||
A `storm.Node` is the underlying object used by `storm.DB` to manipulate a bucket.
|
||||
To create a nested bucket and use the same API as `storm.DB`, you can use the `DB.From` method.
|
||||
|
||||
```go
|
||||
repo := db.From("repo")
|
||||
|
||||
err := repo.Save(&Issue{
|
||||
Title: "I want more features",
|
||||
Author: user.ID,
|
||||
})
|
||||
|
||||
err = repo.Save(newRelease("0.10"))
|
||||
|
||||
var issues []Issue
|
||||
err = repo.Find("Author", user.ID, &issues)
|
||||
|
||||
var release Release
|
||||
err = repo.One("Tag", "0.10", &release)
|
||||
```
|
||||
|
||||
You can also chain the nodes to create a hierarchy
|
||||
|
||||
```go
|
||||
chars := db.From("characters")
|
||||
heroes := chars.From("heroes")
|
||||
enemies := chars.From("enemies")
|
||||
|
||||
items := db.From("items")
|
||||
potions := items.From("consumables").From("medicine").From("potions")
|
||||
```
|
||||
|
||||
You can even pass the entire hierarchy as arguments to `From`:
|
||||
|
||||
```go
|
||||
privateNotes := db.From("notes", "private")
|
||||
workNotes := db.From("notes", "work")
|
||||
```
|
||||
|
||||
### Node options
|
||||
|
||||
A Node can also be configured. Activating an option on a Node creates a copy, so a Node is always thread-safe.
|
||||
|
||||
```go
|
||||
n := db.From("my-node")
|
||||
```
|
||||
|
||||
Give a bolt.Tx transaction to the Node
|
||||
|
||||
```go
|
||||
n = n.WithTransaction(tx)
|
||||
```
|
||||
|
||||
Enable batch mode
|
||||
|
||||
```go
|
||||
n = n.WithBatch(true)
|
||||
```
|
||||
|
||||
Use a Codec
|
||||
|
||||
```go
|
||||
n = n.WithCodec(gob.Codec)
|
||||
```
|
||||
|
||||
## Simple Key/Value store
|
||||
|
||||
Storm can be used as a simple, robust, key/value store that can store anything.
|
||||
The key and the value can be of any type as long as the key is not a zero value.
|
||||
|
||||
Saving data :
|
||||
|
||||
```go
|
||||
db.Set("logs", time.Now(), "I'm eating my breakfast man")
|
||||
db.Set("sessions", bson.NewObjectId(), &someUser)
|
||||
db.Set("weird storage", "754-3010", map[string]interface{}{
|
||||
"hair": "blonde",
|
||||
"likes": []string{"cheese", "star wars"},
|
||||
})
|
||||
```
|
||||
|
||||
Fetching data :
|
||||
|
||||
```go
|
||||
user := User{}
|
||||
db.Get("sessions", someObjectId, &user)
|
||||
|
||||
var details map[string]interface{}
|
||||
db.Get("weird storage", "754-3010", &details)
|
||||
|
||||
db.Get("sessions", someObjectId, &details)
|
||||
```
|
||||
|
||||
Deleting data :
|
||||
|
||||
```go
|
||||
db.Delete("sessions", someObjectId)
|
||||
db.Delete("weird storage", "754-3010")
|
||||
```
|
||||
|
||||
You can find other useful methods in the [documentation](https://godoc.org/github.com/asdine/storm#KeyValueStore).
|
||||
|
||||
## BoltDB
|
||||
|
||||
BoltDB is still easily accessible and can be used as usual
|
||||
|
||||
```go
|
||||
db.Bolt.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte("my bucket"))
|
||||
val := bucket.Get([]byte("any id"))
|
||||
fmt.Println(string(val))
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
A transaction can also be passed to Storm
|
||||
|
||||
```go
|
||||
db.Bolt.Update(func(tx *bolt.Tx) error {
|
||||
...
|
||||
dbx := db.WithTransaction(tx)
|
||||
err = dbx.Save(&user)
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
|
||||
## Credits
|
||||
|
||||
- [Asdine El Hrychy](https://github.com/asdine)
|
||||
- [Bjørn Erik Pedersen](https://github.com/bep)
|
||||
47 vendor/github.com/asdine/storm/v3/bucket.go generated vendored
@@ -1,47 +0,0 @@
|
||||
package storm
|
||||
|
||||
import bolt "go.etcd.io/bbolt"
|
||||
|
||||
// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
|
||||
// already exist.
|
||||
func (n *node) CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error) {
|
||||
var b *bolt.Bucket
|
||||
var err error
|
||||
|
||||
bucketNames := append(n.rootBucket, bucket)
|
||||
|
||||
for _, bucketName := range bucketNames {
|
||||
if b != nil {
|
||||
if b, err = b.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
} else {
|
||||
if b, err = tx.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// GetBucket returns the given bucket below the current node.
|
||||
func (n *node) GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket {
|
||||
var b *bolt.Bucket
|
||||
|
||||
bucketNames := append(n.rootBucket, children...)
|
||||
for _, bucketName := range bucketNames {
|
||||
if b != nil {
|
||||
if b = b.Bucket([]byte(bucketName)); b == nil {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
if b = tx.Bucket([]byte(bucketName)); b == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
1 vendor/github.com/asdine/storm/v3/codec/.gitignore generated vendored
@@ -1 +0,0 @@
|
||||
*.db
|
||||
11 vendor/github.com/asdine/storm/v3/codec/codec.go generated vendored
@@ -1,11 +0,0 @@
|
||||
// Package codec contains sub-packages with different codecs that can be used
|
||||
// to encode and decode entities in Storm.
|
||||
package codec
|
||||
|
||||
// MarshalUnmarshaler represents a codec used to marshal and unmarshal entities.
|
||||
type MarshalUnmarshaler interface {
|
||||
Marshal(v interface{}) ([]byte, error)
|
||||
Unmarshal(b []byte, v interface{}) error
|
||||
// name of this codec
|
||||
Name() string
|
||||
}
|
||||
25 vendor/github.com/asdine/storm/v3/codec/json/json.go generated vendored
@@ -1,25 +0,0 @@
|
||||
// Package json contains a codec to encode and decode entities in JSON format
|
||||
package json
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
const name = "json"
|
||||
|
||||
// Codec that encodes to and decodes from JSON.
|
||||
var Codec = new(jsonCodec)
|
||||
|
||||
type jsonCodec int
|
||||
|
||||
func (j jsonCodec) Marshal(v interface{}) ([]byte, error) {
|
||||
return json.Marshal(v)
|
||||
}
|
||||
|
||||
func (j jsonCodec) Unmarshal(b []byte, v interface{}) error {
|
||||
return json.Unmarshal(b, v)
|
||||
}
|
||||
|
||||
func (j jsonCodec) Name() string {
|
||||
return name
|
||||
}
|
||||
51 vendor/github.com/asdine/storm/v3/errors.go generated vendored
@@ -1,51 +0,0 @@
|
||||
package storm
|
||||
|
||||
import "errors"
|
||||
|
||||
// Errors
|
||||
var (
|
||||
// ErrNoID is returned when no ID field or id tag is found in the struct.
|
||||
ErrNoID = errors.New("missing struct tag id or ID field")
|
||||
|
||||
// ErrZeroID is returned when the ID field is a zero value.
|
||||
ErrZeroID = errors.New("id field must not be a zero value")
|
||||
|
||||
// ErrBadType is returned when a method receives an unexpected value type.
|
||||
ErrBadType = errors.New("provided data must be a struct or a pointer to struct")
|
||||
|
||||
// ErrAlreadyExists is returned when trying to set an existing value on a field that has a unique index.
|
||||
ErrAlreadyExists = errors.New("already exists")
|
||||
|
||||
// ErrNilParam is returned when the specified param is expected to be not nil.
|
||||
ErrNilParam = errors.New("param must not be nil")
|
||||
|
||||
// ErrUnknownTag is returned when an unexpected tag is specified.
|
||||
ErrUnknownTag = errors.New("unknown tag")
|
||||
|
||||
// ErrIdxNotFound is returned when the specified index is not found.
|
||||
ErrIdxNotFound = errors.New("index not found")
|
||||
|
||||
// ErrSlicePtrNeeded is returned when an unexpected value is given, instead of a pointer to slice.
|
||||
ErrSlicePtrNeeded = errors.New("provided target must be a pointer to slice")
|
||||
|
||||
// ErrStructPtrNeeded is returned when an unexpected value is given, instead of a pointer to struct.
|
||||
ErrStructPtrNeeded = errors.New("provided target must be a pointer to struct")
|
||||
|
||||
// ErrPtrNeeded is returned when an unexpected value is given, instead of a pointer.
|
||||
ErrPtrNeeded = errors.New("provided target must be a pointer to a valid variable")
|
||||
|
||||
// ErrNoName is returned when the specified struct has no name.
|
||||
ErrNoName = errors.New("provided target must have a name")
|
||||
|
||||
// ErrNotFound is returned when the specified record is not saved in the bucket.
|
||||
ErrNotFound = errors.New("not found")
|
||||
|
||||
// ErrNotInTransaction is returned when trying to rollback or commit when not in transaction.
|
||||
ErrNotInTransaction = errors.New("not in transaction")
|
||||
|
||||
// ErrIncompatibleValue is returned when trying to set a value with a different type than the chosen field
|
||||
ErrIncompatibleValue = errors.New("incompatible value")
|
||||
|
||||
// ErrDifferentCodec is returned when using a codec different than the first codec used with the bucket.
|
||||
ErrDifferentCodec = errors.New("the selected codec is incompatible with this bucket")
|
||||
)
|
||||
226 vendor/github.com/asdine/storm/v3/extract.go generated vendored
@@ -1,226 +0,0 @@
|
||||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/asdine/storm/v3/index"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// Storm tags
|
||||
const (
|
||||
tagID = "id"
|
||||
tagIdx = "index"
|
||||
tagUniqueIdx = "unique"
|
||||
tagInline = "inline"
|
||||
tagIncrement = "increment"
|
||||
indexPrefix = "__storm_index_"
|
||||
)
|
||||
|
||||
type fieldConfig struct {
|
||||
Name string
|
||||
Index string
|
||||
IsZero bool
|
||||
IsID bool
|
||||
Increment bool
|
||||
IncrementStart int64
|
||||
IsInteger bool
|
||||
Value *reflect.Value
|
||||
ForceUpdate bool
|
||||
}
|
||||
|
||||
// structConfig is a structure gathering all the relevant information about a model
|
||||
type structConfig struct {
|
||||
Name string
|
||||
Fields map[string]*fieldConfig
|
||||
ID *fieldConfig
|
||||
}
|
||||
|
||||
func extract(s *reflect.Value, mi ...*structConfig) (*structConfig, error) {
|
||||
if s.Kind() == reflect.Ptr {
|
||||
e := s.Elem()
|
||||
s = &e
|
||||
}
|
||||
if s.Kind() != reflect.Struct {
|
||||
return nil, ErrBadType
|
||||
}
|
||||
|
||||
typ := s.Type()
|
||||
|
||||
var child bool
|
||||
|
||||
var m *structConfig
|
||||
if len(mi) > 0 {
|
||||
m = mi[0]
|
||||
child = true
|
||||
} else {
|
||||
m = &structConfig{}
|
||||
m.Fields = make(map[string]*fieldConfig)
|
||||
}
|
||||
|
||||
if m.Name == "" {
|
||||
m.Name = typ.Name()
|
||||
}
|
||||
|
||||
numFields := s.NumField()
|
||||
for i := 0; i < numFields; i++ {
|
||||
field := typ.Field(i)
|
||||
value := s.Field(i)
|
||||
|
||||
if field.PkgPath != "" {
|
||||
continue
|
||||
}
|
||||
|
||||
err := extractField(&value, &field, m, child)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if child {
|
||||
return m, nil
|
||||
}
|
||||
|
||||
if m.ID == nil {
|
||||
return nil, ErrNoID
|
||||
}
|
||||
|
||||
if m.Name == "" {
|
||||
return nil, ErrNoName
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func extractField(value *reflect.Value, field *reflect.StructField, m *structConfig, isChild bool) error {
|
||||
var f *fieldConfig
|
||||
var err error
|
||||
|
||||
tag := field.Tag.Get("storm")
|
||||
if tag != "" {
|
||||
f = &fieldConfig{
|
||||
Name: field.Name,
|
||||
IsZero: isZero(value),
|
||||
IsInteger: isInteger(value),
|
||||
Value: value,
|
||||
IncrementStart: 1,
|
||||
}
|
||||
|
||||
tags := strings.Split(tag, ",")
|
||||
|
||||
for _, tag := range tags {
|
||||
switch tag {
|
||||
case "id":
|
||||
f.IsID = true
|
||||
f.Index = tagUniqueIdx
|
||||
case tagUniqueIdx, tagIdx:
|
||||
f.Index = tag
|
||||
case tagInline:
|
||||
if value.Kind() == reflect.Ptr {
|
||||
e := value.Elem()
|
||||
value = &e
|
||||
}
|
||||
if value.Kind() == reflect.Struct {
|
||||
a := value.Addr()
|
||||
_, err := extract(&a, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// we don't need to save this field
|
||||
return nil
|
||||
default:
|
||||
if strings.HasPrefix(tag, tagIncrement) {
|
||||
f.Increment = true
|
||||
parts := strings.Split(tag, "=")
|
||||
if parts[0] != tagIncrement {
|
||||
return ErrUnknownTag
|
||||
}
|
||||
if len(parts) > 1 {
|
||||
f.IncrementStart, err = strconv.ParseInt(parts[1], 0, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return ErrUnknownTag
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if _, ok := m.Fields[f.Name]; !ok || !isChild {
|
||||
m.Fields[f.Name] = f
|
||||
}
|
||||
}
|
||||
|
||||
if m.ID == nil && f != nil && f.IsID {
|
||||
m.ID = f
|
||||
}
|
||||
|
||||
// the field is named ID and no ID field has been detected before
|
||||
if m.ID == nil && field.Name == "ID" {
|
||||
if f == nil {
|
||||
f = &fieldConfig{
|
||||
Index: tagUniqueIdx,
|
||||
Name: field.Name,
|
||||
IsZero: isZero(value),
|
||||
IsInteger: isInteger(value),
|
||||
IsID: true,
|
||||
Value: value,
|
||||
IncrementStart: 1,
|
||||
}
|
||||
m.Fields[field.Name] = f
|
||||
}
|
||||
m.ID = f
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func extractSingleField(ref *reflect.Value, fieldName string) (*structConfig, error) {
|
||||
var cfg structConfig
|
||||
cfg.Fields = make(map[string]*fieldConfig)
|
||||
|
||||
f, ok := ref.Type().FieldByName(fieldName)
|
||||
if !ok || f.PkgPath != "" {
|
||||
return nil, fmt.Errorf("field %s not found", fieldName)
|
||||
}
|
||||
|
||||
v := ref.FieldByName(fieldName)
|
||||
err := extractField(&v, &f, &cfg, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
func getIndex(bucket *bolt.Bucket, idxKind string, fieldName string) (index.Index, error) {
|
||||
var idx index.Index
|
||||
var err error
|
||||
|
||||
switch idxKind {
|
||||
case tagUniqueIdx:
|
||||
idx, err = index.NewUniqueIndex(bucket, []byte(indexPrefix+fieldName))
|
||||
case tagIdx:
|
||||
idx, err = index.NewListIndex(bucket, []byte(indexPrefix+fieldName))
|
||||
default:
|
||||
err = ErrIdxNotFound
|
||||
}
|
||||
|
||||
return idx, err
|
||||
}
|
||||
|
||||
func isZero(v *reflect.Value) bool {
|
||||
zero := reflect.Zero(v.Type()).Interface()
|
||||
current := v.Interface()
|
||||
return reflect.DeepEqual(current, zero)
|
||||
}
|
||||
|
||||
func isInteger(v *reflect.Value) bool {
|
||||
kind := v.Kind()
|
||||
return v != nil && kind >= reflect.Int && kind <= reflect.Uint64
|
||||
}
|
||||
499 vendor/github.com/asdine/storm/v3/finder.go generated vendored
@@ -1,499 +0,0 @@
|
||||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/asdine/storm/v3/index"
|
||||
"github.com/asdine/storm/v3/q"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
// A Finder can fetch types from BoltDB.
|
||||
type Finder interface {
|
||||
// One returns one record by the specified index
|
||||
One(fieldName string, value interface{}, to interface{}) error
|
||||
|
||||
// Find returns one or more records by the specified index
|
||||
Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error
|
||||
|
||||
// AllByIndex gets all the records of a bucket that are indexed in the specified index
|
||||
AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error
|
||||
|
||||
// All gets all the records of a bucket.
|
||||
// If there are no records it returns no error and the 'to' parameter is set to an empty slice.
|
||||
All(to interface{}, options ...func(*index.Options)) error
|
||||
|
||||
// Select a list of records that match a list of matchers. Doesn't use indexes.
|
||||
Select(matchers ...q.Matcher) Query
|
||||
|
||||
// Range returns one or more records by the specified index within the specified range
|
||||
Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error
|
||||
|
||||
// Prefix returns one or more records whose given field starts with the specified prefix.
|
||||
Prefix(fieldName string, prefix string, to interface{}, options ...func(*index.Options)) error
|
||||
|
||||
// Count counts all the records of a bucket
|
||||
Count(data interface{}) (int, error)
|
||||
}
|
||||
|
||||
// One returns one record by the specified index
|
||||
func (n *node) One(fieldName string, value interface{}, to interface{}) error {
|
||||
sink, err := newFirstSink(n, to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucketName := sink.bucketName()
|
||||
if bucketName == "" {
|
||||
return ErrNoName
|
||||
}
|
||||
|
||||
if fieldName == "" {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
ref := reflect.Indirect(sink.ref)
|
||||
cfg, err := extractSingleField(&ref, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
field, ok := cfg.Fields[fieldName]
|
||||
if !ok || (!field.IsID && field.Index == "") {
|
||||
query := newQuery(n, q.StrictEq(fieldName, value))
|
||||
query.Limit(1)
|
||||
|
||||
if n.tx != nil {
|
||||
err = query.query(n.tx, sink)
|
||||
} else {
|
||||
err = n.s.Bolt.View(func(tx *bolt.Tx) error {
|
||||
return query.query(tx, sink)
|
||||
})
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return sink.flush()
|
||||
}
|
||||
|
||||
val, err := toBytes(value, n.codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return n.readTx(func(tx *bolt.Tx) error {
|
||||
return n.one(tx, bucketName, fieldName, cfg, to, val, field.IsID)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *node) one(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, to interface{}, val []byte, skipIndex bool) error {
|
||||
bucket := n.GetBucket(tx, bucketName)
|
||||
if bucket == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
var id []byte
|
||||
if !skipIndex {
|
||||
idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
|
||||
if err != nil {
|
||||
if err == index.ErrNotFound {
|
||||
return ErrNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
id = idx.Get(val)
|
||||
} else {
|
||||
id = val
|
||||
}
|
||||
|
||||
if id == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
raw := bucket.Get(id)
|
||||
if raw == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
return n.codec.Unmarshal(raw, to)
|
||||
}
|
||||
|
||||
// Find returns one or more records by the specified index
|
||||
func (n *node) Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error {
|
||||
sink, err := newListSink(n, to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bucketName := sink.bucketName()
|
||||
if bucketName == "" {
|
||||
return ErrNoName
|
||||
}
|
||||
|
||||
ref := reflect.Indirect(reflect.New(sink.elemType))
|
||||
cfg, err := extractSingleField(&ref, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := index.NewOptions()
|
||||
for _, fn := range options {
|
||||
fn(opts)
|
||||
}
|
||||
|
||||
field, ok := cfg.Fields[fieldName]
|
||||
if !ok || (!field.IsID && (field.Index == "" || value == nil)) {
|
||||
query := newQuery(n, q.Eq(fieldName, value))
|
||||
query.Skip(opts.Skip).Limit(opts.Limit)
|
||||
|
||||
if opts.Reverse {
|
||||
query.Reverse()
|
||||
}
|
||||
|
||||
err = n.readTx(func(tx *bolt.Tx) error {
|
||||
return query.query(tx, sink)
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return sink.flush()
|
||||
}
|
||||
|
||||
val, err := toBytes(value, n.codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return n.readTx(func(tx *bolt.Tx) error {
|
||||
return n.find(tx, bucketName, fieldName, cfg, sink, val, opts)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *node) find(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, sink *listSink, val []byte, opts *index.Options) error {
|
||||
bucket := n.GetBucket(tx, bucketName)
|
||||
if bucket == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
list, err := idx.All(val, opts)
|
||||
if err != nil {
|
||||
if err == index.ErrNotFound {
|
||||
return ErrNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
sink.results = reflect.MakeSlice(reflect.Indirect(sink.ref).Type(), len(list), len(list))
|
||||
|
||||
sorter := newSorter(n, sink)
|
||||
for i := range list {
|
||||
raw := bucket.Get(list[i])
|
||||
if raw == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
if _, err := sorter.filter(nil, bucket, list[i], raw); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return sorter.flush()
|
||||
}
|
||||
|
||||
// AllByIndex gets all the records of a bucket that are indexed in the specified index
|
||||
func (n *node) AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error {
|
||||
if fieldName == "" {
|
||||
return n.All(to, options...)
|
||||
}
|
||||
|
||||
ref := reflect.ValueOf(to)
|
||||
|
||||
if ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Slice {
|
||||
return ErrSlicePtrNeeded
|
||||
}
|
||||
|
||||
typ := reflect.Indirect(ref).Type().Elem()
|
||||
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
}
|
||||
|
||||
newElem := reflect.New(typ)
|
||||
|
||||
cfg, err := extract(&newElem)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if cfg.ID.Name == fieldName {
|
||||
return n.All(to, options...)
|
||||
}
|
||||
|
||||
opts := index.NewOptions()
|
||||
for _, fn := range options {
|
||||
fn(opts)
|
||||
}
|
||||
|
||||
return n.readTx(func(tx *bolt.Tx) error {
|
||||
return n.allByIndex(tx, fieldName, cfg, &ref, opts)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *node) allByIndex(tx *bolt.Tx, fieldName string, cfg *structConfig, ref *reflect.Value, opts *index.Options) error {
|
||||
bucket := n.GetBucket(tx, cfg.Name)
|
||||
if bucket == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
fieldCfg, ok := cfg.Fields[fieldName]
|
||||
if !ok {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
idx, err := getIndex(bucket, fieldCfg.Index, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
list, err := idx.AllRecords(opts)
|
||||
if err != nil {
|
||||
if err == index.ErrNotFound {
|
||||
return ErrNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
results := reflect.MakeSlice(reflect.Indirect(*ref).Type(), len(list), len(list))
|
||||
|
||||
for i := range list {
|
||||
raw := bucket.Get(list[i])
|
||||
if raw == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
err = n.codec.Unmarshal(raw, results.Index(i).Addr().Interface())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
reflect.Indirect(*ref).Set(results)
|
||||
return nil
|
||||
}
|
||||
|
||||
// All gets all the records of a bucket.
|
||||
// If there are no records it returns no error and the 'to' parameter is set to an empty slice.
|
||||
func (n *node) All(to interface{}, options ...func(*index.Options)) error {
|
||||
opts := index.NewOptions()
|
||||
for _, fn := range options {
|
||||
fn(opts)
|
||||
}
|
||||
|
||||
query := newQuery(n, nil).Limit(opts.Limit).Skip(opts.Skip)
|
||||
if opts.Reverse {
|
||||
query.Reverse()
|
||||
}
|
||||
|
||||
err := query.Find(to)
|
||||
if err != nil && err != ErrNotFound {
|
||||
return err
|
||||
}
|
||||
|
||||
if err == ErrNotFound {
|
||||
ref := reflect.ValueOf(to)
|
||||
results := reflect.MakeSlice(reflect.Indirect(ref).Type(), 0, 0)
|
||||
reflect.Indirect(ref).Set(results)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Range returns one or more records by the specified index within the specified range
|
||||
func (n *node) Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error {
|
||||
sink, err := newListSink(n, to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucketName := sink.bucketName()
|
||||
if bucketName == "" {
|
||||
return ErrNoName
|
||||
}
|
||||
|
||||
ref := reflect.Indirect(reflect.New(sink.elemType))
|
||||
cfg, err := extractSingleField(&ref, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := index.NewOptions()
|
||||
for _, fn := range options {
|
||||
fn(opts)
|
||||
}
|
||||
|
||||
field, ok := cfg.Fields[fieldName]
|
||||
if !ok || (!field.IsID && field.Index == "") {
|
||||
query := newQuery(n, q.And(q.Gte(fieldName, min), q.Lte(fieldName, max)))
|
||||
query.Skip(opts.Skip).Limit(opts.Limit)
|
||||
|
||||
if opts.Reverse {
|
||||
query.Reverse()
|
||||
}
|
||||
|
||||
err = n.readTx(func(tx *bolt.Tx) error {
|
||||
return query.query(tx, sink)
|
||||
})
|
||||
|
||||
		if err != nil {
			return err
		}

		return sink.flush()
	}

	mn, err := toBytes(min, n.codec)
	if err != nil {
		return err
	}

	mx, err := toBytes(max, n.codec)
	if err != nil {
		return err
	}

	return n.readTx(func(tx *bolt.Tx) error {
		return n.rnge(tx, bucketName, fieldName, cfg, sink, mn, mx, opts)
	})
}

func (n *node) rnge(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, sink *listSink, min, max []byte, opts *index.Options) error {
	bucket := n.GetBucket(tx, bucketName)
	if bucket == nil {
		reflect.Indirect(sink.ref).SetLen(0)
		return nil
	}

	idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
	if err != nil {
		return err
	}

	list, err := idx.Range(min, max, opts)
	if err != nil {
		return err
	}

	sink.results = reflect.MakeSlice(reflect.Indirect(sink.ref).Type(), len(list), len(list))
	sorter := newSorter(n, sink)
	for i := range list {
		raw := bucket.Get(list[i])
		if raw == nil {
			return ErrNotFound
		}

		if _, err := sorter.filter(nil, bucket, list[i], raw); err != nil {
			return err
		}
	}

	return sorter.flush()
}

// Prefix returns one or more records whose given field starts with the specified prefix.
func (n *node) Prefix(fieldName string, prefix string, to interface{}, options ...func(*index.Options)) error {
	sink, err := newListSink(n, to)
	if err != nil {
		return err
	}

	bucketName := sink.bucketName()
	if bucketName == "" {
		return ErrNoName
	}

	ref := reflect.Indirect(reflect.New(sink.elemType))
	cfg, err := extractSingleField(&ref, fieldName)
	if err != nil {
		return err
	}

	opts := index.NewOptions()
	for _, fn := range options {
		fn(opts)
	}

	field, ok := cfg.Fields[fieldName]
	if !ok || (!field.IsID && field.Index == "") {
		query := newQuery(n, q.Re(fieldName, fmt.Sprintf("^%s", prefix)))
		query.Skip(opts.Skip).Limit(opts.Limit)

		if opts.Reverse {
			query.Reverse()
		}

		err = n.readTx(func(tx *bolt.Tx) error {
			return query.query(tx, sink)
		})

		if err != nil {
			return err
		}

		return sink.flush()
	}

	prfx, err := toBytes(prefix, n.codec)
	if err != nil {
		return err
	}

	return n.readTx(func(tx *bolt.Tx) error {
		return n.prefix(tx, bucketName, fieldName, cfg, sink, prfx, opts)
	})
}

func (n *node) prefix(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, sink *listSink, prefix []byte, opts *index.Options) error {
	bucket := n.GetBucket(tx, bucketName)
	if bucket == nil {
		reflect.Indirect(sink.ref).SetLen(0)
		return nil
	}

	idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
	if err != nil {
		return err
	}

	list, err := idx.Prefix(prefix, opts)
	if err != nil {
		return err
	}

	sink.results = reflect.MakeSlice(reflect.Indirect(sink.ref).Type(), len(list), len(list))
	sorter := newSorter(n, sink)
	for i := range list {
		raw := bucket.Get(list[i])
		if raw == nil {
			return ErrNotFound
		}

		if _, err := sorter.filter(nil, bucket, list[i], raw); err != nil {
			return err
		}
	}

	return sorter.flush()
}

// Count counts all the records of a bucket
func (n *node) Count(data interface{}) (int, error) {
	return n.Select().Count(data)
}
14 vendor/github.com/asdine/storm/v3/index/errors.go generated vendored
@@ -1,14 +0,0 @@
package index

import "errors"

var (
	// ErrNotFound is returned when the specified record is not saved in the bucket.
	ErrNotFound = errors.New("not found")

	// ErrAlreadyExists is returned uses when trying to set an existing value on a field that has a unique index.
	ErrAlreadyExists = errors.New("already exists")

	// ErrNilParam is returned when the specified param is expected to be not nil.
	ErrNilParam = errors.New("param must not be nil")
)
14 vendor/github.com/asdine/storm/v3/index/indexes.go generated vendored
@@ -1,14 +0,0 @@
// Package index contains Index engines used to store values and their corresponding IDs
package index

// Index interface
type Index interface {
	Add(value []byte, targetID []byte) error
	Remove(value []byte) error
	RemoveID(id []byte) error
	Get(value []byte) []byte
	All(value []byte, opts *Options) ([][]byte, error)
	AllRecords(opts *Options) ([][]byte, error)
	Range(min []byte, max []byte, opts *Options) ([][]byte, error)
	Prefix(prefix []byte, opts *Options) ([][]byte, error)
}
283 vendor/github.com/asdine/storm/v3/index/list.go generated vendored
@@ -1,283 +0,0 @@
package index

import (
	"bytes"

	"github.com/asdine/storm/v3/internal"
	bolt "go.etcd.io/bbolt"
)

// NewListIndex loads a ListIndex
func NewListIndex(parent *bolt.Bucket, indexName []byte) (*ListIndex, error) {
	var err error
	b := parent.Bucket(indexName)
	if b == nil {
		if !parent.Writable() {
			return nil, ErrNotFound
		}
		b, err = parent.CreateBucket(indexName)
		if err != nil {
			return nil, err
		}
	}

	ids, err := NewUniqueIndex(b, []byte("storm__ids"))
	if err != nil {
		return nil, err
	}

	return &ListIndex{
		IndexBucket: b,
		Parent: parent,
		IDs: ids,
	}, nil
}

// ListIndex is an index that references values and the corresponding IDs.
type ListIndex struct {
	Parent *bolt.Bucket
	IndexBucket *bolt.Bucket
	IDs *UniqueIndex
}

// Add a value to the list index
func (idx *ListIndex) Add(newValue []byte, targetID []byte) error {
	if newValue == nil || len(newValue) == 0 {
		return ErrNilParam
	}
	if targetID == nil || len(targetID) == 0 {
		return ErrNilParam
	}

	key := idx.IDs.Get(targetID)
	if key != nil {
		err := idx.IndexBucket.Delete(key)
		if err != nil {
			return err
		}

		err = idx.IDs.Remove(targetID)
		if err != nil {
			return err
		}

		key = key[:0]
	}

	key = append(key, newValue...)
	key = append(key, '_')
	key = append(key, '_')
	key = append(key, targetID...)

	err := idx.IDs.Add(targetID, key)
	if err != nil {
		return err
	}

	return idx.IndexBucket.Put(key, targetID)
}

// Remove a value from the unique index
func (idx *ListIndex) Remove(value []byte) error {
	var err error
	var keys [][]byte

	c := idx.IndexBucket.Cursor()
	prefix := generatePrefix(value)

	for k, _ := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, _ = c.Next() {
		keys = append(keys, k)
	}

	for _, k := range keys {
		err = idx.IndexBucket.Delete(k)
		if err != nil {
			return err
		}
	}

	return idx.IDs.RemoveID(value)
}

// RemoveID removes an ID from the list index
func (idx *ListIndex) RemoveID(targetID []byte) error {
	value := idx.IDs.Get(targetID)
	if value == nil {
		return nil
	}

	err := idx.IndexBucket.Delete(value)
	if err != nil {
		return err
	}

	return idx.IDs.Remove(targetID)
}

// Get the first ID corresponding to the given value
func (idx *ListIndex) Get(value []byte) []byte {
	c := idx.IndexBucket.Cursor()
	prefix := generatePrefix(value)

	for k, id := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, id = c.Next() {
		return id
	}

	return nil
}

// All the IDs corresponding to the given value
func (idx *ListIndex) All(value []byte, opts *Options) ([][]byte, error) {
	var list [][]byte
	c := idx.IndexBucket.Cursor()
	cur := internal.Cursor{C: c, Reverse: opts != nil && opts.Reverse}

	prefix := generatePrefix(value)

	k, id := c.Seek(prefix)
	if cur.Reverse {
		var count int
		kc := k
		idc := id
		for ; kc != nil && bytes.HasPrefix(kc, prefix); kc, idc = c.Next() {
			count++
			k, id = kc, idc
		}
		if kc != nil {
			k, id = c.Prev()
		}
		list = make([][]byte, 0, count)
	}

	for ; bytes.HasPrefix(k, prefix); k, id = cur.Next() {
		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, id)
	}

	return list, nil
}

// AllRecords returns all the IDs of this index
func (idx *ListIndex) AllRecords(opts *Options) ([][]byte, error) {
	var list [][]byte

	c := internal.Cursor{C: idx.IndexBucket.Cursor(), Reverse: opts != nil && opts.Reverse}

	for k, id := c.First(); k != nil; k, id = c.Next() {
		if id == nil || bytes.Equal(k, []byte("storm__ids")) {
			continue
		}

		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, id)
	}

	return list, nil
}

// Range returns the ids corresponding to the given range of values
func (idx *ListIndex) Range(min []byte, max []byte, opts *Options) ([][]byte, error) {
	var list [][]byte

	c := internal.RangeCursor{
		C: idx.IndexBucket.Cursor(),
		Reverse: opts != nil && opts.Reverse,
		Min: min,
		Max: max,
		CompareFn: func(val, limit []byte) int {
			pos := bytes.LastIndex(val, []byte("__"))
			return bytes.Compare(val[:pos], limit)
		},
	}

	for k, id := c.First(); c.Continue(k); k, id = c.Next() {
		if id == nil || bytes.Equal(k, []byte("storm__ids")) {
			continue
		}

		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, id)
	}

	return list, nil
}

// Prefix returns the ids whose values have the given prefix.
func (idx *ListIndex) Prefix(prefix []byte, opts *Options) ([][]byte, error) {
	var list [][]byte

	c := internal.PrefixCursor{
		C: idx.IndexBucket.Cursor(),
		Reverse: opts != nil && opts.Reverse,
		Prefix: prefix,
	}

	for k, id := c.First(); k != nil && c.Continue(k); k, id = c.Next() {
		if id == nil || bytes.Equal(k, []byte("storm__ids")) {
			continue
		}

		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, id)
	}
	return list, nil
}

func generatePrefix(value []byte) []byte {
	prefix := make([]byte, len(value)+2)
	var i int
	for i = range value {
		prefix[i] = value[i]
	}
	prefix[i+1] = '_'
	prefix[i+2] = '_'
	return prefix
}
15 vendor/github.com/asdine/storm/v3/index/options.go generated vendored
@@ -1,15 +0,0 @@
package index

// NewOptions creates initialized Options
func NewOptions() *Options {
	return &Options{
		Limit: -1,
	}
}

// Options are used to customize queries
type Options struct {
	Limit int
	Skip int
	Reverse bool
}
183 vendor/github.com/asdine/storm/v3/index/unique.go generated vendored
@@ -1,183 +0,0 @@
package index

import (
	"bytes"

	"github.com/asdine/storm/v3/internal"
	bolt "go.etcd.io/bbolt"
)

// NewUniqueIndex loads a UniqueIndex
func NewUniqueIndex(parent *bolt.Bucket, indexName []byte) (*UniqueIndex, error) {
	var err error
	b := parent.Bucket(indexName)
	if b == nil {
		if !parent.Writable() {
			return nil, ErrNotFound
		}
		b, err = parent.CreateBucket(indexName)
		if err != nil {
			return nil, err
		}
	}

	return &UniqueIndex{
		IndexBucket: b,
		Parent: parent,
	}, nil
}

// UniqueIndex is an index that references unique values and the corresponding ID.
type UniqueIndex struct {
	Parent *bolt.Bucket
	IndexBucket *bolt.Bucket
}

// Add a value to the unique index
func (idx *UniqueIndex) Add(value []byte, targetID []byte) error {
	if value == nil || len(value) == 0 {
		return ErrNilParam
	}
	if targetID == nil || len(targetID) == 0 {
		return ErrNilParam
	}

	exists := idx.IndexBucket.Get(value)
	if exists != nil {
		if bytes.Equal(exists, targetID) {
			return nil
		}
		return ErrAlreadyExists
	}

	return idx.IndexBucket.Put(value, targetID)
}

// Remove a value from the unique index
func (idx *UniqueIndex) Remove(value []byte) error {
	return idx.IndexBucket.Delete(value)
}

// RemoveID removes an ID from the unique index
func (idx *UniqueIndex) RemoveID(id []byte) error {
	c := idx.IndexBucket.Cursor()

	for val, ident := c.First(); val != nil; val, ident = c.Next() {
		if bytes.Equal(ident, id) {
			return idx.Remove(val)
		}
	}
	return nil
}

// Get the id corresponding to the given value
func (idx *UniqueIndex) Get(value []byte) []byte {
	return idx.IndexBucket.Get(value)
}

// All returns all the ids corresponding to the given value
func (idx *UniqueIndex) All(value []byte, opts *Options) ([][]byte, error) {
	id := idx.IndexBucket.Get(value)
	if id != nil {
		return [][]byte{id}, nil
	}

	return nil, nil
}

// AllRecords returns all the IDs of this index
func (idx *UniqueIndex) AllRecords(opts *Options) ([][]byte, error) {
	var list [][]byte

	c := internal.Cursor{C: idx.IndexBucket.Cursor(), Reverse: opts != nil && opts.Reverse}

	for val, ident := c.First(); val != nil; val, ident = c.Next() {
		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, ident)
	}
	return list, nil
}

// Range returns the ids corresponding to the given range of values
func (idx *UniqueIndex) Range(min []byte, max []byte, opts *Options) ([][]byte, error) {
	var list [][]byte

	c := internal.RangeCursor{
		C: idx.IndexBucket.Cursor(),
		Reverse: opts != nil && opts.Reverse,
		Min: min,
		Max: max,
		CompareFn: func(val, limit []byte) int {
			return bytes.Compare(val, limit)
		},
	}

	for val, ident := c.First(); val != nil && c.Continue(val); val, ident = c.Next() {
		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, ident)
	}
	return list, nil
}

// Prefix returns the ids whose values have the given prefix.
func (idx *UniqueIndex) Prefix(prefix []byte, opts *Options) ([][]byte, error) {
	var list [][]byte

	c := internal.PrefixCursor{
		C: idx.IndexBucket.Cursor(),
		Reverse: opts != nil && opts.Reverse,
		Prefix: prefix,
	}

	for val, ident := c.First(); val != nil && c.Continue(val); val, ident = c.Next() {
		if opts != nil && opts.Skip > 0 {
			opts.Skip--
			continue
		}

		if opts != nil && opts.Limit == 0 {
			break
		}

		if opts != nil && opts.Limit > 0 {
			opts.Limit--
		}

		list = append(list, ident)
	}
	return list, nil
}

// first returns the first ID of this index
func (idx *UniqueIndex) first() []byte {
	c := idx.IndexBucket.Cursor()

	for val, ident := c.First(); val != nil; val, ident = c.Next() {
		return ident
	}
	return nil
}
121 vendor/github.com/asdine/storm/v3/internal/boltdb.go generated vendored
@@ -1,121 +0,0 @@
package internal

import (
	"bytes"

	bolt "go.etcd.io/bbolt"
)

// Cursor that can be reversed
type Cursor struct {
	C *bolt.Cursor
	Reverse bool
}

// First element
func (c *Cursor) First() ([]byte, []byte) {
	if c.Reverse {
		return c.C.Last()
	}

	return c.C.First()
}

// Next element
func (c *Cursor) Next() ([]byte, []byte) {
	if c.Reverse {
		return c.C.Prev()
	}

	return c.C.Next()
}

// RangeCursor that can be reversed
type RangeCursor struct {
	C *bolt.Cursor
	Reverse bool
	Min []byte
	Max []byte
	CompareFn func([]byte, []byte) int
}

// First element
func (c *RangeCursor) First() ([]byte, []byte) {
	if c.Reverse {
		k, v := c.C.Seek(c.Max)

		// If Seek doesn't find a key it goes to the next.
		// If so, we need to get the previous one to avoid
		// including bigger values. #218
		if !bytes.HasPrefix(k, c.Max) && k != nil {
			k, v = c.C.Prev()
		}

		return k, v
	}

	return c.C.Seek(c.Min)
}

// Next element
func (c *RangeCursor) Next() ([]byte, []byte) {
	if c.Reverse {
		return c.C.Prev()
	}

	return c.C.Next()
}

// Continue tells if the loop needs to continue
func (c *RangeCursor) Continue(val []byte) bool {
	if c.Reverse {
		return val != nil && c.CompareFn(val, c.Min) >= 0
	}

	return val != nil && c.CompareFn(val, c.Max) <= 0
}

// PrefixCursor that can be reversed
type PrefixCursor struct {
	C *bolt.Cursor
	Reverse bool
	Prefix []byte
}

// First element
func (c *PrefixCursor) First() ([]byte, []byte) {
	var k, v []byte

	for k, v = c.C.First(); k != nil && !bytes.HasPrefix(k, c.Prefix); k, v = c.C.Next() {
	}

	if k == nil {
		return nil, nil
	}

	if c.Reverse {
		kc, vc := k, v
		for ; kc != nil && bytes.HasPrefix(kc, c.Prefix); kc, vc = c.C.Next() {
			k, v = kc, vc
		}
		if kc != nil {
			k, v = c.C.Prev()
		}
	}

	return k, v
}

// Next element
func (c *PrefixCursor) Next() ([]byte, []byte) {
	if c.Reverse {
		return c.C.Prev()
	}

	return c.C.Next()
}

// Continue tells if the loop needs to continue
func (c *PrefixCursor) Continue(val []byte) bool {
	return val != nil && bytes.HasPrefix(val, c.Prefix)
}
170 vendor/github.com/asdine/storm/v3/kv.go generated vendored
@@ -1,170 +0,0 @@
package storm

import (
	"reflect"

	bolt "go.etcd.io/bbolt"
)

// KeyValueStore can store and fetch values by key
type KeyValueStore interface {
	// Get a value from a bucket
	Get(bucketName string, key interface{}, to interface{}) error
	// Set a key/value pair into a bucket
	Set(bucketName string, key interface{}, value interface{}) error
	// Delete deletes a key from a bucket
	Delete(bucketName string, key interface{}) error
	// GetBytes gets a raw value from a bucket.
	GetBytes(bucketName string, key interface{}) ([]byte, error)
	// SetBytes sets a raw value into a bucket.
	SetBytes(bucketName string, key interface{}, value []byte) error
	// KeyExists reports the presence of a key in a bucket.
	KeyExists(bucketName string, key interface{}) (bool, error)
}

// GetBytes gets a raw value from a bucket.
func (n *node) GetBytes(bucketName string, key interface{}) ([]byte, error) {
	id, err := toBytes(key, n.codec)
	if err != nil {
		return nil, err
	}

	var val []byte
	return val, n.readTx(func(tx *bolt.Tx) error {
		raw, err := n.getBytes(tx, bucketName, id)
		if err != nil {
			return err
		}

		val = make([]byte, len(raw))
		copy(val, raw)
		return nil
	})
}

// GetBytes gets a raw value from a bucket.
func (n *node) getBytes(tx *bolt.Tx, bucketName string, id []byte) ([]byte, error) {
	bucket := n.GetBucket(tx, bucketName)
	if bucket == nil {
		return nil, ErrNotFound
	}

	raw := bucket.Get(id)
	if raw == nil {
		return nil, ErrNotFound
	}

	return raw, nil
}

// SetBytes sets a raw value into a bucket.
func (n *node) SetBytes(bucketName string, key interface{}, value []byte) error {
	if key == nil {
		return ErrNilParam
	}

	id, err := toBytes(key, n.codec)
	if err != nil {
		return err
	}

	return n.readWriteTx(func(tx *bolt.Tx) error {
		return n.setBytes(tx, bucketName, id, value)
	})
}

func (n *node) setBytes(tx *bolt.Tx, bucketName string, id, data []byte) error {
	bucket, err := n.CreateBucketIfNotExists(tx, bucketName)
	if err != nil {
		return err
	}

	// save node configuration in the bucket
	_, err = newMeta(bucket, n)
	if err != nil {
		return err
	}

	return bucket.Put(id, data)
}

// Get a value from a bucket
func (n *node) Get(bucketName string, key interface{}, to interface{}) error {
	ref := reflect.ValueOf(to)

	if !ref.IsValid() || ref.Kind() != reflect.Ptr {
		return ErrPtrNeeded
	}

	id, err := toBytes(key, n.codec)
	if err != nil {
		return err
	}

	return n.readTx(func(tx *bolt.Tx) error {
		raw, err := n.getBytes(tx, bucketName, id)
		if err != nil {
			return err
		}

		return n.codec.Unmarshal(raw, to)
	})
}

// Set a key/value pair into a bucket
func (n *node) Set(bucketName string, key interface{}, value interface{}) error {
	var data []byte
	var err error
	if value != nil {
		data, err = n.codec.Marshal(value)
		if err != nil {
			return err
		}
	}

	return n.SetBytes(bucketName, key, data)
}

// Delete deletes a key from a bucket
func (n *node) Delete(bucketName string, key interface{}) error {
	id, err := toBytes(key, n.codec)
	if err != nil {
		return err
	}

	return n.readWriteTx(func(tx *bolt.Tx) error {
		return n.delete(tx, bucketName, id)
	})
}

func (n *node) delete(tx *bolt.Tx, bucketName string, id []byte) error {
	bucket := n.GetBucket(tx, bucketName)
	if bucket == nil {
		return ErrNotFound
	}

	return bucket.Delete(id)
}

// KeyExists reports the presence of a key in a bucket.
func (n *node) KeyExists(bucketName string, key interface{}) (bool, error) {
	id, err := toBytes(key, n.codec)
	if err != nil {
		return false, err
	}

	var exists bool
	return exists, n.readTx(func(tx *bolt.Tx) error {
		bucket := n.GetBucket(tx, bucketName)
		if bucket == nil {
			return ErrNotFound
		}

		v := bucket.Get(id)
		if v != nil {
			exists = true
		}

		return nil
	})
}

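For context, the KeyValueStore methods removed above are storm's plain key/value layer on top of bbolt buckets. A minimal usage sketch (the "my.db" file and the "config" bucket and key names are made up for illustration; error handling is reduced to panics):

package main

import (
	"fmt"

	"github.com/asdine/storm/v3"
)

func main() {
	// Open (or create) a database file; Close releases the underlying bbolt handle.
	db, err := storm.Open("my.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Set encodes the value with the node's codec and stores it under the key.
	if err := db.Set("config", "timeout", 30); err != nil {
		panic(err)
	}

	// Get decodes the stored value back into the given pointer.
	var timeout int
	if err := db.Get("config", "timeout", &timeout); err != nil {
		panic(err)
	}
	fmt.Println(timeout) // 30
}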
69 vendor/github.com/asdine/storm/v3/metadata.go generated vendored
@@ -1,69 +0,0 @@
package storm

import (
	"reflect"

	bolt "go.etcd.io/bbolt"
)

const (
	metaCodec = "codec"
)

func newMeta(b *bolt.Bucket, n Node) (*meta, error) {
	m := b.Bucket([]byte(metadataBucket))
	if m != nil {
		name := m.Get([]byte(metaCodec))
		if string(name) != n.Codec().Name() {
			return nil, ErrDifferentCodec
		}
		return &meta{
			node: n,
			bucket: m,
		}, nil
	}

	m, err := b.CreateBucket([]byte(metadataBucket))
	if err != nil {
		return nil, err
	}

	m.Put([]byte(metaCodec), []byte(n.Codec().Name()))
	return &meta{
		node: n,
		bucket: m,
	}, nil
}

type meta struct {
	node Node
	bucket *bolt.Bucket
}

func (m *meta) increment(field *fieldConfig) error {
	var err error
	counter := field.IncrementStart

	raw := m.bucket.Get([]byte(field.Name + "counter"))
	if raw != nil {
		counter, err = numberfromb(raw)
		if err != nil {
			return err
		}
		counter++
	}

	raw, err = numbertob(counter)
	if err != nil {
		return err
	}

	err = m.bucket.Put([]byte(field.Name+"counter"), raw)
	if err != nil {
		return err
	}

	field.Value.Set(reflect.ValueOf(counter).Convert(field.Value.Type()))
	field.IsZero = false
	return nil
}
126 vendor/github.com/asdine/storm/v3/node.go generated vendored
@@ -1,126 +0,0 @@
package storm

import (
	"github.com/asdine/storm/v3/codec"
	bolt "go.etcd.io/bbolt"
)

// A Node in Storm represents the API to a BoltDB bucket.
type Node interface {
	Tx
	TypeStore
	KeyValueStore
	BucketScanner

	// From returns a new Storm node with a new bucket root below the current.
	// All DB operations on the new node will be executed relative to this bucket.
	From(addend ...string) Node

	// Bucket returns the bucket name as a slice from the root.
	// In the normal, simple case this will be empty.
	Bucket() []string

	// GetBucket returns the given bucket below the current node.
	GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket

	// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
	// already exist.
	CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error)

	// WithTransaction returns a New Storm node that will use the given transaction.
	WithTransaction(tx *bolt.Tx) Node

	// Begin starts a new transaction.
	Begin(writable bool) (Node, error)

	// Codec used by this instance of Storm
	Codec() codec.MarshalUnmarshaler

	// WithCodec returns a New Storm Node that will use the given Codec.
	WithCodec(codec codec.MarshalUnmarshaler) Node

	// WithBatch returns a new Storm Node with the batch mode enabled.
	WithBatch(enabled bool) Node
}

// A Node in Storm represents the API to a BoltDB bucket.
type node struct {
	s *DB

	// The root bucket. In the normal, simple case this will be empty.
	rootBucket []string

	// Transaction object. Nil if not in transaction
	tx *bolt.Tx

	// Codec of this node
	codec codec.MarshalUnmarshaler

	// Enable batch mode for read-write transaction, instead of update mode
	batchMode bool
}

// From returns a new Storm Node with a new bucket root below the current.
// All DB operations on the new node will be executed relative to this bucket.
func (n node) From(addend ...string) Node {
	n.rootBucket = append(n.rootBucket, addend...)
	return &n
}

// WithTransaction returns a new Storm Node that will use the given transaction.
func (n node) WithTransaction(tx *bolt.Tx) Node {
	n.tx = tx
	return &n
}

// WithCodec returns a new Storm Node that will use the given Codec.
func (n node) WithCodec(codec codec.MarshalUnmarshaler) Node {
	n.codec = codec
	return &n
}

// WithBatch returns a new Storm Node with the batch mode enabled.
func (n node) WithBatch(enabled bool) Node {
	n.batchMode = enabled
	return &n
}

// Bucket returns the bucket name as a slice from the root.
// In the normal, simple case this will be empty.
func (n *node) Bucket() []string {
	return n.rootBucket
}

// Codec returns the EncodeDecoder used by this instance of Storm
func (n *node) Codec() codec.MarshalUnmarshaler {
	return n.codec
}

// Detects if already in transaction or runs a read write transaction.
// Uses batch mode if enabled.
func (n *node) readWriteTx(fn func(tx *bolt.Tx) error) error {
	if n.tx != nil {
		return fn(n.tx)
	}

	if n.batchMode {
		return n.s.Bolt.Batch(func(tx *bolt.Tx) error {
			return fn(tx)
		})
	}

	return n.s.Bolt.Update(func(tx *bolt.Tx) error {
		return fn(tx)
	})
}

// Detects if already in transaction or runs a read transaction.
func (n *node) readTx(fn func(tx *bolt.Tx) error) error {
	if n.tx != nil {
		return fn(n.tx)
	}

	return n.s.Bolt.View(func(tx *bolt.Tx) error {
		return fn(tx)
	})
}
97 vendor/github.com/asdine/storm/v3/options.go generated vendored
@@ -1,97 +0,0 @@
package storm

import (
	"os"

	"github.com/asdine/storm/v3/codec"
	"github.com/asdine/storm/v3/index"
	bolt "go.etcd.io/bbolt"
)

// BoltOptions used to pass options to BoltDB.
func BoltOptions(mode os.FileMode, options *bolt.Options) func(*Options) error {
	return func(opts *Options) error {
		opts.boltMode = mode
		opts.boltOptions = options
		return nil
	}
}

// Codec used to set a custom encoder and decoder. The default is JSON.
func Codec(c codec.MarshalUnmarshaler) func(*Options) error {
	return func(opts *Options) error {
		opts.codec = c
		return nil
	}
}

// Batch enables the use of batch instead of update for read-write transactions.
func Batch() func(*Options) error {
	return func(opts *Options) error {
		opts.batchMode = true
		return nil
	}
}

// Root used to set the root bucket. See also the From method.
func Root(root ...string) func(*Options) error {
	return func(opts *Options) error {
		opts.rootBucket = root
		return nil
	}
}

// UseDB allows Storm to use an existing open Bolt.DB.
// Warning: storm.DB.Close() will close the bolt.DB instance.
func UseDB(b *bolt.DB) func(*Options) error {
	return func(opts *Options) error {
		opts.path = b.Path()
		opts.bolt = b
		return nil
	}
}

// Limit sets the maximum number of records to return
func Limit(limit int) func(*index.Options) {
	return func(opts *index.Options) {
		opts.Limit = limit
	}
}

// Skip sets the number of records to skip
func Skip(offset int) func(*index.Options) {
	return func(opts *index.Options) {
		opts.Skip = offset
	}
}

// Reverse will return the results in descending order
func Reverse() func(*index.Options) {
	return func(opts *index.Options) {
		opts.Reverse = true
	}
}

// Options are used to customize the way Storm opens a database.
type Options struct {
	// Handles encoding and decoding of objects
	codec codec.MarshalUnmarshaler

	// Bolt file mode
	boltMode os.FileMode

	// Bolt options
	boltOptions *bolt.Options

	// Enable batch mode for read-write transaction, instead of update mode
	batchMode bool

	// The root bucket name
	rootBucket []string

	// Path of the database file
	path string

	// Bolt is still easily accessible
	bolt *bolt.DB
}
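The functions above are functional options consumed by storm.Open. A sketch of how they are typically combined; it assumes the MessagePack codec that storm ships under codec/msgpack (swap in another codec if that assumption does not hold), and "app.db" and the root path are invented names:

package main

import (
	"github.com/asdine/storm/v3"
	"github.com/asdine/storm/v3/codec/msgpack"
)

func main() {
	// Each option mutates the Options struct before the database file is opened.
	db, err := storm.Open("app.db",
		storm.Codec(msgpack.Codec),    // replace the default JSON codec
		storm.Batch(),                 // use bbolt Batch() for read-write transactions
		storm.Root("tenants", "acme"), // nest all buckets under a root path
	)
	if err != nil {
		panic(err)
	}
	defer db.Close()
}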
122 vendor/github.com/asdine/storm/v3/q/compare.go generated vendored
@@ -1,122 +0,0 @@
package q

import (
	"go/constant"
	"go/token"
	"reflect"
	"strconv"
)

func compare(a, b interface{}, tok token.Token) bool {
	vala := reflect.ValueOf(a)
	valb := reflect.ValueOf(b)

	ak := vala.Kind()
	bk := valb.Kind()
	switch {
	// comparing nil values
	case (ak == reflect.Ptr || ak == reflect.Slice || ak == reflect.Interface || ak == reflect.Invalid) &&
		(bk == reflect.Ptr || ak == reflect.Slice || bk == reflect.Interface || bk == reflect.Invalid) &&
		(!vala.IsValid() || vala.IsNil()) && (!valb.IsValid() || valb.IsNil()):
		return true
	case ak >= reflect.Int && ak <= reflect.Int64:
		if bk >= reflect.Int && bk <= reflect.Int64 {
			return constant.Compare(constant.MakeInt64(vala.Int()), tok, constant.MakeInt64(valb.Int()))
		}

		if bk >= reflect.Uint && bk <= reflect.Uint64 {
			return constant.Compare(constant.MakeInt64(vala.Int()), tok, constant.MakeInt64(int64(valb.Uint())))
		}

		if bk == reflect.Float32 || bk == reflect.Float64 {
			return constant.Compare(constant.MakeFloat64(float64(vala.Int())), tok, constant.MakeFloat64(valb.Float()))
		}

		if bk == reflect.String {
			bla, err := strconv.ParseFloat(valb.String(), 64)
			if err != nil {
				return false
			}

			return constant.Compare(constant.MakeFloat64(float64(vala.Int())), tok, constant.MakeFloat64(bla))
		}
	case ak >= reflect.Uint && ak <= reflect.Uint64:
		if bk >= reflect.Uint && bk <= reflect.Uint64 {
			return constant.Compare(constant.MakeUint64(vala.Uint()), tok, constant.MakeUint64(valb.Uint()))
		}

		if bk >= reflect.Int && bk <= reflect.Int64 {
			return constant.Compare(constant.MakeUint64(vala.Uint()), tok, constant.MakeUint64(uint64(valb.Int())))
		}

		if bk == reflect.Float32 || bk == reflect.Float64 {
			return constant.Compare(constant.MakeFloat64(float64(vala.Uint())), tok, constant.MakeFloat64(valb.Float()))
		}

		if bk == reflect.String {
			bla, err := strconv.ParseFloat(valb.String(), 64)
			if err != nil {
				return false
			}

			return constant.Compare(constant.MakeFloat64(float64(vala.Uint())), tok, constant.MakeFloat64(bla))
		}
	case ak == reflect.Float32 || ak == reflect.Float64:
		if bk == reflect.Float32 || bk == reflect.Float64 {
			return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(valb.Float()))
		}

		if bk >= reflect.Int && bk <= reflect.Int64 {
			return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(float64(valb.Int())))
		}

		if bk >= reflect.Uint && bk <= reflect.Uint64 {
			return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(float64(valb.Uint())))
		}

		if bk == reflect.String {
			bla, err := strconv.ParseFloat(valb.String(), 64)
			if err != nil {
				return false
			}

			return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(bla))
		}
	case ak == reflect.String:
		if bk == reflect.String {
			return constant.Compare(constant.MakeString(vala.String()), tok, constant.MakeString(valb.String()))
		}
	}

	typea, typeb := reflect.TypeOf(a), reflect.TypeOf(b)

	if typea != nil && (typea.String() == "time.Time" || typea.String() == "*time.Time") &&
		typeb != nil && (typeb.String() == "time.Time" || typeb.String() == "*time.Time") {

		if typea.String() == "*time.Time" && vala.IsNil() {
			return true
		}

		if typeb.String() == "*time.Time" {
			if valb.IsNil() {
				return true
			}
			valb = valb.Elem()
		}

		var x, y int64
		x = 1
		if vala.MethodByName("Equal").Call([]reflect.Value{valb})[0].Bool() {
			y = 1
		} else if vala.MethodByName("Before").Call([]reflect.Value{valb})[0].Bool() {
			y = 2
		}
		return constant.Compare(constant.MakeInt64(x), tok, constant.MakeInt64(y))
	}

	if tok == token.EQL {
		return reflect.DeepEqual(a, b)
	}

	return false
}
67 vendor/github.com/asdine/storm/v3/q/fieldmatcher.go generated vendored
@@ -1,67 +0,0 @@
package q

import (
	"errors"
	"go/token"
	"reflect"
)

// ErrUnknownField is returned when an unknown field is passed.
var ErrUnknownField = errors.New("unknown field")

type fieldMatcherDelegate struct {
	FieldMatcher
	Field string
}

// NewFieldMatcher creates a Matcher for a given field.
func NewFieldMatcher(field string, fm FieldMatcher) Matcher {
	return fieldMatcherDelegate{Field: field, FieldMatcher: fm}
}

// FieldMatcher can be used in NewFieldMatcher as a simple way to create the
// most common Matcher: A Matcher that evaluates one field's value.
// For more complex scenarios, implement the Matcher interface directly.
type FieldMatcher interface {
	MatchField(v interface{}) (bool, error)
}

func (r fieldMatcherDelegate) Match(i interface{}) (bool, error) {
	v := reflect.Indirect(reflect.ValueOf(i))
	return r.MatchValue(&v)
}

func (r fieldMatcherDelegate) MatchValue(v *reflect.Value) (bool, error) {
	field := v.FieldByName(r.Field)
	if !field.IsValid() {
		return false, ErrUnknownField
	}
	return r.MatchField(field.Interface())
}

// NewField2FieldMatcher creates a Matcher for a given field1 and field2.
func NewField2FieldMatcher(field1, field2 string, tok token.Token) Matcher {
	return field2fieldMatcherDelegate{Field1: field1, Field2: field2, Tok: tok}
}

type field2fieldMatcherDelegate struct {
	Field1, Field2 string
	Tok token.Token
}

func (r field2fieldMatcherDelegate) Match(i interface{}) (bool, error) {
	v := reflect.Indirect(reflect.ValueOf(i))
	return r.MatchValue(&v)
}

func (r field2fieldMatcherDelegate) MatchValue(v *reflect.Value) (bool, error) {
	field1 := v.FieldByName(r.Field1)
	if !field1.IsValid() {
		return false, ErrUnknownField
	}
	field2 := v.FieldByName(r.Field2)
	if !field2.IsValid() {
		return false, ErrUnknownField
	}
	return compare(field1.Interface(), field2.Interface(), r.Tok), nil
}
51 vendor/github.com/asdine/storm/v3/q/regexp.go generated vendored
@@ -1,51 +0,0 @@
package q

import (
	"fmt"
	"regexp"
	"sync"
)

// Re creates a regexp matcher. It checks if the given field matches the given regexp.
// Note that this only supports fields of type string or []byte.
func Re(field string, re string) Matcher {
	regexpCache.RLock()
	if r, ok := regexpCache.m[re]; ok {
		regexpCache.RUnlock()
		return NewFieldMatcher(field, &regexpMatcher{r: r})
	}
	regexpCache.RUnlock()

	regexpCache.Lock()
	r, err := regexp.Compile(re)
	if err == nil {
		regexpCache.m[re] = r
	}
	regexpCache.Unlock()

	return NewFieldMatcher(field, &regexpMatcher{r: r, err: err})
}

var regexpCache = struct {
	sync.RWMutex
	m map[string]*regexp.Regexp
}{m: make(map[string]*regexp.Regexp)}

type regexpMatcher struct {
	r *regexp.Regexp
	err error
}

func (r *regexpMatcher) MatchField(v interface{}) (bool, error) {
	if r.err != nil {
		return false, r.err
	}
	switch fieldValue := v.(type) {
	case string:
		return r.r.MatchString(fieldValue), nil
	case []byte:
		return r.r.Match(fieldValue), nil
	default:
		return false, fmt.Errorf("Only string and []byte supported for regexp matcher, got %T", fieldValue)
	}
}
247 vendor/github.com/asdine/storm/v3/q/tree.go generated vendored
@@ -1,247 +0,0 @@
// Package q contains a list of Matchers used to compare struct fields with values
package q

import (
	"go/token"
	"reflect"
)

// A Matcher is used to test against a record to see if it matches.
type Matcher interface {
	// Match is used to test the criteria against a structure.
	Match(interface{}) (bool, error)
}

// A ValueMatcher is used to test against a reflect.Value.
type ValueMatcher interface {
	// MatchValue tests if the given reflect.Value matches.
	// It is useful when the reflect.Value of an object already exists.
	MatchValue(*reflect.Value) (bool, error)
}

type cmp struct {
	value interface{}
	token token.Token
}

func (c *cmp) MatchField(v interface{}) (bool, error) {
	return compare(v, c.value, c.token), nil
}

type trueMatcher struct{}

func (*trueMatcher) Match(i interface{}) (bool, error) {
	return true, nil
}

func (*trueMatcher) MatchValue(v *reflect.Value) (bool, error) {
	return true, nil
}

type or struct {
	children []Matcher
}

func (c *or) Match(i interface{}) (bool, error) {
	v := reflect.Indirect(reflect.ValueOf(i))
	return c.MatchValue(&v)
}

func (c *or) MatchValue(v *reflect.Value) (bool, error) {
	for _, matcher := range c.children {
		if vm, ok := matcher.(ValueMatcher); ok {
			ok, err := vm.MatchValue(v)
			if err != nil {
				return false, err
			}
			if ok {
				return true, nil
			}
			continue
		}

		ok, err := matcher.Match(v.Interface())
		if err != nil {
			return false, err
		}
		if ok {
			return true, nil
		}
	}

	return false, nil
}

type and struct {
	children []Matcher
}

func (c *and) Match(i interface{}) (bool, error) {
	v := reflect.Indirect(reflect.ValueOf(i))
	return c.MatchValue(&v)
}

func (c *and) MatchValue(v *reflect.Value) (bool, error) {
	for _, matcher := range c.children {
		if vm, ok := matcher.(ValueMatcher); ok {
			ok, err := vm.MatchValue(v)
			if err != nil {
				return false, err
			}
			if !ok {
				return false, nil
			}
			continue
		}

		ok, err := matcher.Match(v.Interface())
		if err != nil {
			return false, err
		}
		if !ok {
			return false, nil
		}
	}

	return true, nil
}

type strictEq struct {
	field string
	value interface{}
}

func (s *strictEq) MatchField(v interface{}) (bool, error) {
	return reflect.DeepEqual(v, s.value), nil
}

type in struct {
	list interface{}
}

func (i *in) MatchField(v interface{}) (bool, error) {
	ref := reflect.ValueOf(i.list)
	if ref.Kind() != reflect.Slice {
		return false, nil
	}

	c := cmp{
		token: token.EQL,
	}

	for i := 0; i < ref.Len(); i++ {
		c.value = ref.Index(i).Interface()
		ok, err := c.MatchField(v)
		if err != nil {
			return false, err
		}
		if ok {
			return true, nil
		}
	}

	return false, nil
}

type not struct {
	children []Matcher
}

func (n *not) Match(i interface{}) (bool, error) {
	v := reflect.Indirect(reflect.ValueOf(i))
	return n.MatchValue(&v)
}

func (n *not) MatchValue(v *reflect.Value) (bool, error) {
	var err error

	for _, matcher := range n.children {
		vm, ok := matcher.(ValueMatcher)
		if ok {
			ok, err = vm.MatchValue(v)
		} else {
			ok, err = matcher.Match(v.Interface())
		}
		if err != nil {
			return false, err
		}
		if ok {
			return false, nil
		}
	}

	return true, nil
}

// Eq matcher, checks if the given field is equal to the given value
func Eq(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &cmp{value: v, token: token.EQL})
}

// EqF matcher, checks if the given field is equal to the given field
func EqF(field1, field2 string) Matcher {
	return NewField2FieldMatcher(field1, field2, token.EQL)
}

// StrictEq matcher, checks if the given field is deeply equal to the given value
func StrictEq(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &strictEq{value: v})
}

// Gt matcher, checks if the given field is greater than the given value
func Gt(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &cmp{value: v, token: token.GTR})
}

// GtF matcher, checks if the given field is greater than the given field
func GtF(field1, field2 string) Matcher {
	return NewField2FieldMatcher(field1, field2, token.GTR)
}

// Gte matcher, checks if the given field is greater than or equal to the given value
func Gte(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &cmp{value: v, token: token.GEQ})
}

// GteF matcher, checks if the given field is greater than or equal to the given field
func GteF(field1, field2 string) Matcher {
	return NewField2FieldMatcher(field1, field2, token.GEQ)
}

// Lt matcher, checks if the given field is lesser than the given value
func Lt(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &cmp{value: v, token: token.LSS})
}

// LtF matcher, checks if the given field is lesser than the given field
func LtF(field1, field2 string) Matcher {
	return NewField2FieldMatcher(field1, field2, token.LSS)
}

// Lte matcher, checks if the given field is lesser than or equal to the given value
func Lte(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &cmp{value: v, token: token.LEQ})
}

// LteF matcher, checks if the given field is lesser than or equal to the given field
func LteF(field1, field2 string) Matcher {
	return NewField2FieldMatcher(field1, field2, token.LEQ)
}

// In matcher, checks if the given field matches one of the value of the given slice.
// v must be a slice.
func In(field string, v interface{}) Matcher {
	return NewFieldMatcher(field, &in{list: v})
}

// True matcher, always returns true
func True() Matcher { return &trueMatcher{} }

// Or matcher, checks if at least one of the given matchers matches the record
func Or(matchers ...Matcher) Matcher { return &or{children: matchers} }

// And matcher, checks if all of the given matchers matches the record
func And(matchers ...Matcher) Matcher { return &and{children: matchers} }

// Not matcher, checks if all of the given matchers return false
func Not(matchers ...Matcher) Matcher { return &not{children: matchers} }
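The constructors above compose into a matcher tree that is later evaluated against each decoded record. A small sketch of that composition, using a hypothetical User struct invented for illustration:

package main

import (
	"fmt"

	"github.com/asdine/storm/v3/q"
)

type User struct {
	Name  string
	Group string
	Age   int
}

func main() {
	// Adults in the staff group whose name does not start with "z".
	matcher := q.And(
		q.Eq("Group", "staff"),
		q.Gte("Age", 18),
		q.Not(q.Re("Name", "^z")),
	)

	// Match walks the tree against a single record.
	ok, err := matcher.Match(&User{Name: "ada", Group: "staff", Age: 36})
	fmt.Println(ok, err) // true <nil>
}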
219 vendor/github.com/asdine/storm/v3/query.go generated vendored
@@ -1,219 +0,0 @@
package storm

import (
	"github.com/asdine/storm/v3/internal"
	"github.com/asdine/storm/v3/q"
	bolt "go.etcd.io/bbolt"
)

// Select a list of records that match a list of matchers. Doesn't use indexes.
func (n *node) Select(matchers ...q.Matcher) Query {
	tree := q.And(matchers...)
	return newQuery(n, tree)
}

// Query is the low level query engine used by Storm. It allows to operate searches through an entire bucket.
type Query interface {
	// Skip matching records by the given number
	Skip(int) Query

	// Limit the results by the given number
	Limit(int) Query

	// Order by the given fields, in descending precedence, left-to-right.
	OrderBy(...string) Query

	// Reverse the order of the results
	Reverse() Query

	// Bucket specifies the bucket name
	Bucket(string) Query

	// Find a list of matching records
	Find(interface{}) error

	// First gets the first matching record
	First(interface{}) error

	// Delete all matching records
	Delete(interface{}) error

	// Count all the matching records
	Count(interface{}) (int, error)

	// Returns all the records without decoding them
	Raw() ([][]byte, error)

	// Execute the given function for each raw element
	RawEach(func([]byte, []byte) error) error

	// Execute the given function for each element
	Each(interface{}, func(interface{}) error) error
}

func newQuery(n *node, tree q.Matcher) *query {
	return &query{
		skip: 0,
		limit: -1,
		node: n,
		tree: tree,
	}
}

type query struct {
	limit int
	skip int
	reverse bool
	tree q.Matcher
	node *node
	bucket string
	orderBy []string
}

func (q *query) Skip(nb int) Query {
	q.skip = nb
	return q
}

func (q *query) Limit(nb int) Query {
	q.limit = nb
	return q
}

func (q *query) OrderBy(field ...string) Query {
	q.orderBy = field
	return q
}

func (q *query) Reverse() Query {
	q.reverse = true
	return q
}

func (q *query) Bucket(bucketName string) Query {
	q.bucket = bucketName
	return q
}

func (q *query) Find(to interface{}) error {
	sink, err := newListSink(q.node, to)
	if err != nil {
		return err
	}

	return q.runQuery(sink)
}

func (q *query) First(to interface{}) error {
	sink, err := newFirstSink(q.node, to)
	if err != nil {
		return err
	}

	q.limit = 1
	return q.runQuery(sink)
}

func (q *query) Delete(kind interface{}) error {
	sink, err := newDeleteSink(q.node, kind)
	if err != nil {
		return err
	}

	return q.runQuery(sink)
}

func (q *query) Count(kind interface{}) (int, error) {
	sink, err := newCountSink(q.node, kind)
	if err != nil {
		return 0, err
	}

	err = q.runQuery(sink)
	if err != nil {
		return 0, err
	}

	return sink.counter, nil
}

func (q *query) Raw() ([][]byte, error) {
	sink := newRawSink()

	err := q.runQuery(sink)
	if err != nil {
		return nil, err
	}

	return sink.results, nil
}

func (q *query) RawEach(fn func([]byte, []byte) error) error {
	sink := newRawSink()

	sink.execFn = fn

	return q.runQuery(sink)
}

func (q *query) Each(kind interface{}, fn func(interface{}) error) error {
	sink, err := newEachSink(kind)
	if err != nil {
		return err
	}

	sink.execFn = fn

	return q.runQuery(sink)
}

func (q *query) runQuery(sink sink) error {
	if q.node.tx != nil {
		return q.query(q.node.tx, sink)
	}
	if sink.readOnly() {
		return q.node.s.Bolt.View(func(tx *bolt.Tx) error {
			return q.query(tx, sink)
		})
	}
	return q.node.s.Bolt.Update(func(tx *bolt.Tx) error {
		return q.query(tx, sink)
	})
}

func (q *query) query(tx *bolt.Tx, sink sink) error {
	bucketName := q.bucket
	if bucketName == "" {
		bucketName = sink.bucketName()
	}
	bucket := q.node.GetBucket(tx, bucketName)

	if q.limit == 0 {
		return sink.flush()
	}

	sorter := newSorter(q.node, sink)
	sorter.orderBy = q.orderBy
	sorter.reverse = q.reverse
	sorter.skip = q.skip
	sorter.limit = q.limit
	if bucket != nil {
		c := internal.Cursor{C: bucket.Cursor(), Reverse: q.reverse}
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if v == nil {
				continue
			}

			stop, err := sorter.filter(q.tree, bucket, k, v)
			if err != nil {
				return err
			}

			if stop {
				break
			}
		}
	}

	return sorter.flush()
}
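The Query interface above is normally reached through Select. A minimal usage sketch under the same assumptions as the previous example (the User type, the "staff" group, and "app.db" are invented; paging values are arbitrary):

package main

import (
	"fmt"

	"github.com/asdine/storm/v3"
	"github.com/asdine/storm/v3/q"
)

type User struct {
	Name  string
	Group string
	Age   int
}

func main() {
	db, err := storm.Open("app.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var users []User
	// Full-bucket scan filtered by the matcher tree, highest Age first,
	// skipping the first 10 matches and returning at most 10.
	err = db.Select(q.Eq("Group", "staff")).
		OrderBy("Age").Reverse().
		Skip(10).Limit(10).
		Find(&users)
	if err != nil && err != storm.ErrNotFound {
		panic(err)
	}
	fmt.Println(len(users))
}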
105 vendor/github.com/asdine/storm/v3/scan.go generated vendored
@@ -1,105 +0,0 @@
package storm

import (
	"bytes"

	bolt "go.etcd.io/bbolt"
)

// A BucketScanner scans a Node for a list of buckets
type BucketScanner interface {
	// PrefixScan scans the root buckets for keys matching the given prefix.
	PrefixScan(prefix string) []Node
	// PrefixScan scans the buckets in this node for keys matching the given prefix.
	RangeScan(min, max string) []Node
}

// PrefixScan scans the buckets in this node for keys matching the given prefix.
func (n *node) PrefixScan(prefix string) []Node {
	if n.tx != nil {
		return n.prefixScan(n.tx, prefix)
	}

	var nodes []Node

	n.readTx(func(tx *bolt.Tx) error {
		nodes = n.prefixScan(tx, prefix)
		return nil
	})

	return nodes
}

func (n *node) prefixScan(tx *bolt.Tx, prefix string) []Node {
	var (
		prefixBytes = []byte(prefix)
		nodes []Node
		c = n.cursor(tx)
	)

	if c == nil {
		return nil
	}

	for k, v := c.Seek(prefixBytes); k != nil && bytes.HasPrefix(k, prefixBytes); k, v = c.Next() {
		if v != nil {
			continue
		}

		nodes = append(nodes, n.From(string(k)))
	}

	return nodes
}

// RangeScan scans the buckets in this node over a range such as a sortable time range.
func (n *node) RangeScan(min, max string) []Node {
	if n.tx != nil {
		return n.rangeScan(n.tx, min, max)
	}

	var nodes []Node

	n.readTx(func(tx *bolt.Tx) error {
		nodes = n.rangeScan(tx, min, max)
		return nil
	})

	return nodes
}

func (n *node) rangeScan(tx *bolt.Tx, min, max string) []Node {
	var (
		minBytes = []byte(min)
		maxBytes = []byte(max)
		nodes []Node
		c = n.cursor(tx)
	)

	for k, v := c.Seek(minBytes); k != nil && bytes.Compare(k, maxBytes) <= 0; k, v = c.Next() {
		if v != nil {
			continue
		}

		nodes = append(nodes, n.From(string(k)))
	}

	return nodes

}

func (n *node) cursor(tx *bolt.Tx) *bolt.Cursor {
	var c *bolt.Cursor

	if len(n.rootBucket) > 0 {
		b := n.GetBucket(tx)
		if b == nil {
			return nil
		}
		c = b.Cursor()
	} else {
		c = tx.Cursor()
	}

	return c
}
620 vendor/github.com/asdine/storm/v3/sink.go generated vendored
@@ -1,620 +0,0 @@
package storm

import (
	"reflect"
	"sort"
	"time"
	"github.com/asdine/storm/v3/index"
	"github.com/asdine/storm/v3/q"
	bolt "go.etcd.io/bbolt"
)

type item struct {
	value *reflect.Value
	bucket *bolt.Bucket
	k []byte
	v []byte
}

func newSorter(n Node, snk sink) *sorter {
	return &sorter{
		node: n,
		sink: snk,
		skip: 0,
		limit: -1,
		list: make([]*item, 0),
		err: make(chan error),
		done: make(chan struct{}),
	}
}

type sorter struct {
	node Node
	sink sink
	list []*item
	skip int
	limit int
	orderBy []string
	reverse bool
	err chan error
	done chan struct{}
}

func (s *sorter) filter(tree q.Matcher, bucket *bolt.Bucket, k, v []byte) (bool, error) {
	itm := &item{
		bucket: bucket,
		k: k,
		v: v,
	}
	rsink, ok := s.sink.(reflectSink)
	if !ok {
		return s.add(itm)
	}

	newElem := rsink.elem()
	if err := s.node.Codec().Unmarshal(v, newElem.Interface()); err != nil {
		return false, err
	}
	itm.value = &newElem

	if tree != nil {
		ok, err := tree.Match(newElem.Interface())
		if err != nil {
			return false, err
		}
		if !ok {
			return false, nil
		}
	}

	if len(s.orderBy) == 0 {
		return s.add(itm)
	}

	if _, ok := s.sink.(sliceSink); ok {
		// add directly to sink, we'll apply skip/limits after sorting
		return false, s.sink.add(itm)
	}

	s.list = append(s.list, itm)

	return false, nil
}

func (s *sorter) add(itm *item) (stop bool, err error) {
	if s.limit == 0 {
		return true, nil
	}

	if s.skip > 0 {
		s.skip--
		return false, nil
	}

	if s.limit > 0 {
		s.limit--
	}

	err = s.sink.add(itm)

	return s.limit == 0, err
}

func (s *sorter) compareValue(left reflect.Value, right reflect.Value) int {
	if !left.IsValid() || !right.IsValid() {
		if left.IsValid() {
			return 1
		}
		return -1
	}

	switch left.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		l, r := left.Int(), right.Int()
		if l < r {
			return -1
		}
		if l > r {
			return 1
		}
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		l, r := left.Uint(), right.Uint()
		if l < r {
			return -1
		}
		if l > r {
			return 1
		}
	case reflect.Float32, reflect.Float64:
		l, r := left.Float(), right.Float()
		if l < r {
			return -1
		}
		if l > r {
			return 1
		}
	case reflect.String:
		l, r := left.String(), right.String()
		if l < r {
			return -1
		}
		if l > r {
			return 1
		}
	case reflect.Struct:
		if lt, lok := left.Interface().(time.Time); lok {
			if rt, rok := right.Interface().(time.Time); rok {
				if lok && rok {
					if lt.Before(rt) {
						return -1
					} else {
						return 1
					}
				}
			}
		}
	default:
		rawLeft, err := toBytes(left.Interface(), s.node.Codec())
		if err != nil {
			return -1
		}
		rawRight, err := toBytes(right.Interface(), s.node.Codec())
		if err != nil {
			return 1
		}

		l, r := string(rawLeft), string(rawRight)
		if l < r {
			return -1
		}
		if l > r {
			return 1
		}
	}

	return 0
}

func (s *sorter) less(leftElem reflect.Value, rightElem reflect.Value) bool {
for _, orderBy := range s.orderBy {
|
||||
leftField := reflect.Indirect(leftElem).FieldByName(orderBy)
|
||||
if !leftField.IsValid() {
|
||||
s.err <- ErrNotFound
|
||||
return false
|
||||
}
|
||||
rightField := reflect.Indirect(rightElem).FieldByName(orderBy)
|
||||
if !rightField.IsValid() {
|
||||
s.err <- ErrNotFound
|
||||
return false
|
||||
}
|
||||
|
||||
direction := 1
|
||||
if s.reverse {
|
||||
direction = -1
|
||||
}
|
||||
|
||||
switch s.compareValue(leftField, rightField) * direction {
|
||||
case -1:
|
||||
return true
|
||||
case 1:
|
||||
return false
|
||||
default:
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *sorter) flush() error {
|
||||
if len(s.orderBy) == 0 {
|
||||
return s.sink.flush()
|
||||
}
|
||||
|
||||
go func() {
|
||||
sort.Sort(s)
|
||||
close(s.err)
|
||||
}()
|
||||
err := <-s.err
|
||||
close(s.done)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ssink, ok := s.sink.(sliceSink); ok {
|
||||
if !ssink.slice().IsValid() {
|
||||
return s.sink.flush()
|
||||
}
|
||||
if s.skip >= ssink.slice().Len() {
|
||||
ssink.reset()
|
||||
return s.sink.flush()
|
||||
}
|
||||
leftBound := s.skip
|
||||
if leftBound < 0 {
|
||||
leftBound = 0
|
||||
}
|
||||
limit := s.limit
|
||||
if s.limit < 0 {
|
||||
limit = 0
|
||||
}
|
||||
|
||||
rightBound := leftBound + limit
|
||||
if rightBound > ssink.slice().Len() || rightBound == leftBound {
|
||||
rightBound = ssink.slice().Len()
|
||||
}
|
||||
ssink.setSlice(ssink.slice().Slice(leftBound, rightBound))
|
||||
return s.sink.flush()
|
||||
}
|
||||
|
||||
for _, itm := range s.list {
|
||||
if itm == nil {
|
||||
break
|
||||
}
|
||||
stop, err := s.add(itm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if stop {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return s.sink.flush()
|
||||
}
|
||||
|
||||
func (s *sorter) Len() int {
|
||||
// skip if we encountered an earlier error
|
||||
select {
|
||||
case <-s.done:
|
||||
return 0
|
||||
default:
|
||||
}
|
||||
if ssink, ok := s.sink.(sliceSink); ok {
|
||||
return ssink.slice().Len()
|
||||
}
|
||||
return len(s.list)
|
||||
|
||||
}
|
||||
|
||||
func (s *sorter) Less(i, j int) bool {
|
||||
// skip if we encountered an earlier error
|
||||
select {
|
||||
case <-s.done:
|
||||
return false
|
||||
default:
|
||||
}
|
||||
|
||||
if ssink, ok := s.sink.(sliceSink); ok {
|
||||
return s.less(ssink.slice().Index(i), ssink.slice().Index(j))
|
||||
}
|
||||
return s.less(*s.list[i].value, *s.list[j].value)
|
||||
}
|
||||
|
||||
type sink interface {
|
||||
bucketName() string
|
||||
flush() error
|
||||
add(*item) error
|
||||
readOnly() bool
|
||||
}
|
||||
|
||||
type reflectSink interface {
|
||||
elem() reflect.Value
|
||||
}
|
||||
|
||||
type sliceSink interface {
|
||||
slice() reflect.Value
|
||||
setSlice(reflect.Value)
|
||||
reset()
|
||||
}
|
||||
|
||||
func newListSink(node Node, to interface{}) (*listSink, error) {
|
||||
ref := reflect.ValueOf(to)
|
||||
|
||||
if ref.Kind() != reflect.Ptr || reflect.Indirect(ref).Kind() != reflect.Slice {
|
||||
return nil, ErrSlicePtrNeeded
|
||||
}
|
||||
|
||||
sliceType := reflect.Indirect(ref).Type()
|
||||
elemType := sliceType.Elem()
|
||||
|
||||
if elemType.Kind() == reflect.Ptr {
|
||||
elemType = elemType.Elem()
|
||||
}
|
||||
|
||||
if elemType.Name() == "" {
|
||||
return nil, ErrNoName
|
||||
}
|
||||
|
||||
return &listSink{
|
||||
node: node,
|
||||
ref: ref,
|
||||
isPtr: sliceType.Elem().Kind() == reflect.Ptr,
|
||||
elemType: elemType,
|
||||
name: elemType.Name(),
|
||||
results: reflect.MakeSlice(reflect.Indirect(ref).Type(), 0, 0),
|
||||
}, nil
|
||||
}
|
||||
|
||||
type listSink struct {
|
||||
node Node
|
||||
ref reflect.Value
|
||||
results reflect.Value
|
||||
elemType reflect.Type
|
||||
name string
|
||||
isPtr bool
|
||||
idx int
|
||||
}
|
||||
|
||||
func (l *listSink) slice() reflect.Value {
|
||||
return l.results
|
||||
}
|
||||
|
||||
func (l *listSink) setSlice(s reflect.Value) {
|
||||
l.results = s
|
||||
}
|
||||
|
||||
func (l *listSink) reset() {
|
||||
l.results = reflect.MakeSlice(reflect.Indirect(l.ref).Type(), 0, 0)
|
||||
}
|
||||
|
||||
func (l *listSink) elem() reflect.Value {
|
||||
if l.results.IsValid() && l.idx < l.results.Len() {
|
||||
return l.results.Index(l.idx).Addr()
|
||||
}
|
||||
return reflect.New(l.elemType)
|
||||
}
|
||||
|
||||
func (l *listSink) bucketName() string {
|
||||
return l.name
|
||||
}
|
||||
|
||||
func (l *listSink) add(i *item) error {
|
||||
if l.idx == l.results.Len() {
|
||||
if l.isPtr {
|
||||
l.results = reflect.Append(l.results, *i.value)
|
||||
} else {
|
||||
l.results = reflect.Append(l.results, reflect.Indirect(*i.value))
|
||||
}
|
||||
}
|
||||
|
||||
l.idx++
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *listSink) flush() error {
|
||||
if l.results.IsValid() && l.results.Len() > 0 {
|
||||
reflect.Indirect(l.ref).Set(l.results)
|
||||
return nil
|
||||
}
|
||||
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
func (l *listSink) readOnly() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func newFirstSink(node Node, to interface{}) (*firstSink, error) {
|
||||
ref := reflect.ValueOf(to)
|
||||
|
||||
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
|
||||
return nil, ErrStructPtrNeeded
|
||||
}
|
||||
|
||||
return &firstSink{
|
||||
node: node,
|
||||
ref: ref,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type firstSink struct {
|
||||
node Node
|
||||
ref reflect.Value
|
||||
found bool
|
||||
}
|
||||
|
||||
func (f *firstSink) elem() reflect.Value {
|
||||
return reflect.New(reflect.Indirect(f.ref).Type())
|
||||
}
|
||||
|
||||
func (f *firstSink) bucketName() string {
|
||||
return reflect.Indirect(f.ref).Type().Name()
|
||||
}
|
||||
|
||||
func (f *firstSink) add(i *item) error {
|
||||
reflect.Indirect(f.ref).Set(i.value.Elem())
|
||||
f.found = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *firstSink) flush() error {
|
||||
if !f.found {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *firstSink) readOnly() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func newDeleteSink(node Node, kind interface{}) (*deleteSink, error) {
|
||||
ref := reflect.ValueOf(kind)
|
||||
|
||||
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
|
||||
return nil, ErrStructPtrNeeded
|
||||
}
|
||||
|
||||
return &deleteSink{
|
||||
node: node,
|
||||
ref: ref,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type deleteSink struct {
|
||||
node Node
|
||||
ref reflect.Value
|
||||
removed int
|
||||
}
|
||||
|
||||
func (d *deleteSink) elem() reflect.Value {
|
||||
return reflect.New(reflect.Indirect(d.ref).Type())
|
||||
}
|
||||
|
||||
func (d *deleteSink) bucketName() string {
|
||||
return reflect.Indirect(d.ref).Type().Name()
|
||||
}
|
||||
|
||||
func (d *deleteSink) add(i *item) error {
|
||||
info, err := extract(&d.ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for fieldName, fieldCfg := range info.Fields {
|
||||
if fieldCfg.Index == "" {
|
||||
continue
|
||||
}
|
||||
idx, err := getIndex(i.bucket, fieldCfg.Index, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = idx.RemoveID(i.k)
|
||||
if err != nil {
|
||||
if err == index.ErrNotFound {
|
||||
return ErrNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
d.removed++
|
||||
return i.bucket.Delete(i.k)
|
||||
}
|
||||
|
||||
func (d *deleteSink) flush() error {
|
||||
if d.removed == 0 {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *deleteSink) readOnly() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func newCountSink(node Node, kind interface{}) (*countSink, error) {
|
||||
ref := reflect.ValueOf(kind)
|
||||
|
||||
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
|
||||
return nil, ErrStructPtrNeeded
|
||||
}
|
||||
|
||||
return &countSink{
|
||||
node: node,
|
||||
ref: ref,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type countSink struct {
|
||||
node Node
|
||||
ref reflect.Value
|
||||
counter int
|
||||
}
|
||||
|
||||
func (c *countSink) elem() reflect.Value {
|
||||
return reflect.New(reflect.Indirect(c.ref).Type())
|
||||
}
|
||||
|
||||
func (c *countSink) bucketName() string {
|
||||
return reflect.Indirect(c.ref).Type().Name()
|
||||
}
|
||||
|
||||
func (c *countSink) add(i *item) error {
|
||||
c.counter++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *countSink) flush() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *countSink) readOnly() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func newRawSink() *rawSink {
|
||||
return &rawSink{}
|
||||
}
|
||||
|
||||
type rawSink struct {
|
||||
results [][]byte
|
||||
execFn func([]byte, []byte) error
|
||||
}
|
||||
|
||||
func (r *rawSink) add(i *item) error {
|
||||
if r.execFn != nil {
|
||||
err := r.execFn(i.k, i.v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
r.results = append(r.results, i.v)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *rawSink) bucketName() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
func (r *rawSink) flush() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *rawSink) readOnly() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func newEachSink(to interface{}) (*eachSink, error) {
|
||||
ref := reflect.ValueOf(to)
|
||||
|
||||
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
|
||||
return nil, ErrStructPtrNeeded
|
||||
}
|
||||
|
||||
return &eachSink{
|
||||
ref: ref,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type eachSink struct {
|
||||
ref reflect.Value
|
||||
execFn func(interface{}) error
|
||||
}
|
||||
|
||||
func (e *eachSink) elem() reflect.Value {
|
||||
return reflect.New(reflect.Indirect(e.ref).Type())
|
||||
}
|
||||
|
||||
func (e *eachSink) bucketName() string {
|
||||
return reflect.Indirect(e.ref).Type().Name()
|
||||
}
|
||||
|
||||
func (e *eachSink) add(i *item) error {
|
||||
return e.execFn(i.value.Interface())
|
||||
}
|
||||
|
||||
func (e *eachSink) flush() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *eachSink) readOnly() bool {
|
||||
return true
|
||||
}
|
||||
22
vendor/github.com/asdine/storm/v3/sink_sorter_swap.go
generated
vendored
@@ -1,22 +0,0 @@
20
vendor/github.com/asdine/storm/v3/sink_sorter_swap_go1.8.go
generated
vendored
@@ -1,20 +0,0 @@
425
vendor/github.com/asdine/storm/v3/store.go
generated
vendored
@@ -1,425 +0,0 @@
142
vendor/github.com/asdine/storm/v3/storm.go
generated
vendored
@@ -1,142 +0,0 @@
52
vendor/github.com/asdine/storm/v3/transaction.go
generated
vendored
@@ -1,52 +0,0 @@
4
vendor/github.com/asdine/storm/v3/version.go
generated
vendored
@@ -1,4 +0,0 @@
22
vendor/github.com/cespare/xxhash/v2/LICENSE.txt
generated
vendored
@@ -1,22 +0,0 @@
Some files were not shown because too many files have changed in this diff.