mirror of https://github.com/HDT3213/godis.git

support mset in cluster
cluster/commands/mget.go (new file, 2 lines)

@@ -0,0 +1,2 @@
+package commands
@@ -8,13 +8,28 @@ import (
 )
 
 func init() {
-	core.RegisterCmd("mset_", execMSet_)
+	core.RegisterCmd("mset_", execMSetInLocal)
+	core.RegisterCmd("mset", execMSet)
+
 }
 
 type CmdLine = [][]byte
 
-// execMSet_ executes msets in local node
-func execMSet_(cluster *core.Cluster, c redis.Connection, cmdLine CmdLine) redis.Reply {
+// node -> keys on the node
+type RouteMap map[string][]string
+
+func getRouteMap(cluster *core.Cluster, keys []string) RouteMap {
+	m := make(RouteMap)
+	for _, key := range keys {
+		slot := cluster.GetSlot(key)
+		node := cluster.PickNode(slot)
+		m[node] = append(m[node], key)
+	}
+	return m
+}
+
+// execMSetInLocal executes msets in local node
+func execMSetInLocal(cluster *core.Cluster, c redis.Connection, cmdLine CmdLine) redis.Reply {
 	if len(cmdLine) < 3 {
 		return protocol.MakeArgNumErrReply("mset")
 	}
@@ -22,15 +37,34 @@ func execMSet_(cluster *core.Cluster, c redis.Connection, cmdLine CmdLine) redis
 	return cluster.LocalExec(c, cmdLine)
 }
 
-func requestRollback(cluster *core.Cluster, c redis.Connection, txId string, routeMap map[string][]string) {
+func execMSet(cluster *core.Cluster, c redis.Connection, cmdLine CmdLine) redis.Reply {
+	if len(cmdLine) < 3 || len(cmdLine)%2 != 1 {
+		return protocol.MakeArgNumErrReply("mset")
+	}
+	var keys []string
+	for i := 1; i < len(cmdLine); i += 2 {
+		keys = append(keys, string(cmdLine[i]))
+	}
+	routeMap := getRouteMap(cluster, keys)
+	if len(routeMap) == 1 {
+		// only one node, do it fast
+		for node := range routeMap {
+			cmdLine[0] = []byte("mset_")
+			return cluster.Relay(node, c, cmdLine)
+		}
+	}
+	return execMSetSlow(cluster, c, cmdLine, routeMap)
+}
+
+func requestRollback(cluster *core.Cluster, c redis.Connection, txId string, routeMap RouteMap) {
 	rollbackCmd := utils.ToCmdLine("rollback", txId)
 	for node := range routeMap {
 		cluster.Relay(node, c, rollbackCmd)
 	}
 }
 
 // execMSetSlow execute mset through tcc
-func execMSetSlow(cluster *core.Cluster, c redis.Connection, cmdLine CmdLine, routeMap map[string][]string) redis.Reply {
+func execMSetSlow(cluster *core.Cluster, c redis.Connection, cmdLine CmdLine, routeMap RouteMap) redis.Reply {
 	txId := utils.RandString(6)
 
 	keyValues := make(map[string][]byte)
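For reference, a minimal standalone sketch (not repo code; the hashing below is a stand-in for the cluster's CRC32 slot routing) of the decision execMSet makes above: keys sit at the odd positions of the command line, each key is bucketed under its owning node, and a single-bucket result takes the fast relay path while anything else falls back to the tcc-based execMSetSlow.

package main

import "fmt"

func main() {
	// mset k1 v1 k2 v2  (keys are the odd-indexed arguments)
	cmdLine := [][]byte{
		[]byte("mset"),
		[]byte("k1"), []byte("v1"),
		[]byte("k2"), []byte("v2"),
	}
	var keys []string
	for i := 1; i < len(cmdLine); i += 2 {
		keys = append(keys, string(cmdLine[i]))
	}

	// stand-in for cluster.GetSlot + cluster.PickNode
	pickNode := func(key string) string {
		if key[len(key)-1]%2 == 0 {
			return "node-a"
		}
		return "node-b"
	}

	routeMap := make(map[string][]string)
	for _, key := range keys {
		node := pickNode(key)
		routeMap[node] = append(routeMap[node], key)
	}

	if len(routeMap) == 1 {
		fmt.Println("fast path: relay a single mset_ to the one owner node")
	} else {
		fmt.Printf("slow path: tcc mset across %d nodes: %v\n", len(routeMap), routeMap)
	}
}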
@@ -62,7 +96,7 @@ func execMSetSlow(cluster *core.Cluster, c redis.Connection, cmdLine CmdLine, ro
 
 	// send commit request
 	commiteCmd := utils.ToCmdLine("commit", txId)
 	for node := range nodePrepareCmdMap {
 		reply := cluster.Relay(node, c, commiteCmd)
 		if protocol.IsErrorReply(reply) {
 			requestRollback(cluster, c, txId, routeMap)
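The prepare phase of execMSetSlow is not visible in the hunks above; only the keyValues map, the commit loop over nodePrepareCmdMap, and the rollback helper appear. The following is a loose, hypothetical sketch of what that elided step might look like; the "prepare" command name, its argument layout, and the buildPrepareCmds helper are assumptions rather than code taken from the diff.

// Hypothetical helper, not from the repo: build one prepare command per node,
// wrapping the node-local "mset_" sub-command in a transaction identified by txId.
func buildPrepareCmds(txId string, routeMap map[string][]string, keyValues map[string][]byte) map[string][][]byte {
	nodePrepareCmdMap := make(map[string][][]byte)
	for node, keys := range routeMap {
		cmd := [][]byte{[]byte("prepare"), []byte(txId), []byte("mset_")}
		for _, key := range keys {
			cmd = append(cmd, []byte(key), keyValues[key])
		}
		nodePrepareCmdMap[node] = cmd
	}
	return nodePrepareCmdMap
}

Each node would then be relayed its prepare command, and an error reply at that stage would trigger requestRollback before the commit loop shown above ever runs.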
cluster/commands/mset_test.go (new file, 20 lines)

@@ -0,0 +1,20 @@
+package commands
+
+import (
+	"testing"
+
+	"github.com/hdt3213/godis/cluster/core"
+	"github.com/hdt3213/godis/lib/utils"
+	"github.com/hdt3213/godis/redis/connection"
+)
+
+func TestMset(t *testing.T) {
+	id1 := "1"
+	id2 := "2"
+	nodes := core.MakeTestCluster([]string{id1, id2})
+	node1 := nodes[id1]
+	c := connection.NewFakeConn()
+	// 1, 2 will be routed to node1 and node2, see MakeTestCluster
+	res := execMSet(node1, c, utils.ToCmdLine("mset", "1", "1", "2", "2"))
+	println(res)
+}
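Given the MakeTestCluster backdoors shown further below (a numeric key maps directly to that slot number, and a slot is assigned to ids[slot % len(ids)]), the two keys in this test land on different nodes, which forces execMSet onto the multi-node path. A small worked check, assuming nothing beyond those two backdoors:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	ids := []string{"1", "2"} // same node ids as in TestMset
	for _, key := range []string{"1", "2"} {
		slot, _ := strconv.Atoi(key) // test backdoor: a numeric key selects that slot
		node := ids[slot%len(ids)]   // test backdoor: slot modulo node count
		fmt.Printf("key %s -> slot %d -> node %s\n", key, slot, node)
	}
	// key 1 -> slot 1 -> node 2
	// key 2 -> slot 2 -> node 1
	// each node owns one key, so execMSet cannot take the single-node fast path
}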
@@ -62,7 +62,7 @@ func RegisterDefaultCmd(name string) {
 // relay command to responsible peer, and return its protocol to client
 func DefaultFunc(cluster *Cluster, c redis.Connection, args [][]byte) redis.Reply {
 	key := string(args[1])
-	slotId := GetSlot(key)
+	slotId := cluster.GetSlot(key)
 	peer := cluster.PickNode(slotId)
 	if peer == cluster.SelfID() {
 		// to self db
@@ -22,6 +22,11 @@ type Cluster struct {
 	slotsManager    *slotsManager
 	rebalanceManger *rebalanceManager
 	transactions    *TransactionManager
+
+	// allow inject route implementation
+	getSlotImpl  func(key string) uint32
+	pickNodeImpl func(slotID uint32) string
+	id_          string // for tests only
 }
 
 type Config struct {
@@ -32,6 +37,9 @@ type Config struct {
 }
 
 func (c *Cluster) SelfID() string {
+	if c.raftNode == nil {
+		return c.id_
+	}
 	return c.raftNode.Cfg.ID()
 }
 
@@ -130,6 +138,12 @@ func NewCluster(cfg *Config) (*Cluster, error) {
 		slotsManager: newSlotsManager(),
 		transactions: newTransactionManager(),
 	}
+	cluster.pickNodeImpl = func(slotID uint32) string {
+		return defaultPickNodeImpl(cluster, slotID)
+	}
+	cluster.getSlotImpl = func(key string) uint32 {
+		return defaultGetSlotImpl(cluster, key)
+	}
 	cluster.injectInsertCallback()
 	cluster.injectDeleteCallback()
 	return cluster, nil
@@ -46,7 +46,7 @@ func (sm *slotStatus) finishExportingWithinLock() {
 
 func (cluster *Cluster) injectInsertCallback() {
 	cb := func(dbIndex int, key string, entity *database.DataEntity) {
-		slotIndex := GetSlot(key)
+		slotIndex := cluster.GetSlot(key)
 		slotManager := cluster.slotsManager.getSlot(slotIndex)
 		slotManager.mu.Lock()
 		defer slotManager.mu.Unlock()
@@ -60,7 +60,7 @@ func (cluster *Cluster) injectInsertCallback() {
 
 func (cluster *Cluster) injectDeleteCallback() {
 	cb := func(dbIndex int, key string, entity *database.DataEntity) {
-		slotIndex := GetSlot(key)
+		slotIndex := cluster.GetSlot(key)
 		slotManager := cluster.slotsManager.getSlot(slotIndex)
 		slotManager.mu.Lock()
 		defer slotManager.mu.Unlock()
@@ -79,7 +79,7 @@ func execCommit(cluster *Cluster, c redis.Connection, cmdLine CmdLine) redis.Rep
 	}
 	cluster.transactions.mu.Unlock()
 
-	resp := cluster.db.Exec(c, tx.realCmdLine)
+	resp := cluster.db.ExecWithLock(c, tx.realCmdLine)
 
 	// unlock regardless of result
 	cluster.db.RWUnLocks(0, tx.writeKeys, tx.readKeys)
@@ -88,7 +88,8 @@ func execCommit(cluster *Cluster, c redis.Connection, cmdLine CmdLine) redis.Rep
 		// do not delete transaction, waiting rollback
 		return resp
 	}
-	// todo delete transaction after deadline
+
+	// todo: delete transaction after deadline
 	// cluster.transactions.mu.Lock()
 	// delete(cluster.transactions.txs, txId)
 	// cluster.transactions.mu.Unlock()
@@ -115,7 +116,7 @@ func execRollback(cluster *Cluster, c redis.Connection, cmdLine CmdLine) redis.R
 	cluster.db.RWLocks(0, tx.writeKeys, tx.readKeys)
 	for i := len(tx.undoLogs) - 1; i >= 0; i-- {
 		cmdline := tx.undoLogs[i]
-		cluster.db.Exec(c, cmdline)
+		cluster.db.ExecWithLock(c, cmdline)
 	}
 	cluster.db.RWUnLocks(0, tx.writeKeys, tx.readKeys)
 
cluster/core/tests.go (new file, 43 lines)

@@ -0,0 +1,43 @@
+package core
+
+import (
+	"strconv"
+
+	dbimpl "github.com/hdt3213/godis/database"
+)
+
+// MakeTestCluster creates a cluster for test, which communications are done through local function calls.
+func MakeTestCluster(ids []string) map[string]*Cluster {
+	nodes := make(map[string]*Cluster)
+	connections := NewInMemConnectionFactory()
+	connections.nodes = nodes
+	for _, id := range ids {
+		db := dbimpl.NewStandaloneServer()
+		cluster := &Cluster{
+			db:              db,
+			config:          &Config{},
+			connections:     connections,
+			rebalanceManger: newRebalanceManager(),
+			slotsManager:    newSlotsManager(),
+			transactions:    newTransactionManager(),
+			id_:             id,
+		}
+		cluster.pickNodeImpl = func(slotID uint32) string {
+			// skip raft for test
+			index := int(slotID) % len(ids)
+			return ids[index]
+		}
+		cluster.getSlotImpl = func(key string) uint32 {
+			// backdoor for test
+			i, err := strconv.Atoi(key)
+			if err == nil && i < SlotCount {
+				return uint32(i)
+			}
+			return defaultGetSlotImpl(cluster, key)
+		}
+		cluster.injectInsertCallback()
+		cluster.injectDeleteCallback()
+		nodes[id] = cluster
+	}
+	return nodes
+}
@@ -52,15 +52,23 @@ func GetPartitionKey(key string) {
 	return key[beg+1 : end]
 }
 
-func GetSlot(key string) uint32 {
+func defaultGetSlotImpl(cluster *Cluster, key string) uint32 {
 	partitionKey := GetPartitionKey(key)
 	return crc32.ChecksumIEEE([]byte(partitionKey)) % uint32(SlotCount)
 }
 
+func (cluster *Cluster) GetSlot(key string) uint32 {
+	return cluster.getSlotImpl(key)
+}
+
+func defaultPickNodeImpl(cluster *Cluster, slotID uint32) string {
+	return cluster.raftNode.FSM.PickNode(slotID)
+}
+
 // pickNode returns the node id hosting the given slot.
 // If the slot is migrating, return the node which is exporting the slot
 func (cluster *Cluster) PickNode(slotID uint32) string {
-	return cluster.raftNode.FSM.PickNode(slotID)
+	return cluster.pickNodeImpl(slotID)
 }
 
 // format: raft.committedindex
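Slot assignment hashes only the partition key, i.e. the text between '{' and '}' when a hash tag is present, so keys sharing a tag always fall into the same slot and onto the same node; that is what lets a multi-key mset take the single-relay fast path. A standalone illustration follows (the tag extraction paraphrases GetPartitionKey, and 16384 is an illustrative slot count rather than necessarily the real core.SlotCount):

package main

import (
	"fmt"
	"hash/crc32"
)

// paraphrase of GetPartitionKey: hash only the part between '{' and '}', if any
func partitionKey(key string) string {
	beg := -1
	for i := 0; i < len(key); i++ {
		switch key[i] {
		case '{':
			beg = i
		case '}':
			if beg >= 0 {
				return key[beg+1 : i]
			}
		}
	}
	return key
}

func main() {
	const slotCount = 16384 // illustrative; see core.SlotCount for the real value
	for _, key := range []string{"user:{42}:name", "user:{42}:age", "other"} {
		slot := crc32.ChecksumIEEE([]byte(partitionKey(key))) % uint32(slotCount)
		fmt.Printf("%-15s -> slot %d\n", key, slot)
	}
	// the two {42} keys share a slot, so an mset over them needs only one node
}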