mirror of https://github.com/HDT3213/godis.git (synced 2025-10-18 14:50:38 +08:00)
raft cluster
wip: raft does not care about migrating
wip: optimize code
wip: raft election
wip
wip: fix raft leader missing log entries
wip
fix a dead lock
batch set slot route
wip: raft persist
wip
refactor cluster suite
remove relay
rename relay2
refactor: allow customizing client factory
test raft
refactor re-balance
avoid errors caused by inconsistent status on follower nodes during raft commits
test raft election
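The "allow customizing client factory" change replaces the old connection-pool helpers (getPeerClient/returnPeerClient) with a factory owned by the Cluster, so tests can inject a stub instead of opening real peer connections. The following is a minimal sketch of the shape implied by the diff below; only the GetPeerClient/ReturnPeerClient method names come from the code, while the interface and type names here are assumptions.

package cluster

import "github.com/hdt3213/godis/interface/redis"

// peerClient is the minimal capability relay needs from a peer connection.
// The concrete type in the repository (for example *client.Client) may differ.
type peerClient interface {
	Send(cmdLine [][]byte) redis.Reply
}

// peerClientFactory mirrors the cluster.clientFactory.GetPeerClient /
// ReturnPeerClient calls used by relay in the diff below; swapping in a stub
// implementation is what makes the relay path testable.
type peerClientFactory interface {
	GetPeerClient(peerAddr string) (peerClient, error)
	ReturnPeerClient(peerAddr string, cli peerClient) error
}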
cluster/com.go (105 changed lines)
@@ -1,69 +1,86 @@
 package cluster
 
 import (
-	"errors"
 	"github.com/hdt3213/godis/interface/redis"
 	"github.com/hdt3213/godis/lib/utils"
-	"github.com/hdt3213/godis/redis/client"
 	"github.com/hdt3213/godis/redis/connection"
 	"github.com/hdt3213/godis/redis/protocol"
-	"strconv"
 )
 
-func (cluster *Cluster) getPeerClient(peer string) (*client.Client, error) {
-	pool, ok := cluster.nodeConnections[peer]
-	if !ok {
-		return nil, errors.New("connection pool not found")
-	}
-	raw, err := pool.Get()
-	if err != nil {
-		return nil, err
-	}
-	conn, ok := raw.(*client.Client)
-	if !ok {
-		return nil, errors.New("connection pool make wrong type")
-	}
-	return conn, nil
-}
-
-func (cluster *Cluster) returnPeerClient(peer string, peerClient *client.Client) error {
-	pool, ok := cluster.nodeConnections[peer]
-	if !ok {
-		return errors.New("connection pool not found")
-	}
-	pool.Put(peerClient)
-	return nil
-}
-
-var defaultRelayImpl = func(cluster *Cluster, node string, c redis.Connection, cmdLine CmdLine) redis.Reply {
-	if node == cluster.self {
+// relay function relays command to peer or calls cluster.Exec
+func (cluster *Cluster) relay(peerId string, c redis.Connection, cmdLine [][]byte) redis.Reply {
+	// use a variable to allow injecting stub for testing, see defaultRelayImpl
+	if peerId == cluster.self {
 		// to self db
-		return cluster.db.Exec(c, cmdLine)
+		return cluster.Exec(c, cmdLine)
 	}
-	peerClient, err := cluster.getPeerClient(node)
+	// peerId is peer.Addr
+	cli, err := cluster.clientFactory.GetPeerClient(peerId)
 	if err != nil {
 		return protocol.MakeErrReply(err.Error())
 	}
 	defer func() {
-		_ = cluster.returnPeerClient(node, peerClient)
+		_ = cluster.clientFactory.ReturnPeerClient(peerId, cli)
 	}()
-	peerClient.Send(utils.ToCmdLine("SELECT", strconv.Itoa(c.GetDBIndex())))
-	return peerClient.Send(cmdLine)
+	return cli.Send(cmdLine)
 }
 
-// relay function relays command to peer
-// select db by c.GetDBIndex()
-// cannot call Prepare, Commit, execRollback of self node
-func (cluster *Cluster) relay(peer string, c redis.Connection, args [][]byte) redis.Reply {
-	// use a variable to allow injecting stub for testing
-	return cluster.relayImpl(cluster, peer, c, args)
+// relayByKey function relays command to peer
+// use routeKey to determine peer node
+func (cluster *Cluster) relayByKey(routeKey string, c redis.Connection, args [][]byte) redis.Reply {
+	slotId := getSlot(routeKey)
+	peer := cluster.pickNode(slotId)
+	return cluster.relay(peer.ID, c, args)
 }
 
 // broadcast function broadcasts command to all node in cluster
 func (cluster *Cluster) broadcast(c redis.Connection, args [][]byte) map[string]redis.Reply {
 	result := make(map[string]redis.Reply)
-	for _, node := range cluster.nodes {
-		reply := cluster.relay(node, c, args)
-		result[node] = reply
+	for _, node := range cluster.topology.GetNodes() {
+		reply := cluster.relay(node.ID, c, args)
+		result[node.Addr] = reply
 	}
 	return result
 }
+
+// ensureKey will migrate key to current node if the key is in a slot migrating to current node
+// invoker should provide with locks of key
+func (cluster *Cluster) ensureKey(key string) protocol.ErrorReply {
+	slotId := getSlot(key)
+	cluster.slotMu.RLock()
+	slot := cluster.slots[slotId]
+	cluster.slotMu.RUnlock()
+	if slot == nil {
+		return nil
+	}
+	if slot.state != slotStateImporting || slot.importedKeys.Has(key) {
+		return nil
+	}
+	resp := cluster.relay(slot.oldNodeID, connection.NewFakeConn(), utils.ToCmdLine("DumpKey_", key))
+	if protocol.IsErrorReply(resp) {
+		return resp.(protocol.ErrorReply)
+	}
+	if protocol.IsEmptyMultiBulkReply(resp) {
+		slot.importedKeys.Add(key)
+		return nil
+	}
+	dumpResp := resp.(*protocol.MultiBulkReply)
+	if len(dumpResp.Args) != 2 {
+		return protocol.MakeErrReply("illegal dump key response")
+	}
+	// reuse copy to command ^_^
+	resp = cluster.db.Exec(connection.NewFakeConn(), [][]byte{
+		[]byte("CopyTo"), []byte(key), dumpResp.Args[0], dumpResp.Args[1],
+	})
+	if protocol.IsErrorReply(resp) {
+		return resp.(protocol.ErrorReply)
+	}
+	slot.importedKeys.Add(key)
+	return nil
+}
+
+func (cluster *Cluster) ensureKeyWithoutLock(key string) protocol.ErrorReply {
+	cluster.db.RWLocks(0, []string{key}, nil)
+	defer cluster.db.RWUnLocks(0, []string{key}, nil)
+	return cluster.ensureKey(key)
+}
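For context on how these helpers are meant to be consumed: a single-key command handler routes through relayByKey, which resolves the key's slot and forwards to (or executes on) the owning node, while keyless commands fan out with broadcast. Below is a minimal sketch assuming handlers keep the (cluster, connection, args) shape used elsewhere in godis; getHandler is a hypothetical name, not part of this commit.

// getHandler is a hypothetical single-key handler. relayByKey picks the node
// owning the key's slot; relay then executes locally or forwards the command
// over a peer client obtained from the client factory.
func getHandler(cluster *Cluster, c redis.Connection, args [][]byte) redis.Reply {
	if len(args) != 2 {
		return protocol.MakeErrReply("ERR wrong number of arguments for 'get' command")
	}
	key := string(args[1])
	return cluster.relayByKey(key, c, args)
}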