raft cluster

wip: raft does not care about migrating

wip: optimize code

wip: raft election

wip

wip: fix raft leader missing log entries

wip

fix a deadlock

batch set slot route

wip: raft persist

wip

refactor cluster suite

remove relay

rename relay2

refactor: allow customizing client factory

test raft

refactor re-balance

avoid errors caused by inconsistent status on follower nodes during raft commits

test raft election
This commit is contained in:
finley
2023-01-02 21:27:06 +08:00
parent df672d4c92
commit bf7f628810
54 changed files with 3122 additions and 703 deletions

View File

@@ -1,97 +1,145 @@
package cluster
import (
"errors"
"github.com/hdt3213/godis/config"
database2 "github.com/hdt3213/godis/database"
"github.com/hdt3213/godis/datastruct/dict"
"github.com/hdt3213/godis/interface/redis"
"github.com/hdt3213/godis/lib/idgenerator"
"github.com/hdt3213/godis/lib/utils"
"github.com/hdt3213/godis/redis/connection"
"github.com/hdt3213/godis/redis/parser"
"github.com/hdt3213/godis/redis/protocol"
"math/rand"
"strings"
"sync"
)
var testNodeA, testNodeB *Cluster
var simulateATimout, simulateBTimout *bool
type mockPicker struct {
nodes []string
type testClientFactory struct {
nodes []*Cluster
timeoutFlags []bool
}
func (picker *mockPicker) AddNode(keys ...string) {
picker.nodes = append(picker.nodes, keys...)
type testClient struct {
targetNode *Cluster
timeoutFlag *bool
conn redis.Connection
}
func (picker *mockPicker) PickNode(key string) string {
for _, n := range picker.nodes {
if strings.Contains(key, n) {
return n
}
func (cli *testClient) Send(cmdLine [][]byte) redis.Reply {
if *cli.timeoutFlag {
return protocol.MakeErrReply("ERR timeout")
}
return picker.nodes[0]
return cli.targetNode.Exec(cli.conn, cmdLine)
}
func makeMockRelay(peer *Cluster) (*bool, func(cluster *Cluster, node string, c redis.Connection, cmdLine CmdLine) redis.Reply) {
simulateTimeout0 := false
simulateTimeout := &simulateTimeout0
return simulateTimeout, func(cluster *Cluster, node string, c redis.Connection, cmdLine CmdLine) redis.Reply {
if len(cmdLine) == 0 {
return protocol.MakeErrReply("ERR command required")
}
if node == cluster.self {
// to self db
cmdName := strings.ToLower(string(cmdLine[0]))
if cmdName == "prepare" {
return execPrepare(cluster, c, cmdLine)
} else if cmdName == "commit" {
return execCommit(cluster, c, cmdLine)
} else if cmdName == "rollback" {
return execRollback(cluster, c, cmdLine)
func (factory *testClientFactory) GetPeerClient(peerAddr string) (peerClient, error) {
for i, n := range factory.nodes {
if n.self == peerAddr {
cli := &testClient{
targetNode: n,
timeoutFlag: &factory.timeoutFlags[i],
conn: connection.NewFakeConn(),
}
return cluster.db.Exec(c, cmdLine)
if config.Properties.RequirePass != "" {
cli.Send(utils.ToCmdLine("AUTH", config.Properties.RequirePass))
}
return cli, nil
}
if *simulateTimeout {
return protocol.MakeErrReply("ERR timeout")
}
cmdName := strings.ToLower(string(cmdLine[0]))
if cmdName == "prepare" {
return execPrepare(peer, c, cmdLine)
} else if cmdName == "commit" {
return execCommit(peer, c, cmdLine)
} else if cmdName == "rollback" {
return execRollback(peer, c, cmdLine)
}
return peer.db.Exec(c, cmdLine)
}
return nil, errors.New("peer not found")
}
func init() {
if config.Properties == nil {
config.Properties = &config.ServerProperties{}
}
addrA := "127.0.0.1:6399"
addrB := "127.0.0.1:7379"
config.Properties.Self = addrA
config.Properties.Peers = []string{addrB}
testNodeA = MakeCluster()
config.Properties.Self = addrB
config.Properties.Peers = []string{addrA}
testNodeB = MakeCluster()
simulateBTimout, testNodeA.relayImpl = makeMockRelay(testNodeB)
testNodeA.peerPicker = &mockPicker{}
testNodeA.peerPicker.AddNode(addrA, addrB)
simulateATimout, testNodeB.relayImpl = makeMockRelay(testNodeA)
testNodeB.peerPicker = &mockPicker{}
testNodeB.peerPicker.AddNode(addrB, addrA)
// mockStream is an in-memory peerStream used by tests.
// It carries the payload channel obtained by parsing targetNode's
// serialized reply, so callers can consume a "streamed" peer response
// without any real network connection.
type mockStream struct {
	targetNode *Cluster
	ch         <-chan *parser.Payload
}
func MakeTestCluster(peers []string) *Cluster {
if config.Properties == nil {
config.Properties = &config.ServerProperties{}
}
config.Properties.Self = "127.0.0.1:6399"
config.Properties.Peers = peers
return MakeCluster()
// Stream returns the channel of parsed payloads produced by the mocked peer.
func (s *mockStream) Stream() <-chan *parser.Payload {
	return s.ch
}
// Close implements peerStream; the in-memory stream holds no resources,
// so there is nothing to release.
func (s *mockStream) Close() error {
	return nil
}
// NewStream executes cmdLine on the in-memory node registered for peerAddr
// and exposes the reply as a parsed payload stream, mimicking a streaming
// peer connection. It returns an error when no node matches peerAddr.
func (factory *testClientFactory) NewStream(peerAddr string, cmdLine CmdLine) (peerStream, error) {
	for _, node := range factory.nodes {
		if node.self != peerAddr {
			continue
		}
		fakeConn := connection.NewFakeConn()
		// authenticate first when the cluster requires a password
		if pass := config.Properties.RequirePass; pass != "" {
			node.Exec(fakeConn, utils.ToCmdLine("AUTH", pass))
		}
		reply := node.Exec(fakeConn, cmdLine)
		// feed the serialized reply back through the protocol parser so the
		// caller receives it payload by payload, as a real stream would
		fakeConn.Write(reply.ToBytes())
		return &mockStream{
			targetNode: node,
			ch:         parser.ParseStream(fakeConn),
		}, nil
	}
	return nil, errors.New("node not found")
}
// ReturnPeerClient implements the client-factory interface; test clients
// are not pooled, so there is nothing to recycle.
func (factory *testClientFactory) ReturnPeerClient(peer string, peerClient peerClient) error {
	return nil
}
// Close implements the client-factory interface; the in-memory factory
// owns no connections or background goroutines to shut down.
func (factory *testClientFactory) Close() error {
	return nil
}
// mockClusterNodes creates a fake cluster for test.
// timeoutFlags must have the same length as addresses; setting
// timeoutFlags[i] == true simulates a timeout on addresses[i].
// All nodes share one fixedTopology (slots assigned round-robin) and one
// testClientFactory that routes commands in memory instead of over the
// network.
func mockClusterNodes(addresses []string, timeoutFlags []bool) []*Cluster {
	// fail fast on the documented precondition instead of letting
	// GetPeerClient panic later with an opaque index-out-of-range
	if len(timeoutFlags) != len(addresses) {
		panic("mockClusterNodes: timeoutFlags must have the same length as addresses")
	}
	nodes := make([]*Cluster, len(addresses))
	// build fixedTopology: every address becomes a cluster node
	nodeMap := make(map[string]*Node, len(addresses))
	for _, addr := range addresses {
		nodeMap[addr] = &Node{
			ID:    addr,
			Addr:  addr,
			Slots: nil,
		}
	}
	// distribute the slots round-robin across the nodes
	slots := make([]*Slot, slotCount)
	for i := range slots {
		addr := addresses[i%len(addresses)]
		slots[i] = &Slot{
			ID:     uint32(i),
			NodeID: addr,
			Flags:  0,
		}
		nodeMap[addr].Slots = append(nodeMap[addr].Slots, slots[i])
	}
	// the factory keeps a reference to the (still empty) nodes slice;
	// it is filled in below before any client can be requested
	factory := &testClientFactory{
		nodes:        nodes,
		timeoutFlags: timeoutFlags,
	}
	for i, addr := range addresses {
		topo := &fixedTopology{
			mu:         sync.RWMutex{},
			nodeMap:    nodeMap,
			slots:      slots,
			selfNodeID: addr,
		}
		nodes[i] = &Cluster{
			self:         addr,
			db:           database2.NewStandaloneServer(),
			transactions: dict.MakeSimple(),
			// seed each node's id generator with its own address (was the
			// global config.Properties.Self, which gave every mock node the
			// same seed and risked colliding ids across nodes)
			idGenerator:   idgenerator.MakeGenerator(addr),
			topology:      topo,
			clientFactory: factory,
		}
	}
	return nodes
}
// addresses of the two fake nodes used across this file's tests
var addresses = []string{"127.0.0.1:6399", "127.0.0.1:7379"}
// set timeoutFlags[i] to true to simulate a timeout on addresses[i]
var timeoutFlags = []bool{false, false}
// testCluster is the shared in-memory cluster the tests operate on
var testCluster = mockClusterNodes(addresses, timeoutFlags)
func toArgs(cmd ...string) [][]byte {
args := make([][]byte, len(cmd))
for i, s := range cmd {