rename MultiDB to Server; rename AofHandler to Persister
@@ -85,14 +85,14 @@ type masterStatus struct {
 	rewriting atomic.Boolean
 }
 
-func (mdb *MultiDB) bgSaveForReplication() {
+func (server *Server) bgSaveForReplication() {
 	go func() {
 		defer func() {
 			if e := recover(); e != nil {
 				logger.Errorf("panic: %v", e)
 			}
 		}()
-		if err := mdb.saveForReplication(); err != nil {
+		if err := server.saveForReplication(); err != nil {
 			logger.Errorf("save for replication error: %v", err)
 		}
	}()
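The hunk above shows the pattern godis uses for background saves: the work runs in its own goroutine, panics are converted to log entries, and errors are logged rather than returned. A minimal, self-contained sketch of that pattern (names like runInBackground are illustrative, not from godis):

```go
package main

import (
	"log"
	"time"
)

// runInBackground mirrors the shape of bgSaveForReplication: fire a goroutine,
// turn panics into log entries, and log (rather than return) task errors.
func runInBackground(task func() error) {
	go func() {
		defer func() {
			if e := recover(); e != nil {
				log.Printf("panic: %v", e)
			}
		}()
		if err := task(); err != nil {
			log.Printf("background task error: %v", err)
		}
	}()
}

func main() {
	runInBackground(func() error {
		log.Println("pretending to bg-save an RDB file")
		return nil
	})
	time.Sleep(100 * time.Millisecond) // give the goroutine time to finish
}
```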
@@ -100,23 +100,23 @@ func (mdb *MultiDB) bgSaveForReplication() {
 }
 
 // saveForReplication does bg-save and send rdb to waiting slaves
-func (mdb *MultiDB) saveForReplication() error {
+func (server *Server) saveForReplication() error {
 	rdbFile, err := ioutil.TempFile("", "*.rdb")
 	if err != nil {
 		return fmt.Errorf("create temp rdb failed: %v", err)
 	}
 	rdbFilename := rdbFile.Name()
-	mdb.masterStatus.mu.Lock()
-	mdb.masterStatus.bgSaveState = bgSaveRunning
-	mdb.masterStatus.rdbFilename = rdbFilename // todo: can reuse config.Properties.RDBFilename?
+	server.masterStatus.mu.Lock()
+	server.masterStatus.bgSaveState = bgSaveRunning
+	server.masterStatus.rdbFilename = rdbFilename // todo: can reuse config.Properties.RDBFilename?
 	aofListener := &replAofListener{
-		mdb:     mdb,
-		backlog: mdb.masterStatus.backlog,
+		mdb:     server,
+		backlog: server.masterStatus.backlog,
 	}
-	mdb.masterStatus.aofListener = aofListener
-	mdb.masterStatus.mu.Unlock()
+	server.masterStatus.aofListener = aofListener
+	server.masterStatus.mu.Unlock()
 
-	err = mdb.aofHandler.Rewrite2RDBForReplication(rdbFilename, aofListener, nil)
+	err = server.persister.Rewrite2RDBForReplication(rdbFilename, aofListener, nil)
 	if err != nil {
 		return err
	}
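saveForReplication writes the snapshot to a uniquely named temporary file: in the pattern passed to ioutil.TempFile, the `*` is replaced by a random string. A small sketch (the os.Remove cleanup is this sketch's own addition; the server keeps the file around for slaves):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
)

func main() {
	// "*.rdb" makes ioutil.TempFile substitute a random string for "*",
	// yielding a unique file such as /tmp/123456789.rdb.
	rdbFile, err := ioutil.TempFile("", "*.rdb")
	if err != nil {
		fmt.Printf("create temp rdb failed: %v\n", err)
		return
	}
	defer os.Remove(rdbFile.Name()) // sketch-only cleanup
	fmt.Println("would write RDB snapshot to:", rdbFile.Name())
}
```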
@@ -124,18 +124,18 @@ func (mdb *MultiDB) saveForReplication() error {
 
 	// change bgSaveState and get waitSlaves for sending
 	waitSlaves := make(map[*slaveClient]struct{})
-	mdb.masterStatus.mu.Lock()
-	mdb.masterStatus.bgSaveState = bgSaveFinish
-	for slave := range mdb.masterStatus.waitSlaves {
+	server.masterStatus.mu.Lock()
+	server.masterStatus.bgSaveState = bgSaveFinish
+	for slave := range server.masterStatus.waitSlaves {
 		waitSlaves[slave] = struct{}{}
 	}
-	mdb.masterStatus.waitSlaves = nil
-	mdb.masterStatus.mu.Unlock()
+	server.masterStatus.waitSlaves = nil
+	server.masterStatus.mu.Unlock()
 
 	for slave := range waitSlaves {
-		err = mdb.masterFullReSyncWithSlave(slave)
+		err = server.masterFullReSyncWithSlave(slave)
 		if err != nil {
-			mdb.removeSlave(slave)
+			server.removeSlave(slave)
 			logger.Errorf("masterFullReSyncWithSlave error: %v", err)
 			continue
		}
@@ -143,7 +143,7 @@ func (mdb *MultiDB) saveForReplication() error {
 	return nil
 }
 
-func (mdb *MultiDB) rewriteRDB() error {
+func (server *Server) rewriteRDB() error {
 	rdbFile, err := ioutil.TempFile("", "*.rdb")
 	if err != nil {
		return fmt.Errorf("create temp rdb failed: %v", err)
@@ -152,25 +152,25 @@ func (mdb *MultiDB) rewriteRDB() error {
 	newBacklog := &replBacklog{}
 	aofListener := &replAofListener{
 		backlog: newBacklog,
-		mdb:     mdb,
+		mdb:     server,
 	}
 	hook := func() {
 		// pausing aof first, then lock masterStatus.
 		// use the same order as replAofListener to avoid dead lock
-		mdb.masterStatus.mu.Lock()
-		defer mdb.masterStatus.mu.Unlock()
-		newBacklog.beginOffset = mdb.masterStatus.backlog.currentOffset
+		server.masterStatus.mu.Lock()
+		defer server.masterStatus.mu.Unlock()
+		newBacklog.beginOffset = server.masterStatus.backlog.currentOffset
 	}
-	err = mdb.aofHandler.Rewrite2RDBForReplication(rdbFilename, aofListener, hook)
+	err = server.persister.Rewrite2RDBForReplication(rdbFilename, aofListener, hook)
 	if err != nil { // wait rdb result
 		return err
 	}
-	mdb.masterStatus.mu.Lock()
-	mdb.masterStatus.rdbFilename = rdbFilename
-	mdb.masterStatus.backlog = newBacklog
-	mdb.aofHandler.RemoveListener(mdb.masterStatus.aofListener)
-	mdb.masterStatus.aofListener = aofListener
-	mdb.masterStatus.mu.Unlock()
+	server.masterStatus.mu.Lock()
+	server.masterStatus.rdbFilename = rdbFilename
+	server.masterStatus.backlog = newBacklog
+	server.persister.RemoveListener(server.masterStatus.aofListener)
+	server.masterStatus.aofListener = aofListener
+	server.masterStatus.mu.Unlock()
 	// It is ok to know that new backlog is ready later, so we change readyToSend without sync
 	// But setting readyToSend=true must after new backlog is really ready (that means master.mu.Unlock)
	aofListener.readyToSend = true
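The hook passed to Rewrite2RDBForReplication runs while the AOF is paused, so the new backlog's beginOffset can be pinned to the old backlog's currentOffset without losing or duplicating writes. A hedged sketch of that bookkeeping; the replBacklog fields here are guessed from this diff, not the actual definition:

```go
package main

import (
	"fmt"
	"sync"
)

// replBacklog is a guess at the shape implied by the diff: a byte buffer plus
// the replication offsets of its first byte and of the next byte to append.
type replBacklog struct {
	buf           []byte
	beginOffset   int64
	currentOffset int64
}

func (b *replBacklog) appendBytes(p []byte) {
	b.buf = append(b.buf, p...)
	b.currentOffset += int64(len(p))
}

func main() {
	var mu sync.Mutex
	old := &replBacklog{}
	old.appendBytes([]byte("*1\r\n$4\r\nping\r\n"))

	// The hook runs while AOF is paused: the new backlog starts exactly where
	// the old one currently ends, so no write is lost or duplicated.
	newBacklog := &replBacklog{}
	hook := func() {
		mu.Lock()
		defer mu.Unlock()
		newBacklog.beginOffset = old.currentOffset
		newBacklog.currentOffset = old.currentOffset
	}
	hook()
	fmt.Println("new backlog starts at offset", newBacklog.beginOffset)
}
```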
@@ -178,21 +178,21 @@ func (mdb *MultiDB) rewriteRDB() error {
 }
 
 // masterFullReSyncWithSlave send replication header, rdb file and all backlogs to slave
-func (mdb *MultiDB) masterFullReSyncWithSlave(slave *slaveClient) error {
+func (server *Server) masterFullReSyncWithSlave(slave *slaveClient) error {
 	// write replication header
-	header := "+FULLRESYNC " + mdb.masterStatus.replId + " " +
-		strconv.FormatInt(mdb.masterStatus.backlog.beginOffset, 10) + protocol.CRLF
+	header := "+FULLRESYNC " + server.masterStatus.replId + " " +
+		strconv.FormatInt(server.masterStatus.backlog.beginOffset, 10) + protocol.CRLF
 	_, err := slave.conn.Write([]byte(header))
 	if err != nil {
 		return fmt.Errorf("write replication header to slave failed: %v", err)
 	}
 	// send rdb
-	rdbFile, err := os.Open(mdb.masterStatus.rdbFilename)
+	rdbFile, err := os.Open(server.masterStatus.rdbFilename)
 	if err != nil {
-		return fmt.Errorf("open rdb file %s for replication error: %v", mdb.masterStatus.rdbFilename, err)
+		return fmt.Errorf("open rdb file %s for replication error: %v", server.masterStatus.rdbFilename, err)
 	}
 	slave.state = slaveStateSendingRDB
-	rdbInfo, _ := os.Stat(mdb.masterStatus.rdbFilename)
+	rdbInfo, _ := os.Stat(server.masterStatus.rdbFilename)
 	rdbSize := rdbInfo.Size()
 	rdbHeader := "$" + strconv.FormatInt(rdbSize, 10) + protocol.CRLF
	_, err = slave.conn.Write([]byte(rdbHeader))
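Full resync speaks a RESP-style framing: a `+FULLRESYNC <replid> <offset>\r\n` header, then `$<rdb-size>\r\n` followed by the raw RDB bytes. A sketch that assembles those frames into a buffer standing in for the slave connection (the replId and payload below are made up):

```go
package main

import (
	"bytes"
	"fmt"
	"strconv"
)

const CRLF = "\r\n"

func main() {
	var conn bytes.Buffer // stands in for the slave connection

	replId := "8371b4fb1155b71f4a04d3e1bc3e18c4a990aefd" // made-up 40-char id
	beginOffset := int64(0)
	rdb := []byte("REDIS0009...") // placeholder payload, not a real RDB

	// 1. replication header
	header := "+FULLRESYNC " + replId + " " +
		strconv.FormatInt(beginOffset, 10) + CRLF
	conn.WriteString(header)

	// 2. RDB length prefix, then the raw bytes (no trailing CRLF after an RDB)
	conn.WriteString("$" + strconv.FormatInt(int64(len(rdb)), 10) + CRLF)
	conn.Write(rdb)

	fmt.Printf("%q\n", conn.String())
}
```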
@@ -205,36 +205,36 @@ func (mdb *MultiDB) masterFullReSyncWithSlave(slave *slaveClient) error {
 	}
 
 	// send backlog
-	mdb.masterStatus.mu.RLock()
-	backlog, currentOffset := mdb.masterStatus.backlog.getSnapshot()
-	mdb.masterStatus.mu.RUnlock()
+	server.masterStatus.mu.RLock()
+	backlog, currentOffset := server.masterStatus.backlog.getSnapshot()
+	server.masterStatus.mu.RUnlock()
 	_, err = slave.conn.Write(backlog)
 	if err != nil {
 		return fmt.Errorf("full resync write backlog to slave failed: %v", err)
 	}
 
 	// set slave as online
-	mdb.setSlaveOnline(slave, currentOffset)
+	server.setSlaveOnline(slave, currentOffset)
 	return nil
 }
 
 var cannotPartialSync = errors.New("cannot do partial sync")
 
-func (mdb *MultiDB) masterTryPartialSyncWithSlave(slave *slaveClient, replId string, slaveOffset int64) error {
-	mdb.masterStatus.mu.RLock()
-	if replId != mdb.masterStatus.replId {
-		mdb.masterStatus.mu.RUnlock()
+func (server *Server) masterTryPartialSyncWithSlave(slave *slaveClient, replId string, slaveOffset int64) error {
+	server.masterStatus.mu.RLock()
+	if replId != server.masterStatus.replId {
+		server.masterStatus.mu.RUnlock()
 		return cannotPartialSync
 	}
-	if !mdb.masterStatus.backlog.isValidOffset(slaveOffset) {
-		mdb.masterStatus.mu.RUnlock()
+	if !server.masterStatus.backlog.isValidOffset(slaveOffset) {
+		server.masterStatus.mu.RUnlock()
 		return cannotPartialSync
 	}
-	backlog, currentOffset := mdb.masterStatus.backlog.getSnapshotAfter(slaveOffset)
-	mdb.masterStatus.mu.RUnlock()
+	backlog, currentOffset := server.masterStatus.backlog.getSnapshotAfter(slaveOffset)
+	server.masterStatus.mu.RUnlock()
 
 	// send replication header
-	header := "+CONTINUE " + mdb.masterStatus.replId + protocol.CRLF
+	header := "+CONTINUE " + server.masterStatus.replId + protocol.CRLF
 	_, err := slave.conn.Write([]byte(header))
 	if err != nil {
		return fmt.Errorf("write replication header to slave failed: %v", err)
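Partial sync is only attempted when the slave's replId matches the master's and its offset still falls inside the retained backlog window; otherwise the master falls back to a full resync. This diff doesn't show isValidOffset or getSnapshotAfter, so the sketch below guesses at their behavior:

```go
package main

import "fmt"

type replBacklog struct {
	buf           []byte
	beginOffset   int64
	currentOffset int64
}

// isValidOffset is a guess at the check implied by the diff: the slave's
// offset must still lie inside the window the backlog retains.
func (b *replBacklog) isValidOffset(offset int64) bool {
	return offset >= b.beginOffset && offset <= b.currentOffset
}

// getSnapshotAfter returns the bytes the slave has not seen yet.
func (b *replBacklog) getSnapshotAfter(offset int64) ([]byte, int64) {
	return b.buf[offset-b.beginOffset:], b.currentOffset
}

func main() {
	b := &replBacklog{buf: []byte("abcdef"), beginOffset: 100, currentOffset: 106}
	for _, off := range []int64{99, 103, 107} {
		if b.isValidOffset(off) {
			data, cur := b.getSnapshotAfter(off)
			fmt.Printf("offset %d: CONTINUE with %q up to %d\n", off, data, cur)
		} else {
			fmt.Printf("offset %d: cannot do partial sync, full resync needed\n", off)
		}
	}
}
```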
@@ -246,27 +246,27 @@ func (mdb *MultiDB) masterTryPartialSyncWithSlave(slave *slaveClient, replId str
 	}
 
 	// set slave online
-	mdb.setSlaveOnline(slave, currentOffset)
+	server.setSlaveOnline(slave, currentOffset)
 	return nil
 }
 
 // masterSendUpdatesToSlave only sends data to online slaves after bgSave is finished
 // if bgSave is running, updates will be sent after the saving finished
-func (mdb *MultiDB) masterSendUpdatesToSlave() error {
+func (server *Server) masterSendUpdatesToSlave() error {
 	onlineSlaves := make(map[*slaveClient]struct{})
-	mdb.masterStatus.mu.RLock()
-	beginOffset := mdb.masterStatus.backlog.beginOffset
-	backlog, currentOffset := mdb.masterStatus.backlog.getSnapshot()
-	for slave := range mdb.masterStatus.onlineSlaves {
+	server.masterStatus.mu.RLock()
+	beginOffset := server.masterStatus.backlog.beginOffset
+	backlog, currentOffset := server.masterStatus.backlog.getSnapshot()
+	for slave := range server.masterStatus.onlineSlaves {
 		onlineSlaves[slave] = struct{}{}
 	}
-	mdb.masterStatus.mu.RUnlock()
+	server.masterStatus.mu.RUnlock()
 	for slave := range onlineSlaves {
 		slaveBeginOffset := slave.offset - beginOffset
 		_, err := slave.conn.Write(backlog[slaveBeginOffset:])
 		if err != nil {
 			logger.Errorf("send updates backlog to slave failed: %v", err)
-			mdb.removeSlave(slave)
+			server.removeSlave(slave)
 			continue
 		}
		slave.offset = currentOffset
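Replication offsets are global counters, while a backlog snapshot is indexed from zero, which is why the loop computes slave.offset - beginOffset before slicing. A worked example of that arithmetic:

```go
package main

import "fmt"

func main() {
	// Global replication offsets vs. zero-based indexes into the snapshot:
	// backlog[0] corresponds to global offset beginOffset.
	beginOffset := int64(1000)
	backlog := []byte("set k1 v1 set k2 v2") // illustrative contents
	currentOffset := beginOffset + int64(len(backlog))

	slaveOffset := int64(1010) // this slave already has the first 10 bytes
	pending := backlog[slaveOffset-beginOffset:]
	fmt.Printf("send %q, then advance slave offset %d -> %d\n",
		pending, slaveOffset, currentOffset)
}
```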
@@ -274,48 +274,48 @@ func (mdb *MultiDB) masterSendUpdatesToSlave() error {
 	return nil
 }
 
-func (mdb *MultiDB) execPSync(c redis.Connection, args [][]byte) redis.Reply {
+func (server *Server) execPSync(c redis.Connection, args [][]byte) redis.Reply {
 	replId := string(args[0])
 	replOffset, err := strconv.ParseInt(string(args[1]), 10, 64)
 	if err != nil {
 		return protocol.MakeErrReply("ERR value is not an integer or out of range")
 	}
-	mdb.masterStatus.mu.Lock()
-	defer mdb.masterStatus.mu.Unlock()
-	slave := mdb.masterStatus.slaveMap[c]
+	server.masterStatus.mu.Lock()
+	defer server.masterStatus.mu.Unlock()
+	slave := server.masterStatus.slaveMap[c]
 	if slave == nil {
 		slave = &slaveClient{
 			conn: c,
 		}
 		c.SetSlave()
-		mdb.masterStatus.slaveMap[c] = slave
+		server.masterStatus.slaveMap[c] = slave
 	}
-	if mdb.masterStatus.bgSaveState == bgSaveIdle {
+	if server.masterStatus.bgSaveState == bgSaveIdle {
 		slave.state = slaveStateWaitSaveEnd
-		mdb.masterStatus.waitSlaves[slave] = struct{}{}
-		mdb.bgSaveForReplication()
-	} else if mdb.masterStatus.bgSaveState == bgSaveRunning {
+		server.masterStatus.waitSlaves[slave] = struct{}{}
+		server.bgSaveForReplication()
+	} else if server.masterStatus.bgSaveState == bgSaveRunning {
 		slave.state = slaveStateWaitSaveEnd
-		mdb.masterStatus.waitSlaves[slave] = struct{}{}
-	} else if mdb.masterStatus.bgSaveState == bgSaveFinish {
+		server.masterStatus.waitSlaves[slave] = struct{}{}
+	} else if server.masterStatus.bgSaveState == bgSaveFinish {
 		go func() {
 			defer func() {
 				if e := recover(); e != nil {
 					logger.Errorf("panic: %v", e)
 				}
 			}()
-			err := mdb.masterTryPartialSyncWithSlave(slave, replId, replOffset)
+			err := server.masterTryPartialSyncWithSlave(slave, replId, replOffset)
 			if err == nil {
 				return
 			}
 			if err != nil && err != cannotPartialSync {
-				mdb.removeSlave(slave)
+				server.removeSlave(slave)
 				logger.Errorf("masterTryPartialSyncWithSlave error: %v", err)
 				return
 			}
 			// assert err == cannotPartialSync
-			if err := mdb.masterFullReSyncWithSlave(slave); err != nil {
-				mdb.removeSlave(slave)
+			if err := server.masterFullReSyncWithSlave(slave); err != nil {
+				server.removeSlave(slave)
 				logger.Errorf("masterFullReSyncWithSlave error: %v", err)
 				return
			}
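execPSync dispatches on bgSaveState: an idle master starts a new bg-save, a running one just queues the slave, and a finished one tries partial sync before falling back to full resync. A toy version of that three-way dispatch:

```go
package main

import "fmt"

type bgSaveState int

const (
	bgSaveIdle bgSaveState = iota
	bgSaveRunning
	bgSaveFinish
)

// handlePSync summarizes the branch taken for each state in execPSync.
func handlePSync(state bgSaveState) string {
	switch state {
	case bgSaveIdle:
		return "queue slave in waitSlaves, start a new bg-save"
	case bgSaveRunning:
		return "queue slave in waitSlaves, wait for the running bg-save"
	case bgSaveFinish:
		return "try partial sync, fall back to full resync"
	}
	return "unreachable"
}

func main() {
	for _, s := range []bgSaveState{bgSaveIdle, bgSaveRunning, bgSaveFinish} {
		fmt.Println(handlePSync(s))
	}
}
```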
@@ -324,13 +324,13 @@ func (mdb *MultiDB) execPSync(c redis.Connection, args [][]byte) redis.Reply {
 	return &protocol.NoReply{}
 }
 
-func (mdb *MultiDB) execReplConf(c redis.Connection, args [][]byte) redis.Reply {
+func (server *Server) execReplConf(c redis.Connection, args [][]byte) redis.Reply {
 	if len(args)%2 != 0 {
 		return protocol.MakeSyntaxErrReply()
 	}
-	mdb.masterStatus.mu.RLock()
-	slave := mdb.masterStatus.slaveMap[c]
-	mdb.masterStatus.mu.RUnlock()
+	server.masterStatus.mu.RLock()
+	slave := server.masterStatus.slaveMap[c]
+	server.masterStatus.mu.RUnlock()
 	for i := 0; i < len(args); i += 2 {
 		key := strings.ToLower(string(args[i]))
		value := string(args[i+1])
@@ -348,55 +348,56 @@ func (mdb *MultiDB) execReplConf(c redis.Connection, args [][]byte) redis.Reply
 	return protocol.MakeOkReply()
 }
 
-func (mdb *MultiDB) removeSlave(slave *slaveClient) {
-	mdb.masterStatus.mu.Lock()
-	defer mdb.masterStatus.mu.Unlock()
+func (server *Server) removeSlave(slave *slaveClient) {
+	server.masterStatus.mu.Lock()
+	defer server.masterStatus.mu.Unlock()
 	_ = slave.conn.Close()
-	delete(mdb.masterStatus.slaveMap, slave.conn)
-	delete(mdb.masterStatus.waitSlaves, slave)
-	delete(mdb.masterStatus.onlineSlaves, slave)
+	delete(server.masterStatus.slaveMap, slave.conn)
+	delete(server.masterStatus.waitSlaves, slave)
+	delete(server.masterStatus.onlineSlaves, slave)
 	logger.Info("disconnect with slave " + slave.conn.Name())
 }
 
-func (mdb *MultiDB) setSlaveOnline(slave *slaveClient, currentOffset int64) {
-	mdb.masterStatus.mu.Lock()
-	defer mdb.masterStatus.mu.Unlock()
+func (server *Server) setSlaveOnline(slave *slaveClient, currentOffset int64) {
+	server.masterStatus.mu.Lock()
+	defer server.masterStatus.mu.Unlock()
 	slave.state = slaveStateOnline
 	slave.offset = currentOffset
-	mdb.masterStatus.onlineSlaves[slave] = struct{}{}
+	server.masterStatus.onlineSlaves[slave] = struct{}{}
 }
 
 var pingBytes = protocol.MakeMultiBulkReply(utils.ToCmdLine("ping")).ToBytes()
 
 const maxBacklogSize = 10 * 1024 * 1024 // 10MB
 
-func (mdb *MultiDB) masterCron() {
-	mdb.masterStatus.mu.Lock()
-	if len(mdb.masterStatus.slaveMap) == 0 { // no slaves, do nothing
+func (server *Server) masterCron() {
+	server.masterStatus.mu.Lock()
+	if len(server.masterStatus.slaveMap) == 0 { // no slaves, do nothing
 		return
 	}
-	if mdb.masterStatus.bgSaveState == bgSaveFinish {
-		mdb.masterStatus.backlog.appendBytes(pingBytes)
+	if server.masterStatus.bgSaveState == bgSaveFinish {
+		server.masterStatus.backlog.appendBytes(pingBytes)
 	}
-	backlogSize := len(mdb.masterStatus.backlog.buf)
-	mdb.masterStatus.mu.Unlock()
-	if err := mdb.masterSendUpdatesToSlave(); err != nil {
+	backlogSize := len(server.masterStatus.backlog.buf)
+	server.masterStatus.mu.Unlock()
+	if err := server.masterSendUpdatesToSlave(); err != nil {
 		logger.Errorf("masterSendUpdatesToSlave error: %v", err)
 	}
-	if backlogSize > maxBacklogSize && !mdb.masterStatus.rewriting.Get() {
+	if backlogSize > maxBacklogSize && !server.masterStatus.rewriting.Get() {
 		go func() {
-			mdb.masterStatus.rewriting.Set(true)
-			defer mdb.masterStatus.rewriting.Set(false)
-			if err := mdb.rewriteRDB(); err != nil {
-				mdb.masterStatus.rewriting.Set(false)
+			server.masterStatus.rewriting.Set(true)
+			defer server.masterStatus.rewriting.Set(false)
+			if err := server.rewriteRDB(); err != nil {
+				server.masterStatus.rewriting.Set(false)
				logger.Errorf("rewrite error: %v", err)
 			}
 		}()
 	}
 }
 
 // replAofListener is an implementation for aof.Listener
 type replAofListener struct {
-	mdb         *MultiDB
+	mdb         *Server
 	backlog     *replBacklog // may NOT be mdb.masterStatus.backlog
 	readyToSend bool
 }
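replAofListener ties replication to persistence: the persister invokes the listener for each batch of commands it appends, and the listener copies them into the backlog. A minimal sketch of that observer shape; the interface body is inferred, since only Callback(cmdLines []CmdLine) appears in this diff:

```go
package main

import "fmt"

type CmdLine = [][]byte

// Listener mirrors the aof.Listener role implied by the diff: the persister
// calls Callback for every command it appends.
type Listener interface {
	Callback(cmdLines []CmdLine)
}

// backlogListener flattens each command's args into a backlog; a real
// implementation would re-encode them as RESP and track offsets.
type backlogListener struct {
	backlog [][]byte
}

func (l *backlogListener) Callback(cmdLines []CmdLine) {
	for _, line := range cmdLines {
		for _, arg := range line {
			l.backlog = append(l.backlog, arg)
		}
	}
}

func main() {
	var l Listener = &backlogListener{}
	l.Callback([]CmdLine{{[]byte("set"), []byte("k"), []byte("v")}})
	fmt.Println("backlog entries:", len(l.(*backlogListener).backlog))
}
```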
@@ -417,8 +418,8 @@ func (listener *replAofListener) Callback(cmdLines []CmdLine) {
 	}
 }
 
-func (mdb *MultiDB) initMaster() {
-	mdb.masterStatus = &masterStatus{
+func (server *Server) initMaster() {
+	server.masterStatus = &masterStatus{
 		mu:      sync.RWMutex{},
 		replId:  utils.RandHexString(40),
		backlog: &replBacklog{},
@@ -430,28 +431,28 @@ func (mdb *MultiDB) initMaster() {
 	}
 }
 
-func (mdb *MultiDB) stopMaster() {
-	mdb.masterStatus.mu.Lock()
-	defer mdb.masterStatus.mu.Unlock()
+func (server *Server) stopMaster() {
+	server.masterStatus.mu.Lock()
+	defer server.masterStatus.mu.Unlock()
 
 	// disconnect with slave
-	for _, slave := range mdb.masterStatus.slaveMap {
+	for _, slave := range server.masterStatus.slaveMap {
 		_ = slave.conn.Close()
-		delete(mdb.masterStatus.slaveMap, slave.conn)
-		delete(mdb.masterStatus.waitSlaves, slave)
-		delete(mdb.masterStatus.onlineSlaves, slave)
+		delete(server.masterStatus.slaveMap, slave.conn)
+		delete(server.masterStatus.waitSlaves, slave)
+		delete(server.masterStatus.onlineSlaves, slave)
 	}
 
 	// clean master status
-	if mdb.aofHandler != nil {
-		mdb.aofHandler.RemoveListener(mdb.masterStatus.aofListener)
+	if server.persister != nil {
+		server.persister.RemoveListener(server.masterStatus.aofListener)
 	}
-	_ = os.Remove(mdb.masterStatus.rdbFilename)
-	mdb.masterStatus.rdbFilename = ""
-	mdb.masterStatus.replId = ""
-	mdb.masterStatus.backlog = &replBacklog{}
-	mdb.masterStatus.slaveMap = make(map[redis.Connection]*slaveClient)
-	mdb.masterStatus.waitSlaves = make(map[*slaveClient]struct{})
-	mdb.masterStatus.onlineSlaves = make(map[*slaveClient]struct{})
-	mdb.masterStatus.bgSaveState = bgSaveIdle
+	_ = os.Remove(server.masterStatus.rdbFilename)
+	server.masterStatus.rdbFilename = ""
+	server.masterStatus.replId = ""
+	server.masterStatus.backlog = &replBacklog{}
+	server.masterStatus.slaveMap = make(map[redis.Connection]*slaveClient)
+	server.masterStatus.waitSlaves = make(map[*slaveClient]struct{})
+	server.masterStatus.onlineSlaves = make(map[*slaveClient]struct{})
+	server.masterStatus.bgSaveState = bgSaveIdle
 }