Created flow for deleting a key consistently within the cluster.

Kelvin Mwinuka
2024-03-11 05:22:38 +08:00
parent 8000cef72d
commit c611dd60ee
6 changed files with 120 additions and 37 deletions

View File

@@ -3,10 +3,12 @@ package raft
import (
"context"
"encoding/json"
"fmt"
"github.com/echovault/echovault/src/utils"
"github.com/hashicorp/raft"
"io"
"log"
"strings"
)
type FSMOpts struct {
@@ -43,6 +45,17 @@ func (fsm *FSM) Apply(log *raft.Log) interface{} {
ctx := context.WithValue(context.Background(), utils.ContextServerID("ServerID"), request.ServerID)
ctx = context.WithValue(ctx, utils.ContextConnID("ConnectionID"), request.ConnectionID)
switch strings.ToLower(request.Type) {
default:
return utils.ApplyResponse{
Error: fmt.Errorf("unsupported raft command type %s", request.Type),
Response: nil,
}
case "delete-key":
// TODO: Handle key deletion
case "command":
// Handle command
command, err := fsm.options.GetCommand(request.CMD[0])
if err != nil {
@@ -71,6 +84,7 @@ func (fsm *FSM) Apply(log *raft.Log) interface{} {
}
}
}
}
return nil
}
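
The delete-key case above is left as a TODO in this commit. A minimal sketch of how it might eventually be filled in, assuming the FSM options expose a DeleteKey hook (hypothetical name, not part of this change):

	case "delete-key":
		// Hypothetical: delegate deletion to a hook provided via FSMOpts.
		if err := fsm.options.DeleteKey(ctx, request.Key); err != nil {
			return utils.ApplyResponse{Error: err, Response: nil}
		}
		return utils.ApplyResponse{Error: nil, Response: []byte("OK")}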

View File

@@ -3,7 +3,6 @@ package server
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/echovault/echovault/src/utils"
"time"
@@ -13,25 +12,59 @@ func (server *Server) IsInCluster() bool {
return server.Config.BootstrapCluster || server.Config.JoinAddr != ""
}
func (server *Server) raftApply(ctx context.Context, cmd []string) ([]byte, error) {
func (server *Server) raftApplyDeleteKey(ctx context.Context, key string) error {
serverId, _ := ctx.Value(utils.ContextServerID("ServerID")).(string)
deleteKeyRequest := utils.ApplyRequest{
Type: "delete-key",
ServerID: serverId,
ConnectionID: "nil",
Key: key,
}
b, err := json.Marshal(deleteKeyRequest)
if err != nil {
return fmt.Errorf("could not parse delete key request for key: %s", key)
}
applyFuture := server.raft.Apply(b, 500*time.Millisecond)
if err = applyFuture.Error(); err != nil {
return err
}
r, ok := applyFuture.Response().(utils.ApplyResponse)
if !ok {
return fmt.Errorf("unprocessable entity %v", r)
}
if r.Error != nil {
return r.Error
}
return nil
}
func (server *Server) raftApplyCommand(ctx context.Context, cmd []string) ([]byte, error) {
serverId, _ := ctx.Value(utils.ContextServerID("ServerID")).(string)
connectionId, _ := ctx.Value(utils.ContextConnID("ConnectionID")).(string)
applyRequest := utils.ApplyRequest{
Type: "command",
ServerID: serverId,
ConnectionID: connectionId,
CMD: cmd,
}
b, err := json.Marshal(applyRequest)
if err != nil {
return nil, errors.New("could not parse request")
return nil, fmt.Errorf("could not parse command request for command: %+v", cmd)
}
applyFuture := server.raft.Apply(b, 500*time.Millisecond)
if err := applyFuture.Error(); err != nil {
if err = applyFuture.Error(); err != nil {
return nil, err
}
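
The hunk is cut off here. Presumably raftApplyCommand unpacks the apply future the same way raftApplyDeleteKey does above; a sketch of that shared pattern (not the committed code):

	r, ok := applyFuture.Response().(utils.ApplyResponse)
	if !ok {
		return nil, fmt.Errorf("unprocessable entity %v", r)
	}
	if r.Error != nil {
		return nil, r.Error
	}
	return r.Response, nil // assumes ApplyResponse.Response is (or converts to) []byte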

View File

@@ -202,7 +202,6 @@ func (server *Server) RemoveExpiry(key string) {
Value: server.store[key].Value,
ExpireAt: time.Time{},
}
// Remove key from slice of keys associated with expiry
server.keysWithExpiry.rwMutex.Lock()
defer server.keysWithExpiry.rwMutex.Unlock()
@@ -294,6 +293,7 @@ func (server *Server) updateKeyInCache(ctx context.Context, key string) error {
return nil
}
// adjustMemoryUsage should only be called from standalone server or from raft cluster leader.
func (server *Server) adjustMemoryUsage(ctx context.Context) error {
// If max memory is 0, there's no need to adjust memory usage.
if server.Config.MaxMemory == 0 {
@@ -327,10 +327,20 @@ func (server *Server) adjustMemoryUsage(ctx context.Context) error {
if server.lfuCache.cache.Len() == 0 {
return fmt.Errorf("adjsutMemoryUsage -> LFU cache empty")
}
key := server.lfuCache.cache.Pop().(string)
if !server.IsInCluster() {
// If in standalone mode, directly delete the key
if err := server.DeleteKey(ctx, key); err != nil {
return fmt.Errorf("adjustMemoryUsage -> LFU cache eviction: %+v", err)
}
} else if server.IsInCluster() && server.raft.IsRaftLeader() {
// If in raft cluster, send command to delete key from cluster
if err := server.raftApplyDeleteKey(ctx, key); err != nil {
return fmt.Errorf("adjustMemoryUsage -> LFU cache eviction: %+v", err)
}
}
// Run garbage collection
runtime.GC()
// Return if we're below max memory
@@ -349,10 +359,21 @@ func (server *Server) adjustMemoryUsage(ctx context.Context) error {
if server.lruCache.cache.Len() == 0 {
return fmt.Errorf("adjsutMemoryUsage -> LRU cache empty")
}
key := server.lruCache.cache.Pop().(string)
if !server.IsInCluster() {
// If in standalone mode, directly delete the key.
if err := server.DeleteKey(ctx, key); err != nil {
return fmt.Errorf("adjustMemoryUsage -> LRU cache eviction: %+v", err)
}
} else if server.IsInCluster() && server.raft.IsRaftLeader() {
// If in cluster mode and the node is a cluster leader,
// send command to delete the key from the cluster.
if err := server.raftApplyDeleteKey(ctx, key); err != nil {
return fmt.Errorf("adjustMemoryUsage -> LRU cache eviction: %+v", err)
}
}
// Run garbage collection
runtime.GC()
// Return if we're below max memory
@@ -374,10 +395,16 @@ func (server *Server) adjustMemoryUsage(ctx context.Context) error {
idx := rand.Intn(len(server.keyLocks))
for key := range server.keyLocks {
if idx == 0 {
// Lock the key
if !server.IsInCluster() {
// If in standalone mode, directly delete the key
if err := server.DeleteKey(ctx, key); err != nil {
return fmt.Errorf("adjustMemoryUsage -> all keys random: %+v", err)
}
} else if server.IsInCluster() && server.raft.IsRaftLeader() {
if err := server.raftApplyDeleteKey(ctx, key); err != nil {
return fmt.Errorf("adjustMemoryUsage -> all keys random: %+v", err)
}
}
// Run garbage collection
runtime.GC()
// Return if we're below max memory
@@ -399,10 +426,16 @@ func (server *Server) adjustMemoryUsage(ctx context.Context) error {
key := server.keysWithExpiry.keys[idx]
server.keysWithExpiry.rwMutex.RUnlock()
// Delete the key
if !server.IsInCluster() {
// If in standalone mode, directly delete the key
if err := server.DeleteKey(ctx, key); err != nil {
return fmt.Errorf("adjustMemoryUsage -> volatile keys random: %+v", err)
}
} else if server.IsInCluster() && server.raft.IsRaftLeader() {
if err := server.raftApplyDeleteKey(ctx, key); err != nil {
return fmt.Errorf("adjustMemoryUsage -> volatile keys randome: %+v", err)
}
}
// Run garbage collection
runtime.GC()
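
The same standalone-vs-cluster branch now appears in all four eviction strategies (LFU, LRU, all-keys-random, volatile-keys-random). A possible follow-up would be to factor it into a helper; a sketch, with a hypothetical name:

	// evictKey deletes a key directly in standalone mode, or replicates the
	// deletion through raft when this node is the cluster leader.
	// Hypothetical helper, not part of this commit.
	func (server *Server) evictKey(ctx context.Context, key string) error {
		if !server.IsInCluster() {
			return server.DeleteKey(ctx, key)
		}
		if server.raft.IsRaftLeader() {
			return server.raftApplyDeleteKey(ctx, key)
		}
		// Followers take no action; the leader drives evictions.
		return nil
	}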

View File

@@ -84,7 +84,8 @@ func (server *Server) handleCommand(ctx context.Context, message []byte, conn *n
// Handle other commands that need to be synced across the cluster
if server.raft.IsRaftLeader() {
res, err := server.raftApply(ctx, cmd)
var res []byte
res, err = server.raftApplyCommand(ctx, cmd)
if err != nil {
return nil, err
}

View File

@@ -101,7 +101,7 @@ func NewServer(opts Opts) *Server {
AddVoter: server.raft.AddVoter,
RemoveRaftServer: server.raft.RemoveServer,
IsRaftLeader: server.raft.IsRaftLeader,
ApplyMutate: server.raftApply,
ApplyMutate: server.raftApplyCommand,
})
} else {
// Set up standalone snapshot engine

View File

@@ -41,9 +41,11 @@ type ContextServerID string
type ContextConnID string
type ApplyRequest struct {
Type string `json:"Type"` // command | delete-key
ServerID string `json:"ServerID"`
ConnectionID string `json:"ConnectionID"`
CMD []string `json:"CMD"`
Key string `json:"Key"`
}
type ApplyResponse struct {
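
For reference, a delete-key request built by raftApplyDeleteKey would marshal roughly as follows (field values illustrative):

	req := utils.ApplyRequest{
		Type:         "delete-key",
		ServerID:     "server-1",  // example server ID taken from the context
		ConnectionID: "nil",       // raftApplyDeleteKey sets this literally to "nil"
		Key:          "user:1234", // example key being evicted
	}
	b, _ := json.Marshal(req)
	// b: {"Type":"delete-key","ServerID":"server-1","ConnectionID":"nil","CMD":null,"Key":"user:1234"}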