Updated Embedded ACL test and enabled memory adjustment after key update

Kelvin Mwinuka
2024-07-01 08:55:37 +08:00
parent d787c6cc31
commit 5a04bcade0
3 changed files with 941 additions and 849 deletions

File diff suppressed because it is too large.


@@ -418,8 +418,6 @@ func TestEchoVault_ACLConfig(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            t.Parallel()
            // Create new server instance
            conf := DefaultConfig()
            conf.DataDir = ""


@@ -15,6 +15,7 @@
 package echovault

 import (
+    "container/heap"
     "context"
     "errors"
     "fmt"
@@ -23,8 +24,10 @@ import (
     "github.com/echovault/echovault/internal/eviction"
     "log"
     "math/rand"
+    "runtime"
     "slices"
     "strings"
+    "sync"
     "time"
 )
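The newly imported "container/heap" package is what the eviction path below relies on: the LFU/LRU caches are heap-ordered, and the coldest key is removed with heap.Pop. As a rough, self-contained sketch of that mechanism (the entry type and scores here are illustrative stand-ins, not EchoVault's actual cache types):

package main

import (
    "container/heap"
    "fmt"
)

// entry is a hypothetical cache entry: a key plus an access score.
type entry struct {
    key   string
    score int
}

// keyHeap orders entries so the lowest score (coldest key) is popped first.
type keyHeap []entry

func (h keyHeap) Len() int           { return len(h) }
func (h keyHeap) Less(i, j int) bool { return h[i].score < h[j].score }
func (h keyHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *keyHeap) Push(x any) { *h = append(*h, x.(entry)) }
func (h *keyHeap) Pop() any {
    old := *h
    n := len(old)
    item := old[n-1]
    *h = old[:n-1]
    return item
}

func main() {
    h := &keyHeap{{"user:1", 7}, {"session:9", 2}, {"config", 5}}
    heap.Init(h)
    // Pop the coldest key first, the same way the eviction loop below
    // calls heap.Pop on the per-database cache.
    for h.Len() > 0 {
        e := heap.Pop(h).(entry)
        fmt.Println("evict", e.key)
    }
}

Because heap.Pop always returns the element that sorts first, the eviction loop can keep popping until heap usage drops below the configured limit without ever scanning the whole cache.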
@@ -363,174 +366,200 @@ func (server *EchoVault) updateKeysInCache(ctx context.Context, keys []string) error {
        }
    }

    server.storeLock.Lock()
    defer server.storeLock.Unlock()

    wg := sync.WaitGroup{}
    errChan := make(chan error)
    doneChan := make(chan struct{})

    for db, _ := range server.store {
        wg.Add(1)
        ctx := context.WithValue(ctx, "Database", db)
        go func(ctx context.Context, database int, wg *sync.WaitGroup, errChan *chan error) {
            if err := server.adjustMemoryUsage(ctx); err != nil {
                *errChan <- fmt.Errorf("adjustMemoryUsage database %d: %+v", database, err)
            }
            wg.Done()
        }(ctx, db, &wg, &errChan)
    }

    go func() {
        wg.Wait()
        doneChan <- struct{}{}
    }()

    select {
    case err := <-errChan:
        return fmt.Errorf("adjustMemoryUsage error: %+v", err)
    case <-doneChan:
    }

    return nil
}

// adjustMemoryUsage should only be called from standalone echovault or from raft cluster leader.
func (server *EchoVault) adjustMemoryUsage(ctx context.Context) error {
    // If max memory is 0, there's no need to adjust memory usage.
    if server.config.MaxMemory == 0 {
        return nil
    }

    database := ctx.Value("Database").(int)

    // Check if memory usage is above max-memory.
    // If it is, pop items from the cache until we get under the limit.
    var memStats runtime.MemStats
    runtime.ReadMemStats(&memStats)
    // If we're using less memory than the max-memory, there's no need to evict.
    if memStats.HeapInuse < server.config.MaxMemory {
        return nil
    }
    // Force a garbage collection first before we start evicting keys.
    runtime.GC()
    runtime.ReadMemStats(&memStats)
    if memStats.HeapInuse < server.config.MaxMemory {
        return nil
    }
    // We've done a GC, but we're still at or above the max memory limit.
    // Start a loop that evicts keys until either the heap is empty or
    // we're below the max memory limit.
    switch {
    case slices.Contains([]string{constants.AllKeysLFU, constants.VolatileLFU}, strings.ToLower(server.config.EvictionPolicy)):
        // Remove keys from the LFU cache until we're below the max memory limit or
        // until the LFU cache is empty.
        server.lfuCache.mutex.Lock()
        defer server.lfuCache.mutex.Unlock()
        for {
            // Return if cache is empty
            if server.lfuCache.cache[database].Len() == 0 {
                return fmt.Errorf("adjustMemoryUsage -> LFU cache empty")
            }

            key := heap.Pop(server.lfuCache.cache[database]).(string)
            if !server.isInCluster() {
                // If in standalone mode, directly delete the key
                if err := server.deleteKey(ctx, key); err != nil {
                    return fmt.Errorf("adjustMemoryUsage -> LFU cache eviction: %+v", err)
                }
            } else if server.isInCluster() && server.raft.IsRaftLeader() {
                // If in raft cluster, send command to delete key from cluster
                if err := server.raftApplyDeleteKey(ctx, key); err != nil {
                    return fmt.Errorf("adjustMemoryUsage -> LFU cache eviction: %+v", err)
                }
            }
            // Run garbage collection
            runtime.GC()
            // Return if we're below max memory
            runtime.ReadMemStats(&memStats)
            if memStats.HeapInuse < server.config.MaxMemory {
                return nil
            }
        }
    case slices.Contains([]string{constants.AllKeysLRU, constants.VolatileLRU}, strings.ToLower(server.config.EvictionPolicy)):
        // Remove keys from the LRU cache until we're below the max memory limit or
        // until the LRU cache is empty.
        server.lruCache.mutex.Lock()
        defer server.lruCache.mutex.Unlock()
        for {
            // Return if cache is empty
            if server.lruCache.cache[database].Len() == 0 {
                return fmt.Errorf("adjustMemoryUsage -> LRU cache empty")
            }

            key := heap.Pop(server.lruCache.cache[database]).(string)
            if !server.isInCluster() {
                // If in standalone mode, directly delete the key.
                if err := server.deleteKey(ctx, key); err != nil {
                    return fmt.Errorf("adjustMemoryUsage -> LRU cache eviction: %+v", err)
                }
            } else if server.isInCluster() && server.raft.IsRaftLeader() {
                // If in cluster mode and the node is a cluster leader,
                // send command to delete the key from the cluster.
                if err := server.raftApplyDeleteKey(ctx, key); err != nil {
                    return fmt.Errorf("adjustMemoryUsage -> LRU cache eviction: %+v", err)
                }
            }

            // Run garbage collection
            runtime.GC()
            // Return if we're below max memory
            runtime.ReadMemStats(&memStats)
            if memStats.HeapInuse < server.config.MaxMemory {
                return nil
            }
        }
    case slices.Contains([]string{constants.AllKeysRandom}, strings.ToLower(server.config.EvictionPolicy)):
        // Remove random keys until we're below the max memory limit
        // or there are no more keys remaining.
        for {
            // If there are no keys, return error
            if len(server.store) == 0 {
                err := errors.New("no keys to evict")
                return fmt.Errorf("adjustMemoryUsage -> all keys random: %+v", err)
            }
            // Get random key in the database
            idx := rand.Intn(len(server.store))
            for db, data := range server.store {
                if db == database {
                    for key, _ := range data {
                        if idx == 0 {
                            if !server.isInCluster() {
                                // If in standalone mode, directly delete the key
                                if err := server.deleteKey(ctx, key); err != nil {
                                    return fmt.Errorf("adjustMemoryUsage -> all keys random: %+v", err)
                                }
                            } else if server.isInCluster() && server.raft.IsRaftLeader() {
                                if err := server.raftApplyDeleteKey(ctx, key); err != nil {
                                    return fmt.Errorf("adjustMemoryUsage -> all keys random: %+v", err)
                                }
                            }
                            // Run garbage collection
                            runtime.GC()
                            // Return if we're below max memory
                            runtime.ReadMemStats(&memStats)
                            if memStats.HeapInuse < server.config.MaxMemory {
                                return nil
                            }
                        }
                        idx--
                    }
                }
            }
        }
    case slices.Contains([]string{constants.VolatileRandom}, strings.ToLower(server.config.EvictionPolicy)):
        // Remove random keys with an associated expiry time until we're below the max memory limit
        // or there are no more keys with expiry time.
        for {
            // Get random volatile key
            server.keysWithExpiry.rwMutex.RLock()
            idx := rand.Intn(len(server.keysWithExpiry.keys))
            key := server.keysWithExpiry.keys[database][idx]
            server.keysWithExpiry.rwMutex.RUnlock()

            if !server.isInCluster() {
                // If in standalone mode, directly delete the key
                if err := server.deleteKey(ctx, key); err != nil {
                    return fmt.Errorf("adjustMemoryUsage -> volatile keys random: %+v", err)
                }
            } else if server.isInCluster() && server.raft.IsRaftLeader() {
                if err := server.raftApplyDeleteKey(ctx, key); err != nil {
                    return fmt.Errorf("adjustMemoryUsage -> volatile keys random: %+v", err)
                }
            }

            // Run garbage collection
            runtime.GC()
            // Return if we're below max memory
            runtime.ReadMemStats(&memStats)
            if memStats.HeapInuse < server.config.MaxMemory {
                return nil
            }
        }
    default:
        return nil
    }
}
// evictKeysWithExpiredTTL is a function that samples keys with an associated TTL
// and evicts keys that are currently expired.
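The rewritten updateKeysInCache above fans adjustMemoryUsage out to one goroutine per database and then waits on either the first reported error or overall completion. A stripped-down sketch of that WaitGroup / error-channel / done-channel pattern, with a stand-in adjust function in place of the server method:

package main

import (
    "context"
    "fmt"
    "sync"
)

// adjust stands in for server.adjustMemoryUsage: it reads the target
// database index from the context, as the real method does.
func adjust(ctx context.Context) error {
    db := ctx.Value("Database").(int)
    fmt.Println("adjusting memory for database", db)
    return nil
}

func adjustAll(ctx context.Context, databases []int) error {
    wg := sync.WaitGroup{}
    errChan := make(chan error, len(databases)) // buffered so failed workers never block
    doneChan := make(chan struct{})

    for _, db := range databases {
        wg.Add(1)
        // The string context key mirrors the diff; production code would
        // normally use an unexported key type to satisfy go vet.
        dbCtx := context.WithValue(ctx, "Database", db)
        go func(ctx context.Context, database int) {
            defer wg.Done()
            if err := adjust(ctx); err != nil {
                errChan <- fmt.Errorf("adjustMemoryUsage database %d: %w", database, err)
            }
        }(dbCtx, db)
    }

    // Signal completion once every worker has finished.
    go func() {
        wg.Wait()
        close(doneChan)
    }()

    // Return the first error, or nil once all workers are done.
    select {
    case err := <-errChan:
        return err
    case <-doneChan:
        return nil
    }
}

func main() {
    if err := adjustAll(context.Background(), []int{0, 1, 2}); err != nil {
        fmt.Println("error:", err)
    }
}

The buffered error channel is a small deviation from the committed code, which uses an unbuffered channel; buffering means a worker whose error is never read does not block forever after the first error causes an early return.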