From ae1872d7009cf48f9441f62753360265b4016da7 Mon Sep 17 00:00:00 2001
From: Karl Seguin
Date: Fri, 5 Feb 2021 19:24:54 +0800
Subject: [PATCH] add ForEachFunc

---
 bucket.go            | 12 ++++++++++
 cache.go             | 10 +++++++-
 cache_test.go        | 42 +++++++++++++++++++++++++++++++++
 layeredbucket.go     |  9 ++++++++
 layeredcache.go      |  4 ++++
 layeredcache_test.go | 55 +++++++++++++++++++++++++++++++++++++++-----
 readme.md            |  5 +++-
 7 files changed, 129 insertions(+), 8 deletions(-)

diff --git a/bucket.go b/bucket.go
index a2b9162..e7e2ab0 100644
--- a/bucket.go
+++ b/bucket.go
@@ -17,6 +17,18 @@ func (b *bucket) itemCount() int {
 	return len(b.lookup)
 }
 
+func (b *bucket) forEachFunc(matches func(key string, item *Item) bool) bool {
+	lookup := b.lookup
+	b.RLock()
+	defer b.RUnlock()
+	for key, item := range lookup {
+		if !matches(key, item) {
+			return false
+		}
+	}
+	return true
+}
+
 func (b *bucket) get(key string) *Item {
 	b.RLock()
 	defer b.RUnlock()
diff --git a/cache.go b/cache.go
index 16ea3d7..0f87c92 100644
--- a/cache.go
+++ b/cache.go
@@ -76,6 +76,14 @@ func (c *Cache) DeleteFunc(matches func(key string, item *Item) bool) int {
 	return count
 }
 
+func (c *Cache) ForEachFunc(matches func(key string, item *Item) bool) {
+	for _, b := range c.buckets {
+		if !b.forEachFunc(matches) {
+			break
+		}
+	}
+}
+
 // Get an item from the cache. Returns nil if the item wasn't found.
 // This can return an expired item. Use item.Expired() to see if the item
 // is expired and item.TTL() to see how long until the item expires (which
@@ -210,7 +218,7 @@ func (c *Cache) promote(item *Item) {
 	case c.promotables <- item:
 	default:
 	}
-	
+
 }
 
 func (c *Cache) worker() {
diff --git a/cache_test.go b/cache_test.go
index 2578b92..921bd4d 100644
--- a/cache_test.go
+++ b/cache_test.go
@@ -1,6 +1,7 @@
 package ccache
 
 import (
+	"sort"
 	"strconv"
 	"sync/atomic"
 	"testing"
@@ -273,6 +274,34 @@ func (_ CacheTests) ResizeOnTheFly() {
 	Expect(cache.Get("6").Value()).To.Equal(6)
 }
 
+func (_ CacheTests) ForEachFunc() {
+	cache := New(Configure().MaxSize(3).ItemsToPrune(1))
+	Expect(forEachKeys(cache)).To.Equal([]string{})
+
+	cache.Set("1", 1, time.Minute)
+	Expect(forEachKeys(cache)).To.Equal([]string{"1"})
+
+	cache.Set("2", 2, time.Minute)
+	time.Sleep(time.Millisecond * 10)
+	Expect(forEachKeys(cache)).To.Equal([]string{"1", "2"})
+
+	cache.Set("3", 3, time.Minute)
+	time.Sleep(time.Millisecond * 10)
+	Expect(forEachKeys(cache)).To.Equal([]string{"1", "2", "3"})
+
+	cache.Set("4", 4, time.Minute)
+	time.Sleep(time.Millisecond * 10)
+	Expect(forEachKeys(cache)).To.Equal([]string{"2", "3", "4"})
+
+	cache.Set("stop", 5, time.Minute)
+	time.Sleep(time.Millisecond * 10)
+	Expect(forEachKeys(cache)).Not.To.Contain("stop")
+
+	cache.Set("6", 6, time.Minute)
+	time.Sleep(time.Millisecond * 10)
+	Expect(forEachKeys(cache)).Not.To.Contain("stop")
+}
+
 type SizedItem struct {
 	id int
 	s  int64
@@ -293,3 +322,16 @@ func gcCache(cache *Cache) {
 	cache.gc()
 	cache.restart()
 }
+
+func forEachKeys(cache *Cache) []string {
+	keys := make([]string, 0, 10)
+	cache.ForEachFunc(func(key string, i *Item) bool {
+		if key == "stop" {
+			return false
+		}
+		keys = append(keys, key)
+		return true
+	})
+	sort.Strings(keys)
+	return keys
+}
diff --git a/layeredbucket.go b/layeredbucket.go
index 18f4a71..46e704d 100644
--- a/layeredbucket.go
+++ b/layeredbucket.go
@@ -102,6 +102,15 @@ func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool {
 	return true
 }
 
+func (b *layeredBucket) forEachFunc(primary string, matches func(key string, item *Item) bool) {
+	b.RLock()
+	bucket, exists := b.buckets[primary]
+	b.RUnlock()
+	if exists {
+		bucket.forEachFunc(matches)
+	}
+}
+
 func (b *layeredBucket) clear() {
 	b.Lock()
 	defer b.Unlock()
diff --git a/layeredcache.go b/layeredcache.go
index 40f7123..3ffaf0d 100644
--- a/layeredcache.go
+++ b/layeredcache.go
@@ -73,6 +73,10 @@ func (c *LayeredCache) Get(primary, secondary string) *Item {
 	return item
 }
 
+func (c *LayeredCache) ForEachFunc(primary string, matches func(key string, item *Item) bool) {
+	c.bucket(primary).forEachFunc(primary, matches)
+}
+
 // Get the secondary cache for a given primary key. This operation will
 // never return nil. In the case where the primary key does not exist, a
 // new, underlying, empty bucket will be created and returned.
diff --git a/layeredcache_test.go b/layeredcache_test.go
index f1f7a65..390b4e4 100644
--- a/layeredcache_test.go
+++ b/layeredcache_test.go
@@ -1,6 +1,7 @@
 package ccache
 
 import (
+	"sort"
 	"strconv"
 	"sync/atomic"
 	"testing"
@@ -261,12 +262,6 @@ func (_ LayeredCacheTests) ResizeOnTheFly() {
 	Expect(cache.Get("6", "a").Value()).To.Equal(6)
 }
 
-func newLayered() *LayeredCache {
-	c := Layered(Configure())
-	c.Clear()
-	return c
-}
-
 func (_ LayeredCacheTests) RemovesOldestItemWhenFullBySizer() {
 	cache := Layered(Configure().MaxSize(9).ItemsToPrune(2))
 	for i := 0; i < 7; i++ {
@@ -329,6 +324,41 @@ func (_ LayeredCacheTests) ReplaceChangesSize() {
 	checkLayeredSize(cache, 5)
 }
 
+func (_ LayeredCacheTests) EachFunc() {
+	cache := Layered(Configure().MaxSize(3).ItemsToPrune(1))
+	Expect(forEachKeysLayered(cache, "1")).To.Equal([]string{})
+
+	cache.Set("1", "a", 1, time.Minute)
+	Expect(forEachKeysLayered(cache, "1")).To.Equal([]string{"a"})
+
+	cache.Set("1", "b", 2, time.Minute)
+	time.Sleep(time.Millisecond * 10)
+	Expect(forEachKeysLayered(cache, "1")).To.Equal([]string{"a", "b"})
+
+	cache.Set("1", "c", 3, time.Minute)
+	time.Sleep(time.Millisecond * 10)
+	Expect(forEachKeysLayered(cache, "1")).To.Equal([]string{"a", "b", "c"})
+
+	cache.Set("1", "d", 4, time.Minute)
+	time.Sleep(time.Millisecond * 10)
+	Expect(forEachKeysLayered(cache, "1")).To.Equal([]string{"b", "c", "d"})
+
+	// iteration is non-deterministic, all we know for sure is "stop" should not be in there
+	cache.Set("1", "stop", 5, time.Minute)
+	time.Sleep(time.Millisecond * 10)
+	Expect(forEachKeysLayered(cache, "1")).Not.To.Contain("stop")
+
+	cache.Set("1", "e", 6, time.Minute)
+	time.Sleep(time.Millisecond * 10)
+	Expect(forEachKeysLayered(cache, "1")).Not.To.Contain("stop")
+}
+
+func newLayered() *LayeredCache {
+	c := Layered(Configure())
+	c.Clear()
+	return c
+}
+
 func checkLayeredSize(cache *LayeredCache, sz int64) {
 	cache.Stop()
 	Expect(cache.size).To.Equal(sz)
@@ -340,3 +370,16 @@ func gcLayeredCache(cache *LayeredCache) {
 	cache.gc()
 	cache.restart()
 }
+
+func forEachKeysLayered(cache *LayeredCache, primary string) []string {
+	keys := make([]string, 0, 10)
+	cache.ForEachFunc(primary, func(key string, i *Item) bool {
+		if key == "stop" {
+			return false
+		}
+		keys = append(keys, key)
+		return true
+	})
+	sort.Strings(keys)
+	return keys
+}
diff --git a/readme.md b/readme.md
index 47a8bc2..8617f18 100644
--- a/readme.md
+++ b/readme.md
@@ -97,7 +97,10 @@ cache.Delete("user:4")
 `DeletePrefix` deletes all keys matching the provided prefix. Returns the number of keys removed.
 
 ### DeleteFunc
-`DeleteFunc` deletes all items that the provded matches func evaluates to true. Returns the number of keys removed.
+`DeleteFunc` deletes all items for which the provided matches func evaluates to true. Returns the number of keys removed.
+
+### ForEachFunc
+`ForEachFunc` iterates through all keys and values in the map and passes them to the provided function. Iteration stops if the function returns false. Iteration order is random.
 
 ### Clear
 `Clear` clears the cache. If the cache's gc is running, `Clear` waits for it to finish.
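
Below is a minimal usage sketch of the two new methods, based only on the signatures added in this patch (`Cache.ForEachFunc` and `LayeredCache.ForEachFunc`). The `github.com/karlseguin/ccache/v2` import path and the sample keys are assumptions for illustration, not part of the commit.

```go
package main

import (
	"fmt"
	"time"

	// Import path assumed; adjust to the ccache version you actually use.
	"github.com/karlseguin/ccache/v2"
)

func main() {
	cache := ccache.New(ccache.Configure())
	defer cache.Stop()

	cache.Set("user:1", "alice", time.Minute)
	cache.Set("user:2", "bob", time.Minute)

	// Every key/item is passed to the callback; returning false stops iteration early.
	cache.ForEachFunc(func(key string, item *ccache.Item) bool {
		fmt.Println(key, "=", item.Value())
		return true
	})

	// The layered variant iterates the secondary entries under one primary key.
	layered := ccache.Layered(ccache.Configure())
	defer layered.Stop()

	layered.Set("users", "1", "alice", time.Minute)
	layered.Set("users", "2", "bob", time.Minute)
	layered.ForEachFunc("users", func(key string, item *ccache.Item) bool {
		fmt.Println("users/"+key, "=", item.Value())
		return true
	})
}
```

As the readme text above notes, iteration order is random, so callers should not rely on any particular ordering; returning false from the callback is the only way to stop early.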