From f28a7755a1a51486f70765fbd0fbbefd89ad11c8 Mon Sep 17 00:00:00 2001
From: Karl Seguin
Date: Thu, 18 Mar 2021 18:45:44 +0800
Subject: [PATCH 1/2] document the simplicity of fetch

---
 cache.go        | 3 +++
 layeredcache.go | 3 +++
 readme.md       | 2 ++
 3 files changed, 8 insertions(+)

diff --git a/cache.go b/cache.go
index 0f87c92..d81d9a7 100644
--- a/cache.go
+++ b/cache.go
@@ -136,6 +136,9 @@ func (c *Cache) Replace(key string, value interface{}) bool {
 // Attempts to get the value from the cache and calles fetch on a miss (missing
 // or stale item). If fetch returns an error, no value is cached and the error
 // is returned back to the caller.
+// Note that Fetch merely calls the public Get and Set functions. If you want
+// a different Fetch behavior, such as thundering herd protection or returning
+// expired items, implement it in your application.
 func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
 	item := c.Get(key)
 	if item != nil && !item.Expired() {
diff --git a/layeredcache.go b/layeredcache.go
index 3ffaf0d..d36d690 100644
--- a/layeredcache.go
+++ b/layeredcache.go
@@ -131,6 +131,9 @@ func (c *LayeredCache) Replace(primary, secondary string, value interface{}) boo
 // Attempts to get the value from the cache and calles fetch on a miss.
 // If fetch returns an error, no value is cached and the error is returned back
 // to the caller.
+// Note that Fetch merely calls the public Get and Set functions. If you want
+// a different Fetch behavior, such as thundering herd protection or returning
+// expired items, implement it in your application.
 func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
 	item := c.Get(primary, secondary)
 	if item != nil {
diff --git a/readme.md b/readme.md
index 8617f18..b45a487 100644
--- a/readme.md
+++ b/readme.md
@@ -86,6 +86,8 @@ item, err := cache.Fetch("user:4", time.Minute * 10, func() (interface{}, error)
 })
 ```
 
+`Fetch` doesn't do anything fancy: it merely uses the public `Get` and `Set` functions. If you want more advanced behavior, such as using a singleflight to protect against thundering herd, support a callback that accepts the key, or returning expired items, you should implement that in your application.
+
 ### Delete
 
 `Delete` expects the key to delete. It's ok to call `Delete` on a non-existent key:
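The readme note above deliberately leaves thundering herd protection to the application. Below is a minimal sketch of one way to do that, assuming `golang.org/x/sync/singleflight` and using only the public `Get`, `Set`, and `Expired` calls shown in this patch; the `sfCache` wrapper and its names are illustrative, not part of ccache.

```go
package app

import (
	"time"

	"github.com/karlseguin/ccache/v2"
	"golang.org/x/sync/singleflight"
)

// sfCache reproduces Fetch's Get-then-Set behavior, but funnels concurrent
// misses for the same key through a singleflight.Group so the fetch function
// runs only once per key at a time.
type sfCache struct {
	cache *ccache.Cache
	group singleflight.Group
}

func (s *sfCache) Fetch(key string, ttl time.Duration, fetch func() (interface{}, error)) (*ccache.Item, error) {
	// Fast path: a fresh item is already cached.
	if item := s.cache.Get(key); item != nil && !item.Expired() {
		return item, nil
	}
	// Miss or stale item: all concurrent callers for this key share one fetch.
	_, err, _ := s.group.Do(key, func() (interface{}, error) {
		value, err := fetch()
		if err != nil {
			return nil, err
		}
		s.cache.Set(key, value, ttl)
		return value, nil
	})
	if err != nil {
		return nil, err
	}
	// Re-read so every caller gets a *ccache.Item, mirroring Fetch's signature.
	return s.cache.Get(key), nil
}
```

A caller would construct it as `&sfCache{cache: ccache.New(ccache.Configure())}` and call `Fetch` exactly as before; nothing in ccache itself needs to change.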
From df2d98315c8aa29a0ee16f4b5273b8fb50c52759 Mon Sep 17 00:00:00 2001
From: Karl Seguin
Date: Thu, 18 Mar 2021 19:29:04 +0800
Subject: [PATCH 2/2] Conditionally prune more than itemsToPrune items

It's possible, though unlikely, that c.size will be larger than c.maxSize
by more than c.itemsToPrune. The most likely case that this can happen is
when using SetMaxSize to dynamically adjust the cache size. The gc will
now always clear to at least c.maxSize.
---
 Makefile             | 2 +-
 cache.go             | 8 +++++++-
 cache_test.go        | 3 +++
 go.mod               | 2 +-
 go.sum               | 2 ++
 layeredcache.go      | 8 +++++++-
 layeredcache_test.go | 5 +++--
 7 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/Makefile b/Makefile
index 5b3f26b..fffa15f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
 t:
-	go test ./...
+	go test ./... -race -count=1
 
 f:
 	go fmt ./...
diff --git a/cache.go b/cache.go
index d81d9a7..a354961 100644
--- a/cache.go
+++ b/cache.go
@@ -304,7 +304,13 @@ func (c *Cache) doPromote(item *Item) bool {
 func (c *Cache) gc() int {
 	dropped := 0
 	element := c.list.Back()
-	for i := 0; i < c.itemsToPrune; i++ {
+
+	itemsToPrune := int64(c.itemsToPrune)
+	if min := c.size - c.maxSize; min > itemsToPrune {
+		itemsToPrune = min
+	}
+
+	for i := int64(0); i < itemsToPrune; i++ {
 		if element == nil {
 			return dropped
 		}
diff --git a/cache_test.go b/cache_test.go
index 921bd4d..3f2a0e6 100644
--- a/cache_test.go
+++ b/cache_test.go
@@ -18,6 +18,7 @@ func Test_Cache(t *testing.T) {
 
 func (_ CacheTests) DeletesAValue() {
 	cache := New(Configure())
+	defer cache.Stop()
 	Expect(cache.ItemCount()).To.Equal(0)
 
 	cache.Set("spice", "flow", time.Minute)
@@ -32,6 +33,7 @@ func (_ CacheTests) DeletesAValue() {
 
 func (_ CacheTests) DeletesAPrefix() {
 	cache := New(Configure())
+	defer cache.Stop()
 	Expect(cache.ItemCount()).To.Equal(0)
 
 	cache.Set("aaa", "1", time.Minute)
@@ -55,6 +57,7 @@ func (_ CacheTests) DeletesAPrefix() {
 
 func (_ CacheTests) DeletesAFunc() {
 	cache := New(Configure())
+	defer cache.Stop()
 	Expect(cache.ItemCount()).To.Equal(0)
 
 	cache.Set("a", 1, time.Minute)
diff --git a/go.mod b/go.mod
index eebeeb9..434fead 100644
--- a/go.mod
+++ b/go.mod
@@ -3,6 +3,6 @@ module github.com/karlseguin/ccache/v2
 go 1.13
 
 require (
-	github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003
+	github.com/karlseguin/expect v1.0.7
 	github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0
 )
diff --git a/go.sum b/go.sum
index f2d23bc..863759d 100644
--- a/go.sum
+++ b/go.sum
@@ -2,5 +2,7 @@ github.com/karlseguin/expect v1.0.1 h1:z4wy4npwwHSWKjGWH85WNJO42VQhovxTCZDSzhjo8
 github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
 github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA=
 github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
+github.com/karlseguin/expect v1.0.7 h1:OF4mqjblc450v8nKARBS5Q0AweBNR0A+O3VjjpxwBrg=
+github.com/karlseguin/expect v1.0.7/go.mod h1:lXdI8iGiQhmzpnnmU/EGA60vqKs8NbRNFnhhrJGoD5g=
 github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ=
 github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM=
diff --git a/layeredcache.go b/layeredcache.go
index d36d690..aaac25e 100644
--- a/layeredcache.go
+++ b/layeredcache.go
@@ -286,7 +286,13 @@ func (c *LayeredCache) doPromote(item *Item) bool {
 func (c *LayeredCache) gc() int {
 	element := c.list.Back()
 	dropped := 0
-	for i := 0; i < c.itemsToPrune; i++ {
+	itemsToPrune := int64(c.itemsToPrune)
+
+	if min := c.size - c.maxSize; min > itemsToPrune {
+		itemsToPrune = min
+	}
+
+	for i := int64(0); i < itemsToPrune; i++ {
 		if element == nil {
 			return dropped
 		}
diff --git a/layeredcache_test.go b/layeredcache_test.go
index 390b4e4..cdb1106 100644
--- a/layeredcache_test.go
+++ b/layeredcache_test.go
@@ -235,8 +235,9 @@ func (_ LayeredCacheTests) ResizeOnTheFly() {
 	for i := 0; i < 5; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
 	}
+	time.Sleep(time.Millisecond * 20)
 	cache.SetMaxSize(3)
-	time.Sleep(time.Millisecond * 10)
+	time.Sleep(time.Millisecond * 20)
 	Expect(cache.GetDropped()).To.Equal(2)
 	Expect(cache.Get("0", "a")).To.Equal(nil)
 	Expect(cache.Get("1", "a")).To.Equal(nil)
@@ -245,7 +246,7 @@ func (_ LayeredCacheTests) ResizeOnTheFly() {
 	Expect(cache.Get("4", "a").Value()).To.Equal(4)
 
 	cache.Set("5", "a", 5, time.Minute)
-	time.Sleep(time.Millisecond * 5)
+	time.Sleep(time.Millisecond * 10)
 	Expect(cache.GetDropped()).To.Equal(1)
 	Expect(cache.Get("2", "a")).To.Equal(nil)
 	Expect(cache.Get("3", "a").Value()).To.Equal(3)
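The core of the second patch is the pruning bound both `gc` methods now compute: drop the usual `itemsToPrune` entries, but never fewer than the current overshoot of `size` over `maxSize`. A standalone sketch of that arithmetic follows; the `pruneTarget` helper is illustrative, not a ccache function.

```go
package app

// pruneTarget mirrors the bound computed by the patched gc(): prune at least
// itemsToPrune entries, but if the cache has overshot maxSize by more than
// that (for example, right after SetMaxSize shrank the limit), prune the
// overshoot instead, so a single gc pass brings size back down to maxSize.
func pruneTarget(size, maxSize, itemsToPrune int64) int64 {
	target := itemsToPrune
	if overshoot := size - maxSize; overshoot > target {
		target = overshoot
	}
	return target
}
```

For example, if SetMaxSize drops maxSize from 5000 to 1000 while size is 5000 and itemsToPrune is 500, the old loop would free only 500 items per pass; pruneTarget(5000, 1000, 500) is 4000, so one pass now clears the cache back to its limit.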