The cache can now do reference counting so that the LRU algorithm is aware of
long-lived objects and won't clean them up. Oftentimes, the value returned from a cache hit is short-lived. As a silly example: func GetUser(response http.ResponseWriter) { user := cache.Get("user:1") response.Write(serialize(user)) } It's fine if the cache's GC cleans up "user:1" while the user variable has a reference to the object — the cache's reference is removed, and the real GC will clean it up at some point after the user variable falls out of scope. However, what if user is long-lived? Possibly stored as a reference inside another cached object? Normally (without this commit), the next time you call cache.Get("user:1") you'll get a miss and will need to refetch the object — even though the original user object is still somewhere in memory; you just lost your reference to it from the cache. By enabling the Track() configuration flag, and calling TrackingGet() (instead of Get), the cache will track that the object is in use and won't GC it, even under great memory pressure (what's the point? something else is holding on to it anyway). Calling item.Release() will decrement the number of references. When the count is 0, the item can be pruned from the cache. The returned value is a TrackedItem which exposes: - Value() interface{} (to get the actual cached value) - Release() to release the item back to the cache
This commit is contained in:
@@ -38,7 +38,7 @@ func (b *Bucket) delete(key string) {
|
||||
delete(b.lookup, key)
|
||||
}
|
||||
|
||||
func (b *Bucket) getAndDelete(key string) *Item{
|
||||
func (b *Bucket) getAndDelete(key string) *Item {
|
||||
b.Lock()
|
||||
defer b.Unlock()
|
||||
item := b.lookup[key]
|
||||
|
@@ -1,9 +1,9 @@
|
||||
package ccache
|
||||
|
||||
import (
|
||||
"time"
|
||||
"github.com/karlseguin/gspec"
|
||||
"testing"
|
||||
"github.com/viki-org/gspec"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestGetMissFromBucket(t *testing.T) {
|
||||
@@ -44,7 +44,7 @@ func TestSetsAnExistingItem(t *testing.T) {
|
||||
}
|
||||
|
||||
func testBucket() *Bucket {
|
||||
b := &Bucket{lookup: make(map[string]*Item),}
|
||||
b := &Bucket{lookup: make(map[string]*Item)}
|
||||
b.lookup["power"] = &Item{
|
||||
key: "power",
|
||||
value: TestValue("9000"),
|
||||
|
61
cache.go
61
cache.go
@@ -1,10 +1,12 @@
|
||||
// An LRU cache aimed at high concurrency
|
||||
package ccache
|
||||
|
||||
import (
|
||||
"time"
|
||||
"runtime"
|
||||
"hash/fnv"
|
||||
"container/list"
|
||||
"hash/fnv"
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Cache struct {
|
||||
@@ -35,15 +37,33 @@ func New(config *Configuration) *Cache {
|
||||
}
|
||||
|
||||
func (c *Cache) Get(key string) interface{} {
|
||||
if item := c.get(key); item != nil {
|
||||
return item.value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cache) TrackingGet(key string) TrackedItem {
|
||||
item := c.get(key)
|
||||
if item == nil {
|
||||
return NilTracked
|
||||
}
|
||||
item.track()
|
||||
return item
|
||||
}
|
||||
|
||||
func (c *Cache) get(key string) *Item {
|
||||
bucket := c.bucket(key)
|
||||
item := bucket.get(key)
|
||||
if item == nil { return nil }
|
||||
if item == nil {
|
||||
return nil
|
||||
}
|
||||
if item.expires.Before(time.Now()) {
|
||||
c.deleteItem(bucket, item)
|
||||
return nil
|
||||
}
|
||||
c.conditionalPromote(item)
|
||||
return item.value
|
||||
return item
|
||||
}
|
||||
|
||||
func (c *Cache) Set(key string, value interface{}, duration time.Duration) {
|
||||
@@ -57,7 +77,9 @@ func (c *Cache) Set(key string, value interface{}, duration time.Duration) {
|
||||
|
||||
func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interface{}, error)) (interface{}, error) {
|
||||
item := c.Get(key)
|
||||
if item != nil { return item, nil }
|
||||
if item != nil {
|
||||
return item, nil
|
||||
}
|
||||
value, err := fetch()
|
||||
if err == nil {
|
||||
c.Set(key, value, duration)
|
||||
@@ -93,7 +115,9 @@ func (c *Cache) bucket(key string) *Bucket {
|
||||
}
|
||||
|
||||
func (c *Cache) conditionalPromote(item *Item) {
|
||||
if item.shouldPromote(c.getsPerPromote) == false { return }
|
||||
if item.shouldPromote(c.getsPerPromote) == false {
|
||||
return
|
||||
}
|
||||
c.promote(item)
|
||||
}
|
||||
|
||||
@@ -105,12 +129,15 @@ func (c *Cache) worker() {
|
||||
ms := new(runtime.MemStats)
|
||||
for {
|
||||
select {
|
||||
case item := <- c.promotables:
|
||||
wasNew := c.doPromote(item)
|
||||
if wasNew == false { continue }
|
||||
case item := <-c.promotables:
|
||||
if wasNew := c.doPromote(item); wasNew == false {
|
||||
continue
|
||||
}
|
||||
runtime.ReadMemStats(ms)
|
||||
if ms.HeapAlloc > c.size { c.gc() }
|
||||
case item := <- c.deletables:
|
||||
if ms.HeapAlloc > c.size {
|
||||
c.gc()
|
||||
}
|
||||
case item := <-c.deletables:
|
||||
c.list.Remove(item.element)
|
||||
}
|
||||
}
|
||||
@@ -129,11 +156,17 @@ func (c *Cache) doPromote(item *Item) bool {
|
||||
}
|
||||
|
||||
func (c *Cache) gc() {
|
||||
for i := 0; i < c.itemsToPrune; i++ {
|
||||
element := c.list.Back()
|
||||
if element == nil { return }
|
||||
for i := 0; i < c.itemsToPrune; i++ {
|
||||
if element == nil {
|
||||
return
|
||||
}
|
||||
prev := element.Prev()
|
||||
item := element.Value.(*Item)
|
||||
if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 {
|
||||
c.bucket(item.key).delete(item.key)
|
||||
c.list.Remove(element)
|
||||
}
|
||||
element = prev
|
||||
}
|
||||
}
|
||||
|
49
cache_test.go
Normal file
49
cache_test.go
Normal file
@@ -0,0 +1,49 @@
|
||||
package ccache
|
||||
|
||||
import (
|
||||
"github.com/karlseguin/gspec"
|
||||
"testing"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestGCsTheOldestItems(t *testing.T) {
|
||||
spec := gspec.New(t)
|
||||
cache := New(Configure().ItemsToPrune(10))
|
||||
for i := 0; i < 500; i++ {
|
||||
cache.Set(strconv.Itoa(i), i, time.Minute)
|
||||
}
|
||||
cache.gc()
|
||||
spec.Expect(cache.Get("9")).ToBeNil()
|
||||
spec.Expect(cache.Get("10").(int)).ToEqual(10)
|
||||
}
|
||||
|
||||
func TestPromotedItemsDontGetPruned(t *testing.T) {
|
||||
spec := gspec.New(t)
|
||||
cache := New(Configure().ItemsToPrune(10).GetsPerPromote(1))
|
||||
for i := 0; i < 500; i++ {
|
||||
cache.Set(strconv.Itoa(i), i, time.Minute)
|
||||
}
|
||||
cache.Get("9")
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
cache.gc()
|
||||
spec.Expect(cache.Get("9").(int)).ToEqual(9)
|
||||
spec.Expect(cache.Get("10")).ToBeNil()
|
||||
spec.Expect(cache.Get("11").(int)).ToEqual(11)
|
||||
}
|
||||
|
||||
func TestTrackerDoesNotCleanupHeldInstance(t *testing.T) {
|
||||
spec := gspec.New(t)
|
||||
cache := New(Configure().ItemsToPrune(10).Track())
|
||||
for i := 0; i < 10; i++ {
|
||||
cache.Set(strconv.Itoa(i), i, time.Minute)
|
||||
}
|
||||
item := cache.TrackingGet("0")
|
||||
time.Sleep(time.Millisecond * 10)
|
||||
cache.gc()
|
||||
spec.Expect(cache.Get("0").(int)).ToEqual(0)
|
||||
spec.Expect(cache.Get("1")).ToBeNil()
|
||||
item.Release()
|
||||
cache.gc()
|
||||
spec.Expect(cache.Get("0")).ToBeNil()
|
||||
}
|
@@ -7,16 +7,18 @@ type Configuration struct {
|
||||
deleteBuffer int
|
||||
promoteBuffer int
|
||||
getsPerPromote int32
|
||||
tracking bool
|
||||
}
|
||||
|
||||
func Configure() *Configuration {
|
||||
return &Configuration {
|
||||
return &Configuration{
|
||||
buckets: 64,
|
||||
itemsToPrune: 500,
|
||||
deleteBuffer: 1024,
|
||||
getsPerPromote: 10,
|
||||
promoteBuffer: 1024,
|
||||
size: 500 * 1024 * 1024,
|
||||
tracking: false,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -49,3 +51,8 @@ func (c *Configuration) GetsPerPromote(count int) *Configuration {
|
||||
c.getsPerPromote = int32(count)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Configuration) Track() *Configuration {
|
||||
c.tracking = true
|
||||
return c
|
||||
}
|
||||
|
31
item.go
31
item.go
@@ -1,16 +1,29 @@
|
||||
package ccache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
"sync/atomic"
|
||||
"container/list"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TrackedItem is the handle returned by Cache.TrackingGet. Value
// exposes the cached value; Release decrements the reference count so
// the item becomes prunable again. Always call Release when done.
type TrackedItem interface {
	Value() interface{}
	Release()
}
|
||||
|
||||
// nilItem is a no-op TrackedItem standing in for a cache miss, letting
// callers invoke Value/Release unconditionally.
type nilItem struct{}

// Value always reports nil — there is no cached value on a miss.
func (n *nilItem) Value() interface{} { return nil }

// Release does nothing: a miss holds no reference to decrement.
func (n *nilItem) Release() {}

// NilTracked is the shared miss sentinel returned by TrackingGet.
var NilTracked = new(nilItem)
|
||||
|
||||
type Item struct {
|
||||
key string
|
||||
sync.RWMutex
|
||||
promotions int32
|
||||
refCount int32
|
||||
expires time.Time
|
||||
value interface{}
|
||||
element *list.Element
|
||||
@@ -28,3 +41,15 @@ func newItem(key string, value interface{}, expires time.Time) *Item {
|
||||
func (i *Item) shouldPromote(getsPerPromote int32) bool {
|
||||
return atomic.AddInt32(&i.promotions, 1) == getsPerPromote
|
||||
}
|
||||
|
||||
func (i *Item) Value() interface{} {
|
||||
return i.value
|
||||
}
|
||||
|
||||
func (i *Item) track() {
|
||||
atomic.AddInt32(&i.refCount, 1)
|
||||
}
|
||||
|
||||
func (i *Item) Release() {
|
||||
atomic.AddInt32(&i.refCount, -1)
|
||||
}
|
||||
|
@@ -1,8 +1,8 @@
|
||||
package ccache
|
||||
|
||||
import (
|
||||
"github.com/karlseguin/gspec"
|
||||
"testing"
|
||||
"github.com/viki-org/gspec"
|
||||
)
|
||||
|
||||
func TestItemPromotability(t *testing.T) {
|
||||
|
Reference in New Issue
Block a user