17 Commits

Author SHA1 Message Date
Karl Seguin
a2d6215577 Merge pull request #8 from jdeppe-pivotal/master
Add a SecondaryCache which exposes the secondary part of a LayeredCache
2016-11-03 22:19:53 +07:00
Jens Deppe
a451d7262c Integrate feedback and upstream fixes
- Ensure correct locking in GetOrCreateSecondaryCache
- Fetch now returns a *Item
2016-11-01 23:53:22 -07:00
Jens Deppe
d2c2442186 Merge remote-tracking branch 'seguin/master' 2016-11-01 20:33:44 -07:00
Karl Seguin
8adbb5637b return *Item from layered cache fetch instead of interface{} 2016-11-02 09:34:09 +07:00
Jens Deppe
c1634a4d00 Add concept of a SecondaryCache which exposes the secondary part of a LayeredCache 2016-11-01 09:01:39 -07:00
Karl Seguin
2f6b517f7b Merge pull request #6 from HasMatthew/nanosecond_ttl
Use nanosecond-resolution TTL instead of second-resolution.
2016-07-07 20:03:45 -07:00
Matthew Dale
162d4e27ca Use nanosecond-resolution TTL instead of second-resolution. 2016-07-07 15:32:49 -07:00
Karl Seguin
ddcff8e624 Merge pull request #5 from dvdplm/master
Fetch does not return stale items
2016-02-05 22:16:17 +08:00
David Palm
3665b16e83 Better test 2016-02-05 14:34:58 +01:00
David Palm
d5307b40af Fetch does not return stale items 2016-02-03 16:07:59 +01:00
Karl Seguin
74754c77cc Partially fixing #3.
On close, drain the deletables channel (unblocking any waiting goroutines) and
close deletables. Like Gets and Sets against a now-closed promotables,
this means any subsequent Deletes from deletables will panic.

I'm still not sure that this is ccache's responsibility. If a client closes a DB
connection, we'd expect subsequent operations against the now-closed connection
to fail. My main problems with defer'ing a recover are:

1 - the performance overhead on every single get / set / delete
2 - not communicating with the caller that the requested operation is no longer
    valid.
2015-07-26 11:05:48 +08:00
Karl Seguin
bfa769c6b6 add Stop method to stop the background worker and make it possible for the GC to reap the object 2015-07-23 22:24:50 +08:00
Karl Seguin
41f1a3cfcb gonna be one of those days... 2015-01-07 08:12:17 +07:00
Karl Seguin
f9c7f14b7b Fetch's API wasn't usable. It returned different value types based on whether
the fetch was needed or not. It now behaves consistently (with itself and with
Get), returning an *Item.
2015-01-07 08:09:39 +07:00
Karl Seguin
6df1e24ae3 2 changes:
1 -
Previously, we determined if an item should be promoted in the main getter
thread. This required that we protect the item.promotions variable, as both
the getter and the worker were concurrently accessing it. This change pushes
the conditional promotion to the worker (from the getter's point of view, items
are always promoted). Since only the worker ever accesses .promotions, we no
longer must protect access to it.

2 -
The total size of the cache was being maintained by both the worker thread
and the calling code. This required that we protect access to cache.size. Now,
only the worker ever changes the size. While this simplifies much of the code,
it means that we can't easily replace an item (replacement either via Set or
Replace). A replacement now involves creating a new object and deleting the old
one (using the existing deletables and promotable infrastructure). The only
noticeable impact from this change is that, despite previous documentation,
Replace WILL cause the item to be promoted (but it still only does so if it
exists and it still doesn't extend the original TTL).
2014-12-28 11:11:32 +07:00
Karl Seguin
557d56ec6f guard all access to item.promotions 2014-12-28 10:35:20 +07:00
Karl Seguin
c75dcd4c12 link to rcache 2014-12-06 17:19:23 +07:00
12 changed files with 391 additions and 176 deletions

View File

@@ -2,7 +2,6 @@ package ccache
import (
"sync"
"sync/atomic"
"time"
)
@@ -17,45 +16,14 @@ func (b *bucket) get(key string) *Item {
return b.lookup[key]
}
func (b *bucket) set(key string, value interface{}, duration time.Duration) (*Item, bool, int64) {
expires := time.Now().Add(duration).Unix()
b.Lock()
defer b.Unlock()
if existing, exists := b.lookup[key]; exists {
existing.value = value
existing.expires = expires
d := int64(0)
if sized, ok := value.(Sized); ok {
newSize := sized.Size()
d = newSize - existing.size
if d != 0 {
atomic.StoreInt64(&existing.size, newSize)
}
}
return existing, false, int64(d)
}
func (b *bucket) set(key string, value interface{}, duration time.Duration) (*Item, *Item) {
expires := time.Now().Add(duration).UnixNano()
item := newItem(key, value, expires)
b.lookup[key] = item
return item, true, int64(item.size)
}
func (b *bucket) replace(key string, value interface{}) (bool, int64) {
b.Lock()
defer b.Unlock()
existing, exists := b.lookup[key]
if exists == false {
return false, 0
}
d := int64(0)
if sized, ok := value.(Sized); ok {
newSize := sized.Size()
d = newSize - existing.size
if d != 0 {
atomic.StoreInt64(&existing.size, newSize)
}
}
existing.value = value
return true, d
existing := b.lookup[key]
b.lookup[key] = item
return item, existing
}
func (b *bucket) delete(key string) *Item {

View File

@@ -32,37 +32,20 @@ func (_ *BucketTests) DeleteItemFromBucket() {
func (_ *BucketTests) SetsANewBucketItem() {
bucket := testBucket()
item, new, d := bucket.set("spice", TestValue("flow"), time.Minute)
item, existing := bucket.set("spice", TestValue("flow"), time.Minute)
assertValue(item, "flow")
item = bucket.get("spice")
assertValue(item, "flow")
Expect(new).To.Equal(true)
Expect(d).To.Equal(1)
Expect(existing).To.Equal(nil)
}
func (_ *BucketTests) SetsAnExistingItem() {
bucket := testBucket()
item, new, d := bucket.set("power", TestValue("9002"), time.Minute)
item, existing := bucket.set("power", TestValue("9001"), time.Minute)
assertValue(item, "9002")
item = bucket.get("power")
assertValue(item, "9002")
Expect(new).To.Equal(false)
Expect(d).To.Equal(0)
}
func (_ *BucketTests) ReplaceDoesNothingIfKeyDoesNotExist() {
bucket := testBucket()
Expect(bucket.replace("power", TestValue("9002"))).To.Equal(false)
Expect(bucket.get("power")).To.Equal(nil)
}
func (_ *BucketTests) ReplaceReplacesThevalue() {
bucket := testBucket()
item, _, _ := bucket.set("power", TestValue("9002"), time.Minute)
Expect(bucket.replace("power", TestValue("9004"))).To.Equal(true)
Expect(item.Value().(string)).To.Equal("9004")
Expect(bucket.get("power").Value().(string)).To.Equal("9004")
//not sure how to test that the TTL hasn't changed sort of a sleep..
assertValue(existing, "9001")
}
func testBucket() *bucket {

111
cache.go
View File

@@ -43,13 +43,12 @@ func New(config *Configuration) *Cache {
// is expired and item.TTL() to see how long until the item expires (which
// will be negative for an already expired item).
func (c *Cache) Get(key string) *Item {
bucket := c.bucket(key)
item := bucket.get(key)
item := c.bucket(key).get(key)
if item == nil {
return nil
}
if item.expires > time.Now().Unix() {
c.conditionalPromote(item)
if item.expires > time.Now().UnixNano() {
c.promote(item)
}
return item
}
@@ -67,41 +66,34 @@ func (c *Cache) TrackingGet(key string) TrackedItem {
// Set the value in the cache for the specified duration
func (c *Cache) Set(key string, value interface{}, duration time.Duration) {
item, new, d := c.bucket(key).set(key, value, duration)
if new {
c.promote(item)
} else {
c.conditionalPromote(item)
}
if d != 0 {
atomic.AddInt64(&c.size, d)
}
c.set(key, value, duration)
}
// Replace the value if it exists, does not set if it doesn't.
// Returns true if the item existed and was replaced, false otherwise.
// Replace does not reset item's TTL nor does it alter its position in the LRU
// Replace does not reset item's TTL
func (c *Cache) Replace(key string, value interface{}) bool {
exists, d := c.bucket(key).replace(key, value)
if d != 0 {
atomic.AddInt64(&c.size, d)
item := c.bucket(key).get(key)
if item == nil {
return false
}
return exists
c.Set(key, value, item.TTL())
return true
}
// Attempts to get the value from the cache and calls fetch on a miss.
// If fetch returns an error, no value is cached and the error is returned back
// to the caller.
func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interface{}, error)) (interface{}, error) {
// Attempts to get the value from the cache and calls fetch on a miss (missing
// or stale item). If fetch returns an error, no value is cached and the error
// is returned back to the caller.
func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
item := c.Get(key)
if item != nil {
if item != nil && !item.Expired() {
return item, nil
}
value, err := fetch()
if err == nil {
c.Set(key, value, duration)
if err != nil {
return nil, err
}
return value, err
return c.set(key, value, duration), nil
}
// Remove the item from the cache, return true if the item was present, false otherwise.
@@ -123,24 +115,32 @@ func (c *Cache) Clear() {
c.list = list.New()
}
// Stops the background worker. Operations performed on the cache after Stop
// is called are likely to panic
func (c *Cache) Stop() {
close(c.promotables)
}
func (c *Cache) deleteItem(bucket *bucket, item *Item) {
bucket.delete(item.key) //stop other GETs from getting it
c.deletables <- item
}
func (c *Cache) set(key string, value interface{}, duration time.Duration) *Item {
item, existing := c.bucket(key).set(key, value, duration)
if existing != nil {
c.deletables <- existing
}
c.promote(item)
return item
}
func (c *Cache) bucket(key string) *bucket {
h := fnv.New32a()
h.Write([]byte(key))
return c.buckets[h.Sum32()&c.bucketMask]
}
func (c *Cache) conditionalPromote(item *Item) {
if item.shouldPromote(c.getsPerPromote) == false {
return
}
c.promote(item)
}
func (c *Cache) promote(item *Item) {
c.promotables <- item
}
@@ -148,19 +148,37 @@ func (c *Cache) promote(item *Item) {
func (c *Cache) worker() {
for {
select {
case item := <-c.promotables:
if c.doPromote(item) && atomic.LoadInt64(&c.size) > c.maxSize {
case item, ok := <-c.promotables:
if ok == false {
goto drain
}
if c.doPromote(item) && c.size > c.maxSize {
c.gc()
}
case item := <-c.deletables:
atomic.AddInt64(&c.size, -item.size)
if item.element == nil {
item.promotions = -2
} else {
c.list.Remove(item.element)
}
c.doDelete(item)
}
}
drain:
for {
select {
case item := <-c.deletables:
c.doDelete(item)
default:
close(c.deletables)
return
}
}
}
func (c *Cache) doDelete(item *Item) {
if item.element == nil {
item.promotions = -2
} else {
c.size -= item.size
c.list.Remove(item.element)
}
}
func (c *Cache) doPromote(item *Item) bool {
@@ -168,11 +186,15 @@ func (c *Cache) doPromote(item *Item) bool {
if item.promotions == -2 {
return false
}
item.promotions = 0
if item.element != nil { //not a new item
c.list.MoveToFront(item.element)
if item.shouldPromote(c.getsPerPromote) {
c.list.MoveToFront(item.element)
item.promotions = 0
}
return false
}
c.size += item.size
item.element = c.list.PushFront(item)
return true
}
@@ -187,8 +209,9 @@ func (c *Cache) gc() {
item := element.Value.(*Item)
if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 {
c.bucket(item.key).delete(item.key)
atomic.AddInt64(&c.size, -item.size)
c.size -= item.size
c.list.Remove(element)
item.promotions = -2
}
element = prev
}

View File

@@ -1,10 +1,11 @@
package ccache
import (
. "github.com/karlseguin/expect"
"strconv"
"testing"
"time"
. "github.com/karlseguin/expect"
)
type CacheTests struct{}
@@ -22,6 +23,17 @@ func (_ CacheTests) DeletesAValue() {
Expect(cache.Get("worm").Value()).To.Equal("sand")
}
func (_ CacheTests) FetchesExpiredItems() {
cache := New(Configure())
fn := func() (interface{}, error) { return "moo-moo", nil }
cache.Set("beef", "moo", time.Second*-1)
Expect(cache.Get("beef").Value()).To.Equal("moo")
out, _ := cache.Fetch("beef", time.Second, fn)
Expect(out.Value()).To.Equal("moo-moo")
}
func (_ CacheTests) GCsTheOldestItems() {
cache := New(Configure().ItemsToPrune(10))
for i := 0; i < 500; i++ {
@@ -83,22 +95,27 @@ func (_ CacheTests) RemovesOldestItemWhenFullBySizer() {
Expect(cache.Get("0")).To.Equal(nil)
Expect(cache.Get("1")).To.Equal(nil)
Expect(cache.Get("2")).To.Equal(nil)
Expect(cache.Get("3").Value().(*SizedItem).id).To.Equal(3)
Expect(cache.Get("3")).To.Equal(nil)
Expect(cache.Get("4").Value().(*SizedItem).id).To.Equal(4)
}
func (_ CacheTests) SetUpdatesSizeOnDelta() {
cache := New(Configure())
cache.Set("a", &SizedItem{0, 2}, time.Minute)
cache.Set("b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(5))
cache.Set("b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(5))
cache.Set("b", &SizedItem{0, 4}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(6))
cache.Set("b", &SizedItem{0, 2}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(4))
cache.Delete("b")
time.Sleep(time.Millisecond * 10)
time.Sleep(time.Millisecond * 100)
Expect(cache.size).To.Equal(int64(2))
}
@@ -108,6 +125,7 @@ func (_ CacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
cache.Set("2", &SizedItem{1, 2}, time.Minute)
cache.Set("3", &SizedItem{1, 2}, time.Minute)
cache.Replace("4", &SizedItem{1, 2})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(6))
}
@@ -117,12 +135,15 @@ func (_ CacheTests) ReplaceChangesSize() {
cache.Set("2", &SizedItem{1, 2}, time.Minute)
cache.Replace("2", &SizedItem{1, 2})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(4))
cache.Replace("2", &SizedItem{1, 1})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(3))
cache.Replace("2", &SizedItem{1, 3})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(5))
}

13
item.go
View File

@@ -60,14 +60,15 @@ func newItem(key string, value interface{}, expires int64) *Item {
return &Item{
key: key,
value: value,
promotions: -1,
promotions: 0,
size: size,
expires: expires,
}
}
func (i *Item) shouldPromote(getsPerPromote int32) bool {
return atomic.AddInt32(&i.promotions, 1) == getsPerPromote
i.promotions += 1
return i.promotions == getsPerPromote
}
func (i *Item) Value() interface{} {
@@ -84,19 +85,19 @@ func (i *Item) Release() {
func (i *Item) Expired() bool {
expires := atomic.LoadInt64(&i.expires)
return expires < time.Now().Unix()
return expires < time.Now().UnixNano()
}
func (i *Item) TTL() time.Duration {
expires := atomic.LoadInt64(&i.expires)
return time.Second * time.Duration(expires-time.Now().Unix())
return time.Nanosecond * time.Duration(expires-time.Now().UnixNano())
}
func (i *Item) Expires() time.Time {
expires := atomic.LoadInt64(&i.expires)
return time.Unix(expires, 0)
return time.Unix(0, expires)
}
func (i *Item) Extend(duration time.Duration) {
atomic.StoreInt64(&i.expires, time.Now().Add(duration).Unix())
atomic.StoreInt64(&i.expires, time.Now().Add(duration).UnixNano())
}

View File

@@ -1,9 +1,11 @@
package ccache
import (
. "github.com/karlseguin/expect"
"math"
"testing"
"time"
. "github.com/karlseguin/expect"
)
type ItemTests struct{}
@@ -19,29 +21,29 @@ func (_ *ItemTests) Promotability() {
}
func (_ *ItemTests) Expired() {
now := time.Now().Unix()
item1 := &Item{expires: now + 1}
item2 := &Item{expires: now - 1}
now := time.Now().UnixNano()
item1 := &Item{expires: now + (10 * int64(time.Millisecond))}
item2 := &Item{expires: now - (10 * int64(time.Millisecond))}
Expect(item1.Expired()).To.Equal(false)
Expect(item2.Expired()).To.Equal(true)
}
func (_ *ItemTests) TTL() {
now := time.Now().Unix()
item1 := &Item{expires: now + 10}
item2 := &Item{expires: now - 10}
Expect(item1.TTL()).To.Equal(time.Second * 10)
Expect(item2.TTL()).To.Equal(time.Second * -10)
now := time.Now().UnixNano()
item1 := &Item{expires: now + int64(time.Second)}
item2 := &Item{expires: now - int64(time.Second)}
Expect(int(math.Ceil(item1.TTL().Seconds()))).To.Equal(1)
Expect(int(math.Ceil(item2.TTL().Seconds()))).To.Equal(-1)
}
func (_ *ItemTests) Expires() {
now := time.Now().Unix()
item := &Item{expires: now + 10}
Expect(item.Expires().Unix()).To.Equal(now + 10)
now := time.Now().UnixNano()
item := &Item{expires: now + (10)}
Expect(item.Expires().UnixNano()).To.Equal(now + 10)
}
func (_ *ItemTests) Extend() {
item := &Item{expires: time.Now().Unix() + 10}
item := &Item{expires: time.Now().UnixNano() + 10}
item.Extend(time.Minute * 2)
Expect(item.Expires().Unix()).To.Equal(time.Now().Unix() + 120)
}

View File

@@ -11,16 +11,24 @@ type layeredBucket struct {
}
func (b *layeredBucket) get(primary, secondary string) *Item {
bucket := b.getSecondaryBucket(primary)
if bucket == nil {
return nil
}
return bucket.get(secondary)
}
func (b *layeredBucket) getSecondaryBucket(primary string) *bucket {
b.RLock()
bucket, exists := b.buckets[primary]
b.RUnlock()
if exists == false {
return nil
}
return bucket.get(secondary)
return bucket
}
func (b *layeredBucket) set(primary, secondary string, value interface{}, duration time.Duration) (*Item, bool, int64) {
func (b *layeredBucket) set(primary, secondary string, value interface{}, duration time.Duration) (*Item, *Item) {
b.Lock()
bkt, exists := b.buckets[primary]
if exists == false {
@@ -28,21 +36,9 @@ func (b *layeredBucket) set(primary, secondary string, value interface{}, durati
b.buckets[primary] = bkt
}
b.Unlock()
item, new, d := bkt.set(secondary, value, duration)
if new {
item.group = primary
}
return item, new, d
}
func (b *layeredBucket) replace(primary, secondary string, value interface{}) (bool, int64) {
b.Lock()
bucket, exists := b.buckets[primary]
b.Unlock()
if exists == false {
return false, 0
}
return bucket.replace(secondary, value)
item, existing := bkt.set(secondary, value, duration)
item.group = primary
return item, existing
}
func (b *layeredBucket) delete(primary, secondary string) *Item {

View File

@@ -54,17 +54,34 @@ func Layered(config *Configuration) *LayeredCache {
// is expired and item.TTL() to see how long until the item expires (which
// will be negative for an already expired item).
func (c *LayeredCache) Get(primary, secondary string) *Item {
bucket := c.bucket(primary)
item := bucket.get(primary, secondary)
item := c.bucket(primary).get(primary, secondary)
if item == nil {
return nil
}
if item.expires > time.Now().Unix() {
c.conditionalPromote(item)
if item.expires > time.Now().UnixNano() {
c.promote(item)
}
return item
}
// Get the secondary cache for a given primary key. This operation will
// never return nil. In the case where the primary key does not exist, a
// new, underlying, empty bucket will be created and returned.
func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache {
primaryBkt := c.bucket(primary)
bkt := primaryBkt.getSecondaryBucket(primary)
primaryBkt.Lock()
if bkt == nil {
bkt = &bucket{lookup: make(map[string]*Item)}
primaryBkt.buckets[primary] = bkt
}
primaryBkt.Unlock()
return &SecondaryCache{
bucket: bkt,
pCache: c,
}
}
// Used when the cache was created with the Track() configuration option.
// Avoid otherwise
func (c *LayeredCache) TrackingGet(primary, secondary string) TrackedItem {
@@ -78,41 +95,34 @@ func (c *LayeredCache) TrackingGet(primary, secondary string) TrackedItem {
// Set the value in the cache for the specified duration
func (c *LayeredCache) Set(primary, secondary string, value interface{}, duration time.Duration) {
item, new, d := c.bucket(primary).set(primary, secondary, value, duration)
if new {
c.promote(item)
} else {
c.conditionalPromote(item)
}
if d != 0 {
atomic.AddInt64(&c.size, d)
}
c.set(primary, secondary, value, duration)
}
// Replace the value if it exists, does not set if it doesn't.
// Returns true if the item existed and was replaced, false otherwise.
// Replace does not reset item's TTL nor does it alter its position in the LRU
func (c *LayeredCache) Replace(primary, secondary string, value interface{}) bool {
exists, d := c.bucket(primary).replace(primary, secondary, value)
if d != 0 {
atomic.AddInt64(&c.size, d)
item := c.bucket(primary).get(primary, secondary)
if item == nil {
return false
}
return exists
c.Set(primary, secondary, value, item.TTL())
return true
}
// Attempts to get the value from the cache and calls fetch on a miss.
// If fetch returns an error, no value is cached and the error is returned back
// to the caller.
func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (interface{}, error) {
func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
item := c.Get(primary, secondary)
if item != nil {
return item, nil
}
value, err := fetch()
if err == nil {
c.Set(primary, secondary, value, duration)
if err != nil {
return nil, err
}
return value, err
return c.set(primary, secondary, value, duration), nil
}
// Remove the item from the cache, return true if the item was present, false otherwise.
@@ -139,19 +149,21 @@ func (c *LayeredCache) Clear() {
c.list = list.New()
}
func (c *LayeredCache) set(primary, secondary string, value interface{}, duration time.Duration) *Item {
item, existing := c.bucket(primary).set(primary, secondary, value, duration)
if existing != nil {
c.deletables <- existing
}
c.promote(item)
return item
}
func (c *LayeredCache) bucket(key string) *layeredBucket {
h := fnv.New32a()
h.Write([]byte(key))
return c.buckets[h.Sum32()&c.bucketMask]
}
func (c *LayeredCache) conditionalPromote(item *Item) {
if item.shouldPromote(c.getsPerPromote) == false {
return
}
c.promote(item)
}
func (c *LayeredCache) promote(item *Item) {
c.promotables <- item
}
@@ -160,14 +172,14 @@ func (c *LayeredCache) worker() {
for {
select {
case item := <-c.promotables:
if c.doPromote(item) && atomic.LoadInt64(&c.size) > c.maxSize {
if c.doPromote(item) && c.size > c.maxSize {
c.gc()
}
case item := <-c.deletables:
atomic.AddInt64(&c.size, -item.size)
if item.element == nil {
item.promotions = -2
} else {
c.size -= item.size
c.list.Remove(item.element)
}
}
@@ -179,12 +191,14 @@ func (c *LayeredCache) doPromote(item *Item) bool {
if item.promotions == -2 {
return false
}
item.promotions = 0
if item.element != nil { //not a new item
c.list.MoveToFront(item.element)
if item.shouldPromote(c.getsPerPromote) {
c.list.MoveToFront(item.element)
item.promotions = 0
}
return false
}
c.size += item.size
item.element = c.list.PushFront(item)
return true
}
@@ -198,9 +212,10 @@ func (c *LayeredCache) gc() {
prev := element.Prev()
item := element.Value.(*Item)
if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 {
atomic.AddInt64(&c.size, -item.size)
c.bucket(item.group).delete(item.group, item.key)
c.size -= item.size
c.list.Remove(element)
item.promotions = -2
}
element = prev
}

View File

@@ -152,20 +152,25 @@ func (_ LayeredCacheTests) RemovesOldestItemWhenFullBySizer() {
Expect(cache.Get("pri", "0")).To.Equal(nil)
Expect(cache.Get("pri", "1")).To.Equal(nil)
Expect(cache.Get("pri", "2")).To.Equal(nil)
Expect(cache.Get("pri", "3").Value().(*SizedItem).id).To.Equal(3)
Expect(cache.Get("pri", "3")).To.Equal(nil)
Expect(cache.Get("pri", "4").Value().(*SizedItem).id).To.Equal(4)
}
func (_ LayeredCacheTests) SetUpdatesSizeOnDelta() {
cache := Layered(Configure())
cache.Set("pri", "a", &SizedItem{0, 2}, time.Minute)
cache.Set("pri", "b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(5))
cache.Set("pri", "b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(5))
cache.Set("pri", "b", &SizedItem{0, 4}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(6))
cache.Set("pri", "b", &SizedItem{0, 2}, time.Minute)
cache.Set("sec", "b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(7))
cache.Delete("pri", "b")
time.Sleep(time.Millisecond * 10)
@@ -178,6 +183,7 @@ func (_ LayeredCacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
cache.Set("pri", "2", &SizedItem{1, 2}, time.Minute)
cache.Set("pri", "3", &SizedItem{1, 2}, time.Minute)
cache.Replace("sec", "3", &SizedItem{1, 2})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(6))
}
@@ -187,11 +193,14 @@ func (_ LayeredCacheTests) ReplaceChangesSize() {
cache.Set("pri", "2", &SizedItem{1, 2}, time.Minute)
cache.Replace("pri", "2", &SizedItem{1, 2})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(4))
cache.Replace("pri", "2", &SizedItem{1, 1})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(3))
cache.Replace("pri", "2", &SizedItem{1, 3})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(5))
}

View File

@@ -101,6 +101,10 @@ cache.Replace("user:4", user)
`Replace` returns true if the item existed (and thus was replaced). In the case where the key was not in the cache, the value *is not* inserted and false is returned.
### Stop
The cache's background worker can be stopped by calling `Stop`. Once `Stop` is called
the cache should not be used (calls are likely to panic). Stop must be called in order to allow the garbage collector to reap the cache.
## Tracking
CCache supports a special tracking mode which is meant to be used in conjunction with other pieces of your code that maintains a long-lived reference to data.
@@ -146,7 +150,23 @@ cache.Delete("/users/goku", "type:xml")
// OR
cache.DeleteAll("/users/goku")
```
# SecondaryCache
In some cases, when using a `LayeredCache`, it may be desirable to always be acting on the secondary portion of the cache entry. This could be the case where the primary key is used as a key elsewhere in your code. The `SecondaryCache` is retrieved with:
```go
cache := ccache.Layered(ccache.Configure())
sCache := cache.GetOrCreateSecondaryCache("/users/goku")
sCache.Set("type:json", "{value_to_cache}", time.Minute * 5)
```
The semantics for interacting with the `SecondaryCache` are exactly the same as for a regular `Cache`. However, one difference is that `Get` will not return nil, but will return an empty 'cache' for a non-existent primary key.
## Size
By default, items added to a cache have a size of 1. This means that if you configure `MaxSize(10000)`, you'll be able to store 10000 items in the cache.
However, if the values you set into the cache have a method `Size() int64`, this size will be used. Note that ccache has an overhead of ~350 bytes per entry, which isn't taken into account. In other words, given a filled up cache, with `MaxSize(4096000)` and items that return a `Size() int64` of 2048, we can expect to find 2000 items (4096000/2048) taking a total space of 4796000 bytes.
## Want Something Simpler?
For a simpler cache, check out [rcache](https://github.com/karlseguin/rcache)

72
secondarycache.go Normal file
View File

@@ -0,0 +1,72 @@
package ccache
import "time"
type SecondaryCache struct {
bucket *bucket
pCache *LayeredCache
}
// Get the secondary key.
// The semantics are the same as for LayeredCache.Get
func (s *SecondaryCache) Get(secondary string) *Item {
return s.bucket.get(secondary)
}
// Set the secondary key to a value.
// The semantics are the same as for LayeredCache.Set
func (s *SecondaryCache) Set(secondary string, value interface{}, duration time.Duration) *Item {
item, existing := s.bucket.set(secondary, value, duration)
if existing != nil {
s.pCache.deletables <- existing
}
s.pCache.promote(item)
return item
}
// Fetch or set a secondary key.
// The semantics are the same as for LayeredCache.Fetch
func (s *SecondaryCache) Fetch(secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
item := s.Get(secondary)
if item != nil {
return item, nil
}
value, err := fetch()
if err != nil {
return nil, err
}
return s.Set(secondary, value, duration), nil
}
// Delete a secondary key.
// The semantics are the same as for LayeredCache.Delete
func (s *SecondaryCache) Delete(secondary string) bool {
item := s.bucket.delete(secondary)
if item != nil {
s.pCache.deletables <- item
return true
}
return false
}
// Replace a secondary key.
// The semantics are the same as for LayeredCache.Replace
func (s *SecondaryCache) Replace(secondary string, value interface{}) bool {
item := s.Get(secondary)
if item == nil {
return false
}
s.Set(secondary, value, item.TTL())
return true
}
// Track a secondary key.
// The semantics are the same as for LayeredCache.TrackingGet
func (c *SecondaryCache) TrackingGet(secondary string) TrackedItem {
item := c.Get(secondary)
if item == nil {
return NilTracked
}
item.track()
return item
}

105
secondarycache_test.go Normal file
View File

@@ -0,0 +1,105 @@
package ccache
import (
. "github.com/karlseguin/expect"
"testing"
"time"
"strconv"
)
type SecondaryCacheTests struct{}
func Test_SecondaryCache(t *testing.T) {
Expectify(new(SecondaryCacheTests), t)
}
func (_ SecondaryCacheTests) GetsANonExistantValue() {
cache := newLayered().GetOrCreateSecondaryCache("foo")
Expect(cache).Not.To.Equal(nil)
}
func (_ SecondaryCacheTests) SetANewValue() {
cache := newLayered()
cache.Set("spice", "flow", "a value", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
Expect(sCache.Get("flow").Value()).To.Equal("a value")
Expect(sCache.Get("stop")).To.Equal(nil)
}
func (_ SecondaryCacheTests) ValueCanBeSeenInBothCaches1() {
cache := newLayered()
cache.Set("spice", "flow", "a value", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
sCache.Set("orinoco", "another value", time.Minute)
Expect(sCache.Get("orinoco").Value()).To.Equal("another value")
Expect(cache.Get("spice", "orinoco").Value()).To.Equal("another value")
}
func (_ SecondaryCacheTests) ValueCanBeSeenInBothCaches2() {
cache := newLayered()
sCache := cache.GetOrCreateSecondaryCache("spice")
sCache.Set("flow", "a value", time.Minute)
Expect(sCache.Get("flow").Value()).To.Equal("a value")
Expect(cache.Get("spice", "flow").Value()).To.Equal("a value")
}
func (_ SecondaryCacheTests) DeletesAreReflectedInBothCaches() {
cache := newLayered()
cache.Set("spice", "flow", "a value", time.Minute)
cache.Set("spice", "sister", "ghanima", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
cache.Delete("spice", "flow")
Expect(cache.Get("spice", "flow")).To.Equal(nil)
Expect(sCache.Get("flow")).To.Equal(nil)
sCache.Delete("sister")
Expect(cache.Get("spice", "sister")).To.Equal(nil)
Expect(sCache.Get("sister")).To.Equal(nil)
}
func (_ SecondaryCacheTests) ReplaceDoesNothingIfKeyDoesNotExist() {
cache := newLayered()
sCache := cache.GetOrCreateSecondaryCache("spice")
Expect(sCache.Replace("flow", "value-a")).To.Equal(false)
Expect(cache.Get("spice", "flow")).To.Equal(nil)
}
func (_ SecondaryCacheTests) ReplaceUpdatesTheValue() {
cache := newLayered()
cache.Set("spice", "flow", "value-a", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
Expect(sCache.Replace("flow", "value-b")).To.Equal(true)
Expect(cache.Get("spice", "flow").Value().(string)).To.Equal("value-b")
}
func (_ SecondaryCacheTests) FetchReturnsAnExistingValue() {
cache := newLayered()
cache.Set("spice", "flow", "value-a", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
val, _ := sCache.Fetch("flow", time.Minute, func() (interface{}, error) {return "a fetched value", nil})
Expect(val.Value().(string)).To.Equal("value-a")
}
func (_ SecondaryCacheTests) FetchReturnsANewValue() {
cache := newLayered()
sCache := cache.GetOrCreateSecondaryCache("spice")
val, _ := sCache.Fetch("flow", time.Minute, func() (interface{}, error) {return "a fetched value", nil})
Expect(val.Value().(string)).To.Equal("a fetched value")
}
func (_ SecondaryCacheTests) TrackerDoesNotCleanupHeldInstance() {
cache := Layered(Configure().ItemsToPrune(10).Track())
for i := 0; i < 10; i++ {
cache.Set(strconv.Itoa(i), "a", i, time.Minute)
}
sCache := cache.GetOrCreateSecondaryCache("0")
item := sCache.TrackingGet("a")
time.Sleep(time.Millisecond * 10)
cache.gc()
Expect(cache.Get("0", "a").Value()).To.Equal(0)
Expect(cache.Get("1", "a")).To.Equal(nil)
item.Release()
cache.gc()
Expect(cache.Get("0", "a")).To.Equal(nil)
}