Compare commits

24 Commits

- 36ffada8b5
- 36d03ce88e
- b779edb2ca
- d9f8808f13
- 97e7acb2af
- 5fe99ab07a
- 1189f7f993
- 839a17bedb
- 0dbf3f125f
- f3b2b9fd88
- aa0e37ad6f
- df91803297
- a42bd4a9c8
- e9b7be5016
- fdd08e71c4
- 992cd9564b
- f63031fa40
- d56665a86e
- 223703f7f0
- a24d7f8c53
- 3b58df727e
- 4c88bf60e6
- eab9dbaa7f
- 937ca294e6

bucket.go (16 changed lines)

@@ -23,9 +23,9 @@ func (b *bucket) get(key string) *Item {
 	return b.lookup[key]
 }
 
-func (b *bucket) set(key string, value interface{}, duration time.Duration) (*Item, *Item) {
+func (b *bucket) set(key string, value interface{}, duration time.Duration, track bool) (*Item, *Item) {
 	expires := time.Now().Add(duration).UnixNano()
-	item := newItem(key, value, expires)
+	item := newItem(key, value, expires, track)
 	b.Lock()
 	existing := b.lookup[key]
 	b.lookup[key] = item
@@ -54,13 +54,13 @@ func (b *bucket) delete(key string) *Item {
 // the item from the map. I'm pretty sure this is 100% fine, but it is unique.
 // (We do this so that the write to the channel is under the read lock and not the
 // write lock)
-func (b *bucket) deletePrefix(prefix string, deletables chan *Item) int {
+func (b *bucket) deleteFunc(matches func(key string, item *Item) bool, deletables chan *Item) int {
 	lookup := b.lookup
-	items := make([]*Item, 0, len(lookup)/10)
+	items := make([]*Item, 0)
 
 	b.RLock()
 	for key, item := range lookup {
-		if strings.HasPrefix(key, prefix) {
+		if matches(key, item) {
 			deletables <- item
 			items = append(items, item)
 		}
@@ -80,6 +80,12 @@ func (b *bucket) deletePrefix(prefix string, deletables chan *Item) int {
 	return len(items)
 }
 
+func (b *bucket) deletePrefix(prefix string, deletables chan *Item) int {
+	return b.deleteFunc(func(key string, item *Item) bool {
+		return strings.HasPrefix(key, prefix)
+	}, deletables)
+}
+
 func (b *bucket) clear() {
 	b.Lock()
 	b.lookup = make(map[string]*Item)
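
The bucket change above generalizes the prefix-only scan into `deleteFunc`, which takes an arbitrary matcher; `deletePrefix` then becomes a thin wrapper around it. A standalone sketch of the same predicate-delete pattern, using toy names rather than ccache's internal types (the real implementation also collects matches under a read lock and pushes them onto a deletables channel, which is omitted here):

```go
package main

import (
	"fmt"
	"strings"
)

// store is a toy map-backed container; it only illustrates the
// matcher-based delete that the bucket refactor above introduces.
type store struct {
	items map[string]int
}

// deleteFunc removes every entry the matcher selects and reports the count.
func (s *store) deleteFunc(matches func(key string, value int) bool) int {
	removed := 0
	for key, value := range s.items {
		if matches(key, value) {
			delete(s.items, key)
			removed++
		}
	}
	return removed
}

// deletePrefix is expressed in terms of deleteFunc, mirroring the diff.
func (s *store) deletePrefix(prefix string) int {
	return s.deleteFunc(func(key string, _ int) bool {
		return strings.HasPrefix(key, prefix)
	})
}

func main() {
	s := &store{items: map[string]int{"user:1": 1, "user:2": 2, "session:9": 9}}
	fmt.Println(s.deletePrefix("user:"))                                    // 2
	fmt.Println(s.deleteFunc(func(_ string, v int) bool { return v > 5 })) // 1
}
```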

bucket_test.go

@@ -1,9 +1,10 @@
 package ccache
 
 import (
-	. "github.com/karlseguin/expect"
 	"testing"
 	"time"
+
+	. "github.com/karlseguin/expect"
 )
 
 type BucketTests struct {
@@ -32,7 +33,7 @@ func (_ *BucketTests) DeleteItemFromBucket() {
 
 func (_ *BucketTests) SetsANewBucketItem() {
 	bucket := testBucket()
-	item, existing := bucket.set("spice", TestValue("flow"), time.Minute)
+	item, existing := bucket.set("spice", TestValue("flow"), time.Minute, false)
 	assertValue(item, "flow")
 	item = bucket.get("spice")
 	assertValue(item, "flow")
@@ -41,7 +42,7 @@ func (_ *BucketTests) SetsANewBucketItem() {
 
 func (_ *BucketTests) SetsAnExistingItem() {
 	bucket := testBucket()
-	item, existing := bucket.set("power", TestValue("9001"), time.Minute)
+	item, existing := bucket.set("power", TestValue("9001"), time.Minute, false)
 	assertValue(item, "9001")
 	item = bucket.get("power")
 	assertValue(item, "9001")

cache.go (54 changed lines)

@@ -17,6 +17,10 @@ type setMaxSize struct {
 	size int64
 }
 
+type clear struct {
+	done chan struct{}
+}
+
 type Cache struct {
 	*Configuration
 	list *list.List
@@ -38,7 +42,7 @@ func New(config *Configuration) *Cache {
 		buckets: make([]*bucket, config.buckets),
 		control: make(chan interface{}),
 	}
-	for i := 0; i < int(config.buckets); i++ {
+	for i := 0; i < config.buckets; i++ {
 		c.buckets[i] = &bucket{
 			lookup: make(map[string]*Item),
 		}
@@ -63,6 +67,15 @@ func (c *Cache) DeletePrefix(prefix string) int {
 	return count
 }
 
+// Deletes all items that the matches func evaluates to true.
+func (c *Cache) DeleteFunc(matches func(key string, item *Item) bool) int {
+	count := 0
+	for _, b := range c.buckets {
+		count += b.deleteFunc(matches, c.deletables)
+	}
+	return count
+}
+
 // Get an item from the cache. Returns nil if the item wasn't found.
 // This can return an expired item. Use item.Expired() to see if the item
 // is expired and item.TTL() to see how long until the item expires (which
@@ -72,7 +85,7 @@ func (c *Cache) Get(key string) *Item {
 	if item == nil {
 		return nil
 	}
-	if item.expires > time.Now().UnixNano() {
+	if !item.Expired() {
 		c.promote(item)
 	}
 	return item
@@ -89,9 +102,15 @@ func (c *Cache) TrackingGet(key string) TrackedItem {
 	return item
 }
 
+// Used when the cache was created with the Track() configuration option.
+// Sets the item, and returns a tracked reference to it.
+func (c *Cache) TrackingSet(key string, value interface{}, duration time.Duration) TrackedItem {
+	return c.set(key, value, duration, true)
+}
+
 // Set the value in the cache for the specified duration
 func (c *Cache) Set(key string, value interface{}, duration time.Duration) {
-	c.set(key, value, duration)
+	c.set(key, value, duration, false)
 }
 
 // Replace the value if it exists, does not set if it doesn't.
@@ -118,7 +137,7 @@ func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interfac
 	if err != nil {
 		return nil, err
 	}
-	return c.set(key, value, duration), nil
+	return c.set(key, value, duration, false), nil
 }
 
 // Remove the item from the cache, return true if the item was present, false otherwise.
@@ -131,13 +150,11 @@ func (c *Cache) Delete(key string) bool {
 	return false
 }
 
-//this isn't thread safe. It's meant to be called from non-concurrent tests
+// Clears the cache
 func (c *Cache) Clear() {
-	for _, bucket := range c.buckets {
-		bucket.clear()
-	}
-	c.size = 0
-	c.list = list.New()
+	done := make(chan struct{})
+	c.control <- clear{done: done}
+	<-done
 }
 
 // Stops the background worker. Operations performed on the cache after Stop
@@ -173,8 +190,8 @@ func (c *Cache) deleteItem(bucket *bucket, item *Item) {
 	c.deletables <- item
 }
 
-func (c *Cache) set(key string, value interface{}, duration time.Duration) *Item {
-	item, existing := c.bucket(key).set(key, value, duration)
+func (c *Cache) set(key string, value interface{}, duration time.Duration, track bool) *Item {
+	item, existing := c.bucket(key).set(key, value, duration, track)
 	if existing != nil {
 		c.deletables <- existing
 	}
@@ -189,7 +206,11 @@ func (c *Cache) bucket(key string) *bucket {
 }
 
 func (c *Cache) promote(item *Item) {
-	c.promotables <- item
+	select {
+	case c.promotables <- item:
+	default:
+	}
+
 }
 
 func (c *Cache) worker() {
@@ -216,6 +237,13 @@ func (c *Cache) worker() {
 				if c.size > c.maxSize {
 					dropped += c.gc()
 				}
+			case clear:
+				for _, bucket := range c.buckets {
+					bucket.clear()
+				}
+				c.size = 0
+				c.list = list.New()
+				msg.done <- struct{}{}
 			}
 		}
 	}
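
Two behavioral changes stand out in cache.go: `promote` now uses a non-blocking send so a full promotables channel cannot stall a `Get`, and `Clear` is routed through the background worker via the new `clear` control message, so clearing no longer races with the GC loop. A minimal, self-contained sketch of that control-channel handshake, with toy types rather than ccache's own:

```go
package main

import "fmt"

// clearMsg mirrors the shape of the control message added in the diff:
// the caller blocks on done until the worker has finished clearing.
type clearMsg struct {
	done chan struct{}
}

// toyCache is an illustrative stand-in; only the handshake is shown.
type toyCache struct {
	control chan interface{}
	data    map[string]int
}

func newToyCache() *toyCache {
	c := &toyCache{control: make(chan interface{}), data: map[string]int{"a": 1}}
	go c.worker()
	return c
}

// Clear asks the worker to reset state and waits for it to finish,
// so it cannot interleave with whatever else the worker is doing.
func (c *toyCache) Clear() {
	done := make(chan struct{})
	c.control <- clearMsg{done: done}
	<-done
}

func (c *toyCache) worker() {
	for msg := range c.control {
		switch m := msg.(type) {
		case clearMsg:
			c.data = make(map[string]int)
			m.done <- struct{}{}
		}
	}
}

func main() {
	c := newToyCache()
	c.Clear()
	fmt.Println(len(c.data)) // 0
}
```

The `done` channel is what turns `Clear` from a documented-as-unsafe helper into a synchronized operation, which is exactly how the readme's description of `Clear` changes later in this compare.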

cache_test.go

@@ -2,6 +2,7 @@ package ccache
 
 import (
 	"strconv"
+	"sync/atomic"
 	"testing"
 	"time"
 
@@ -51,11 +52,40 @@ func (_ CacheTests) DeletesAPrefix() {
 	Expect(cache.ItemCount()).To.Equal(2)
 }
 
+func (_ CacheTests) DeletesAFunc() {
+	cache := New(Configure())
+	Expect(cache.ItemCount()).To.Equal(0)
+
+	cache.Set("a", 1, time.Minute)
+	cache.Set("b", 2, time.Minute)
+	cache.Set("c", 3, time.Minute)
+	cache.Set("d", 4, time.Minute)
+	cache.Set("e", 5, time.Minute)
+	cache.Set("f", 6, time.Minute)
+	Expect(cache.ItemCount()).To.Equal(6)
+
+	Expect(cache.DeleteFunc(func(key string, item *Item) bool {
+		return false
+	})).To.Equal(0)
+	Expect(cache.ItemCount()).To.Equal(6)
+
+	Expect(cache.DeleteFunc(func(key string, item *Item) bool {
+		return item.Value().(int) < 4
+	})).To.Equal(3)
+	Expect(cache.ItemCount()).To.Equal(3)
+
+	Expect(cache.DeleteFunc(func(key string, item *Item) bool {
+		return key == "d"
+	})).To.Equal(1)
+	Expect(cache.ItemCount()).To.Equal(2)
+
+}
+
 func (_ CacheTests) OnDeleteCallbackCalled() {
-	onDeleteFnCalled := false
+	onDeleteFnCalled := int32(0)
 	onDeleteFn := func(item *Item) {
 		if item.key == "spice" {
-			onDeleteFnCalled = true
+			atomic.AddInt32(&onDeleteFnCalled, 1)
 		}
 	}
 
@@ -69,7 +99,7 @@ func (_ CacheTests) OnDeleteCallbackCalled() {
 
 	Expect(cache.Get("spice")).To.Equal(nil)
 	Expect(cache.Get("worm").Value()).To.Equal("sand")
-	Expect(onDeleteFnCalled).To.Equal(true)
+	Expect(atomic.LoadInt32(&onDeleteFnCalled)).To.Eql(1)
 }
 
 func (_ CacheTests) FetchesExpiredItems() {
@@ -111,18 +141,21 @@ func (_ CacheTests) PromotedItemsDontGetPruned() {
 }
 
 func (_ CacheTests) TrackerDoesNotCleanupHeldInstance() {
-	cache := New(Configure().ItemsToPrune(10).Track())
-	for i := 0; i < 10; i++ {
+	cache := New(Configure().ItemsToPrune(11).Track())
+	item0 := cache.TrackingSet("0", 0, time.Minute)
+	for i := 1; i < 11; i++ {
 		cache.Set(strconv.Itoa(i), i, time.Minute)
 	}
-	item := cache.TrackingGet("0")
+	item1 := cache.TrackingGet("1")
 	time.Sleep(time.Millisecond * 10)
 	gcCache(cache)
 	Expect(cache.Get("0").Value()).To.Equal(0)
-	Expect(cache.Get("1")).To.Equal(nil)
-	item.Release()
+	Expect(cache.Get("1").Value()).To.Equal(1)
+	item0.Release()
+	item1.Release()
 	gcCache(cache)
 	Expect(cache.Get("0")).To.Equal(nil)
+	Expect(cache.Get("1")).To.Equal(nil)
 }
 
 func (_ CacheTests) RemovesOldestItemWhenFull() {

configuration_test.go

@@ -1,8 +1,9 @@
 package ccache
 
 import (
-	. "github.com/karlseguin/expect"
 	"testing"
+
+	. "github.com/karlseguin/expect"
 )
 
 type ConfigurationTests struct{}

go.mod (2 changed lines)

@@ -1,4 +1,4 @@
-module github.com/karlseguin/ccache
+module github.com/karlseguin/ccache/v2
 
 go 1.13
 
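
Renaming the module to `github.com/karlseguin/ccache/v2` follows Go's semantic import versioning, so consumers must update both `go get` and their import paths; the readme hunks below make the matching change. A hypothetical consumer after the bump:

```go
package main

import (
	"time"

	// The package name stays ccache; only the module path gains /v2.
	"github.com/karlseguin/ccache/v2"
)

func main() {
	cache := ccache.New(ccache.Configure())
	cache.Set("user:4", "goku", time.Minute)
}
```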

item.go (8 changed lines)

@@ -52,18 +52,22 @@ type Item struct {
 	element *list.Element
 }
 
-func newItem(key string, value interface{}, expires int64) *Item {
+func newItem(key string, value interface{}, expires int64, track bool) *Item {
 	size := int64(1)
 	if sized, ok := value.(Sized); ok {
 		size = sized.Size()
 	}
-	return &Item{
+	item := &Item{
 		key: key,
 		value: value,
 		promotions: 0,
 		size: size,
 		expires: expires,
 	}
+	if track {
+		item.refCount = 1
+	}
+	return item
 }
 
 func (i *Item) shouldPromote(getsPerPromote int32) bool {
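
`newItem` now seeds `refCount` to 1 when `track` is true, so an item created through `TrackingSet` is held from the moment it enters the cache rather than only after a later `TrackingGet`. A small illustrative sketch of how a reference count can gate eviction; the names here are stand-ins, not ccache's actual internals:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// trackedItem: refCount > 0 means some caller still holds the item,
// so a garbage-collection pass must skip it.
type trackedItem struct {
	value    interface{}
	refCount int32
}

func newTrackedItem(value interface{}, track bool) *trackedItem {
	item := &trackedItem{value: value}
	if track {
		item.refCount = 1 // held by its creator until Release is called
	}
	return item
}

func (i *trackedItem) Release()        { atomic.AddInt32(&i.refCount, -1) }
func (i *trackedItem) evictable() bool { return atomic.LoadInt32(&i.refCount) == 0 }

func main() {
	item := newTrackedItem("spice", true)
	fmt.Println(item.evictable()) // false: still held
	item.Release()
	fmt.Println(item.evictable()) // true: safe to evict
}
```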

layeredbucket.go

@@ -38,7 +38,7 @@ func (b *layeredBucket) getSecondaryBucket(primary string) *bucket {
 	return bucket
 }
 
-func (b *layeredBucket) set(primary, secondary string, value interface{}, duration time.Duration) (*Item, *Item) {
+func (b *layeredBucket) set(primary, secondary string, value interface{}, duration time.Duration, track bool) (*Item, *Item) {
 	b.Lock()
 	bkt, exists := b.buckets[primary]
 	if exists == false {
@@ -46,7 +46,7 @@ func (b *layeredBucket) set(primary, secondary string, value interface{}, durati
 		b.buckets[primary] = bkt
 	}
 	b.Unlock()
-	item, existing := bkt.set(secondary, value, duration)
+	item, existing := bkt.set(secondary, value, duration, track)
 	item.group = primary
 	return item, existing
 }
@@ -61,6 +61,26 @@ func (b *layeredBucket) delete(primary, secondary string) *Item {
 	return bucket.delete(secondary)
 }
 
+func (b *layeredBucket) deletePrefix(primary, prefix string, deletables chan *Item) int {
+	b.RLock()
+	bucket, exists := b.buckets[primary]
+	b.RUnlock()
+	if exists == false {
+		return 0
+	}
+	return bucket.deletePrefix(prefix, deletables)
+}
+
+func (b *layeredBucket) deleteFunc(primary string, matches func(key string, item *Item) bool, deletables chan *Item) int {
+	b.RLock()
+	bucket, exists := b.buckets[primary]
+	b.RUnlock()
+	if exists == false {
+		return 0
+	}
+	return bucket.deleteFunc(matches, deletables)
+}
+
 func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool {
 	b.RLock()
 	bucket, exists := b.buckets[primary]

layeredcache.go

@@ -102,9 +102,14 @@ func (c *LayeredCache) TrackingGet(primary, secondary string) TrackedItem {
 	return item
 }
 
+// Set the value in the cache for the specified duration
+func (c *LayeredCache) TrackingSet(primary, secondary string, value interface{}, duration time.Duration) TrackedItem {
+	return c.set(primary, secondary, value, duration, true)
+}
+
 // Set the value in the cache for the specified duration
 func (c *LayeredCache) Set(primary, secondary string, value interface{}, duration time.Duration) {
-	c.set(primary, secondary, value, duration)
+	c.set(primary, secondary, value, duration, false)
 }
 
 // Replace the value if it exists, does not set if it doesn't.
@@ -131,7 +136,7 @@ func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration,
 	if err != nil {
 		return nil, err
 	}
-	return c.set(primary, secondary, value, duration), nil
+	return c.set(primary, secondary, value, duration, false), nil
 }
 
 // Remove the item from the cache, return true if the item was present, false otherwise.
@@ -149,13 +154,21 @@ func (c *LayeredCache) DeleteAll(primary string) bool {
 	return c.bucket(primary).deleteAll(primary, c.deletables)
 }
 
-//this isn't thread safe. It's meant to be called from non-concurrent tests
+// Deletes all items that share the same primary key and prefix.
+func (c *LayeredCache) DeletePrefix(primary, prefix string) int {
+	return c.bucket(primary).deletePrefix(primary, prefix, c.deletables)
+}
+
+// Deletes all items that share the same primary key and where the matches func evaluates to true.
+func (c *LayeredCache) DeleteFunc(primary string, matches func(key string, item *Item) bool) int {
+	return c.bucket(primary).deleteFunc(primary, matches, c.deletables)
+}
+
+// Clears the cache
 func (c *LayeredCache) Clear() {
-	for _, bucket := range c.buckets {
-		bucket.clear()
-	}
-	c.size = 0
-	c.list = list.New()
+	done := make(chan struct{})
+	c.control <- clear{done: done}
+	<-done
 }
 
 func (c *LayeredCache) Stop() {
@@ -183,8 +196,8 @@ func (c *LayeredCache) restart() {
 	go c.worker()
 }
 
-func (c *LayeredCache) set(primary, secondary string, value interface{}, duration time.Duration) *Item {
-	item, existing := c.bucket(primary).set(primary, secondary, value, duration)
+func (c *LayeredCache) set(primary, secondary string, value interface{}, duration time.Duration, track bool) *Item {
+	item, existing := c.bucket(primary).set(primary, secondary, value, duration, track)
 	if existing != nil {
 		c.deletables <- existing
 	}
@@ -234,6 +247,13 @@ func (c *LayeredCache) worker() {
 				if c.size > c.maxSize {
 					dropped += c.gc()
 				}
+			case clear:
+				for _, bucket := range c.buckets {
+					bucket.clear()
+				}
+				c.size = 0
+				c.list = list.New()
+				msg.done <- struct{}{}
 			}
 		}
 	}

layeredcache_test.go

@@ -2,6 +2,7 @@ package ccache
 
 import (
 	"strconv"
+	"sync/atomic"
 	"testing"
 	"time"
 
@@ -71,13 +72,64 @@ func (_ *LayeredCacheTests) DeletesAValue() {
 	Expect(cache.ItemCount()).To.Equal(2)
 }
 
+func (_ *LayeredCacheTests) DeletesAPrefix() {
+	cache := newLayered()
+	Expect(cache.ItemCount()).To.Equal(0)
+
+	cache.Set("spice", "aaa", "1", time.Minute)
+	cache.Set("spice", "aab", "2", time.Minute)
+	cache.Set("spice", "aac", "3", time.Minute)
+	cache.Set("leto", "aac", "3", time.Minute)
+	cache.Set("spice", "ac", "4", time.Minute)
+	cache.Set("spice", "z5", "7", time.Minute)
+	Expect(cache.ItemCount()).To.Equal(6)
+
+	Expect(cache.DeletePrefix("spice", "9a")).To.Equal(0)
+	Expect(cache.ItemCount()).To.Equal(6)
+
+	Expect(cache.DeletePrefix("spice", "aa")).To.Equal(3)
+	Expect(cache.Get("spice", "aaa")).To.Equal(nil)
+	Expect(cache.Get("spice", "aab")).To.Equal(nil)
+	Expect(cache.Get("spice", "aac")).To.Equal(nil)
+	Expect(cache.Get("spice", "ac").Value()).To.Equal("4")
+	Expect(cache.Get("spice", "z5").Value()).To.Equal("7")
+	Expect(cache.ItemCount()).To.Equal(3)
+}
+
+func (_ *LayeredCacheTests) DeletesAFunc() {
+	cache := newLayered()
+	Expect(cache.ItemCount()).To.Equal(0)
+
+	cache.Set("spice", "a", 1, time.Minute)
+	cache.Set("leto", "b", 2, time.Minute)
+	cache.Set("spice", "c", 3, time.Minute)
+	cache.Set("spice", "d", 4, time.Minute)
+	cache.Set("spice", "e", 5, time.Minute)
+	cache.Set("spice", "f", 6, time.Minute)
+	Expect(cache.ItemCount()).To.Equal(6)
+
+	Expect(cache.DeleteFunc("spice", func(key string, item *Item) bool {
+		return false
+	})).To.Equal(0)
+	Expect(cache.ItemCount()).To.Equal(6)
+
+	Expect(cache.DeleteFunc("spice", func(key string, item *Item) bool {
+		return item.Value().(int) < 4
+	})).To.Equal(2)
+	Expect(cache.ItemCount()).To.Equal(4)
+
+	Expect(cache.DeleteFunc("spice", func(key string, item *Item) bool {
+		return key == "d"
+	})).To.Equal(1)
+	Expect(cache.ItemCount()).To.Equal(3)
+
+}
+
 func (_ *LayeredCacheTests) OnDeleteCallbackCalled() {
-
-	onDeleteFnCalled := false
+	onDeleteFnCalled := int32(0)
 	onDeleteFn := func(item *Item) {
-
 		if item.group == "spice" && item.key == "flow" {
-			onDeleteFnCalled = true
+			atomic.AddInt32(&onDeleteFnCalled, 1)
 		}
 	}
 
@@ -95,7 +147,7 @@ func (_ *LayeredCacheTests) OnDeleteCallbackCalled() {
 	Expect(cache.Get("spice", "worm")).To.Equal(nil)
 	Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
 
-	Expect(onDeleteFnCalled).To.Equal(true)
+	Expect(atomic.LoadInt32(&onDeleteFnCalled)).To.Eql(1)
 }
 
 func (_ *LayeredCacheTests) DeletesALayer() {
@@ -143,17 +195,20 @@ func (_ LayeredCacheTests) PromotedItemsDontGetPruned() {
 
 func (_ LayeredCacheTests) TrackerDoesNotCleanupHeldInstance() {
 	cache := Layered(Configure().ItemsToPrune(10).Track())
-	for i := 0; i < 10; i++ {
+	item0 := cache.TrackingSet("0", "a", 0, time.Minute)
+	for i := 1; i < 11; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
 	}
-	item := cache.TrackingGet("0", "a")
+	item1 := cache.TrackingGet("1", "a")
 	time.Sleep(time.Millisecond * 10)
 	gcLayeredCache(cache)
 	Expect(cache.Get("0", "a").Value()).To.Equal(0)
-	Expect(cache.Get("1", "a")).To.Equal(nil)
-	item.Release()
+	Expect(cache.Get("1", "a").Value()).To.Equal(1)
+	item0.Release()
+	item1.Release()
 	gcLayeredCache(cache)
 	Expect(cache.Get("0", "a")).To.Equal(nil)
+	Expect(cache.Get("1", "a")).To.Equal(nil)
 }
 
 func (_ LayeredCacheTests) RemovesOldestItemWhenFull() {
@@ -207,7 +262,9 @@ func (_ LayeredCacheTests) ResizeOnTheFly() {
 }
 
 func newLayered() *LayeredCache {
-	return Layered(Configure())
+	c := Layered(Configure())
+	c.Clear()
+	return c
 }
 
 func (_ LayeredCacheTests) RemovesOldestItemWhenFullBySizer() {

readme.md (15 changed lines)

@@ -14,7 +14,7 @@ Unless otherwise stated, all methods are thread-safe.
 First, download the project:
 
 ```go
-go get github.com/karlseguin/ccache
+go get github.com/karlseguin/ccache/v2
 ```
 
 ## Configuration
@@ -23,7 +23,7 @@ Next, import and create a `Cache` instance:
 
 ```go
 import (
-  "github.com/karlseguin/ccache"
+  "github.com/karlseguin/ccache/v2"
 )
 
 var cache = ccache.New(ccache.Configure())
@@ -87,7 +87,7 @@ item, err := cache.Fetch("user:4", time.Minute * 10, func() (interface{}, error)
 ```
 
 ### Delete
-`Delete` expects the key to delete. It's ok to call `Delete` on a non-existant key:
+`Delete` expects the key to delete. It's ok to call `Delete` on a non-existent key:
 
 ```go
 cache.Delete("user:4")
@@ -96,8 +96,11 @@ cache.Delete("user:4")
 ### DeletePrefix
 `DeletePrefix` deletes all keys matching the provided prefix. Returns the number of keys removed.
 
+### DeleteFunc
+`DeleteFunc` deletes all items that the provded matches func evaluates to true. Returns the number of keys removed.
+
 ### Clear
-`Clear` clears the cache. This method is **not** thread safe. It is meant to be used from tests.
+`Clear` clears the cache. If the cache's gc is running, `Clear` waits for it to finish.
 
 ### Extend
 The life of an item can be changed via the `Extend` method. This will change the expiry of the item by the specified duration relative to the current time.
@@ -117,7 +120,7 @@ You can get the number of keys evicted due to memory pressure by calling `GetDro
 ```go
 dropped := cache.GetDropped()
 ```
-The counter is reset on every call. If the cache's gc is running, `GetDropped` waits for it to finish; it's meant ot be called asynchronously for statistics /monitoring purposes.
+The counter is reset on every call. If the cache's gc is running, `GetDropped` waits for it to finish; it's meant to be called asynchronously for statistics /monitoring purposes.
 
 ### Stop
 The cache's background worker can be stopped by calling `Stop`. Once `Stop` is called
@@ -140,7 +143,7 @@ user := item.Value() //will be nil if "user:4" didn't exist in the cache
 item.Release() //can be called even if item.Value() returned nil
 ```
 
-In practice, `Release` wouldn't be called until later, at some other place in your code.
+In practice, `Release` wouldn't be called until later, at some other place in your code. `TrackingSet` can be used to set a value to be tracked.
 
 There's a couple reason to use the tracking mode if other parts of your code also hold references to objects. First, if you're already going to hold a reference to these objects, there's really no reason not to have them in the cache - the memory is used up anyways.
 
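
Tying the documented additions together, a short usage sketch of `DeletePrefix`, `DeleteFunc`, and `TrackingSet`/`Release` against the v2 import path; the counts in the comments follow the semantics described in the readme and exercised by the tests in this compare:

```go
package main

import (
	"fmt"
	"time"

	"github.com/karlseguin/ccache/v2"
)

func main() {
	cache := ccache.New(ccache.Configure().Track())

	cache.Set("user:1", 1, time.Minute)
	cache.Set("user:2", 2, time.Minute)
	cache.Set("session:9", 9, time.Minute)

	// Delete every key starting with "user:".
	fmt.Println(cache.DeletePrefix("user:")) // 2

	// Delete items selected by an arbitrary predicate.
	fmt.Println(cache.DeleteFunc(func(key string, item *ccache.Item) bool {
		return item.Value().(int) > 5
	})) // 1

	// TrackingSet stores a value and returns a tracked reference to it;
	// Release it once no other part of the code still needs the item.
	item := cache.TrackingSet("worm", "sand", time.Minute)
	fmt.Println(item.Value()) // sand
	item.Release()
}
```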

secondarycache.go

@@ -16,7 +16,7 @@ func (s *SecondaryCache) Get(secondary string) *Item {
 // Set the secondary key to a value.
 // The semantics are the same as for LayeredCache.Set
 func (s *SecondaryCache) Set(secondary string, value interface{}, duration time.Duration) *Item {
-	item, existing := s.bucket.set(secondary, value, duration)
+	item, existing := s.bucket.set(secondary, value, duration, false)
 	if existing != nil {
 		s.pCache.deletables <- existing
 	}

secondarycache_test.go

@@ -1,10 +1,11 @@
 package ccache
 
 import (
-	. "github.com/karlseguin/expect"
 	"strconv"
 	"testing"
 	"time"
+
+	. "github.com/karlseguin/expect"
 )
 
 type SecondaryCacheTests struct{}