47 Commits

Author SHA1 Message Date
Karl Seguin
d7846ec7e0 grab lookup len under read lock 2020-08-13 10:41:28 +08:00
Karl Seguin
f63031fa40 Merge pull request #45 from bep/DeleteFunc
Add DeleteFunc
2020-08-13 10:37:20 +08:00
Bjørn Erik Pedersen
d56665a86e Add DeleteFunc
This shares DeletePrefix's implementation.
2020-08-12 17:47:11 +02:00
Karl Seguin
223703f7f0 Merge pull request #44 from bep/layered-delete-prefix
Add DeletePrefix to LayeredCache
2020-08-12 08:39:47 +08:00
Bjørn Erik Pedersen
a24d7f8c53 Add DeletePrefix to LayeredCache 2020-08-11 19:04:54 +02:00
Karl Seguin
3b58df727e Merge pull request #43 from jonathonlacher/patch-1
fix spelling in readme
2020-07-17 14:07:50 +08:00
Jonathon Lacher
4c88bf60e6 fix spelling in readme 2020-07-16 15:15:43 -05:00
Karl Seguin
eab9dbaa7f update readme to /v2 2020-06-29 20:50:55 +08:00
Karl Seguin
937ca294e6 go mod version 2020-06-29 15:22:50 +08:00
Karl Seguin
40275a30c8 Ability to dynamically SetMaxSize
To support this, rather than adding another field/channel like
`getDroppedReq`, I added a `control` channel that can be used for these
miscellaneous interactions with the worker. The control channel can also
take over for the `donec` channel.
2020-06-26 20:22:30 +08:00
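
A self-contained sketch of the pattern this message describes, using illustrative names (the real `getDropped` and `setMaxSize` types appear in the cache.go diff below): a single `control` channel multiplexes typed messages to one worker goroutine, and closing it doubles as the shutdown signal.

```go
package main

import "fmt"

// Illustrative message types, not the library's own.
type getCount struct{ res chan int }
type shutdown struct{}

func worker(control chan interface{}) {
	defer close(control) // the closed channel doubles as a "worker done" signal
	count := 0
	for msg := range control {
		switch m := msg.(type) {
		case getCount:
			count++
			m.res <- count // synchronous reply to the caller
		case shutdown:
			return
		}
	}
}

func main() {
	control := make(chan interface{})
	go worker(control)

	res := make(chan int)
	control <- getCount{res: res} // request/reply over the shared channel
	fmt.Println("count:", <-res)

	control <- shutdown{}
	<-control // unblocks with a zero value once the worker closes the channel
}
```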
Karl Seguin
d9aec58960 add GetDropped documentation 2020-02-16 11:54:07 +08:00
Karl Seguin
1a257a89d6 add GetDropped function 2020-02-05 22:05:05 +08:00
Karl Seguin
78289f8f0b Merge pull request #38 from karlseguin/DeletePrefix
Delete prefix
2020-01-23 13:03:42 +08:00
Karl Seguin
79f9dcde21 fewer defers, document DeletePrefix 2020-01-23 12:55:55 +08:00
Karl Seguin
04261a5282 Merge branch 'master' into DeletePrefix 2020-01-23 12:50:53 +08:00
Karl Seguin
569ae60338 Merge pull request #37 from aporeto-inc/pin-expect
fixed/module: expect needs to be on master
2020-01-23 11:44:58 +08:00
Antoine Mercadal
048ac0669f fixed/module: expect needs to be on master 2020-01-22 19:35:19 -08:00
Karl Seguin
2ff4136636 Merge pull request #36 from aporeto-inc/go-modules
migrate to go modules
2020-01-23 11:19:07 +08:00
Antoine Mercadal
f79de0e254 migrate to go modules 2020-01-22 19:16:52 -08:00
Karl Seguin
356e164dd5 preliminary work on DeletePrefix 2020-01-23 10:27:12 +08:00
Karl Seguin
2ff889bcae document Clear 2020-01-23 10:04:47 +08:00
Karl Seguin
46ec5d2257 explicitly state the thread-safety nature of the library 2020-01-23 09:38:19 +08:00
Karl Seguin
ec06cd93a0 Merge pull request #28 from buglloc/bucket_tests
Fixed bucket tests
2019-02-23 22:26:40 +07:00
Andrew Krasichkov
8d8b062716 fixed bucket tests 2019-02-14 15:54:01 +03:00
Karl Seguin
3385784411 Add cache.ItemCount() int64 API 2019-01-26 12:33:50 +07:00
Karl Seguin
692cd618b2 guard access to item.promotions in LayeredCache, matching the fix applied to Cache in 557d56ec6f 2018-12-27 22:54:50 +07:00
Karl Seguin
142396791e Merge pull request #22 from alexejk/gcOnDelete
Calling OnDelete from gc()
2018-11-26 20:29:43 +07:00
Alexej Kubarev
243f5c8219 Fixes #21. Calling OnDelete during gc() 2018-11-25 15:31:09 -08:00
Alexej Kubarev
7e55af0a9c Small routine lock file upgrade 2018-11-25 15:29:47 -08:00
Karl Seguin
a317416755 Merge pull request #16 from alexejk/onremove
Support for OnDelete() callback
2018-07-22 11:17:02 +07:00
Alexej Kubarev
7421e2d7b4 Adding support for OnDelete callback function
OnDelete will receive the item being deleted, to support calling a cleanup function specific to the stored item
2018-07-16 18:20:17 +02:00
Alexej Kubarev
72059a01e9 Adding missing ignore file to ensure vendor folder is not checked in 2018-07-16 18:18:51 +02:00
Alexej Kubarev
00324cb2d2 Adding dep dependency management files to utilize vendoring 2018-07-16 18:18:02 +02:00
Karl Seguin
b425c9ca00 Merge pull request #11 from EdwardBetts/spelling
correct spelling mistake
2017-09-04 13:47:32 +07:00
Edward Betts
0d05fa8278 correct spelling mistake 2017-09-01 11:40:44 +01:00
Karl Seguin
3ba9789cfd Merge pull request #10 from heyitsanthony/test-races
fix data races in tests
2017-02-17 13:08:20 +07:00
Anthony Romano
b3c864ded7 cache: make Stop() synchronous and fix races in tests
worker goroutine running concurrently with tests would cause data race errors
when running tests with -race enabled.
2017-02-13 15:39:24 -08:00
Anthony Romano
c69270ce08 layeredcache: add Stop() and fix races in tests
worker goroutine running concurrently with tests would cause data race errors
when running tests with -race enabled.
2017-02-13 15:39:24 -08:00
Karl Seguin
12c7ffdc19 Merge pull request #9 from spicydog/patch-1
Fix an error in example "itemsToPrune"
2016-12-22 18:15:50 +07:00
spicydog
77679ba342 Fix an error in example "itemsToPrune"
itemsToPrune -> ItemsToPrune
2016-12-21 20:07:16 +10:00
Karl Seguin
a2d6215577 Merge pull request #8 from jdeppe-pivotal/master
Add a SecondaryCache which exposes the secondary part of a LayeredCache
2016-11-03 22:19:53 +07:00
Jens Deppe
a451d7262c Integrate feedback and upstream fixes
- Ensure correct locking in GetOrCreateSecondaryCache
- Fetch now returns a *Item
2016-11-01 23:53:22 -07:00
Jens Deppe
d2c2442186 Merge remote-tracking branch 'seguin/master' 2016-11-01 20:33:44 -07:00
Karl Seguin
8adbb5637b return *Item from layered cache fetch instead of interface{} 2016-11-02 09:34:09 +07:00
Jens Deppe
c1634a4d00 Add concept of a SecondaryCache which exposes the secondary part of a LayeredCache 2016-11-01 09:01:39 -07:00
Karl Seguin
2f6b517f7b Merge pull request #6 from HasMatthew/nanosecond_ttl
Use nanosecond-resolution TTL instead of second-resolution.
2016-07-07 20:03:45 -07:00
Matthew Dale
162d4e27ca Use nanosecond-resolution TTL instead of second-resolution. 2016-07-07 15:32:49 -07:00
16 changed files with 835 additions and 79 deletions

1
.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
vendor/

bucket.go
View File

@@ -1,6 +1,7 @@
package ccache
import (
"strings"
"sync"
"time"
)
@@ -10,6 +11,12 @@ type bucket struct {
lookup map[string]*Item
}
func (b *bucket) itemCount() int {
b.RLock()
defer b.RUnlock()
return len(b.lookup)
}
func (b *bucket) get(key string) *Item {
b.RLock()
defer b.RUnlock()
@@ -17,25 +24,75 @@ func (b *bucket) get(key string) *Item {
}
func (b *bucket) set(key string, value interface{}, duration time.Duration) (*Item, *Item) {
expires := time.Now().Add(duration).Unix()
expires := time.Now().Add(duration).UnixNano()
item := newItem(key, value, expires)
b.Lock()
defer b.Unlock()
existing := b.lookup[key]
b.lookup[key] = item
b.Unlock()
return item, existing
}
func (b *bucket) delete(key string) *Item {
b.Lock()
defer b.Unlock()
item := b.lookup[key]
delete(b.lookup, key)
b.Unlock()
return item
}
// This is an expensive operation, so we do what we can to optimize it and limit
// the impact it has on concurrent operations. Specifically, we:
// 1 - Do an initial iteration to collect matches. This allows us to do the
// "expensive" prefix check (on all values) using only a read-lock
// 2 - Do a second iteration, under write lock, for the matched results to do
// the actual deletion
// Also, this is the only place where the Bucket is aware of cache detail: the
// deletables channel. Passing it here lets us avoid iterating over matched items
// again in the cache. Further, we pass item to deletables BEFORE actually removing
// the item from the map. I'm pretty sure this is 100% fine, but it is unique.
// (We do this so that the write to the channel is under the read lock and not the
// write lock)
func (b *bucket) deleteFunc(matches func(key string, item interface{}) bool, deletables chan *Item) int {
lookup := b.lookup
b.RLock()
l := len(lookup)
b.RUnlock()
items := make([]*Item, 0, l/10)
b.RLock()
for key, item := range lookup {
if matches(key, item) {
deletables <- item
items = append(items, item)
}
}
b.RUnlock()
if len(items) == 0 {
// avoid the write lock if we can
return 0
}
b.Lock()
for _, item := range items {
delete(lookup, item.key)
}
b.Unlock()
return len(items)
}
func (b *bucket) deletePrefix(prefix string, deletables chan *Item) int {
return b.deleteFunc(func(key string, item interface{}) bool {
return strings.HasPrefix(key, prefix)
}, deletables)
}
func (b *bucket) clear() {
b.Lock()
defer b.Unlock()
b.lookup = make(map[string]*Item)
b.Unlock()
}

bucket_test.go
View File

@@ -9,7 +9,7 @@ import (
type BucketTests struct {
}
func Tests_Bucket(t *testing.T) {
func Test_Bucket(t *testing.T) {
Expectify(new(BucketTests), t)
}
@@ -42,10 +42,10 @@ func (_ *BucketTests) SetsANewBucketItem() {
func (_ *BucketTests) SetsAnExistingItem() {
bucket := testBucket()
item, existing := bucket.set("power", TestValue("9001"), time.Minute)
assertValue(item, "9002")
assertValue(item, "9001")
item = bucket.get("power")
assertValue(item, "9002")
assertValue(existing, "9001")
assertValue(item, "9001")
assertValue(existing, "9000")
}
func testBucket() *bucket {

cache.go
View File

@@ -8,6 +8,15 @@ import (
"time"
)
// The cache has a generic 'control' channel that is used to send
// messages to the worker. These are the messages that can be sent to it
type getDropped struct {
res chan int
}
type setMaxSize struct {
size int64
}
type Cache struct {
*Configuration
list *list.List
@@ -16,6 +25,7 @@ type Cache struct {
bucketMask uint32
deletables chan *Item
promotables chan *Item
control chan interface{}
}
// Create a new cache with the specified configuration
@@ -26,18 +36,42 @@ func New(config *Configuration) *Cache {
Configuration: config,
bucketMask: uint32(config.buckets) - 1,
buckets: make([]*bucket, config.buckets),
deletables: make(chan *Item, config.deleteBuffer),
promotables: make(chan *Item, config.promoteBuffer),
control: make(chan interface{}),
}
for i := 0; i < int(config.buckets); i++ {
c.buckets[i] = &bucket{
lookup: make(map[string]*Item),
}
}
go c.worker()
c.restart()
return c
}
func (c *Cache) ItemCount() int {
count := 0
for _, b := range c.buckets {
count += b.itemCount()
}
return count
}
func (c *Cache) DeletePrefix(prefix string) int {
count := 0
for _, b := range c.buckets {
count += b.deletePrefix(prefix, c.deletables)
}
return count
}
// Deletes all items for which the matches func evaluates to true.
func (c *Cache) DeleteFunc(matches func(key string, item interface{}) bool) int {
count := 0
for _, b := range c.buckets {
count += b.deleteFunc(matches, c.deletables)
}
return count
}
// Get an item from the cache. Returns nil if the item wasn't found.
// This can return an expired item. Use item.Expired() to see if the item
// is expired and item.TTL() to see how long until the item expires (which
@@ -47,7 +81,7 @@ func (c *Cache) Get(key string) *Item {
if item == nil {
return nil
}
if item.expires > time.Now().Unix() {
if item.expires > time.Now().UnixNano() {
c.promote(item)
}
return item
@@ -119,6 +153,28 @@ func (c *Cache) Clear() {
// is called are likely to panic
func (c *Cache) Stop() {
close(c.promotables)
<-c.control
}
// Gets the number of items removed from the cache due to memory pressure since
// the last time GetDropped was called
func (c *Cache) GetDropped() int {
res := make(chan int)
c.control <- getDropped{res: res}
return <-res
}
// Sets a new max size. That can result in a GC being run if the new maximum size
// is smaller than the cached size
func (c *Cache) SetMaxSize(size int64) {
c.control <- setMaxSize{size}
}
func (c *Cache) restart() {
c.deletables = make(chan *Item, c.deleteBuffer)
c.promotables = make(chan *Item, c.promoteBuffer)
c.control = make(chan interface{})
go c.worker()
}
func (c *Cache) deleteItem(bucket *bucket, item *Item) {
@@ -146,6 +202,8 @@ func (c *Cache) promote(item *Item) {
}
func (c *Cache) worker() {
defer close(c.control)
dropped := 0
for {
select {
case item, ok := <-c.promotables:
@@ -153,10 +211,21 @@ func (c *Cache) worker() {
goto drain
}
if c.doPromote(item) && c.size > c.maxSize {
c.gc()
dropped += c.gc()
}
case item := <-c.deletables:
c.doDelete(item)
case control := <-c.control:
switch msg := control.(type) {
case getDropped:
msg.res <- dropped
dropped = 0
case setMaxSize:
c.maxSize = msg.size
if c.size > c.maxSize {
dropped += c.gc()
}
}
}
}
@@ -177,6 +246,9 @@ func (c *Cache) doDelete(item *Item) {
item.promotions = -2
} else {
c.size -= item.size
if c.onDelete != nil {
c.onDelete(item)
}
c.list.Remove(item.element)
}
}
@@ -199,11 +271,12 @@ func (c *Cache) doPromote(item *Item) bool {
return true
}
func (c *Cache) gc() {
func (c *Cache) gc() int {
dropped := 0
element := c.list.Back()
for i := 0; i < c.itemsToPrune; i++ {
if element == nil {
return
return dropped
}
prev := element.Prev()
item := element.Value.(*Item)
@@ -211,8 +284,13 @@ func (c *Cache) gc() {
c.bucket(item.key).delete(item.key)
c.size -= item.size
c.list.Remove(element)
if c.onDelete != nil {
c.onDelete(item)
}
dropped += 1
item.promotions = -2
}
element = prev
}
return dropped
}
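
A usage sketch of the new control-channel features, mirroring the `ResizeOnTheFly` test in the cache_test.go diff below (the sleep gives the asynchronous worker time to run, exactly as the tests do):

```go
package main

import (
	"fmt"
	"strconv"
	"time"

	"github.com/karlseguin/ccache/v2"
)

func main() {
	cache := ccache.New(ccache.Configure().MaxSize(9).ItemsToPrune(1))
	for i := 0; i < 5; i++ {
		cache.Set(strconv.Itoa(i), i, time.Minute)
	}
	// Shrinking below the current size triggers the worker's gc.
	cache.SetMaxSize(3)
	time.Sleep(10 * time.Millisecond)
	fmt.Println(cache.GetDropped()) // 2 in the mirrored test; the counter then resets
}
```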

cache_test.go
View File

@@ -16,11 +16,89 @@ func Test_Cache(t *testing.T) {
func (_ CacheTests) DeletesAValue() {
cache := New(Configure())
Expect(cache.ItemCount()).To.Equal(0)
cache.Set("spice", "flow", time.Minute)
cache.Set("worm", "sand", time.Minute)
Expect(cache.ItemCount()).To.Equal(2)
cache.Delete("spice")
Expect(cache.Get("spice")).To.Equal(nil)
Expect(cache.Get("worm").Value()).To.Equal("sand")
Expect(cache.ItemCount()).To.Equal(1)
}
func (_ CacheTests) DeletesAPrefix() {
cache := New(Configure())
Expect(cache.ItemCount()).To.Equal(0)
cache.Set("aaa", "1", time.Minute)
cache.Set("aab", "2", time.Minute)
cache.Set("aac", "3", time.Minute)
cache.Set("ac", "4", time.Minute)
cache.Set("z5", "7", time.Minute)
Expect(cache.ItemCount()).To.Equal(5)
Expect(cache.DeletePrefix("9a")).To.Equal(0)
Expect(cache.ItemCount()).To.Equal(5)
Expect(cache.DeletePrefix("aa")).To.Equal(3)
Expect(cache.Get("aaa")).To.Equal(nil)
Expect(cache.Get("aab")).To.Equal(nil)
Expect(cache.Get("aac")).To.Equal(nil)
Expect(cache.Get("ac").Value()).To.Equal("4")
Expect(cache.Get("z5").Value()).To.Equal("7")
Expect(cache.ItemCount()).To.Equal(2)
}
func (_ CacheTests) DeletesAFunc() {
cache := New(Configure())
Expect(cache.ItemCount()).To.Equal(0)
cache.Set("a", 1, time.Minute)
cache.Set("b", 2, time.Minute)
cache.Set("c", 3, time.Minute)
cache.Set("d", 4, time.Minute)
cache.Set("e", 5, time.Minute)
cache.Set("f", 6, time.Minute)
Expect(cache.ItemCount()).To.Equal(6)
Expect(cache.DeleteFunc(func(key string, item interface{}) bool {
return false
})).To.Equal(0)
Expect(cache.ItemCount()).To.Equal(6)
Expect(cache.DeleteFunc(func(key string, item interface{}) bool {
return item.(*Item).Value().(int) < 4
})).To.Equal(3)
Expect(cache.ItemCount()).To.Equal(3)
Expect(cache.DeleteFunc(func(key string, item interface{}) bool {
return key == "d"
})).To.Equal(1)
Expect(cache.ItemCount()).To.Equal(2)
}
func (_ CacheTests) OnDeleteCallbackCalled() {
onDeleteFnCalled := false
onDeleteFn := func(item *Item) {
if item.key == "spice" {
onDeleteFnCalled = true
}
}
cache := New(Configure().OnDelete(onDeleteFn))
cache.Set("spice", "flow", time.Minute)
cache.Set("worm", "sand", time.Minute)
time.Sleep(time.Millisecond * 10) // Run once to init
cache.Delete("spice")
time.Sleep(time.Millisecond * 10) // Wait for worker to pick up deleted items
Expect(cache.Get("spice")).To.Equal(nil)
Expect(cache.Get("worm").Value()).To.Equal("sand")
Expect(onDeleteFnCalled).To.Equal(true)
}
func (_ CacheTests) FetchesExpiredItems() {
@@ -41,9 +119,10 @@ func (_ CacheTests) GCsTheOldestItems() {
}
//let the items get promoted (and added to our list)
time.Sleep(time.Millisecond * 10)
cache.gc()
gcCache(cache)
Expect(cache.Get("9")).To.Equal(nil)
Expect(cache.Get("10").Value()).To.Equal(10)
Expect(cache.ItemCount()).To.Equal(490)
}
func (_ CacheTests) PromotedItemsDontGetPruned() {
@@ -54,7 +133,7 @@ func (_ CacheTests) PromotedItemsDontGetPruned() {
time.Sleep(time.Millisecond * 10) //run the worker once to init the list
cache.Get("9")
time.Sleep(time.Millisecond * 10)
cache.gc()
gcCache(cache)
Expect(cache.Get("9").Value()).To.Equal(9)
Expect(cache.Get("10")).To.Equal(nil)
Expect(cache.Get("11").Value()).To.Equal(11)
@@ -67,16 +146,23 @@ func (_ CacheTests) TrackerDoesNotCleanupHeldInstance() {
}
item := cache.TrackingGet("0")
time.Sleep(time.Millisecond * 10)
cache.gc()
gcCache(cache)
Expect(cache.Get("0").Value()).To.Equal(0)
Expect(cache.Get("1")).To.Equal(nil)
item.Release()
cache.gc()
gcCache(cache)
Expect(cache.Get("0")).To.Equal(nil)
}
func (_ CacheTests) RemovesOldestItemWhenFull() {
cache := New(Configure().MaxSize(5).ItemsToPrune(1))
onDeleteFnCalled := false
onDeleteFn := func(item *Item) {
if item.key == "0" {
onDeleteFnCalled = true
}
}
cache := New(Configure().MaxSize(5).ItemsToPrune(1).OnDelete(onDeleteFn))
for i := 0; i < 7; i++ {
cache.Set(strconv.Itoa(i), i, time.Minute)
}
@@ -84,6 +170,8 @@ func (_ CacheTests) RemovesOldestItemWhenFull() {
Expect(cache.Get("0")).To.Equal(nil)
Expect(cache.Get("1")).To.Equal(nil)
Expect(cache.Get("2").Value()).To.Equal(2)
Expect(onDeleteFnCalled).To.Equal(true)
Expect(cache.ItemCount()).To.Equal(5)
}
func (_ CacheTests) RemovesOldestItemWhenFullBySizer() {
@@ -97,6 +185,8 @@ func (_ CacheTests) RemovesOldestItemWhenFullBySizer() {
Expect(cache.Get("2")).To.Equal(nil)
Expect(cache.Get("3")).To.Equal(nil)
Expect(cache.Get("4").Value().(*SizedItem).id).To.Equal(4)
Expect(cache.GetDropped()).To.Equal(4)
Expect(cache.GetDropped()).To.Equal(0)
}
func (_ CacheTests) SetUpdatesSizeOnDelta() {
@@ -104,19 +194,19 @@ func (_ CacheTests) SetUpdatesSizeOnDelta() {
cache.Set("a", &SizedItem{0, 2}, time.Minute)
cache.Set("b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(5))
checkSize(cache, 5)
cache.Set("b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(5))
checkSize(cache, 5)
cache.Set("b", &SizedItem{0, 4}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(6))
checkSize(cache, 6)
cache.Set("b", &SizedItem{0, 2}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(4))
checkSize(cache, 4)
cache.Delete("b")
time.Sleep(time.Millisecond * 100)
Expect(cache.size).To.Equal(int64(2))
checkSize(cache, 2)
}
func (_ CacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
@@ -126,7 +216,7 @@ func (_ CacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
cache.Set("3", &SizedItem{1, 2}, time.Minute)
cache.Replace("4", &SizedItem{1, 2})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(6))
checkSize(cache, 6)
}
func (_ CacheTests) ReplaceChangesSize() {
@@ -136,15 +226,47 @@ func (_ CacheTests) ReplaceChangesSize() {
cache.Replace("2", &SizedItem{1, 2})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(4))
checkSize(cache, 4)
cache.Replace("2", &SizedItem{1, 1})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(3))
checkSize(cache, 3)
cache.Replace("2", &SizedItem{1, 3})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(5))
checkSize(cache, 5)
}
func (_ CacheTests) ResizeOnTheFly() {
cache := New(Configure().MaxSize(9).ItemsToPrune(1))
for i := 0; i < 5; i++ {
cache.Set(strconv.Itoa(i), i, time.Minute)
}
cache.SetMaxSize(3)
time.Sleep(time.Millisecond * 10)
Expect(cache.GetDropped()).To.Equal(2)
Expect(cache.Get("0")).To.Equal(nil)
Expect(cache.Get("1")).To.Equal(nil)
Expect(cache.Get("2").Value()).To.Equal(2)
Expect(cache.Get("3").Value()).To.Equal(3)
Expect(cache.Get("4").Value()).To.Equal(4)
cache.Set("5", 5, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.GetDropped()).To.Equal(1)
Expect(cache.Get("2")).To.Equal(nil)
Expect(cache.Get("3").Value()).To.Equal(3)
Expect(cache.Get("4").Value()).To.Equal(4)
Expect(cache.Get("5").Value()).To.Equal(5)
cache.SetMaxSize(10)
cache.Set("6", 6, time.Minute)
time.Sleep(time.Millisecond * 10)
Expect(cache.GetDropped()).To.Equal(0)
Expect(cache.Get("3").Value()).To.Equal(3)
Expect(cache.Get("4").Value()).To.Equal(4)
Expect(cache.Get("5").Value()).To.Equal(5)
Expect(cache.Get("6").Value()).To.Equal(6)
}
type SizedItem struct {
@@ -155,3 +277,15 @@ type SizedItem struct {
func (s *SizedItem) Size() int64 {
return s.s
}
func checkSize(cache *Cache, sz int64) {
cache.Stop()
Expect(cache.size).To.Equal(sz)
cache.restart()
}
func gcCache(cache *Cache) {
cache.Stop()
cache.gc()
cache.restart()
}

configuration.go
View File

@@ -8,6 +8,7 @@ type Configuration struct {
promoteBuffer int
getsPerPromote int32
tracking bool
onDelete func(item *Item)
}
// Creates a configuration object with sensible defaults
@@ -65,7 +66,7 @@ func (c *Configuration) DeleteBuffer(size uint32) *Configuration {
return c
}
// Give a large cache with a high read / write ratio, it's usually unecessary
// Give a large cache with a high read / write ratio, it's usually unnecessary
// to promote an item on every Get. GetsPerPromote specifies the number of Gets
// a key must have before being promoted
// [3]
@@ -92,3 +93,11 @@ func (c *Configuration) Track() *Configuration {
c.tracking = true
return c
}
// OnDelete allows setting a callback function to react to item deletion.
// This typically allows cleanup of resources, such as calling Close() on
// cached objects that require some kind of tear-down.
func (c *Configuration) OnDelete(callback func(item *Item)) *Configuration {
c.onDelete = callback
return c
}
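
A usage sketch for the new option (import path per the /v2 module migration elsewhere in this changeset):

```go
package main

import (
	"fmt"
	"time"

	"github.com/karlseguin/ccache/v2"
)

func main() {
	// The callback runs on the cache's worker goroutine whenever an item is
	// removed, whether explicitly or by gc under memory pressure.
	cache := ccache.New(ccache.Configure().OnDelete(func(item *ccache.Item) {
		fmt.Println("tearing down:", item.Value())
	}))
	cache.Set("conn", "a pooled resource", time.Minute)
	cache.Delete("conn")
	time.Sleep(10 * time.Millisecond) // deletes are processed asynchronously
}
```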

8
go.mod Normal file
View File

@@ -0,0 +1,8 @@
module github.com/karlseguin/ccache/v2
go 1.13
require (
github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0
)

6
go.sum Normal file
View File

@@ -0,0 +1,6 @@
github.com/karlseguin/expect v1.0.1 h1:z4wy4npwwHSWKjGWH85WNJO42VQhovxTCZDSzhjo8hY=
github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA=
github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ=
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM=

item.go
View File

@@ -85,19 +85,19 @@ func (i *Item) Release() {
func (i *Item) Expired() bool {
expires := atomic.LoadInt64(&i.expires)
return expires < time.Now().Unix()
return expires < time.Now().UnixNano()
}
func (i *Item) TTL() time.Duration {
expires := atomic.LoadInt64(&i.expires)
return time.Second * time.Duration(expires-time.Now().Unix())
return time.Nanosecond * time.Duration(expires-time.Now().UnixNano())
}
func (i *Item) Expires() time.Time {
expires := atomic.LoadInt64(&i.expires)
return time.Unix(expires, 0)
return time.Unix(0, expires)
}
func (i *Item) Extend(duration time.Duration) {
atomic.StoreInt64(&i.expires, time.Now().Add(duration).Unix())
atomic.StoreInt64(&i.expires, time.Now().Add(duration).UnixNano())
}
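
A hedged illustration of what the finer resolution buys: sub-second TTLs now behave sensibly, where the old second-resolution arithmetic truncated them toward zero.

```go
package main

import (
	"fmt"
	"time"

	"github.com/karlseguin/ccache/v2"
)

func main() {
	cache := ccache.New(ccache.Configure())
	cache.Set("token", "abc", 250*time.Millisecond)

	item := cache.Get("token")
	fmt.Println(item.TTL())     // a bit under 250ms; previously this truncated toward 0s
	fmt.Println(item.Expired()) // false

	time.Sleep(300 * time.Millisecond)
	fmt.Println(item.Expired()) // true
}
```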

item_test.go
View File

@@ -1,9 +1,11 @@
package ccache
import (
. "github.com/karlseguin/expect"
"math"
"testing"
"time"
. "github.com/karlseguin/expect"
)
type ItemTests struct{}
@@ -19,29 +21,29 @@ func (_ *ItemTests) Promotability() {
}
func (_ *ItemTests) Expired() {
now := time.Now().Unix()
item1 := &Item{expires: now + 1}
item2 := &Item{expires: now - 1}
now := time.Now().UnixNano()
item1 := &Item{expires: now + (10 * int64(time.Millisecond))}
item2 := &Item{expires: now - (10 * int64(time.Millisecond))}
Expect(item1.Expired()).To.Equal(false)
Expect(item2.Expired()).To.Equal(true)
}
func (_ *ItemTests) TTL() {
now := time.Now().Unix()
item1 := &Item{expires: now + 10}
item2 := &Item{expires: now - 10}
Expect(item1.TTL()).To.Equal(time.Second * 10)
Expect(item2.TTL()).To.Equal(time.Second * -10)
now := time.Now().UnixNano()
item1 := &Item{expires: now + int64(time.Second)}
item2 := &Item{expires: now - int64(time.Second)}
Expect(int(math.Ceil(item1.TTL().Seconds()))).To.Equal(1)
Expect(int(math.Ceil(item2.TTL().Seconds()))).To.Equal(-1)
}
func (_ *ItemTests) Expires() {
now := time.Now().Unix()
item := &Item{expires: now + 10}
Expect(item.Expires().Unix()).To.Equal(now + 10)
now := time.Now().UnixNano()
item := &Item{expires: now + (10)}
Expect(item.Expires().UnixNano()).To.Equal(now + 10)
}
func (_ *ItemTests) Extend() {
item := &Item{expires: time.Now().Unix() + 10}
item := &Item{expires: time.Now().UnixNano() + 10}
item.Extend(time.Minute * 2)
Expect(item.Expires().Unix()).To.Equal(time.Now().Unix() + 120)
}

layeredbucket.go
View File

@@ -10,14 +10,32 @@ type layeredBucket struct {
buckets map[string]*bucket
}
func (b *layeredBucket) itemCount() int {
count := 0
b.RLock()
defer b.RUnlock()
for _, b := range b.buckets {
count += b.itemCount()
}
return count
}
func (b *layeredBucket) get(primary, secondary string) *Item {
bucket := b.getSecondaryBucket(primary)
if bucket == nil {
return nil
}
return bucket.get(secondary)
}
func (b *layeredBucket) getSecondaryBucket(primary string) *bucket {
b.RLock()
bucket, exists := b.buckets[primary]
b.RUnlock()
if exists == false {
return nil
}
return bucket.get(secondary)
return bucket
}
func (b *layeredBucket) set(primary, secondary string, value interface{}, duration time.Duration) (*Item, *Item) {
@@ -43,6 +61,26 @@ func (b *layeredBucket) delete(primary, secondary string) *Item {
return bucket.delete(secondary)
}
func (b *layeredBucket) deletePrefix(primary, prefix string, deletables chan *Item) int {
b.RLock()
bucket, exists := b.buckets[primary]
b.RUnlock()
if exists == false {
return 0
}
return bucket.deletePrefix(prefix, deletables)
}
func (b *layeredBucket) deleteFunc(primary string, matches func(key string, item interface{}) bool, deletables chan *Item) int {
b.RLock()
bucket, exists := b.buckets[primary]
b.RUnlock()
if exists == false {
return 0
}
return bucket.deleteFunc(matches, deletables)
}
func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool {
b.RLock()
bucket, exists := b.buckets[primary]

layeredcache.go
View File

@@ -16,6 +16,7 @@ type LayeredCache struct {
size int64
deletables chan *Item
promotables chan *Item
control chan interface{}
}
// Create a new layered cache with the specified configuration.
@@ -38,17 +39,25 @@ func Layered(config *Configuration) *LayeredCache {
bucketMask: uint32(config.buckets) - 1,
buckets: make([]*layeredBucket, config.buckets),
deletables: make(chan *Item, config.deleteBuffer),
promotables: make(chan *Item, config.promoteBuffer),
control: make(chan interface{}),
}
for i := 0; i < int(config.buckets); i++ {
c.buckets[i] = &layeredBucket{
buckets: make(map[string]*bucket),
}
}
go c.worker()
c.restart()
return c
}
func (c *LayeredCache) ItemCount() int {
count := 0
for _, b := range c.buckets {
count += b.itemCount()
}
return count
}
// Get an item from the cache. Returns nil if the item wasn't found.
// This can return an expired item. Use item.Expired() to see if the item
// is expired and item.TTL() to see how long until the item expires (which
@@ -58,12 +67,30 @@ func (c *LayeredCache) Get(primary, secondary string) *Item {
if item == nil {
return nil
}
if item.expires > time.Now().Unix() {
if item.expires > time.Now().UnixNano() {
c.promote(item)
}
return item
}
// Get the secondary cache for a given primary key. This operation will
// never return nil. In the case where the primary key does not exist, a
// new, underlying, empty bucket will be created and returned.
func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache {
primaryBkt := c.bucket(primary)
bkt := primaryBkt.getSecondaryBucket(primary)
primaryBkt.Lock()
if bkt == nil {
bkt = &bucket{lookup: make(map[string]*Item)}
primaryBkt.buckets[primary] = bkt
}
primaryBkt.Unlock()
return &SecondaryCache{
bucket: bkt,
pCache: c,
}
}
// Used when the cache was created with the Track() configuration option.
// Avoid otherwise
func (c *LayeredCache) TrackingGet(primary, secondary string) TrackedItem {
@@ -95,7 +122,7 @@ func (c *LayeredCache) Replace(primary, secondary string, value interface{}) boo
// Attempts to get the value from the cache and calls fetch on a miss.
// If fetch returns an error, no value is cached and the error is returned back
// to the caller.
func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (interface{}, error) {
func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
item := c.Get(primary, secondary)
if item != nil {
return item, nil
@@ -122,6 +149,16 @@ func (c *LayeredCache) DeleteAll(primary string) bool {
return c.bucket(primary).deleteAll(primary, c.deletables)
}
// Deletes all items that share the same primary key and prefix.
func (c *LayeredCache) DeletePrefix(primary, prefix string) int {
return c.bucket(primary).deletePrefix(primary, prefix, c.deletables)
}
// Deletes all items that share the same primary key and for which the matches func evaluates to true.
func (c *LayeredCache) DeleteFunc(primary string, matches func(key string, item interface{}) bool) int {
return c.bucket(primary).deleteFunc(primary, matches, c.deletables)
}
//this isn't thread safe. It's meant to be called from non-concurrent tests
func (c *LayeredCache) Clear() {
for _, bucket := range c.buckets {
@@ -131,6 +168,31 @@ func (c *LayeredCache) Clear() {
c.list = list.New()
}
func (c *LayeredCache) Stop() {
close(c.promotables)
<-c.control
}
// Gets the number of items removed from the cache due to memory pressure since
// the last time GetDropped was called
func (c *LayeredCache) GetDropped() int {
res := make(chan int)
c.control <- getDropped{res: res}
return <-res
}
// Sets a new max size. That can result in a GC being run if the new maximum size
// is smaller than the cached size
func (c *LayeredCache) SetMaxSize(size int64) {
c.control <- setMaxSize{size}
}
func (c *LayeredCache) restart() {
c.promotables = make(chan *Item, c.promoteBuffer)
c.control = make(chan interface{})
go c.worker()
}
func (c *LayeredCache) set(primary, secondary string, value interface{}, duration time.Duration) *Item {
item, existing := c.bucket(primary).set(primary, secondary, value, duration)
if existing != nil {
@@ -151,32 +213,51 @@ func (c *LayeredCache) promote(item *Item) {
}
func (c *LayeredCache) worker() {
defer close(c.control)
dropped := 0
for {
select {
case item := <-c.promotables:
case item, ok := <-c.promotables:
if ok == false {
return
}
if c.doPromote(item) && c.size > c.maxSize {
c.gc()
dropped += c.gc()
}
case item := <-c.deletables:
if item.element == nil {
item.promotions = -2
atomic.StoreInt32(&item.promotions, -2)
} else {
c.size -= item.size
if c.onDelete != nil {
c.onDelete(item)
}
c.list.Remove(item.element)
}
case control := <-c.control:
switch msg := control.(type) {
case getDropped:
msg.res <- dropped
dropped = 0
case setMaxSize:
c.maxSize = msg.size
if c.size > c.maxSize {
dropped += c.gc()
}
}
}
}
}
func (c *LayeredCache) doPromote(item *Item) bool {
// deleted before it ever got promoted
if item.promotions == -2 {
if atomic.LoadInt32(&item.promotions) == -2 {
return false
}
if item.element != nil { //not a new item
if item.shouldPromote(c.getsPerPromote) {
c.list.MoveToFront(item.element)
item.promotions = 0
atomic.StoreInt32(&item.promotions, 0)
}
return false
}
@@ -185,11 +266,12 @@ func (c *LayeredCache) doPromote(item *Item) bool {
return true
}
func (c *LayeredCache) gc() {
func (c *LayeredCache) gc() int {
element := c.list.Back()
dropped := 0
for i := 0; i < c.itemsToPrune; i++ {
if element == nil {
return
return dropped
}
prev := element.Prev()
item := element.Value.(*Item)
@@ -198,7 +280,9 @@ func (c *LayeredCache) gc() {
c.size -= item.size
c.list.Remove(element)
item.promotions = -2
dropped += 1
}
element = prev
}
return dropped
}
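
A usage sketch of the layered variants added above (import path per the /v2 module migration in this changeset):

```go
package main

import (
	"fmt"
	"time"

	"github.com/karlseguin/ccache/v2"
)

func main() {
	cache := ccache.Layered(ccache.Configure())
	cache.Set("/users/goku", "type:json", `{"name":"goku"}`, time.Minute)
	cache.Set("/users/goku", "type:xml", "<user/>", time.Minute)
	cache.Set("/users/vegeta", "type:json", `{"name":"vegeta"}`, time.Minute)

	// Both deletes are scoped to the given primary key.
	fmt.Println(cache.DeletePrefix("/users/goku", "type:")) // 2
	fmt.Println(cache.DeleteFunc("/users/vegeta", func(key string, item interface{}) bool {
		return key == "type:json"
	})) // 1
}
```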

layeredcache_test.go
View File

@@ -1,10 +1,11 @@
package ccache
import (
. "github.com/karlseguin/expect"
"strconv"
"testing"
"time"
. "github.com/karlseguin/expect"
)
type LayeredCacheTests struct{}
@@ -16,6 +17,7 @@ func Test_LayeredCache(t *testing.T) {
func (_ *LayeredCacheTests) GetsANonExistantValue() {
cache := newLayered()
Expect(cache.Get("spice", "flow")).To.Equal(nil)
Expect(cache.ItemCount()).To.Equal(0)
}
func (_ *LayeredCacheTests) SetANewValue() {
@@ -23,6 +25,7 @@ func (_ *LayeredCacheTests) SetANewValue() {
cache.Set("spice", "flow", "a value", time.Minute)
Expect(cache.Get("spice", "flow").Value()).To.Equal("a value")
Expect(cache.Get("spice", "stop")).To.Equal(nil)
Expect(cache.ItemCount()).To.Equal(1)
}
func (_ *LayeredCacheTests) SetsMultipleValueWithinTheSameLayer() {
@@ -37,6 +40,7 @@ func (_ *LayeredCacheTests) SetsMultipleValueWithinTheSameLayer() {
Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
Expect(cache.Get("leto", "brother")).To.Equal(nil)
Expect(cache.Get("baron", "friend")).To.Equal(nil)
Expect(cache.ItemCount()).To.Equal(3)
}
func (_ *LayeredCacheTests) ReplaceDoesNothingIfKeyDoesNotExist() {
@@ -50,6 +54,7 @@ func (_ *LayeredCacheTests) ReplaceUpdatesTheValue() {
cache.Set("spice", "flow", "value-a", time.Minute)
Expect(cache.Replace("spice", "flow", "value-b")).To.Equal(true)
Expect(cache.Get("spice", "flow").Value().(string)).To.Equal("value-b")
Expect(cache.ItemCount()).To.Equal(1)
//not sure how to test that the TTL hasn't changed, short of a sleep...
}
@@ -63,6 +68,87 @@ func (_ *LayeredCacheTests) DeletesAValue() {
Expect(cache.Get("spice", "must").Value()).To.Equal("value-b")
Expect(cache.Get("spice", "worm")).To.Equal(nil)
Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
Expect(cache.ItemCount()).To.Equal(2)
}
func (_ *LayeredCacheTests) DeletesAPrefix() {
cache := newLayered()
Expect(cache.ItemCount()).To.Equal(0)
cache.Set("spice", "aaa", "1", time.Minute)
cache.Set("spice", "aab", "2", time.Minute)
cache.Set("spice", "aac", "3", time.Minute)
cache.Set("leto", "aac", "3", time.Minute)
cache.Set("spice", "ac", "4", time.Minute)
cache.Set("spice", "z5", "7", time.Minute)
Expect(cache.ItemCount()).To.Equal(6)
Expect(cache.DeletePrefix("spice", "9a")).To.Equal(0)
Expect(cache.ItemCount()).To.Equal(6)
Expect(cache.DeletePrefix("spice", "aa")).To.Equal(3)
Expect(cache.Get("spice", "aaa")).To.Equal(nil)
Expect(cache.Get("spice", "aab")).To.Equal(nil)
Expect(cache.Get("spice", "aac")).To.Equal(nil)
Expect(cache.Get("spice", "ac").Value()).To.Equal("4")
Expect(cache.Get("spice", "z5").Value()).To.Equal("7")
Expect(cache.ItemCount()).To.Equal(3)
}
func (_ *LayeredCacheTests) DeletesAFunc() {
cache := newLayered()
Expect(cache.ItemCount()).To.Equal(0)
cache.Set("spice", "a", 1, time.Minute)
cache.Set("leto", "b", 2, time.Minute)
cache.Set("spice", "c", 3, time.Minute)
cache.Set("spice", "d", 4, time.Minute)
cache.Set("spice", "e", 5, time.Minute)
cache.Set("spice", "f", 6, time.Minute)
Expect(cache.ItemCount()).To.Equal(6)
Expect(cache.DeleteFunc("spice", func(key string, item interface{}) bool {
return false
})).To.Equal(0)
Expect(cache.ItemCount()).To.Equal(6)
Expect(cache.DeleteFunc("spice", func(key string, item interface{}) bool {
return item.(*Item).Value().(int) < 4
})).To.Equal(2)
Expect(cache.ItemCount()).To.Equal(4)
Expect(cache.DeleteFunc("spice", func(key string, item interface{}) bool {
return key == "d"
})).To.Equal(1)
Expect(cache.ItemCount()).To.Equal(3)
}
func (_ *LayeredCacheTests) OnDeleteCallbackCalled() {
onDeleteFnCalled := false
onDeleteFn := func(item *Item) {
if item.group == "spice" && item.key == "flow" {
onDeleteFnCalled = true
}
}
cache := Layered(Configure().OnDelete(onDeleteFn))
cache.Set("spice", "flow", "value-a", time.Minute)
cache.Set("spice", "must", "value-b", time.Minute)
cache.Set("leto", "sister", "ghanima", time.Minute)
time.Sleep(time.Millisecond * 10) // Run once to init
cache.Delete("spice", "flow")
time.Sleep(time.Millisecond * 10) // Wait for worker to pick up deleted items
Expect(cache.Get("spice", "flow")).To.Equal(nil)
Expect(cache.Get("spice", "must").Value()).To.Equal("value-b")
Expect(cache.Get("spice", "worm")).To.Equal(nil)
Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
Expect(onDeleteFnCalled).To.Equal(true)
}
func (_ *LayeredCacheTests) DeletesALayer() {
@@ -86,7 +172,7 @@ func (_ LayeredCacheTests) GCsTheOldestItems() {
cache.Set("xx", "b", 9001, time.Minute)
//let the items get promoted (and added to our list)
time.Sleep(time.Millisecond * 10)
cache.gc()
gcLayeredCache(cache)
Expect(cache.Get("xx", "a")).To.Equal(nil)
Expect(cache.Get("xx", "b").Value()).To.Equal(9001)
Expect(cache.Get("8", "a")).To.Equal(nil)
@@ -102,7 +188,7 @@ func (_ LayeredCacheTests) PromotedItemsDontGetPruned() {
time.Sleep(time.Millisecond * 10) //run the worker once to init the list
cache.Get("9", "a")
time.Sleep(time.Millisecond * 10)
cache.gc()
gcLayeredCache(cache)
Expect(cache.Get("9", "a").Value()).To.Equal(9)
Expect(cache.Get("10", "a")).To.Equal(nil)
Expect(cache.Get("11", "a").Value()).To.Equal(11)
@@ -115,11 +201,11 @@ func (_ LayeredCacheTests) TrackerDoesNotCleanupHeldInstance() {
}
item := cache.TrackingGet("0", "a")
time.Sleep(time.Millisecond * 10)
cache.gc()
gcLayeredCache(cache)
Expect(cache.Get("0", "a").Value()).To.Equal(0)
Expect(cache.Get("1", "a")).To.Equal(nil)
item.Release()
cache.gc()
gcLayeredCache(cache)
Expect(cache.Get("0", "a")).To.Equal(nil)
}
@@ -137,6 +223,40 @@ func (_ LayeredCacheTests) RemovesOldestItemWhenFull() {
Expect(cache.Get("2", "a")).To.Equal(nil)
Expect(cache.Get("3", "a").Value()).To.Equal(3)
Expect(cache.Get("xx", "b").Value()).To.Equal(9001)
Expect(cache.GetDropped()).To.Equal(4)
Expect(cache.GetDropped()).To.Equal(0)
}
func (_ LayeredCacheTests) ResizeOnTheFly() {
cache := Layered(Configure().MaxSize(9).ItemsToPrune(1))
for i := 0; i < 5; i++ {
cache.Set(strconv.Itoa(i), "a", i, time.Minute)
}
cache.SetMaxSize(3)
time.Sleep(time.Millisecond * 10)
Expect(cache.GetDropped()).To.Equal(2)
Expect(cache.Get("0", "a")).To.Equal(nil)
Expect(cache.Get("1", "a")).To.Equal(nil)
Expect(cache.Get("2", "a").Value()).To.Equal(2)
Expect(cache.Get("3", "a").Value()).To.Equal(3)
Expect(cache.Get("4", "a").Value()).To.Equal(4)
cache.Set("5", "a", 5, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.GetDropped()).To.Equal(1)
Expect(cache.Get("2", "a")).To.Equal(nil)
Expect(cache.Get("3", "a").Value()).To.Equal(3)
Expect(cache.Get("4", "a").Value()).To.Equal(4)
Expect(cache.Get("5", "a").Value()).To.Equal(5)
cache.SetMaxSize(10)
cache.Set("6", "a", 6, time.Minute)
time.Sleep(time.Millisecond * 10)
Expect(cache.GetDropped()).To.Equal(0)
Expect(cache.Get("3", "a").Value()).To.Equal(3)
Expect(cache.Get("4", "a").Value()).To.Equal(4)
Expect(cache.Get("5", "a").Value()).To.Equal(5)
Expect(cache.Get("6", "a").Value()).To.Equal(6)
}
func newLayered() *LayeredCache {
@@ -161,20 +281,20 @@ func (_ LayeredCacheTests) SetUpdatesSizeOnDelta() {
cache.Set("pri", "a", &SizedItem{0, 2}, time.Minute)
cache.Set("pri", "b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(5))
checkLayeredSize(cache, 5)
cache.Set("pri", "b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(5))
checkLayeredSize(cache, 5)
cache.Set("pri", "b", &SizedItem{0, 4}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(6))
checkLayeredSize(cache, 6)
cache.Set("pri", "b", &SizedItem{0, 2}, time.Minute)
cache.Set("sec", "b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(7))
checkLayeredSize(cache, 7)
cache.Delete("pri", "b")
time.Sleep(time.Millisecond * 10)
Expect(cache.size).To.Equal(int64(5))
checkLayeredSize(cache, 5)
}
func (_ LayeredCacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
@@ -184,7 +304,7 @@ func (_ LayeredCacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
cache.Set("pri", "3", &SizedItem{1, 2}, time.Minute)
cache.Replace("sec", "3", &SizedItem{1, 2})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(6))
checkLayeredSize(cache, 6)
}
func (_ LayeredCacheTests) ReplaceChangesSize() {
@@ -194,13 +314,25 @@ func (_ LayeredCacheTests) ReplaceChangesSize() {
cache.Replace("pri", "2", &SizedItem{1, 2})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(4))
checkLayeredSize(cache, 4)
cache.Replace("pri", "2", &SizedItem{1, 1})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(3))
checkLayeredSize(cache, 3)
cache.Replace("pri", "2", &SizedItem{1, 3})
time.Sleep(time.Millisecond * 5)
Expect(cache.size).To.Equal(int64(5))
checkLayeredSize(cache, 5)
}
func checkLayeredSize(cache *LayeredCache, sz int64) {
cache.Stop()
Expect(cache.size).To.Equal(sz)
cache.restart()
}
func gcLayeredCache(cache *LayeredCache) {
cache.Stop()
cache.gc()
cache.restart()
}

readme.md
View File

@@ -7,11 +7,15 @@ Lock contention on the list is reduced by:
* Using a buffered channel to queue promotions for a single worker
* Garbage collecting within the same thread as the worker
Unless otherwise stated, all methods are thread-safe.
## Setup
First, download the project:
go get github.com/karlseguin/ccache
```go
go get github.com/karlseguin/ccache/v2
```
## Configuration
Next, import and create a `Cache` instance:
@@ -19,7 +23,7 @@ Next, import and create a `Cache` instance:
```go
import (
"github.com/karlseguin/ccache"
"github.com/karlseguin/ccache/v2"
)
var cache = ccache.New(ccache.Configure())
@@ -28,7 +32,7 @@ var cache = ccache.New(ccache.Configure())
`Configure` exposes a chainable API:
```go
var cache = ccache.New(ccache.Configure().MaxSize(1000).itemsToPrune(100))
var cache = ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100))
```
The most likely configuration options to tweak are:
@@ -83,12 +87,18 @@ item, err := cache.Fetch("user:4", time.Minute * 10, func() (interface{}, error)
```
### Delete
`Delete` expects the key to delete. It's ok to call `Delete` on a non-existant key:
`Delete` expects the key to delete. It's ok to call `Delete` on a non-existent key:
```go
cache.Delete("user:4")
```
### DeletePrefix
`DeletePrefix` deletes all keys matching the provided prefix. Returns the number of keys removed.
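
A hedged example, assuming keys share a common namespace prefix:

```go
deleted := cache.DeletePrefix("user:") // number of keys removed
```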
### Clear
`Clear` clears the cache. This method is **not** thread safe. It is meant to be used from tests.
### Extend
The life of an item can be changed via the `Extend` method. This will change the expiry of the item by the specified duration relative to the current time.
@@ -101,6 +111,14 @@ cache.Replace("user:4", user)
`Replace` returns true if the item existed (and thus was replaced). In the case where the key was not in the cache, the value *is not* inserted and false is returned.
### GetDropped
You can get the number of keys evicted due to memory pressure by calling `GetDropped`:
```go
dropped := cache.GetDropped()
```
The counter is reset on every call. If the cache's gc is running, `GetDropped` waits for it to finish; it's meant to be called asynchronously for statistics/monitoring purposes.
### Stop
The cache's background worker can be stopped by calling `Stop`. Once `Stop` is called
the cache should not be used (calls are likely to panic). Stop must be called in order to allow the garbage collector to reap the cache.
@@ -122,11 +140,11 @@ user := item.Value() //will be nil if "user:4" didn't exist in the cache
item.Release() //can be called even if item.Value() returned nil
```
In practive, `Release` wouldn't be called until later, at some other place in your code.
In practice, `Release` wouldn't be called until later, at some other place in your code.
There are a couple of reasons to use the tracking mode if other parts of your code also hold references to objects. First, if you're already going to hold a reference to these objects, there's really no reason not to have them in the cache - the memory is used up anyway.
More important, it helps ensure that you're code returns consistent data. With tracking, "user:4" might be purged, and a subsequent `Fetch` would reload the data. This can result in different versions of "user:4" being returned by different parts of your system.
More important, it helps ensure that your code returns consistent data. With tracking, "user:4" might be purged, and a subsequent `Fetch` would reload the data. This can result in different versions of "user:4" being returned by different parts of your system.
## LayeredCache
@@ -151,6 +169,18 @@ cache.Delete("/users/goku", "type:xml")
cache.DeleteAll("/users/goku")
```
## SecondaryCache
In some cases, when using a `LayeredCache`, it may be desirable to always be acting on the secondary portion of the cache entry. This could be the case where the primary key is used as a key elsewhere in your code. The `SecondaryCache` is retrieved with:
```go
cache := ccache.Layered(ccache.Configure())
sCache := cache.GetOrCreateSecondaryCache("/users/goku")
sCache.Set("type:json", "{value_to_cache}", time.Minute * 5)
```
The semantics for interacting with the `SecondaryCache` are exactly the same as for a regular `Cache`. One difference is that `GetOrCreateSecondaryCache` never returns nil: for a non-existent primary key, it returns a new, empty secondary cache.
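
The usual operations then work unchanged against the secondary portion, e.g. (a sketch; `loadUserJSON` is a hypothetical loader):

```go
item, err := sCache.Fetch("type:json", time.Minute*5, func() (interface{}, error) {
	return loadUserJSON() // hypothetical
})
```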
## Size
By default, items added to a cache have a size of 1. This means that if you configure `MaxSize(10000)`, you'll be able to store 10000 items in the cache.

72
secondarycache.go Normal file
View File

@@ -0,0 +1,72 @@
package ccache
import "time"
type SecondaryCache struct {
bucket *bucket
pCache *LayeredCache
}
// Get the secondary key.
// The semantics are the same as for LayeredCache.Get
func (s *SecondaryCache) Get(secondary string) *Item {
return s.bucket.get(secondary)
}
// Set the secondary key to a value.
// The semantics are the same as for LayeredCache.Set
func (s *SecondaryCache) Set(secondary string, value interface{}, duration time.Duration) *Item {
item, existing := s.bucket.set(secondary, value, duration)
if existing != nil {
s.pCache.deletables <- existing
}
s.pCache.promote(item)
return item
}
// Fetch or set a secondary key.
// The semantics are the same as for LayeredCache.Fetch
func (s *SecondaryCache) Fetch(secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
item := s.Get(secondary)
if item != nil {
return item, nil
}
value, err := fetch()
if err != nil {
return nil, err
}
return s.Set(secondary, value, duration), nil
}
// Delete a secondary key.
// The semantics are the same as for LayeredCache.Delete
func (s *SecondaryCache) Delete(secondary string) bool {
item := s.bucket.delete(secondary)
if item != nil {
s.pCache.deletables <- item
return true
}
return false
}
// Replace a secondary key.
// The semantics are the same as for LayeredCache.Replace
func (s *SecondaryCache) Replace(secondary string, value interface{}) bool {
item := s.Get(secondary)
if item == nil {
return false
}
s.Set(secondary, value, item.TTL())
return true
}
// Track a secondary key.
// The semantics are the same as for LayeredCache.TrackingGet
func (c *SecondaryCache) TrackingGet(secondary string) TrackedItem {
item := c.Get(secondary)
if item == nil {
return NilTracked
}
item.track()
return item
}

105
secondarycache_test.go Normal file
View File

@@ -0,0 +1,105 @@
package ccache
import (
. "github.com/karlseguin/expect"
"strconv"
"testing"
"time"
)
type SecondaryCacheTests struct{}
func Test_SecondaryCache(t *testing.T) {
Expectify(new(SecondaryCacheTests), t)
}
func (_ SecondaryCacheTests) GetsANonExistantValue() {
cache := newLayered().GetOrCreateSecondaryCache("foo")
Expect(cache).Not.To.Equal(nil)
}
func (_ SecondaryCacheTests) SetANewValue() {
cache := newLayered()
cache.Set("spice", "flow", "a value", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
Expect(sCache.Get("flow").Value()).To.Equal("a value")
Expect(sCache.Get("stop")).To.Equal(nil)
}
func (_ SecondaryCacheTests) ValueCanBeSeenInBothCaches1() {
cache := newLayered()
cache.Set("spice", "flow", "a value", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
sCache.Set("orinoco", "another value", time.Minute)
Expect(sCache.Get("orinoco").Value()).To.Equal("another value")
Expect(cache.Get("spice", "orinoco").Value()).To.Equal("another value")
}
func (_ SecondaryCacheTests) ValueCanBeSeenInBothCaches2() {
cache := newLayered()
sCache := cache.GetOrCreateSecondaryCache("spice")
sCache.Set("flow", "a value", time.Minute)
Expect(sCache.Get("flow").Value()).To.Equal("a value")
Expect(cache.Get("spice", "flow").Value()).To.Equal("a value")
}
func (_ SecondaryCacheTests) DeletesAreReflectedInBothCaches() {
cache := newLayered()
cache.Set("spice", "flow", "a value", time.Minute)
cache.Set("spice", "sister", "ghanima", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
cache.Delete("spice", "flow")
Expect(cache.Get("spice", "flow")).To.Equal(nil)
Expect(sCache.Get("flow")).To.Equal(nil)
sCache.Delete("sister")
Expect(cache.Get("spice", "sister")).To.Equal(nil)
Expect(sCache.Get("sister")).To.Equal(nil)
}
func (_ SecondaryCacheTests) ReplaceDoesNothingIfKeyDoesNotExist() {
cache := newLayered()
sCache := cache.GetOrCreateSecondaryCache("spice")
Expect(sCache.Replace("flow", "value-a")).To.Equal(false)
Expect(cache.Get("spice", "flow")).To.Equal(nil)
}
func (_ SecondaryCacheTests) ReplaceUpdatesTheValue() {
cache := newLayered()
cache.Set("spice", "flow", "value-a", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
Expect(sCache.Replace("flow", "value-b")).To.Equal(true)
Expect(cache.Get("spice", "flow").Value().(string)).To.Equal("value-b")
}
func (_ SecondaryCacheTests) FetchReturnsAnExistingValue() {
cache := newLayered()
cache.Set("spice", "flow", "value-a", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
val, _ := sCache.Fetch("flow", time.Minute, func() (interface{}, error) { return "a fetched value", nil })
Expect(val.Value().(string)).To.Equal("value-a")
}
func (_ SecondaryCacheTests) FetchReturnsANewValue() {
cache := newLayered()
sCache := cache.GetOrCreateSecondaryCache("spice")
val, _ := sCache.Fetch("flow", time.Minute, func() (interface{}, error) { return "a fetched value", nil })
Expect(val.Value().(string)).To.Equal("a fetched value")
}
func (_ SecondaryCacheTests) TrackerDoesNotCleanupHeldInstance() {
cache := Layered(Configure().ItemsToPrune(10).Track())
for i := 0; i < 10; i++ {
cache.Set(strconv.Itoa(i), "a", i, time.Minute)
}
sCache := cache.GetOrCreateSecondaryCache("0")
item := sCache.TrackingGet("a")
time.Sleep(time.Millisecond * 10)
gcLayeredCache(cache)
Expect(cache.Get("0", "a").Value()).To.Equal(0)
Expect(cache.Get("1", "a")).To.Equal(nil)
item.Release()
gcLayeredCache(cache)
Expect(cache.Get("0", "a")).To.Equal(nil)
}