8 Commits

| Author | SHA1 | Message | Date |
| ------ | ---- | ------- | ---- |
| Karl Seguin | d7846ec7e0 | grab lookup len under read lock | 2020-08-13 10:41:28 +08:00 |
| Karl Seguin | f63031fa40 | Merge pull request #45 from bep/DeleteFunc (Add DeleteFunc) | 2020-08-13 10:37:20 +08:00 |
| Bjørn Erik Pedersen | d56665a86e | Add DeleteFunc. This shares DeletePrefix's implementation. | 2020-08-12 17:47:11 +02:00 |
| Karl Seguin | 223703f7f0 | Merge pull request #44 from bep/layered-delete-prefix (Add DeletePrefix to LayeredCache) | 2020-08-12 08:39:47 +08:00 |
| Bjørn Erik Pedersen | a24d7f8c53 | Add DeletePrefix to LayeredCache | 2020-08-11 19:04:54 +02:00 |
| Karl Seguin | 3b58df727e | Merge pull request #43 from jonathonlacher/patch-1 (fix spelling in readme) | 2020-07-17 14:07:50 +08:00 |
| Jonathon Lacher | 4c88bf60e6 | fix spelling in readme | 2020-07-16 15:15:43 -05:00 |
| Karl Seguin | eab9dbaa7f | update readme to /v2 | 2020-06-29 20:50:55 +08:00 |

7 changed files with 139 additions and 7 deletions

View File

@@ -54,13 +54,18 @@ func (b *bucket) delete(key string) *Item {
// the item from the map. I'm pretty sure this is 100% fine, but it is unique.
// (We do this so that the write to the channel is under the read lock and not the
// write lock)
-func (b *bucket) deletePrefix(prefix string, deletables chan *Item) int {
+func (b *bucket) deleteFunc(matches func(key string, item interface{}) bool, deletables chan *Item) int {
lookup := b.lookup
-items := make([]*Item, 0, len(lookup)/10)
+b.RLock()
+l := len(lookup)
+b.RUnlock()
+items := make([]*Item, 0, l/10)
b.RLock()
for key, item := range lookup {
-if strings.HasPrefix(key, prefix) {
+if matches(key, item) {
deletables <- item
items = append(items, item)
}
@@ -80,6 +85,12 @@ func (b *bucket) deletePrefix(prefix string, deletables chan *Item) int {
return len(items)
}
+func (b *bucket) deletePrefix(prefix string, deletables chan *Item) int {
+return b.deleteFunc(func(key string, item interface{}) bool {
+return strings.HasPrefix(key, prefix)
+}, deletables)
+}
func (b *bucket) clear() {
b.Lock()
b.lookup = make(map[string]*Item)
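
The comment at the top of this hunk describes a two-phase delete: matching items are collected, and written to the `deletables` channel, while only the read lock is held; the actual map mutation then happens under the write lock (that second phase sits below this hunk). The following is a minimal sketch of that pattern for orientation only, not the file's actual code; the trimmed-down `bucket` and `Item` types are illustrative, and in ccache itself a background worker drains `deletables`.

```go
package bucketsketch

import "sync"

// Item stands in for ccache's item type; only the key matters for this sketch.
type Item struct {
	key string
}

type bucket struct {
	sync.RWMutex
	lookup map[string]*Item
}

// deleteMatching shows the two-phase shape: phase 1 scans and enqueues under
// the read lock, phase 2 removes the collected items under the write lock.
func (b *bucket) deleteMatching(matches func(key string, item *Item) bool, deletables chan<- *Item) int {
	b.RLock()
	items := make([]*Item, 0, len(b.lookup)/10)
	for key, item := range b.lookup {
		if matches(key, item) {
			deletables <- item // sent while holding only the read lock
			items = append(items, item)
		}
	}
	b.RUnlock()

	b.Lock()
	for _, item := range items {
		delete(b.lookup, item.key)
	}
	b.Unlock()
	return len(items)
}
```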

View File

@@ -63,6 +63,15 @@ func (c *Cache) DeletePrefix(prefix string) int {
return count
}
+// Deletes all items for which the matches func evaluates to true.
+func (c *Cache) DeleteFunc(matches func(key string, item interface{}) bool) int {
+count := 0
+for _, b := range c.buckets {
+count += b.deleteFunc(matches, c.deletables)
+}
+return count
+}
// Get an item from the cache. Returns nil if the item wasn't found.
// This can return an expired item. Use item.Expired() to see if the item
// is expired and item.TTL() to see how long until the item expires (which
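
For callers, the new `DeleteFunc` reads like a predicate-driven bulk delete. The snippet below is a hedged usage sketch rather than code from the repository: it assumes the `/v2` module path introduced in the README change further down, and the keys and values are invented.

```go
package main

import (
	"fmt"
	"strings"
	"time"

	"github.com/karlseguin/ccache/v2"
)

func main() {
	cache := ccache.New(ccache.Configure())
	cache.Set("user:4", "goku", time.Minute)
	cache.Set("user:5", "vegeta", time.Minute)
	cache.Set("session:9", "xyz", time.Minute)

	// Delete every entry whose key starts with "user:". The second argument
	// is the stored *ccache.Item, so a predicate can also inspect the value
	// via a type assertion, as the tests below do.
	deleted := cache.DeleteFunc(func(key string, item interface{}) bool {
		return strings.HasPrefix(key, "user:")
	})
	fmt.Println(deleted) // 2
}
```

For a plain prefix match, the existing `DeletePrefix` (kept as context above) remains the shorthand; `DeleteFunc` is the general form it now delegates to at the bucket level.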

View File

@@ -51,6 +51,35 @@ func (_ CacheTests) DeletesAPrefix() {
Expect(cache.ItemCount()).To.Equal(2)
}
+func (_ CacheTests) DeletesAFunc() {
+cache := New(Configure())
+Expect(cache.ItemCount()).To.Equal(0)
+cache.Set("a", 1, time.Minute)
+cache.Set("b", 2, time.Minute)
+cache.Set("c", 3, time.Minute)
+cache.Set("d", 4, time.Minute)
+cache.Set("e", 5, time.Minute)
+cache.Set("f", 6, time.Minute)
+Expect(cache.ItemCount()).To.Equal(6)
+Expect(cache.DeleteFunc(func(key string, item interface{}) bool {
+return false
+})).To.Equal(0)
+Expect(cache.ItemCount()).To.Equal(6)
+Expect(cache.DeleteFunc(func(key string, item interface{}) bool {
+return item.(*Item).Value().(int) < 4
+})).To.Equal(3)
+Expect(cache.ItemCount()).To.Equal(3)
+Expect(cache.DeleteFunc(func(key string, item interface{}) bool {
+return key == "d"
+})).To.Equal(1)
+Expect(cache.ItemCount()).To.Equal(2)
+}
func (_ CacheTests) OnDeleteCallbackCalled() {
onDeleteFnCalled := false
onDeleteFn := func(item *Item) {

View File

@@ -61,6 +61,26 @@ func (b *layeredBucket) delete(primary, secondary string) *Item {
return bucket.delete(secondary)
}
+func (b *layeredBucket) deletePrefix(primary, prefix string, deletables chan *Item) int {
+b.RLock()
+bucket, exists := b.buckets[primary]
+b.RUnlock()
+if exists == false {
+return 0
+}
+return bucket.deletePrefix(prefix, deletables)
+}
+func (b *layeredBucket) deleteFunc(primary string, matches func(key string, item interface{}) bool, deletables chan *Item) int {
+b.RLock()
+bucket, exists := b.buckets[primary]
+b.RUnlock()
+if exists == false {
+return 0
+}
+return bucket.deleteFunc(matches, deletables)
+}
func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool {
b.RLock()
bucket, exists := b.buckets[primary]

View File

@@ -149,6 +149,16 @@ func (c *LayeredCache) DeleteAll(primary string) bool {
return c.bucket(primary).deleteAll(primary, c.deletables)
}
+// Deletes all items that share the same primary key and prefix.
+func (c *LayeredCache) DeletePrefix(primary, prefix string) int {
+return c.bucket(primary).deletePrefix(primary, prefix, c.deletables)
+}
+// Deletes all items that share the same primary key and for which the matches func evaluates to true.
+func (c *LayeredCache) DeleteFunc(primary string, matches func(key string, item interface{}) bool) int {
+return c.bucket(primary).deleteFunc(primary, matches, c.deletables)
+}
//this isn't thread safe. It's meant to be called from non-concurrent tests
func (c *LayeredCache) Clear() {
for _, bucket := range c.buckets {
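
The two LayeredCache additions mirror the flat-cache API but scope the operation to a single primary key. Another hedged usage sketch, not repository code: it assumes the layered cache is constructed with `ccache.Layered`, as the project README documents, and the primary and secondary keys are invented.

```go
package main

import (
	"fmt"
	"time"

	"github.com/karlseguin/ccache/v2"
)

func main() {
	cache := ccache.Layered(ccache.Configure())
	cache.Set("users", "4:name", "goku", time.Minute)
	cache.Set("users", "4:email", "goku@example.com", time.Minute)
	cache.Set("users", "5:name", "vegeta", time.Minute)

	// Drop every secondary key under "users" that starts with "4:".
	fmt.Println(cache.DeletePrefix("users", "4:")) // 2

	// Drop the remaining "users" entries whose stored value matches a predicate.
	fmt.Println(cache.DeleteFunc("users", func(key string, item interface{}) bool {
		return item.(*ccache.Item).Value() == "vegeta"
	})) // 1
}
```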

View File

@@ -71,6 +71,59 @@ func (_ *LayeredCacheTests) DeletesAValue() {
Expect(cache.ItemCount()).To.Equal(2)
}
+func (_ *LayeredCacheTests) DeletesAPrefix() {
+cache := newLayered()
+Expect(cache.ItemCount()).To.Equal(0)
+cache.Set("spice", "aaa", "1", time.Minute)
+cache.Set("spice", "aab", "2", time.Minute)
+cache.Set("spice", "aac", "3", time.Minute)
+cache.Set("leto", "aac", "3", time.Minute)
+cache.Set("spice", "ac", "4", time.Minute)
+cache.Set("spice", "z5", "7", time.Minute)
+Expect(cache.ItemCount()).To.Equal(6)
+Expect(cache.DeletePrefix("spice", "9a")).To.Equal(0)
+Expect(cache.ItemCount()).To.Equal(6)
+Expect(cache.DeletePrefix("spice", "aa")).To.Equal(3)
+Expect(cache.Get("spice", "aaa")).To.Equal(nil)
+Expect(cache.Get("spice", "aab")).To.Equal(nil)
+Expect(cache.Get("spice", "aac")).To.Equal(nil)
+Expect(cache.Get("spice", "ac").Value()).To.Equal("4")
+Expect(cache.Get("spice", "z5").Value()).To.Equal("7")
+Expect(cache.ItemCount()).To.Equal(3)
+}
+func (_ *LayeredCacheTests) DeletesAFunc() {
+cache := newLayered()
+Expect(cache.ItemCount()).To.Equal(0)
+cache.Set("spice", "a", 1, time.Minute)
+cache.Set("leto", "b", 2, time.Minute)
+cache.Set("spice", "c", 3, time.Minute)
+cache.Set("spice", "d", 4, time.Minute)
+cache.Set("spice", "e", 5, time.Minute)
+cache.Set("spice", "f", 6, time.Minute)
+Expect(cache.ItemCount()).To.Equal(6)
+Expect(cache.DeleteFunc("spice", func(key string, item interface{}) bool {
+return false
+})).To.Equal(0)
+Expect(cache.ItemCount()).To.Equal(6)
+Expect(cache.DeleteFunc("spice", func(key string, item interface{}) bool {
+return item.(*Item).Value().(int) < 4
+})).To.Equal(2)
+Expect(cache.ItemCount()).To.Equal(4)
+Expect(cache.DeleteFunc("spice", func(key string, item interface{}) bool {
+return key == "d"
+})).To.Equal(1)
+Expect(cache.ItemCount()).To.Equal(3)
+}
func (_ *LayeredCacheTests) OnDeleteCallbackCalled() {
onDeleteFnCalled := false

View File

@@ -14,7 +14,7 @@ Unless otherwise stated, all methods are thread-safe.
First, download the project:
```go
-go get github.com/karlseguin/ccache
+go get github.com/karlseguin/ccache/v2
```
## Configuration
@@ -23,7 +23,7 @@ Next, import and create a `Cache` instance:
```go
import (
"github.com/karlseguin/ccache"
"github.com/karlseguin/ccache/v2"
)
var cache = ccache.New(ccache.Configure())
@@ -87,7 +87,7 @@ item, err := cache.Fetch("user:4", time.Minute * 10, func() (interface{}, error)
```
### Delete
-`Delete` expects the key to delete. It's ok to call `Delete` on a non-existant key:
+`Delete` expects the key to delete. It's ok to call `Delete` on a non-existent key:
```go
cache.Delete("user:4")
@@ -117,7 +117,7 @@ You can get the number of keys evicted due to memory pressure by calling `GetDro
```go
dropped := cache.GetDropped()
```
-The counter is reset on every call. If the cache's gc is running, `GetDropped` waits for it to finish; it's meant ot be called asynchronously for statistics /monitoring purposes.
+The counter is reset on every call. If the cache's gc is running, `GetDropped` waits for it to finish; it's meant to be called asynchronously for statistics /monitoring purposes.
### Stop
The cache's background worker can be stopped by calling `Stop`. Once `Stop` is called