Initial pass at leveraging generics
Still need to replace the linked list with a generic linked list and want to remove the dependency on the expect package.
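For orientation, a minimal sketch of what the API in this diff looks like from a caller's side once this pass lands (the import path follows the go.mod change below; the MaxSize value is just an illustration):

package main

import (
	"fmt"
	"time"

	"github.com/karlseguin/ccache/v3" // module path introduced in the go.mod change below
)

func main() {
	// Configure and New are now parameterized by the value type, so reads
	// come back as string with no interface{} assertion.
	cache := ccache.New[string](ccache.Configure[string]().MaxSize(1000))
	defer cache.Stop()

	cache.Set("spice", "flow", time.Minute)
	if item := cache.Get("spice"); item != nil && !item.Expired() {
		fmt.Println(item.Value())
	}
}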
Makefile (15 lines changed)

@@ -1,5 +1,18 @@
+.PHONY: t
 t:
-	go test ./... -race -count=1
+	go test ./...
 
+.PHONY: f
 f:
 	go fmt ./...
+
+
+.PHONY: c
+c:
+	go test -race -covermode=atomic ./... -coverprofile=cover.out && \
+	# go tool cover -html=cover.out && \
+	go tool cover -func cover.out \
+		| grep -vP '[89]\d\.\d%' | grep -v '100.0%' \
+		|| true
+
+	rm cover.out
bucket.go (26 lines changed)

@@ -6,18 +6,18 @@ import (
 	"time"
 )
 
-type bucket struct {
+type bucket[T any] struct {
 	sync.RWMutex
-	lookup map[string]*Item
+	lookup map[string]*Item[T]
 }
 
-func (b *bucket) itemCount() int {
+func (b *bucket[T]) itemCount() int {
 	b.RLock()
 	defer b.RUnlock()
 	return len(b.lookup)
 }
 
-func (b *bucket) forEachFunc(matches func(key string, item *Item) bool) bool {
+func (b *bucket[T]) forEachFunc(matches func(key string, item *Item[T]) bool) bool {
 	lookup := b.lookup
 	b.RLock()
 	defer b.RUnlock()
@@ -29,13 +29,13 @@ func (b *bucket) forEachFunc(matches func(key string, item *Item) bool) bool {
 	return true
 }
 
-func (b *bucket) get(key string) *Item {
+func (b *bucket[T]) get(key string) *Item[T] {
 	b.RLock()
 	defer b.RUnlock()
 	return b.lookup[key]
 }
 
-func (b *bucket) set(key string, value interface{}, duration time.Duration, track bool) (*Item, *Item) {
+func (b *bucket[T]) set(key string, value T, duration time.Duration, track bool) (*Item[T], *Item[T]) {
 	expires := time.Now().Add(duration).UnixNano()
 	item := newItem(key, value, expires, track)
 	b.Lock()
@@ -45,7 +45,7 @@ func (b *bucket) set(key string, value interface{}, duration time.Duration, trac
 	return item, existing
 }
 
-func (b *bucket) delete(key string) *Item {
+func (b *bucket[T]) delete(key string) *Item[T] {
 	b.Lock()
 	item := b.lookup[key]
 	delete(b.lookup, key)
@@ -66,9 +66,9 @@ func (b *bucket) delete(key string) *Item {
 // the item from the map. I'm pretty sure this is 100% fine, but it is unique.
 // (We do this so that the write to the channel is under the read lock and not the
 // write lock)
-func (b *bucket) deleteFunc(matches func(key string, item *Item) bool, deletables chan *Item) int {
+func (b *bucket[T]) deleteFunc(matches func(key string, item *Item[T]) bool, deletables chan *Item[T]) int {
 	lookup := b.lookup
-	items := make([]*Item, 0)
+	items := make([]*Item[T], 0)
 
 	b.RLock()
 	for key, item := range lookup {
@@ -92,14 +92,14 @@ func (b *bucket) deleteFunc(matches func(key string, item *Item) bool, deletable
 	return len(items)
 }
 
-func (b *bucket) deletePrefix(prefix string, deletables chan *Item) int {
+func (b *bucket[T]) deletePrefix(prefix string, deletables chan *Item[T]) int {
-	return b.deleteFunc(func(key string, item *Item) bool {
+	return b.deleteFunc(func(key string, item *Item[T]) bool {
 		return strings.HasPrefix(key, prefix)
 	}, deletables)
 }
 
-func (b *bucket) clear() {
+func (b *bucket[T]) clear() {
 	b.Lock()
-	b.lookup = make(map[string]*Item)
+	b.lookup = make(map[string]*Item[T])
 	b.Unlock()
 }
bucket_test.go

@@ -33,7 +33,7 @@ func (_ *BucketTests) DeleteItemFromBucket() {
 
 func (_ *BucketTests) SetsANewBucketItem() {
 	bucket := testBucket()
-	item, existing := bucket.set("spice", TestValue("flow"), time.Minute, false)
+	item, existing := bucket.set("spice", "flow", time.Minute, false)
 	assertValue(item, "flow")
 	item = bucket.get("spice")
 	assertValue(item, "flow")
@@ -42,29 +42,22 @@ func (_ *BucketTests) SetsANewBucketItem() {
 
 func (_ *BucketTests) SetsAnExistingItem() {
 	bucket := testBucket()
-	item, existing := bucket.set("power", TestValue("9001"), time.Minute, false)
+	item, existing := bucket.set("power", "9001", time.Minute, false)
 	assertValue(item, "9001")
 	item = bucket.get("power")
 	assertValue(item, "9001")
 	assertValue(existing, "9000")
 }
 
-func testBucket() *bucket {
-	b := &bucket{lookup: make(map[string]*Item)}
-	b.lookup["power"] = &Item{
+func testBucket() *bucket[string] {
+	b := &bucket[string]{lookup: make(map[string]*Item[string])}
+	b.lookup["power"] = &Item[string]{
 		key: "power",
-		value: TestValue("9000"),
+		value: "9000",
 	}
 	return b
 }
 
-func assertValue(item *Item, expected string) {
-	value := item.value.(TestValue)
-	Expect(value).To.Equal(TestValue(expected))
-}
-
-type TestValue string
-
-func (v TestValue) Expires() time.Time {
-	return time.Now()
+func assertValue(item *Item[string], expected string) {
+	Expect(item.value).To.Equal(expected)
 }
cache.go (92 lines changed)

@@ -35,37 +35,37 @@ type gc struct {
 	done chan struct{}
 }
 
-type Cache struct {
+type Cache[T any] struct {
-	*Configuration
+	*Configuration[T]
 	list *list.List
 	size int64
-	buckets []*bucket
+	buckets []*bucket[T]
 	bucketMask uint32
-	deletables chan *Item
+	deletables chan *Item[T]
-	promotables chan *Item
+	promotables chan *Item[T]
 	control chan interface{}
 }
 
 // Create a new cache with the specified configuration
 // See ccache.Configure() for creating a configuration
-func New(config *Configuration) *Cache {
+func New[T any](config *Configuration[T]) *Cache[T] {
-	c := &Cache{
+	c := &Cache[T]{
 		list: list.New(),
 		Configuration: config,
 		bucketMask: uint32(config.buckets) - 1,
-		buckets: make([]*bucket, config.buckets),
+		buckets: make([]*bucket[T], config.buckets),
 		control: make(chan interface{}),
 	}
 	for i := 0; i < config.buckets; i++ {
-		c.buckets[i] = &bucket{
+		c.buckets[i] = &bucket[T]{
-			lookup: make(map[string]*Item),
+			lookup: make(map[string]*Item[T]),
 		}
 	}
 	c.restart()
 	return c
 }
 
-func (c *Cache) ItemCount() int {
+func (c *Cache[T]) ItemCount() int {
 	count := 0
 	for _, b := range c.buckets {
 		count += b.itemCount()
@@ -73,7 +73,7 @@ func (c *Cache) ItemCount() int {
 	return count
 }
 
-func (c *Cache) DeletePrefix(prefix string) int {
+func (c *Cache[T]) DeletePrefix(prefix string) int {
 	count := 0
 	for _, b := range c.buckets {
 		count += b.deletePrefix(prefix, c.deletables)
@@ -82,7 +82,7 @@ func (c *Cache) DeletePrefix(prefix string) int {
 }
 
 // Deletes all items that the matches func evaluates to true.
-func (c *Cache) DeleteFunc(matches func(key string, item *Item) bool) int {
+func (c *Cache[T]) DeleteFunc(matches func(key string, item *Item[T]) bool) int {
 	count := 0
 	for _, b := range c.buckets {
 		count += b.deleteFunc(matches, c.deletables)
@@ -90,7 +90,7 @@ func (c *Cache) DeleteFunc(matches func(key string, item *Item) bool) int {
 	return count
 }
 
-func (c *Cache) ForEachFunc(matches func(key string, item *Item) bool) {
+func (c *Cache[T]) ForEachFunc(matches func(key string, item *Item[T]) bool) {
 	for _, b := range c.buckets {
 		if !b.forEachFunc(matches) {
 			break
@@ -102,7 +102,7 @@ func (c *Cache) ForEachFunc(matches func(key string, item *Item) bool) {
 // This can return an expired item. Use item.Expired() to see if the item
 // is expired and item.TTL() to see how long until the item expires (which
 // will be negative for an already expired item).
-func (c *Cache) Get(key string) *Item {
+func (c *Cache[T]) Get(key string) *Item[T] {
 	item := c.bucket(key).get(key)
 	if item == nil {
 		return nil
@@ -118,10 +118,10 @@ func (c *Cache) Get(key string) *Item {
 
 // Used when the cache was created with the Track() configuration option.
 // Avoid otherwise
-func (c *Cache) TrackingGet(key string) TrackedItem {
+func (c *Cache[T]) TrackingGet(key string) TrackedItem[T] {
 	item := c.Get(key)
 	if item == nil {
-		return NilTracked
+		return nil
 	}
 	item.track()
 	return item
@@ -129,19 +129,19 @@ func (c *Cache) TrackingGet(key string) TrackedItem {
 
 // Used when the cache was created with the Track() configuration option.
 // Sets the item, and returns a tracked reference to it.
-func (c *Cache) TrackingSet(key string, value interface{}, duration time.Duration) TrackedItem {
+func (c *Cache[T]) TrackingSet(key string, value T, duration time.Duration) TrackedItem[T] {
 	return c.set(key, value, duration, true)
 }
 
 // Set the value in the cache for the specified duration
-func (c *Cache) Set(key string, value interface{}, duration time.Duration) {
+func (c *Cache[T]) Set(key string, value T, duration time.Duration) {
 	c.set(key, value, duration, false)
 }
 
 // Replace the value if it exists, does not set if it doesn't.
 // Returns true if the item existed an was replaced, false otherwise.
 // Replace does not reset item's TTL
-func (c *Cache) Replace(key string, value interface{}) bool {
+func (c *Cache[T]) Replace(key string, value T) bool {
 	item := c.bucket(key).get(key)
 	if item == nil {
 		return false
@@ -156,7 +156,7 @@ func (c *Cache) Replace(key string, value interface{}) bool {
 // Note that Fetch merely calls the public Get and Set functions. If you want
 // a different Fetch behavior, such as thundering herd protection or returning
 // expired items, implement it in your application.
-func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
+func (c *Cache[T]) Fetch(key string, duration time.Duration, fetch func() (T, error)) (*Item[T], error) {
 	item := c.Get(key)
 	if item != nil && !item.Expired() {
 		return item, nil
@@ -169,7 +169,7 @@ func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interfac
 }
 
 // Remove the item from the cache, return true if the item was present, false otherwise.
-func (c *Cache) Delete(key string) bool {
+func (c *Cache[T]) Delete(key string) bool {
 	item := c.bucket(key).delete(key)
 	if item != nil {
 		c.deletables <- item
@@ -180,7 +180,7 @@ func (c *Cache) Delete(key string) bool {
 
 // Clears the cache
 // This is a control command.
-func (c *Cache) Clear() {
+func (c *Cache[T]) Clear() {
 	done := make(chan struct{})
 	c.control <- clear{done: done}
 	<-done
@@ -189,7 +189,7 @@ func (c *Cache) Clear() {
 // Stops the background worker. Operations performed on the cache after Stop
 // is called are likely to panic
 // This is a control command.
-func (c *Cache) Stop() {
+func (c *Cache[T]) Stop() {
 	close(c.promotables)
 	<-c.control
 }
@@ -197,7 +197,7 @@ func (c *Cache) Stop() {
 // Gets the number of items removed from the cache due to memory pressure since
 // the last time GetDropped was called
 // This is a control command.
-func (c *Cache) GetDropped() int {
+func (c *Cache[T]) GetDropped() int {
 	return doGetDropped(c.control)
 }
 
@@ -221,7 +221,7 @@ func doGetDropped(controlCh chan<- interface{}) int {
 // now calling SyncUpdates. If other goroutines are using the cache at the same time, there is
 // no way to know whether any of them still have pending state updates when SyncUpdates returns.
 // This is a control command.
-func (c *Cache) SyncUpdates() {
+func (c *Cache[T]) SyncUpdates() {
 	doSyncUpdates(c.control)
 }
 
@@ -234,7 +234,7 @@ func doSyncUpdates(controlCh chan<- interface{}) {
 // Sets a new max size. That can result in a GC being run if the new maxium size
 // is smaller than the cached size
 // This is a control command.
-func (c *Cache) SetMaxSize(size int64) {
+func (c *Cache[T]) SetMaxSize(size int64) {
 	done := make(chan struct{})
 	c.control <- setMaxSize{size: size, done: done}
 	<-done
@@ -243,7 +243,7 @@ func (c *Cache) SetMaxSize(size int64) {
 // Forces GC. There should be no reason to call this function, except from tests
 // which require synchronous GC.
 // This is a control command.
-func (c *Cache) GC() {
+func (c *Cache[T]) GC() {
 	done := make(chan struct{})
 	c.control <- gc{done: done}
 	<-done
@@ -253,25 +253,25 @@ func (c *Cache) GC() {
 // by the worker goroutine. It's meant to be called periodically for metrics, or
 // from tests.
 // This is a control command.
-func (c *Cache) GetSize() int64 {
+func (c *Cache[T]) GetSize() int64 {
 	res := make(chan int64)
 	c.control <- getSize{res}
 	return <-res
 }
 
-func (c *Cache) restart() {
+func (c *Cache[T]) restart() {
-	c.deletables = make(chan *Item, c.deleteBuffer)
+	c.deletables = make(chan *Item[T], c.deleteBuffer)
-	c.promotables = make(chan *Item, c.promoteBuffer)
+	c.promotables = make(chan *Item[T], c.promoteBuffer)
 	c.control = make(chan interface{})
 	go c.worker()
 }
 
-func (c *Cache) deleteItem(bucket *bucket, item *Item) {
+func (c *Cache[T]) deleteItem(bucket *bucket[T], item *Item[T]) {
 	bucket.delete(item.key) //stop other GETs from getting it
 	c.deletables <- item
 }
 
-func (c *Cache) set(key string, value interface{}, duration time.Duration, track bool) *Item {
+func (c *Cache[T]) set(key string, value T, duration time.Duration, track bool) *Item[T] {
 	item, existing := c.bucket(key).set(key, value, duration, track)
 	if existing != nil {
 		c.deletables <- existing
@@ -280,16 +280,16 @@ func (c *Cache) set(key string, value interface{}, duration time.Duration, track
 	return item
 }
 
-func (c *Cache) bucket(key string) *bucket {
+func (c *Cache[T]) bucket(key string) *bucket[T] {
 	h := fnv.New32a()
 	h.Write([]byte(key))
 	return c.buckets[h.Sum32()&c.bucketMask]
 }
 
-func (c *Cache) worker() {
+func (c *Cache[T]) worker() {
 	defer close(c.control)
 	dropped := 0
-	promoteItem := func(item *Item) {
+	promoteItem := func(item *Item[T]) {
 		if c.doPromote(item) && c.size > c.maxSize {
 			dropped += c.gc()
 		}
@@ -351,11 +351,11 @@ drain:
 // blocking. If some other goroutine sends an item on either channel after this method has
 // finished receiving, that's OK, because SyncUpdates only guarantees processing of values
 // that were already sent by the same goroutine.
-func doAllPendingPromotesAndDeletes(
+func doAllPendingPromotesAndDeletes[T any](
-	promotables <-chan *Item,
+	promotables <-chan *Item[T],
-	promoteFn func(*Item),
+	promoteFn func(*Item[T]),
-	deletables <-chan *Item,
+	deletables <-chan *Item[T],
-	deleteFn func(*Item),
+	deleteFn func(*Item[T]),
 ) {
 doAllPromotes:
 	for {
@@ -379,7 +379,7 @@ doAllDeletes:
 		}
 	}
 
-func (c *Cache) doDelete(item *Item) {
+func (c *Cache[T]) doDelete(item *Item[T]) {
 	if item.element == nil {
 		item.promotions = -2
 	} else {
@@ -391,7 +391,7 @@ func (c *Cache) doDelete(item *Item) {
 	}
 }
 
-func (c *Cache) doPromote(item *Item) bool {
+func (c *Cache[T]) doPromote(item *Item[T]) bool {
 	//already deleted
 	if item.promotions == -2 {
 		return false
@@ -409,7 +409,7 @@ func (c *Cache) doPromote(item *Item) bool {
 	return true
 }
 
-func (c *Cache) gc() int {
+func (c *Cache[T]) gc() int {
 	dropped := 0
 	element := c.list.Back()
 
@@ -423,7 +423,7 @@ func (c *Cache) gc() int {
 		return dropped
 	}
 	prev := element.Prev()
-	item := element.Value.(*Item)
+	item := element.Value.(*Item[T])
 	if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 {
 		c.bucket(item.key).delete(item.key)
 		c.size -= item.size
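A hedged sketch of how the new Fetch signature reads from the caller's side — the loader is now func() (T, error) rather than func() (interface{}, error), so the fetched value is typed end to end (the key and values here are made up for illustration):

package main

import (
	"fmt"
	"time"

	"github.com/karlseguin/ccache/v3" // assumes the v3 module path from this commit's go.mod change
)

func main() {
	cache := ccache.New[int](ccache.Configure[int]())
	defer cache.Stop()

	// The loader returns a plain int; no interface{} boxing or .(int) assertion.
	item, err := cache.Fetch("answer", time.Minute, func() (int, error) {
		return 42, nil
	})
	if err == nil {
		fmt.Println(item.Value() + 1) // typed arithmetic on the cached value
	}
}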
cache_test.go

@@ -17,7 +17,7 @@ func Test_Cache(t *testing.T) {
 }
 
 func (_ CacheTests) DeletesAValue() {
-	cache := New(Configure())
+	cache := New[string](Configure[string]())
 	defer cache.Stop()
 	Expect(cache.ItemCount()).To.Equal(0)
 
@@ -32,7 +32,7 @@ func (_ CacheTests) DeletesAValue() {
 }
 
 func (_ CacheTests) DeletesAPrefix() {
-	cache := New(Configure())
+	cache := New[string](Configure[string]())
 	defer cache.Stop()
 	Expect(cache.ItemCount()).To.Equal(0)
 
@@ -56,7 +56,7 @@ func (_ CacheTests) DeletesAPrefix() {
 }
 
 func (_ CacheTests) DeletesAFunc() {
-	cache := New(Configure())
+	cache := New[int](Configure[int]())
 	defer cache.Stop()
 	Expect(cache.ItemCount()).To.Equal(0)
 
@@ -68,17 +68,17 @@ func (_ CacheTests) DeletesAFunc() {
 	cache.Set("f", 6, time.Minute)
 	Expect(cache.ItemCount()).To.Equal(6)
 
-	Expect(cache.DeleteFunc(func(key string, item *Item) bool {
+	Expect(cache.DeleteFunc(func(key string, item *Item[int]) bool {
 		return false
 	})).To.Equal(0)
 	Expect(cache.ItemCount()).To.Equal(6)
 
-	Expect(cache.DeleteFunc(func(key string, item *Item) bool {
+	Expect(cache.DeleteFunc(func(key string, item *Item[int]) bool {
-		return item.Value().(int) < 4
+		return item.Value() < 4
 	})).To.Equal(3)
 	Expect(cache.ItemCount()).To.Equal(3)
 
-	Expect(cache.DeleteFunc(func(key string, item *Item) bool {
+	Expect(cache.DeleteFunc(func(key string, item *Item[int]) bool {
 		return key == "d"
 	})).To.Equal(1)
 	Expect(cache.ItemCount()).To.Equal(2)
@@ -87,13 +87,13 @@ func (_ CacheTests) DeletesAFunc() {
 
 func (_ CacheTests) OnDeleteCallbackCalled() {
 	onDeleteFnCalled := int32(0)
-	onDeleteFn := func(item *Item) {
+	onDeleteFn := func(item *Item[string]) {
 		if item.key == "spice" {
 			atomic.AddInt32(&onDeleteFnCalled, 1)
 		}
 	}
 
-	cache := New(Configure().OnDelete(onDeleteFn))
+	cache := New[string](Configure[string]().OnDelete(onDeleteFn))
 	cache.Set("spice", "flow", time.Minute)
 	cache.Set("worm", "sand", time.Minute)
 
@@ -108,8 +108,8 @@ func (_ CacheTests) OnDeleteCallbackCalled() {
 }
 
 func (_ CacheTests) FetchesExpiredItems() {
-	cache := New(Configure())
+	cache := New[string](Configure[string]())
-	fn := func() (interface{}, error) { return "moo-moo", nil }
+	fn := func() (string, error) { return "moo-moo", nil }
 
 	cache.Set("beef", "moo", time.Second*-1)
 	Expect(cache.Get("beef").Value()).To.Equal("moo")
@@ -119,7 +119,7 @@ func (_ CacheTests) FetchesExpiredItems() {
 }
 
 func (_ CacheTests) GCsTheOldestItems() {
-	cache := New(Configure().ItemsToPrune(10))
+	cache := New[int](Configure[int]().ItemsToPrune(10))
 	for i := 0; i < 500; i++ {
 		cache.Set(strconv.Itoa(i), i, time.Minute)
 	}
@@ -131,7 +131,7 @@ func (_ CacheTests) GCsTheOldestItems() {
 }
 
 func (_ CacheTests) PromotedItemsDontGetPruned() {
-	cache := New(Configure().ItemsToPrune(10).GetsPerPromote(1))
+	cache := New[int](Configure[int]().ItemsToPrune(10).GetsPerPromote(1))
 	for i := 0; i < 500; i++ {
 		cache.Set(strconv.Itoa(i), i, time.Minute)
 	}
@@ -145,7 +145,7 @@ func (_ CacheTests) PromotedItemsDontGetPruned() {
 }
 
 func (_ CacheTests) TrackerDoesNotCleanupHeldInstance() {
-	cache := New(Configure().ItemsToPrune(11).Track())
+	cache := New[int](Configure[int]().ItemsToPrune(11).Track())
 	item0 := cache.TrackingSet("0", 0, time.Minute)
 	for i := 1; i < 11; i++ {
 		cache.Set(strconv.Itoa(i), i, time.Minute)
@@ -164,13 +164,13 @@ func (_ CacheTests) TrackerDoesNotCleanupHeldInstance() {
 
 func (_ CacheTests) RemovesOldestItemWhenFull() {
 	onDeleteFnCalled := false
-	onDeleteFn := func(item *Item) {
+	onDeleteFn := func(item *Item[int]) {
 		if item.key == "0" {
 			onDeleteFnCalled = true
 		}
 	}
 
-	cache := New(Configure().MaxSize(5).ItemsToPrune(1).OnDelete(onDeleteFn))
+	cache := New[int](Configure[int]().MaxSize(5).ItemsToPrune(1).OnDelete(onDeleteFn))
 	for i := 0; i < 7; i++ {
 		cache.Set(strconv.Itoa(i), i, time.Minute)
 	}
@@ -183,7 +183,7 @@ func (_ CacheTests) RemovesOldestItemWhenFull() {
 }
 
 func (_ CacheTests) RemovesOldestItemWhenFullBySizer() {
-	cache := New(Configure().MaxSize(9).ItemsToPrune(2))
+	cache := New[*SizedItem](Configure[*SizedItem]().MaxSize(9).ItemsToPrune(2))
 	for i := 0; i < 7; i++ {
 		cache.Set(strconv.Itoa(i), &SizedItem{i, 2}, time.Minute)
 	}
@@ -192,13 +192,13 @@ func (_ CacheTests) RemovesOldestItemWhenFullBySizer() {
 	Expect(cache.Get("1")).To.Equal(nil)
 	Expect(cache.Get("2")).To.Equal(nil)
 	Expect(cache.Get("3")).To.Equal(nil)
-	Expect(cache.Get("4").Value().(*SizedItem).id).To.Equal(4)
+	Expect(cache.Get("4").Value().id).To.Equal(4)
 	Expect(cache.GetDropped()).To.Equal(4)
 	Expect(cache.GetDropped()).To.Equal(0)
 }
 
 func (_ CacheTests) SetUpdatesSizeOnDelta() {
-	cache := New(Configure())
+	cache := New[*SizedItem](Configure[*SizedItem]())
 	cache.Set("a", &SizedItem{0, 2}, time.Minute)
 	cache.Set("b", &SizedItem{0, 3}, time.Minute)
 	cache.SyncUpdates()
@@ -218,7 +218,7 @@ func (_ CacheTests) SetUpdatesSizeOnDelta() {
 }
 
 func (_ CacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
-	cache := New(Configure())
+	cache := New[*SizedItem](Configure[*SizedItem]())
 	cache.Set("1", &SizedItem{1, 2}, time.Minute)
 	cache.Set("2", &SizedItem{1, 2}, time.Minute)
 	cache.Set("3", &SizedItem{1, 2}, time.Minute)
@@ -228,7 +228,7 @@ func (_ CacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
 }
 
 func (_ CacheTests) ReplaceChangesSize() {
-	cache := New(Configure())
+	cache := New[*SizedItem](Configure[*SizedItem]())
 	cache.Set("1", &SizedItem{1, 2}, time.Minute)
 	cache.Set("2", &SizedItem{1, 2}, time.Minute)
 
@@ -246,7 +246,7 @@ func (_ CacheTests) ReplaceChangesSize() {
 }
 
 func (_ CacheTests) ResizeOnTheFly() {
-	cache := New(Configure().MaxSize(9).ItemsToPrune(1))
+	cache := New[int](Configure[int]().MaxSize(9).ItemsToPrune(1))
 	for i := 0; i < 5; i++ {
 		cache.Set(strconv.Itoa(i), i, time.Minute)
 	}
@@ -278,8 +278,8 @@ func (_ CacheTests) ResizeOnTheFly() {
 }
 
 func (_ CacheTests) ForEachFunc() {
-	cache := New(Configure().MaxSize(3).ItemsToPrune(1))
+	cache := New[int](Configure[int]().MaxSize(3).ItemsToPrune(1))
-	Expect(forEachKeys(cache)).To.Equal([]string{})
+	Expect(forEachKeys[int](cache)).To.Equal([]string{})
 
 	cache.Set("1", 1, time.Minute)
 	Expect(forEachKeys(cache)).To.Equal([]string{"1"})
@@ -314,9 +314,9 @@ func (s *SizedItem) Size() int64 {
 	return s.s
 }
 
-func forEachKeys(cache *Cache) []string {
+func forEachKeys[T any](cache *Cache[T]) []string {
 	keys := make([]string, 0, 10)
-	cache.ForEachFunc(func(key string, i *Item) bool {
+	cache.ForEachFunc(func(key string, i *Item[T]) bool {
 		if key == "stop" {
 			return false
 		}
configuration.go

@@ -1,6 +1,6 @@
 package ccache
 
-type Configuration struct {
+type Configuration[T any] struct {
 	maxSize int64
 	buckets int
 	itemsToPrune int
@@ -8,14 +8,14 @@ type Configuration struct {
 	promoteBuffer int
 	getsPerPromote int32
 	tracking bool
-	onDelete func(item *Item)
+	onDelete func(item *Item[T])
 }
 
 // Creates a configuration object with sensible defaults
 // Use this as the start of the fluent configuration:
 // e.g.: ccache.New(ccache.Configure().MaxSize(10000))
-func Configure() *Configuration {
+func Configure[T any]() *Configuration[T] {
-	return &Configuration{
+	return &Configuration[T]{
 		buckets: 16,
 		itemsToPrune: 500,
 		deleteBuffer: 1024,
@@ -28,7 +28,7 @@ func Configure() *Configuration {
 
 // The max size for the cache
 // [5000]
-func (c *Configuration) MaxSize(max int64) *Configuration {
+func (c *Configuration[T]) MaxSize(max int64) *Configuration[T] {
 	c.maxSize = max
 	return c
 }
@@ -36,7 +36,7 @@ func (c *Configuration) MaxSize(max int64) *Configuration {
 // Keys are hashed into % bucket count to provide greater concurrency (every set
 // requires a write lock on the bucket). Must be a power of 2 (1, 2, 4, 8, 16, ...)
 // [16]
-func (c *Configuration) Buckets(count uint32) *Configuration {
+func (c *Configuration[T]) Buckets(count uint32) *Configuration[T] {
 	if count == 0 || ((count&(^count+1)) == count) == false {
 		count = 16
 	}
@@ -46,7 +46,7 @@ func (c *Configuration) Buckets(count uint32) *Configuration {
 
 // The number of items to prune when memory is low
 // [500]
-func (c *Configuration) ItemsToPrune(count uint32) *Configuration {
+func (c *Configuration[T]) ItemsToPrune(count uint32) *Configuration[T] {
 	c.itemsToPrune = int(count)
 	return c
 }
@@ -54,14 +54,14 @@ func (c *Configuration) ItemsToPrune(count uint32) *Configuration {
 // The size of the queue for items which should be promoted. If the queue fills
 // up, promotions are skipped
 // [1024]
-func (c *Configuration) PromoteBuffer(size uint32) *Configuration {
+func (c *Configuration[T]) PromoteBuffer(size uint32) *Configuration[T] {
 	c.promoteBuffer = int(size)
 	return c
 }
 
 // The size of the queue for items which should be deleted. If the queue fills
 // up, calls to Delete() will block
-func (c *Configuration) DeleteBuffer(size uint32) *Configuration {
+func (c *Configuration[T]) DeleteBuffer(size uint32) *Configuration[T] {
 	c.deleteBuffer = int(size)
 	return c
 }
@@ -70,7 +70,7 @@ func (c *Configuration) DeleteBuffer(size uint32) *Configuration {
 // to promote an item on every Get. GetsPerPromote specifies the number of Gets
 // a key must have before being promoted
 // [3]
-func (c *Configuration) GetsPerPromote(count int32) *Configuration {
+func (c *Configuration[T]) GetsPerPromote(count int32) *Configuration[T] {
 	c.getsPerPromote = count
 	return c
 }
@@ -89,7 +89,7 @@ func (c *Configuration) GetsPerPromote(count int32) *Configuration {
 // By turning tracking on and using the cache's TrackingGet, the cache
 // won't evict items which you haven't called Release() on. It's a simple reference
 // counter.
-func (c *Configuration) Track() *Configuration {
+func (c *Configuration[T]) Track() *Configuration[T] {
 	c.tracking = true
 	return c
 }
@@ -97,7 +97,7 @@ func (c *Configuration) Track() *Configuration {
 // OnDelete allows setting a callback function to react to ideam deletion.
 // This typically allows to do a cleanup of resources, such as calling a Close() on
 // cached object that require some kind of tear-down.
-func (c *Configuration) OnDelete(callback func(item *Item)) *Configuration {
+func (c *Configuration[T]) OnDelete(callback func(item *Item[T])) *Configuration[T] {
 	c.onDelete = callback
 	return c
 }
configuration_test.go

@@ -14,7 +14,7 @@ func Test_Configuration(t *testing.T) {
 
 func (_ *ConfigurationTests) BucketsPowerOf2() {
 	for i := uint32(0); i < 31; i++ {
-		c := Configure().Buckets(i)
+		c := Configure[int]().Buckets(i)
 		if i == 1 || i == 2 || i == 4 || i == 8 || i == 16 {
 			Expect(c.buckets).ToEqual(int(i))
 		} else {
go.mod (6 lines changed)

@@ -1,5 +1,7 @@
-module github.com/karlseguin/ccache/v2
+module github.com/karlseguin/ccache/v3
 
-go 1.13
+go 1.18
 
 require github.com/karlseguin/expect v1.0.7
+
+require github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 // indirect
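Because the module path moves from /v2 to /v3 and the language requirement jumps to Go 1.18, consumers would need to update their import path when picking this up; a sketch, assuming this work ships as a tagged v3 release:

package example

// Before (interface{}-based API):
//	import "github.com/karlseguin/ccache/v2"

// After this change (generic API, requires Go 1.18+):
import "github.com/karlseguin/ccache/v3"

// Reference the package so the import is used; Cache[string] is the new generic type.
var _ *ccache.Cache[string]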
item.go (56 lines changed)

@@ -11,8 +11,8 @@ type Sized interface {
 	Size() int64
 }
 
-type TrackedItem interface {
+type TrackedItem[T any] interface {
-	Value() interface{}
+	Value() T
 	Release()
 	Expired() bool
 	TTL() time.Duration
@@ -20,45 +20,25 @@ type TrackedItem interface {
 	Extend(duration time.Duration)
 }
 
-type nilItem struct{}
-
-func (n *nilItem) Value() interface{} { return nil }
-func (n *nilItem) Release() {}
-
-func (i *nilItem) Expired() bool {
-	return true
-}
-
-func (i *nilItem) TTL() time.Duration {
-	return time.Minute
-}
-
-func (i *nilItem) Expires() time.Time {
-	return time.Time{}
-}
-
-func (i *nilItem) Extend(duration time.Duration) {
-}
-
-var NilTracked = new(nilItem)
-
-type Item struct {
+type Item[T any] struct {
 	key string
 	group string
 	promotions int32
 	refCount int32
 	expires int64
 	size int64
-	value interface{}
+	value T
 	element *list.Element
 }
 
-func newItem(key string, value interface{}, expires int64, track bool) *Item {
+func newItem[T any](key string, value T, expires int64, track bool) *Item[T] {
 	size := int64(1)
-	if sized, ok := value.(Sized); ok {
+
+	// https://github.com/golang/go/issues/49206
+	if sized, ok := (interface{})(value).(Sized); ok {
 		size = sized.Size()
 	}
-	item := &Item{
+	item := &Item[T]{
 		key: key,
 		value: value,
 		promotions: 0,
@@ -71,39 +51,39 @@ func newItem(key string, value interface{}, expires int64, track bool) *Item {
 	return item
 }
 
-func (i *Item) shouldPromote(getsPerPromote int32) bool {
+func (i *Item[T]) shouldPromote(getsPerPromote int32) bool {
 	i.promotions += 1
 	return i.promotions == getsPerPromote
 }
 
-func (i *Item) Value() interface{} {
+func (i *Item[T]) Value() T {
 	return i.value
 }
 
-func (i *Item) track() {
+func (i *Item[T]) track() {
 	atomic.AddInt32(&i.refCount, 1)
 }
 
-func (i *Item) Release() {
+func (i *Item[T]) Release() {
 	atomic.AddInt32(&i.refCount, -1)
 }
 
-func (i *Item) Expired() bool {
+func (i *Item[T]) Expired() bool {
 	expires := atomic.LoadInt64(&i.expires)
 	return expires < time.Now().UnixNano()
 }
 
-func (i *Item) TTL() time.Duration {
+func (i *Item[T]) TTL() time.Duration {
 	expires := atomic.LoadInt64(&i.expires)
 	return time.Nanosecond * time.Duration(expires-time.Now().UnixNano())
}
 
-func (i *Item) Expires() time.Time {
+func (i *Item[T]) Expires() time.Time {
 	expires := atomic.LoadInt64(&i.expires)
 	return time.Unix(0, expires)
 }
 
-func (i *Item) Extend(duration time.Duration) {
+func (i *Item[T]) Extend(duration time.Duration) {
 	atomic.StoreInt64(&i.expires, time.Now().Add(duration).UnixNano())
 }
 
@@ -113,6 +93,6 @@ func (i *Item) Extend(duration time.Duration) {
 // purposes, and because otherwise including an Item in a call to fmt.Printf or
 // fmt.Sprintf expression could cause fields of the Item to be read in a non-thread-safe
 // way.
-func (i *Item) String() string {
+func (i *Item[T]) String() string {
 	return fmt.Sprintf("Item(%v)", i.value)
 }
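Two caller-visible points in item.go are worth spelling out: NilTracked and the nilItem type are gone, so a TrackingGet miss now returns a nil TrackedItem[T]; and newItem asserts Sized through an interface{} conversion because Go 1.18 does not allow a type assertion directly on a type-parameter value (golang/go#49206). A hedged sketch of a caller updated for the first point:

package main

import (
	"fmt"
	"time"

	"github.com/karlseguin/ccache/v3" // assumes the v3 module path from this commit's go.mod change
)

func main() {
	cache := ccache.New[string](ccache.Configure[string]().Track())
	defer cache.Stop()

	// A miss now yields nil instead of the removed NilTracked sentinel.
	if item := cache.TrackingGet("missing"); item == nil {
		fmt.Println("cache miss")
	}

	item := cache.TrackingSet("spice", "flow", time.Minute)
	fmt.Println(item.Value()) // typed string, no assertion needed
	item.Release()
}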
item_test.go (14 lines changed)

@@ -15,35 +15,35 @@ func Test_Item(t *testing.T) {
 }
 
 func (_ *ItemTests) Promotability() {
-	item := &Item{promotions: 4}
+	item := &Item[int]{promotions: 4}
 	Expect(item.shouldPromote(5)).To.Equal(true)
 	Expect(item.shouldPromote(5)).To.Equal(false)
 }
 
 func (_ *ItemTests) Expired() {
 	now := time.Now().UnixNano()
-	item1 := &Item{expires: now + (10 * int64(time.Millisecond))}
+	item1 := &Item[int]{expires: now + (10 * int64(time.Millisecond))}
-	item2 := &Item{expires: now - (10 * int64(time.Millisecond))}
+	item2 := &Item[int]{expires: now - (10 * int64(time.Millisecond))}
 	Expect(item1.Expired()).To.Equal(false)
 	Expect(item2.Expired()).To.Equal(true)
 }
 
 func (_ *ItemTests) TTL() {
 	now := time.Now().UnixNano()
-	item1 := &Item{expires: now + int64(time.Second)}
+	item1 := &Item[int]{expires: now + int64(time.Second)}
-	item2 := &Item{expires: now - int64(time.Second)}
+	item2 := &Item[int]{expires: now - int64(time.Second)}
 	Expect(int(math.Ceil(item1.TTL().Seconds()))).To.Equal(1)
 	Expect(int(math.Ceil(item2.TTL().Seconds()))).To.Equal(-1)
 }
 
 func (_ *ItemTests) Expires() {
 	now := time.Now().UnixNano()
-	item := &Item{expires: now + (10)}
+	item := &Item[int]{expires: now + (10)}
 	Expect(item.Expires().UnixNano()).To.Equal(now + 10)
 }
 
 func (_ *ItemTests) Extend() {
-	item := &Item{expires: time.Now().UnixNano() + 10}
+	item := &Item[int]{expires: time.Now().UnixNano() + 10}
 	item.Extend(time.Minute * 2)
 	Expect(item.Expires().Unix()).To.Equal(time.Now().Unix() + 120)
 }
layeredbucket.go

@@ -5,12 +5,12 @@ import (
 	"time"
 )
 
-type layeredBucket struct {
+type layeredBucket[T any] struct {
 	sync.RWMutex
-	buckets map[string]*bucket
+	buckets map[string]*bucket[T]
 }
 
-func (b *layeredBucket) itemCount() int {
+func (b *layeredBucket[T]) itemCount() int {
 	count := 0
 	b.RLock()
 	defer b.RUnlock()
@@ -20,7 +20,7 @@ func (b *layeredBucket) itemCount() int {
 	return count
 }
 
-func (b *layeredBucket) get(primary, secondary string) *Item {
+func (b *layeredBucket[T]) get(primary, secondary string) *Item[T] {
 	bucket := b.getSecondaryBucket(primary)
 	if bucket == nil {
 		return nil
@@ -28,7 +28,7 @@ func (b *layeredBucket) get(primary, secondary string) *Item {
 	return bucket.get(secondary)
 }
 
-func (b *layeredBucket) getSecondaryBucket(primary string) *bucket {
+func (b *layeredBucket[T]) getSecondaryBucket(primary string) *bucket[T] {
 	b.RLock()
 	bucket, exists := b.buckets[primary]
 	b.RUnlock()
@@ -38,11 +38,11 @@ func (b *layeredBucket) getSecondaryBucket(primary string) *bucket {
 	return bucket
 }
 
-func (b *layeredBucket) set(primary, secondary string, value interface{}, duration time.Duration, track bool) (*Item, *Item) {
+func (b *layeredBucket[T]) set(primary, secondary string, value T, duration time.Duration, track bool) (*Item[T], *Item[T]) {
 	b.Lock()
 	bkt, exists := b.buckets[primary]
 	if exists == false {
-		bkt = &bucket{lookup: make(map[string]*Item)}
+		bkt = &bucket[T]{lookup: make(map[string]*Item[T])}
 		b.buckets[primary] = bkt
 	}
 	b.Unlock()
@@ -51,7 +51,7 @@ func (b *layeredBucket) set(primary, secondary string, value interface{}, durati
 	return item, existing
 }
 
-func (b *layeredBucket) delete(primary, secondary string) *Item {
+func (b *layeredBucket[T]) delete(primary, secondary string) *Item[T] {
 	b.RLock()
 	bucket, exists := b.buckets[primary]
 	b.RUnlock()
@@ -61,7 +61,7 @@ func (b *layeredBucket) delete(primary, secondary string) *Item {
 	return bucket.delete(secondary)
 }
 
-func (b *layeredBucket) deletePrefix(primary, prefix string, deletables chan *Item) int {
+func (b *layeredBucket[T]) deletePrefix(primary, prefix string, deletables chan *Item[T]) int {
 	b.RLock()
 	bucket, exists := b.buckets[primary]
 	b.RUnlock()
@@ -71,7 +71,7 @@ func (b *layeredBucket) deletePrefix(primary, prefix string, deletables chan *It
 	return bucket.deletePrefix(prefix, deletables)
 }
 
-func (b *layeredBucket) deleteFunc(primary string, matches func(key string, item *Item) bool, deletables chan *Item) int {
+func (b *layeredBucket[T]) deleteFunc(primary string, matches func(key string, item *Item[T]) bool, deletables chan *Item[T]) int {
 	b.RLock()
 	bucket, exists := b.buckets[primary]
 	b.RUnlock()
@@ -81,7 +81,7 @@ func (b *layeredBucket) deleteFunc(primary string, matches func(key string, item
 	return bucket.deleteFunc(matches, deletables)
 }
 
-func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool {
+func (b *layeredBucket[T]) deleteAll(primary string, deletables chan *Item[T]) bool {
 	b.RLock()
 	bucket, exists := b.buckets[primary]
 	b.RUnlock()
@@ -102,7 +102,7 @@ func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool {
 	return true
 }
 
-func (b *layeredBucket) forEachFunc(primary string, matches func(key string, item *Item) bool) {
+func (b *layeredBucket[T]) forEachFunc(primary string, matches func(key string, item *Item[T]) bool) {
 	b.RLock()
 	bucket, exists := b.buckets[primary]
 	b.RUnlock()
@@ -111,11 +111,11 @@ func (b *layeredBucket) forEachFunc(primary string, matches func(key string, ite
 	}
 }
 
-func (b *layeredBucket) clear() {
+func (b *layeredBucket[T]) clear() {
 	b.Lock()
 	defer b.Unlock()
 	for _, bucket := range b.buckets {
 		bucket.clear()
 	}
-	b.buckets = make(map[string]*bucket)
+	b.buckets = make(map[string]*bucket[T])
 }
@@ -8,14 +8,14 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
type LayeredCache struct {
|
type LayeredCache[T any] struct {
|
||||||
*Configuration
|
*Configuration[T]
|
||||||
list *list.List
|
list *list.List
|
||||||
buckets []*layeredBucket
|
buckets []*layeredBucket[T]
|
||||||
bucketMask uint32
|
bucketMask uint32
|
||||||
size int64
|
size int64
|
||||||
deletables chan *Item
|
deletables chan *Item[T]
|
||||||
promotables chan *Item
|
promotables chan *Item[T]
|
||||||
control chan interface{}
|
control chan interface{}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -32,25 +32,25 @@ type LayeredCache struct {
|
|||||||
// secondary key 2 = ".xml"
|
// secondary key 2 = ".xml"
|
||||||
|
|
||||||
// See ccache.Configure() for creating a configuration
|
// See ccache.Configure() for creating a configuration
|
||||||
func Layered(config *Configuration) *LayeredCache {
|
func Layered[T any](config *Configuration[T]) *LayeredCache[T] {
|
||||||
c := &LayeredCache{
|
c := &LayeredCache[T]{
|
||||||
list: list.New(),
|
list: list.New(),
|
||||||
Configuration: config,
|
Configuration: config,
|
||||||
bucketMask: uint32(config.buckets) - 1,
|
bucketMask: uint32(config.buckets) - 1,
|
||||||
buckets: make([]*layeredBucket, config.buckets),
|
buckets: make([]*layeredBucket[T], config.buckets),
|
||||||
deletables: make(chan *Item, config.deleteBuffer),
|
deletables: make(chan *Item[T], config.deleteBuffer),
|
||||||
control: make(chan interface{}),
|
control: make(chan interface{}),
|
||||||
}
|
}
|
||||||
for i := 0; i < int(config.buckets); i++ {
|
for i := 0; i < int(config.buckets); i++ {
|
||||||
c.buckets[i] = &layeredBucket{
|
c.buckets[i] = &layeredBucket[T]{
|
||||||
buckets: make(map[string]*bucket),
|
buckets: make(map[string]*bucket[T]),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
c.restart()
|
c.restart()
|
||||||
return c
|
return c
|
||||||
}
|
}
|
||||||

-func (c *LayeredCache) ItemCount() int {
+func (c *LayeredCache[T]) ItemCount() int {
 	count := 0
 	for _, b := range c.buckets {
 		count += b.itemCount()
@@ -62,7 +62,7 @@ func (c *LayeredCache) ItemCount() int {
 // This can return an expired item. Use item.Expired() to see if the item
 // is expired and item.TTL() to see how long until the item expires (which
 // will be negative for an already expired item).
-func (c *LayeredCache) Get(primary, secondary string) *Item {
+func (c *LayeredCache[T]) Get(primary, secondary string) *Item[T] {
 	item := c.bucket(primary).get(primary, secondary)
 	if item == nil {
 		return nil
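Since Get now returns *Item[T], the caller-side type assertion goes away; the expired-item caveat in the comment above is unchanged. A small sketch, assuming a string-valued cache:

// lookup returns the cached value for primary/secondary, or "" on a miss
// or an already-expired entry (Get can hand back expired items).
func lookup(cache *LayeredCache[string], primary, secondary string) string {
	item := cache.Get(primary, secondary)
	if item == nil || item.Expired() {
		return ""
	}
	return item.Value() // typed string, no .(string) assertion needed
}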
@@ -76,23 +76,23 @@ func (c *LayeredCache) Get(primary, secondary string) *Item {
 	return item
 }

-func (c *LayeredCache) ForEachFunc(primary string, matches func(key string, item *Item) bool) {
+func (c *LayeredCache[T]) ForEachFunc(primary string, matches func(key string, item *Item[T]) bool) {
 	c.bucket(primary).forEachFunc(primary, matches)
 }

 // Get the secondary cache for a given primary key. This operation will
 // never return nil. In the case where the primary key does not exist, a
 // new, underlying, empty bucket will be created and returned.
-func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache {
+func (c *LayeredCache[T]) GetOrCreateSecondaryCache(primary string) *SecondaryCache[T] {
 	primaryBkt := c.bucket(primary)
 	bkt := primaryBkt.getSecondaryBucket(primary)
 	primaryBkt.Lock()
 	if bkt == nil {
-		bkt = &bucket{lookup: make(map[string]*Item)}
+		bkt = &bucket[T]{lookup: make(map[string]*Item[T])}
 		primaryBkt.buckets[primary] = bkt
 	}
 	primaryBkt.Unlock()
-	return &SecondaryCache{
+	return &SecondaryCache[T]{
 		bucket: bkt,
 		pCache: c,
 	}
@@ -100,29 +100,29 @@ func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache

 // Used when the cache was created with the Track() configuration option.
 // Avoid otherwise
-func (c *LayeredCache) TrackingGet(primary, secondary string) TrackedItem {
+func (c *LayeredCache[T]) TrackingGet(primary, secondary string) TrackedItem[T] {
 	item := c.Get(primary, secondary)
 	if item == nil {
-		return NilTracked
+		return nil
 	}
 	item.track()
 	return item
 }
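One behavioural detail in the hunk above: TrackingGet now reports a miss as a plain nil instead of the old NilTracked sentinel, so callers nil-check rather than compare against the sentinel. A sketch, assuming the TrackedItem's existing Release semantics are left untouched by this change:

// useTracked reads a value while holding a reference so the GC won't evict it mid-use.
func useTracked(cache *LayeredCache[string], primary, secondary string) (string, bool) {
	item := cache.TrackingGet(primary, secondary)
	if item == nil { // miss: no NilTracked sentinel to compare against any more
		return "", false
	}
	defer item.Release() // assumes the pre-existing TrackedItem.Release contract
	return item.Value(), true
}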

 // Set the value in the cache for the specified duration
-func (c *LayeredCache) TrackingSet(primary, secondary string, value interface{}, duration time.Duration) TrackedItem {
+func (c *LayeredCache[T]) TrackingSet(primary, secondary string, value T, duration time.Duration) TrackedItem[T] {
 	return c.set(primary, secondary, value, duration, true)
 }

 // Set the value in the cache for the specified duration
-func (c *LayeredCache) Set(primary, secondary string, value interface{}, duration time.Duration) {
+func (c *LayeredCache[T]) Set(primary, secondary string, value T, duration time.Duration) {
 	c.set(primary, secondary, value, duration, false)
 }

 // Replace the value if it exists, does not set if it doesn't.
 // Returns true if the item existed an was replaced, false otherwise.
 // Replace does not reset item's TTL nor does it alter its position in the LRU
-func (c *LayeredCache) Replace(primary, secondary string, value interface{}) bool {
+func (c *LayeredCache[T]) Replace(primary, secondary string, value T) bool {
 	item := c.bucket(primary).get(primary, secondary)
 	if item == nil {
 		return false
@@ -137,7 +137,7 @@ func (c *LayeredCache) Replace(primary, secondary string, value interface{}) boo
 // Note that Fetch merely calls the public Get and Set functions. If you want
 // a different Fetch behavior, such as thundering herd protection or returning
 // expired items, implement it in your application.
-func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
+func (c *LayeredCache[T]) Fetch(primary, secondary string, duration time.Duration, fetch func() (T, error)) (*Item[T], error) {
 	item := c.Get(primary, secondary)
 	if item != nil {
 		return item, nil
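Fetch keeps its get-then-set behaviour, but the loader callback is typed as well now, so the miss path produces a T directly instead of an interface{}. A sketch, assuming a string-valued cache and an inlined stand-in loader:

// getOrLoad returns the cached value, invoking the loader only on a miss.
func getOrLoad(cache *LayeredCache[string], primary, secondary string) (string, error) {
	item, err := cache.Fetch(primary, secondary, time.Minute, func() (string, error) {
		// stand-in for the real data source; only runs when the key is absent
		return "loaded:" + primary + "/" + secondary, nil
	})
	if err != nil {
		return "", err
	}
	return item.Value(), nil
}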
@@ -150,7 +150,7 @@ func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration,
 }

 // Remove the item from the cache, return true if the item was present, false otherwise.
-func (c *LayeredCache) Delete(primary, secondary string) bool {
+func (c *LayeredCache[T]) Delete(primary, secondary string) bool {
 	item := c.bucket(primary).delete(primary, secondary)
 	if item != nil {
 		c.deletables <- item
@@ -160,47 +160,47 @@ func (c *LayeredCache) Delete(primary, secondary string) bool {
 }

 // Deletes all items that share the same primary key
-func (c *LayeredCache) DeleteAll(primary string) bool {
+func (c *LayeredCache[T]) DeleteAll(primary string) bool {
 	return c.bucket(primary).deleteAll(primary, c.deletables)
 }

 // Deletes all items that share the same primary key and prefix.
-func (c *LayeredCache) DeletePrefix(primary, prefix string) int {
+func (c *LayeredCache[T]) DeletePrefix(primary, prefix string) int {
 	return c.bucket(primary).deletePrefix(primary, prefix, c.deletables)
 }

 // Deletes all items that share the same primary key and where the matches func evaluates to true.
-func (c *LayeredCache) DeleteFunc(primary string, matches func(key string, item *Item) bool) int {
+func (c *LayeredCache[T]) DeleteFunc(primary string, matches func(key string, item *Item[T]) bool) int {
 	return c.bucket(primary).deleteFunc(primary, matches, c.deletables)
 }
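DeleteAll, DeletePrefix and DeleteFunc keep their shapes; only the matcher now receives *Item[T], so predicates read Value() without an assertion. For instance (a sketch, int-valued cache as in the tests further down):

// purgeSmall removes every entry under primary whose value is below the cutoff,
// returning how many entries were deleted.
func purgeSmall(cache *LayeredCache[int], primary string, cutoff int) int {
	return cache.DeleteFunc(primary, func(key string, item *Item[int]) bool {
		return item.Value() < cutoff // typed access, no .(int) needed
	})
}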

 // Clears the cache
-func (c *LayeredCache) Clear() {
+func (c *LayeredCache[T]) Clear() {
 	done := make(chan struct{})
 	c.control <- clear{done: done}
 	<-done
 }

-func (c *LayeredCache) Stop() {
+func (c *LayeredCache[T]) Stop() {
 	close(c.promotables)
 	<-c.control
 }

 // Gets the number of items removed from the cache due to memory pressure since
 // the last time GetDropped was called
-func (c *LayeredCache) GetDropped() int {
+func (c *LayeredCache[T]) GetDropped() int {
 	return doGetDropped(c.control)
 }

 // SyncUpdates waits until the cache has finished asynchronous state updates for any operations
 // that were done by the current goroutine up to now. See Cache.SyncUpdates for details.
-func (c *LayeredCache) SyncUpdates() {
+func (c *LayeredCache[T]) SyncUpdates() {
 	doSyncUpdates(c.control)
 }

 // Sets a new max size. That can result in a GC being run if the new maxium size
 // is smaller than the cached size
-func (c *LayeredCache) SetMaxSize(size int64) {
+func (c *LayeredCache[T]) SetMaxSize(size int64) {
 	done := make(chan struct{})
 	c.control <- setMaxSize{size: size, done: done}
 	<-done
@@ -209,7 +209,7 @@ func (c *LayeredCache) SetMaxSize(size int64) {
 // Forces GC. There should be no reason to call this function, except from tests
 // which require synchronous GC.
 // This is a control command.
-func (c *LayeredCache) GC() {
+func (c *LayeredCache[T]) GC() {
 	done := make(chan struct{})
 	c.control <- gc{done: done}
 	<-done
@@ -219,19 +219,19 @@ func (c *LayeredCache) GC() {
 // by the worker goroutine. It's meant to be called periodically for metrics, or
 // from tests.
 // This is a control command.
-func (c *LayeredCache) GetSize() int64 {
+func (c *LayeredCache[T]) GetSize() int64 {
 	res := make(chan int64)
 	c.control <- getSize{res}
 	return <-res
 }

-func (c *LayeredCache) restart() {
+func (c *LayeredCache[T]) restart() {
-	c.promotables = make(chan *Item, c.promoteBuffer)
+	c.promotables = make(chan *Item[T], c.promoteBuffer)
 	c.control = make(chan interface{})
 	go c.worker()
 }

-func (c *LayeredCache) set(primary, secondary string, value interface{}, duration time.Duration, track bool) *Item {
+func (c *LayeredCache[T]) set(primary, secondary string, value T, duration time.Duration, track bool) *Item[T] {
 	item, existing := c.bucket(primary).set(primary, secondary, value, duration, track)
 	if existing != nil {
 		c.deletables <- existing
@@ -240,25 +240,25 @@ func (c *LayeredCache) set(primary, secondary string, value interface{}, duratio
 	return item
 }

-func (c *LayeredCache) bucket(key string) *layeredBucket {
+func (c *LayeredCache[T]) bucket(key string) *layeredBucket[T] {
 	h := fnv.New32a()
 	h.Write([]byte(key))
 	return c.buckets[h.Sum32()&c.bucketMask]
 }

-func (c *LayeredCache) promote(item *Item) {
+func (c *LayeredCache[T]) promote(item *Item[T]) {
 	c.promotables <- item
 }

-func (c *LayeredCache) worker() {
+func (c *LayeredCache[T]) worker() {
 	defer close(c.control)
 	dropped := 0
-	promoteItem := func(item *Item) {
+	promoteItem := func(item *Item[T]) {
 		if c.doPromote(item) && c.size > c.maxSize {
 			dropped += c.gc()
 		}
 	}
-	deleteItem := func(item *Item) {
+	deleteItem := func(item *Item[T]) {
 		if item.element == nil {
 			atomic.StoreInt32(&item.promotions, -2)
 		} else {
@@ -310,7 +310,7 @@ func (c *LayeredCache) worker() {
 		}
 	}

-func (c *LayeredCache) doPromote(item *Item) bool {
+func (c *LayeredCache[T]) doPromote(item *Item[T]) bool {
 	// deleted before it ever got promoted
 	if atomic.LoadInt32(&item.promotions) == -2 {
 		return false
@@ -327,7 +327,7 @@ func (c *LayeredCache) doPromote(item *Item) bool {
 	return true
 }

-func (c *LayeredCache) gc() int {
+func (c *LayeredCache[T]) gc() int {
 	element := c.list.Back()
 	dropped := 0
 	itemsToPrune := int64(c.itemsToPrune)
@@ -341,7 +341,7 @@ func (c *LayeredCache) gc() int {
 		return dropped
 	}
 	prev := element.Prev()
-	item := element.Value.(*Item)
+	item := element.Value.(*Item[T])
 	if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 {
 		c.bucket(item.group).delete(item.group, item.key)
 		c.size -= item.size
@@ -17,13 +17,13 @@ func Test_LayeredCache(t *testing.T) {
 }

 func (_ *LayeredCacheTests) GetsANonExistantValue() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	Expect(cache.Get("spice", "flow")).To.Equal(nil)
 	Expect(cache.ItemCount()).To.Equal(0)
 }

 func (_ *LayeredCacheTests) SetANewValue() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "a value", time.Minute)
 	Expect(cache.Get("spice", "flow").Value()).To.Equal("a value")
 	Expect(cache.Get("spice", "stop")).To.Equal(nil)
@@ -31,7 +31,7 @@ func (_ *LayeredCacheTests) SetANewValue() {
 }

 func (_ *LayeredCacheTests) SetsMultipleValueWithinTheSameLayer() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "value-a", time.Minute)
 	cache.Set("spice", "must", "value-b", time.Minute)
 	cache.Set("leto", "sister", "ghanima", time.Minute)
@@ -46,22 +46,22 @@ func (_ *LayeredCacheTests) SetsMultipleValueWithinTheSameLayer() {
 }

 func (_ *LayeredCacheTests) ReplaceDoesNothingIfKeyDoesNotExist() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	Expect(cache.Replace("spice", "flow", "value-a")).To.Equal(false)
 	Expect(cache.Get("spice", "flow")).To.Equal(nil)
 }

 func (_ *LayeredCacheTests) ReplaceUpdatesTheValue() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "value-a", time.Minute)
 	Expect(cache.Replace("spice", "flow", "value-b")).To.Equal(true)
-	Expect(cache.Get("spice", "flow").Value().(string)).To.Equal("value-b")
+	Expect(cache.Get("spice", "flow").Value()).To.Equal("value-b")
 	Expect(cache.ItemCount()).To.Equal(1)
 	//not sure how to test that the TTL hasn't changed sort of a sleep..
 }

 func (_ *LayeredCacheTests) DeletesAValue() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "value-a", time.Minute)
 	cache.Set("spice", "must", "value-b", time.Minute)
 	cache.Set("leto", "sister", "ghanima", time.Minute)
@@ -74,7 +74,7 @@ func (_ *LayeredCacheTests) DeletesAValue() {
 }

 func (_ *LayeredCacheTests) DeletesAPrefix() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	Expect(cache.ItemCount()).To.Equal(0)

 	cache.Set("spice", "aaa", "1", time.Minute)
@@ -98,7 +98,7 @@ func (_ *LayeredCacheTests) DeletesAPrefix() {
 }

 func (_ *LayeredCacheTests) DeletesAFunc() {
-	cache := newLayered()
+	cache := newLayered[int]()
 	Expect(cache.ItemCount()).To.Equal(0)

 	cache.Set("spice", "a", 1, time.Minute)
@@ -109,17 +109,17 @@ func (_ *LayeredCacheTests) DeletesAFunc() {
 	cache.Set("spice", "f", 6, time.Minute)
 	Expect(cache.ItemCount()).To.Equal(6)

-	Expect(cache.DeleteFunc("spice", func(key string, item *Item) bool {
+	Expect(cache.DeleteFunc("spice", func(key string, item *Item[int]) bool {
 		return false
 	})).To.Equal(0)
 	Expect(cache.ItemCount()).To.Equal(6)

-	Expect(cache.DeleteFunc("spice", func(key string, item *Item) bool {
+	Expect(cache.DeleteFunc("spice", func(key string, item *Item[int]) bool {
-		return item.Value().(int) < 4
+		return item.Value() < 4
 	})).To.Equal(2)
 	Expect(cache.ItemCount()).To.Equal(4)

-	Expect(cache.DeleteFunc("spice", func(key string, item *Item) bool {
+	Expect(cache.DeleteFunc("spice", func(key string, item *Item[int]) bool {
 		return key == "d"
 	})).To.Equal(1)
 	Expect(cache.ItemCount()).To.Equal(3)
@@ -128,13 +128,13 @@ func (_ *LayeredCacheTests) DeletesAFunc() {

 func (_ *LayeredCacheTests) OnDeleteCallbackCalled() {
 	onDeleteFnCalled := int32(0)
-	onDeleteFn := func(item *Item) {
+	onDeleteFn := func(item *Item[string]) {
 		if item.group == "spice" && item.key == "flow" {
 			atomic.AddInt32(&onDeleteFnCalled, 1)
 		}
 	}

-	cache := Layered(Configure().OnDelete(onDeleteFn))
+	cache := Layered[string](Configure[string]().OnDelete(onDeleteFn))
 	cache.Set("spice", "flow", "value-a", time.Minute)
 	cache.Set("spice", "must", "value-b", time.Minute)
 	cache.Set("leto", "sister", "ghanima", time.Minute)
@@ -152,7 +152,7 @@ func (_ *LayeredCacheTests) OnDeleteCallbackCalled() {
 }

 func (_ *LayeredCacheTests) DeletesALayer() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "value-a", time.Minute)
 	cache.Set("spice", "must", "value-b", time.Minute)
 	cache.Set("leto", "sister", "ghanima", time.Minute)
@@ -164,7 +164,7 @@ func (_ *LayeredCacheTests) DeletesALayer() {
 }

 func (_ LayeredCacheTests) GCsTheOldestItems() {
-	cache := Layered(Configure().ItemsToPrune(10))
+	cache := Layered[int](Configure[int]().ItemsToPrune(10))
 	cache.Set("xx", "a", 23, time.Minute)
 	for i := 0; i < 500; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
@@ -181,7 +181,7 @@ func (_ LayeredCacheTests) GCsTheOldestItems() {
 }

 func (_ LayeredCacheTests) PromotedItemsDontGetPruned() {
-	cache := Layered(Configure().ItemsToPrune(10).GetsPerPromote(1))
+	cache := Layered[int](Configure[int]().ItemsToPrune(10).GetsPerPromote(1))
 	for i := 0; i < 500; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
 	}
@@ -195,7 +195,7 @@ func (_ LayeredCacheTests) PromotedItemsDontGetPruned() {
 }

 func (_ LayeredCacheTests) TrackerDoesNotCleanupHeldInstance() {
-	cache := Layered(Configure().ItemsToPrune(10).Track())
+	cache := Layered[int](Configure[int]().ItemsToPrune(10).Track())
 	item0 := cache.TrackingSet("0", "a", 0, time.Minute)
 	for i := 1; i < 11; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
@@ -213,7 +213,7 @@ func (_ LayeredCacheTests) TrackerDoesNotCleanupHeldInstance() {
 }

 func (_ LayeredCacheTests) RemovesOldestItemWhenFull() {
-	cache := Layered(Configure().MaxSize(5).ItemsToPrune(1))
+	cache := Layered[int](Configure[int]().MaxSize(5).ItemsToPrune(1))
 	cache.Set("xx", "a", 23, time.Minute)
 	for i := 0; i < 7; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
@@ -231,7 +231,7 @@ func (_ LayeredCacheTests) RemovesOldestItemWhenFull() {
 }

 func (_ LayeredCacheTests) ResizeOnTheFly() {
-	cache := Layered(Configure().MaxSize(9).ItemsToPrune(1))
+	cache := Layered[int](Configure[int]().MaxSize(9).ItemsToPrune(1))
 	for i := 0; i < 5; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
 	}
@@ -265,7 +265,7 @@ func (_ LayeredCacheTests) ResizeOnTheFly() {
 }

 func (_ LayeredCacheTests) RemovesOldestItemWhenFullBySizer() {
-	cache := Layered(Configure().MaxSize(9).ItemsToPrune(2))
+	cache := Layered[*SizedItem](Configure[*SizedItem]().MaxSize(9).ItemsToPrune(2))
 	for i := 0; i < 7; i++ {
 		cache.Set("pri", strconv.Itoa(i), &SizedItem{i, 2}, time.Minute)
 	}
@@ -274,11 +274,11 @@ func (_ LayeredCacheTests) RemovesOldestItemWhenFullBySizer() {
 	Expect(cache.Get("pri", "1")).To.Equal(nil)
 	Expect(cache.Get("pri", "2")).To.Equal(nil)
 	Expect(cache.Get("pri", "3")).To.Equal(nil)
-	Expect(cache.Get("pri", "4").Value().(*SizedItem).id).To.Equal(4)
+	Expect(cache.Get("pri", "4").Value().id).To.Equal(4)
 }

 func (_ LayeredCacheTests) SetUpdatesSizeOnDelta() {
-	cache := Layered(Configure())
+	cache := Layered[*SizedItem](Configure[*SizedItem]())
 	cache.Set("pri", "a", &SizedItem{0, 2}, time.Minute)
 	cache.Set("pri", "b", &SizedItem{0, 3}, time.Minute)
 	cache.SyncUpdates()
@@ -299,7 +299,7 @@ func (_ LayeredCacheTests) SetUpdatesSizeOnDelta() {
 }

 func (_ LayeredCacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
-	cache := Layered(Configure())
+	cache := Layered[*SizedItem](Configure[*SizedItem]())
 	cache.Set("pri", "1", &SizedItem{1, 2}, time.Minute)
 	cache.Set("pri", "2", &SizedItem{1, 2}, time.Minute)
 	cache.Set("pri", "3", &SizedItem{1, 2}, time.Minute)
@@ -309,7 +309,7 @@ func (_ LayeredCacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
 }

 func (_ LayeredCacheTests) ReplaceChangesSize() {
-	cache := Layered(Configure())
+	cache := Layered[*SizedItem](Configure[*SizedItem]())
 	cache.Set("pri", "1", &SizedItem{1, 2}, time.Minute)
 	cache.Set("pri", "2", &SizedItem{1, 2}, time.Minute)

@@ -327,43 +327,43 @@ func (_ LayeredCacheTests) ReplaceChangesSize() {
 }

 func (_ LayeredCacheTests) EachFunc() {
-	cache := Layered(Configure().MaxSize(3).ItemsToPrune(1))
+	cache := Layered[int](Configure[int]().MaxSize(3).ItemsToPrune(1))
-	Expect(forEachKeysLayered(cache, "1")).To.Equal([]string{})
+	Expect(forEachKeysLayered[int](cache, "1")).To.Equal([]string{})

 	cache.Set("1", "a", 1, time.Minute)
-	Expect(forEachKeysLayered(cache, "1")).To.Equal([]string{"a"})
+	Expect(forEachKeysLayered[int](cache, "1")).To.Equal([]string{"a"})

 	cache.Set("1", "b", 2, time.Minute)
 	cache.SyncUpdates()
-	Expect(forEachKeysLayered(cache, "1")).To.Equal([]string{"a", "b"})
+	Expect(forEachKeysLayered[int](cache, "1")).To.Equal([]string{"a", "b"})

 	cache.Set("1", "c", 3, time.Minute)
 	cache.SyncUpdates()
-	Expect(forEachKeysLayered(cache, "1")).To.Equal([]string{"a", "b", "c"})
+	Expect(forEachKeysLayered[int](cache, "1")).To.Equal([]string{"a", "b", "c"})

 	cache.Set("1", "d", 4, time.Minute)
 	cache.SyncUpdates()
-	Expect(forEachKeysLayered(cache, "1")).To.Equal([]string{"b", "c", "d"})
+	Expect(forEachKeysLayered[int](cache, "1")).To.Equal([]string{"b", "c", "d"})

 	// iteration is non-deterministic, all we know for sure is "stop" should not be in there
 	cache.Set("1", "stop", 5, time.Minute)
 	cache.SyncUpdates()
-	Expect(forEachKeysLayered(cache, "1")).Not.To.Contain("stop")
+	Expect(forEachKeysLayered[int](cache, "1")).Not.To.Contain("stop")

 	cache.Set("1", "e", 6, time.Minute)
 	cache.SyncUpdates()
-	Expect(forEachKeysLayered(cache, "1")).Not.To.Contain("stop")
+	Expect(forEachKeysLayered[int](cache, "1")).Not.To.Contain("stop")
 }

-func newLayered() *LayeredCache {
+func newLayered[T any]() *LayeredCache[T] {
-	c := Layered(Configure())
+	c := Layered[T](Configure[T]())
 	c.Clear()
 	return c
 }

-func forEachKeysLayered(cache *LayeredCache, primary string) []string {
+func forEachKeysLayered[T any](cache *LayeredCache[T], primary string) []string {
 	keys := make([]string, 0, 10)
-	cache.ForEachFunc(primary, func(key string, i *Item) bool {
+	cache.ForEachFunc(primary, func(key string, i *Item[T]) bool {
 		if key == "stop" {
 			return false
 		}
@@ -2,20 +2,20 @@ package ccache

 import "time"

-type SecondaryCache struct {
+type SecondaryCache[T any] struct {
-	bucket *bucket
+	bucket *bucket[T]
-	pCache *LayeredCache
+	pCache *LayeredCache[T]
 }

 // Get the secondary key.
 // The semantics are the same as for LayeredCache.Get
-func (s *SecondaryCache) Get(secondary string) *Item {
+func (s *SecondaryCache[T]) Get(secondary string) *Item[T] {
 	return s.bucket.get(secondary)
 }

 // Set the secondary key to a value.
 // The semantics are the same as for LayeredCache.Set
-func (s *SecondaryCache) Set(secondary string, value interface{}, duration time.Duration) *Item {
+func (s *SecondaryCache[T]) Set(secondary string, value T, duration time.Duration) *Item[T] {
 	item, existing := s.bucket.set(secondary, value, duration, false)
 	if existing != nil {
 		s.pCache.deletables <- existing
@@ -26,7 +26,7 @@ func (s *SecondaryCache) Set(secondary string, value interface{}, duration time.

 // Fetch or set a secondary key.
 // The semantics are the same as for LayeredCache.Fetch
-func (s *SecondaryCache) Fetch(secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
+func (s *SecondaryCache[T]) Fetch(secondary string, duration time.Duration, fetch func() (T, error)) (*Item[T], error) {
 	item := s.Get(secondary)
 	if item != nil {
 		return item, nil
@@ -40,7 +40,7 @@ func (s *SecondaryCache) Fetch(secondary string, duration time.Duration, fetch f

 // Delete a secondary key.
 // The semantics are the same as for LayeredCache.Delete
-func (s *SecondaryCache) Delete(secondary string) bool {
+func (s *SecondaryCache[T]) Delete(secondary string) bool {
 	item := s.bucket.delete(secondary)
 	if item != nil {
 		s.pCache.deletables <- item
@@ -51,7 +51,7 @@ func (s *SecondaryCache) Delete(secondary string) bool {

 // Replace a secondary key.
 // The semantics are the same as for LayeredCache.Replace
-func (s *SecondaryCache) Replace(secondary string, value interface{}) bool {
+func (s *SecondaryCache[T]) Replace(secondary string, value T) bool {
 	item := s.Get(secondary)
 	if item == nil {
 		return false
@@ -62,10 +62,10 @@ func (s *SecondaryCache) Replace(secondary string, value interface{}) bool {

 // Track a secondary key.
 // The semantics are the same as for LayeredCache.TrackingGet
-func (c *SecondaryCache) TrackingGet(secondary string) TrackedItem {
+func (c *SecondaryCache[T]) TrackingGet(secondary string) TrackedItem[T] {
 	item := c.Get(secondary)
 	if item == nil {
-		return NilTracked
+		return nil
 	}
 	item.track()
 	return item
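Because SecondaryCache is parameterized the same way, a per-primary-key view obtained from GetOrCreateSecondaryCache works with typed values end to end, while still sharing storage (and the deletables channel) with its parent LayeredCache. A brief sketch with a string-valued cache:

// fetchUser works against the "users" layer directly via the secondary cache.
func fetchUser(cache *LayeredCache[string], id string) (string, error) {
	users := cache.GetOrCreateSecondaryCache("users")
	item, err := users.Fetch(id, time.Minute, func() (string, error) {
		return "user:" + id, nil // stand-in loader, runs only on a miss
	})
	if err != nil {
		return "", err
	}
	return item.Value(), nil
}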
@@ -15,12 +15,12 @@ func Test_SecondaryCache(t *testing.T) {
 }

 func (_ SecondaryCacheTests) GetsANonExistantValue() {
-	cache := newLayered().GetOrCreateSecondaryCache("foo")
+	cache := newLayered[string]().GetOrCreateSecondaryCache("foo")
 	Expect(cache).Not.To.Equal(nil)
 }

 func (_ SecondaryCacheTests) SetANewValue() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "a value", time.Minute)
 	sCache := cache.GetOrCreateSecondaryCache("spice")
 	Expect(sCache.Get("flow").Value()).To.Equal("a value")
@@ -28,7 +28,7 @@ func (_ SecondaryCacheTests) SetANewValue() {
 }

 func (_ SecondaryCacheTests) ValueCanBeSeenInBothCaches1() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "a value", time.Minute)
 	sCache := cache.GetOrCreateSecondaryCache("spice")
 	sCache.Set("orinoco", "another value", time.Minute)
@@ -37,7 +37,7 @@ func (_ SecondaryCacheTests) ValueCanBeSeenInBothCaches1() {
 }

 func (_ SecondaryCacheTests) ValueCanBeSeenInBothCaches2() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	sCache := cache.GetOrCreateSecondaryCache("spice")
 	sCache.Set("flow", "a value", time.Minute)
 	Expect(sCache.Get("flow").Value()).To.Equal("a value")
@@ -45,7 +45,7 @@ func (_ SecondaryCacheTests) ValueCanBeSeenInBothCaches2() {
 }

 func (_ SecondaryCacheTests) DeletesAreReflectedInBothCaches() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "a value", time.Minute)
 	cache.Set("spice", "sister", "ghanima", time.Minute)
 	sCache := cache.GetOrCreateSecondaryCache("spice")
@@ -60,37 +60,37 @@ func (_ SecondaryCacheTests) DeletesAreReflectedInBothCaches() {
 }

 func (_ SecondaryCacheTests) ReplaceDoesNothingIfKeyDoesNotExist() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	sCache := cache.GetOrCreateSecondaryCache("spice")
 	Expect(sCache.Replace("flow", "value-a")).To.Equal(false)
 	Expect(cache.Get("spice", "flow")).To.Equal(nil)
 }

 func (_ SecondaryCacheTests) ReplaceUpdatesTheValue() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "value-a", time.Minute)
 	sCache := cache.GetOrCreateSecondaryCache("spice")
 	Expect(sCache.Replace("flow", "value-b")).To.Equal(true)
-	Expect(cache.Get("spice", "flow").Value().(string)).To.Equal("value-b")
+	Expect(cache.Get("spice", "flow").Value()).To.Equal("value-b")
 }

 func (_ SecondaryCacheTests) FetchReturnsAnExistingValue() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	cache.Set("spice", "flow", "value-a", time.Minute)
 	sCache := cache.GetOrCreateSecondaryCache("spice")
-	val, _ := sCache.Fetch("flow", time.Minute, func() (interface{}, error) { return "a fetched value", nil })
+	val, _ := sCache.Fetch("flow", time.Minute, func() (string, error) { return "a fetched value", nil })
-	Expect(val.Value().(string)).To.Equal("value-a")
+	Expect(val.Value()).To.Equal("value-a")
 }

 func (_ SecondaryCacheTests) FetchReturnsANewValue() {
-	cache := newLayered()
+	cache := newLayered[string]()
 	sCache := cache.GetOrCreateSecondaryCache("spice")
-	val, _ := sCache.Fetch("flow", time.Minute, func() (interface{}, error) { return "a fetched value", nil })
+	val, _ := sCache.Fetch("flow", time.Minute, func() (string, error) { return "a fetched value", nil })
-	Expect(val.Value().(string)).To.Equal("a fetched value")
+	Expect(val.Value()).To.Equal("a fetched value")
 }

 func (_ SecondaryCacheTests) TrackerDoesNotCleanupHeldInstance() {
-	cache := Layered(Configure().ItemsToPrune(10).Track())
+	cache := Layered[int](Configure[int]().ItemsToPrune(10).Track())
 	for i := 0; i < 10; i++ {
 		cache.Set(strconv.Itoa(i), "a", i, time.Minute)
 	}