2 Commits

Author SHA1 Message Date
Karl Seguin
ece93bf87d On delete, always set promotions == -2 and node == nil
Also, item.promotions no longer needs to be loaded or stored using atomic operations.
It once did; Cache was updated long ago to drop the atomics, but LayeredCache wasn't.
The two are now consistent: neither uses atomic operations on the field.

Fixes: https://github.com/karlseguin/ccache/issues/76
2022-12-13 20:33:58 +08:00
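
For context on why dropping the atomic loads/stores is safe: a counter that only a single goroutine ever reads or writes needs no sync/atomic at all. The sketch below is a minimal, hypothetical illustration of that single-owner worker pattern, not ccache's actual code; the item, cache, worker, and promote names are made up.

package main

// Hypothetical sketch: promotions is only ever read and written by the one
// worker goroutine that drains the channel, so plain loads and stores are
// safe and sync/atomic is unnecessary.
type item struct {
	promotions int32 // owned exclusively by the worker goroutine below
}

type cache struct {
	promotables chan *item
}

func (c *cache) worker() {
	for it := range c.promotables {
		// Plain access is fine: this is the only goroutine touching promotions.
		if it.promotions == -2 { // sentinel meaning "already deleted"
			continue
		}
		it.promotions += 1
	}
}

// Other goroutines never touch promotions directly; they hand the item to the
// worker over the channel instead.
func (c *cache) promote(it *item) {
	c.promotables <- it
}

func main() {
	c := &cache{promotables: make(chan *item, 8)}
	go c.worker()
	c.promote(&item{})
}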
Karl Seguin
3452e4e261 Fix memory leak
As documented in https://github.com/karlseguin/ccache/issues/76, an entry that is
both GC'd and deleted (either via a delete or an update) leaves the internal
linked list with a nil tail, because removing the same node from the linked list
more than once corrupts it.

doDelete was already aware of "invalid" nodes (where item.node == nil), so the
solution seems to be as simple as setting item.node = nil during GC.
2022-11-19 08:15:48 +08:00
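
To make the failure mode concrete: in a doubly linked list whose remove does not guard against already-unlinked nodes, removing the same node twice resets head and tail to nil even though other nodes were never removed, which is the nil-tail corruption described above. The sketch below is a minimal, hypothetical illustration, not ccache's own list implementation; node, list, pushFront, and remove are made-up names. It ends with the item.node == nil style guard the fix relies on.

package main

import "fmt"

// Hypothetical sketch of the failure mode from issue #76, using a bare
// doubly linked list rather than ccache's list type.
type node struct {
	prev, next *node
	value      string
}

type list struct {
	head, tail *node
}

func (l *list) pushFront(v string) *node {
	n := &node{value: v, next: l.head}
	if l.head != nil {
		l.head.prev = n
	} else {
		l.tail = n
	}
	l.head = n
	return n
}

func (l *list) remove(n *node) {
	if n.prev == nil {
		l.head = n.next // a second remove of an unlinked node lands here...
	} else {
		n.prev.next = n.next
	}
	if n.next == nil {
		l.tail = n.prev // ...and here, wiping head and tail to nil
	} else {
		n.next.prev = n.prev
	}
	n.prev, n.next = nil, nil
}

func main() {
	l := &list{}
	l.pushFront("a")
	b := l.pushFront("b")

	// An entry that is GC'd and then deleted gets removed twice.
	l.remove(b)
	l.remove(b)
	// Prints <nil> <nil>: "a" was never removed, yet the list has lost track of it.
	fmt.Println(l.head, l.tail)

	// The fix in the commits: record that the node is already unlinked
	// (item.node = nil) and skip the remove when the guard sees nil.
	x := l.pushFront("x")
	y := l.pushFront("y")
	tracked := y
	l.remove(tracked)
	tracked = nil       // analogous to item.node = nil after the first removal
	if tracked != nil { // analogous to doDelete's item.node == nil guard
		l.remove(tracked)
	}
	fmt.Println(l.head == x && l.tail == x) // true: the list stays intact
}

In gc's terms, once the tail is nil the backwards walk from the tail has nothing to visit, so nothing gets pruned and the cache grows without bound; that is how the corruption shows up as a memory leak.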
4 changed files with 57 additions and 3 deletions

View File

@@ -394,6 +394,8 @@ func (c *Cache[T]) doDelete(item *Item[T]) {
			c.onDelete(item)
		}
		c.list.Remove(item.node)
		item.node = nil
		item.promotions = -2
	}
}
@@ -438,6 +440,7 @@ func (c *Cache[T]) gc() int {
				c.onDelete(item)
			}
			dropped += 1
			item.node = nil
			item.promotions = -2
		}
		node = prev

View File

@@ -1,6 +1,7 @@
package ccache

import (
	"math/rand"
	"sort"
	"strconv"
	"sync/atomic"
@@ -313,6 +314,29 @@ func Test_CacheForEachFunc(t *testing.T) {
	assert.DoesNotContain(t, forEachKeys(cache), "stop")
}

func Test_CachePrune(t *testing.T) {
	maxSize := int64(500)
	cache := New(Configure[string]().MaxSize(maxSize).ItemsToPrune(50))
	epoch := 0
	for i := 0; i < 10000; i++ {
		epoch += 1
		expired := make([]string, 0)
		for i := 0; i < 50; i += 1 {
			key := strconv.FormatInt(rand.Int63n(maxSize*20), 10)
			item := cache.Get(key)
			if item == nil || item.TTL() > 1*time.Minute {
				expired = append(expired, key)
			}
		}
		for _, key := range expired {
			cache.Set(key, key, 5*time.Minute)
		}
		if epoch%500 == 0 {
			assert.True(t, cache.GetSize() <= 500)
		}
	}
}

type SizedItem struct {
	id int
	s  int64

View File

@@ -266,13 +266,15 @@ func (c *LayeredCache[T]) worker() {
	}
	deleteItem := func(item *Item[T]) {
		if item.node == nil {
			atomic.StoreInt32(&item.promotions, -2)
			item.promotions = -2
		} else {
			c.size -= item.size
			if c.onDelete != nil {
				c.onDelete(item)
			}
			c.list.Remove(item.node)
			item.node = nil
			item.promotions = -2
		}
	}
	for {
@@ -318,13 +320,13 @@ func (c *LayeredCache[T]) worker() {
func (c *LayeredCache[T]) doPromote(item *Item[T]) bool {
	// deleted before it ever got promoted
	if atomic.LoadInt32(&item.promotions) == -2 {
	if item.promotions == -2 {
		return false
	}
	if item.node != nil { //not a new item
		if item.shouldPromote(c.getsPerPromote) {
			c.list.MoveToFront(item.node)
			atomic.StoreInt32(&item.promotions, 0)
			item.promotions = 0
		}
		return false
	}
@@ -355,6 +357,7 @@ func (c *LayeredCache[T]) gc() int {
			if c.onDelete != nil {
				c.onDelete(item)
			}
			item.node = nil
			item.promotions = -2
			dropped += 1
		}

View File

@@ -1,6 +1,7 @@
package ccache

import (
	"math/rand"
	"sort"
	"strconv"
	"sync/atomic"
@@ -372,6 +373,29 @@ func Test_LayeredCache_EachFunc(t *testing.T) {
	assert.DoesNotContain(t, forEachKeysLayered[int](cache, "1"), "stop")
}

func Test_LayeredCachePrune(t *testing.T) {
	maxSize := int64(500)
	cache := Layered(Configure[string]().MaxSize(maxSize).ItemsToPrune(50))
	epoch := 0
	for i := 0; i < 10000; i++ {
		epoch += 1
		expired := make([]string, 0)
		for i := 0; i < 50; i += 1 {
			key := strconv.FormatInt(rand.Int63n(maxSize*20), 10)
			item := cache.Get(key, key)
			if item == nil || item.TTL() > 1*time.Minute {
				expired = append(expired, key)
			}
		}
		for _, key := range expired {
			cache.Set(key, key, key, 5*time.Minute)
		}
		if epoch%500 == 0 {
			assert.True(t, cache.GetSize() <= 500)
		}
	}
}

func newLayered[T any]() *LayeredCache[T] {
	c := Layered[T](Configure[T]())
	c.Clear()