Fix memory leak
As documented in https://github.com/karlseguin/ccache/issues/76, an entry that is both GC'd and deleted (either via an explicit delete or an update) leaves the internal linked list with a nil tail, because removing the same node from the list more than once corrupts it. doDelete was already aware of "invalid" nodes (where item.node == nil), so the fix seems to be as simple as also setting item.node = nil during GC.
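To make the failure mode concrete, here is a minimal standalone sketch (simplified stand-ins, not ccache's actual List implementation) of a doubly-linked list whose Remove assumes it is called at most once per node. The second removal sees nil Prev/Next pointers, treats the node as both head and tail, and overwrites both ends of the list with nil:

package main

import "fmt"

// Node and List are simplified stand-ins for ccache's internal list types.
type Node struct {
	Value      string
	Prev, Next *Node
}

type List struct {
	Head, Tail *Node
}

// Insert pushes a new node onto the head of the list.
func (l *List) Insert(value string) *Node {
	n := &Node{Value: value, Next: l.Head}
	if l.Head != nil {
		l.Head.Prev = n
	} else {
		l.Tail = n
	}
	l.Head = n
	return n
}

// Remove splices a node out unconditionally; it assumes it is called
// at most once for any given node.
func (l *List) Remove(n *Node) {
	if n.Prev == nil {
		l.Head = n.Next
	} else {
		n.Prev.Next = n.Next
	}
	if n.Next == nil {
		l.Tail = n.Prev
	} else {
		n.Next.Prev = n.Prev
	}
	n.Prev, n.Next = nil, nil
}

func main() {
	l := &List{}
	l.Insert("a")
	b := l.Insert("b")
	l.Insert("c")

	l.Remove(b) // first removal, e.g. during gc
	l.Remove(b) // second removal, e.g. a later delete/update of the same entry

	// Both ends are now nil even though "a" and "c" were never removed.
	fmt.Println(l.Head == nil, l.Tail == nil) // true true
}

Clearing item.node during GC makes the later doDelete skip the second Remove call, mirroring the item.node == nil guard that doDelete already had.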
cache.go
@@ -438,6 +438,7 @@ func (c *Cache[T]) gc() int {
 				c.onDelete(item)
 			}
 			dropped += 1
+			item.node = nil
 			item.promotions = -2
 		}
 		node = prev
cache_test.go
@@ -1,6 +1,7 @@
 package ccache

 import (
+	"math/rand"
 	"sort"
 	"strconv"
 	"sync/atomic"
@@ -313,6 +314,29 @@ func Test_CacheForEachFunc(t *testing.T) {
 	assert.DoesNotContain(t, forEachKeys(cache), "stop")
 }

+func Test_CachePrune(t *testing.T) {
+	maxSize := int64(500)
+	cache := New(Configure[string]().MaxSize(maxSize).ItemsToPrune(50))
+	epoch := 0
+	for i := 0; i < 10000; i++ {
+		epoch += 1
+		expired := make([]string, 0)
+		for i := 0; i < 50; i += 1 {
+			key := strconv.FormatInt(rand.Int63n(maxSize*20), 10)
+			item := cache.Get(key)
+			if item == nil || item.TTL() > 1*time.Minute {
+				expired = append(expired, key)
+			}
+		}
+		for _, key := range expired {
+			cache.Set(key, key, 5*time.Minute)
+		}
+		if epoch%500 == 0 {
+			assert.True(t, cache.GetSize() < 500)
+		}
+	}
+}
+
 type SizedItem struct {
 	id int
 	s  int64
layeredcache.go
@@ -355,6 +355,7 @@ func (c *LayeredCache[T]) gc() int {
 			if c.onDelete != nil {
 				c.onDelete(item)
 			}
+			item.node = nil
 			item.promotions = -2
 			dropped += 1
 		}
layeredcache_test.go
@@ -1,6 +1,7 @@
 package ccache

 import (
+	"math/rand"
 	"sort"
 	"strconv"
 	"sync/atomic"
@@ -372,6 +373,29 @@ func Test_LayeredCache_EachFunc(t *testing.T) {
 	assert.DoesNotContain(t, forEachKeysLayered[int](cache, "1"), "stop")
 }

+func Test_LayeredCachePrune(t *testing.T) {
+	maxSize := int64(500)
+	cache := Layered(Configure[string]().MaxSize(maxSize).ItemsToPrune(50))
+	epoch := 0
+	for i := 0; i < 10000; i++ {
+		epoch += 1
+		expired := make([]string, 0)
+		for i := 0; i < 50; i += 1 {
+			key := strconv.FormatInt(rand.Int63n(maxSize*20), 10)
+			item := cache.Get(key, key)
+			if item == nil || item.TTL() > 1*time.Minute {
+				expired = append(expired, key)
+			}
+		}
+		for _, key := range expired {
+			cache.Set(key, key, key, 5*time.Minute)
+		}
+		if epoch%500 == 0 {
+			assert.True(t, cache.GetSize() < 500)
+		}
+	}
+}
+
 func newLayered[T any]() *LayeredCache[T] {
 	c := Layered[T](Configure[T]())
 	c.Clear()