change to an intrinsic linked list for less memory usage

Karl Seguin
2024-11-12 09:52:11 +08:00
parent 0901f94888
commit 61f506609d
8 changed files with 102 additions and 103 deletions
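The change swaps the extrinsic list used for LRU ordering for an intrinsic one: instead of wrapping every cached entry in a separately allocated Node[*Item[T]] and keeping a back-pointer (item.node) so the cache can find that node again, the Item[T] now carries its own next/prev links. That saves one allocation and one pointer indirection per cached entry. A minimal sketch of the two layouts, using the names from the hunks below:

// Before: extrinsic list -- every entry pays for a wrapper node, and the item
// keeps a back-pointer (item.node) so the cache can find that node again.
type Node[T any] struct {
	Next  *Node[T]
	Prev  *Node[T]
	Value T
}

// After: intrinsic list -- the item is its own list node, so list membership
// costs no extra allocation and no extra dereference.
type Item[T any] struct {
	// key, group, value, expires, size, promotions, refCount, ... (unchanged)
	next *Item[T]
	prev *Item[T]
}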


@@ -93,7 +93,7 @@ func (b *bucket[T]) set(key string, value T, duration time.Duration, track bool)
return item, existing
}
func (b *bucket[T]) delete(key string) *Item[T] {
func (b *bucket[T]) remove(key string) *Item[T] {
b.Lock()
item := b.lookup[key]
delete(b.lookup, key)
@@ -101,6 +101,12 @@ func (b *bucket[T]) delete(key string) *Item[T] {
return item
}
func (b *bucket[T]) delete(key string) {
b.Lock()
delete(b.lookup, key)
b.Unlock()
}
// This is an expensive operation, so we do what we can to optimize it and limit
// the impact it has on concurrent operations. Specifically, we:
// 1 - Do an initial iteration to collect matches. This allows us to do the


@@ -10,7 +10,7 @@ import (
type Cache[T any] struct {
*Configuration[T]
control
list *List[*Item[T]]
list *List[T]
size int64
buckets []*bucket[T]
bucketMask uint32
@@ -22,7 +22,7 @@ type Cache[T any] struct {
// See ccache.Configure() for creating a configuration
func New[T any](config *Configuration[T]) *Cache[T] {
c := &Cache[T]{
list: NewList[*Item[T]](),
list: NewList[T](),
Configuration: config,
control: newControl(),
bucketMask: uint32(config.buckets) - 1,
@@ -184,7 +184,7 @@ func (c *Cache[T]) Fetch(key string, duration time.Duration, fetch func() (T, er
// Remove the item from the cache, return true if the item was present, false otherwise.
func (c *Cache[T]) Delete(key string) bool {
item := c.bucket(key).delete(key)
item := c.bucket(key).remove(key)
if item != nil {
c.deletables <- item
return true
@@ -269,7 +269,7 @@ func (c *Cache[T]) worker() {
bucket.clear()
}
c.size = 0
c.list = NewList[*Item[T]]()
c.list = NewList[T]()
})
msg.done <- struct{}{}
case controlGetSize:
@@ -327,15 +327,14 @@ doAllDeletes:
}
func (c *Cache[T]) doDelete(item *Item[T]) {
if item.node == nil {
if item.next == nil && item.prev == nil {
item.promotions = -2
} else {
c.size -= item.size
if c.onDelete != nil {
c.onDelete(item)
}
c.list.Remove(item.node)
item.node = nil
c.list.Remove(item)
item.promotions = -2
}
}
@@ -345,22 +344,23 @@ func (c *Cache[T]) doPromote(item *Item[T]) bool {
if item.promotions == -2 {
return false
}
if item.node != nil { //not a new item
if item.next != nil || item.prev != nil { // not a new item
if item.shouldPromote(c.getsPerPromote) {
c.list.MoveToFront(item.node)
c.list.MoveToFront(item)
item.promotions = 0
}
return false
}
c.size += item.size
item.node = c.list.Insert(item)
c.list.Insert(item)
return true
}
func (c *Cache[T]) gc() int {
dropped := 0
node := c.list.Tail
item := c.list.Tail
itemsToPrune := int64(c.itemsToPrune)
if min := c.size - c.maxSize; min > itemsToPrune {
@@ -368,23 +368,21 @@ func (c *Cache[T]) gc() int {
}
for i := int64(0); i < itemsToPrune; i++ {
if node == nil {
if item == nil {
return dropped
}
prev := node.Prev
item := node.Value
prev := item.prev
if !c.tracking || atomic.LoadInt32(&item.refCount) == 0 {
c.bucket(item.key).delete(item.key)
c.bucket(item.key).remove(item.key)
c.size -= item.size
c.list.Remove(node)
c.list.Remove(item)
if c.onDelete != nil {
c.onDelete(item)
}
dropped += 1
item.node = nil
item.promotions = -2
}
node = prev
item = prev
}
return dropped
}
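With the wrapper node gone, "is this item already on the list?" can no longer be answered by item.node == nil. doDelete and doPromote now infer membership from the embedded links: both nil is treated as "not on the list", which holds because List.Remove resets both pointers when it unlinks an item. Reconstructed from the interleaved lines above, doPromote now reads roughly:

func (c *Cache[T]) doPromote(item *Item[T]) bool {
	// -2 marks an item that has already been deleted
	if item.promotions == -2 {
		return false
	}
	if item.next != nil || item.prev != nil { // not a new item
		if item.shouldPromote(c.getsPerPromote) {
			c.list.MoveToFront(item)
			item.promotions = 0
		}
		return false
	}
	// new item: account for its size and push it to the front of the list
	c.size += item.size
	c.list.Insert(item)
	return true
}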

item.go

@@ -27,7 +27,8 @@ type Item[T any] struct {
expires int64
size int64
value T
node *Node[*Item[T]]
next *Item[T]
prev *Item[T]
}
func newItem[T any](key string, value T, expires int64, track bool) *Item[T] {
@@ -37,6 +38,7 @@ func newItem[T any](key string, value T, expires int64, track bool) *Item[T] {
if sized, ok := (interface{})(value).(Sized); ok {
size = sized.Size()
}
item := &Item[T]{
key: key,
value: value,
@@ -97,5 +99,9 @@ func (i *Item[T]) Extend(duration time.Duration) {
// fmt.Sprintf expression could cause fields of the Item to be read in a non-thread-safe
// way.
func (i *Item[T]) String() string {
return fmt.Sprintf("Item(%v)", i.value)
group := i.group
if group == "" {
return fmt.Sprintf("Item(%s:%v)", i.key, i.value)
}
return fmt.Sprintf("Item(%s:%s:%v)", group, i.key, i.value)
}
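String() now includes the key, and the group as well when one is set, which makes layered-cache items easier to tell apart in logs. An illustrative in-package example (values made up):

item := newItem("power", 9001, 0, false)
fmt.Println(item) // Item(power:9001)

item.group = "users"
fmt.Println(item) // Item(users:power:9001)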


@@ -51,14 +51,24 @@ func (b *layeredBucket[T]) set(primary, secondary string, value T, duration time
return item, existing
}
func (b *layeredBucket[T]) delete(primary, secondary string) *Item[T] {
func (b *layeredBucket[T]) remove(primary, secondary string) *Item[T] {
b.RLock()
bucket, exists := b.buckets[primary]
b.RUnlock()
if !exists {
return nil
}
return bucket.delete(secondary)
return bucket.remove(secondary)
}
func (b *layeredBucket[T]) delete(primary, secondary string) {
b.RLock()
bucket, exists := b.buckets[primary]
b.RUnlock()
if !exists {
return
}
bucket.delete(secondary)
}
func (b *layeredBucket[T]) deletePrefix(primary, prefix string, deletables chan *Item[T]) int {
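The layered bucket gets the same split, with the extra step of finding the per-group bucket first. Both methods use two-phase locking: the outer map of buckets is read-locked only long enough to look up the group, then the inner bucket takes its own write lock. Untangled, the new pair reads:

func (b *layeredBucket[T]) remove(primary, secondary string) *Item[T] {
	b.RLock()
	bucket, exists := b.buckets[primary]
	b.RUnlock()
	if !exists {
		return nil
	}
	return bucket.remove(secondary)
}

func (b *layeredBucket[T]) delete(primary, secondary string) {
	b.RLock()
	bucket, exists := b.buckets[primary]
	b.RUnlock()
	if !exists {
		return
	}
	bucket.delete(secondary)
}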


@@ -10,7 +10,7 @@ import (
type LayeredCache[T any] struct {
*Configuration[T]
control
list *List[*Item[T]]
list *List[T]
buckets []*layeredBucket[T]
bucketMask uint32
size int64
@@ -33,7 +33,7 @@ type LayeredCache[T any] struct {
// See ccache.Configure() for creating a configuration
func Layered[T any](config *Configuration[T]) *LayeredCache[T] {
c := &LayeredCache[T]{
list: NewList[*Item[T]](),
list: NewList[T](),
Configuration: config,
control: newControl(),
bucketMask: uint32(config.buckets) - 1,
@@ -158,7 +158,7 @@ func (c *LayeredCache[T]) Fetch(primary, secondary string, duration time.Duratio
// Remove the item from the cache, return true if the item was present, false otherwise.
func (c *LayeredCache[T]) Delete(primary, secondary string) bool {
item := c.bucket(primary).delete(primary, secondary)
item := c.bucket(primary).remove(primary, secondary)
if item != nil {
c.deletables <- item
return true
@@ -262,7 +262,7 @@ func (c *LayeredCache[T]) worker() {
bucket.clear()
}
c.size = 0
c.list = NewList[*Item[T]]()
c.list = NewList[T]()
})
msg.done <- struct{}{}
case controlGetSize:
@@ -289,15 +289,14 @@ drain:
}
func (c *LayeredCache[T]) doDelete(item *Item[T]) {
if item.node == nil {
if item.prev == nil && item.next == nil {
item.promotions = -2
} else {
c.size -= item.size
if c.onDelete != nil {
c.onDelete(item)
}
c.list.Remove(item.node)
item.node = nil
c.list.Remove(item)
item.promotions = -2
}
}
@@ -307,45 +306,45 @@ func (c *LayeredCache[T]) doPromote(item *Item[T]) bool {
if item.promotions == -2 {
return false
}
if item.node != nil { //not a new item
if item.next != nil || item.prev != nil { // not a new item
if item.shouldPromote(c.getsPerPromote) {
c.list.MoveToFront(item.node)
c.list.MoveToFront(item)
item.promotions = 0
}
return false
}
c.size += item.size
item.node = c.list.Insert(item)
c.list.Insert(item)
return true
}
func (c *LayeredCache[T]) gc() int {
node := c.list.Tail
dropped := 0
itemsToPrune := int64(c.itemsToPrune)
item := c.list.Tail
itemsToPrune := int64(c.itemsToPrune)
if min := c.size - c.maxSize; min > itemsToPrune {
itemsToPrune = min
}
for i := int64(0); i < itemsToPrune; i++ {
if node == nil {
if item == nil {
return dropped
}
prev := node.Prev
item := node.Value
prev := item.prev
if !c.tracking || atomic.LoadInt32(&item.refCount) == 0 {
c.bucket(item.group).delete(item.group, item.key)
c.bucket(item.group).remove(item.group, item.key)
c.size -= item.size
c.list.Remove(node)
c.list.Remove(item)
if c.onDelete != nil {
c.onDelete(item)
}
item.node = nil
item.promotions = -2
dropped += 1
item.promotions = -2
}
node = prev
item = prev
}
return dropped
}
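Because the deleted and added lines are interleaved above, here is roughly how the pruning loop reads after the change: it walks the items themselves from the tail via the embedded prev pointers, and each pruned item has its lookup entry removed, its size subtracted, and its links cleared by List.Remove:

func (c *LayeredCache[T]) gc() int {
	dropped := 0
	item := c.list.Tail
	itemsToPrune := int64(c.itemsToPrune)
	if min := c.size - c.maxSize; min > itemsToPrune {
		itemsToPrune = min
	}
	for i := int64(0); i < itemsToPrune; i++ {
		if item == nil {
			return dropped
		}
		prev := item.prev
		if !c.tracking || atomic.LoadInt32(&item.refCount) == 0 {
			c.bucket(item.group).remove(item.group, item.key)
			c.size -= item.size
			c.list.Remove(item)
			if c.onDelete != nil {
				c.onDelete(item)
			}
			dropped += 1
			item.promotions = -2
		}
		item = prev
	}
	return dropped
}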

list.go

@@ -1,57 +1,45 @@
package ccache
type List[T any] struct {
Head *Node[T]
Tail *Node[T]
Head *Item[T]
Tail *Item[T]
}
func NewList[T any]() *List[T] {
return &List[T]{}
}
func (l *List[T]) Remove(node *Node[T]) {
next := node.Next
prev := node.Prev
func (l *List[T]) Remove(item *Item[T]) {
next := item.next
prev := item.prev
if next == nil {
l.Tail = node.Prev
l.Tail = prev
} else {
next.Prev = prev
next.prev = prev
}
if prev == nil {
l.Head = node.Next
l.Head = next
} else {
prev.Next = next
prev.next = next
}
node.Next = nil
node.Prev = nil
item.next = nil
item.prev = nil
}
func (l *List[T]) MoveToFront(node *Node[T]) {
l.Remove(node)
l.nodeToFront(node)
func (l *List[T]) MoveToFront(item *Item[T]) {
l.Remove(item)
l.Insert(item)
}
func (l *List[T]) Insert(value T) *Node[T] {
node := &Node[T]{Value: value}
l.nodeToFront(node)
return node
}
func (l *List[T]) nodeToFront(node *Node[T]) {
func (l *List[T]) Insert(item *Item[T]) {
head := l.Head
l.Head = node
l.Head = item
if head == nil {
l.Tail = node
l.Tail = item
return
}
node.Next = head
head.Prev = node
}
type Node[T any] struct {
Next *Node[T]
Prev *Node[T]
Value T
item.next = head
head.prev = item
}
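The list API now takes *Item[T] directly: Insert no longer allocates a node or returns one, MoveToFront is just Remove followed by Insert, and the Node type plus the old nodeToFront helper disappear. A small in-package usage sketch, mirroring the updated tests below:

l := NewList[int]()
a := newItem("a", 1, 0, false)
b := newItem("b", 2, 0, false)

l.Insert(a)      // list: a
l.Insert(b)      // list: b, a   (Insert pushes to the front)
l.MoveToFront(a) // list: a, b   (Remove followed by Insert)
l.Remove(b)      // list: a      (b.next and b.prev are reset to nil)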


@@ -10,13 +10,13 @@ func Test_List_Insert(t *testing.T) {
l := NewList[int]()
assertList(t, l)
l.Insert(1)
l.Insert(newItem("a", 1, 0, false))
assertList(t, l, 1)
l.Insert(2)
l.Insert(newItem("b", 2, 0, false))
assertList(t, l, 2, 1)
l.Insert(3)
l.Insert(newItem("c", 3, 0, false))
assertList(t, l, 3, 2, 1)
}
@@ -24,15 +24,21 @@ func Test_List_Remove(t *testing.T) {
l := NewList[int]()
assertList(t, l)
node := l.Insert(1)
l.Remove(node)
item := newItem("a", 1, 0, false)
l.Insert(item)
l.Remove(item)
assertList(t, l)
n5 := l.Insert(5)
n4 := l.Insert(4)
n3 := l.Insert(3)
n2 := l.Insert(2)
n1 := l.Insert(1)
n5 := newItem("e", 5, 0, false)
l.Insert(n5)
n4 := newItem("d", 4, 0, false)
l.Insert(n4)
n3 := newItem("c", 3, 0, false)
l.Insert(n3)
n2 := newItem("b", 2, 0, false)
l.Insert(n2)
n1 := newItem("a", 1, 0, false)
l.Insert(n1)
l.Remove(n5)
assertList(t, l, 1, 2, 3, 4)
@@ -50,20 +56,6 @@ func Test_List_Remove(t *testing.T) {
assertList(t, l)
}
func Test_List_MoveToFront(t *testing.T) {
l := NewList[int]()
n1 := l.Insert(1)
l.MoveToFront(n1)
assertList(t, l, 1)
n2 := l.Insert(2)
l.MoveToFront(n1)
assertList(t, l, 1, 2)
l.MoveToFront(n2)
assertList(t, l, 2, 1)
}
func assertList(t *testing.T, list *List[int], expected ...int) {
t.Helper()
@@ -75,13 +67,13 @@ func assertList(t *testing.T, list *List[int], expected ...int) {
node := list.Head
for _, expected := range expected {
assert.Equal(t, node.Value, expected)
node = node.Next
assert.Equal(t, node.value, expected)
node = node.next
}
node = list.Tail
for i := len(expected) - 1; i >= 0; i-- {
assert.Equal(t, node.Value, expected[i])
node = node.Prev
assert.Equal(t, node.value, expected[i])
node = node.prev
}
}


@@ -41,7 +41,7 @@ func (s *SecondaryCache[T]) Fetch(secondary string, duration time.Duration, fetc
// Delete a secondary key.
// The semantics are the same as for LayeredCache.Delete
func (s *SecondaryCache[T]) Delete(secondary string) bool {
item := s.bucket.delete(secondary)
item := s.bucket.remove(secondary)
if item != nil {
s.pCache.deletables <- item
return true