77 Commits

Author SHA1 Message Date
Karl Seguin
f9779b45fc use delete instead of remove where possible 2024-11-12 10:22:32 +08:00
Karl Seguin
61f506609d change to an intrinsic linked list for less memory usage 2024-11-12 09:52:42 +08:00
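An intrinsic (intrusive) list stores the next/prev links inside the cached item itself, so the cache no longer allocates a separate list node per entry. A minimal sketch of the idea, using simplified versions of the Item[T] and List[T] shapes that appear in the diffs below (the real types carry more fields):

type Item[T any] struct {
	key        string
	value      T
	next, prev *Item[T] // intrusive links: the item is its own list node
}

type List[T any] struct {
	Head, Tail *Item[T]
}

// Insert pushes item to the front of the list.
func (l *List[T]) Insert(item *Item[T]) {
	item.prev = nil
	item.next = l.Head
	if l.Head != nil {
		l.Head.prev = item
	}
	l.Head = item
	if l.Tail == nil {
		l.Tail = item
	}
}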
Karl Seguin
0901f94888 Merge pull request #93 from chenyijun266846/perf_setnx
perf: add setnx2
2024-10-01 10:38:43 +08:00
chenyijun.266846
a47156de7d feat: add existing for setnx2 2024-09-29 15:07:02 +08:00
chenyijun.266846
c0806d27fe fix(setnx2): promotables 2024-09-29 14:46:58 +08:00
chenyijun.266846
d59160ba1c perf: add setnx2 2024-09-29 11:18:20 +08:00
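Setnx2 differs from Setnx in that it takes a constructor function rather than a value, so the value is only built when the key is actually absent (see the bucket.go diff below, where the lookup is re-checked under the write lock). A hedged usage sketch; loadConfigFromDisk is a hypothetical expensive call:

// The closure runs only on a miss; on a hit the cached item is returned.
item := cache.Setnx2("config", func() string {
	return loadConfigFromDisk()
}, time.Minute)
fmt.Println(item.Value())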
Karl Seguin
e9a80ae138 Merge pull request #91 from miparnisari/bench-cpus
ensure benchmarks are compared if the CPUs are the same
2023-11-27 07:58:24 +08:00
Maria Ines Parnisari
964f899bf4 ensure benchmarks are compared if the CPUs are the same 2023-11-23 11:29:35 -08:00
Karl Seguin
3aa6a053b7 Make test more robust
Attempt to fix: https://github.com/karlseguin/ccache/issues/90
2023-11-23 09:48:42 +08:00
Maria Ines Parnisari
6d135b03a9 feat: add benchmarks 2023-11-23 09:12:34 +08:00
Karl Seguin
bf904fff3c fix make c on macos 2023-11-21 11:42:34 +08:00
Maria Ines Parnisari
de3e573a65 run tests and linter on every PR 2023-11-20 13:44:20 -08:00
Karl Seguin
7b5dfadcde Merge pull request #87 from craigpastro/patch-1
Update readme.md
2023-10-24 08:34:04 +08:00
Craig Pastro
2ad5f8fe86 Fix typo 2023-10-23 10:23:25 -07:00
Craig Pastro
c56472f9b5 Update readme.md
Generic version has landed and that branch no longer exists.
2023-10-23 10:20:28 -07:00
Karl Seguin
378b8b039e Merge pull request #86 from rfyiamcool/feat/public_extend
feat: public extend
2023-10-23 10:51:45 +08:00
rfyiamcool
1594fc55bc feat: public extend
Signed-off-by: rfyiamcool <rfyiamcool@163.com>
2023-10-22 22:19:27 +08:00
rfyiamcool
2977b36b74 feat: public extend
Signed-off-by: rfyiamcool <rfyiamcool@163.com>
2023-10-22 22:14:28 +08:00
Karl Seguin
62cd8cc8c3 Merge pull request #85 from rfyiamcool/feat/add_setnx
feat: add setnx (if not exists, set kv)
2023-10-22 20:19:26 +08:00
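Setnx ("set if not exists") writes only when the key is absent; the behavior sketched here mirrors the Test_Setnx case added in cache_test.go below:

cache.Set("spice", "flow", time.Minute)
cache.Setnx("spice", "worm", time.Minute) // key exists: value stays "flow"
cache.Delete("spice")
cache.Setnx("spice", "worm", time.Minute) // key absent: value becomes "worm"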
rfyiamcool
b26c342793 feat: add setnx (if not exists, set kv)
Signed-off-by: rfyiamcool <rfyiamcool@163.com>
2023-10-22 19:23:23 +08:00
rfyiamcool
dd0671989b feat: add setnx (if not exists, set kv)
Signed-off-by: rfyiamcool <rfyiamcool@163.com>
2023-10-20 10:36:04 +08:00
Karl Seguin
0f8575167d Merge pull request #84 from idsulik/added-key-method-to-item
Added Key() method to Item
2023-10-20 06:51:10 +08:00
Suleiman Dibirov
fd8f81fe86 Added Key() method to Item 2023-10-19 12:16:13 +03:00
Karl Seguin
a25552af28 Attempt to make Clear concurrency-safe
This is an attempt at fixing #81 without imposing a performance hit on the
cache's "normal" (get/set/fetch) activity. Calling "Clear" is now considerably
more expensive.
2023-04-14 15:27:39 +08:00
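The approach, visible in the cache.go and control.go diffs below: Clear is now a control message handled by the worker, which write-locks every bucket, drains the pending promote/delete buffers, and resets the list before replying. A sketch of the caller-side round-trip, assuming the done-channel pattern the other control commands use:

func (c control) Clear() {
	done := make(chan struct{})
	c <- controlClear{done: done}
	<-done // blocks until the worker has halted, cleared, and unhalted
}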
Karl Seguin
35052434f3 Merge pull request #78 from karlseguin/control_stop
Refactor control messages + Stop handling
2023-01-07 12:12:27 +08:00
Karl Seguin
22776be1ee Refactor control messages + Stop handling
Move the control API shared between Cache and LayeredCache into its own struct.
But keep the control logic handling separate - it requires access to the local
values, like dropped and deleteItem.

Stop is now a control message. Channels are no longer closed as part of the stop
process.
2023-01-04 10:40:19 +08:00
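A sketch of what Stop looks like as a control message, consistent with the worker's controlStop handling in the cache.go diff below (the actual method body isn't included in this diff):

func (c control) Stop() {
	c <- controlStop{} // the worker jumps to its drain loop and exits; no channel is closed
}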
Karl Seguin
ece93bf87d On delete, always set promotions == -2 and node == nil
Also, item.promotions doesn't need to be loaded/stored using atomic. Once upon a
time it did. Cache was updated long ago to not use atomic operations on it, but
LayeredCache wasn't. They are both consistent now (they don't use atomic
operations).

Fixes: https://github.com/karlseguin/ccache/issues/76
2022-12-13 20:33:58 +08:00
Karl Seguin
3452e4e261 Fix memory leak
As documented in https://github.com/karlseguin/ccache/issues/76, an entry which
is both GC'd and deleted (either via a delete or an update) will result in the
internal linked list having a nil tail (because removing the same node multiple times
from the linked list does that).

doDelete was already aware of "invalid" nodes (where item.node == nil), so the
solution seems to be as simple as setting item.node = nil during GC.
2022-11-19 08:15:48 +08:00
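To see how a double removal nils the tail, consider a guard-free Remove on a doubly-linked list; this is an illustration of the failure mode, not ccache's exact code:

func (l *List[T]) Remove(item *Item[T]) {
	if item.prev == nil {
		l.Head = item.next
	} else {
		item.prev.next = item.next
	}
	if item.next == nil {
		l.Tail = item.prev
	} else {
		item.next.prev = item.prev
	}
	item.next, item.prev = nil, nil
	// A second Remove of the same item sees nil prev/next, takes both the
	// head and tail branches again, and sets Head and Tail to nil even
	// though other items are still linked.
}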
Karl Seguin
4d3a63decc always run tests with -race and without caching 2022-10-23 13:21:05 +08:00
Karl Seguin
b1107e7097 Merge branch 'master' into generic 2022-10-23 13:20:22 +08:00
Karl Seguin
faaa8b2a26 clean up readme 2022-10-23 13:11:08 +08:00
Karl Seguin
42f36769a8 Merge pull request #72 from bep/fix/ondelete
Call OnDelete in LayeredCache.gc
2022-07-16 12:44:17 +00:00
Bjørn Erik Pedersen
1cee43040c Call OnDelete in LayeredCache.gc
Fixes #71
2022-07-15 17:04:41 +02:00
Karl Seguin
67d3f75750 Merge pull request #69 from karlseguin/GetWithoutPromote
Add GetWithoutPromote
2022-07-01 08:22:26 +00:00
Karl Seguin
2b2801cd19 note about not using the generic version yet 2022-03-03 22:40:57 +08:00
Karl Seguin
c4d364ba51 rely more on generic inference 2022-03-03 20:47:12 +08:00
Karl Seguin
516d62ed5f add generic list 2022-03-03 19:04:16 +08:00
Karl Seguin
95f74b4e85 remove dependency on expect library 2022-03-03 09:01:45 +08:00
Karl Seguin
92cae1a07a minor update to docs 2022-03-02 21:30:56 +08:00
Karl Seguin
da9aa857e2 add note about generic branch 2022-03-02 21:28:22 +08:00
Karl Seguin
e838337a8b Initial pass at leveraging generics
Still need to replace the linked list with a generic linked list and
want to remove the dependency on the expect package.
2022-03-02 21:26:07 +08:00
Karl Seguin
2d2b21ad8c Merge pull request #70 from nwtgck/remove-deps
Remove unused dependencies
2022-02-15 09:14:58 +08:00
Ryo Ota
970a298698 Remove unused dependencies 2022-02-14 22:34:11 +09:00
Karl Seguin
451f5a6e42 Add GetWithoutPromote
This gets the value without promoting it (sort of like a peek)
2022-01-24 12:51:35 +08:00
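A usage sketch (the key is hypothetical): inspect a value without counting the access toward LRU recency:

if item := cache.GetWithoutPromote("user:42"); item != nil && !item.Expired() {
	fmt.Println(item.Value())
}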
Karl Seguin
ef4bd54683 Always promote on set
It's fine to conditionally promote on Get, to avoid blocking on a get
(see: https://github.com/karlseguin/ccache/pull/52) but a set _must_
promote, or else we can end up with an entry in our buckets that isn't in our
list.

issue: https://github.com/karlseguin/ccache/issues/64
2021-06-13 10:15:28 +08:00
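The asymmetry, as it appears in the cache.go diff below: the set path uses a plain blocking send, while Get uses the non-blocking select/default shown at the PR #52 commit further down:

// set-side: must not be skipped, or the new entry would sit in a bucket
// but never be linked into the recency list (and so never be evicted).
c.promotables <- item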
Karl Seguin
325d078286 move GC and GetSize to control commands 2021-03-20 18:57:11 +08:00
Karl Seguin
934f76bc44 Merge pull request #60 from launchdarkly/sync-updates
add SyncUpdates method to synchronize with worker thread, and use it in tests
2021-03-20 12:25:58 +08:00
Eli Bishop
6453d332ba Merge branch 'upstream-mirror' into sync-updates
# Conflicts:
#	layeredcache_test.go
2021-03-18 20:27:43 -07:00
Eli Bishop
b2a868314a revert changes not relevant to the SyncUpdates branch 2021-03-18 19:53:21 -07:00
Eli Bishop
c1fb5be323 add SyncUpdates method to synchronize with worker thread, and use it in tests 2021-03-18 16:09:30 -07:00
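SyncUpdates blocks until the worker has processed everything the calling goroutine already sent on the promote/delete buffers, replacing the time.Sleep calls the tests used before. For example:

cache.Set("spice", "flow", time.Minute)
cache.SyncUpdates() // the worker has now linked the item into its list
cache.GC()          // deterministic: GC sees the item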
Karl Seguin
df2d98315c Conditionally prune more than itemsToPrune items
It's possible, though unlikely, that c.size will be larger than
c.maxSize by more than c.itemsToPrune. The most likely case where this
can happen is when using SetMaxSize to dynamically adjust the cache
size. The gc will now always clear to at least c.maxSize.
2021-03-18 19:29:04 +08:00
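Worked example: with maxSize = 500 and itemsToPrune = 50, a SetMaxSize call that leaves size = 600 must drop 100 items, not 50. The gc in the cache.go diff below therefore prunes the larger of the two:

itemsToPrune := int64(c.itemsToPrune)
if min := c.size - c.maxSize; min > itemsToPrune {
	itemsToPrune = min // clear down to at least c.maxSize
}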
Karl Seguin
f28a7755a1 document the simplicity of fetch 2021-03-18 18:45:44 +08:00
Karl Seguin
ae1872d700 add ForEachFunc 2021-02-05 19:24:54 +08:00
Karl Seguin
36ffada8b5 Merge pull request #52 from gopalmor/master
Avoid blocking if promotables channel is full.
2020-12-10 18:49:34 +08:00
gopalmor
36d03ce88e Avoid blocking if promotables channel is full.
In rare situations it is possible for the `promotables` channel to be full. In that case the `Get` function will block, because it calls the `promote` function. A blocked `Get` defeats the purpose of a fast cache response and may impact the application code in unexpected ways.
In this commit, the `promote` function is modified to use non-blocking channel send construct.
2020-12-09 23:05:20 -08:00
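The non-blocking send construct in question (it appears in the Get path of the cache.go diff below):

select {
case c.promotables <- item:
default: // buffer full: skip this promotion rather than stall the Get
}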
Karl Seguin
b779edb2ca Merge pull request #50 from imxyb/remove-int
remove useless int func
2020-11-18 18:01:52 +08:00
Karl Seguin
d9f8808f13 Merge pull request #51 from BlueMonday/マッハGoGoGo
Fix race condition in Cache.Get()
2020-11-18 17:58:29 +08:00
Steven Santos Erenst
97e7acb2af Fix race condition in Cache.Get()
Cache.Get() accesses the expires field in the Item without any sort of locking
or atomicity. This is an issue because Item.Extend() can be called from a
separate thread and cause a race condition.
2020-11-18 01:56:24 -08:00
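The usual fix for this shape of race, and the one this commit implies, is to make both sides of the expires field atomic; a hedged sketch, since item.go itself isn't part of this diff:

// Assumed layout: expires is an int64 UnixNano shared across goroutines.
func (i *Item[T]) Extend(duration time.Duration) {
	atomic.StoreInt64(&i.expires, time.Now().Add(duration).UnixNano())
}

func (i *Item[T]) Expired() bool {
	return atomic.LoadInt64(&i.expires) < time.Now().UnixNano()
}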
imxyb
5fe99ab07a remove useless int func 2020-11-02 22:23:14 +08:00
Karl Seguin
1189f7f993 make Clear thread-safe 2020-08-16 21:12:47 +08:00
Karl Seguin
839a17bedb Remove impossible race conditions from test
This makes the output of go test --race less noisy.
2020-08-16 19:07:52 +08:00
Karl Seguin
0dbf3f125f add TrackingSet to LayeredCache 2020-08-14 11:15:13 +08:00
Karl Seguin
f3b2b9fd88 Merge pull request #48 from sargun/master
Add TrackingSet method
2020-08-14 11:03:30 +08:00
Karl Seguin
aa0e37ad6f Merge pull request #47 from bep/type-deletefunc
Use typed *Item in DeleteFunc
2020-08-14 10:56:02 +08:00
Sargun Dhillon
df91803297 Add TrackingSet method
This method reduces the likelihood of a race condition where
you can add a (tracked) item to the cache, and the item isn't
the item you thought it was.
2020-08-13 10:43:38 -07:00
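The race being closed: Set followed by TrackingGet is two steps, and another goroutine can replace the key in between, so the reference you track may not be the item you wrote. TrackingSet does both in one call; a usage sketch in the current generic API shown in the diffs below:

cache := ccache.New(ccache.Configure[string]().Track())
item := cache.TrackingSet("spice", "flow", time.Minute) // set + track in one step
defer item.Release()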
Bjørn Erik Pedersen
a42bd4a9c8 Use typed *Item in DeleteFunc 2020-08-13 16:10:22 +02:00
Karl Seguin
e9b7be5016 remove race condition 2020-08-13 15:49:28 +08:00
Karl Seguin
fdd08e71c4 Merge pull request #46 from bep/document-deletefunc
Document DeleteFunc
2020-08-13 15:47:33 +08:00
Bjørn Erik Pedersen
992cd9564b Document DeleteFunc 2020-08-13 09:05:27 +02:00
Karl Seguin
f63031fa40 Merge pull request #45 from bep/DeleteFunc
Add DeleteFunc
2020-08-13 10:37:20 +08:00
Bjørn Erik Pedersen
d56665a86e Add DeleteFunc
This shares DeletePrefix's implementation.
2020-08-12 17:47:11 +02:00
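A usage sketch (the "session:" prefix is hypothetical); as the bucket.go diff below shows, DeletePrefix is now just DeleteFunc with strings.HasPrefix:

n := cache.DeleteFunc(func(key string, item *ccache.Item[string]) bool {
	return strings.HasPrefix(key, "session:")
})
fmt.Printf("deleted %d sessions\n", n)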
Karl Seguin
223703f7f0 Merge pull request #44 from bep/layered-delete-prefix
Add DeletePrefix to LayeredCache
2020-08-12 08:39:47 +08:00
Bjørn Erik Pedersen
a24d7f8c53 Add DeletePrefix to LayeredCache 2020-08-11 19:04:54 +02:00
Karl Seguin
3b58df727e Merge pull request #43 from jonathonlacher/patch-1
fix spelling in readme
2020-07-17 14:07:50 +08:00
Jonathon Lacher
4c88bf60e6 fix spelling in readme 2020-07-16 15:15:43 -05:00
Karl Seguin
eab9dbaa7f update readme to /v2 2020-06-29 20:50:55 +08:00
Karl Seguin
937ca294e6 go mod version 2020-06-29 15:22:50 +08:00
25 changed files with 2048 additions and 786 deletions

.github/workflows/master.yaml (new file, 50 lines)

@@ -0,0 +1,50 @@
name: Master
on:
  push:
    branches:
      - master
permissions:
  contents: read
jobs:
  bench:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v4
        with:
          go-version-file: './go.mod'
      - name: Run benchmark and store the output to a file
        run: |
          set -o pipefail
          make bench | tee bench_output.txt
      - name: Get benchmark as JSON
        uses: benchmark-action/github-action-benchmark@v1
        with:
          # What benchmark tool the output.txt came from
          tool: 'go'
          # Where the output from the benchmark tool is stored
          output-file-path: bench_output.txt
          # Write benchmarks to this file
          external-data-json-path: ./cache/benchmark-data.json
          # Workflow will fail when an alert happens
          fail-on-alert: true
          github-token: ${{ secrets.GITHUB_TOKEN }}
          comment-on-alert: true
      - name: Get CPU information
        uses: kenchan0130/actions-system-info@master
        id: system-info
      - name: Save benchmark JSON to cache
        uses: actions/cache/save@v3
        with:
          path: ./cache/benchmark-data.json
          # Save with commit hash to avoid "cache already exists"
          # Save with OS & CPU info to prevent comparing against results from different CPUs
          key: ${{ github.sha }}-${{ runner.os }}-${{ steps.system-info.outputs.cpu-model }}-go-benchmark

.github/workflows/pull_request.yaml (new file, 111 lines)

@@ -0,0 +1,111 @@
name: Pull Request
on:
  merge_group:
  pull_request:
    branches:
      - master
permissions:
  contents: read
jobs:
  lint:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version-file: './go.mod'
      - name: golangci-lint
        uses: golangci/golangci-lint-action@v3
        with:
          version: latest
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version-file: './go.mod'
      - name: Unit Tests
        run: make t
  bench:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          fetch-depth: 0 # to be able to retrieve the last commit in master branch
      - name: Set up Go
        uses: actions/setup-go@v4
        with:
          go-version-file: './go.mod'
          cache-dependency-path: './go.sum'
          check-latest: true
      - name: Run benchmark and store the output to a file
        run: |
          set -o pipefail
          make bench | tee ${{ github.sha }}_bench_output.txt
      - name: Get CPU information
        uses: kenchan0130/actions-system-info@master
        id: system-info
      - name: Get Master branch SHA
        id: get-master-branch-sha
        run: |
          SHA=$(git rev-parse origin/master)
          echo "sha=$SHA" >> $GITHUB_OUTPUT
      - name: Try to get benchmark JSON from master branch
        uses: actions/cache/restore@v3
        id: cache
        with:
          path: ./cache/benchmark-data.json
          key: ${{ steps.get-master-branch-sha.outputs.sha }}-${{ runner.os }}-${{ steps.system-info.outputs.cpu-model }}-go-benchmark
      - name: Compare benchmarks with master
        uses: benchmark-action/github-action-benchmark@v1
        if: steps.cache.outputs.cache-hit == 'true'
        with:
          # What benchmark tool the output.txt came from
          tool: 'go'
          # Where the output from the benchmark tool is stored
          output-file-path: ${{ github.sha }}_bench_output.txt
          # Where the benchmarks in master are (to compare)
          external-data-json-path: ./cache/benchmark-data.json
          # Do not save the data
          save-data-file: false
          # Workflow will fail when an alert happens
          fail-on-alert: true
          github-token: ${{ secrets.GITHUB_TOKEN }}
          # Enable Job Summary for PRs
          summary-always: true
      - name: Run benchmarks
        uses: benchmark-action/github-action-benchmark@v1
        if: steps.cache.outputs.cache-hit != 'true'
        with:
          # What benchmark tool the output.txt came from
          tool: 'go'
          # Where the output from the benchmark tool is stored
          output-file-path: ${{ github.sha }}_bench_output.txt
          # Write benchmarks to this file, do not publish to Github Pages
          save-data-file: false
          external-data-json-path: ./cache/benchmark-data.json
          # Workflow will fail when an alert happens
          fail-on-alert: true
          # Enable alert commit comment
          github-token: ${{ secrets.GITHUB_TOKEN }}
          comment-on-alert: true
          # Enable Job Summary for PRs
          summary-always: true

.gitignore (2 lines added)

@@ -1 +1,3 @@
vendor/
.idea/
*.out

.golangci.yaml (new file, 35 lines)

@@ -0,0 +1,35 @@
run:
  timeout: 3m
  modules-download-mode: readonly
linters:
  enable:
    - errname
    - gofmt
    - goimports
    - stylecheck
    - importas
    - errcheck
    - gosimple
    - govet
    - ineffassign
    - mirror
    - staticcheck
    - tagalign
    - testifylint
    - typecheck
    - unused
    - unconvert
    - unparam
    - wastedassign
    - whitespace
    - exhaustive
    - noctx
    - promlinter
linters-settings:
  govet:
    enable-all: true
    disable:
      - shadow
      - fieldalignment

Makefile

@@ -1,5 +1,25 @@
t:
	go test ./...
.DEFAULT_GOAL := help
f:
.PHONY: help
help:
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
.PHONY: bench
bench: ## Run benchmarks
	go test ./... -bench . -benchtime 5s -timeout 0 -run=XXX -benchmem
.PHONY: l
l: ## Lint Go source files
	go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest && golangci-lint run
.PHONY: t
t: ## Run unit tests
	go test -race -count=1 ./...
.PHONY: f
f: ## Format code
	go fmt ./...
.PHONY: c
c: ## Measure code coverage
	go test -race -covermode=atomic ./... -coverprofile=cover.out

assert/assert.go (new file, 106 lines)

@@ -0,0 +1,106 @@
// A wrapper around *testing.T. I hate the if a != b { t.Errorf(...) } pattern.
// Packages should prefer using the tests package (which exposes all of
// these functions). The only reason to use this package directly is if
// the tests package depends on your package (and thus you have a cyclical
// dependency).
package assert

import (
	"math"
	"reflect"
	"strings"
	"testing"
	"time"
)

// a == b
func Equal[T comparable](t *testing.T, actual T, expected T) {
	t.Helper()
	if actual != expected {
		t.Errorf("expected '%v' to equal '%v'", actual, expected)
		t.FailNow()
	}
}

// Two lists are equal (same length & same values in the same order)
func List[T comparable](t *testing.T, actuals []T, expecteds []T) {
	t.Helper()
	Equal(t, len(actuals), len(expecteds))
	for i, actual := range actuals {
		Equal(t, actual, expecteds[i])
	}
}

// needle not in []haystack
func DoesNotContain[T comparable](t *testing.T, haystack []T, needle T) {
	t.Helper()
	for _, v := range haystack {
		if v == needle {
			t.Errorf("expected '%v' to not be in '%v'", needle, haystack)
			t.FailNow()
		}
	}
}

// A value is nil
func Nil(t *testing.T, actual interface{}) {
	t.Helper()
	if actual != nil && !reflect.ValueOf(actual).IsNil() {
		t.Errorf("expected %v to be nil", actual)
		t.FailNow()
	}
}

// A value is not nil
func NotNil(t *testing.T, actual interface{}) {
	t.Helper()
	if actual == nil {
		t.Errorf("expected %v to be not nil", actual)
		t.FailNow()
	}
}

// A value is true
func True(t *testing.T, actual bool) {
	t.Helper()
	if !actual {
		t.Error("expected true, got false")
		t.FailNow()
	}
}

// A value is false
func False(t *testing.T, actual bool) {
	t.Helper()
	if actual {
		t.Error("expected false, got true")
		t.FailNow()
	}
}

// The string contains the given value
func StringContains(t *testing.T, actual string, expected string) {
	t.Helper()
	if !strings.Contains(actual, expected) {
		t.Errorf("expected %s to contain %s", actual, expected)
		t.FailNow()
	}
}

func Error(t *testing.T, actual error, expected error) {
	t.Helper()
	if actual != expected {
		t.Errorf("expected '%s' to be '%s'", actual, expected)
		t.FailNow()
	}
}

func Nowish(t *testing.T, actual time.Time) {
	t.Helper()
	diff := math.Abs(time.Now().UTC().Sub(actual).Seconds())
	if diff > 1 {
		t.Errorf("expected '%s' to be nowish", actual)
		t.FailNow()
	}
}

bucket.go (102 lines changed)

@@ -6,26 +6,86 @@ import (
"time"
)
type bucket struct {
type bucket[T any] struct {
sync.RWMutex
lookup map[string]*Item
lookup map[string]*Item[T]
}
func (b *bucket) itemCount() int {
func (b *bucket[T]) itemCount() int {
b.RLock()
defer b.RUnlock()
return len(b.lookup)
}
func (b *bucket) get(key string) *Item {
func (b *bucket[T]) forEachFunc(matches func(key string, item *Item[T]) bool) bool {
lookup := b.lookup
b.RLock()
defer b.RUnlock()
for key, item := range lookup {
if !matches(key, item) {
return false
}
}
return true
}
func (b *bucket[T]) get(key string) *Item[T] {
b.RLock()
defer b.RUnlock()
return b.lookup[key]
}
func (b *bucket) set(key string, value interface{}, duration time.Duration) (*Item, *Item) {
func (b *bucket[T]) setnx(key string, value T, duration time.Duration, track bool) *Item[T] {
b.RLock()
item := b.lookup[key]
b.RUnlock()
if item != nil {
return item
}
expires := time.Now().Add(duration).UnixNano()
item := newItem(key, value, expires)
newItem := newItem(key, value, expires, track)
b.Lock()
defer b.Unlock()
// check again under write lock
item = b.lookup[key]
if item != nil {
return item
}
b.lookup[key] = newItem
return newItem
}
func (b *bucket[T]) setnx2(key string, f func() T, duration time.Duration, track bool) (*Item[T], bool) {
b.RLock()
item := b.lookup[key]
b.RUnlock()
if item != nil {
return item, true
}
b.Lock()
defer b.Unlock()
// check again under write lock
item = b.lookup[key]
if item != nil {
return item, true
}
expires := time.Now().Add(duration).UnixNano()
newItem := newItem(key, f(), expires, track)
b.lookup[key] = newItem
return newItem, false
}
func (b *bucket[T]) set(key string, value T, duration time.Duration, track bool) (*Item[T], *Item[T]) {
expires := time.Now().Add(duration).UnixNano()
item := newItem(key, value, expires, track)
b.Lock()
existing := b.lookup[key]
b.lookup[key] = item
@@ -33,7 +93,7 @@ func (b *bucket) set(key string, value interface{}, duration time.Duration) (*It
return item, existing
}
func (b *bucket) delete(key string) *Item {
func (b *bucket[T]) remove(key string) *Item[T] {
b.Lock()
item := b.lookup[key]
delete(b.lookup, key)
@@ -41,6 +101,12 @@ func (b *bucket) delete(key string) *Item {
return item
}
func (b *bucket[T]) delete(key string) {
b.Lock()
delete(b.lookup, key)
b.Unlock()
}
// This is an expensive operation, so we do what we can to optimize it and limit
// the impact it has on concurrent operations. Specifically, we:
// 1 - Do an initial iteration to collect matches. This allows us to do the
@@ -54,13 +120,13 @@ func (b *bucket) delete(key string) *Item {
// the item from the map. I'm pretty sure this is 100% fine, but it is unique.
// (We do this so that the write to the channel is under the read lock and not the
// write lock)
func (b *bucket) deletePrefix(prefix string, deletables chan *Item) int {
func (b *bucket[T]) deleteFunc(matches func(key string, item *Item[T]) bool, deletables chan *Item[T]) int {
lookup := b.lookup
items := make([]*Item, 0, len(lookup)/10)
items := make([]*Item[T], 0)
b.RLock()
for key, item := range lookup {
if strings.HasPrefix(key, prefix) {
if matches(key, item) {
deletables <- item
items = append(items, item)
}
@@ -80,8 +146,16 @@ func (b *bucket) deletePrefix(prefix string, deletables chan *Item) int {
return len(items)
}
func (b *bucket) clear() {
b.Lock()
b.lookup = make(map[string]*Item)
b.Unlock()
func (b *bucket[T]) deletePrefix(prefix string, deletables chan *Item[T]) int {
return b.deleteFunc(func(key string, item *Item[T]) bool {
return strings.HasPrefix(key, prefix)
}, deletables)
}
// we expect the caller to have acquired a write lock
func (b *bucket[T]) clear() {
for _, item := range b.lookup {
item.promotions = -2
}
b.lookup = make(map[string]*Item[T])
}

bucket_test.go

@@ -1,69 +1,56 @@
package ccache
import (
. "github.com/karlseguin/expect"
"testing"
"time"
"github.com/karlseguin/ccache/v3/assert"
)
type BucketTests struct {
}
func Test_Bucket(t *testing.T) {
Expectify(new(BucketTests), t)
}
func (_ *BucketTests) GetMissFromBucket() {
func Test_Bucket_GetMissFromBucket(t *testing.T) {
bucket := testBucket()
Expect(bucket.get("invalid")).To.Equal(nil)
assert.Nil(t, bucket.get("invalid"))
}
func (_ *BucketTests) GetHitFromBucket() {
func Test_Bucket_GetHitFromBucket(t *testing.T) {
bucket := testBucket()
item := bucket.get("power")
assertValue(item, "9000")
assertValue(t, item, "9000")
}
func (_ *BucketTests) DeleteItemFromBucket() {
func Test_Bucket_DeleteItemFromBucket(t *testing.T) {
bucket := testBucket()
bucket.delete("power")
Expect(bucket.get("power")).To.Equal(nil)
assert.Nil(t, bucket.get("power"))
}
func (_ *BucketTests) SetsANewBucketItem() {
func Test_Bucket_SetsANewBucketItem(t *testing.T) {
bucket := testBucket()
item, existing := bucket.set("spice", TestValue("flow"), time.Minute)
assertValue(item, "flow")
item, existing := bucket.set("spice", "flow", time.Minute, false)
assertValue(t, item, "flow")
item = bucket.get("spice")
assertValue(item, "flow")
Expect(existing).To.Equal(nil)
assertValue(t, item, "flow")
assert.Equal(t, existing, nil)
}
func (_ *BucketTests) SetsAnExistingItem() {
func Test_Bucket_SetsAnExistingItem(t *testing.T) {
bucket := testBucket()
item, existing := bucket.set("power", TestValue("9001"), time.Minute)
assertValue(item, "9001")
item, existing := bucket.set("power", "9001", time.Minute, false)
assertValue(t, item, "9001")
item = bucket.get("power")
assertValue(item, "9001")
assertValue(existing, "9000")
assertValue(t, item, "9001")
assertValue(t, existing, "9000")
}
func testBucket() *bucket {
b := &bucket{lookup: make(map[string]*Item)}
b.lookup["power"] = &Item{
func testBucket() *bucket[string] {
b := &bucket[string]{lookup: make(map[string]*Item[string])}
b.lookup["power"] = &Item[string]{
key: "power",
value: TestValue("9000"),
value: "9000",
}
return b
}
func assertValue(item *Item, expected string) {
value := item.value.(TestValue)
Expect(value).To.Equal(TestValue(expected))
}
type TestValue string
func (v TestValue) Expires() time.Time {
return time.Now()
func assertValue(t *testing.T, item *Item[string], expected string) {
assert.Equal(t, item.value, expected)
}

cache.go (335 lines changed)

@@ -2,52 +2,44 @@
package ccache
import (
"container/list"
"hash/fnv"
"sync/atomic"
"time"
)
// The cache has a generic 'control' channel that is used to send
// messages to the worker. These are the messages that can be sent to it
type getDropped struct {
res chan int
}
type setMaxSize struct {
size int64
}
type Cache struct {
*Configuration
list *list.List
type Cache[T any] struct {
*Configuration[T]
control
list *List[T]
size int64
buckets []*bucket
buckets []*bucket[T]
bucketMask uint32
deletables chan *Item
promotables chan *Item
control chan interface{}
deletables chan *Item[T]
promotables chan *Item[T]
}
// Create a new cache with the specified configuration
// See ccache.Configure() for creating a configuration
func New(config *Configuration) *Cache {
c := &Cache{
list: list.New(),
func New[T any](config *Configuration[T]) *Cache[T] {
c := &Cache[T]{
list: NewList[T](),
Configuration: config,
control: newControl(),
bucketMask: uint32(config.buckets) - 1,
buckets: make([]*bucket, config.buckets),
control: make(chan interface{}),
buckets: make([]*bucket[T], config.buckets),
deletables: make(chan *Item[T], config.deleteBuffer),
promotables: make(chan *Item[T], config.promoteBuffer),
}
for i := 0; i < int(config.buckets); i++ {
c.buckets[i] = &bucket{
lookup: make(map[string]*Item),
for i := 0; i < config.buckets; i++ {
c.buckets[i] = &bucket[T]{
lookup: make(map[string]*Item[T]),
}
}
c.restart()
go c.worker()
return c
}
func (c *Cache) ItemCount() int {
func (c *Cache[T]) ItemCount() int {
count := 0
for _, b := range c.buckets {
count += b.itemCount()
@@ -55,7 +47,7 @@ func (c *Cache) ItemCount() int {
return count
}
func (c *Cache) DeletePrefix(prefix string) int {
func (c *Cache[T]) DeletePrefix(prefix string) int {
count := 0
for _, b := range c.buckets {
count += b.deletePrefix(prefix, c.deletables)
@@ -63,41 +55,95 @@ func (c *Cache) DeletePrefix(prefix string) int {
return count
}
// Deletes all items for which the matches func evaluates to true.
func (c *Cache[T]) DeleteFunc(matches func(key string, item *Item[T]) bool) int {
count := 0
for _, b := range c.buckets {
count += b.deleteFunc(matches, c.deletables)
}
return count
}
func (c *Cache[T]) ForEachFunc(matches func(key string, item *Item[T]) bool) {
for _, b := range c.buckets {
if !b.forEachFunc(matches) {
break
}
}
}
// Get an item from the cache. Returns nil if the item wasn't found.
// This can return an expired item. Use item.Expired() to see if the item
// is expired and item.TTL() to see how long until the item expires (which
// will be negative for an already expired item).
func (c *Cache) Get(key string) *Item {
func (c *Cache[T]) Get(key string) *Item[T] {
item := c.bucket(key).get(key)
if item == nil {
return nil
}
if item.expires > time.Now().UnixNano() {
c.promote(item)
if !item.Expired() {
select {
case c.promotables <- item:
default:
}
}
return item
}
// Same as Get but does not promote the value. This essentially circumvents the
// "least recently used" aspect of this cache. To some degree, it's akin to a
// "peak"
func (c *Cache[T]) GetWithoutPromote(key string) *Item[T] {
return c.bucket(key).get(key)
}
// Used when the cache was created with the Track() configuration option.
// Avoid otherwise
func (c *Cache) TrackingGet(key string) TrackedItem {
func (c *Cache[T]) TrackingGet(key string) TrackedItem[T] {
item := c.Get(key)
if item == nil {
return NilTracked
return nil
}
item.track()
return item
}
// Used when the cache was created with the Track() configuration option.
// Sets the item, and returns a tracked reference to it.
func (c *Cache[T]) TrackingSet(key string, value T, duration time.Duration) TrackedItem[T] {
return c.set(key, value, duration, true)
}
// Set the value in the cache for the specified duration
func (c *Cache) Set(key string, value interface{}, duration time.Duration) {
c.set(key, value, duration)
func (c *Cache[T]) Set(key string, value T, duration time.Duration) {
c.set(key, value, duration, false)
}
// Setnx sets the value in the cache for the specified duration if the key does not already exist
func (c *Cache[T]) Setnx(key string, value T, duration time.Duration) {
c.bucket(key).setnx(key, value, duration, false)
}
// Setnx2 sets the value built by f in the cache for the specified duration if the key does not already exist
func (c *Cache[T]) Setnx2(key string, f func() T, duration time.Duration) *Item[T] {
item, existing := c.bucket(key).setnx2(key, f, duration, false)
// consistent with Get
if existing && !item.Expired() {
select {
case c.promotables <- item:
default:
}
// consistent with set
} else if !existing {
c.promotables <- item
}
return item
}
// Replace the value if it exists, does not set if it doesn't.
// Returns true if the item existed and was replaced, false otherwise.
// Replace does not reset item's TTL
func (c *Cache) Replace(key string, value interface{}) bool {
func (c *Cache[T]) Replace(key string, value T) bool {
item := c.bucket(key).get(key)
if item == nil {
return false
@@ -106,10 +152,25 @@ func (c *Cache) Replace(key string, value interface{}) bool {
return true
}
// Extend the value if it exists, does not set if it doesn't exist.
// Returns true if the item's expiry time was extended, false otherwise.
func (c *Cache[T]) Extend(key string, duration time.Duration) bool {
item := c.bucket(key).get(key)
if item == nil {
return false
}
item.Extend(duration)
return true
}
// Attempts to get the value from the cache and calls fetch on a miss (missing
// or stale item). If fetch returns an error, no value is cached and the error
// is returned back to the caller.
func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
// Note that Fetch merely calls the public Get and Set functions. If you want
// a different Fetch behavior, such as thundering herd protection or returning
// expired items, implement it in your application.
func (c *Cache[T]) Fetch(key string, duration time.Duration, fetch func() (T, error)) (*Item[T], error) {
item := c.Get(key)
if item != nil && !item.Expired() {
return item, nil
@@ -118,12 +179,12 @@ func (c *Cache) Fetch(key string, duration time.Duration, fetch func() (interfac
if err != nil {
return nil, err
}
return c.set(key, value, duration), nil
return c.set(key, value, duration, false), nil
}
// Remove the item from the cache, return true if the item was present, false otherwise.
func (c *Cache) Delete(key string) bool {
item := c.bucket(key).delete(key)
func (c *Cache[T]) Delete(key string) bool {
item := c.bucket(key).remove(key)
if item != nil {
c.deletables <- item
return true
@@ -131,91 +192,94 @@ func (c *Cache) Delete(key string) bool {
return false
}
//this isn't thread safe. It's meant to be called from non-concurrent tests
func (c *Cache) Clear() {
for _, bucket := range c.buckets {
bucket.clear()
}
c.size = 0
c.list = list.New()
}
// Stops the background worker. Operations performed on the cache after Stop
// is called are likely to panic
func (c *Cache) Stop() {
close(c.promotables)
<-c.control
}
// Gets the number of items removed from the cache due to memory pressure since
// the last time GetDropped was called
func (c *Cache) GetDropped() int {
res := make(chan int)
c.control <- getDropped{res: res}
return <-res
}
// Sets a new max size. That can result in a GC being run if the new maximum size
// is smaller than the cached size
func (c *Cache) SetMaxSize(size int64) {
c.control <- setMaxSize{size}
}
func (c *Cache) restart() {
c.deletables = make(chan *Item, c.deleteBuffer)
c.promotables = make(chan *Item, c.promoteBuffer)
c.control = make(chan interface{})
go c.worker()
}
func (c *Cache) deleteItem(bucket *bucket, item *Item) {
bucket.delete(item.key) //stop other GETs from getting it
c.deletables <- item
}
func (c *Cache) set(key string, value interface{}, duration time.Duration) *Item {
item, existing := c.bucket(key).set(key, value, duration)
func (c *Cache[T]) set(key string, value T, duration time.Duration, track bool) *Item[T] {
item, existing := c.bucket(key).set(key, value, duration, track)
if existing != nil {
c.deletables <- existing
}
c.promote(item)
c.promotables <- item
return item
}
func (c *Cache) bucket(key string) *bucket {
func (c *Cache[T]) bucket(key string) *bucket[T] {
h := fnv.New32a()
h.Write([]byte(key))
return c.buckets[h.Sum32()&c.bucketMask]
}
func (c *Cache) promote(item *Item) {
c.promotables <- item
func (c *Cache[T]) halted(fn func()) {
c.halt()
defer c.unhalt()
fn()
}
func (c *Cache) worker() {
defer close(c.control)
func (c *Cache[T]) halt() {
for _, bucket := range c.buckets {
bucket.Lock()
}
}
func (c *Cache[T]) unhalt() {
for _, bucket := range c.buckets {
bucket.Unlock()
}
}
func (c *Cache[T]) worker() {
dropped := 0
cc := c.control
promoteItem := func(item *Item[T]) {
if c.doPromote(item) && c.size > c.maxSize {
dropped += c.gc()
}
}
for {
select {
case item, ok := <-c.promotables:
if ok == false {
goto drain
}
if c.doPromote(item) && c.size > c.maxSize {
dropped += c.gc()
}
case item := <-c.promotables:
promoteItem(item)
case item := <-c.deletables:
c.doDelete(item)
case control := <-c.control:
case control := <-cc:
switch msg := control.(type) {
case getDropped:
case controlStop:
goto drain
case controlGetDropped:
msg.res <- dropped
dropped = 0
case setMaxSize:
case controlSetMaxSize:
c.maxSize = msg.size
if c.size > c.maxSize {
dropped += c.gc()
}
msg.done <- struct{}{}
case controlClear:
c.halted(func() {
promotables := c.promotables
for len(promotables) > 0 {
<-promotables
}
deletables := c.deletables
for len(deletables) > 0 {
<-deletables
}
for _, bucket := range c.buckets {
bucket.clear()
}
c.size = 0
c.list = NewList[T]()
})
msg.done <- struct{}{}
case controlGetSize:
msg.res <- c.size
case controlGC:
dropped += c.gc()
msg.done <- struct{}{}
case controlSyncUpdates:
doAllPendingPromotesAndDeletes(c.promotables, promoteItem, c.deletables, c.doDelete)
msg.done <- struct{}{}
}
}
}
@@ -226,62 +290,99 @@ drain:
case item := <-c.deletables:
c.doDelete(item)
default:
close(c.deletables)
return
}
}
}
func (c *Cache) doDelete(item *Item) {
if item.element == nil {
// This method is used to implement SyncUpdates. It simply receives and processes as many
// items as it can receive from the promotables and deletables channels immediately without
// blocking. If some other goroutine sends an item on either channel after this method has
// finished receiving, that's OK, because SyncUpdates only guarantees processing of values
// that were already sent by the same goroutine.
func doAllPendingPromotesAndDeletes[T any](
promotables <-chan *Item[T],
promoteFn func(*Item[T]),
deletables <-chan *Item[T],
deleteFn func(*Item[T]),
) {
doAllPromotes:
for {
select {
case item := <-promotables:
promoteFn(item)
default:
break doAllPromotes
}
}
doAllDeletes:
for {
select {
case item := <-deletables:
deleteFn(item)
default:
break doAllDeletes
}
}
}
func (c *Cache[T]) doDelete(item *Item[T]) {
if item.next == nil && item.prev == nil {
item.promotions = -2
} else {
c.size -= item.size
if c.onDelete != nil {
c.onDelete(item)
}
c.list.Remove(item.element)
c.list.Remove(item)
item.promotions = -2
}
}
func (c *Cache) doPromote(item *Item) bool {
func (c *Cache[T]) doPromote(item *Item[T]) bool {
//already deleted
if item.promotions == -2 {
return false
}
if item.element != nil { //not a new item
if item.next != nil || item.prev != nil { // not a new item
if item.shouldPromote(c.getsPerPromote) {
c.list.MoveToFront(item.element)
c.list.MoveToFront(item)
item.promotions = 0
}
return false
}
c.size += item.size
item.element = c.list.PushFront(item)
c.list.Insert(item)
return true
}
func (c *Cache) gc() int {
func (c *Cache[T]) gc() int {
dropped := 0
element := c.list.Back()
for i := 0; i < c.itemsToPrune; i++ {
if element == nil {
item := c.list.Tail
itemsToPrune := int64(c.itemsToPrune)
if min := c.size - c.maxSize; min > itemsToPrune {
itemsToPrune = min
}
for i := int64(0); i < itemsToPrune; i++ {
if item == nil {
return dropped
}
prev := element.Prev()
item := element.Value.(*Item)
if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 {
prev := item.prev
if !c.tracking || atomic.LoadInt32(&item.refCount) == 0 {
c.bucket(item.key).delete(item.key)
c.size -= item.size
c.list.Remove(element)
c.list.Remove(item)
if c.onDelete != nil {
c.onDelete(item)
}
dropped += 1
item.promotions = -2
}
element = prev
item = prev
}
return dropped
}

cache_test.go

@@ -1,243 +1,508 @@
package ccache
import (
"math/rand"
"sort"
"strconv"
"sync"
"sync/atomic"
"testing"
"time"
. "github.com/karlseguin/expect"
"github.com/karlseguin/ccache/v3/assert"
)
type CacheTests struct{}
func Test_Setnx(t *testing.T) {
cache := New(Configure[string]())
defer cache.Stop()
assert.Equal(t, cache.ItemCount(), 0)
func Test_Cache(t *testing.T) {
Expectify(new(CacheTests), t)
cache.Set("spice", "flow", time.Minute)
assert.Equal(t, cache.ItemCount(), 1)
// set if exists
cache.Setnx("spice", "worm", time.Minute)
assert.Equal(t, cache.ItemCount(), 1)
assert.Equal(t, cache.Get("spice").Value(), "flow")
// set if not exists
cache.Delete("spice")
cache.Setnx("spice", "worm", time.Minute)
assert.Equal(t, cache.Get("spice").Value(), "worm")
assert.Equal(t, cache.ItemCount(), 1)
}
func (_ CacheTests) DeletesAValue() {
cache := New(Configure())
Expect(cache.ItemCount()).To.Equal(0)
func Test_Extend(t *testing.T) {
cache := New(Configure[string]())
defer cache.Stop()
assert.Equal(t, cache.ItemCount(), 0)
// non exist
ok := cache.Extend("spice", time.Minute*10)
assert.Equal(t, false, ok)
// exist
cache.Set("spice", "flow", time.Minute)
assert.Equal(t, cache.ItemCount(), 1)
ok = cache.Extend("spice", time.Minute*10) // 10 + 10
assert.Equal(t, true, ok)
item := cache.Get("spice")
less := time.Minute*22 < time.Duration(item.expires)
assert.Equal(t, true, less)
more := time.Minute*18 < time.Duration(item.expires)
assert.Equal(t, true, more)
assert.Equal(t, cache.ItemCount(), 1)
}
func Test_CacheDeletesAValue(t *testing.T) {
cache := New(Configure[string]())
defer cache.Stop()
assert.Equal(t, cache.ItemCount(), 0)
cache.Set("spice", "flow", time.Minute)
cache.Set("worm", "sand", time.Minute)
Expect(cache.ItemCount()).To.Equal(2)
assert.Equal(t, cache.ItemCount(), 2)
cache.Delete("spice")
Expect(cache.Get("spice")).To.Equal(nil)
Expect(cache.Get("worm").Value()).To.Equal("sand")
Expect(cache.ItemCount()).To.Equal(1)
assert.Equal(t, cache.Get("spice"), nil)
assert.Equal(t, cache.Get("worm").Value(), "sand")
assert.Equal(t, cache.ItemCount(), 1)
}
func (_ CacheTests) DeletesAPrefix() {
cache := New(Configure())
Expect(cache.ItemCount()).To.Equal(0)
func Test_CacheDeletesAPrefix(t *testing.T) {
cache := New(Configure[string]())
defer cache.Stop()
assert.Equal(t, cache.ItemCount(), 0)
cache.Set("aaa", "1", time.Minute)
cache.Set("aab", "2", time.Minute)
cache.Set("aac", "3", time.Minute)
cache.Set("ac", "4", time.Minute)
cache.Set("z5", "7", time.Minute)
Expect(cache.ItemCount()).To.Equal(5)
assert.Equal(t, cache.ItemCount(), 5)
Expect(cache.DeletePrefix("9a")).To.Equal(0)
Expect(cache.ItemCount()).To.Equal(5)
assert.Equal(t, cache.DeletePrefix("9a"), 0)
assert.Equal(t, cache.ItemCount(), 5)
Expect(cache.DeletePrefix("aa")).To.Equal(3)
Expect(cache.Get("aaa")).To.Equal(nil)
Expect(cache.Get("aab")).To.Equal(nil)
Expect(cache.Get("aac")).To.Equal(nil)
Expect(cache.Get("ac").Value()).To.Equal("4")
Expect(cache.Get("z5").Value()).To.Equal("7")
Expect(cache.ItemCount()).To.Equal(2)
assert.Equal(t, cache.DeletePrefix("aa"), 3)
assert.Equal(t, cache.Get("aaa"), nil)
assert.Equal(t, cache.Get("aab"), nil)
assert.Equal(t, cache.Get("aac"), nil)
assert.Equal(t, cache.Get("ac").Value(), "4")
assert.Equal(t, cache.Get("z5").Value(), "7")
assert.Equal(t, cache.ItemCount(), 2)
}
func (_ CacheTests) OnDeleteCallbackCalled() {
onDeleteFnCalled := false
onDeleteFn := func(item *Item) {
func Test_CacheDeletesAFunc(t *testing.T) {
cache := New(Configure[int]())
defer cache.Stop()
assert.Equal(t, cache.ItemCount(), 0)
cache.Set("a", 1, time.Minute)
cache.Set("b", 2, time.Minute)
cache.Set("c", 3, time.Minute)
cache.Set("d", 4, time.Minute)
cache.Set("e", 5, time.Minute)
cache.Set("f", 6, time.Minute)
assert.Equal(t, cache.ItemCount(), 6)
assert.Equal(t, cache.DeleteFunc(func(key string, item *Item[int]) bool {
return false
}), 0)
assert.Equal(t, cache.ItemCount(), 6)
assert.Equal(t, cache.DeleteFunc(func(key string, item *Item[int]) bool {
return item.Value() < 4
}), 3)
assert.Equal(t, cache.ItemCount(), 3)
assert.Equal(t, cache.DeleteFunc(func(key string, item *Item[int]) bool {
return key == "d"
}), 1)
assert.Equal(t, cache.ItemCount(), 2)
}
func Test_CacheOnDeleteCallbackCalled(t *testing.T) {
onDeleteFnCalled := int32(0)
onDeleteFn := func(item *Item[string]) {
if item.key == "spice" {
onDeleteFnCalled = true
atomic.AddInt32(&onDeleteFnCalled, 1)
}
}
cache := New(Configure().OnDelete(onDeleteFn))
cache := New(Configure[string]().OnDelete(onDeleteFn))
cache.Set("spice", "flow", time.Minute)
cache.Set("worm", "sand", time.Minute)
time.Sleep(time.Millisecond * 10) // Run once to init
cache.Delete("spice")
time.Sleep(time.Millisecond * 10) // Wait for worker to pick up deleted items
cache.SyncUpdates() // wait for worker to pick up preceding updates
Expect(cache.Get("spice")).To.Equal(nil)
Expect(cache.Get("worm").Value()).To.Equal("sand")
Expect(onDeleteFnCalled).To.Equal(true)
cache.Delete("spice")
cache.SyncUpdates()
assert.Equal(t, cache.Get("spice"), nil)
assert.Equal(t, cache.Get("worm").Value(), "sand")
assert.Equal(t, atomic.LoadInt32(&onDeleteFnCalled), 1)
}
func (_ CacheTests) FetchesExpiredItems() {
cache := New(Configure())
fn := func() (interface{}, error) { return "moo-moo", nil }
func Test_CacheFetchesExpiredItems(t *testing.T) {
cache := New(Configure[string]())
fn := func() (string, error) { return "moo-moo", nil }
cache.Set("beef", "moo", time.Second*-1)
Expect(cache.Get("beef").Value()).To.Equal("moo")
assert.Equal(t, cache.Get("beef").Value(), "moo")
out, _ := cache.Fetch("beef", time.Second, fn)
Expect(out.Value()).To.Equal("moo-moo")
assert.Equal(t, out.Value(), "moo-moo")
}
func (_ CacheTests) GCsTheOldestItems() {
cache := New(Configure().ItemsToPrune(10))
func Test_CacheGCsTheOldestItems(t *testing.T) {
cache := New(Configure[int]().ItemsToPrune(10))
for i := 0; i < 500; i++ {
cache.Set(strconv.Itoa(i), i, time.Minute)
}
//let the items get promoted (and added to our list)
time.Sleep(time.Millisecond * 10)
gcCache(cache)
Expect(cache.Get("9")).To.Equal(nil)
Expect(cache.Get("10").Value()).To.Equal(10)
Expect(cache.ItemCount()).To.Equal(490)
cache.SyncUpdates()
cache.GC()
assert.Equal(t, cache.Get("9"), nil)
assert.Equal(t, cache.Get("10").Value(), 10)
assert.Equal(t, cache.ItemCount(), 490)
}
func (_ CacheTests) PromotedItemsDontGetPruned() {
cache := New(Configure().ItemsToPrune(10).GetsPerPromote(1))
func Test_CachePromotedItemsDontGetPruned(t *testing.T) {
cache := New(Configure[int]().ItemsToPrune(10).GetsPerPromote(1))
for i := 0; i < 500; i++ {
cache.Set(strconv.Itoa(i), i, time.Minute)
}
time.Sleep(time.Millisecond * 10) //run the worker once to init the list
cache.SyncUpdates()
cache.Get("9")
time.Sleep(time.Millisecond * 10)
gcCache(cache)
Expect(cache.Get("9").Value()).To.Equal(9)
Expect(cache.Get("10")).To.Equal(nil)
Expect(cache.Get("11").Value()).To.Equal(11)
cache.SyncUpdates()
cache.GC()
assert.Equal(t, cache.Get("9").Value(), 9)
assert.Equal(t, cache.Get("10"), nil)
assert.Equal(t, cache.Get("11").Value(), 11)
}
func (_ CacheTests) TrackerDoesNotCleanupHeldInstance() {
cache := New(Configure().ItemsToPrune(10).Track())
for i := 0; i < 10; i++ {
func Test_GetWithoutPromoteDoesNotPromote(t *testing.T) {
cache := New(Configure[int]().ItemsToPrune(10).GetsPerPromote(1))
for i := 0; i < 500; i++ {
cache.Set(strconv.Itoa(i), i, time.Minute)
}
item := cache.TrackingGet("0")
time.Sleep(time.Millisecond * 10)
gcCache(cache)
Expect(cache.Get("0").Value()).To.Equal(0)
Expect(cache.Get("1")).To.Equal(nil)
item.Release()
gcCache(cache)
Expect(cache.Get("0")).To.Equal(nil)
cache.SyncUpdates()
cache.GetWithoutPromote("9")
cache.SyncUpdates()
cache.GC()
assert.Equal(t, cache.Get("9"), nil)
assert.Equal(t, cache.Get("10").Value(), 10)
assert.Equal(t, cache.Get("11").Value(), 11)
}
func (_ CacheTests) RemovesOldestItemWhenFull() {
func Test_CacheTrackerDoesNotCleanupHeldInstance(t *testing.T) {
cache := New(Configure[int]().ItemsToPrune(11).Track())
item0 := cache.TrackingSet("0", 0, time.Minute)
for i := 1; i < 11; i++ {
cache.Set(strconv.Itoa(i), i, time.Minute)
}
item1 := cache.TrackingGet("1")
cache.SyncUpdates()
cache.GC()
assert.Equal(t, cache.Get("0").Value(), 0)
assert.Equal(t, cache.Get("1").Value(), 1)
item0.Release()
item1.Release()
cache.GC()
assert.Equal(t, cache.Get("0"), nil)
assert.Equal(t, cache.Get("1"), nil)
}
func Test_CacheRemovesOldestItemWhenFull(t *testing.T) {
onDeleteFnCalled := false
onDeleteFn := func(item *Item) {
onDeleteFn := func(item *Item[int]) {
if item.key == "0" {
onDeleteFnCalled = true
}
}
cache := New(Configure().MaxSize(5).ItemsToPrune(1).OnDelete(onDeleteFn))
cache := New(Configure[int]().MaxSize(5).ItemsToPrune(1).OnDelete(onDeleteFn))
for i := 0; i < 7; i++ {
cache.Set(strconv.Itoa(i), i, time.Minute)
}
time.Sleep(time.Millisecond * 10)
Expect(cache.Get("0")).To.Equal(nil)
Expect(cache.Get("1")).To.Equal(nil)
Expect(cache.Get("2").Value()).To.Equal(2)
Expect(onDeleteFnCalled).To.Equal(true)
Expect(cache.ItemCount()).To.Equal(5)
cache.SyncUpdates()
assert.Equal(t, cache.Get("0"), nil)
assert.Equal(t, cache.Get("1"), nil)
assert.Equal(t, cache.Get("2").Value(), 2)
assert.Equal(t, onDeleteFnCalled, true)
assert.Equal(t, cache.ItemCount(), 5)
}
func (_ CacheTests) RemovesOldestItemWhenFullBySizer() {
cache := New(Configure().MaxSize(9).ItemsToPrune(2))
func Test_CacheRemovesOldestItemWhenFullBySizer(t *testing.T) {
cache := New(Configure[*SizedItem]().MaxSize(9).ItemsToPrune(2))
for i := 0; i < 7; i++ {
cache.Set(strconv.Itoa(i), &SizedItem{i, 2}, time.Minute)
}
time.Sleep(time.Millisecond * 10)
Expect(cache.Get("0")).To.Equal(nil)
Expect(cache.Get("1")).To.Equal(nil)
Expect(cache.Get("2")).To.Equal(nil)
Expect(cache.Get("3")).To.Equal(nil)
Expect(cache.Get("4").Value().(*SizedItem).id).To.Equal(4)
Expect(cache.GetDropped()).To.Equal(4)
Expect(cache.GetDropped()).To.Equal(0)
cache.SyncUpdates()
assert.Equal(t, cache.Get("0"), nil)
assert.Equal(t, cache.Get("1"), nil)
assert.Equal(t, cache.Get("2"), nil)
assert.Equal(t, cache.Get("3"), nil)
assert.Equal(t, cache.Get("4").Value().id, 4)
assert.Equal(t, cache.GetDropped(), 4)
assert.Equal(t, cache.GetDropped(), 0)
}
func (_ CacheTests) SetUpdatesSizeOnDelta() {
cache := New(Configure())
func Test_CacheSetUpdatesSizeOnDelta(t *testing.T) {
cache := New(Configure[*SizedItem]())
cache.Set("a", &SizedItem{0, 2}, time.Minute)
cache.Set("b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
checkSize(cache, 5)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 5)
cache.Set("b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
checkSize(cache, 5)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 5)
cache.Set("b", &SizedItem{0, 4}, time.Minute)
time.Sleep(time.Millisecond * 5)
checkSize(cache, 6)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 6)
cache.Set("b", &SizedItem{0, 2}, time.Minute)
time.Sleep(time.Millisecond * 5)
checkSize(cache, 4)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 4)
cache.Delete("b")
time.Sleep(time.Millisecond * 100)
checkSize(cache, 2)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 2)
}
func (_ CacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
cache := New(Configure())
func Test_CacheReplaceDoesNotchangeSizeIfNotSet(t *testing.T) {
cache := New(Configure[*SizedItem]())
cache.Set("1", &SizedItem{1, 2}, time.Minute)
cache.Set("2", &SizedItem{1, 2}, time.Minute)
cache.Set("3", &SizedItem{1, 2}, time.Minute)
cache.Replace("4", &SizedItem{1, 2})
time.Sleep(time.Millisecond * 5)
checkSize(cache, 6)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 6)
}
func (_ CacheTests) ReplaceChangesSize() {
cache := New(Configure())
func Test_CacheReplaceChangesSize(t *testing.T) {
cache := New(Configure[*SizedItem]())
cache.Set("1", &SizedItem{1, 2}, time.Minute)
cache.Set("2", &SizedItem{1, 2}, time.Minute)
cache.Replace("2", &SizedItem{1, 2})
time.Sleep(time.Millisecond * 5)
checkSize(cache, 4)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 4)
cache.Replace("2", &SizedItem{1, 1})
time.Sleep(time.Millisecond * 5)
checkSize(cache, 3)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 3)
cache.Replace("2", &SizedItem{1, 3})
time.Sleep(time.Millisecond * 5)
checkSize(cache, 5)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 5)
}
func (_ CacheTests) ResizeOnTheFly() {
cache := New(Configure().MaxSize(9).ItemsToPrune(1))
func Test_CacheResizeOnTheFly(t *testing.T) {
cache := New(Configure[int]().MaxSize(9).ItemsToPrune(1))
for i := 0; i < 5; i++ {
cache.Set(strconv.Itoa(i), i, time.Minute)
}
cache.SetMaxSize(3)
time.Sleep(time.Millisecond * 10)
Expect(cache.GetDropped()).To.Equal(2)
Expect(cache.Get("0")).To.Equal(nil)
Expect(cache.Get("1")).To.Equal(nil)
Expect(cache.Get("2").Value()).To.Equal(2)
Expect(cache.Get("3").Value()).To.Equal(3)
Expect(cache.Get("4").Value()).To.Equal(4)
cache.SyncUpdates()
assert.Equal(t, cache.GetDropped(), 2)
assert.Equal(t, cache.Get("0"), nil)
assert.Equal(t, cache.Get("1"), nil)
assert.Equal(t, cache.Get("2").Value(), 2)
assert.Equal(t, cache.Get("3").Value(), 3)
assert.Equal(t, cache.Get("4").Value(), 4)
cache.Set("5", 5, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.GetDropped()).To.Equal(1)
Expect(cache.Get("2")).To.Equal(nil)
Expect(cache.Get("3").Value()).To.Equal(3)
Expect(cache.Get("4").Value()).To.Equal(4)
Expect(cache.Get("5").Value()).To.Equal(5)
cache.SyncUpdates()
assert.Equal(t, cache.GetDropped(), 1)
assert.Equal(t, cache.Get("2"), nil)
assert.Equal(t, cache.Get("3").Value(), 3)
assert.Equal(t, cache.Get("4").Value(), 4)
assert.Equal(t, cache.Get("5").Value(), 5)
cache.SetMaxSize(10)
cache.Set("6", 6, time.Minute)
time.Sleep(time.Millisecond * 10)
Expect(cache.GetDropped()).To.Equal(0)
Expect(cache.Get("3").Value()).To.Equal(3)
Expect(cache.Get("4").Value()).To.Equal(4)
Expect(cache.Get("5").Value()).To.Equal(5)
Expect(cache.Get("6").Value()).To.Equal(6)
cache.SyncUpdates()
assert.Equal(t, cache.GetDropped(), 0)
assert.Equal(t, cache.Get("3").Value(), 3)
assert.Equal(t, cache.Get("4").Value(), 4)
assert.Equal(t, cache.Get("5").Value(), 5)
assert.Equal(t, cache.Get("6").Value(), 6)
}
func Test_CacheForEachFunc(t *testing.T) {
cache := New(Configure[int]().MaxSize(3).ItemsToPrune(1))
assert.List(t, forEachKeys[int](cache), []string{})
cache.Set("1", 1, time.Minute)
assert.List(t, forEachKeys(cache), []string{"1"})
cache.Set("2", 2, time.Minute)
cache.SyncUpdates()
assert.List(t, forEachKeys(cache), []string{"1", "2"})
cache.Set("3", 3, time.Minute)
cache.SyncUpdates()
assert.List(t, forEachKeys(cache), []string{"1", "2", "3"})
cache.Set("4", 4, time.Minute)
cache.SyncUpdates()
assert.List(t, forEachKeys(cache), []string{"2", "3", "4"})
cache.Set("stop", 5, time.Minute)
cache.SyncUpdates()
assert.DoesNotContain(t, forEachKeys(cache), "stop")
cache.Set("6", 6, time.Minute)
cache.SyncUpdates()
assert.DoesNotContain(t, forEachKeys(cache), "stop")
}
func Test_CachePrune(t *testing.T) {
maxSize := int64(500)
cache := New(Configure[string]().MaxSize(maxSize).ItemsToPrune(50))
epoch := 0
for i := 0; i < 10000; i++ {
epoch += 1
expired := make([]string, 0)
for i := 0; i < 50; i += 1 {
key := strconv.FormatInt(rand.Int63n(maxSize*20), 10)
item := cache.Get(key)
if item == nil || item.TTL() > 1*time.Minute {
expired = append(expired, key)
}
}
for _, key := range expired {
cache.Set(key, key, 5*time.Minute)
}
if epoch%500 == 0 {
assert.True(t, cache.GetSize() <= 500)
}
}
}
func Test_ConcurrentStop(t *testing.T) {
for i := 0; i < 100; i++ {
cache := New(Configure[string]())
r := func() {
for {
key := strconv.Itoa(int(rand.Int31n(100)))
switch rand.Int31n(3) {
case 0:
cache.Get(key)
case 1:
cache.Set(key, key, time.Minute)
case 2:
cache.Delete(key)
}
}
}
go r()
go r()
go r()
time.Sleep(time.Millisecond * 10)
cache.Stop()
}
}
func Test_ConcurrentClearAndSet(t *testing.T) {
for i := 0; i < 1000000; i++ {
var stop atomic.Bool
var wg sync.WaitGroup
cache := New(Configure[string]())
r := func() {
for !stop.Load() {
cache.Set("a", "a", time.Minute)
}
wg.Done()
}
go r()
wg.Add(1)
cache.Clear()
stop.Store(true)
wg.Wait()
cache.SyncUpdates()
// The point of this test is to make sure that the cache's lookup and its
// recency list are in sync. But the two aren't written to atomically:
// the lookup is written to directly from the call to Set, whereas the
// list is maintained by the background worker. This can create a period
// where the two are out of sync. Even SyncUpdates is helpless here, since
// it can only sync what's been written to the buffers.
for i := 0; i < 10; i++ {
expectedCount := 0
if cache.list.Head != nil {
expectedCount = 1
}
actualCount := cache.ItemCount()
if expectedCount == actualCount {
return
}
time.Sleep(time.Millisecond)
}
t.Errorf("cache list and lookup are not consistent")
t.FailNow()
}
}
func BenchmarkFrequentSets(b *testing.B) {
cache := New(Configure[int]())
defer cache.Stop()
b.ResetTimer()
for n := 0; n < b.N; n++ {
key := strconv.Itoa(n)
cache.Set(key, n, time.Minute)
}
}
func BenchmarkFrequentGets(b *testing.B) {
cache := New(Configure[int]())
defer cache.Stop()
numKeys := 500
for i := 0; i < numKeys; i++ {
key := strconv.Itoa(i)
cache.Set(key, i, time.Minute)
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
key := strconv.FormatInt(rand.Int63n(int64(numKeys)), 10)
cache.Get(key)
}
}
func BenchmarkGetWithPromoteSmall(b *testing.B) {
getsPerPromotes := 5
cache := New(Configure[int]().GetsPerPromote(int32(getsPerPromotes)))
defer cache.Stop()
b.ResetTimer()
for n := 0; n < b.N; n++ {
key := strconv.Itoa(n)
cache.Set(key, n, time.Minute)
for i := 0; i < getsPerPromotes; i++ {
cache.Get(key)
}
}
}
func BenchmarkGetWithPromoteLarge(b *testing.B) {
getsPerPromotes := 100
cache := New(Configure[int]().GetsPerPromote(int32(getsPerPromotes)))
defer cache.Stop()
b.ResetTimer()
for n := 0; n < b.N; n++ {
key := strconv.Itoa(n)
cache.Set(key, n, time.Minute)
for i := 0; i < getsPerPromotes; i++ {
cache.Get(key)
}
}
}
type SizedItem struct {
@@ -249,14 +514,15 @@ func (s *SizedItem) Size() int64 {
return s.s
}
func checkSize(cache *Cache, sz int64) {
cache.Stop()
Expect(cache.size).To.Equal(sz)
cache.restart()
}
func gcCache(cache *Cache) {
cache.Stop()
cache.gc()
cache.restart()
func forEachKeys[T any](cache *Cache[T]) []string {
keys := make([]string, 0, 10)
cache.ForEachFunc(func(key string, i *Item[T]) bool {
if key == "stop" {
return false
}
keys = append(keys, key)
return true
})
sort.Strings(keys)
return keys
}

configuration.go

@@ -1,6 +1,6 @@
package ccache
type Configuration struct {
type Configuration[T any] struct {
maxSize int64
buckets int
itemsToPrune int
@@ -8,14 +8,14 @@ type Configuration struct {
promoteBuffer int
getsPerPromote int32
tracking bool
onDelete func(item *Item)
onDelete func(item *Item[T])
}
// Creates a configuration object with sensible defaults
// Use this as the start of the fluent configuration:
// e.g.: ccache.New(ccache.Configure().MaxSize(10000))
func Configure() *Configuration {
return &Configuration{
func Configure[T any]() *Configuration[T] {
return &Configuration[T]{
buckets: 16,
itemsToPrune: 500,
deleteBuffer: 1024,
@@ -28,7 +28,7 @@ func Configure() *Configuration {
// The max size for the cache
// [5000]
func (c *Configuration) MaxSize(max int64) *Configuration {
func (c *Configuration[T]) MaxSize(max int64) *Configuration[T] {
c.maxSize = max
return c
}
@@ -36,8 +36,8 @@ func (c *Configuration) MaxSize(max int64) *Configuration {
// Keys are hashed into % bucket count to provide greater concurrency (every set
// requires a write lock on the bucket). Must be a power of 2 (1, 2, 4, 8, 16, ...)
// [16]
func (c *Configuration) Buckets(count uint32) *Configuration {
if count == 0 || ((count&(^count+1)) == count) == false {
func (c *Configuration[T]) Buckets(count uint32) *Configuration[T] {
if count == 0 || !((count & (^count + 1)) == count) {
count = 16
}
c.buckets = int(count)
@@ -46,7 +46,7 @@ func (c *Configuration) Buckets(count uint32) *Configuration {
// The number of items to prune when memory is low
// [500]
func (c *Configuration) ItemsToPrune(count uint32) *Configuration {
func (c *Configuration[T]) ItemsToPrune(count uint32) *Configuration[T] {
c.itemsToPrune = int(count)
return c
}
@@ -54,14 +54,14 @@ func (c *Configuration) ItemsToPrune(count uint32) *Configuration {
// The size of the queue for items which should be promoted. If the queue fills
// up, promotions are skipped
// [1024]
func (c *Configuration) PromoteBuffer(size uint32) *Configuration {
func (c *Configuration[T]) PromoteBuffer(size uint32) *Configuration[T] {
c.promoteBuffer = int(size)
return c
}
// The size of the queue for items which should be deleted. If the queue fills
// up, calls to Delete() will block
func (c *Configuration) DeleteBuffer(size uint32) *Configuration {
func (c *Configuration[T]) DeleteBuffer(size uint32) *Configuration[T] {
c.deleteBuffer = int(size)
return c
}
@@ -70,7 +70,7 @@ func (c *Configuration) DeleteBuffer(size uint32) *Configuration {
// to promote an item on every Get. GetsPerPromote specifies the number of Gets
// a key must have before being promoted
// [3]
func (c *Configuration) GetsPerPromote(count int32) *Configuration {
func (c *Configuration[T]) GetsPerPromote(count int32) *Configuration[T] {
c.getsPerPromote = count
return c
}
@@ -89,7 +89,7 @@ func (c *Configuration) GetsPerPromote(count int32) *Configuration {
// By turning tracking on and using the cache's TrackingGet, the cache
// won't evict items which you haven't called Release() on. It's a simple reference
// counter.
func (c *Configuration) Track() *Configuration {
func (c *Configuration[T]) Track() *Configuration[T] {
c.tracking = true
return c
}
@@ -97,7 +97,7 @@ func (c *Configuration) Track() *Configuration {
// OnDelete allows setting a callback function to react to item deletion.
// This typically allows doing a cleanup of resources, such as calling Close() on
// cached objects that require some kind of tear-down.
func (c *Configuration) OnDelete(callback func(item *Item)) *Configuration {
func (c *Configuration[T]) OnDelete(callback func(item *Item[T])) *Configuration[T] {
c.onDelete = callback
return c
}


@@ -1,23 +1,23 @@
package ccache
import (
. "github.com/karlseguin/expect"
"testing"
"github.com/karlseguin/ccache/v3/assert"
)
type ConfigurationTests struct{}
func Test_Configuration(t *testing.T) {
Expectify(new(ConfigurationTests), t)
}
func (_ *ConfigurationTests) BucketsPowerOf2() {
func Test_Configuration_BucketsPowerOf2(t *testing.T) {
for i := uint32(0); i < 31; i++ {
c := Configure().Buckets(i)
c := Configure[int]().Buckets(i)
if i == 1 || i == 2 || i == 4 || i == 8 || i == 16 {
Expect(c.buckets).ToEqual(int(i))
assert.Equal(t, c.buckets, int(i))
} else {
Expect(c.buckets).ToEqual(16)
assert.Equal(t, c.buckets, 16)
}
}
}
func Test_Configuration_Buffers(t *testing.T) {
assert.Equal(t, Configure[int]().DeleteBuffer(24).deleteBuffer, 24)
assert.Equal(t, Configure[int]().PromoteBuffer(95).promoteBuffer, 95)
}

control.go

@@ -0,0 +1,110 @@
package ccache
type controlGC struct {
done chan struct{}
}
type controlClear struct {
done chan struct{}
}
type controlStop struct {
}
type controlGetSize struct {
res chan int64
}
type controlGetDropped struct {
res chan int
}
type controlSetMaxSize struct {
size int64
done chan struct{}
}
type controlSyncUpdates struct {
done chan struct{}
}
type control chan interface{}
func newControl() chan interface{} {
return make(chan interface{}, 5)
}
// Forces GC. There should be no reason to call this function, except from tests
// which require synchronous GC.
// This is a control command.
func (c control) GC() {
done := make(chan struct{})
c <- controlGC{done: done}
<-done
}
// Sends a stop signal to the worker thread. The worker thread will shut down
// 5 seconds after the last message is received. The cache should not be used
// after Stop is called, but concurrently executing requests should properly finish
// executing.
// This is a control command.
func (c control) Stop() {
c.SyncUpdates()
c <- controlStop{}
}
// Clears the cache
// This is a control command.
func (c control) Clear() {
done := make(chan struct{})
c <- controlClear{done: done}
<-done
}
// Gets the size of the cache. This is an O(1) call to make, but it is handled
// by the worker goroutine. It's meant to be called periodically for metrics, or
// from tests.
// This is a control command.
func (c control) GetSize() int64 {
res := make(chan int64)
c <- controlGetSize{res: res}
return <-res
}
// Gets the number of items removed from the cache due to memory pressure since
// the last time GetDropped was called
// This is a control command.
func (c control) GetDropped() int {
res := make(chan int)
c <- controlGetDropped{res: res}
return <-res
}
// Sets a new max size. That can result in a GC being run if the new maximum size
// is smaller than the cached size
// This is a control command.
func (c control) SetMaxSize(size int64) {
done := make(chan struct{})
c <- controlSetMaxSize{size: size, done: done}
<-done
}
// SyncUpdates waits until the cache has finished asynchronous state updates for any operations
// that were done by the current goroutine up to now.
//
// For efficiency, the cache's implementation of LRU behavior is partly managed by a worker
// goroutine that updates its internal data structures asynchronously. This means that the
// cache's state in terms of (for instance) eviction of LRU items is only eventually consistent;
// there is no guarantee that it happens before a Get or Set call has returned. Most of the time
// application code will not care about this, but especially in a test scenario you may want to
// be able to know when the worker has caught up.
//
// This applies only to cache methods that were previously called by the same goroutine that is
// now calling SyncUpdates. If other goroutines are using the cache at the same time, there is
// no way to know whether any of them still have pending state updates when SyncUpdates returns.
// This is a control command.
func (c control) SyncUpdates() {
done := make(chan struct{})
c <- controlSyncUpdates{done: done}
<-done
}

go.mod

@@ -1,8 +1,3 @@
module github.com/karlseguin/ccache
module github.com/karlseguin/ccache/v3
go 1.13
require (
github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0
)
go 1.19

go.sum

@@ -1,6 +0,0 @@
github.com/karlseguin/expect v1.0.1 h1:z4wy4npwwHSWKjGWH85WNJO42VQhovxTCZDSzhjo8hY=
github.com/karlseguin/expect v1.0.1/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003 h1:vJ0Snvo+SLMY72r5J4sEfkuE7AFbixEP2qRbEcum/wA=
github.com/karlseguin/expect v1.0.2-0.20190806010014-778a5f0c6003/go.mod h1:zNBxMY8P21owkeogJELCLeHIt+voOSduHYTFUbwRAV8=
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0 h1:3UeQBvD0TFrlVjOeLOBz+CPAI8dnbqNSVwUwRrkp7vQ=
github.com/wsxiaoys/terminal v0.0.0-20160513160801-0940f3fc43a0/go.mod h1:IXCdmsXIht47RaVFLEdVnh1t+pgYtTAhQGj73kz+2DM=

item.go

@@ -1,7 +1,7 @@
package ccache
import (
"container/list"
"fmt"
"sync/atomic"
"time"
)
@@ -10,8 +10,8 @@ type Sized interface {
Size() int64
}
type TrackedItem interface {
Value() interface{}
type TrackedItem[T any] interface {
Value() T
Release()
Expired() bool
TTL() time.Duration
@@ -19,85 +19,89 @@ type TrackedItem interface {
Extend(duration time.Duration)
}
type nilItem struct{}
func (n *nilItem) Value() interface{} { return nil }
func (n *nilItem) Release() {}
func (i *nilItem) Expired() bool {
return true
}
func (i *nilItem) TTL() time.Duration {
return time.Minute
}
func (i *nilItem) Expires() time.Time {
return time.Time{}
}
func (i *nilItem) Extend(duration time.Duration) {
}
var NilTracked = new(nilItem)
type Item struct {
type Item[T any] struct {
key string
group string
promotions int32
refCount int32
expires int64
size int64
value interface{}
element *list.Element
value T
next *Item[T]
prev *Item[T]
}
func newItem(key string, value interface{}, expires int64) *Item {
func newItem[T any](key string, value T, expires int64, track bool) *Item[T] {
size := int64(1)
if sized, ok := value.(Sized); ok {
// https://github.com/golang/go/issues/49206
if sized, ok := (interface{})(value).(Sized); ok {
size = sized.Size()
}
return &Item{
item := &Item[T]{
key: key,
value: value,
promotions: 0,
size: size,
expires: expires,
}
if track {
item.refCount = 1
}
return item
}
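A note on the `(interface{})(value).(Sized)` line above: Go does not allow a type assertion directly on a value of type-parameter type, so the value is first boxed into `interface{}` (see the linked golang/go#49206). A standalone sketch of the same trick:

```go
package main

import "fmt"

type Sized interface{ Size() int64 }

type blob struct{ n int64 }

func (b blob) Size() int64 { return b.n }

// sizeOf boxes the generic value so the type assertion is legal;
// asserting directly on a T-typed value is a compile error.
func sizeOf[T any](value T) int64 {
	if sized, ok := (interface{})(value).(Sized); ok {
		return sized.Size()
	}
	return 1
}

func main() {
	fmt.Println(sizeOf(blob{n: 7})) // 7
	fmt.Println(sizeOf("hello"))    // 1
}
```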
func (i *Item) shouldPromote(getsPerPromote int32) bool {
func (i *Item[T]) shouldPromote(getsPerPromote int32) bool {
i.promotions += 1
return i.promotions == getsPerPromote
}
func (i *Item) Value() interface{} {
func (i *Item[T]) Key() string {
return i.key
}
func (i *Item[T]) Value() T {
return i.value
}
func (i *Item) track() {
func (i *Item[T]) track() {
atomic.AddInt32(&i.refCount, 1)
}
func (i *Item) Release() {
func (i *Item[T]) Release() {
atomic.AddInt32(&i.refCount, -1)
}
func (i *Item) Expired() bool {
func (i *Item[T]) Expired() bool {
expires := atomic.LoadInt64(&i.expires)
return expires < time.Now().UnixNano()
}
func (i *Item) TTL() time.Duration {
func (i *Item[T]) TTL() time.Duration {
expires := atomic.LoadInt64(&i.expires)
return time.Nanosecond * time.Duration(expires-time.Now().UnixNano())
}
func (i *Item) Expires() time.Time {
func (i *Item[T]) Expires() time.Time {
expires := atomic.LoadInt64(&i.expires)
return time.Unix(0, expires)
}
func (i *Item) Extend(duration time.Duration) {
func (i *Item[T]) Extend(duration time.Duration) {
atomic.StoreInt64(&i.expires, time.Now().Add(duration).UnixNano())
}
// String returns a string representation of the Item. This includes the default string
// representation of its Value(), as implemented by fmt.Sprintf with "%v", but the exact
// format of the string should not be relied on; it is provided only for debugging
// purposes, and because otherwise including an Item in a fmt.Printf or fmt.Sprintf
// call could cause fields of the Item to be read in a non-thread-safe way.
func (i *Item[T]) String() string {
group := i.group
if group == "" {
return fmt.Sprintf("Item(%s:%v)", i.key, i.value)
}
return fmt.Sprintf("Item(%s:%s:%v)", group, i.key, i.value)
}


@@ -5,45 +5,44 @@ import (
"testing"
"time"
. "github.com/karlseguin/expect"
"github.com/karlseguin/ccache/v3/assert"
)
type ItemTests struct{}
func Test_Item(t *testing.T) {
Expectify(new(ItemTests), t)
func Test_Item_Key(t *testing.T) {
item := &Item[int]{key: "foo"}
assert.Equal(t, item.Key(), "foo")
}
func (_ *ItemTests) Promotability() {
item := &Item{promotions: 4}
Expect(item.shouldPromote(5)).To.Equal(true)
Expect(item.shouldPromote(5)).To.Equal(false)
func Test_Item_Promotability(t *testing.T) {
item := &Item[int]{promotions: 4}
assert.Equal(t, item.shouldPromote(5), true)
assert.Equal(t, item.shouldPromote(5), false)
}
func (_ *ItemTests) Expired() {
func Test_Item_Expired(t *testing.T) {
now := time.Now().UnixNano()
item1 := &Item{expires: now + (10 * int64(time.Millisecond))}
item2 := &Item{expires: now - (10 * int64(time.Millisecond))}
Expect(item1.Expired()).To.Equal(false)
Expect(item2.Expired()).To.Equal(true)
item1 := &Item[int]{expires: now + (10 * int64(time.Millisecond))}
item2 := &Item[int]{expires: now - (10 * int64(time.Millisecond))}
assert.Equal(t, item1.Expired(), false)
assert.Equal(t, item2.Expired(), true)
}
func (_ *ItemTests) TTL() {
func Test_Item_TTL(t *testing.T) {
now := time.Now().UnixNano()
item1 := &Item{expires: now + int64(time.Second)}
item2 := &Item{expires: now - int64(time.Second)}
Expect(int(math.Ceil(item1.TTL().Seconds()))).To.Equal(1)
Expect(int(math.Ceil(item2.TTL().Seconds()))).To.Equal(-1)
item1 := &Item[int]{expires: now + int64(time.Second)}
item2 := &Item[int]{expires: now - int64(time.Second)}
assert.Equal(t, int(math.Ceil(item1.TTL().Seconds())), 1)
assert.Equal(t, int(math.Ceil(item2.TTL().Seconds())), -1)
}
func (_ *ItemTests) Expires() {
func Test_Item_Expires(t *testing.T) {
now := time.Now().UnixNano()
item := &Item{expires: now + (10)}
Expect(item.Expires().UnixNano()).To.Equal(now + 10)
item := &Item[int]{expires: now + (10)}
assert.Equal(t, item.Expires().UnixNano(), now+10)
}
func (_ *ItemTests) Extend() {
item := &Item{expires: time.Now().UnixNano() + 10}
func Test_Item_Extend(t *testing.T) {
item := &Item[int]{expires: time.Now().UnixNano() + 10}
item.Extend(time.Minute * 2)
Expect(item.Expires().Unix()).To.Equal(time.Now().Unix() + 120)
assert.Equal(t, item.Expires().Unix(), time.Now().Unix()+120)
}


@@ -5,12 +5,12 @@ import (
"time"
)
type layeredBucket struct {
type layeredBucket[T any] struct {
sync.RWMutex
buckets map[string]*bucket
buckets map[string]*bucket[T]
}
func (b *layeredBucket) itemCount() int {
func (b *layeredBucket[T]) itemCount() int {
count := 0
b.RLock()
defer b.RUnlock()
@@ -20,7 +20,7 @@ func (b *layeredBucket) itemCount() int {
return count
}
func (b *layeredBucket) get(primary, secondary string) *Item {
func (b *layeredBucket[T]) get(primary, secondary string) *Item[T] {
bucket := b.getSecondaryBucket(primary)
if bucket == nil {
return nil
@@ -28,44 +28,74 @@ func (b *layeredBucket) get(primary, secondary string) *Item {
return bucket.get(secondary)
}
func (b *layeredBucket) getSecondaryBucket(primary string) *bucket {
func (b *layeredBucket[T]) getSecondaryBucket(primary string) *bucket[T] {
b.RLock()
bucket, exists := b.buckets[primary]
b.RUnlock()
if exists == false {
if !exists {
return nil
}
return bucket
}
func (b *layeredBucket) set(primary, secondary string, value interface{}, duration time.Duration) (*Item, *Item) {
func (b *layeredBucket[T]) set(primary, secondary string, value T, duration time.Duration, track bool) (*Item[T], *Item[T]) {
b.Lock()
bkt, exists := b.buckets[primary]
if exists == false {
bkt = &bucket{lookup: make(map[string]*Item)}
if !exists {
bkt = &bucket[T]{lookup: make(map[string]*Item[T])}
b.buckets[primary] = bkt
}
b.Unlock()
item, existing := bkt.set(secondary, value, duration)
item, existing := bkt.set(secondary, value, duration, track)
item.group = primary
return item, existing
}
func (b *layeredBucket) delete(primary, secondary string) *Item {
func (b *layeredBucket[T]) remove(primary, secondary string) *Item[T] {
b.RLock()
bucket, exists := b.buckets[primary]
b.RUnlock()
if exists == false {
if !exists {
return nil
}
return bucket.delete(secondary)
return bucket.remove(secondary)
}
func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool {
func (b *layeredBucket[T]) delete(primary, secondary string) {
b.RLock()
bucket, exists := b.buckets[primary]
b.RUnlock()
if exists == false {
if !exists {
return
}
bucket.delete(secondary)
}
func (b *layeredBucket[T]) deletePrefix(primary, prefix string, deletables chan *Item[T]) int {
b.RLock()
bucket, exists := b.buckets[primary]
b.RUnlock()
if !exists {
return 0
}
return bucket.deletePrefix(prefix, deletables)
}
func (b *layeredBucket[T]) deleteFunc(primary string, matches func(key string, item *Item[T]) bool, deletables chan *Item[T]) int {
b.RLock()
bucket, exists := b.buckets[primary]
b.RUnlock()
if !exists {
return 0
}
return bucket.deleteFunc(matches, deletables)
}
func (b *layeredBucket[T]) deleteAll(primary string, deletables chan *Item[T]) bool {
b.RLock()
bucket, exists := b.buckets[primary]
b.RUnlock()
if !exists {
return false
}
@@ -82,11 +112,19 @@ func (b *layeredBucket) deleteAll(primary string, deletables chan *Item) bool {
return true
}
func (b *layeredBucket) clear() {
b.Lock()
defer b.Unlock()
func (b *layeredBucket[T]) forEachFunc(primary string, matches func(key string, item *Item[T]) bool) {
b.RLock()
bucket, exists := b.buckets[primary]
b.RUnlock()
if exists {
bucket.forEachFunc(matches)
}
}
// we expect the caller to have acquired a write lock
func (b *layeredBucket[T]) clear() {
for _, bucket := range b.buckets {
bucket.clear()
}
b.buckets = make(map[string]*bucket)
b.buckets = make(map[string]*bucket[T])
}


@@ -2,21 +2,20 @@
package ccache
import (
"container/list"
"hash/fnv"
"sync/atomic"
"time"
)
type LayeredCache struct {
*Configuration
list *list.List
buckets []*layeredBucket
type LayeredCache[T any] struct {
*Configuration[T]
control
list *List[T]
buckets []*layeredBucket[T]
bucketMask uint32
size int64
deletables chan *Item
promotables chan *Item
control chan interface{}
deletables chan *Item[T]
promotables chan *Item[T]
}
// Create a new layered cache with the specified configuration.
@@ -32,25 +31,26 @@ type LayeredCache struct {
// secondary key 2 = ".xml"
// See ccache.Configure() for creating a configuration
func Layered(config *Configuration) *LayeredCache {
c := &LayeredCache{
list: list.New(),
func Layered[T any](config *Configuration[T]) *LayeredCache[T] {
c := &LayeredCache[T]{
list: NewList[T](),
Configuration: config,
control: newControl(),
bucketMask: uint32(config.buckets) - 1,
buckets: make([]*layeredBucket, config.buckets),
deletables: make(chan *Item, config.deleteBuffer),
control: make(chan interface{}),
buckets: make([]*layeredBucket[T], config.buckets),
deletables: make(chan *Item[T], config.deleteBuffer),
promotables: make(chan *Item[T], config.promoteBuffer),
}
for i := 0; i < int(config.buckets); i++ {
c.buckets[i] = &layeredBucket{
buckets: make(map[string]*bucket),
for i := 0; i < config.buckets; i++ {
c.buckets[i] = &layeredBucket[T]{
buckets: make(map[string]*bucket[T]),
}
}
c.restart()
go c.worker()
return c
}
func (c *LayeredCache) ItemCount() int {
func (c *LayeredCache[T]) ItemCount() int {
count := 0
for _, b := range c.buckets {
count += b.itemCount()
@@ -62,30 +62,44 @@ func (c *LayeredCache) ItemCount() int {
// This can return an expired item. Use item.Expired() to see if the item
// is expired and item.TTL() to see how long until the item expires (which
// will be negative for an already expired item).
func (c *LayeredCache) Get(primary, secondary string) *Item {
func (c *LayeredCache[T]) Get(primary, secondary string) *Item[T] {
item := c.bucket(primary).get(primary, secondary)
if item == nil {
return nil
}
if item.expires > time.Now().UnixNano() {
c.promote(item)
select {
case c.promotables <- item:
default:
}
}
return item
}
// Same as Get but does not promote the value. This essentially circumvents the
// "least recently used" aspect of this cache. To some degree, it's akin to a
// "peek"
func (c *LayeredCache[T]) GetWithoutPromote(primary, secondary string) *Item[T] {
return c.bucket(primary).get(primary, secondary)
}
func (c *LayeredCache[T]) ForEachFunc(primary string, matches func(key string, item *Item[T]) bool) {
c.bucket(primary).forEachFunc(primary, matches)
}
// Get the secondary cache for a given primary key. This operation will
// never return nil. In the case where the primary key does not exist, a
// new, underlying, empty bucket will be created and returned.
func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache {
func (c *LayeredCache[T]) GetOrCreateSecondaryCache(primary string) *SecondaryCache[T] {
primaryBkt := c.bucket(primary)
bkt := primaryBkt.getSecondaryBucket(primary)
primaryBkt.Lock()
if bkt == nil {
bkt = &bucket{lookup: make(map[string]*Item)}
bkt = &bucket[T]{lookup: make(map[string]*Item[T])}
primaryBkt.buckets[primary] = bkt
}
primaryBkt.Unlock()
return &SecondaryCache{
return &SecondaryCache[T]{
bucket: bkt,
pCache: c,
}
@@ -93,24 +107,29 @@ func (c *LayeredCache) GetOrCreateSecondaryCache(primary string) *SecondaryCache
// Used when the cache was created with the Track() configuration option.
// Avoid otherwise
func (c *LayeredCache) TrackingGet(primary, secondary string) TrackedItem {
func (c *LayeredCache[T]) TrackingGet(primary, secondary string) TrackedItem[T] {
item := c.Get(primary, secondary)
if item == nil {
return NilTracked
return nil
}
item.track()
return item
}
// Set the value in the cache for the specified duration
func (c *LayeredCache) Set(primary, secondary string, value interface{}, duration time.Duration) {
c.set(primary, secondary, value, duration)
func (c *LayeredCache[T]) TrackingSet(primary, secondary string, value T, duration time.Duration) TrackedItem[T] {
return c.set(primary, secondary, value, duration, true)
}
// Set the value in the cache for the specified duration
func (c *LayeredCache[T]) Set(primary, secondary string, value T, duration time.Duration) {
c.set(primary, secondary, value, duration, false)
}
// Replace the value if it exists, does not set if it doesn't.
// Returns true if the item existed and was replaced, false otherwise.
// Replace does not reset the item's TTL nor does it alter its position in the LRU
func (c *LayeredCache) Replace(primary, secondary string, value interface{}) bool {
func (c *LayeredCache[T]) Replace(primary, secondary string, value T) bool {
item := c.bucket(primary).get(primary, secondary)
if item == nil {
return false
@@ -122,7 +141,10 @@ func (c *LayeredCache) Replace(primary, secondary string, value interface{}) boo
// Attempts to get the value from the cache and calls fetch on a miss.
// If fetch returns an error, no value is cached and the error is returned
// to the caller.
func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration, fetch func() (interface{}, error)) (*Item, error) {
// Note that Fetch merely calls the public Get and Set functions. If you want
// a different Fetch behavior, such as thundering herd protection or returning
// expired items, implement it in your application.
func (c *LayeredCache[T]) Fetch(primary, secondary string, duration time.Duration, fetch func() (T, error)) (*Item[T], error) {
item := c.Get(primary, secondary)
if item != nil {
return item, nil
@@ -131,12 +153,12 @@ func (c *LayeredCache) Fetch(primary, secondary string, duration time.Duration,
if err != nil {
return nil, err
}
return c.set(primary, secondary, value, duration), nil
return c.set(primary, secondary, value, duration, false), nil
}
// Remove the item from the cache, return true if the item was present, false otherwise.
func (c *LayeredCache) Delete(primary, secondary string) bool {
item := c.bucket(primary).delete(primary, secondary)
func (c *LayeredCache[T]) Delete(primary, secondary string) bool {
item := c.bucket(primary).remove(primary, secondary)
if item != nil {
c.deletables <- item
return true
@@ -145,46 +167,22 @@ func (c *LayeredCache) Delete(primary, secondary string) bool {
}
// Deletes all items that share the same primary key
func (c *LayeredCache) DeleteAll(primary string) bool {
func (c *LayeredCache[T]) DeleteAll(primary string) bool {
return c.bucket(primary).deleteAll(primary, c.deletables)
}
//this isn't thread safe. It's meant to be called from non-concurrent tests
func (c *LayeredCache) Clear() {
for _, bucket := range c.buckets {
bucket.clear()
}
c.size = 0
c.list = list.New()
// Deletes all items that share the same primary key and prefix.
func (c *LayeredCache[T]) DeletePrefix(primary, prefix string) int {
return c.bucket(primary).deletePrefix(primary, prefix, c.deletables)
}
func (c *LayeredCache) Stop() {
close(c.promotables)
<-c.control
// Deletes all items that share the same primary key and where the matches func evaluates to true.
func (c *LayeredCache[T]) DeleteFunc(primary string, matches func(key string, item *Item[T]) bool) int {
return c.bucket(primary).deleteFunc(primary, matches, c.deletables)
}
// Gets the number of items removed from the cache due to memory pressure since
// the last time GetDropped was called
func (c *LayeredCache) GetDropped() int {
res := make(chan int)
c.control <- getDropped{res: res}
return <-res
}
// Sets a new max size. That can result in a GC being run if the new maxium size
// is smaller than the cached size
func (c *LayeredCache) SetMaxSize(size int64) {
c.control <- setMaxSize{size}
}
func (c *LayeredCache) restart() {
c.promotables = make(chan *Item, c.promoteBuffer)
c.control = make(chan interface{})
go c.worker()
}
func (c *LayeredCache) set(primary, secondary string, value interface{}, duration time.Duration) *Item {
item, existing := c.bucket(primary).set(primary, secondary, value, duration)
func (c *LayeredCache[T]) set(primary, secondary string, value T, duration time.Duration, track bool) *Item[T] {
item, existing := c.bucket(primary).set(primary, secondary, value, duration, track)
if existing != nil {
c.deletables <- existing
}
@@ -192,87 +190,161 @@ func (c *LayeredCache) set(primary, secondary string, value interface{}, duratio
return item
}
func (c *LayeredCache) bucket(key string) *layeredBucket {
func (c *LayeredCache[T]) bucket(key string) *layeredBucket[T] {
h := fnv.New32a()
h.Write([]byte(key))
return c.buckets[h.Sum32()&c.bucketMask]
}
func (c *LayeredCache) promote(item *Item) {
func (c *LayeredCache[T]) halted(fn func()) {
c.halt()
defer c.unhalt()
fn()
}
func (c *LayeredCache[T]) halt() {
for _, bucket := range c.buckets {
bucket.Lock()
}
}
func (c *LayeredCache[T]) unhalt() {
for _, bucket := range c.buckets {
bucket.Unlock()
}
}
func (c *LayeredCache[T]) promote(item *Item[T]) {
c.promotables <- item
}
func (c *LayeredCache) worker() {
defer close(c.control)
func (c *LayeredCache[T]) worker() {
dropped := 0
cc := c.control
promoteItem := func(item *Item[T]) {
if c.doPromote(item) && c.size > c.maxSize {
dropped += c.gc()
}
}
for {
select {
case item, ok := <-c.promotables:
if ok == false {
return
}
if c.doPromote(item) && c.size > c.maxSize {
dropped += c.gc()
}
case item := <-c.promotables:
promoteItem(item)
case item := <-c.deletables:
if item.element == nil {
atomic.StoreInt32(&item.promotions, -2)
} else {
c.size -= item.size
if c.onDelete != nil {
c.onDelete(item)
}
c.list.Remove(item.element)
}
case control := <-c.control:
c.doDelete(item)
case control := <-cc:
switch msg := control.(type) {
case getDropped:
case controlStop:
goto drain
case controlGetDropped:
msg.res <- dropped
dropped = 0
case setMaxSize:
case controlSetMaxSize:
c.maxSize = msg.size
if c.size > c.maxSize {
dropped += c.gc()
}
msg.done <- struct{}{}
case controlClear:
promotables := c.promotables
for len(promotables) > 0 {
<-promotables
}
deletables := c.deletables
for len(deletables) > 0 {
<-deletables
}
c.halted(func() {
for _, bucket := range c.buckets {
bucket.clear()
}
c.size = 0
c.list = NewList[T]()
})
msg.done <- struct{}{}
case controlGetSize:
msg.res <- c.size
case controlGC:
dropped += c.gc()
msg.done <- struct{}{}
case controlSyncUpdates:
doAllPendingPromotesAndDeletes(c.promotables, promoteItem, c.deletables, c.doDelete)
msg.done <- struct{}{}
}
}
}
drain:
for {
select {
case item := <-c.deletables:
c.doDelete(item)
default:
return
}
}
}
func (c *LayeredCache) doPromote(item *Item) bool {
func (c *LayeredCache[T]) doDelete(item *Item[T]) {
if item.prev == nil && item.next == nil {
item.promotions = -2
} else {
c.size -= item.size
if c.onDelete != nil {
c.onDelete(item)
}
c.list.Remove(item)
item.promotions = -2
}
}
func (c *LayeredCache[T]) doPromote(item *Item[T]) bool {
// deleted before it ever got promoted
if atomic.LoadInt32(&item.promotions) == -2 {
if item.promotions == -2 {
return false
}
if item.element != nil { //not a new item
if item.next != nil || item.prev != nil { // not a new item
if item.shouldPromote(c.getsPerPromote) {
c.list.MoveToFront(item.element)
atomic.StoreInt32(&item.promotions, 0)
c.list.MoveToFront(item)
item.promotions = 0
}
return false
}
c.size += item.size
item.element = c.list.PushFront(item)
c.list.Insert(item)
return true
}
func (c *LayeredCache) gc() int {
element := c.list.Back()
func (c *LayeredCache[T]) gc() int {
dropped := 0
for i := 0; i < c.itemsToPrune; i++ {
if element == nil {
item := c.list.Tail
itemsToPrune := int64(c.itemsToPrune)
if min := c.size - c.maxSize; min > itemsToPrune {
itemsToPrune = min
}
for i := int64(0); i < itemsToPrune; i++ {
if item == nil {
return dropped
}
prev := element.Prev()
item := element.Value.(*Item)
if c.tracking == false || atomic.LoadInt32(&item.refCount) == 0 {
prev := item.prev
if !c.tracking || atomic.LoadInt32(&item.refCount) == 0 {
c.bucket(item.group).delete(item.group, item.key)
c.size -= item.size
c.list.Remove(element)
item.promotions = -2
c.list.Remove(item)
if c.onDelete != nil {
c.onDelete(item)
}
dropped += 1
item.promotions = -2
}
element = prev
item = prev
}
return dropped
}
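One behavioral note on the new `gc` above: the prune count is now the larger of `itemsToPrune` and the current overshoot (`c.size - c.maxSize`), so a single pass can bring the cache back under its limit. A sketch of just that calculation (the overshoot is measured in size units, so with `Sized` items this over-estimates the number of entries to drop, which is harmless):

```go
// pruneTarget mirrors the logic above: evict at least itemsToPrune
// entries, but enough to cover how far size exceeds maxSize.
func pruneTarget(size, maxSize, itemsToPrune int64) int64 {
	if overshoot := size - maxSize; overshoot > itemsToPrune {
		return overshoot
	}
	return itemsToPrune
}
```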


@@ -1,285 +1,438 @@
package ccache
import (
"math/rand"
"sort"
"strconv"
"sync/atomic"
"testing"
"time"
. "github.com/karlseguin/expect"
"github.com/karlseguin/ccache/v3/assert"
)
type LayeredCacheTests struct{}
func Test_LayeredCache(t *testing.T) {
Expectify(new(LayeredCacheTests), t)
func Test_LayedCache_GetsANonExistantValue(t *testing.T) {
cache := newLayered[string]()
assert.Equal(t, cache.Get("spice", "flow"), nil)
assert.Equal(t, cache.ItemCount(), 0)
}
func (_ *LayeredCacheTests) GetsANonExistantValue() {
cache := newLayered()
Expect(cache.Get("spice", "flow")).To.Equal(nil)
Expect(cache.ItemCount()).To.Equal(0)
}
func (_ *LayeredCacheTests) SetANewValue() {
cache := newLayered()
func Test_LayedCache_SetANewValue(t *testing.T) {
cache := newLayered[string]()
cache.Set("spice", "flow", "a value", time.Minute)
Expect(cache.Get("spice", "flow").Value()).To.Equal("a value")
Expect(cache.Get("spice", "stop")).To.Equal(nil)
Expect(cache.ItemCount()).To.Equal(1)
assert.Equal(t, cache.Get("spice", "flow").Value(), "a value")
assert.Equal(t, cache.Get("spice", "stop"), nil)
assert.Equal(t, cache.ItemCount(), 1)
}
func (_ *LayeredCacheTests) SetsMultipleValueWithinTheSameLayer() {
cache := newLayered()
func Test_LayedCache_SetsMultipleValueWithinTheSameLayer(t *testing.T) {
cache := newLayered[string]()
cache.Set("spice", "flow", "value-a", time.Minute)
cache.Set("spice", "must", "value-b", time.Minute)
cache.Set("leto", "sister", "ghanima", time.Minute)
Expect(cache.Get("spice", "flow").Value()).To.Equal("value-a")
Expect(cache.Get("spice", "must").Value()).To.Equal("value-b")
Expect(cache.Get("spice", "worm")).To.Equal(nil)
assert.Equal(t, cache.Get("spice", "flow").Value(), "value-a")
assert.Equal(t, cache.Get("spice", "must").Value(), "value-b")
assert.Equal(t, cache.Get("spice", "worm"), nil)
Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
Expect(cache.Get("leto", "brother")).To.Equal(nil)
Expect(cache.Get("baron", "friend")).To.Equal(nil)
Expect(cache.ItemCount()).To.Equal(3)
assert.Equal(t, cache.Get("leto", "sister").Value(), "ghanima")
assert.Equal(t, cache.Get("leto", "brother"), nil)
assert.Equal(t, cache.Get("baron", "friend"), nil)
assert.Equal(t, cache.ItemCount(), 3)
}
func (_ *LayeredCacheTests) ReplaceDoesNothingIfKeyDoesNotExist() {
cache := newLayered()
Expect(cache.Replace("spice", "flow", "value-a")).To.Equal(false)
Expect(cache.Get("spice", "flow")).To.Equal(nil)
func Test_LayedCache_ReplaceDoesNothingIfKeyDoesNotExist(t *testing.T) {
cache := newLayered[string]()
assert.Equal(t, cache.Replace("spice", "flow", "value-a"), false)
assert.Equal(t, cache.Get("spice", "flow"), nil)
}
func (_ *LayeredCacheTests) ReplaceUpdatesTheValue() {
cache := newLayered()
func Test_LayedCache_ReplaceUpdatesTheValue(t *testing.T) {
cache := newLayered[string]()
cache.Set("spice", "flow", "value-a", time.Minute)
Expect(cache.Replace("spice", "flow", "value-b")).To.Equal(true)
Expect(cache.Get("spice", "flow").Value().(string)).To.Equal("value-b")
Expect(cache.ItemCount()).To.Equal(1)
assert.Equal(t, cache.Replace("spice", "flow", "value-b"), true)
assert.Equal(t, cache.Get("spice", "flow").Value(), "value-b")
assert.Equal(t, cache.ItemCount(), 1)
//not sure how to test that the TTL hasn't changed, short of a sleep..
}
func (_ *LayeredCacheTests) DeletesAValue() {
cache := newLayered()
func Test_LayedCache_DeletesAValue(t *testing.T) {
cache := newLayered[string]()
cache.Set("spice", "flow", "value-a", time.Minute)
cache.Set("spice", "must", "value-b", time.Minute)
cache.Set("leto", "sister", "ghanima", time.Minute)
cache.Delete("spice", "flow")
Expect(cache.Get("spice", "flow")).To.Equal(nil)
Expect(cache.Get("spice", "must").Value()).To.Equal("value-b")
Expect(cache.Get("spice", "worm")).To.Equal(nil)
Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
Expect(cache.ItemCount()).To.Equal(2)
assert.Equal(t, cache.Get("spice", "flow"), nil)
assert.Equal(t, cache.Get("spice", "must").Value(), "value-b")
assert.Equal(t, cache.Get("spice", "worm"), nil)
assert.Equal(t, cache.Get("leto", "sister").Value(), "ghanima")
assert.Equal(t, cache.ItemCount(), 2)
}
func (_ *LayeredCacheTests) OnDeleteCallbackCalled() {
func Test_LayedCache_DeletesAPrefix(t *testing.T) {
cache := newLayered[string]()
assert.Equal(t, cache.ItemCount(), 0)
onDeleteFnCalled := false
onDeleteFn := func(item *Item) {
cache.Set("spice", "aaa", "1", time.Minute)
cache.Set("spice", "aab", "2", time.Minute)
cache.Set("spice", "aac", "3", time.Minute)
cache.Set("leto", "aac", "3", time.Minute)
cache.Set("spice", "ac", "4", time.Minute)
cache.Set("spice", "z5", "7", time.Minute)
assert.Equal(t, cache.ItemCount(), 6)
assert.Equal(t, cache.DeletePrefix("spice", "9a"), 0)
assert.Equal(t, cache.ItemCount(), 6)
assert.Equal(t, cache.DeletePrefix("spice", "aa"), 3)
assert.Equal(t, cache.Get("spice", "aaa"), nil)
assert.Equal(t, cache.Get("spice", "aab"), nil)
assert.Equal(t, cache.Get("spice", "aac"), nil)
assert.Equal(t, cache.Get("spice", "ac").Value(), "4")
assert.Equal(t, cache.Get("spice", "z5").Value(), "7")
assert.Equal(t, cache.ItemCount(), 3)
}
func Test_LayedCache_DeletesAFunc(t *testing.T) {
cache := newLayered[int]()
assert.Equal(t, cache.ItemCount(), 0)
cache.Set("spice", "a", 1, time.Minute)
cache.Set("leto", "b", 2, time.Minute)
cache.Set("spice", "c", 3, time.Minute)
cache.Set("spice", "d", 4, time.Minute)
cache.Set("spice", "e", 5, time.Minute)
cache.Set("spice", "f", 6, time.Minute)
assert.Equal(t, cache.ItemCount(), 6)
assert.Equal(t, cache.DeleteFunc("spice", func(key string, item *Item[int]) bool {
return false
}), 0)
assert.Equal(t, cache.ItemCount(), 6)
assert.Equal(t, cache.DeleteFunc("spice", func(key string, item *Item[int]) bool {
return item.Value() < 4
}), 2)
assert.Equal(t, cache.ItemCount(), 4)
assert.Equal(t, cache.DeleteFunc("spice", func(key string, item *Item[int]) bool {
return key == "d"
}), 1)
assert.Equal(t, cache.ItemCount(), 3)
}
func Test_LayedCache_OnDeleteCallbackCalled(t *testing.T) {
onDeleteFnCalled := int32(0)
onDeleteFn := func(item *Item[string]) {
if item.group == "spice" && item.key == "flow" {
onDeleteFnCalled = true
atomic.AddInt32(&onDeleteFnCalled, 1)
}
}
cache := Layered(Configure().OnDelete(onDeleteFn))
cache := Layered[string](Configure[string]().OnDelete(onDeleteFn))
cache.Set("spice", "flow", "value-a", time.Minute)
cache.Set("spice", "must", "value-b", time.Minute)
cache.Set("leto", "sister", "ghanima", time.Minute)
time.Sleep(time.Millisecond * 10) // Run once to init
cache.SyncUpdates()
cache.Delete("spice", "flow")
time.Sleep(time.Millisecond * 10) // Wait for worker to pick up deleted items
cache.SyncUpdates()
Expect(cache.Get("spice", "flow")).To.Equal(nil)
Expect(cache.Get("spice", "must").Value()).To.Equal("value-b")
Expect(cache.Get("spice", "worm")).To.Equal(nil)
Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
assert.Equal(t, cache.Get("spice", "flow"), nil)
assert.Equal(t, cache.Get("spice", "must").Value(), "value-b")
assert.Equal(t, cache.Get("spice", "worm"), nil)
assert.Equal(t, cache.Get("leto", "sister").Value(), "ghanima")
Expect(onDeleteFnCalled).To.Equal(true)
assert.Equal(t, atomic.LoadInt32(&onDeleteFnCalled), 1)
}
func (_ *LayeredCacheTests) DeletesALayer() {
cache := newLayered()
func Test_LayedCache_DeletesALayer(t *testing.T) {
cache := newLayered[string]()
cache.Set("spice", "flow", "value-a", time.Minute)
cache.Set("spice", "must", "value-b", time.Minute)
cache.Set("leto", "sister", "ghanima", time.Minute)
cache.DeleteAll("spice")
Expect(cache.Get("spice", "flow")).To.Equal(nil)
Expect(cache.Get("spice", "must")).To.Equal(nil)
Expect(cache.Get("spice", "worm")).To.Equal(nil)
Expect(cache.Get("leto", "sister").Value()).To.Equal("ghanima")
assert.Equal(t, cache.Get("spice", "flow"), nil)
assert.Equal(t, cache.Get("spice", "must"), nil)
assert.Equal(t, cache.Get("spice", "worm"), nil)
assert.Equal(t, cache.Get("leto", "sister").Value(), "ghanima")
}
func (_ LayeredCacheTests) GCsTheOldestItems() {
cache := Layered(Configure().ItemsToPrune(10))
func Test_LayeredCache_GCsTheOldestItems(t *testing.T) {
cache := Layered(Configure[int]().ItemsToPrune(10))
cache.Set("xx", "a", 23, time.Minute)
for i := 0; i < 500; i++ {
cache.Set(strconv.Itoa(i), "a", i, time.Minute)
}
cache.Set("xx", "b", 9001, time.Minute)
//let the items get promoted (and added to our list)
time.Sleep(time.Millisecond * 10)
gcLayeredCache(cache)
Expect(cache.Get("xx", "a")).To.Equal(nil)
Expect(cache.Get("xx", "b").Value()).To.Equal(9001)
Expect(cache.Get("8", "a")).To.Equal(nil)
Expect(cache.Get("9", "a").Value()).To.Equal(9)
Expect(cache.Get("10", "a").Value()).To.Equal(10)
cache.SyncUpdates()
cache.GC()
assert.Equal(t, cache.Get("xx", "a"), nil)
assert.Equal(t, cache.Get("xx", "b").Value(), 9001)
assert.Equal(t, cache.Get("8", "a"), nil)
assert.Equal(t, cache.Get("9", "a").Value(), 9)
assert.Equal(t, cache.Get("10", "a").Value(), 10)
}
func (_ LayeredCacheTests) PromotedItemsDontGetPruned() {
cache := Layered(Configure().ItemsToPrune(10).GetsPerPromote(1))
func Test_LayeredCache_PromotedItemsDontGetPruned(t *testing.T) {
cache := Layered(Configure[int]().ItemsToPrune(10).GetsPerPromote(1))
for i := 0; i < 500; i++ {
cache.Set(strconv.Itoa(i), "a", i, time.Minute)
}
time.Sleep(time.Millisecond * 10) //run the worker once to init the list
cache.SyncUpdates()
cache.Get("9", "a")
time.Sleep(time.Millisecond * 10)
gcLayeredCache(cache)
Expect(cache.Get("9", "a").Value()).To.Equal(9)
Expect(cache.Get("10", "a")).To.Equal(nil)
Expect(cache.Get("11", "a").Value()).To.Equal(11)
cache.SyncUpdates()
cache.GC()
assert.Equal(t, cache.Get("9", "a").Value(), 9)
assert.Equal(t, cache.Get("10", "a"), nil)
assert.Equal(t, cache.Get("11", "a").Value(), 11)
}
func (_ LayeredCacheTests) TrackerDoesNotCleanupHeldInstance() {
cache := Layered(Configure().ItemsToPrune(10).Track())
for i := 0; i < 10; i++ {
func Test_LayeredCache_GetWithoutPromoteDoesNotPromote(t *testing.T) {
cache := Layered(Configure[int]().ItemsToPrune(10).GetsPerPromote(1))
for i := 0; i < 500; i++ {
cache.Set(strconv.Itoa(i), "a", i, time.Minute)
}
item := cache.TrackingGet("0", "a")
time.Sleep(time.Millisecond * 10)
gcLayeredCache(cache)
Expect(cache.Get("0", "a").Value()).To.Equal(0)
Expect(cache.Get("1", "a")).To.Equal(nil)
item.Release()
gcLayeredCache(cache)
Expect(cache.Get("0", "a")).To.Equal(nil)
cache.SyncUpdates()
cache.GetWithoutPromote("9", "a")
cache.SyncUpdates()
cache.GC()
assert.Equal(t, cache.Get("9", "a"), nil)
assert.Equal(t, cache.Get("10", "a").Value(), 10)
assert.Equal(t, cache.Get("11", "a").Value(), 11)
}
func (_ LayeredCacheTests) RemovesOldestItemWhenFull() {
cache := Layered(Configure().MaxSize(5).ItemsToPrune(1))
func Test_LayeredCache_TrackerDoesNotCleanupHeldInstance(t *testing.T) {
cache := Layered(Configure[int]().ItemsToPrune(10).Track())
item0 := cache.TrackingSet("0", "a", 0, time.Minute)
for i := 1; i < 11; i++ {
cache.Set(strconv.Itoa(i), "a", i, time.Minute)
}
item1 := cache.TrackingGet("1", "a")
cache.SyncUpdates()
cache.GC()
assert.Equal(t, cache.Get("0", "a").Value(), 0)
assert.Equal(t, cache.Get("1", "a").Value(), 1)
item0.Release()
item1.Release()
cache.GC()
assert.Equal(t, cache.Get("0", "a"), nil)
assert.Equal(t, cache.Get("1", "a"), nil)
}
func Test_LayeredCache_RemovesOldestItemWhenFull(t *testing.T) {
onDeleteFnCalled := false
onDeleteFn := func(item *Item[int]) {
if item.key == "a" {
onDeleteFnCalled = true
}
}
cache := Layered(Configure[int]().MaxSize(5).ItemsToPrune(1).OnDelete(onDeleteFn))
cache.Set("xx", "a", 23, time.Minute)
for i := 0; i < 7; i++ {
cache.Set(strconv.Itoa(i), "a", i, time.Minute)
}
cache.Set("xx", "b", 9001, time.Minute)
time.Sleep(time.Millisecond * 10)
Expect(cache.Get("xx", "a")).To.Equal(nil)
Expect(cache.Get("0", "a")).To.Equal(nil)
Expect(cache.Get("1", "a")).To.Equal(nil)
Expect(cache.Get("2", "a")).To.Equal(nil)
Expect(cache.Get("3", "a").Value()).To.Equal(3)
Expect(cache.Get("xx", "b").Value()).To.Equal(9001)
Expect(cache.GetDropped()).To.Equal(4)
Expect(cache.GetDropped()).To.Equal(0)
cache.SyncUpdates()
assert.Equal(t, cache.Get("xx", "a"), nil)
assert.Equal(t, cache.Get("0", "a"), nil)
assert.Equal(t, cache.Get("1", "a"), nil)
assert.Equal(t, cache.Get("2", "a"), nil)
assert.Equal(t, cache.Get("3", "a").Value(), 3)
assert.Equal(t, cache.Get("xx", "b").Value(), 9001)
assert.Equal(t, cache.GetDropped(), 4)
assert.Equal(t, cache.GetDropped(), 0)
assert.Equal(t, onDeleteFnCalled, true)
}
func (_ LayeredCacheTests) ResizeOnTheFly() {
cache := Layered(Configure().MaxSize(9).ItemsToPrune(1))
func Test_LayeredCache_ResizeOnTheFly(t *testing.T) {
cache := Layered(Configure[int]().MaxSize(9).ItemsToPrune(1))
for i := 0; i < 5; i++ {
cache.Set(strconv.Itoa(i), "a", i, time.Minute)
}
cache.SyncUpdates()
cache.SetMaxSize(3)
time.Sleep(time.Millisecond * 10)
Expect(cache.GetDropped()).To.Equal(2)
Expect(cache.Get("0", "a")).To.Equal(nil)
Expect(cache.Get("1", "a")).To.Equal(nil)
Expect(cache.Get("2", "a").Value()).To.Equal(2)
Expect(cache.Get("3", "a").Value()).To.Equal(3)
Expect(cache.Get("4", "a").Value()).To.Equal(4)
cache.SyncUpdates()
assert.Equal(t, cache.GetDropped(), 2)
assert.Equal(t, cache.Get("0", "a"), nil)
assert.Equal(t, cache.Get("1", "a"), nil)
assert.Equal(t, cache.Get("2", "a").Value(), 2)
assert.Equal(t, cache.Get("3", "a").Value(), 3)
assert.Equal(t, cache.Get("4", "a").Value(), 4)
cache.Set("5", "a", 5, time.Minute)
time.Sleep(time.Millisecond * 5)
Expect(cache.GetDropped()).To.Equal(1)
Expect(cache.Get("2", "a")).To.Equal(nil)
Expect(cache.Get("3", "a").Value()).To.Equal(3)
Expect(cache.Get("4", "a").Value()).To.Equal(4)
Expect(cache.Get("5", "a").Value()).To.Equal(5)
cache.SyncUpdates()
assert.Equal(t, cache.GetDropped(), 1)
assert.Equal(t, cache.Get("2", "a"), nil)
assert.Equal(t, cache.Get("3", "a").Value(), 3)
assert.Equal(t, cache.Get("4", "a").Value(), 4)
assert.Equal(t, cache.Get("5", "a").Value(), 5)
cache.SetMaxSize(10)
cache.Set("6", "a", 6, time.Minute)
time.Sleep(time.Millisecond * 10)
Expect(cache.GetDropped()).To.Equal(0)
Expect(cache.Get("3", "a").Value()).To.Equal(3)
Expect(cache.Get("4", "a").Value()).To.Equal(4)
Expect(cache.Get("5", "a").Value()).To.Equal(5)
Expect(cache.Get("6", "a").Value()).To.Equal(6)
cache.SyncUpdates()
assert.Equal(t, cache.GetDropped(), 0)
assert.Equal(t, cache.Get("3", "a").Value(), 3)
assert.Equal(t, cache.Get("4", "a").Value(), 4)
assert.Equal(t, cache.Get("5", "a").Value(), 5)
assert.Equal(t, cache.Get("6", "a").Value(), 6)
}
func newLayered() *LayeredCache {
return Layered(Configure())
}
func (_ LayeredCacheTests) RemovesOldestItemWhenFullBySizer() {
cache := Layered(Configure().MaxSize(9).ItemsToPrune(2))
func Test_LayeredCache_RemovesOldestItemWhenFullBySizer(t *testing.T) {
cache := Layered(Configure[*SizedItem]().MaxSize(9).ItemsToPrune(2))
for i := 0; i < 7; i++ {
cache.Set("pri", strconv.Itoa(i), &SizedItem{i, 2}, time.Minute)
}
time.Sleep(time.Millisecond * 10)
Expect(cache.Get("pri", "0")).To.Equal(nil)
Expect(cache.Get("pri", "1")).To.Equal(nil)
Expect(cache.Get("pri", "2")).To.Equal(nil)
Expect(cache.Get("pri", "3")).To.Equal(nil)
Expect(cache.Get("pri", "4").Value().(*SizedItem).id).To.Equal(4)
cache.SyncUpdates()
assert.Equal(t, cache.Get("pri", "0"), nil)
assert.Equal(t, cache.Get("pri", "1"), nil)
assert.Equal(t, cache.Get("pri", "2"), nil)
assert.Equal(t, cache.Get("pri", "3"), nil)
assert.Equal(t, cache.Get("pri", "4").Value().id, 4)
}
func (_ LayeredCacheTests) SetUpdatesSizeOnDelta() {
cache := Layered(Configure())
func Test_LayeredCache_SetUpdatesSizeOnDelta(t *testing.T) {
cache := Layered(Configure[*SizedItem]())
cache.Set("pri", "a", &SizedItem{0, 2}, time.Minute)
cache.Set("pri", "b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
checkLayeredSize(cache, 5)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 5)
cache.Set("pri", "b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
checkLayeredSize(cache, 5)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 5)
cache.Set("pri", "b", &SizedItem{0, 4}, time.Minute)
time.Sleep(time.Millisecond * 5)
checkLayeredSize(cache, 6)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 6)
cache.Set("pri", "b", &SizedItem{0, 2}, time.Minute)
cache.Set("sec", "b", &SizedItem{0, 3}, time.Minute)
time.Sleep(time.Millisecond * 5)
checkLayeredSize(cache, 7)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 7)
cache.Delete("pri", "b")
time.Sleep(time.Millisecond * 10)
checkLayeredSize(cache, 5)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 5)
}
func (_ LayeredCacheTests) ReplaceDoesNotchangeSizeIfNotSet() {
cache := Layered(Configure())
func Test_LayeredCache_ReplaceDoesNotchangeSizeIfNotSet(t *testing.T) {
cache := Layered(Configure[*SizedItem]())
cache.Set("pri", "1", &SizedItem{1, 2}, time.Minute)
cache.Set("pri", "2", &SizedItem{1, 2}, time.Minute)
cache.Set("pri", "3", &SizedItem{1, 2}, time.Minute)
cache.Replace("sec", "3", &SizedItem{1, 2})
time.Sleep(time.Millisecond * 5)
checkLayeredSize(cache, 6)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 6)
}
func (_ LayeredCacheTests) ReplaceChangesSize() {
cache := Layered(Configure())
func Test_LayeredCache_ReplaceChangesSize(t *testing.T) {
cache := Layered(Configure[*SizedItem]())
cache.Set("pri", "1", &SizedItem{1, 2}, time.Minute)
cache.Set("pri", "2", &SizedItem{1, 2}, time.Minute)
cache.Replace("pri", "2", &SizedItem{1, 2})
time.Sleep(time.Millisecond * 5)
checkLayeredSize(cache, 4)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 4)
cache.Replace("pri", "2", &SizedItem{1, 1})
time.Sleep(time.Millisecond * 5)
checkLayeredSize(cache, 3)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 3)
cache.Replace("pri", "2", &SizedItem{1, 3})
time.Sleep(time.Millisecond * 5)
checkLayeredSize(cache, 5)
cache.SyncUpdates()
assert.Equal(t, cache.GetSize(), 5)
}
func checkLayeredSize(cache *LayeredCache, sz int64) {
cache.Stop()
Expect(cache.size).To.Equal(sz)
cache.restart()
func Test_LayeredCache_EachFunc(t *testing.T) {
cache := Layered(Configure[int]().MaxSize(3).ItemsToPrune(1))
assert.List(t, forEachKeysLayered[int](cache, "1"), []string{})
cache.Set("1", "a", 1, time.Minute)
assert.List(t, forEachKeysLayered[int](cache, "1"), []string{"a"})
cache.Set("1", "b", 2, time.Minute)
cache.SyncUpdates()
assert.List(t, forEachKeysLayered[int](cache, "1"), []string{"a", "b"})
cache.Set("1", "c", 3, time.Minute)
cache.SyncUpdates()
assert.List(t, forEachKeysLayered[int](cache, "1"), []string{"a", "b", "c"})
cache.Set("1", "d", 4, time.Minute)
cache.SyncUpdates()
assert.List(t, forEachKeysLayered[int](cache, "1"), []string{"b", "c", "d"})
// iteration is non-deterministic, all we know for sure is "stop" should not be in there
cache.Set("1", "stop", 5, time.Minute)
cache.SyncUpdates()
assert.DoesNotContain(t, forEachKeysLayered[int](cache, "1"), "stop")
cache.Set("1", "e", 6, time.Minute)
cache.SyncUpdates()
assert.DoesNotContain(t, forEachKeysLayered[int](cache, "1"), "stop")
}
func gcLayeredCache(cache *LayeredCache) {
cache.Stop()
cache.gc()
cache.restart()
func Test_LayeredCachePrune(t *testing.T) {
maxSize := int64(500)
cache := Layered(Configure[string]().MaxSize(maxSize).ItemsToPrune(50))
epoch := 0
for i := 0; i < 10000; i++ {
epoch += 1
expired := make([]string, 0)
for i := 0; i < 50; i += 1 {
key := strconv.FormatInt(rand.Int63n(maxSize*20), 10)
item := cache.Get(key, key)
if item == nil || item.TTL() > 1*time.Minute {
expired = append(expired, key)
}
}
for _, key := range expired {
cache.Set(key, key, key, 5*time.Minute)
}
if epoch%500 == 0 {
assert.True(t, cache.GetSize() <= 500)
}
}
}
func Test_LayeredConcurrentStop(t *testing.T) {
for i := 0; i < 100; i++ {
cache := Layered(Configure[string]())
r := func() {
for {
key := strconv.Itoa(int(rand.Int31n(100)))
switch rand.Int31n(3) {
case 0:
cache.Get(key, key)
case 1:
cache.Set(key, key, key, time.Minute)
case 2:
cache.Delete(key, key)
}
}
}
go r()
go r()
go r()
time.Sleep(time.Millisecond * 10)
cache.Stop()
}
}
func newLayered[T any]() *LayeredCache[T] {
c := Layered[T](Configure[T]())
c.Clear()
return c
}
func forEachKeysLayered[T any](cache *LayeredCache[T], primary string) []string {
keys := make([]string, 0, 10)
cache.ForEachFunc(primary, func(key string, i *Item[T]) bool {
if key == "stop" {
return false
}
keys = append(keys, key)
return true
})
sort.Strings(keys)
return keys
}

list.go

@@ -0,0 +1,45 @@
package ccache
type List[T any] struct {
Head *Item[T]
Tail *Item[T]
}
func NewList[T any]() *List[T] {
return &List[T]{}
}
func (l *List[T]) Remove(item *Item[T]) {
next := item.next
prev := item.prev
if next == nil {
l.Tail = prev
} else {
next.prev = prev
}
if prev == nil {
l.Head = next
} else {
prev.next = next
}
item.next = nil
item.prev = nil
}
func (l *List[T]) MoveToFront(item *Item[T]) {
l.Remove(item)
l.Insert(item)
}
func (l *List[T]) Insert(item *Item[T]) {
head := l.Head
l.Head = item
if head == nil {
l.Tail = item
return
}
item.next = head
head.prev = item
}

list_test.go

@@ -0,0 +1,79 @@
package ccache
import (
"testing"
"github.com/karlseguin/ccache/v3/assert"
)
func Test_List_Insert(t *testing.T) {
l := NewList[int]()
assertList(t, l)
l.Insert(newItem("a", 1, 0, false))
assertList(t, l, 1)
l.Insert(newItem("b", 2, 0, false))
assertList(t, l, 2, 1)
l.Insert(newItem("c", 3, 0, false))
assertList(t, l, 3, 2, 1)
}
func Test_List_Remove(t *testing.T) {
l := NewList[int]()
assertList(t, l)
item := newItem("a", 1, 0, false)
l.Insert(item)
l.Remove(item)
assertList(t, l)
n5 := newItem("e", 5, 0, false)
l.Insert(n5)
n4 := newItem("d", 4, 0, false)
l.Insert(n4)
n3 := newItem("c", 3, 0, false)
l.Insert(n3)
n2 := newItem("b", 2, 0, false)
l.Insert(n2)
n1 := newItem("a", 1, 0, false)
l.Insert(n1)
l.Remove(n5)
assertList(t, l, 1, 2, 3, 4)
l.Remove(n1)
assertList(t, l, 2, 3, 4)
l.Remove(n3)
assertList(t, l, 2, 4)
l.Remove(n2)
assertList(t, l, 4)
l.Remove(n4)
assertList(t, l)
}
func assertList(t *testing.T, list *List[int], expected ...int) {
t.Helper()
if len(expected) == 0 {
assert.Nil(t, list.Head)
assert.Nil(t, list.Tail)
return
}
node := list.Head
for _, expected := range expected {
assert.Equal(t, node.value, expected)
node = node.next
}
node = list.Tail
for i := len(expected) - 1; i >= 0; i-- {
assert.Equal(t, node.value, expected[i])
node = node.prev
}
}


@@ -1,4 +1,5 @@
# CCache
CCache is an LRU Cache, written in Go, focused on supporting high concurrency.
Lock contention on the list is reduced by:
@@ -9,30 +10,25 @@ Lock contention on the list is reduced by:
Unless otherwise stated, all methods are thread-safe.
## Setup
First, download the project:
```go
go get github.com/karlseguin/ccache
```
The non-generic version of this cache can be imported via `github.com/karlseguin/ccache`.
## Configuration
Next, import and create a `Cache` instance:
Import and create a `Cache` instance:
```go
import (
"github.com/karlseguin/ccache"
"github.com/karlseguin/ccache/v3"
)
var cache = ccache.New(ccache.Configure())
// create a cache with string values
var cache = ccache.New(ccache.Configure[string]())
```
`Configure` exposes a chainable API:
```go
var cache = ccache.New(ccache.Configure().MaxSize(1000).ItemsToPrune(100))
// creates a cache with int values
var cache = ccache.New(ccache.Configure[int]().MaxSize(1000).ItemsToPrune(100))
```
The most likely configuration options to tweak are:
@@ -57,18 +53,21 @@ item := cache.Get("user:4")
if item == nil {
//handle
} else {
user := item.Value().(*User)
user := item.Value()
}
```
The returned `*Item` exposes a number of methods:
* `Value() interface{}` - the value cached
* `Value() T` - the value cached
* `Expired() bool` - whether the item is expired or not
* `TTL() time.Duration` - the duration before the item expires (will be a negative value for expired items)
* `Expires() time.Time` - the time the item will expire
By returning expired items, CCache lets you decide if you want to serve stale content or not. For example, you might decide to serve up slightly stale content (< 30 seconds old) while re-fetching newer data in the background. You might also decide to serve up infinitely stale content if you're unable to get new data from your source.
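A hedged sketch of the stale-while-refresh pattern described above; `refreshUser` is a hypothetical background loader, and `cache` is assumed to hold `*User` values:

```go
item := cache.Get("user:4")
if item != nil {
	if item.Expired() && time.Since(item.Expires()) < 30*time.Second {
		go refreshUser("user:4") // hypothetical: re-fetch and Set in the background
	}
	user := item.Value() // possibly stale, but usable
	_ = user
}
```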
### GetWithoutPromote
Same as `Get` but does not "promote" the value, which is to say it circumvents the "lru" aspect of this cache. Should only be used in limited cases, such as peeking at the value.
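For example (a sketch; the returned `Item` has the same semantics as with `Get`):

```go
item := cache.GetWithoutPromote("user:4")
if item != nil && !item.Expired() {
	user := item.Value() // read without touching the entry's LRU position
	_ = user
}
```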
### Set
`Set` expects the key, value and ttl:
@@ -80,14 +79,16 @@ cache.Set("user:4", user, time.Minute * 10)
There's also a `Fetch` which mixes a `Get` and a `Set`:
```go
item, err := cache.Fetch("user:4", time.Minute * 10, func() (interface{}, error) {
item, err := cache.Fetch("user:4", time.Minute * 10, func() (*User, error) {
//code to fetch the data in case of a miss
//should return the data to cache and the error, if any
})
```
`Fetch` doesn't do anything fancy: it merely uses the public `Get` and `Set` functions. If you want more advanced behavior, such as using a singleflight to protect against thundering herd, supporting a callback that accepts the key, or returning expired items, you should implement it in your application.
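As a hedged illustration (not part of ccache), one way to layer thundering-herd protection over a get/set cycle with `golang.org/x/sync/singleflight`; `loadUserFromDB` and the `User` type are hypothetical, and `cache` is assumed to be a `*ccache.Cache[*User]`:

```go
import "golang.org/x/sync/singleflight"

var group singleflight.Group

func getUser(key string) (*User, error) {
	if item := cache.Get(key); item != nil && !item.Expired() {
		return item.Value(), nil
	}
	// concurrent misses for the same key share a single fetch
	v, err, _ := group.Do(key, func() (interface{}, error) {
		user, err := loadUserFromDB(key) // hypothetical loader
		if err != nil {
			return nil, err
		}
		cache.Set(key, user, time.Minute*10)
		return user, nil
	})
	if err != nil {
		return nil, err
	}
	return v.(*User), nil
}
```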
### Delete
`Delete` expects the key to delete. It's ok to call `Delete` on a non-existant key:
`Delete` expects the key to delete. It's ok to call `Delete` on a non-existent key:
```go
cache.Delete("user:4")
@@ -96,12 +97,29 @@ cache.Delete("user:4")
### DeletePrefix
`DeletePrefix` deletes all keys matching the provided prefix. Returns the number of keys removed.
### DeleteFunc
`DeleteFunc` deletes all items for which the provided matches func evaluates to true. Returns the number of keys removed.
### ForEachFunc
`ForEachFunc` iterates through all keys and values in the map and passes them to the provided function. Iteration stops if the function returns false. Iteration order is random.
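A small sketch, assuming a cache of string values:

```go
keys := make([]string, 0)
cache.ForEachFunc(func(key string, item *ccache.Item[string]) bool {
	keys = append(keys, key)
	return len(keys) < 100 // returning false stops the iteration early
})
```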
### Clear
`Clear` clears the cache. This method is **not** thread safe. It is meant to be used from tests.
`Clear` clears the cache. If the cache's gc is running, `Clear` waits for it to finish.
### Extend
The life of an item can be changed via the `Extend` method. This will change the expiry of the item by the specified duration relative to the current time.
```go
cache.Extend("user:4", time.Minute * 10)
// or
item := cache.Get("user:4")
if item != nil {
item.Extend(time.Minute * 10)
}
```
### Replace
The value of an item can be updated to a new value without renewing the item's TTL or its position in the LRU:
@@ -111,13 +129,21 @@ cache.Replace("user:4", user)
`Replace` returns true if the item existed (and thus was replaced). In the case where the key was not in the cache, the value *is not* inserted and false is returned.
### Setnx
Set the value if the key does not exist. `Setnx` first checks whether the key exists; if it does not, the value is set in the cache. This operation is atomic.

```go
cache.Setnx("user:4", user, time.Minute * 10)
```
### GetDropped
You can get the number of keys evicted due to memory pressure by calling `GetDropped`:
```go
dropped := cache.GetDropped()
```
The counter is reset on every call. If the cache's gc is running, `GetDropped` waits for it to finish; it's meant ot be called asynchronously for statistics /monitoring purposes.
The counter is reset on every call. If the cache's gc is running, `GetDropped` waits for it to finish; it's meant to be called asynchronously for statistics/monitoring purposes.
### Stop
The cache's background worker can be stopped by calling `Stop`. Once `Stop` is called
@@ -129,7 +155,7 @@ CCache supports a special tracking mode which is meant to be used in conjunction
When you configure your cache with `Track()`:
```go
cache = ccache.New(ccache.Configure().Track())
cache = ccache.New(ccache.Configure[int]().Track())
```
The items retrieved via `TrackingGet` will not be eligible for purge until `Release` is called on them:
@@ -140,7 +166,7 @@ user := item.Value() //will be nil if "user:4" didn't exist in the cache
item.Release() //can be called even if item.Value() returned nil
```
In practice, `Release` wouldn't be called until later, at some other place in your code.
In practice, `Release` wouldn't be called until later, at some other place in your code. `TrackingSet` can be used to set a value to be tracked.
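A sketch, assuming the non-layered `TrackingSet(key, value, ttl)` mirrors the layered variant shown elsewhere in this changeset:

```go
item := cache.TrackingSet("user:4", user, time.Minute * 10)
// ... hold on to item and use item.Value() for as long as needed ...
item.Release() // the entry becomes eligible for eviction again
```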
There are a couple of reasons to use the tracking mode if other parts of your code also hold references to objects. First, if you're already going to hold a reference to these objects, there's really no reason not to have them in the cache - the memory is used up anyway.
@@ -155,7 +181,7 @@ CCache's `LayeredCache` stores and retrieves values by both a primary and second
`LayeredCache` takes the same configuration object as the main cache, exposes the same optional tracking capabilities, but exposes a slightly different API:
```go
cache := ccache.Layered(ccache.Configure[string]())
cache.Set("/users/goku", "type:json", "{value_to_cache}", time.Minute * 5)
cache.Set("/users/goku", "type:xml", "<value_to_cache>", time.Minute * 5)
@@ -174,7 +200,7 @@ cache.DeleteAll("/users/goku")
In some cases, when using a `LayeredCache`, it may be desirable to always be acting on the secondary portion of the cache entry. This could be the case where the primary key is used as a key elsewhere in your code. The `SecondaryCache` is retrieved with:
```go
cache := ccache.Layered(ccache.Configure[string]())
sCache := cache.GetOrCreateSecondaryCache("/users/goku")
sCache.Set("type:json", "{value_to_cache}", time.Minute * 5)
```
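From there, the secondary cache behaves like a regular cache keyed only by the secondary key; a sketch using `Fetch` (its signature appears in the source below):

```go
item, err := sCache.Fetch("type:json", time.Minute * 5, func() (string, error) {
	return "{value_to_cache}", nil
})
```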
@@ -187,4 +213,4 @@ By default, items added to a cache have a size of 1. This means that if you conf
However, if the values you set into the cache have a method `Size() int64`, this size will be used. Note that ccache has an overhead of ~350 bytes per entry, which isn't taken into account. In other words, given a filled up cache with `MaxSize(4096000)` and items that return a `Size() int64` of 2048, we can expect to find 2000 items (4096000/2048) taking a total space of 4796000 bytes (4096000 for the values plus 2000 × ~350 bytes of overhead).
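A minimal sketch of a value that opts into size-based accounting (the `Document` type is hypothetical):

```go
type Document struct {
	body []byte
}

// Size lets ccache account for this entry by its payload size
// instead of the default size of 1.
func (d *Document) Size() int64 {
	return int64(len(d.body))
}
```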
## Want Something Simpler?
For a simpler cache, check out [rcache](https://github.com/karlseguin/rcache).


@@ -2,21 +2,21 @@ package ccache
import "time"
type SecondaryCache[T any] struct {
bucket *bucket[T]
pCache *LayeredCache[T]
}
// Get the secondary key.
// The semantics are the same as for LayeredCache.Get
func (s *SecondaryCache[T]) Get(secondary string) *Item[T] {
return s.bucket.get(secondary)
}
// Set the secondary key to a value.
// The semantics are the same as for LayeredCache.Set
func (s *SecondaryCache[T]) Set(secondary string, value T, duration time.Duration) *Item[T] {
item, existing := s.bucket.set(secondary, value, duration, false)
if existing != nil {
s.pCache.deletables <- existing
}
@@ -26,7 +26,7 @@ func (s *SecondaryCache) Set(secondary string, value interface{}, duration time.
// Fetch or set a secondary key.
// The semantics are the same as for LayeredCache.Fetch
func (s *SecondaryCache[T]) Fetch(secondary string, duration time.Duration, fetch func() (T, error)) (*Item[T], error) {
item := s.Get(secondary)
if item != nil {
return item, nil
@@ -40,8 +40,8 @@ func (s *SecondaryCache) Fetch(secondary string, duration time.Duration, fetch f
// Delete a secondary key.
// The semantics are the same as for LayeredCache.Delete
func (s *SecondaryCache[T]) Delete(secondary string) bool {
item := s.bucket.remove(secondary)
if item != nil {
s.pCache.deletables <- item
return true
@@ -51,7 +51,7 @@ func (s *SecondaryCache) Delete(secondary string) bool {
// Replace a secondary key.
// The semantics are the same as for LayeredCache.Replace
func (s *SecondaryCache[T]) Replace(secondary string, value T) bool {
item := s.Get(secondary)
if item == nil {
return false
@@ -62,10 +62,10 @@ func (s *SecondaryCache) Replace(secondary string, value interface{}) bool {
// Track a secondary key.
// The semantics are the same as for LayeredCache.TrackingGet
func (c *SecondaryCache[T]) TrackingGet(secondary string) TrackedItem[T] {
item := c.Get(secondary)
if item == nil {
return nil
}
item.track()
return item


@@ -1,105 +1,100 @@
package ccache
import (
. "github.com/karlseguin/expect"
"strconv"
"testing"
"time"
"github.com/karlseguin/ccache/v3/assert"
)
func Test_SecondaryCache_GetsANonExistantValue(t *testing.T) {
cache := newLayered[string]().GetOrCreateSecondaryCache("foo")
assert.Equal(t, cache == nil, false)
}
func Test_SecondaryCache_SetANewValue(t *testing.T) {
cache := newLayered[string]()
cache.Set("spice", "flow", "a value", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
Expect(sCache.Get("flow").Value()).To.Equal("a value")
Expect(sCache.Get("stop")).To.Equal(nil)
assert.Equal(t, sCache.Get("flow").Value(), "a value")
assert.Equal(t, sCache.Get("stop"), nil)
}
func Test_SecondaryCache_ValueCanBeSeenInBothCaches1(t *testing.T) {
cache := newLayered[string]()
cache.Set("spice", "flow", "a value", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
sCache.Set("orinoco", "another value", time.Minute)
Expect(sCache.Get("orinoco").Value()).To.Equal("another value")
Expect(cache.Get("spice", "orinoco").Value()).To.Equal("another value")
assert.Equal(t, sCache.Get("orinoco").Value(), "another value")
assert.Equal(t, cache.Get("spice", "orinoco").Value(), "another value")
}
func Test_SecondaryCache_ValueCanBeSeenInBothCaches2(t *testing.T) {
cache := newLayered[string]()
sCache := cache.GetOrCreateSecondaryCache("spice")
sCache.Set("flow", "a value", time.Minute)
Expect(sCache.Get("flow").Value()).To.Equal("a value")
Expect(cache.Get("spice", "flow").Value()).To.Equal("a value")
assert.Equal(t, sCache.Get("flow").Value(), "a value")
assert.Equal(t, cache.Get("spice", "flow").Value(), "a value")
}
func Test_SecondaryCache_DeletesAreReflectedInBothCaches(t *testing.T) {
cache := newLayered[string]()
cache.Set("spice", "flow", "a value", time.Minute)
cache.Set("spice", "sister", "ghanima", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
cache.Delete("spice", "flow")
Expect(cache.Get("spice", "flow")).To.Equal(nil)
Expect(sCache.Get("flow")).To.Equal(nil)
assert.Equal(t, cache.Get("spice", "flow"), nil)
assert.Equal(t, sCache.Get("flow"), nil)
sCache.Delete("sister")
Expect(cache.Get("spice", "sister")).To.Equal(nil)
Expect(sCache.Get("sister")).To.Equal(nil)
assert.Equal(t, cache.Get("spice", "sister"), nil)
assert.Equal(t, sCache.Get("sister"), nil)
}
func Test_SecondaryCache_ReplaceDoesNothingIfKeyDoesNotExist(t *testing.T) {
cache := newLayered[string]()
sCache := cache.GetOrCreateSecondaryCache("spice")
Expect(sCache.Replace("flow", "value-a")).To.Equal(false)
Expect(cache.Get("spice", "flow")).To.Equal(nil)
assert.Equal(t, sCache.Replace("flow", "value-a"), false)
assert.Equal(t, cache.Get("spice", "flow"), nil)
}
func Test_SecondaryCache_ReplaceUpdatesTheValue(t *testing.T) {
cache := newLayered[string]()
cache.Set("spice", "flow", "value-a", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
Expect(sCache.Replace("flow", "value-b")).To.Equal(true)
Expect(cache.Get("spice", "flow").Value().(string)).To.Equal("value-b")
assert.Equal(t, sCache.Replace("flow", "value-b"), true)
assert.Equal(t, cache.Get("spice", "flow").Value(), "value-b")
}
func Test_SecondaryCache_FetchReturnsAnExistingValue(t *testing.T) {
cache := newLayered[string]()
cache.Set("spice", "flow", "value-a", time.Minute)
sCache := cache.GetOrCreateSecondaryCache("spice")
val, _ := sCache.Fetch("flow", time.Minute, func() (interface{}, error) { return "a fetched value", nil })
Expect(val.Value().(string)).To.Equal("value-a")
val, _ := sCache.Fetch("flow", time.Minute, func() (string, error) { return "a fetched value", nil })
assert.Equal(t, val.Value(), "value-a")
}
func Test_SecondaryCache_FetchReturnsANewValue(t *testing.T) {
cache := newLayered[string]()
sCache := cache.GetOrCreateSecondaryCache("spice")
val, _ := sCache.Fetch("flow", time.Minute, func() (interface{}, error) { return "a fetched value", nil })
Expect(val.Value().(string)).To.Equal("a fetched value")
val, _ := sCache.Fetch("flow", time.Minute, func() (string, error) { return "a fetched value", nil })
assert.Equal(t, val.Value(), "a fetched value")
}
func Test_SecondaryCache_TrackerDoesNotCleanupHeldInstance(t *testing.T) {
cache := Layered(Configure[int]().ItemsToPrune(10).Track())
for i := 0; i < 10; i++ {
cache.Set(strconv.Itoa(i), "a", i, time.Minute)
}
sCache := cache.GetOrCreateSecondaryCache("0")
item := sCache.TrackingGet("a")
time.Sleep(time.Millisecond * 10)
cache.GC()
assert.Equal(t, cache.Get("0", "a").Value(), 0)
assert.Equal(t, cache.Get("1", "a"), nil)
item.Release()
cache.GC()
assert.Equal(t, cache.Get("0", "a"), nil)
}