Mirror of https://github.com/xxjwxc/public.git (synced 2025-09-26 11:51:14 +08:00)

Commit: get the value but not update the expiration time
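Summary: this commit vendors a copy of muesli/cache2go under mycache/cache2go, adds CacheTable.Peek and CacheItem.AddAccessCount, and switches the mycache wrapper from Value to Peek so that reading an entry no longer resets its expiration timer. The sketch below is not part of the commit; it only illustrates the Value/Peek difference and assumes the vendored import path used by the new example files:

    package main

    import (
        "fmt"
        "time"

        "github.com/xxjwxc/public/mycache/cache2go"
    )

    func main() {
        table := cache2go.Cache("peekDemo")
        table.Add("k", 200*time.Millisecond, "v")

        // Peek reads the value without touching accessedOn, so the original
        // 200ms lifespan keeps counting down.
        if item, err := table.Peek("k"); err == nil {
            fmt.Println("peeked:", item.Data())
        }

        time.Sleep(250 * time.Millisecond)

        // The item expired on schedule despite the read above; calling
        // table.Value("k") instead of Peek would have reset the timer.
        if _, err := table.Peek("k"); err == cache2go.ErrKeyNotFound {
            fmt.Println("expired as scheduled")
        }
    }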
go.mod (1 line changed)
@@ -18,7 +18,6 @@ require (
    github.com/jinzhu/now v1.1.5 // indirect
    github.com/jroimartin/gocui v0.4.0
    github.com/kardianos/service v1.2.1
    github.com/muesli/cache2go v0.0.0-20200423001931-a100c5aac93f
    github.com/nicksnyder/go-i18n/v2 v2.0.3
    github.com/nsqio/go-nsq v1.0.8
    github.com/olivere/elastic v6.2.31+incompatible
go.sum (1 line changed)
@@ -144,7 +144,6 @@ github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/muesli/cache2go v0.0.0-20200423001931-a100c5aac93f h1:dBzTzAKOh89fTvdQ3XlXupMExvLty90V1rrpChTAQAY=
github.com/muesli/cache2go v0.0.0-20200423001931-a100c5aac93f/go.mod h1:414R+qZrt4f9S2TO/s6YVQMNAXR2KdwqQ7pW+O4oYzU=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nicksnyder/go-i18n/v2 v2.0.3 h1:ks/JkQiOEhhuF6jpNvx+Wih1NIiXzUnZeZVnJuI8R8M=
mycache/cache2go/benchmark_test.go (new file, 49 lines)
@@ -0,0 +1,49 @@
/*
 * Simple caching library with expiration capabilities
 * Copyright (c) 2013-2017, Christian Muehlhaeuser <muesli@gmail.com>
 *
 * For license see LICENSE.txt
 */

package cache2go

import (
    "sync"
    "sync/atomic"
    "testing"
    "time"
)

func BenchmarkNotFoundAdd(b *testing.B) {
    table := Cache("testNotFoundAdd")

    var finish sync.WaitGroup
    var added int32
    var idle int32

    fn := func(id int) {
        for i := 0; i < b.N; i++ {
            if table.NotFoundAdd(i, 0, i+id) {
                atomic.AddInt32(&added, 1)
            } else {
                atomic.AddInt32(&idle, 1)
            }
            time.Sleep(0)
        }
        finish.Done()
    }

    finish.Add(10)
    go fn(0x0000)
    go fn(0x1100)
    go fn(0x2200)
    go fn(0x3300)
    go fn(0x4400)
    go fn(0x5500)
    go fn(0x6600)
    go fn(0x7700)
    go fn(0x8800)
    go fn(0x9900)
    finish.Wait()

}
mycache/cache2go/cache.go (new file, 42 lines)
@@ -0,0 +1,42 @@
/*
 * Simple caching library with expiration capabilities
 * Copyright (c) 2012, Radu Ioan Fericean
 *                     2013-2017, Christian Muehlhaeuser <muesli@gmail.com>
 *
 * For license see LICENSE.txt
 */

package cache2go

import (
    "sync"
)

var (
    cache = make(map[string]*CacheTable)
    mutex sync.RWMutex
)

// Cache returns the existing cache table with given name or creates a new one
// if the table does not exist yet.
func Cache(table string) *CacheTable {
    mutex.RLock()
    t, ok := cache[table]
    mutex.RUnlock()

    if !ok {
        mutex.Lock()
        t, ok = cache[table]
        // Double check whether the table exists or not.
        if !ok {
            t = &CacheTable{
                name:  table,
                items: make(map[interface{}]*CacheItem),
            }
            cache[table] = t
        }
        mutex.Unlock()
    }

    return t
}
mycache/cache2go/cache_test.go (new file, 517 lines)
@@ -0,0 +1,517 @@
/*
 * Simple caching library with expiration capabilities
 * Copyright (c) 2013-2017, Christian Muehlhaeuser <muesli@gmail.com>
 *
 * For license see LICENSE.txt
 */

package cache2go

import (
    "bytes"
    "log"
    "strconv"
    "sync"
    "sync/atomic"
    "testing"
    "time"
)

var (
    k = "testkey"
    v = "testvalue"
)

func TestCache(t *testing.T) {
    // add an expiring item after a non-expiring one to
    // trigger expirationCheck iterating over non-expiring items
    table := Cache("testCache")
    table.Add(k+"_1", 0*time.Second, v)
    table.Add(k+"_2", 1*time.Second, v)

    // check if both items are still there
    p, err := table.Value(k + "_1")
    if err != nil || p == nil || p.Data().(string) != v {
        t.Error("Error retrieving non expiring data from cache", err)
    }
    p, err = table.Value(k + "_2")
    if err != nil || p == nil || p.Data().(string) != v {
        t.Error("Error retrieving data from cache", err)
    }

    // sanity checks
    if p.AccessCount() != 1 {
        t.Error("Error getting correct access count")
    }
    if p.LifeSpan() != 1*time.Second {
        t.Error("Error getting correct life-span")
    }
    if p.AccessedOn().Unix() == 0 {
        t.Error("Error getting access time")
    }
    if p.CreatedOn().Unix() == 0 {
        t.Error("Error getting creation time")
    }
}

func TestCacheExpire(t *testing.T) {
    table := Cache("testCache")

    table.Add(k+"_1", 250*time.Millisecond, v+"_1")
    table.Add(k+"_2", 200*time.Millisecond, v+"_2")

    time.Sleep(100 * time.Millisecond)

    // check key `1` is still alive
    _, err := table.Value(k + "_1")
    if err != nil {
        t.Error("Error retrieving value from cache:", err)
    }

    time.Sleep(150 * time.Millisecond)

    // check key `1` again, it should still be alive since we just accessed it
    _, err = table.Value(k + "_1")
    if err != nil {
        t.Error("Error retrieving value from cache:", err)
    }

    // check key `2`, it should have been removed by now
    _, err = table.Value(k + "_2")
    if err == nil {
        t.Error("Found key which should have been expired by now")
    }
}

func TestExists(t *testing.T) {
    // add an expiring item
    table := Cache("testExists")
    table.Add(k, 0, v)
    // check if it exists
    if !table.Exists(k) {
        t.Error("Error verifying existing data in cache")
    }
}

func TestNotFoundAdd(t *testing.T) {
    table := Cache("testNotFoundAdd")

    if !table.NotFoundAdd(k, 0, v) {
        t.Error("Error verifying NotFoundAdd, data not in cache")
    }

    if table.NotFoundAdd(k, 0, v) {
        t.Error("Error verifying NotFoundAdd data in cache")
    }
}

func TestNotFoundAddConcurrency(t *testing.T) {
    table := Cache("testNotFoundAdd")

    var finish sync.WaitGroup
    var added int32
    var idle int32

    fn := func(id int) {
        for i := 0; i < 100; i++ {
            if table.NotFoundAdd(i, 0, i+id) {
                atomic.AddInt32(&added, 1)
            } else {
                atomic.AddInt32(&idle, 1)
            }
            time.Sleep(0)
        }
        finish.Done()
    }

    finish.Add(10)
    go fn(0x0000)
    go fn(0x1100)
    go fn(0x2200)
    go fn(0x3300)
    go fn(0x4400)
    go fn(0x5500)
    go fn(0x6600)
    go fn(0x7700)
    go fn(0x8800)
    go fn(0x9900)
    finish.Wait()

    t.Log(added, idle)

    table.Foreach(func(key interface{}, item *CacheItem) {
        v, _ := item.Data().(int)
        k, _ := key.(int)
        t.Logf("%02x %04x\n", k, v)
    })
}

func TestCacheKeepAlive(t *testing.T) {
    // add an expiring item
    table := Cache("testKeepAlive")
    p := table.Add(k, 250*time.Millisecond, v)

    // keep it alive before it expires
    time.Sleep(100 * time.Millisecond)
    p.KeepAlive()

    // check it's still alive after it was initially supposed to expire
    time.Sleep(150 * time.Millisecond)
    if !table.Exists(k) {
        t.Error("Error keeping item alive")
    }

    // check it expires eventually
    time.Sleep(300 * time.Millisecond)
    if table.Exists(k) {
        t.Error("Error expiring item after keeping it alive")
    }
}

func TestPeek(t *testing.T) {
    // add an expiring item
    table := Cache("TestPeek")
    _ = table.Add(k, 250*time.Millisecond, v)
    _ = table.Add(k+"_", 250*time.Millisecond, v)

    // test peek item
    time.Sleep(150 * time.Millisecond)
    p, _ := table.Peek(k)
    if p.Data() != v {
        t.Error("Error peek item")
    }

    // test k is expired
    time.Sleep(150 * time.Millisecond)
    if table.Exists(k) {
        t.Error("Error peek but expired time updated")
    }

    // test peek nil
    _, err := table.Peek(k)
    if err != ErrKeyNotFound {
        t.Error("Error peek nil value but not return ErrKeyNotFound")
    }

    // test DataLoader
    table.SetDataLoader(func(key interface{}, args ...interface{}) *CacheItem {
        if key == k {
            return NewCacheItem(key, 150*time.Millisecond, "new value")
        }
        return nil
    })
    p, _ = table.Peek(k)
    if p.Data() != "new value" {
        t.Error("Error peek DataLoader callback did not take effect")
    }

    _, err = table.Peek(k + "_")
    if err != ErrKeyNotFoundOrLoadable {
        t.Error("Error peek DataLoader err")
    }
}

func TestDelete(t *testing.T) {
    // add an item to the cache
    table := Cache("testDelete")
    table.Add(k, 0, v)
    // check it's really cached
    p, err := table.Value(k)
    if err != nil || p == nil || p.Data().(string) != v {
        t.Error("Error retrieving data from cache", err)
    }
    // try to delete it
    table.Delete(k)
    // verify it has been deleted
    p, err = table.Value(k)
    if err == nil || p != nil {
        t.Error("Error deleting data")
    }

    // test error handling
    _, err = table.Delete(k)
    if err == nil {
        t.Error("Expected error deleting item")
    }
}

func TestFlush(t *testing.T) {
    // add an item to the cache
    table := Cache("testFlush")
    table.Add(k, 10*time.Second, v)
    // flush the entire table
    table.Flush()

    // try to retrieve the item
    p, err := table.Value(k)
    if err == nil || p != nil {
        t.Error("Error flushing table")
    }
    // make sure there's really nothing else left in the cache
    if table.Count() != 0 {
        t.Error("Error verifying count of flushed table")
    }
}

func TestCount(t *testing.T) {
    // add a huge amount of items to the cache
    table := Cache("testCount")
    count := 100000
    for i := 0; i < count; i++ {
        key := k + strconv.Itoa(i)
        table.Add(key, 10*time.Second, v)
    }
    // confirm every single item has been cached
    for i := 0; i < count; i++ {
        key := k + strconv.Itoa(i)
        p, err := table.Value(key)
        if err != nil || p == nil || p.Data().(string) != v {
            t.Error("Error retrieving data")
        }
    }
    // make sure the item count matches (no dupes etc.)
    if table.Count() != count {
        t.Error("Data count mismatch")
    }
}

func TestDataLoader(t *testing.T) {
    // setup a cache with a configured data-loader
    table := Cache("testDataLoader")
    table.SetDataLoader(func(key interface{}, args ...interface{}) *CacheItem {
        var item *CacheItem
        if key.(string) != "nil" {
            val := k + key.(string)
            i := NewCacheItem(key, 500*time.Millisecond, val)
            item = i
        }

        return item
    })

    // make sure data-loader works as expected and handles unloadable keys
    _, err := table.Value("nil")
    if err == nil || table.Exists("nil") {
        t.Error("Error validating data loader for nil values")
    }

    // retrieve a bunch of items via the data-loader
    for i := 0; i < 10; i++ {
        key := k + strconv.Itoa(i)
        vp := k + key
        p, err := table.Value(key)
        if err != nil || p == nil || p.Data().(string) != vp {
            t.Error("Error validating data loader")
        }
    }
}

func TestAccessCount(t *testing.T) {
    // add 100 items to the cache
    count := 100
    table := Cache("testAccessCount")
    for i := 0; i < count; i++ {
        table.Add(i, 10*time.Second, v)
    }
    // never access the first item, access the second item once, the third
    // twice and so on...
    for i := 0; i < count; i++ {
        for j := 0; j < i; j++ {
            table.Value(i)
        }
    }

    // check MostAccessed returns the items in correct order
    ma := table.MostAccessed(int64(count))
    for i, item := range ma {
        if item.Key() != count-1-i {
            t.Error("Most accessed items seem to be sorted incorrectly")
        }
    }

    // check MostAccessed returns the correct amount of items
    ma = table.MostAccessed(int64(count - 1))
    if len(ma) != count-1 {
        t.Error("MostAccessed returns incorrect amount of items")
    }
}

func TestCallbacks(t *testing.T) {
    var m sync.Mutex
    addedKey := ""
    removedKey := ""
    calledAddedItem := false
    calledRemoveItem := false
    expired := false
    calledExpired := false

    // setup a cache with AddedItem & SetAboutToDelete handlers configured
    table := Cache("testCallbacks")
    table.SetAddedItemCallback(func(item *CacheItem) {
        m.Lock()
        addedKey = item.Key().(string)
        m.Unlock()
    })
    table.SetAddedItemCallback(func(item *CacheItem) {
        m.Lock()
        calledAddedItem = true
        m.Unlock()
    })
    table.SetAboutToDeleteItemCallback(func(item *CacheItem) {
        m.Lock()
        removedKey = item.Key().(string)
        m.Unlock()
    })

    table.SetAboutToDeleteItemCallback(func(item *CacheItem) {
        m.Lock()
        calledRemoveItem = true
        m.Unlock()
    })
    // add an item to the cache and setup its AboutToExpire handler
    i := table.Add(k, 500*time.Millisecond, v)
    i.SetAboutToExpireCallback(func(key interface{}) {
        m.Lock()
        expired = true
        m.Unlock()
    })

    i.SetAboutToExpireCallback(func(key interface{}) {
        m.Lock()
        calledExpired = true
        m.Unlock()
    })

    // verify the AddedItem handler works
    time.Sleep(250 * time.Millisecond)
    m.Lock()
    if addedKey == k && !calledAddedItem {
        t.Error("AddedItem callback not working")
    }
    m.Unlock()
    // verify the AboutToDelete handler works
    time.Sleep(500 * time.Millisecond)
    m.Lock()
    if removedKey == k && !calledRemoveItem {
        t.Error("AboutToDeleteItem callback not working:" + k + "_" + removedKey)
    }
    // verify the AboutToExpire handler works
    if expired && !calledExpired {
        t.Error("AboutToExpire callback not working")
    }
    m.Unlock()

}

func TestCallbackQueue(t *testing.T) {
    var m sync.Mutex
    addedKey := ""
    addedkeyCallback2 := ""
    secondCallbackResult := "second"
    removedKey := ""
    removedKeyCallback := ""
    expired := false
    calledExpired := false
    // setup a cache with AddedItem & SetAboutToDelete handlers configured
    table := Cache("testCallbacks")

    // test callback queue
    table.AddAddedItemCallback(func(item *CacheItem) {
        m.Lock()
        addedKey = item.Key().(string)
        m.Unlock()
    })
    table.AddAddedItemCallback(func(item *CacheItem) {
        m.Lock()
        addedkeyCallback2 = secondCallbackResult
        m.Unlock()
    })

    table.AddAboutToDeleteItemCallback(func(item *CacheItem) {
        m.Lock()
        removedKey = item.Key().(string)
        m.Unlock()
    })
    table.AddAboutToDeleteItemCallback(func(item *CacheItem) {
        m.Lock()
        removedKeyCallback = secondCallbackResult
        m.Unlock()
    })

    i := table.Add(k, 500*time.Millisecond, v)
    i.AddAboutToExpireCallback(func(key interface{}) {
        m.Lock()
        expired = true
        m.Unlock()
    })
    i.AddAboutToExpireCallback(func(key interface{}) {
        m.Lock()
        calledExpired = true
        m.Unlock()
    })

    time.Sleep(250 * time.Millisecond)
    m.Lock()
    if addedKey != k && addedkeyCallback2 != secondCallbackResult {
        t.Error("AddedItem callback queue not working")
    }
    m.Unlock()

    time.Sleep(500 * time.Millisecond)
    m.Lock()
    if removedKey != k && removedKeyCallback != secondCallbackResult {
        t.Error("Item removed callback queue not working")
    }
    m.Unlock()

    // test removing of the callbacks
    table.RemoveAddedItemCallbacks()
    table.RemoveAboutToDeleteItemCallback()
    secondItemKey := "itemKey02"
    expired = false
    i = table.Add(secondItemKey, 500*time.Millisecond, v)
    i.SetAboutToExpireCallback(func(key interface{}) {
        m.Lock()
        expired = true
        m.Unlock()
    })
    i.RemoveAboutToExpireCallback()

    // verify if the callbacks were removed
    time.Sleep(250 * time.Millisecond)
    m.Lock()
    if addedKey == secondItemKey {
        t.Error("AddedItemCallbacks were not removed")
    }
    m.Unlock()

    // verify the AboutToDelete handler works
    time.Sleep(500 * time.Millisecond)
    m.Lock()
    if removedKey == secondItemKey {
        t.Error("AboutToDeleteItem not removed")
    }
    // verify the AboutToExpire handler works
    if !expired && !calledExpired {
        t.Error("AboutToExpire callback not working")
    }
    m.Unlock()
}

func TestLogger(t *testing.T) {
    // setup a logger
    out := new(bytes.Buffer)
    l := log.New(out, "cache2go ", log.Ldate|log.Ltime)

    // setup a cache with this logger
    table := Cache("testLogger")
    table.SetLogger(l)
    table.Add(k, 0, v)

    time.Sleep(100 * time.Millisecond)

    // verify the logger has been used
    if out.Len() == 0 {
        t.Error("Logger is empty")
    }
}
mycache/cache2go/cacheitem.go (new file, 132 lines)
@@ -0,0 +1,132 @@
/*
 * Simple caching library with expiration capabilities
 * Copyright (c) 2013-2017, Christian Muehlhaeuser <muesli@gmail.com>
 *
 * For license see LICENSE.txt
 */

package cache2go

import (
    "sync"
    "time"
)

// CacheItem is an individual cache item
// Parameter data contains the user-set value in the cache.
type CacheItem struct {
    sync.RWMutex

    // The item's key.
    key interface{}
    // The item's data.
    data interface{}
    // How long will the item live in the cache when not being accessed/kept alive.
    lifeSpan time.Duration

    // Creation timestamp.
    createdOn time.Time
    // Last access timestamp.
    accessedOn time.Time
    // How often the item was accessed.
    accessCount int64

    // Callback method triggered right before removing the item from the cache
    aboutToExpire []func(key interface{})
}

// NewCacheItem returns a newly created CacheItem.
// Parameter key is the item's cache-key.
// Parameter lifeSpan determines after which time period without an access the item
// will get removed from the cache.
// Parameter data is the item's value.
func NewCacheItem(key interface{}, lifeSpan time.Duration, data interface{}) *CacheItem {
    t := time.Now()
    return &CacheItem{
        key:           key,
        lifeSpan:      lifeSpan,
        createdOn:     t,
        accessedOn:    t,
        accessCount:   0,
        aboutToExpire: nil,
        data:          data,
    }
}

// AddAccessCount increments the item's access counter by one without
// refreshing the last-access timestamp.
func (item *CacheItem) AddAccessCount() {
    item.Lock()
    defer item.Unlock()
    item.accessCount++
}

// KeepAlive marks an item to be kept for another expireDuration period.
func (item *CacheItem) KeepAlive() {
    item.Lock()
    defer item.Unlock()
    item.accessedOn = time.Now()
    item.accessCount++
}

// LifeSpan returns this item's expiration duration.
func (item *CacheItem) LifeSpan() time.Duration {
    // immutable
    return item.lifeSpan
}

// AccessedOn returns when this item was last accessed.
func (item *CacheItem) AccessedOn() time.Time {
    item.RLock()
    defer item.RUnlock()
    return item.accessedOn
}

// CreatedOn returns when this item was added to the cache.
func (item *CacheItem) CreatedOn() time.Time {
    // immutable
    return item.createdOn
}

// AccessCount returns how often this item has been accessed.
func (item *CacheItem) AccessCount() int64 {
    item.RLock()
    defer item.RUnlock()
    return item.accessCount
}

// Key returns the key of this cached item.
func (item *CacheItem) Key() interface{} {
    // immutable
    return item.key
}

// Data returns the value of this cached item.
func (item *CacheItem) Data() interface{} {
    // immutable
    return item.data
}

// SetAboutToExpireCallback configures a callback, which will be called right
// before the item is about to be removed from the cache.
func (item *CacheItem) SetAboutToExpireCallback(f func(interface{})) {
    if len(item.aboutToExpire) > 0 {
        item.RemoveAboutToExpireCallback()
    }
    item.Lock()
    defer item.Unlock()
    item.aboutToExpire = append(item.aboutToExpire, f)
}

// AddAboutToExpireCallback appends a new callback to the AboutToExpire queue
func (item *CacheItem) AddAboutToExpireCallback(f func(interface{})) {
    item.Lock()
    defer item.Unlock()
    item.aboutToExpire = append(item.aboutToExpire, f)
}

// RemoveAboutToExpireCallback empties the about to expire callback queue
func (item *CacheItem) RemoveAboutToExpireCallback() {
    item.Lock()
    defer item.Unlock()
    item.aboutToExpire = nil
}
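Note the difference between the new AddAccessCount and the existing KeepAlive: both bump the access counter, but only KeepAlive refreshes accessedOn, which is what expirationCheck looks at. A small sketch of the observable effect (hypothetical usage, not part of the commit):

    package main

    import (
        "fmt"
        "time"

        "github.com/xxjwxc/public/mycache/cache2go"
    )

    func main() {
        table := cache2go.Cache("itemDemo")
        item := table.Add("k", 10*time.Second, "v")
        before := item.AccessedOn()

        time.Sleep(50 * time.Millisecond)

        // Peek goes through AddAccessCount: the counter grows,
        // but the last-access timestamp stays put.
        table.Peek("k")
        fmt.Println(item.AccessCount(), item.AccessedOn().Equal(before)) // 1 true

        // Value goes through KeepAlive: the counter grows and accessedOn moves.
        table.Value("k")
        fmt.Println(item.AccessCount(), item.AccessedOn().After(before)) // 2 true
    }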
mycache/cache2go/cachetable.go (new file, 398 lines)
@@ -0,0 +1,398 @@
/*
 * Simple caching library with expiration capabilities
 * Copyright (c) 2013-2017, Christian Muehlhaeuser <muesli@gmail.com>
 *
 * For license see LICENSE.txt
 */

package cache2go

import (
    "log"
    "sort"
    "sync"
    "time"
)

// CacheTable is a table within the cache
type CacheTable struct {
    sync.RWMutex

    // The table's name.
    name string
    // All cached items.
    items map[interface{}]*CacheItem

    // Timer responsible for triggering cleanup.
    cleanupTimer *time.Timer
    // Current timer duration.
    cleanupInterval time.Duration

    // The logger used for this table.
    logger *log.Logger

    // Callback method triggered when trying to load a non-existing key.
    loadData func(key interface{}, args ...interface{}) *CacheItem
    // Callback method triggered when adding a new item to the cache.
    addedItem []func(item *CacheItem)
    // Callback method triggered before deleting an item from the cache.
    aboutToDeleteItem []func(item *CacheItem)
}

// Count returns how many items are currently stored in the cache.
func (table *CacheTable) Count() int {
    table.RLock()
    defer table.RUnlock()
    return len(table.items)
}

// Foreach calls trans for every key/item pair in the table.
func (table *CacheTable) Foreach(trans func(key interface{}, item *CacheItem)) {
    table.RLock()
    defer table.RUnlock()

    for k, v := range table.items {
        trans(k, v)
    }
}

// SetDataLoader configures a data-loader callback, which will be called when
// trying to access a non-existing key. The key and 0...n additional arguments
// are passed to the callback function.
func (table *CacheTable) SetDataLoader(f func(interface{}, ...interface{}) *CacheItem) {
    table.Lock()
    defer table.Unlock()
    table.loadData = f
}

// SetAddedItemCallback configures a callback, which will be called every time
// a new item is added to the cache.
func (table *CacheTable) SetAddedItemCallback(f func(*CacheItem)) {
    if len(table.addedItem) > 0 {
        table.RemoveAddedItemCallbacks()
    }
    table.Lock()
    defer table.Unlock()
    table.addedItem = append(table.addedItem, f)
}

// AddAddedItemCallback appends a new callback to the addedItem queue
func (table *CacheTable) AddAddedItemCallback(f func(*CacheItem)) {
    table.Lock()
    defer table.Unlock()
    table.addedItem = append(table.addedItem, f)
}

// RemoveAddedItemCallbacks empties the added item callback queue
func (table *CacheTable) RemoveAddedItemCallbacks() {
    table.Lock()
    defer table.Unlock()
    table.addedItem = nil
}

// SetAboutToDeleteItemCallback configures a callback, which will be called
// every time an item is about to be removed from the cache.
func (table *CacheTable) SetAboutToDeleteItemCallback(f func(*CacheItem)) {
    if len(table.aboutToDeleteItem) > 0 {
        table.RemoveAboutToDeleteItemCallback()
    }
    table.Lock()
    defer table.Unlock()
    table.aboutToDeleteItem = append(table.aboutToDeleteItem, f)
}

// AddAboutToDeleteItemCallback appends a new callback to the AboutToDeleteItem queue
func (table *CacheTable) AddAboutToDeleteItemCallback(f func(*CacheItem)) {
    table.Lock()
    defer table.Unlock()
    table.aboutToDeleteItem = append(table.aboutToDeleteItem, f)
}

// RemoveAboutToDeleteItemCallback empties the about to delete item callback queue
func (table *CacheTable) RemoveAboutToDeleteItemCallback() {
    table.Lock()
    defer table.Unlock()
    table.aboutToDeleteItem = nil
}

// SetLogger sets the logger to be used by this cache table.
func (table *CacheTable) SetLogger(logger *log.Logger) {
    table.Lock()
    defer table.Unlock()
    table.logger = logger
}

// Expiration check loop, triggered by a self-adjusting timer.
func (table *CacheTable) expirationCheck() {
    table.Lock()
    if table.cleanupTimer != nil {
        table.cleanupTimer.Stop()
    }
    if table.cleanupInterval > 0 {
        table.log("Expiration check triggered after", table.cleanupInterval, "for table", table.name)
    } else {
        table.log("Expiration check installed for table", table.name)
    }

    // To be more accurate with timers, we would need to update 'now' on every
    // loop iteration. Not sure it's really efficient though.
    now := time.Now()
    smallestDuration := 0 * time.Second
    for key, item := range table.items {
        // Cache values so we don't keep blocking the mutex.
        item.RLock()
        lifeSpan := item.lifeSpan
        accessedOn := item.accessedOn
        item.RUnlock()

        if lifeSpan == 0 {
            continue
        }
        if now.Sub(accessedOn) >= lifeSpan {
            // Item has exceeded its lifespan.
            table.deleteInternal(key)
        } else {
            // Find the item chronologically closest to its end-of-lifespan.
            if smallestDuration == 0 || lifeSpan-now.Sub(accessedOn) < smallestDuration {
                smallestDuration = lifeSpan - now.Sub(accessedOn)
            }
        }
    }

    // Setup the interval for the next cleanup run.
    table.cleanupInterval = smallestDuration
    if smallestDuration > 0 {
        table.cleanupTimer = time.AfterFunc(smallestDuration, func() {
            go table.expirationCheck()
        })
    }
    table.Unlock()
}

func (table *CacheTable) addInternal(item *CacheItem) {
    // Careful: do not run this method unless the table-mutex is locked!
    // It will unlock it for the caller before running the callbacks and checks
    table.log("Adding item with key", item.key, "and lifespan of", item.lifeSpan, "to table", table.name)
    table.items[item.key] = item

    // Cache values so we don't keep blocking the mutex.
    expDur := table.cleanupInterval
    addedItem := table.addedItem
    table.Unlock()

    // Trigger callback after adding an item to cache.
    if addedItem != nil {
        for _, callback := range addedItem {
            callback(item)
        }
    }

    // If we haven't set up any expiration check timer or found a more imminent item.
    if item.lifeSpan > 0 && (expDur == 0 || item.lifeSpan < expDur) {
        table.expirationCheck()
    }
}

// Add adds a key/value pair to the cache.
// Parameter key is the item's cache-key.
// Parameter lifeSpan determines after which time period without an access the item
// will get removed from the cache.
// Parameter data is the item's value.
func (table *CacheTable) Add(key interface{}, lifeSpan time.Duration, data interface{}) *CacheItem {
    item := NewCacheItem(key, lifeSpan, data)

    // Add item to cache.
    table.Lock()
    table.addInternal(item)

    return item
}

func (table *CacheTable) deleteInternal(key interface{}) (*CacheItem, error) {
    r, ok := table.items[key]
    if !ok {
        return nil, ErrKeyNotFound
    }

    // Cache value so we don't keep blocking the mutex.
    aboutToDeleteItem := table.aboutToDeleteItem
    table.Unlock()

    // Trigger callbacks before deleting an item from cache.
    if aboutToDeleteItem != nil {
        for _, callback := range aboutToDeleteItem {
            callback(r)
        }
    }

    r.RLock()
    defer r.RUnlock()
    if r.aboutToExpire != nil {
        for _, callback := range r.aboutToExpire {
            callback(key)
        }
    }

    table.Lock()
    table.log("Deleting item with key", key, "created on", r.createdOn, "and hit", r.accessCount, "times from table", table.name)
    delete(table.items, key)

    return r, nil
}

// Delete an item from the cache.
func (table *CacheTable) Delete(key interface{}) (*CacheItem, error) {
    table.Lock()
    defer table.Unlock()

    return table.deleteInternal(key)
}

// Exists returns whether an item exists in the cache. Unlike the Value method
// Exists neither tries to fetch data via the loadData callback nor does it
// keep the item alive in the cache.
func (table *CacheTable) Exists(key interface{}) bool {
    table.RLock()
    defer table.RUnlock()
    _, ok := table.items[key]

    return ok
}

// NotFoundAdd checks whether an item is not yet cached. Unlike the Exists
// method this also adds data if the key could not be found.
func (table *CacheTable) NotFoundAdd(key interface{}, lifeSpan time.Duration, data interface{}) bool {
    table.Lock()

    if _, ok := table.items[key]; ok {
        table.Unlock()
        return false
    }

    item := NewCacheItem(key, lifeSpan, data)
    table.addInternal(item)

    return true
}

// Value returns an item from the cache and marks it to be kept alive. You can
// pass additional arguments to your DataLoader callback function.
func (table *CacheTable) Value(key interface{}, args ...interface{}) (*CacheItem, error) {
    table.RLock()
    r, ok := table.items[key]
    loadData := table.loadData
    table.RUnlock()

    if ok {
        // Update access counter and timestamp.
        r.KeepAlive()
        return r, nil
    }

    // Item doesn't exist in cache. Try and fetch it with a data-loader.
    if loadData != nil {
        item := loadData(key, args...)
        if item != nil {
            table.Add(key, item.lifeSpan, item.data)
            return item, nil
        }

        return nil, ErrKeyNotFoundOrLoadable
    }

    return nil, ErrKeyNotFound
}

// Peek returns an item from the cache, but does not mark its active status, i.e. does not update the access time.
// You can also pass additional arguments to your DataLoader callback function.
func (table *CacheTable) Peek(key interface{}, args ...interface{}) (*CacheItem, error) {
    table.RLock()
    r, ok := table.items[key]
    loadData := table.loadData
    table.RUnlock()

    if ok {
        r.AddAccessCount()
        return r, nil
    }

    // Item doesn't exist in cache. Try and fetch it with a data-loader.
    if loadData != nil {
        item := loadData(key, args...)
        if item != nil {
            table.Add(key, item.lifeSpan, item.data)
            return item, nil
        }

        return nil, ErrKeyNotFoundOrLoadable
    }

    return nil, ErrKeyNotFound
}

// Flush deletes all items from this cache table.
func (table *CacheTable) Flush() {
    table.Lock()
    defer table.Unlock()

    table.log("Flushing table", table.name)

    table.items = make(map[interface{}]*CacheItem)
    table.cleanupInterval = 0
    if table.cleanupTimer != nil {
        table.cleanupTimer.Stop()
    }
}

// CacheItemPair maps key to access counter
type CacheItemPair struct {
    Key         interface{}
    AccessCount int64
}

// CacheItemPairList is a slice of CacheItemPairs that implements sort.
// Interface to sort by AccessCount.
type CacheItemPairList []CacheItemPair

func (p CacheItemPairList) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
func (p CacheItemPairList) Len() int           { return len(p) }
func (p CacheItemPairList) Less(i, j int) bool { return p[i].AccessCount > p[j].AccessCount }

// MostAccessed returns the most accessed items in this cache table
func (table *CacheTable) MostAccessed(count int64) []*CacheItem {
    table.RLock()
    defer table.RUnlock()

    p := make(CacheItemPairList, len(table.items))
    i := 0
    for k, v := range table.items {
        p[i] = CacheItemPair{k, v.accessCount}
        i++
    }
    sort.Sort(p)

    var r []*CacheItem
    c := int64(0)
    for _, v := range p {
        if c >= count {
            break
        }

        item, ok := table.items[v.Key]
        if ok {
            r = append(r, item)
        }
        c++
    }

    return r
}

// Internal logging method for convenience.
func (table *CacheTable) log(v ...interface{}) {
    if table.logger == nil {
        return
    }

    table.logger.Println(v...)
}
mycache/cache2go/errors.go (new file, 20 lines)
@@ -0,0 +1,20 @@
/*
 * Simple caching library with expiration capabilities
 * Copyright (c) 2013-2017, Christian Muehlhaeuser <muesli@gmail.com>
 *
 * For license see LICENSE.txt
 */

package cache2go

import (
    "errors"
)

var (
    // ErrKeyNotFound gets returned when a specific key couldn't be found
    ErrKeyNotFound = errors.New("Key not found in cache")
    // ErrKeyNotFoundOrLoadable gets returned when a specific key couldn't be
    // found and loading via the data-loader callback also failed
    ErrKeyNotFoundOrLoadable = errors.New("Key not found and could not be loaded into cache")
)
mycache/cache2go/examples/callbacks/callbacks.go (new file, 51 lines)
@@ -0,0 +1,51 @@
package main

import (
    "fmt"
    "time"

    "github.com/xxjwxc/public/mycache/cache2go"
)

func main() {
    cache := cache2go.Cache("myCache")

    // This callback will be triggered every time a new item
    // gets added to the cache.
    cache.SetAddedItemCallback(func(entry *cache2go.CacheItem) {
        fmt.Println("Added Callback 1:", entry.Key(), entry.Data(), entry.CreatedOn())
    })
    cache.AddAddedItemCallback(func(entry *cache2go.CacheItem) {
        fmt.Println("Added Callback 2:", entry.Key(), entry.Data(), entry.CreatedOn())
    })
    // This callback will be triggered every time an item
    // is about to be removed from the cache.
    cache.SetAboutToDeleteItemCallback(func(entry *cache2go.CacheItem) {
        fmt.Println("Deleting:", entry.Key(), entry.Data(), entry.CreatedOn())
    })

    // Caching a new item will execute the AddedItem callback.
    cache.Add("someKey", 0, "This is a test!")

    // Let's retrieve the item from the cache
    res, err := cache.Value("someKey")
    if err == nil {
        fmt.Println("Found value in cache:", res.Data())
    } else {
        fmt.Println("Error retrieving value from cache:", err)
    }

    // Deleting the item will execute the AboutToDeleteItem callback.
    cache.Delete("someKey")

    cache.RemoveAddedItemCallbacks()
    // Caching a new item that expires in 3 seconds
    res = cache.Add("anotherKey", 3*time.Second, "This is another test")

    // This callback will be triggered when the item is about to expire
    res.SetAboutToExpireCallback(func(key interface{}) {
        fmt.Println("About to expire:", key.(string))
    })

    time.Sleep(5 * time.Second)
}
mycache/cache2go/examples/dataloader/dataloader.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package main

import (
    "fmt"
    "strconv"

    "github.com/xxjwxc/public/mycache/cache2go"
)

func main() {
    cache := cache2go.Cache("myCache")

    // The data loader gets called automatically whenever something
    // tries to retrieve a non-existing key from the cache.
    cache.SetDataLoader(func(key interface{}, args ...interface{}) *cache2go.CacheItem {
        // Apply some clever loading logic here, e.g. read values for
        // this key from database, network or file.
        val := "This is a test with key " + key.(string)

        // This helper method creates the cached item for us. Yay!
        item := cache2go.NewCacheItem(key, 0, val)
        return item
    })

    // Let's retrieve a few auto-generated items from the cache.
    for i := 0; i < 10; i++ {
        res, err := cache.Value("someKey_" + strconv.Itoa(i))
        if err == nil {
            fmt.Println("Found value in cache:", res.Data())
        } else {
            fmt.Println("Error retrieving value from cache:", err)
        }
    }
}
mycache/cache2go/examples/mycachedapp/mycachedapp.go (new file, 53 lines)
@@ -0,0 +1,53 @@
package main

import (
    "fmt"
    "time"

    "github.com/xxjwxc/public/mycache/cache2go"
)

// Keys & values in cache2go can be of arbitrary types, e.g. a struct.
type myStruct struct {
    text     string
    moreData []byte
}

func main() {
    // Accessing a new cache table for the first time will create it.
    cache := cache2go.Cache("myCache")

    // We will put a new item in the cache. It will expire after
    // not being accessed via Value(key) for more than 5 seconds.
    val := myStruct{"This is a test!", []byte{}}
    cache.Add("someKey", 5*time.Second, &val)

    // Let's retrieve the item from the cache.
    res, err := cache.Value("someKey")
    if err == nil {
        fmt.Println("Found value in cache:", res.Data().(*myStruct).text)
    } else {
        fmt.Println("Error retrieving value from cache:", err)
    }

    // Wait for the item to expire in cache.
    time.Sleep(6 * time.Second)
    res, err = cache.Value("someKey")
    if err != nil {
        fmt.Println("Item is not cached (anymore).")
    }

    // Add another item that never expires.
    cache.Add("someKey", 0, &val)

    // cache2go supports a few handy callbacks and loading mechanisms.
    cache.SetAboutToDeleteItemCallback(func(e *cache2go.CacheItem) {
        fmt.Println("Deleting:", e.Key(), e.Data().(*myStruct).text, e.CreatedOn())
    })

    // Remove the item from the cache.
    cache.Delete("someKey")

    // And wipe the entire cache table.
    cache.Flush()
}
@@ -1,12 +1,12 @@
 /*
-    key/value in-memory cache with timeout-based automatic invalidation
+    key/value in-memory cache with timeout-based automatic invalidation
 */
 package mycache

 import (
     "time"

-    "github.com/muesli/cache2go"
+    "github.com/xxjwxc/public/mycache/cache2go"
     "github.com/xxjwxc/public/serializing"
 )

@@ -21,7 +21,6 @@ type CacheIFS interface {
     Close() (err error) // close the connection
 }

-
 // MyCache is an in-memory cache
 type MyCache struct {
     cache *cache2go.CacheTable
@@ -47,7 +46,7 @@ func (mc *MyCache) Add(key interface{}, value interface{}, lifeSpan time.Duratio

 // Value looks up a cache entry
 func (mc *MyCache) Value(key interface{}, value interface{}) error {
-    res, err := mc.cache.Value(key)
+    res, err := mc.cache.Peek(key)
     if err == nil {
         bt := res.Data().([]byte)
         return decodeValue(bt, value)

@@ -1,12 +1,12 @@
 /*
-    key/value in-memory cache with timeout-based automatic invalidation
+    key/value in-memory cache with timeout-based automatic invalidation
 */
 package mycache

 import (
     "time"

-    "github.com/muesli/cache2go"
+    "github.com/xxjwxc/public/mycache/cache2go"
 )

 // MyCache is an in-memory cache
@@ -15,8 +15,8 @@ type MyCache struct {
 }

 /*
-    Initialize a cache
-    cachename: the cache table name
+    Initialize a cache
+    cachename: the cache table name
 */
 func NewCache(cachename string) (mc *MyCache) {
     mc = &MyCache{}
@@ -25,8 +25,8 @@ func NewCache(cachename string) (mc *MyCache) {
 }

 /*
-    Add a cache entry
-    lifeSpan: cache lifetime; 0 means it never expires
+    Add a cache entry
+    lifeSpan: cache lifetime; 0 means it never expires
 */
 func (mc *MyCache) Add(key interface{}, value interface{}, lifeSpan time.Duration) *cache2go.CacheItem {
     return mc.cache.Add(key, lifeSpan, value)
@@ -49,14 +49,14 @@ func (mc *MyCache) Value(key interface{}) (value interface{}, b bool) {
 }

 /*
-    Check whether a key exists
+    Check whether a key exists
 */
 func (mc *MyCache) IsExist(key interface{}) bool {
     return mc.cache.Exists(key)
 }

 /*
-    Delete a cache entry
+    Delete a cache entry
 */
 func (mc *MyCache) Delete(key interface{}) error {
     _, err := mc.cache.Delete(key)
@@ -64,14 +64,14 @@ func (mc *MyCache) Delete(key interface{}) error {
 }

 /*
-    Get the underlying cache2go table
+    Get the underlying cache2go table
 */
 func (mc *MyCache) GetCache2go() *cache2go.CacheTable {
     return mc.cache
 }

 /*
-    Clear the table contents
+    Clear the table contents
 */
 func (mc *MyCache) Clear() bool {
     mc.cache.Flush()
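Net effect of the wrapper changes above: MyCache.Value now reads through Peek, so fetching an entry no longer extends its lifetime. A caller that still wants the old sliding-expiration read can go through the underlying table via GetCache2go. A rough sketch of that pattern; the wrapper import path and exact setup are assumptions, not shown in this diff:

    package main

    import (
        "fmt"
        "time"

        "github.com/xxjwxc/public/mycache"
    )

    func main() {
        mc := mycache.NewCache("demo")
        mc.Add("someKey", "some value", 300*time.Millisecond)

        time.Sleep(200 * time.Millisecond)

        // Refreshing read through the raw cache2go table (old behaviour):
        // this resets the 300ms countdown.
        if item, err := mc.GetCache2go().Value("someKey"); err == nil {
            fmt.Println("still cached:", item.Data())
        }

        time.Sleep(200 * time.Millisecond)

        // 400ms after Add, but only 200ms after the last refreshing read,
        // so the entry is still there. Reading via mc.Value (now Peek-based)
        // would not have kept it alive.
        fmt.Println("exists:", mc.IsExist("someKey"))
    }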