Mirror of https://github.com/datarhei/core.git (synced 2025-09-26 20:11:29 +08:00)

Commit: Fix memfs concurrent read and write performance

glob/glob.go (21 changed lines)
@@ -4,6 +4,27 @@ import (
 	"github.com/gobwas/glob"
 )
 
+type Glob interface {
+	Match(name string) bool
+}
+
+type globber struct {
+	glob glob.Glob
+}
+
+func Compile(pattern string, separators ...rune) (Glob, error) {
+	g, err := glob.Compile(pattern, separators...)
+	if err != nil {
+		return nil, err
+	}
+
+	return &globber{glob: g}, nil
+}
+
+func (g *globber) Match(name string) bool {
+	return g.glob.Match(name)
+}
+
 // Match returns whether the name matches the glob pattern, also considering
 // one or several optional separators. An error is only returned if the pattern
 // is invalid.
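The new exported Compile/Match pair lets a caller parse a pattern once and reuse the compiled matcher for many names, instead of re-compiling on every call to the package-level Match helper. A minimal usage sketch; the import path github.com/datarhei/core/v16/glob is an assumption, not taken from the diff:

```go
package main

import (
	"fmt"

	"github.com/datarhei/core/v16/glob" // assumed import path
)

func main() {
	// Compile the pattern once, with '/' as the separator rune.
	g, err := glob.Compile("**/a/b/**", '/')
	if err != nil {
		panic(err)
	}

	// Reuse the compiled matcher for many names.
	for _, name := range []string{"/s3/a/b/test.m3u8", "/c/d/test.m3u8"} {
		fmt.Println(name, g.Match(name))
	}
}
```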
glob/glob_test.go (new file, 24 lines)

@@ -0,0 +1,24 @@
package glob

import (
	"testing"

	"github.com/stretchr/testify/require"
)

func TestPatterns(t *testing.T) {
	ok, err := Match("**/a/b/**", "/s3/a/b/test.m3u8", '/')

	require.NoError(t, err)
	require.True(t, ok)

	ok, err = Match("**/a/b/**", "/a/b/test.m3u8", '/')

	require.NoError(t, err)
	require.True(t, ok)

	ok, err = Match("{/memfs,}/a/b/**", "/a/b/test.m3u8", '/')

	require.NoError(t, err)
	require.True(t, ok)
}
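For contrast, a sketch of a case that should not match under the same separator semantics (gobwas/glob, which backs Compile, does not let a single `*` cross the `/` separator). This test is illustrative only and is not part of the commit:

```go
func TestNoMatchSketch(t *testing.T) {
	// "/a/*" matches a single path segment below /a, so the extra
	// segment in the name is expected to make the match fail.
	ok, err := Match("/a/*", "/a/b/test.m3u8", '/')

	require.NoError(t, err)
	require.False(t, ok)
}
```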
go.mod (1 changed line)

@@ -22,6 +22,7 @@ require (
 	github.com/minio/minio-go/v7 v7.0.67
 	github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
 	github.com/prometheus/client_golang v1.19.0
+	github.com/puzpuzpuz/xsync/v3 v3.1.0
 	github.com/shirou/gopsutil/v3 v3.24.1
 	github.com/stretchr/testify v1.8.4
 	github.com/swaggo/echo-swagger v1.4.1
go.sum (2 changed lines)

@@ -150,6 +150,8 @@ github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSz
 github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
 github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
 github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
+github.com/puzpuzpuz/xsync/v3 v3.1.0 h1:EewKT7/LNac5SLiEblJeUu8z5eERHrmRLnMQL2d7qX4=
+github.com/puzpuzpuz/xsync/v3 v3.1.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
 github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
 github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
 github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
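The only new direct dependency is github.com/puzpuzpuz/xsync/v3, which supplies the two primitives the new storage layer builds on: a reader-biased RBMutex and a typed concurrent MapOf (see io/fs/mem_storage.go and the vendored README further down). A minimal sketch of those primitives in isolation, following the vendored documentation:

```go
package main

import (
	"fmt"

	"github.com/puzpuzpuz/xsync/v3"
)

func main() {
	// Reader-biased RW mutex: RLock hands out a token that must be
	// passed back to RUnlock.
	mu := xsync.NewRBMutex()
	t := mu.RLock()
	mu.RUnlock(t)

	// Typed concurrent map, a generic sync.Map-style container.
	m := xsync.NewMapOf[string, int]()
	m.Store("foo", 42)
	v, ok := m.Load("foo")
	fmt.Println(v, ok)
}
```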
io/fs/mem.go (260 changed lines)

@@ -108,16 +108,11 @@ type memFilesystem struct {
 	metaLock sync.RWMutex
 
 	// Mapping of path to file
-	files map[string]*memFile
+	files *memStorage
 
-	// Mutex for the files map
-	filesLock sync.RWMutex
-
-	// Pool for the storage of the contents of files
-	dataPool sync.Pool
-
-	// Current size of the filesystem in bytes
+	// Current size of the filesystem in bytes and its mutex
 	currentSize int64
+	sizeLock    sync.RWMutex
 
 	// Logger from the config
 	logger log.Logger
@@ -137,13 +132,7 @@ func NewMemFilesystem(config MemConfig) (Filesystem, error) {
 
 	fs.logger = fs.logger.WithField("type", "mem")
 
-	fs.files = make(map[string]*memFile)
-
-	fs.dataPool = sync.Pool{
-		New: func() interface{} {
-			return new(bytes.Buffer)
-		},
-	}
+	fs.files = newMemStorage()
 
 	fs.logger.Debug().Log("Created")
 
@@ -218,25 +207,24 @@ func (fs *memFilesystem) SetMetadata(key, data string) {
 }
 
 func (fs *memFilesystem) Size() (int64, int64) {
-	fs.filesLock.RLock()
-	defer fs.filesLock.RUnlock()
+	fs.sizeLock.RLock()
+	defer fs.sizeLock.RUnlock()
 
 	return fs.currentSize, -1
 }
 
 func (fs *memFilesystem) Files() int64 {
-	fs.filesLock.RLock()
-	defer fs.filesLock.RUnlock()
-
 	nfiles := int64(0)
 
-	for _, f := range fs.files {
+	fs.files.Range(func(key string, f *memFile) bool {
 		if f.dir {
-			continue
+			return true
 		}
 
 		nfiles++
-	}
 
-	return nfiles
+		return true
+	})
+
+	return nfiles
 }
@@ -244,9 +232,7 @@ func (fs *memFilesystem) Files() int64 {
 func (fs *memFilesystem) Open(path string) File {
 	path = fs.cleanPath(path)
 
-	fs.filesLock.RLock()
-	file, ok := fs.files[path]
-	fs.filesLock.RUnlock()
+	file, ok := fs.files.LoadAndCopy(path)
 
 	if !ok {
 		return nil
@@ -262,7 +248,9 @@ func (fs *memFilesystem) Open(path string) File {
 	}
 
 	if len(file.linkTo) != 0 {
-		file, ok = fs.files[file.linkTo]
+		file.Close()
+
+		file, ok = fs.files.LoadAndCopy(file.linkTo)
 		if !ok {
 			return nil
 		}
@@ -280,21 +268,23 @@ func (fs *memFilesystem) Open(path string) File {
 func (fs *memFilesystem) ReadFile(path string) ([]byte, error) {
 	path = fs.cleanPath(path)
 
-	fs.filesLock.RLock()
-	file, ok := fs.files[path]
-	fs.filesLock.RUnlock()
+	file, ok := fs.files.LoadAndCopy(path)
 
 	if !ok {
 		return nil, os.ErrNotExist
 	}
 
 	if len(file.linkTo) != 0 {
-		file, ok = fs.files[file.linkTo]
+		file.Close()
+
+		file, ok = fs.files.LoadAndCopy(file.linkTo)
 		if !ok {
 			return nil, os.ErrNotExist
 		}
 	}
 
+	defer file.Close()
+
 	if file.data != nil {
 		return file.data.Bytes(), nil
 	}
@@ -306,21 +296,17 @@ func (fs *memFilesystem) Symlink(oldname, newname string) error {
 	oldname = fs.cleanPath(oldname)
 	newname = fs.cleanPath(newname)
 
-	fs.filesLock.Lock()
-	defer fs.filesLock.Unlock()
-
-	if _, ok := fs.files[oldname]; !ok {
-		return os.ErrNotExist
-	}
-
-	if _, ok := fs.files[newname]; ok {
+	if fs.files.Has(newname) {
 		return os.ErrExist
 	}
 
-	if file, ok := fs.files[oldname]; ok {
-		if len(file.linkTo) != 0 {
-			return fmt.Errorf("%s can't link to another link (%s)", newname, oldname)
-		}
+	oldFile, ok := fs.files.Load(oldname)
+	if !ok {
+		return os.ErrNotExist
+	}
+
+	if len(oldFile.linkTo) != 0 {
+		return fmt.Errorf("%s can't link to another link (%s)", newname, oldname)
 	}
 
 	newFile := &memFile{
@@ -334,7 +320,17 @@ func (fs *memFilesystem) Symlink(oldname, newname string) error {
 		data: nil,
 	}
 
-	fs.files[newname] = newFile
+	oldFile, loaded := fs.files.Store(newname, newFile)
+
+	fs.sizeLock.Lock()
+	defer fs.sizeLock.Unlock()
+
+	if loaded {
+		oldFile.Close()
+		fs.currentSize -= oldFile.size
+	}
+
+	fs.currentSize += newFile.size
 
 	return nil
 }
@@ -349,10 +345,9 @@ func (fs *memFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool,
 			size:    0,
 			lastMod: time.Now(),
 		},
-		data: fs.dataPool.Get().(*bytes.Buffer),
+		data: &bytes.Buffer{},
 	}
 
-	newFile.data.Reset()
 	size, err := newFile.data.ReadFrom(r)
 	if err != nil {
 		fs.logger.WithFields(log.Fields{
@@ -364,21 +359,17 @@ func (fs *memFilesystem) WriteFileReader(path string, r io.Reader) (int64, bool,
 
 	newFile.size = size
 
-	fs.filesLock.Lock()
-	defer fs.filesLock.Unlock()
+	oldFile, replace := fs.files.Store(path, newFile)
 
-	file, replace := fs.files[path]
+	fs.sizeLock.Lock()
+	defer fs.sizeLock.Unlock()
+
 	if replace {
-		delete(fs.files, path)
+		oldFile.Close()
 
-		fs.currentSize -= file.size
-
-		fs.dataPool.Put(file.data)
-		file.data = nil
+		fs.currentSize -= oldFile.size
 	}
 
-	fs.files[path] = newFile
-
 	fs.currentSize += newFile.size
 
 	logger := fs.logger.WithFields(log.Fields{
@@ -405,14 +396,12 @@ func (fs *memFilesystem) WriteFileSafe(path string, data []byte) (int64, bool, e
 }
 
 func (fs *memFilesystem) Purge(size int64) int64 {
-	fs.filesLock.Lock()
-	defer fs.filesLock.Unlock()
-
 	files := []*memFile{}
 
-	for _, f := range fs.files {
-		files = append(files, f)
-	}
+	fs.files.Range(func(_ string, file *memFile) bool {
+		files = append(files, file)
+		return true
+	})
 
 	sort.Slice(files, func(i, j int) bool {
 		return files[i].lastMod.Before(files[j].lastMod)
@@ -421,13 +410,15 @@ func (fs *memFilesystem) Purge(size int64) int64 {
 	var freed int64 = 0
 
 	for _, f := range files {
-		delete(fs.files, f.name)
+		fs.files.Delete(f.name)
 		size -= f.size
 		freed += f.size
-		fs.currentSize -= f.size
 
-		fs.dataPool.Put(f.data)
-		f.data = nil
+		fs.sizeLock.Lock()
+		fs.currentSize -= f.size
+		fs.sizeLock.Unlock()
+
+		f.Close()
 
 		fs.logger.WithFields(log.Fields{
 			"path": f.name,
@@ -448,8 +439,8 @@ func (fs *memFilesystem) Purge(size int64) int64 {
 func (fs *memFilesystem) MkdirAll(path string, perm os.FileMode) error {
 	path = fs.cleanPath(path)
 
-	fs.filesLock.Lock()
-	defer fs.filesLock.Unlock()
+	fs.sizeLock.Lock()
+	defer fs.sizeLock.Unlock()
 
 	info, err := fs.stat(path)
 	if err == nil {
@@ -470,7 +461,7 @@ func (fs *memFilesystem) MkdirAll(path string, perm os.FileMode) error {
 		data: nil,
 	}
 
-	fs.files[path] = f
+	fs.files.Store(path, f)
 
 	return nil
 }
@@ -483,25 +474,23 @@ func (fs *memFilesystem) Rename(src, dst string) error {
 		return nil
 	}
 
-	fs.filesLock.Lock()
-	defer fs.filesLock.Unlock()
-
-	srcFile, ok := fs.files[src]
+	srcFile, ok := fs.files.Load(src)
 	if !ok {
 		return os.ErrNotExist
 	}
 
-	dstFile, ok := fs.files[dst]
-	if ok {
+	dstFile, replace := fs.files.Store(dst, srcFile)
+	fs.files.Delete(src)
+
+	fs.sizeLock.Lock()
+	defer fs.sizeLock.Unlock()
+
+	if replace {
+		dstFile.Close()
+
 		fs.currentSize -= dstFile.size
-
-		fs.dataPool.Put(dstFile.data)
-		dstFile.data = nil
 	}
 
-	fs.files[dst] = srcFile
-	delete(fs.files, src)
-
 	return nil
 }
@@ -513,58 +502,53 @@ func (fs *memFilesystem) Copy(src, dst string) error {
 		return nil
 	}
 
-	fs.filesLock.Lock()
-	defer fs.filesLock.Unlock()
+	if fs.isDir(dst) {
+		return os.ErrInvalid
+	}
 
-	srcFile, ok := fs.files[src]
+	srcFile, ok := fs.files.LoadAndCopy(src)
 	if !ok {
 		return os.ErrNotExist
 	}
 
 	if srcFile.dir {
+		srcFile.Close()
 		return os.ErrNotExist
 	}
 
-	if fs.isDir(dst) {
-		return os.ErrInvalid
-	}
-
-	dstFile, ok := fs.files[dst]
-	if ok {
-		fs.currentSize -= dstFile.size
-	} else {
-		dstFile = &memFile{
+	dstFile := &memFile{
 		memFileInfo: memFileInfo{
 			name:    dst,
 			dir:     false,
 			size:    srcFile.size,
 			lastMod: time.Now(),
 		},
-		data: fs.dataPool.Get().(*bytes.Buffer),
-	}
+		data: srcFile.data,
 	}
 
-	dstFile.data.Reset()
-	dstFile.data.Write(srcFile.data.Bytes())
+	f, replace := fs.files.Store(dst, dstFile)
+
+	fs.sizeLock.Lock()
+	defer fs.sizeLock.Unlock()
+
+	if replace {
+		f.Close()
+		fs.currentSize -= f.size
+	}
 
 	fs.currentSize += dstFile.size
 
-	fs.files[dst] = dstFile
-
 	return nil
 }
 
 func (fs *memFilesystem) Stat(path string) (FileInfo, error) {
 	path = fs.cleanPath(path)
 
-	fs.filesLock.RLock()
-	defer fs.filesLock.RUnlock()
-
 	return fs.stat(path)
 }
 
 func (fs *memFilesystem) stat(path string) (FileInfo, error) {
-	file, ok := fs.files[path]
+	file, ok := fs.files.Load(path)
 	if ok {
 		f := &memFileInfo{
 			name: file.name,
@@ -575,7 +559,7 @@ func (fs *memFilesystem) stat(path string) (FileInfo, error) {
 		}
 
 		if len(f.linkTo) != 0 {
-			file, ok := fs.files[f.linkTo]
+			file, ok := fs.files.Load(f.linkTo)
 			if !ok {
 				return nil, os.ErrNotExist
 			}
@@ -604,7 +588,7 @@ func (fs *memFilesystem) stat(path string) (FileInfo, error) {
 }
 
 func (fs *memFilesystem) isDir(path string) bool {
-	file, ok := fs.files[path]
+	file, ok := fs.files.Load(path)
 	if ok {
 		return file.dir
 	}
@@ -617,28 +601,29 @@ func (fs *memFilesystem) isDir(path string) bool {
 		return true
 	}
 
-	for k := range fs.files {
-		if strings.HasPrefix(k, path) {
-			return true
-		}
-	}
+	found := false
+
+	fs.files.Range(func(k string, _ *memFile) bool {
+		if strings.HasPrefix(k, path) {
+			found = true
+			return false
+		}
 
-	return false
+		return true
+	})
+
+	return found
 }
 
 func (fs *memFilesystem) Remove(path string) int64 {
 	path = fs.cleanPath(path)
 
-	fs.filesLock.Lock()
-	defer fs.filesLock.Unlock()
-
-	file, ok := fs.files[path]
+	file, ok := fs.files.Delete(path)
 	if ok {
-		delete(fs.files, path)
-		fs.currentSize -= file.size
+		file.Close()
+		fs.sizeLock.Lock()
+		defer fs.sizeLock.Unlock()
 
-		fs.dataPool.Put(file.data)
-		file.data = nil
+		fs.currentSize -= file.size
 	} else {
 		return -1
 	}
@@ -653,12 +638,12 @@ func (fs *memFilesystem) Remove(path string) int64 {
 }
 
 func (fs *memFilesystem) RemoveAll() int64 {
-	fs.filesLock.Lock()
-	defer fs.filesLock.Unlock()
+	fs.sizeLock.Lock()
+	defer fs.sizeLock.Unlock()
 
 	size := fs.currentSize
 
-	fs.files = make(map[string]*memFile)
+	fs.files = newMemStorage()
 	fs.currentSize = 0
 
 	return size
@@ -668,22 +653,29 @@ func (fs *memFilesystem) List(path, pattern string) []FileInfo {
 	path = fs.cleanPath(path)
 	files := []FileInfo{}
 
-	fs.filesLock.RLock()
-	defer fs.filesLock.RUnlock()
-
-	for _, file := range fs.files {
-		if !strings.HasPrefix(file.name, path) {
-			continue
-		}
+	var compiledPattern glob.Glob
+	var err error
 
 	if len(pattern) != 0 {
-		if ok, _ := glob.Match(pattern, file.name, '/'); !ok {
-			continue
+		compiledPattern, err = glob.Compile(pattern, '/')
+		if err != nil {
+			return nil
 		}
 	}
 
+	fs.files.Range(func(key string, file *memFile) bool {
 		if file.dir {
-			continue
+			return true
 		}
 
+		if !strings.HasPrefix(file.name, path) {
+			return true
+		}
+
+		if compiledPattern != nil {
+			if !compiledPattern.Match(file.name) {
+				return true
+			}
+		}
+
 		files = append(files, &memFileInfo{
@@ -692,7 +684,9 @@ func (fs *memFilesystem) List(path, pattern string) []FileInfo {
 			lastMod: file.lastMod,
 			linkTo:  file.linkTo,
 		})
-	}
 
+		return true
+	})
+
 	return files
 }
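The net effect of the mem.go changes: readers (Open, ReadFile, Stat, List, Files) no longer serialize on a single RWMutex around the whole file map; they go through the reader-biased storage and get private copies of file data, while writers only take the small sizeLock for byte accounting. A rough stress sketch of the access pattern the change targets, using the methods shown above (imports testing, sync, strings and require are assumed; a zero MemConfig is assumed to be accepted by NewMemFilesystem; this is not a benchmark from the commit):

```go
func TestConcurrentReadWriteSketch(t *testing.T) {
	memfs, err := NewMemFilesystem(MemConfig{}) // assumed: zero config is valid
	require.NoError(t, err)

	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(2)
		go func() { // writer: replaces the same file over and over
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				memfs.WriteFileReader("/a/b/test.m3u8", strings.NewReader("segment data"))
			}
		}()
		go func() { // reader: receives a private copy, never a shared buffer
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				memfs.ReadFile("/a/b/test.m3u8")
			}
		}()
	}
	wg.Wait()
}
```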
io/fs/mem_storage.go (new file, 84 lines)

@@ -0,0 +1,84 @@
package fs

import (
	"bytes"

	"github.com/puzpuzpuz/xsync/v3"
)

type memStorage struct {
	lock  *xsync.RBMutex
	files *xsync.MapOf[string, *memFile]
}

func newMemStorage() *memStorage {
	m := &memStorage{
		lock:  xsync.NewRBMutex(),
		files: xsync.NewMapOf[string, *memFile](),
	}

	return m
}

func (m *memStorage) Delete(key string) (*memFile, bool) {
	m.lock.Lock()
	defer m.lock.Unlock()

	return m.files.LoadAndDelete(key)
}

func (m *memStorage) Store(key string, value *memFile) (*memFile, bool) {
	m.lock.Lock()
	defer m.lock.Unlock()

	return m.files.LoadAndStore(key, value)
}

func (m *memStorage) Load(key string) (*memFile, bool) {
	token := m.lock.RLock()
	defer m.lock.RUnlock(token)

	return m.files.Load(key)
}

func (m *memStorage) LoadAndCopy(key string) (*memFile, bool) {
	token := m.lock.RLock()
	defer m.lock.RUnlock(token)

	v, ok := m.files.Load(key)
	if !ok {
		return nil, false
	}

	f := &memFile{
		memFileInfo: memFileInfo{
			name:    v.name,
			size:    v.size,
			dir:     v.dir,
			lastMod: v.lastMod,
			linkTo:  v.linkTo,
		},
	}

	if v.data != nil {
		f.data = bytes.NewBuffer(v.data.Bytes())
	}

	return f, true
}

func (m *memStorage) Has(key string) bool {
	token := m.lock.RLock()
	defer m.lock.RUnlock(token)

	_, ok := m.files.Load(key)

	return ok
}

func (m *memStorage) Range(f func(key string, value *memFile) bool) {
	token := m.lock.RLock()
	defer m.lock.RUnlock(token)

	m.files.Range(f)
}
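memStorage wraps xsync.MapOf for the key lookups and a reader-biased RBMutex so that writers (Store, Delete) exclude readers only briefly, while readers scale across cores. LoadAndCopy hands the caller an independent copy of the file, including a detached bytes.Buffer, so a reader never shares a buffer with a concurrent writer. A minimal sketch of the intended usage pattern inside package fs (memFile.Close is used as in mem.go above; imports bytes and time and the field names are taken from the definitions in this commit):

```go
// Illustrative sketch, not code from the commit.
func exampleMemStorageUsage() {
	files := newMemStorage()

	// Writer side: build the replacement file, then publish it with a
	// single Store; the previous version (if any) is returned so the
	// caller can release it and adjust its size accounting.
	newFile := &memFile{
		memFileInfo: memFileInfo{name: "/a/b/test.m3u8", lastMod: time.Now()},
		data:        bytes.NewBufferString("segment data"),
	}
	newFile.size = int64(newFile.data.Len())

	if oldFile, replaced := files.Store(newFile.name, newFile); replaced {
		oldFile.Close()
	}

	// Reader side: LoadAndCopy returns an independent copy whose data
	// buffer is detached from the stored one, so a concurrent writer
	// cannot mutate the bytes under the reader.
	if file, ok := files.LoadAndCopy("/a/b/test.m3u8"); ok {
		defer file.Close()
		_ = file.data.Bytes()
	}
}
```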
@@ -27,6 +27,7 @@ func TestMemFromDir(t *testing.T) {
 		"/disk.go",
 		"/fs_test.go",
 		"/fs.go",
+		"/mem_storage.go",
 		"/mem_test.go",
 		"/mem.go",
 		"/readonly_test.go",
vendor/github.com/puzpuzpuz/xsync/v3/.gitignore (generated, vendored, new file, 15 lines)

@@ -0,0 +1,15 @@
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Dependency directories (remove the comment below to include it)
# vendor/
vendor/github.com/puzpuzpuz/xsync/v3/BENCHMARKS.md (generated, vendored, new file, 131 lines)
@@ -0,0 +1,131 @@
|
||||
# xsync benchmarks
|
||||
|
||||
If you're interested in `MapOf` comparison with some of the popular concurrent hash maps written in Go, check [this](https://github.com/cornelk/hashmap/pull/70) and [this](https://github.com/alphadose/haxmap/pull/22) PRs.
|
||||
|
||||
The below results were obtained for xsync v2.3.1 on a c6g.metal EC2 instance (64 CPU, 128GB RAM) running Linux and Go 1.19.3. I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks.
|
||||
|
||||
The following commands were used to run the benchmarks:
|
||||
```bash
|
||||
$ go test -run='^$' -cpu=1,2,4,8,16,32,64 -bench . -count=30 -timeout=0 | tee bench.txt
|
||||
$ benchstat bench.txt | tee benchstat.txt
|
||||
```
|
||||
|
||||
The below sections contain some of the results. Refer to [this gist](https://gist.github.com/puzpuzpuz/e62e38e06feadecfdc823c0f941ece0b) for the complete output.
|
||||
|
||||
### Counter vs. atomic int64
|
||||
|
||||
```
|
||||
name time/op
|
||||
Counter 27.3ns ± 1%
|
||||
Counter-2 27.2ns ±11%
|
||||
Counter-4 15.3ns ± 8%
|
||||
Counter-8 7.43ns ± 7%
|
||||
Counter-16 3.70ns ±10%
|
||||
Counter-32 1.77ns ± 3%
|
||||
Counter-64 0.96ns ±10%
|
||||
AtomicInt64 7.60ns ± 0%
|
||||
AtomicInt64-2 12.6ns ±13%
|
||||
AtomicInt64-4 13.5ns ±14%
|
||||
AtomicInt64-8 12.7ns ± 9%
|
||||
AtomicInt64-16 12.8ns ± 8%
|
||||
AtomicInt64-32 13.0ns ± 6%
|
||||
AtomicInt64-64 12.9ns ± 7%
|
||||
```
|
||||
|
||||
Here `time/op` stands for average time spent on operation. If you divide `10^9` by the result in nanoseconds per operation, you'd get the throughput in operations per second. Thus, the ideal theoretical scalability of a concurrent data structure implies that the reported `time/op` decreases proportionally with the increased number of CPU cores. On the contrary, if the measured time per operation increases when run on more cores, it means performance degradation.
|
||||
|
||||
### MapOf vs. sync.Map
|
||||
|
||||
1,000 `[int, int]` entries with a warm-up, 100% Loads:
|
||||
```
|
||||
IntegerMapOf_WarmUp/reads=100% 24.0ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=100%-2 12.0ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=100%-4 6.02ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=100%-8 3.01ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=100%-16 1.50ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=100%-32 0.75ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=100%-64 0.38ns ± 0%
|
||||
IntegerMapStandard_WarmUp/reads=100% 55.3ns ± 0%
|
||||
IntegerMapStandard_WarmUp/reads=100%-2 27.6ns ± 0%
|
||||
IntegerMapStandard_WarmUp/reads=100%-4 16.1ns ± 3%
|
||||
IntegerMapStandard_WarmUp/reads=100%-8 8.35ns ± 7%
|
||||
IntegerMapStandard_WarmUp/reads=100%-16 4.24ns ± 7%
|
||||
IntegerMapStandard_WarmUp/reads=100%-32 2.18ns ± 6%
|
||||
IntegerMapStandard_WarmUp/reads=100%-64 1.11ns ± 3%
|
||||
```
|
||||
|
||||
1,000 `[int, int]` entries with a warm-up, 99% Loads, 0.5% Stores, 0.5% Deletes:
|
||||
```
|
||||
IntegerMapOf_WarmUp/reads=99% 31.0ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=99%-2 16.4ns ± 1%
|
||||
IntegerMapOf_WarmUp/reads=99%-4 8.42ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=99%-8 4.41ns ± 0%
|
||||
IntegerMapOf_WarmUp/reads=99%-16 2.38ns ± 2%
|
||||
IntegerMapOf_WarmUp/reads=99%-32 1.37ns ± 4%
|
||||
IntegerMapOf_WarmUp/reads=99%-64 0.85ns ± 2%
|
||||
IntegerMapStandard_WarmUp/reads=99% 121ns ± 1%
|
||||
IntegerMapStandard_WarmUp/reads=99%-2 109ns ± 3%
|
||||
IntegerMapStandard_WarmUp/reads=99%-4 115ns ± 4%
|
||||
IntegerMapStandard_WarmUp/reads=99%-8 114ns ± 2%
|
||||
IntegerMapStandard_WarmUp/reads=99%-16 105ns ± 2%
|
||||
IntegerMapStandard_WarmUp/reads=99%-32 97.0ns ± 3%
|
||||
IntegerMapStandard_WarmUp/reads=99%-64 98.0ns ± 2%
|
||||
```
|
||||
|
||||
1,000 `[int, int]` entries with a warm-up, 75% Loads, 12.5% Stores, 12.5% Deletes:
|
||||
```
|
||||
IntegerMapOf_WarmUp/reads=75%-reads 46.2ns ± 1%
|
||||
IntegerMapOf_WarmUp/reads=75%-reads-2 36.7ns ± 2%
|
||||
IntegerMapOf_WarmUp/reads=75%-reads-4 22.0ns ± 1%
|
||||
IntegerMapOf_WarmUp/reads=75%-reads-8 12.8ns ± 2%
|
||||
IntegerMapOf_WarmUp/reads=75%-reads-16 7.69ns ± 1%
|
||||
IntegerMapOf_WarmUp/reads=75%-reads-32 5.16ns ± 1%
|
||||
IntegerMapOf_WarmUp/reads=75%-reads-64 4.91ns ± 1%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads 156ns ± 0%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads-2 177ns ± 1%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads-4 197ns ± 1%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads-8 221ns ± 2%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads-16 242ns ± 1%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads-32 258ns ± 1%
|
||||
IntegerMapStandard_WarmUp/reads=75%-reads-64 264ns ± 1%
|
||||
```
|
||||
|
||||
### MPMCQueue vs. Go channels
|
||||
|
||||
Concurrent producers and consumers (1:1), queue/channel size 1,000, some work done by both producers and consumers:
|
||||
```
|
||||
QueueProdConsWork100 252ns ± 0%
|
||||
QueueProdConsWork100-2 206ns ± 5%
|
||||
QueueProdConsWork100-4 136ns ±12%
|
||||
QueueProdConsWork100-8 110ns ± 6%
|
||||
QueueProdConsWork100-16 108ns ± 2%
|
||||
QueueProdConsWork100-32 102ns ± 2%
|
||||
QueueProdConsWork100-64 101ns ± 0%
|
||||
ChanProdConsWork100 283ns ± 0%
|
||||
ChanProdConsWork100-2 406ns ±21%
|
||||
ChanProdConsWork100-4 549ns ± 7%
|
||||
ChanProdConsWork100-8 754ns ± 7%
|
||||
ChanProdConsWork100-16 828ns ± 7%
|
||||
ChanProdConsWork100-32 810ns ± 8%
|
||||
ChanProdConsWork100-64 832ns ± 4%
|
||||
```
|
||||
|
||||
### RBMutex vs. sync.RWMutex
|
||||
|
||||
The writer locks on each 100,000 iteration with some work in the critical section for both readers and the writer:
|
||||
```
|
||||
RBMutexWorkWrite100000 146ns ± 0%
|
||||
RBMutexWorkWrite100000-2 73.3ns ± 0%
|
||||
RBMutexWorkWrite100000-4 36.7ns ± 0%
|
||||
RBMutexWorkWrite100000-8 18.6ns ± 0%
|
||||
RBMutexWorkWrite100000-16 9.83ns ± 3%
|
||||
RBMutexWorkWrite100000-32 5.53ns ± 0%
|
||||
RBMutexWorkWrite100000-64 4.04ns ± 3%
|
||||
RWMutexWorkWrite100000 121ns ± 0%
|
||||
RWMutexWorkWrite100000-2 128ns ± 1%
|
||||
RWMutexWorkWrite100000-4 124ns ± 2%
|
||||
RWMutexWorkWrite100000-8 101ns ± 1%
|
||||
RWMutexWorkWrite100000-16 92.9ns ± 1%
|
||||
RWMutexWorkWrite100000-32 89.9ns ± 1%
|
||||
RWMutexWorkWrite100000-64 88.4ns ± 1%
|
||||
```
|
vendor/github.com/puzpuzpuz/xsync/v3/LICENSE (generated, vendored, new file, 21 lines)

@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2021 Andrey Pechkurov

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
vendor/github.com/puzpuzpuz/xsync/v3/README.md (generated, vendored, new file, 138 lines)
@@ -0,0 +1,138 @@
|
||||
[](https://pkg.go.dev/github.com/puzpuzpuz/xsync/v3)
|
||||
[](https://goreportcard.com/report/github.com/puzpuzpuz/xsync/v3)
|
||||
[](https://codecov.io/gh/puzpuzpuz/xsync)
|
||||
|
||||
# xsync
|
||||
|
||||
Concurrent data structures for Go. Aims to provide more scalable alternatives for some of the data structures from the standard `sync` package, but not only.
|
||||
|
||||
Covered with tests following the approach described [here](https://puzpuzpuz.dev/testing-concurrent-code-for-fun-and-profit).
|
||||
|
||||
## Benchmarks
|
||||
|
||||
Benchmark results may be found [here](BENCHMARKS.md). I'd like to thank [@felixge](https://github.com/felixge) who kindly ran the benchmarks on a beefy multicore machine.
|
||||
|
||||
Also, a non-scientific, unfair benchmark comparing Java's [j.u.c.ConcurrentHashMap](https://docs.oracle.com/en/java/javase/17/docs/api/java.base/java/util/concurrent/ConcurrentHashMap.html) and `xsync.MapOf` is available [here](https://puzpuzpuz.dev/concurrent-map-in-go-vs-java-yet-another-meaningless-benchmark).
|
||||
|
||||
## Usage
|
||||
|
||||
The latest xsync major version is v3, so `/v3` suffix should be used when importing the library:
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
```
|
||||
|
||||
*Note for v1 and v2 users*: v1 and v2 support is discontinued, so please upgrade to v3. While the API has some breaking changes, the migration should be trivial.
|
||||
|
||||
### Counter
|
||||
|
||||
A `Counter` is a striped `int64` counter inspired by the `j.u.c.a.LongAdder` class from the Java standard library.
|
||||
|
||||
```go
|
||||
c := xsync.NewCounter()
|
||||
// increment and decrement the counter
|
||||
c.Inc()
|
||||
c.Dec()
|
||||
// read the current value
|
||||
v := c.Value()
|
||||
```
|
||||
|
||||
Works better in comparison with a single atomically updated `int64` counter in high contention scenarios.
|
||||
|
||||
### Map
|
||||
|
||||
A `Map` is like a concurrent hash table-based map. It follows the interface of `sync.Map` with a number of valuable extensions like `Compute` or `Size`.
|
||||
|
||||
```go
|
||||
m := xsync.NewMap()
|
||||
m.Store("foo", "bar")
|
||||
v, ok := m.Load("foo")
|
||||
s := m.Size()
|
||||
```
|
||||
|
||||
`Map` uses a modified version of Cache-Line Hash Table (CLHT) data structure: https://github.com/LPD-EPFL/CLHT
|
||||
|
||||
CLHT is built around the idea of organizing the hash table in cache-line-sized buckets, so that on all modern CPUs update operations complete with minimal cache-line transfer. Also, `Get` operations are obstruction-free and involve no writes to shared memory, hence no mutexes or any other sort of locks. Due to this design, in all considered scenarios `Map` outperforms `sync.Map`.
|
||||
|
||||
One important difference with `sync.Map` is that only string keys are supported. That's because Golang standard library does not expose the built-in hash functions for `interface{}` values.
|
||||
|
||||
`MapOf[K, V]` is an implementation with parametrized key and value types. While it's still a CLHT-inspired hash map, `MapOf`'s design is quite different from `Map`. As a result, less GC pressure and fewer atomic operations on reads.
|
||||
|
||||
```go
|
||||
m := xsync.NewMapOf[string, string]()
|
||||
m.Store("foo", "bar")
|
||||
v, ok := m.Load("foo")
|
||||
```
|
||||
|
||||
One important difference with `Map` is that `MapOf` supports arbitrary `comparable` key types:
|
||||
|
||||
```go
|
||||
type Point struct {
|
||||
x int32
|
||||
y int32
|
||||
}
|
||||
m := NewMapOf[Point, int]()
|
||||
m.Store(Point{42, 42}, 42)
|
||||
v, ok := m.Load(Point{42, 42})
|
||||
```
|
||||
|
||||
### MPMCQueue
|
||||
|
||||
A `MPMCQueue` is a bounded multi-producer multi-consumer concurrent queue.
|
||||
|
||||
```go
|
||||
q := xsync.NewMPMCQueue(1024)
|
||||
// producer inserts an item into the queue
|
||||
q.Enqueue("foo")
|
||||
// optimistic insertion attempt; doesn't block
|
||||
inserted := q.TryEnqueue("bar")
|
||||
// consumer obtains an item from the queue
|
||||
item := q.Dequeue() // interface{} pointing to a string
|
||||
// optimistic obtain attempt; doesn't block
|
||||
item, ok := q.TryDequeue()
|
||||
```
|
||||
|
||||
`MPMCQueueOf[I]` is an implementation with parametrized item type. It is available for Go 1.19 or later.
|
||||
|
||||
```go
|
||||
q := xsync.NewMPMCQueueOf[string](1024)
|
||||
q.Enqueue("foo")
|
||||
item := q.Dequeue() // string
|
||||
```
|
||||
|
||||
The queue is based on the algorithm from the [MPMCQueue](https://github.com/rigtorp/MPMCQueue) C++ library which in its turn references D.Vyukov's [MPMC queue](https://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue). According to the following [classification](https://www.1024cores.net/home/lock-free-algorithms/queues), the queue is array-based, fails on overflow, provides causal FIFO, has blocking producers and consumers.
|
||||
|
||||
The idea of the algorithm is to allow parallelism for concurrent producers and consumers by introducing the notion of tickets, i.e. values of two counters, one per producers/consumers. An atomic increment of one of those counters is the only noticeable contention point in queue operations. The rest of the operation avoids contention on writes thanks to the turn-based read/write access for each of the queue items.
|
||||
|
||||
In essence, `MPMCQueue` is a specialized queue for scenarios where there are multiple concurrent producers and consumers of a single queue running on a large multicore machine.
|
||||
|
||||
To get the optimal performance, you may want to set the queue size to be large enough, say, an order of magnitude greater than the number of producers/consumers, to allow producers and consumers to progress with their queue operations in parallel most of the time.
|
||||
|
||||
### RBMutex
|
||||
|
||||
A `RBMutex` is a reader-biased reader/writer mutual exclusion lock. The lock can be held by many readers or a single writer.
|
||||
|
||||
```go
|
||||
mu := xsync.NewRBMutex()
|
||||
// reader lock calls return a token
|
||||
t := mu.RLock()
|
||||
// the token must be later used to unlock the mutex
|
||||
mu.RUnlock(t)
|
||||
// writer locks are the same as in sync.RWMutex
|
||||
mu.Lock()
|
||||
mu.Unlock()
|
||||
```
|
||||
|
||||
`RBMutex` is based on a modified version of BRAVO (Biased Locking for Reader-Writer Locks) algorithm: https://arxiv.org/pdf/1810.01553.pdf
|
||||
|
||||
The idea of the algorithm is to build on top of an existing reader-writer mutex and introduce a fast path for readers. On the fast path, reader lock attempts are sharded over an internal array based on the reader identity (a token in the case of Golang). This means that readers do not contend over a single atomic counter like it's done in, say, `sync.RWMutex` allowing for better scalability in terms of cores.
|
||||
|
||||
Hence, by the design `RBMutex` is a specialized mutex for scenarios, such as caches, where the vast majority of locks are acquired by readers and write lock acquire attempts are infrequent. In such scenarios, `RBMutex` should perform better than the `sync.RWMutex` on large multicore machines.
|
||||
|
||||
`RBMutex` extends `sync.RWMutex` internally and uses it as the "reader bias disabled" fallback, so the same semantics apply. The only noticeable difference is in the reader tokens returned from the `RLock`/`RUnlock` methods.
|
||||
|
||||
## License
|
||||
|
||||
Licensed under MIT.
|
vendor/github.com/puzpuzpuz/xsync/v3/counter.go (generated, vendored, new file, 99 lines)
@@ -0,0 +1,99 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
)
|
||||
|
||||
// pool for P tokens
|
||||
var ptokenPool sync.Pool
|
||||
|
||||
// a P token is used to point at the current OS thread (P)
|
||||
// on which the goroutine is run; exact identity of the thread,
|
||||
// as well as P migration tolerance, is not important since
|
||||
// it's used to as a best effort mechanism for assigning
|
||||
// concurrent operations (goroutines) to different stripes of
|
||||
// the counter
|
||||
type ptoken struct {
|
||||
idx uint32
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad [cacheLineSize - 4]byte
|
||||
}
|
||||
|
||||
// A Counter is a striped int64 counter.
|
||||
//
|
||||
// Should be preferred over a single atomically updated int64
|
||||
// counter in high contention scenarios.
|
||||
//
|
||||
// A Counter must not be copied after first use.
|
||||
type Counter struct {
|
||||
stripes []cstripe
|
||||
mask uint32
|
||||
}
|
||||
|
||||
type cstripe struct {
|
||||
c int64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad [cacheLineSize - 8]byte
|
||||
}
|
||||
|
||||
// NewCounter creates a new Counter instance.
|
||||
func NewCounter() *Counter {
|
||||
nstripes := nextPowOf2(parallelism())
|
||||
c := Counter{
|
||||
stripes: make([]cstripe, nstripes),
|
||||
mask: nstripes - 1,
|
||||
}
|
||||
return &c
|
||||
}
|
||||
|
||||
// Inc increments the counter by 1.
|
||||
func (c *Counter) Inc() {
|
||||
c.Add(1)
|
||||
}
|
||||
|
||||
// Dec decrements the counter by 1.
|
||||
func (c *Counter) Dec() {
|
||||
c.Add(-1)
|
||||
}
|
||||
|
||||
// Add adds the delta to the counter.
|
||||
func (c *Counter) Add(delta int64) {
|
||||
t, ok := ptokenPool.Get().(*ptoken)
|
||||
if !ok {
|
||||
t = new(ptoken)
|
||||
t.idx = runtime_fastrand()
|
||||
}
|
||||
for {
|
||||
stripe := &c.stripes[t.idx&c.mask]
|
||||
cnt := atomic.LoadInt64(&stripe.c)
|
||||
if atomic.CompareAndSwapInt64(&stripe.c, cnt, cnt+delta) {
|
||||
break
|
||||
}
|
||||
// Give a try with another randomly selected stripe.
|
||||
t.idx = runtime_fastrand()
|
||||
}
|
||||
ptokenPool.Put(t)
|
||||
}
|
||||
|
||||
// Value returns the current counter value.
|
||||
// The returned value may not include all of the latest operations in
|
||||
// presence of concurrent modifications of the counter.
|
||||
func (c *Counter) Value() int64 {
|
||||
v := int64(0)
|
||||
for i := 0; i < len(c.stripes); i++ {
|
||||
stripe := &c.stripes[i]
|
||||
v += atomic.LoadInt64(&stripe.c)
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// Reset resets the counter to zero.
|
||||
// This method should only be used when it is known that there are
|
||||
// no concurrent modifications of the counter.
|
||||
func (c *Counter) Reset() {
|
||||
for i := 0; i < len(c.stripes); i++ {
|
||||
stripe := &c.stripes[i]
|
||||
atomic.StoreInt64(&stripe.c, 0)
|
||||
}
|
||||
}
|
vendor/github.com/puzpuzpuz/xsync/v3/map.go (generated, vendored, new file, 799 lines)
@@ -0,0 +1,799 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
type mapResizeHint int
|
||||
|
||||
const (
|
||||
mapGrowHint mapResizeHint = 0
|
||||
mapShrinkHint mapResizeHint = 1
|
||||
mapClearHint mapResizeHint = 2
|
||||
)
|
||||
|
||||
const (
|
||||
// number of entries per bucket; 3 entries lead to size of 64B
|
||||
// (one cache line) on 64-bit machines
|
||||
entriesPerMapBucket = 3
|
||||
// threshold fraction of table occupation to start a table shrinking
|
||||
// when deleting the last entry in a bucket chain
|
||||
mapShrinkFraction = 128
|
||||
// map load factor to trigger a table resize during insertion;
|
||||
// a map holds up to mapLoadFactor*entriesPerMapBucket*mapTableLen
|
||||
// key-value pairs (this is a soft limit)
|
||||
mapLoadFactor = 0.75
|
||||
// minimal table size, i.e. number of buckets; thus, minimal map
|
||||
// capacity can be calculated as entriesPerMapBucket*defaultMinMapTableLen
|
||||
defaultMinMapTableLen = 32
|
||||
// minimum counter stripes to use
|
||||
minMapCounterLen = 8
|
||||
// maximum counter stripes to use; stands for around 4KB of memory
|
||||
maxMapCounterLen = 32
|
||||
)
|
||||
|
||||
var (
|
||||
topHashMask = uint64((1<<20)-1) << 44
|
||||
topHashEntryMasks = [3]uint64{
|
||||
topHashMask,
|
||||
topHashMask >> 20,
|
||||
topHashMask >> 40,
|
||||
}
|
||||
)
|
||||
|
||||
// Map is like a Go map[string]interface{} but is safe for concurrent
|
||||
// use by multiple goroutines without additional locking or
|
||||
// coordination. It follows the interface of sync.Map with
|
||||
// a number of valuable extensions like Compute or Size.
|
||||
//
|
||||
// A Map must not be copied after first use.
|
||||
//
|
||||
// Map uses a modified version of Cache-Line Hash Table (CLHT)
|
||||
// data structure: https://github.com/LPD-EPFL/CLHT
|
||||
//
|
||||
// CLHT is built around idea to organize the hash table in
|
||||
// cache-line-sized buckets, so that on all modern CPUs update
|
||||
// operations complete with at most one cache-line transfer.
|
||||
// Also, Get operations involve no write to memory, as well as no
|
||||
// mutexes or any other sort of locks. Due to this design, in all
|
||||
// considered scenarios Map outperforms sync.Map.
|
||||
//
|
||||
// One important difference with sync.Map is that only string keys
|
||||
// are supported. That's because Golang standard library does not
|
||||
// expose the built-in hash functions for interface{} values.
|
||||
type Map struct {
|
||||
totalGrowths int64
|
||||
totalShrinks int64
|
||||
resizing int64 // resize in progress flag; updated atomically
|
||||
resizeMu sync.Mutex // only used along with resizeCond
|
||||
resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications)
|
||||
table unsafe.Pointer // *mapTable
|
||||
minTableLen int
|
||||
}
|
||||
|
||||
type mapTable struct {
|
||||
buckets []bucketPadded
|
||||
// striped counter for number of table entries;
|
||||
// used to determine if a table shrinking is needed
|
||||
// occupies min(buckets_memory/1024, 64KB) of memory
|
||||
size []counterStripe
|
||||
seed uint64
|
||||
}
|
||||
|
||||
type counterStripe struct {
|
||||
c int64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad [cacheLineSize - 8]byte
|
||||
}
|
||||
|
||||
type bucketPadded struct {
|
||||
//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
|
||||
pad [cacheLineSize - unsafe.Sizeof(bucket{})]byte
|
||||
bucket
|
||||
}
|
||||
|
||||
type bucket struct {
|
||||
next unsafe.Pointer // *bucketPadded
|
||||
keys [entriesPerMapBucket]unsafe.Pointer
|
||||
values [entriesPerMapBucket]unsafe.Pointer
|
||||
// topHashMutex is a 2-in-1 value.
|
||||
//
|
||||
// It contains packed top 20 bits (20 MSBs) of hash codes for keys
|
||||
// stored in the bucket:
|
||||
// | key 0's top hash | key 1's top hash | key 2's top hash | bitmap for keys | mutex |
|
||||
// | 20 bits | 20 bits | 20 bits | 3 bits | 1 bit |
|
||||
//
|
||||
// The least significant bit is used for the mutex (TTAS spinlock).
|
||||
topHashMutex uint64
|
||||
}
|
||||
|
||||
type rangeEntry struct {
|
||||
key unsafe.Pointer
|
||||
value unsafe.Pointer
|
||||
}
|
||||
|
||||
// NewMap creates a new Map instance.
|
||||
func NewMap() *Map {
|
||||
return NewMapPresized(defaultMinMapTableLen * entriesPerMapBucket)
|
||||
}
|
||||
|
||||
// NewMapPresized creates a new Map instance with capacity enough to hold
|
||||
// sizeHint entries. If sizeHint is zero or negative, the value is ignored.
|
||||
func NewMapPresized(sizeHint int) *Map {
|
||||
m := &Map{}
|
||||
m.resizeCond = *sync.NewCond(&m.resizeMu)
|
||||
var table *mapTable
|
||||
if sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
|
||||
table = newMapTable(defaultMinMapTableLen)
|
||||
} else {
|
||||
tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket))
|
||||
table = newMapTable(int(tableLen))
|
||||
}
|
||||
m.minTableLen = len(table.buckets)
|
||||
atomic.StorePointer(&m.table, unsafe.Pointer(table))
|
||||
return m
|
||||
}
|
||||
|
||||
func newMapTable(minTableLen int) *mapTable {
|
||||
buckets := make([]bucketPadded, minTableLen)
|
||||
counterLen := minTableLen >> 10
|
||||
if counterLen < minMapCounterLen {
|
||||
counterLen = minMapCounterLen
|
||||
} else if counterLen > maxMapCounterLen {
|
||||
counterLen = maxMapCounterLen
|
||||
}
|
||||
counter := make([]counterStripe, counterLen)
|
||||
t := &mapTable{
|
||||
buckets: buckets,
|
||||
size: counter,
|
||||
seed: makeSeed(),
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Load returns the value stored in the map for a key, or nil if no
|
||||
// value is present.
|
||||
// The ok result indicates whether value was found in the map.
|
||||
func (m *Map) Load(key string) (value interface{}, ok bool) {
|
||||
table := (*mapTable)(atomic.LoadPointer(&m.table))
|
||||
hash := hashString(key, table.seed)
|
||||
bidx := uint64(len(table.buckets)-1) & hash
|
||||
b := &table.buckets[bidx]
|
||||
for {
|
||||
topHashes := atomic.LoadUint64(&b.topHashMutex)
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if !topHashMatch(hash, topHashes, i) {
|
||||
continue
|
||||
}
|
||||
atomic_snapshot:
|
||||
// Start atomic snapshot.
|
||||
vp := atomic.LoadPointer(&b.values[i])
|
||||
kp := atomic.LoadPointer(&b.keys[i])
|
||||
if kp != nil && vp != nil {
|
||||
if key == derefKey(kp) {
|
||||
if uintptr(vp) == uintptr(atomic.LoadPointer(&b.values[i])) {
|
||||
// Atomic snapshot succeeded.
|
||||
return derefValue(vp), true
|
||||
}
|
||||
// Concurrent update/remove. Go for another spin.
|
||||
goto atomic_snapshot
|
||||
}
|
||||
}
|
||||
}
|
||||
bptr := atomic.LoadPointer(&b.next)
|
||||
if bptr == nil {
|
||||
return
|
||||
}
|
||||
b = (*bucketPadded)(bptr)
|
||||
}
|
||||
}
|
||||
|
||||
// Store sets the value for a key.
|
||||
func (m *Map) Store(key string, value interface{}) {
|
||||
m.doCompute(
|
||||
key,
|
||||
func(interface{}, bool) (interface{}, bool) {
|
||||
return value, false
|
||||
},
|
||||
false,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// LoadOrStore returns the existing value for the key if present.
|
||||
// Otherwise, it stores and returns the given value.
|
||||
// The loaded result is true if the value was loaded, false if stored.
|
||||
func (m *Map) LoadOrStore(key string, value interface{}) (actual interface{}, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(interface{}, bool) (interface{}, bool) {
|
||||
return value, false
|
||||
},
|
||||
true,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// LoadAndStore returns the existing value for the key if present,
|
||||
// while setting the new value for the key.
|
||||
// It stores the new value and returns the existing one, if present.
|
||||
// The loaded result is true if the existing value was loaded,
|
||||
// false otherwise.
|
||||
func (m *Map) LoadAndStore(key string, value interface{}) (actual interface{}, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(interface{}, bool) (interface{}, bool) {
|
||||
return value, false
|
||||
},
|
||||
false,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// LoadOrCompute returns the existing value for the key if present.
|
||||
// Otherwise, it computes the value using the provided function and
|
||||
// returns the computed value. The loaded result is true if the value
|
||||
// was loaded, false if stored.
|
||||
//
|
||||
// This call locks a hash table bucket while the compute function
|
||||
// is executed. It means that modifications on other entries in
|
||||
// the bucket will be blocked until the valueFn executes. Consider
|
||||
// this when the function includes long-running operations.
|
||||
func (m *Map) LoadOrCompute(key string, valueFn func() interface{}) (actual interface{}, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(interface{}, bool) (interface{}, bool) {
|
||||
return valueFn(), false
|
||||
},
|
||||
true,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// Compute either sets the computed new value for the key or deletes
|
||||
// the value for the key. When the delete result of the valueFn function
|
||||
// is set to true, the value will be deleted, if it exists. When delete
|
||||
// is set to false, the value is updated to the newValue.
|
||||
// The ok result indicates whether value was computed and stored, thus, is
|
||||
// present in the map. The actual result contains the new value in cases where
|
||||
// the value was computed and stored. See the example for a few use cases.
|
||||
//
|
||||
// This call locks a hash table bucket while the compute function
|
||||
// is executed. It means that modifications on other entries in
|
||||
// the bucket will be blocked until the valueFn executes. Consider
|
||||
// this when the function includes long-running operations.
|
||||
func (m *Map) Compute(
|
||||
key string,
|
||||
valueFn func(oldValue interface{}, loaded bool) (newValue interface{}, delete bool),
|
||||
) (actual interface{}, ok bool) {
|
||||
return m.doCompute(key, valueFn, false, true)
|
||||
}
|
||||
|
||||
// LoadAndDelete deletes the value for a key, returning the previous
|
||||
// value if any. The loaded result reports whether the key was
|
||||
// present.
|
||||
func (m *Map) LoadAndDelete(key string) (value interface{}, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(value interface{}, loaded bool) (interface{}, bool) {
|
||||
return value, true
|
||||
},
|
||||
false,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// Delete deletes the value for a key.
|
||||
func (m *Map) Delete(key string) {
|
||||
m.doCompute(
|
||||
key,
|
||||
func(value interface{}, loaded bool) (interface{}, bool) {
|
||||
return value, true
|
||||
},
|
||||
false,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
func (m *Map) doCompute(
|
||||
key string,
|
||||
valueFn func(oldValue interface{}, loaded bool) (interface{}, bool),
|
||||
loadIfExists, computeOnly bool,
|
||||
) (interface{}, bool) {
|
||||
// Read-only path.
|
||||
if loadIfExists {
|
||||
if v, ok := m.Load(key); ok {
|
||||
return v, !computeOnly
|
||||
}
|
||||
}
|
||||
// Write path.
|
||||
for {
|
||||
compute_attempt:
|
||||
var (
|
||||
emptyb *bucketPadded
|
||||
emptyidx int
|
||||
hintNonEmpty int
|
||||
)
|
||||
table := (*mapTable)(atomic.LoadPointer(&m.table))
|
||||
tableLen := len(table.buckets)
|
||||
hash := hashString(key, table.seed)
|
||||
bidx := uint64(len(table.buckets)-1) & hash
|
||||
rootb := &table.buckets[bidx]
|
||||
lockBucket(&rootb.topHashMutex)
|
||||
// The following two checks must go in reverse to what's
|
||||
// in the resize method.
|
||||
if m.resizeInProgress() {
|
||||
// Resize is in progress. Wait, then go for another attempt.
|
||||
unlockBucket(&rootb.topHashMutex)
|
||||
m.waitForResize()
|
||||
goto compute_attempt
|
||||
}
|
||||
if m.newerTableExists(table) {
|
||||
// Someone resized the table. Go for another attempt.
|
||||
unlockBucket(&rootb.topHashMutex)
|
||||
goto compute_attempt
|
||||
}
|
||||
b := rootb
|
||||
for {
|
||||
topHashes := atomic.LoadUint64(&b.topHashMutex)
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if b.keys[i] == nil {
|
||||
if emptyb == nil {
|
||||
emptyb = b
|
||||
emptyidx = i
|
||||
}
|
||||
continue
|
||||
}
|
||||
if !topHashMatch(hash, topHashes, i) {
|
||||
hintNonEmpty++
|
||||
continue
|
||||
}
|
||||
if key == derefKey(b.keys[i]) {
|
||||
vp := b.values[i]
|
||||
if loadIfExists {
|
||||
unlockBucket(&rootb.topHashMutex)
|
||||
return derefValue(vp), !computeOnly
|
||||
}
|
||||
// In-place update/delete.
|
||||
// We get a copy of the value via an interface{} on each call,
|
||||
// thus the live value pointers are unique. Otherwise atomic
|
||||
// snapshot won't be correct in case of multiple Store calls
|
||||
// using the same value.
|
||||
oldValue := derefValue(vp)
|
||||
newValue, del := valueFn(oldValue, true)
|
||||
if del {
|
||||
// Deletion.
|
||||
// First we update the value, then the key.
|
||||
// This is important for atomic snapshot states.
|
||||
atomic.StoreUint64(&b.topHashMutex, eraseTopHash(topHashes, i))
|
||||
atomic.StorePointer(&b.values[i], nil)
|
||||
atomic.StorePointer(&b.keys[i], nil)
|
||||
leftEmpty := false
|
||||
if hintNonEmpty == 0 {
|
||||
leftEmpty = isEmptyBucket(b)
|
||||
}
|
||||
unlockBucket(&rootb.topHashMutex)
|
||||
table.addSize(bidx, -1)
|
||||
// Might need to shrink the table.
|
||||
if leftEmpty {
|
||||
m.resize(table, mapShrinkHint)
|
||||
}
|
||||
return oldValue, !computeOnly
|
||||
}
|
||||
nvp := unsafe.Pointer(&newValue)
|
||||
if assertionsEnabled && vp == nvp {
|
||||
panic("non-unique value pointer")
|
||||
}
|
||||
atomic.StorePointer(&b.values[i], nvp)
|
||||
unlockBucket(&rootb.topHashMutex)
|
||||
if computeOnly {
|
||||
// Compute expects the new value to be returned.
|
||||
return newValue, true
|
||||
}
|
||||
// LoadAndStore expects the old value to be returned.
|
||||
return oldValue, true
|
||||
}
|
||||
hintNonEmpty++
|
||||
}
|
||||
if b.next == nil {
|
||||
if emptyb != nil {
|
||||
// Insertion into an existing bucket.
|
||||
var zeroedV interface{}
|
||||
newValue, del := valueFn(zeroedV, false)
|
||||
if del {
|
||||
unlockBucket(&rootb.topHashMutex)
|
||||
return zeroedV, false
|
||||
}
|
||||
// First we update the value, then the key.
|
||||
// This is important for atomic snapshot states.
|
||||
topHashes = atomic.LoadUint64(&emptyb.topHashMutex)
|
||||
atomic.StoreUint64(&emptyb.topHashMutex, storeTopHash(hash, topHashes, emptyidx))
|
||||
atomic.StorePointer(&emptyb.values[emptyidx], unsafe.Pointer(&newValue))
|
||||
atomic.StorePointer(&emptyb.keys[emptyidx], unsafe.Pointer(&key))
|
||||
unlockBucket(&rootb.topHashMutex)
|
||||
table.addSize(bidx, 1)
|
||||
return newValue, computeOnly
|
||||
}
|
||||
growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor
|
||||
if table.sumSize() > int64(growThreshold) {
|
||||
// Need to grow the table. Then go for another attempt.
|
||||
unlockBucket(&rootb.topHashMutex)
|
||||
m.resize(table, mapGrowHint)
|
||||
goto compute_attempt
|
||||
}
|
||||
// Insertion into a new bucket.
|
||||
var zeroedV interface{}
|
||||
newValue, del := valueFn(zeroedV, false)
|
||||
if del {
|
||||
unlockBucket(&rootb.topHashMutex)
|
||||
return newValue, false
|
||||
}
|
||||
// Create and append the bucket.
|
||||
newb := new(bucketPadded)
|
||||
newb.keys[0] = unsafe.Pointer(&key)
|
||||
newb.values[0] = unsafe.Pointer(&newValue)
|
||||
newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
|
||||
atomic.StorePointer(&b.next, unsafe.Pointer(newb))
|
||||
unlockBucket(&rootb.topHashMutex)
|
||||
table.addSize(bidx, 1)
|
||||
return newValue, computeOnly
|
||||
}
|
||||
b = (*bucketPadded)(b.next)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Map) newerTableExists(table *mapTable) bool {
|
||||
curTablePtr := atomic.LoadPointer(&m.table)
|
||||
return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table))
|
||||
}
|
||||
|
||||
func (m *Map) resizeInProgress() bool {
|
||||
return atomic.LoadInt64(&m.resizing) == 1
|
||||
}
|
||||
|
||||
func (m *Map) waitForResize() {
|
||||
m.resizeMu.Lock()
|
||||
for m.resizeInProgress() {
|
||||
m.resizeCond.Wait()
|
||||
}
|
||||
m.resizeMu.Unlock()
|
||||
}
|
||||
|
||||
func (m *Map) resize(knownTable *mapTable, hint mapResizeHint) {
|
||||
knownTableLen := len(knownTable.buckets)
|
||||
// Fast path for shrink attempts.
|
||||
if hint == mapShrinkHint {
|
||||
shrinkThreshold := int64((knownTableLen * entriesPerMapBucket) / mapShrinkFraction)
|
||||
if knownTableLen == m.minTableLen || knownTable.sumSize() > shrinkThreshold {
|
||||
return
|
||||
}
|
||||
}
|
||||
// Slow path.
|
||||
if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
|
||||
// Someone else started resize. Wait for it to finish.
|
||||
m.waitForResize()
|
||||
return
|
||||
}
|
||||
var newTable *mapTable
|
||||
table := (*mapTable)(atomic.LoadPointer(&m.table))
|
||||
tableLen := len(table.buckets)
|
||||
switch hint {
|
||||
case mapGrowHint:
|
||||
// Grow the table with factor of 2.
|
||||
atomic.AddInt64(&m.totalGrowths, 1)
|
||||
newTable = newMapTable(tableLen << 1)
|
||||
case mapShrinkHint:
|
||||
shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
|
||||
if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
|
||||
// Shrink the table with factor of 2.
|
||||
atomic.AddInt64(&m.totalShrinks, 1)
|
||||
newTable = newMapTable(tableLen >> 1)
|
||||
} else {
|
||||
// No need to shrink. Wake up all waiters and give up.
|
||||
m.resizeMu.Lock()
|
||||
atomic.StoreInt64(&m.resizing, 0)
|
||||
m.resizeCond.Broadcast()
|
||||
m.resizeMu.Unlock()
|
||||
return
|
||||
}
|
||||
case mapClearHint:
|
||||
newTable = newMapTable(m.minTableLen)
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected resize hint: %d", hint))
|
||||
}
|
||||
// Copy the data only if we're not clearing the map.
|
||||
if hint != mapClearHint {
|
||||
for i := 0; i < tableLen; i++ {
|
||||
copied := copyBucket(&table.buckets[i], newTable)
|
||||
newTable.addSizePlain(uint64(i), copied)
|
||||
}
|
||||
}
|
||||
// Publish the new table and wake up all waiters.
|
||||
atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
|
||||
m.resizeMu.Lock()
|
||||
atomic.StoreInt64(&m.resizing, 0)
|
||||
m.resizeCond.Broadcast()
|
||||
m.resizeMu.Unlock()
|
||||
}
|
||||
|
||||
func copyBucket(b *bucketPadded, destTable *mapTable) (copied int) {
|
||||
rootb := b
|
||||
lockBucket(&rootb.topHashMutex)
|
||||
for {
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if b.keys[i] != nil {
|
||||
k := derefKey(b.keys[i])
|
||||
hash := hashString(k, destTable.seed)
|
||||
bidx := uint64(len(destTable.buckets)-1) & hash
|
||||
destb := &destTable.buckets[bidx]
|
||||
appendToBucket(hash, b.keys[i], b.values[i], destb)
|
||||
copied++
|
||||
}
|
||||
}
|
||||
if b.next == nil {
|
||||
unlockBucket(&rootb.topHashMutex)
|
||||
return
|
||||
}
|
||||
b = (*bucketPadded)(b.next)
|
||||
}
|
||||
}
|
||||
|
||||
func appendToBucket(hash uint64, keyPtr, valPtr unsafe.Pointer, b *bucketPadded) {
|
||||
for {
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if b.keys[i] == nil {
|
||||
b.keys[i] = keyPtr
|
||||
b.values[i] = valPtr
|
||||
b.topHashMutex = storeTopHash(hash, b.topHashMutex, i)
|
||||
return
|
||||
}
|
||||
}
|
||||
if b.next == nil {
|
||||
newb := new(bucketPadded)
|
||||
newb.keys[0] = keyPtr
|
||||
newb.values[0] = valPtr
|
||||
newb.topHashMutex = storeTopHash(hash, newb.topHashMutex, 0)
|
||||
b.next = unsafe.Pointer(newb)
|
||||
return
|
||||
}
|
||||
b = (*bucketPadded)(b.next)
|
||||
}
|
||||
}
|
||||
|
||||
func isEmptyBucket(rootb *bucketPadded) bool {
|
||||
b := rootb
|
||||
for {
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if b.keys[i] != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if b.next == nil {
|
||||
return true
|
||||
}
|
||||
b = (*bucketPadded)(b.next)
|
||||
}
|
||||
}
|
||||
|
||||
// Range calls f sequentially for each key and value present in the
|
||||
// map. If f returns false, range stops the iteration.
|
||||
//
|
||||
// Range does not necessarily correspond to any consistent snapshot
|
||||
// of the Map's contents: no key will be visited more than once, but
|
||||
// if the value for any key is stored or deleted concurrently, Range
|
||||
// may reflect any mapping for that key from any point during the
|
||||
// Range call.
|
||||
//
|
||||
// It is safe to modify the map while iterating it, including entry
|
||||
// creation, modification and deletion. However, the concurrent
|
||||
// modification rule apply, i.e. the changes may be not reflected
|
||||
// in the subsequently iterated entries.
|
||||
func (m *Map) Range(f func(key string, value interface{}) bool) {
|
||||
var zeroEntry rangeEntry
|
||||
// Pre-allocate array big enough to fit entries for most hash tables.
|
||||
bentries := make([]rangeEntry, 0, 16*entriesPerMapBucket)
|
||||
tablep := atomic.LoadPointer(&m.table)
|
||||
table := *(*mapTable)(tablep)
|
||||
for i := range table.buckets {
|
||||
rootb := &table.buckets[i]
|
||||
b := rootb
|
||||
// Prevent concurrent modifications and copy all entries into
|
||||
// the intermediate slice.
|
||||
lockBucket(&rootb.topHashMutex)
|
||||
for {
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if b.keys[i] != nil {
|
||||
bentries = append(bentries, rangeEntry{
|
||||
key: b.keys[i],
|
||||
value: b.values[i],
|
||||
})
|
||||
}
|
||||
}
|
||||
if b.next == nil {
|
||||
unlockBucket(&rootb.topHashMutex)
|
||||
break
|
||||
}
|
||||
b = (*bucketPadded)(b.next)
|
||||
}
|
||||
// Call the function for all copied entries.
|
||||
for j := range bentries {
|
||||
k := derefKey(bentries[j].key)
|
||||
v := derefValue(bentries[j].value)
|
||||
if !f(k, v) {
|
||||
return
|
||||
}
|
||||
// Remove the reference to avoid preventing the copied
|
||||
// entries from being GCed until this method finishes.
|
||||
bentries[j] = zeroEntry
|
||||
}
|
||||
bentries = bentries[:0]
|
||||
}
|
||||
}
|
||||
|
||||
// Clear deletes all keys and values currently stored in the map.
|
||||
func (m *Map) Clear() {
|
||||
table := (*mapTable)(atomic.LoadPointer(&m.table))
|
||||
m.resize(table, mapClearHint)
|
||||
}
|
||||
|
||||
// Size returns current size of the map.
|
||||
func (m *Map) Size() int {
|
||||
table := (*mapTable)(atomic.LoadPointer(&m.table))
|
||||
return int(table.sumSize())
|
||||
}
|
||||
|
||||
func derefKey(keyPtr unsafe.Pointer) string {
|
||||
return *(*string)(keyPtr)
|
||||
}
|
||||
|
||||
func derefValue(valuePtr unsafe.Pointer) interface{} {
|
||||
return *(*interface{})(valuePtr)
|
||||
}
|
||||
|
||||
func lockBucket(mu *uint64) {
|
||||
for {
|
||||
var v uint64
|
||||
for {
|
||||
v = atomic.LoadUint64(mu)
|
||||
if v&1 != 1 {
|
||||
break
|
||||
}
|
||||
runtime.Gosched()
|
||||
}
|
||||
if atomic.CompareAndSwapUint64(mu, v, v|1) {
|
||||
return
|
||||
}
|
||||
runtime.Gosched()
|
||||
}
|
||||
}
|
||||
|
||||
func unlockBucket(mu *uint64) {
|
||||
v := atomic.LoadUint64(mu)
|
||||
atomic.StoreUint64(mu, v&^1)
|
||||
}
|
||||
|
||||
func topHashMatch(hash, topHashes uint64, idx int) bool {
|
||||
if topHashes&(1<<(idx+1)) == 0 {
|
||||
// Entry is not present.
|
||||
return false
|
||||
}
|
||||
hash = hash & topHashMask
|
||||
topHashes = (topHashes & topHashEntryMasks[idx]) << (20 * idx)
|
||||
return hash == topHashes
|
||||
}
|
||||
|
||||
func storeTopHash(hash, topHashes uint64, idx int) uint64 {
|
||||
// Zero out top hash at idx.
|
||||
topHashes = topHashes &^ topHashEntryMasks[idx]
|
||||
// Chop top 20 MSBs of the given hash and position them at idx.
|
||||
hash = (hash & topHashMask) >> (20 * idx)
|
||||
// Store the MSBs.
|
||||
topHashes = topHashes | hash
|
||||
// Mark the entry as present.
|
||||
return topHashes | (1 << (idx + 1))
|
||||
}
|
||||
|
||||
func eraseTopHash(topHashes uint64, idx int) uint64 {
|
||||
return topHashes &^ (1 << (idx + 1))
|
||||
}
|
||||
|
||||
func (table *mapTable) addSize(bucketIdx uint64, delta int) {
|
||||
cidx := uint64(len(table.size)-1) & bucketIdx
|
||||
atomic.AddInt64(&table.size[cidx].c, int64(delta))
|
||||
}
|
||||
|
||||
func (table *mapTable) addSizePlain(bucketIdx uint64, delta int) {
|
||||
cidx := uint64(len(table.size)-1) & bucketIdx
|
||||
table.size[cidx].c += int64(delta)
|
||||
}
|
||||
|
||||
func (table *mapTable) sumSize() int64 {
|
||||
sum := int64(0)
|
||||
for i := range table.size {
|
||||
sum += atomic.LoadInt64(&table.size[i].c)
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
type mapStats struct {
|
||||
RootBuckets int
|
||||
TotalBuckets int
|
||||
EmptyBuckets int
|
||||
Capacity int
|
||||
Size int // calculated number of entries
|
||||
Counter int // number of entries according to table counter
|
||||
CounterLen int // number of counter stripes
|
||||
MinEntries int // min entries per chain of buckets
|
||||
MaxEntries int // max entries per chain of buckets
|
||||
TotalGrowths int64
|
||||
TotalShrinks int64
|
||||
}
|
||||
|
||||
func (s *mapStats) ToString() string {
|
||||
var sb strings.Builder
|
||||
sb.WriteString("\n---\n")
|
||||
sb.WriteString(fmt.Sprintf("RootBuckets: %d\n", s.RootBuckets))
|
||||
sb.WriteString(fmt.Sprintf("TotalBuckets: %d\n", s.TotalBuckets))
|
||||
sb.WriteString(fmt.Sprintf("EmptyBuckets: %d\n", s.EmptyBuckets))
|
||||
sb.WriteString(fmt.Sprintf("Capacity: %d\n", s.Capacity))
|
||||
sb.WriteString(fmt.Sprintf("Size: %d\n", s.Size))
|
||||
sb.WriteString(fmt.Sprintf("Counter: %d\n", s.Counter))
|
||||
sb.WriteString(fmt.Sprintf("CounterLen: %d\n", s.CounterLen))
|
||||
sb.WriteString(fmt.Sprintf("MinEntries: %d\n", s.MinEntries))
|
||||
sb.WriteString(fmt.Sprintf("MaxEntries: %d\n", s.MaxEntries))
|
||||
sb.WriteString(fmt.Sprintf("TotalGrowths: %d\n", s.TotalGrowths))
|
||||
sb.WriteString(fmt.Sprintf("TotalShrinks: %d\n", s.TotalShrinks))
|
||||
sb.WriteString("---\n")
|
||||
return sb.String()
|
||||
}
|
||||
|
||||
// O(N) operation; use for debug purposes only
|
||||
func (m *Map) stats() mapStats {
|
||||
stats := mapStats{
|
||||
TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
|
||||
TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
|
||||
MinEntries: math.MaxInt32,
|
||||
}
|
||||
table := (*mapTable)(atomic.LoadPointer(&m.table))
|
||||
stats.RootBuckets = len(table.buckets)
|
||||
stats.Counter = int(table.sumSize())
|
||||
stats.CounterLen = len(table.size)
|
||||
for i := range table.buckets {
|
||||
nentries := 0
|
||||
b := &table.buckets[i]
|
||||
stats.TotalBuckets++
|
||||
for {
|
||||
nentriesLocal := 0
|
||||
stats.Capacity += entriesPerMapBucket
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if atomic.LoadPointer(&b.keys[i]) != nil {
|
||||
stats.Size++
|
||||
nentriesLocal++
|
||||
}
|
||||
}
|
||||
nentries += nentriesLocal
|
||||
if nentriesLocal == 0 {
|
||||
stats.EmptyBuckets++
|
||||
}
|
||||
if b.next == nil {
|
||||
break
|
||||
}
|
||||
b = (*bucketPadded)(b.next)
|
||||
stats.TotalBuckets++
|
||||
}
|
||||
if nentries < stats.MinEntries {
|
||||
stats.MinEntries = nentries
|
||||
}
|
||||
if nentries > stats.MaxEntries {
|
||||
stats.MaxEntries = nentries
|
||||
}
|
||||
}
|
||||
return stats
|
||||
}
|
669
vendor/github.com/puzpuzpuz/xsync/v3/mapof.go
generated
vendored
Normal file
669
vendor/github.com/puzpuzpuz/xsync/v3/mapof.go
generated
vendored
Normal file
@@ -0,0 +1,669 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// MapOf is like a Go map[K]V but is safe for concurrent
|
||||
// use by multiple goroutines without additional locking or
|
||||
// coordination. It follows the interface of sync.Map with
|
||||
// a number of valuable extensions like Compute or Size.
|
||||
//
|
||||
// A MapOf must not be copied after first use.
|
||||
//
|
||||
// MapOf uses a modified version of Cache-Line Hash Table (CLHT)
|
||||
// data structure: https://github.com/LPD-EPFL/CLHT
|
||||
//
|
||||
// CLHT is built around idea to organize the hash table in
|
||||
// cache-line-sized buckets, so that on all modern CPUs update
|
||||
// operations complete with at most one cache-line transfer.
|
||||
// Also, Get operations involve no write to memory, as well as no
|
||||
// mutexes or any other sort of locks. Due to this design, in all
|
||||
// considered scenarios MapOf outperforms sync.Map.
|
||||
type MapOf[K comparable, V any] struct {
|
||||
totalGrowths int64
|
||||
totalShrinks int64
|
||||
resizing int64 // resize in progress flag; updated atomically
|
||||
resizeMu sync.Mutex // only used along with resizeCond
|
||||
resizeCond sync.Cond // used to wake up resize waiters (concurrent modifications)
|
||||
table unsafe.Pointer // *mapOfTable
|
||||
hasher func(K, uint64) uint64
|
||||
minTableLen int
|
||||
}
|
||||
|
||||
type mapOfTable[K comparable, V any] struct {
|
||||
buckets []bucketOfPadded
|
||||
// striped counter for number of table entries;
|
||||
// used to determine if a table shrinking is needed
|
||||
// occupies min(buckets_memory/1024, 64KB) of memory
|
||||
size []counterStripe
|
||||
seed uint64
|
||||
}
|
||||
|
||||
// bucketOfPadded is a CL-sized map bucket holding up to
|
||||
// entriesPerMapBucket entries.
|
||||
type bucketOfPadded struct {
|
||||
//lint:ignore U1000 ensure each bucket takes two cache lines on both 32 and 64-bit archs
|
||||
pad [cacheLineSize - unsafe.Sizeof(bucketOf{})]byte
|
||||
bucketOf
|
||||
}
|
||||
|
||||
type bucketOf struct {
|
||||
hashes [entriesPerMapBucket]uint64
|
||||
entries [entriesPerMapBucket]unsafe.Pointer // *entryOf
|
||||
next unsafe.Pointer // *bucketOfPadded
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// entryOf is an immutable map entry.
|
||||
type entryOf[K comparable, V any] struct {
|
||||
key K
|
||||
value V
|
||||
}
|
||||
|
||||
// NewMapOf creates a new MapOf instance.
|
||||
func NewMapOf[K comparable, V any]() *MapOf[K, V] {
|
||||
return NewMapOfPresized[K, V](defaultMinMapTableLen * entriesPerMapBucket)
|
||||
}
|
||||
|
||||
// NewMapOfPresized creates a new MapOf instance with capacity enough
|
||||
// to hold sizeHint entries. If sizeHint is zero or negative, the value
|
||||
// is ignored.
|
||||
func NewMapOfPresized[K comparable, V any](sizeHint int) *MapOf[K, V] {
|
||||
return newMapOfPresized[K, V](makeHasher[K](), sizeHint)
|
||||
}
|
||||
|
||||
func newMapOfPresized[K comparable, V any](
|
||||
hasher func(K, uint64) uint64,
|
||||
sizeHint int,
|
||||
) *MapOf[K, V] {
|
||||
m := &MapOf[K, V]{}
|
||||
m.resizeCond = *sync.NewCond(&m.resizeMu)
|
||||
m.hasher = hasher
|
||||
var table *mapOfTable[K, V]
|
||||
if sizeHint <= defaultMinMapTableLen*entriesPerMapBucket {
|
||||
table = newMapOfTable[K, V](defaultMinMapTableLen)
|
||||
} else {
|
||||
tableLen := nextPowOf2(uint32(sizeHint / entriesPerMapBucket))
|
||||
table = newMapOfTable[K, V](int(tableLen))
|
||||
}
|
||||
m.minTableLen = len(table.buckets)
|
||||
atomic.StorePointer(&m.table, unsafe.Pointer(table))
|
||||
return m
|
||||
}
|
||||
|
||||
func newMapOfTable[K comparable, V any](minTableLen int) *mapOfTable[K, V] {
|
||||
buckets := make([]bucketOfPadded, minTableLen)
|
||||
counterLen := minTableLen >> 10
|
||||
if counterLen < minMapCounterLen {
|
||||
counterLen = minMapCounterLen
|
||||
} else if counterLen > maxMapCounterLen {
|
||||
counterLen = maxMapCounterLen
|
||||
}
|
||||
counter := make([]counterStripe, counterLen)
|
||||
t := &mapOfTable[K, V]{
|
||||
buckets: buckets,
|
||||
size: counter,
|
||||
seed: makeSeed(),
|
||||
}
|
||||
return t
|
||||
}
|
||||
|
||||
// Load returns the value stored in the map for a key, or zero value
|
||||
// of type V if no value is present.
|
||||
// The ok result indicates whether value was found in the map.
|
||||
func (m *MapOf[K, V]) Load(key K) (value V, ok bool) {
|
||||
table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
|
||||
hash := shiftHash(m.hasher(key, table.seed))
|
||||
bidx := uint64(len(table.buckets)-1) & hash
|
||||
b := &table.buckets[bidx]
|
||||
for {
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
// We treat the hash code only as a hint, so there is no
|
||||
// need to get an atomic snapshot.
|
||||
h := atomic.LoadUint64(&b.hashes[i])
|
||||
if h == uint64(0) || h != hash {
|
||||
continue
|
||||
}
|
||||
eptr := atomic.LoadPointer(&b.entries[i])
|
||||
if eptr == nil {
|
||||
continue
|
||||
}
|
||||
e := (*entryOf[K, V])(eptr)
|
||||
if e.key == key {
|
||||
return e.value, true
|
||||
}
|
||||
}
|
||||
bptr := atomic.LoadPointer(&b.next)
|
||||
if bptr == nil {
|
||||
return
|
||||
}
|
||||
b = (*bucketOfPadded)(bptr)
|
||||
}
|
||||
}
|
||||
|
||||
// Store sets the value for a key.
|
||||
func (m *MapOf[K, V]) Store(key K, value V) {
|
||||
m.doCompute(
|
||||
key,
|
||||
func(V, bool) (V, bool) {
|
||||
return value, false
|
||||
},
|
||||
false,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// LoadOrStore returns the existing value for the key if present.
|
||||
// Otherwise, it stores and returns the given value.
|
||||
// The loaded result is true if the value was loaded, false if stored.
|
||||
func (m *MapOf[K, V]) LoadOrStore(key K, value V) (actual V, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(V, bool) (V, bool) {
|
||||
return value, false
|
||||
},
|
||||
true,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// LoadAndStore returns the existing value for the key if present,
|
||||
// while setting the new value for the key.
|
||||
// It stores the new value and returns the existing one, if present.
|
||||
// The loaded result is true if the existing value was loaded,
|
||||
// false otherwise.
|
||||
func (m *MapOf[K, V]) LoadAndStore(key K, value V) (actual V, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(V, bool) (V, bool) {
|
||||
return value, false
|
||||
},
|
||||
false,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// LoadOrCompute returns the existing value for the key if present.
|
||||
// Otherwise, it computes the value using the provided function and
|
||||
// returns the computed value. The loaded result is true if the value
|
||||
// was loaded, false if stored.
|
||||
//
|
||||
// This call locks a hash table bucket while the compute function
|
||||
// is executed. It means that modifications on other entries in
|
||||
// the bucket will be blocked until the valueFn executes. Consider
|
||||
// this when the function includes long-running operations.
|
||||
func (m *MapOf[K, V]) LoadOrCompute(key K, valueFn func() V) (actual V, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(V, bool) (V, bool) {
|
||||
return valueFn(), false
|
||||
},
|
||||
true,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// Compute either sets the computed new value for the key or deletes
|
||||
// the value for the key. When the delete result of the valueFn function
|
||||
// is set to true, the value will be deleted, if it exists. When delete
|
||||
// is set to false, the value is updated to the newValue.
|
||||
// The ok result indicates whether value was computed and stored, thus, is
|
||||
// present in the map. The actual result contains the new value in cases where
|
||||
// the value was computed and stored. See the example for a few use cases.
|
||||
//
|
||||
// This call locks a hash table bucket while the compute function
|
||||
// is executed. It means that modifications on other entries in
|
||||
// the bucket will be blocked until the valueFn executes. Consider
|
||||
// this when the function includes long-running operations.
|
||||
func (m *MapOf[K, V]) Compute(
|
||||
key K,
|
||||
valueFn func(oldValue V, loaded bool) (newValue V, delete bool),
|
||||
) (actual V, ok bool) {
|
||||
return m.doCompute(key, valueFn, false, true)
|
||||
}
|
||||
|
||||
// LoadAndDelete deletes the value for a key, returning the previous
|
||||
// value if any. The loaded result reports whether the key was
|
||||
// present.
|
||||
func (m *MapOf[K, V]) LoadAndDelete(key K) (value V, loaded bool) {
|
||||
return m.doCompute(
|
||||
key,
|
||||
func(value V, loaded bool) (V, bool) {
|
||||
return value, true
|
||||
},
|
||||
false,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
// Delete deletes the value for a key.
|
||||
func (m *MapOf[K, V]) Delete(key K) {
|
||||
m.doCompute(
|
||||
key,
|
||||
func(value V, loaded bool) (V, bool) {
|
||||
return value, true
|
||||
},
|
||||
false,
|
||||
false,
|
||||
)
|
||||
}
|
||||
|
||||
func (m *MapOf[K, V]) doCompute(
|
||||
key K,
|
||||
valueFn func(oldValue V, loaded bool) (V, bool),
|
||||
loadIfExists, computeOnly bool,
|
||||
) (V, bool) {
|
||||
// Read-only path.
|
||||
if loadIfExists {
|
||||
if v, ok := m.Load(key); ok {
|
||||
return v, !computeOnly
|
||||
}
|
||||
}
|
||||
// Write path.
|
||||
for {
|
||||
compute_attempt:
|
||||
var (
|
||||
emptyb *bucketOfPadded
|
||||
emptyidx int
|
||||
hintNonEmpty int
|
||||
)
|
||||
table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
|
||||
tableLen := len(table.buckets)
|
||||
hash := shiftHash(m.hasher(key, table.seed))
|
||||
bidx := uint64(len(table.buckets)-1) & hash
|
||||
rootb := &table.buckets[bidx]
|
||||
rootb.mu.Lock()
|
||||
// The following two checks must go in reverse to what's
|
||||
// in the resize method.
|
||||
if m.resizeInProgress() {
|
||||
// Resize is in progress. Wait, then go for another attempt.
|
||||
rootb.mu.Unlock()
|
||||
m.waitForResize()
|
||||
goto compute_attempt
|
||||
}
|
||||
if m.newerTableExists(table) {
|
||||
// Someone resized the table. Go for another attempt.
|
||||
rootb.mu.Unlock()
|
||||
goto compute_attempt
|
||||
}
|
||||
b := rootb
|
||||
for {
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
h := atomic.LoadUint64(&b.hashes[i])
|
||||
if h == uint64(0) {
|
||||
if emptyb == nil {
|
||||
emptyb = b
|
||||
emptyidx = i
|
||||
}
|
||||
continue
|
||||
}
|
||||
if h != hash {
|
||||
hintNonEmpty++
|
||||
continue
|
||||
}
|
||||
e := (*entryOf[K, V])(b.entries[i])
|
||||
if e.key == key {
|
||||
if loadIfExists {
|
||||
rootb.mu.Unlock()
|
||||
return e.value, !computeOnly
|
||||
}
|
||||
// In-place update/delete.
|
||||
// We get a copy of the value via an interface{} on each call,
|
||||
// thus the live value pointers are unique. Otherwise atomic
|
||||
// snapshot won't be correct in case of multiple Store calls
|
||||
// using the same value.
|
||||
oldv := e.value
|
||||
newv, del := valueFn(oldv, true)
|
||||
if del {
|
||||
// Deletion.
|
||||
// First we update the hash, then the entry.
|
||||
atomic.StoreUint64(&b.hashes[i], uint64(0))
|
||||
atomic.StorePointer(&b.entries[i], nil)
|
||||
leftEmpty := false
|
||||
if hintNonEmpty == 0 {
|
||||
leftEmpty = isEmptyBucketOf(b)
|
||||
}
|
||||
rootb.mu.Unlock()
|
||||
table.addSize(bidx, -1)
|
||||
// Might need to shrink the table.
|
||||
if leftEmpty {
|
||||
m.resize(table, mapShrinkHint)
|
||||
}
|
||||
return oldv, !computeOnly
|
||||
}
|
||||
newe := new(entryOf[K, V])
|
||||
newe.key = key
|
||||
newe.value = newv
|
||||
atomic.StorePointer(&b.entries[i], unsafe.Pointer(newe))
|
||||
rootb.mu.Unlock()
|
||||
if computeOnly {
|
||||
// Compute expects the new value to be returned.
|
||||
return newv, true
|
||||
}
|
||||
// LoadAndStore expects the old value to be returned.
|
||||
return oldv, true
|
||||
}
|
||||
hintNonEmpty++
|
||||
}
|
||||
if b.next == nil {
|
||||
if emptyb != nil {
|
||||
// Insertion into an existing bucket.
|
||||
var zeroedV V
|
||||
newValue, del := valueFn(zeroedV, false)
|
||||
if del {
|
||||
rootb.mu.Unlock()
|
||||
return zeroedV, false
|
||||
}
|
||||
newe := new(entryOf[K, V])
|
||||
newe.key = key
|
||||
newe.value = newValue
|
||||
// First we update the hash, then the entry.
|
||||
atomic.StoreUint64(&emptyb.hashes[emptyidx], hash)
|
||||
atomic.StorePointer(&emptyb.entries[emptyidx], unsafe.Pointer(newe))
|
||||
rootb.mu.Unlock()
|
||||
table.addSize(bidx, 1)
|
||||
return newValue, computeOnly
|
||||
}
|
||||
growThreshold := float64(tableLen) * entriesPerMapBucket * mapLoadFactor
|
||||
if table.sumSize() > int64(growThreshold) {
|
||||
// Need to grow the table. Then go for another attempt.
|
||||
rootb.mu.Unlock()
|
||||
m.resize(table, mapGrowHint)
|
||||
goto compute_attempt
|
||||
}
|
||||
// Insertion into a new bucket.
|
||||
var zeroedV V
|
||||
newValue, del := valueFn(zeroedV, false)
|
||||
if del {
|
||||
rootb.mu.Unlock()
|
||||
return newValue, false
|
||||
}
|
||||
// Create and append the bucket.
|
||||
newb := new(bucketOfPadded)
|
||||
newb.hashes[0] = hash
|
||||
newe := new(entryOf[K, V])
|
||||
newe.key = key
|
||||
newe.value = newValue
|
||||
newb.entries[0] = unsafe.Pointer(newe)
|
||||
atomic.StorePointer(&b.next, unsafe.Pointer(newb))
|
||||
rootb.mu.Unlock()
|
||||
table.addSize(bidx, 1)
|
||||
return newValue, computeOnly
|
||||
}
|
||||
b = (*bucketOfPadded)(b.next)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *MapOf[K, V]) newerTableExists(table *mapOfTable[K, V]) bool {
|
||||
curTablePtr := atomic.LoadPointer(&m.table)
|
||||
return uintptr(curTablePtr) != uintptr(unsafe.Pointer(table))
|
||||
}
|
||||
|
||||
func (m *MapOf[K, V]) resizeInProgress() bool {
|
||||
return atomic.LoadInt64(&m.resizing) == 1
|
||||
}
|
||||
|
||||
func (m *MapOf[K, V]) waitForResize() {
|
||||
m.resizeMu.Lock()
|
||||
for m.resizeInProgress() {
|
||||
m.resizeCond.Wait()
|
||||
}
|
||||
m.resizeMu.Unlock()
|
||||
}
|
||||
|
||||
func (m *MapOf[K, V]) resize(knownTable *mapOfTable[K, V], hint mapResizeHint) {
|
||||
knownTableLen := len(knownTable.buckets)
|
||||
// Fast path for shrink attempts.
|
||||
if hint == mapShrinkHint {
|
||||
shrinkThreshold := int64((knownTableLen * entriesPerMapBucket) / mapShrinkFraction)
|
||||
if knownTableLen == m.minTableLen || knownTable.sumSize() > shrinkThreshold {
|
||||
return
|
||||
}
|
||||
}
|
||||
// Slow path.
|
||||
if !atomic.CompareAndSwapInt64(&m.resizing, 0, 1) {
|
||||
// Someone else started resize. Wait for it to finish.
|
||||
m.waitForResize()
|
||||
return
|
||||
}
|
||||
var newTable *mapOfTable[K, V]
|
||||
table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
|
||||
tableLen := len(table.buckets)
|
||||
switch hint {
|
||||
case mapGrowHint:
|
||||
// Grow the table with factor of 2.
|
||||
atomic.AddInt64(&m.totalGrowths, 1)
|
||||
newTable = newMapOfTable[K, V](tableLen << 1)
|
||||
case mapShrinkHint:
|
||||
shrinkThreshold := int64((tableLen * entriesPerMapBucket) / mapShrinkFraction)
|
||||
if tableLen > m.minTableLen && table.sumSize() <= shrinkThreshold {
|
||||
// Shrink the table with factor of 2.
|
||||
atomic.AddInt64(&m.totalShrinks, 1)
|
||||
newTable = newMapOfTable[K, V](tableLen >> 1)
|
||||
} else {
|
||||
// No need to shrink. Wake up all waiters and give up.
|
||||
m.resizeMu.Lock()
|
||||
atomic.StoreInt64(&m.resizing, 0)
|
||||
m.resizeCond.Broadcast()
|
||||
m.resizeMu.Unlock()
|
||||
return
|
||||
}
|
||||
case mapClearHint:
|
||||
newTable = newMapOfTable[K, V](m.minTableLen)
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected resize hint: %d", hint))
|
||||
}
|
||||
// Copy the data only if we're not clearing the map.
|
||||
if hint != mapClearHint {
|
||||
for i := 0; i < tableLen; i++ {
|
||||
copied := copyBucketOf(&table.buckets[i], newTable, m.hasher)
|
||||
newTable.addSizePlain(uint64(i), copied)
|
||||
}
|
||||
}
|
||||
// Publish the new table and wake up all waiters.
|
||||
atomic.StorePointer(&m.table, unsafe.Pointer(newTable))
|
||||
m.resizeMu.Lock()
|
||||
atomic.StoreInt64(&m.resizing, 0)
|
||||
m.resizeCond.Broadcast()
|
||||
m.resizeMu.Unlock()
|
||||
}
|
||||
|
||||
func copyBucketOf[K comparable, V any](
|
||||
b *bucketOfPadded,
|
||||
destTable *mapOfTable[K, V],
|
||||
hasher func(K, uint64) uint64,
|
||||
) (copied int) {
|
||||
rootb := b
|
||||
rootb.mu.Lock()
|
||||
for {
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if b.entries[i] != nil {
|
||||
e := (*entryOf[K, V])(b.entries[i])
|
||||
hash := shiftHash(hasher(e.key, destTable.seed))
|
||||
bidx := uint64(len(destTable.buckets)-1) & hash
|
||||
destb := &destTable.buckets[bidx]
|
||||
appendToBucketOf(hash, b.entries[i], destb)
|
||||
copied++
|
||||
}
|
||||
}
|
||||
if b.next == nil {
|
||||
rootb.mu.Unlock()
|
||||
return
|
||||
}
|
||||
b = (*bucketOfPadded)(b.next)
|
||||
}
|
||||
}
|
||||
|
||||
// Range calls f sequentially for each key and value present in the
|
||||
// map. If f returns false, range stops the iteration.
|
||||
//
|
||||
// Range does not necessarily correspond to any consistent snapshot
|
||||
// of the Map's contents: no key will be visited more than once, but
|
||||
// if the value for any key is stored or deleted concurrently, Range
|
||||
// may reflect any mapping for that key from any point during the
|
||||
// Range call.
|
||||
//
|
||||
// It is safe to modify the map while iterating it, including entry
|
||||
// creation, modification and deletion. However, the concurrent
|
||||
// modification rule apply, i.e. the changes may be not reflected
|
||||
// in the subsequently iterated entries.
|
||||
func (m *MapOf[K, V]) Range(f func(key K, value V) bool) {
|
||||
var zeroPtr unsafe.Pointer
|
||||
// Pre-allocate array big enough to fit entries for most hash tables.
|
||||
bentries := make([]unsafe.Pointer, 0, 16*entriesPerMapBucket)
|
||||
tablep := atomic.LoadPointer(&m.table)
|
||||
table := *(*mapOfTable[K, V])(tablep)
|
||||
for i := range table.buckets {
|
||||
rootb := &table.buckets[i]
|
||||
b := rootb
|
||||
// Prevent concurrent modifications and copy all entries into
|
||||
// the intermediate slice.
|
||||
rootb.mu.Lock()
|
||||
for {
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if b.entries[i] != nil {
|
||||
bentries = append(bentries, b.entries[i])
|
||||
}
|
||||
}
|
||||
if b.next == nil {
|
||||
rootb.mu.Unlock()
|
||||
break
|
||||
}
|
||||
b = (*bucketOfPadded)(b.next)
|
||||
}
|
||||
// Call the function for all copied entries.
|
||||
for j := range bentries {
|
||||
entry := (*entryOf[K, V])(bentries[j])
|
||||
if !f(entry.key, entry.value) {
|
||||
return
|
||||
}
|
||||
// Remove the reference to avoid preventing the copied
|
||||
// entries from being GCed until this method finishes.
|
||||
bentries[j] = zeroPtr
|
||||
}
|
||||
bentries = bentries[:0]
|
||||
}
|
||||
}
|
||||
|
||||
// Clear deletes all keys and values currently stored in the map.
|
||||
func (m *MapOf[K, V]) Clear() {
|
||||
table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
|
||||
m.resize(table, mapClearHint)
|
||||
}
|
||||
|
||||
// Size returns current size of the map.
|
||||
func (m *MapOf[K, V]) Size() int {
|
||||
table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
|
||||
return int(table.sumSize())
|
||||
}
|
||||
|
||||
func appendToBucketOf(hash uint64, entryPtr unsafe.Pointer, b *bucketOfPadded) {
|
||||
for {
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if b.entries[i] == nil {
|
||||
b.hashes[i] = hash
|
||||
b.entries[i] = entryPtr
|
||||
return
|
||||
}
|
||||
}
|
||||
if b.next == nil {
|
||||
newb := new(bucketOfPadded)
|
||||
newb.hashes[0] = hash
|
||||
newb.entries[0] = entryPtr
|
||||
b.next = unsafe.Pointer(newb)
|
||||
return
|
||||
}
|
||||
b = (*bucketOfPadded)(b.next)
|
||||
}
|
||||
}
|
||||
|
||||
func isEmptyBucketOf(rootb *bucketOfPadded) bool {
|
||||
b := rootb
|
||||
for {
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if b.entries[i] != nil {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if b.next == nil {
|
||||
return true
|
||||
}
|
||||
b = (*bucketOfPadded)(b.next)
|
||||
}
|
||||
}
|
||||
|
||||
func (table *mapOfTable[K, V]) addSize(bucketIdx uint64, delta int) {
|
||||
cidx := uint64(len(table.size)-1) & bucketIdx
|
||||
atomic.AddInt64(&table.size[cidx].c, int64(delta))
|
||||
}
|
||||
|
||||
func (table *mapOfTable[K, V]) addSizePlain(bucketIdx uint64, delta int) {
|
||||
cidx := uint64(len(table.size)-1) & bucketIdx
|
||||
table.size[cidx].c += int64(delta)
|
||||
}
|
||||
|
||||
func (table *mapOfTable[K, V]) sumSize() int64 {
|
||||
sum := int64(0)
|
||||
for i := range table.size {
|
||||
sum += atomic.LoadInt64(&table.size[i].c)
|
||||
}
|
||||
return sum
|
||||
}
|
||||
|
||||
func shiftHash(h uint64) uint64 {
|
||||
// uint64(0) is a reserved value which stands for an empty slot.
|
||||
if h == uint64(0) {
|
||||
return uint64(1)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
// O(N) operation; use for debug purposes only
|
||||
func (m *MapOf[K, V]) stats() mapStats {
|
||||
stats := mapStats{
|
||||
TotalGrowths: atomic.LoadInt64(&m.totalGrowths),
|
||||
TotalShrinks: atomic.LoadInt64(&m.totalShrinks),
|
||||
MinEntries: math.MaxInt32,
|
||||
}
|
||||
table := (*mapOfTable[K, V])(atomic.LoadPointer(&m.table))
|
||||
stats.RootBuckets = len(table.buckets)
|
||||
stats.Counter = int(table.sumSize())
|
||||
stats.CounterLen = len(table.size)
|
||||
for i := range table.buckets {
|
||||
nentries := 0
|
||||
b := &table.buckets[i]
|
||||
stats.TotalBuckets++
|
||||
for {
|
||||
nentriesLocal := 0
|
||||
stats.Capacity += entriesPerMapBucket
|
||||
for i := 0; i < entriesPerMapBucket; i++ {
|
||||
if atomic.LoadPointer(&b.entries[i]) != nil {
|
||||
stats.Size++
|
||||
nentriesLocal++
|
||||
}
|
||||
}
|
||||
nentries += nentriesLocal
|
||||
if nentriesLocal == 0 {
|
||||
stats.EmptyBuckets++
|
||||
}
|
||||
if b.next == nil {
|
||||
break
|
||||
}
|
||||
b = (*bucketOfPadded)(b.next)
|
||||
stats.TotalBuckets++
|
||||
}
|
||||
if nentries < stats.MinEntries {
|
||||
stats.MinEntries = nentries
|
||||
}
|
||||
if nentries > stats.MaxEntries {
|
||||
stats.MaxEntries = nentries
|
||||
}
|
||||
}
|
||||
return stats
|
||||
}
|
137
vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go
generated
vendored
Normal file
137
vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueue.go
generated
vendored
Normal file
@@ -0,0 +1,137 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// A MPMCQueue is a bounded multi-producer multi-consumer concurrent
|
||||
// queue.
|
||||
//
|
||||
// MPMCQueue instances must be created with NewMPMCQueue function.
|
||||
// A MPMCQueue must not be copied after first use.
|
||||
//
|
||||
// Based on the data structure from the following C++ library:
|
||||
// https://github.com/rigtorp/MPMCQueue
|
||||
type MPMCQueue struct {
|
||||
cap uint64
|
||||
head uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
hpad [cacheLineSize - 8]byte
|
||||
tail uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
tpad [cacheLineSize - 8]byte
|
||||
slots []slotPadded
|
||||
}
|
||||
|
||||
type slotPadded struct {
|
||||
slot
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad [cacheLineSize - unsafe.Sizeof(slot{})]byte
|
||||
}
|
||||
|
||||
type slot struct {
|
||||
turn uint64
|
||||
item interface{}
|
||||
}
|
||||
|
||||
// NewMPMCQueue creates a new MPMCQueue instance with the given
|
||||
// capacity.
|
||||
func NewMPMCQueue(capacity int) *MPMCQueue {
|
||||
if capacity < 1 {
|
||||
panic("capacity must be positive number")
|
||||
}
|
||||
return &MPMCQueue{
|
||||
cap: uint64(capacity),
|
||||
slots: make([]slotPadded, capacity),
|
||||
}
|
||||
}
|
||||
|
||||
// Enqueue inserts the given item into the queue.
|
||||
// Blocks, if the queue is full.
|
||||
func (q *MPMCQueue) Enqueue(item interface{}) {
|
||||
head := atomic.AddUint64(&q.head, 1) - 1
|
||||
slot := &q.slots[q.idx(head)]
|
||||
turn := q.turn(head) * 2
|
||||
for atomic.LoadUint64(&slot.turn) != turn {
|
||||
runtime.Gosched()
|
||||
}
|
||||
slot.item = item
|
||||
atomic.StoreUint64(&slot.turn, turn+1)
|
||||
}
|
||||
|
||||
// Dequeue retrieves and removes the item from the head of the queue.
|
||||
// Blocks, if the queue is empty.
|
||||
func (q *MPMCQueue) Dequeue() interface{} {
|
||||
tail := atomic.AddUint64(&q.tail, 1) - 1
|
||||
slot := &q.slots[q.idx(tail)]
|
||||
turn := q.turn(tail)*2 + 1
|
||||
for atomic.LoadUint64(&slot.turn) != turn {
|
||||
runtime.Gosched()
|
||||
}
|
||||
item := slot.item
|
||||
slot.item = nil
|
||||
atomic.StoreUint64(&slot.turn, turn+1)
|
||||
return item
|
||||
}
|
||||
|
||||
// TryEnqueue inserts the given item into the queue. Does not block
|
||||
// and returns immediately. The result indicates that the queue isn't
|
||||
// full and the item was inserted.
|
||||
func (q *MPMCQueue) TryEnqueue(item interface{}) bool {
|
||||
head := atomic.LoadUint64(&q.head)
|
||||
for {
|
||||
slot := &q.slots[q.idx(head)]
|
||||
turn := q.turn(head) * 2
|
||||
if atomic.LoadUint64(&slot.turn) == turn {
|
||||
if atomic.CompareAndSwapUint64(&q.head, head, head+1) {
|
||||
slot.item = item
|
||||
atomic.StoreUint64(&slot.turn, turn+1)
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
prevHead := head
|
||||
head = atomic.LoadUint64(&q.head)
|
||||
if head == prevHead {
|
||||
return false
|
||||
}
|
||||
}
|
||||
runtime.Gosched()
|
||||
}
|
||||
}
|
||||
|
||||
// TryDequeue retrieves and removes the item from the head of the
|
||||
// queue. Does not block and returns immediately. The ok result
|
||||
// indicates that the queue isn't empty and an item was retrieved.
|
||||
func (q *MPMCQueue) TryDequeue() (item interface{}, ok bool) {
|
||||
tail := atomic.LoadUint64(&q.tail)
|
||||
for {
|
||||
slot := &q.slots[q.idx(tail)]
|
||||
turn := q.turn(tail)*2 + 1
|
||||
if atomic.LoadUint64(&slot.turn) == turn {
|
||||
if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) {
|
||||
item = slot.item
|
||||
ok = true
|
||||
slot.item = nil
|
||||
atomic.StoreUint64(&slot.turn, turn+1)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
prevTail := tail
|
||||
tail = atomic.LoadUint64(&q.tail)
|
||||
if tail == prevTail {
|
||||
return
|
||||
}
|
||||
}
|
||||
runtime.Gosched()
|
||||
}
|
||||
}
|
||||
|
||||
func (q *MPMCQueue) idx(i uint64) uint64 {
|
||||
return i % q.cap
|
||||
}
|
||||
|
||||
func (q *MPMCQueue) turn(i uint64) uint64 {
|
||||
return i / q.cap
|
||||
}
|
150
vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go
generated
vendored
Normal file
150
vendor/github.com/puzpuzpuz/xsync/v3/mpmcqueueof.go
generated
vendored
Normal file
@@ -0,0 +1,150 @@
|
||||
//go:build go1.19
|
||||
// +build go1.19
|
||||
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync/atomic"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// A MPMCQueueOf is a bounded multi-producer multi-consumer concurrent
|
||||
// queue. It's a generic version of MPMCQueue.
|
||||
//
|
||||
// MPMCQueue instances must be created with NewMPMCQueueOf function.
|
||||
// A MPMCQueueOf must not be copied after first use.
|
||||
//
|
||||
// Based on the data structure from the following C++ library:
|
||||
// https://github.com/rigtorp/MPMCQueue
|
||||
type MPMCQueueOf[I any] struct {
|
||||
cap uint64
|
||||
head uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
hpad [cacheLineSize - 8]byte
|
||||
tail uint64
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
tpad [cacheLineSize - 8]byte
|
||||
slots []slotOfPadded[I]
|
||||
}
|
||||
|
||||
type slotOfPadded[I any] struct {
|
||||
slotOf[I]
|
||||
// Unfortunately, proper padding like the below one:
|
||||
//
|
||||
// pad [cacheLineSize - (unsafe.Sizeof(slotOf[I]{}) % cacheLineSize)]byte
|
||||
//
|
||||
// won't compile, so here we add a best-effort padding for items up to
|
||||
// 56 bytes size.
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad [cacheLineSize - unsafe.Sizeof(atomic.Uint64{})]byte
|
||||
}
|
||||
|
||||
type slotOf[I any] struct {
|
||||
// atomic.Uint64 is used here to get proper 8 byte alignment on
|
||||
// 32-bit archs.
|
||||
turn atomic.Uint64
|
||||
item I
|
||||
}
|
||||
|
||||
// NewMPMCQueueOf creates a new MPMCQueueOf instance with the given
|
||||
// capacity.
|
||||
func NewMPMCQueueOf[I any](capacity int) *MPMCQueueOf[I] {
|
||||
if capacity < 1 {
|
||||
panic("capacity must be positive number")
|
||||
}
|
||||
return &MPMCQueueOf[I]{
|
||||
cap: uint64(capacity),
|
||||
slots: make([]slotOfPadded[I], capacity),
|
||||
}
|
||||
}
|
||||
|
||||
// Enqueue inserts the given item into the queue.
|
||||
// Blocks, if the queue is full.
|
||||
func (q *MPMCQueueOf[I]) Enqueue(item I) {
|
||||
head := atomic.AddUint64(&q.head, 1) - 1
|
||||
slot := &q.slots[q.idx(head)]
|
||||
turn := q.turn(head) * 2
|
||||
for slot.turn.Load() != turn {
|
||||
runtime.Gosched()
|
||||
}
|
||||
slot.item = item
|
||||
slot.turn.Store(turn + 1)
|
||||
}
|
||||
|
||||
// Dequeue retrieves and removes the item from the head of the queue.
|
||||
// Blocks, if the queue is empty.
|
||||
func (q *MPMCQueueOf[I]) Dequeue() I {
|
||||
var zeroedI I
|
||||
tail := atomic.AddUint64(&q.tail, 1) - 1
|
||||
slot := &q.slots[q.idx(tail)]
|
||||
turn := q.turn(tail)*2 + 1
|
||||
for slot.turn.Load() != turn {
|
||||
runtime.Gosched()
|
||||
}
|
||||
item := slot.item
|
||||
slot.item = zeroedI
|
||||
slot.turn.Store(turn + 1)
|
||||
return item
|
||||
}
|
||||
|
||||
// TryEnqueue inserts the given item into the queue. Does not block
|
||||
// and returns immediately. The result indicates that the queue isn't
|
||||
// full and the item was inserted.
|
||||
func (q *MPMCQueueOf[I]) TryEnqueue(item I) bool {
|
||||
head := atomic.LoadUint64(&q.head)
|
||||
for {
|
||||
slot := &q.slots[q.idx(head)]
|
||||
turn := q.turn(head) * 2
|
||||
if slot.turn.Load() == turn {
|
||||
if atomic.CompareAndSwapUint64(&q.head, head, head+1) {
|
||||
slot.item = item
|
||||
slot.turn.Store(turn + 1)
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
prevHead := head
|
||||
head = atomic.LoadUint64(&q.head)
|
||||
if head == prevHead {
|
||||
return false
|
||||
}
|
||||
}
|
||||
runtime.Gosched()
|
||||
}
|
||||
}
|
||||
|
||||
// TryDequeue retrieves and removes the item from the head of the
|
||||
// queue. Does not block and returns immediately. The ok result
|
||||
// indicates that the queue isn't empty and an item was retrieved.
|
||||
func (q *MPMCQueueOf[I]) TryDequeue() (item I, ok bool) {
|
||||
tail := atomic.LoadUint64(&q.tail)
|
||||
for {
|
||||
slot := &q.slots[q.idx(tail)]
|
||||
turn := q.turn(tail)*2 + 1
|
||||
if slot.turn.Load() == turn {
|
||||
if atomic.CompareAndSwapUint64(&q.tail, tail, tail+1) {
|
||||
var zeroedI I
|
||||
item = slot.item
|
||||
ok = true
|
||||
slot.item = zeroedI
|
||||
slot.turn.Store(turn + 1)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
prevTail := tail
|
||||
tail = atomic.LoadUint64(&q.tail)
|
||||
if tail == prevTail {
|
||||
return
|
||||
}
|
||||
}
|
||||
runtime.Gosched()
|
||||
}
|
||||
}
|
||||
|
||||
func (q *MPMCQueueOf[I]) idx(i uint64) uint64 {
|
||||
return i % q.cap
|
||||
}
|
||||
|
||||
func (q *MPMCQueueOf[I]) turn(i uint64) uint64 {
|
||||
return i / q.cap
|
||||
}
|
145
vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go
generated
vendored
Normal file
145
vendor/github.com/puzpuzpuz/xsync/v3/rbmutex.go
generated
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
// slow-down guard
|
||||
const nslowdown = 7
|
||||
|
||||
// pool for reader tokens
|
||||
var rtokenPool sync.Pool
|
||||
|
||||
// RToken is a reader lock token.
|
||||
type RToken struct {
|
||||
slot uint32
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad [cacheLineSize - 4]byte
|
||||
}
|
||||
|
||||
// A RBMutex is a reader biased reader/writer mutual exclusion lock.
|
||||
// The lock can be held by an many readers or a single writer.
|
||||
// The zero value for a RBMutex is an unlocked mutex.
|
||||
//
|
||||
// A RBMutex must not be copied after first use.
|
||||
//
|
||||
// RBMutex is based on a modified version of BRAVO
|
||||
// (Biased Locking for Reader-Writer Locks) algorithm:
|
||||
// https://arxiv.org/pdf/1810.01553.pdf
|
||||
//
|
||||
// RBMutex is a specialized mutex for scenarios, such as caches,
|
||||
// where the vast majority of locks are acquired by readers and write
|
||||
// lock acquire attempts are infrequent. In such scenarios, RBMutex
|
||||
// performs better than sync.RWMutex on large multicore machines.
|
||||
//
|
||||
// RBMutex extends sync.RWMutex internally and uses it as the "reader
|
||||
// bias disabled" fallback, so the same semantics apply. The only
|
||||
// noticeable difference is in reader tokens returned from the
|
||||
// RLock/RUnlock methods.
|
||||
type RBMutex struct {
|
||||
rslots []rslot
|
||||
rmask uint32
|
||||
rbias int32
|
||||
inhibitUntil time.Time
|
||||
rw sync.RWMutex
|
||||
}
|
||||
|
||||
type rslot struct {
|
||||
mu int32
|
||||
//lint:ignore U1000 prevents false sharing
|
||||
pad [cacheLineSize - 4]byte
|
||||
}
|
||||
|
||||
// NewRBMutex creates a new RBMutex instance.
|
||||
func NewRBMutex() *RBMutex {
|
||||
nslots := nextPowOf2(parallelism())
|
||||
mu := RBMutex{
|
||||
rslots: make([]rslot, nslots),
|
||||
rmask: nslots - 1,
|
||||
rbias: 1,
|
||||
}
|
||||
return &mu
|
||||
}
|
||||
|
||||
// RLock locks m for reading and returns a reader token. The
|
||||
// token must be used in the later RUnlock call.
|
||||
//
|
||||
// Should not be used for recursive read locking; a blocked Lock
|
||||
// call excludes new readers from acquiring the lock.
|
||||
func (mu *RBMutex) RLock() *RToken {
|
||||
if atomic.LoadInt32(&mu.rbias) == 1 {
|
||||
t, ok := rtokenPool.Get().(*RToken)
|
||||
if !ok {
|
||||
t = new(RToken)
|
||||
t.slot = runtime_fastrand()
|
||||
}
|
||||
// Try all available slots to distribute reader threads to slots.
|
||||
for i := 0; i < len(mu.rslots); i++ {
|
||||
slot := t.slot + uint32(i)
|
||||
rslot := &mu.rslots[slot&mu.rmask]
|
||||
rslotmu := atomic.LoadInt32(&rslot.mu)
|
||||
if atomic.CompareAndSwapInt32(&rslot.mu, rslotmu, rslotmu+1) {
|
||||
if atomic.LoadInt32(&mu.rbias) == 1 {
|
||||
// Hot path succeeded.
|
||||
t.slot = slot
|
||||
return t
|
||||
}
|
||||
// The mutex is no longer reader biased. Go to the slow path.
|
||||
atomic.AddInt32(&rslot.mu, -1)
|
||||
rtokenPool.Put(t)
|
||||
break
|
||||
}
|
||||
// Contention detected. Give a try with the next slot.
|
||||
}
|
||||
}
|
||||
// Slow path.
|
||||
mu.rw.RLock()
|
||||
if atomic.LoadInt32(&mu.rbias) == 0 && time.Now().After(mu.inhibitUntil) {
|
||||
atomic.StoreInt32(&mu.rbias, 1)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RUnlock undoes a single RLock call. A reader token obtained from
|
||||
// the RLock call must be provided. RUnlock does not affect other
|
||||
// simultaneous readers. A panic is raised if m is not locked for
|
||||
// reading on entry to RUnlock.
|
||||
func (mu *RBMutex) RUnlock(t *RToken) {
|
||||
if t == nil {
|
||||
mu.rw.RUnlock()
|
||||
return
|
||||
}
|
||||
if atomic.AddInt32(&mu.rslots[t.slot&mu.rmask].mu, -1) < 0 {
|
||||
panic("invalid reader state detected")
|
||||
}
|
||||
rtokenPool.Put(t)
|
||||
}
|
||||
|
||||
// Lock locks m for writing. If the lock is already locked for
|
||||
// reading or writing, Lock blocks until the lock is available.
|
||||
func (mu *RBMutex) Lock() {
|
||||
mu.rw.Lock()
|
||||
if atomic.LoadInt32(&mu.rbias) == 1 {
|
||||
atomic.StoreInt32(&mu.rbias, 0)
|
||||
start := time.Now()
|
||||
for i := 0; i < len(mu.rslots); i++ {
|
||||
for atomic.LoadInt32(&mu.rslots[i].mu) > 0 {
|
||||
runtime.Gosched()
|
||||
}
|
||||
}
|
||||
mu.inhibitUntil = time.Now().Add(time.Since(start) * nslowdown)
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock unlocks m for writing. A panic is raised if m is not locked
|
||||
// for writing on entry to Unlock.
|
||||
//
|
||||
// As with RWMutex, a locked RBMutex is not associated with a
|
||||
// particular goroutine. One goroutine may RLock (Lock) a RBMutex and
|
||||
// then arrange for another goroutine to RUnlock (Unlock) it.
|
||||
func (mu *RBMutex) Unlock() {
|
||||
mu.rw.Unlock()
|
||||
}
|
46
vendor/github.com/puzpuzpuz/xsync/v3/util.go
generated
vendored
Normal file
46
vendor/github.com/puzpuzpuz/xsync/v3/util.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
_ "unsafe"
|
||||
)
|
||||
|
||||
// test-only assert()-like flag
|
||||
var assertionsEnabled = false
|
||||
|
||||
const (
|
||||
// cacheLineSize is used in paddings to prevent false sharing;
|
||||
// 64B are used instead of 128B as a compromise between
|
||||
// memory footprint and performance; 128B usage may give ~30%
|
||||
// improvement on NUMA machines.
|
||||
cacheLineSize = 64
|
||||
)
|
||||
|
||||
// nextPowOf2 computes the next highest power of 2 of 32-bit v.
|
||||
// Source: https://graphics.stanford.edu/~seander/bithacks.html#RoundUpPowerOf2
|
||||
func nextPowOf2(v uint32) uint32 {
|
||||
if v == 0 {
|
||||
return 1
|
||||
}
|
||||
v--
|
||||
v |= v >> 1
|
||||
v |= v >> 2
|
||||
v |= v >> 4
|
||||
v |= v >> 8
|
||||
v |= v >> 16
|
||||
v++
|
||||
return v
|
||||
}
|
||||
|
||||
func parallelism() uint32 {
|
||||
maxProcs := uint32(runtime.GOMAXPROCS(0))
|
||||
numCores := uint32(runtime.NumCPU())
|
||||
if maxProcs < numCores {
|
||||
return maxProcs
|
||||
}
|
||||
return numCores
|
||||
}
|
||||
|
||||
//go:noescape
|
||||
//go:linkname runtime_fastrand runtime.fastrand
|
||||
func runtime_fastrand() uint32
|
77
vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go
generated
vendored
Normal file
77
vendor/github.com/puzpuzpuz/xsync/v3/util_hash.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
package xsync
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// makeSeed creates a random seed.
|
||||
func makeSeed() uint64 {
|
||||
var s1 uint32
|
||||
for {
|
||||
s1 = runtime_fastrand()
|
||||
// We use seed 0 to indicate an uninitialized seed/hash,
|
||||
// so keep trying until we get a non-zero seed.
|
||||
if s1 != 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
s2 := runtime_fastrand()
|
||||
return uint64(s1)<<32 | uint64(s2)
|
||||
}
|
||||
|
||||
// hashString calculates a hash of s with the given seed.
|
||||
func hashString(s string, seed uint64) uint64 {
|
||||
if s == "" {
|
||||
return seed
|
||||
}
|
||||
strh := (*reflect.StringHeader)(unsafe.Pointer(&s))
|
||||
return uint64(runtime_memhash(unsafe.Pointer(strh.Data), uintptr(seed), uintptr(strh.Len)))
|
||||
}
|
||||
|
||||
//go:noescape
|
||||
//go:linkname runtime_memhash runtime.memhash
|
||||
func runtime_memhash(p unsafe.Pointer, h, s uintptr) uintptr
|
||||
|
||||
// makeHasher creates a fast hash function for the given comparable type.
|
||||
// The only limitation is that the type should not contain interfaces inside
|
||||
// based on runtime.typehash.
|
||||
func makeHasher[T comparable]() func(T, uint64) uint64 {
|
||||
var zero T
|
||||
|
||||
if reflect.TypeOf(&zero).Elem().Kind() == reflect.Interface {
|
||||
return func(value T, seed uint64) uint64 {
|
||||
iValue := any(value)
|
||||
i := (*iface)(unsafe.Pointer(&iValue))
|
||||
return runtime_typehash64(i.typ, i.word, seed)
|
||||
}
|
||||
} else {
|
||||
var iZero any = zero
|
||||
i := (*iface)(unsafe.Pointer(&iZero))
|
||||
return func(value T, seed uint64) uint64 {
|
||||
return runtime_typehash64(i.typ, unsafe.Pointer(&value), seed)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// how interface is represented in memory
|
||||
type iface struct {
|
||||
typ uintptr
|
||||
word unsafe.Pointer
|
||||
}
|
||||
|
||||
// same as runtime_typehash, but always returns a uint64
|
||||
// see: maphash.rthash function for details
|
||||
func runtime_typehash64(t uintptr, p unsafe.Pointer, seed uint64) uint64 {
|
||||
if unsafe.Sizeof(uintptr(0)) == 8 {
|
||||
return uint64(runtime_typehash(t, p, uintptr(seed)))
|
||||
}
|
||||
|
||||
lo := runtime_typehash(t, p, uintptr(seed))
|
||||
hi := runtime_typehash(t, p, uintptr(seed>>32))
|
||||
return uint64(hi)<<32 | uint64(lo)
|
||||
}
|
||||
|
||||
//go:noescape
|
||||
//go:linkname runtime_typehash runtime.typehash
|
||||
func runtime_typehash(t uintptr, p unsafe.Pointer, h uintptr) uintptr
|
3
vendor/modules.txt
vendored
3
vendor/modules.txt
vendored
@@ -279,6 +279,9 @@ github.com/prometheus/common/model
|
||||
github.com/prometheus/procfs
|
||||
github.com/prometheus/procfs/internal/fs
|
||||
github.com/prometheus/procfs/internal/util
|
||||
# github.com/puzpuzpuz/xsync/v3 v3.1.0
|
||||
## explicit; go 1.18
|
||||
github.com/puzpuzpuz/xsync/v3
|
||||
# github.com/rs/xid v1.5.0
|
||||
## explicit; go 1.12
|
||||
github.com/rs/xid
|
||||
|
Reference in New Issue
Block a user