diff --git a/leveldb/README.md b/leveldb/README.md
new file mode 100644
index 00000000..7e345858
--- /dev/null
+++ b/leveldb/README.md
@@ -0,0 +1,178 @@
+---
+id: leveldb
+title: LevelDB
+---
+
+![Release](https://img.shields.io/github/v/tag/gofiber/storage?filter=leveldb*)
+[![Discord](https://img.shields.io/discord/704680098577514527?style=flat&label=%F0%9F%92%AC%20discord&color=00ACD7)](https://gofiber.io/discord)
+![Test](https://img.shields.io/github/actions/workflow/status/gofiber/storage/test-leveldb.yml?label=Tests)
+![Security](https://img.shields.io/github/actions/workflow/status/gofiber/storage/gosec.yml?label=Security)
+![Linter](https://img.shields.io/github/actions/workflow/status/gofiber/storage/linter.yml?label=Linter)
+
+A fast key-value storage driver using [syndtr/goleveldb](https://github.com/syndtr/goleveldb).
+
+**Note: Requires Go 1.23.1 and above**
+
+### Table of Contents
+
+- [Signatures](#signatures)
+- [Installation](#installation)
+- [Examples](#examples)
+- [Config](#config)
+- [Default Config](#default-config)
+
+### Signatures
+
+```go
+func New(config ...Config) *Storage
+func (s *Storage) Get(key []byte) ([]byte, error)
+func (s *Storage) Set(key, value []byte, exp time.Duration) error
+func (s *Storage) Delete(key string) error
+func (s *Storage) Reset() error
+func (s *Storage) Close() error
+func (s *Storage) Conn() *leveldb.DB
+```
+
+### Installation
+
+LevelDB is tested on the two latest [Go versions](https://golang.org/dl/) with support for modules. So make sure to initialize one first if you didn't do that yet:
+
+```bash
+go mod init github.com/<user>/<repo>
+```
+
+And then install the leveldb implementation:
+
+```bash
+go get github.com/gofiber/storage/leveldb
+```
+
+### Examples
+
+Import the storage package.
+
+```go
+import "github.com/gofiber/storage/leveldb"
+```
+
+You can use the following possibilities to create a storage:
+
+```go
+// Initialize default config
+store := leveldb.New()
+
+// Initialize custom config
+store := leveldb.New(leveldb.Config{
+	DBPath:     "./fiber.leveldb",
+	GCInterval: 10 * time.Second,
+})
+```
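+
+A minimal end-to-end sketch of the storage API (the key and value below are placeholders; `Get` returns `nil` without an error when a key is missing or expired):
+
+```go
+store := leveldb.New()
+
+// Store a value for 10 minutes (an expiration of 0 means the entry never expires)
+if err := store.Set([]byte("john"), []byte("doe"), 10*time.Minute); err != nil {
+	log.Fatal(err)
+}
+
+// Read the value back
+value, err := store.Get([]byte("john"))
+if err != nil {
+	log.Fatal(err)
+}
+fmt.Println(string(value))
+
+// Remove the key and shut the store down when you are done
+_ = store.Delete("john")
+_ = store.Close()
+```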
+
+### Config
+
+```go
+type Config struct {
+	// DBPath is the filesystem path for the database
+	//
+	// Optional. Default is "./fiber.leveldb"
+	DBPath string
+
+	// CacheSize is the size of LevelDB's cache (in MB)
+	//
+	// Optional. Default is 8MB
+	CacheSize int
+
+	// BlockSize is the size of data blocks (in KB)
+	//
+	// Optional. Default is 4KB
+	BlockSize int
+
+	// WriteBuffer is the size of write buffer (in MB)
+	//
+	// Optional. Default is 4MB
+	WriteBuffer int
+
+	// CompactionL0Trigger is the number of level-0 tables that triggers compaction
+	//
+	// Optional. Default is 4
+	CompactionL0Trigger int
+
+	// WriteL0PauseTrigger is the number of level-0 tables that triggers write pause
+	//
+	// Optional. Default is 12
+	WriteL0PauseTrigger int
+
+	// WriteL0SlowdownTrigger is the number of level-0 tables that triggers write slowdown
+	//
+	// Optional. Default is 8
+	WriteL0SlowdownTrigger int
+
+	// MaxOpenFiles is the maximum number of open files that can be held
+	//
+	// Optional. Default is 200 on MacOS, 500 on others
+	MaxOpenFiles int
+
+	// CompactionTableSize is the size of compaction table (in MB)
+	//
+	// Optional. Default is 2MB
+	CompactionTableSize int
+
+	// BloomFilterBits is the number of bits used in bloom filter
+	//
+	// Optional. Default is 10 bits/key
+	BloomFilterBits int
+
+	// NoSync completely disables fsync
+	//
+	// Optional. Default is false
+	NoSync bool
+
+	// ReadOnly opens the database in read-only mode
+	//
+	// Optional. Default is false
+	ReadOnly bool
+
+	// ErrorIfMissing returns error if database doesn't exist
+	//
+	// Optional. Default is false
+	ErrorIfMissing bool
+
+	// ErrorIfExist returns error if database exists
+	//
+	// Optional. Default is false
+	ErrorIfExist bool
+
+	// GCInterval is the garbage collection interval
+	//
+	// Optional. Default is 10 minutes
+	GCInterval time.Duration
+}
+```
+
+### Default Config
+
+```go
+var ConfigDefault = Config{
+	DBPath:                 "./fiber.leveldb",
+	CacheSize:              8, // 8 MB
+	BlockSize:              4, // 4 KB
+	WriteBuffer:            4, // 4 MB
+	CompactionL0Trigger:    4,
+	WriteL0PauseTrigger:    12,
+	WriteL0SlowdownTrigger: 8,
+	MaxOpenFiles: func() int {
+		if runtime.GOOS == "darwin" {
+			return 200 // MacOS
+		}
+		return 500 // Unix/Linux
+	}(),
+	CompactionTableSize: 2,  // 2 MB
+	BloomFilterBits:     10, // 10 bits per key
+	NoSync:              false,
+	ReadOnly:            false,
+	ErrorIfMissing:      false,
+	ErrorIfExist:        false,
+	GCInterval:          10 * time.Minute,
+}
+```
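+
+Any field left at its zero value falls back to the default shown above. As a rough, illustrative tuning sketch (the numbers are placeholders, not recommendations):
+
+```go
+store := leveldb.New(leveldb.Config{
+	DBPath:      "./fiber.leveldb",
+	CacheSize:   64,   // larger block cache (MB)
+	WriteBuffer: 16,   // larger write buffer (MB)
+	NoSync:      true, // trade durability for write throughput
+})
+```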
diff --git a/leveldb/config.go b/leveldb/config.go
new file mode 100644
index 00000000..045a4668
--- /dev/null
+++ b/leveldb/config.go
@@ -0,0 +1,163 @@
+package leveldb
+
+import (
+	"runtime"
+	"time"
+)
+
+// Config holds the configuration options for the LevelDB storage
+type Config struct {
+	// DBPath is the filesystem path for the database
+	//
+	// Optional. Default is "./fiber.leveldb"
+	DBPath string
+
+	// CacheSize is the size of LevelDB's cache (in MB)
+	//
+	// Optional. Default is 8MB
+	CacheSize int
+
+	// BlockSize is the size of data blocks (in KB)
+	//
+	// Optional. Default is 4KB
+	BlockSize int
+
+	// WriteBuffer is the size of write buffer (in MB)
+	//
+	// Optional. Default is 4MB
+	WriteBuffer int
+
+	// CompactionL0Trigger is the number of level-0 tables that triggers compaction
+	//
+	// Optional. Default is 4
+	CompactionL0Trigger int
+
+	// WriteL0PauseTrigger is the number of level-0 tables that triggers write pause
+	//
+	// Optional. Default is 12
+	WriteL0PauseTrigger int
+
+	// WriteL0SlowdownTrigger is the number of level-0 tables that triggers write slowdown
+	//
+	// Optional. Default is 8
+	WriteL0SlowdownTrigger int
+
+	// MaxOpenFiles is the maximum number of open files that can be held
+	//
+	// Optional. Default is 200 on MacOS, 500 on others
+	MaxOpenFiles int
+
+	// CompactionTableSize is the size of compaction table (in MB)
+	//
+	// Optional. Default is 2MB
+	CompactionTableSize int
+
+	// BloomFilterBits is the number of bits used in bloom filter
+	//
+	// Optional. Default is 10 bits/key
+	BloomFilterBits int
+
+	// NoSync completely disables fsync
+	//
+	// Optional. Default is false
+	NoSync bool
+
+	// ReadOnly opens the database in read-only mode
+	//
+	// Optional. Default is false
+	ReadOnly bool
+
+	// ErrorIfMissing returns error if database doesn't exist
+	//
+	// Optional. Default is false
+	ErrorIfMissing bool
+
+	// ErrorIfExist returns error if database exists
+	//
+	// Optional. Default is false
+	ErrorIfExist bool
+
+	// GCInterval is the garbage collection interval
+	//
+	// Optional. Default is 10 minutes
+	GCInterval time.Duration
+}
+
+// ConfigDefault is the default config
+var ConfigDefault = Config{
+	DBPath:                 "./fiber.leveldb",
+	CacheSize:              8, // 8 MB
+	BlockSize:              4, // 4 KB
+	WriteBuffer:            4, // 4 MB
+	CompactionL0Trigger:    4,
+	WriteL0PauseTrigger:    12,
+	WriteL0SlowdownTrigger: 8,
+	MaxOpenFiles: func() int {
+		if runtime.GOOS == "darwin" {
+			return 200 // MacOS
+		}
+		return 500 // Unix/Linux
+	}(),
+	CompactionTableSize: 2,  // 2 MB
+	BloomFilterBits:     10, // 10 bits per key
+	NoSync:              false,
+	ReadOnly:            false,
+	ErrorIfMissing:      false,
+	ErrorIfExist:        false,
+	GCInterval:          10 * time.Minute,
+}
+
+// configDefault is a helper function to set default values for the config
+func configDefault(config ...Config) Config {
+	if len(config) < 1 {
+		return ConfigDefault
+	}
+
+	cfg := config[0]
+
+	if cfg.DBPath == "" {
+		cfg.DBPath = ConfigDefault.DBPath
+	}
+
+	if cfg.CacheSize <= 0 {
+		cfg.CacheSize = ConfigDefault.CacheSize
+	}
+
+	if cfg.BlockSize <= 0 {
+		cfg.BlockSize = ConfigDefault.BlockSize
+	}
+
+	if cfg.WriteBuffer <= 0 {
+		cfg.WriteBuffer = ConfigDefault.WriteBuffer
+	}
+
+	if cfg.CompactionL0Trigger <= 0 {
+		cfg.CompactionL0Trigger = ConfigDefault.CompactionL0Trigger
+	}
+
+	if cfg.WriteL0PauseTrigger <= 0 {
+		cfg.WriteL0PauseTrigger = ConfigDefault.WriteL0PauseTrigger
+	}
+
+	if cfg.WriteL0SlowdownTrigger <= 0 {
+		cfg.WriteL0SlowdownTrigger = ConfigDefault.WriteL0SlowdownTrigger
+	}
+
+	if cfg.MaxOpenFiles <= 0 {
+		cfg.MaxOpenFiles = ConfigDefault.MaxOpenFiles
+	}
+
+	if cfg.CompactionTableSize <= 0 {
+		cfg.CompactionTableSize = ConfigDefault.CompactionTableSize
+	}
+
+	if cfg.BloomFilterBits <= 0 {
+		cfg.BloomFilterBits = ConfigDefault.BloomFilterBits
+	}
+
+	if cfg.GCInterval <= 0 {
+		cfg.GCInterval = ConfigDefault.GCInterval
+	}
+
+	return cfg
+}
diff --git a/leveldb/config_test.go b/leveldb/config_test.go
new file mode 100644
index 00000000..1ba90183
--- /dev/null
+++ b/leveldb/config_test.go
@@ -0,0 +1,19 @@
+package leveldb
+
+import (
+	"runtime"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestConfigMaxOpenFiles(t *testing.T) {
+	cfg := Config{
+		MaxOpenFiles: 1000,
+	}
+	assert.Equal(t, 1000, cfg.MaxOpenFiles)
+}
+
+// The MaxOpenFiles default is platform dependent, so derive the expected
+// value from runtime.GOOS instead of hard-coding the macOS figure.
+func TestConfigDefaultMaxOpenFiles(t *testing.T) {
+	cfg := configDefault()
+	expected := 500
+	if runtime.GOOS == "darwin" {
+		expected = 200
+	}
+	assert.Equal(t, expected, cfg.MaxOpenFiles)
+}
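+
+// A quick sanity check on the fallback behaviour of configDefault: zero or
+// negative values are replaced by the matching ConfigDefault entry, while
+// explicitly set values are kept.
+func TestConfigDefaultFallback(t *testing.T) {
+	cfg := configDefault(Config{
+		CacheSize:  -1,
+		GCInterval: 0,
+	})
+	assert.Equal(t, ConfigDefault.CacheSize, cfg.CacheSize)
+	assert.Equal(t, ConfigDefault.GCInterval, cfg.GCInterval)
+
+	cfg = configDefault(Config{BloomFilterBits: 16})
+	assert.Equal(t, 16, cfg.BloomFilterBits)
+}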
diff --git a/leveldb/go.mod b/leveldb/go.mod
new file mode 100644
index 00000000..878c4bdc
--- /dev/null
+++ b/leveldb/go.mod
@@ -0,0 +1,12 @@
+module github.com/gofiber/storage/leveldb
+
+go 1.23.1
+
+require (
+	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/stretchr/testify v1.10.0
+	github.com/syndtr/goleveldb v1.0.0
+	gopkg.in/yaml.v3 v3.0.1 // indirect
+)
diff --git a/leveldb/go.sum b/leveldb/go.sum
new file mode 100644
index 00000000..8495d2a5
--- /dev/null
+++ b/leveldb/go.sum
@@ -0,0 +1,26 @@
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
+github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
+github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/leveldb/leveldb.go b/leveldb/leveldb.go
new file mode 100644
index 00000000..0ae189aa
--- /dev/null
+++ b/leveldb/leveldb.go
@@ -0,0 +1,159 @@
+package leveldb
+
+import (
+	"encoding/json"
+	"errors"
+	"time"
+
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/filter"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+)
+
+// item is the on-disk representation of a value together with its optional expiration
+type item struct {
+	Value    []byte    `json:"value"`
+	ExpireAt time.Time `json:"expire_at"`
+}
+
+// Storage is the LevelDB storage provider
+type Storage struct {
+	db         *leveldb.DB
+	gcInterval time.Duration
+	done       chan struct{}
+}
+
+// New creates a new LevelDB storage
+func New(config ...Config) *Storage {
+	cfg := configDefault(config...)
+
+	// Map the config onto goleveldb's options so the documented tuning
+	// fields take effect (sizes are configured in MB/KB)
+	opts := &opt.Options{
+		BlockCacheCapacity:     cfg.CacheSize * opt.MiB,
+		BlockSize:              cfg.BlockSize * opt.KiB,
+		WriteBuffer:            cfg.WriteBuffer * opt.MiB,
+		CompactionL0Trigger:    cfg.CompactionL0Trigger,
+		WriteL0PauseTrigger:    cfg.WriteL0PauseTrigger,
+		WriteL0SlowdownTrigger: cfg.WriteL0SlowdownTrigger,
+		OpenFilesCacheCapacity: cfg.MaxOpenFiles,
+		CompactionTableSize:    cfg.CompactionTableSize * opt.MiB,
+		Filter:                 filter.NewBloomFilter(cfg.BloomFilterBits),
+		NoSync:                 cfg.NoSync,
+		ReadOnly:               cfg.ReadOnly,
+		ErrorIfMissing:         cfg.ErrorIfMissing,
+		ErrorIfExist:           cfg.ErrorIfExist,
+	}
+
+	db, err := leveldb.OpenFile(cfg.DBPath, opts)
+	if err != nil {
+		panic(err)
+	}
+
+	store := &Storage{
+		db:         db,
+		gcInterval: cfg.GCInterval,
+		done:       make(chan struct{}),
+	}
+
+	go store.gc()
+
+	return store
+}
+
+// Get returns the value for the given key, or nil when the key is missing or expired
+func (s *Storage) Get(key []byte) ([]byte, error) {
+	if len(key) <= 0 {
+		return nil, nil
+	}
+
+	data, err := s.db.Get(key, nil)
+	if err != nil {
+		// A missing key is not an error for the storage contract
+		if errors.Is(err, leveldb.ErrNotFound) {
+			return nil, nil
+		}
+		return nil, err
+	}
+
+	var stored item
+	if err := json.Unmarshal(data, &stored); err != nil {
+		// Entries written without an expiration are stored raw, so return them as-is
+		return data, nil
+	}
+
+	if !stored.ExpireAt.IsZero() && time.Now().After(stored.ExpireAt) {
+		// Lazily drop the expired entry; the GC loop would remove it as well
+		_ = s.db.Delete(key, nil)
+		return nil, nil
+	}
+
+	return stored.Value, nil
+}
+
+// Set stores the value for the given key; an expiration of 0 means the entry never expires
+func (s *Storage) Set(key, value []byte, exp time.Duration) error {
+	if len(key) <= 0 || len(value) <= 0 {
+		return nil
+	}
+	if exp == 0 {
+		return s.db.Put(key, value, nil)
+	}
+
+	data := item{
+		Value:    value,
+		ExpireAt: time.Now().Add(exp),
+	}
+
+	encoded, err := json.Marshal(data)
+	if err != nil {
+		return err
+	}
+	return s.db.Put(key, encoded, nil)
+}
+
+// Delete removes the entry for the given key
+func (s *Storage) Delete(key string) error {
+	if len(key) <= 0 {
+		return nil
+	}
+
+	return s.db.Delete([]byte(key), nil)
+}
+
+// Reset removes all keys from the storage
+func (s *Storage) Reset() error {
+	iter := s.db.NewIterator(nil, nil)
+	defer iter.Release()
+
+	batch := new(leveldb.Batch)
+	for iter.Next() {
+		batch.Delete(iter.Key())
+	}
+
+	if err := iter.Error(); err != nil {
+		return err
+	}
+
+	return s.db.Write(batch, nil)
+}
+
+// Close stops the garbage collector and closes the database
+func (s *Storage) Close() error {
+	s.done <- struct{}{} // stop the GC goroutine
+	return s.db.Close()
+}
+
+// Conn returns the underlying LevelDB client
+func (s *Storage) Conn() *leveldb.DB {
+	return s.db
+}
+
+// gc periodically removes expired entries in the background
+func (s *Storage) gc() {
+	ticker := time.NewTicker(s.gcInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-s.done:
+			return
+		case <-ticker.C:
+			iter := s.db.NewIterator(nil, nil)
+			batch := new(leveldb.Batch)
+
+			for iter.Next() {
+				key := iter.Key()
+				data := iter.Value()
+
+				var stored item
+				if err := json.Unmarshal(data, &stored); err != nil {
+					// Raw (non-expiring) values are skipped
+					continue
+				}
+				if !stored.ExpireAt.IsZero() && time.Now().After(stored.ExpireAt) {
+					batch.Delete(key)
+				}
+			}
+
+			iter.Release()
+
+			if batch.Len() > 0 {
+				_ = s.db.Write(batch, nil)
+			}
+		}
+	}
+}
diff --git a/leveldb/leveldb_test.go b/leveldb/leveldb_test.go
new file mode 100644
index 00000000..9b12086e
--- /dev/null
+++ b/leveldb/leveldb_test.go
@@ -0,0 +1,166 @@
+package leveldb
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/require"
+)
+
+func removeAllFiles(dir string) error {
+	return os.RemoveAll(dir)
+}
+
+func Test_New_EmptyConfig(t *testing.T) {
+	db := New()
+	require.NotNil(t, db)
+
+	_, err := os.Stat("./fiber.leveldb")
+	require.Nil(t, err)
+
+	err = removeAllFiles("./fiber.leveldb")
+	require.Nil(t, err)
+}
+
+func Test_New_WithConfig(t *testing.T) {
+	db := New(Config{
+		DBPath: "./testdb",
+	})
+	require.NotNil(t, db)
+	_, err := os.Stat("./testdb")
+	require.Nil(t, err)
+
+	err = removeAllFiles("./testdb")
+	require.Nil(t, err)
+}
+
+func Test_Set_Overwrite(t *testing.T) {
+	db := New()
+
+	db.Set([]byte("key"), []byte("value"), time.Second*1)
+	db.Set([]byte("key"), []byte("value2"), time.Second*1)
+
+	value, err := db.Get([]byte("key"))
+	require.Nil(t, err)
+	require.Equal(t, []byte("value2"), value)
+
+	err = removeAllFiles("./fiber.leveldb")
+	require.Nil(t, err)
+}
+
+func Test_Get_For0Second(t *testing.T) {
+	db := New()
+
+	db.Set([]byte("key"), []byte("value"), 0)
+
+	_, err := db.Get([]byte("key"))
+	require.Nil(t, err)
+
+	err = removeAllFiles("./fiber.leveldb")
+	require.Nil(t, err)
+}
+
+func Test_Get_ForExpired1Second(t *testing.T) {
+	db := New()
+
+	db.Set([]byte("key"), []byte("value"), time.Second*1)
+
+	time.Sleep(time.Second * 2)
+
+	value, err := db.Get([]byte("key"))
+	require.Nil(t, err)
+	require.Nil(t, value)
+
+	err = removeAllFiles("./fiber.leveldb")
+	require.Nil(t, err)
+}
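+
+// Reading a key that was never written should neither fail nor return data.
+func Test_Get_NonExistentKey(t *testing.T) {
+	db := New()
+
+	value, err := db.Get([]byte("missing"))
+	require.Nil(t, err)
+	require.Nil(t, value)
+
+	err = removeAllFiles("./fiber.leveldb")
+	require.Nil(t, err)
+}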
+
+func Test_Delete_WhileThereIsData(t *testing.T) {
+	db := New()
+
+	db.Set([]byte("key"), []byte("value"), time.Second*1)
+
+	err := db.Delete("key")
+	require.Nil(t, err)
+
+	value, err := db.Get([]byte("key"))
+	require.Nil(t, err)
+	require.Nil(t, value)
+
+	err = removeAllFiles("./fiber.leveldb")
+	require.Nil(t, err)
+}
+
+func Test_Reset(t *testing.T) {
+	db := New()
+
+	db.Set([]byte("key1"), []byte("value1"), time.Second*1)
+	db.Set([]byte("key2"), []byte("value2"), time.Second*1)
+	db.Set([]byte("key3"), []byte("value3"), time.Second*1)
+
+	db.Reset()
+
+	value, err := db.Get([]byte("key1"))
+	require.Nil(t, err)
+	require.Nil(t, value)
+
+	value, err = db.Get([]byte("key2"))
+	require.Nil(t, err)
+	require.Nil(t, value)
+
+	value, err = db.Get([]byte("key3"))
+	require.Nil(t, err)
+	require.Nil(t, value)
+
+	err = removeAllFiles("./fiber.leveldb")
+	require.Nil(t, err)
+}
+
+func Test_Close(t *testing.T) {
+	db := New()
+
+	db.Close()
+
+	err := db.Conn().Put([]byte("key"), []byte("value"), nil)
+	require.Error(t, err)
+
+	err = removeAllFiles("./fiber.leveldb")
+	require.Nil(t, err)
+}
+
+func Test_GarbageCollection_AfterWorking(t *testing.T) {
+	db := New(Config{
+		GCInterval: time.Second * 1,
+	})
+
+	db.Set([]byte("key"), []byte("value"), time.Second*1)
+
+	time.Sleep(time.Second * 2)
+
+	value, err := db.Conn().Get([]byte("key"), nil)
+	require.Error(t, err)
+	require.Equal(t, []byte{}, value)
+
+	err = removeAllFiles("./fiber.leveldb")
+	require.Nil(t, err)
+}
+
+func Test_GarbageCollection_BeforeWorking(t *testing.T) {
+	db := New(Config{
+		GCInterval: time.Second * 1,
+	})
+
+	db.Set([]byte("key"), []byte("value"), time.Second*1)
+
+	//time.Sleep(time.Second * 2)
+
+	value, err := db.Conn().Get([]byte("key"), nil)
+	require.Nil(t, err)
+	require.NotNil(t, value)
+
+	err = removeAllFiles("./fiber.leveldb")
+	require.Nil(t, err)
+}
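+
+// Rough benchmarks for the hot paths; run with `go test -bench=. -benchmem`.
+// The keys and values are tiny placeholders, so the numbers only indicate
+// relative cost, not real-world throughput.
+func Benchmark_LevelDB_Set(b *testing.B) {
+	db := New()
+	defer func() {
+		_ = db.Close()
+		_ = removeAllFiles("./fiber.leveldb")
+	}()
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_ = db.Set([]byte("john"), []byte("doe"), 0)
+	}
+}
+
+func Benchmark_LevelDB_Get(b *testing.B) {
+	db := New()
+	defer func() {
+		_ = db.Close()
+		_ = removeAllFiles("./fiber.leveldb")
+	}()
+
+	_ = db.Set([]byte("john"), []byte("doe"), 0)
+
+	b.ReportAllocs()
+	b.ResetTimer()
+	for i := 0; i < b.N; i++ {
+		_, _ = db.Get([]byte("john"))
+	}
+}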