Mirror of https://github.com/gofiber/storage.git, synced 2025-09-27 04:46:08 +08:00

Compare commits: valkey/v0. ... leveldb/v0 (24 commits)
Commits:
6b31862a33
785e71215f
ea98e0ac2c
a2f349766f
b60700c0b9
3f5304479a
1fa944bef7
fbfc6d9394
165b2ae02a
174e393340
fb997fd4b5
a3bf4c9e20
45a0bc547e
40f3844551
1b4c2425db
d8b1437f50
b5d51650a3
863ef71f8b
f2a5ba2e7c
85b4a99042
44d93745d1
b4cd42b73e
10116f925d
de3b493523
.github/workflows/test-leveldb.yml (vendored, new file, 28 lines)
@@ -0,0 +1,28 @@
on:
  push:
    branches:
      - master
      - main
    paths:
      - 'leveldb/**'
  pull_request:
    paths:
      - 'leveldb/**'
name: "Tests LevelDB"
jobs:
  Tests:
    strategy:
      matrix:
        go-version:
          - 1.23.x
          - 1.24.x
    runs-on: ubuntu-latest
    steps:
      - name: Fetch Repository
        uses: actions/checkout@v4
      - name: Install Go
        uses: actions/setup-go@v5
        with:
          go-version: '${{ matrix.go-version }}'
      - name: Test LevelDB
        run: cd ./leveldb && go test ./... -v -race
README.md
@@ -59,6 +59,7 @@ type Storage interface {
- [Couchbase](./couchbase/README.md) <a href="https://github.com/gofiber/storage/actions?query=workflow%3A%22Tests+Couchbase%22"> <img src="https://img.shields.io/github/actions/workflow/status/gofiber/storage/test-couchbase.yml?branch=main&label=%F0%9F%A7%AA%20&style=flat&color=75C46B" /> </a>
- [DynamoDB](./dynamodb/README.md) <a href="https://github.com/gofiber/storage/actions?query=workflow%3A%22Tests+DynamoDB%22"> <img src="https://img.shields.io/github/actions/workflow/status/gofiber/storage/test-dynamodb.yml?branch=main&label=%F0%9F%A7%AA%20&style=flat&color=75C46B" /> </a>
- [Etcd](./etcd/README.md) <a href="https://github.com/gofiber/storage/actions?query=workflow%3A%22Tests+Etcd%22"> <img src="https://img.shields.io/github/actions/workflow/status/gofiber/storage/test-etcd.yml?branch=main&label=%F0%9F%A7%AA%20&style=flat&color=75C46B" /> </a>
- [LevelDB](./leveldb/README.md) <a href="https://github.com/gofiber/storage/actions?query=workflow%3A%22Tests+LevelDB%22"> <img src="https://img.shields.io/github/actions/workflow/status/gofiber/storage/test-leveldb.yml?branch=main&label=%F0%9F%A7%AA%20&style=flat&color=75C46B" alt="LevelDB Tests Status"/> </a>
- [Memcache](./memcache/README.md) <a href="https://github.com/gofiber/storage/actions?query=workflow%3A%22Tests+Memcache%22"> <img src="https://img.shields.io/github/actions/workflow/status/gofiber/storage/test-memcache.yml?branch=main&label=%F0%9F%A7%AA%20&style=flat&color=75C46B" /> </a>
- [Memory](./memory/README.md) <a href="https://github.com/gofiber/storage/actions?query=workflow%3A%22Tests+Local+Storage%22"> <img src="https://img.shields.io/github/actions/workflow/status/gofiber/storage/test-memory.yml?branch=main&label=%F0%9F%A7%AA%20&style=flat&color=75C46B" /> </a>
- [Minio](./minio/README.md) <a href="https://github.com/gofiber/storage/actions?query=workflow%3A%22Tests+Minio%22"> <img src="https://img.shields.io/github/actions/workflow/status/gofiber/storage/test-minio.yml?branch=main&label=%F0%9F%A7%AA%20&style=flat&color=75C46B" /> </a>
@@ -76,4 +77,4 @@ type Storage interface {
- [ScyllaDB](./scylladb/README.md) <a href="https://github.com/gofiber/storage/actions?query=workflow%3A%22Tests+scylladb%22"> <img src="https://img.shields.io/github/actions/workflow/status/gofiber/storage/test-scylladb.yml?branch=main&label=%F0%9F%A7%AA%20&style=flat&color=75C46B" /> </a>
- [SQLite3](./sqlite3/README.md) <a href="https://github.com/gofiber/storage/actions?query=workflow%3A%22Tests+Sqlite3%22"> <img src="https://img.shields.io/github/actions/workflow/status/gofiber/storage/test-sqlite3.yml?branch=main&label=%F0%9F%A7%AA%20&style=flat&color=75C46B" /> </a>
- [ClickHouse](./clickhouse/README.md) <a href="https://github.com/gofiber/storage/actions?query=workflow%3A%22Tests+Clickhouse%22"> <img src="https://img.shields.io/github/actions/workflow/status/gofiber/storage/test-clickhouse.yml?branch=main&label=%F0%9F%A7%AA%20&style=flat&color=75C46B" /> </a>
- [Valkey](./valkey/README.md) <a href="https://github.com/gofiber/storage/actions?query=workflow%3A%22Tests+valkey%22"> <img src="https://img.shields.io/github/actions/workflow/status/gofiber/storage/test-valkey.yml?branch=main&label=%F0%9F%A7%AA%20&style=flat&color=75C46B" /> </a>
- [Valkey](./valkey/README.md) <a href="https://github.com/gofiber/storage/actions?query=workflow%3A%22Tests+valkey%22"> <img src="https://img.shields.io/github/actions/workflow/status/gofiber/storage/test-valkey.yml?branch=main&label=%F0%9F%A7%AA%20&style=flat&color=75C46B" /> </a>
leveldb/README.md (new file, 174 lines)
@@ -0,0 +1,174 @@
---
id: leveldb
title: LevelDB
---

[Discord](https://gofiber.io/discord)

A fast key-value DB using [syndtr/goleveldb](https://github.com/syndtr/goleveldb)

### Table of Contents

- [Signatures](#signatures)
- [Installation](#installation)
- [Examples](#examples)
- [Config](#config)
- [Default Config](#default-config)

### Signatures

```go
func New(config ...Config) Storage
func (s *Storage) Get(key string) ([]byte, error)
func (s *Storage) Set(key string, val []byte, exp time.Duration) error
func (s *Storage) Delete(key string) error
func (s *Storage) Reset() error
func (s *Storage) Close() error
func (s *Storage) Conn() *leveldb.DB
```

### Installation

LevelDB is tested on the last two [Go versions](https://golang.org/dl/) with support for modules. So make sure to initialize a module first if you haven't done so yet:

```bash
go mod init github.com/<user>/<repo>
```

And then install the leveldb implementation:

```bash
go get github.com/gofiber/storage/leveldb
```

### Examples

Import the storage package.

```go
import "github.com/gofiber/storage/leveldb"
```

You can use the following possibilities to create a storage:

```go
// Initialize default config
store := leveldb.New()

// Initialize custom config
store := leveldb.New(leveldb.Config{
    Path:       "./testdb",
    GCInterval: 10 * time.Second,
})
```
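For completeness, here is a minimal usage sketch (not part of the committed README). It follows the method signatures defined in leveldb/leveldb.go later in this diff, where Get and Set take []byte keys and Delete takes a string key; the key, value, and path literals are illustrative.

```go
package main

import (
    "fmt"
    "time"

    "github.com/gofiber/storage/leveldb"
)

func main() {
    // Illustrative path; any writable directory works.
    store := leveldb.New(leveldb.Config{Path: "./testdb"})
    defer store.Close()

    // Store a value with a 1-minute TTL, then read it back.
    if err := store.Set([]byte("session:123"), []byte("some-data"), time.Minute); err != nil {
        panic(err)
    }

    val, err := store.Get([]byte("session:123"))
    if err != nil {
        panic(err)
    }
    fmt.Println(string(val)) // "some-data"

    // Delete takes a string key in this implementation.
    if err := store.Delete("session:123"); err != nil {
        panic(err)
    }
}
```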
### Config

```go
type Config struct {
    // Path is the filesystem path for the database
    //
    // Optional. Default is "./fiber.leveldb"
    Path string

    // CacheSize is the size of LevelDB's cache (in MB)
    //
    // Optional. Default is 8MB
    CacheSize int

    // BlockSize is the size of data blocks (in KB)
    //
    // Optional. Default is 4KB
    BlockSize int

    // WriteBuffer is the size of write buffer (in MB)
    //
    // Optional. Default is 4MB
    WriteBuffer int

    // CompactionL0Trigger is the number of level-0 tables that triggers compaction
    //
    // Optional. Default is 4
    CompactionL0Trigger int

    // WriteL0PauseTrigger is the number of level-0 tables that triggers write pause
    //
    // Optional. Default is 12
    WriteL0PauseTrigger int

    // WriteL0SlowdownTrigger is the number of level-0 tables that triggers write slowdown
    //
    // Optional. Default is 8
    WriteL0SlowdownTrigger int

    // MaxOpenFiles is the maximum number of open files that can be held
    //
    // Optional. Default is 200 on MacOS, 500 on others
    MaxOpenFiles int

    // CompactionTableSize is the size of compaction table (in MB)
    //
    // Optional. Default is 2MB
    CompactionTableSize int

    // BloomFilterBits is the number of bits used in bloom filter
    //
    // Optional. Default is 10 bits/key
    BloomFilterBits int

    // NoSync completely disables fsync
    //
    // Optional. Default is false
    NoSync bool

    // ReadOnly opens the database in read-only mode
    //
    // Optional. Default is false
    ReadOnly bool

    // ErrorIfMissing returns error if database doesn't exist
    //
    // Optional. Default is false
    ErrorIfMissing bool

    // ErrorIfExist returns error if database exists
    //
    // Optional. Default is false
    ErrorIfExist bool

    // GCInterval is the garbage collection interval
    //
    // Optional. Default is 10 minutes
    GCInterval time.Duration
}
```

### Default Config

```go
var ConfigDefault = Config{
    Path:                   "./fiber.leveldb",
    CacheSize:              8, // 8 MB
    BlockSize:              4, // 4 KB
    WriteBuffer:            4, // 4 MB
    CompactionL0Trigger:    4,
    WriteL0PauseTrigger:    12,
    WriteL0SlowdownTrigger: 8,
    MaxOpenFiles: func() int {
        if runtime.GOOS == "darwin" {
            return 200 // MacOS
        }
        return 500 // Unix/Linux
    }(),
    CompactionTableSize: 2,  // 2 MB
    BloomFilterBits:     10, // 10 bits per key
    NoSync:              false,
    ReadOnly:            false,
    ErrorIfMissing:      false,
    ErrorIfExist:        false,
    GCInterval:          10 * time.Minute,
}
```
leveldb/config.go (new file, 163 lines)
@@ -0,0 +1,163 @@
package leveldb

import (
    "runtime"
    "time"
)

// Config holds the configuration options for LevelDB database
type Config struct {
    // Path is the filesystem path for the database
    //
    // Optional. Default is "./fiber.leveldb"
    Path string

    // CacheSize is the size of LevelDB's cache (in MB)
    //
    // Optional. Default is 8MB
    CacheSize int

    // BlockSize is the size of data blocks (in KB)
    //
    // Optional. Default is 4KB
    BlockSize int

    // WriteBuffer is the size of write buffer (in MB)
    //
    // Optional. Default is 4MB
    WriteBuffer int

    // CompactionL0Trigger is the number of level-0 tables that triggers compaction
    //
    // Optional. Default is 4
    CompactionL0Trigger int

    // WriteL0PauseTrigger is the number of level-0 tables that triggers write pause
    //
    // Optional. Default is 12
    WriteL0PauseTrigger int

    // WriteL0SlowdownTrigger is the number of level-0 tables that triggers write slowdown
    //
    // Optional. Default is 8
    WriteL0SlowdownTrigger int

    // MaxOpenFiles is the maximum number of open files that can be held
    //
    // Optional. Default is 200 on MacOS, 500 on others
    MaxOpenFiles int

    // CompactionTableSize is the size of compaction table (in MB)
    //
    // Optional. Default is 2MB
    CompactionTableSize int

    // BloomFilterBits is the number of bits used in bloom filter
    //
    // Optional. Default is 10 bits/key
    BloomFilterBits int

    // NoSync completely disables fsync
    //
    // Optional. Default is false
    NoSync bool

    // ReadOnly opens the database in read-only mode
    //
    // Optional. Default is false
    ReadOnly bool

    // ErrorIfMissing returns error if database doesn't exist
    //
    // Optional. Default is false
    ErrorIfMissing bool

    // ErrorIfExist returns error if database exists
    //
    // Optional. Default is false
    ErrorIfExist bool

    // GCInterval is the garbage collection interval
    //
    // Optional. Default is 10 minutes
    GCInterval time.Duration
}

// ConfigDefault is the default config
var ConfigDefault = Config{
    Path:                   "./fiber.leveldb",
    CacheSize:              8, // 8 MB
    BlockSize:              4, // 4 KB
    WriteBuffer:            4, // 4 MB
    CompactionL0Trigger:    4,
    WriteL0PauseTrigger:    12,
    WriteL0SlowdownTrigger: 8,
    MaxOpenFiles: func() int {
        if runtime.GOOS == "darwin" {
            return 200 // MacOS
        }
        return 500 // Unix/Linux
    }(),
    CompactionTableSize: 2,  // 2 MB
    BloomFilterBits:     10, // 10 bits per key
    NoSync:              false,
    ReadOnly:            false,
    ErrorIfMissing:      false,
    ErrorIfExist:        false,
    GCInterval:          10 * time.Minute,
}

// configDefault is a helper function to set default values for the config
func configDefault(config ...Config) Config {
    if len(config) < 1 {
        return ConfigDefault
    }

    cfg := config[0]

    if cfg.Path == "" {
        cfg.Path = ConfigDefault.Path
    }

    if cfg.CacheSize <= 0 {
        cfg.CacheSize = ConfigDefault.CacheSize
    }

    if cfg.BlockSize <= 0 {
        cfg.BlockSize = ConfigDefault.BlockSize
    }

    if cfg.WriteBuffer <= 0 {
        cfg.WriteBuffer = ConfigDefault.WriteBuffer
    }

    if cfg.CompactionL0Trigger <= 0 {
        cfg.CompactionL0Trigger = ConfigDefault.CompactionL0Trigger
    }

    if cfg.WriteL0PauseTrigger <= 0 {
        cfg.WriteL0PauseTrigger = ConfigDefault.WriteL0PauseTrigger
    }

    if cfg.WriteL0SlowdownTrigger <= 0 {
        cfg.WriteL0SlowdownTrigger = ConfigDefault.WriteL0SlowdownTrigger
    }

    if cfg.MaxOpenFiles <= 0 {
        cfg.MaxOpenFiles = ConfigDefault.MaxOpenFiles
    }

    if cfg.CompactionTableSize <= 0 {
        cfg.CompactionTableSize = ConfigDefault.CompactionTableSize
    }

    if cfg.BloomFilterBits <= 0 {
        cfg.BloomFilterBits = ConfigDefault.BloomFilterBits
    }

    if cfg.GCInterval <= 0 {
        cfg.GCInterval = ConfigDefault.GCInterval
    }

    return cfg
}
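As a quick illustration of the merge behavior above, here is a hypothetical test sketch (not part of this change): fields set explicitly are kept, while zero-valued fields fall back to ConfigDefault.

```go
package leveldb

import (
    "testing"
    "time"

    "github.com/stretchr/testify/require"
)

// Hypothetical sketch: configDefault keeps explicitly set fields and
// fills every zero-valued field from ConfigDefault.
func Test_ConfigDefault_PartialOverride(t *testing.T) {
    cfg := configDefault(Config{CacheSize: 16})

    require.Equal(t, 16, cfg.CacheSize)              // explicit value kept
    require.Equal(t, "./fiber.leveldb", cfg.Path)    // default applied
    require.Equal(t, 10*time.Minute, cfg.GCInterval) // default applied
}
```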
leveldb/config_test.go (new file, 24 lines)
@@ -0,0 +1,24 @@
package leveldb

import (
    "runtime"
    "testing"

    "github.com/stretchr/testify/require"
)

func TestConfigConfigMaxOpenFiles(t *testing.T) {
    cfg := Config{
        MaxOpenFiles: 1000,
    }
    require.Equal(t, 1000, cfg.MaxOpenFiles)
}

func TestConfigDefaultDarwin(t *testing.T) { // MacOS
    cfg := configDefault()
    if runtime.GOOS == "darwin" {
        require.Equal(t, 200, cfg.MaxOpenFiles)
    } else {
        require.Equal(t, 500, cfg.MaxOpenFiles)
    }
}
leveldb/go.mod (new file, 12 lines)
@@ -0,0 +1,12 @@
module github.com/gofiber/storage/leveldb

go 1.23

require (
    github.com/davecgh/go-spew v1.1.1 // indirect
    github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db // indirect
    github.com/pmezard/go-difflib v1.0.0 // indirect
    github.com/stretchr/testify v1.10.0 // indirect
    github.com/syndtr/goleveldb v1.0.0 // indirect
    gopkg.in/yaml.v3 v3.0.1 // indirect
)
leveldb/go.sum (new file, 26 lines)
@@ -0,0 +1,26 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE=
github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
leveldb/leveldb.go (new file, 159 lines)
@@ -0,0 +1,159 @@
package leveldb

import (
    "encoding/json"
    "time"

    "github.com/syndtr/goleveldb/leveldb"
)

// item is the data structure for storing entries in the database
type item struct {
    Value    []byte    `json:"value"`
    ExpireAt time.Time `json:"expire_at"`
}

// Storage implements the fiber storage interface for LevelDB
type Storage struct {
    db         *leveldb.DB
    gcInterval time.Duration
    done       chan struct{}
}

// New creates a new LevelDB storage
func New(config ...Config) *Storage {
    cfg := configDefault(config...)

    db, err := leveldb.OpenFile(cfg.Path, nil)
    if err != nil {
        panic(err)
    }

    store := &Storage{
        db:         db,
        gcInterval: cfg.GCInterval,
        done:       make(chan struct{}),
    }

    go store.gc()

    return store
}

// Get value by key
func (s *Storage) Get(key []byte) ([]byte, error) {
    if len(key) <= 0 {
        return nil, nil
    }

    data, err := s.db.Get(key, nil)
    if err != nil {
        return nil, nil
    }

    var stored item
    if err := json.Unmarshal(data, &stored); err != nil {
        // Not JSON-wrapped: the value was stored without an expiry, return it as-is.
        return data, nil
    }

    // Lazily delete the key if it has expired.
    if !stored.ExpireAt.IsZero() && time.Now().After(stored.ExpireAt) {
        if err := s.Delete(string(key)); err != nil {
            return nil, err
        }
        return nil, nil
    }

    return stored.Value, nil
}

// Set key with value
func (s *Storage) Set(key, value []byte, exp time.Duration) error {
    if len(key) <= 0 || len(value) <= 0 {
        return nil
    }
    // No expiry: store the raw value without the JSON wrapper.
    if exp == 0 {
        return s.db.Put(key, value, nil)
    }

    data := item{
        Value:    value,
        ExpireAt: time.Now().Add(exp),
    }

    encoded, err := json.Marshal(data)
    if err != nil {
        return err
    }
    return s.db.Put(key, encoded, nil)
}

// Delete entry by key
func (s *Storage) Delete(key string) error {
    if len(key) <= 0 {
        return nil
    }

    return s.db.Delete([]byte(key), nil)
}

// Reset all keys
func (s *Storage) Reset() error {
    iter := s.db.NewIterator(nil, nil)
    defer iter.Release()

    for iter.Next() {
        key := iter.Key()
        if err := s.db.Delete(key, nil); err != nil {
            return err
        }
    }

    return iter.Error()
}

// Close the storage and stop the GC goroutine
func (s *Storage) Close() error {
    s.done <- struct{}{} // GC stop
    close(s.done)
    return s.db.Close()
}

// Conn returns the underlying database client
func (s *Storage) Conn() *leveldb.DB {
    return s.db
}

// gc is a helper function to clean up expired keys
func (s *Storage) gc() {
    ticker := time.NewTicker(s.gcInterval)
    defer ticker.Stop()

    for {
        select {
        case <-s.done:
            return
        case <-ticker.C:
            iter := s.db.NewIterator(nil, nil)
            batch := new(leveldb.Batch)

            for iter.Next() {
                key := iter.Key()
                data := iter.Value()

                var stored item
                if err := json.Unmarshal(data, &stored); err != nil {
                    // Raw (non-expiring) values are skipped.
                    continue
                }
                if !stored.ExpireAt.IsZero() && time.Now().After(stored.ExpireAt) {
                    batch.Delete(key)
                }
            }

            iter.Release()

            if batch.Len() > 0 {
                _ = s.db.Write(batch, nil)
            }
        }
    }
}
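To make the TTL handling above concrete, here is a small sketch (illustrative, not part of this change) of what ends up on disk: a non-zero expiry stores a JSON-encoded item wrapper, while exp == 0 stores the raw bytes. The path and keys are placeholders.

```go
package main

import (
    "fmt"
    "time"

    "github.com/gofiber/storage/leveldb"
)

func main() {
    store := leveldb.New(leveldb.Config{Path: "./example.leveldb"}) // illustrative path
    defer store.Close()

    // exp > 0: the value is wrapped in a JSON-encoded item{Value, ExpireAt}.
    _ = store.Set([]byte("with-ttl"), []byte("v"), time.Minute)

    // exp == 0: the raw bytes are written as-is.
    _ = store.Set([]byte("no-ttl"), []byte("v"), 0)

    // Peek at the on-disk representation through the underlying *leveldb.DB.
    raw, _ := store.Conn().Get([]byte("with-ttl"), nil)
    fmt.Println(string(raw)) // e.g. {"value":"dg==","expire_at":"..."}

    raw, _ = store.Conn().Get([]byte("no-ttl"), nil)
    fmt.Println(string(raw)) // v
}
```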
leveldb/leveldb_test.go (new file, 284 lines)
@@ -0,0 +1,284 @@
package leveldb

import (
    "os"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
)

func removeAllFiles(dir string) error {
    return os.RemoveAll(dir)
}

func Test_New_EmptyConfig(t *testing.T) {
    db := New()
    require.NotNil(t, db)

    _, err := os.Stat("./fiber.leveldb")
    require.Nil(t, err)

    err = removeAllFiles("./fiber.leveldb")
    require.Nil(t, err)
}

func Test_New_WithConfig(t *testing.T) {
    db := New(Config{
        Path: "./testdb",
    })
    require.NotNil(t, db)
    _, err := os.Stat("./testdb")
    require.Nil(t, err)

    err = removeAllFiles("./testdb")
    require.Nil(t, err)
}

func Test_Set_Overwrite(t *testing.T) {
    db := New()

    db.Set([]byte("key"), []byte("value"), time.Second*1)
    db.Set([]byte("key"), []byte("value2"), time.Second*1)

    value, err := db.Get([]byte("key"))
    require.Nil(t, err)
    require.Equal(t, []byte("value2"), value)

    err = removeAllFiles("./fiber.leveldb")
    require.Nil(t, err)
}

func Test_Get_For0Second(t *testing.T) {
    db := New()

    db.Set([]byte("key"), []byte("value"), 0)

    _, err := db.Get([]byte("key"))
    require.Nil(t, err)

    err = removeAllFiles("./fiber.leveldb")
    require.Nil(t, err)
}

func Test_Get_ForExpired100Millisecond(t *testing.T) {
    db := New()

    require.NoError(t, db.Set([]byte("key"), []byte("value"), time.Millisecond*100))

    // Wait for the key to be deleted
    deadline := time.Now().Add(time.Second)
    for time.Now().Before(deadline) {
        value, err := db.Get([]byte("key"))
        if err == nil && value == nil {
            break
        }
        time.Sleep(time.Millisecond * 10)
    }

    value, err := db.Get([]byte("key"))
    require.Nil(t, err)
    require.Nil(t, value)

    err = removeAllFiles("./fiber.leveldb")
    require.Nil(t, err)
}

func Test_Delete_WhileThereIsData(t *testing.T) {
    db := New()

    db.Set([]byte("key"), []byte("value"), time.Second*1)

    err := db.Delete("key")
    require.Nil(t, err)

    value, err := db.Get([]byte("key"))
    require.Nil(t, err)
    require.Nil(t, value)

    err = removeAllFiles("./fiber.leveldb")
    require.Nil(t, err)
}

func Test_Reset(t *testing.T) {
    db := New()

    db.Set([]byte("key1"), []byte("value1"), time.Second*1)
    db.Set([]byte("key2"), []byte("value2"), time.Second*1)
    db.Set([]byte("key3"), []byte("value3"), time.Second*1)

    require.NoError(t, db.Reset())

    value, err := db.Get([]byte("key1"))
    require.Nil(t, err)
    require.Nil(t, value)

    value, err = db.Get([]byte("key2"))
    require.Nil(t, err)
    require.Nil(t, value)

    value, err = db.Get([]byte("key3"))
    require.Nil(t, err)
    require.Nil(t, value)

    err = removeAllFiles("./fiber.leveldb")
    require.Nil(t, err)
}

func Test_Close(t *testing.T) {
    db := New()

    db.Close()

    err := db.Conn().Put([]byte("key"), []byte("value"), nil)
    require.Error(t, err)

    err = removeAllFiles("./fiber.leveldb")
    require.Nil(t, err)
}

func Test_GarbageCollection_AfterWorking(t *testing.T) {
    db := New(Config{
        GCInterval: time.Millisecond * 100,
    })

    require.NoError(t, db.Set([]byte("key"), []byte("value"), time.Millisecond*100))

    deadline := time.Now().Add(time.Second)
    for time.Now().Before(deadline) {
        _, err := db.Conn().Get([]byte("key"), nil)
        if err != nil {
            break
        }
        time.Sleep(time.Millisecond * 10)
    }

    value, err := db.Conn().Get([]byte("key"), nil)
    require.Error(t, err)
    require.Equal(t, []byte{}, value)

    err = removeAllFiles("./fiber.leveldb")
    require.Nil(t, err)
}

func Test_GarbageCollection_BeforeWorking(t *testing.T) {
    t.Cleanup(func() {
        require.NoError(t, removeAllFiles("./fiber.leveldb"))
    })

    db := New(Config{
        GCInterval: time.Second * 1,
    })
    require.NoError(t, db.Set([]byte("key"), []byte("value"), time.Second*1))

    value, err := db.Conn().Get([]byte("key"), nil)
    require.Nil(t, err)
    require.NotNil(t, value)
}

func Test_GarbageCollection_Interval(t *testing.T) {
    t.Cleanup(func() {
        require.NoError(t, removeAllFiles("./fiber.leveldb"))
    })

    db := New(Config{
        GCInterval: time.Hour, // long interval
    })
    require.NoError(t, db.Set([]byte("key"), []byte("value"), time.Millisecond))

    // The value should still exist because the GC has not run yet
    deadline := time.Now().Add(time.Millisecond * 100)
    for time.Now().Before(deadline) {
        value, err := db.Conn().Get([]byte("key"), nil)
        if err == nil && value != nil {
            return
        }
        time.Sleep(time.Millisecond * 10)
    }

    t.Error("value should still exist as GC hasn't run yet")
}

func Test_Close_Channel(t *testing.T) {
    db := New()

    err := db.Close()
    require.Nil(t, err)

    select {
    case _, ok := <-db.done:
        require.False(t, ok, "channel should be closed")
    default:
        t.Error("channel should be closed")
    }

    err = removeAllFiles("./fiber.leveldb")
    require.Nil(t, err)
}

func Benchmark_Set(b *testing.B) {
    db := New()
    defer func() {
        _ = db.Close()
        _ = removeAllFiles("./fiber.leveldb")
    }()

    key := []byte("test_key")
    value := []byte("test_value")

    b.ResetTimer()
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            if err := db.Set(key, value, 0); err != nil {
                b.Fatal(err)
            }
        }
    })
}

func Benchmark_Get(b *testing.B) {
    db := New()
    defer func() {
        _ = db.Close()
        _ = removeAllFiles("./fiber.leveldb")
    }()

    key := []byte("test_key")
    value := []byte("test_value")
    if err := db.Set(key, value, 0); err != nil {
        b.Fatal(err)
    }

    b.ResetTimer()
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            if _, err := db.Get(key); err != nil {
                b.Fatal(err)
            }
        }
    })
}

func Benchmark_Delete(b *testing.B) {
    db := New()
    defer func() {
        _ = db.Close()
        _ = removeAllFiles("./fiber.leveldb")
    }()

    key := "test_key"
    if err := db.Set([]byte(key), []byte("test_value"), 0); err != nil {
        b.Fatal(err)
    }

    b.ResetTimer()
    b.RunParallel(func(pb *testing.PB) {
        for pb.Next() {
            if err := db.Delete(key); err != nil {
                b.Fatal(err)
            }
        }
    })
}
neo4j/README.md
@@ -3,9 +3,15 @@ id: neo4j
title: Neo4j
---

[Discord](https://gofiber.io/discord)

A Neo4j storage driver using [neo4j/neo4j-go-driver](https://github.com/neo4j/neo4j-go-driver).

> **Note: Requires latest two release of Golang**
> **Note: Requires latest two releases of Golang**

### Table of Contents

@@ -18,7 +24,7 @@ A Neo4j storage driver using [neo4j/neo4j-go-driver](https://github.com/neo4j/ne
### Signatures

```go
func New(config ...Config) Storage
func New(config ...Config) *Storage
func (s *Storage) Get(key string) ([]byte, error)
func (s *Storage) Set(key string, val []byte, exp time.Duration) error
func (s *Storage) Delete(key string) error
@@ -46,17 +52,19 @@ go get github.com/gofiber/storage/neo4j
Import the storage package.

```go
import neo4jstore "github.com/gofiber/storage/neo4j"
import "github.com/gofiber/storage/neo4j"
```

You can use the following possibilities to create a storage:

> The `neo4j` package name used in this example is the package name (and default import name) for this storage driver. Feel free to import it with a custom name to avoid confusing it with the neo4j-go-driver package, which also uses `neo4j` as its package name (and default import name).

```go
// Initialize default config
store := neo4j.New()

// Initialize custom config
store := neo4j.New(neo4jstore.Config{
store := neo4j.New(neo4j.Config{
    DB:    driver,
    Node:  "fiber_storage",
    Reset: false,
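For context, the `driver` value passed as `DB` above is a neo4j DriverWithContext from the neo4j-go-driver package. The sketch below is illustrative and not part of this change: it assumes the v5 driver, uses a placeholder URI and credentials, and aliases the driver package to sidestep the name clash described in the note above.

```go
package main

import (
    "github.com/gofiber/storage/neo4j"
    neo4jdrv "github.com/neo4j/neo4j-go-driver/v5/neo4j"
)

func main() {
    // Placeholder URI and credentials; assumes the v5 neo4j-go-driver.
    driver, err := neo4jdrv.NewDriverWithContext(
        "neo4j://localhost:7687",
        neo4jdrv.BasicAuth("neo4j", "password", ""),
    )
    if err != nil {
        panic(err)
    }

    // Pass the driver to the storage config; Node defaults to "fiber_storage".
    store := neo4j.New(neo4j.Config{
        DB:   driver,
        Node: "fiber_storage",
    })
    _ = store
}
```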
@@ -66,12 +74,14 @@ store := neo4j.New(neo4jstore.Config{

### Config

> The `neo4j`, `auth`, and `config` package names used here belong to the neo4j-go-driver package.

```go
// Config defines the config for storage.
type Config struct {
    // Connection pool
    //
    // DB neo4j.DriverWithContext object will override connection uri and other connection fields.
    // DB neo4j.DriverWithContext object will override connection URI and other connection fields.
    //
    // Optional. Default is nil.
    DB neo4j.DriverWithContext
@@ -108,12 +118,12 @@ type Config struct {
    // Optional. Default is "fiber_storage"
    Node string

    // Reset clears any existing keys in existing Table
    // Reset clears any existing keys (Nodes)
    //
    // Optional. Default is false
    Reset bool

    // Time before deleting expired keys
    // Time before deleting expired keys (Nodes)
    //
    // Optional. Default is 10 * time.Second
    GCInterval time.Duration