WIP: raft

Ingo Oppermann
2023-04-13 09:42:19 +02:00
parent ffdf6d3323
commit e6b64dbe97
256 changed files with 37676 additions and 666 deletions


@@ -624,6 +624,8 @@ func (a *api) start() error {
if cluster, err := cluster.New(cluster.ClusterConfig{
ID: cfg.ID,
Name: cfg.Name,
Path: filepath.Join(cfg.DB.Dir, "cluster"),
IPLimiter: a.sessionsLimiter,
Logger: a.log.logger.core.WithComponent("Cluster"),
}); err != nil {
@@ -792,7 +794,7 @@ func (a *api) start() error {
}
certmagic.Default.Storage = &certmagic.FileStorage{
Path: cfg.DB.Dir + "/cert",
Path: filepath.Join(cfg.DB.Dir, "cert"),
}
certmagic.Default.DefaultServerName = cfg.Host.Name[0]
certmagic.Default.Logger = zap.NewNop()


@@ -5,11 +5,17 @@ import (
"errors"
"fmt"
"io"
"path/filepath"
"sync"
"time"
"github.com/datarhei/core/v16/log"
"github.com/datarhei/core/v16/net"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/raft"
raftboltdb "github.com/hashicorp/raft-boltdb/v2"
"go.etcd.io/bbolt"
)
var ErrNodeNotFound = errors.New("node not found")
@@ -44,12 +50,16 @@ type Cluster interface {
type ClusterConfig struct {
ID string
Name string
Path string
IPLimiter net.IPLimiter
Logger log.Logger
}
type cluster struct {
id string
name string
path string
nodes map[string]*node // List of known nodes
idfiles map[string][]string // Map from nodeid to list of files
@@ -70,6 +80,8 @@ type cluster struct {
func New(config ClusterConfig) (Cluster, error) {
c := &cluster{
id: config.ID,
name: config.Name,
path: config.Path,
nodes: map[string]*node{},
idfiles: map[string][]string{},
idupdate: map[string]time.Time{},
@@ -87,6 +99,60 @@ func New(config ClusterConfig) (Cluster, error) {
c.logger = log.New("")
}
fsm, err := NewFSM()
if err != nil {
return nil, err
}
snapshotLogger := NewLogger(c.logger.WithComponent("raft"), hclog.Debug).Named("snapshot")
snapShotStore, err := raft.NewFileSnapshotStoreWithLogger(filepath.Join(c.path, "snapshots"), 10, snapshotLogger)
if err != nil {
return nil, err
}
boltdb, err := raftboltdb.New(raftboltdb.Options{
Path: filepath.Join(c.path, "store.db"),
BoltOptions: &bbolt.Options{
Timeout: 5 * time.Second,
},
})
if err != nil {
return nil, err
}
boltdb.Stats()
raftConfig := raft.DefaultConfig()
raftConfig.Logger = NewLogger(c.logger.WithComponent("raft"), hclog.Debug)
raftTransport, err := raft.NewTCPTransportWithConfig("127.0.0.1:8090", nil, &raft.NetworkTransportConfig{
ServerAddressProvider: nil,
Logger: NewLogger(c.logger.WithComponent("raft"), hclog.Debug).Named("transport"),
Stream: &raft.TCPStreamLayer{},
MaxPool: 5,
Timeout: 5 * time.Second,
})
if err != nil {
boltdb.Close()
return nil, err
}
node, err := raft.NewRaft(raftConfig, fsm, boltdb, boltdb, snapShotStore, raftTransport)
if err != nil {
boltdb.Close()
return nil, err
}
node.BootstrapCluster(raft.Configuration{
Servers: []raft.Server{
{
Suffrage: raft.Voter,
ID: raft.ServerID(config.Name),
Address: raftTransport.LocalAddr(),
},
},
})
ctx, cancel := context.WithCancel(context.Background())
c.cancel = cancel
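An editorial aside, not part of the commit: once BootstrapCluster has run, commands would be replicated by calling Apply on the raft node and waiting on the returned future. A minimal sketch, assuming the `node` variable created above — the payload format is hypothetical, since the WIP fsm below ignores applied entries:

```go
// Hypothetical usage of the node created above. Apply may only be
// called on the leader; the future resolves once the entry is committed.
if node.State() == raft.Leader {
	future := node.Apply([]byte(`{"op":"set","key":"k","value":"v"}`), 5*time.Second)
	if err := future.Error(); err != nil {
		// not committed: lost leadership, timeout, ...
	}
}
```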

cluster/fsm.go (new file, 34 lines)

@@ -0,0 +1,34 @@
package cluster
import (
"io"
"github.com/hashicorp/raft"
)
// fsm implements a minimal, no-op raft.FSM
type fsm struct{}
func NewFSM() (raft.FSM, error) {
return &fsm{}, nil
}
func (f *fsm) Apply(*raft.Log) interface{} {
return nil
}
func (f *fsm) Snapshot() (raft.FSMSnapshot, error) {
return &fsmSnapshot{}, nil
}
func (f *fsm) Restore(snapshot io.ReadCloser) error {
return nil
}
type fsmSnapshot struct{}
func (s *fsmSnapshot) Persist(sink raft.SnapshotSink) error {
return nil
}
func (s *fsmSnapshot) Release() {}
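The FSM above is a stub: Apply, Snapshot, and Restore all do nothing yet. For illustration only, a state-applying FSM might decode each log entry into a command and mutate local state; in the hypothetical sketch below, the command format, the kvFSM type, and the map store are assumptions, not part of this commit (Snapshot and Restore are omitted for brevity):

```go
package cluster

import (
	"encoding/json"
	"sync"

	"github.com/hashicorp/raft"
)

// command is a hypothetical, JSON-encoded log entry payload.
type command struct {
	Op    string `json:"op"`
	Key   string `json:"key"`
	Value string `json:"value"`
}

// kvFSM is an illustrative FSM backed by an in-memory map.
type kvFSM struct {
	mu    sync.Mutex
	store map[string]string
}

func (f *kvFSM) Apply(l *raft.Log) interface{} {
	var cmd command
	if err := json.Unmarshal(l.Data, &cmd); err != nil {
		return err
	}
	f.mu.Lock()
	defer f.mu.Unlock()
	switch cmd.Op {
	case "set":
		f.store[cmd.Key] = cmd.Value
	case "delete":
		delete(f.store, cmd.Key)
	}
	return nil
}
```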

cluster/logger.go (new file, 195 lines)

@@ -0,0 +1,195 @@
package cluster
import (
"io"
golog "log"
"os"
"github.com/datarhei/core/v16/log"
hclog "github.com/hashicorp/go-hclog"
)
// hclogger mimics the hashicorp hclog.Logger interface
type hclogger struct {
logger log.Logger
level hclog.Level
name string
args []interface{}
}
func NewLogger(logger log.Logger, lvl hclog.Level) hclog.Logger {
level := log.Linfo
switch lvl {
case hclog.Trace:
level = log.Ldebug
case hclog.Debug:
level = log.Ldebug
case hclog.Info:
level = log.Linfo
case hclog.Warn:
level = log.Lwarn
case hclog.Error:
level = log.Lerror
}
return &hclogger{
logger: logger.WithOutput(log.NewSyncWriter(log.NewConsoleWriter(os.Stderr, level, true))),
level: lvl,
}
}
func (l *hclogger) Log(level hclog.Level, msg string, args ...interface{}) {
fields := l.argFields(args...)
logger := l.logger.WithFields(fields)
switch level {
case hclog.Trace:
logger = logger.Debug()
case hclog.Debug:
logger = logger.Debug()
case hclog.Info:
logger = logger.Info()
case hclog.Warn:
logger = logger.Warn()
case hclog.Error:
logger = logger.Error()
}
logger.Log(msg)
}
func (l *hclogger) argFields(args ...interface{}) log.Fields {
if len(args)%2 != 0 {
args = args[:len(args)-1]
}
fields := log.Fields{}
for i := 0; i < len(args); i = i + 2 {
key, ok := args[i].(string)
if !ok {
continue
}
fields[key] = args[i+1]
}
return fields
}
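// Illustrative note, not part of the original: argFields follows hclog's
// alternating key/value convention. A call such as
//
//	l.Info("joined cluster", "node", "core1", "voters", 3)
//
// yields log.Fields{"node": "core1", "voters": 3}; a trailing value without
// a key is dropped by the len(args)%2 check above.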
func (l *hclogger) Trace(msg string, args ...interface{}) {
l.Log(hclog.Trace, msg, args...)
}
func (l *hclogger) Debug(msg string, args ...interface{}) {
l.Log(hclog.Debug, msg, args...)
}
func (l *hclogger) Info(msg string, args ...interface{}) {
l.Log(hclog.Info, msg, args...)
}
func (l *hclogger) Warn(msg string, args ...interface{}) {
l.Log(hclog.Warn, msg, args...)
}
func (l *hclogger) Error(msg string, args ...interface{}) {
l.Log(hclog.Error, msg, args...)
}
func (l *hclogger) IsTrace() bool {
return l.level == hclog.Trace
}
func (l *hclogger) IsDebug() bool {
return l.level == hclog.Debug
}
func (l *hclogger) IsInfo() bool {
return l.level == hclog.Info
}
func (l *hclogger) IsWarn() bool {
return l.level == hclog.Warn
}
func (l *hclogger) IsError() bool {
return l.level == hclog.Error
}
func (l *hclogger) ImpliedArgs() []interface{} {
return l.args
}
func (l *hclogger) With(args ...interface{}) hclog.Logger {
fields := l.argFields(args...)
return &hclogger{
logger: l.logger.WithFields(fields),
level: l.level,
name: l.name,
args: args,
}
}
func (l *hclogger) Name() string {
return l.name
}
func (l *hclogger) Named(name string) hclog.Logger {
if len(l.name) != 0 {
name = l.name + "." + name
}
return &hclogger{
logger: l.logger.WithField("logname", name),
level: l.level,
name: name,
args: l.args,
}
}
func (l *hclogger) ResetNamed(name string) hclog.Logger {
return &hclogger{
logger: l.logger.WithField("logname", name),
level: l.level,
name: name,
args: l.args,
}
}
func (l *hclogger) SetLevel(lvl hclog.Level) {
level := log.Linfo
switch lvl {
case hclog.Trace:
level = log.Ldebug
case hclog.Debug:
level = log.Ldebug
case hclog.Info:
level = log.Linfo
case hclog.Warn:
level = log.Lwarn
case hclog.Error:
level = log.Lerror
}
l.logger = l.logger.WithOutput(log.NewSyncWriter(log.NewConsoleWriter(os.Stderr, level, true)))
l.level = lvl
}
func (l *hclogger) GetLevel() hclog.Level {
return l.level
}
func (l *hclogger) StandardLogger(opts *hclog.StandardLoggerOptions) *golog.Logger {
return golog.New(io.Discard, "", 0)
}
func (l *hclogger) StandardWriter(opts *hclog.StandardLoggerOptions) io.Writer {
return io.Discard
}

go.mod (13 changed lines)

@@ -13,11 +13,12 @@ require (
github.com/gobwas/glob v0.2.3
github.com/golang-jwt/jwt/v4 v4.4.3
github.com/google/uuid v1.3.0
github.com/hashicorp/raft-boltdb/v2 v2.2.2
github.com/invopop/jsonschema v0.4.0
github.com/joho/godotenv v1.4.0
github.com/labstack/echo/v4 v4.9.1
github.com/lithammer/shortuuid/v4 v4.0.0
github.com/mattn/go-isatty v0.0.17
github.com/mattn/go-isatty v0.0.18
github.com/minio/minio-go/v7 v7.0.47
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3
github.com/prometheus/client_golang v1.14.0
@@ -27,6 +28,7 @@ require (
github.com/swaggo/swag v1.8.7
github.com/vektah/gqlparser/v2 v2.5.1
github.com/xeipuuv/gojsonschema v1.2.0
go.etcd.io/bbolt v1.3.7
go.uber.org/zap v1.24.0
golang.org/x/mod v0.7.0
golang.org/x/net v0.7.0
@@ -35,12 +37,15 @@ require (
require (
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/agnivade/levenshtein v1.1.1 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/boltdb/bolt v1.3.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/fatih/color v1.15.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
github.com/go-openapi/jsonreference v0.20.0 // indirect
@@ -51,7 +56,11 @@ require (
github.com/golang-jwt/jwt v3.2.2+incompatible // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/hashicorp/go-hclog v1.5.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-msgpack v0.5.5 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/raft v1.4.0 // indirect
github.com/iancoleman/orderedmap v0.2.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
@@ -96,7 +105,7 @@ require (
go.uber.org/goleak v1.1.12 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.6.0 // indirect
golang.org/x/sys v0.7.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.4.0 // indirect

go.sum (111 changed lines)

@@ -2,6 +2,8 @@ github.com/99designs/gqlgen v0.17.20 h1:O7WzccIhKB1dm+7g6dhQcULINftfiLSBg2l/mwbp
github.com/99designs/gqlgen v0.17.20/go.mod h1:Mja2HI23kWT1VRH09hvWshFgOzKswpO20o4ScpJIES4=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v1.1.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
@@ -12,22 +14,36 @@ github.com/agiledragon/gomonkey/v2 v2.3.1/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaW
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/agnivade/levenshtein v1.1.1 h1:QY8M92nrzkmr798gCo3kmMyqXFzdQVpxLlGPRBij0P8=
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0 h1:jfIu9sQUG6Ig+0+Ap1h4unLjW6YQJpKZVmUzxsD4E/Q=
github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/atrox/haikunatorgo/v2 v2.0.1 h1:FCVx2KL2YvZtI1rI9WeEHxeLRrKGr0Dd4wfCJiUXupc=
github.com/atrox/haikunatorgo/v2 v2.0.1/go.mod h1:BBQmx2o+1Z5poziaHRgddAZKOpijwfKdAmMnSYlFK70=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c h1:8XZeJrs4+ZYhJeJ2aZxADI2tGADS15AzIF8MQ8XAhT4=
github.com/benburkert/openpgp v0.0.0-20160410205803-c2471f86866c/go.mod h1:x1vxHcL/9AVzuk5HOloOEPrtJY0MaalYr78afXZ+pWI=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/boltdb/bolt v1.3.1 h1:JQmyP4ZBrce+ZQu0dY660FMfatumYDLun9hBCUVIkF4=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/caddyserver/certmagic v0.17.2 h1:o30seC1T/dBqBCNNGNHWwj2i5/I/FMjBbTAhjADP3nE=
github.com/caddyserver/certmagic v0.17.2/go.mod h1:ouWUuC490GOLJzkyN35eXfV8bSbwMwSf4bdhkIxtdQE=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -43,7 +59,14 @@ github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+
github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
@@ -67,17 +90,23 @@ github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/j
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ=
github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang-jwt/jwt v3.2.2+incompatible h1:IfV12K8xAKAnZqdXVzCZ+TOjboZ2keLg81eXfW3O+oY=
github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I=
github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU=
github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
@@ -88,8 +117,28 @@ github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ=
github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c=
github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM=
github.com/hashicorp/raft v1.4.0 h1:tn28S/AWv0BtRQgwZv/1NELu8sCvI0FixqL8C8MYKeY=
github.com/hashicorp/raft v1.4.0/go.mod h1:nz64BIjXphDLATfKGG5RzHtNUPioLeKFsXEm88yTVew=
github.com/hashicorp/raft-boltdb v0.0.0-20210409134258-03c10cc3d4ea h1:RxcPJuutPRM8PUOyiweMmkuNO+RJyfy2jds2gfvgNmU=
github.com/hashicorp/raft-boltdb v0.0.0-20210409134258-03c10cc3d4ea/go.mod h1:qRd6nFJYYS6Iqnc/8HcUmko2/2Gw8qTFEmxDLii6W5I=
github.com/hashicorp/raft-boltdb/v2 v2.2.2 h1:rlkPtOllgIcKLxVT4nutqlTH2NRFn+tO1wwZk/4Dxqw=
github.com/hashicorp/raft-boltdb/v2 v2.2.2/go.mod h1:N8YgaZgNJLpZC+h+by7vDu5rzsRgONThTEeUS3zWbfY=
github.com/iancoleman/orderedmap v0.0.0-20190318233801-ac98e3ecb4b0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA=
github.com/iancoleman/orderedmap v0.2.0 h1:sq1N/TFpYH++aViPcaKjys3bDClUEU7s5B+z6jq8pNA=
github.com/iancoleman/orderedmap v0.2.0/go.mod h1:N0Wam8K1arqPXNWjMo21EXnBPOPp36vB07FNRdD2geA=
@@ -99,9 +148,12 @@ github.com/joho/godotenv v1.4.0 h1:3l4+N6zfMWnkbPEXKng2o2/MR5mSwTrBih4ZEkkz1lg=
github.com/joho/godotenv v1.4.0/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM=
github.com/klauspost/compress v1.15.15 h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw=
github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4=
@@ -109,6 +161,8 @@ github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa02
github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.3 h1:sxCkb+qR91z4vsqw4vGGZlDgPz3G7gjaLyK3V8y70BU=
github.com/klauspost/cpuid/v2 v2.2.3/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
@@ -139,13 +193,17 @@ github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJ
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/matryer/moq v0.2.7/go.mod h1:kITsx543GOENm48TUAQyJ9+SAvFSr7iGQXPoth/VUBk=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98=
github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mholt/acmez v1.0.4 h1:N3cE4Pek+dSolbsofIkAYz6H1d3pE+2G0os7QHslf80=
@@ -164,15 +222,21 @@ github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/otiai10/copy v1.7.0/go.mod h1:rmRl6QPdJj6EiUqXQ/4Nn2lLXoNQjFCQbbNrxgc/t3U=
github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE=
github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs=
github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo=
github.com/otiai10/mint v1.3.3/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -185,12 +249,26 @@ github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3g
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3 h1:Y7qCvg282QmlyrVQuL2fgGwebuw7zvfnRym09r+dUGc=
github.com/prep/average v0.0.0-20200506183628-d26c465f48c3/go.mod h1:0ZE5gcyWKS151WBDIpmLshHY0l+3edpuKnBUWVVbWKk=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
@@ -211,19 +289,24 @@ github.com/shoenig/go-m1cpu v0.1.4/go.mod h1:Wwvst4LR89UxjeFtLRMrpgRiyY4xPsejnVZ
github.com/shoenig/test v0.6.3 h1:GVXWJFk9PiOjN0KoJ7VrJGH6uLPnqxR7/fe3HUPfE0c=
github.com/shoenig/test v0.6.3/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.3.1-0.20190311161405-34c6fa2dc709/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
@@ -239,6 +322,7 @@ github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+Kd
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.8.1 h1:CGuYNZF9IKZY/rfBe3lJpccSoIY1ytfvmgQT90cNOl4=
github.com/urfave/cli/v2 v2.8.1/go.mod h1:Z41J9TPoffeoqP0Iza0YbAhGvymRdZAd2uPmZ5JxRdY=
@@ -264,6 +348,9 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ=
go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ=
go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
@@ -276,6 +363,7 @@ go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTV
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60=
go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
@@ -291,8 +379,11 @@ golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
@@ -307,14 +398,23 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -330,6 +430,7 @@ golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211103235746-7861aae1554b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -337,8 +438,9 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -372,6 +474,7 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w=
google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -381,9 +484,11 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=


@@ -55,12 +55,8 @@ type Fields map[string]interface{}
//
// The context is a string that represents who wrote the message.
type Logger interface {
// WithOutput adds an output to the Logger. The messages are written to the
// provided writer if the log level of the message is more or equally critical
than level. Pass true for useColor if colored output is desired. If the
// writer doesn't support colored output, it will be automatically disabled.
// The returned value implements the LoggerOutput interface which allows to
// change the log level at any later point in time.
// WithOutput sets an output to the Logger. The messages are written to the
// provided writer.
WithOutput(w Writer) Logger
// With returns a new Logger with the given context. The context may
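A usage sketch for `WithOutput`, mirroring how the new cluster/logger.go wires it up (the writer constructors are taken from that file):

```go
// Route messages at or above Linfo to stderr, with colored output enabled.
w := log.NewSyncWriter(log.NewConsoleWriter(os.Stderr, log.Linfo, true))
logger = logger.WithOutput(w)
```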

vendor/github.com/armon/go-metrics/.gitignore (generated, vendored, new file, 26 lines)

@@ -0,0 +1,26 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
/metrics.out
.idea

vendor/github.com/armon/go-metrics/.travis.yml (generated, vendored, new file, 13 lines)

@@ -0,0 +1,13 @@
language: go
go:
- "1.x"
env:
- GO111MODULE=on
install:
- go get ./...
script:
- go test ./...

vendor/github.com/armon/go-metrics/LICENSE (generated, vendored, new file, 20 lines)

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013 Armon Dadgar
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/armon/go-metrics/README.md (generated, vendored, new file, 91 lines)

@@ -0,0 +1,91 @@
go-metrics
==========
This library provides a `metrics` package which can be used to instrument code,
expose application metrics, and profile runtime performance in a flexible manner.
Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics)
Sinks
-----
The `metrics` package makes use of a `MetricSink` interface to support delivery
to any type of backend. Currently the following sinks are provided:
* StatsiteSink: Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP)
* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP)
* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes)
* InmemSink: Provides in-memory aggregation; can be used to export stats
* FanoutSink: Sinks to multiple sinks, enabling, for example, writing to multiple statsite instances
* BlackholeSink: Sinks to nowhere
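Sinks compose; for example, a `FanoutSink` can duplicate every metric to several backends at once. A minimal sketch (the statsite address is a placeholder):

```go
// Combine several sinks behind a single FanoutSink.
inm := metrics.NewInmemSink(10*time.Second, time.Minute)
statsite, err := metrics.NewStatsiteSink("statsite.internal:8125")
if err != nil {
	panic(err)
}
fanout := metrics.FanoutSink{inm, statsite}
metrics.NewGlobal(metrics.DefaultConfig("service-name"), fanout)
```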
In addition to the sinks, the `InmemSignal` can be used to catch a signal,
and dump a formatted output of recent metrics. For example, when a process gets
a SIGUSR1, it can dump to stderr recent performance metrics for debugging.
Labels
------
Most metrics have an equivalent variant ending in `WithLabels`; these methods
allow pushing metrics with labels and using features of the underlying sinks
(e.g., translation into Prometheus labels).
Since some of these labels may greatly increase the cardinality of metrics, the
library allows filtering labels using an allowlist/blocklist system that is
global to all metrics.
* If `Config.AllowedLabels` is not nil, only the labels listed there are sent to the underlying sink; otherwise, all labels are sent by default.
* If `Config.BlockedLabels` is not nil, any label listed there is never sent to the underlying sinks.
By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that
no labels are filtered at all, but this lets a user globally block some labels with high
cardinality at the application level.
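For illustration, a minimal sketch of the filtering configuration — the label names are placeholders; `AllowedLabels` and `BlockedLabels` are the fields described above:

```go
conf := metrics.DefaultConfig("service-name")
conf.AllowedLabels = []string{"service", "env"} // only these labels pass
conf.BlockedLabels = []string{"pod"}            // never forwarded
metrics.NewGlobal(conf, metrics.NewInmemSink(10*time.Second, time.Minute))

// The "pod" label is stripped before the metric reaches the sink.
metrics.IncrCounterWithLabels([]string{"requests"}, 1, []metrics.Label{
	{Name: "env", Value: "prod"},
	{Name: "pod", Value: "abc123"},
})
```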
Examples
--------
Here is an example of using the package:
```go
func SlowMethod() {
// Profiling the runtime of a method
defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
}
// Configure a statsite sink as the global metrics sink
sink, _ := metrics.NewStatsiteSink("statsite:8125")
metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
// Emit a Key/Value pair
metrics.EmitKey([]string{"questions", "meaning of life"}, 42)
```
Here is an example of setting up a signal handler:
```go
// Setup the inmem sink and signal handler
inm := metrics.NewInmemSink(10*time.Second, time.Minute)
sig := metrics.DefaultInmemSignal(inm)
metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)
// Run some code
inm.SetGauge([]string{"foo"}, 42)
inm.EmitKey([]string{"bar"}, 30)
inm.IncrCounter([]string{"baz"}, 42)
inm.IncrCounter([]string{"baz"}, 1)
inm.IncrCounter([]string{"baz"}, 80)
inm.AddSample([]string{"method", "wow"}, 42)
inm.AddSample([]string{"method", "wow"}, 100)
inm.AddSample([]string{"method", "wow"}, 22)
....
```
When a signal comes in, output like the following will be dumped to stderr:
[2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000
[2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000
[2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509
[2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513

vendor/github.com/armon/go-metrics/const_unix.go (generated, vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
// +build !windows
package metrics
import (
"syscall"
)
const (
// DefaultSignal is used with DefaultInmemSignal
DefaultSignal = syscall.SIGUSR1
)

vendor/github.com/armon/go-metrics/const_windows.go (generated, vendored, new file, 13 lines)

@@ -0,0 +1,13 @@
// +build windows
package metrics
import (
"syscall"
)
const (
// DefaultSignal is used with DefaultInmemSignal
// Windows has no SIGUSR1, use SIGBREAK
DefaultSignal = syscall.Signal(21)
)

vendor/github.com/armon/go-metrics/inmem.go (generated, vendored, new file, 339 lines)

@@ -0,0 +1,339 @@
package metrics
import (
"bytes"
"fmt"
"math"
"net/url"
"strings"
"sync"
"time"
)
var spaceReplacer = strings.NewReplacer(" ", "_")
// InmemSink provides a MetricSink that does in-memory aggregation
// without sending metrics over a network. It can be embedded within
// an application to provide profiling information.
type InmemSink struct {
// How long is each aggregation interval
interval time.Duration
// Retain controls how long metric intervals are kept
retain time.Duration
// maxIntervals is the maximum length of intervals.
// It is retain / interval.
maxIntervals int
// intervals is a slice of the retained intervals
intervals []*IntervalMetrics
intervalLock sync.RWMutex
rateDenom float64
}
// IntervalMetrics stores the aggregated metrics
// for a specific interval
type IntervalMetrics struct {
sync.RWMutex
// The start time of the interval
Interval time.Time
// Gauges maps the key to the last set value
Gauges map[string]GaugeValue
// Points maps the string to the list of emitted values
// from EmitKey
Points map[string][]float32
// Counters maps the string key to a sum of the counter
// values
Counters map[string]SampledValue
// Samples maps the key to an AggregateSample,
// which has the rolled up view of a sample
Samples map[string]SampledValue
// done is closed when this interval has ended, and a new IntervalMetrics
// has been created to receive any future metrics.
done chan struct{}
}
// NewIntervalMetrics creates a new IntervalMetrics for a given interval
func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
return &IntervalMetrics{
Interval: intv,
Gauges: make(map[string]GaugeValue),
Points: make(map[string][]float32),
Counters: make(map[string]SampledValue),
Samples: make(map[string]SampledValue),
done: make(chan struct{}),
}
}
// AggregateSample is used to hold aggregate metrics
// about a sample
type AggregateSample struct {
Count int // The count of emitted pairs
Rate float64 // The values rate per time unit (usually 1 second)
Sum float64 // The sum of values
SumSq float64 `json:"-"` // The sum of squared values
Min float64 // Minimum value
Max float64 // Maximum value
LastUpdated time.Time `json:"-"` // When value was last updated
}
// Computes a Stddev of the values
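// (the textbook sample standard deviation, sqrt((n·ΣxΒ² − (Σx)Β²) / (n·(n−1))),
// computed incrementally from Count, Sum, and SumSq)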
func (a *AggregateSample) Stddev() float64 {
num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2)
div := float64(a.Count * (a.Count - 1))
if div == 0 {
return 0
}
return math.Sqrt(num / div)
}
// Computes a mean of the values
func (a *AggregateSample) Mean() float64 {
if a.Count == 0 {
return 0
}
return a.Sum / float64(a.Count)
}
// Ingest is used to update a sample
func (a *AggregateSample) Ingest(v float64, rateDenom float64) {
a.Count++
a.Sum += v
a.SumSq += (v * v)
if v < a.Min || a.Count == 1 {
a.Min = v
}
if v > a.Max || a.Count == 1 {
a.Max = v
}
a.Rate = float64(a.Sum) / rateDenom
a.LastUpdated = time.Now()
}
func (a *AggregateSample) String() string {
if a.Count == 0 {
return "Count: 0"
} else if a.Stddev() == 0 {
return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated)
} else {
return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s",
a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated)
}
}
// NewInmemSinkFromURL creates an InmemSink from a URL. It is used
// (and tested) from NewMetricSinkFromURL.
func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) {
params := u.Query()
interval, err := time.ParseDuration(params.Get("interval"))
if err != nil {
return nil, fmt.Errorf("Bad 'interval' param: %s", err)
}
retain, err := time.ParseDuration(params.Get("retain"))
if err != nil {
return nil, fmt.Errorf("Bad 'retain' param: %s", err)
}
return NewInmemSink(interval, retain), nil
}
// NewInmemSink is used to construct a new in-memory sink.
// Uses an aggregation interval and maximum retention period.
func NewInmemSink(interval, retain time.Duration) *InmemSink {
rateTimeUnit := time.Second
i := &InmemSink{
interval: interval,
retain: retain,
maxIntervals: int(retain / interval),
rateDenom: float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()),
}
i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)
return i
}
func (i *InmemSink) SetGauge(key []string, val float32) {
i.SetGaugeWithLabels(key, val, nil)
}
func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
k, name := i.flattenKeyLabels(key, labels)
intv := i.getInterval()
intv.Lock()
defer intv.Unlock()
intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels}
}
func (i *InmemSink) EmitKey(key []string, val float32) {
k := i.flattenKey(key)
intv := i.getInterval()
intv.Lock()
defer intv.Unlock()
vals := intv.Points[k]
intv.Points[k] = append(vals, val)
}
func (i *InmemSink) IncrCounter(key []string, val float32) {
i.IncrCounterWithLabels(key, val, nil)
}
func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
k, name := i.flattenKeyLabels(key, labels)
intv := i.getInterval()
intv.Lock()
defer intv.Unlock()
agg, ok := intv.Counters[k]
if !ok {
agg = SampledValue{
Name: name,
AggregateSample: &AggregateSample{},
Labels: labels,
}
intv.Counters[k] = agg
}
agg.Ingest(float64(val), i.rateDenom)
}
func (i *InmemSink) AddSample(key []string, val float32) {
i.AddSampleWithLabels(key, val, nil)
}
func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
k, name := i.flattenKeyLabels(key, labels)
intv := i.getInterval()
intv.Lock()
defer intv.Unlock()
agg, ok := intv.Samples[k]
if !ok {
agg = SampledValue{
Name: name,
AggregateSample: &AggregateSample{},
Labels: labels,
}
intv.Samples[k] = agg
}
agg.Ingest(float64(val), i.rateDenom)
}
// Data is used to retrieve all the aggregated metrics
// Intervals may be in use, and a read lock should be acquired
func (i *InmemSink) Data() []*IntervalMetrics {
// Get the current interval, forces creation
i.getInterval()
i.intervalLock.RLock()
defer i.intervalLock.RUnlock()
n := len(i.intervals)
intervals := make([]*IntervalMetrics, n)
copy(intervals[:n-1], i.intervals[:n-1])
current := i.intervals[n-1]
// make its own copy for current interval
intervals[n-1] = &IntervalMetrics{}
copyCurrent := intervals[n-1]
current.RLock()
*copyCurrent = *current
// RWMutex is not safe to copy, so create a new instance on the copy
copyCurrent.RWMutex = sync.RWMutex{}
copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges))
for k, v := range current.Gauges {
copyCurrent.Gauges[k] = v
}
// saved values will not change, so it is safe to copy the slice reference
copyCurrent.Points = make(map[string][]float32, len(current.Points))
for k, v := range current.Points {
copyCurrent.Points[k] = v
}
copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))
for k, v := range current.Counters {
copyCurrent.Counters[k] = v.deepCopy()
}
copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))
for k, v := range current.Samples {
copyCurrent.Samples[k] = v.deepCopy()
}
current.RUnlock()
return intervals
}
// getInterval returns the current interval. A new interval is created if no
// previous interval exists, or if the current time is beyond the window for the
// current interval.
func (i *InmemSink) getInterval() *IntervalMetrics {
intv := time.Now().Truncate(i.interval)
// Attempt to return the existing interval first, because it only requires
// a read lock.
i.intervalLock.RLock()
n := len(i.intervals)
if n > 0 && i.intervals[n-1].Interval == intv {
defer i.intervalLock.RUnlock()
return i.intervals[n-1]
}
i.intervalLock.RUnlock()
i.intervalLock.Lock()
defer i.intervalLock.Unlock()
// Re-check for an existing interval now that the lock is re-acquired.
n = len(i.intervals)
if n > 0 && i.intervals[n-1].Interval == intv {
return i.intervals[n-1]
}
current := NewIntervalMetrics(intv)
i.intervals = append(i.intervals, current)
if n > 0 {
close(i.intervals[n-1].done)
}
n++
// Prune old intervals if the count exceeds the max.
if n >= i.maxIntervals {
copy(i.intervals[0:], i.intervals[n-i.maxIntervals:])
i.intervals = i.intervals[:i.maxIntervals]
}
return current
}
// Flattens the key for formatting, removes spaces
func (i *InmemSink) flattenKey(parts []string) string {
buf := &bytes.Buffer{}
joined := strings.Join(parts, ".")
spaceReplacer.WriteString(buf, joined)
return buf.String()
}
// Flattens the key for formatting along with its labels, removes spaces
func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {
key := i.flattenKey(parts)
buf := bytes.NewBufferString(key)
for _, label := range labels {
spaceReplacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
}
return buf.String(), key
}
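// Illustrative note, not part of the original: flattenKey joins the key
// parts with "." and replaces spaces with "_", while flattenKeyLabels
// additionally appends ";name=value" per label. For example, the parts
// []string{"api", "latency ms"} with the label env=prod flatten to
// "api.latency_ms;env=prod" (the map key) and "api.latency_ms" (the name).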

vendor/github.com/armon/go-metrics/inmem_endpoint.go (generated, vendored, new file, 162 lines)

@@ -0,0 +1,162 @@
package metrics
import (
"context"
"fmt"
"net/http"
"sort"
"time"
)
// MetricsSummary holds a roll-up of metrics info for a given interval
type MetricsSummary struct {
Timestamp string
Gauges []GaugeValue
Points []PointValue
Counters []SampledValue
Samples []SampledValue
}
type GaugeValue struct {
Name string
Hash string `json:"-"`
Value float32
Labels []Label `json:"-"`
DisplayLabels map[string]string `json:"Labels"`
}
type PointValue struct {
Name string
Points []float32
}
type SampledValue struct {
Name string
Hash string `json:"-"`
*AggregateSample
Mean float64
Stddev float64
Labels []Label `json:"-"`
DisplayLabels map[string]string `json:"Labels"`
}
// deepCopy allocates a new instance of AggregateSample
func (source *SampledValue) deepCopy() SampledValue {
dest := *source
if source.AggregateSample != nil {
dest.AggregateSample = &AggregateSample{}
*dest.AggregateSample = *source.AggregateSample
}
return dest
}
// DisplayMetrics returns a summary of the metrics from the most recent finished interval.
func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
data := i.Data()
var interval *IntervalMetrics
n := len(data)
switch {
case n == 0:
return nil, fmt.Errorf("no metric intervals have been initialized yet")
case n == 1:
// Show the current interval if it's all we have
interval = data[0]
default:
// Show the most recent finished interval if we have one
interval = data[n-2]
}
return newMetricSummaryFromInterval(interval), nil
}
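// displayMetricsHandler is a minimal sketch (a hypothetical helper, not part
// of the upstream API) adapting DisplayMetrics to an http.HandlerFunc; real
// callers would typically JSON-encode the returned summary rather than
// printing it with fmt.
func displayMetricsHandler(inm *InmemSink) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		summary, err := inm.DisplayMetrics(w, r)
		if err != nil {
			http.Error(w, err.Error(), http.StatusServiceUnavailable)
			return
		}
		fmt.Fprintf(w, "%+v\n", summary)
	}
}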
func newMetricSummaryFromInterval(interval *IntervalMetrics) MetricsSummary {
interval.RLock()
defer interval.RUnlock()
summary := MetricsSummary{
Timestamp: interval.Interval.Round(time.Second).UTC().String(),
Gauges: make([]GaugeValue, 0, len(interval.Gauges)),
Points: make([]PointValue, 0, len(interval.Points)),
}
// Format and sort the output of each metric type, so it gets displayed in a
// deterministic order.
for name, points := range interval.Points {
summary.Points = append(summary.Points, PointValue{name, points})
}
sort.Slice(summary.Points, func(i, j int) bool {
return summary.Points[i].Name < summary.Points[j].Name
})
for hash, value := range interval.Gauges {
value.Hash = hash
value.DisplayLabels = make(map[string]string)
for _, label := range value.Labels {
value.DisplayLabels[label.Name] = label.Value
}
value.Labels = nil
summary.Gauges = append(summary.Gauges, value)
}
sort.Slice(summary.Gauges, func(i, j int) bool {
return summary.Gauges[i].Hash < summary.Gauges[j].Hash
})
summary.Counters = formatSamples(interval.Counters)
summary.Samples = formatSamples(interval.Samples)
return summary
}
func formatSamples(source map[string]SampledValue) []SampledValue {
output := make([]SampledValue, 0, len(source))
for hash, sample := range source {
displayLabels := make(map[string]string)
for _, label := range sample.Labels {
displayLabels[label.Name] = label.Value
}
output = append(output, SampledValue{
Name: sample.Name,
Hash: hash,
AggregateSample: sample.AggregateSample,
Mean: sample.AggregateSample.Mean(),
Stddev: sample.AggregateSample.Stddev(),
DisplayLabels: displayLabels,
})
}
sort.Slice(output, func(i, j int) bool {
return output[i].Hash < output[j].Hash
})
return output
}
type Encoder interface {
Encode(interface{}) error
}
// Stream writes metrics using encoder.Encode each time an interval ends. Runs
// until the request context is cancelled, or the encoder returns an error.
// The caller is responsible for logging any errors from encoder.
func (i *InmemSink) Stream(ctx context.Context, encoder Encoder) {
interval := i.getInterval()
for {
select {
case <-interval.done:
summary := newMetricSummaryFromInterval(interval)
if err := encoder.Encode(summary); err != nil {
return
}
// update interval to the next one
interval = i.getInterval()
case <-ctx.Done():
return
}
}
}
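// streamMetricsSketch is a minimal sketch (a hypothetical helper) of driving
// Stream: enc can be any Encoder, e.g. json.NewEncoder(os.Stdout) at the
// call site, and streaming stops when the surrounding context expires.
func streamMetricsSketch(inm *InmemSink, enc Encoder) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()
	inm.Stream(ctx, enc)
}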

vendor/github.com/armon/go-metrics/inmem_signal.go generated vendored Normal file

@@ -0,0 +1,117 @@
package metrics
import (
"bytes"
"fmt"
"io"
"os"
"os/signal"
"strings"
"sync"
"syscall"
)
// InmemSignal is used to listen for a given signal, and when received,
// to dump the current metrics from the InmemSink to an io.Writer
type InmemSignal struct {
signal syscall.Signal
inm *InmemSink
w io.Writer
sigCh chan os.Signal
stop bool
stopCh chan struct{}
stopLock sync.Mutex
}
// NewInmemSignal creates a new InmemSignal which listens for a given signal,
// and dumps the current metrics out to a writer
func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal {
i := &InmemSignal{
signal: sig,
inm: inmem,
w: w,
sigCh: make(chan os.Signal, 1),
stopCh: make(chan struct{}),
}
signal.Notify(i.sigCh, sig)
go i.run()
return i
}
// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1
// and writes output to stderr. Windows uses SIGBREAK
func DefaultInmemSignal(inmem *InmemSink) *InmemSignal {
return NewInmemSignal(inmem, DefaultSignal, os.Stderr)
}
// Stop is used to stop the InmemSignal from listening
func (i *InmemSignal) Stop() {
i.stopLock.Lock()
defer i.stopLock.Unlock()
if i.stop {
return
}
i.stop = true
close(i.stopCh)
signal.Stop(i.sigCh)
}
// run is a long running routine that handles signals
func (i *InmemSignal) run() {
for {
select {
case <-i.sigCh:
i.dumpStats()
case <-i.stopCh:
return
}
}
}
// dumpStats is used to dump the data to the output writer
func (i *InmemSignal) dumpStats() {
buf := bytes.NewBuffer(nil)
data := i.inm.Data()
// Skip the last period which is still being aggregated
for j := 0; j < len(data)-1; j++ {
intv := data[j]
intv.RLock()
for _, val := range intv.Gauges {
name := i.flattenLabels(val.Name, val.Labels)
fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value)
}
for name, vals := range intv.Points {
for _, val := range vals {
fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val)
}
}
for _, agg := range intv.Counters {
name := i.flattenLabels(agg.Name, agg.Labels)
fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
}
for _, agg := range intv.Samples {
name := i.flattenLabels(agg.Name, agg.Labels)
fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
}
intv.RUnlock()
}
// Write out the bytes
i.w.Write(buf.Bytes())
}
// Flattens the key for formatting along with its labels, removes spaces
func (i *InmemSignal) flattenLabels(name string, labels []Label) string {
buf := bytes.NewBufferString(name)
replacer := strings.NewReplacer(" ", "_", ":", "_")
for _, label := range labels {
replacer.WriteString(buf, ".")
replacer.WriteString(buf, label.Value)
}
return buf.String()
}
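// selfDumpSketch is a minimal sketch (a hypothetical, Unix-only helper) of
// the typical wiring: install the default handler, then trigger a dump by
// signalling our own process. In a real program the signal usually comes
// from an operator running `kill -USR1 <pid>`.
func selfDumpSketch(inm *InmemSink) *InmemSignal {
	sig := DefaultInmemSignal(inm)
	syscall.Kill(os.Getpid(), syscall.SIGUSR1)
	return sig // the caller invokes Stop() when done listening
}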

vendor/github.com/armon/go-metrics/metrics.go generated vendored Normal file

@@ -0,0 +1,299 @@
package metrics
import (
"runtime"
"strings"
"time"
iradix "github.com/hashicorp/go-immutable-radix"
)
type Label struct {
Name string
Value string
}
func (m *Metrics) SetGauge(key []string, val float32) {
m.SetGaugeWithLabels(key, val, nil)
}
func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) {
if m.HostName != "" {
if m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
} else if m.EnableHostname {
key = insert(0, m.HostName, key)
}
}
if m.EnableTypePrefix {
key = insert(0, "gauge", key)
}
if m.ServiceName != "" {
if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
}
allowed, labelsFiltered := m.allowMetric(key, labels)
if !allowed {
return
}
m.sink.SetGaugeWithLabels(key, val, labelsFiltered)
}
func (m *Metrics) EmitKey(key []string, val float32) {
if m.EnableTypePrefix {
key = insert(0, "kv", key)
}
if m.ServiceName != "" {
key = insert(0, m.ServiceName, key)
}
allowed, _ := m.allowMetric(key, nil)
if !allowed {
return
}
m.sink.EmitKey(key, val)
}
func (m *Metrics) IncrCounter(key []string, val float32) {
m.IncrCounterWithLabels(key, val, nil)
}
func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) {
if m.HostName != "" && m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
}
if m.EnableTypePrefix {
key = insert(0, "counter", key)
}
if m.ServiceName != "" {
if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
}
allowed, labelsFiltered := m.allowMetric(key, labels)
if !allowed {
return
}
m.sink.IncrCounterWithLabels(key, val, labelsFiltered)
}
func (m *Metrics) AddSample(key []string, val float32) {
m.AddSampleWithLabels(key, val, nil)
}
func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) {
if m.HostName != "" && m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
}
if m.EnableTypePrefix {
key = insert(0, "sample", key)
}
if m.ServiceName != "" {
if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
}
allowed, labelsFiltered := m.allowMetric(key, labels)
if !allowed {
return
}
m.sink.AddSampleWithLabels(key, val, labelsFiltered)
}
func (m *Metrics) MeasureSince(key []string, start time.Time) {
m.MeasureSinceWithLabels(key, start, nil)
}
func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
if m.HostName != "" && m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
}
if m.EnableTypePrefix {
key = insert(0, "timer", key)
}
if m.ServiceName != "" {
if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
}
allowed, labelsFiltered := m.allowMetric(key, labels)
if !allowed {
return
}
now := time.Now()
elapsed := now.Sub(start)
msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity)
m.sink.AddSampleWithLabels(key, msec, labelsFiltered)
}
// UpdateFilter overwrites the existing filter with the given rules.
func (m *Metrics) UpdateFilter(allow, block []string) {
m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels)
}
// UpdateFilterAndLabels overwrites the existing filter with the given rules.
func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
m.filterLock.Lock()
defer m.filterLock.Unlock()
m.AllowedPrefixes = allow
m.BlockedPrefixes = block
if allowedLabels == nil {
// a nil allow list means all labels are allowed
m.allowedLabels = nil
} else {
m.allowedLabels = make(map[string]bool)
for _, v := range allowedLabels {
m.allowedLabels[v] = true
}
}
m.blockedLabels = make(map[string]bool)
for _, v := range blockedLabels {
m.blockedLabels[v] = true
}
m.AllowedLabels = allowedLabels
m.BlockedLabels = blockedLabels
m.filter = iradix.New()
for _, prefix := range m.AllowedPrefixes {
m.filter, _, _ = m.filter.Insert([]byte(prefix), true)
}
for _, prefix := range m.BlockedPrefixes {
m.filter, _, _ = m.filter.Insert([]byte(prefix), false)
}
}
func (m *Metrics) Shutdown() {
if ss, ok := m.sink.(ShutdownSink); ok {
ss.Shutdown()
}
}
// labelIsAllowed returns true if the given label should be included in the metric.
// The caller should hold m.filterLock while calling this method.
func (m *Metrics) labelIsAllowed(label *Label) bool {
labelName := (*label).Name
if m.blockedLabels != nil {
_, ok := m.blockedLabels[labelName]
if ok {
// If present, let's remove this label
return false
}
}
if m.allowedLabels != nil {
_, ok := m.allowedLabels[labelName]
return ok
}
// Allow by default
return true
}
// filterLabels returns only the allowed labels.
// The caller should hold m.filterLock while calling this method.
func (m *Metrics) filterLabels(labels []Label) []Label {
if labels == nil {
return nil
}
toReturn := []Label{}
for _, label := range labels {
if m.labelIsAllowed(&label) {
toReturn = append(toReturn, label)
}
}
return toReturn
}
// allowMetric returns whether the metric should be allowed based on the configured
// prefix filters. It also returns the applicable labels.
func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) {
m.filterLock.RLock()
defer m.filterLock.RUnlock()
if m.filter == nil || m.filter.Len() == 0 {
return m.Config.FilterDefault, m.filterLabels(labels)
}
_, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, ".")))
if !ok {
return m.Config.FilterDefault, m.filterLabels(labels)
}
return allowed.(bool), m.filterLabels(labels)
}
// Periodically collects runtime stats to publish
func (m *Metrics) collectStats() {
for {
time.Sleep(m.ProfileInterval)
m.EmitRuntimeStats()
}
}
// EmitRuntimeStats emits various runtime statistics
func (m *Metrics) EmitRuntimeStats() {
// Export number of Goroutines
numRoutines := runtime.NumGoroutine()
m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines))
// Export memory stats
var stats runtime.MemStats
runtime.ReadMemStats(&stats)
m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc))
m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys))
m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs))
m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees))
m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects))
m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs))
m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC))
// Export info about the last few GC runs
num := stats.NumGC
// Handle wrap around
if num < m.lastNumGC {
m.lastNumGC = 0
}
// Ensure we don't scan more than 256
if num-m.lastNumGC >= 256 {
m.lastNumGC = num - 255
}
for i := m.lastNumGC; i < num; i++ {
pause := stats.PauseNs[i%256]
m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause))
}
m.lastNumGC = num
}
// insert returns a new slice with the provided string value inserted at index i
// and the values of the provided slice as the remaining values.
// Ordering of the values in the provided input slice is kept intact in the output slice.
func insert(i int, v string, s []string) []string {
// Allocate new slice to avoid modifying the input slice
newS := make([]string, len(s)+1)
// Copy s[0, i-1] into newS
for j := 0; j < i; j++ {
newS[j] = s[j]
}
// Insert provided element at index i
newS[i] = v
// Copy s[i, len(s)-1] into newS starting at newS[i+1]
for j := i; j < len(s); j++ {
newS[j+1] = s[j]
}
return newS
}
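// filterSketch is a minimal sketch (a hypothetical helper, assuming no
// ServiceName prefixing is configured) of the prefix filter: the longest
// matching prefix wins, so "api.debug" keys are blocked while the rest of
// the "api" subtree is allowed, and keys matching no prefix fall back to
// Config.FilterDefault.
func filterSketch(m *Metrics) {
	m.UpdateFilter(
		[]string{"api"},       // allow the api subtree
		[]string{"api.debug"}, // but block its debug branch
	)
	m.IncrCounter([]string{"api", "hits"}, 1)       // allowed
	m.IncrCounter([]string{"api", "debug", "x"}, 1) // blocked
}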

vendor/github.com/armon/go-metrics/sink.go generated vendored Normal file

@@ -0,0 +1,132 @@
package metrics
import (
"fmt"
"net/url"
)
// The MetricSink interface is used to transmit metrics information
// to an external system
type MetricSink interface {
// A Gauge should retain the last value it is set to
SetGauge(key []string, val float32)
SetGaugeWithLabels(key []string, val float32, labels []Label)
// Should emit a Key/Value pair for each call
EmitKey(key []string, val float32)
// Counters should accumulate values
IncrCounter(key []string, val float32)
IncrCounterWithLabels(key []string, val float32, labels []Label)
// Samples are for timing information, where quantiles are used
AddSample(key []string, val float32)
AddSampleWithLabels(key []string, val float32, labels []Label)
}
type ShutdownSink interface {
MetricSink
// Shutdown the metric sink, flush metrics to storage, and cleanup resources.
// Called immediately prior to application exit. Implementations must block
// until metrics are flushed to storage.
Shutdown()
}
// BlackholeSink is used to just blackhole messages
type BlackholeSink struct{}
func (*BlackholeSink) SetGauge(key []string, val float32) {}
func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {}
func (*BlackholeSink) EmitKey(key []string, val float32) {}
func (*BlackholeSink) IncrCounter(key []string, val float32) {}
func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {}
func (*BlackholeSink) AddSample(key []string, val float32) {}
func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {}
// FanoutSink is used to fan out values to multiple sinks
type FanoutSink []MetricSink
func (fh FanoutSink) SetGauge(key []string, val float32) {
fh.SetGaugeWithLabels(key, val, nil)
}
func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
for _, s := range fh {
s.SetGaugeWithLabels(key, val, labels)
}
}
func (fh FanoutSink) EmitKey(key []string, val float32) {
for _, s := range fh {
s.EmitKey(key, val)
}
}
func (fh FanoutSink) IncrCounter(key []string, val float32) {
fh.IncrCounterWithLabels(key, val, nil)
}
func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
for _, s := range fh {
s.IncrCounterWithLabels(key, val, labels)
}
}
func (fh FanoutSink) AddSample(key []string, val float32) {
fh.AddSampleWithLabels(key, val, nil)
}
func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
for _, s := range fh {
s.AddSampleWithLabels(key, val, labels)
}
}
func (fh FanoutSink) Shutdown() {
for _, s := range fh {
if ss, ok := s.(ShutdownSink); ok {
ss.Shutdown()
}
}
}
// sinkURLFactoryFunc is a generic interface around the *SinkFromURL() function provided
// by each sink type
type sinkURLFactoryFunc func(*url.URL) (MetricSink, error)
// sinkRegistry supports the generic NewMetricSink function by mapping URL
// schemes to metric sink factory functions
var sinkRegistry = map[string]sinkURLFactoryFunc{
"statsd": NewStatsdSinkFromURL,
"statsite": NewStatsiteSinkFromURL,
"inmem": NewInmemSinkFromURL,
}
// NewMetricSinkFromURL allows a generic URL input to configure any of the
// supported sinks. The scheme of the URL identifies the type of the sink, and
// the query parameters are used to set options.
//
// "statsd://" - Initializes a StatsdSink. The host and port are passed through
// as the "addr" of the sink
//
// "statsite://" - Initializes a StatsiteSink. The host and port become the
// "addr" of the sink
//
// "inmem://" - Initializes an InmemSink. The host and port are ignored. The
// "interval" and "duration" query parameters must be specified with valid
// durations, see NewInmemSink for details.
func NewMetricSinkFromURL(urlStr string) (MetricSink, error) {
u, err := url.Parse(urlStr)
if err != nil {
return nil, err
}
sinkURLFactoryFunc := sinkRegistry[u.Scheme]
if sinkURLFactoryFunc == nil {
return nil, fmt.Errorf(
"cannot create metric sink, unrecognized sink name: %q", u.Scheme)
}
return sinkURLFactoryFunc(u)
}
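// sinkFromURLSketch is a minimal sketch of using the URL factory; the statsd
// address is a placeholder, not a default.
func sinkFromURLSketch() (MetricSink, error) {
	return NewMetricSinkFromURL("statsd://127.0.0.1:8125")
}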

vendor/github.com/armon/go-metrics/start.go generated vendored Normal file

@@ -0,0 +1,158 @@
package metrics
import (
"os"
"sync"
"sync/atomic"
"time"
iradix "github.com/hashicorp/go-immutable-radix"
)
// Config is used to configure metrics settings
type Config struct {
ServiceName string // Prefixed with keys to separate services
HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname
EnableHostname bool // Enable prefixing gauge values with hostname
EnableHostnameLabel bool // Enable adding hostname to labels
EnableServiceLabel bool // Enable adding service to labels
EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory)
EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer")
TimerGranularity time.Duration // Granularity of timers.
ProfileInterval time.Duration // Interval to profile runtime metrics
AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator
BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator
AllowedLabels []string // A list of metric labels to allow, with '.' as the separator
BlockedLabels []string // A list of metric labels to block, with '.' as the separator
FilterDefault bool // Whether to allow metrics by default
}
// Metrics represents an instance of a metrics sink that can
// be used to emit metrics
type Metrics struct {
Config
lastNumGC uint32
sink MetricSink
filter *iradix.Tree
allowedLabels map[string]bool
blockedLabels map[string]bool
filterLock sync.RWMutex // Lock filters and allowedLabels/blockedLabels access
}
// Shared global metrics instance
var globalMetrics atomic.Value // *Metrics
func init() {
// Initialize to a blackhole sink to avoid errors
globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
}
// Default returns the shared global metrics instance.
func Default() *Metrics {
return globalMetrics.Load().(*Metrics)
}
// DefaultConfig provides a sane default configuration
func DefaultConfig(serviceName string) *Config {
c := &Config{
ServiceName: serviceName, // Use client provided service
HostName: "",
EnableHostname: true, // Enable hostname prefix
EnableRuntimeMetrics: true, // Enable runtime profiling
EnableTypePrefix: false, // Disable type prefix
TimerGranularity: time.Millisecond, // Timers are in milliseconds
ProfileInterval: time.Second, // Poll runtime every second
FilterDefault: true, // Don't filter metrics by default
}
// Try to get the hostname
name, _ := os.Hostname()
c.HostName = name
return c
}
// New is used to create a new instance of Metrics
func New(conf *Config, sink MetricSink) (*Metrics, error) {
met := &Metrics{}
met.Config = *conf
met.sink = sink
met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels)
// Start the runtime collector
if conf.EnableRuntimeMetrics {
go met.collectStats()
}
return met, nil
}
// NewGlobal is the same as New, but it assigns the metrics object to be
// used globally as well as returning it.
func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) {
metrics, err := New(conf, sink)
if err == nil {
globalMetrics.Store(metrics)
}
return metrics, err
}
// Proxy all the methods to the globalMetrics instance
func SetGauge(key []string, val float32) {
globalMetrics.Load().(*Metrics).SetGauge(key, val)
}
func SetGaugeWithLabels(key []string, val float32, labels []Label) {
globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels)
}
func EmitKey(key []string, val float32) {
globalMetrics.Load().(*Metrics).EmitKey(key, val)
}
func IncrCounter(key []string, val float32) {
globalMetrics.Load().(*Metrics).IncrCounter(key, val)
}
func IncrCounterWithLabels(key []string, val float32, labels []Label) {
globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels)
}
func AddSample(key []string, val float32) {
globalMetrics.Load().(*Metrics).AddSample(key, val)
}
func AddSampleWithLabels(key []string, val float32, labels []Label) {
globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels)
}
func MeasureSince(key []string, start time.Time) {
globalMetrics.Load().(*Metrics).MeasureSince(key, start)
}
func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels)
}
func UpdateFilter(allow, block []string) {
globalMetrics.Load().(*Metrics).UpdateFilter(allow, block)
}
// UpdateFilterAndLabels sets the allow/block prefixes of metrics, while
// allowedLabels and blockedLabels - when not nil - allow filtering labels
// globally in order to block/allow specific labels (especially useful when a
// given label has a large number of values). See README.md for more information about usage.
func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels)
}
// Shutdown disables metric collection, then blocks while attempting to flush metrics to storage.
// WARNING: Not all MetricSink backends support this functionality, and calling this will cause them to leak resources.
// This is intended for use immediately prior to application exit.
func Shutdown() {
m := globalMetrics.Load().(*Metrics)
// Swap whatever MetricSink is currently active with a BlackholeSink. Callers must not have a
// reason to expect that calls to the library will successfully collect metrics after Shutdown
// has been called.
globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
m.Shutdown()
}
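// globalSetupSketch is a minimal sketch of typical initialization: an
// in-memory sink behind the shared global instance. The service name
// "myapp" is a placeholder.
func globalSetupSketch() error {
	inm := NewInmemSink(10*time.Second, time.Minute)
	if _, err := NewGlobal(DefaultConfig("myapp"), inm); err != nil {
		return err
	}
	// The package-level helpers now proxy to the configured instance.
	IncrCounter([]string{"boot"}, 1)
	MeasureSince([]string{"boot_time"}, time.Now())
	return nil
}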

vendor/github.com/armon/go-metrics/statsd.go generated vendored Normal file

@@ -0,0 +1,184 @@
package metrics
import (
"bytes"
"fmt"
"log"
"net"
"net/url"
"strings"
"time"
)
const (
// statsdMaxLen is the maximum size of a packet
// to send to statsd
statsdMaxLen = 1400
)
// StatsdSink provides a MetricSink that can be used
// with a statsite or statsd metrics server. It uses
// only UDP packets, while StatsiteSink uses TCP.
type StatsdSink struct {
addr string
metricQueue chan string
}
// NewStatsdSinkFromURL creates a StatsdSink from a URL. It is used
// (and tested) from NewMetricSinkFromURL.
func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) {
return NewStatsdSink(u.Host)
}
// NewStatsdSink is used to create a new StatsdSink
func NewStatsdSink(addr string) (*StatsdSink, error) {
s := &StatsdSink{
addr: addr,
metricQueue: make(chan string, 4096),
}
go s.flushMetrics()
return s, nil
}
// Shutdown is used to stop flushing to statsd
func (s *StatsdSink) Shutdown() {
close(s.metricQueue)
}
func (s *StatsdSink) SetGauge(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsdSink) EmitKey(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
}
func (s *StatsdSink) IncrCounter(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsdSink) AddSample(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
// Flattens the key for formatting, removes spaces
func (s *StatsdSink) flattenKey(parts []string) string {
joined := strings.Join(parts, ".")
return strings.Map(func(r rune) rune {
switch r {
case ':':
fallthrough
case ' ':
return '_'
default:
return r
}
}, joined)
}
// Flattens the key along with labels for formatting, removes spaces
func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string {
for _, label := range labels {
parts = append(parts, label.Value)
}
return s.flattenKey(parts)
}
// Does a non-blocking push to the metrics queue
func (s *StatsdSink) pushMetric(m string) {
select {
case s.metricQueue <- m:
default:
}
}
// Flushes metrics
func (s *StatsdSink) flushMetrics() {
var sock net.Conn
var err error
var wait <-chan time.Time
ticker := time.NewTicker(flushInterval)
defer ticker.Stop()
CONNECT:
// Create a buffer
buf := bytes.NewBuffer(nil)
// Attempt to connect
sock, err = net.Dial("udp", s.addr)
if err != nil {
log.Printf("[ERR] Error connecting to statsd! Err: %s", err)
goto WAIT
}
for {
select {
case metric, ok := <-s.metricQueue:
// Get a metric from the queue
if !ok {
goto QUIT
}
// Check if this would overflow the packet size
if len(metric)+buf.Len() > statsdMaxLen {
_, err := sock.Write(buf.Bytes())
buf.Reset()
if err != nil {
log.Printf("[ERR] Error writing to statsd! Err: %s", err)
goto WAIT
}
}
// Append to the buffer
buf.WriteString(metric)
case <-ticker.C:
if buf.Len() == 0 {
continue
}
_, err := sock.Write(buf.Bytes())
buf.Reset()
if err != nil {
log.Printf("[ERR] Error flushing to statsd! Err: %s", err)
goto WAIT
}
}
}
WAIT:
// Wait for a while
wait = time.After(time.Duration(5) * time.Second)
for {
select {
// Dequeue the messages to avoid backlog
case _, ok := <-s.metricQueue:
if !ok {
goto QUIT
}
case <-wait:
goto CONNECT
}
}
QUIT:
s.metricQueue = nil
}

vendor/github.com/armon/go-metrics/statsite.go generated vendored Normal file

@@ -0,0 +1,172 @@
package metrics
import (
"bufio"
"fmt"
"log"
"net"
"net/url"
"strings"
"time"
)
const (
// We force flush the statsite metrics after this period of
// inactivity. Prevents stats from getting stuck in a buffer
// forever.
flushInterval = 100 * time.Millisecond
)
// NewStatsiteSinkFromURL creates a StatsiteSink from a URL. It is used
// (and tested) from NewMetricSinkFromURL.
func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) {
return NewStatsiteSink(u.Host)
}
// StatsiteSink provides a MetricSink that can be used with a
// statsite metrics server
type StatsiteSink struct {
addr string
metricQueue chan string
}
// NewStatsiteSink is used to create a new StatsiteSink
func NewStatsiteSink(addr string) (*StatsiteSink, error) {
s := &StatsiteSink{
addr: addr,
metricQueue: make(chan string, 4096),
}
go s.flushMetrics()
return s, nil
}
// Shutdown is used to stop flushing to statsite
func (s *StatsiteSink) Shutdown() {
close(s.metricQueue)
}
func (s *StatsiteSink) SetGauge(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsiteSink) EmitKey(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
}
func (s *StatsiteSink) IncrCounter(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsiteSink) AddSample(key []string, val float32) {
flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
// Flattens the key for formatting, removes spaces
func (s *StatsiteSink) flattenKey(parts []string) string {
joined := strings.Join(parts, ".")
return strings.Map(func(r rune) rune {
switch r {
case ':':
fallthrough
case ' ':
return '_'
default:
return r
}
}, joined)
}
// Flattens the key along with labels for formatting, removes spaces
func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string {
for _, label := range labels {
parts = append(parts, label.Value)
}
return s.flattenKey(parts)
}
// Does a non-blocking push to the metrics queue
func (s *StatsiteSink) pushMetric(m string) {
select {
case s.metricQueue <- m:
default:
}
}
// Flushes metrics
func (s *StatsiteSink) flushMetrics() {
var sock net.Conn
var err error
var wait <-chan time.Time
var buffered *bufio.Writer
ticker := time.NewTicker(flushInterval)
defer ticker.Stop()
CONNECT:
// Attempt to connect
sock, err = net.Dial("tcp", s.addr)
if err != nil {
log.Printf("[ERR] Error connecting to statsite! Err: %s", err)
goto WAIT
}
// Create a buffered writer
buffered = bufio.NewWriter(sock)
for {
select {
case metric, ok := <-s.metricQueue:
// Get a metric from the queue
if !ok {
goto QUIT
}
// Try to send to statsite
_, err := buffered.Write([]byte(metric))
if err != nil {
log.Printf("[ERR] Error writing to statsite! Err: %s", err)
goto WAIT
}
case <-ticker.C:
if err := buffered.Flush(); err != nil {
log.Printf("[ERR] Error flushing to statsite! Err: %s", err)
goto WAIT
}
}
}
WAIT:
// Wait for a while
wait = time.After(time.Duration(5) * time.Second)
for {
select {
// Dequeue the messages to avoid backlog
case _, ok := <-s.metricQueue:
if !ok {
goto QUIT
}
case <-wait:
goto CONNECT
}
}
QUIT:
s.metricQueue = nil
}

vendor/github.com/boltdb/bolt/.gitignore generated vendored Normal file

@@ -0,0 +1,4 @@
*.prof
*.test
*.swp
/bin/

vendor/github.com/boltdb/bolt/LICENSE generated vendored Normal file

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013 Ben Johnson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/boltdb/bolt/Makefile generated vendored Normal file

@@ -0,0 +1,18 @@
BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
default: build
race:
@go test -v -race -test.run="TestSimulate_(100op|1000op)"
# go get github.com/kisielk/errcheck
errcheck:
@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
test:
@go test -v -cover .
@go test -v ./cmd/bolt
.PHONY: fmt test

vendor/github.com/boltdb/bolt/README.md generated vendored Normal file

@@ -0,0 +1,916 @@
Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg)
====
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
[LMDB project][lmdb]. The goal of the project is to provide a simple,
fast, and reliable database for projects that don't require a full database
server such as Postgres or MySQL.
Since Bolt is meant to be used as such a low-level piece of functionality,
simplicity is key. The API will be small and only focus on getting values
and setting values. That's it.
[hyc_symas]: https://twitter.com/hyc_symas
[lmdb]: http://symas.com/mdb/
## Project Status
Bolt is stable, the API is fixed, and the file format is fixed. Full unit
test coverage and randomized black box testing are used to ensure database
consistency and thread safety. Bolt is currently used in high-load production
environments serving databases as large as 1TB. Many companies such as
Shopify and Heroku use Bolt-backed services every day.
## Table of Contents
- [Getting Started](#getting-started)
- [Installing](#installing)
- [Opening a database](#opening-a-database)
- [Transactions](#transactions)
- [Read-write transactions](#read-write-transactions)
- [Read-only transactions](#read-only-transactions)
- [Batch read-write transactions](#batch-read-write-transactions)
- [Managing transactions manually](#managing-transactions-manually)
- [Using buckets](#using-buckets)
- [Using key/value pairs](#using-keyvalue-pairs)
- [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
- [Iterating over keys](#iterating-over-keys)
- [Prefix scans](#prefix-scans)
- [Range scans](#range-scans)
- [ForEach()](#foreach)
- [Nested buckets](#nested-buckets)
- [Database backups](#database-backups)
- [Statistics](#statistics)
- [Read-Only Mode](#read-only-mode)
- [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
- [Resources](#resources)
- [Comparison with other databases](#comparison-with-other-databases)
- [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
- [LevelDB, RocksDB](#leveldb-rocksdb)
- [LMDB](#lmdb)
- [Caveats & Limitations](#caveats--limitations)
- [Reading the Source](#reading-the-source)
- [Other Projects Using Bolt](#other-projects-using-bolt)
## Getting Started
### Installing
To start using Bolt, install Go and run `go get`:
```sh
$ go get github.com/boltdb/bolt/...
```
This will retrieve the library and install the `bolt` command line utility into
your `$GOBIN` path.
### Opening a database
The top-level object in Bolt is a `DB`. It is represented as a single file on
your disk and represents a consistent snapshot of your data.
To open your database, simply use the `bolt.Open()` function:
```go
package main
import (
"log"
"github.com/boltdb/bolt"
)
func main() {
// Open the my.db data file in your current directory.
// It will be created if it doesn't exist.
db, err := bolt.Open("my.db", 0600, nil)
if err != nil {
log.Fatal(err)
}
defer db.Close()
...
}
```
Please note that Bolt obtains a file lock on the data file so multiple processes
cannot open the same database at the same time. Opening an already open Bolt
database will cause it to hang until the other process closes it. To prevent
an indefinite wait you can pass a timeout option to the `Open()` function:
```go
db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
```
### Transactions
Bolt allows only one read-write transaction at a time but allows as many
read-only transactions as you want at a time. Each transaction has a consistent
view of the data as it existed when the transaction started.
Individual transactions and all objects created from them (e.g. buckets, keys)
are not thread safe. To work with data in multiple goroutines you must start
a transaction for each one or use locking to ensure only one goroutine accesses
a transaction at a time. Creating a transaction from the `DB` is thread safe.
Read-only transactions and read-write transactions should not depend on one
another and generally shouldn't be opened simultaneously in the same goroutine.
This can cause a deadlock as the read-write transaction needs to periodically
re-map the data file but it cannot do so while a read-only transaction is open.
#### Read-write transactions
To start a read-write transaction, you can use the `DB.Update()` function:
```go
err := db.Update(func(tx *bolt.Tx) error {
...
return nil
})
```
Inside the closure, you have a consistent view of the database. You commit the
transaction by returning `nil` at the end. You can also rollback the transaction
at any point by returning an error. All database operations are allowed inside
a read-write transaction.
Always check the return error as it will report any disk failures that can cause
your transaction to not complete. If you return an error within your closure
it will be passed through.
#### Read-only transactions
To start a read-only transaction, you can use the `DB.View()` function:
```go
err := db.View(func(tx *bolt.Tx) error {
...
return nil
})
```
You also get a consistent view of the database within this closure, however,
no mutating operations are allowed within a read-only transaction. You can only
retrieve buckets, retrieve values, and copy the database within a read-only
transaction.
#### Batch read-write transactions
Each `DB.Update()` waits for disk to commit the writes. This overhead
can be minimized by combining multiple updates with the `DB.Batch()`
function:
```go
err := db.Batch(func(tx *bolt.Tx) error {
...
return nil
})
```
Concurrent Batch calls are opportunistically combined into larger
transactions. Batch is only useful when there are multiple goroutines
calling it.
The trade-off is that `Batch` can call the given
function multiple times, if parts of the transaction fail. The
function must be idempotent and side effects must take effect only
after a successful return from `DB.Batch()`.
For example: don't display messages from inside the function, instead
set variables in the enclosing scope:
```go
var id uint64
err := db.Batch(func(tx *bolt.Tx) error {
// Find last key in bucket, decode as bigendian uint64, increment
// by one, encode back to []byte, and add new key.
...
id = newValue
return nil
})
if err != nil {
return ...
}
fmt.Printf("Allocated ID %d\n", id)
```
#### Managing transactions manually
The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
function. These helper functions will start the transaction, execute a function,
and then safely close your transaction if an error is returned. This is the
recommended way to use Bolt transactions.
However, sometimes you may want to manually start and end your transactions.
You can use the `DB.Begin()` function directly but **please** be sure to close
the transaction.
```go
// Start a writable transaction.
tx, err := db.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
// Use the transaction...
_, err := tx.CreateBucket([]byte("MyBucket"))
if err != nil {
return err
}
// Commit the transaction and check for error.
if err := tx.Commit(); err != nil {
return err
}
```
The first argument to `DB.Begin()` is a boolean stating if the transaction
should be writable.
### Using buckets
Buckets are collections of key/value pairs within the database. All keys in a
bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
function:
```go
db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket([]byte("MyBucket"))
if err != nil {
return fmt.Errorf("create bucket: %s", err)
}
return nil
})
```
You can also create a bucket only if it doesn't exist by using the
`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
function for all your top-level buckets after you open your database so you can
guarantee that they exist for future transactions.
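A minimal sketch of that pattern (the bucket name is a placeholder):
```go
db.Update(func(tx *bolt.Tx) error {
	_, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
	if err != nil {
		return fmt.Errorf("create bucket: %s", err)
	}
	return nil
})
```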
To delete a bucket, simply call the `Tx.DeleteBucket()` function.
### Using key/value pairs
To save a key/value pair to a bucket, use the `Bucket.Put()` function:
```go
db.Update(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("MyBucket"))
err := b.Put([]byte("answer"), []byte("42"))
return err
})
```
This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
bucket. To retrieve this value, we can use the `Bucket.Get()` function:
```go
db.View(func(tx *bolt.Tx) error {
b := tx.Bucket([]byte("MyBucket"))
v := b.Get([]byte("answer"))
fmt.Printf("The answer is: %s\n", v)
return nil
})
```
The `Get()` function does not return an error because its operation is
guaranteed to work (unless there is some kind of system failure). If the key
exists then it will return its byte slice value. If it doesn't exist then it
will return `nil`. It's important to note that you can have a zero-length value
set to a key which is different than the key not existing.
Use the `Bucket.Delete()` function to delete a key from the bucket.
Please note that values returned from `Get()` are only valid while the
transaction is open. If you need to use a value outside of the transaction
then you must use `copy()` to copy it to another byte slice.
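For example, a sketch of copying a value out so it stays valid after the
transaction closes:
```go
var answer []byte
db.View(func(tx *bolt.Tx) error {
	if v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer")); v != nil {
		answer = make([]byte, len(v))
		copy(answer, v) // safe to use once the transaction has closed
	}
	return nil
})
```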
### Autoincrementing integer for the bucket
By using the `NextSequence()` function, you can let Bolt determine a sequence
which can be used as the unique identifier for your key/value pairs. See the
example below.
```go
// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
func (s *Store) CreateUser(u *User) error {
return s.db.Update(func(tx *bolt.Tx) error {
// Retrieve the users bucket.
// This should be created when the DB is first opened.
b := tx.Bucket([]byte("users"))
// Generate ID for the user.
// This returns an error only if the Tx is closed or not writeable.
// That can't happen in an Update() call so I ignore the error check.
id, _ := b.NextSequence()
u.ID = int(id)
// Marshal user data into bytes.
buf, err := json.Marshal(u)
if err != nil {
return err
}
// Persist bytes to users bucket.
return b.Put(itob(u.ID), buf)
})
}
// itob returns an 8-byte big endian representation of v.
func itob(v int) []byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, uint64(v))
return b
}
type User struct {
ID int
...
}
```
### Iterating over keys
Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
iteration over these keys extremely fast. To iterate over keys we'll use a
`Cursor`:
```go
db.View(func(tx *bolt.Tx) error {
// Assume bucket exists and has keys
b := tx.Bucket([]byte("MyBucket"))
c := b.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
fmt.Printf("key=%s, value=%s\n", k, v)
}
return nil
})
```
The cursor allows you to move to a specific point in the list of keys and move
forward or backward through the keys one at a time.
The following functions are available on the cursor:
```
First() Move to the first key.
Last() Move to the last key.
Seek() Move to a specific key.
Next() Move to the next key.
Prev() Move to the previous key.
```
Each of those functions has a return signature of `(key []byte, value []byte)`.
When you have iterated to the end of the cursor then `Next()` will return a
`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
before calling `Next()` or `Prev()`. If you do not seek to a position then
these functions will return a `nil` key.
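For example, a sketch of iterating in reverse by seeking to the end first:
```go
db.View(func(tx *bolt.Tx) error {
	c := tx.Bucket([]byte("MyBucket")).Cursor()
	for k, v := c.Last(); k != nil; k, v = c.Prev() {
		fmt.Printf("key=%s, value=%s\n", k, v)
	}
	return nil
})
```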
During iteration, if the key is non-`nil` but the value is `nil`, that means
the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
access the sub-bucket.
#### Prefix scans
To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
```go
db.View(func(tx *bolt.Tx) error {
// Assume bucket exists and has keys
c := tx.Bucket([]byte("MyBucket")).Cursor()
prefix := []byte("1234")
for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
fmt.Printf("key=%s, value=%s\n", k, v)
}
return nil
})
```
#### Range scans
Another common use case is scanning over a range such as a time range. If you
use a sortable time encoding such as RFC3339 then you can query a specific
date range like this:
```go
db.View(func(tx *bolt.Tx) error {
// Assume our events bucket exists and has RFC3339 encoded time keys.
c := tx.Bucket([]byte("Events")).Cursor()
// Our time range spans the 90's decade.
min := []byte("1990-01-01T00:00:00Z")
max := []byte("2000-01-01T00:00:00Z")
// Iterate over the 90's.
for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
fmt.Printf("%s: %s\n", k, v)
}
return nil
})
```
Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
#### ForEach()
You can also use the function `ForEach()` if you know you'll be iterating over
all the keys in a bucket:
```go
db.View(func(tx *bolt.Tx) error {
// Assume bucket exists and has keys
b := tx.Bucket([]byte("MyBucket"))
b.ForEach(func(k, v []byte) error {
fmt.Printf("key=%s, value=%s\n", k, v)
return nil
})
return nil
})
```
Please note that keys and values in `ForEach()` are only valid while
the transaction is open. If you need to use a key or value outside of
the transaction, you must use `copy()` to copy it to another byte
slice.
### Nested buckets
You can also store a bucket in a key to create nested buckets. The API is the
same as the bucket management API on the `DB` object:
```go
func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
func (*Bucket) DeleteBucket(key []byte) error
```
Say you had a multi-tenant application where the root-level bucket was the account bucket. Inside this bucket is a sequence of accounts, which are themselves buckets. And inside each account bucket you could have many buckets pertaining to the account itself (Users, Notes, etc.), isolating the information into logical groupings.
```go
// createUser creates a new user in the given account.
func createUser(accountID int, u *User) error {
// Start the transaction.
tx, err := db.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
// Retrieve the root bucket for the account.
// Assume this has already been created when the account was set up.
root := tx.Bucket([]byte(strconv.FormatUint(uint64(accountID), 10)))
// Setup the users bucket.
bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
if err != nil {
return err
}
// Generate an ID for the new user.
userID, err := bkt.NextSequence()
if err != nil {
return err
}
u.ID = userID
// Marshal and save the encoded user.
if buf, err := json.Marshal(u); err != nil {
return err
} else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
return err
}
// Commit the transaction.
if err := tx.Commit(); err != nil {
return err
}
return nil
}
```
### Database backups
Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
function to write a consistent view of the database to a writer. If you call
this from a read-only transaction, it will perform a hot backup and not block
your other database reads and writes.
By default, it will use a regular file handle which will utilize the operating
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
documentation for information about optimizing for larger-than-RAM datasets.
One common use case is to backup over HTTP so you can use tools like `cURL` to
do database backups:
```go
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
err := db.View(func(tx *bolt.Tx) error {
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
_, err := tx.WriteTo(w)
return err
})
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
```
Then you can backup using this command:
```sh
$ curl http://localhost/backup > my.db
```
Or you can open your browser to `http://localhost/backup` and it will download
automatically.
If you want to backup to another file you can use the `Tx.CopyFile()` helper
function.
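For example (the destination path is a placeholder):
```go
err := db.View(func(tx *bolt.Tx) error {
	return tx.CopyFile("/backups/my.db", 0600)
})
```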
### Statistics
The database keeps a running count of many of the internal operations it
performs so you can better understand what's going on. By grabbing a snapshot
of these stats at two points in time we can see what operations were performed
in that time range.
For example, we could start a goroutine to log stats every 10 seconds:
```go
go func() {
// Grab the initial stats.
prev := db.Stats()
for {
// Wait for 10s.
time.Sleep(10 * time.Second)
// Grab the current stats and diff them.
stats := db.Stats()
diff := stats.Sub(&prev)
// Encode stats to JSON and print to STDERR.
json.NewEncoder(os.Stderr).Encode(diff)
// Save stats for the next loop.
prev = stats
}
}()
```
It's also useful to pipe these stats to a service such as statsd for monitoring
or to provide an HTTP endpoint that will perform a fixed-length sample.
### Read-Only Mode
Sometimes it is useful to create a shared, read-only Bolt database. To do this,
set the `Options.ReadOnly` flag when opening your database. Read-only mode
uses a shared lock to allow multiple processes to read from the database but
it will block any processes from opening the database in read-write mode.
```go
db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
if err != nil {
log.Fatal(err)
}
```
### Mobile Use (iOS/Android)
Bolt is able to run on mobile devices by leveraging the binding feature of the
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
contain your database logic and a reference to a `*bolt.DB` with an initializing
constructor that takes in a filepath where the database file will be stored.
Neither Android nor iOS require extra permissions or cleanup from using this method.
```go
func NewBoltDB(filepath string) *BoltDB {
db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
if err != nil {
log.Fatal(err)
}
return &BoltDB{db}
}
type BoltDB struct {
db *bolt.DB
...
}
func (b *BoltDB) Path() string {
return b.db.Path()
}
func (b *BoltDB) Close() {
b.db.Close()
}
```
Database logic should be defined as methods on this wrapper struct.
To initialize this struct from the native language (both platforms now sync
their local storage to the cloud, so these snippets disable that functionality
for the database file):
#### Android
```java
String path;
if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
path = getNoBackupFilesDir().getAbsolutePath();
} else{
path = getFilesDir().getAbsolutePath();
}
Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
```
#### iOS
```objc
- (void)demo {
NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
NSUserDomainMask,
YES) objectAtIndex:0];
GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
[self addSkipBackupAttributeToItemAtPath:demo.path];
//Some DB Logic would go here
[demo close];
}
- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
{
NSURL* URL= [NSURL fileURLWithPath: filePathString];
assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
NSError *error = nil;
BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
forKey: NSURLIsExcludedFromBackupKey error: &error];
if(!success){
NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
}
return success;
}
```
## Resources
For more information on getting started with Bolt, check out the following articles:
* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
## Comparison with other databases
### Postgres, MySQL, & other relational databases
Relational databases structure data into rows and are only accessible through
the use of SQL. This approach provides flexibility in how you store and query
your data but also incurs overhead in parsing and planning SQL statements. Bolt
accesses all data by a byte slice key. This makes Bolt fast to read and write
data by key but provides no built-in support for joining values together.
Most relational databases (with the exception of SQLite) are standalone servers
that run separately from your application. This gives your systems
flexibility to connect multiple application servers to a single database
server but also adds overhead in serializing and transporting data over the
network. Bolt runs as a library included in your application so all data access
has to go through your application's process. This brings data closer to your
application but limits multi-process access to the data.
### LevelDB, RocksDB
LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
they are libraries bundled into the application, however, their underlying
structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
random writes by using a write ahead log and multi-tiered, sorted files called
SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
have trade-offs.
If you require a high random write throughput (>10,000 w/sec) or you need to use
spinning disks then LevelDB could be a good choice. If your application is
read-heavy or does a lot of range scans then Bolt could be a good choice.
One other important consideration is that LevelDB does not have transactions.
It supports batch writing of key/values pairs and it supports read snapshots
but it will not give you the ability to do a compare-and-swap operation safely.
Bolt supports fully serializable ACID transactions.
### LMDB
Bolt was originally a port of LMDB so it is architecturally similar. Both use
a B+tree, have ACID semantics with fully serializable transactions, and support
lock-free MVCC using a single writer and multiple readers.
The two projects have somewhat diverged. LMDB heavily focuses on raw performance
while Bolt has focused on simplicity and ease of use. For example, LMDB allows
several unsafe actions such as direct writes for the sake of performance. Bolt
opts to disallow actions which can leave the database in a corrupted state. The
only exception to this in Bolt is `DB.NoSync`.
There are also a few differences in API. LMDB requires a maximum mmap size when
opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
automatically. LMDB overloads the getter and setter functions with multiple
flags whereas Bolt splits these specialized cases into their own functions.
## Caveats & Limitations
It's important to pick the right tool for the job and Bolt is no exception.
Here are a few things to note when evaluating and using Bolt:
* Bolt is good for read-intensive workloads. Sequential write performance is
also fast but random writes can be slow. You can use `DB.Batch()` or add a
write-ahead log to help mitigate this issue (see the sketch after this list).
* Bolt uses a B+tree internally so there can be a lot of random page access.
SSDs provide a significant performance boost over spinning disks.
* Try to avoid long-running read transactions. Bolt uses copy-on-write so
old pages cannot be reclaimed while an old transaction is using them.
* Byte slices returned from Bolt are only valid during a transaction. Once the
transaction has been committed or rolled back, the memory they point to can be
reused by a new page or unmapped from virtual memory, and accessing it will
trigger an `unexpected fault address` panic. Copy any value you need to keep
(see the sketch after this list).
* Bolt uses an exclusive write lock on the database file so it cannot be
shared by multiple processes.
* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
buckets that have random inserts will cause your database to have very poor
page utilization.
* Use larger buckets in general. Smaller buckets cause poor page utilization
once they grow larger than the page size (typically 4KB).
* Bulk loading a lot of random writes into a new bucket can be slow as the
page will not split until the transaction is committed. Randomly inserting
more than 100,000 key/value pairs into a single new bucket in a single
transaction is not advised.
* Bolt uses a memory-mapped file so the underlying operating system handles the
caching of the data. Typically, the OS will cache as much of the file as it
can in memory and will release memory as needed to other processes. This means
that Bolt can show very high memory usage when working with large databases;
this is expected, and the OS will reclaim that memory when other processes
need it. Bolt can handle databases much larger than the available physical
RAM, provided its memory-map fits in the process virtual address space. This
may be problematic on 32-bit systems.
* The data structures in the Bolt database are memory mapped so the data file
will be endian specific. This means that you cannot copy a Bolt file from a
little endian machine to a big endian machine and have it work. For most
users this is not a concern since most modern CPUs are little endian.
* Because of the way pages are laid out on disk, Bolt cannot truncate data files
and return free pages back to the disk. Instead, Bolt maintains a free list
of unused pages within its data file. These free pages can be reused by later
transactions. This works well for many use cases as databases generally tend
to grow. However, it's important to note that deleting large chunks of data
will not allow you to reclaim that space on disk.
For more information on page allocation, [see this comment][page-allocation].
[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
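Two of the caveats above translate directly into code: copying byte slices out
of a transaction before using them, and coalescing writers with `DB.Batch()`.
A sketch with illustrative bucket and key names:

```go
package example

import "github.com/boltdb/bolt"

// lastEvent copies the stored value before the read transaction closes;
// returning the mmap-backed slice directly would risk an
// "unexpected fault address" panic later.
func lastEvent(db *bolt.DB) ([]byte, error) {
	var out []byte
	err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("events"))
		if b == nil {
			return nil // bucket not created yet
		}
		if v := b.Get([]byte("last")); v != nil {
			out = append([]byte(nil), v...) // copy out of the mmap
		}
		return nil
	})
	return out, err
}

// record uses Batch so that many concurrent callers share one commit (and one
// fsync), softening the random-write cost noted above. Batch may call the
// function more than once, so it must be idempotent.
func record(db *bolt.DB, key, value []byte) error {
	return db.Batch(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("events"))
		if err != nil {
			return err
		}
		return b.Put(key, value)
	})
}
```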
## Reading the Source
Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
transactional key/value database so it can be a good starting point for people
interested in how databases work.
The best places to start are the main entry points into Bolt; a short usage
sketch follows the list:
- `Open()` - Initializes the reference to the database. It's responsible for
creating the database if it doesn't exist, obtaining an exclusive lock on the
file, reading the meta pages, & memory-mapping the file.
- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
value of the `writable` argument. This requires briefly obtaining the "meta"
lock to keep track of open transactions. Only one read-write transaction can
exist at a time so the "rwlock" is acquired during the life of a read-write
transaction.
- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
arguments, a cursor is used to traverse the B+tree to the page and position
where the key & value will be written. Once the position is found, the bucket
materializes the underlying page and the page's parent pages into memory as
"nodes". These nodes are where mutations occur during read-write transactions.
These changes get flushed to disk during commit.
- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
to move to the page & position of a key/value pair. During a read-only
transaction, the key and value data is returned as a direct reference to the
underlying mmap file so there's no allocation overhead. For read-write
transactions, this data may reference the mmap file or one of the in-memory
node values.
- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
or in-memory nodes. It can seek to a specific key, move to the first or last
value, or it can move forward or backward. The cursor handles the movement up
and down the B+tree transparently to the end user.
- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
into pages to be written to disk. Writing to disk then occurs in two phases.
First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
new meta page with an incremented transaction ID is written and another
`fsync()` occurs. This two-phase write ensures that partially written data
pages are ignored in the event of a crash since the meta page pointing to them
is never written. Partially written meta pages are detected and discarded
because each meta page carries a checksum.
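A short program that exercises these entry points in order may help; `Update`
and `View` wrap `DB.Begin()`, `Tx.Commit()`, and `Tx.Rollback()` for you (file
and bucket names below are arbitrary):

```go
package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Open(): creates the file if needed, acquires the file lock,
	// reads the meta pages, and memory-maps the file.
	db, err := bolt.Open("tour.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Update starts a read-write transaction via DB.Begin(true).
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("people"))
		if err != nil {
			return err
		}
		// Bucket.Put(): cursor traversal plus node materialization; the
		// dirty nodes are flushed by Tx.Commit() when Update returns.
		return b.Put([]byte("alice"), []byte("42"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// View starts a read-only transaction via DB.Begin(false).
	err = db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("people"))
		if b == nil {
			return nil
		}
		// Bucket.Get(): a zero-copy reference into the mmap.
		fmt.Printf("alice = %s\n", b.Get([]byte("alice")))
		// Cursor: ordered traversal over the bucket's B+tree.
		c := b.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s -> %s\n", k, v)
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```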
If you have additional notes that could be helpful for others, please submit
them via pull request.
## Other Projects Using Bolt
Below is a list of public, open source projects that use Bolt:
* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web based GUI for BoltDB files.
* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed.
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, exposes a JSON-over-HTTP API, supports ISO 8601 duration notation, and handles dependent jobs.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects
backed by boltdb.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining
simple tx and key scans.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to the [RequestBin](http://requestb.in/) service
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang, using BoltDB for persistent key/value storage and the high-performance HTTPRouter for routing.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build -> test -> release) tool, with no external dependencies
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework.
If you are using Bolt in a project please send a pull request to add it to the list.

18
vendor/github.com/boltdb/bolt/appveyor.yml generated vendored Normal file
View File

@@ -0,0 +1,18 @@
version: "{build}"
os: Windows Server 2012 R2
clone_folder: c:\gopath\src\github.com\boltdb\bolt
environment:
GOPATH: c:\gopath
install:
- echo %PATH%
- echo %GOPATH%
- go version
- go env
- go get -v -t ./...
build_script:
- go test -v ./...

10
vendor/github.com/boltdb/bolt/bolt_386.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

10
vendor/github.com/boltdb/bolt/bolt_amd64.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

28
vendor/github.com/boltdb/bolt/bolt_arm.go generated vendored Normal file
View File

@@ -0,0 +1,28 @@
package bolt
import "unsafe"
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool
func init() {
// Simple check to see whether this arch handles unaligned load/stores
// correctly.
// ARM9 and older devices require load/stores to be from/to aligned
// addresses. If not, the lower 2 bits are cleared and that address is
// read in a jumbled up order.
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
brokenUnaligned = val != 0x11222211
}

12
vendor/github.com/boltdb/bolt/bolt_arm64.go generated vendored Normal file
View File

@@ -0,0 +1,12 @@
// +build arm64
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

10
vendor/github.com/boltdb/bolt/bolt_linux.go generated vendored Normal file
View File

@@ -0,0 +1,10 @@
package bolt
import (
"syscall"
)
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return syscall.Fdatasync(int(db.file.Fd()))
}

27
vendor/github.com/boltdb/bolt/bolt_openbsd.go generated vendored Normal file
View File

@@ -0,0 +1,27 @@
package bolt
import (
"syscall"
"unsafe"
)
const (
msAsync = 1 << iota // perform asynchronous writes
msSync // perform synchronous writes
msInvalidate // invalidate cached data
)
func msync(db *DB) error {
_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
if errno != 0 {
return errno
}
return nil
}
func fdatasync(db *DB) error {
if db.data != nil {
return msync(db)
}
return db.file.Sync()
}

9
vendor/github.com/boltdb/bolt/bolt_ppc.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
// +build ppc
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

12
vendor/github.com/boltdb/bolt/bolt_ppc64.go generated vendored Normal file
View File

@@ -0,0 +1,12 @@
// +build ppc64
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

12
vendor/github.com/boltdb/bolt/bolt_ppc64le.go generated vendored Normal file
View File

@@ -0,0 +1,12 @@
// +build ppc64le
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

12
vendor/github.com/boltdb/bolt/bolt_s390x.go generated vendored Normal file
View File

@@ -0,0 +1,12 @@
// +build s390x
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

89
vendor/github.com/boltdb/bolt/bolt_unix.go generated vendored Normal file
View File

@@ -0,0 +1,89 @@
// +build !windows,!plan9,!solaris
package bolt
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
)
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
flag := syscall.LOCK_SH
if exclusive {
flag = syscall.LOCK_EX
}
// Otherwise attempt to obtain an exclusive lock.
err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
if err == nil {
return nil
} else if err != syscall.EWOULDBLOCK {
return err
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
}
// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
// Map the data file to memory.
b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
if err != nil {
return err
}
// Advise the kernel that the mmap is accessed randomly.
if err := madvise(b, syscall.MADV_RANDOM); err != nil {
return fmt.Errorf("madvise: %s", err)
}
// Save the original byte slice and convert to a byte array pointer.
db.dataref = b
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
db.datasz = sz
return nil
}
// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
// Ignore the unmap if we have no mapped data.
if db.dataref == nil {
return nil
}
// Unmap using the original byte slice.
err := syscall.Munmap(db.dataref)
db.dataref = nil
db.data = nil
db.datasz = 0
return err
}
// NOTE: This function is copied from stdlib because it is not available on darwin.
func madvise(b []byte, advice int) (err error) {
_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
if e1 != 0 {
err = e1
}
return
}

90
vendor/github.com/boltdb/bolt/bolt_unix_solaris.go generated vendored Normal file
View File

@@ -0,0 +1,90 @@
package bolt
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
"golang.org/x/sys/unix"
)
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Pid = 0
lock.Whence = 0
if exclusive {
lock.Type = syscall.F_WRLCK
} else {
lock.Type = syscall.F_RDLCK
}
err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
if err == nil {
return nil
} else if err != syscall.EAGAIN {
return err
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Type = syscall.F_UNLCK
lock.Whence = 0
return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
}
// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
// Map the data file to memory.
b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
if err != nil {
return err
}
// Advise the kernel that the mmap is accessed randomly.
if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
return fmt.Errorf("madvise: %s", err)
}
// Save the original byte slice and convert to a byte array pointer.
db.dataref = b
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
db.datasz = sz
return nil
}
// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
// Ignore the unmap if we have no mapped data.
if db.dataref == nil {
return nil
}
// Unmap using the original byte slice.
err := unix.Munmap(db.dataref)
db.dataref = nil
db.data = nil
db.datasz = 0
return err
}

144
vendor/github.com/boltdb/bolt/bolt_windows.go generated vendored Normal file
View File

@@ -0,0 +1,144 @@
package bolt
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
)
// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procLockFileEx = modkernel32.NewProc("LockFileEx")
procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
)
const (
lockExt = ".lock"
// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
flagLockExclusive = 2
flagLockFailImmediately = 1
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
errLockViolation syscall.Errno = 0x21
)
func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
if r == 0 {
return err
}
return nil
}
func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
if r == 0 {
return err
}
return nil
}
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return db.file.Sync()
}
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
// Create a separate lock file on windows because a process
// cannot share an exclusive lock on the same file. This is
// needed during Tx.WriteTo().
f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
if err != nil {
return err
}
db.lockfile = f
var t time.Time
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
var flag uint32 = flagLockFailImmediately
if exclusive {
flag |= flagLockExclusive
}
err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
if err == nil {
return nil
} else if err != errLockViolation {
return err
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
db.lockfile.Close()
os.Remove(db.path + lockExt)
return err
}
// mmap memory maps a DB's data file.
// Based on: https://github.com/edsrzf/mmap-go
func mmap(db *DB, sz int) error {
if !db.readOnly {
// Truncate the database to the size of the mmap.
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("truncate: %s", err)
}
}
// Open a file mapping handle. CreateFileMapping takes the maximum mapping
// size split into high and low 32-bit halves.
sizehi := uint32(sz >> 32)
sizelo := uint32(sz) & 0xffffffff
h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
if h == 0 {
return os.NewSyscallError("CreateFileMapping", errno)
}
// Create the memory map.
addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
if addr == 0 {
return os.NewSyscallError("MapViewOfFile", errno)
}
// Close mapping handle.
if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
return os.NewSyscallError("CloseHandle", err)
}
// Convert to a byte array.
db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
db.datasz = sz
return nil
}
// munmap unmaps a pointer from a file.
// Based on: https://github.com/edsrzf/mmap-go
func munmap(db *DB) error {
if db.data == nil {
return nil
}
addr := (uintptr)(unsafe.Pointer(&db.data[0]))
if err := syscall.UnmapViewOfFile(addr); err != nil {
return os.NewSyscallError("UnmapViewOfFile", err)
}
return nil
}

8
vendor/github.com/boltdb/bolt/boltsync_unix.go generated vendored Normal file
View File

@@ -0,0 +1,8 @@
// +build !windows,!plan9,!linux,!openbsd
package bolt
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return db.file.Sync()
}

777
vendor/github.com/boltdb/bolt/bucket.go generated vendored Normal file
View File

@@ -0,0 +1,777 @@
package bolt
import (
"bytes"
"fmt"
"unsafe"
)
const (
// MaxKeySize is the maximum length of a key, in bytes.
MaxKeySize = 32768
// MaxValueSize is the maximum length of a value, in bytes.
MaxValueSize = (1 << 31) - 2
)
const (
maxUint = ^uint(0)
minUint = 0
maxInt = int(^uint(0) >> 1)
minInt = -maxInt - 1
)
const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
const (
minFillPercent = 0.1
maxFillPercent = 1.0
)
// DefaultFillPercent is the percentage that split pages are filled.
// This value can be changed by setting Bucket.FillPercent.
const DefaultFillPercent = 0.5
// Bucket represents a collection of key/value pairs inside the database.
type Bucket struct {
*bucket
tx *Tx // the associated transaction
buckets map[string]*Bucket // subbucket cache
page *page // inline page reference
rootNode *node // materialized node for the root page.
nodes map[pgid]*node // node cache
// Sets the threshold for filling nodes when they split. By default,
// the bucket will fill to 50% but it can be useful to increase this
// amount if you know that your write workloads are mostly append-only.
//
// This is non-persisted across transactions so it must be set in every Tx.
FillPercent float64
}
// bucket represents the on-file representation of a bucket.
// This is stored as the "value" of a bucket key. If the bucket is small enough,
// then its root page can be stored inline in the "value", after the bucket
// header. In the case of inline buckets, the "root" will be 0.
type bucket struct {
root pgid // page id of the bucket's root-level page
sequence uint64 // monotonically incrementing, used by NextSequence()
}
// newBucket returns a new bucket associated with a transaction.
func newBucket(tx *Tx) Bucket {
var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
if tx.writable {
b.buckets = make(map[string]*Bucket)
b.nodes = make(map[pgid]*node)
}
return b
}
// Tx returns the tx of the bucket.
func (b *Bucket) Tx() *Tx {
return b.tx
}
// Root returns the root of the bucket.
func (b *Bucket) Root() pgid {
return b.root
}
// Writable returns whether the bucket is writable.
func (b *Bucket) Writable() bool {
return b.tx.writable
}
// Cursor creates a cursor associated with the bucket.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (b *Bucket) Cursor() *Cursor {
// Update transaction statistics.
b.tx.stats.CursorCount++
// Allocate and return a cursor.
return &Cursor{
bucket: b,
stack: make([]elemRef, 0),
}
}
// Bucket retrieves a nested bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) Bucket(name []byte) *Bucket {
if b.buckets != nil {
if child := b.buckets[string(name)]; child != nil {
return child
}
}
// Move cursor to key.
c := b.Cursor()
k, v, flags := c.seek(name)
// Return nil if the key doesn't exist or it is not a bucket.
if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
return nil
}
// Otherwise create a bucket and cache it.
var child = b.openBucket(v)
if b.buckets != nil {
b.buckets[string(name)] = child
}
return child
}
// Helper method that re-interprets a sub-bucket value
// from a parent into a Bucket
func (b *Bucket) openBucket(value []byte) *Bucket {
var child = newBucket(b.tx)
// If unaligned load/stores are broken on this arch and value is
// unaligned simply clone to an aligned byte array.
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
if unaligned {
value = cloneBytes(value)
}
// If this is a writable transaction then we need to copy the bucket entry.
// Read-only transactions can point directly at the mmap entry.
if b.tx.writable && !unaligned {
child.bucket = &bucket{}
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
} else {
child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
}
// Save a reference to the inline page if the bucket is inline.
if child.root == 0 {
child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
}
return &child
}
// CreateBucket creates a new bucket at the given key and returns the new bucket.
// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
if b.tx.db == nil {
return nil, ErrTxClosed
} else if !b.tx.writable {
return nil, ErrTxNotWritable
} else if len(key) == 0 {
return nil, ErrBucketNameRequired
}
// Move cursor to correct position.
c := b.Cursor()
k, _, flags := c.seek(key)
// Return an error if there is an existing key.
if bytes.Equal(key, k) {
if (flags & bucketLeafFlag) != 0 {
return nil, ErrBucketExists
}
return nil, ErrIncompatibleValue
}
// Create empty, inline bucket.
var bucket = Bucket{
bucket: &bucket{},
rootNode: &node{isLeaf: true},
FillPercent: DefaultFillPercent,
}
var value = bucket.write()
// Insert into node.
key = cloneBytes(key)
c.node().put(key, key, value, 0, bucketLeafFlag)
// Since subbuckets are not allowed on inline buckets, we need to
// dereference the inline page, if it exists. This will cause the bucket
// to be treated as a regular, non-inline bucket for the rest of the tx.
b.page = nil
return b.Bucket(key), nil
}
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
child, err := b.CreateBucket(key)
if err == ErrBucketExists {
return b.Bucket(key), nil
} else if err != nil {
return nil, err
}
return child, nil
}
// DeleteBucket deletes a bucket at the given key.
// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
func (b *Bucket) DeleteBucket(key []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Move cursor to correct position.
c := b.Cursor()
k, _, flags := c.seek(key)
// Return an error if bucket doesn't exist or is not a bucket.
if !bytes.Equal(key, k) {
return ErrBucketNotFound
} else if (flags & bucketLeafFlag) == 0 {
return ErrIncompatibleValue
}
// Recursively delete all child buckets.
child := b.Bucket(key)
err := child.ForEach(func(k, v []byte) error {
if v == nil {
if err := child.DeleteBucket(k); err != nil {
return fmt.Errorf("delete bucket: %s", err)
}
}
return nil
})
if err != nil {
return err
}
// Remove cached copy.
delete(b.buckets, string(key))
// Release all bucket pages to freelist.
child.nodes = nil
child.rootNode = nil
child.free()
// Delete the node if we have a matching key.
c.node().del(key)
return nil
}
// Get retrieves the value for a key in the bucket.
// Returns a nil value if the key does not exist or if the key is a nested bucket.
// The returned value is only valid for the life of the transaction.
func (b *Bucket) Get(key []byte) []byte {
k, v, flags := b.Cursor().seek(key)
// Return nil if this is a bucket.
if (flags & bucketLeafFlag) != 0 {
return nil
}
// If our target node isn't the same key as what's passed in then return nil.
if !bytes.Equal(key, k) {
return nil
}
return v
}
// Put sets the value for a key in the bucket.
// If the key exists then its previous value will be overwritten.
// Supplied value must remain valid for the life of the transaction.
// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
func (b *Bucket) Put(key []byte, value []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
} else if len(key) == 0 {
return ErrKeyRequired
} else if len(key) > MaxKeySize {
return ErrKeyTooLarge
} else if int64(len(value)) > MaxValueSize {
return ErrValueTooLarge
}
// Move cursor to correct position.
c := b.Cursor()
k, _, flags := c.seek(key)
// Return an error if there is an existing key with a bucket value.
if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
return ErrIncompatibleValue
}
// Insert into node.
key = cloneBytes(key)
c.node().put(key, key, value, 0, 0)
return nil
}
// Delete removes a key from the bucket.
// If the key does not exist then nothing is done and a nil error is returned.
// Returns an error if the bucket was created from a read-only transaction.
func (b *Bucket) Delete(key []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Move cursor to correct position.
c := b.Cursor()
_, _, flags := c.seek(key)
// Return an error if there is an existing bucket value.
if (flags & bucketLeafFlag) != 0 {
return ErrIncompatibleValue
}
// Delete the node if we have a matching key.
c.node().del(key)
return nil
}
// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Increment and return the sequence.
b.bucket.sequence = v
return nil
}
// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
if b.tx.db == nil {
return 0, ErrTxClosed
} else if !b.Writable() {
return 0, ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Increment and return the sequence.
b.bucket.sequence++
return b.bucket.sequence, nil
}
// ForEach executes a function for each key/value pair in a bucket.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller. The provided function must not modify
// the bucket; this will result in undefined behavior.
func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
if b.tx.db == nil {
return ErrTxClosed
}
c := b.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
if err := fn(k, v); err != nil {
return err
}
}
return nil
}
// Stats returns stats on a bucket.
func (b *Bucket) Stats() BucketStats {
var s, subStats BucketStats
pageSize := b.tx.db.pageSize
s.BucketN += 1
if b.root == 0 {
s.InlineBucketN += 1
}
b.forEachPage(func(p *page, depth int) {
if (p.flags & leafPageFlag) != 0 {
s.KeyN += int(p.count)
// used totals the used bytes for the page
used := pageHeaderSize
if p.count != 0 {
// If page has any elements, add all element headers.
used += leafPageElementSize * int(p.count-1)
// Add all element key, value sizes.
// The computation takes advantage of the fact that the position
// of the last element's key/value equals to the total of the sizes
// of all previous elements' keys and values.
// It also includes the last element's header.
lastElement := p.leafPageElement(p.count - 1)
used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
}
if b.root == 0 {
// For inlined bucket just update the inline stats
s.InlineBucketInuse += used
} else {
// For non-inlined bucket update all the leaf stats
s.LeafPageN++
s.LeafInuse += used
s.LeafOverflowN += int(p.overflow)
// Collect stats from sub-buckets.
// Do that by iterating over all element headers
// looking for the ones with the bucketLeafFlag.
for i := uint16(0); i < p.count; i++ {
e := p.leafPageElement(i)
if (e.flags & bucketLeafFlag) != 0 {
// For any bucket element, open the element value
// and recursively call Stats on the contained bucket.
subStats.Add(b.openBucket(e.value()).Stats())
}
}
}
} else if (p.flags & branchPageFlag) != 0 {
s.BranchPageN++
lastElement := p.branchPageElement(p.count - 1)
// used totals the used bytes for the page
// Add header and all element headers.
used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
// Add size of all keys and values.
// Again, use the fact that last element's position equals to
// the total of key, value sizes of all previous elements.
used += int(lastElement.pos + lastElement.ksize)
s.BranchInuse += used
s.BranchOverflowN += int(p.overflow)
}
// Keep track of maximum page depth.
if depth+1 > s.Depth {
s.Depth = (depth + 1)
}
})
// Alloc stats can be computed from page counts and pageSize.
s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
// Add the max depth of sub-buckets to get total nested depth.
s.Depth += subStats.Depth
// Add the stats for all sub-buckets
s.Add(subStats)
return s
}
// forEachPage iterates over every page in a bucket, including inline pages.
func (b *Bucket) forEachPage(fn func(*page, int)) {
// If we have an inline page then just use that.
if b.page != nil {
fn(b.page, 0)
return
}
// Otherwise traverse the page hierarchy.
b.tx.forEachPage(b.root, 0, fn)
}
// forEachPageNode iterates over every page (or node) in a bucket.
// This also includes inline pages.
func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
// If we have an inline page or root node then just use that.
if b.page != nil {
fn(b.page, nil, 0)
return
}
b._forEachPageNode(b.root, 0, fn)
}
func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
var p, n = b.pageNode(pgid)
// Execute function.
fn(p, n, depth)
// Recursively loop over children.
if p != nil {
if (p.flags & branchPageFlag) != 0 {
for i := 0; i < int(p.count); i++ {
elem := p.branchPageElement(uint16(i))
b._forEachPageNode(elem.pgid, depth+1, fn)
}
}
} else {
if !n.isLeaf {
for _, inode := range n.inodes {
b._forEachPageNode(inode.pgid, depth+1, fn)
}
}
}
}
// spill writes all the nodes for this bucket to dirty pages.
func (b *Bucket) spill() error {
// Spill all child buckets first.
for name, child := range b.buckets {
// If the child bucket is small enough and it has no child buckets then
// write it inline into the parent bucket's page. Otherwise spill it
// like a normal bucket and make the parent value a pointer to the page.
var value []byte
if child.inlineable() {
child.free()
value = child.write()
} else {
if err := child.spill(); err != nil {
return err
}
// Update the child bucket header in this bucket.
value = make([]byte, unsafe.Sizeof(bucket{}))
var bucket = (*bucket)(unsafe.Pointer(&value[0]))
*bucket = *child.bucket
}
// Skip writing the bucket if there are no materialized nodes.
if child.rootNode == nil {
continue
}
// Update parent node.
var c = b.Cursor()
k, _, flags := c.seek([]byte(name))
if !bytes.Equal([]byte(name), k) {
panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
}
if flags&bucketLeafFlag == 0 {
panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
}
c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
}
// Ignore if there's not a materialized root node.
if b.rootNode == nil {
return nil
}
// Spill nodes.
if err := b.rootNode.spill(); err != nil {
return err
}
b.rootNode = b.rootNode.root()
// Update the root node for this bucket.
if b.rootNode.pgid >= b.tx.meta.pgid {
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
}
b.root = b.rootNode.pgid
return nil
}
// inlineable returns true if a bucket is small enough to be written inline
// and if it contains no subbuckets. Otherwise returns false.
func (b *Bucket) inlineable() bool {
var n = b.rootNode
// Bucket must only contain a single leaf node.
if n == nil || !n.isLeaf {
return false
}
// Bucket is not inlineable if it contains subbuckets or if it goes beyond
// our threshold for inline bucket size.
var size = pageHeaderSize
for _, inode := range n.inodes {
size += leafPageElementSize + len(inode.key) + len(inode.value)
if inode.flags&bucketLeafFlag != 0 {
return false
} else if size > b.maxInlineBucketSize() {
return false
}
}
return true
}
// Returns the maximum total size of a bucket to make it a candidate for inlining.
func (b *Bucket) maxInlineBucketSize() int {
return b.tx.db.pageSize / 4
}
// write allocates and writes a bucket to a byte slice.
func (b *Bucket) write() []byte {
// Allocate the appropriate size.
var n = b.rootNode
var value = make([]byte, bucketHeaderSize+n.size())
// Write a bucket header.
var bucket = (*bucket)(unsafe.Pointer(&value[0]))
*bucket = *b.bucket
// Convert byte slice to a fake page and write the root node.
var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
n.write(p)
return value
}
// rebalance attempts to balance all nodes.
func (b *Bucket) rebalance() {
for _, n := range b.nodes {
n.rebalance()
}
for _, child := range b.buckets {
child.rebalance()
}
}
// node creates a node from a page and associates it with a given parent.
func (b *Bucket) node(pgid pgid, parent *node) *node {
_assert(b.nodes != nil, "nodes map expected")
// Retrieve node if it's already been created.
if n := b.nodes[pgid]; n != nil {
return n
}
// Otherwise create a node and cache it.
n := &node{bucket: b, parent: parent}
if parent == nil {
b.rootNode = n
} else {
parent.children = append(parent.children, n)
}
// Use the inline page if this is an inline bucket.
var p = b.page
if p == nil {
p = b.tx.page(pgid)
}
// Read the page into the node and cache it.
n.read(p)
b.nodes[pgid] = n
// Update statistics.
b.tx.stats.NodeCount++
return n
}
// free recursively frees all pages in the bucket.
func (b *Bucket) free() {
if b.root == 0 {
return
}
var tx = b.tx
b.forEachPageNode(func(p *page, n *node, _ int) {
if p != nil {
tx.db.freelist.free(tx.meta.txid, p)
} else {
n.free()
}
})
b.root = 0
}
// dereference removes all references to the old mmap.
func (b *Bucket) dereference() {
if b.rootNode != nil {
b.rootNode.root().dereference()
}
for _, child := range b.buckets {
child.dereference()
}
}
// pageNode returns the in-memory node, if it exists.
// Otherwise returns the underlying page.
func (b *Bucket) pageNode(id pgid) (*page, *node) {
// Inline buckets have a fake page embedded in their value so treat them
// differently. We'll return the rootNode (if available) or the fake page.
if b.root == 0 {
if id != 0 {
panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
}
if b.rootNode != nil {
return nil, b.rootNode
}
return b.page, nil
}
// Check the node cache for non-inline buckets.
if b.nodes != nil {
if n := b.nodes[id]; n != nil {
return nil, n
}
}
// Finally lookup the page from the transaction if no node is materialized.
return b.tx.page(id), nil
}
// BucketStats records statistics about resources used by a bucket.
type BucketStats struct {
// Page count statistics.
BranchPageN int // number of logical branch pages
BranchOverflowN int // number of physical branch overflow pages
LeafPageN int // number of logical leaf pages
LeafOverflowN int // number of physical leaf overflow pages
// Tree statistics.
KeyN int // number of keys/value pairs
Depth int // number of levels in B+tree
// Page size utilization.
BranchAlloc int // bytes allocated for physical branch pages
BranchInuse int // bytes actually used for branch data
LeafAlloc int // bytes allocated for physical leaf pages
LeafInuse int // bytes actually used for leaf data
// Bucket statistics
BucketN int // total number of buckets including the top bucket
InlineBucketN int // total number on inlined buckets
InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
}
func (s *BucketStats) Add(other BucketStats) {
s.BranchPageN += other.BranchPageN
s.BranchOverflowN += other.BranchOverflowN
s.LeafPageN += other.LeafPageN
s.LeafOverflowN += other.LeafOverflowN
s.KeyN += other.KeyN
if s.Depth < other.Depth {
s.Depth = other.Depth
}
s.BranchAlloc += other.BranchAlloc
s.BranchInuse += other.BranchInuse
s.LeafAlloc += other.LeafAlloc
s.LeafInuse += other.LeafInuse
s.BucketN += other.BucketN
s.InlineBucketN += other.InlineBucketN
s.InlineBucketInuse += other.InlineBucketInuse
}
// cloneBytes returns a copy of a given slice.
func cloneBytes(v []byte) []byte {
var clone = make([]byte, len(v))
copy(clone, v)
return clone
}

400
vendor/github.com/boltdb/bolt/cursor.go generated vendored Normal file
View File

@@ -0,0 +1,400 @@
package bolt
import (
"bytes"
"fmt"
"sort"
)
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
// Cursors see nested buckets with value == nil.
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
//
// Keys and values returned from the cursor are only valid for the life of the transaction.
//
// Changing data while traversing with a cursor may cause it to be invalidated
// and return unexpected keys and/or values. You must reposition your cursor
// after mutating data.
type Cursor struct {
bucket *Bucket
stack []elemRef
}
// Bucket returns the bucket that this cursor was created from.
func (c *Cursor) Bucket() *Bucket {
return c.bucket
}
// First moves the cursor to the first item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) First() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
c.stack = c.stack[:0]
p, n := c.bucket.pageNode(c.bucket.root)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
c.first()
// If we land on an empty page then move to the next value.
// https://github.com/boltdb/bolt/issues/450
if c.stack[len(c.stack)-1].count() == 0 {
c.next()
}
k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Last moves the cursor to the last item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Last() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
c.stack = c.stack[:0]
p, n := c.bucket.pageNode(c.bucket.root)
ref := elemRef{page: p, node: n}
ref.index = ref.count() - 1
c.stack = append(c.stack, ref)
c.last()
k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Next moves the cursor to the next item in the bucket and returns its key and value.
// If the cursor is at the end of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Next() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
k, v, flags := c.next()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Prev moves the cursor to the previous item in the bucket and returns its key and value.
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Prev() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
// Attempt to move back one element until we're successful.
// Move up the stack as we hit the beginning of each page in our stack.
for i := len(c.stack) - 1; i >= 0; i-- {
elem := &c.stack[i]
if elem.index > 0 {
elem.index--
break
}
c.stack = c.stack[:i]
}
// If we've hit the end then return nil.
if len(c.stack) == 0 {
return nil, nil
}
// Move down the stack to find the last element of the last leaf under this branch.
c.last()
k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used. If no keys
// follow, a nil key is returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
k, v, flags := c.seek(seek)
// If we ended up after the last element of a page then move to the next one.
if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
k, v, flags = c.next()
}
if k == nil {
return nil, nil
} else if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Delete removes the current key/value under the cursor from the bucket.
// Delete fails if current key/value is a bucket or if the transaction is not writable.
func (c *Cursor) Delete() error {
if c.bucket.tx.db == nil {
return ErrTxClosed
} else if !c.bucket.Writable() {
return ErrTxNotWritable
}
key, _, flags := c.keyValue()
// Return an error if current value is a bucket.
if (flags & bucketLeafFlag) != 0 {
return ErrIncompatibleValue
}
c.node().del(key)
return nil
}
// seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used.
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
_assert(c.bucket.tx.db != nil, "tx closed")
// Start from root page/node and traverse to correct page.
c.stack = c.stack[:0]
c.search(seek, c.bucket.root)
ref := &c.stack[len(c.stack)-1]
// If the cursor is pointing to the end of page/node then return nil.
if ref.index >= ref.count() {
return nil, nil, 0
}
// If this is a bucket then return a nil value.
return c.keyValue()
}
// first moves the cursor to the first leaf element under the last page in the stack.
func (c *Cursor) first() {
for {
// Exit when we hit a leaf page.
var ref = &c.stack[len(c.stack)-1]
if ref.isLeaf() {
break
}
// Keep adding pages pointing to the first element to the stack.
var pgid pgid
if ref.node != nil {
pgid = ref.node.inodes[ref.index].pgid
} else {
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
}
p, n := c.bucket.pageNode(pgid)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
}
}
// last moves the cursor to the last leaf element under the last page in the stack.
func (c *Cursor) last() {
for {
// Exit when we hit a leaf page.
ref := &c.stack[len(c.stack)-1]
if ref.isLeaf() {
break
}
// Keep adding pages pointing to the last element in the stack.
var pgid pgid
if ref.node != nil {
pgid = ref.node.inodes[ref.index].pgid
} else {
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
}
p, n := c.bucket.pageNode(pgid)
var nextRef = elemRef{page: p, node: n}
nextRef.index = nextRef.count() - 1
c.stack = append(c.stack, nextRef)
}
}
// next moves to the next leaf element and returns the key and value.
// If the cursor is at the last leaf element then it stays there and returns nil.
func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
for {
// Attempt to move over one element until we're successful.
// Move up the stack as we hit the end of each page in our stack.
var i int
for i = len(c.stack) - 1; i >= 0; i-- {
elem := &c.stack[i]
if elem.index < elem.count()-1 {
elem.index++
break
}
}
// If we've hit the root page then stop and return. This will leave the
// cursor on the last element of the last page.
if i == -1 {
return nil, nil, 0
}
// Otherwise start from where we left off in the stack and find the
// first element of the first leaf page.
c.stack = c.stack[:i+1]
c.first()
// If this is an empty page then restart and move back up the stack.
// https://github.com/boltdb/bolt/issues/450
if c.stack[len(c.stack)-1].count() == 0 {
continue
}
return c.keyValue()
}
}
// search recursively performs a binary search against a given page/node until it finds a given key.
func (c *Cursor) search(key []byte, pgid pgid) {
p, n := c.bucket.pageNode(pgid)
if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
}
e := elemRef{page: p, node: n}
c.stack = append(c.stack, e)
// If we're on a leaf page/node then find the specific node.
if e.isLeaf() {
c.nsearch(key)
return
}
if n != nil {
c.searchNode(key, n)
return
}
c.searchPage(key, p)
}
func (c *Cursor) searchNode(key []byte, n *node) {
var exact bool
index := sort.Search(len(n.inodes), func(i int) bool {
// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
ret := bytes.Compare(n.inodes[i].key, key)
if ret == 0 {
exact = true
}
return ret != -1
})
if !exact && index > 0 {
index--
}
c.stack[len(c.stack)-1].index = index
// Recursively search to the next page.
c.search(key, n.inodes[index].pgid)
}
func (c *Cursor) searchPage(key []byte, p *page) {
// Binary search for the correct range.
inodes := p.branchPageElements()
var exact bool
index := sort.Search(int(p.count), func(i int) bool {
// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
ret := bytes.Compare(inodes[i].key(), key)
if ret == 0 {
exact = true
}
return ret != -1
})
if !exact && index > 0 {
index--
}
c.stack[len(c.stack)-1].index = index
// Recursively search to the next page.
c.search(key, inodes[index].pgid)
}
// nsearch searches the leaf node on the top of the stack for a key.
func (c *Cursor) nsearch(key []byte) {
e := &c.stack[len(c.stack)-1]
p, n := e.page, e.node
// If we have a node then search its inodes.
if n != nil {
index := sort.Search(len(n.inodes), func(i int) bool {
return bytes.Compare(n.inodes[i].key, key) != -1
})
e.index = index
return
}
// If we have a page then search its leaf elements.
inodes := p.leafPageElements()
index := sort.Search(int(p.count), func(i int) bool {
return bytes.Compare(inodes[i].key(), key) != -1
})
e.index = index
}
// keyValue returns the key and value of the current leaf element.
func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
ref := &c.stack[len(c.stack)-1]
if ref.count() == 0 || ref.index >= ref.count() {
return nil, nil, 0
}
// Retrieve value from node.
if ref.node != nil {
inode := &ref.node.inodes[ref.index]
return inode.key, inode.value, inode.flags
}
// Or retrieve value from page.
elem := ref.page.leafPageElement(uint16(ref.index))
return elem.key(), elem.value(), elem.flags
}
// node returns the node that the cursor is currently positioned on.
func (c *Cursor) node() *node {
_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
// If the top of the stack is a leaf node then just return it.
if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
return ref.node
}
// Start from root and traverse down the hierarchy.
var n = c.stack[0].node
if n == nil {
n = c.bucket.node(c.stack[0].page.id, nil)
}
for _, ref := range c.stack[:len(c.stack)-1] {
_assert(!n.isLeaf, "expected branch node")
n = n.childAt(int(ref.index))
}
_assert(n.isLeaf, "expected leaf node")
return n
}
// elemRef represents a reference to an element on a given page/node.
type elemRef struct {
page *page
node *node
index int
}
// isLeaf returns whether the ref is pointing at a leaf page/node.
func (r *elemRef) isLeaf() bool {
if r.node != nil {
return r.node.isLeaf
}
return (r.page.flags & leafPageFlag) != 0
}
// count returns the number of inodes or page elements.
func (r *elemRef) count() int {
if r.node != nil {
return len(r.node.inodes)
}
return int(r.page.count)
}

1039
vendor/github.com/boltdb/bolt/db.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

44
vendor/github.com/boltdb/bolt/doc.go generated vendored Normal file
View File

@@ -0,0 +1,44 @@
/*
Package bolt implements a low-level key/value store in pure Go. It supports
fully serializable transactions, ACID semantics, and lock-free MVCC with
multiple readers and a single writer. Bolt can be used for projects that
want a simple data store without the need to add large dependencies such as
Postgres or MySQL.
Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
optimized for fast read access and does not require recovery in the event of a
system crash. Transactions which have not finished committing will simply be
rolled back in the event of a crash.
The design of Bolt is based on Howard Chu's LMDB database project.
Bolt currently works on Windows, Mac OS X, and Linux.
Basics
There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
a collection of buckets and is represented by a single file on disk. A bucket is
a collection of unique keys that are associated with values.
Transactions provide either read-only or read-write access to the database.
Read-only transactions can retrieve key/value pairs and can use Cursors to
iterate over the dataset sequentially. Read-write transactions can create and
delete buckets and can insert and remove keys. Only one read-write transaction
is allowed at a time.
Caveats
The database uses a read-only, memory-mapped data file to ensure that
applications cannot corrupt the database, however, this means that keys and
values returned from Bolt cannot be changed. Writing to a read-only byte slice
will cause Go to panic.
Keys and values retrieved from the database are only valid for the life of
the transaction. When used outside the transaction, these byte slices can
point to different data or can point to invalid memory which will cause a panic.
*/
package bolt
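The Basics section above maps directly onto a handful of calls. A minimal sketch of the described workflow; the file, bucket, and key names are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("basics.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// One read-write transaction: create a bucket and store a key.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("config"))
		if err != nil {
			return err
		}
		return b.Put([]byte("greeting"), []byte("hello"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// One read-only transaction: returned byte slices are only valid
	// inside the closure, per the Caveats above.
	err = db.View(func(tx *bolt.Tx) error {
		v := tx.Bucket([]byte("config")).Get([]byte("greeting"))
		fmt.Printf("greeting = %s\n", v)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}
```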

71
vendor/github.com/boltdb/bolt/errors.go generated vendored Normal file

@@ -0,0 +1,71 @@
package bolt
import "errors"
// These errors can be returned when opening or calling methods on a DB.
var (
// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
// is opened or after it is closed.
ErrDatabaseNotOpen = errors.New("database not open")
// ErrDatabaseOpen is returned when opening a database that is
// already open.
ErrDatabaseOpen = errors.New("database already open")
// ErrInvalid is returned when both meta pages on a database are invalid.
// This typically occurs when a file is not a bolt database.
ErrInvalid = errors.New("invalid database")
// ErrVersionMismatch is returned when the data file was created with a
// different version of Bolt.
ErrVersionMismatch = errors.New("version mismatch")
// ErrChecksum is returned when either meta page checksum does not match.
ErrChecksum = errors.New("checksum error")
// ErrTimeout is returned when a database cannot obtain an exclusive lock
// on the data file after the timeout passed to Open().
ErrTimeout = errors.New("timeout")
)
// These errors can occur when beginning or committing a Tx.
var (
// ErrTxNotWritable is returned when performing a write operation on a
// read-only transaction.
ErrTxNotWritable = errors.New("tx not writable")
// ErrTxClosed is returned when committing or rolling back a transaction
// that has already been committed or rolled back.
ErrTxClosed = errors.New("tx closed")
// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
// read-only database.
ErrDatabaseReadOnly = errors.New("database is in read-only mode")
)
// These errors can occur when putting or deleting a value or a bucket.
var (
// ErrBucketNotFound is returned when trying to access a bucket that has
// not been created yet.
ErrBucketNotFound = errors.New("bucket not found")
// ErrBucketExists is returned when creating a bucket that already exists.
ErrBucketExists = errors.New("bucket already exists")
// ErrBucketNameRequired is returned when creating a bucket with a blank name.
ErrBucketNameRequired = errors.New("bucket name required")
// ErrKeyRequired is returned when inserting a zero-length key.
ErrKeyRequired = errors.New("key required")
// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
ErrKeyTooLarge = errors.New("key too large")
// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
ErrValueTooLarge = errors.New("value too large")
// ErrIncompatibleValue is returned when trying to create or delete a bucket
// on an existing non-bucket key or when trying to create or delete a
// non-bucket key on an existing bucket key.
ErrIncompatibleValue = errors.New("incompatible value")
)
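Because these are package-level sentinel values, callers can branch on them with plain equality. A small sketch of that pattern, using a hypothetical helper where "bucket already exists" is not treated as a failure:

```go
package main

import "github.com/boltdb/bolt"

// ensureBucket creates the named bucket, treating ErrBucketExists as
// success; every other error is propagated to the caller.
func ensureBucket(db *bolt.DB, name []byte) error {
	err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucket(name)
		return err
	})
	if err == bolt.ErrBucketExists {
		return nil // created on an earlier run; not a failure here
	}
	return err
}
```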

252
vendor/github.com/boltdb/bolt/freelist.go generated vendored Normal file

@@ -0,0 +1,252 @@
package bolt
import (
"fmt"
"sort"
"unsafe"
)
// freelist represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
type freelist struct {
ids []pgid // all free and available free page ids.
pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
cache map[pgid]bool // fast lookup of all free and pending page ids.
}
// newFreelist returns an empty, initialized freelist.
func newFreelist() *freelist {
return &freelist{
pending: make(map[txid][]pgid),
cache: make(map[pgid]bool),
}
}
// size returns the size of the page after serialization.
func (f *freelist) size() int {
n := f.count()
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n++
}
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
}
// count returns count of pages on the freelist
func (f *freelist) count() int {
return f.free_count() + f.pending_count()
}
// free_count returns count of free pages
func (f *freelist) free_count() int {
return len(f.ids)
}
// pending_count returns count of pending pages
func (f *freelist) pending_count() int {
var count int
for _, list := range f.pending {
count += len(list)
}
return count
}
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
m := make(pgids, 0, f.pending_count())
for _, list := range f.pending {
m = append(m, list...)
}
sort.Sort(m)
mergepgids(dst, f.ids, m)
}
// allocate returns the starting page id of a contiguous list of pages of a given size.
// If a contiguous block cannot be found then 0 is returned.
func (f *freelist) allocate(n int) pgid {
if len(f.ids) == 0 {
return 0
}
var initial, previd pgid
for i, id := range f.ids {
if id <= 1 {
panic(fmt.Sprintf("invalid page allocation: %d", id))
}
// Reset initial page if this is not contiguous.
if previd == 0 || id-previd != 1 {
initial = id
}
// If we found a contiguous block then remove it and return it.
if (id-initial)+1 == pgid(n) {
// If we're allocating off the beginning then take the fast path
// and just adjust the existing slice. This will use extra memory
// temporarily but the append() in free() will realloc the slice
// as is necessary.
if (i + 1) == n {
f.ids = f.ids[i+1:]
} else {
copy(f.ids[i-n+1:], f.ids[i+1:])
f.ids = f.ids[:len(f.ids)-n]
}
// Remove from the free cache.
for i := pgid(0); i < pgid(n); i++ {
delete(f.cache, initial+i)
}
return initial
}
previd = id
}
return 0
}
// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
func (f *freelist) free(txid txid, p *page) {
if p.id <= 1 {
panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
}
// Free page and all its overflow pages.
var ids = f.pending[txid]
for id := p.id; id <= p.id+pgid(p.overflow); id++ {
// Verify that page is not already free.
if f.cache[id] {
panic(fmt.Sprintf("page %d already freed", id))
}
// Add to the freelist and cache.
ids = append(ids, id)
f.cache[id] = true
}
f.pending[txid] = ids
}
// release moves all page ids for a transaction id (or older) to the freelist.
func (f *freelist) release(txid txid) {
m := make(pgids, 0)
for tid, ids := range f.pending {
if tid <= txid {
// Move transaction's pending pages to the available freelist.
// Don't remove from the cache since the page is still free.
m = append(m, ids...)
delete(f.pending, tid)
}
}
sort.Sort(m)
f.ids = pgids(f.ids).merge(m)
}
// rollback removes the pages from a given pending tx.
func (f *freelist) rollback(txid txid) {
// Remove page ids from cache.
for _, id := range f.pending[txid] {
delete(f.cache, id)
}
// Remove pages from pending list.
delete(f.pending, txid)
}
// freed returns whether a given page is in the free list.
func (f *freelist) freed(pgid pgid) bool {
return f.cache[pgid]
}
// read initializes the freelist from a freelist page.
func (f *freelist) read(p *page) {
// If the page.count is at the max uint16 value (64k) then it's considered
// an overflow and the size of the freelist is stored as the first element.
idx, count := 0, int(p.count)
if count == 0xFFFF {
idx = 1
count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
}
// Copy the list of page ids from the freelist.
if count == 0 {
f.ids = nil
} else {
// Note: when the count overflowed, the ids start at slot 1, so the
// slice must run to idx+count rather than count.
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx : idx+count]
f.ids = make([]pgid, len(ids))
copy(f.ids, ids)
// Make sure they're sorted.
sort.Sort(pgids(f.ids))
}
// Rebuild the page cache.
f.reindex()
}
// write writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
func (f *freelist) write(p *page) error {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.flags |= freelistPageFlag
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
lenids := f.count()
if lenids == 0 {
p.count = uint16(lenids)
} else if lenids < 0xFFFF {
p.count = uint16(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
} else {
p.count = 0xFFFF
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
}
return nil
}
// reload reads the freelist from a page and filters out pending items.
func (f *freelist) reload(p *page) {
f.read(p)
// Build a cache of only pending pages.
pcache := make(map[pgid]bool)
for _, pendingIDs := range f.pending {
for _, pendingID := range pendingIDs {
pcache[pendingID] = true
}
}
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
var a []pgid
for _, id := range f.ids {
if !pcache[id] {
a = append(a, id)
}
}
f.ids = a
// Once the available list is rebuilt then rebuild the free cache so that
// it includes the available and pending free pages.
f.reindex()
}
// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
f.cache = make(map[pgid]bool, len(f.ids))
for _, id := range f.ids {
f.cache[id] = true
}
for _, pendingIDs := range f.pending {
for _, pendingID := range pendingIDs {
f.cache[pendingID] = true
}
}
}
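The count-overflow encoding shared by size(), read(), and write() above is easy to get wrong, so here is a standalone sketch of the arithmetic. pageHeaderSize (16 on typical 64-bit builds) and the 8-byte pgid width are restated locally only so the snippet compiles on its own:

```go
package main

import "fmt"

const (
	pageHeaderSize = 16 // offset of page.ptr on 64-bit builds
	pgidSize       = 8  // unsafe.Sizeof(pgid(0))
)

// freelistPageSize mirrors freelist.size(): once the 16-bit page header
// can no longer hold the count, one extra pgid slot is reserved at the
// front of the page body to store the true count (see freelist.write).
func freelistPageSize(count int) int {
	n := count
	if n >= 0xFFFF {
		n++ // first element stores the count itself
	}
	return pageHeaderSize + pgidSize*n
}

func main() {
	fmt.Println(freelistPageSize(1000))   // 8016: count fits in the header
	fmt.Println(freelistPageSize(0xFFFF)) // 524304: 16 + 8*(65535+1)
}
```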

604
vendor/github.com/boltdb/bolt/node.go generated vendored Normal file

@@ -0,0 +1,604 @@
package bolt
import (
"bytes"
"fmt"
"sort"
"unsafe"
)
// node represents an in-memory, deserialized page.
type node struct {
bucket *Bucket
isLeaf bool
unbalanced bool
spilled bool
key []byte
pgid pgid
parent *node
children nodes
inodes inodes
}
// root returns the top-level node this node is attached to.
func (n *node) root() *node {
if n.parent == nil {
return n
}
return n.parent.root()
}
// minKeys returns the minimum number of inodes this node should have.
func (n *node) minKeys() int {
if n.isLeaf {
return 1
}
return 2
}
// size returns the size of the node after serialization.
func (n *node) size() int {
sz, elsz := pageHeaderSize, n.pageElementSize()
for i := 0; i < len(n.inodes); i++ {
item := &n.inodes[i]
sz += elsz + len(item.key) + len(item.value)
}
return sz
}
// sizeLessThan returns true if the node is less than a given size.
// This is an optimization to avoid calculating a large node when we only need
// to know if it fits inside a certain page size.
func (n *node) sizeLessThan(v int) bool {
sz, elsz := pageHeaderSize, n.pageElementSize()
for i := 0; i < len(n.inodes); i++ {
item := &n.inodes[i]
sz += elsz + len(item.key) + len(item.value)
if sz >= v {
return false
}
}
return true
}
// pageElementSize returns the size of each page element based on the type of node.
func (n *node) pageElementSize() int {
if n.isLeaf {
return leafPageElementSize
}
return branchPageElementSize
}
// childAt returns the child node at a given index.
func (n *node) childAt(index int) *node {
if n.isLeaf {
panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
}
return n.bucket.node(n.inodes[index].pgid, n)
}
// childIndex returns the index of a given child node.
func (n *node) childIndex(child *node) int {
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
return index
}
// numChildren returns the number of children.
func (n *node) numChildren() int {
return len(n.inodes)
}
// nextSibling returns the next node with the same parent.
func (n *node) nextSibling() *node {
if n.parent == nil {
return nil
}
index := n.parent.childIndex(n)
if index >= n.parent.numChildren()-1 {
return nil
}
return n.parent.childAt(index + 1)
}
// prevSibling returns the previous node with the same parent.
func (n *node) prevSibling() *node {
if n.parent == nil {
return nil
}
index := n.parent.childIndex(n)
if index == 0 {
return nil
}
return n.parent.childAt(index - 1)
}
// put inserts a key/value.
func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
if pgid >= n.bucket.tx.meta.pgid {
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
} else if len(oldKey) <= 0 {
panic("put: zero-length old key")
} else if len(newKey) <= 0 {
panic("put: zero-length new key")
}
// Find insertion index.
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
// Add capacity and shift nodes if we don't have an exact match and need to insert.
exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
if !exact {
n.inodes = append(n.inodes, inode{})
copy(n.inodes[index+1:], n.inodes[index:])
}
inode := &n.inodes[index]
inode.flags = flags
inode.key = newKey
inode.value = value
inode.pgid = pgid
_assert(len(inode.key) > 0, "put: zero-length inode key")
}
// del removes a key from the node.
func (n *node) del(key []byte) {
// Find index of key.
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
// Exit if the key isn't found.
if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
return
}
// Delete inode from the node.
n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
// Mark the node as needing rebalancing.
n.unbalanced = true
}
// read initializes the node from a page.
func (n *node) read(p *page) {
n.pgid = p.id
n.isLeaf = ((p.flags & leafPageFlag) != 0)
n.inodes = make(inodes, int(p.count))
for i := 0; i < int(p.count); i++ {
inode := &n.inodes[i]
if n.isLeaf {
elem := p.leafPageElement(uint16(i))
inode.flags = elem.flags
inode.key = elem.key()
inode.value = elem.value()
} else {
elem := p.branchPageElement(uint16(i))
inode.pgid = elem.pgid
inode.key = elem.key()
}
_assert(len(inode.key) > 0, "read: zero-length inode key")
}
// Save first key so we can find the node in the parent when we spill.
if len(n.inodes) > 0 {
n.key = n.inodes[0].key
_assert(len(n.key) > 0, "read: zero-length node key")
} else {
n.key = nil
}
}
// write writes the items onto one or more pages.
func (n *node) write(p *page) {
// Initialize page.
if n.isLeaf {
p.flags |= leafPageFlag
} else {
p.flags |= branchPageFlag
}
if len(n.inodes) >= 0xFFFF {
panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
}
p.count = uint16(len(n.inodes))
// Stop here if there are no items to write.
if p.count == 0 {
return
}
// Loop over each item and write it to the page.
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
for i, item := range n.inodes {
_assert(len(item.key) > 0, "write: zero-length inode key")
// Write the page element.
if n.isLeaf {
elem := p.leafPageElement(uint16(i))
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
elem.flags = item.flags
elem.ksize = uint32(len(item.key))
elem.vsize = uint32(len(item.value))
} else {
elem := p.branchPageElement(uint16(i))
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
elem.ksize = uint32(len(item.key))
elem.pgid = item.pgid
_assert(elem.pgid != p.id, "write: circular dependency occurred")
}
// If the length of key+value is larger than the max allocation size
// then we need to reallocate the byte array pointer.
//
// See: https://github.com/boltdb/bolt/pull/335
klen, vlen := len(item.key), len(item.value)
if len(b) < klen+vlen {
b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
}
// Write data for the element to the end of the page.
copy(b[0:], item.key)
b = b[klen:]
copy(b[0:], item.value)
b = b[vlen:]
}
// DEBUG ONLY: n.dump()
}
// split breaks up a node into multiple smaller nodes, if appropriate.
// This should only be called from the spill() function.
func (n *node) split(pageSize int) []*node {
var nodes []*node
node := n
for {
// Split node into two.
a, b := node.splitTwo(pageSize)
nodes = append(nodes, a)
// If we can't split then exit the loop.
if b == nil {
break
}
// Set node to b so it gets split on the next iteration.
node = b
}
return nodes
}
// splitTwo breaks up a node into two smaller nodes, if appropriate.
// This should only be called from the split() function.
func (n *node) splitTwo(pageSize int) (*node, *node) {
// Ignore the split if the page doesn't have at least enough nodes for
// two pages or if the nodes can fit in a single page.
if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
return n, nil
}
// Determine the threshold before starting a new node.
var fillPercent = n.bucket.FillPercent
if fillPercent < minFillPercent {
fillPercent = minFillPercent
} else if fillPercent > maxFillPercent {
fillPercent = maxFillPercent
}
threshold := int(float64(pageSize) * fillPercent)
// Determine split position and sizes of the two pages.
splitIndex, _ := n.splitIndex(threshold)
// Split node into two separate nodes.
// If there's no parent then we'll need to create one.
if n.parent == nil {
n.parent = &node{bucket: n.bucket, children: []*node{n}}
}
// Create a new node and add it to the parent.
next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
n.parent.children = append(n.parent.children, next)
// Split inodes across two nodes.
next.inodes = n.inodes[splitIndex:]
n.inodes = n.inodes[:splitIndex]
// Update the statistics.
n.bucket.tx.stats.Split++
return n, next
}
// splitIndex finds the position where a page will fill a given threshold.
// It returns the index as well as the size of the first page.
// This should only be called from split().
func (n *node) splitIndex(threshold int) (index, sz int) {
sz = pageHeaderSize
// Loop until we only have the minimum number of keys required for the second page.
for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
index = i
inode := n.inodes[i]
elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
// If we have at least the minimum number of keys and adding another
// node would put us over the threshold then exit and return.
if i >= minKeysPerPage && sz+elsize > threshold {
break
}
// Add the element size to the total size.
sz += elsize
}
return
}
// spill writes the nodes to dirty pages and splits nodes as it goes.
// Returns an error if dirty pages cannot be allocated.
func (n *node) spill() error {
var tx = n.bucket.tx
if n.spilled {
return nil
}
// Spill child nodes first. Child nodes can materialize sibling nodes in
// the case of split-merge so we cannot use a range loop. We have to check
// the children size on every loop iteration.
sort.Sort(n.children)
for i := 0; i < len(n.children); i++ {
if err := n.children[i].spill(); err != nil {
return err
}
}
// We no longer need the child list because it's only used for spill tracking.
n.children = nil
// Split nodes into appropriate sizes. The first node will always be n.
var nodes = n.split(tx.db.pageSize)
for _, node := range nodes {
// Add node's page to the freelist if it's not new.
if node.pgid > 0 {
tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
node.pgid = 0
}
// Allocate contiguous space for the node.
p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
if err != nil {
return err
}
// Write the node.
if p.id >= tx.meta.pgid {
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
}
node.pgid = p.id
node.write(p)
node.spilled = true
// Insert into parent inodes.
if node.parent != nil {
var key = node.key
if key == nil {
key = node.inodes[0].key
}
node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
node.key = node.inodes[0].key
_assert(len(node.key) > 0, "spill: zero-length node key")
}
// Update the statistics.
tx.stats.Spill++
}
// If the root node split and created a new root then we need to spill that
// as well. We'll clear out the children to make sure it doesn't try to respill.
if n.parent != nil && n.parent.pgid == 0 {
n.children = nil
return n.parent.spill()
}
return nil
}
// rebalance attempts to combine the node with sibling nodes if the node fill
// size is below a threshold or if there are not enough keys.
func (n *node) rebalance() {
if !n.unbalanced {
return
}
n.unbalanced = false
// Update statistics.
n.bucket.tx.stats.Rebalance++
// Ignore if node is above threshold (25%) and has enough keys.
var threshold = n.bucket.tx.db.pageSize / 4
if n.size() > threshold && len(n.inodes) > n.minKeys() {
return
}
// Root node has special handling.
if n.parent == nil {
// If root node is a branch and only has one node then collapse it.
if !n.isLeaf && len(n.inodes) == 1 {
// Move root's child up.
child := n.bucket.node(n.inodes[0].pgid, n)
n.isLeaf = child.isLeaf
n.inodes = child.inodes[:]
n.children = child.children
// Reparent all child nodes being moved.
for _, inode := range n.inodes {
if child, ok := n.bucket.nodes[inode.pgid]; ok {
child.parent = n
}
}
// Remove old child.
child.parent = nil
delete(n.bucket.nodes, child.pgid)
child.free()
}
return
}
// If node has no keys then just remove it.
if n.numChildren() == 0 {
n.parent.del(n.key)
n.parent.removeChild(n)
delete(n.bucket.nodes, n.pgid)
n.free()
n.parent.rebalance()
return
}
_assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
// Destination node is right sibling if idx == 0, otherwise left sibling.
var target *node
var useNextSibling = (n.parent.childIndex(n) == 0)
if useNextSibling {
target = n.nextSibling()
} else {
target = n.prevSibling()
}
// If both this node and the target node are too small then merge them.
if useNextSibling {
// Reparent all child nodes being moved.
for _, inode := range target.inodes {
if child, ok := n.bucket.nodes[inode.pgid]; ok {
child.parent.removeChild(child)
child.parent = n
child.parent.children = append(child.parent.children, child)
}
}
// Copy over inodes from target and remove target.
n.inodes = append(n.inodes, target.inodes...)
n.parent.del(target.key)
n.parent.removeChild(target)
delete(n.bucket.nodes, target.pgid)
target.free()
} else {
// Reparent all child nodes being moved.
for _, inode := range n.inodes {
if child, ok := n.bucket.nodes[inode.pgid]; ok {
child.parent.removeChild(child)
child.parent = target
child.parent.children = append(child.parent.children, child)
}
}
// Copy over inodes to target and remove node.
target.inodes = append(target.inodes, n.inodes...)
n.parent.del(n.key)
n.parent.removeChild(n)
delete(n.bucket.nodes, n.pgid)
n.free()
}
// Either this node or the target node was deleted from the parent so rebalance it.
n.parent.rebalance()
}
// removeChild removes a node from the list of in-memory children.
// This does not affect the inodes.
func (n *node) removeChild(target *node) {
for i, child := range n.children {
if child == target {
n.children = append(n.children[:i], n.children[i+1:]...)
return
}
}
}
// dereference causes the node to copy all its inode key/value references to heap memory.
// This is required when the mmap is reallocated so inodes are not pointing to stale data.
func (n *node) dereference() {
if n.key != nil {
key := make([]byte, len(n.key))
copy(key, n.key)
n.key = key
_assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
}
for i := range n.inodes {
inode := &n.inodes[i]
key := make([]byte, len(inode.key))
copy(key, inode.key)
inode.key = key
_assert(len(inode.key) > 0, "dereference: zero-length inode key")
value := make([]byte, len(inode.value))
copy(value, inode.value)
inode.value = value
}
// Recursively dereference children.
for _, child := range n.children {
child.dereference()
}
// Update statistics.
n.bucket.tx.stats.NodeDeref++
}
// free adds the node's underlying page to the freelist.
func (n *node) free() {
if n.pgid != 0 {
n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
n.pgid = 0
}
}
// dump writes the contents of the node to STDERR for debugging purposes.
/*
func (n *node) dump() {
// Write node header.
var typ = "branch"
if n.isLeaf {
typ = "leaf"
}
warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
// Write out abbreviated version of each item.
for _, item := range n.inodes {
if n.isLeaf {
if item.flags&bucketLeafFlag != 0 {
bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
} else {
warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
}
} else {
warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
}
}
warn("")
}
*/
type nodes []*node
func (s nodes) Len() int { return len(s) }
func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
// inode represents an internal node inside of a node.
// It can be used to point to elements in a page or point
// to an element which hasn't been added to a page yet.
type inode struct {
flags uint32
pgid pgid
key []byte
value []byte
}
type inodes []inode
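splitTwo's threshold arithmetic is worth seeing with concrete numbers. A standalone sketch; the clamp bounds 0.1 and 1.0 mirror the package's usual minFillPercent/maxFillPercent defaults, which live in bucket.go and are not shown in this diff:

```go
package main

import "fmt"

const (
	minFillPercent = 0.1 // assumed: bucket.go's lower clamp
	maxFillPercent = 1.0 // assumed: bucket.go's upper clamp
)

// splitThreshold reproduces the clamp-and-scale step of splitTwo:
// the bucket's FillPercent is bounded, then applied to the page size.
func splitThreshold(pageSize int, fillPercent float64) int {
	if fillPercent < minFillPercent {
		fillPercent = minFillPercent
	} else if fillPercent > maxFillPercent {
		fillPercent = maxFillPercent
	}
	return int(float64(pageSize) * fillPercent)
}

func main() {
	fmt.Println(splitThreshold(4096, 0.5)) // 2048: default 50% fill
	fmt.Println(splitThreshold(4096, 2.0)) // 4096: clamped to maxFillPercent
}
```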

197
vendor/github.com/boltdb/bolt/page.go generated vendored Normal file

@@ -0,0 +1,197 @@
package bolt
import (
"fmt"
"os"
"sort"
"unsafe"
)
const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
const minKeysPerPage = 2
const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
const (
branchPageFlag = 0x01
leafPageFlag = 0x02
metaPageFlag = 0x04
freelistPageFlag = 0x10
)
const (
bucketLeafFlag = 0x01
)
type pgid uint64
type page struct {
id pgid
flags uint16
count uint16
overflow uint32
ptr uintptr
}
// typ returns a human readable page type string used for debugging.
func (p *page) typ() string {
if (p.flags & branchPageFlag) != 0 {
return "branch"
} else if (p.flags & leafPageFlag) != 0 {
return "leaf"
} else if (p.flags & metaPageFlag) != 0 {
return "meta"
} else if (p.flags & freelistPageFlag) != 0 {
return "freelist"
}
return fmt.Sprintf("unknown<%02x>", p.flags)
}
// meta returns a pointer to the metadata section of the page.
func (p *page) meta() *meta {
return (*meta)(unsafe.Pointer(&p.ptr))
}
// leafPageElement retrieves the leaf node by index
func (p *page) leafPageElement(index uint16) *leafPageElement {
n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
return n
}
// leafPageElements retrieves a list of leaf nodes.
func (p *page) leafPageElements() []leafPageElement {
if p.count == 0 {
return nil
}
return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
// branchPageElement retrieves the branch node by index
func (p *page) branchPageElement(index uint16) *branchPageElement {
return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
}
// branchPageElements retrieves a list of branch nodes.
func (p *page) branchPageElements() []branchPageElement {
if p.count == 0 {
return nil
}
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
// dump writes n bytes of the page to STDERR as hex output.
func (p *page) hexdump(n int) {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
fmt.Fprintf(os.Stderr, "%x\n", buf)
}
type pages []*page
func (s pages) Len() int { return len(s) }
func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
// branchPageElement represents a node on a branch page.
type branchPageElement struct {
pos uint32
ksize uint32
pgid pgid
}
// key returns a byte slice of the node key.
func (n *branchPageElement) key() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
}
// leafPageElement represents a node on a leaf page.
type leafPageElement struct {
flags uint32
pos uint32
ksize uint32
vsize uint32
}
// key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
}
// value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
}
// PageInfo represents human readable information about a page.
type PageInfo struct {
ID int
Type string
Count int
OverflowCount int
}
type pgids []pgid
func (s pgids) Len() int { return len(s) }
func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
// merge returns the sorted union of a and b.
func (a pgids) merge(b pgids) pgids {
// Return the opposite slice if one is nil.
if len(a) == 0 {
return b
}
if len(b) == 0 {
return a
}
merged := make(pgids, len(a)+len(b))
mergepgids(merged, a, b)
return merged
}
// mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics.
func mergepgids(dst, a, b pgids) {
if len(dst) < len(a)+len(b) {
panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
}
// Copy in the opposite slice if one is nil.
if len(a) == 0 {
copy(dst, b)
return
}
if len(b) == 0 {
copy(dst, a)
return
}
// Merged will hold all elements from both lists.
merged := dst[:0]
// Assign lead to the slice with a lower starting value, follow to the higher value.
lead, follow := a, b
if b[0] < a[0] {
lead, follow = b, a
}
// Continue while there are elements in the lead.
for len(lead) > 0 {
// Merge largest prefix of lead that is ahead of follow[0].
n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
merged = append(merged, lead[:n]...)
if n >= len(lead) {
break
}
// Swap lead and follow.
lead, follow = follow, lead[n:]
}
// Append what's left in follow.
_ = append(merged, follow...)
}

684
vendor/github.com/boltdb/bolt/tx.go generated vendored Normal file

@@ -0,0 +1,684 @@
package bolt
import (
"fmt"
"io"
"os"
"sort"
"strings"
"time"
"unsafe"
)
// txid represents the internal transaction identifier.
type txid uint64
// Tx represents a read-only or read/write transaction on the database.
// Read-only transactions can be used for retrieving values for keys and creating cursors.
// Read/write transactions can create and remove buckets and create and remove keys.
//
// IMPORTANT: You must commit or rollback transactions when you are done with
// them. Pages can not be reclaimed by the writer until no more transactions
// are using them. A long running read transaction can cause the database to
// quickly grow.
type Tx struct {
writable bool
managed bool
db *DB
meta *meta
root Bucket
pages map[pgid]*page
stats TxStats
commitHandlers []func()
// WriteFlag specifies the flag for write-related methods like WriteTo().
// Tx opens the database file with the specified flag to copy the data.
//
// By default, the flag is unset, which works well for mostly in-memory
// workloads. For databases that are much larger than available RAM,
// set the flag to syscall.O_DIRECT to avoid thrashing the page cache.
WriteFlag int
}
// init initializes the transaction.
func (tx *Tx) init(db *DB) {
tx.db = db
tx.pages = nil
// Copy the meta page since it can be changed by the writer.
tx.meta = &meta{}
db.meta().copy(tx.meta)
// Copy over the root bucket.
tx.root = newBucket(tx)
tx.root.bucket = &bucket{}
*tx.root.bucket = tx.meta.root
// Increment the transaction id and add a page cache for writable transactions.
if tx.writable {
tx.pages = make(map[pgid]*page)
tx.meta.txid += txid(1)
}
}
// ID returns the transaction id.
func (tx *Tx) ID() int {
return int(tx.meta.txid)
}
// DB returns a reference to the database that created the transaction.
func (tx *Tx) DB() *DB {
return tx.db
}
// Size returns current database size in bytes as seen by this transaction.
func (tx *Tx) Size() int64 {
return int64(tx.meta.pgid) * int64(tx.db.pageSize)
}
// Writable returns whether the transaction can perform write operations.
func (tx *Tx) Writable() bool {
return tx.writable
}
// Cursor creates a cursor associated with the root bucket.
// All items in the cursor will return a nil value because all root bucket keys point to buckets.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (tx *Tx) Cursor() *Cursor {
return tx.root.Cursor()
}
// Stats retrieves a copy of the current transaction statistics.
func (tx *Tx) Stats() TxStats {
return tx.stats
}
// Bucket retrieves a bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) Bucket(name []byte) *Bucket {
return tx.root.Bucket(name)
}
// CreateBucket creates a new bucket.
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
return tx.root.CreateBucket(name)
}
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
return tx.root.CreateBucketIfNotExists(name)
}
// DeleteBucket deletes a bucket.
// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
func (tx *Tx) DeleteBucket(name []byte) error {
return tx.root.DeleteBucket(name)
}
// ForEach executes a function for each bucket in the root.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller.
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
return tx.root.ForEach(func(k, v []byte) error {
if err := fn(k, tx.root.Bucket(k)); err != nil {
return err
}
return nil
})
}
// OnCommit adds a handler function to be executed after the transaction successfully commits.
func (tx *Tx) OnCommit(fn func()) {
tx.commitHandlers = append(tx.commitHandlers, fn)
}
// Commit writes all changes to disk and updates the meta page.
// Returns an error if a disk write error occurs, or if Commit is
// called on a read-only transaction.
func (tx *Tx) Commit() error {
_assert(!tx.managed, "managed tx commit not allowed")
if tx.db == nil {
return ErrTxClosed
} else if !tx.writable {
return ErrTxNotWritable
}
// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
// Rebalance nodes which have had deletions.
var startTime = time.Now()
tx.root.rebalance()
if tx.stats.Rebalance > 0 {
tx.stats.RebalanceTime += time.Since(startTime)
}
// spill data onto dirty pages.
startTime = time.Now()
if err := tx.root.spill(); err != nil {
tx.rollback()
return err
}
tx.stats.SpillTime += time.Since(startTime)
// Free the old root bucket.
tx.meta.root.root = tx.root.root
opgid := tx.meta.pgid
// Free the freelist and allocate new pages for it. This will overestimate
// the size of the freelist but not underestimate the size (which would be bad).
tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
if err != nil {
tx.rollback()
return err
}
if err := tx.db.freelist.write(p); err != nil {
tx.rollback()
return err
}
tx.meta.freelist = p.id
// If the high water mark has moved up then attempt to grow the database.
if tx.meta.pgid > opgid {
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
tx.rollback()
return err
}
}
// Write dirty pages to disk.
startTime = time.Now()
if err := tx.write(); err != nil {
tx.rollback()
return err
}
// If strict mode is enabled then perform a consistency check.
// Only the first consistency error is reported in the panic.
if tx.db.StrictMode {
ch := tx.Check()
var errs []string
for {
err, ok := <-ch
if !ok {
break
}
errs = append(errs, err.Error())
}
if len(errs) > 0 {
panic("check fail: " + strings.Join(errs, "\n"))
}
}
// Write meta to disk.
if err := tx.writeMeta(); err != nil {
tx.rollback()
return err
}
tx.stats.WriteTime += time.Since(startTime)
// Finalize the transaction.
tx.close()
// Execute commit handlers now that the locks have been removed.
for _, fn := range tx.commitHandlers {
fn()
}
return nil
}
// Rollback closes the transaction and ignores all previous updates. Read-only
// transactions must be rolled back and not committed.
func (tx *Tx) Rollback() error {
_assert(!tx.managed, "managed tx rollback not allowed")
if tx.db == nil {
return ErrTxClosed
}
tx.rollback()
return nil
}
func (tx *Tx) rollback() {
if tx.db == nil {
return
}
if tx.writable {
tx.db.freelist.rollback(tx.meta.txid)
tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
}
tx.close()
}
func (tx *Tx) close() {
if tx.db == nil {
return
}
if tx.writable {
// Grab freelist stats.
var freelistFreeN = tx.db.freelist.free_count()
var freelistPendingN = tx.db.freelist.pending_count()
var freelistAlloc = tx.db.freelist.size()
// Remove transaction ref & writer lock.
tx.db.rwtx = nil
tx.db.rwlock.Unlock()
// Merge statistics.
tx.db.statlock.Lock()
tx.db.stats.FreePageN = freelistFreeN
tx.db.stats.PendingPageN = freelistPendingN
tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
tx.db.stats.FreelistInuse = freelistAlloc
tx.db.stats.TxStats.add(&tx.stats)
tx.db.statlock.Unlock()
} else {
tx.db.removeTx(tx)
}
// Clear all references.
tx.db = nil
tx.meta = nil
tx.root = Bucket{tx: tx}
tx.pages = nil
}
// Copy writes the entire database to a writer.
// This function exists for backwards compatibility. Use WriteTo() instead.
func (tx *Tx) Copy(w io.Writer) error {
_, err := tx.WriteTo(w)
return err
}
// WriteTo writes the entire database to a writer.
// If err == nil then exactly tx.Size() bytes will be written into the writer.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
// Attempt to open reader with WriteFlag
f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
if err != nil {
return 0, err
}
defer func() { _ = f.Close() }()
// Generate a meta page. We use the same page data for both meta pages.
buf := make([]byte, tx.db.pageSize)
page := (*page)(unsafe.Pointer(&buf[0]))
page.flags = metaPageFlag
*page.meta() = *tx.meta
// Write meta 0.
page.id = 0
page.meta().checksum = page.meta().sum64()
nn, err := w.Write(buf)
n += int64(nn)
if err != nil {
return n, fmt.Errorf("meta 0 copy: %s", err)
}
// Write meta 1 with a lower transaction id.
page.id = 1
page.meta().txid -= 1
page.meta().checksum = page.meta().sum64()
nn, err = w.Write(buf)
n += int64(nn)
if err != nil {
return n, fmt.Errorf("meta 1 copy: %s", err)
}
// Move past the meta pages in the file.
if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
return n, fmt.Errorf("seek: %s", err)
}
// Copy data pages.
wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
n += wn
if err != nil {
return n, err
}
return n, f.Close()
}
// CopyFile copies the entire database to file at the given path.
// A reader transaction is maintained during the copy so it is safe to continue
// using the database while a copy is in progress.
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
if err != nil {
return err
}
err = tx.Copy(f)
if err != nil {
_ = f.Close()
return err
}
return f.Close()
}
// Check performs several consistency checks on the database for this transaction.
// An error is returned if any inconsistency is found.
//
// It can be safely run concurrently on a writable transaction. However, this
// incurs a high cost for large databases and databases with a lot of subbuckets
// because of caching. This overhead can be removed if running on a read-only
// transaction, however, it is not safe to execute other writer transactions at
// the same time.
func (tx *Tx) Check() <-chan error {
ch := make(chan error)
go tx.check(ch)
return ch
}
func (tx *Tx) check(ch chan error) {
// Check if any pages are double freed.
freed := make(map[pgid]bool)
all := make([]pgid, tx.db.freelist.count())
tx.db.freelist.copyall(all)
for _, id := range all {
if freed[id] {
ch <- fmt.Errorf("page %d: already freed", id)
}
freed[id] = true
}
// Track every reachable page.
reachable := make(map[pgid]*page)
reachable[0] = tx.page(0) // meta0
reachable[1] = tx.page(1) // meta1
for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
}
// Recursively check buckets.
tx.checkBucket(&tx.root, reachable, freed, ch)
// Ensure all pages below high water mark are either reachable or freed.
for i := pgid(0); i < tx.meta.pgid; i++ {
_, isReachable := reachable[i]
if !isReachable && !freed[i] {
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
}
}
// Close the channel to signal completion.
close(ch)
}
func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
// Ignore inline buckets.
if b.root == 0 {
return
}
// Check every page used by this bucket.
b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
if p.id > tx.meta.pgid {
ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
}
// Ensure each page is only referenced once.
for i := pgid(0); i <= pgid(p.overflow); i++ {
var id = p.id + i
if _, ok := reachable[id]; ok {
ch <- fmt.Errorf("page %d: multiple references", int(id))
}
reachable[id] = p
}
// We should only encounter un-freed leaf and branch pages.
if freed[p.id] {
ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
}
})
// Check each bucket within this bucket.
_ = b.ForEach(func(k, v []byte) error {
if child := b.Bucket(k); child != nil {
tx.checkBucket(child, reachable, freed, ch)
}
return nil
})
}
// allocate returns a contiguous block of memory starting at a given page.
func (tx *Tx) allocate(count int) (*page, error) {
p, err := tx.db.allocate(count)
if err != nil {
return nil, err
}
// Save to our page cache.
tx.pages[p.id] = p
// Update statistics.
tx.stats.PageCount++
tx.stats.PageAlloc += count * tx.db.pageSize
return p, nil
}
// write writes any dirty pages to disk.
func (tx *Tx) write() error {
// Sort pages by id.
pages := make(pages, 0, len(tx.pages))
for _, p := range tx.pages {
pages = append(pages, p)
}
// Clear out page cache early.
tx.pages = make(map[pgid]*page)
sort.Sort(pages)
// Write pages to disk in order.
for _, p := range pages {
size := (int(p.overflow) + 1) * tx.db.pageSize
offset := int64(p.id) * int64(tx.db.pageSize)
// Write out page in "max allocation" sized chunks.
ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
for {
// Limit our write to our max allocation size.
sz := size
if sz > maxAllocSize-1 {
sz = maxAllocSize - 1
}
// Write chunk to disk.
buf := ptr[:sz]
if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
return err
}
// Update statistics.
tx.stats.Write++
// Exit inner for loop if we've written all the chunks.
size -= sz
if size == 0 {
break
}
// Otherwise move offset forward and move pointer to next chunk.
offset += int64(sz)
ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
}
}
// Ignore file sync if flag is set on DB.
if !tx.db.NoSync || IgnoreNoSync {
if err := fdatasync(tx.db); err != nil {
return err
}
}
// Put small pages back to page pool.
for _, p := range pages {
// Ignore page sizes over 1 page.
// These are allocated using make() instead of the page pool.
if int(p.overflow) != 0 {
continue
}
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
for i := range buf {
buf[i] = 0
}
tx.db.pagePool.Put(buf)
}
return nil
}
// writeMeta writes the meta to the disk.
func (tx *Tx) writeMeta() error {
// Create a temporary buffer for the meta page.
buf := make([]byte, tx.db.pageSize)
p := tx.db.pageInBuffer(buf, 0)
tx.meta.write(p)
// Write the meta page to file.
if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
return err
}
if !tx.db.NoSync || IgnoreNoSync {
if err := fdatasync(tx.db); err != nil {
return err
}
}
// Update statistics.
tx.stats.Write++
return nil
}
// page returns a reference to the page with a given id.
// If page has been written to then a temporary buffered page is returned.
func (tx *Tx) page(id pgid) *page {
// Check the dirty pages first.
if tx.pages != nil {
if p, ok := tx.pages[id]; ok {
return p
}
}
// Otherwise return directly from the mmap.
return tx.db.page(id)
}
// forEachPage iterates over every page within a given page and executes a function.
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
p := tx.page(pgid)
// Execute function.
fn(p, depth)
// Recursively loop over children.
if (p.flags & branchPageFlag) != 0 {
for i := 0; i < int(p.count); i++ {
elem := p.branchPageElement(uint16(i))
tx.forEachPage(elem.pgid, depth+1, fn)
}
}
}
// Page returns page information for a given page number.
// This is only safe for concurrent use when used by a writable transaction.
func (tx *Tx) Page(id int) (*PageInfo, error) {
if tx.db == nil {
return nil, ErrTxClosed
} else if pgid(id) >= tx.meta.pgid {
return nil, nil
}
// Build the page info.
p := tx.db.page(pgid(id))
info := &PageInfo{
ID: id,
Count: int(p.count),
OverflowCount: int(p.overflow),
}
// Determine the type (or if it's free).
if tx.db.freelist.freed(pgid(id)) {
info.Type = "free"
} else {
info.Type = p.typ()
}
return info, nil
}
// TxStats represents statistics about the actions performed by the transaction.
type TxStats struct {
// Page statistics.
PageCount int // number of page allocations
PageAlloc int // total bytes allocated
// Cursor statistics.
CursorCount int // number of cursors created
// Node statistics
NodeCount int // number of node allocations
NodeDeref int // number of node dereferences
// Rebalance statistics.
Rebalance int // number of node rebalances
RebalanceTime time.Duration // total time spent rebalancing
// Split/Spill statistics.
Split int // number of nodes split
Spill int // number of nodes spilled
SpillTime time.Duration // total time spent spilling
// Write statistics.
Write int // number of writes performed
WriteTime time.Duration // total time spent writing to disk
}
func (s *TxStats) add(other *TxStats) {
s.PageCount += other.PageCount
s.PageAlloc += other.PageAlloc
s.CursorCount += other.CursorCount
s.NodeCount += other.NodeCount
s.NodeDeref += other.NodeDeref
s.Rebalance += other.Rebalance
s.RebalanceTime += other.RebalanceTime
s.Split += other.Split
s.Spill += other.Spill
s.SpillTime += other.SpillTime
s.Write += other.Write
s.WriteTime += other.WriteTime
}
// Sub calculates and returns the difference between two sets of transaction stats.
// This is useful when obtaining stats at two different points in time and
// you need the performance counters that occurred within that time span.
func (s *TxStats) Sub(other *TxStats) TxStats {
var diff TxStats
diff.PageCount = s.PageCount - other.PageCount
diff.PageAlloc = s.PageAlloc - other.PageAlloc
diff.CursorCount = s.CursorCount - other.CursorCount
diff.NodeCount = s.NodeCount - other.NodeCount
diff.NodeDeref = s.NodeDeref - other.NodeDeref
diff.Rebalance = s.Rebalance - other.Rebalance
diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
diff.Split = s.Split - other.Split
diff.Spill = s.Spill - other.Spill
diff.SpillTime = s.SpillTime - other.SpillTime
diff.Write = s.Write - other.Write
diff.WriteTime = s.WriteTime - other.WriteTime
return diff
}
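A usage sketch tying the Tx lifecycle together: a manually managed read-write transaction, which must end in exactly one of Commit or Rollback, followed by an online backup via CopyFile inside a read transaction. File and bucket names are illustrative:

```go
package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("tx.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Manually managed read-write transaction: roll back on any error
	// so pending pages are returned to the freelist.
	tx, err := db.Begin(true)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := tx.CreateBucketIfNotExists([]byte("jobs")); err != nil {
		tx.Rollback()
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}

	// Online backup: a read transaction stays open during the copy,
	// so the database remains usable while it runs.
	if err := db.View(func(tx *bolt.Tx) error {
		return tx.CopyFile("tx-backup.db", 0600)
	}); err != nil {
		log.Fatal(err)
	}
}
```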

20
vendor/github.com/fatih/color/LICENSE.md generated vendored Normal file

@@ -0,0 +1,20 @@
The MIT License (MIT)
Copyright (c) 2013 Fatih Arslan
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

176
vendor/github.com/fatih/color/README.md generated vendored Normal file

@@ -0,0 +1,176 @@
# color [![](https://github.com/fatih/color/workflows/build/badge.svg)](https://github.com/fatih/color/actions) [![PkgGoDev](https://pkg.go.dev/badge/github.com/fatih/color)](https://pkg.go.dev/github.com/fatih/color)
Color lets you use colorized outputs in terms of [ANSI Escape
Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
has support for Windows too! The API can be used in several ways, pick one that
suits you.
![Color](https://user-images.githubusercontent.com/438920/96832689-03b3e000-13f4-11eb-9803-46f4c4de3406.jpg)
## Install
```bash
go get github.com/fatih/color
```
## Examples
### Standard colors
```go
// Print with default helper functions
color.Cyan("Prints text in cyan.")
// A newline will be appended automatically
color.Blue("Prints %s in blue.", "text")
// These are using the default foreground colors
color.Red("We have red")
color.Magenta("And many others ..")
```
### Mix and reuse colors
```go
// Create a new color object
c := color.New(color.FgCyan).Add(color.Underline)
c.Println("Prints cyan text with an underline.")
// Or just add them to New()
d := color.New(color.FgCyan, color.Bold)
d.Printf("This prints bold cyan %s\n", "too!.")
// Mix up foreground and background colors, create new mixes!
red := color.New(color.FgRed)
boldRed := red.Add(color.Bold)
boldRed.Println("This will print text in bold red.")
whiteBackground := red.Add(color.BgWhite)
whiteBackground.Println("Red text with white background.")
```
### Use your own output (io.Writer)
```go
// Use your own io.Writer output
color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
blue := color.New(color.FgBlue)
blue.Fprint(writer, "This will print text in blue.")
```
### Custom print functions (PrintFunc)
```go
// Create a custom print function for convenience
red := color.New(color.FgRed).PrintfFunc()
red("Warning")
red("Error: %s", err)
// Mix up multiple attributes
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
notice("Don't forget this...")
```
### Custom fprint functions (FprintFunc)
```go
blue := color.New(color.FgBlue).FprintfFunc()
blue(myWriter, "important notice: %s", stars)
// Mix up with multiple attributes
success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
success(myWriter, "Don't forget this...")
```
### Insert into noncolor strings (SprintFunc)
```go
// Create SprintXxx functions to mix strings with other non-colorized strings:
yellow := color.New(color.FgYellow).SprintFunc()
red := color.New(color.FgRed).SprintFunc()
fmt.Printf("This is a %s and this is %s.\n", yellow("warning"), red("error"))
info := color.New(color.FgWhite, color.BgGreen).SprintFunc()
fmt.Printf("This %s rocks!\n", info("package"))
// Use helper functions
fmt.Println("This", color.RedString("warning"), "should be not neglected.")
fmt.Printf("%v %v\n", color.GreenString("Info:"), "an important message.")
// Windows supported too! Just don't forget to change the output to color.Output
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
```
### Plug into existing code
```go
// Use handy standard colors
color.Set(color.FgYellow)
fmt.Println("Existing text will now be in yellow")
fmt.Printf("This one %s\n", "too")
color.Unset() // Don't forget to unset
// You can mix up parameters
color.Set(color.FgMagenta, color.Bold)
defer color.Unset() // Use it in your function
fmt.Println("All text will now be bold magenta.")
```
### Disable/Enable color
There might be a case where you want to explicitly disable/enable color output. The
`go-isatty` package will automatically disable color output for non-tty output streams
(for example if the output were piped directly to `less`).
The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment
variable is set to a non-empty string.
`Color` has support to disable/enable colors programmatically both globally and
for single color definitions. For example suppose you have a CLI app and a
`-no-color` bool flag. You can easily disable the color output with:
```go
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
if *flagNoColor {
color.NoColor = true // disables colorized output
}
```
It also has support for single color definitions (local). You can
disable/enable color output on the fly:
```go
c := color.New(color.FgCyan)
c.Println("Prints cyan text")
c.DisableColor()
c.Println("This is printed without any color")
c.EnableColor()
c.Println("This prints again cyan...")
```
## GitHub Actions
To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams.
## Todo
* Save/Return previous values
* Evaluate fmt.Formatter interface
## Credits
* [Fatih Arslan](https://github.com/fatih)
* Windows support via @mattn: [colorable](https://github.com/mattn/go-colorable)
## License
The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details

616
vendor/github.com/fatih/color/color.go generated vendored Normal file

@@ -0,0 +1,616 @@
package color
import (
"fmt"
"io"
"os"
"strconv"
"strings"
"sync"
"github.com/mattn/go-colorable"
"github.com/mattn/go-isatty"
)
var (
// NoColor defines if the output is colorized or not. It's dynamically set to
// false or true based on the stdout's file descriptor referring to a terminal
// or not. It's also set to true if the NO_COLOR environment variable is
// set to a non-empty string. This is a global option and affects all
// colors. For more control over each color block, use the
// DisableColor() method individually.
NoColor = noColorIsSet() || os.Getenv("TERM") == "dumb" ||
(!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))
// Output defines the standard output of the print functions. By default,
// os.Stdout is used.
Output = colorable.NewColorableStdout()
// Error defines a color supporting writer for os.Stderr.
Error = colorable.NewColorableStderr()
// colorsCache is used to reduce the count of created Color objects and
// allows to reuse already created objects with required Attribute.
colorsCache = make(map[Attribute]*Color)
colorsCacheMu sync.Mutex // protects colorsCache
)
// noColorIsSet returns true if the environment variable NO_COLOR is set to a non-empty string.
func noColorIsSet() bool {
return os.Getenv("NO_COLOR") != ""
}
// Color defines a custom color object which is defined by SGR parameters.
type Color struct {
params []Attribute
noColor *bool
}
// Attribute defines a single SGR Code
type Attribute int
const escape = "\x1b"
// Base attributes
const (
Reset Attribute = iota
Bold
Faint
Italic
Underline
BlinkSlow
BlinkRapid
ReverseVideo
Concealed
CrossedOut
)
// Foreground text colors
const (
FgBlack Attribute = iota + 30
FgRed
FgGreen
FgYellow
FgBlue
FgMagenta
FgCyan
FgWhite
)
// Foreground Hi-Intensity text colors
const (
FgHiBlack Attribute = iota + 90
FgHiRed
FgHiGreen
FgHiYellow
FgHiBlue
FgHiMagenta
FgHiCyan
FgHiWhite
)
// Background text colors
const (
BgBlack Attribute = iota + 40
BgRed
BgGreen
BgYellow
BgBlue
BgMagenta
BgCyan
BgWhite
)
// Background Hi-Intensity text colors
const (
BgHiBlack Attribute = iota + 100
BgHiRed
BgHiGreen
BgHiYellow
BgHiBlue
BgHiMagenta
BgHiCyan
BgHiWhite
)
// New returns a newly created color object.
func New(value ...Attribute) *Color {
c := &Color{
params: make([]Attribute, 0),
}
if noColorIsSet() {
c.noColor = boolPtr(true)
}
c.Add(value...)
return c
}
// Set sets the given parameters immediately. It will change the color of
// output with the given SGR parameters until color.Unset() is called.
func Set(p ...Attribute) *Color {
c := New(p...)
c.Set()
return c
}
// Unset resets all escape attributes and clears the output. Usually should
// be called after Set().
func Unset() {
if NoColor {
return
}
fmt.Fprintf(Output, "%s[%dm", escape, Reset)
}
// Set sets the SGR sequence.
func (c *Color) Set() *Color {
if c.isNoColorSet() {
return c
}
fmt.Fprint(Output, c.format())
return c
}
func (c *Color) unset() {
if c.isNoColorSet() {
return
}
Unset()
}
// SetWriter is used to set the SGR sequence with the given io.Writer. This is
// a low-level function, and users should use the higher-level functions, such
// as color.Fprint, color.Print, etc.
func (c *Color) SetWriter(w io.Writer) *Color {
if c.isNoColorSet() {
return c
}
fmt.Fprint(w, c.format())
return c
}
// UnsetWriter resets all escape attributes and clears the output with the given
// io.Writer. Usually should be called after SetWriter().
func (c *Color) UnsetWriter(w io.Writer) {
if c.isNoColorSet() {
return
}
if NoColor {
return
}
fmt.Fprintf(w, "%s[%dm", escape, Reset)
}
// Add is used to chain SGR parameters. Use as many parameters as needed to
// combine and create custom color objects. Example: Add(color.FgRed, color.Underline).
func (c *Color) Add(value ...Attribute) *Color {
c.params = append(c.params, value...)
return c
}
// Fprint formats using the default formats for its operands and writes to w.
// Spaces are added between operands when neither is a string.
// It returns the number of bytes written and any write error encountered.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
c.SetWriter(w)
defer c.UnsetWriter(w)
return fmt.Fprint(w, a...)
}
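// Example (a sketch): on Windows, wrap the *os.File before passing it in, so
// that the SGR sequences are translated into console API calls:
//
//	New(FgRed).Fprintln(colorable.NewColorable(os.Stderr), "error: disk full")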
// Print formats using the default formats for its operands and writes to
// standard output. Spaces are added between operands when neither is a
// string. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Print(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprint(Output, a...)
}
// Fprintf formats according to a format specifier and writes to w.
// It returns the number of bytes written and any write error encountered.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
c.SetWriter(w)
defer c.UnsetWriter(w)
return fmt.Fprintf(w, format, a...)
}
// Printf formats according to a format specifier and writes to standard output.
// It returns the number of bytes written and any write error encountered.
// This is the standard fmt.Printf() method wrapped with the given color.
func (c *Color) Printf(format string, a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintf(Output, format, a...)
}
// Fprintln formats using the default formats for its operands and writes to w.
// Spaces are always added between operands and a newline is appended.
// On Windows, users should wrap w with colorable.NewColorable() if w is of
// type *os.File.
func (c *Color) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
c.SetWriter(w)
defer c.UnsetWriter(w)
return fmt.Fprintln(w, a...)
}
// Println formats using the default formats for its operands and writes to
// standard output. Spaces are always added between operands and a newline is
// appended. It returns the number of bytes written and any write error
// encountered. This is the standard fmt.Print() method wrapped with the given
// color.
func (c *Color) Println(a ...interface{}) (n int, err error) {
c.Set()
defer c.unset()
return fmt.Fprintln(Output, a...)
}
// Sprint is just like Print, but returns a string instead of printing it.
func (c *Color) Sprint(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
// Sprintln is just like Println, but returns a string instead of printing it.
func (c *Color) Sprintln(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
// Sprintf is just like Printf, but returns a string instead of printing it.
func (c *Color) Sprintf(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
// FprintFunc returns a new function that prints the passed arguments as
// colorized with color.Fprint().
func (c *Color) FprintFunc() func(w io.Writer, a ...interface{}) {
return func(w io.Writer, a ...interface{}) {
c.Fprint(w, a...)
}
}
// PrintFunc returns a new function that prints the passed arguments as
// colorized with color.Print().
func (c *Color) PrintFunc() func(a ...interface{}) {
return func(a ...interface{}) {
c.Print(a...)
}
}
// FprintfFunc returns a new function that prints the passed arguments as
// colorized with color.Fprintf().
func (c *Color) FprintfFunc() func(w io.Writer, format string, a ...interface{}) {
return func(w io.Writer, format string, a ...interface{}) {
c.Fprintf(w, format, a...)
}
}
// PrintfFunc returns a new function that prints the passed arguments as
// colorized with color.Printf().
func (c *Color) PrintfFunc() func(format string, a ...interface{}) {
return func(format string, a ...interface{}) {
c.Printf(format, a...)
}
}
// FprintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Fprintln().
func (c *Color) FprintlnFunc() func(w io.Writer, a ...interface{}) {
return func(w io.Writer, a ...interface{}) {
c.Fprintln(w, a...)
}
}
// PrintlnFunc returns a new function that prints the passed arguments as
// colorized with color.Println().
func (c *Color) PrintlnFunc() func(a ...interface{}) {
return func(a ...interface{}) {
c.Println(a...)
}
}
// SprintFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprint(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output, example:
//
// put := New(FgYellow).SprintFunc()
// fmt.Fprintf(color.Output, "This is a %s", put("warning"))
func (c *Color) SprintFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprint(a...))
}
}
// SprintfFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintf(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintfFunc() func(format string, a ...interface{}) string {
return func(format string, a ...interface{}) string {
return c.wrap(fmt.Sprintf(format, a...))
}
}
// SprintlnFunc returns a new function that returns colorized strings for the
// given arguments with fmt.Sprintln(). Useful to put into or mix into other
// strings. Windows users should use this in conjunction with color.Output.
func (c *Color) SprintlnFunc() func(a ...interface{}) string {
return func(a ...interface{}) string {
return c.wrap(fmt.Sprintln(a...))
}
}
// sequence returns a formatted SGR sequence to be plugged into a "\x1b[...m"
// escape sequence. An example output might be: "1;36" -> bold cyan
func (c *Color) sequence() string {
format := make([]string, len(c.params))
for i, v := range c.params {
format[i] = strconv.Itoa(int(v))
}
return strings.Join(format, ";")
}
// wrap wraps the s string with the color's attributes. The string is ready to
// be printed.
func (c *Color) wrap(s string) string {
if c.isNoColorSet() {
return s
}
return c.format() + s + c.unformat()
}
func (c *Color) format() string {
return fmt.Sprintf("%s[%sm", escape, c.sequence())
}
func (c *Color) unformat() string {
return fmt.Sprintf("%s[%dm", escape, Reset)
}
// DisableColor disables the color output. Useful when you want to keep
// existing code unchanged while still controlling the output. Can be used for
// flags like "--no-color". To enable color again, use the EnableColor() method.
func (c *Color) DisableColor() {
c.noColor = boolPtr(true)
}
// EnableColor enables the color output. Use it in conjunction with
// DisableColor(). Otherwise, this method has no side effects.
func (c *Color) EnableColor() {
c.noColor = boolPtr(false)
}
func (c *Color) isNoColorSet() bool {
// check first if we have user set action
if c.noColor != nil {
return *c.noColor
}
// if not return the global option, which is disabled by default
return NoColor
}
// Equals returns a boolean value indicating whether two colors are equal.
func (c *Color) Equals(c2 *Color) bool {
if len(c.params) != len(c2.params) {
return false
}
for _, attr := range c.params {
if !c2.attrExists(attr) {
return false
}
}
return true
}
func (c *Color) attrExists(a Attribute) bool {
for _, attr := range c.params {
if attr == a {
return true
}
}
return false
}
func boolPtr(v bool) *bool {
return &v
}
func getCachedColor(p Attribute) *Color {
colorsCacheMu.Lock()
defer colorsCacheMu.Unlock()
c, ok := colorsCache[p]
if !ok {
c = New(p)
colorsCache[p] = c
}
return c
}
func colorPrint(format string, p Attribute, a ...interface{}) {
c := getCachedColor(p)
if !strings.HasSuffix(format, "\n") {
format += "\n"
}
if len(a) == 0 {
c.Print(format)
} else {
c.Printf(format, a...)
}
}
func colorString(format string, p Attribute, a ...interface{}) string {
c := getCachedColor(p)
if len(a) == 0 {
return c.SprintFunc()(format)
}
return c.SprintfFunc()(format, a...)
}
// Black is a convenient helper function to print with black foreground. A
// newline is appended to format by default.
func Black(format string, a ...interface{}) { colorPrint(format, FgBlack, a...) }
// Red is a convenient helper function to print with red foreground. A
// newline is appended to format by default.
func Red(format string, a ...interface{}) { colorPrint(format, FgRed, a...) }
// Green is a convenient helper function to print with green foreground. A
// newline is appended to format by default.
func Green(format string, a ...interface{}) { colorPrint(format, FgGreen, a...) }
// Yellow is a convenient helper function to print with yellow foreground.
// A newline is appended to format by default.
func Yellow(format string, a ...interface{}) { colorPrint(format, FgYellow, a...) }
// Blue is a convenient helper function to print with blue foreground. A
// newline is appended to format by default.
func Blue(format string, a ...interface{}) { colorPrint(format, FgBlue, a...) }
// Magenta is a convenient helper function to print with magenta foreground.
// A newline is appended to format by default.
func Magenta(format string, a ...interface{}) { colorPrint(format, FgMagenta, a...) }
// Cyan is a convenient helper function to print with cyan foreground. A
// newline is appended to format by default.
func Cyan(format string, a ...interface{}) { colorPrint(format, FgCyan, a...) }
// White is a convenient helper function to print with white foreground. A
// newline is appended to format by default.
func White(format string, a ...interface{}) { colorPrint(format, FgWhite, a...) }
// BlackString is a convenient helper function to return a string with black
// foreground.
func BlackString(format string, a ...interface{}) string { return colorString(format, FgBlack, a...) }
// RedString is a convenient helper function to return a string with red
// foreground.
func RedString(format string, a ...interface{}) string { return colorString(format, FgRed, a...) }
// GreenString is a convenient helper function to return a string with green
// foreground.
func GreenString(format string, a ...interface{}) string { return colorString(format, FgGreen, a...) }
// YellowString is a convenient helper function to return a string with yellow
// foreground.
func YellowString(format string, a ...interface{}) string { return colorString(format, FgYellow, a...) }
// BlueString is a convenient helper function to return a string with blue
// foreground.
func BlueString(format string, a ...interface{}) string { return colorString(format, FgBlue, a...) }
// MagentaString is a convenient helper function to return a string with magenta
// foreground.
func MagentaString(format string, a ...interface{}) string {
return colorString(format, FgMagenta, a...)
}
// CyanString is a convenient helper function to return a string with cyan
// foreground.
func CyanString(format string, a ...interface{}) string { return colorString(format, FgCyan, a...) }
// WhiteString is a convenient helper function to return a string with white
// foreground.
func WhiteString(format string, a ...interface{}) string { return colorString(format, FgWhite, a...) }
// HiBlack is a convenient helper function to print with hi-intensity black foreground. A
// newline is appended to format by default.
func HiBlack(format string, a ...interface{}) { colorPrint(format, FgHiBlack, a...) }
// HiRed is a convenient helper function to print with hi-intensity red foreground. A
// newline is appended to format by default.
func HiRed(format string, a ...interface{}) { colorPrint(format, FgHiRed, a...) }
// HiGreen is a convenient helper function to print with hi-intensity green foreground. A
// newline is appended to format by default.
func HiGreen(format string, a ...interface{}) { colorPrint(format, FgHiGreen, a...) }
// HiYellow is a convenient helper function to print with hi-intensity yellow foreground.
// A newline is appended to format by default.
func HiYellow(format string, a ...interface{}) { colorPrint(format, FgHiYellow, a...) }
// HiBlue is a convenient helper function to print with hi-intensity blue foreground. A
// newline is appended to format by default.
func HiBlue(format string, a ...interface{}) { colorPrint(format, FgHiBlue, a...) }
// HiMagenta is a convenient helper function to print with hi-intensity magenta foreground.
// A newline is appended to format by default.
func HiMagenta(format string, a ...interface{}) { colorPrint(format, FgHiMagenta, a...) }
// HiCyan is a convenient helper function to print with hi-intensity cyan foreground. A
// newline is appended to format by default.
func HiCyan(format string, a ...interface{}) { colorPrint(format, FgHiCyan, a...) }
// HiWhite is a convenient helper function to print with hi-intensity white foreground. A
// newline is appended to format by default.
func HiWhite(format string, a ...interface{}) { colorPrint(format, FgHiWhite, a...) }
// HiBlackString is a convenient helper function to return a string with hi-intensity black
// foreground.
func HiBlackString(format string, a ...interface{}) string {
return colorString(format, FgHiBlack, a...)
}
// HiRedString is a convenient helper function to return a string with hi-intensity red
// foreground.
func HiRedString(format string, a ...interface{}) string { return colorString(format, FgHiRed, a...) }
// HiGreenString is a convenient helper function to return a string with hi-intensity green
// foreground.
func HiGreenString(format string, a ...interface{}) string {
return colorString(format, FgHiGreen, a...)
}
// HiYellowString is a convenient helper function to return a string with hi-intensity yellow
// foreground.
func HiYellowString(format string, a ...interface{}) string {
return colorString(format, FgHiYellow, a...)
}
// HiBlueString is a convenient helper function to return a string with hi-intensity blue
// foreground.
func HiBlueString(format string, a ...interface{}) string { return colorString(format, FgHiBlue, a...) }
// HiMagentaString is a convenient helper function to return a string with hi-intensity magenta
// foreground.
func HiMagentaString(format string, a ...interface{}) string {
return colorString(format, FgHiMagenta, a...)
}
// HiCyanString is a convenient helper function to return a string with hi-intensity cyan
// foreground.
func HiCyanString(format string, a ...interface{}) string { return colorString(format, FgHiCyan, a...) }
// HiWhiteString is a convenient helper function to return a string with hi-intensity white
// foreground.
func HiWhiteString(format string, a ...interface{}) string {
return colorString(format, FgHiWhite, a...)
}

19
vendor/github.com/fatih/color/color_windows.go generated vendored Normal file

@@ -0,0 +1,19 @@
package color
import (
"os"
"golang.org/x/sys/windows"
)
func init() {
// Opt-in for ansi color support for current process.
// https://learn.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences#output-sequences
var outMode uint32
out := windows.Handle(os.Stdout.Fd())
if err := windows.GetConsoleMode(out, &outMode); err != nil {
return
}
outMode |= windows.ENABLE_PROCESSED_OUTPUT | windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING
_ = windows.SetConsoleMode(out, outMode)
}

134
vendor/github.com/fatih/color/doc.go generated vendored Normal file

@@ -0,0 +1,134 @@
/*
Package color is an ANSI color package to output colorized or SGR defined
output to the standard output. The API can be used in several ways; pick one
that suits you.
Use simple and default helper functions with predefined foreground colors:
color.Cyan("Prints text in cyan.")
// a newline will be appended automatically
color.Blue("Prints %s in blue.", "text")
// More default foreground colors..
color.Red("We have red")
color.Yellow("Yellow color too!")
color.Magenta("And many others ..")
// Hi-intensity colors
color.HiGreen("Bright green color.")
color.HiBlack("Bright black means gray..")
color.HiWhite("Shiny white color!")
However, there are times when custom color mixes are required. Below are some
examples to create custom color objects and use the print functions of each
separate color object.
// Create a new color object
c := color.New(color.FgCyan).Add(color.Underline)
c.Println("Prints cyan text with an underline.")
// Or just add them to New()
d := color.New(color.FgCyan, color.Bold)
d.Printf("This prints bold cyan %s\n", "too!.")
// Mix up foreground and background colors, create new mixes!
red := color.New(color.FgRed)
boldRed := red.Add(color.Bold)
boldRed.Println("This will print text in bold red.")
whiteBackground := red.Add(color.BgWhite)
whiteBackground.Println("Red text with White background.")
// Use your own io.Writer output
color.New(color.FgBlue).Fprintln(myWriter, "blue color!")
blue := color.New(color.FgBlue)
blue.Fprint(myWriter, "This will print text in blue.")
You can create PrintXxx functions to simplify even more:
// Create a custom print function for convenience
red := color.New(color.FgRed).PrintfFunc()
red("warning")
red("error: %s", err)
// Mix up multiple attributes
notice := color.New(color.Bold, color.FgGreen).PrintlnFunc()
notice("don't forget this...")
You can also use the FprintXxx functions to pass your own io.Writer:
blue := color.New(FgBlue).FprintfFunc()
blue(myWriter, "important notice: %s", stars)
// Mix up with multiple attributes
success := color.New(color.Bold, color.FgGreen).FprintlnFunc()
success(myWriter, "don't forget this...")
Or create SprintXxx functions to mix strings with other non-colorized strings:
yellow := New(FgYellow).SprintFunc()
red := New(FgRed).SprintFunc()
fmt.Printf("this is a %s and this is %s.\n", yellow("warning"), red("error"))
info := New(FgWhite, BgGreen).SprintFunc()
fmt.Printf("this %s rocks!\n", info("package"))
Windows support is enabled by default. All Print functions work as intended.
However, for the color.SprintXXX functions only, the user should use
fmt.FprintXXX and set the output to color.Output:
fmt.Fprintf(color.Output, "Windows support: %s", color.GreenString("PASS"))
info := New(FgWhite, BgGreen).SprintFunc()
fmt.Fprintf(color.Output, "this %s rocks!\n", info("package"))
Using with existing code is possible. Just use the Set() method to set the
standard output to the given parameters. That way a rewrite of existing
code is not required.
// Use handy standard colors.
color.Set(color.FgYellow)
fmt.Println("Existing text will be now in Yellow")
fmt.Printf("This one %s\n", "too")
color.Unset() // don't forget to unset
// You can mix up parameters
color.Set(color.FgMagenta, color.Bold)
defer color.Unset() // use it in your function
fmt.Println("All text will be now bold magenta.")
There might be a case where you want to disable color output (for example to
pipe the standard output of your app to somewhere else). `Color` has support to
disable colors both globally and for single color definitions. For example,
suppose you have a CLI app and a `--no-color` bool flag. You can easily disable
the color output with:
var flagNoColor = flag.Bool("no-color", false, "Disable color output")
if *flagNoColor {
color.NoColor = true // disables colorized output
}
You can also disable the color by setting the NO_COLOR environment variable to any value.
It also has support for single color definitions (local). You can
disable/enable color output on the fly:
c := color.New(color.FgCyan)
c.Println("Prints cyan text")
c.DisableColor()
c.Println("This is printed without any color")
c.EnableColor()
c.Println("This prints again cyan...")
*/
package color

1
vendor/github.com/hashicorp/go-hclog/.gitignore generated vendored Normal file

@@ -0,0 +1 @@
.idea*

19
vendor/github.com/hashicorp/go-hclog/LICENSE generated vendored Normal file

@@ -0,0 +1,19 @@
Copyright (c) 2017 HashiCorp, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

148
vendor/github.com/hashicorp/go-hclog/README.md generated vendored Normal file

@@ -0,0 +1,148 @@
# go-hclog
[![Go Documentation](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)][godocs]
[godocs]: https://godoc.org/github.com/hashicorp/go-hclog
`go-hclog` is a package for Go that provides a simple key/value logging
interface for use in development and production environments.
Unlike the standard library `log` package, it provides logging levels, so the
amount of output can be reduced to the desired level.
It provides `Printf` style logging of values via `hclog.Fmt()`.
It provides a human readable output mode for use in development as well as
JSON output mode for production.
## Stability Note
This library has reached 1.0 stability. Its API can be considered solidified
and promised through future versions.
## Installation and Docs
Install using `go get github.com/hashicorp/go-hclog`.
Full documentation is available at
http://godoc.org/github.com/hashicorp/go-hclog
## Usage
### Use the global logger
```go
hclog.Default().Info("hello world")
```
```text
2017-07-05T16:15:55.167-0700 [INFO ] hello world
```
(Note timestamps are removed in future examples for brevity.)
### Create a new logger
```go
appLogger := hclog.New(&hclog.LoggerOptions{
Name: "my-app",
Level: hclog.LevelFromString("DEBUG"),
})
```
### Emit an Info level message with 2 key/value pairs
```go
input := "5.5"
_, err := strconv.ParseInt(input, 10, 32)
if err != nil {
appLogger.Info("Invalid input for ParseInt", "input", input, "error", err)
}
```
```text
... [INFO ] my-app: Invalid input for ParseInt: input=5.5 error="strconv.ParseInt: parsing "5.5": invalid syntax"
```
### Create a new Logger for a major subsystem
```go
subsystemLogger := appLogger.Named("transport")
subsystemLogger.Info("we are transporting something")
```
```text
... [INFO ] my-app.transport: we are transporting something
```
Notice that logs emitted by `subsystemLogger` contain `my-app.transport`,
reflecting both the application and subsystem names.
### Create a new Logger with fixed key/value pairs
Using `With()` will include a specific key-value pair in all messages emitted
by that logger.
```go
requestID := "5fb446b6-6eba-821d-df1b-cd7501b6a363"
requestLogger := subsystemLogger.With("request", requestID)
requestLogger.Info("we are transporting a request")
```
```text
... [INFO ] my-app.transport: we are transporting a request: request=5fb446b6-6eba-821d-df1b-cd7501b6a363
```
This allows sub-Loggers to be context specific without having to thread the
key/value pairs through all the callers.
### Using `hclog.Fmt()`
```go
totalBandwidth := 200
appLogger.Info("total bandwidth exceeded", "bandwidth", hclog.Fmt("%d GB/s", totalBandwidth))
```
```text
... [INFO ] my-app: total bandwidth exceeded: bandwidth="200 GB/s"
```
### Use this with code that uses the standard library logger
If you want to use the standard library's `log.Logger` interface you can wrap
`hclog.Logger` by calling the `StandardLogger()` method. This allows you to use
it with the familiar `Println()`, `Printf()`, etc. For example:
```go
stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
InferLevels: true,
})
// Printf() is provided by stdlib log.Logger interface, not hclog.Logger
stdLogger.Printf("[DEBUG] %+v", stdLogger)
```
```text
... [DEBUG] my-app: &{mu:{state:0 sema:0} prefix: flag:0 out:0xc42000a0a0 buf:[]}
```
Alternatively, you may configure the system-wide logger:
```go
// log the standard logger from 'import "log"'
log.SetOutput(appLogger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true}))
log.SetPrefix("")
log.SetFlags(0)
log.Printf("[DEBUG] %d", 42)
```
```text
... [DEBUG] my-app: 42
```
Notice that if `appLogger` is initialized with the `INFO` log level _and_ you
specify `InferLevels: true`, you will not see any output here. You must change
`appLogger` to `DEBUG` to see output. See the docs for more information.
If the log lines start with a timestamp you can use the
`InferLevelsWithTimestamp` option to try and ignore them.
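For instance, a sketch (reusing `appLogger` from the earlier examples, with log lines that already carry a timestamp):
```go
stdLogger := appLogger.StandardLogger(&hclog.StandardLoggerOptions{
	InferLevels:              true,
	InferLevelsWithTimestamp: true,
})
// The leading timestamp is skipped before the level tag is inferred.
stdLogger.Println("2023-04-13T09:42:19Z [WARN] disk space low")
```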

44
vendor/github.com/hashicorp/go-hclog/colorize_unix.go generated vendored Normal file

@@ -0,0 +1,44 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MIT
//go:build !windows
// +build !windows
package hclog
import (
"github.com/mattn/go-isatty"
)
// hasFD is used to check if the writer exposes an Fd value, so we
// can check whether it refers to a terminal.
type hasFD interface {
Fd() uintptr
}
// setColorization will mutate the values of this logger
// to appropriately configure colorization options. On non-Windows
// systems it only disables color when the output can't support it.
func (l *intLogger) setColorization(opts *LoggerOptions) {
if opts.Color != AutoColor {
return
}
if sc, ok := l.writer.w.(SupportsColor); ok {
if !sc.SupportsColor() {
l.headerColor = ColorOff
l.writer.color = ColorOff
}
return
}
fi, ok := l.writer.w.(hasFD)
if !ok {
return
}
if !isatty.IsTerminal(fi.Fd()) {
l.headerColor = ColorOff
l.writer.color = ColorOff
}
}

41
vendor/github.com/hashicorp/go-hclog/colorize_windows.go generated vendored Normal file

@@ -0,0 +1,41 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MIT
//go:build windows
// +build windows
package hclog
import (
"os"
colorable "github.com/mattn/go-colorable"
)
// setColorization will mutate the values of this logger
// to appropriately configure colorization options. It provides
// a wrapper to the output stream on Windows systems.
func (l *intLogger) setColorization(opts *LoggerOptions) {
if opts.Color == ColorOff {
return
}
fi, ok := l.writer.w.(*os.File)
if !ok {
l.writer.color = ColorOff
l.headerColor = ColorOff
return
}
cfi := colorable.NewColorable(fi)
// NewColorable detects if color is possible and if it's not, then it
// returns the original value. So we can test if we got the original
// value back to know if color is possible.
if cfi == fi {
l.writer.color = ColorOff
l.headerColor = ColorOff
} else {
l.writer.w = cfi
}
}

41
vendor/github.com/hashicorp/go-hclog/context.go generated vendored Normal file

@@ -0,0 +1,41 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MIT
package hclog
import (
"context"
)
// WithContext inserts a logger into the context and is retrievable
// with FromContext. The optional args can be set with the same syntax as
// Logger.With to set fields on the inserted logger. This will not modify
// the logger argument in-place.
func WithContext(ctx context.Context, logger Logger, args ...interface{}) context.Context {
// While we could call logger.With even with zero args, we have this
// check to avoid unnecessary allocations around creating a copy of a
// logger.
if len(args) > 0 {
logger = logger.With(args...)
}
return context.WithValue(ctx, contextKey, logger)
}
// FromContext returns a logger from the context. This will return L()
// (the default logger) if no logger is found in the context. Therefore,
// this will never return a nil value.
func FromContext(ctx context.Context) Logger {
logger, _ := ctx.Value(contextKey).(Logger)
if logger == nil {
return L()
}
return logger
}
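// Example (a sketch; requestID is a hypothetical caller-provided value):
//
//	ctx := WithContext(context.Background(), L(), "request", requestID)
//	// ... later, possibly several call frames down ...
//	FromContext(ctx).Info("handling request")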
// Unexported new type so that our context key never collides with another.
type contextKeyType struct{}
// contextKey is the key used for the context to store the logger.
var contextKey = contextKeyType{}

74
vendor/github.com/hashicorp/go-hclog/exclude.go generated vendored Normal file

@@ -0,0 +1,74 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MIT
package hclog
import (
"regexp"
"strings"
)
// ExcludeByMessage provides a simple way to build a list of log messages that
// can be queried and matched. This is meant to be used with the Exclude
// option on LoggerOptions to suppress log messages. This does not hold any
// mutexes within itself, so normal usage is to Add entries at setup time and
// none once Exclude is being called. Exclude is called with a mutex held
// within the Logger, so it doesn't need to use a mutex itself. Example usage:
//
// f := new(ExcludeByMessage)
// f.Add("Noisy log message text")
// appLogger.Exclude = f.Exclude
type ExcludeByMessage struct {
messages map[string]struct{}
}
// Add a message to be filtered. Do not call this once Exclude is being
// called, due to concurrency issues.
func (f *ExcludeByMessage) Add(msg string) {
if f.messages == nil {
f.messages = make(map[string]struct{})
}
f.messages[msg] = struct{}{}
}
// Exclude returns true if the given message should be excluded (filtered out).
func (f *ExcludeByMessage) Exclude(level Level, msg string, args ...interface{}) bool {
_, ok := f.messages[msg]
return ok
}
// ExcludeByPrefix is a simple type to match a message string that has a common prefix.
type ExcludeByPrefix string
// Exclude matches a message that starts with the prefix.
func (p ExcludeByPrefix) Exclude(level Level, msg string, args ...interface{}) bool {
return strings.HasPrefix(msg, string(p))
}
// ExcludeByRegexp takes a regexp and uses it to match a log message string. If it matches
// the log entry is excluded.
type ExcludeByRegexp struct {
Regexp *regexp.Regexp
}
// Exclude the log message if the message string matches the regexp
func (e ExcludeByRegexp) Exclude(level Level, msg string, args ...interface{}) bool {
return e.Regexp.MatchString(msg)
}
// ExcludeFuncs is a slice of functions that will be called to see if a log entry
// should be filtered or not. It stops calling functions once at least one returns
// true.
type ExcludeFuncs []func(level Level, msg string, args ...interface{}) bool
// Calls each function until one of them returns true
func (ff ExcludeFuncs) Exclude(level Level, msg string, args ...interface{}) bool {
for _, f := range ff {
if f(level, msg, args...) {
return true
}
}
return false
}
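// Example (a sketch): combine several exclusion rules into one Exclude
// function; evaluation stops at the first rule that matches.
//
//	var byMsg ExcludeByMessage
//	byMsg.Add("Noisy log message text")
//	opts := &LoggerOptions{
//		Exclude: ExcludeFuncs{
//			byMsg.Exclude,
//			ExcludeByPrefix("health-check:").Exclude,
//		}.Exclude,
//	}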

67
vendor/github.com/hashicorp/go-hclog/global.go generated vendored Normal file

@@ -0,0 +1,67 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MIT
package hclog
import (
"sync"
"time"
)
var (
protect sync.Once
def Logger
// DefaultOptions is used to create the Default logger. These are read
// only when the Default logger is created, so set them as soon as the
// process starts.
DefaultOptions = &LoggerOptions{
Level: DefaultLevel,
Output: DefaultOutput,
TimeFn: time.Now,
}
)
// Default returns a globally held logger. This can be a good starting
// place, and then you can use .With() and .Named() to create sub-loggers
// to be used in more specific contexts.
// The value of the Default logger can be set via SetDefault() or by
// changing the options in DefaultOptions.
//
// This method is goroutine safe, returning a global from memory, but
// care should be used if SetDefault() is called at random times
// in the program as that may result in race conditions and an unexpected
// Logger being returned.
func Default() Logger {
protect.Do(func() {
// If SetDefault was used before Default() was called, we need to
// detect that here.
if def == nil {
def = New(DefaultOptions)
}
})
return def
}
// L is a short alias for Default().
func L() Logger {
return Default()
}
// SetDefault changes the logger to be returned by Default() and L()
// to the one given. This allows packages to use the default logger
// and have higher level packages change it to match the execution
// environment. It returns any old default if there is one.
//
// NOTE: This is expected to be called early in the program to setup
// a default logger. As such, it does not attempt to make itself
// not racy with regard to the value of the default logger. Ergo
// if it is called in goroutines, you may experience race conditions
// with other goroutines retrieving the default logger. Basically,
// don't do that.
func SetDefault(log Logger) Logger {
old := def
def = log
return old
}
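// Example (a sketch): install a configured default logger early in main(),
// before any goroutine calls Default() or L():
//
//	SetDefault(New(&LoggerOptions{Name: "my-app", Level: Debug}))
//	L().Debug("default logger installed")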

207
vendor/github.com/hashicorp/go-hclog/interceptlogger.go generated vendored Normal file

@@ -0,0 +1,207 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MIT
package hclog
import (
"io"
"log"
"sync"
"sync/atomic"
)
var _ Logger = &interceptLogger{}
type interceptLogger struct {
Logger
mu *sync.Mutex
sinkCount *int32
Sinks map[SinkAdapter]struct{}
}
func NewInterceptLogger(opts *LoggerOptions) InterceptLogger {
l := newLogger(opts)
if l.callerOffset > 0 {
// extra frames for interceptLogger.{Warn,Info,Log,etc...}, and interceptLogger.log
l.callerOffset += 2
}
intercept := &interceptLogger{
Logger: l,
mu: new(sync.Mutex),
sinkCount: new(int32),
Sinks: make(map[SinkAdapter]struct{}),
}
atomic.StoreInt32(intercept.sinkCount, 0)
return intercept
}
func (i *interceptLogger) Log(level Level, msg string, args ...interface{}) {
i.log(level, msg, args...)
}
// log is used to make the caller stack frame lookup consistent. If Warn, Info, etc.
// all called Log then direct calls to Log would have a different stack frame
// depth. By having all the methods call the same helper we ensure the stack
// frame depth is the same.
func (i *interceptLogger) log(level Level, msg string, args ...interface{}) {
i.Logger.Log(level, msg, args...)
if atomic.LoadInt32(i.sinkCount) == 0 {
return
}
i.mu.Lock()
defer i.mu.Unlock()
for s := range i.Sinks {
s.Accept(i.Name(), level, msg, i.retrieveImplied(args...)...)
}
}
// Emit the message and args at TRACE level to log and sinks
func (i *interceptLogger) Trace(msg string, args ...interface{}) {
i.log(Trace, msg, args...)
}
// Emit the message and args at DEBUG level to log and sinks
func (i *interceptLogger) Debug(msg string, args ...interface{}) {
i.log(Debug, msg, args...)
}
// Emit the message and args at INFO level to log and sinks
func (i *interceptLogger) Info(msg string, args ...interface{}) {
i.log(Info, msg, args...)
}
// Emit the message and args at WARN level to log and sinks
func (i *interceptLogger) Warn(msg string, args ...interface{}) {
i.log(Warn, msg, args...)
}
// Emit the message and args at ERROR level to log and sinks
func (i *interceptLogger) Error(msg string, args ...interface{}) {
i.log(Error, msg, args...)
}
func (i *interceptLogger) retrieveImplied(args ...interface{}) []interface{} {
top := i.Logger.ImpliedArgs()
cp := make([]interface{}, len(top)+len(args))
copy(cp, top)
copy(cp[len(top):], args)
return cp
}
// Create a new sub-Logger with a name descending from the current name.
// This is used to create a subsystem specific Logger.
// Registered sinks will subscribe to these messages as well.
func (i *interceptLogger) Named(name string) Logger {
return i.NamedIntercept(name)
}
// Create a new sub-Logger with an explicit name. This ignores the current
// name. This is used to create a standalone logger that doesn't fall
// within the normal hierarchy. Registered sinks will subscribe
// to these messages as well.
func (i *interceptLogger) ResetNamed(name string) Logger {
return i.ResetNamedIntercept(name)
}
// Create a new sub-Logger with a name descending from the current name.
// This is used to create a subsystem specific Logger.
// Registered sinks will subscribe to these messages as well.
func (i *interceptLogger) NamedIntercept(name string) InterceptLogger {
var sub interceptLogger
sub = *i
sub.Logger = i.Logger.Named(name)
return &sub
}
// Create a new sub-Logger with an explicit name. This ignores the current
// name. This is used to create a standalone logger that doesn't fall
// within the normal hierarchy. Registered sinks will subscribe
// to these messages as well.
func (i *interceptLogger) ResetNamedIntercept(name string) InterceptLogger {
var sub interceptLogger
sub = *i
sub.Logger = i.Logger.ResetNamed(name)
return &sub
}
// Return a sub-Logger for which every emitted log message will contain
// the given key/value pairs. This is used to create a context specific
// Logger.
func (i *interceptLogger) With(args ...interface{}) Logger {
var sub interceptLogger
sub = *i
sub.Logger = i.Logger.With(args...)
return &sub
}
// RegisterSink attaches a SinkAdapter to the interceptLogger's sinks.
func (i *interceptLogger) RegisterSink(sink SinkAdapter) {
i.mu.Lock()
defer i.mu.Unlock()
i.Sinks[sink] = struct{}{}
atomic.AddInt32(i.sinkCount, 1)
}
// DeregisterSink removes a SinkAdapter from the interceptLogger's sinks.
func (i *interceptLogger) DeregisterSink(sink SinkAdapter) {
i.mu.Lock()
defer i.mu.Unlock()
delete(i.Sinks, sink)
atomic.AddInt32(i.sinkCount, -1)
}
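// Example (a sketch; auditSink is a hypothetical SinkAdapter implementation):
//
//	il := NewInterceptLogger(&LoggerOptions{Name: "app"})
//	il.RegisterSink(auditSink)
//	defer il.DeregisterSink(auditSink)
//	il.Info("this reaches both the logger output and auditSink")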
func (i *interceptLogger) StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger {
return i.StandardLogger(opts)
}
func (i *interceptLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
if opts == nil {
opts = &StandardLoggerOptions{}
}
return log.New(i.StandardWriter(opts), "", 0)
}
func (i *interceptLogger) StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer {
return i.StandardWriter(opts)
}
func (i *interceptLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
return &stdlogAdapter{
log: i,
inferLevels: opts.InferLevels,
inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp,
forceLevel: opts.ForceLevel,
}
}
func (i *interceptLogger) ResetOutput(opts *LoggerOptions) error {
if or, ok := i.Logger.(OutputResettable); ok {
return or.ResetOutput(opts)
} else {
return nil
}
}
func (i *interceptLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error {
if or, ok := i.Logger.(OutputResettable); ok {
return or.ResetOutputWithFlush(opts, flushable)
} else {
return nil
}
}

918
vendor/github.com/hashicorp/go-hclog/intlogger.go generated vendored Normal file

@@ -0,0 +1,918 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MIT
package hclog
import (
"bytes"
"encoding"
"encoding/json"
"errors"
"fmt"
"io"
"log"
"reflect"
"runtime"
"sort"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"unicode"
"unicode/utf8"
"github.com/fatih/color"
)
// TimeFormat is the time format to use for plain (non-JSON) output.
// This is a version of RFC3339 that contains millisecond precision.
const TimeFormat = "2006-01-02T15:04:05.000Z0700"
// TimeFormatJSON is the time format to use for JSON output.
// This is a version of RFC3339 that contains microsecond precision.
const TimeFormatJSON = "2006-01-02T15:04:05.000000Z07:00"
// errJsonUnsupportedTypeMsg is included in log JSON entries if an arg cannot be serialized to JSON.
const errJsonUnsupportedTypeMsg = "logging contained values that don't serialize to json"
var (
_levelToBracket = map[Level]string{
Debug: "[DEBUG]",
Trace: "[TRACE]",
Info: "[INFO] ",
Warn: "[WARN] ",
Error: "[ERROR]",
}
_levelToColor = map[Level]*color.Color{
Debug: color.New(color.FgHiWhite),
Trace: color.New(color.FgHiGreen),
Info: color.New(color.FgHiBlue),
Warn: color.New(color.FgHiYellow),
Error: color.New(color.FgHiRed),
}
faintBoldColor = color.New(color.Faint, color.Bold)
faintColor = color.New(color.Faint)
faintMultiLinePrefix = faintColor.Sprint(" | ")
faintFieldSeparator = faintColor.Sprint("=")
faintFieldSeparatorWithNewLine = faintColor.Sprint("=\n")
)
// Make sure that intLogger is a Logger
var _ Logger = &intLogger{}
// intLogger is an internal logger implementation. Internal in that it is
// defined entirely by this package.
type intLogger struct {
json bool
callerOffset int
name string
timeFormat string
timeFn TimeFunction
disableTime bool
// This is an interface so that it's shared by any derived loggers, since
// those derived loggers share the bufio.Writer as well.
mutex Locker
writer *writer
level *int32
headerColor ColorOption
fieldColor ColorOption
implied []interface{}
exclude func(level Level, msg string, args ...interface{}) bool
// create subloggers with their own level setting
independentLevels bool
subloggerHook func(sub Logger) Logger
}
// New returns a configured logger.
func New(opts *LoggerOptions) Logger {
return newLogger(opts)
}
// NewSinkAdapter returns a SinkAdapter with configured settings
// defined by LoggerOptions
func NewSinkAdapter(opts *LoggerOptions) SinkAdapter {
l := newLogger(opts)
if l.callerOffset > 0 {
// extra frames for interceptLogger.{Warn,Info,Log,etc...}, and SinkAdapter.Accept
l.callerOffset += 2
}
return l
}
func newLogger(opts *LoggerOptions) *intLogger {
if opts == nil {
opts = &LoggerOptions{}
}
output := opts.Output
if output == nil {
output = DefaultOutput
}
level := opts.Level
if level == NoLevel {
level = DefaultLevel
}
mutex := opts.Mutex
if mutex == nil {
mutex = new(sync.Mutex)
}
var (
primaryColor ColorOption = ColorOff
headerColor ColorOption = ColorOff
fieldColor ColorOption = ColorOff
)
switch {
case opts.ColorHeaderOnly:
headerColor = opts.Color
case opts.ColorHeaderAndFields:
fieldColor = opts.Color
headerColor = opts.Color
default:
primaryColor = opts.Color
}
l := &intLogger{
json: opts.JSONFormat,
name: opts.Name,
timeFormat: TimeFormat,
timeFn: time.Now,
disableTime: opts.DisableTime,
mutex: mutex,
writer: newWriter(output, primaryColor),
level: new(int32),
exclude: opts.Exclude,
independentLevels: opts.IndependentLevels,
headerColor: headerColor,
fieldColor: fieldColor,
subloggerHook: opts.SubloggerHook,
}
if opts.IncludeLocation {
l.callerOffset = offsetIntLogger + opts.AdditionalLocationOffset
}
if l.json {
l.timeFormat = TimeFormatJSON
}
if opts.TimeFn != nil {
l.timeFn = opts.TimeFn
}
if opts.TimeFormat != "" {
l.timeFormat = opts.TimeFormat
}
if l.subloggerHook == nil {
l.subloggerHook = identityHook
}
l.setColorization(opts)
atomic.StoreInt32(l.level, int32(level))
return l
}
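// Example (a sketch): colorize only the level header of each log line by
// combining the Color and ColorHeaderOnly options:
//
//	l := New(&LoggerOptions{
//		Name:            "app",
//		Color:           AutoColor,
//		ColorHeaderOnly: true,
//	})
//	l.Warn("only the [WARN] header is colored")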
func identityHook(logger Logger) Logger {
return logger
}
// offsetIntLogger is the stack frame offset in the call stack for the caller to
// one of the Warn, Info, Log, etc methods.
const offsetIntLogger = 3
// Log a message and a set of key/value pairs if the given level is at
// or more severe than the threshold configured in the Logger.
func (l *intLogger) log(name string, level Level, msg string, args ...interface{}) {
if level < Level(atomic.LoadInt32(l.level)) {
return
}
t := l.timeFn()
l.mutex.Lock()
defer l.mutex.Unlock()
if l.exclude != nil && l.exclude(level, msg, args...) {
return
}
if l.json {
l.logJSON(t, name, level, msg, args...)
} else {
l.logPlain(t, name, level, msg, args...)
}
l.writer.Flush(level)
}
// Clean up a path by returning only the last 2 segments of the path.
func trimCallerPath(path string) string {
// lovely borrowed from zap
// nb. To make sure we trim the path correctly on Windows too, we
// counter-intuitively need to use '/' and *not* os.PathSeparator here,
// because the path given originates from Go stdlib, specifically
// runtime.Caller() which (as of Mar/17) returns forward slashes even on
// Windows.
//
// See https://github.com/golang/go/issues/3335
// and https://github.com/golang/go/issues/18151
//
// for discussion on the issue on Go side.
// Find the last separator.
idx := strings.LastIndexByte(path, '/')
if idx == -1 {
return path
}
// Find the penultimate separator.
idx = strings.LastIndexByte(path[:idx], '/')
if idx == -1 {
return path
}
return path[idx+1:]
}
// isNormal indicates if the rune is one allowed to exist as an unquoted
// string value. This is a subset of ASCII, `-` through `~`.
func isNormal(r rune) bool {
return 0x2D <= r && r <= 0x7E // - through ~
}
// needsQuoting returns false if all the runes in the string are normal, according
// to isNormal
func needsQuoting(str string) bool {
for _, r := range str {
if !isNormal(r) {
return true
}
}
return false
}
// logPlain is the non-JSON logging format function which writes directly
// to the underlying writer the logger was initialized with.
//
// If the logger was initialized with a color function, it also handles
// applying the color to the log message.
//
// Color Options
// 1. No color.
// 2. Color the whole log line, based on the level.
// 3. Color only the header (level) part of the log line.
// 4. Color both the header and fields of the log line.
func (l *intLogger) logPlain(t time.Time, name string, level Level, msg string, args ...interface{}) {
if !l.disableTime {
l.writer.WriteString(t.Format(l.timeFormat))
l.writer.WriteByte(' ')
}
s, ok := _levelToBracket[level]
if ok {
if l.headerColor != ColorOff {
color := _levelToColor[level]
color.Fprint(l.writer, s)
} else {
l.writer.WriteString(s)
}
} else {
l.writer.WriteString("[?????]")
}
if l.callerOffset > 0 {
if _, file, line, ok := runtime.Caller(l.callerOffset); ok {
l.writer.WriteByte(' ')
l.writer.WriteString(trimCallerPath(file))
l.writer.WriteByte(':')
l.writer.WriteString(strconv.Itoa(line))
l.writer.WriteByte(':')
}
}
l.writer.WriteByte(' ')
if name != "" {
l.writer.WriteString(name)
if msg != "" {
l.writer.WriteString(": ")
l.writer.WriteString(msg)
}
} else if msg != "" {
l.writer.WriteString(msg)
}
args = append(l.implied, args...)
var stacktrace CapturedStacktrace
if len(args) > 0 {
if len(args)%2 != 0 {
cs, ok := args[len(args)-1].(CapturedStacktrace)
if ok {
args = args[:len(args)-1]
stacktrace = cs
} else {
extra := args[len(args)-1]
args = append(args[:len(args)-1], MissingKey, extra)
}
}
l.writer.WriteByte(':')
// Handle the field arguments, which come in pairs (key=val).
FOR:
for i := 0; i < len(args); i = i + 2 {
var (
key string
val string
raw bool
)
// Convert the field value to a string.
switch st := args[i+1].(type) {
case string:
val = st
if st == "" {
val = `""`
raw = true
}
case int:
val = strconv.FormatInt(int64(st), 10)
case int64:
val = strconv.FormatInt(int64(st), 10)
case int32:
val = strconv.FormatInt(int64(st), 10)
case int16:
val = strconv.FormatInt(int64(st), 10)
case int8:
val = strconv.FormatInt(int64(st), 10)
case uint:
val = strconv.FormatUint(uint64(st), 10)
case uint64:
val = strconv.FormatUint(uint64(st), 10)
case uint32:
val = strconv.FormatUint(uint64(st), 10)
case uint16:
val = strconv.FormatUint(uint64(st), 10)
case uint8:
val = strconv.FormatUint(uint64(st), 10)
case Hex:
val = "0x" + strconv.FormatUint(uint64(st), 16)
case Octal:
val = "0" + strconv.FormatUint(uint64(st), 8)
case Binary:
val = "0b" + strconv.FormatUint(uint64(st), 2)
case CapturedStacktrace:
stacktrace = st
continue FOR
case Format:
val = fmt.Sprintf(st[0].(string), st[1:]...)
case Quote:
raw = true
val = strconv.Quote(string(st))
default:
v := reflect.ValueOf(st)
if v.Kind() == reflect.Slice {
val = l.renderSlice(v)
raw = true
} else {
val = fmt.Sprintf("%v", st)
}
}
// Convert the field key to a string.
switch st := args[i].(type) {
case string:
key = st
default:
key = fmt.Sprintf("%s", st)
}
// Optionally apply the ANSI "faint" and "bold"
// SGR values to the key.
if l.fieldColor != ColorOff {
key = faintBoldColor.Sprint(key)
}
// Values may contain multiple lines, and that format
// is preserved, with each line prefixed with a " | "
// to show it's part of a collection of lines.
//
// Values may also need quoting, if not all the runes
// in the value string are "normal", like if they
// contain ANSI escape sequences.
if strings.Contains(val, "\n") {
l.writer.WriteString("\n ")
l.writer.WriteString(key)
if l.fieldColor != ColorOff {
l.writer.WriteString(faintFieldSeparatorWithNewLine)
writeIndent(l.writer, val, faintMultiLinePrefix)
} else {
l.writer.WriteString("=\n")
writeIndent(l.writer, val, " | ")
}
l.writer.WriteString(" ")
} else if !raw && needsQuoting(val) {
l.writer.WriteByte(' ')
l.writer.WriteString(key)
if l.fieldColor != ColorOff {
l.writer.WriteString(faintFieldSeparator)
} else {
l.writer.WriteByte('=')
}
l.writer.WriteByte('"')
writeEscapedForOutput(l.writer, val, true)
l.writer.WriteByte('"')
} else {
l.writer.WriteByte(' ')
l.writer.WriteString(key)
if l.fieldColor != ColorOff {
l.writer.WriteString(faintFieldSeparator)
} else {
l.writer.WriteByte('=')
}
l.writer.WriteString(val)
}
}
}
l.writer.WriteString("\n")
if stacktrace != "" {
l.writer.WriteString(string(stacktrace))
l.writer.WriteString("\n")
}
}
func writeIndent(w *writer, str string, indent string) {
for {
nl := strings.IndexByte(str, '\n')
if nl == -1 {
if str != "" {
w.WriteString(indent)
writeEscapedForOutput(w, str, false)
w.WriteString("\n")
}
return
}
w.WriteString(indent)
writeEscapedForOutput(w, str[:nl], false)
w.WriteString("\n")
str = str[nl+1:]
}
}
func needsEscaping(str string) bool {
for _, b := range str {
if !unicode.IsPrint(b) || b == '"' {
return true
}
}
return false
}
const (
lowerhex = "0123456789abcdef"
)
var bufPool = sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
func writeEscapedForOutput(w io.Writer, str string, escapeQuotes bool) {
if !needsEscaping(str) {
w.Write([]byte(str))
return
}
bb := bufPool.Get().(*bytes.Buffer)
bb.Reset()
defer bufPool.Put(bb)
for _, r := range str {
if escapeQuotes && r == '"' {
bb.WriteString(`\"`)
} else if unicode.IsPrint(r) {
bb.WriteRune(r)
} else {
switch r {
case '\a':
bb.WriteString(`\a`)
case '\b':
bb.WriteString(`\b`)
case '\f':
bb.WriteString(`\f`)
case '\n':
bb.WriteString(`\n`)
case '\r':
bb.WriteString(`\r`)
case '\t':
bb.WriteString(`\t`)
case '\v':
bb.WriteString(`\v`)
default:
switch {
case r < ' ':
bb.WriteString(`\x`)
bb.WriteByte(lowerhex[byte(r)>>4])
bb.WriteByte(lowerhex[byte(r)&0xF])
case !utf8.ValidRune(r):
r = 0xFFFD
fallthrough
case r < 0x10000:
bb.WriteString(`\u`)
for s := 12; s >= 0; s -= 4 {
bb.WriteByte(lowerhex[r>>uint(s)&0xF])
}
default:
bb.WriteString(`\U`)
for s := 28; s >= 0; s -= 4 {
bb.WriteByte(lowerhex[r>>uint(s)&0xF])
}
}
}
}
}
w.Write(bb.Bytes())
}
func (l *intLogger) renderSlice(v reflect.Value) string {
var buf bytes.Buffer
buf.WriteRune('[')
for i := 0; i < v.Len(); i++ {
if i > 0 {
buf.WriteString(", ")
}
sv := v.Index(i)
var val string
switch sv.Kind() {
case reflect.String:
val = strconv.Quote(sv.String())
case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64:
val = strconv.FormatInt(sv.Int(), 10)
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
val = strconv.FormatUint(sv.Uint(), 10)
default:
val = fmt.Sprintf("%v", sv.Interface())
if strings.ContainsAny(val, " \t\n\r") {
val = strconv.Quote(val)
}
}
buf.WriteString(val)
}
buf.WriteRune(']')
return buf.String()
}
// JSON logging function
func (l *intLogger) logJSON(t time.Time, name string, level Level, msg string, args ...interface{}) {
vals := l.jsonMapEntry(t, name, level, msg)
args = append(l.implied, args...)
if len(args) > 0 {
if len(args)%2 != 0 {
cs, ok := args[len(args)-1].(CapturedStacktrace)
if ok {
args = args[:len(args)-1]
vals["stacktrace"] = cs
} else {
extra := args[len(args)-1]
args = append(args[:len(args)-1], MissingKey, extra)
}
}
for i := 0; i < len(args); i = i + 2 {
val := args[i+1]
switch sv := val.(type) {
case error:
// Check if val is of type error. If error type doesn't
// implement json.Marshaler or encoding.TextMarshaler
// then set val to err.Error() so that it gets marshaled
switch sv.(type) {
case json.Marshaler, encoding.TextMarshaler:
default:
val = sv.Error()
}
case Format:
val = fmt.Sprintf(sv[0].(string), sv[1:]...)
}
var key string
switch st := args[i].(type) {
case string:
key = st
default:
key = fmt.Sprintf("%s", st)
}
vals[key] = val
}
}
err := json.NewEncoder(l.writer).Encode(vals)
if err != nil {
if _, ok := err.(*json.UnsupportedTypeError); ok {
plainVal := l.jsonMapEntry(t, name, level, msg)
plainVal["@warn"] = errJsonUnsupportedTypeMsg
json.NewEncoder(l.writer).Encode(plainVal)
}
}
}
func (l intLogger) jsonMapEntry(t time.Time, name string, level Level, msg string) map[string]interface{} {
vals := map[string]interface{}{
"@message": msg,
}
if !l.disableTime {
vals["@timestamp"] = t.Format(l.timeFormat)
}
var levelStr string
switch level {
case Error:
levelStr = "error"
case Warn:
levelStr = "warn"
case Info:
levelStr = "info"
case Debug:
levelStr = "debug"
case Trace:
levelStr = "trace"
default:
levelStr = "all"
}
vals["@level"] = levelStr
if name != "" {
vals["@module"] = name
}
if l.callerOffset > 0 {
if _, file, line, ok := runtime.Caller(l.callerOffset + 1); ok {
vals["@caller"] = fmt.Sprintf("%s:%d", file, line)
}
}
return vals
}
// Emit the message and args at the provided level
func (l *intLogger) Log(level Level, msg string, args ...interface{}) {
l.log(l.Name(), level, msg, args...)
}
// Emit the message and args at DEBUG level
func (l *intLogger) Debug(msg string, args ...interface{}) {
l.log(l.Name(), Debug, msg, args...)
}
// Emit the message and args at TRACE level
func (l *intLogger) Trace(msg string, args ...interface{}) {
l.log(l.Name(), Trace, msg, args...)
}
// Emit the message and args at INFO level
func (l *intLogger) Info(msg string, args ...interface{}) {
l.log(l.Name(), Info, msg, args...)
}
// Emit the message and args at WARN level
func (l *intLogger) Warn(msg string, args ...interface{}) {
l.log(l.Name(), Warn, msg, args...)
}
// Emit the message and args at ERROR level
func (l *intLogger) Error(msg string, args ...interface{}) {
l.log(l.Name(), Error, msg, args...)
}
// Indicate that the logger would emit TRACE level logs
func (l *intLogger) IsTrace() bool {
return Level(atomic.LoadInt32(l.level)) == Trace
}
// Indicate that the logger would emit DEBUG level logs
func (l *intLogger) IsDebug() bool {
return Level(atomic.LoadInt32(l.level)) <= Debug
}
// Indicate that the logger would emit INFO level logs
func (l *intLogger) IsInfo() bool {
return Level(atomic.LoadInt32(l.level)) <= Info
}
// Indicate that the logger would emit WARN level logs
func (l *intLogger) IsWarn() bool {
return Level(atomic.LoadInt32(l.level)) <= Warn
}
// Indicate that the logger would emit ERROR level logs
func (l *intLogger) IsError() bool {
return Level(atomic.LoadInt32(l.level)) <= Error
}
const MissingKey = "EXTRA_VALUE_AT_END"
// Return a sub-Logger for which every emitted log message will contain
// the given key/value pairs. This is used to create a context specific
// Logger.
func (l *intLogger) With(args ...interface{}) Logger {
var extra interface{}
if len(args)%2 != 0 {
extra = args[len(args)-1]
args = args[:len(args)-1]
}
sl := l.copy()
result := make(map[string]interface{}, len(l.implied)+len(args))
keys := make([]string, 0, len(l.implied)+len(args))
// Read existing args, store map and key for consistent sorting
for i := 0; i < len(l.implied); i += 2 {
key := l.implied[i].(string)
keys = append(keys, key)
result[key] = l.implied[i+1]
}
// Read new args, store map and key for consistent sorting
for i := 0; i < len(args); i += 2 {
key := args[i].(string)
_, exists := result[key]
if !exists {
keys = append(keys, key)
}
result[key] = args[i+1]
}
// Sort keys to be consistent
sort.Strings(keys)
sl.implied = make([]interface{}, 0, len(l.implied)+len(args))
for _, k := range keys {
sl.implied = append(sl.implied, k)
sl.implied = append(sl.implied, result[k])
}
if extra != nil {
sl.implied = append(sl.implied, MissingKey, extra)
}
return l.subloggerHook(sl)
}
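// Example (a sketch): duplicate keys keep the most recent value, and the
// implied keys are emitted in sorted order:
//
//	l := New(nil).With("b", 2, "a", 1)
//	l.With("b", 3).Info("hi") // fields render as: a=1 b=3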
// Create a new sub-Logger with a name descending from the current name.
// This is used to create a subsystem specific Logger.
func (l *intLogger) Named(name string) Logger {
sl := l.copy()
if sl.name != "" {
sl.name = sl.name + "." + name
} else {
sl.name = name
}
return l.subloggerHook(sl)
}
// Create a new sub-Logger with an explicit name. This ignores the current
// name. This is used to create a standalone logger that doesn't fall
// within the normal hierarchy.
func (l *intLogger) ResetNamed(name string) Logger {
sl := l.copy()
sl.name = name
return l.subloggerHook(sl)
}
func (l *intLogger) ResetOutput(opts *LoggerOptions) error {
if opts.Output == nil {
return errors.New("given output is nil")
}
l.mutex.Lock()
defer l.mutex.Unlock()
return l.resetOutput(opts)
}
func (l *intLogger) ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error {
if opts.Output == nil {
return errors.New("given output is nil")
}
if flushable == nil {
return errors.New("flushable is nil")
}
l.mutex.Lock()
defer l.mutex.Unlock()
if err := flushable.Flush(); err != nil {
return err
}
return l.resetOutput(opts)
}
func (l *intLogger) resetOutput(opts *LoggerOptions) error {
l.writer = newWriter(opts.Output, opts.Color)
l.setColorization(opts)
return nil
}
// Update the logging level on-the-fly. This will affect all subloggers as
// well.
func (l *intLogger) SetLevel(level Level) {
atomic.StoreInt32(l.level, int32(level))
}
// Returns the current level
func (l *intLogger) GetLevel() Level {
return Level(atomic.LoadInt32(l.level))
}
// Create a *log.Logger that will send its data through this Logger. This
// allows packages that expect to be using the standard library log to actually
// use this logger.
func (l *intLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
if opts == nil {
opts = &StandardLoggerOptions{}
}
return log.New(l.StandardWriter(opts), "", 0)
}
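// exampleStandardLogger is a minimal illustrative sketch with assumed
// names: code that expects a *log.Logger can be pointed at this Logger,
// with bracketed level prefixes inferred and re-applied.
func exampleStandardLogger(l Logger) {
std := l.StandardLogger(&StandardLoggerOptions{InferLevels: true})
std.Println("[WARN] disk almost full") // re-emitted at WARN level
}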
func (l *intLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
newLog := *l
if l.callerOffset > 0 {
// the stack is
// logger.printf() -> l.Output() ->l.out.writer(hclog:stdlogAdaptor.write) -> hclog:stdlogAdaptor.dispatch()
// So plus 4.
newLog.callerOffset = l.callerOffset + 4
}
return &stdlogAdapter{
log: &newLog,
inferLevels: opts.InferLevels,
inferLevelsWithTimestamp: opts.InferLevelsWithTimestamp,
forceLevel: opts.ForceLevel,
}
}
// Accept implements the SinkAdapter interface
func (i *intLogger) Accept(name string, level Level, msg string, args ...interface{}) {
i.log(name, level, msg, args...)
}
// ImpliedArgs returns the logger's implied args
func (i *intLogger) ImpliedArgs() []interface{} {
return i.implied
}
// Name returns the logger's name
func (i *intLogger) Name() string {
return i.name
}
// copy returns a shallow copy of the intLogger, replacing the level pointer
// when necessary
func (l *intLogger) copy() *intLogger {
sl := *l
if l.independentLevels {
sl.level = new(int32)
*sl.level = *l.level
}
return &sl
}

393
vendor/github.com/hashicorp/go-hclog/logger.go generated vendored Normal file
View File

@@ -0,0 +1,393 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MIT
package hclog
import (
"io"
"log"
"os"
"strings"
"time"
)
var (
// DefaultOutput is used as the default log output.
DefaultOutput io.Writer = os.Stderr
// DefaultLevel is used as the default log level.
DefaultLevel = Info
)
// Level represents a log level.
type Level int32
const (
// NoLevel is a special level used to indicate that no level has been
// set and allow for a default to be used.
NoLevel Level = 0
// Trace is the most verbose level. Intended to be used for the tracing
// of actions in code, such as function enters/exits, etc.
Trace Level = 1
// Debug information for programmer low-level analysis.
Debug Level = 2
// Info information about steady state operations.
Info Level = 3
// Warn information about rare but handled events.
Warn Level = 4
// Error information about unrecoverable events.
Error Level = 5
// Off disables all logging output.
Off Level = 6
)
// Format is a simple convenience type for when formatting is required. When
// processing a value of this type, the logger automatically treats the first
// argument as a Printf formatting string and passes the rest as the values
// to be formatted. For example: L.Info("rations", "amount", Fmt("%d beans/day", beans)).
type Format []interface{}
// Fmt returns a Format type. This is a convenience function for creating a Format
// type.
func Fmt(str string, args ...interface{}) Format {
return append(Format{str}, args...)
}
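// exampleFmt is a minimal illustrative sketch with assumed names: a Format
// value is expanded with Printf semantics when it appears among the logged
// values.
func exampleFmt(l Logger) {
beans := 12
l.Info("rations", "amount", Fmt("%d beans/day", beans))
}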
// A simple shortcut to format numbers in hex when displayed with the normal
// text output. For example: L.Info("header value", Hex(17))
type Hex int
// A simple shortcut to format numbers in octal when displayed with the normal
// text output. For example: L.Info("perms", Octal(17))
type Octal int
// A simple shortcut to format numbers in binary when displayed with the normal
// text output. For example: L.Info("bits", Binary(17))
type Binary int
// A simple shortcut to format strings with Go quoting. Control and
// non-printable characters will be escaped with their backslash equivalents in
// output. Intended for untrusted or multiline strings which should be logged
// as concisely as possible.
type Quote string
// ColorOption expresses how the output should be colored, if at all.
type ColorOption uint8
const (
// ColorOff is the default coloration, and does not
// inject color codes into the io.Writer.
ColorOff ColorOption = iota
// AutoColor checks if the io.Writer is a tty,
// and if so enables coloring.
AutoColor
// ForceColor will enable coloring, regardless of whether
// the io.Writer is a tty or not.
ForceColor
)
// SupportsColor is an optional interface that can be implemented by the output
// value. If implemented and SupportsColor() returns true, then AutoColor will
// enable colorization.
type SupportsColor interface {
SupportsColor() bool
}
// LevelFromString returns a Level type for the named log level, or "NoLevel" if
// the level string is invalid. This facilitates setting the log level via
// config or environment variable by name in a predictable way.
func LevelFromString(levelStr string) Level {
// We don't care about case. Accept both "INFO" and "info".
levelStr = strings.ToLower(strings.TrimSpace(levelStr))
switch levelStr {
case "trace":
return Trace
case "debug":
return Debug
case "info":
return Info
case "warn":
return Warn
case "error":
return Error
case "off":
return Off
default:
return NoLevel
}
}
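// exampleLevelFromEnv is a minimal illustrative sketch; the environment
// variable name APP_LOG_LEVEL is an assumption. Unrecognized or missing
// values fall back to DefaultLevel.
func exampleLevelFromEnv() Level {
level := LevelFromString(os.Getenv("APP_LOG_LEVEL"))
if level == NoLevel {
level = DefaultLevel
}
return level
}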
func (l Level) String() string {
switch l {
case Trace:
return "trace"
case Debug:
return "debug"
case Info:
return "info"
case Warn:
return "warn"
case Error:
return "error"
case NoLevel:
return "none"
case Off:
return "off"
default:
return "unknown"
}
}
// Logger describes the interface that must be implemented by all loggers.
type Logger interface {
// Args are alternating key, val pairs
// keys must be strings
// vals can be any type, but display is implementation specific
// Emit a message and key/value pairs at a provided log level
Log(level Level, msg string, args ...interface{})
// Emit a message and key/value pairs at the TRACE level
Trace(msg string, args ...interface{})
// Emit a message and key/value pairs at the DEBUG level
Debug(msg string, args ...interface{})
// Emit a message and key/value pairs at the INFO level
Info(msg string, args ...interface{})
// Emit a message and key/value pairs at the WARN level
Warn(msg string, args ...interface{})
// Emit a message and key/value pairs at the ERROR level
Error(msg string, args ...interface{})
// Indicate if TRACE logs would be emitted. This and the other Is* guards
// are used to elide expensive logging code based on the current level.
IsTrace() bool
// Indicate if DEBUG logs would be emitted. This and the other Is* guards
IsDebug() bool
// Indicate if INFO logs would be emitted. This and the other Is* guards
IsInfo() bool
// Indicate if WARN logs would be emitted. This and the other Is* guards
IsWarn() bool
// Indicate if ERROR logs would be emitted. This and the other Is* guards
IsError() bool
// ImpliedArgs returns With key/value pairs
ImpliedArgs() []interface{}
// Creates a sublogger that will always have the given key/value pairs
With(args ...interface{}) Logger
// Returns the Name of the logger
Name() string
// Create a logger that will prepend the name string on the front of all messages.
// If the logger already has a name, the new value will be appended to the current
// name. That way, a major subsystem can use this to decorate all its own logs
// without losing context.
Named(name string) Logger
// Create a logger that will prepend the name string on the front of all messages.
// This sets the name of the logger to the value directly, unlike Named, which honors
// the current name as well.
ResetNamed(name string) Logger
// Updates the level. This should affect all related loggers as well,
// unless they were created with IndependentLevels. If an
// implementation cannot update the level on the fly, it should no-op.
SetLevel(level Level)
// Returns the current level
GetLevel() Level
// Return a value that conforms to the stdlib log.Logger interface
StandardLogger(opts *StandardLoggerOptions) *log.Logger
// Return a value that conforms to io.Writer, which can be passed into log.SetOutput()
StandardWriter(opts *StandardLoggerOptions) io.Writer
}
// StandardLoggerOptions can be used to configure a new standard logger.
type StandardLoggerOptions struct {
// Indicate that some minimal parsing should be done on strings to try
// and detect their level and re-emit them.
// This supports prefixes like [ERROR], [ERR], [TRACE], [WARN], [INFO], and
// [DEBUG], which are stripped off before the message is re-emitted at the
// detected level.
InferLevels bool
// Indicate that some minimal parsing should be done on strings to try
// and detect their level and re-emit them while ignoring possible
// timestamp values in the beginning of the string.
// This supports prefixes like [ERROR], [ERR], [TRACE], [WARN], [INFO], and
// [DEBUG], which are stripped off before the message is re-emitted at the
// detected level.
// The timestamp detection may result in false positives and incomplete
// string outputs.
InferLevelsWithTimestamp bool
// ForceLevel is used to force all output from the standard logger to be at
// the specified level. Similar to InferLevels, this will strip any level
// prefix contained in the logged string before applying the forced level.
// If set, this overrides InferLevels.
ForceLevel Level
}
type TimeFunction = func() time.Time
// LoggerOptions can be used to configure a new logger.
type LoggerOptions struct {
// Name of the subsystem to prefix logs with
Name string
// The threshold for the logger. Anything less severe is suppressed
Level Level
// Where to write the logs to. Defaults to os.Stderr if nil
Output io.Writer
// An optional Locker in case Output is shared. This can be a sync.Mutex or
// a NoopLocker if the caller wants control over output, e.g. for batching
// log lines.
Mutex Locker
// Control if the output should be in JSON.
JSONFormat bool
// Include file and line information in each log line
IncludeLocation bool
// AdditionalLocationOffset is the number of additional stack levels to skip
// when finding the file and line information for the log line
AdditionalLocationOffset int
// The time format to use instead of the default
TimeFormat string
// A function which is called to get the time object that is formatted using `TimeFormat`
TimeFn TimeFunction
// Control whether or not to display the time at all. This is required
// because setting TimeFormat to empty assumes the default format.
DisableTime bool
// Color the output. On Windows, colored logs are only available for io.Writers that
// are concretely instances of *os.File.
Color ColorOption
// Only color the header, not the body. This can help with readability of long messages.
ColorHeaderOnly bool
// Color the header and message body fields. This can help with readability
// of long messages with multiple fields.
ColorHeaderAndFields bool
// A function which is called with the log information and if it returns true the value
// should not be logged.
// This is useful when interacting with a system that you wish to suppress the log
// message for (because it's too noisy, etc)
Exclude func(level Level, msg string, args ...interface{}) bool
// IndependentLevels causes subloggers to be created with an independent
// copy of this logger's level. This means that using SetLevel on this
// logger will not affect any subloggers, and SetLevel on any subloggers
// will not affect the parent or sibling loggers.
IndependentLevels bool
// SubloggerHook registers a function that is called when a sublogger via
// Named, With, or ResetNamed is created. If defined, the function is passed
// the newly created Logger and the returned Logger is returned from the
// original function. This option allows customization via interception and
// wrapping of Logger instances.
SubloggerHook func(sub Logger) Logger
}
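// exampleNewJSONLogger is a minimal illustrative sketch; the option values
// are assumptions: a named, leveled logger that emits one JSON object per
// line to stderr.
func exampleNewJSONLogger() Logger {
return New(&LoggerOptions{
Name:       "app",
Level:      Info,
Output:     os.Stderr,
JSONFormat: true,
})
}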
// InterceptLogger describes the interface for using a logger
// that can register different output sinks.
// This is useful for sending lower level log messages
// to a different output while keeping the root logger
// at a higher one.
type InterceptLogger interface {
// Logger is the root logger for an InterceptLogger
Logger
// RegisterSink adds a SinkAdapter to the InterceptLogger
RegisterSink(sink SinkAdapter)
// DeregisterSink removes a SinkAdapter from the InterceptLogger
DeregisterSink(sink SinkAdapter)
// Create an InterceptLogger that will prepend the name string on the front of all messages.
// If the logger already has a name, the new value will be appended to the current
// name. That way, a major subsystem can use this to decorate all its own logs
// without losing context.
NamedIntercept(name string) InterceptLogger
// Create an InterceptLogger that will prepend the name string on the front of all messages.
// This sets the name of the logger to the value directly, unlike Named, which honors
// the current name as well.
ResetNamedIntercept(name string) InterceptLogger
// Deprecated: use StandardLogger
StandardLoggerIntercept(opts *StandardLoggerOptions) *log.Logger
// Deprecated: use StandardWriter
StandardWriterIntercept(opts *StandardLoggerOptions) io.Writer
}
// SinkAdapter describes the interface that must be implemented
// in order to Register a new sink to an InterceptLogger
type SinkAdapter interface {
Accept(name string, level Level, msg string, args ...interface{})
}
// Flushable represents a method for flushing an output buffer. It can be used
// when resetting the logger to use a new output, in order to flush pending
// writes to the existing output beforehand.
type Flushable interface {
Flush() error
}
// OutputResettable provides ways to swap the output in use at runtime
type OutputResettable interface {
// ResetOutput swaps the current output writer with the one given in the
// opts. Color options given in opts will be used for the new output.
ResetOutput(opts *LoggerOptions) error
// ResetOutputWithFlush swaps the current output writer with the one given
// in the opts, first calling Flush on the given Flushable. Color options
// given in opts will be used for the new output.
ResetOutputWithFlush(opts *LoggerOptions, flushable Flushable) error
}
// Locker is used for locking output. If not set when creating a logger, a
// sync.Mutex will be used internally.
type Locker interface {
// Lock is called when the output is going to be changed or written to
Lock()
// Unlock is called when the operation that called Lock() completes
Unlock()
}
// NoopLocker implements locker but does nothing. This is useful if the client
// wants tight control over locking, in order to provide grouping of log
// entries or other functionality.
type NoopLocker struct{}
// Lock does nothing
func (n NoopLocker) Lock() {}
// Unlock does nothing
func (n NoopLocker) Unlock() {}
var _ Locker = (*NoopLocker)(nil)

63
vendor/github.com/hashicorp/go-hclog/nulllogger.go generated vendored Normal file
View File

@@ -0,0 +1,63 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MIT
package hclog
import (
"io"
"io/ioutil"
"log"
)
// NewNullLogger instantiates a Logger for which all calls
// will succeed without doing anything.
// Useful for testing purposes.
func NewNullLogger() Logger {
return &nullLogger{}
}
type nullLogger struct{}
func (l *nullLogger) Log(level Level, msg string, args ...interface{}) {}
func (l *nullLogger) Trace(msg string, args ...interface{}) {}
func (l *nullLogger) Debug(msg string, args ...interface{}) {}
func (l *nullLogger) Info(msg string, args ...interface{}) {}
func (l *nullLogger) Warn(msg string, args ...interface{}) {}
func (l *nullLogger) Error(msg string, args ...interface{}) {}
func (l *nullLogger) IsTrace() bool { return false }
func (l *nullLogger) IsDebug() bool { return false }
func (l *nullLogger) IsInfo() bool { return false }
func (l *nullLogger) IsWarn() bool { return false }
func (l *nullLogger) IsError() bool { return false }
func (l *nullLogger) ImpliedArgs() []interface{} { return []interface{}{} }
func (l *nullLogger) With(args ...interface{}) Logger { return l }
func (l *nullLogger) Name() string { return "" }
func (l *nullLogger) Named(name string) Logger { return l }
func (l *nullLogger) ResetNamed(name string) Logger { return l }
func (l *nullLogger) SetLevel(level Level) {}
func (l *nullLogger) GetLevel() Level { return NoLevel }
func (l *nullLogger) StandardLogger(opts *StandardLoggerOptions) *log.Logger {
return log.New(l.StandardWriter(opts), "", log.LstdFlags)
}
func (l *nullLogger) StandardWriter(opts *StandardLoggerOptions) io.Writer {
return ioutil.Discard
}
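// exampleNullLogger is a minimal illustrative sketch with assumed names:
// components that require a Logger can be handed a null logger in tests so
// nothing is written anywhere.
func exampleNullLogger() {
var l Logger = NewNullLogger()
l.Error("this message is discarded")
}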

109
vendor/github.com/hashicorp/go-hclog/stacktrace.go generated vendored Normal file
View File

@@ -0,0 +1,109 @@
// Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package hclog
import (
"bytes"
"runtime"
"strconv"
"strings"
"sync"
)
var (
_stacktraceIgnorePrefixes = []string{
"runtime.goexit",
"runtime.main",
}
_stacktracePool = sync.Pool{
New: func() interface{} {
return newProgramCounters(64)
},
}
)
// CapturedStacktrace represents a stacktrace captured by a previous call
// to log.Stacktrace. If passed to a logging function, the stacktrace
// will be appended.
type CapturedStacktrace string
// Stacktrace captures a stacktrace of the current goroutine and returns
// it to be passed to a logging function.
func Stacktrace() CapturedStacktrace {
return CapturedStacktrace(takeStacktrace())
}
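// exampleStacktrace is a minimal illustrative sketch with assumed names:
// the captured stacktrace is passed as an ordinary value and appended to
// the emitted log line.
func exampleStacktrace(l Logger) {
l.Error("unexpected state", "stacktrace", Stacktrace())
}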
func takeStacktrace() string {
programCounters := _stacktracePool.Get().(*programCounters)
defer _stacktracePool.Put(programCounters)
var buffer bytes.Buffer
for {
// Skip the call to runtime.Callers and takeStacktrace so that the
// program counters start at the caller of takeStacktrace.
n := runtime.Callers(2, programCounters.pcs)
if n < cap(programCounters.pcs) {
programCounters.pcs = programCounters.pcs[:n]
break
}
// Don't put the too-short counter slice back into the pool; this lets
// the pool adjust if we consistently take deep stacktraces.
programCounters = newProgramCounters(len(programCounters.pcs) * 2)
}
i := 0
frames := runtime.CallersFrames(programCounters.pcs)
for frame, more := frames.Next(); more; frame, more = frames.Next() {
if shouldIgnoreStacktraceFunction(frame.Function) {
continue
}
if i != 0 {
buffer.WriteByte('\n')
}
i++
buffer.WriteString(frame.Function)
buffer.WriteByte('\n')
buffer.WriteByte('\t')
buffer.WriteString(frame.File)
buffer.WriteByte(':')
buffer.WriteString(strconv.Itoa(int(frame.Line)))
}
return buffer.String()
}
func shouldIgnoreStacktraceFunction(function string) bool {
for _, prefix := range _stacktraceIgnorePrefixes {
if strings.HasPrefix(function, prefix) {
return true
}
}
return false
}
type programCounters struct {
pcs []uintptr
}
func newProgramCounters(size int) *programCounters {
return &programCounters{make([]uintptr, size)}
}

113
vendor/github.com/hashicorp/go-hclog/stdlog.go generated vendored Normal file
View File

@@ -0,0 +1,113 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MIT
package hclog
import (
"bytes"
"log"
"regexp"
"strings"
)
// Regex to ignore characters commonly found in timestamp formats from the
// beginning of inputs.
var logTimestampRegexp = regexp.MustCompile(`^[\d\s\:\/\.\+-TZ]*`)
// Provides an io.Writer to shim the data out of *log.Logger
// and back into our Logger. This is basically the only way to
// build upon *log.Logger.
type stdlogAdapter struct {
log Logger
inferLevels bool
inferLevelsWithTimestamp bool
forceLevel Level
}
// Take the data, infer the levels if configured, and send it through
// a regular Logger.
func (s *stdlogAdapter) Write(data []byte) (int, error) {
str := string(bytes.TrimRight(data, " \t\n"))
if s.forceLevel != NoLevel {
// Use pickLevel to strip log levels included in the line since we are
// forcing the level
_, str := s.pickLevel(str)
// Log at the forced level
s.dispatch(str, s.forceLevel)
} else if s.inferLevels {
if s.inferLevelsWithTimestamp {
str = s.trimTimestamp(str)
}
level, str := s.pickLevel(str)
s.dispatch(str, level)
} else {
s.log.Info(str)
}
return len(data), nil
}
func (s *stdlogAdapter) dispatch(str string, level Level) {
switch level {
case Trace:
s.log.Trace(str)
case Debug:
s.log.Debug(str)
case Info:
s.log.Info(str)
case Warn:
s.log.Warn(str)
case Error:
s.log.Error(str)
default:
s.log.Info(str)
}
}
// Detect, based on conventions, what log level this is.
func (s *stdlogAdapter) pickLevel(str string) (Level, string) {
switch {
case strings.HasPrefix(str, "[DEBUG]"):
return Debug, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[TRACE]"):
return Trace, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[INFO]"):
return Info, strings.TrimSpace(str[6:])
case strings.HasPrefix(str, "[WARN]"):
return Warn, strings.TrimSpace(str[6:])
case strings.HasPrefix(str, "[ERROR]"):
return Error, strings.TrimSpace(str[7:])
case strings.HasPrefix(str, "[ERR]"):
return Error, strings.TrimSpace(str[5:])
default:
return Info, str
}
}
func (s *stdlogAdapter) trimTimestamp(str string) string {
idx := logTimestampRegexp.FindStringIndex(str)
return str[idx[1]:]
}
type logWriter struct {
l *log.Logger
}
func (l *logWriter) Write(b []byte) (int, error) {
l.l.Println(string(bytes.TrimRight(b, " \n\t")))
return len(b), nil
}
// Takes a standard library logger and returns a Logger that will write to it
func FromStandardLogger(l *log.Logger, opts *LoggerOptions) Logger {
var dl LoggerOptions = *opts
// Use the time format that log.Logger uses
dl.DisableTime = true
dl.Output = &logWriter{l}
return New(&dl)
}
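// exampleFromStandardLogger is a minimal illustrative sketch with assumed
// names: wrap an existing *log.Logger so that code written against this
// package's Logger interface can reuse it as a sink.
func exampleFromStandardLogger(std *log.Logger) Logger {
return FromStandardLogger(std, &LoggerOptions{Name: "legacy", Level: Info})
}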

85
vendor/github.com/hashicorp/go-hclog/writer.go generated vendored Normal file
View File

@@ -0,0 +1,85 @@
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MIT
package hclog
import (
"bytes"
"io"
)
type writer struct {
b bytes.Buffer
w io.Writer
color ColorOption
}
func newWriter(w io.Writer, color ColorOption) *writer {
return &writer{w: w, color: color}
}
func (w *writer) Flush(level Level) (err error) {
var unwritten = w.b.Bytes()
if w.color != ColorOff {
color := _levelToColor[level]
unwritten = []byte(color.Sprintf("%s", unwritten))
}
if lw, ok := w.w.(LevelWriter); ok {
_, err = lw.LevelWrite(level, unwritten)
} else {
_, err = w.w.Write(unwritten)
}
w.b.Reset()
return err
}
func (w *writer) Write(p []byte) (int, error) {
return w.b.Write(p)
}
func (w *writer) WriteByte(c byte) error {
return w.b.WriteByte(c)
}
func (w *writer) WriteString(s string) (int, error) {
return w.b.WriteString(s)
}
// LevelWriter is the interface that wraps the LevelWrite method.
type LevelWriter interface {
LevelWrite(level Level, p []byte) (n int, err error)
}
// LeveledWriter writes all log messages to the standard writer,
// except for log levels that are defined in the overrides map.
type LeveledWriter struct {
standard io.Writer
overrides map[Level]io.Writer
}
// NewLeveledWriter returns an initialized LeveledWriter.
//
// standard will be used as the default writer for all log levels,
// except for log levels that are defined in the overrides map.
func NewLeveledWriter(standard io.Writer, overrides map[Level]io.Writer) *LeveledWriter {
return &LeveledWriter{
standard: standard,
overrides: overrides,
}
}
// Write implements io.Writer.
func (lw *LeveledWriter) Write(p []byte) (int, error) {
return lw.standard.Write(p)
}
// LevelWrite implements LevelWriter.
func (lw *LeveledWriter) LevelWrite(level Level, p []byte) (int, error) {
w, ok := lw.overrides[level]
if !ok {
w = lw.standard
}
return w.Write(p)
}
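// exampleLeveledWriter is a minimal illustrative sketch with assumed names:
// route ERROR output to a dedicated writer while all other levels share the
// standard one.
func exampleLeveledWriter(standard, errOut io.Writer) Logger {
lw := NewLeveledWriter(standard, map[Level]io.Writer{Error: errOut})
return New(&LoggerOptions{Name: "app", Output: lw})
}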

24
vendor/github.com/hashicorp/go-immutable-radix/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,24 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof

23
vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md generated vendored Normal file
View File

@@ -0,0 +1,23 @@
# UNRELEASED
# 1.3.0 (September 17th, 2020)
FEATURES
* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)]
# 1.2.0 (March 18th, 2020)
FEATURES
* Adds a `Clone` method to `Txn`, allowing a transaction to be split into two independently mutable trees. [[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)]
# 1.1.0 (May 22nd, 2019)
FEATURES
* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)]
# 1.0.0 (August 30th, 2018)
* go mod adopted

363
vendor/github.com/hashicorp/go-immutable-radix/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,363 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

66
vendor/github.com/hashicorp/go-immutable-radix/README.md generated vendored Normal file
View File

@@ -0,0 +1,66 @@
go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master)
=========
Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
The package only provides a single `Tree` implementation, optimized for sparse nodes.
As a radix tree, it provides the following:
* O(k) operations. In many cases, this can be faster than a hash table since
the hash function is an O(k) operation, and hash tables have very poor cache locality.
* Minimum / Maximum value lookups
* Ordered iteration
A tree supports using a transaction to batch multiple updates (insert, delete)
in a more efficient manner than performing each operation one at a time; see
the transaction sketch after the examples below.
For a mutable variant, see [go-radix](https://github.com/armon/go-radix).
Documentation
=============
The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).
Example
=======
Below is a simple example of usage
```go
// Create a tree
r := iradix.New()
r, _, _ = r.Insert([]byte("foo"), 1)
r, _, _ = r.Insert([]byte("bar"), 2)
r, _, _ = r.Insert([]byte("foobar"), 2)
// Find the longest prefix match
m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
if string(m) != "foo" {
panic("should be foo")
}
```
Here is an example of performing a range scan of the keys.
```go
// Create a tree
r := iradix.New()
r, _, _ = r.Insert([]byte("001"), 1)
r, _, _ = r.Insert([]byte("002"), 2)
r, _, _ = r.Insert([]byte("005"), 5)
r, _, _ = r.Insert([]byte("010"), 10)
r, _, _ = r.Insert([]byte("100"), 10)
// Range scan over the keys that sort lexicographically between [003, 050)
it := r.Root().Iterator()
it.SeekLowerBound([]byte("003"))
for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
if string(key) >= "050" {
break
}
fmt.Println(string(key))
}
// Output:
// 005
// 010
```
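Transactions batch multiple updates and commit them atomically as a new tree.
A minimal sketch of the `Txn` API (keys and values are illustrative):
```go
// Batch several updates in one transaction
r := iradix.New()
txn := r.Txn()
txn.Insert([]byte("a"), 1)
txn.Insert([]byte("b"), 2)
txn.Delete([]byte("a"))
r = txn.Commit()
fmt.Println(r.Len())
// Output:
// 1
```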

21
vendor/github.com/hashicorp/go-immutable-radix/edges.go generated vendored Normal file
View File

@@ -0,0 +1,21 @@
package iradix
import "sort"
type edges []edge
func (e edges) Len() int {
return len(e)
}
func (e edges) Less(i, j int) bool {
return e[i].label < e[j].label
}
func (e edges) Swap(i, j int) {
e[i], e[j] = e[j], e[i]
}
func (e edges) Sort() {
sort.Sort(e)
}

676
vendor/github.com/hashicorp/go-immutable-radix/iradix.go generated vendored Normal file
View File

@@ -0,0 +1,676 @@
package iradix
import (
"bytes"
"strings"
"github.com/hashicorp/golang-lru/simplelru"
)
const (
// defaultModifiedCache is the default size of the modified node
// cache used per transaction. This is used to cache the updates
// to the nodes near the root, while the leaves do not need to be
// cached. This is important for very large transactions to prevent
// the modified cache from growing to be enormous. This is also used
// to set the max size of the mutation notify maps since those should
// also be bounded in a similar way.
defaultModifiedCache = 8192
)
// Tree implements an immutable radix tree. This can be treated as a
// Dictionary abstract data type. The main advantage over a standard
// hash map is prefix-based lookups and ordered iteration. The immutability
// means that it is safe to concurrently read from a Tree without any
// coordination.
type Tree struct {
root *Node
size int
}
// New returns an empty Tree
func New() *Tree {
t := &Tree{
root: &Node{
mutateCh: make(chan struct{}),
},
}
return t
}
// Len is used to return the number of elements in the tree
func (t *Tree) Len() int {
return t.size
}
// Txn is a transaction on the tree. This transaction is applied
// atomically and returns a new tree when committed. A transaction
// is not thread safe, and should only be used by a single goroutine.
type Txn struct {
// root is the modified root for the transaction.
root *Node
// snap is a snapshot of the root node for use if we have to run the
// slow notify algorithm.
snap *Node
// size tracks the size of the tree as it is modified during the
// transaction.
size int
// writable is a cache of writable nodes that have been created during
// the course of the transaction. This allows us to re-use the same
// nodes for further writes and avoid unnecessary copies of nodes that
// have never been exposed outside the transaction. This will only hold
// up to defaultModifiedCache number of entries.
writable *simplelru.LRU
// trackChannels is used to hold channels that need to be notified to
// signal mutation of the tree. This will only hold up to
// defaultModifiedCache number of entries, after which we will set the
// trackOverflow flag, which will cause us to use a more expensive
// algorithm to perform the notifications. Mutation tracking is only
// performed if trackMutate is true.
trackChannels map[chan struct{}]struct{}
trackOverflow bool
trackMutate bool
}
// Txn starts a new transaction that can be used to mutate the tree
func (t *Tree) Txn() *Txn {
txn := &Txn{
root: t.root,
snap: t.root,
size: t.size,
}
return txn
}
// Clone makes an independent copy of the transaction. The new transaction
// does not track any nodes and has TrackMutate turned off. The cloned
// transaction will contain any uncommitted writes in the original transaction,
// but further mutations to either will be independent and result in different
// radix trees on Commit. A cloned transaction may be passed to another
// goroutine and mutated there independently; however, each transaction may
// only be mutated in a single thread.
func (t *Txn) Clone() *Txn {
// reset the writable node cache to avoid leaking future writes into the clone
t.writable = nil
txn := &Txn{
root: t.root,
snap: t.snap,
size: t.size,
}
return txn
}
// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
// then notifications will be issued for affected internal nodes and leaves when
// the transaction is committed.
func (t *Txn) TrackMutate(track bool) {
t.trackMutate = track
}
// trackChannel safely attempts to track the given mutation channel, setting the
// overflow flag if we can no longer track any more. This limits the amount of
// state that will accumulate during a transaction and we have a slower algorithm
// to switch to if we overflow.
func (t *Txn) trackChannel(ch chan struct{}) {
// In overflow, make sure we don't store any more objects.
if t.trackOverflow {
return
}
// If this would overflow the state we reject it and set the flag (since
// we aren't tracking everything that's required any longer).
if len(t.trackChannels) >= defaultModifiedCache {
// Mark that we are in the overflow state
t.trackOverflow = true
// Clear the map so that the channels can be garbage collected. It is
// safe to do this since we have already overflowed and will be using
// the slow notify algorithm.
t.trackChannels = nil
return
}
// Create the map on the fly when we need it.
if t.trackChannels == nil {
t.trackChannels = make(map[chan struct{}]struct{})
}
// Otherwise we are good to track it.
t.trackChannels[ch] = struct{}{}
}
// writeNode returns a node to be modified. If the current node has already
// been modified during the course of the transaction, it is used in-place. Set
// forLeafUpdate to true if you are getting a write node to update the leaf,
// which will set leaf mutation tracking appropriately as well.
func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
// Ensure the writable set exists.
if t.writable == nil {
lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
if err != nil {
panic(err)
}
t.writable = lru
}
// If this node has already been modified, we can continue to use it
// during this transaction. We know that we don't need to track it for
// a node update since the node is writable, but if this is for a leaf
// update we track it, in case the initial write to this node didn't
// update the leaf.
if _, ok := t.writable.Get(n); ok {
if t.trackMutate && forLeafUpdate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
return n
}
// Mark this node as being mutated.
if t.trackMutate {
t.trackChannel(n.mutateCh)
}
// Mark its leaf as being mutated, if appropriate.
if t.trackMutate && forLeafUpdate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
// Copy the existing node. If you have set forLeafUpdate it will be
// safe to replace this leaf with another after you get your node for
// writing. You MUST replace it, because the channel associated with
// this leaf will be closed when this transaction is committed.
nc := &Node{
mutateCh: make(chan struct{}),
leaf: n.leaf,
}
if n.prefix != nil {
nc.prefix = make([]byte, len(n.prefix))
copy(nc.prefix, n.prefix)
}
if len(n.edges) != 0 {
nc.edges = make([]edge, len(n.edges))
copy(nc.edges, n.edges)
}
// Mark this node as writable.
t.writable.Add(nc, nil)
return nc
}
// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction
// Returns the size of the subtree visited
func (t *Txn) trackChannelsAndCount(n *Node) int {
// Count only leaf nodes
leaves := 0
if n.leaf != nil {
leaves = 1
}
// Mark this node as being mutated.
if t.trackMutate {
t.trackChannel(n.mutateCh)
}
// Mark its leaf as being mutated, if appropriate.
if t.trackMutate && n.leaf != nil {
t.trackChannel(n.leaf.mutateCh)
}
// Recurse on the children
for _, e := range n.edges {
leaves += t.trackChannelsAndCount(e.node)
}
return leaves
}
// mergeChild is called to collapse the given node with its child. This is only
// called when the given node is not a leaf and has a single edge.
func (t *Txn) mergeChild(n *Node) {
// Mark the child node as being mutated since we are about to abandon
// it. We don't need to mark the leaf since we are retaining it if it
// is there.
e := n.edges[0]
child := e.node
if t.trackMutate {
t.trackChannel(child.mutateCh)
}
// Merge the nodes.
n.prefix = concat(n.prefix, child.prefix)
n.leaf = child.leaf
if len(child.edges) != 0 {
n.edges = make([]edge, len(child.edges))
copy(n.edges, child.edges)
} else {
n.edges = nil
}
}
// insert does a recursive insertion
func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
// Handle key exhaustion
if len(search) == 0 {
var oldVal interface{}
didUpdate := false
if n.isLeaf() {
oldVal = n.leaf.val
didUpdate = true
}
nc := t.writeNode(n, true)
nc.leaf = &leafNode{
mutateCh: make(chan struct{}),
key: k,
val: v,
}
return nc, oldVal, didUpdate
}
// Look for the edge
idx, child := n.getEdge(search[0])
// No edge, create one
if child == nil {
e := edge{
label: search[0],
node: &Node{
mutateCh: make(chan struct{}),
leaf: &leafNode{
mutateCh: make(chan struct{}),
key: k,
val: v,
},
prefix: search,
},
}
nc := t.writeNode(n, false)
nc.addEdge(e)
return nc, nil, false
}
// Determine longest prefix of the search key on match
commonPrefix := longestPrefix(search, child.prefix)
if commonPrefix == len(child.prefix) {
search = search[commonPrefix:]
newChild, oldVal, didUpdate := t.insert(child, k, search, v)
if newChild != nil {
nc := t.writeNode(n, false)
nc.edges[idx].node = newChild
return nc, oldVal, didUpdate
}
return nil, oldVal, didUpdate
}
// Split the node
nc := t.writeNode(n, false)
splitNode := &Node{
mutateCh: make(chan struct{}),
prefix: search[:commonPrefix],
}
nc.replaceEdge(edge{
label: search[0],
node: splitNode,
})
// Restore the existing child node
modChild := t.writeNode(child, false)
splitNode.addEdge(edge{
label: modChild.prefix[commonPrefix],
node: modChild,
})
modChild.prefix = modChild.prefix[commonPrefix:]
// Create a new leaf node
leaf := &leafNode{
mutateCh: make(chan struct{}),
key: k,
val: v,
}
// If the new key is a subset, add it to this node
search = search[commonPrefix:]
if len(search) == 0 {
splitNode.leaf = leaf
return nc, nil, false
}
// Create a new edge for the node
splitNode.addEdge(edge{
label: search[0],
node: &Node{
mutateCh: make(chan struct{}),
leaf: leaf,
prefix: search,
},
})
return nc, nil, false
}
// delete does a recursive deletion
func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
// Check for key exhaustion
if len(search) == 0 {
if !n.isLeaf() {
return nil, nil
}
// Copy the pointer in case we are in a transaction that already
// modified this node since the node will be reused. Any changes
// made to the node will not affect returning the original leaf
// value.
oldLeaf := n.leaf
// Remove the leaf node
nc := t.writeNode(n, true)
nc.leaf = nil
// Check if this node should be merged
if n != t.root && len(nc.edges) == 1 {
t.mergeChild(nc)
}
return nc, oldLeaf
}
// Look for an edge
label := search[0]
idx, child := n.getEdge(label)
if child == nil || !bytes.HasPrefix(search, child.prefix) {
return nil, nil
}
// Consume the search prefix
search = search[len(child.prefix):]
newChild, leaf := t.delete(n, child, search)
if newChild == nil {
return nil, nil
}
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
// will only ADD a leaf via nc.mergeChild() if there isn't one due to
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
// so be careful if you change any of the logic here.
nc := t.writeNode(n, false)
// Delete the edge if the node has no edges
if newChild.leaf == nil && len(newChild.edges) == 0 {
nc.delEdge(label)
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
t.mergeChild(nc)
}
} else {
nc.edges[idx].node = newChild
}
return nc, leaf
}
// deletePrefix does a recursive deletion of the subtree under the given prefix
func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
// Check for key exhaustion
if len(search) == 0 {
nc := t.writeNode(n, true)
if n.isLeaf() {
nc.leaf = nil
}
nc.edges = nil
return nc, t.trackChannelsAndCount(n)
}
// Look for an edge
label := search[0]
idx, child := n.getEdge(label)
// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
return nil, 0
}
// Consume the search prefix
if len(child.prefix) > len(search) {
search = []byte("")
} else {
search = search[len(child.prefix):]
}
newChild, numDeletions := t.deletePrefix(n, child, search)
if newChild == nil {
return nil, 0
}
// Copy this node. WATCH OUT - it's safe to pass "false" here because we
// will only ADD a leaf via nc.mergeChild() if there isn't one due to
// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
// so be careful if you change any of the logic here.
nc := t.writeNode(n, false)
// Delete the edge if the node has no edges
if newChild.leaf == nil && len(newChild.edges) == 0 {
nc.delEdge(label)
if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
t.mergeChild(nc)
}
} else {
nc.edges[idx].node = newChild
}
return nc, numDeletions
}
// Insert is used to add or update a given key. The return provides
// the previous value and a bool indicating if any was set.
func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
if newRoot != nil {
t.root = newRoot
}
if !didUpdate {
t.size++
}
return oldVal, didUpdate
}
// Delete is used to delete a given key. Returns the old value if any,
// and a bool indicating if the key was set.
func (t *Txn) Delete(k []byte) (interface{}, bool) {
newRoot, leaf := t.delete(nil, t.root, k)
if newRoot != nil {
t.root = newRoot
}
if leaf != nil {
t.size--
return leaf.val, true
}
return nil, false
}
// DeletePrefix is used to delete an entire subtree that matches the prefix
// This will delete all nodes under that prefix
func (t *Txn) DeletePrefix(prefix []byte) bool {
newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
if newRoot != nil {
t.root = newRoot
t.size = t.size - numDeletions
return true
}
return false
}
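// exampleDeletePrefix is a minimal illustrative sketch with assumed names:
// remove the entire keyspace under a common prefix in one transaction.
func exampleDeletePrefix(t *Tree) *Tree {
txn := t.Txn()
txn.DeletePrefix([]byte("tmp/")) // drops every key beginning with "tmp/"
return txn.Commit()
}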
// Root returns the current root of the radix tree within this
// transaction. The root is not safe across insert and delete operations,
// but can be used to read the current state during a transaction.
func (t *Txn) Root() *Node {
return t.root
}
// Get is used to lookup a specific key, returning
// the value and if it was found
func (t *Txn) Get(k []byte) (interface{}, bool) {
return t.root.Get(k)
}
// GetWatch is used to lookup a specific key, returning
// the watch channel, value and if it was found
func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
return t.root.GetWatch(k)
}
// Commit is used to finalize the transaction and return a new tree. If mutation
// tracking is turned on then notifications will also be issued.
func (t *Txn) Commit() *Tree {
nt := t.CommitOnly()
if t.trackMutate {
t.Notify()
}
return nt
}
// CommitOnly is used to finalize the transaction and return a new tree, but
// does not issue any notifications until Notify is called.
func (t *Txn) CommitOnly() *Tree {
nt := &Tree{t.root, t.size}
t.writable = nil
return nt
}
// slowNotify does a complete comparison of the before and after trees in order
// to trigger notifications. This doesn't require any additional state but it
// is very expensive to compute.
func (t *Txn) slowNotify() {
snapIter := t.snap.rawIterator()
rootIter := t.root.rawIterator()
for snapIter.Front() != nil || rootIter.Front() != nil {
// If we've exhausted the nodes in the old snapshot, we know
// there's nothing remaining to notify.
if snapIter.Front() == nil {
return
}
snapElem := snapIter.Front()
// If we've exhausted the nodes in the new root, we know we need
// to invalidate everything that remains in the old snapshot. We
// know from the loop condition there's something in the old
// snapshot.
if rootIter.Front() == nil {
close(snapElem.mutateCh)
if snapElem.isLeaf() {
close(snapElem.leaf.mutateCh)
}
snapIter.Next()
continue
}
// Do one string compare so we can check the various conditions
// below without repeating the compare.
cmp := strings.Compare(snapIter.Path(), rootIter.Path())
// If the snapshot is behind the root, then we must have deleted
// this node during the transaction.
if cmp < 0 {
close(snapElem.mutateCh)
if snapElem.isLeaf() {
close(snapElem.leaf.mutateCh)
}
snapIter.Next()
continue
}
// If the snapshot is ahead of the root, then we must have added
// this node during the transaction.
if cmp > 0 {
rootIter.Next()
continue
}
// If we have the same path, then we need to see if we mutated a
// node and possibly the leaf.
rootElem := rootIter.Front()
if snapElem != rootElem {
close(snapElem.mutateCh)
if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
close(snapElem.leaf.mutateCh)
}
}
snapIter.Next()
rootIter.Next()
}
}
// Notify is used along with TrackMutate to trigger notifications. This must
// only be done once a transaction is committed via CommitOnly, and it is called
// automatically by Commit.
func (t *Txn) Notify() {
if !t.trackMutate {
return
}
// If we've overflowed the tracking state we can't use it in any way and
// need to do a full tree compare.
if t.trackOverflow {
t.slowNotify()
} else {
for ch := range t.trackChannels {
close(ch)
}
}
// Clean up the tracking state so that a re-notify is safe (will trigger
// the else clause above which will be a no-op).
t.trackChannels = nil
t.trackOverflow = false
}
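// A hedged sketch of the notification flow above (it assumes the package's
// exported New constructor and the Txn.TrackMutate method, which are defined
// elsewhere in this package and not shown in this excerpt):
//
//	tree := New()
//	tree, _, _ = tree.Insert([]byte("foo"), 1)
//	txn := tree.Txn()
//	txn.TrackMutate(true)        // record mutated channels for Notify
//	watch, _, _ := txn.GetWatch([]byte("foo"))
//	txn.Insert([]byte("foo"), 2) // replaces the leaf holding "foo"
//	tree = txn.Commit()          // Commit runs Notify, closing tracked channels
//	<-watch                      // unblocks: the old leaf's channel was closed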
// Insert is used to add or update a given key. The return provides
// the new tree, previous value and a bool indicating if any was set.
func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
txn := t.Txn()
old, ok := txn.Insert(k, v)
return txn.Commit(), old, ok
}
// Delete is used to delete a given key. Returns the new tree,
// old value if any, and a bool indicating if the key was set.
func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
txn := t.Txn()
old, ok := txn.Delete(k)
return txn.Commit(), old, ok
}
// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
// and a bool indicating if the prefix matched any nodes
func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
txn := t.Txn()
ok := txn.DeletePrefix(k)
return txn.Commit(), ok
}
// Root returns the root node of the tree which can be used for richer
// query operations.
func (t *Tree) Root() *Node {
return t.root
}
// Get is used to lookup a specific key, returning
// the value and if it was found
func (t *Tree) Get(k []byte) (interface{}, bool) {
return t.root.Get(k)
}
// longestPrefix finds the length of the shared prefix
// of two byte slices
func longestPrefix(k1, k2 []byte) int {
max := len(k1)
if l := len(k2); l < max {
max = l
}
var i int
for i = 0; i < max; i++ {
if k1[i] != k2[i] {
break
}
}
return i
}
// concat two byte slices, returning a third new copy
func concat(a, b []byte) []byte {
c := make([]byte, len(a)+len(b))
copy(c, a)
copy(c[len(a):], b)
return c
}
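
Taken together, the pieces above give the package its copy-on-write API: every mutation produces a new root while old snapshots stay readable. A minimal sketch of how they compose, assuming the upstream import path github.com/hashicorp/go-immutable-radix and its exported New constructor:

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()

	// Every mutation returns a new root; old snapshots stay valid.
	r1, _, _ := r.Insert([]byte("app"), 1)
	r2, _, _ := r1.Insert([]byte("apple"), 2)

	if v, ok := r2.Get([]byte("app")); ok {
		fmt.Println("app =", v) // app = 1
	}

	// DeletePrefix drops the whole subtree in one transaction.
	r3, _ := r2.DeletePrefix([]byte("app"))
	_, ok := r3.Get([]byte("apple"))
	fmt.Println("apple after DeletePrefix:", ok) // false

	// The earlier snapshot is untouched.
	_, ok = r2.Get([]byte("apple"))
	fmt.Println("apple still in r2:", ok) // true
}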

205
vendor/github.com/hashicorp/go-immutable-radix/iter.go generated vendored Normal file
View File

@@ -0,0 +1,205 @@
package iradix
import (
"bytes"
)
// Iterator is used to iterate over a set of nodes
// in pre-order
type Iterator struct {
node *Node
stack []edges
}
// SeekPrefixWatch is used to seek the iterator to a given prefix
// and returns the watch channel of the finest granularity
func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
// Wipe the stack
i.stack = nil
n := i.node
watch = n.mutateCh
search := prefix
for {
// Check for key exhaustion
if len(search) == 0 {
i.node = n
return
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
i.node = nil
return
}
// Update to the finest granularity as the search makes progress
watch = n.mutateCh
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else if bytes.HasPrefix(n.prefix, search) {
i.node = n
return
} else {
i.node = nil
return
}
}
}
// SeekPrefix is used to seek the iterator to a given prefix
func (i *Iterator) SeekPrefix(prefix []byte) {
i.SeekPrefixWatch(prefix)
}
func (i *Iterator) recurseMin(n *Node) *Node {
// Traverse to the minimum child
if n.leaf != nil {
return n
}
nEdges := len(n.edges)
if nEdges > 1 {
// Add all the other edges to the stack (the min node will be added as
// we recurse)
i.stack = append(i.stack, n.edges[1:])
}
if nEdges > 0 {
return i.recurseMin(n.edges[0].node)
}
// Shouldn't be possible
return nil
}
// SeekLowerBound is used to seek the iterator to the smallest key that is
// greater or equal to the given key. There is no watch variant as it's hard to
// predict based on the radix structure which node(s) changes might affect the
// result.
func (i *Iterator) SeekLowerBound(key []byte) {
// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
// go because we need only a subset of edges of many nodes in the path to the
// leaf with the lower bound. Note that the iterator will still recurse into
// children that we don't traverse on the way to the reverse lower bound as it
// walks the stack.
i.stack = []edges{}
// i.node starts off in the common case as pointing to the root node of the
// tree. By the time we return we have either found a lower bound and setup
// the stack to traverse all larger keys, or we have not and the stack and
// node should both be nil to prevent the iterator from assuming it is just
// iterating the whole tree from the root node. Either way this needs to end
// up as nil so just set it here.
n := i.node
i.node = nil
search := key
found := func(n *Node) {
i.stack = append(i.stack, edges{edge{node: n}})
}
findMin := func(n *Node) {
n = i.recurseMin(n)
if n != nil {
found(n)
return
}
}
for {
// Compare current prefix with the search key's same-length prefix.
var prefixCmp int
if len(n.prefix) < len(search) {
prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
} else {
prefixCmp = bytes.Compare(n.prefix, search)
}
if prefixCmp > 0 {
// Prefix is larger, that means the lower bound is greater than the search
// and from now on we need to follow the minimum path to the smallest
// leaf under this subtree.
findMin(n)
return
}
if prefixCmp < 0 {
// Prefix is smaller than search prefix, that means there is no lower
// bound
i.node = nil
return
}
// Prefix is equal, we are still heading for an exact match. If this is a
// leaf and an exact match we're done.
if n.leaf != nil && bytes.Equal(n.leaf.key, key) {
found(n)
return
}
// Consume the search prefix if the current node has one. Note that this is
// safe because if n.prefix is longer than the search slice prefixCmp would
// have been > 0 above and the method would have already returned.
search = search[len(n.prefix):]
if len(search) == 0 {
// We've exhausted the search key, but the current node is not an exact
// match or not a leaf. That means the leaf value, if it exists, and all
// child nodes must be strictly greater, so the smallest key in this
// subtree must be the lower bound.
findMin(n)
return
}
// Otherwise, take the lower bound next edge.
idx, lbNode := n.getLowerBoundEdge(search[0])
if lbNode == nil {
return
}
// Create stack edges for all the strictly higher edges in this node.
if idx+1 < len(n.edges) {
i.stack = append(i.stack, n.edges[idx+1:])
}
// Recurse
n = lbNode
}
}
// Next returns the next node in order
func (i *Iterator) Next() ([]byte, interface{}, bool) {
// Initialize our stack if needed
if i.stack == nil && i.node != nil {
i.stack = []edges{
{
edge{node: i.node},
},
}
}
for len(i.stack) > 0 {
// Inspect the last element of the stack
n := len(i.stack)
last := i.stack[n-1]
elem := last[0].node
// Update the stack
if len(last) > 1 {
i.stack[n-1] = last[1:]
} else {
i.stack = i.stack[:n-1]
}
// Push the edges onto the frontier
if len(elem.edges) > 0 {
i.stack = append(i.stack, elem.edges)
}
// Return the leaf values if any
if elem.leaf != nil {
return elem.leaf.key, elem.leaf.val, true
}
}
return nil, nil, false
}
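
The iterator above supports both prefix scans and ordered range scans. A hedged usage sketch, assuming the upstream import path and the exported Root/Iterator accessors defined in node.go:

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	for i, k := range []string{"cat", "car", "cart", "dog"} {
		r, _, _ = r.Insert([]byte(k), i)
	}

	// Pre-order iteration over everything under a prefix.
	it := r.Root().Iterator()
	it.SeekPrefix([]byte("car"))
	for k, _, ok := it.Next(); ok; k, _, ok = it.Next() {
		fmt.Println(string(k)) // car, cart
	}

	// SeekLowerBound positions at the smallest key >= the given key.
	it = r.Root().Iterator()
	it.SeekLowerBound([]byte("cas"))
	if k, _, ok := it.Next(); ok {
		fmt.Println("lower bound:", string(k)) // cat
	}
}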

334
vendor/github.com/hashicorp/go-immutable-radix/node.go generated vendored Normal file
View File

@@ -0,0 +1,334 @@
package iradix
import (
"bytes"
"sort"
)
// WalkFn is used when walking the tree. Takes a
// key and value, returning if iteration should
// be terminated.
type WalkFn func(k []byte, v interface{}) bool
// leafNode is used to represent a value
type leafNode struct {
mutateCh chan struct{}
key []byte
val interface{}
}
// edge is used to represent an edge node
type edge struct {
label byte
node *Node
}
// Node is an immutable node in the radix tree
type Node struct {
// mutateCh is closed if this node is modified
mutateCh chan struct{}
// leaf is used to store possible leaf
leaf *leafNode
// prefix is the common prefix we ignore
prefix []byte
// Edges should be stored in-order for iteration.
// We avoid a fully materialized slice to save memory,
// since in most cases we expect to be sparse
edges edges
}
func (n *Node) isLeaf() bool {
return n.leaf != nil
}
func (n *Node) addEdge(e edge) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= e.label
})
n.edges = append(n.edges, e)
if idx != num {
copy(n.edges[idx+1:], n.edges[idx:num])
n.edges[idx] = e
}
}
func (n *Node) replaceEdge(e edge) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= e.label
})
if idx < num && n.edges[idx].label == e.label {
n.edges[idx].node = e.node
return
}
panic("replacing missing edge")
}
func (n *Node) getEdge(label byte) (int, *Node) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= label
})
if idx < num && n.edges[idx].label == label {
return idx, n.edges[idx].node
}
return -1, nil
}
func (n *Node) getLowerBoundEdge(label byte) (int, *Node) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= label
})
// we want lower bound behavior so return even if it's not an exact match
if idx < num {
return idx, n.edges[idx].node
}
return -1, nil
}
func (n *Node) delEdge(label byte) {
num := len(n.edges)
idx := sort.Search(num, func(i int) bool {
return n.edges[i].label >= label
})
if idx < num && n.edges[idx].label == label {
copy(n.edges[idx:], n.edges[idx+1:])
n.edges[len(n.edges)-1] = edge{}
n.edges = n.edges[:len(n.edges)-1]
}
}
func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
search := k
watch := n.mutateCh
for {
// Check for key exhaustion
if len(search) == 0 {
if n.isLeaf() {
return n.leaf.mutateCh, n.leaf.val, true
}
break
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
break
}
// Update to the finest granularity as the search makes progress
watch = n.mutateCh
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else {
break
}
}
return watch, nil, false
}
func (n *Node) Get(k []byte) (interface{}, bool) {
_, val, ok := n.GetWatch(k)
return val, ok
}
// LongestPrefix is like Get, but instead of an
// exact match, it will return the longest prefix match.
func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
var last *leafNode
search := k
for {
// Look for a leaf node
if n.isLeaf() {
last = n.leaf
}
// Check for key exhaustion
if len(search) == 0 {
break
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
break
}
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else {
break
}
}
if last != nil {
return last.key, last.val, true
}
return nil, nil, false
}
// Minimum is used to return the minimum value in the tree
func (n *Node) Minimum() ([]byte, interface{}, bool) {
for {
if n.isLeaf() {
return n.leaf.key, n.leaf.val, true
}
if len(n.edges) > 0 {
n = n.edges[0].node
} else {
break
}
}
return nil, nil, false
}
// Maximum is used to return the maximum value in the tree
func (n *Node) Maximum() ([]byte, interface{}, bool) {
for {
if num := len(n.edges); num > 0 {
n = n.edges[num-1].node
continue
}
if n.isLeaf() {
return n.leaf.key, n.leaf.val, true
} else {
break
}
}
return nil, nil, false
}
// Iterator is used to return an iterator at
// the given node to walk the tree
func (n *Node) Iterator() *Iterator {
return &Iterator{node: n}
}
// ReverseIterator is used to return an iterator at
// the given node to walk the tree backwards
func (n *Node) ReverseIterator() *ReverseIterator {
return NewReverseIterator(n)
}
// rawIterator is used to return a raw iterator at the given node to walk the
// tree.
func (n *Node) rawIterator() *rawIterator {
iter := &rawIterator{node: n}
iter.Next()
return iter
}
// Walk is used to walk the tree
func (n *Node) Walk(fn WalkFn) {
recursiveWalk(n, fn)
}
// WalkBackwards is used to walk the tree in reverse order
func (n *Node) WalkBackwards(fn WalkFn) {
reverseRecursiveWalk(n, fn)
}
// WalkPrefix is used to walk the tree under a prefix
func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
search := prefix
for {
// Check for key exhaustion
if len(search) == 0 {
recursiveWalk(n, fn)
return
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
break
}
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else if bytes.HasPrefix(n.prefix, search) {
// Child may be under our search prefix
recursiveWalk(n, fn)
return
} else {
break
}
}
}
// WalkPath is used to walk the tree, but only visiting nodes
// from the root down to a given leaf. Where WalkPrefix walks
// all the entries *under* the given prefix, this walks the
// entries *above* the given prefix.
func (n *Node) WalkPath(path []byte, fn WalkFn) {
search := path
for {
// Visit the leaf values if any
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
return
}
// Check for key exhaustion
if len(search) == 0 {
return
}
// Look for an edge
_, n = n.getEdge(search[0])
if n == nil {
return
}
// Consume the search prefix
if bytes.HasPrefix(search, n.prefix) {
search = search[len(n.prefix):]
} else {
break
}
}
}
// recursiveWalk is used to do a pre-order walk of a node
// recursively. Returns true if the walk should be aborted
func recursiveWalk(n *Node, fn WalkFn) bool {
// Visit the leaf values if any
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
return true
}
// Recurse on the children
for _, e := range n.edges {
if recursiveWalk(e.node, fn) {
return true
}
}
return false
}
// reverseRecursiveWalk is used to do a reverse pre-order
// walk of a node recursively. Returns true if the walk
// should be aborted
func reverseRecursiveWalk(n *Node, fn WalkFn) bool {
// Visit the leaf values if any
if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
return true
}
// Recurse on the children in reverse order
for i := len(n.edges) - 1; i >= 0; i-- {
e := n.edges[i]
if reverseRecursiveWalk(e.node, fn) {
return true
}
}
return false
}
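
The walk helpers above cover the three common traversals: the whole tree, everything under a prefix, and the path from the root down to a key. A brief sketch under the same import-path assumption:

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	for i, k := range []string{"us", "user", "users", "zebra"} {
		r, _, _ = r.Insert([]byte(k), i)
	}

	// WalkPrefix visits every entry *under* the prefix.
	r.Root().WalkPrefix([]byte("user"), func(k []byte, v interface{}) bool {
		fmt.Println("under:", string(k)) // user, users
		return false                     // false = keep walking
	})

	// WalkPath visits the entries *above* the key, root downwards.
	r.Root().WalkPath([]byte("users"), func(k []byte, v interface{}) bool {
		fmt.Println("above:", string(k)) // us, user, users
		return false
	})
}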

View File

@@ -0,0 +1,78 @@
package iradix
// rawIterator visits each of the nodes in the tree, even the ones that are not
// leaves. It keeps track of the effective path (what a leaf at a given node
// would be called), which is useful for comparing trees.
type rawIterator struct {
// node is the starting node in the tree for the iterator.
node *Node
// stack keeps track of edges in the frontier.
stack []rawStackEntry
// pos is the current position of the iterator.
pos *Node
// path is the effective path of the current iterator position,
// regardless of whether the current node is a leaf.
path string
}
// rawStackEntry is used to keep track of the cumulative common path as well as
// its associated edges in the frontier.
type rawStackEntry struct {
path string
edges edges
}
// Front returns the current node that has been iterated to.
func (i *rawIterator) Front() *Node {
return i.pos
}
// Path returns the effective path of the current node, even if it's not actually
// a leaf.
func (i *rawIterator) Path() string {
return i.path
}
// Next advances the iterator to the next node.
func (i *rawIterator) Next() {
// Initialize our stack if needed.
if i.stack == nil && i.node != nil {
i.stack = []rawStackEntry{
{
edges: edges{
edge{node: i.node},
},
},
}
}
for len(i.stack) > 0 {
// Inspect the last element of the stack.
n := len(i.stack)
last := i.stack[n-1]
elem := last.edges[0].node
// Update the stack.
if len(last.edges) > 1 {
i.stack[n-1].edges = last.edges[1:]
} else {
i.stack = i.stack[:n-1]
}
// Push the edges onto the frontier.
if len(elem.edges) > 0 {
path := last.path + string(elem.prefix)
i.stack = append(i.stack, rawStackEntry{path, elem.edges})
}
i.pos = elem
i.path = last.path + string(elem.prefix)
return
}
i.pos = nil
i.path = ""
}

View File

@@ -0,0 +1,239 @@
package iradix
import (
"bytes"
)
// ReverseIterator is used to iterate over a set of nodes
// in reverse in-order
type ReverseIterator struct {
i *Iterator
// expandedParents stores the set of parent nodes whose relevant children have
// already been pushed into the stack. This can happen during seek or during
// iteration.
//
// Unlike forward iteration we need to recurse into children before we can
// output the value stored in an internal leaf since all children are greater.
// We use this to track whether we have already ensured all the children are
// in the stack.
expandedParents map[*Node]struct{}
}
// NewReverseIterator returns a new ReverseIterator at a node
func NewReverseIterator(n *Node) *ReverseIterator {
return &ReverseIterator{
i: &Iterator{node: n},
}
}
// SeekPrefixWatch is used to seek the iterator to a given prefix
// and returns the watch channel of the finest granularity
func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
return ri.i.SeekPrefixWatch(prefix)
}
// SeekPrefix is used to seek the iterator to a given prefix
func (ri *ReverseIterator) SeekPrefix(prefix []byte) {
ri.i.SeekPrefixWatch(prefix)
}
// SeekReverseLowerBound is used to seek the iterator to the largest key that is
// lower or equal to the given key. There is no watch variant as it's hard to
// predict based on the radix structure which node(s) changes might affect the
// result.
func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) {
// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
// go because we need only a subset of edges of many nodes in the path to the
// leaf with the lower bound. Note that the iterator will still recurse into
// children that we don't traverse on the way to the reverse lower bound as it
// walks the stack.
ri.i.stack = []edges{}
// ri.i.node starts off in the common case as pointing to the root node of the
// tree. By the time we return we have either found a lower bound and setup
// the stack to traverse all larger keys, or we have not and the stack and
// node should both be nil to prevent the iterator from assuming it is just
// iterating the whole tree from the root node. Either way this needs to end
// up as nil so just set it here.
n := ri.i.node
ri.i.node = nil
search := key
if ri.expandedParents == nil {
ri.expandedParents = make(map[*Node]struct{})
}
found := func(n *Node) {
ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
// We need to mark this node as expanded in advance too otherwise the
// iterator will attempt to walk all of its children even though they are
// greater than the lower bound we have found. We've expanded it in the
// sense that all of its children that we want to walk are already in the
// stack (i.e. none of them).
ri.expandedParents[n] = struct{}{}
}
for {
// Compare current prefix with the search key's same-length prefix.
var prefixCmp int
if len(n.prefix) < len(search) {
prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
} else {
prefixCmp = bytes.Compare(n.prefix, search)
}
if prefixCmp < 0 {
// Prefix is smaller than search prefix, that means there is no exact
// match for the search key. But we are looking in reverse, so the reverse
// lower bound will be the largest leaf under this subtree, since it is
// the value that would come right before the current search key if it
// were in the tree. So we need to follow the maximum path in this subtree
// to find it. Note that this is exactly what the iterator will already do
// if it finds a node in the stack that has _not_ been marked as expanded
// so in this one case we don't call `found` and instead let the iterator
// do the expansion and recursion through all the children.
ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
return
}
if prefixCmp > 0 {
// Prefix is larger than search prefix, or there is no prefix but we've
// also exhausted the search key. Either way, that means there is no
// reverse lower bound since nothing comes before our current search
// prefix.
return
}
// If this is a leaf, something needs to happen! Note that if it's a leaf
// and prefixCmp was zero (which it must be to get here) then the leaf value
// is either an exact match for the search, or it's lower. It can't be
// greater.
if n.isLeaf() {
// Firstly, if it's an exact match, we're done!
if bytes.Equal(n.leaf.key, key) {
found(n)
return
}
// It's not so this node's leaf value must be lower and could still be a
// valid contender for reverse lower bound.
// If it has no children then we are also done.
if len(n.edges) == 0 {
// This leaf is the lower bound.
found(n)
return
}
// Finally, this leaf is internal (has children) so we'll keep searching,
// but we need to add it to the iterator's stack since it has a leaf value
// that needs to be iterated over. It needs to be added to the stack
// before its children below as it comes first.
ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
// We also need to mark it as expanded since we'll be adding any of its
// relevant children below and so don't want the iterator to re-add them
// on its way back up the stack.
ri.expandedParents[n] = struct{}{}
}
// Consume the search prefix. Note that this is safe because if n.prefix is
// longer than the search slice prefixCmp would have been > 0 above and the
// method would have already returned.
search = search[len(n.prefix):]
if len(search) == 0 {
// We've exhausted the search key but we are not at a leaf. That means all
// children are greater than the search key so a reverse lower bound
// doesn't exist in this subtree. Note that there might still be one in
// the whole radix tree by following a different path somewhere further
// up. If that's the case then the iterator's stack will contain all the
// smaller nodes already and Previous will walk through them correctly.
return
}
// Otherwise, take the lower bound next edge.
idx, lbNode := n.getLowerBoundEdge(search[0])
// From here, we need to update the stack with all values lower than
// the lower bound edge. Since getLowerBoundEdge() returns -1 when the
// search prefix is larger than all edges, we set idx just past the last
// edge index so that all of them can be placed in the stack, since they
// all come before our search prefix.
if idx == -1 {
idx = len(n.edges)
}
// Create stack edges for all the strictly lower edges in this node.
if len(n.edges[:idx]) > 0 {
ri.i.stack = append(ri.i.stack, n.edges[:idx])
}
// Exit if there's no lower bound edge. The stack will have the previous
// nodes already.
if lbNode == nil {
return
}
// Recurse
n = lbNode
}
}
// Previous returns the previous node in reverse order
func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) {
// Initialize our stack if needed
if ri.i.stack == nil && ri.i.node != nil {
ri.i.stack = []edges{
{
edge{node: ri.i.node},
},
}
}
if ri.expandedParents == nil {
ri.expandedParents = make(map[*Node]struct{})
}
for len(ri.i.stack) > 0 {
// Inspect the last element of the stack
n := len(ri.i.stack)
last := ri.i.stack[n-1]
m := len(last)
elem := last[m-1].node
_, alreadyExpanded := ri.expandedParents[elem]
// If this is an internal node and we've not seen it already, we need to
// leave it in the stack so we can return its possible leaf value _after_
// we've recursed through all its children.
if len(elem.edges) > 0 && !alreadyExpanded {
// record that we've seen this node!
ri.expandedParents[elem] = struct{}{}
// push child edges onto stack and skip the rest of the loop to recurse
// into the largest one.
ri.i.stack = append(ri.i.stack, elem.edges)
continue
}
// Remove the node from the stack
if m > 1 {
ri.i.stack[n-1] = last[:m-1]
} else {
ri.i.stack = ri.i.stack[:n-1]
}
// We don't need this state any more as it's no longer in the stack so we
// won't visit it again
if alreadyExpanded {
delete(ri.expandedParents, elem)
}
// If this is a leaf, return it
if elem.leaf != nil {
return elem.leaf.key, elem.leaf.val, true
}
// it's not a leaf so keep walking the stack to find the previous leaf
}
return nil, nil, false
}
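
Reverse iteration mirrors the forward case: seek to the largest key at or below a bound, then walk backwards. A hedged sketch, with the same import-path assumption:

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	for i, k := range []string{"a", "ab", "abc", "b"} {
		r, _, _ = r.Insert([]byte(k), i)
	}

	// Walk backwards from the largest key <= "abz".
	ri := r.Root().ReverseIterator()
	ri.SeekReverseLowerBound([]byte("abz"))
	for k, _, ok := ri.Previous(); ok; k, _, ok = ri.Previous() {
		fmt.Println(string(k)) // abc, ab, a
	}
}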

25
vendor/github.com/hashicorp/go-msgpack/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,25 @@
Copyright (c) 2012, 2013 Ugorji Nwoke.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its contributors may be used
to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

143
vendor/github.com/hashicorp/go-msgpack/codec/0doc.go generated vendored Normal file
View File

@@ -0,0 +1,143 @@
// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
/*
High Performance, Feature-Rich Idiomatic Go encoding library for msgpack and binc .
Supported Serialization formats are:
- msgpack: [https://github.com/msgpack/msgpack]
- binc: [http://github.com/ugorji/binc]
To install:
go get github.com/ugorji/go/codec
The idiomatic Go support is as seen in other encoding packages in
the standard library (ie json, xml, gob, etc).
Rich Feature Set includes:
- Simple but extremely powerful and feature-rich API
- Very High Performance.
Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X.
This was achieved by taking extreme care on:
- managing allocation
- function frame size (important due to Go's use of split stacks),
- reflection use (and by-passing reflection for common types)
- recursion implications
- zero-copy mode (encoding/decoding to byte slice without using temp buffers)
- Correct.
Care was taken to precisely handle corner cases like:
overflows, nil maps and slices, nil value in stream, etc.
- Efficient zero-copying into temporary byte buffers
when encoding into or decoding from a byte slice.
- Standard field renaming via tags
- Encoding from any value
(struct, slice, map, primitives, pointers, interface{}, etc)
- Decoding into pointer to any non-nil typed value
(struct, slice, map, int, float32, bool, string, reflect.Value, etc)
- Supports extension functions to handle the encode/decode of custom types
- Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler
- Schema-less decoding
(decode into a pointer to a nil interface{} as opposed to a typed non-nil value).
Includes Options to configure what specific map or slice type to use
when decoding an encoded list or map into a nil interface{}
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
- Msgpack Specific:
- Provides extension functions to handle spec-defined extensions (binary, timestamp)
- Options to resolve ambiguities in handling raw bytes (as string or []byte)
during schema-less decoding (decoding into a nil interface{})
- RPC Server/Client Codec for msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
- Fast Paths for some container types:
For some container types, we circumvent reflection and its associated overhead
and allocation costs, and encode/decode directly. These types are:
[]interface{}
[]int
[]string
map[interface{}]interface{}
map[int]interface{}
map[string]interface{}
Extension Support
Users can register a function to handle the encoding or decoding of
their custom types.
There are no restrictions on what the custom type can be. Some examples:
type BitSet []int
type BitSet64 uint64
type UUID string
type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
type GifImage struct { ... }
As an illustration, MyStructWithUnexportedFields would normally be
encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can
encode any of these however you like.
RPC
RPC Client and Server Codecs are implemented, so the codecs can be used
with the standard net/rpc package.
Usage
Typical usage model:
// create and configure Handle
var (
bh codec.BincHandle
mh codec.MsgpackHandle
)
mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
// configure extensions
// e.g. for msgpack, define functions and enable Time support for tag 1
// mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn)
// create and use decoder/encoder
var (
r io.Reader
w io.Writer
b []byte
h = &bh // or mh to use msgpack
)
dec = codec.NewDecoder(r, h)
dec = codec.NewDecoderBytes(b, h)
err = dec.Decode(&v)
enc = codec.NewEncoder(w, h)
enc = codec.NewEncoderBytes(&b, h)
err = enc.Encode(v)
//RPC Server
go func() {
for {
conn, err := listener.Accept()
rpcCodec := codec.GoRpc.ServerCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
rpc.ServeCodec(rpcCodec)
}
}()
//RPC Communication (client side)
conn, err = net.Dial("tcp", "localhost:5555")
rpcCodec := codec.GoRpc.ClientCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
client := rpc.NewClientWithCodec(rpcCodec)
Representative Benchmark Results
Run the benchmark suite using:
go test -bi -bench=. -benchmem
To run full benchmark suite (including against vmsgpack and bson),
see notes in ext_dep_test.go
*/
package codec

174
vendor/github.com/hashicorp/go-msgpack/codec/README.md generated vendored Normal file
View File

@@ -0,0 +1,174 @@
# Codec
High Performance and Feature-Rich Idiomatic Go Library providing
encode/decode support for different serialization formats.
Supported Serialization formats are:
- msgpack: [https://github.com/msgpack/msgpack]
- binc: [http://github.com/ugorji/binc]
To install:
go get github.com/ugorji/go/codec
Online documentation: [http://godoc.org/github.com/ugorji/go/codec]
The idiomatic Go support is as seen in other encoding packages in
the standard library (ie json, xml, gob, etc).
Rich Feature Set includes:
- Simple but extremely powerful and feature-rich API
- Very High Performance.
Our extensive benchmarks show us outperforming Gob, Json and Bson by 2-4X.
This was achieved by taking extreme care on:
- managing allocation
- function frame size (important due to Go's use of split stacks),
- reflection use (and by-passing reflection for common types)
- recursion implications
- zero-copy mode (encoding/decoding to byte slice without using temp buffers)
- Correct.
Care was taken to precisely handle corner cases like:
overflows, nil maps and slices, nil value in stream, etc.
- Efficient zero-copying into temporary byte buffers
when encoding into or decoding from a byte slice.
- Standard field renaming via tags
- Encoding from any value
(struct, slice, map, primitives, pointers, interface{}, etc)
- Decoding into pointer to any non-nil typed value
(struct, slice, map, int, float32, bool, string, reflect.Value, etc)
- Supports extension functions to handle the encode/decode of custom types
- Support Go 1.2 encoding.BinaryMarshaler/BinaryUnmarshaler
- Schema-less decoding
(decode into a pointer to a nil interface{} as opposed to a typed non-nil value).
Includes Options to configure what specific map or slice type to use
when decoding an encoded list or map into a nil interface{}
- Provides a RPC Server and Client Codec for net/rpc communication protocol.
- Msgpack Specific:
- Provides extension functions to handle spec-defined extensions (binary, timestamp)
- Options to resolve ambiguities in handling raw bytes (as string or []byte)
during schema-less decoding (decoding into a nil interface{})
- RPC Server/Client Codec for msgpack-rpc protocol defined at:
https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
- Fast Paths for some container types:
For some container types, we circumvent reflection and its associated overhead
and allocation costs, and encode/decode directly. These types are:
[]interface{}
[]int
[]string
map[interface{}]interface{}
map[int]interface{}
map[string]interface{}
## Extension Support
Users can register a function to handle the encoding or decoding of
their custom types.
There are no restrictions on what the custom type can be. Some examples:
type BitSet []int
type BitSet64 uint64
type UUID string
type MyStructWithUnexportedFields struct { a int; b bool; c []int; }
type GifImage struct { ... }
As an illustration, MyStructWithUnexportedFields would normally be
encoded as an empty map because it has no exported fields, while UUID
would be encoded as a string. However, with extension support, you can
encode any of these however you like.
## RPC
RPC Client and Server Codecs are implemented, so the codecs can be used
with the standard net/rpc package.
## Usage
Typical usage model:
// create and configure Handle
var (
bh codec.BincHandle
mh codec.MsgpackHandle
)
mh.MapType = reflect.TypeOf(map[string]interface{}(nil))
// configure extensions
// e.g. for msgpack, define functions and enable Time support for tag 1
// mh.AddExt(reflect.TypeOf(time.Time{}), 1, myMsgpackTimeEncodeExtFn, myMsgpackTimeDecodeExtFn)
// create and use decoder/encoder
var (
r io.Reader
w io.Writer
b []byte
h = &bh // or mh to use msgpack
)
dec = codec.NewDecoder(r, h)
dec = codec.NewDecoderBytes(b, h)
err = dec.Decode(&v)
enc = codec.NewEncoder(w, h)
enc = codec.NewEncoderBytes(&b, h)
err = enc.Encode(v)
//RPC Server
go func() {
for {
conn, err := listener.Accept()
rpcCodec := codec.GoRpc.ServerCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ServerCodec(conn, h)
rpc.ServeCodec(rpcCodec)
}
}()
//RPC Communication (client side)
conn, err = net.Dial("tcp", "localhost:5555")
rpcCodec := codec.GoRpc.ClientCodec(conn, h)
//OR rpcCodec := codec.MsgpackSpecRpc.ClientCodec(conn, h)
client := rpc.NewClientWithCodec(rpcCodec)
## Representative Benchmark Results
A sample run of benchmark using "go test -bi -bench=. -benchmem":
/proc/cpuinfo: Intel(R) Core(TM) i7-2630QM CPU @ 2.00GHz (HT)
..............................................
BENCHMARK INIT: 2013-10-16 11:02:50.345970786 -0400 EDT
To run full benchmark comparing encodings (MsgPack, Binc, JSON, GOB, etc), use: "go test -bench=."
Benchmark:
Struct recursive Depth: 1
ApproxDeepSize Of benchmark Struct: 4694 bytes
Benchmark One-Pass Run:
v-msgpack: len: 1600 bytes
bson: len: 3025 bytes
msgpack: len: 1560 bytes
binc: len: 1187 bytes
gob: len: 1972 bytes
json: len: 2538 bytes
..............................................
PASS
Benchmark__Msgpack____Encode 50000 54359 ns/op 14953 B/op 83 allocs/op
Benchmark__Msgpack____Decode 10000 106531 ns/op 14990 B/op 410 allocs/op
Benchmark__Binc_NoSym_Encode 50000 53956 ns/op 14966 B/op 83 allocs/op
Benchmark__Binc_NoSym_Decode 10000 103751 ns/op 14529 B/op 386 allocs/op
Benchmark__Binc_Sym___Encode 50000 65961 ns/op 17130 B/op 88 allocs/op
Benchmark__Binc_Sym___Decode 10000 106310 ns/op 15857 B/op 287 allocs/op
Benchmark__Gob________Encode 10000 135944 ns/op 21189 B/op 237 allocs/op
Benchmark__Gob________Decode 5000 405390 ns/op 83460 B/op 1841 allocs/op
Benchmark__Json_______Encode 20000 79412 ns/op 13874 B/op 102 allocs/op
Benchmark__Json_______Decode 10000 247979 ns/op 14202 B/op 493 allocs/op
Benchmark__Bson_______Encode 10000 121762 ns/op 27814 B/op 514 allocs/op
Benchmark__Bson_______Decode 10000 162126 ns/op 16514 B/op 789 allocs/op
Benchmark__VMsgpack___Encode 50000 69155 ns/op 12370 B/op 344 allocs/op
Benchmark__VMsgpack___Decode 10000 151609 ns/op 20307 B/op 571 allocs/op
ok ugorji.net/codec 30.827s
To run full benchmark suite (including against vmsgpack and bson),
see notes in ext\_dep\_test.go

786
vendor/github.com/hashicorp/go-msgpack/codec/binc.go generated vendored Normal file
View File

@@ -0,0 +1,786 @@
// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
import (
"math"
// "reflect"
// "sync/atomic"
"time"
//"fmt"
)
const bincDoPrune = true // No longer needed. Needed before as C lib did not support pruning.
//var _ = fmt.Printf
// vd as low 4 bits (there are 16 slots)
const (
bincVdSpecial byte = iota
bincVdPosInt
bincVdNegInt
bincVdFloat
bincVdString
bincVdByteArray
bincVdArray
bincVdMap
bincVdTimestamp
bincVdSmallInt
bincVdUnicodeOther
bincVdSymbol
bincVdDecimal
_ // open slot
_ // open slot
bincVdCustomExt = 0x0f
)
const (
bincSpNil byte = iota
bincSpFalse
bincSpTrue
bincSpNan
bincSpPosInf
bincSpNegInf
bincSpZeroFloat
bincSpZero
bincSpNegOne
)
const (
bincFlBin16 byte = iota
bincFlBin32
_ // bincFlBin32e
bincFlBin64
_ // bincFlBin64e
// others not currently supported
)
type bincEncDriver struct {
w encWriter
m map[string]uint16 // symbols
s uint32 // symbols sequencer
b [8]byte
}
func (e *bincEncDriver) isBuiltinType(rt uintptr) bool {
return rt == timeTypId
}
func (e *bincEncDriver) encodeBuiltin(rt uintptr, v interface{}) {
switch rt {
case timeTypId:
bs := encodeTime(v.(time.Time))
e.w.writen1(bincVdTimestamp<<4 | uint8(len(bs)))
e.w.writeb(bs)
}
}
func (e *bincEncDriver) encodeNil() {
e.w.writen1(bincVdSpecial<<4 | bincSpNil)
}
func (e *bincEncDriver) encodeBool(b bool) {
if b {
e.w.writen1(bincVdSpecial<<4 | bincSpTrue)
} else {
e.w.writen1(bincVdSpecial<<4 | bincSpFalse)
}
}
func (e *bincEncDriver) encodeFloat32(f float32) {
if f == 0 {
e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
return
}
e.w.writen1(bincVdFloat<<4 | bincFlBin32)
e.w.writeUint32(math.Float32bits(f))
}
func (e *bincEncDriver) encodeFloat64(f float64) {
if f == 0 {
e.w.writen1(bincVdSpecial<<4 | bincSpZeroFloat)
return
}
bigen.PutUint64(e.b[:], math.Float64bits(f))
if bincDoPrune {
i := 7
for ; i >= 0 && (e.b[i] == 0); i-- {
}
i++
if i <= 6 {
e.w.writen1(bincVdFloat<<4 | 0x8 | bincFlBin64)
e.w.writen1(byte(i))
e.w.writeb(e.b[:i])
return
}
}
e.w.writen1(bincVdFloat<<4 | bincFlBin64)
e.w.writeb(e.b[:])
}
func (e *bincEncDriver) encIntegerPrune(bd byte, pos bool, v uint64, lim uint8) {
if lim == 4 {
bigen.PutUint32(e.b[:lim], uint32(v))
} else {
bigen.PutUint64(e.b[:lim], v)
}
if bincDoPrune {
i := pruneSignExt(e.b[:lim], pos)
e.w.writen1(bd | lim - 1 - byte(i))
e.w.writeb(e.b[i:lim])
} else {
e.w.writen1(bd | lim - 1)
e.w.writeb(e.b[:lim])
}
}
func (e *bincEncDriver) encodeInt(v int64) {
const nbd byte = bincVdNegInt << 4
switch {
case v >= 0:
e.encUint(bincVdPosInt<<4, true, uint64(v))
case v == -1:
e.w.writen1(bincVdSpecial<<4 | bincSpNegOne)
default:
e.encUint(bincVdNegInt<<4, false, uint64(-v))
}
}
func (e *bincEncDriver) encodeUint(v uint64) {
e.encUint(bincVdPosInt<<4, true, v)
}
func (e *bincEncDriver) encUint(bd byte, pos bool, v uint64) {
switch {
case v == 0:
e.w.writen1(bincVdSpecial<<4 | bincSpZero)
case pos && v >= 1 && v <= 16:
e.w.writen1(bincVdSmallInt<<4 | byte(v-1))
case v <= math.MaxUint8:
e.w.writen2(bd|0x0, byte(v))
case v <= math.MaxUint16:
e.w.writen1(bd | 0x01)
e.w.writeUint16(uint16(v))
case v <= math.MaxUint32:
e.encIntegerPrune(bd, pos, v, 4)
default:
e.encIntegerPrune(bd, pos, v, 8)
}
}
func (e *bincEncDriver) encodeExtPreamble(xtag byte, length int) {
e.encLen(bincVdCustomExt<<4, uint64(length))
e.w.writen1(xtag)
}
func (e *bincEncDriver) encodeArrayPreamble(length int) {
e.encLen(bincVdArray<<4, uint64(length))
}
func (e *bincEncDriver) encodeMapPreamble(length int) {
e.encLen(bincVdMap<<4, uint64(length))
}
func (e *bincEncDriver) encodeString(c charEncoding, v string) {
l := uint64(len(v))
e.encBytesLen(c, l)
if l > 0 {
e.w.writestr(v)
}
}
func (e *bincEncDriver) encodeSymbol(v string) {
// if WriteSymbolsNoRefs {
// e.encodeString(c_UTF8, v)
// return
// }
//symbols only offer benefit when string length > 1.
//This is because strings with length 1 take only 2 bytes to store
//(bd with embedded length, and single byte for string val).
l := len(v)
switch l {
case 0:
e.encBytesLen(c_UTF8, 0)
return
case 1:
e.encBytesLen(c_UTF8, 1)
e.w.writen1(v[0])
return
}
if e.m == nil {
e.m = make(map[string]uint16, 16)
}
ui, ok := e.m[v]
if ok {
if ui <= math.MaxUint8 {
e.w.writen2(bincVdSymbol<<4, byte(ui))
} else {
e.w.writen1(bincVdSymbol<<4 | 0x8)
e.w.writeUint16(ui)
}
} else {
e.s++
ui = uint16(e.s)
//ui = uint16(atomic.AddUint32(&e.s, 1))
e.m[v] = ui
var lenprec uint8
switch {
case l <= math.MaxUint8:
// lenprec = 0
case l <= math.MaxUint16:
lenprec = 1
case int64(l) <= math.MaxUint32:
lenprec = 2
default:
lenprec = 3
}
if ui <= math.MaxUint8 {
e.w.writen2(bincVdSymbol<<4|0x0|0x4|lenprec, byte(ui))
} else {
e.w.writen1(bincVdSymbol<<4 | 0x8 | 0x4 | lenprec)
e.w.writeUint16(ui)
}
switch lenprec {
case 0:
e.w.writen1(byte(l))
case 1:
e.w.writeUint16(uint16(l))
case 2:
e.w.writeUint32(uint32(l))
default:
e.w.writeUint64(uint64(l))
}
e.w.writestr(v)
}
}
func (e *bincEncDriver) encodeStringBytes(c charEncoding, v []byte) {
l := uint64(len(v))
e.encBytesLen(c, l)
if l > 0 {
e.w.writeb(v)
}
}
func (e *bincEncDriver) encBytesLen(c charEncoding, length uint64) {
//TODO: support bincUnicodeOther (for now, just use string or bytearray)
if c == c_RAW {
e.encLen(bincVdByteArray<<4, length)
} else {
e.encLen(bincVdString<<4, length)
}
}
func (e *bincEncDriver) encLen(bd byte, l uint64) {
if l < 12 {
e.w.writen1(bd | uint8(l+4))
} else {
e.encLenNumber(bd, l)
}
}
func (e *bincEncDriver) encLenNumber(bd byte, v uint64) {
switch {
case v <= math.MaxUint8:
e.w.writen2(bd, byte(v))
case v <= math.MaxUint16:
e.w.writen1(bd | 0x01)
e.w.writeUint16(uint16(v))
case v <= math.MaxUint32:
e.w.writen1(bd | 0x02)
e.w.writeUint32(uint32(v))
default:
e.w.writen1(bd | 0x03)
e.w.writeUint64(uint64(v))
}
}
//------------------------------------
type bincDecDriver struct {
r decReader
bdRead bool
bdType valueType
bd byte
vd byte
vs byte
b [8]byte
m map[uint32]string // symbols (use uint32 as key, as map optimizes for it)
}
func (d *bincDecDriver) initReadNext() {
if d.bdRead {
return
}
d.bd = d.r.readn1()
d.vd = d.bd >> 4
d.vs = d.bd & 0x0f
d.bdRead = true
d.bdType = valueTypeUnset
}
func (d *bincDecDriver) currentEncodedType() valueType {
if d.bdType == valueTypeUnset {
switch d.vd {
case bincVdSpecial:
switch d.vs {
case bincSpNil:
d.bdType = valueTypeNil
case bincSpFalse, bincSpTrue:
d.bdType = valueTypeBool
case bincSpNan, bincSpNegInf, bincSpPosInf, bincSpZeroFloat:
d.bdType = valueTypeFloat
case bincSpZero:
d.bdType = valueTypeUint
case bincSpNegOne:
d.bdType = valueTypeInt
default:
decErr("currentEncodedType: Unrecognized special value 0x%x", d.vs)
}
case bincVdSmallInt:
d.bdType = valueTypeUint
case bincVdPosInt:
d.bdType = valueTypeUint
case bincVdNegInt:
d.bdType = valueTypeInt
case bincVdFloat:
d.bdType = valueTypeFloat
case bincVdString:
d.bdType = valueTypeString
case bincVdSymbol:
d.bdType = valueTypeSymbol
case bincVdByteArray:
d.bdType = valueTypeBytes
case bincVdTimestamp:
d.bdType = valueTypeTimestamp
case bincVdCustomExt:
d.bdType = valueTypeExt
case bincVdArray:
d.bdType = valueTypeArray
case bincVdMap:
d.bdType = valueTypeMap
default:
decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.vd)
}
}
return d.bdType
}
func (d *bincDecDriver) tryDecodeAsNil() bool {
if d.bd == bincVdSpecial<<4|bincSpNil {
d.bdRead = false
return true
}
return false
}
func (d *bincDecDriver) isBuiltinType(rt uintptr) bool {
return rt == timeTypId
}
func (d *bincDecDriver) decodeBuiltin(rt uintptr, v interface{}) {
switch rt {
case timeTypId:
if d.vd != bincVdTimestamp {
decErr("Invalid d.vd. Expecting 0x%x. Received: 0x%x", bincVdTimestamp, d.vd)
}
tt, err := decodeTime(d.r.readn(int(d.vs)))
if err != nil {
panic(err)
}
var vt *time.Time = v.(*time.Time)
*vt = tt
d.bdRead = false
}
}
func (d *bincDecDriver) decFloatPre(vs, defaultLen byte) {
if vs&0x8 == 0 {
d.r.readb(d.b[0:defaultLen])
} else {
l := d.r.readn1()
if l > 8 {
decErr("At most 8 bytes used to represent float. Received: %v bytes", l)
}
for i := l; i < 8; i++ {
d.b[i] = 0
}
d.r.readb(d.b[0:l])
}
}
func (d *bincDecDriver) decFloat() (f float64) {
//if true { f = math.Float64frombits(d.r.readUint64()); break; }
switch vs := d.vs; vs & 0x7 {
case bincFlBin32:
d.decFloatPre(vs, 4)
f = float64(math.Float32frombits(bigen.Uint32(d.b[0:4])))
case bincFlBin64:
d.decFloatPre(vs, 8)
f = math.Float64frombits(bigen.Uint64(d.b[0:8]))
default:
decErr("only float32 and float64 are supported. d.vd: 0x%x, d.vs: 0x%x", d.vd, d.vs)
}
return
}
func (d *bincDecDriver) decUint() (v uint64) {
// need to inline the code (interface conversion and type assertion expensive)
switch d.vs {
case 0:
v = uint64(d.r.readn1())
case 1:
d.r.readb(d.b[6:])
v = uint64(bigen.Uint16(d.b[6:]))
case 2:
d.b[4] = 0
d.r.readb(d.b[5:])
v = uint64(bigen.Uint32(d.b[4:]))
case 3:
d.r.readb(d.b[4:])
v = uint64(bigen.Uint32(d.b[4:]))
case 4, 5, 6:
lim := int(7 - d.vs)
d.r.readb(d.b[lim:])
for i := 0; i < lim; i++ {
d.b[i] = 0
}
v = uint64(bigen.Uint64(d.b[:]))
case 7:
d.r.readb(d.b[:])
v = uint64(bigen.Uint64(d.b[:]))
default:
decErr("unsigned integers with greater than 64 bits of precision not supported")
}
return
}
func (d *bincDecDriver) decIntAny() (ui uint64, i int64, neg bool) {
switch d.vd {
case bincVdPosInt:
ui = d.decUint()
i = int64(ui)
case bincVdNegInt:
ui = d.decUint()
i = -(int64(ui))
neg = true
case bincVdSmallInt:
i = int64(d.vs) + 1
ui = uint64(d.vs) + 1
case bincVdSpecial:
switch d.vs {
case bincSpZero:
//i = 0
case bincSpNegOne:
neg = true
ui = 1
i = -1
default:
decErr("numeric decode fails for special value: d.vs: 0x%x", d.vs)
}
default:
decErr("number can only be decoded from uint or int values. d.bd: 0x%x, d.vd: 0x%x", d.bd, d.vd)
}
return
}
func (d *bincDecDriver) decodeInt(bitsize uint8) (i int64) {
_, i, _ = d.decIntAny()
checkOverflow(0, i, bitsize)
d.bdRead = false
return
}
func (d *bincDecDriver) decodeUint(bitsize uint8) (ui uint64) {
ui, i, neg := d.decIntAny()
if neg {
decErr("Assigning negative signed value: %v, to unsigned type", i)
}
checkOverflow(ui, 0, bitsize)
d.bdRead = false
return
}
func (d *bincDecDriver) decodeFloat(chkOverflow32 bool) (f float64) {
switch d.vd {
case bincVdSpecial:
d.bdRead = false
switch d.vs {
case bincSpNan:
return math.NaN()
case bincSpPosInf:
return math.Inf(1)
case bincSpZeroFloat, bincSpZero:
return
case bincSpNegInf:
return math.Inf(-1)
default:
decErr("Invalid d.vs decoding float where d.vd=bincVdSpecial: %v", d.vs)
}
case bincVdFloat:
f = d.decFloat()
default:
_, i, _ := d.decIntAny()
f = float64(i)
}
checkOverflowFloat32(f, chkOverflow32)
d.bdRead = false
return
}
// bool can be decoded from bool only (single byte).
func (d *bincDecDriver) decodeBool() (b bool) {
switch d.bd {
case (bincVdSpecial<<4 | bincSpFalse):
// b = false
case (bincVdSpecial<<4 | bincSpTrue):
b = true
default:
decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
}
d.bdRead = false
return
}
func (d *bincDecDriver) readMapLen() (length int) {
if d.vd != bincVdMap {
decErr("Invalid d.vd for map. Expecting 0x%x. Got: 0x%x", bincVdMap, d.vd)
}
length = d.decLen()
d.bdRead = false
return
}
func (d *bincDecDriver) readArrayLen() (length int) {
if d.vd != bincVdArray {
decErr("Invalid d.vd for array. Expecting 0x%x. Got: 0x%x", bincVdArray, d.vd)
}
length = d.decLen()
d.bdRead = false
return
}
func (d *bincDecDriver) decLen() int {
if d.vs <= 3 {
return int(d.decUint())
}
return int(d.vs - 4)
}
func (d *bincDecDriver) decodeString() (s string) {
switch d.vd {
case bincVdString, bincVdByteArray:
if length := d.decLen(); length > 0 {
s = string(d.r.readn(length))
}
case bincVdSymbol:
//from vs: extract numSymbolBytes, containsStringVal, strLenPrecision,
//extract symbol
//if containsStringVal, read it and put in map
//else look in map for string value
var symbol uint32
vs := d.vs
//fmt.Printf(">>>> d.vs: 0b%b, & 0x8: %v, & 0x4: %v\n", d.vs, vs & 0x8, vs & 0x4)
if vs&0x8 == 0 {
symbol = uint32(d.r.readn1())
} else {
symbol = uint32(d.r.readUint16())
}
if d.m == nil {
d.m = make(map[uint32]string, 16)
}
if vs&0x4 == 0 {
s = d.m[symbol]
} else {
var slen int
switch vs & 0x3 {
case 0:
slen = int(d.r.readn1())
case 1:
slen = int(d.r.readUint16())
case 2:
slen = int(d.r.readUint32())
case 3:
slen = int(d.r.readUint64())
}
s = string(d.r.readn(slen))
d.m[symbol] = s
}
default:
decErr("Invalid d.vd for string. Expecting string:0x%x, bytearray:0x%x or symbol: 0x%x. Got: 0x%x",
bincVdString, bincVdByteArray, bincVdSymbol, d.vd)
}
d.bdRead = false
return
}
func (d *bincDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) {
var clen int
switch d.vd {
case bincVdString, bincVdByteArray:
clen = d.decLen()
default:
decErr("Invalid d.vd for bytes. Expecting string:0x%x or bytearray:0x%x. Got: 0x%x",
bincVdString, bincVdByteArray, d.vd)
}
if clen > 0 {
// if no contents in stream, don't update the passed byteslice
if len(bs) != clen {
if len(bs) > clen {
bs = bs[:clen]
} else {
bs = make([]byte, clen)
}
bsOut = bs
changed = true
}
d.r.readb(bs)
}
d.bdRead = false
return
}
func (d *bincDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
switch d.vd {
case bincVdCustomExt:
l := d.decLen()
xtag = d.r.readn1()
if verifyTag && xtag != tag {
decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
}
xbs = d.r.readn(l)
case bincVdByteArray:
xbs, _ = d.decodeBytes(nil)
default:
decErr("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.vd)
}
d.bdRead = false
return
}
func (d *bincDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
d.initReadNext()
switch d.vd {
case bincVdSpecial:
switch d.vs {
case bincSpNil:
vt = valueTypeNil
case bincSpFalse:
vt = valueTypeBool
v = false
case bincSpTrue:
vt = valueTypeBool
v = true
case bincSpNan:
vt = valueTypeFloat
v = math.NaN()
case bincSpPosInf:
vt = valueTypeFloat
v = math.Inf(1)
case bincSpNegInf:
vt = valueTypeFloat
v = math.Inf(-1)
case bincSpZeroFloat:
vt = valueTypeFloat
v = float64(0)
case bincSpZero:
vt = valueTypeUint
v = int64(0) // int8(0)
case bincSpNegOne:
vt = valueTypeInt
v = int64(-1) // int8(-1)
default:
decErr("decodeNaked: Unrecognized special value 0x%x", d.vs)
}
case bincVdSmallInt:
vt = valueTypeUint
v = uint64(int8(d.vs)) + 1 // int8(d.vs) + 1
case bincVdPosInt:
vt = valueTypeUint
v = d.decUint()
case bincVdNegInt:
vt = valueTypeInt
v = -(int64(d.decUint()))
case bincVdFloat:
vt = valueTypeFloat
v = d.decFloat()
case bincVdSymbol:
vt = valueTypeSymbol
v = d.decodeString()
case bincVdString:
vt = valueTypeString
v = d.decodeString()
case bincVdByteArray:
vt = valueTypeBytes
v, _ = d.decodeBytes(nil)
case bincVdTimestamp:
vt = valueTypeTimestamp
tt, err := decodeTime(d.r.readn(int(d.vs)))
if err != nil {
panic(err)
}
v = tt
case bincVdCustomExt:
vt = valueTypeExt
l := d.decLen()
var re RawExt
re.Tag = d.r.readn1()
re.Data = d.r.readn(l)
v = &re
vt = valueTypeExt
case bincVdArray:
vt = valueTypeArray
decodeFurther = true
case bincVdMap:
vt = valueTypeMap
decodeFurther = true
default:
decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.vd)
}
if !decodeFurther {
d.bdRead = false
}
return
}
//------------------------------------
//BincHandle is a Handle for the Binc Schema-Free Encoding Format
//defined at https://github.com/ugorji/binc .
//
//BincHandle currently supports all Binc features with the following EXCEPTIONS:
// - only integers up to 64 bits of precision are supported.
// big integers are unsupported.
// - Only IEEE 754 binary32 and binary64 floats are supported (ie Go float32 and float64 types).
// extended precision and decimal IEEE 754 floats are unsupported.
// - Only UTF-8 strings supported.
// Unicode_Other Binc types (UTF16, UTF32) are currently unsupported.
//Note that these EXCEPTIONS are temporary and full support is possible and may happen soon.
type BincHandle struct {
BasicHandle
}
func (h *BincHandle) newEncDriver(w encWriter) encDriver {
return &bincEncDriver{w: w}
}
func (h *BincHandle) newDecDriver(r decReader) decDriver {
return &bincDecDriver{r: r}
}
func (_ *BincHandle) writeExt() bool {
return true
}
func (h *BincHandle) getBasicHandle() *BasicHandle {
return &h.BasicHandle
}
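
Following the usage model from the package documentation above, here is a minimal sketch of round-tripping a value through BincHandle, assuming the vendored import path github.com/hashicorp/go-msgpack/codec:

package main

import (
	"fmt"

	"github.com/hashicorp/go-msgpack/codec"
)

func main() {
	var bh codec.BincHandle

	in := map[string]interface{}{"name": "raft", "nodes": 3}

	// Encode into a byte slice.
	var b []byte
	if err := codec.NewEncoderBytes(&b, &bh).Encode(in); err != nil {
		panic(err)
	}

	// Schema-less decode back into a nil interface.
	var out interface{}
	if err := codec.NewDecoderBytes(b, &bh).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Printf("%#v\n", out)
}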

1048
vendor/github.com/hashicorp/go-msgpack/codec/decode.go generated vendored Normal file

File diff suppressed because it is too large

1001
vendor/github.com/hashicorp/go-msgpack/codec/encode.go generated vendored Normal file

File diff suppressed because it is too large

596
vendor/github.com/hashicorp/go-msgpack/codec/helper.go generated vendored Normal file
View File

@@ -0,0 +1,596 @@
// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
// Contains code shared by both encode and decode.
import (
"encoding/binary"
"fmt"
"math"
"reflect"
"sort"
"strings"
"sync"
"time"
"unicode"
"unicode/utf8"
)
const (
structTagName = "codec"
// Support
// encoding.BinaryMarshaler: MarshalBinary() (data []byte, err error)
// encoding.BinaryUnmarshaler: UnmarshalBinary(data []byte) error
// This constant flag will enable or disable it.
supportBinaryMarshal = true
// Each Encoder or Decoder uses a cache of functions based on conditionals,
// so that the conditionals are not run every time.
//
// Either a map or a slice is used to keep track of the functions.
// The map is more natural, but has a higher cost than a slice/array.
// This flag (useMapForCodecCache) controls which is used.
useMapForCodecCache = false
// For some common container types, we can short-circuit an elaborate
// reflection dance and call encode/decode directly.
// The currently supported types are:
// - slices of strings, or id's (int64,uint64) or interfaces.
// - maps of str->str, str->intf, id(int64,uint64)->intf, intf->intf
shortCircuitReflectToFastPath = true
// for debugging, set this to false, to catch panic traces.
// Note that this will always cause rpc tests to fail, since they need io.EOF sent via panic.
recoverPanicToErr = true
// if checkStructForEmptyValue, check structs fields to see if an empty value.
// This could be an expensive call, so possibly disable it.
checkStructForEmptyValue = false
// if derefForIsEmptyValue, deref pointers and interfaces when checking isEmptyValue
derefForIsEmptyValue = false
)
type charEncoding uint8
const (
c_RAW charEncoding = iota
c_UTF8
c_UTF16LE
c_UTF16BE
c_UTF32LE
c_UTF32BE
)
// valueType is the stream type
type valueType uint8
const (
valueTypeUnset valueType = iota
valueTypeNil
valueTypeInt
valueTypeUint
valueTypeFloat
valueTypeBool
valueTypeString
valueTypeSymbol
valueTypeBytes
valueTypeMap
valueTypeArray
valueTypeTimestamp
valueTypeExt
valueTypeInvalid = 0xff
)
var (
bigen = binary.BigEndian
structInfoFieldName = "_struct"
cachedTypeInfo = make(map[uintptr]*typeInfo, 4)
cachedTypeInfoMutex sync.RWMutex
intfSliceTyp = reflect.TypeOf([]interface{}(nil))
intfTyp = intfSliceTyp.Elem()
strSliceTyp = reflect.TypeOf([]string(nil))
boolSliceTyp = reflect.TypeOf([]bool(nil))
uintSliceTyp = reflect.TypeOf([]uint(nil))
uint8SliceTyp = reflect.TypeOf([]uint8(nil))
uint16SliceTyp = reflect.TypeOf([]uint16(nil))
uint32SliceTyp = reflect.TypeOf([]uint32(nil))
uint64SliceTyp = reflect.TypeOf([]uint64(nil))
intSliceTyp = reflect.TypeOf([]int(nil))
int8SliceTyp = reflect.TypeOf([]int8(nil))
int16SliceTyp = reflect.TypeOf([]int16(nil))
int32SliceTyp = reflect.TypeOf([]int32(nil))
int64SliceTyp = reflect.TypeOf([]int64(nil))
float32SliceTyp = reflect.TypeOf([]float32(nil))
float64SliceTyp = reflect.TypeOf([]float64(nil))
mapIntfIntfTyp = reflect.TypeOf(map[interface{}]interface{}(nil))
mapStrIntfTyp = reflect.TypeOf(map[string]interface{}(nil))
mapStrStrTyp = reflect.TypeOf(map[string]string(nil))
mapIntIntfTyp = reflect.TypeOf(map[int]interface{}(nil))
mapInt64IntfTyp = reflect.TypeOf(map[int64]interface{}(nil))
mapUintIntfTyp = reflect.TypeOf(map[uint]interface{}(nil))
mapUint64IntfTyp = reflect.TypeOf(map[uint64]interface{}(nil))
stringTyp = reflect.TypeOf("")
timeTyp = reflect.TypeOf(time.Time{})
rawExtTyp = reflect.TypeOf(RawExt{})
mapBySliceTyp = reflect.TypeOf((*MapBySlice)(nil)).Elem()
binaryMarshalerTyp = reflect.TypeOf((*binaryMarshaler)(nil)).Elem()
binaryUnmarshalerTyp = reflect.TypeOf((*binaryUnmarshaler)(nil)).Elem()
rawExtTypId = reflect.ValueOf(rawExtTyp).Pointer()
intfTypId = reflect.ValueOf(intfTyp).Pointer()
timeTypId = reflect.ValueOf(timeTyp).Pointer()
intfSliceTypId = reflect.ValueOf(intfSliceTyp).Pointer()
strSliceTypId = reflect.ValueOf(strSliceTyp).Pointer()
boolSliceTypId = reflect.ValueOf(boolSliceTyp).Pointer()
uintSliceTypId = reflect.ValueOf(uintSliceTyp).Pointer()
uint8SliceTypId = reflect.ValueOf(uint8SliceTyp).Pointer()
uint16SliceTypId = reflect.ValueOf(uint16SliceTyp).Pointer()
uint32SliceTypId = reflect.ValueOf(uint32SliceTyp).Pointer()
uint64SliceTypId = reflect.ValueOf(uint64SliceTyp).Pointer()
intSliceTypId = reflect.ValueOf(intSliceTyp).Pointer()
int8SliceTypId = reflect.ValueOf(int8SliceTyp).Pointer()
int16SliceTypId = reflect.ValueOf(int16SliceTyp).Pointer()
int32SliceTypId = reflect.ValueOf(int32SliceTyp).Pointer()
int64SliceTypId = reflect.ValueOf(int64SliceTyp).Pointer()
float32SliceTypId = reflect.ValueOf(float32SliceTyp).Pointer()
float64SliceTypId = reflect.ValueOf(float64SliceTyp).Pointer()
mapStrStrTypId = reflect.ValueOf(mapStrStrTyp).Pointer()
mapIntfIntfTypId = reflect.ValueOf(mapIntfIntfTyp).Pointer()
mapStrIntfTypId = reflect.ValueOf(mapStrIntfTyp).Pointer()
mapIntIntfTypId = reflect.ValueOf(mapIntIntfTyp).Pointer()
mapInt64IntfTypId = reflect.ValueOf(mapInt64IntfTyp).Pointer()
mapUintIntfTypId = reflect.ValueOf(mapUintIntfTyp).Pointer()
mapUint64IntfTypId = reflect.ValueOf(mapUint64IntfTyp).Pointer()
// Id = reflect.ValueOf().Pointer()
// mapBySliceTypId = reflect.ValueOf(mapBySliceTyp).Pointer()
binaryMarshalerTypId = reflect.ValueOf(binaryMarshalerTyp).Pointer()
binaryUnmarshalerTypId = reflect.ValueOf(binaryUnmarshalerTyp).Pointer()
intBitsize uint8 = uint8(reflect.TypeOf(int(0)).Bits())
uintBitsize uint8 = uint8(reflect.TypeOf(uint(0)).Bits())
bsAll0x00 = []byte{0, 0, 0, 0, 0, 0, 0, 0}
bsAll0xff = []byte{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
)
type binaryUnmarshaler interface {
UnmarshalBinary(data []byte) error
}
type binaryMarshaler interface {
MarshalBinary() (data []byte, err error)
}
// MapBySlice represents a slice which should be encoded as a map in the stream.
// The slice contains a sequence of key-value pairs.
type MapBySlice interface {
MapBySlice()
}
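// Illustrative example (not from the original source): a named slice type
// opts in by declaring the marker method, e.g.
//
// type PairList []interface{} // {k1, v1, k2, v2, ...}
// func (PairList) MapBySlice() {}
//
// Encoding a PairList then emits a map of len/2 key-value entries rather
// than an array.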
// WARNING: DO NOT USE DIRECTLY. EXPORTED FOR GODOC BENEFIT. WILL BE REMOVED.
//
// BasicHandle encapsulates the common options and extension functions.
type BasicHandle struct {
extHandle
EncodeOptions
DecodeOptions
}
// Handle is the interface for a specific encoding format.
//
// Typically, a Handle is pre-configured before first time use,
// and not modified while in use. Such a pre-configured Handle
// is safe for concurrent access.
type Handle interface {
writeExt() bool
getBasicHandle() *BasicHandle
newEncDriver(w encWriter) encDriver
newDecDriver(r decReader) decDriver
}
// RawExt represents raw unprocessed extension data.
type RawExt struct {
Tag byte
Data []byte
}
type extTypeTagFn struct {
rtid uintptr
rt reflect.Type
tag byte
encFn func(reflect.Value) ([]byte, error)
decFn func(reflect.Value, []byte) error
}
type extHandle []*extTypeTagFn
// AddExt registers an encode and decode function for a reflect.Type.
// Note that the type must be a named type, and specifically not
// a pointer or Interface. An error is returned if that is not honored.
//
// To Deregister an ext, call AddExt with 0 tag, nil encfn and nil decfn.
func (o *extHandle) AddExt(
rt reflect.Type,
tag byte,
encfn func(reflect.Value) ([]byte, error),
decfn func(reflect.Value, []byte) error,
) (err error) {
// o is a pointer, because we may need to initialize it
if rt.PkgPath() == "" || rt.Kind() == reflect.Interface {
err = fmt.Errorf("codec.Handle.AddExt: Takes named type, especially not a pointer or interface: %T",
reflect.Zero(rt).Interface())
return
}
// o cannot be nil, since it is always embedded in a Handle.
// if nil, let it panic.
// if o == nil {
// err = errors.New("codec.Handle.AddExt: extHandle cannot be a nil pointer.")
// return
// }
rtid := reflect.ValueOf(rt).Pointer()
for _, v := range *o {
if v.rtid == rtid {
v.tag, v.encFn, v.decFn = tag, encfn, decfn
return
}
}
*o = append(*o, &extTypeTagFn{rtid, rt, tag, encfn, decfn})
return
}
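// Illustrative usage (not from the original source): register a custom
// extension under tag 1 for a hypothetical named type ID.
//
// type ID [8]byte
// var h MsgpackHandle
// err := h.AddExt(reflect.TypeOf(ID{}), 1,
// func(rv reflect.Value) ([]byte, error) {
// v := rv.Interface().(ID)
// return v[:], nil
// },
// func(rv reflect.Value, bs []byte) error {
// var v ID
// copy(v[:], bs)
// rv.Set(reflect.ValueOf(v))
// return nil
// })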
func (o extHandle) getExt(rtid uintptr) *extTypeTagFn {
for _, v := range o {
if v.rtid == rtid {
return v
}
}
return nil
}
func (o extHandle) getExtForTag(tag byte) *extTypeTagFn {
for _, v := range o {
if v.tag == tag {
return v
}
}
return nil
}
func (o extHandle) getDecodeExtForTag(tag byte) (
rv reflect.Value, fn func(reflect.Value, []byte) error) {
if x := o.getExtForTag(tag); x != nil {
// ext is only registered for base
rv = reflect.New(x.rt).Elem()
fn = x.decFn
}
return
}
func (o extHandle) getDecodeExt(rtid uintptr) (tag byte, fn func(reflect.Value, []byte) error) {
if x := o.getExt(rtid); x != nil {
tag = x.tag
fn = x.decFn
}
return
}
func (o extHandle) getEncodeExt(rtid uintptr) (tag byte, fn func(reflect.Value) ([]byte, error)) {
if x := o.getExt(rtid); x != nil {
tag = x.tag
fn = x.encFn
}
return
}
type structFieldInfo struct {
encName string // encode name
// only one of 'i' or 'is' can be set. If 'i' is -1, then 'is' has been set.
is []int // (recursive/embedded) field index in struct
i int16 // field index in struct
omitEmpty bool
toArray bool // if field is _struct, is the toArray set?
// tag string // tag
// name string // field name
// encNameBs []byte // encoded name as byte stream
// ikind int // kind of the field as an int i.e. int(reflect.Kind)
}
func parseStructFieldInfo(fname string, stag string) *structFieldInfo {
if fname == "" {
panic("parseStructFieldInfo: No Field Name")
}
si := structFieldInfo{
// name: fname,
encName: fname,
// tag: stag,
}
if stag != "" {
for i, s := range strings.Split(stag, ",") {
if i == 0 {
if s != "" {
si.encName = s
}
} else {
switch s {
case "omitempty":
si.omitEmpty = true
case "toarray":
si.toArray = true
}
}
}
}
// si.encNameBs = []byte(si.encName)
return &si
}
type sfiSortedByEncName []*structFieldInfo
func (p sfiSortedByEncName) Len() int {
return len(p)
}
func (p sfiSortedByEncName) Less(i, j int) bool {
return p[i].encName < p[j].encName
}
func (p sfiSortedByEncName) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
// typeInfo keeps information about each type referenced in the encode/decode sequence.
//
// During an encode/decode sequence, we work as below:
// - If base is a built in type, en/decode base value
// - If base is registered as an extension, en/decode base value
// - If type is binary(M/Unm)arshaler, call Binary(M/Unm)arshal method
// - Else decode appropriately based on the reflect.Kind
type typeInfo struct {
sfi []*structFieldInfo // sorted. Used when enc/dec struct to map.
sfip []*structFieldInfo // unsorted. Used when enc/dec struct to array.
rt reflect.Type
rtid uintptr
// baseId gives pointer to the base reflect.Type, after dereferencing
// the pointers. E.g. base type of ***time.Time is time.Time.
base reflect.Type
baseId uintptr
baseIndir int8 // number of indirections to get to base
mbs bool // base type (T or *T) is a MapBySlice
m bool // base type (T or *T) is a binaryMarshaler
unm bool // base type (T or *T) is a binaryUnmarshaler
mIndir int8 // number of indirections to get to binaryMarshaler type
unmIndir int8 // number of indirections to get to binaryUnmarshaler type
toArray bool // whether this (struct) type should be encoded as an array
}
func (ti *typeInfo) indexForEncName(name string) int {
//tisfi := ti.sfi
const binarySearchThreshold = 16
if sfilen := len(ti.sfi); sfilen < binarySearchThreshold {
// linear search. faster than binary search in my testing up to 16-field structs.
for i, si := range ti.sfi {
if si.encName == name {
return i
}
}
} else {
// binary search. adapted from sort/search.go.
h, i, j := 0, 0, sfilen
for i < j {
h = i + (j-i)/2
if ti.sfi[h].encName < name {
i = h + 1
} else {
j = h
}
}
if i < sfilen && ti.sfi[i].encName == name {
return i
}
}
return -1
}
func getTypeInfo(rtid uintptr, rt reflect.Type) (pti *typeInfo) {
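// Fast path: consult the cache under the read lock; on a miss, take the
// write lock and re-check before computing (double-checked locking).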
var ok bool
cachedTypeInfoMutex.RLock()
pti, ok = cachedTypeInfo[rtid]
cachedTypeInfoMutex.RUnlock()
if ok {
return
}
cachedTypeInfoMutex.Lock()
defer cachedTypeInfoMutex.Unlock()
if pti, ok = cachedTypeInfo[rtid]; ok {
return
}
ti := typeInfo{rt: rt, rtid: rtid}
pti = &ti
var indir int8
if ok, indir = implementsIntf(rt, binaryMarshalerTyp); ok {
ti.m, ti.mIndir = true, indir
}
if ok, indir = implementsIntf(rt, binaryUnmarshalerTyp); ok {
ti.unm, ti.unmIndir = true, indir
}
if ok, _ = implementsIntf(rt, mapBySliceTyp); ok {
ti.mbs = true
}
pt := rt
var ptIndir int8
// for ; pt.Kind() == reflect.Ptr; pt, ptIndir = pt.Elem(), ptIndir+1 { }
for pt.Kind() == reflect.Ptr {
pt = pt.Elem()
ptIndir++
}
if ptIndir == 0 {
ti.base = rt
ti.baseId = rtid
} else {
ti.base = pt
ti.baseId = reflect.ValueOf(pt).Pointer()
ti.baseIndir = ptIndir
}
if rt.Kind() == reflect.Struct {
var siInfo *structFieldInfo
if f, ok := rt.FieldByName(structInfoFieldName); ok {
siInfo = parseStructFieldInfo(structInfoFieldName, f.Tag.Get(structTagName))
ti.toArray = siInfo.toArray
}
sfip := make([]*structFieldInfo, 0, rt.NumField())
rgetTypeInfo(rt, nil, make(map[string]bool), &sfip, siInfo)
// // try to put all si close together
// const tryToPutAllStructFieldInfoTogether = true
// if tryToPutAllStructFieldInfoTogether {
// sfip2 := make([]structFieldInfo, len(sfip))
// for i, si := range sfip {
// sfip2[i] = *si
// }
// for i := range sfip {
// sfip[i] = &sfip2[i]
// }
// }
ti.sfip = make([]*structFieldInfo, len(sfip))
ti.sfi = make([]*structFieldInfo, len(sfip))
copy(ti.sfip, sfip)
sort.Sort(sfiSortedByEncName(sfip))
copy(ti.sfi, sfip)
}
// sfi = sfip
cachedTypeInfo[rtid] = pti
return
}
func rgetTypeInfo(rt reflect.Type, indexstack []int, fnameToHastag map[string]bool,
sfi *[]*structFieldInfo, siInfo *structFieldInfo,
) {
// for rt.Kind() == reflect.Ptr {
// // indexstack = append(indexstack, 0)
// rt = rt.Elem()
// }
for j := 0; j < rt.NumField(); j++ {
f := rt.Field(j)
stag := f.Tag.Get(structTagName)
if stag == "-" {
continue
}
if r1, _ := utf8.DecodeRuneInString(f.Name); r1 == utf8.RuneError || !unicode.IsUpper(r1) {
continue
}
// if anonymous and there is no struct tag and it's a struct (or pointer to struct), inline it.
if f.Anonymous && stag == "" {
ft := f.Type
for ft.Kind() == reflect.Ptr {
ft = ft.Elem()
}
if ft.Kind() == reflect.Struct {
indexstack2 := append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
rgetTypeInfo(ft, indexstack2, fnameToHastag, sfi, siInfo)
continue
}
}
// do not let fields with same name in embedded structs override field at higher level.
// this must be done after the anonymous check, to allow anonymous fields
// to still include their child fields
if _, ok := fnameToHastag[f.Name]; ok {
continue
}
si := parseStructFieldInfo(f.Name, stag)
// si.ikind = int(f.Type.Kind())
if len(indexstack) == 0 {
si.i = int16(j)
} else {
si.i = -1
si.is = append(append(make([]int, 0, len(indexstack)+4), indexstack...), j)
}
if siInfo != nil {
if siInfo.omitEmpty {
si.omitEmpty = true
}
}
*sfi = append(*sfi, si)
fnameToHastag[f.Name] = stag != ""
}
}
func panicToErr(err *error) {
if recoverPanicToErr {
if x := recover(); x != nil {
//debug.PrintStack()
panicValToErr(x, err)
}
}
}
func doPanic(tag string, format string, params ...interface{}) {
params2 := make([]interface{}, len(params)+1)
params2[0] = tag
copy(params2[1:], params)
panic(fmt.Errorf("%s: "+format, params2...))
}
func checkOverflowFloat32(f float64, doCheck bool) {
if !doCheck {
return
}
// check overflow (logic adapted from std pkg reflect/value.go OverflowFloat())
f2 := f
if f2 < 0 {
f2 = -f
}
if math.MaxFloat32 < f2 && f2 <= math.MaxFloat64 {
decErr("Overflow float32 value: %v", f2)
}
}
func checkOverflow(ui uint64, i int64, bitsize uint8) {
// check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
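// e.g. with bitsize=8 and i=200: (200<<56)>>56 sign-extends the low byte
// 0xc8 back to -56, which differs from 200, so 200 overflows a signed int8.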
if bitsize == 0 {
return
}
if i != 0 {
if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc {
decErr("Overflow int value: %v", i)
}
}
if ui != 0 {
if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc {
decErr("Overflow uint value: %v", ui)
}
}
}
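
The tag grammar parsed above is the familiar one from encoding/json-style packages; a minimal sketch of what parseStructFieldInfo and rgetTypeInfo accept (type and field names invented for illustration):

type Server struct {
_struct bool `codec:",omitempty"` // _struct entry: omitempty applies to every field
Name string `codec:"name"` // renamed to "name" in the stream
Port int `codec:"port,omitempty"` // dropped when zero
Secret string `codec:"-"` // never encoded
Tags []string // untagged: encoded under "Tags"
}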

132
vendor/github.com/hashicorp/go-msgpack/codec/helper_internal.go generated vendored Normal file
View File

@@ -0,0 +1,132 @@
// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
// All non-std package dependencies live in this file,
// so porting to different environment is easy (just update functions).
import (
"errors"
"fmt"
"math"
"reflect"
)
var (
raisePanicAfterRecover = false
debugging = true
)
func panicValToErr(panicVal interface{}, err *error) {
switch xerr := panicVal.(type) {
case error:
*err = xerr
case string:
*err = errors.New(xerr)
default:
*err = fmt.Errorf("%v", panicVal)
}
if raisePanicAfterRecover {
panic(panicVal)
}
return
}
func hIsEmptyValue(v reflect.Value, deref, checkStruct bool) bool {
switch v.Kind() {
case reflect.Invalid:
return true
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
if deref {
if v.IsNil() {
return true
}
return hIsEmptyValue(v.Elem(), deref, checkStruct)
} else {
return v.IsNil()
}
case reflect.Struct:
if !checkStruct {
return false
}
// return true if all fields are empty. else return false.
// we cannot use equality check, because some fields may be maps/slices/etc
// and consequently the structs are not comparable.
// return v.Interface() == reflect.Zero(v.Type()).Interface()
for i, n := 0, v.NumField(); i < n; i++ {
if !hIsEmptyValue(v.Field(i), deref, checkStruct) {
return false
}
}
return true
}
return false
}
func isEmptyValue(v reflect.Value) bool {
return hIsEmptyValue(v, derefForIsEmptyValue, checkStructForEmptyValue)
}
func debugf(format string, args ...interface{}) {
if debugging {
if len(format) == 0 || format[len(format)-1] != '\n' {
format = format + "\n"
}
fmt.Printf(format, args...)
}
}
func pruneSignExt(v []byte, pos bool) (n int) {
if len(v) < 2 {
} else if pos && v[0] == 0 {
for ; v[n] == 0 && n+1 < len(v) && (v[n+1]&(1<<7) == 0); n++ {
}
} else if !pos && v[0] == 0xff {
for ; v[n] == 0xff && n+1 < len(v) && (v[n+1]&(1<<7) != 0); n++ {
}
}
return
}
func implementsIntf(typ, iTyp reflect.Type) (success bool, indir int8) {
if typ == nil {
return
}
rt := typ
// The type might be a pointer and we need to keep
// dereferencing to the base type until we find an implementation.
for {
if rt.Implements(iTyp) {
return true, indir
}
if p := rt; p.Kind() == reflect.Ptr {
indir++
if indir >= math.MaxInt8 { // insane number of indirections
return false, 0
}
rt = p.Elem()
continue
}
break
}
// No luck yet, but if this is a base type (non-pointer), the pointer might satisfy.
if typ.Kind() != reflect.Ptr {
// Not a pointer, but does the pointer work?
if reflect.PtrTo(typ).Implements(iTyp) {
return true, -1
}
}
return false, 0
}

816
vendor/github.com/hashicorp/go-msgpack/codec/msgpack.go generated vendored Normal file
View File

@@ -0,0 +1,816 @@
// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
/*
MSGPACK
Msgpack-c implementation powers the c, c++, python, ruby, etc libraries.
We need to maintain compatibility with it and how it encodes integer values
without caring about the type.
For compatibility with behaviour of msgpack-c reference implementation:
- Go intX (>0) and uintX
IS ENCODED AS
msgpack +ve fixnum, unsigned
- Go intX (<0)
IS ENCODED AS
msgpack -ve fixnum, signed
*/
package codec
import (
"fmt"
"io"
"math"
"net/rpc"
)
const (
mpPosFixNumMin byte = 0x00
mpPosFixNumMax = 0x7f
mpFixMapMin = 0x80
mpFixMapMax = 0x8f
mpFixArrayMin = 0x90
mpFixArrayMax = 0x9f
mpFixStrMin = 0xa0
mpFixStrMax = 0xbf
mpNil = 0xc0
_ = 0xc1
mpFalse = 0xc2
mpTrue = 0xc3
mpFloat = 0xca
mpDouble = 0xcb
mpUint8 = 0xcc
mpUint16 = 0xcd
mpUint32 = 0xce
mpUint64 = 0xcf
mpInt8 = 0xd0
mpInt16 = 0xd1
mpInt32 = 0xd2
mpInt64 = 0xd3
// extensions below
mpBin8 = 0xc4
mpBin16 = 0xc5
mpBin32 = 0xc6
mpExt8 = 0xc7
mpExt16 = 0xc8
mpExt32 = 0xc9
mpFixExt1 = 0xd4
mpFixExt2 = 0xd5
mpFixExt4 = 0xd6
mpFixExt8 = 0xd7
mpFixExt16 = 0xd8
mpStr8 = 0xd9 // new
mpStr16 = 0xda
mpStr32 = 0xdb
mpArray16 = 0xdc
mpArray32 = 0xdd
mpMap16 = 0xde
mpMap32 = 0xdf
mpNegFixNumMin = 0xe0
mpNegFixNumMax = 0xff
)
// MsgpackSpecRpcMultiArgs is a special type which signifies to the MsgpackSpecRpcCodec
// that the backend RPC service takes multiple arguments, which have been arranged
// in sequence in the slice.
//
// The Codec then passes it AS-IS to the rpc service (without wrapping it in an
// array of 1 element).
type MsgpackSpecRpcMultiArgs []interface{}
// A MsgpackContainer type specifies the different types of msgpackContainers.
type msgpackContainerType struct {
fixCutoff int
bFixMin, b8, b16, b32 byte
hasFixMin, has8, has8Always bool
}
var (
msgpackContainerStr = msgpackContainerType{32, mpFixStrMin, mpStr8, mpStr16, mpStr32, true, true, false}
msgpackContainerBin = msgpackContainerType{0, 0, mpBin8, mpBin16, mpBin32, false, true, true}
msgpackContainerList = msgpackContainerType{16, mpFixArrayMin, 0, mpArray16, mpArray32, true, false, false}
msgpackContainerMap = msgpackContainerType{16, mpFixMapMin, 0, mpMap16, mpMap32, true, false, false}
)
//---------------------------------------------
type msgpackEncDriver struct {
w encWriter
h *MsgpackHandle
}
func (e *msgpackEncDriver) isBuiltinType(rt uintptr) bool {
//no builtin types. All encodings are based on kinds. Types supported as extensions.
return false
}
func (e *msgpackEncDriver) encodeBuiltin(rt uintptr, v interface{}) {}
func (e *msgpackEncDriver) encodeNil() {
e.w.writen1(mpNil)
}
func (e *msgpackEncDriver) encodeInt(i int64) {
switch {
case i >= 0:
e.encodeUint(uint64(i))
case i >= -32:
e.w.writen1(byte(i))
case i >= math.MinInt8:
e.w.writen2(mpInt8, byte(i))
case i >= math.MinInt16:
e.w.writen1(mpInt16)
e.w.writeUint16(uint16(i))
case i >= math.MinInt32:
e.w.writen1(mpInt32)
e.w.writeUint32(uint32(i))
default:
e.w.writen1(mpInt64)
e.w.writeUint64(uint64(i))
}
}
func (e *msgpackEncDriver) encodeUint(i uint64) {
switch {
case i <= math.MaxInt8:
e.w.writen1(byte(i))
case i <= math.MaxUint8:
e.w.writen2(mpUint8, byte(i))
case i <= math.MaxUint16:
e.w.writen1(mpUint16)
e.w.writeUint16(uint16(i))
case i <= math.MaxUint32:
e.w.writen1(mpUint32)
e.w.writeUint32(uint32(i))
default:
e.w.writen1(mpUint64)
e.w.writeUint64(uint64(i))
}
}
func (e *msgpackEncDriver) encodeBool(b bool) {
if b {
e.w.writen1(mpTrue)
} else {
e.w.writen1(mpFalse)
}
}
func (e *msgpackEncDriver) encodeFloat32(f float32) {
e.w.writen1(mpFloat)
e.w.writeUint32(math.Float32bits(f))
}
func (e *msgpackEncDriver) encodeFloat64(f float64) {
e.w.writen1(mpDouble)
e.w.writeUint64(math.Float64bits(f))
}
func (e *msgpackEncDriver) encodeExtPreamble(xtag byte, l int) {
switch {
case l == 1:
e.w.writen2(mpFixExt1, xtag)
case l == 2:
e.w.writen2(mpFixExt2, xtag)
case l == 4:
e.w.writen2(mpFixExt4, xtag)
case l == 8:
e.w.writen2(mpFixExt8, xtag)
case l == 16:
e.w.writen2(mpFixExt16, xtag)
case l < 256:
e.w.writen2(mpExt8, byte(l))
e.w.writen1(xtag)
case l < 65536:
e.w.writen1(mpExt16)
e.w.writeUint16(uint16(l))
e.w.writen1(xtag)
default:
e.w.writen1(mpExt32)
e.w.writeUint32(uint32(l))
e.w.writen1(xtag)
}
}
func (e *msgpackEncDriver) encodeArrayPreamble(length int) {
e.writeContainerLen(msgpackContainerList, length)
}
func (e *msgpackEncDriver) encodeMapPreamble(length int) {
e.writeContainerLen(msgpackContainerMap, length)
}
func (e *msgpackEncDriver) encodeString(c charEncoding, s string) {
if c == c_RAW && e.h.WriteExt {
e.writeContainerLen(msgpackContainerBin, len(s))
} else {
e.writeContainerLen(msgpackContainerStr, len(s))
}
if len(s) > 0 {
e.w.writestr(s)
}
}
func (e *msgpackEncDriver) encodeSymbol(v string) {
e.encodeString(c_UTF8, v)
}
func (e *msgpackEncDriver) encodeStringBytes(c charEncoding, bs []byte) {
if c == c_RAW && e.h.WriteExt {
e.writeContainerLen(msgpackContainerBin, len(bs))
} else {
e.writeContainerLen(msgpackContainerStr, len(bs))
}
if len(bs) > 0 {
e.w.writeb(bs)
}
}
func (e *msgpackEncDriver) writeContainerLen(ct msgpackContainerType, l int) {
switch {
case ct.hasFixMin && l < ct.fixCutoff:
e.w.writen1(ct.bFixMin | byte(l))
case ct.has8 && l < 256 && (ct.has8Always || e.h.WriteExt):
e.w.writen2(ct.b8, uint8(l))
case l < 65536:
e.w.writen1(ct.b16)
e.w.writeUint16(uint16(l))
default:
e.w.writen1(ct.b32)
e.w.writeUint32(uint32(l))
}
}
//---------------------------------------------
type msgpackDecDriver struct {
r decReader
h *MsgpackHandle
bd byte
bdRead bool
bdType valueType
}
func (d *msgpackDecDriver) isBuiltinType(rt uintptr) bool {
//no builtin types. All encodings are based on kinds. Types supported as extensions.
return false
}
func (d *msgpackDecDriver) decodeBuiltin(rt uintptr, v interface{}) {}
// Note: This returns either a primitive (int, bool, etc) for non-containers,
// or a containerType, or a specific type denoting nil or extension.
// It is called when a nil interface{} is passed, leaving it up to the DecDriver
// to introspect the stream and decide how best to decode.
// It deciphers the value by looking at the stream first.
func (d *msgpackDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
d.initReadNext()
bd := d.bd
switch bd {
case mpNil:
vt = valueTypeNil
d.bdRead = false
case mpFalse:
vt = valueTypeBool
v = false
case mpTrue:
vt = valueTypeBool
v = true
case mpFloat:
vt = valueTypeFloat
v = float64(math.Float32frombits(d.r.readUint32()))
case mpDouble:
vt = valueTypeFloat
v = math.Float64frombits(d.r.readUint64())
case mpUint8:
vt = valueTypeUint
v = uint64(d.r.readn1())
case mpUint16:
vt = valueTypeUint
v = uint64(d.r.readUint16())
case mpUint32:
vt = valueTypeUint
v = uint64(d.r.readUint32())
case mpUint64:
vt = valueTypeUint
v = uint64(d.r.readUint64())
case mpInt8:
vt = valueTypeInt
v = int64(int8(d.r.readn1()))
case mpInt16:
vt = valueTypeInt
v = int64(int16(d.r.readUint16()))
case mpInt32:
vt = valueTypeInt
v = int64(int32(d.r.readUint32()))
case mpInt64:
vt = valueTypeInt
v = int64(int64(d.r.readUint64()))
default:
switch {
case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
// positive fixnum (always signed)
vt = valueTypeInt
v = int64(int8(bd))
case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
// negative fixnum
vt = valueTypeInt
v = int64(int8(bd))
case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
if d.h.RawToString {
var rvm string
vt = valueTypeString
v = &rvm
} else {
var rvm = []byte{}
vt = valueTypeBytes
v = &rvm
}
decodeFurther = true
case bd == mpBin8, bd == mpBin16, bd == mpBin32:
var rvm = []byte{}
vt = valueTypeBytes
v = &rvm
decodeFurther = true
case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
vt = valueTypeArray
decodeFurther = true
case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
vt = valueTypeMap
decodeFurther = true
case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
clen := d.readExtLen()
var re RawExt
re.Tag = d.r.readn1()
re.Data = d.r.readn(clen)
v = &re
vt = valueTypeExt
default:
decErr("Nil-Deciphered DecodeValue: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
}
}
if !decodeFurther {
d.bdRead = false
}
return
}
// int can be decoded from msgpack type: intXXX or uintXXX
func (d *msgpackDecDriver) decodeInt(bitsize uint8) (i int64) {
switch d.bd {
case mpUint8:
i = int64(uint64(d.r.readn1()))
case mpUint16:
i = int64(uint64(d.r.readUint16()))
case mpUint32:
i = int64(uint64(d.r.readUint32()))
case mpUint64:
i = int64(d.r.readUint64())
case mpInt8:
i = int64(int8(d.r.readn1()))
case mpInt16:
i = int64(int16(d.r.readUint16()))
case mpInt32:
i = int64(int32(d.r.readUint32()))
case mpInt64:
i = int64(d.r.readUint64())
default:
switch {
case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
i = int64(int8(d.bd))
case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
i = int64(int8(d.bd))
default:
decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
}
}
// check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
if bitsize > 0 {
if trunc := (i << (64 - bitsize)) >> (64 - bitsize); i != trunc {
decErr("Overflow int value: %v", i)
}
}
d.bdRead = false
return
}
// uint can be decoded from msgpack type: intXXX or uintXXX
func (d *msgpackDecDriver) decodeUint(bitsize uint8) (ui uint64) {
switch d.bd {
case mpUint8:
ui = uint64(d.r.readn1())
case mpUint16:
ui = uint64(d.r.readUint16())
case mpUint32:
ui = uint64(d.r.readUint32())
case mpUint64:
ui = d.r.readUint64()
case mpInt8:
if i := int64(int8(d.r.readn1())); i >= 0 {
ui = uint64(i)
} else {
decErr("Assigning negative signed value: %v, to unsigned type", i)
}
case mpInt16:
if i := int64(int16(d.r.readUint16())); i >= 0 {
ui = uint64(i)
} else {
decErr("Assigning negative signed value: %v, to unsigned type", i)
}
case mpInt32:
if i := int64(int32(d.r.readUint32())); i >= 0 {
ui = uint64(i)
} else {
decErr("Assigning negative signed value: %v, to unsigned type", i)
}
case mpInt64:
if i := int64(d.r.readUint64()); i >= 0 {
ui = uint64(i)
} else {
decErr("Assigning negative signed value: %v, to unsigned type", i)
}
default:
switch {
case d.bd >= mpPosFixNumMin && d.bd <= mpPosFixNumMax:
ui = uint64(d.bd)
case d.bd >= mpNegFixNumMin && d.bd <= mpNegFixNumMax:
decErr("Assigning negative signed value: %v, to unsigned type", int(d.bd))
default:
decErr("Unhandled single-byte unsigned integer value: %s: %x", msgBadDesc, d.bd)
}
}
// check overflow (logic adapted from std pkg reflect/value.go OverflowUint())
if bitsize > 0 {
if trunc := (ui << (64 - bitsize)) >> (64 - bitsize); ui != trunc {
decErr("Overflow uint value: %v", ui)
}
}
d.bdRead = false
return
}
// float can either be decoded from msgpack type: float, double or intX
func (d *msgpackDecDriver) decodeFloat(chkOverflow32 bool) (f float64) {
switch d.bd {
case mpFloat:
f = float64(math.Float32frombits(d.r.readUint32()))
case mpDouble:
f = math.Float64frombits(d.r.readUint64())
default:
f = float64(d.decodeInt(0))
}
checkOverflowFloat32(f, chkOverflow32)
d.bdRead = false
return
}
// bool can be decoded from bool, fixnum 0 or 1.
func (d *msgpackDecDriver) decodeBool() (b bool) {
switch d.bd {
case mpFalse, 0:
// b = false
case mpTrue, 1:
b = true
default:
decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
}
d.bdRead = false
return
}
func (d *msgpackDecDriver) decodeString() (s string) {
clen := d.readContainerLen(msgpackContainerStr)
if clen > 0 {
s = string(d.r.readn(clen))
}
d.bdRead = false
return
}
// Callers must check if changed=true (to decide whether to replace the one they have)
func (d *msgpackDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) {
// bytes can be decoded from msgpackContainerStr or msgpackContainerBin
var clen int
switch d.bd {
case mpBin8, mpBin16, mpBin32:
clen = d.readContainerLen(msgpackContainerBin)
default:
clen = d.readContainerLen(msgpackContainerStr)
}
// if clen < 0 {
// changed = true
// panic("length cannot be zero. this cannot be nil.")
// }
if clen > 0 {
// if no contents in stream, don't update the passed byteslice
if len(bs) != clen {
// Return changed=true if length of passed slice diff from length of bytes in stream
if len(bs) > clen {
bs = bs[:clen]
} else {
bs = make([]byte, clen)
}
bsOut = bs
changed = true
}
d.r.readb(bs)
}
d.bdRead = false
return
}
// Every top-level decode func (i.e. decodeValue, decode) must call this first.
func (d *msgpackDecDriver) initReadNext() {
if d.bdRead {
return
}
d.bd = d.r.readn1()
d.bdRead = true
d.bdType = valueTypeUnset
}
func (d *msgpackDecDriver) currentEncodedType() valueType {
if d.bdType == valueTypeUnset {
bd := d.bd
switch bd {
case mpNil:
d.bdType = valueTypeNil
case mpFalse, mpTrue:
d.bdType = valueTypeBool
case mpFloat, mpDouble:
d.bdType = valueTypeFloat
case mpUint8, mpUint16, mpUint32, mpUint64:
d.bdType = valueTypeUint
case mpInt8, mpInt16, mpInt32, mpInt64:
d.bdType = valueTypeInt
default:
switch {
case bd >= mpPosFixNumMin && bd <= mpPosFixNumMax:
d.bdType = valueTypeInt
case bd >= mpNegFixNumMin && bd <= mpNegFixNumMax:
d.bdType = valueTypeInt
case bd == mpStr8, bd == mpStr16, bd == mpStr32, bd >= mpFixStrMin && bd <= mpFixStrMax:
if d.h.RawToString {
d.bdType = valueTypeString
} else {
d.bdType = valueTypeBytes
}
case bd == mpBin8, bd == mpBin16, bd == mpBin32:
d.bdType = valueTypeBytes
case bd == mpArray16, bd == mpArray32, bd >= mpFixArrayMin && bd <= mpFixArrayMax:
d.bdType = valueTypeArray
case bd == mpMap16, bd == mpMap32, bd >= mpFixMapMin && bd <= mpFixMapMax:
d.bdType = valueTypeMap
case bd >= mpFixExt1 && bd <= mpFixExt16, bd >= mpExt8 && bd <= mpExt32:
d.bdType = valueTypeExt
default:
decErr("currentEncodedType: Undeciphered descriptor: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
}
}
}
return d.bdType
}
func (d *msgpackDecDriver) tryDecodeAsNil() bool {
if d.bd == mpNil {
d.bdRead = false
return true
}
return false
}
func (d *msgpackDecDriver) readContainerLen(ct msgpackContainerType) (clen int) {
bd := d.bd
switch {
case bd == mpNil:
clen = -1 // to represent nil
case bd == ct.b8:
clen = int(d.r.readn1())
case bd == ct.b16:
clen = int(d.r.readUint16())
case bd == ct.b32:
clen = int(d.r.readUint32())
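// fixnum form: the length is carried in the low bits of the descriptor itself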
case (ct.bFixMin & bd) == ct.bFixMin:
clen = int(ct.bFixMin ^ bd)
default:
decErr("readContainerLen: %s: hex: %x, dec: %d", msgBadDesc, bd, bd)
}
d.bdRead = false
return
}
func (d *msgpackDecDriver) readMapLen() int {
return d.readContainerLen(msgpackContainerMap)
}
func (d *msgpackDecDriver) readArrayLen() int {
return d.readContainerLen(msgpackContainerList)
}
func (d *msgpackDecDriver) readExtLen() (clen int) {
switch d.bd {
case mpNil:
clen = -1 // to represent nil
case mpFixExt1:
clen = 1
case mpFixExt2:
clen = 2
case mpFixExt4:
clen = 4
case mpFixExt8:
clen = 8
case mpFixExt16:
clen = 16
case mpExt8:
clen = int(d.r.readn1())
case mpExt16:
clen = int(d.r.readUint16())
case mpExt32:
clen = int(d.r.readUint32())
default:
decErr("decoding ext bytes: found unexpected byte: %x", d.bd)
}
return
}
func (d *msgpackDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
xbd := d.bd
switch {
case xbd == mpBin8, xbd == mpBin16, xbd == mpBin32:
xbs, _ = d.decodeBytes(nil)
case xbd == mpStr8, xbd == mpStr16, xbd == mpStr32,
xbd >= mpFixStrMin && xbd <= mpFixStrMax:
xbs = []byte(d.decodeString())
default:
clen := d.readExtLen()
xtag = d.r.readn1()
if verifyTag && xtag != tag {
decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
}
xbs = d.r.readn(clen)
}
d.bdRead = false
return
}
//--------------------------------------------------
//MsgpackHandle is a Handle for the Msgpack Schema-Free Encoding Format.
type MsgpackHandle struct {
BasicHandle
// RawToString controls how raw bytes are decoded into a nil interface{}.
RawToString bool
// WriteExt flag supports encoding configured extensions with extension tags.
// It also controls whether other elements of the new spec are encoded (ie Str8).
//
// With WriteExt=false, configured extensions are serialized as raw bytes
// and Str8 is not encoded.
//
// A stream can still be decoded into a typed value if an appropriate value
// is provided, but the type cannot be inferred from the stream. If no appropriate
// type is provided (e.g. decoding into a nil interface{}), you get back
// a []byte or string based on the setting of RawToString.
WriteExt bool
}
func (h *MsgpackHandle) newEncDriver(w encWriter) encDriver {
return &msgpackEncDriver{w: w, h: h}
}
func (h *MsgpackHandle) newDecDriver(r decReader) decDriver {
return &msgpackDecDriver{r: r, h: h}
}
func (h *MsgpackHandle) writeExt() bool {
return h.WriteExt
}
func (h *MsgpackHandle) getBasicHandle() *BasicHandle {
return &h.BasicHandle
}
//--------------------------------------------------
type msgpackSpecRpcCodec struct {
rpcCodec
}
// /////////////// Spec RPC Codec ///////////////////
func (c *msgpackSpecRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
// WriteRequest can write to both a Go service, and other services that do
// not abide by the 1 argument rule of a Go service.
// We discriminate based on if the body is a MsgpackSpecRpcMultiArgs
var bodyArr []interface{}
if m, ok := body.(MsgpackSpecRpcMultiArgs); ok {
bodyArr = ([]interface{})(m)
} else {
bodyArr = []interface{}{body}
}
r2 := []interface{}{0, uint32(r.Seq), r.ServiceMethod, bodyArr}
return c.write(r2, nil, false, true)
}
func (c *msgpackSpecRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
var moe interface{}
if r.Error != "" {
moe = r.Error
}
if moe != nil && body != nil {
body = nil
}
r2 := []interface{}{1, uint32(r.Seq), moe, body}
return c.write(r2, nil, false, true)
}
func (c *msgpackSpecRpcCodec) ReadResponseHeader(r *rpc.Response) error {
return c.parseCustomHeader(1, &r.Seq, &r.Error)
}
func (c *msgpackSpecRpcCodec) ReadRequestHeader(r *rpc.Request) error {
return c.parseCustomHeader(0, &r.Seq, &r.ServiceMethod)
}
func (c *msgpackSpecRpcCodec) ReadRequestBody(body interface{}) error {
if body == nil { // read and discard
return c.read(nil)
}
bodyArr := []interface{}{body}
return c.read(&bodyArr)
}
func (c *msgpackSpecRpcCodec) parseCustomHeader(expectTypeByte byte, msgid *uint64, methodOrError *string) (err error) {
if c.cls {
return io.EOF
}
// We read the response header by hand
// so that the body can be decoded on its own from the stream at a later time.
const fia byte = 0x94 //four item array descriptor value
// Not sure why the panic of EOF is swallowed above.
// if bs1 := c.dec.r.readn1(); bs1 != fia {
// err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, bs1)
// return
// }
var b byte
b, err = c.br.ReadByte()
if err != nil {
return
}
if b != fia {
err = fmt.Errorf("Unexpected value for array descriptor: Expecting %v. Received %v", fia, b)
return
}
if err = c.read(&b); err != nil {
return
}
if b != expectTypeByte {
err = fmt.Errorf("Unexpected byte descriptor in header. Expecting %v. Received %v", expectTypeByte, b)
return
}
if err = c.read(msgid); err != nil {
return
}
if err = c.read(methodOrError); err != nil {
return
}
return
}
//--------------------------------------------------
// msgpackSpecRpc is the implementation of Rpc that uses custom communication protocol
// as defined in the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md
type msgpackSpecRpc struct{}
// MsgpackSpecRpc implements Rpc using the communication protocol defined in
// the msgpack spec at https://github.com/msgpack-rpc/msgpack-rpc/blob/master/spec.md .
// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
var MsgpackSpecRpc msgpackSpecRpc
func (x msgpackSpecRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
}
func (x msgpackSpecRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
return &msgpackSpecRpcCodec{newRPCCodec(conn, h)}
}
var _ decDriver = (*msgpackDecDriver)(nil)
var _ encDriver = (*msgpackEncDriver)(nil)
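
A short usage sketch of the two handle options (again assuming the byte-slice constructors from the suppressed encode.go/decode.go):

var mh codec.MsgpackHandle
mh.RawToString = true // decode raw/str into Go string rather than []byte
mh.WriteExt = true // emit new-spec types (bin*, str8, ext*) when encoding
var buf []byte
if err := codec.NewEncoderBytes(&buf, &mh).Encode([]interface{}{"a", 1}); err != nil {
panic(err)
}
var v interface{}
if err := codec.NewDecoderBytes(buf, &mh).Decode(&v); err != nil {
panic(err)
}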

110
vendor/github.com/hashicorp/go-msgpack/codec/msgpack_test.py generated vendored Normal file
View File

@@ -0,0 +1,110 @@
#!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to check in the files).
import msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464646464.0,
False,
True,
None,
"someday",
"",
"bytestring",
1328176922000002000,
-2206187877999998000,
0,
-6795364578871345152
]
l1 = [
{ "true": True,
"false": False },
{ "true": "True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": "1234567890" },
{ True: "true", 8: False, "false": 0 }
]
l = []
l.extend(l0)
l.append(l0)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
packer = msgpack.Packer()
serialized = packer.pack(l[i])
f = open(os.path.join(destdir, str(i) + '.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: msgpack_test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])

152
vendor/github.com/hashicorp/go-msgpack/codec/rpc.go generated vendored Normal file
View File

@@ -0,0 +1,152 @@
// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
import (
"bufio"
"io"
"net/rpc"
"sync"
)
// Rpc provides a rpc Server or Client Codec for rpc communication.
type Rpc interface {
ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec
ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec
}
// RpcCodecBuffered allows access to the underlying bufio.Reader/Writer
// used by the rpc connection. It accommodates use-cases where the connection
// should be used by rpc and non-rpc functions, e.g. streaming a file after
// sending an rpc response.
type RpcCodecBuffered interface {
BufferedReader() *bufio.Reader
BufferedWriter() *bufio.Writer
}
// -------------------------------------
// rpcCodec defines the struct members and common methods.
type rpcCodec struct {
rwc io.ReadWriteCloser
dec *Decoder
enc *Encoder
bw *bufio.Writer
br *bufio.Reader
mu sync.Mutex
cls bool
}
func newRPCCodec(conn io.ReadWriteCloser, h Handle) rpcCodec {
bw := bufio.NewWriter(conn)
br := bufio.NewReader(conn)
return rpcCodec{
rwc: conn,
bw: bw,
br: br,
enc: NewEncoder(bw, h),
dec: NewDecoder(br, h),
}
}
func (c *rpcCodec) BufferedReader() *bufio.Reader {
return c.br
}
func (c *rpcCodec) BufferedWriter() *bufio.Writer {
return c.bw
}
func (c *rpcCodec) write(obj1, obj2 interface{}, writeObj2, doFlush bool) (err error) {
if c.cls {
return io.EOF
}
if err = c.enc.Encode(obj1); err != nil {
return
}
if writeObj2 {
if err = c.enc.Encode(obj2); err != nil {
return
}
}
if doFlush && c.bw != nil {
return c.bw.Flush()
}
return
}
func (c *rpcCodec) read(obj interface{}) (err error) {
if c.cls {
return io.EOF
}
//If nil is passed in, we should still attempt to read content to nowhere.
if obj == nil {
var obj2 interface{}
return c.dec.Decode(&obj2)
}
return c.dec.Decode(obj)
}
func (c *rpcCodec) Close() error {
if c.cls {
return io.EOF
}
c.cls = true
return c.rwc.Close()
}
func (c *rpcCodec) ReadResponseBody(body interface{}) error {
return c.read(body)
}
// -------------------------------------
type goRpcCodec struct {
rpcCodec
}
func (c *goRpcCodec) WriteRequest(r *rpc.Request, body interface{}) error {
// Must protect for concurrent access as per API
c.mu.Lock()
defer c.mu.Unlock()
return c.write(r, body, true, true)
}
func (c *goRpcCodec) WriteResponse(r *rpc.Response, body interface{}) error {
c.mu.Lock()
defer c.mu.Unlock()
return c.write(r, body, true, true)
}
func (c *goRpcCodec) ReadResponseHeader(r *rpc.Response) error {
return c.read(r)
}
func (c *goRpcCodec) ReadRequestHeader(r *rpc.Request) error {
return c.read(r)
}
func (c *goRpcCodec) ReadRequestBody(body interface{}) error {
return c.read(body)
}
// -------------------------------------
// goRpc is the implementation of Rpc that uses the communication protocol
// as defined in net/rpc package.
type goRpc struct{}
// GoRpc implements Rpc using the communication protocol defined in net/rpc package.
// Its methods (ServerCodec and ClientCodec) return values that implement RpcCodecBuffered.
var GoRpc goRpc
func (x goRpc) ServerCodec(conn io.ReadWriteCloser, h Handle) rpc.ServerCodec {
return &goRpcCodec{newRPCCodec(conn, h)}
}
func (x goRpc) ClientCodec(conn io.ReadWriteCloser, h Handle) rpc.ClientCodec {
return &goRpcCodec{newRPCCodec(conn, h)}
}
var _ RpcCodecBuffered = (*rpcCodec)(nil) // ensure *rpcCodec implements RpcCodecBuffered
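
A sketch wiring GoRpc into the standard net/rpc machinery (the Arith service, address, and handle setup are invented stand-ins; imports are net, net/rpc, and this codec package):

type Args struct{ A, B int }
type Arith struct{}
func (Arith) Mul(a *Args, r *int) error { *r = a.A * a.B; return nil }

func main() {
h := &codec.MsgpackHandle{}
rpc.Register(Arith{})
ln, _ := net.Listen("tcp", "127.0.0.1:9999")
go func() {
for {
conn, err := ln.Accept()
if err != nil {
return
}
go rpc.ServeCodec(codec.GoRpc.ServerCodec(conn, h))
}
}()
conn, _ := net.Dial("tcp", "127.0.0.1:9999")
client := rpc.NewClientWithCodec(codec.GoRpc.ClientCodec(conn, h))
var product int
client.Call("Arith.Mul", &Args{A: 3, B: 4}, &product) // product == 12
client.Close()
}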

461
vendor/github.com/hashicorp/go-msgpack/codec/simple.go generated vendored Normal file
View File

@@ -0,0 +1,461 @@
// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
import "math"
const (
_ uint8 = iota
simpleVdNil = 1
simpleVdFalse = 2
simpleVdTrue = 3
simpleVdFloat32 = 4
simpleVdFloat64 = 5
// each lasts for 4 (ie n, n+1, n+2, n+3)
simpleVdPosInt = 8
simpleVdNegInt = 12
// containers: each lasts for 4 (ie n, n+1, n+2, ... n+7)
simpleVdString = 216
simpleVdByteArray = 224
simpleVdArray = 232
simpleVdMap = 240
simpleVdExt = 248
)
type simpleEncDriver struct {
h *SimpleHandle
w encWriter
//b [8]byte
}
func (e *simpleEncDriver) isBuiltinType(rt uintptr) bool {
return false
}
func (e *simpleEncDriver) encodeBuiltin(rt uintptr, v interface{}) {
}
func (e *simpleEncDriver) encodeNil() {
e.w.writen1(simpleVdNil)
}
func (e *simpleEncDriver) encodeBool(b bool) {
if b {
e.w.writen1(simpleVdTrue)
} else {
e.w.writen1(simpleVdFalse)
}
}
func (e *simpleEncDriver) encodeFloat32(f float32) {
e.w.writen1(simpleVdFloat32)
e.w.writeUint32(math.Float32bits(f))
}
func (e *simpleEncDriver) encodeFloat64(f float64) {
e.w.writen1(simpleVdFloat64)
e.w.writeUint64(math.Float64bits(f))
}
func (e *simpleEncDriver) encodeInt(v int64) {
if v < 0 {
e.encUint(uint64(-v), simpleVdNegInt)
} else {
e.encUint(uint64(v), simpleVdPosInt)
}
}
func (e *simpleEncDriver) encodeUint(v uint64) {
e.encUint(v, simpleVdPosInt)
}
func (e *simpleEncDriver) encUint(v uint64, bd uint8) {
switch {
case v <= math.MaxUint8:
e.w.writen2(bd, uint8(v))
case v <= math.MaxUint16:
e.w.writen1(bd + 1)
e.w.writeUint16(uint16(v))
case v <= math.MaxUint32:
e.w.writen1(bd + 2)
e.w.writeUint32(uint32(v))
case v <= math.MaxUint64:
e.w.writen1(bd + 3)
e.w.writeUint64(v)
}
}
func (e *simpleEncDriver) encLen(bd byte, length int) {
switch {
case length == 0:
e.w.writen1(bd)
case length <= math.MaxUint8:
e.w.writen1(bd + 1)
e.w.writen1(uint8(length))
case length <= math.MaxUint16:
e.w.writen1(bd + 2)
e.w.writeUint16(uint16(length))
case int64(length) <= math.MaxUint32:
e.w.writen1(bd + 3)
e.w.writeUint32(uint32(length))
default:
e.w.writen1(bd + 4)
e.w.writeUint64(uint64(length))
}
}
func (e *simpleEncDriver) encodeExtPreamble(xtag byte, length int) {
e.encLen(simpleVdExt, length)
e.w.writen1(xtag)
}
func (e *simpleEncDriver) encodeArrayPreamble(length int) {
e.encLen(simpleVdArray, length)
}
func (e *simpleEncDriver) encodeMapPreamble(length int) {
e.encLen(simpleVdMap, length)
}
func (e *simpleEncDriver) encodeString(c charEncoding, v string) {
e.encLen(simpleVdString, len(v))
e.w.writestr(v)
}
func (e *simpleEncDriver) encodeSymbol(v string) {
e.encodeString(c_UTF8, v)
}
func (e *simpleEncDriver) encodeStringBytes(c charEncoding, v []byte) {
e.encLen(simpleVdByteArray, len(v))
e.w.writeb(v)
}
//------------------------------------
type simpleDecDriver struct {
h *SimpleHandle
r decReader
bdRead bool
bdType valueType
bd byte
//b [8]byte
}
func (d *simpleDecDriver) initReadNext() {
if d.bdRead {
return
}
d.bd = d.r.readn1()
d.bdRead = true
d.bdType = valueTypeUnset
}
func (d *simpleDecDriver) currentEncodedType() valueType {
if d.bdType == valueTypeUnset {
switch d.bd {
case simpleVdNil:
d.bdType = valueTypeNil
case simpleVdTrue, simpleVdFalse:
d.bdType = valueTypeBool
case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
d.bdType = valueTypeUint
case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
d.bdType = valueTypeInt
case simpleVdFloat32, simpleVdFloat64:
d.bdType = valueTypeFloat
case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
d.bdType = valueTypeString
case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
d.bdType = valueTypeBytes
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
d.bdType = valueTypeExt
case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
d.bdType = valueTypeArray
case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
d.bdType = valueTypeMap
default:
decErr("currentEncodedType: Unrecognized d.vd: 0x%x", d.bd)
}
}
return d.bdType
}
func (d *simpleDecDriver) tryDecodeAsNil() bool {
if d.bd == simpleVdNil {
d.bdRead = false
return true
}
return false
}
func (d *simpleDecDriver) isBuiltinType(rt uintptr) bool {
return false
}
func (d *simpleDecDriver) decodeBuiltin(rt uintptr, v interface{}) {
}
func (d *simpleDecDriver) decIntAny() (ui uint64, i int64, neg bool) {
switch d.bd {
case simpleVdPosInt:
ui = uint64(d.r.readn1())
i = int64(ui)
case simpleVdPosInt + 1:
ui = uint64(d.r.readUint16())
i = int64(ui)
case simpleVdPosInt + 2:
ui = uint64(d.r.readUint32())
i = int64(ui)
case simpleVdPosInt + 3:
ui = uint64(d.r.readUint64())
i = int64(ui)
case simpleVdNegInt:
ui = uint64(d.r.readn1())
i = -(int64(ui))
neg = true
case simpleVdNegInt + 1:
ui = uint64(d.r.readUint16())
i = -(int64(ui))
neg = true
case simpleVdNegInt + 2:
ui = uint64(d.r.readUint32())
i = -(int64(ui))
neg = true
case simpleVdNegInt + 3:
ui = uint64(d.r.readUint64())
i = -(int64(ui))
neg = true
default:
decErr("decIntAny: Integer only valid from pos/neg integer1..8. Invalid descriptor: %v", d.bd)
}
// don't do this check, because callers may only want the unsigned value.
// if ui > math.MaxInt64 {
// decErr("decIntAny: Integer out of range for signed int64: %v", ui)
// }
return
}
func (d *simpleDecDriver) decodeInt(bitsize uint8) (i int64) {
_, i, _ = d.decIntAny()
checkOverflow(0, i, bitsize)
d.bdRead = false
return
}
func (d *simpleDecDriver) decodeUint(bitsize uint8) (ui uint64) {
ui, i, neg := d.decIntAny()
if neg {
decErr("Assigning negative signed value: %v, to unsigned type", i)
}
checkOverflow(ui, 0, bitsize)
d.bdRead = false
return
}
func (d *simpleDecDriver) decodeFloat(chkOverflow32 bool) (f float64) {
switch d.bd {
case simpleVdFloat32:
f = float64(math.Float32frombits(d.r.readUint32()))
case simpleVdFloat64:
f = math.Float64frombits(d.r.readUint64())
default:
if d.bd >= simpleVdPosInt && d.bd <= simpleVdNegInt+3 {
_, i, _ := d.decIntAny()
f = float64(i)
} else {
decErr("Float only valid from float32/64: Invalid descriptor: %v", d.bd)
}
}
checkOverflowFloat32(f, chkOverflow32)
d.bdRead = false
return
}
// bool can be decoded from bool only (single byte).
func (d *simpleDecDriver) decodeBool() (b bool) {
switch d.bd {
case simpleVdTrue:
b = true
case simpleVdFalse:
default:
decErr("Invalid single-byte value for bool: %s: %x", msgBadDesc, d.bd)
}
d.bdRead = false
return
}
func (d *simpleDecDriver) readMapLen() (length int) {
d.bdRead = false
return d.decLen()
}
func (d *simpleDecDriver) readArrayLen() (length int) {
d.bdRead = false
return d.decLen()
}
func (d *simpleDecDriver) decLen() int {
switch d.bd % 8 {
case 0:
return 0
case 1:
return int(d.r.readn1())
case 2:
return int(d.r.readUint16())
case 3:
ui := uint64(d.r.readUint32())
checkOverflow(ui, 0, intBitsize)
return int(ui)
case 4:
ui := d.r.readUint64()
checkOverflow(ui, 0, intBitsize)
return int(ui)
}
decErr("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8)
return -1
}
func (d *simpleDecDriver) decodeString() (s string) {
s = string(d.r.readn(d.decLen()))
d.bdRead = false
return
}
func (d *simpleDecDriver) decodeBytes(bs []byte) (bsOut []byte, changed bool) {
if clen := d.decLen(); clen > 0 {
// if no contents in stream, don't update the passed byteslice
if len(bs) != clen {
if len(bs) > clen {
bs = bs[:clen]
} else {
bs = make([]byte, clen)
}
bsOut = bs
changed = true
}
d.r.readb(bs)
}
d.bdRead = false
return
}
func (d *simpleDecDriver) decodeExt(verifyTag bool, tag byte) (xtag byte, xbs []byte) {
switch d.bd {
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
l := d.decLen()
xtag = d.r.readn1()
if verifyTag && xtag != tag {
decErr("Wrong extension tag. Got %b. Expecting: %v", xtag, tag)
}
xbs = d.r.readn(l)
case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
xbs, _ = d.decodeBytes(nil)
default:
decErr("Invalid d.vd for extensions (Expecting extensions or byte array). Got: 0x%x", d.bd)
}
d.bdRead = false
return
}
func (d *simpleDecDriver) decodeNaked() (v interface{}, vt valueType, decodeFurther bool) {
d.initReadNext()
switch d.bd {
case simpleVdNil:
vt = valueTypeNil
case simpleVdFalse:
vt = valueTypeBool
v = false
case simpleVdTrue:
vt = valueTypeBool
v = true
case simpleVdPosInt, simpleVdPosInt + 1, simpleVdPosInt + 2, simpleVdPosInt + 3:
vt = valueTypeUint
ui, _, _ := d.decIntAny()
v = ui
case simpleVdNegInt, simpleVdNegInt + 1, simpleVdNegInt + 2, simpleVdNegInt + 3:
vt = valueTypeInt
_, i, _ := d.decIntAny()
v = i
case simpleVdFloat32:
vt = valueTypeFloat
v = d.decodeFloat(true)
case simpleVdFloat64:
vt = valueTypeFloat
v = d.decodeFloat(false)
case simpleVdString, simpleVdString + 1, simpleVdString + 2, simpleVdString + 3, simpleVdString + 4:
vt = valueTypeString
v = d.decodeString()
case simpleVdByteArray, simpleVdByteArray + 1, simpleVdByteArray + 2, simpleVdByteArray + 3, simpleVdByteArray + 4:
vt = valueTypeBytes
v, _ = d.decodeBytes(nil)
case simpleVdExt, simpleVdExt + 1, simpleVdExt + 2, simpleVdExt + 3, simpleVdExt + 4:
vt = valueTypeExt
l := d.decLen()
var re RawExt
re.Tag = d.r.readn1()
re.Data = d.r.readn(l)
v = &re
vt = valueTypeExt
case simpleVdArray, simpleVdArray + 1, simpleVdArray + 2, simpleVdArray + 3, simpleVdArray + 4:
vt = valueTypeArray
decodeFurther = true
case simpleVdMap, simpleVdMap + 1, simpleVdMap + 2, simpleVdMap + 3, simpleVdMap + 4:
vt = valueTypeMap
decodeFurther = true
default:
decErr("decodeNaked: Unrecognized d.vd: 0x%x", d.bd)
}
if !decodeFurther {
d.bdRead = false
}
return
}
//------------------------------------
// SimpleHandle is a Handle for a very simple encoding format.
//
// simple is a simplistic codec similar to binc, but not as compact.
// - Encoding of a value is always preceded by the descriptor byte (bd)
// - True, false, nil are encoded fully in 1 byte (the descriptor)
// - Integers (intXXX, uintXXX) are encoded in 1, 2, 4 or 8 bytes (plus a descriptor byte).
// There are positive (uintXXX and intXXX >= 0) and negative (intXXX < 0) integers.
// - Floats are encoded in 4 or 8 bytes (plus a descriptor byte)
// - Lengths of containers (strings, bytes, array, map, extensions)
// are encoded in 0, 1, 2, 4 or 8 bytes.
// Zero-length containers have no length encoded.
// For others, the number of length bytes is given by pow(2, (bd%8)-1)
// - maps are encoded as [bd] [length] [[key][value]]...
// - arrays are encoded as [bd] [length] [value]...
// - extensions are encoded as [bd] [length] [tag] [byte]...
// - strings/bytearrays are encoded as [bd] [length] [byte]...
//
// The full spec will be published soon.
type SimpleHandle struct {
BasicHandle
}
func (h *SimpleHandle) newEncDriver(w encWriter) encDriver {
return &simpleEncDriver{w: w, h: h}
}
func (h *SimpleHandle) newDecDriver(r decReader) decDriver {
return &simpleDecDriver{r: r, h: h}
}
func (_ *SimpleHandle) writeExt() bool {
return true
}
func (h *SimpleHandle) getBasicHandle() *BasicHandle {
return &h.BasicHandle
}
var _ decDriver = (*simpleDecDriver)(nil)
var _ encDriver = (*simpleEncDriver)(nil)
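
A worked sketch of the descriptor arithmetic (expected bytes follow from encUint and encLen above; the byte-slice encoder constructor is assumed from the suppressed encode.go):

var h codec.SimpleHandle
var b1, b2 []byte
codec.NewEncoderBytes(&b1, &h).Encode(uint64(5))
// b1 == []byte{0x08, 0x05} (simpleVdPosInt, then the one-byte value)
codec.NewEncoderBytes(&b2, &h).Encode("hi")
// b2 == []byte{0xd9, 0x02, 'h', 'i'} (simpleVdString+1, one-byte length, bytes)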

193
vendor/github.com/hashicorp/go-msgpack/codec/time.go generated vendored Normal file
View File

@@ -0,0 +1,193 @@
// Copyright (c) 2012, 2013 Ugorji Nwoke. All rights reserved.
// Use of this source code is governed by a BSD-style license found in the LICENSE file.
package codec
import (
"time"
)
var (
timeDigits = [...]byte{'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'}
)
// EncodeTime encodes a time.Time as a []byte, including
// information on the instant in time and UTC offset.
//
// Format Description
//
// A timestamp is composed of 3 components:
//
// - secs: signed integer representing seconds since unix epoch
// - nsecs: unsigned integer representing fractional seconds as a
// nanosecond offset within secs, in the range 0 <= nsecs < 1e9
// - tz: signed integer representing timezone offset in minutes east of UTC,
// and a dst (daylight savings time) flag
//
// When encoding a timestamp, the first byte is the descriptor, which
// defines which components are encoded and how many bytes are used to
// encode secs and nsecs components. *If secs/nsecs is 0 or tz is UTC, it
// is not encoded in the byte array explicitly*.
//
// Descriptor 8 bits are of the form `A B C DDD EE`:
// A: Is secs component encoded? 1 = true
// B: Is nsecs component encoded? 1 = true
// C: Is tz component encoded? 1 = true
// DDD: Number of extra bytes for secs (range 0-7).
// If A = 1, secs encoded in DDD+1 bytes.
// If A = 0, secs is not encoded, and is assumed to be 0.
// If A = 1, then we need at least 1 byte to encode secs.
// DDD says the number of extra bytes beyond that 1.
// E.g. if DDD=0, then secs is represented in 1 byte.
// if DDD=2, then secs is represented in 3 bytes.
// EE: Number of extra bytes for nsecs (range 0-3).
// If B = 1, nsecs encoded in EE+1 bytes (similar to secs/DDD above)
//
// Following the descriptor bytes, subsequent bytes are:
//
// secs component encoded in `DDD + 1` bytes (if A == 1)
// nsecs component encoded in `EE + 1` bytes (if B == 1)
// tz component encoded in 2 bytes (if C == 1)
//
// secs and nsecs components are integers encoded in a BigEndian
// two's-complement encoding format.
//
// tz component is encoded as 2 bytes (16 bits). Most significant bit 15 to
// Least significant bit 0 are described below:
//
// Timezone offset has a range of -12:00 to +14:00 (ie -720 to +840 minutes).
// Bit 15 = have_dst: set to 1 if we set the dst flag.
// Bit 14 = dst_on: set to 1 if dst is in effect at the time, or 0 if not.
// Bits 13..0 = timezone offset in minutes. It is a signed integer in Big Endian format.
//
func encodeTime(t time.Time) []byte {
//t := rv.Interface().(time.Time)
tsecs, tnsecs := t.Unix(), t.Nanosecond()
var (
bd byte
btmp [8]byte
bs [16]byte
i int = 1
)
l := t.Location()
if l == time.UTC {
l = nil
}
if tsecs != 0 {
bd = bd | 0x80
bigen.PutUint64(btmp[:], uint64(tsecs))
f := pruneSignExt(btmp[:], tsecs >= 0)
bd = bd | (byte(7-f) << 2)
copy(bs[i:], btmp[f:])
i = i + (8 - f)
}
if tnsecs != 0 {
bd = bd | 0x40
bigen.PutUint32(btmp[:4], uint32(tnsecs))
f := pruneSignExt(btmp[:4], true)
bd = bd | byte(3-f)
copy(bs[i:], btmp[f:4])
i = i + (4 - f)
}
if l != nil {
bd = bd | 0x20
// Note that Go Libs do not give access to dst flag.
_, zoneOffset := t.Zone()
//zoneName, zoneOffset := t.Zone()
zoneOffset /= 60
z := uint16(zoneOffset)
bigen.PutUint16(btmp[:2], z)
// clear dst flags
bs[i] = btmp[0] & 0x3f
bs[i+1] = btmp[1]
i = i + 2
}
bs[0] = bd
return bs[0:i]
}
// DecodeTime decodes a []byte into a time.Time.
func decodeTime(bs []byte) (tt time.Time, err error) {
bd := bs[0]
var (
tsec int64
tnsec uint32
tz uint16
i byte = 1
i2 byte
n byte
)
if bd&(1<<7) != 0 {
var btmp [8]byte
n = ((bd >> 2) & 0x7) + 1
i2 = i + n
copy(btmp[8-n:], bs[i:i2])
//if first bit of bs[i] is set, then fill btmp[0..8-n] with 0xff (ie sign extend it)
if bs[i]&(1<<7) != 0 {
copy(btmp[0:8-n], bsAll0xff)
//for j,k := byte(0), 8-n; j < k; j++ { btmp[j] = 0xff }
}
i = i2
tsec = int64(bigen.Uint64(btmp[:]))
}
if bd&(1<<6) != 0 {
var btmp [4]byte
n = (bd & 0x3) + 1
i2 = i + n
copy(btmp[4-n:], bs[i:i2])
i = i2
tnsec = bigen.Uint32(btmp[:])
}
if bd&(1<<5) == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
return
}
// In stdlib time.Parse, when a date is parsed without a zone name, it uses "" as zone name.
// However, we need a name here, so it can be shown when the time is printed.
// Zone name is in form: UTC-08:00.
// Note that Go Libs do not give access to dst flag, so we ignore dst bits
i2 = i + 2
tz = bigen.Uint16(bs[i:i2])
i = i2
// sign extend sign bit into top 2 MSB (which were dst bits):
if tz&(1<<13) == 0 { // positive
tz = tz & 0x3fff //clear 2 MSBs: dst bits
} else { // negative
tz = tz | 0xc000 //set 2 MSBs: dst bits
//tzname[3] = '-' (TODO: verify. this works here)
}
tzint := int16(tz)
if tzint == 0 {
tt = time.Unix(tsec, int64(tnsec)).UTC()
} else {
// For Go Time, do not use a descriptive timezone.
// It's unnecessary, and makes it harder to do a reflect.DeepEqual.
// The Offset already tells what the offset should be, if not on UTC and unknown zone name.
// var zoneName = timeLocUTCName(tzint)
tt = time.Unix(tsec, int64(tnsec)).In(time.FixedZone("", int(tzint)*60))
}
return
}
func timeLocUTCName(tzint int16) string {
if tzint == 0 {
return "UTC"
}
var tzname = []byte("UTC+00:00")
//tzname := fmt.Sprintf("UTC%s%02d:%02d", tzsign, tz/60, tz%60) //perf issue using Sprintf. inline below.
//tzhr, tzmin := tz/60, tz%60 //faster if u convert to int first
var tzhr, tzmin int16
if tzint < 0 {
tzname[3] = '-' // (TODO: verify. this works here)
tzhr, tzmin = -tzint/60, (-tzint)%60
} else {
tzhr, tzmin = tzint/60, tzint%60
}
tzname[4] = timeDigits[tzhr/10]
tzname[5] = timeDigits[tzhr%10]
tzname[7] = timeDigits[tzmin/10]
tzname[8] = timeDigits[tzmin%10]
return string(tzname)
//return time.FixedZone(string(tzname), int(tzint)*60)
}
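Since encodeTime and decodeTime are unexported, a round trip can only be exercised from inside package codec; a hedged sketch written as a test (the test name and the chosen instant/offset are assumptions):

package codec

import (
	"testing"
	"time"
)

func TestTimeRoundTrip(t *testing.T) {
	// A +02:00 fixed zone exercises the tz component (bit C of the descriptor).
	in := time.Unix(1681371739, 500000000).In(time.FixedZone("", 2*60*60))

	bs := encodeTime(in)
	out, err := decodeTime(bs)
	if err != nil {
		t.Fatal(err)
	}
	// decodeTime returns a FixedZone("") location, so compare instants
	// rather than using reflect.DeepEqual on the Location values.
	if !out.Equal(in) {
		t.Fatalf("got %v, want %v", out, in)
	}
}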

362
vendor/github.com/hashicorp/raft-boltdb/v2/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,362 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.

37
vendor/github.com/hashicorp/raft-boltdb/v2/README.md generated vendored Normal file
View File

@@ -0,0 +1,37 @@
raft-boltdb/v2
===========
This implementation uses the maintained version of BoltDB, [BBolt](https://github.com/etcd-io/bbolt). This is the primary version of `raft-boltdb` and should be used whenever possible.
There is no breaking API change to the library. However, because of potential disk format incompatibilities, the conservative choice was to give it a separate import path. The separate import path allows both versions (original and v2) to be imported at once, so a safe in-place upgrade can be performed: old files are read with the old version and written back out with the new one.
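A minimal sketch of that in-place upgrade using MigrateToV2 (both file paths are illustrative; the destination must not exist yet):

package main

import (
	raftboltdb "github.com/hashicorp/raft-boltdb/v2"
)

func main() {
	// Reads the old-format source file and writes its contents to a
	// new Bbolt-format destination file, returning the opened v2 store.
	store, err := raftboltdb.MigrateToV2("raft.db", "raft-v2.db")
	if err != nil {
		panic(err)
	}
	defer store.Close()
}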
## Metrics
The raft-boltdb library emits a number of metrics utilizing github.com/armon/go-metrics. Those metrics are detailed in the following table. Note that the application which pulls in this library may add its own prefix to the metric names. For example, within [Consul](https://github.com/hashicorp/consul), the metrics will be prefixed with `consul.`.
| Metric | Unit | Type | Description |
| ----------------------------------- | ------------:| -------:|:--------------------- |
| `raft.boltdb.freelistBytes` | bytes | gauge | Represents the number of bytes necessary to encode the freelist metadata. When [`raft_boltdb.NoFreelistSync`](/docs/agent/options#NoFreelistSync) is set to `false` these metadata bytes must also be written to disk for each committed log. |
| `raft.boltdb.freePageBytes` | bytes | gauge | Represents the number of bytes of free space within the raft.db file. |
| `raft.boltdb.getLog` | ms | timer | Measures the amount of time spent reading logs from the db. |
| `raft.boltdb.logBatchSize` | bytes | sample | Measures the total size in bytes of logs being written to the db in a single batch. |
| `raft.boltdb.logsPerBatch` | logs | sample | Measures the number of logs being written per batch to the db. |
| `raft.boltdb.logSize` | bytes | sample | Measures the size of logs being written to the db. |
| `raft.boltdb.numFreePages` | pages | gauge | Represents the number of free pages within the raft.db file. |
| `raft.boltdb.numPendingPages` | pages | gauge | Represents the number of pending pages within the raft.db that will soon become free. |
| `raft.boltdb.openReadTxn` | transactions | gauge | Represents the number of open read transactions against the db |
| `raft.boltdb.storeLogs` | ms | timer | Measures the amount of time spent writing logs to the db. |
| `raft.boltdb.totalReadTxn` | transactions | gauge | Represents the total number of started read transactions against the db |
| `raft.boltdb.txstats.cursorCount` | cursors | counter | Counts the number of cursors created since Consul was started. |
| `raft.boltdb.txstats.nodeCount` | allocations | counter | Counts the number of node allocations within the db since Consul was started. |
| `raft.boltdb.txstats.nodeDeref` | dereferences | counter | Counts the number of node dereferences in the db since Consul was started. |
| `raft.boltdb.txstats.pageAlloc` | bytes | gauge | Represents the number of bytes allocated within the db since Consul was started. Note that this does not take into account space having been freed and reused. In that case, the value of this metric will still increase. |
| `raft.boltdb.txstats.pageCount` | pages | gauge | Represents the number of pages allocated since Consul was started. Note that this does not take into account space having been freed and reused. In that case, the value of this metric will still increase. |
| `raft.boltdb.txstats.rebalance` | rebalances | counter | Counts the number of node rebalances performed in the db since Consul was started. |
| `raft.boltdb.txstats.rebalanceTime` | ms | timer | Measures the time spent rebalancing nodes in the db. |
| `raft.boltdb.txstats.spill` | spills | counter | Counts the number of nodes spilled in the db since Consul was started. |
| `raft.boltdb.txstats.spillTime` | ms | timer | Measures the time spent spilling nodes in the db. |
| `raft.boltdb.txstats.split` | splits | counter | Counts the number of nodes split in the db since Consul was started. |
| `raft.boltdb.txstats.write` | writes | counter | Counts the number of writes to the db since Consul was started. |
| `raft.boltdb.txstats.writeTime` | ms | timer | Measures the amount of time spent performing writes to the db. |
| `raft.boltdb.writeCapacity` | logs/second | sample | Theoretical write capacity in terms of the number of logs that can be written per second. Each sample outputs what the capacity would be if future batched log write operations were similar to this one. This similarity encompasses 4 things: batch size, byte size, disk performance and boltdb performance. While none of these will be static and its highly likely individual samples of this metric will vary, aggregating this metric over a larger time window should provide a decent picture into how this BoltDB store can perform |
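For example, an application could install a go-metrics sink with its own service name before opening the store, so the metrics above are emitted under that prefix (a sketch; the sink type, interval, and "myapp" name are assumptions):

package main

import (
	"time"

	metrics "github.com/armon/go-metrics"
)

func main() {
	// Aggregate into 10-second intervals and retain 10 minutes of data
	// in memory; production setups typically use statsd, Prometheus, etc.
	sink := metrics.NewInmemSink(10*time.Second, 10*time.Minute)

	conf := metrics.DefaultConfig("myapp")
	conf.EnableHostname = false // emit names as myapp.raft.boltdb.*

	if _, err := metrics.NewGlobal(conf, sink); err != nil {
		panic(err)
	}
}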

363
vendor/github.com/hashicorp/raft-boltdb/v2/bolt_store.go generated vendored Normal file
View File

@@ -0,0 +1,363 @@
package raftboltdb
import (
"errors"
"fmt"
"os"
"time"
metrics "github.com/armon/go-metrics"
v1 "github.com/boltdb/bolt"
"github.com/hashicorp/raft"
"go.etcd.io/bbolt"
)
const (
// Permissions to use on the db file. This is only used if the
// database file does not exist and needs to be created.
dbFileMode = 0600
)
var (
// Bucket names we perform transactions in
dbLogs = []byte("logs")
dbConf = []byte("conf")
// An error indicating a given key does not exist
ErrKeyNotFound = errors.New("not found")
)
// BoltStore provides access to Bbolt for Raft to store and retrieve
// log entries. It also provides key/value storage, and can be used as
// a LogStore and StableStore.
type BoltStore struct {
// conn is the underlying handle to the db.
conn *bbolt.DB
// The path to the Bolt database file
path string
}
// Options contains all the configuration used to open the Bbolt store
type Options struct {
// Path is the file path of the Bbolt file to use
Path string
// BoltOptions contains any specific Bbolt options you might
// want to specify [e.g. open timeout]
BoltOptions *bbolt.Options
// NoSync causes the database to skip fsync calls after each
// write to the log. This is unsafe, so it should be used
// with caution.
NoSync bool
}
// readOnly returns true if the contained bolt options say to open
// the DB in readOnly mode [this can be useful to tools that want
// to examine the log]
func (o *Options) readOnly() bool {
return o != nil && o.BoltOptions != nil && o.BoltOptions.ReadOnly
}
// NewBoltStore takes a file path and returns a connected Raft backend.
func NewBoltStore(path string) (*BoltStore, error) {
return New(Options{Path: path})
}
// New uses the supplied options to open the Bbolt database and prepare it for use as a raft backend.
func New(options Options) (*BoltStore, error) {
// Try to connect
handle, err := bbolt.Open(options.Path, dbFileMode, options.BoltOptions)
if err != nil {
return nil, err
}
handle.NoSync = options.NoSync
// Create the new store
store := &BoltStore{
conn: handle,
path: options.Path,
}
// If the store was opened read-only, don't try and create buckets
if !options.readOnly() {
// Set up our buckets
if err := store.initialize(); err != nil {
store.Close()
return nil, err
}
}
return store, nil
}
// initialize is used to set up all of the buckets.
func (b *BoltStore) initialize() error {
tx, err := b.conn.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
// Create all the buckets
if _, err := tx.CreateBucketIfNotExists(dbLogs); err != nil {
return err
}
if _, err := tx.CreateBucketIfNotExists(dbConf); err != nil {
return err
}
return tx.Commit()
}
func (b *BoltStore) Stats() bbolt.Stats {
return b.conn.Stats()
}
// Close is used to gracefully close the DB connection.
func (b *BoltStore) Close() error {
return b.conn.Close()
}
// FirstIndex returns the first known index from the Raft log.
func (b *BoltStore) FirstIndex() (uint64, error) {
tx, err := b.conn.Begin(false)
if err != nil {
return 0, err
}
defer tx.Rollback()
curs := tx.Bucket(dbLogs).Cursor()
if first, _ := curs.First(); first == nil {
return 0, nil
} else {
return bytesToUint64(first), nil
}
}
// LastIndex returns the last known index from the Raft log.
func (b *BoltStore) LastIndex() (uint64, error) {
tx, err := b.conn.Begin(false)
if err != nil {
return 0, err
}
defer tx.Rollback()
curs := tx.Bucket(dbLogs).Cursor()
if last, _ := curs.Last(); last == nil {
return 0, nil
} else {
return bytesToUint64(last), nil
}
}
// GetLog is used to retrieve a log from Bbolt at a given index.
func (b *BoltStore) GetLog(idx uint64, log *raft.Log) error {
defer metrics.MeasureSince([]string{"raft", "boltdb", "getLog"}, time.Now())
tx, err := b.conn.Begin(false)
if err != nil {
return err
}
defer tx.Rollback()
bucket := tx.Bucket(dbLogs)
val := bucket.Get(uint64ToBytes(idx))
if val == nil {
return raft.ErrLogNotFound
}
return decodeMsgPack(val, log)
}
// StoreLog is used to store a single raft log
func (b *BoltStore) StoreLog(log *raft.Log) error {
return b.StoreLogs([]*raft.Log{log})
}
// StoreLogs is used to store a set of raft logs
func (b *BoltStore) StoreLogs(logs []*raft.Log) error {
now := time.Now()
tx, err := b.conn.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
batchSize := 0
for _, log := range logs {
key := uint64ToBytes(log.Index)
val, err := encodeMsgPack(log)
if err != nil {
return err
}
logLen := val.Len()
bucket := tx.Bucket(dbLogs)
if err := bucket.Put(key, val.Bytes()); err != nil {
return err
}
batchSize += logLen
metrics.AddSample([]string{"raft", "boltdb", "logSize"}, float32(logLen))
}
metrics.AddSample([]string{"raft", "boltdb", "logsPerBatch"}, float32(len(logs)))
metrics.AddSample([]string{"raft", "boltdb", "logBatchSize"}, float32(batchSize))
// Both the deferral and the inline function are important for this metric's
// accuracy. Deferral allows us to calculate the metric after the tx.Commit
// has finished and thus account for all the processing of the operation.
// The inlined function ensures that we do not calculate the time.Since(now)
// at the time of deferral but rather when the go runtime executes the
// deferred function.
defer func() {
metrics.AddSample([]string{"raft", "boltdb", "writeCapacity"}, (float32(1_000_000_000)/float32(time.Since(now).Nanoseconds()))*float32(len(logs)))
metrics.MeasureSince([]string{"raft", "boltdb", "storeLogs"}, now)
}()
return tx.Commit()
}
// DeleteRange is used to delete logs within a given range inclusively.
func (b *BoltStore) DeleteRange(min, max uint64) error {
minKey := uint64ToBytes(min)
tx, err := b.conn.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
curs := tx.Bucket(dbLogs).Cursor()
for k, _ := curs.Seek(minKey); k != nil; k, _ = curs.Next() {
// Handle out-of-range log index
if bytesToUint64(k) > max {
break
}
// Delete in-range log index
if err := curs.Delete(); err != nil {
return err
}
}
return tx.Commit()
}
// Set is used to set a key/value set outside of the raft log
func (b *BoltStore) Set(k, v []byte) error {
tx, err := b.conn.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
bucket := tx.Bucket(dbConf)
if err := bucket.Put(k, v); err != nil {
return err
}
return tx.Commit()
}
// Get is used to retrieve a value from the k/v store by key
func (b *BoltStore) Get(k []byte) ([]byte, error) {
tx, err := b.conn.Begin(false)
if err != nil {
return nil, err
}
defer tx.Rollback()
bucket := tx.Bucket(dbConf)
val := bucket.Get(k)
if val == nil {
return nil, ErrKeyNotFound
}
return append([]byte(nil), val...), nil
}
// SetUint64 is like Set, but handles uint64 values
func (b *BoltStore) SetUint64(key []byte, val uint64) error {
return b.Set(key, uint64ToBytes(val))
}
// GetUint64 is like Get, but handles uint64 values
func (b *BoltStore) GetUint64(key []byte) (uint64, error) {
val, err := b.Get(key)
if err != nil {
return 0, err
}
return bytesToUint64(val), nil
}
// Sync performs an fsync on the database file handle. This is not necessary
// under normal operation unless NoSync is enabled, in which case this forces the
// database file to sync against the disk.
func (b *BoltStore) Sync() error {
return b.conn.Sync()
}
// MigrateToV2 reads the BoltDB file at the source path and writes
// all of its data, migrated, to a Bbolt file at the destination path
func MigrateToV2(source, destination string) (*BoltStore, error) {
_, err := os.Stat(destination)
if err == nil {
return nil, fmt.Errorf("file exists in destination %v", destination)
}
srcDb, err := v1.Open(source, dbFileMode, &v1.Options{
ReadOnly: true,
Timeout: 1 * time.Minute,
})
if err != nil {
return nil, fmt.Errorf("failed opening source database: %v", err)
}
//Start a connection to the source
srctx, err := srcDb.Begin(false)
if err != nil {
return nil, fmt.Errorf("failed connecting to source database: %v", err)
}
defer srctx.Rollback()
//Create the destination
destDb, err := New(Options{Path: destination})
if err != nil {
return nil, fmt.Errorf("failed creating destination database: %v", err)
}
//Start a connection to the new
desttx, err := destDb.conn.Begin(true)
if err != nil {
destDb.Close()
os.Remove(destination)
return nil, fmt.Errorf("failed connecting to destination database: %v", err)
}
defer desttx.Rollback()
//Loop over both old buckets and set them in the new
buckets := [][]byte{dbConf, dbLogs}
for _, b := range buckets {
srcB := srctx.Bucket(b)
destB := desttx.Bucket(b)
err = srcB.ForEach(func(k, v []byte) error {
return destB.Put(k, v)
})
if err != nil {
destDb.Close()
os.Remove(destination)
return nil, fmt.Errorf("failed to copy %v bucket: %v", string(b), err)
}
}
//If the commit fails, clean up
if err := desttx.Commit(); err != nil {
destDb.Close()
os.Remove(destination)
return nil, fmt.Errorf("failed commiting data to destination: %v", err)
}
return destDb, nil
}
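A hedged sketch of using the store standalone, exercising both the LogStore and StableStore sides of the API above (the file path, index values, and key name are illustrative):

package main

import (
	"fmt"

	"github.com/hashicorp/raft"
	raftboltdb "github.com/hashicorp/raft-boltdb/v2"
)

func main() {
	store, err := raftboltdb.NewBoltStore("raft.db")
	if err != nil {
		panic(err)
	}
	defer store.Close()

	// LogStore side: append an entry, then read it back by index.
	if err := store.StoreLog(&raft.Log{Index: 1, Term: 1, Data: []byte("hello")}); err != nil {
		panic(err)
	}
	var l raft.Log
	if err := store.GetLog(1, &l); err != nil {
		panic(err)
	}

	// StableStore side: key/value data kept outside the raft log.
	if err := store.SetUint64([]byte("CurrentTerm"), 1); err != nil {
		panic(err)
	}

	first, _ := store.FirstIndex()
	last, _ := store.LastIndex()
	fmt.Println(first, last, string(l.Data)) // 1 1 hello
}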

68
vendor/github.com/hashicorp/raft-boltdb/v2/metrics.go generated vendored Normal file
View File

@@ -0,0 +1,68 @@
package raftboltdb
import (
"context"
"time"
metrics "github.com/armon/go-metrics"
"go.etcd.io/bbolt"
)
const (
defaultMetricsInterval = 5 * time.Second
)
// RunMetrics should be executed in a goroutine and periodically emits
// metrics at the given interval until the context is cancelled.
func (b *BoltStore) RunMetrics(ctx context.Context, interval time.Duration) {
if interval == 0 {
interval = defaultMetricsInterval
}
tick := time.NewTicker(interval)
defer tick.Stop()
stats := b.emitMetrics(nil)
for {
select {
case <-ctx.Done():
return
case <-tick.C:
stats = b.emitMetrics(stats)
}
}
}
func (b *BoltStore) emitMetrics(prev *bbolt.Stats) *bbolt.Stats {
newStats := b.conn.Stats()
stats := newStats
if prev != nil {
stats = newStats.Sub(prev)
}
// freelist metrics
metrics.SetGauge([]string{"raft", "boltdb", "numFreePages"}, float32(newStats.FreePageN))
metrics.SetGauge([]string{"raft", "boltdb", "numPendingPages"}, float32(newStats.PendingPageN))
metrics.SetGauge([]string{"raft", "boltdb", "freePageBytes"}, float32(newStats.FreeAlloc))
metrics.SetGauge([]string{"raft", "boltdb", "freelistBytes"}, float32(newStats.FreelistInuse))
// txn metrics
metrics.IncrCounter([]string{"raft", "boltdb", "totalReadTxn"}, float32(stats.TxN))
metrics.SetGauge([]string{"raft", "boltdb", "openReadTxn"}, float32(newStats.OpenTxN))
// tx stats
metrics.SetGauge([]string{"raft", "boltdb", "txstats", "pageCount"}, float32(newStats.TxStats.PageCount))
metrics.SetGauge([]string{"raft", "boltdb", "txstats", "pageAlloc"}, float32(newStats.TxStats.PageAlloc))
metrics.IncrCounter([]string{"raft", "boltdb", "txstats", "cursorCount"}, float32(stats.TxStats.CursorCount))
metrics.IncrCounter([]string{"raft", "boltdb", "txstats", "nodeCount"}, float32(stats.TxStats.NodeCount))
metrics.IncrCounter([]string{"raft", "boltdb", "txstats", "nodeDeref"}, float32(stats.TxStats.NodeDeref))
metrics.IncrCounter([]string{"raft", "boltdb", "txstats", "rebalance"}, float32(stats.TxStats.Rebalance))
metrics.AddSample([]string{"raft", "boltdb", "txstats", "rebalanceTime"}, float32(stats.TxStats.RebalanceTime.Nanoseconds())/1000000)
metrics.IncrCounter([]string{"raft", "boltdb", "txstats", "split"}, float32(stats.TxStats.Split))
metrics.IncrCounter([]string{"raft", "boltdb", "txstats", "spill"}, float32(stats.TxStats.Spill))
metrics.AddSample([]string{"raft", "boltdb", "txstats", "spillTime"}, float32(stats.TxStats.SpillTime.Nanoseconds())/1000000)
metrics.IncrCounter([]string{"raft", "boltdb", "txstats", "write"}, float32(stats.TxStats.Write))
metrics.AddSample([]string{"raft", "boltdb", "txstats", "writeTime"}, float32(stats.TxStats.WriteTime.Nanoseconds())/1000000)
return &newStats
}
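A short sketch of wiring RunMetrics into an application (the path and 10-second interval are assumptions; passing 0 falls back to the 5-second default above):

package main

import (
	"context"
	"time"

	raftboltdb "github.com/hashicorp/raft-boltdb/v2"
)

func main() {
	store, err := raftboltdb.NewBoltStore("raft.db")
	if err != nil {
		panic(err)
	}
	defer store.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Emits BoltDB stats every 10 seconds until ctx is cancelled.
	go store.RunMetrics(ctx, 10*time.Second)

	time.Sleep(time.Minute) // stand-in for real application work
}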

37
vendor/github.com/hashicorp/raft-boltdb/v2/util.go generated vendored Normal file
View File

@@ -0,0 +1,37 @@
package raftboltdb
import (
"bytes"
"encoding/binary"
"github.com/hashicorp/go-msgpack/codec"
)
// Decode reverses the encode operation on a byte slice input
func decodeMsgPack(buf []byte, out interface{}) error {
r := bytes.NewBuffer(buf)
hd := codec.MsgpackHandle{}
dec := codec.NewDecoder(r, &hd)
return dec.Decode(out)
}
// Encode writes an encoded object to a new bytes buffer
func encodeMsgPack(in interface{}) (*bytes.Buffer, error) {
buf := bytes.NewBuffer(nil)
hd := codec.MsgpackHandle{}
enc := codec.NewEncoder(buf, &hd)
err := enc.Encode(in)
return buf, err
}
// bytesToUint64 converts 8 big-endian bytes to a uint64
func bytesToUint64(b []byte) uint64 {
return binary.BigEndian.Uint64(b)
}
// uint64ToBytes converts a uint64 to an 8-byte big-endian slice
func uint64ToBytes(u uint64) []byte {
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, u)
return buf
}
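These helpers are unexported, so a round trip has to live inside package raftboltdb; a hedged sketch written as a test (the test name and sample values are assumptions):

package raftboltdb

import (
	"testing"

	"github.com/hashicorp/raft"
)

func TestHelperRoundTrips(t *testing.T) {
	// Big-endian keys make the logs bucket iterate in index order.
	if got := bytesToUint64(uint64ToBytes(42)); got != 42 {
		t.Fatalf("got %d, want 42", got)
	}

	// msgpack round trip of a raft.Log, mirroring StoreLogs/GetLog.
	in := raft.Log{Index: 7, Term: 3, Data: []byte("x")}
	buf, err := encodeMsgPack(&in)
	if err != nil {
		t.Fatal(err)
	}
	var out raft.Log
	if err := decodeMsgPack(buf.Bytes(), &out); err != nil {
		t.Fatal(err)
	}
	if out.Index != in.Index || string(out.Data) != string(in.Data) {
		t.Fatalf("round trip mismatch: %+v", out)
	}
}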

23
vendor/github.com/hashicorp/raft/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,23 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test

52
vendor/github.com/hashicorp/raft/.golangci-lint.yml generated vendored Normal file
View File

@@ -0,0 +1,52 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
run:
deadline: 5m
linters-settings:
govet:
check-shadowing: true
golint:
min-confidence: 0
linters:
disable-all: true
enable:
- gofmt
#- golint
- govet
#- varcheck
#- typecheck
#- gosimple
issues:
exclude-use-default: false
exclude:
# ignore the false-positive errors resulting from not including a comment above every `package` keyword
- should have a package comment, unless it's in another file for this package (golint)
# golint: Annoying issue about not having a comment. The rare codebase has such comments
# - (comment on exported (method|function|type|const)|should have( a package)? comment|comment should be of the form)
# errcheck: Almost all programs ignore errors on these functions and in most cases it's ok
- Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*printf?|os\.(Un)?Setenv). is not checked
# golint: False positive when tests are defined in package 'test'
- func name will be used as test\.Test.* by other packages, and that stutters; consider calling this
# staticcheck: Developers tend to write in C-style with an
# explicit 'break' in a 'switch', so it's ok to ignore
- ineffective break statement. Did you mean to break out of the outer loop
# gosec: Too many false-positives on 'unsafe' usage
- Use of unsafe calls should be audited
# gosec: Too many false-positives for parametrized shell calls
- Subprocess launch(ed with variable|ing should be audited)
# gosec: Duplicated errcheck checks
- G104
# gosec: Too many issues in popular repos
- (Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less)
# gosec: False positive is triggered by 'src, err := ioutil.ReadFile(filename)'
- Potential file inclusion via variable

24
vendor/github.com/hashicorp/raft/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,24 @@
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: MPL-2.0
language: go
go:
# Disabled until https://github.com/armon/go-metrics/issues/59 is fixed
# - 1.6
- 1.8
- 1.9
- 1.12
- tip
install:
- make deps
- curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin latest
script:
- make integ
notifications:
flowdock:
secure: fZrcf9rlh2IrQrlch1sHkn3YI7SKvjGnAl/zyV5D6NROe1Bbr6d3QRMuCXWWdhJHzjKmXk5rIzbqJhUc0PNF7YjxGNKSzqWMQ56KcvN1k8DzlqxpqkcA3Jbs6fXCWo2fssRtZ7hj/wOP1f5n6cc7kzHDt9dgaYJ6nO2fqNPJiTc=

Some files were not shown because too many files have changed in this diff.