Add S3 storage support

This commit is contained in:
Ingo Oppermann
2023-01-31 14:45:58 +01:00
parent c05e16b6a0
commit f519acfd71
351 changed files with 99292 additions and 1331 deletions

View File

@@ -15,6 +15,9 @@ import (
// DiskConfig is the config required to create a new disk
// filesystem.
type DiskConfig struct {
// Name is the name of the filesystem
Name string
// Dir is the path to the directory to observe
Dir string
@@ -109,7 +112,8 @@ func (f *diskFile) Read(p []byte) (int, error) {
// diskFilesystem implements the Filesystem interface
type diskFilesystem struct {
dir string
name string
dir string
// Max. size of the filesystem in bytes as
// given by the config
@@ -127,14 +131,20 @@ type diskFilesystem struct {
// that implements the Filesystem interface
func NewDiskFilesystem(config DiskConfig) (Filesystem, error) {
fs := &diskFilesystem{
name: config.Name,
maxSize: config.Size,
logger: config.Logger,
}
if fs.logger == nil {
fs.logger = log.New("DiskFS")
fs.logger = log.New("")
}
fs.logger = fs.logger.WithFields(log.Fields{
"name": fs.name,
"type": "disk",
})
if err := fs.Rebase(config.Dir); err != nil {
return nil, err
}
@@ -142,6 +152,10 @@ func NewDiskFilesystem(config DiskConfig) (Filesystem, error) {
return fs, nil
}
// Name returns the name of this filesystem as given in the DiskConfig.
func (fs *diskFilesystem) Name() string {
	return fs.name
}
// Base returns the base directory of this filesystem.
func (fs *diskFilesystem) Base() string {
	return fs.dir
}
@@ -172,6 +186,10 @@ func (fs *diskFilesystem) Rebase(base string) error {
return nil
}
// Type returns the type of this filesystem, "diskfs".
func (fs *diskFilesystem) Type() string {
	return "diskfs"
}
func (fs *diskFilesystem) Size() (int64, int64) {
// This is to cache the size for some time in order not to
// stress the underlying filesystem too much.

View File

@@ -20,10 +20,15 @@ func (d *dummyFile) Close() error { return nil }
// Name always returns an empty name.
func (d *dummyFile) Name() string { return "" }

// Stat returns a dummy file info.
func (d *dummyFile) Stat() (FileInfo, error) { return &dummyFileInfo{}, nil }
type dummyFilesystem struct{}
// dummyFilesystem is a no-op implementation of the Filesystem
// interface that only carries a name and a type.
type dummyFilesystem struct {
	name string
	typ  string
}
// Name returns the name given to NewDummyFilesystem.
func (d *dummyFilesystem) Name() string { return d.name }

// Base returns "/" as the fixed base path.
func (d *dummyFilesystem) Base() string { return "/" }

// Rebase accepts any base path and does nothing.
func (d *dummyFilesystem) Rebase(string) error { return nil }

// Type returns the type given to NewDummyFilesystem.
func (d *dummyFilesystem) Type() string { return d.typ }

// Size reports 0 bytes consumed and an unlimited capacity.
func (d *dummyFilesystem) Size() (int64, int64) { return 0, -1 }

// Resize does nothing.
func (d *dummyFilesystem) Resize(int64) {}

// Files reports 0 files.
func (d *dummyFilesystem) Files() int64 { return 0 }
@@ -35,6 +40,9 @@ func (d *dummyFilesystem) DeleteAll() int64 { return
// List returns an empty list of file infos.
func (d *dummyFilesystem) List(string) []FileInfo { return []FileInfo{} }
// NewDummyFilesystem return a dummy filesystem
func NewDummyFilesystem() Filesystem {
return &dummyFilesystem{}
// NewDummyFilesystem returns a dummy filesystem with the given name
// and type whose operations are all no-ops.
func NewDummyFilesystem(name, typ string) Filesystem {
	return &dummyFilesystem{
		name: name,
		typ:  typ,
	}
}

View File

@@ -38,12 +38,18 @@ type File interface {
// Filesystem is an interface that provides access to a filesystem.
type Filesystem interface {
// Name returns the name of this filesystem
Name() string
// Base returns the base path of this filesystem
Base() string
// Rebase sets a new base path for this filesystem
Rebase(string) error
// Type returns the type of this filesystem
Type() string
// Size returns the consumed size and capacity of the filesystem in bytes. The
// capacity is negative if the filesystem can consume as much space as it can.
Size() (int64, int64)
@@ -67,7 +73,7 @@ type Filesystem interface {
Store(path string, r io.Reader) (int64, bool, error)
// Delete removes a file at the given path from the filesystem. Returns the size of
// the remove file in bytes. The size is negative if the file doesn't exist.
// the removed file in bytes. The size is negative if the file doesn't exist.
Delete(path string) int64
// DeleteAll removes all files from the filesystem. Returns the size of the

View File

@@ -15,6 +15,9 @@ import (
// MemConfig is the config that is required for creating
// a new memory filesystem.
type MemConfig struct {
// Name is the name of the filesystem
Name string
// Base is the base path to be reported for this filesystem
Base string
@@ -107,6 +110,7 @@ func (f *memFile) Close() error {
}
type memFilesystem struct {
name string
base string
// Mapping of path to file
@@ -136,6 +140,7 @@ type memFilesystem struct {
// the Filesystem interface.
func NewMemFilesystem(config MemConfig) Filesystem {
fs := &memFilesystem{
name: config.Name,
base: config.Base,
maxSize: config.Size,
purge: config.Purge,
@@ -143,9 +148,11 @@ func NewMemFilesystem(config MemConfig) Filesystem {
}
if fs.logger == nil {
fs.logger = log.New("MemFS")
fs.logger = log.New("")
}
fs.logger = fs.logger.WithField("type", "mem")
fs.files = make(map[string]*memFile)
fs.dataPool = sync.Pool{
@@ -155,6 +162,7 @@ func NewMemFilesystem(config MemConfig) Filesystem {
}
fs.logger.WithFields(log.Fields{
"name": fs.name,
"size_bytes": fs.maxSize,
"purge": fs.purge,
}).Debug().Log("Created")
@@ -162,6 +170,10 @@ func NewMemFilesystem(config MemConfig) Filesystem {
return fs
}
// Name returns the name of this filesystem as given in the MemConfig.
func (fs *memFilesystem) Name() string {
	return fs.name
}
// Base returns the base path reported for this filesystem.
func (fs *memFilesystem) Base() string {
	return fs.base
}
@@ -172,6 +184,10 @@ func (fs *memFilesystem) Rebase(base string) error {
return nil
}
// Type returns the type of this filesystem, "memfs".
func (fs *memFilesystem) Type() string {
	return "memfs"
}
func (fs *memFilesystem) Size() (int64, int64) {
fs.filesLock.RLock()
defer fs.filesLock.RUnlock()

389
io/fs/s3.go Normal file
View File

@@ -0,0 +1,389 @@
package fs
import (
"context"
"fmt"
"io"
"time"
"github.com/datarhei/core/v16/glob"
"github.com/datarhei/core/v16/log"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
)
// S3Config is the config that is required for creating a new S3
// filesystem.
type S3Config struct {
	// Name is the name of the filesystem
	Name string
	// Base is the base path to be reported for this filesystem
	Base string
	// Endpoint is the address of the S3-compatible service
	Endpoint string
	// AccessKeyID is the access key for the service
	AccessKeyID string
	// SecretAccessKey is the secret for the access key
	SecretAccessKey string
	// Region is the region the bucket resides in
	Region string
	// Bucket is the name of the bucket backing this filesystem
	Bucket string
	// UseSSL determines whether to use a secure (TLS) connection
	UseSSL bool
	// Logger is the logger to use; a default one is created if nil
	Logger log.Logger
}
// s3fs implements the Filesystem interface on a bucket of an
// S3-compatible object storage.
type s3fs struct {
	// name of the filesystem as given in the config
	name string
	// base path reported by Base()
	base string
	// connection details as given in the config
	endpoint        string
	accessKeyID     string
	secretAccessKey string
	region          string
	bucket          string
	useSSL          bool
	// connected S3 client, set by NewS3Filesystem
	client *minio.Client
	logger log.Logger
}
// NewS3Filesystem creates a new filesystem backed by an S3-compatible
// object storage as described by the given config. It connects to the
// endpoint, verifies that the bucket is accessible, and creates the
// bucket if it doesn't exist yet. The returned filesystem implements
// the Filesystem interface.
func NewS3Filesystem(config S3Config) (Filesystem, error) {
	fs := &s3fs{
		name:            config.Name,
		base:            config.Base,
		endpoint:        config.Endpoint,
		accessKeyID:     config.AccessKeyID,
		secretAccessKey: config.SecretAccessKey,
		region:          config.Region,
		bucket:          config.Bucket,
		useSSL:          config.UseSSL,
		logger:          config.Logger,
	}

	if fs.logger == nil {
		fs.logger = log.New("")
	}

	client, err := minio.New(fs.endpoint, &minio.Options{
		Creds:  credentials.NewStaticV4(fs.accessKeyID, fs.secretAccessKey, ""),
		Region: fs.region,
		Secure: fs.useSSL,
	})
	if err != nil {
		return nil, fmt.Errorf("can't connect to s3 endpoint %s: %w", fs.endpoint, err)
	}

	fs.logger = fs.logger.WithFields(log.Fields{
		"name":     fs.name,
		"type":     "s3",
		"bucket":   fs.bucket,
		"region":   fs.region,
		"endpoint": fs.endpoint,
	})

	fs.logger.Debug().Log("Connected")

	// Bound the bucket check so a stalled endpoint can't block startup
	// indefinitely.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	exists, err := client.BucketExists(ctx, fs.bucket)
	if err != nil {
		fs.logger.WithError(err).Log("Can't access bucket")
		return nil, fmt.Errorf("can't access bucket %s: %w", fs.bucket, err)
	}

	if exists {
		fs.logger.Debug().Log("Bucket already exists")
	} else {
		fs.logger.Debug().Log("Bucket doesn't exist")
		if err := client.MakeBucket(ctx, fs.bucket, minio.MakeBucketOptions{Region: fs.region}); err != nil {
			fs.logger.WithError(err).Log("Can't create bucket")
			return nil, fmt.Errorf("can't create bucket %s: %w", fs.bucket, err)
		}
		fs.logger.Debug().Log("Bucket created")
	}

	fs.client = client

	return fs, nil
}
// Name returns the name of this filesystem as given in the S3Config.
func (fs *s3fs) Name() string {
	return fs.name
}
// Base returns the base path reported for this filesystem.
func (fs *s3fs) Base() string {
	return fs.base
}
// Rebase sets a new base path for this filesystem. It only updates the
// value reported by Base(); object keys in the bucket are not affected.
func (fs *s3fs) Rebase(base string) error {
	fs.base = base
	return nil
}
// Type returns the type of this filesystem, "s3fs".
func (fs *s3fs) Type() string {
	return "s3fs"
}
// Size returns the consumed size and the capacity of the filesystem in
// bytes. The capacity is always -1 because the bucket has no configured
// bound. Note that this lists every object in the bucket.
func (fs *s3fs) Size() (int64, int64) {
	var used int64

	for _, file := range fs.List("") {
		used += file.Size()
	}

	return used, -1
}
// Resize is a no-op: this filesystem reports an unlimited capacity
// (see Size), so there is nothing to resize.
func (fs *s3fs) Resize(size int64) {}
// Files returns the number of objects currently stored in the bucket.
func (fs *s3fs) Files() int64 {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
		Recursive: true,
	})

	nfiles := int64(0)

	for object := range ch {
		if object.Err != nil {
			fs.logger.WithError(object.Err).Log("Listing object failed")
			// A listing error is not a file; don't count it. This also
			// matches how List and DeleteAll skip errored entries.
			continue
		}

		nfiles++
	}

	return nfiles
}
// Symlink is not supported on object storage and always returns an
// error.
func (fs *s3fs) Symlink(oldname, newname string) error {
	return fmt.Errorf("not implemented")
}
// Open returns the file at the given path, or nil if the object
// doesn't exist or can't be accessed. The returned File wraps the
// object's data stream; the caller is responsible for closing it.
func (fs *s3fs) Open(path string) File {
	// No cancelable context here on purpose: the object's data stream
	// outlives this call and is read by the caller, so canceling on
	// return would abort the download.
	ctx := context.Background()

	object, err := fs.client.GetObject(ctx, fs.bucket, path, minio.GetObjectOptions{})
	if err != nil {
		fs.logger.Debug().WithField("key", path).Log("Not found")
		return nil
	}

	stat, err := object.Stat()
	if err != nil {
		fs.logger.Debug().WithField("key", path).Log("Stat failed")
		// Release the object's underlying stream; returning nil without
		// closing would leak it.
		object.Close()
		return nil
	}

	file := &s3File{
		data:         object,
		name:         stat.Key,
		size:         stat.Size,
		lastModified: stat.LastModified,
	}

	fs.logger.Debug().WithField("key", stat.Key).Log("Opened")

	return file
}
// Store writes the data from r to the object at the given path,
// overwriting any existing object. It returns the number of stored
// bytes and whether an existing object has been overwritten.
func (fs *s3fs) Store(path string, r io.Reader) (int64, bool, error) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Probe for an existing object so the caller can distinguish a
	// create from an overwrite. A stat error is treated as "doesn't
	// exist".
	overwrite := false

	_, err := fs.client.StatObject(ctx, fs.bucket, path, minio.StatObjectOptions{})
	if err == nil {
		overwrite = true
	}

	// The size -1 lets the client stream data of unknown length.
	// The zero-valued options are the defaults; no need to spell out
	// every field.
	info, err := fs.client.PutObject(ctx, fs.bucket, path, r, -1, minio.PutObjectOptions{})
	if err != nil {
		fs.logger.WithError(err).WithField("key", path).Log("Failed to store file")
		return -1, false, err
	}

	fs.logger.Debug().WithFields(log.Fields{
		"key":       path,
		"overwrite": overwrite,
	}).Log("Stored")

	return info.Size, overwrite, nil
}
// Delete removes the object at the given path from the bucket. It
// returns the size of the removed object in bytes, or -1 if the object
// doesn't exist or couldn't be removed.
func (fs *s3fs) Delete(path string) int64 {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Stat first so the size of the object can still be reported after
	// it is gone.
	stat, err := fs.client.StatObject(ctx, fs.bucket, path, minio.StatObjectOptions{})
	if err != nil {
		fs.logger.Debug().WithField("key", path).Log("Not found")
		return -1
	}

	if err := fs.client.RemoveObject(ctx, fs.bucket, path, minio.RemoveObjectOptions{
		GovernanceBypass: true,
	}); err != nil {
		fs.logger.WithError(err).WithField("key", stat.Key).Log("Failed to delete file")
		return -1
	}

	fs.logger.Debug().WithField("key", stat.Key).Log("Deleted")

	return stat.Size
}
// DeleteAll removes all objects from the bucket. It returns the total
// size in bytes of the objects that were submitted for removal (a
// best-effort figure: individually failed removals are logged but
// still counted).
func (fs *s3fs) DeleteAll() int64 {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	totalSize := int64(0)

	objectsCh := make(chan minio.ObjectInfo)

	// Feed the names of all objects in the bucket to objectsCh. The
	// totalSize updates are synchronized with the read below through
	// the channel operations: it is only read after RemoveObjects has
	// drained objectsCh.
	go func() {
		defer close(objectsCh)

		for object := range fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
			Recursive: true,
		}) {
			if object.Err != nil {
				fs.logger.WithError(object.Err).Log("Listing object failed")
				continue
			}

			totalSize += object.Size

			objectsCh <- object
		}
	}()

	// Use the same cancelable context as the listing above so both
	// sides shut down together.
	for err := range fs.client.RemoveObjects(ctx, fs.bucket, objectsCh, minio.RemoveObjectsOptions{
		GovernanceBypass: true,
	}) {
		fs.logger.WithError(err.Err).WithField("key", err.ObjectName).Log("Deleting object failed")
	}

	fs.logger.Debug().Log("Deleted all files")

	return totalSize
}
// List returns the file info of all objects in the bucket whose key
// matches the given glob pattern. An empty pattern matches every
// object.
func (fs *s3fs) List(pattern string) []FileInfo {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The zero-valued options are the defaults; only Recursive needs to
	// be set.
	ch := fs.client.ListObjects(ctx, fs.bucket, minio.ListObjectsOptions{
		Recursive: true,
	})

	files := []FileInfo{}

	for object := range ch {
		if object.Err != nil {
			fs.logger.WithError(object.Err).Log("Listing object failed")
			continue
		}

		if len(pattern) != 0 {
			// '/' is the separator character so wildcards don't cross
			// path boundaries.
			if ok, _ := glob.Match(pattern, object.Key, '/'); !ok {
				continue
			}
		}

		files = append(files, &s3FileInfo{
			name:         object.Key,
			size:         object.Size,
			lastModified: object.LastModified,
		})
	}

	return files
}
type s3FileInfo struct {
name string
size int64
lastModified time.Time
}
func (f *s3FileInfo) Name() string {
return f.name
}
func (f *s3FileInfo) Size() int64 {
return f.size
}
func (f *s3FileInfo) ModTime() time.Time {
return f.lastModified
}
func (f *s3FileInfo) IsLink() (string, bool) {
return "", false
}
func (f *s3FileInfo) IsDir() bool {
return false
}
// s3File implements the File interface for an object read from the
// bucket. It wraps the object's data stream handed out by Open.
type s3File struct {
	data         io.ReadCloser
	name         string
	size         int64
	lastModified time.Time
}

// Read reads from the object's data stream into p.
func (sf *s3File) Read(p []byte) (int, error) {
	return sf.data.Read(p)
}

// Close closes the object's data stream.
func (sf *s3File) Close() error {
	return sf.data.Close()
}

// Name returns the key of the object.
func (sf *s3File) Name() string {
	return sf.name
}

// Stat returns the file info that was captured when the object was
// opened.
func (sf *s3File) Stat() (FileInfo, error) {
	info := &s3FileInfo{
		name:         sf.name,
		size:         sf.size,
		lastModified: sf.lastModified,
	}

	return info, nil
}