Mirror of https://github.com/gofiber/storage.git (synced 2025-09-29 13:52:20 +08:00)

Compare commits: 11 commits, minio/v0.1 ... s3/v2.0.0
Commits (SHA1):

629c25208d
bf6aa0132c
4dfe51aa01
e735c69371
ae41d2f5c1
dc00bab703
d06fbfc7f3
e09d83368c
1ddcc7b3fc
db6c457d5c
26c755f312
.github/workflows/test-s3.yml (vendored, 9 changed lines)
@@ -20,14 +20,7 @@ jobs:
         - 1.21.x
     steps:
       - name: Install MinIO
-        run: |
-          docker run -d -p 9000:9000 --name minio minio/minio server /data
-
-          export AWS_ACCESS_KEY_ID=minioadmin
-          export AWS_SECRET_ACCESS_KEY=minioadmin
-          export AWS_EC2_METADATA_DISABLED=true
-
-          aws --endpoint-url http://127.0.0.1:9000/ s3 mb s3://testbucket
+        run: docker run -d -p 9000:9000 --name minio minio/minio server /data
       - name: Fetch Repository
         uses: actions/checkout@v4
       - name: Install Go
s3/README.md (43 changed lines)
@@ -31,8 +31,15 @@ func (s *Storage) Delete(key string) error
func (s *Storage) Reset() error
func (s *Storage) Close() error
func (s *Storage) Conn() *s3.Client

// Additional useful methods.
func (s *Storage) CreateBucket(bucket string) error
func (s *Storage) DeleteBucket(bucket string) error
func (s *Storage) SetWithChecksum(key string, val []byte, checksum map[types.ChecksumAlgorithm][]byte) error
```

### Installation

S3 is tested on the two latest [Go versions](https://golang.org/dl/) with support for modules, so make sure to initialize a module first if you haven't done that yet:

```bash
go mod init github.com/<user>/<repo>
@@ -43,7 +50,9 @@ go get github.com/gofiber/storage/s3/v2
```

### Examples

Import the storage package.

```go
import "github.com/gofiber/storage/s3/v2"
```
@@ -62,6 +71,38 @@ store := s3.New(s3.Config{
})
```

Create an object with `Set()`:
```go
err := store.Set("my-key", []byte("my-value"))
```

Or call `SetWithChecksum()` to create an object with a checksum, asking the S3 server to verify data integrity on its side:

> Currently 4 algorithms are supported:
> - types.ChecksumAlgorithmCrc32 (`CRC32`)
> - types.ChecksumAlgorithmCrc32c (`CRC32C`)
> - types.ChecksumAlgorithmSha1 (`SHA1`)
> - types.ChecksumAlgorithmSha256 (`SHA256`)
>
> For more information, see [PutObjectInput](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3#PutObjectInput).

```go
key := "my-key"
val := []byte("my-value")

hash := sha256.New()
hash.Write(val)
sha256sum := hash.Sum(nil)

// import "github.com/aws/aws-sdk-go-v2/service/s3/types"
checksum := map[types.ChecksumAlgorithm][]byte{
	types.ChecksumAlgorithmSha256: sha256sum,
}

err := store.SetWithChecksum(key, val, checksum)
```

### Config
```go
// Config defines the config for storage.
@@ -104,7 +145,9 @@ type Credentials struct {
```

### Default Config

The default configuration lacks Bucket, Region, and Endpoint, which are all required and must be overwritten:

```go
// ConfigDefault is the default config
var ConfigDefault = Config{
s3/init_test.go (new file, 38 lines)
@@ -0,0 +1,38 @@
package s3

import (
	"os"
	"testing"
	"time"
)

const (
	bucket = "testbucket"
)

var testStore *Storage

func TestMain(m *testing.M) {
	testStore = New(
		Config{
			Bucket:   bucket,
			Endpoint: "http://127.0.0.1:9000/",
			Region:   "us-east-1",
			Credentials: Credentials{
				AccessKey:       "minioadmin",
				SecretAccessKey: "minioadmin",
			},
			RequestTimeout: 3 * time.Second,
		},
	)

	// Create test bucket.
	_ = testStore.CreateBucket(bucket)

	exitVal := m.Run()

	// Delete test bucket.
	_ = testStore.DeleteBucket(bucket)

	os.Exit(exitVal)
}
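The `RequestTimeout` set here is what bounds the `s.requestContext()` calls made by the new bucket and checksum methods further down. That helper is not part of this diff, so the following is only a hedged sketch of the usual pattern, with `storageLike` and its `requestTimeout` field standing in for the package's real, unshown types:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// storageLike is a hypothetical stand-in for the package's Storage struct;
// requestTimeout mirrors what Config.RequestTimeout is presumably stored as.
type storageLike struct {
	requestTimeout time.Duration
}

// requestContext sketches the common pattern: derive a context bounded by the
// configured timeout, or an unbounded one when no timeout was set.
func (s *storageLike) requestContext() (context.Context, context.CancelFunc) {
	if s.requestTimeout > 0 {
		return context.WithTimeout(context.Background(), s.requestTimeout)
	}
	return context.Background(), func() {}
}

func main() {
	s := &storageLike{requestTimeout: 3 * time.Second}

	ctx, cancel := s.requestContext()
	defer cancel()

	deadline, ok := ctx.Deadline()
	fmt.Println(ok, deadline) // true, roughly now + 3s
}
```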
s3/s3.go (6 changed lines)
@@ -159,7 +159,7 @@ func (s *Storage) Close() error {
 	return nil
 }
 
-// Return database client
+// Conn returns database client.
 func (s *Storage) Conn() *s3.Client {
 	return s.svc
 }
@@ -186,11 +186,11 @@ func returnAWSConfig(cfg Config) (aws.Config, error) {
 	})
 
 	if cfg.Credentials != (Credentials{}) {
-		credentials := credentials.NewStaticCredentialsProvider(cfg.Credentials.AccessKey, cfg.Credentials.SecretAccessKey, "")
+		creds := credentials.NewStaticCredentialsProvider(cfg.Credentials.AccessKey, cfg.Credentials.SecretAccessKey, "")
 		return awsconfig.LoadDefaultConfig(context.TODO(),
 			awsconfig.WithRegion(cfg.Region),
 			awsconfig.WithEndpointResolverWithOptions(endpoint),
-			awsconfig.WithCredentialsProvider(credentials),
+			awsconfig.WithCredentialsProvider(creds),
 			awsconfig.WithRetryer(func() aws.Retryer {
 				return retry.AddWithMaxAttempts(retry.NewStandard(), cfg.MaxAttempts)
 			}),
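Beyond the doc-comment fix, `Conn()` is the escape hatch to the underlying `*s3.Client`, so anything the Storage interface does not wrap can still be reached through the SDK. A minimal sketch, assuming a store configured like the MinIO-backed test setup above (the bucket listing itself is illustrative and not part of this changeset):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go-v2/aws"
	awss3 "github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/gofiber/storage/s3/v2"
)

func main() {
	// Illustrative settings matching the tests in this changeset.
	store := s3.New(s3.Config{
		Bucket:   "testbucket",
		Endpoint: "http://127.0.0.1:9000/",
		Region:   "us-east-1",
		Credentials: s3.Credentials{
			AccessKey:       "minioadmin",
			SecretAccessKey: "minioadmin",
		},
	})

	// Conn() exposes the raw AWS SDK client for operations the Storage
	// interface does not cover, e.g. listing objects in the bucket.
	client := store.Conn()

	out, err := client.ListObjectsV2(context.TODO(), &awss3.ListObjectsV2Input{
		Bucket: aws.String("testbucket"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, obj := range out.Contents {
		fmt.Println(aws.ToString(obj.Key))
	}
}
```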
s3/s3_methods.go (new file, 83 lines)
@@ -0,0 +1,83 @@
package s3

import (
	"bytes"
	"encoding/base64"
	"fmt"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/s3"
	"github.com/aws/aws-sdk-go-v2/service/s3/types"
)

// Additional methods for S3, but not required by gofiber Storage interface.

// CreateBucket creates a new bucket.
func (s *Storage) CreateBucket(bucket string) error {
	ctx, cancel := s.requestContext()
	defer cancel()

	_, err := s.svc.CreateBucket(ctx, &s3.CreateBucketInput{
		Bucket: aws.String(bucket),
	})

	return err
}

// DeleteBucket deletes a bucket.
func (s *Storage) DeleteBucket(bucket string) error {
	ctx, cancel := s.requestContext()
	defer cancel()

	_, err := s.svc.DeleteBucket(ctx, &s3.DeleteBucketInput{
		Bucket: aws.String(bucket),
	})

	return err
}

// SetWithChecksum sets key with value and checksum.
//
// Currently 4 algorithms are supported:
// - types.ChecksumAlgorithmCrc32 (`CRC32`)
// - types.ChecksumAlgorithmCrc32c (`CRC32C`)
// - types.ChecksumAlgorithmSha1 (`SHA1`)
// - types.ChecksumAlgorithmSha256 (`SHA256`)
//
// For more information, see [PutObjectInput](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/s3#PutObjectInput).
func (s *Storage) SetWithChecksum(key string, val []byte, checksum map[types.ChecksumAlgorithm][]byte) error {
	if len(key) <= 0 {
		return nil
	}

	poi := s3.PutObjectInput{
		Bucket: &s.bucket,
		Key:    aws.String(key),
		Body:   bytes.NewReader(val),
	}

	for alg, sum := range checksum {
		// S3 requires base64 encoded checksum.
		b64str := base64.StdEncoding.EncodeToString(sum)

		switch alg {
		case types.ChecksumAlgorithmCrc32:
			poi.ChecksumCRC32 = aws.String(b64str)
		case types.ChecksumAlgorithmCrc32c:
			poi.ChecksumCRC32C = aws.String(b64str)
		case types.ChecksumAlgorithmSha1:
			poi.ChecksumSHA1 = aws.String(b64str)
		case types.ChecksumAlgorithmSha256:
			poi.ChecksumSHA256 = aws.String(b64str)
		default:
			return fmt.Errorf("invalid checksum algorithm: %s", alg)
		}
	}

	ctx, cancel := s.requestContext()
	defer cancel()

	_, err := s.uploader.Upload(ctx, &poi)

	return err
}
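Since `SetWithChecksum` only base64-encodes the bytes it receives, the caller is responsible for producing the checksum itself. A minimal sketch of the CRC32 variant, assuming a store configured like the tests in this changeset and that S3 expects the big-endian 4-byte CRC32 value (which `hash/crc32`'s `Sum(nil)` produces); the key and value are illustrative:

```go
package main

import (
	"hash/crc32"
	"log"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/gofiber/storage/s3/v2"
)

func main() {
	// Illustrative settings matching the MinIO-backed tests above.
	store := s3.New(s3.Config{
		Bucket:   "testbucket",
		Endpoint: "http://127.0.0.1:9000/",
		Region:   "us-east-1",
		Credentials: s3.Credentials{
			AccessKey:       "minioadmin",
			SecretAccessKey: "minioadmin",
		},
	})

	key := "crc32-key" // hypothetical key
	val := []byte("my-value")

	// crc32.NewIEEE() is a hash.Hash32; Sum(nil) yields the 4-byte,
	// big-endian IEEE CRC32 of everything written so far.
	h := crc32.NewIEEE()
	h.Write(val)
	sum := h.Sum(nil)

	checksum := map[types.ChecksumAlgorithm][]byte{
		types.ChecksumAlgorithmCrc32: sum,
	}

	if err := store.SetWithChecksum(key, val, checksum); err != nil {
		log.Fatal(err)
	}
}
```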
s3/s3_methods_test.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package s3

import (
	"crypto/sha256"
	"testing"

	"github.com/aws/aws-sdk-go-v2/service/s3/types"
	"github.com/stretchr/testify/require"
)

func Test_S3_CreateDeleteBucket(t *testing.T) {
	bkt := "test-new-bucket"

	err := testStore.CreateBucket(bkt)
	require.NoError(t, err)

	err = testStore.DeleteBucket(bkt)
	require.NoError(t, err)
}

func Test_S3_SetWithChecksum(t *testing.T) {
	var (
		key = "set-with-checksum"
		val = []byte("doe")
	)

	// Create SHA-256 hash and get checksum.
	sha256Hash := sha256.New()
	sha256Hash.Write(val)
	sha256sum := sha256Hash.Sum(nil)

	checksum := map[types.ChecksumAlgorithm][]byte{
		types.ChecksumAlgorithmSha256: sha256sum,
	}

	err := testStore.SetWithChecksum(key, val, checksum)
	require.NoError(t, err)

	result, err := testStore.Get(key)
	require.NoError(t, err)

	// Compare value.
	require.Equal(t, result, val)

	// Compare checksum.
	hash2 := sha256.New()
	hash2.Write(result)
	sha256sum2 := hash2.Sum(nil)
	require.Equal(t, sha256sum, sha256sum2)
}
(existing s3 package test file)

@@ -6,18 +6,6 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-var testStore = New(
-	Config{
-		Bucket:   "testbucket",
-		Endpoint: "http://127.0.0.1:9000/",
-		Region:   "us-east-1",
-		Credentials: Credentials{
-			AccessKey:       "minioadmin",
-			SecretAccessKey: "minioadmin",
-		},
-	},
-)
-
 func Test_S3_Set(t *testing.T) {
 	var (
 		key = "john"